// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
 * Changes to use preallocated sigqueue structures
 * to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/task_work.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>	/* for syscall_get_* */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even an ignored signal, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only the callers who know it is
	 * safe to clear it do so.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

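/*
 * Illustrative example of the dequeue priority this mask creates: if a
 * thread has both SIGSEGV (synchronous, e.g. raised by a fault) and
 * SIGUSR1 pending and unblocked, next_signal() below returns the
 * SIGSEGV first, because the first word of the pending set is narrowed
 * to SYNCHRONOUS_MASK whenever any synchronous signal is present in it.
 */
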
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask on @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING. If a stop signo is being set, the existing signo is
 * cleared. If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer. Note that we don't need any further
 * locking. @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop. Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct ucounts *ucounts = NULL;
	long sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	ucounts = task_ucounts(t);
	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	rcu_read_unlock();
	if (!sigpending)
		return NULL;

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->ucounts = ucounts;
	}
	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (q->ucounts) {
		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
		q->ucounts = NULL;
	}
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal. Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue. This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space. So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
		   kernel_siginfo_t *info, enum pid_type *type)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	*type = PIDTYPE_PID;
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		*type = PIDTYPE_TGID;
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Restarting the timer in the signal
		 * dequeue path also reduces timer noise on heavily
		 * loaded !highres systems.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal. Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL). So those cases clear this
		 * shared flag after we've set it. Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled. That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks. Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);

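/*
 * Sketch of the calling convention documented above; this is an
 * illustrative, hypothetical caller, not one taken from this file.
 * The siglock must already be held:
 *
 *	kernel_siginfo_t info;
 *	enum pid_type type;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info, &type);
 *	spin_unlock_irq(&current->sighand->siglock);
 *
 * Real callers (e.g. the sigtimedwait() path) pass a mask adjusted to
 * the set of signals they are willing to dequeue.
 */
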
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! We rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	lockdep_assert_held(&t->sighand->siglock);

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap, which is cleared on the
 * next TRAP_STOP, to notify the ptracer of an event. @t must have been
 * seized by the ptracer.
 *
 * If @t is running, a STOP trap will be taken. If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it
 * can re-trap for the new event. If trapped otherwise, the STOP trap
 * will eventually be taken without returning to userland, after the
 * existing traps are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	lockdep_assert_held(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling. This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & SIGNAL_GROUP_EXIT) {
		if (signal->core_state)
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, drop the signal.
		 */
		return false;
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal. Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED))) {
				t->jobctl &= ~JOBCTL_STOPPED;
				wake_up_state(t, __TASK_STOPPED);
			} else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG. Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals. Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread. If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

static int __send_signal_locked(int sig, struct kernel_siginfo *info,
				struct task_struct *t, enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	lockdep_assert_held(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism. It is implementation
	 * defined whether kill() does so. We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort. We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information. We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;

	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

int send_signal_locked(int sig, struct kernel_siginfo *info,
		       struct task_struct *t, enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special or have ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal_locked(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = task_pt_regs(current);

	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;

		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

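/*
 * For example, booting with "print-fatal-signals=1" on the kernel
 * command line enables the diagnostics in print_fatal_signal() above;
 * get_option() parses the value as a plain integer.
 */
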
int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
		     enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal_locked(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

enum sig_handler {
	HANDLER_CURRENT, /* If reachable use the current handler */
	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
	HANDLER_EXIT,	 /* Only visible as the process exit code */
};

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
		       enum sig_handler handler)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
		action->sa.sa_handler = SIG_DFL;
		if (handler == HANDLER_EXIT)
			action->sa.sa_flags |= SA_IMMUTABLE;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
	 */
	if (action->sa.sa_handler == SIG_DFL &&
	    (!t->ptrace || (handler == HANDLER_EXIT)))
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

#ifdef CONFIG_LOCKDEP
void lockdep_assert_task_sighand_held(struct task_struct *task)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	sighand = rcu_dereference(task->sighand);
	if (sighand)
		lockdep_assert_held(&sighand->siglock);
	else
		WARN_ON_ONCE(1);
	rcu_read_unlock();
}
#endif

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again. If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong. The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace, as the 32bit address will be encoded in the
 * low 32bits of the pointer, and those low 32bits will be stored at a
 * higher address than a 32bit pointer expects. So userspace will not
 * see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong. Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
			    !same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

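/*
 * send_sig(sig, p, 1) therefore sends a kernel-internal signal:
 * SEND_SIG_PRIV is queued with si_code == SI_KERNEL and with "force"
 * set in send_signal_locked(), while send_sig(sig, p, 0) behaves like
 * a signal sent from userspace (SEND_SIG_NOINFO, si_code == SI_USER).
 */
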
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

void force_fatal_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
}

void force_exit_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_EXIT);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
	if (sig == SIGSEGV)
		force_fatal_sig(SIGSEGV);
	else
		force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_BNDERR;
	info.si_addr = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_PKUERR;
	info.si_addr = addr;
	info.si_pkey = pkey;
	return force_sig_info(&info);
}
#endif

int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_PERF;
	info.si_addr = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	/*
	 * Signals generated by perf events should not terminate the whole
	 * process if SIGTRAP is blocked, however, delivering the signal
	 * asynchronously is better than not delivering at all. But tell user
	 * space if the signal was asynchronous, so it can clearly be
	 * distinguished from normal synchronous ones.
	 */
	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
				     TRAP_PERF_FLAG_ASYNC :
				     0;

	return send_sig_info(info.si_signo, &info, current);
}

/**
 * force_sig_seccomp - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 * @force_coredump: true to trigger a coredump
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
int force_sig_seccomp(int syscall, int reason, bool force_coredump)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch(current);
	info.si_syscall = syscall;
	return force_sig_info_to_task(&info, current,
				      force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
}

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_trapno = trapno;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			  struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_trapno = trapno;
	return send_sig_info(info.si_signo, &info, t);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures. This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions". In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create. If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}

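/*
 * Sketch of the preallocation pattern described above (illustrative
 * only; the POSIX timer code is the real user of this API):
 *
 *	struct sigqueue *q = sigqueue_alloc();	// at timer_create() time
 *	if (!q)
 *		return -EAGAIN;			// report failure up front
 *	...					// fill in q->info
 *	send_sigqueue(q, pid, PIDTYPE_TGID);	// at expiry; returns 1 if
 *						// ignored, -1 if the task
 *						// is gone, never -EAGAIN
 *	...
 *	sigqueue_free(q);			// at timer deletion
 */
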
1936void sigqueue_free(struct sigqueue *q)
1937{
1938 unsigned long flags;
1939 spinlock_t *lock = ¤t->sighand->siglock;
1940
1941 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1942 /*
1943 * We must hold ->siglock while testing q->list
1944 * to serialize with collect_signal() or with
1945 * __exit_signal()->flush_sigqueue().
1946 */
1947 spin_lock_irqsave(lock, flags);
1948 q->flags &= ~SIGQUEUE_PREALLOC;
1949 /*
1950 * If it is queued it will be freed when dequeued,
1951 * like the "regular" sigqueue.
1952 */
1953 if (!list_empty(&q->list))
1954 q = NULL;
1955 spin_unlock_irqrestore(lock, flags);
1956
1957 if (q)
1958 __sigqueue_free(q);
1959}
1960
1961int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1962{
1963 int sig = q->info.si_signo;
1964 struct sigpending *pending;
1965 struct task_struct *t;
1966 unsigned long flags;
1967 int ret, result;
1968
1969 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1970
1971 ret = -1;
1972 rcu_read_lock();
1973 t = pid_task(pid, type);
1974 if (!t || !likely(lock_task_sighand(t, &flags)))
1975 goto ret;
1976
1977 ret = 1; /* the signal is ignored */
1978 result = TRACE_SIGNAL_IGNORED;
1979 if (!prepare_signal(sig, t, false))
1980 goto out;
1981
1982 ret = 0;
1983 if (unlikely(!list_empty(&q->list))) {
1984 /*
1985 * If an SI_TIMER entry is already queued, just increment
1986 * the overrun count.
1987 */
1988 BUG_ON(q->info.si_code != SI_TIMER);
1989 q->info.si_overrun++;
1990 result = TRACE_SIGNAL_ALREADY_PENDING;
1991 goto out;
1992 }
1993 q->info.si_overrun = 0;
1994
1995 signalfd_notify(t, sig);
1996 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1997 list_add_tail(&q->list, &pending->list);
1998 sigaddset(&pending->signal, sig);
1999 complete_signal(sig, t, type);
2000 result = TRACE_SIGNAL_DELIVERED;
2001out:
2002 trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
2003 unlock_task_sighand(t, &flags);
2004ret:
2005 rcu_read_unlock();
2006 return ret;
2007}
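
/*
 * Example: the POSIX timer code pairs the helpers above roughly as
 * sketched below (simplified; the real kernel/time/posix-timers.c code
 * carries more state and locking):
 *
 *	timer->sigq = sigqueue_alloc();		at timer_create(2);
 *	if (!timer->sigq)			a NULL here becomes -EAGAIN
 *		return -EAGAIN;
 *
 *	send_sigqueue(timer->sigq, pid, type);	at each timer expiry
 *
 *	sigqueue_free(timer->sigq);		at timer_delete(2) / exit
 */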
2008
2009static void do_notify_pidfd(struct task_struct *task)
2010{
2011 struct pid *pid;
2012
2013 WARN_ON(task->exit_state == 0);
2014 pid = task_pid(task);
2015 wake_up_all(&pid->wait_pidfd);
2016}
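
/*
 * Example: this wakeup is what makes a pidfd pollable. A minimal
 * userspace watcher (sketch) blocks until the observed task exits:
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);	returns once the task has exited
 */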
2017
2018/*
2019 * Let a parent know about the death of a child.
2020 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2021 *
2022 * Returns true if our parent ignored us and so we've switched to
2023 * self-reaping.
2024 */
2025bool do_notify_parent(struct task_struct *tsk, int sig)
2026{
2027 struct kernel_siginfo info;
2028 unsigned long flags;
2029 struct sighand_struct *psig;
2030 bool autoreap = false;
2031 u64 utime, stime;
2032
2033 WARN_ON_ONCE(sig == -1);
2034
2035 /* do_notify_parent_cldstop should have been called instead. */
2036 WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
2037
2038 WARN_ON_ONCE(!tsk->ptrace &&
2039 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2040
2041 /* Wake up all pidfd waiters */
2042 do_notify_pidfd(tsk);
2043
2044 if (sig != SIGCHLD) {
2045 /*
2046 * This is only possible if parent == real_parent.
2047 * Check if it has changed security domain.
2048 */
2049 if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2050 sig = SIGCHLD;
2051 }
2052
2053 clear_siginfo(&info);
2054 info.si_signo = sig;
2055 info.si_errno = 0;
2056 /*
2057 * We are under tasklist_lock here so our parent is tied to
2058 * us and cannot change.
2059 *
2060 * task_active_pid_ns will always return the same pid namespace
2061 * until a task passes through release_task.
2062 *
2063 * write_lock() currently calls preempt_disable() which is the
2064 * same as rcu_read_lock(), but according to Oleg, it is not
2065 * correct to rely on this.
2066 */
2067 rcu_read_lock();
2068 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2069 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2070 task_uid(tsk));
2071 rcu_read_unlock();
2072
2073 task_cputime(tsk, &utime, &stime);
2074 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2075 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2076
2077 info.si_status = tsk->exit_code & 0x7f;
2078 if (tsk->exit_code & 0x80)
2079 info.si_code = CLD_DUMPED;
2080 else if (tsk->exit_code & 0x7f)
2081 info.si_code = CLD_KILLED;
2082 else {
2083 info.si_code = CLD_EXITED;
2084 info.si_status = tsk->exit_code >> 8;
2085 }
2086
2087 psig = tsk->parent->sighand;
2088 spin_lock_irqsave(&psig->siglock, flags);
2089 if (!tsk->ptrace && sig == SIGCHLD &&
2090 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2091 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2092 /*
2093 * We are exiting and our parent doesn't care. POSIX.1
2094 * defines special semantics for setting SIGCHLD to SIG_IGN
2095 * or setting the SA_NOCLDWAIT flag: we should be reaped
2096 * automatically and not left for our parent's wait4 call.
2097 * Rather than having the parent do it as a magic kind of
2098 * signal handler, we just set this to tell do_exit that we
2099 * can be cleaned up without becoming a zombie. Note that
2100 * we still call __wake_up_parent in this case, because a
2101 * blocked sys_wait4 might now return -ECHILD.
2102 *
2103 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2104 * is implementation-defined: we do (if you don't want
2105 * it, just use SIG_IGN instead).
2106 */
2107 autoreap = true;
2108 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2109 sig = 0;
2110 }
2111 /*
2112 * Send with __send_signal as si_pid and si_uid are in the
2113 * parent's namespaces.
2114 */
2115 if (valid_signal(sig) && sig)
2116 __send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2117 __wake_up_parent(tsk, tsk->parent);
2118 spin_unlock_irqrestore(&psig->siglock, flags);
2119
2120 return autoreap;
2121}
2122
2123/**
2124 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2125 * @tsk: task reporting the state change
2126 * @for_ptracer: the notification is for ptracer
2127 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2128 *
2129 * Notify @tsk's parent that the stopped/continued state has changed. If
2130 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2131 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2132 *
2133 * CONTEXT:
2134 * Must be called with tasklist_lock at least read locked.
2135 */
2136static void do_notify_parent_cldstop(struct task_struct *tsk,
2137 bool for_ptracer, int why)
2138{
2139 struct kernel_siginfo info;
2140 unsigned long flags;
2141 struct task_struct *parent;
2142 struct sighand_struct *sighand;
2143 u64 utime, stime;
2144
2145 if (for_ptracer) {
2146 parent = tsk->parent;
2147 } else {
2148 tsk = tsk->group_leader;
2149 parent = tsk->real_parent;
2150 }
2151
2152 clear_siginfo(&info);
2153 info.si_signo = SIGCHLD;
2154 info.si_errno = 0;
2155 /*
2156 * see comment in do_notify_parent() about the following 4 lines
2157 */
2158 rcu_read_lock();
2159 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2160 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2161 rcu_read_unlock();
2162
2163 task_cputime(tsk, &utime, &stime);
2164 info.si_utime = nsec_to_clock_t(utime);
2165 info.si_stime = nsec_to_clock_t(stime);
2166
2167 info.si_code = why;
2168 switch (why) {
2169 case CLD_CONTINUED:
2170 info.si_status = SIGCONT;
2171 break;
2172 case CLD_STOPPED:
2173 info.si_status = tsk->signal->group_exit_code & 0x7f;
2174 break;
2175 case CLD_TRAPPED:
2176 info.si_status = tsk->exit_code & 0x7f;
2177 break;
2178 default:
2179 BUG();
2180 }
2181
2182 sighand = parent->sighand;
2183 spin_lock_irqsave(&sighand->siglock, flags);
2184 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2185 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2186 send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
2187 /*
2188 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2189 */
2190 __wake_up_parent(tsk, parent);
2191 spin_unlock_irqrestore(&sighand->siglock, flags);
2192}
2193
2194/*
2195 * This must be called with current->sighand->siglock held.
2196 *
2197 * This should be the path for all ptrace stops.
2198 * We always set current->last_siginfo while stopped here.
2199 * That makes it a way to test a stopped process for
2200 * being ptrace-stopped vs being job-control-stopped.
2201 *
2202 * Returns the signal the ptracer requested the code resume
2203 * with. If the code did not stop because the tracer is gone,
2204 * the stop signal remains unchanged.
2205 */
2206static int ptrace_stop(int exit_code, int why, unsigned long message,
2207 kernel_siginfo_t *info)
2208 __releases(&current->sighand->siglock)
2209 __acquires(&current->sighand->siglock)
2210{
2211 bool gstop_done = false;
2212
2213 if (arch_ptrace_stop_needed()) {
2214 /*
2215 * The arch code has something special to do before a
2216 * ptrace stop. This is allowed to block, e.g. for faults
2217 * on user stack pages. We can't keep the siglock while
2218 * calling arch_ptrace_stop, so we must release it now.
2219 * To preserve proper semantics, we must do this before
2220 * any signal bookkeeping like checking group_stop_count.
2221 */
2222 spin_unlock_irq(&current->sighand->siglock);
2223 arch_ptrace_stop();
2224 spin_lock_irq(&current->sighand->siglock);
2225 }
2226
2227 /*
2228 * After this point ptrace_signal_wake_up or signal_wake_up
2229 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2230 * signal comes in. Handle previous ptrace_unlinks and fatal
2231 * signals here to prevent ptrace_stop sleeping in schedule.
2232 */
2233 if (!current->ptrace || __fatal_signal_pending(current))
2234 return exit_code;
2235
2236 set_special_state(TASK_TRACED);
2237 current->jobctl |= JOBCTL_TRACED;
2238
2239 /*
2240 * We're committing to trapping. TRACED should be visible before
2241 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2242 * Also, transition to TRACED and updates to ->jobctl should be
2243 * atomic with respect to siglock and should be done after the arch
2244 * hook as siglock is released and regrabbed across it.
2245 *
2246 * TRACER TRACEE
2247 *
2248 * ptrace_attach()
2249 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2250 * do_wait()
2251 * set_current_state() smp_wmb();
2252 * ptrace_do_wait()
2253 * wait_task_stopped()
2254 * task_stopped_code()
2255 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2256 */
2257 smp_wmb();
2258
2259 current->ptrace_message = message;
2260 current->last_siginfo = info;
2261 current->exit_code = exit_code;
2262
2263 /*
2264 * If @why is CLD_STOPPED, we're trapping to participate in a group
2265 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
2266 * across siglock relocks since INTERRUPT was scheduled, PENDING
2267 * could be clear now. We act as if SIGCONT is received after
2268 * TASK_TRACED is entered - ignore it.
2269 */
2270 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2271 gstop_done = task_participate_group_stop(current);
2272
2273 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2274 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2275 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2276 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2277
2278 /* entering a trap, clear TRAPPING */
2279 task_clear_jobctl_trapping(current);
2280
2281 spin_unlock_irq(&current->sighand->siglock);
2282 read_lock(&tasklist_lock);
2283 /*
2284 * Notify parents of the stop.
2285 *
2286 * While ptraced, there are two parents - the ptracer and
2287 * the real_parent of the group_leader. The ptracer should
2288 * know about every stop while the real parent is only
2289 * interested in the completion of group stop. The states
2290 * for the two don't interact with each other. Notify
2291 * separately unless they're gonna be duplicates.
2292 */
2293 if (current->ptrace)
2294 do_notify_parent_cldstop(current, true, why);
2295 if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2296 do_notify_parent_cldstop(current, false, why);
2297
2298 /*
2299 * Don't want to allow preemption here, because
2300 * sys_ptrace() needs this task to be inactive.
2301 *
2302 * XXX: implement read_unlock_no_resched().
2303 */
2304 preempt_disable();
2305 read_unlock(&tasklist_lock);
2306 cgroup_enter_frozen();
2307 preempt_enable_no_resched();
2308 schedule();
2309 cgroup_leave_frozen(true);
2310
2311 /*
2312 * We are back. Now reacquire the siglock before touching
2313 * last_siginfo, so that we are sure to have synchronized with
2314 * any signal-sending on another CPU that wants to examine it.
2315 */
2316 spin_lock_irq(&current->sighand->siglock);
2317 exit_code = current->exit_code;
2318 current->last_siginfo = NULL;
2319 current->ptrace_message = 0;
2320 current->exit_code = 0;
2321
2322 /* LISTENING can be set only during STOP traps, clear it */
2323 current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2324
2325 /*
2326 * Queued signals ignored us while we were stopped for tracing.
2327 * So check for any that we should take before resuming user mode.
2328 * This sets TIF_SIGPENDING, but never clears it.
2329 */
2330 recalc_sigpending_tsk(current);
2331 return exit_code;
2332}
2333
2334static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2335{
2336 kernel_siginfo_t info;
2337
2338 clear_siginfo(&info);
2339 info.si_signo = signr;
2340 info.si_code = exit_code;
2341 info.si_pid = task_pid_vnr(current);
2342 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2343
2344 /* Let the debugger run. */
2345 return ptrace_stop(exit_code, why, message, &info);
2346}
2347
2348int ptrace_notify(int exit_code, unsigned long message)
2349{
2350 int signr;
2351
2352 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2353 if (unlikely(task_work_pending(current)))
2354 task_work_run();
2355
2356 spin_lock_irq(&current->sighand->siglock);
2357 signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2358 spin_unlock_irq(&current->sighand->siglock);
2359 return signr;
2360}
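
/*
 * Example: callers report ptrace events by packing the event number
 * into the exit code; ptrace_event() in <linux/ptrace.h> does roughly:
 *
 *	ptrace_notify(SIGTRAP | (event << 8), message);
 */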
2361
2362/**
2363 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2364 * @signr: signr causing group stop if initiating
2365 *
2366 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2367 * and participate in it. If already set, participate in the existing
2368 * group stop. If participated in a group stop (and thus slept), %true is
2369 * returned with siglock released.
2370 *
2371 * If ptraced, this function doesn't handle stop itself. Instead,
2372 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2373 * untouched. The caller must ensure that INTERRUPT trap handling takes
2374 * place afterwards.
2375 *
2376 * CONTEXT:
2377 * Must be called with @current->sighand->siglock held, which is released
2378 * on %true return.
2379 *
2380 * RETURNS:
2381 * %false if group stop is already cancelled or ptrace trap is scheduled.
2382 * %true if participated in group stop.
2383 */
2384static bool do_signal_stop(int signr)
2385 __releases(&current->sighand->siglock)
2386{
2387 struct signal_struct *sig = current->signal;
2388
2389 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2390 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2391 struct task_struct *t;
2392
2393 /* signr will be recorded in task->jobctl for retries */
2394 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2395
2396 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2397 unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2398 unlikely(sig->group_exec_task))
2399 return false;
2400 /*
2401 * There is no group stop already in progress. We must
2402 * initiate one now.
2403 *
2404 * While ptraced, a task may be resumed while group stop is
2405 * still in effect and then receive a stop signal and
2406 * initiate another group stop. This deviates from the
2407 * usual behavior as two consecutive stop signals can't
2408 * cause two group stops when !ptraced. That is why we
2409 * also check !task_is_stopped(t) below.
2410 *
2411 * The condition can be distinguished by testing whether
2412 * SIGNAL_STOP_STOPPED is already set. Don't generate
2413 * group_exit_code in such case.
2414 *
2415 * This is not necessary for SIGNAL_STOP_CONTINUED because
2416 * an intervening stop signal is required to cause two
2417 * continued events regardless of ptrace.
2418 */
2419 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2420 sig->group_exit_code = signr;
2421
2422 sig->group_stop_count = 0;
2423
2424 if (task_set_jobctl_pending(current, signr | gstop))
2425 sig->group_stop_count++;
2426
2427 t = current;
2428 while_each_thread(current, t) {
2429 /*
2430 * Setting state to TASK_STOPPED for a group
2431 * stop is always done with the siglock held,
2432 * so this check has no races.
2433 */
2434 if (!task_is_stopped(t) &&
2435 task_set_jobctl_pending(t, signr | gstop)) {
2436 sig->group_stop_count++;
2437 if (likely(!(t->ptrace & PT_SEIZED)))
2438 signal_wake_up(t, 0);
2439 else
2440 ptrace_trap_notify(t);
2441 }
2442 }
2443 }
2444
2445 if (likely(!current->ptrace)) {
2446 int notify = 0;
2447
2448 /*
2449 * If there are no other threads in the group, or if there
2450 * is a group stop in progress and we are the last to stop,
2451 * report to the parent.
2452 */
2453 if (task_participate_group_stop(current))
2454 notify = CLD_STOPPED;
2455
2456 current->jobctl |= JOBCTL_STOPPED;
2457 set_special_state(TASK_STOPPED);
2458 spin_unlock_irq(&current->sighand->siglock);
2459
2460 /*
2461 * Notify the parent of the group stop completion. Because
2462 * we're not holding either the siglock or tasklist_lock
2463 * here, ptracer may attach in between; however, this is for
2464 * group stop and should always be delivered to the real
2465 * parent of the group leader. The new ptracer will get
2466 * its notification when this task transitions into
2467 * TASK_TRACED.
2468 */
2469 if (notify) {
2470 read_lock(&tasklist_lock);
2471 do_notify_parent_cldstop(current, false, notify);
2472 read_unlock(&tasklist_lock);
2473 }
2474
2475 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2476 cgroup_enter_frozen();
2477 schedule();
2478 return true;
2479 } else {
2480 /*
2481 * While ptraced, group stop is handled by STOP trap.
2482 * Schedule it and let the caller deal with it.
2483 */
2484 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2485 return false;
2486 }
2487}
2488
2489/**
2490 * do_jobctl_trap - take care of ptrace jobctl traps
2491 *
2492 * When PT_SEIZED, it's used for both group stop and explicit
2493 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2494 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2495 * the stop signal; otherwise, %SIGTRAP.
2496 *
2497 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2498 * number as exit_code and no siginfo.
2499 *
2500 * CONTEXT:
2501 * Must be called with @current->sighand->siglock held, which may be
2502 * released and re-acquired before returning with intervening sleep.
2503 */
2504static void do_jobctl_trap(void)
2505{
2506 struct signal_struct *signal = current->signal;
2507 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2508
2509 if (current->ptrace & PT_SEIZED) {
2510 if (!signal->group_stop_count &&
2511 !(signal->flags & SIGNAL_STOP_STOPPED))
2512 signr = SIGTRAP;
2513 WARN_ON_ONCE(!signr);
2514 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2515 CLD_STOPPED, 0);
2516 } else {
2517 WARN_ON_ONCE(!signr);
2518 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2519 }
2520}
2521
2522/**
2523 * do_freezer_trap - handle the freezer jobctl trap
2524 *
2525 * Puts the task into frozen state, unless the task is about to quit.
2526 * In that case it drops JOBCTL_TRAP_FREEZE.
2527 *
2528 * CONTEXT:
2529 * Must be called with @current->sighand->siglock held,
2530 * which is always released before returning.
2531 */
2532static void do_freezer_trap(void)
2533 __releases(&current->sighand->siglock)
2534{
2535 /*
2536 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2537 * let's make another loop to give it a chance to be handled.
2538 * In any case, we'll come back here.
2539 */
2540 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2541 JOBCTL_TRAP_FREEZE) {
2542 spin_unlock_irq(&current->sighand->siglock);
2543 return;
2544 }
2545
2546 /*
2547 * Now we're sure that there is no pending fatal signal and no
2548 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2549 * immediately (if there is a non-fatal signal pending), and
2550 * put the task into sleep.
2551 */
2552 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2553 clear_thread_flag(TIF_SIGPENDING);
2554 spin_unlock_irq(&current->sighand->siglock);
2555 cgroup_enter_frozen();
2556 schedule();
2557}
2558
2559static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2560{
2561 /*
2562 * We do not check sig_kernel_stop(signr) but set this marker
2563 * unconditionally because we do not know whether debugger will
2564 * change signr. This flag has no meaning unless we are going
2565 * to stop after return from ptrace_stop(). In this case it will
2566 * be checked in do_signal_stop(), we should only stop if it was
2567 * not cleared by SIGCONT while we were sleeping. See also the
2568 * comment in dequeue_signal().
2569 */
2570 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2571 signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2572
2573 /* We're back. Did the debugger cancel the sig? */
2574 if (signr == 0)
2575 return signr;
2576
2577 /*
2578 * Update the siginfo structure if the signal has
2579 * changed. If the debugger wanted something
2580 * specific in the siginfo structure then it should
2581 * have updated *info via PTRACE_SETSIGINFO.
2582 */
2583 if (signr != info->si_signo) {
2584 clear_siginfo(info);
2585 info->si_signo = signr;
2586 info->si_errno = 0;
2587 info->si_code = SI_USER;
2588 rcu_read_lock();
2589 info->si_pid = task_pid_vnr(current->parent);
2590 info->si_uid = from_kuid_munged(current_user_ns(),
2591 task_uid(current->parent));
2592 rcu_read_unlock();
2593 }
2594
2595 /* If the (new) signal is now blocked, requeue it. */
2596 if (sigismember(&current->blocked, signr) ||
2597 fatal_signal_pending(current)) {
2598 send_signal_locked(signr, info, current, type);
2599 signr = 0;
2600 }
2601
2602 return signr;
2603}
2604
2605static void hide_si_addr_tag_bits(struct ksignal *ksig)
2606{
2607 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2608 case SIL_FAULT:
2609 case SIL_FAULT_TRAPNO:
2610 case SIL_FAULT_MCEERR:
2611 case SIL_FAULT_BNDERR:
2612 case SIL_FAULT_PKUERR:
2613 case SIL_FAULT_PERF_EVENT:
2614 ksig->info.si_addr = arch_untagged_si_addr(
2615 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2616 break;
2617 case SIL_KILL:
2618 case SIL_TIMER:
2619 case SIL_POLL:
2620 case SIL_CHLD:
2621 case SIL_RT:
2622 case SIL_SYS:
2623 break;
2624 }
2625}
2626
2627bool get_signal(struct ksignal *ksig)
2628{
2629 struct sighand_struct *sighand = current->sighand;
2630 struct signal_struct *signal = current->signal;
2631 int signr;
2632
2633 clear_notify_signal();
2634 if (unlikely(task_work_pending(current)))
2635 task_work_run();
2636
2637 if (!task_sigpending(current))
2638 return false;
2639
2640 if (unlikely(uprobe_deny_signal()))
2641 return false;
2642
2643 /*
2644 * Do this once, we can't return to user-mode if freezing() == T.
2645 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2646 * thus do not need another check after return.
2647 */
2648 try_to_freeze();
2649
2650relock:
2651 spin_lock_irq(&sighand->siglock);
2652
2653 /*
2654 * Every stopped thread goes here after wakeup. Check to see if
2655 * we should notify the parent; prepare_signal(SIGCONT) encodes
2656 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2657 */
2658 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2659 int why;
2660
2661 if (signal->flags & SIGNAL_CLD_CONTINUED)
2662 why = CLD_CONTINUED;
2663 else
2664 why = CLD_STOPPED;
2665
2666 signal->flags &= ~SIGNAL_CLD_MASK;
2667
2668 spin_unlock_irq(&sighand->siglock);
2669
2670 /*
2671 * Notify the parent that we're continuing. This event is
2672 * always per-process and doesn't make a whole lot of sense
2673 * for ptracers, who shouldn't consume the state via
2674 * wait(2) either, but, for backward compatibility, notify
2675 * the ptracer of the group leader too unless it's gonna be
2676 * a duplicate.
2677 */
2678 read_lock(&tasklist_lock);
2679 do_notify_parent_cldstop(current, false, why);
2680
2681 if (ptrace_reparented(current->group_leader))
2682 do_notify_parent_cldstop(current->group_leader,
2683 true, why);
2684 read_unlock(&tasklist_lock);
2685
2686 goto relock;
2687 }
2688
2689 for (;;) {
2690 struct k_sigaction *ka;
2691 enum pid_type type;
2692
2693 /* Has this task already been marked for death? */
2694 if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2695 signal->group_exec_task) {
2696 clear_siginfo(&ksig->info);
2697 ksig->info.si_signo = signr = SIGKILL;
2698 sigdelset(&current->pending.signal, SIGKILL);
2699 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2700 &sighand->action[SIGKILL - 1]);
2701 recalc_sigpending();
2702 goto fatal;
2703 }
2704
2705 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2706 do_signal_stop(0))
2707 goto relock;
2708
2709 if (unlikely(current->jobctl &
2710 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2711 if (current->jobctl & JOBCTL_TRAP_MASK) {
2712 do_jobctl_trap();
2713 spin_unlock_irq(&sighand->siglock);
2714 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2715 do_freezer_trap();
2716
2717 goto relock;
2718 }
2719
2720 /*
2721 * If the task is leaving the frozen state, let's update
2722 * cgroup counters and reset the frozen bit.
2723 */
2724 if (unlikely(cgroup_task_frozen(current))) {
2725 spin_unlock_irq(&sighand->siglock);
2726 cgroup_leave_frozen(false);
2727 goto relock;
2728 }
2729
2730 /*
2731 * Signals generated by the execution of an instruction
2732 * need to be delivered before any other pending signals
2733 * so that the instruction pointer in the signal stack
2734 * frame points to the faulting instruction.
2735 */
2736 type = PIDTYPE_PID;
2737 signr = dequeue_synchronous_signal(&ksig->info);
2738 if (!signr)
2739 signr = dequeue_signal(current, &current->blocked,
2740 &ksig->info, &type);
2741
2742 if (!signr)
2743 break; /* will return 0 */
2744
2745 if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2746 !(sighand->action[signr - 1].sa.sa_flags & SA_IMMUTABLE)) {
2747 signr = ptrace_signal(signr, &ksig->info, type);
2748 if (!signr)
2749 continue;
2750 }
2751
2752 ka = &sighand->action[signr-1];
2753
2754 /* Trace actually delivered signals. */
2755 trace_signal_deliver(signr, &ksig->info, ka);
2756
2757 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2758 continue;
2759 if (ka->sa.sa_handler != SIG_DFL) {
2760 /* Run the handler. */
2761 ksig->ka = *ka;
2762
2763 if (ka->sa.sa_flags & SA_ONESHOT)
2764 ka->sa.sa_handler = SIG_DFL;
2765
2766 break; /* will return non-zero "signr" value */
2767 }
2768
2769 /*
2770 * Now we are doing the default action for this signal.
2771 */
2772 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2773 continue;
2774
2775 /*
2776 * Global init gets no signals it doesn't want.
2777 * Container-init gets no signals it doesn't want from same
2778 * container.
2779 *
2780 * Note that if global/container-init sees a sig_kernel_only()
2781 * signal here, the signal must have been generated internally
2782 * or must have come from an ancestor namespace. In either
2783 * case, the signal cannot be dropped.
2784 */
2785 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2786 !sig_kernel_only(signr))
2787 continue;
2788
2789 if (sig_kernel_stop(signr)) {
2790 /*
2791 * The default action is to stop all threads in
2792 * the thread group. The job control signals
2793 * do nothing in an orphaned pgrp, but SIGSTOP
2794 * always works. Note that siglock needs to be
2795 * dropped during the call to is_orphaned_pgrp()
2796 * because of lock ordering with tasklist_lock.
2797 * This allows an intervening SIGCONT to be posted.
2798 * We need to check for that and bail out if necessary.
2799 */
2800 if (signr != SIGSTOP) {
2801 spin_unlock_irq(&sighand->siglock);
2802
2803 /* signals can be posted during this window */
2804
2805 if (is_current_pgrp_orphaned())
2806 goto relock;
2807
2808 spin_lock_irq(&sighand->siglock);
2809 }
2810
2811 if (likely(do_signal_stop(ksig->info.si_signo))) {
2812 /* It released the siglock. */
2813 goto relock;
2814 }
2815
2816 /*
2817 * We didn't actually stop, due to a race
2818 * with SIGCONT or something like that.
2819 */
2820 continue;
2821 }
2822
2823 fatal:
2824 spin_unlock_irq(&sighand->siglock);
2825 if (unlikely(cgroup_task_frozen(current)))
2826 cgroup_leave_frozen(true);
2827
2828 /*
2829 * Anything else is fatal, maybe with a core dump.
2830 */
2831 current->flags |= PF_SIGNALED;
2832
2833 if (sig_kernel_coredump(signr)) {
2834 if (print_fatal_signals)
2835 print_fatal_signal(ksig->info.si_signo);
2836 proc_coredump_connector(current);
2837 /*
2838 * If it was able to dump core, this kills all
2839 * other threads in the group and synchronizes with
2840 * their demise. If we lost the race with another
2841 * thread getting here, it set group_exit_code
2842 * first and our do_group_exit call below will use
2843 * that value and ignore the one we pass it.
2844 */
2845 do_coredump(&ksig->info);
2846 }
2847
2848 /*
2849 * PF_IO_WORKER threads will catch and exit on fatal signals
2850 * themselves. They have cleanup that must be performed, so
2851 * we cannot call do_exit() on their behalf.
2852 */
2853 if (current->flags & PF_IO_WORKER)
2854 goto out;
2855
2856 /*
2857 * Death signals, no core dump.
2858 */
2859 do_group_exit(ksig->info.si_signo);
2860 /* NOTREACHED */
2861 }
2862 spin_unlock_irq(&sighand->siglock);
2863out:
2864 ksig->sig = signr;
2865
2866 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2867 hide_si_addr_tag_bits(ksig);
2868
2869 return ksig->sig > 0;
2870}
2871
2872/**
2873 * signal_delivered - called after signal delivery to update blocked signals
2874 * @ksig: kernel signal struct
2875 * @stepping: nonzero if debugger single-step or block-step in use
2876 *
2877 * This function should be called when a signal has successfully been
2878 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2879 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
2880 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
2881 */
2882static void signal_delivered(struct ksignal *ksig, int stepping)
2883{
2884 sigset_t blocked;
2885
2886 /* A signal was successfully delivered; the saved sigmask
2887 was stored on the signal frame and will be restored by
2888 sigreturn. So we can simply clear the restore sigmask
2889 flag. */
2890 clear_restore_sigmask();
2891
2892 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2893 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2894 sigaddset(&blocked, ksig->sig);
2895 set_current_blocked(&blocked);
2896 if (current->sas_ss_flags & SS_AUTODISARM)
2897 sas_ss_reset(current);
2898 if (stepping)
2899 ptrace_notify(SIGTRAP, 0);
2900}
2901
2902void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2903{
2904 if (failed)
2905 force_sigsegv(ksig->sig);
2906 else
2907 signal_delivered(ksig, stepping);
2908}
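
/*
 * Example: each architecture's signal-frame code ends roughly like this
 * (a sketch modeled on the common arch pattern; the frame-setup helper
 * is arch-specific and the name below is hypothetical):
 *
 *	static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 *	{
 *		bool stepping = test_thread_flag(TIF_SINGLESTEP);
 *		bool failed = (my_setup_rt_frame(ksig, regs) < 0);
 *
 *		signal_setup_done(failed, ksig, stepping);
 *	}
 */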
2909
2910/*
2911 * It could be that complete_signal() picked us to notify about the
2912 * group-wide signal. Other threads should be notified now to take
2913 * the shared signals in @which since we will not.
2914 */
2915static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2916{
2917 sigset_t retarget;
2918 struct task_struct *t;
2919
2920 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2921 if (sigisemptyset(&retarget))
2922 return;
2923
2924 t = tsk;
2925 while_each_thread(tsk, t) {
2926 if (t->flags & PF_EXITING)
2927 continue;
2928
2929 if (!has_pending_signals(&retarget, &t->blocked))
2930 continue;
2931 /* Remove the signals this thread can handle. */
2932 sigandsets(&retarget, &retarget, &t->blocked);
2933
2934 if (!task_sigpending(t))
2935 signal_wake_up(t, 0);
2936
2937 if (sigisemptyset(&retarget))
2938 break;
2939 }
2940}
2941
2942void exit_signals(struct task_struct *tsk)
2943{
2944 int group_stop = 0;
2945 sigset_t unblocked;
2946
2947 /*
2948 * @tsk is about to have PF_EXITING set - lock out users which
2949 * expect stable threadgroup.
2950 */
2951 cgroup_threadgroup_change_begin(tsk);
2952
2953 if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
2954 tsk->flags |= PF_EXITING;
2955 cgroup_threadgroup_change_end(tsk);
2956 return;
2957 }
2958
2959 spin_lock_irq(&tsk->sighand->siglock);
2960 /*
2961 * From now on this task is not visible to group-wide signals,
2962 * see wants_signal(), do_signal_stop().
2963 */
2964 tsk->flags |= PF_EXITING;
2965
2966 cgroup_threadgroup_change_end(tsk);
2967
2968 if (!task_sigpending(tsk))
2969 goto out;
2970
2971 unblocked = tsk->blocked;
2972 signotset(&unblocked);
2973 retarget_shared_pending(tsk, &unblocked);
2974
2975 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2976 task_participate_group_stop(tsk))
2977 group_stop = CLD_STOPPED;
2978out:
2979 spin_unlock_irq(&tsk->sighand->siglock);
2980
2981 /*
2982 * If group stop has completed, deliver the notification. This
2983 * should always go to the real parent of the group leader.
2984 */
2985 if (unlikely(group_stop)) {
2986 read_lock(&tasklist_lock);
2987 do_notify_parent_cldstop(tsk, false, group_stop);
2988 read_unlock(&tasklist_lock);
2989 }
2990}
2991
2992/*
2993 * System call entry points.
2994 */
2995
2996/**
2997 * sys_restart_syscall - restart a system call
2998 */
2999SYSCALL_DEFINE0(restart_syscall)
3000{
3001 struct restart_block *restart = &current->restart_block;
3002 return restart->fn(restart);
3003}
3004
3005long do_no_restart_syscall(struct restart_block *param)
3006{
3007 return -EINTR;
3008}
3009
3010static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3011{
3012 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3013 sigset_t newblocked;
3014 /* A set of now blocked but previously unblocked signals. */
3015 sigandnsets(&newblocked, newset, &current->blocked);
3016 retarget_shared_pending(tsk, &newblocked);
3017 }
3018 tsk->blocked = *newset;
3019 recalc_sigpending();
3020}
3021
3022/**
3023 * set_current_blocked - change current->blocked mask
3024 * @newset: new mask
3025 *
3026 * It is wrong to change ->blocked directly; this helper should be used
3027 * to ensure the process can't miss a shared signal we are going to block.
3028 */
3029void set_current_blocked(sigset_t *newset)
3030{
3031 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3032 __set_current_blocked(newset);
3033}
3034
3035void __set_current_blocked(const sigset_t *newset)
3036{
3037 struct task_struct *tsk = current;
3038
3039 /*
3040 * In case the signal mask hasn't changed, there is nothing we need
3041 * to do. current->blocked shouldn't be modified by any other task.
3042 */
3043 if (sigequalsets(&tsk->blocked, newset))
3044 return;
3045
3046 spin_lock_irq(&tsk->sighand->siglock);
3047 __set_task_blocked(tsk, newset);
3048 spin_unlock_irq(&tsk->sighand->siglock);
3049}
3050
3051/*
3052 * This is also useful for kernel threads that want to temporarily
3053 * (or permanently) block certain signals.
3054 *
3055 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3056 * interface happily blocks "unblockable" signals like SIGKILL
3057 * and friends.
3058 */
3059int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3060{
3061 struct task_struct *tsk = current;
3062 sigset_t newset;
3063
3064 /* Lockless, only current can change ->blocked, never from irq */
3065 if (oldset)
3066 *oldset = tsk->blocked;
3067
3068 switch (how) {
3069 case SIG_BLOCK:
3070 sigorsets(&newset, &tsk->blocked, set);
3071 break;
3072 case SIG_UNBLOCK:
3073 sigandnsets(&newset, &tsk->blocked, set);
3074 break;
3075 case SIG_SETMASK:
3076 newset = *set;
3077 break;
3078 default:
3079 return -EINVAL;
3080 }
3081
3082 __set_current_blocked(&newset);
3083 return 0;
3084}
3085EXPORT_SYMBOL(sigprocmask);
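
/*
 * Example: a kernel thread blocking SIGTERM around a critical section
 * (illustrative sketch):
 *
 *	sigset_t newset, oldset;
 *
 *	siginitset(&newset, sigmask(SIGTERM));
 *	sigprocmask(SIG_BLOCK, &newset, &oldset);
 *	... work that must not observe SIGTERM ...
 *	sigprocmask(SIG_SETMASK, &oldset, NULL);
 */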
3086
3087/*
3088 * This API helps set app-provided sigmasks.
3089 *
3090 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3091 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3092 *
3093 * Note that it does set_restore_sigmask() in advance, so it must always be
3094 * paired with restore_saved_sigmask_unless() before return from syscall.
3095 */
3096int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3097{
3098 sigset_t kmask;
3099
3100 if (!umask)
3101 return 0;
3102 if (sigsetsize != sizeof(sigset_t))
3103 return -EINVAL;
3104 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3105 return -EFAULT;
3106
3107 set_restore_sigmask();
3108 current->saved_sigmask = current->blocked;
3109 set_current_blocked(&kmask);
3110
3111 return 0;
3112}
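
/*
 * Example: the ppoll(2) path in fs/select.c pairs the two calls roughly
 * like this (a sketch; in the real code the restore is buried inside
 * poll_select_finish() and the "interrupted" condition is more precise):
 *
 *	ret = set_user_sigmask(sigmask, sigsetsize);
 *	if (ret)
 *		return ret;
 *
 *	ret = do_sys_poll(ufds, nfds, to);
 *
 *	restore_saved_sigmask_unless(ret == -EINTR);
 */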
3113
3114#ifdef CONFIG_COMPAT
3115int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3116 size_t sigsetsize)
3117{
3118 sigset_t kmask;
3119
3120 if (!umask)
3121 return 0;
3122 if (sigsetsize != sizeof(compat_sigset_t))
3123 return -EINVAL;
3124 if (get_compat_sigset(&kmask, umask))
3125 return -EFAULT;
3126
3127 set_restore_sigmask();
3128 current->saved_sigmask = current->blocked;
3129 set_current_blocked(&kmask);
3130
3131 return 0;
3132}
3133#endif
3134
3135/**
3136 * sys_rt_sigprocmask - change the list of currently blocked signals
3137 * @how: whether to add, remove, or set signals
3138 * @nset: new signal mask to apply, if non-null
3139 * @oset: previous value of signal mask if non-null
3140 * @sigsetsize: size of sigset_t type
3141 */
3142SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3143 sigset_t __user *, oset, size_t, sigsetsize)
3144{
3145 sigset_t old_set, new_set;
3146 int error;
3147
3148 /* XXX: Don't preclude handling different sized sigset_t's. */
3149 if (sigsetsize != sizeof(sigset_t))
3150 return -EINVAL;
3151
3152 old_set = current->blocked;
3153
3154 if (nset) {
3155 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3156 return -EFAULT;
3157 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3158
3159 error = sigprocmask(how, &new_set, NULL);
3160 if (error)
3161 return error;
3162 }
3163
3164 if (oset) {
3165 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3166 return -EFAULT;
3167 }
3168
3169 return 0;
3170}
3171
3172#ifdef CONFIG_COMPAT
3173COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3174 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3175{
3176 sigset_t old_set = current->blocked;
3177
3178 /* XXX: Don't preclude handling different sized sigset_t's. */
3179 if (sigsetsize != sizeof(sigset_t))
3180 return -EINVAL;
3181
3182 if (nset) {
3183 sigset_t new_set;
3184 int error;
3185 if (get_compat_sigset(&new_set, nset))
3186 return -EFAULT;
3187 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3188
3189 error = sigprocmask(how, &new_set, NULL);
3190 if (error)
3191 return error;
3192 }
3193 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3194}
3195#endif
3196
3197static void do_sigpending(sigset_t *set)
3198{
3199 spin_lock_irq(&current->sighand->siglock);
3200 sigorsets(set, &current->pending.signal,
3201 &current->signal->shared_pending.signal);
3202 spin_unlock_irq(&current->sighand->siglock);
3203
3204 /* Outside the lock because only this thread touches it. */
3205 sigandsets(set, &current->blocked, set);
3206}
3207
3208/**
3209 * sys_rt_sigpending - examine a pending signal that has been raised
3210 * while blocked
3211 * @uset: stores pending signals
3212 * @sigsetsize: size of sigset_t type or larger
3213 */
3214SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3215{
3216 sigset_t set;
3217
3218 if (sigsetsize > sizeof(*uset))
3219 return -EINVAL;
3220
3221 do_sigpending(&set);
3222
3223 if (copy_to_user(uset, &set, sigsetsize))
3224 return -EFAULT;
3225
3226 return 0;
3227}
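
/*
 * Example: from userspace this is reached through the sigpending(2)
 * wrapper; checking whether a blocked SIGINT has been raised:
 *
 *	sigset_t pending;
 *
 *	sigpending(&pending);
 *	if (sigismember(&pending, SIGINT))
 *		... SIGINT arrived while blocked ...
 */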
3228
3229#ifdef CONFIG_COMPAT
3230COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3231 compat_size_t, sigsetsize)
3232{
3233 sigset_t set;
3234
3235 if (sigsetsize > sizeof(*uset))
3236 return -EINVAL;
3237
3238 do_sigpending(&set);
3239
3240 return put_compat_sigset(uset, &set, sigsetsize);
3241}
3242#endif
3243
3244static const struct {
3245 unsigned char limit, layout;
3246} sig_sicodes[] = {
3247 [SIGILL] = { NSIGILL, SIL_FAULT },
3248 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3249 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3250 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3251 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3252#if defined(SIGEMT)
3253 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3254#endif
3255 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3256 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3257 [SIGSYS] = { NSIGSYS, SIL_SYS },
3258};
3259
3260static bool known_siginfo_layout(unsigned sig, int si_code)
3261{
3262 if (si_code == SI_KERNEL)
3263 return true;
3264 else if (si_code > SI_USER) {
3265 if (sig_specific_sicodes(sig)) {
3266 if (si_code <= sig_sicodes[sig].limit)
3267 return true;
3268 }
3269 else if (si_code <= NSIGPOLL)
3270 return true;
3271 }
3272 else if (si_code >= SI_DETHREAD)
3273 return true;
3274 else if (si_code == SI_ASYNCNL)
3275 return true;
3276 return false;
3277}
3278
3279enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3280{
3281 enum siginfo_layout layout = SIL_KILL;
3282 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3283 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3284 (si_code <= sig_sicodes[sig].limit)) {
3285 layout = sig_sicodes[sig].layout;
3286 /* Handle the exceptions */
3287 if ((sig == SIGBUS) &&
3288 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3289 layout = SIL_FAULT_MCEERR;
3290 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3291 layout = SIL_FAULT_BNDERR;
3292#ifdef SEGV_PKUERR
3293 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3294 layout = SIL_FAULT_PKUERR;
3295#endif
3296 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3297 layout = SIL_FAULT_PERF_EVENT;
3298 else if (IS_ENABLED(CONFIG_SPARC) &&
3299 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3300 layout = SIL_FAULT_TRAPNO;
3301 else if (IS_ENABLED(CONFIG_ALPHA) &&
3302 ((sig == SIGFPE) ||
3303 ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3304 layout = SIL_FAULT_TRAPNO;
3305 }
3306 else if (si_code <= NSIGPOLL)
3307 layout = SIL_POLL;
3308 } else {
3309 if (si_code == SI_TIMER)
3310 layout = SIL_TIMER;
3311 else if (si_code == SI_SIGIO)
3312 layout = SIL_POLL;
3313 else if (si_code < 0)
3314 layout = SIL_RT;
3315 }
3316 return layout;
3317}
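
/*
 * Example mappings, per the sig_sicodes[] table and the exceptions
 * handled above:
 *
 *	siginfo_layout(SIGSEGV, SEGV_MAPERR)	== SIL_FAULT
 *	siginfo_layout(SIGSEGV, SEGV_BNDERR)	== SIL_FAULT_BNDERR
 *	siginfo_layout(SIGCHLD, CLD_EXITED)	== SIL_CHLD
 *	siginfo_layout(SIGUSR1, SI_QUEUE)	== SIL_RT	(si_code < 0)
 */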
3318
3319static inline char __user *si_expansion(const siginfo_t __user *info)
3320{
3321 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3322}
3323
3324int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3325{
3326 char __user *expansion = si_expansion(to);
3327 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3328 return -EFAULT;
3329 if (clear_user(expansion, SI_EXPANSION_SIZE))
3330 return -EFAULT;
3331 return 0;
3332}
3333
3334static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3335 const siginfo_t __user *from)
3336{
3337 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3338 char __user *expansion = si_expansion(from);
3339 char buf[SI_EXPANSION_SIZE];
3340 int i;
3341 /*
3342 * An unknown si_code might need more than
3343 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3344 * extra bytes are 0. This guarantees copy_siginfo_to_user
3345 * will return this data to userspace exactly.
3346 */
3347 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3348 return -EFAULT;
3349 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3350 if (buf[i] != 0)
3351 return -E2BIG;
3352 }
3353 }
3354 return 0;
3355}
3356
3357static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3358 const siginfo_t __user *from)
3359{
3360 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3361 return -EFAULT;
3362 to->si_signo = signo;
3363 return post_copy_siginfo_from_user(to, from);
3364}
3365
3366int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3367{
3368 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3369 return -EFAULT;
3370 return post_copy_siginfo_from_user(to, from);
3371}
3372
3373#ifdef CONFIG_COMPAT
3374/**
3375 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3376 * @to: compat siginfo destination
3377 * @from: kernel siginfo source
3378 *
3379 * Note: This function does not work properly for SIGCHLD on x32, but
3380 * fortunately it doesn't have to. The only valid callers for this function are
3381 * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3382 * The latter does not care because SIGCHLD will never cause a coredump.
3383 */
3384void copy_siginfo_to_external32(struct compat_siginfo *to,
3385 const struct kernel_siginfo *from)
3386{
3387 memset(to, 0, sizeof(*to));
3388
3389 to->si_signo = from->si_signo;
3390 to->si_errno = from->si_errno;
3391 to->si_code = from->si_code;
3392 switch(siginfo_layout(from->si_signo, from->si_code)) {
3393 case SIL_KILL:
3394 to->si_pid = from->si_pid;
3395 to->si_uid = from->si_uid;
3396 break;
3397 case SIL_TIMER:
3398 to->si_tid = from->si_tid;
3399 to->si_overrun = from->si_overrun;
3400 to->si_int = from->si_int;
3401 break;
3402 case SIL_POLL:
3403 to->si_band = from->si_band;
3404 to->si_fd = from->si_fd;
3405 break;
3406 case SIL_FAULT:
3407 to->si_addr = ptr_to_compat(from->si_addr);
3408 break;
3409 case SIL_FAULT_TRAPNO:
3410 to->si_addr = ptr_to_compat(from->si_addr);
3411 to->si_trapno = from->si_trapno;
3412 break;
3413 case SIL_FAULT_MCEERR:
3414 to->si_addr = ptr_to_compat(from->si_addr);
3415 to->si_addr_lsb = from->si_addr_lsb;
3416 break;
3417 case SIL_FAULT_BNDERR:
3418 to->si_addr = ptr_to_compat(from->si_addr);
3419 to->si_lower = ptr_to_compat(from->si_lower);
3420 to->si_upper = ptr_to_compat(from->si_upper);
3421 break;
3422 case SIL_FAULT_PKUERR:
3423 to->si_addr = ptr_to_compat(from->si_addr);
3424 to->si_pkey = from->si_pkey;
3425 break;
3426 case SIL_FAULT_PERF_EVENT:
3427 to->si_addr = ptr_to_compat(from->si_addr);
3428 to->si_perf_data = from->si_perf_data;
3429 to->si_perf_type = from->si_perf_type;
3430 to->si_perf_flags = from->si_perf_flags;
3431 break;
3432 case SIL_CHLD:
3433 to->si_pid = from->si_pid;
3434 to->si_uid = from->si_uid;
3435 to->si_status = from->si_status;
3436 to->si_utime = from->si_utime;
3437 to->si_stime = from->si_stime;
3438 break;
3439 case SIL_RT:
3440 to->si_pid = from->si_pid;
3441 to->si_uid = from->si_uid;
3442 to->si_int = from->si_int;
3443 break;
3444 case SIL_SYS:
3445 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3446 to->si_syscall = from->si_syscall;
3447 to->si_arch = from->si_arch;
3448 break;
3449 }
3450}
3451
3452int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3453 const struct kernel_siginfo *from)
3454{
3455 struct compat_siginfo new;
3456
3457 copy_siginfo_to_external32(&new, from);
3458 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3459 return -EFAULT;
3460 return 0;
3461}
3462
3463static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3464 const struct compat_siginfo *from)
3465{
3466 clear_siginfo(to);
3467 to->si_signo = from->si_signo;
3468 to->si_errno = from->si_errno;
3469 to->si_code = from->si_code;
3470 switch(siginfo_layout(from->si_signo, from->si_code)) {
3471 case SIL_KILL:
3472 to->si_pid = from->si_pid;
3473 to->si_uid = from->si_uid;
3474 break;
3475 case SIL_TIMER:
3476 to->si_tid = from->si_tid;
3477 to->si_overrun = from->si_overrun;
3478 to->si_int = from->si_int;
3479 break;
3480 case SIL_POLL:
3481 to->si_band = from->si_band;
3482 to->si_fd = from->si_fd;
3483 break;
3484 case SIL_FAULT:
3485 to->si_addr = compat_ptr(from->si_addr);
3486 break;
3487 case SIL_FAULT_TRAPNO:
3488 to->si_addr = compat_ptr(from->si_addr);
3489 to->si_trapno = from->si_trapno;
3490 break;
3491 case SIL_FAULT_MCEERR:
3492 to->si_addr = compat_ptr(from->si_addr);
3493 to->si_addr_lsb = from->si_addr_lsb;
3494 break;
3495 case SIL_FAULT_BNDERR:
3496 to->si_addr = compat_ptr(from->si_addr);
3497 to->si_lower = compat_ptr(from->si_lower);
3498 to->si_upper = compat_ptr(from->si_upper);
3499 break;
3500 case SIL_FAULT_PKUERR:
3501 to->si_addr = compat_ptr(from->si_addr);
3502 to->si_pkey = from->si_pkey;
3503 break;
3504 case SIL_FAULT_PERF_EVENT:
3505 to->si_addr = compat_ptr(from->si_addr);
3506 to->si_perf_data = from->si_perf_data;
3507 to->si_perf_type = from->si_perf_type;
3508 to->si_perf_flags = from->si_perf_flags;
3509 break;
3510 case SIL_CHLD:
3511 to->si_pid = from->si_pid;
3512 to->si_uid = from->si_uid;
3513 to->si_status = from->si_status;
3514#ifdef CONFIG_X86_X32_ABI
3515 if (in_x32_syscall()) {
3516 to->si_utime = from->_sifields._sigchld_x32._utime;
3517 to->si_stime = from->_sifields._sigchld_x32._stime;
3518 } else
3519#endif
3520 {
3521 to->si_utime = from->si_utime;
3522 to->si_stime = from->si_stime;
3523 }
3524 break;
3525 case SIL_RT:
3526 to->si_pid = from->si_pid;
3527 to->si_uid = from->si_uid;
3528 to->si_int = from->si_int;
3529 break;
3530 case SIL_SYS:
3531 to->si_call_addr = compat_ptr(from->si_call_addr);
3532 to->si_syscall = from->si_syscall;
3533 to->si_arch = from->si_arch;
3534 break;
3535 }
3536 return 0;
3537}
3538
3539static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3540 const struct compat_siginfo __user *ufrom)
3541{
3542 struct compat_siginfo from;
3543
3544 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3545 return -EFAULT;
3546
3547 from.si_signo = signo;
3548 return post_copy_siginfo_from_user32(to, &from);
3549}
3550
3551int copy_siginfo_from_user32(struct kernel_siginfo *to,
3552 const struct compat_siginfo __user *ufrom)
3553{
3554 struct compat_siginfo from;
3555
3556 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3557 return -EFAULT;
3558
3559 return post_copy_siginfo_from_user32(to, &from);
3560}
3561#endif /* CONFIG_COMPAT */
3562
3563/**
3564 * do_sigtimedwait - wait for queued signals specified in @which
3565 * @which: queued signals to wait for
3566 * @info: if non-null, the signal's siginfo is returned here
3567 * @ts: upper bound on process time suspension
3568 */
3569static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3570 const struct timespec64 *ts)
3571{
3572 ktime_t *to = NULL, timeout = KTIME_MAX;
3573 struct task_struct *tsk = current;
3574 sigset_t mask = *which;
3575 enum pid_type type;
3576 int sig, ret = 0;
3577
3578 if (ts) {
3579 if (!timespec64_valid(ts))
3580 return -EINVAL;
3581 timeout = timespec64_to_ktime(*ts);
3582 to = &timeout;
3583 }
3584
3585 /*
3586 * Invert the set of allowed signals to get those we want to block.
3587 */
3588 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3589 signotset(&mask);
3590
3591 spin_lock_irq(&tsk->sighand->siglock);
3592 sig = dequeue_signal(tsk, &mask, info, &type);
3593 if (!sig && timeout) {
3594 /*
3595 * None ready, temporarily unblock those we're interested in
3596 * while we are sleeping so that we'll be awakened when
3597 * they arrive. Unblocking is always fine, we can avoid
3598 * set_current_blocked().
3599 */
3600 tsk->real_blocked = tsk->blocked;
3601 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3602 recalc_sigpending();
3603 spin_unlock_irq(&tsk->sighand->siglock);
3604
3605 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
3606 ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3607 HRTIMER_MODE_REL);
3608 spin_lock_irq(&tsk->sighand->siglock);
3609 __set_task_blocked(tsk, &tsk->real_blocked);
3610 sigemptyset(&tsk->real_blocked);
3611 sig = dequeue_signal(tsk, &mask, info, &type);
3612 }
3613 spin_unlock_irq(&tsk->sighand->siglock);
3614
3615 if (sig)
3616 return sig;
3617 return ret ? -EINTR : -EAGAIN;
3618}
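
/*
 * Example: userspace reaches this through sigtimedwait(2), normally with
 * the signals of interest blocked first so they stay queued (sketch):
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
 *		... handle it synchronously ...
 */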
3619
3620/**
3621 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3622 * in @uthese
3623 * @uthese: queued signals to wait for
3624 * @uinfo: if non-null, the signal's siginfo is returned here
3625 * @uts: upper bound on process time suspension
3626 * @sigsetsize: size of sigset_t type
3627 */
3628SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3629 siginfo_t __user *, uinfo,
3630 const struct __kernel_timespec __user *, uts,
3631 size_t, sigsetsize)
3632{
3633 sigset_t these;
3634 struct timespec64 ts;
3635 kernel_siginfo_t info;
3636 int ret;
3637
3638 /* XXX: Don't preclude handling different sized sigset_t's. */
3639 if (sigsetsize != sizeof(sigset_t))
3640 return -EINVAL;
3641
3642 if (copy_from_user(&these, uthese, sizeof(these)))
3643 return -EFAULT;
3644
3645 if (uts) {
3646 if (get_timespec64(&ts, uts))
3647 return -EFAULT;
3648 }
3649
3650 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3651
3652 if (ret > 0 && uinfo) {
3653 if (copy_siginfo_to_user(uinfo, &info))
3654 ret = -EFAULT;
3655 }
3656
3657 return ret;
3658}
3659
3660#ifdef CONFIG_COMPAT_32BIT_TIME
3661SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3662 siginfo_t __user *, uinfo,
3663 const struct old_timespec32 __user *, uts,
3664 size_t, sigsetsize)
3665{
3666 sigset_t these;
3667 struct timespec64 ts;
3668 kernel_siginfo_t info;
3669 int ret;
3670
3671 if (sigsetsize != sizeof(sigset_t))
3672 return -EINVAL;
3673
3674 if (copy_from_user(&these, uthese, sizeof(these)))
3675 return -EFAULT;
3676
3677 if (uts) {
3678 if (get_old_timespec32(&ts, uts))
3679 return -EFAULT;
3680 }
3681
3682 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3683
3684 if (ret > 0 && uinfo) {
3685 if (copy_siginfo_to_user(uinfo, &info))
3686 ret = -EFAULT;
3687 }
3688
3689 return ret;
3690}
3691#endif
3692
3693#ifdef CONFIG_COMPAT
3694COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3695 struct compat_siginfo __user *, uinfo,
3696 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3697{
3698 sigset_t s;
3699 struct timespec64 t;
3700 kernel_siginfo_t info;
3701 long ret;
3702
3703 if (sigsetsize != sizeof(sigset_t))
3704 return -EINVAL;
3705
3706 if (get_compat_sigset(&s, uthese))
3707 return -EFAULT;
3708
3709 if (uts) {
3710 if (get_timespec64(&t, uts))
3711 return -EFAULT;
3712 }
3713
3714 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3715
3716 if (ret > 0 && uinfo) {
3717 if (copy_siginfo_to_user32(uinfo, &info))
3718 ret = -EFAULT;
3719 }
3720
3721 return ret;
3722}
3723
3724#ifdef CONFIG_COMPAT_32BIT_TIME
3725COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3726 struct compat_siginfo __user *, uinfo,
3727 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3728{
3729 sigset_t s;
3730 struct timespec64 t;
3731 kernel_siginfo_t info;
3732 long ret;
3733
3734 if (sigsetsize != sizeof(sigset_t))
3735 return -EINVAL;
3736
3737 if (get_compat_sigset(&s, uthese))
3738 return -EFAULT;
3739
3740 if (uts) {
3741 if (get_old_timespec32(&t, uts))
3742 return -EFAULT;
3743 }
3744
3745 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3746
3747 if (ret > 0 && uinfo) {
3748 if (copy_siginfo_to_user32(uinfo, &info))
3749 ret = -EFAULT;
3750 }
3751
3752 return ret;
3753}
3754#endif
3755#endif
3756
3757static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3758{
3759 clear_siginfo(info);
3760 info->si_signo = sig;
3761 info->si_errno = 0;
3762 info->si_code = SI_USER;
3763 info->si_pid = task_tgid_vnr(current);
3764 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3765}
3766
3767/**
3768 * sys_kill - send a signal to a process
3769 * @pid: the PID of the process
3770 * @sig: signal to be sent
3771 */
3772SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3773{
3774 struct kernel_siginfo info;
3775
3776 prepare_kill_siginfo(sig, &info);
3777
3778 return kill_something_info(sig, &info, pid);
3779}
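
/*
 * Example: as in kill(2), the pid argument selects the scope, which
 * kill_something_info() decodes:
 *
 *	kill(1234, SIGTERM)	one process
 *	kill(0, SIGTERM)	every process in the caller's process group
 *	kill(-1234, SIGTERM)	every process in process group 1234
 *	kill(-1, SIGTERM)	every process the caller may signal
 */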
3780
3781/*
3782 * Verify that the signaler and signalee either are in the same pid namespace
3783 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3784 * namespace.
3785 */
3786static bool access_pidfd_pidns(struct pid *pid)
3787{
3788 struct pid_namespace *active = task_active_pid_ns(current);
3789 struct pid_namespace *p = ns_of_pid(pid);
3790
3791 for (;;) {
3792 if (!p)
3793 return false;
3794 if (p == active)
3795 break;
3796 p = p->parent;
3797 }
3798
3799 return true;
3800}
3801
3802static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3803 siginfo_t __user *info)
3804{
3805#ifdef CONFIG_COMPAT
3806 /*
3807 * Avoid hooking up compat syscalls and instead handle necessary
3808 * conversions here. Note, this is a stop-gap measure and should not be
3809 * considered a generic solution.
3810 */
3811 if (in_compat_syscall())
3812 return copy_siginfo_from_user32(
3813 kinfo, (struct compat_siginfo __user *)info);
3814#endif
3815 return copy_siginfo_from_user(kinfo, info);
3816}
3817
3818static struct pid *pidfd_to_pid(const struct file *file)
3819{
3820 struct pid *pid;
3821
3822 pid = pidfd_pid(file);
3823 if (!IS_ERR(pid))
3824 return pid;
3825
3826 return tgid_pidfd_to_pid(file);
3827}
3828
3829/**
3830 * sys_pidfd_send_signal - Signal a process through a pidfd
3831 * @pidfd: file descriptor of the process
3832 * @sig: signal to send
3833 * @info: signal info
3834 * @flags: future flags
3835 *
3836 * The syscall currently only signals via PIDTYPE_PID which covers
3837 * kill(<positive-pid>, <signal>). It does not signal threads or process
3838 * groups.
3839 * In order to extend the syscall to threads and process groups the @flags
3840 * argument should be used. In essence, the @flags argument will determine
3841 * what is signaled and not the file descriptor itself. Put in other words,
3842 * grouping is a property of the flags argument not a property of the file
3843 * descriptor.
3844 *
3845 * Return: 0 on success, negative errno on failure
3846 */
3847SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3848 siginfo_t __user *, info, unsigned int, flags)
3849{
3850 int ret;
3851 struct fd f;
3852 struct pid *pid;
3853 kernel_siginfo_t kinfo;
3854
3855 /* Enforce that flags are 0 until we add an extension. */
3856 if (flags)
3857 return -EINVAL;
3858
3859 f = fdget(pidfd);
3860 if (!f.file)
3861 return -EBADF;
3862
3863 /* Is this a pidfd? */
3864 pid = pidfd_to_pid(f.file);
3865 if (IS_ERR(pid)) {
3866 ret = PTR_ERR(pid);
3867 goto err;
3868 }
3869
3870 ret = -EINVAL;
3871 if (!access_pidfd_pidns(pid))
3872 goto err;
3873
3874 if (info) {
3875 ret = copy_siginfo_from_user_any(&kinfo, info);
3876 if (unlikely(ret))
3877 goto err;
3878
3879 ret = -EINVAL;
3880 if (unlikely(sig != kinfo.si_signo))
3881 goto err;
3882
3883 /* Only allow sending arbitrary signals to yourself. */
3884 ret = -EPERM;
3885 if ((task_pid(current) != pid) &&
3886 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3887 goto err;
3888 } else {
3889 prepare_kill_siginfo(sig, &kinfo);
3890 }
3891
3892 ret = kill_pid_info(sig, &kinfo, pid);
3893
3894err:
3895 fdput(f);
3896 return ret;
3897}
3898
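/*
 * Illustrative userspace sketch (editorial): signalling through a pidfd
 * avoids pid-reuse races. Raw syscall(2) is used since the glibc wrappers
 * are comparatively recent; flags must be 0, and info may be NULL so the
 * kernel builds the SI_USER siginfo via prepare_kill_siginfo() above.
 * kill_via_pidfd() is a hypothetical helper; assumes a pidfd-capable kernel.
 */
#if 0	/* standalone userspace example */
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int kill_via_pidfd(pid_t pid, int sig)
{
	int pidfd = syscall(SYS_pidfd_open, pid, 0);
	if (pidfd < 0)
		return -1;
	int ret = syscall(SYS_pidfd_send_signal, pidfd, sig, NULL, 0);
	close(pidfd);
	return ret;
}
#endif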
3899static int
3900do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3901{
3902 struct task_struct *p;
3903 int error = -ESRCH;
3904
3905 rcu_read_lock();
3906 p = find_task_by_vpid(pid);
3907 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3908 error = check_kill_permission(sig, info, p);
3909 /*
3910 * The null signal is a permissions and process existence
3911 * probe. No signal is actually delivered.
3912 */
3913 if (!error && sig) {
3914 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3915 /*
3916 * If lock_task_sighand() failed we pretend the task
3917 * dies after receiving the signal. The window is tiny,
3918 * and the signal is private anyway.
3919 */
3920 if (unlikely(error == -ESRCH))
3921 error = 0;
3922 }
3923 }
3924 rcu_read_unlock();
3925
3926 return error;
3927}
3928
3929static int do_tkill(pid_t tgid, pid_t pid, int sig)
3930{
3931 struct kernel_siginfo info;
3932
3933 clear_siginfo(&info);
3934 info.si_signo = sig;
3935 info.si_errno = 0;
3936 info.si_code = SI_TKILL;
3937 info.si_pid = task_tgid_vnr(current);
3938 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3939
3940 return do_send_specific(tgid, pid, sig, &info);
3941}
3942
3943/**
3944 * sys_tgkill - send signal to one specific thread
3945 * @tgid: the thread group ID of the thread
3946 * @pid: the PID of the thread
3947 * @sig: signal to be sent
3948 *
3949 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3950 * exists but no longer belongs to the target process. This check
3951 * guards against threads exiting and their PIDs being reused.
3952 */
3953SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3954{
3955 /* This is only valid for single tasks */
3956 if (pid <= 0 || tgid <= 0)
3957 return -EINVAL;
3958
3959 return do_tkill(tgid, pid, sig);
3960}
3961
3962/**
3963 * sys_tkill - send signal to one specific task
3964 * @pid: the PID of the task
3965 * @sig: signal to be sent
3966 *
3967 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3968 */
3969SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3970{
3971 /* This is only valid for single tasks */
3972 if (pid <= 0)
3973 return -EINVAL;
3974
3975 return do_tkill(0, pid, sig);
3976}
3977
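/*
 * Illustrative userspace sketch (editorial): tgkill(2) pins the signal to
 * one thread, and the tgid check above makes a recycled tid harmless.
 * signal_thread() is a hypothetical helper; gettid(2) returns the
 * per-thread id, getpid(2) the thread-group id.
 */
#if 0	/* standalone userspace example */
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int signal_thread(pid_t tgid, pid_t tid, int sig)
{
	/* fails with errno ESRCH if tid no longer belongs to tgid */
	return syscall(SYS_tgkill, tgid, tid, sig);
}
#endif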
3978static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3979{
3980 /* Not even root can pretend to send signals from the kernel.
3981 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3982 */
3983 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3984 (task_pid_vnr(current) != pid))
3985 return -EPERM;
3986
3987 /* POSIX.1b doesn't mention process groups. */
3988 return kill_proc_info(sig, info, pid);
3989}
3990
3991/**
3992 * sys_rt_sigqueueinfo - send signal information to a process
3993 * @pid: the PID of the thread
3994 * @sig: signal to be sent
3995 * @uinfo: signal info to be sent
3996 */
3997SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3998 siginfo_t __user *, uinfo)
3999{
4000 kernel_siginfo_t info;
4001 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4002 if (unlikely(ret))
4003 return ret;
4004 return do_rt_sigqueueinfo(pid, sig, &info);
4005}
4006
4007#ifdef CONFIG_COMPAT
4008COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4009 compat_pid_t, pid,
4010 int, sig,
4011 struct compat_siginfo __user *, uinfo)
4012{
4013 kernel_siginfo_t info;
4014 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4015 if (unlikely(ret))
4016 return ret;
4017 return do_rt_sigqueueinfo(pid, sig, &info);
4018}
4019#endif
4020
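/*
 * Illustrative userspace sketch (editorial): glibc's sigqueue(3) is built
 * on rt_sigqueueinfo with si_code = SI_QUEUE, carrying a word of payload
 * that an SA_SIGINFO handler can read back from si_value.
 * notify_with_payload() is a hypothetical helper.
 */
#if 0	/* standalone userspace example */
#include <signal.h>
#include <sys/types.h>

static int notify_with_payload(pid_t pid, int token)
{
	union sigval value = { .sival_int = token };

	return sigqueue(pid, SIGRTMIN, value);
}
#endif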
4021static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4022{
4023 /* This is only valid for single tasks */
4024 if (pid <= 0 || tgid <= 0)
4025 return -EINVAL;
4026
4027 /* Not even root can pretend to send signals from the kernel.
4028 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4029 */
4030 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4031 (task_pid_vnr(current) != pid))
4032 return -EPERM;
4033
4034 return do_send_specific(tgid, pid, sig, info);
4035}
4036
4037SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4038 siginfo_t __user *, uinfo)
4039{
4040 kernel_siginfo_t info;
4041 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4042 if (unlikely(ret))
4043 return ret;
4044 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4045}
4046
4047#ifdef CONFIG_COMPAT
4048COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4049 compat_pid_t, tgid,
4050 compat_pid_t, pid,
4051 int, sig,
4052 struct compat_siginfo __user *, uinfo)
4053{
4054 kernel_siginfo_t info;
4055 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4056 if (unlikely(ret))
4057 return ret;
4058 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4059}
4060#endif
4061
4062/*
4063 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4064 */
4065void kernel_sigaction(int sig, __sighandler_t action)
4066{
4067 spin_lock_irq(&current->sighand->siglock);
4068 current->sighand->action[sig - 1].sa.sa_handler = action;
4069 if (action == SIG_IGN) {
4070 sigset_t mask;
4071
4072 sigemptyset(&mask);
4073 sigaddset(&mask, sig);
4074
4075 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4076 flush_sigqueue_mask(&mask, &current->pending);
4077 recalc_sigpending();
4078 }
4079 spin_unlock_irq(&current->sighand->siglock);
4080}
4081EXPORT_SYMBOL(kernel_sigaction);
4082
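/*
 * Illustrative in-kernel sketch (editorial): kernel_sigaction() is the
 * backend of the allow_signal()/disallow_signal() helpers, letting a
 * kthread opt in to a signal and later revert to ignoring it.
 * my_kthread() is a hypothetical thread function run via kthread_run().
 */
#if 0	/* in-kernel example only; assumes kthread context */
#include <linux/kthread.h>
#include <linux/sched/signal.h>

static int my_kthread(void *data)
{
	allow_signal(SIGKILL);		/* wraps kernel_sigaction() */
	while (!kthread_should_stop()) {
		if (signal_pending(current))
			break;
		schedule_timeout_interruptible(HZ);
	}
	disallow_signal(SIGKILL);	/* back to SIG_IGN via kernel_sigaction() */
	return 0;
}
#endif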
4083void __weak sigaction_compat_abi(struct k_sigaction *act,
4084 struct k_sigaction *oact)
4085{
4086}
4087
4088int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4089{
4090 struct task_struct *p = current, *t;
4091 struct k_sigaction *k;
4092 sigset_t mask;
4093
4094 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4095 return -EINVAL;
4096
4097 k = &p->sighand->action[sig-1];
4098
4099 spin_lock_irq(&p->sighand->siglock);
4100 if (k->sa.sa_flags & SA_IMMUTABLE) {
4101 spin_unlock_irq(&p->sighand->siglock);
4102 return -EINVAL;
4103 }
4104 if (oact)
4105 *oact = *k;
4106
4107 /*
4108 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4109 * e.g. by having an architecture use the bit in their uapi.
4110 */
4111 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4112
4113 /*
4114 * Clear unknown flag bits in order to allow userspace to detect missing
4115 * support for flag bits and to allow the kernel to use non-uapi bits
4116 * internally.
4117 */
4118 if (act)
4119 act->sa.sa_flags &= UAPI_SA_FLAGS;
4120 if (oact)
4121 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4122
4123 sigaction_compat_abi(act, oact);
4124
4125 if (act) {
4126 sigdelsetmask(&act->sa.sa_mask,
4127 sigmask(SIGKILL) | sigmask(SIGSTOP));
4128 *k = *act;
4129 /*
4130 * POSIX 3.3.1.3:
4131 * "Setting a signal action to SIG_IGN for a signal that is
4132 * pending shall cause the pending signal to be discarded,
4133 * whether or not it is blocked."
4134 *
4135 * "Setting a signal action to SIG_DFL for a signal that is
4136 * pending and whose default action is to ignore the signal
4137 * (for example, SIGCHLD), shall cause the pending signal to
4138 * be discarded, whether or not it is blocked"
4139 */
4140 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4141 sigemptyset(&mask);
4142 sigaddset(&mask, sig);
4143 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4144 for_each_thread(p, t)
4145 flush_sigqueue_mask(&mask, &t->pending);
4146 }
4147 }
4148
4149 spin_unlock_irq(&p->sighand->siglock);
4150 return 0;
4151}
4152
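/*
 * Illustrative userspace sketch (editorial): the userspace view of
 * do_sigaction(). Unknown sa_flags bits are cleared by the kernel, so a
 * program can probe for flag support by reading them back. The handler
 * only touches a sig_atomic_t, keeping it async-signal-safe.
 * on_rt() and install() are hypothetical names.
 */
#if 0	/* standalone userspace example */
#include <signal.h>

static volatile sig_atomic_t last_token;

static void on_rt(int sig, siginfo_t *si, void *ucontext)
{
	(void)sig;
	(void)ucontext;
	last_token = si->si_value.sival_int;
}

static int install(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = on_rt;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGRTMIN, &sa, NULL);
}
#endif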
4153#ifdef CONFIG_DYNAMIC_SIGFRAME
4154static inline void sigaltstack_lock(void)
4155 __acquires(&current->sighand->siglock)
4156{
4157 spin_lock_irq(&current->sighand->siglock);
4158}
4159
4160static inline void sigaltstack_unlock(void)
4161 __releases(&current->sighand->siglock)
4162{
4163 spin_unlock_irq(&current->sighand->siglock);
4164}
4165#else
4166static inline void sigaltstack_lock(void) { }
4167static inline void sigaltstack_unlock(void) { }
4168#endif
4169
4170static int
4171do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
4172 size_t min_ss_size)
4173{
4174 struct task_struct *t = current;
4175 int ret = 0;
4176
4177 if (oss) {
4178 memset(oss, 0, sizeof(stack_t));
4179 oss->ss_sp = (void __user *) t->sas_ss_sp;
4180 oss->ss_size = t->sas_ss_size;
4181 oss->ss_flags = sas_ss_flags(sp) |
4182 (current->sas_ss_flags & SS_FLAG_BITS);
4183 }
4184
4185 if (ss) {
4186 void __user *ss_sp = ss->ss_sp;
4187 size_t ss_size = ss->ss_size;
4188 unsigned ss_flags = ss->ss_flags;
4189 int ss_mode;
4190
4191 if (unlikely(on_sig_stack(sp)))
4192 return -EPERM;
4193
4194 ss_mode = ss_flags & ~SS_FLAG_BITS;
4195 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4196 ss_mode != 0))
4197 return -EINVAL;
4198
4199 /*
4200 * Return before taking any locks if no actual
4201 * sigaltstack changes were requested.
4202 */
4203 if (t->sas_ss_sp == (unsigned long)ss_sp &&
4204 t->sas_ss_size == ss_size &&
4205 t->sas_ss_flags == ss_flags)
4206 return 0;
4207
4208 sigaltstack_lock();
4209 if (ss_mode == SS_DISABLE) {
4210 ss_size = 0;
4211 ss_sp = NULL;
4212 } else {
4213 if (unlikely(ss_size < min_ss_size))
4214 ret = -ENOMEM;
4215 if (!sigaltstack_size_valid(ss_size))
4216 ret = -ENOMEM;
4217 }
4218 if (!ret) {
4219 t->sas_ss_sp = (unsigned long) ss_sp;
4220 t->sas_ss_size = ss_size;
4221 t->sas_ss_flags = ss_flags;
4222 }
4223 sigaltstack_unlock();
4224 }
4225 return ret;
4226}
4227
4228SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
4229{
4230 stack_t new, old;
4231 int err;
4232 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4233 return -EFAULT;
4234 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4235 current_user_stack_pointer(),
4236 MINSIGSTKSZ);
4237 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4238 err = -EFAULT;
4239 return err;
4240}
4241
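/*
 * Illustrative userspace sketch (editorial): the usual pairing of
 * sigaltstack(2) with an SA_ONSTACK handler, so a stack-overflow SIGSEGV
 * can still be caught when the normal stack is unusable.
 * setup_segv_stack() is a hypothetical helper.
 */
#if 0	/* standalone userspace example */
#include <signal.h>
#include <stdlib.h>

static int setup_segv_stack(void (*handler)(int, siginfo_t *, void *))
{
	stack_t ss = {
		.ss_sp = malloc(SIGSTKSZ),
		.ss_size = SIGSTKSZ,
		.ss_flags = 0,
	};
	struct sigaction sa = { 0 };

	if (!ss.ss_sp || sigaltstack(&ss, NULL) < 0)
		return -1;
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGSEGV, &sa, NULL);
}
#endif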
4242int restore_altstack(const stack_t __user *uss)
4243{
4244 stack_t new;
4245 if (copy_from_user(&new, uss, sizeof(stack_t)))
4246 return -EFAULT;
4247 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4248 MINSIGSTKSZ);
4249 /* squash all but EFAULT for now */
4250 return 0;
4251}
4252
4253int __save_altstack(stack_t __user *uss, unsigned long sp)
4254{
4255 struct task_struct *t = current;
4256 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4257 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4258 __put_user(t->sas_ss_size, &uss->ss_size);
4259 return err;
4260}
4261
4262#ifdef CONFIG_COMPAT
4263static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4264 compat_stack_t __user *uoss_ptr)
4265{
4266 stack_t uss, uoss;
4267 int ret;
4268
4269 if (uss_ptr) {
4270 compat_stack_t uss32;
4271 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4272 return -EFAULT;
4273 uss.ss_sp = compat_ptr(uss32.ss_sp);
4274 uss.ss_flags = uss32.ss_flags;
4275 uss.ss_size = uss32.ss_size;
4276 }
4277 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4278 compat_user_stack_pointer(),
4279 COMPAT_MINSIGSTKSZ);
4280 if (ret >= 0 && uoss_ptr) {
4281 compat_stack_t old;
4282 memset(&old, 0, sizeof(old));
4283 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4284 old.ss_flags = uoss.ss_flags;
4285 old.ss_size = uoss.ss_size;
4286 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4287 ret = -EFAULT;
4288 }
4289 return ret;
4290}
4291
4292COMPAT_SYSCALL_DEFINE2(sigaltstack,
4293 const compat_stack_t __user *, uss_ptr,
4294 compat_stack_t __user *, uoss_ptr)
4295{
4296 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4297}
4298
4299int compat_restore_altstack(const compat_stack_t __user *uss)
4300{
4301 int err = do_compat_sigaltstack(uss, NULL);
4302 /* squash all but -EFAULT for now */
4303 return err == -EFAULT ? err : 0;
4304}
4305
4306int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4307{
4308 int err;
4309 struct task_struct *t = current;
4310 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4311 &uss->ss_sp) |
4312 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4313 __put_user(t->sas_ss_size, &uss->ss_size);
4314 return err;
4315}
4316#endif
4317
4318#ifdef __ARCH_WANT_SYS_SIGPENDING
4319
4320/**
4321 * sys_sigpending - examine pending signals
4322 * @uset: where the mask of pending signals is returned
4323 */
4324SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4325{
4326 sigset_t set;
4327
4328 if (sizeof(old_sigset_t) > sizeof(*uset))
4329 return -EINVAL;
4330
4331 do_sigpending(&set);
4332
4333 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4334 return -EFAULT;
4335
4336 return 0;
4337}
4338
4339#ifdef CONFIG_COMPAT
4340COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4341{
4342 sigset_t set;
4343
4344 do_sigpending(&set);
4345
4346 return put_user(set.sig[0], set32);
4347}
4348#endif
4349
4350#endif
4351
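/*
 * Illustrative userspace sketch (editorial): inspecting signals that are
 * blocked-and-pending through sigpending(3), which glibc routes to the
 * modern rt_sigpending rather than the legacy entry point above.
 * sigusr1_pending() is a hypothetical helper.
 */
#if 0	/* standalone userspace example */
#include <signal.h>

static int sigusr1_pending(void)
{
	sigset_t pending;

	if (sigpending(&pending) < 0)
		return -1;
	return sigismember(&pending, SIGUSR1);
}
#endif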
4352#ifdef __ARCH_WANT_SYS_SIGPROCMASK
4353/**
4354 * sys_sigprocmask - examine and change blocked signals
4355 * @how: whether to add, remove, or set signals
4356 * @nset: signals to add or remove (if non-null)
4357 * @oset: previous value of signal mask if non-null
4358 *
4359 * Some platforms have their own version with special arguments;
4360 * others support only sys_rt_sigprocmask.
4361 */
4362
4363SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4364 old_sigset_t __user *, oset)
4365{
4366 old_sigset_t old_set, new_set;
4367 sigset_t new_blocked;
4368
4369 old_set = current->blocked.sig[0];
4370
4371 if (nset) {
4372 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4373 return -EFAULT;
4374
4375 new_blocked = current->blocked;
4376
4377 switch (how) {
4378 case SIG_BLOCK:
4379 sigaddsetmask(&new_blocked, new_set);
4380 break;
4381 case SIG_UNBLOCK:
4382 sigdelsetmask(&new_blocked, new_set);
4383 break;
4384 case SIG_SETMASK:
4385 new_blocked.sig[0] = new_set;
4386 break;
4387 default:
4388 return -EINVAL;
4389 }
4390
4391 set_current_blocked(&new_blocked);
4392 }
4393
4394 if (oset) {
4395 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4396 return -EFAULT;
4397 }
4398
4399 return 0;
4400}
4401#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4402
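/*
 * Illustrative userspace sketch (editorial): the same SIG_BLOCK /
 * SIG_SETMASK semantics via the sigprocmask(2) wrapper (which glibc routes
 * to rt_sigprocmask); the old-set/new-set handling mirrors the code above.
 * with_sigint_blocked() is a hypothetical helper.
 */
#if 0	/* standalone userspace example */
#include <signal.h>

static void with_sigint_blocked(void (*fn)(void))
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);
	sigprocmask(SIG_BLOCK, &block, &old);
	fn();				/* SIGINT stays pending here */
	sigprocmask(SIG_SETMASK, &old, NULL);
}
#endif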
4403#ifndef CONFIG_ODD_RT_SIGACTION
4404/**
4405 * sys_rt_sigaction - alter an action taken by a process
4406 * @sig: signal to be sent
4407 * @act: new sigaction
4408 * @oact: used to save the previous sigaction
4409 * @sigsetsize: size of sigset_t type
4410 */
4411SYSCALL_DEFINE4(rt_sigaction, int, sig,
4412 const struct sigaction __user *, act,
4413 struct sigaction __user *, oact,
4414 size_t, sigsetsize)
4415{
4416 struct k_sigaction new_sa, old_sa;
4417 int ret;
4418
4419 /* XXX: Don't preclude handling different sized sigset_t's. */
4420 if (sigsetsize != sizeof(sigset_t))
4421 return -EINVAL;
4422
4423 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4424 return -EFAULT;
4425
4426 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4427 if (ret)
4428 return ret;
4429
4430 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4431 return -EFAULT;
4432
4433 return 0;
4434}
4435#ifdef CONFIG_COMPAT
4436COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4437 const struct compat_sigaction __user *, act,
4438 struct compat_sigaction __user *, oact,
4439 compat_size_t, sigsetsize)
4440{
4441 struct k_sigaction new_ka, old_ka;
4442#ifdef __ARCH_HAS_SA_RESTORER
4443 compat_uptr_t restorer;
4444#endif
4445 int ret;
4446
4447 /* XXX: Don't preclude handling different sized sigset_t's. */
4448 if (sigsetsize != sizeof(compat_sigset_t))
4449 return -EINVAL;
4450
4451 if (act) {
4452 compat_uptr_t handler;
4453 ret = get_user(handler, &act->sa_handler);
4454 new_ka.sa.sa_handler = compat_ptr(handler);
4455#ifdef __ARCH_HAS_SA_RESTORER
4456 ret |= get_user(restorer, &act->sa_restorer);
4457 new_ka.sa.sa_restorer = compat_ptr(restorer);
4458#endif
4459 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4460 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4461 if (ret)
4462 return -EFAULT;
4463 }
4464
4465 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4466 if (!ret && oact) {
4467 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4468 &oact->sa_handler);
4469 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4470 sizeof(oact->sa_mask));
4471 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4472#ifdef __ARCH_HAS_SA_RESTORER
4473 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4474 &oact->sa_restorer);
4475#endif
4476 }
4477 return ret;
4478}
4479#endif
4480#endif /* !CONFIG_ODD_RT_SIGACTION */
4481
4482#ifdef CONFIG_OLD_SIGACTION
4483SYSCALL_DEFINE3(sigaction, int, sig,
4484 const struct old_sigaction __user *, act,
4485 struct old_sigaction __user *, oact)
4486{
4487 struct k_sigaction new_ka, old_ka;
4488 int ret;
4489
4490 if (act) {
4491 old_sigset_t mask;
4492 if (!access_ok(act, sizeof(*act)) ||
4493 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4494 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4495 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4496 __get_user(mask, &act->sa_mask))
4497 return -EFAULT;
4498#ifdef __ARCH_HAS_KA_RESTORER
4499 new_ka.ka_restorer = NULL;
4500#endif
4501 siginitset(&new_ka.sa.sa_mask, mask);
4502 }
4503
4504 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4505
4506 if (!ret && oact) {
4507 if (!access_ok(oact, sizeof(*oact)) ||
4508 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4509 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4510 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4511 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4512 return -EFAULT;
4513 }
4514
4515 return ret;
4516}
4517#endif
4518#ifdef CONFIG_COMPAT_OLD_SIGACTION
4519COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4520 const struct compat_old_sigaction __user *, act,
4521 struct compat_old_sigaction __user *, oact)
4522{
4523 struct k_sigaction new_ka, old_ka;
4524 int ret;
4525 compat_old_sigset_t mask;
4526 compat_uptr_t handler, restorer;
4527
4528 if (act) {
4529 if (!access_ok(act, sizeof(*act)) ||
4530 __get_user(handler, &act->sa_handler) ||
4531 __get_user(restorer, &act->sa_restorer) ||
4532 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4533 __get_user(mask, &act->sa_mask))
4534 return -EFAULT;
4535
4536#ifdef __ARCH_HAS_KA_RESTORER
4537 new_ka.ka_restorer = NULL;
4538#endif
4539 new_ka.sa.sa_handler = compat_ptr(handler);
4540 new_ka.sa.sa_restorer = compat_ptr(restorer);
4541 siginitset(&new_ka.sa.sa_mask, mask);
4542 }
4543
4544 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4545
4546 if (!ret && oact) {
4547 if (!access_ok(oact, sizeof(*oact)) ||
4548 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4549 &oact->sa_handler) ||
4550 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4551 &oact->sa_restorer) ||
4552 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4553 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4554 return -EFAULT;
4555 }
4556 return ret;
4557}
4558#endif
4559
4560#ifdef CONFIG_SGETMASK_SYSCALL
4561
4562/*
4563 * For backwards compatibility. Functionality superseded by sigprocmask.
4564 */
4565SYSCALL_DEFINE0(sgetmask)
4566{
4567 /* SMP safe */
4568 return current->blocked.sig[0];
4569}
4570
4571SYSCALL_DEFINE1(ssetmask, int, newmask)
4572{
4573 int old = current->blocked.sig[0];
4574 sigset_t newset;
4575
4576 siginitset(&newset, newmask);
4577 set_current_blocked(&newset);
4578
4579 return old;
4580}
4581#endif /* CONFIG_SGETMASK_SYSCALL */
4582
4583#ifdef __ARCH_WANT_SYS_SIGNAL
4584/*
4585 * For backwards compatibility. Functionality superseded by sigaction.
4586 */
4587SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4588{
4589 struct k_sigaction new_sa, old_sa;
4590 int ret;
4591
4592 new_sa.sa.sa_handler = handler;
4593 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4594 sigemptyset(&new_sa.sa.sa_mask);
4595
4596 ret = do_sigaction(sig, &new_sa, &old_sa);
4597
4598 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4599}
4600#endif /* __ARCH_WANT_SYS_SIGNAL */
4601
4602#ifdef __ARCH_WANT_SYS_PAUSE
4603
4604SYSCALL_DEFINE0(pause)
4605{
4606 while (!signal_pending(current)) {
4607 __set_current_state(TASK_INTERRUPTIBLE);
4608 schedule();
4609 }
4610 return -ERESTARTNOHAND;
4611}
4612
4613#endif
4614
4615static int sigsuspend(sigset_t *set)
4616{
4617 current->saved_sigmask = current->blocked;
4618 set_current_blocked(set);
4619
4620 while (!signal_pending(current)) {
4621 __set_current_state(TASK_INTERRUPTIBLE);
4622 schedule();
4623 }
4624 set_restore_sigmask();
4625 return -ERESTARTNOHAND;
4626}
4627
4628/**
4629 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
4630 * until a signal is received
4631 * @unewset: new signal mask value
4632 * @sigsetsize: size of sigset_t type
4633 */
4634SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4635{
4636 sigset_t newset;
4637
4638 /* XXX: Don't preclude handling different sized sigset_t's. */
4639 if (sigsetsize != sizeof(sigset_t))
4640 return -EINVAL;
4641
4642 if (copy_from_user(&newset, unewset, sizeof(newset)))
4643 return -EFAULT;
4644 return sigsuspend(&newset);
4645}
4646
4647#ifdef CONFIG_COMPAT
4648COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4649{
4650 sigset_t newset;
4651
4652 /* XXX: Don't preclude handling different sized sigset_t's. */
4653 if (sigsetsize != sizeof(sigset_t))
4654 return -EINVAL;
4655
4656 if (get_compat_sigset(&newset, unewset))
4657 return -EFAULT;
4658 return sigsuspend(&newset);
4659}
4660#endif
4661
4662#ifdef CONFIG_OLD_SIGSUSPEND
4663SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4664{
4665 sigset_t blocked;
4666 siginitset(&blocked, mask);
4667 return sigsuspend(&blocked);
4668}
4669#endif
4670#ifdef CONFIG_OLD_SIGSUSPEND3
4671SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4672{
4673 sigset_t blocked;
4674 siginitset(&blocked, mask);
4675 return sigsuspend(&blocked);
4676}
4677#endif
4678
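/*
 * Illustrative userspace sketch (editorial): the race-free wait that
 * sigsuspend() exists for. Checking a handler-set flag and then calling
 * pause() can lose a signal arriving between the two; atomically swapping
 * the mask inside sigsuspend(2) closes that window.
 * wait_for_sigusr1() is a hypothetical helper.
 */
#if 0	/* standalone userspace example */
#include <signal.h>

static volatile sig_atomic_t got_signal;	/* set by a SIGUSR1 handler */

static void wait_for_sigusr1(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);	/* close the race window */
	while (!got_signal)
		sigsuspend(&old);	/* atomically unblock and sleep */
	sigprocmask(SIG_SETMASK, &old, NULL);
}
#endif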
4679__weak const char *arch_vma_name(struct vm_area_struct *vma)
4680{
4681 return NULL;
4682}
4683
4684static inline void siginfo_buildtime_checks(void)
4685{
4686 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4687
4688 /* Verify the offsets in the two siginfos match */
4689#define CHECK_OFFSET(field) \
4690 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4691
4692 /* kill */
4693 CHECK_OFFSET(si_pid);
4694 CHECK_OFFSET(si_uid);
4695
4696 /* timer */
4697 CHECK_OFFSET(si_tid);
4698 CHECK_OFFSET(si_overrun);
4699 CHECK_OFFSET(si_value);
4700
4701 /* rt */
4702 CHECK_OFFSET(si_pid);
4703 CHECK_OFFSET(si_uid);
4704 CHECK_OFFSET(si_value);
4705
4706 /* sigchld */
4707 CHECK_OFFSET(si_pid);
4708 CHECK_OFFSET(si_uid);
4709 CHECK_OFFSET(si_status);
4710 CHECK_OFFSET(si_utime);
4711 CHECK_OFFSET(si_stime);
4712
4713 /* sigfault */
4714 CHECK_OFFSET(si_addr);
4715 CHECK_OFFSET(si_trapno);
4716 CHECK_OFFSET(si_addr_lsb);
4717 CHECK_OFFSET(si_lower);
4718 CHECK_OFFSET(si_upper);
4719 CHECK_OFFSET(si_pkey);
4720 CHECK_OFFSET(si_perf_data);
4721 CHECK_OFFSET(si_perf_type);
4722 CHECK_OFFSET(si_perf_flags);
4723
4724 /* sigpoll */
4725 CHECK_OFFSET(si_band);
4726 CHECK_OFFSET(si_fd);
4727
4728 /* sigsys */
4729 CHECK_OFFSET(si_call_addr);
4730 CHECK_OFFSET(si_syscall);
4731 CHECK_OFFSET(si_arch);
4732#undef CHECK_OFFSET
4733
4734 /* usb asyncio */
4735 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4736 offsetof(struct siginfo, si_addr));
4737 if (sizeof(int) == sizeof(void __user *)) {
4738 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4739 sizeof(void __user *));
4740 } else {
4741 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4742 sizeof_field(struct siginfo, si_uid)) !=
4743 sizeof(void __user *));
4744 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4745 offsetof(struct siginfo, si_uid));
4746 }
4747#ifdef CONFIG_COMPAT
4748 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4749 offsetof(struct compat_siginfo, si_addr));
4750 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4751 sizeof(compat_uptr_t));
4752 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4753 sizeof_field(struct siginfo, si_pid));
4754#endif
4755}
4756
4757void __init signals_init(void)
4758{
4759 siginfo_buildtime_checks();
4760
4761 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4762}
4763
4764#ifdef CONFIG_KGDB_KDB
4765#include <linux/kdb.h>
4766/*
4767 * kdb_send_sig - Allows kdb to send signals without exposing
4768 * signal internals. This function checks if the required locks are
4769 * available before calling the main signal code, to avoid kdb
4770 * deadlocks.
4771 */
4772void kdb_send_sig(struct task_struct *t, int sig)
4773{
4774 static struct task_struct *kdb_prev_t;
4775 int new_t, ret;
4776 if (!spin_trylock(&t->sighand->siglock)) {
4777 kdb_printf("Can't do kill command now.\n"
4778 "The sigmask lock is held somewhere else in "
4779 "kernel, try again later\n");
4780 return;
4781 }
4782 new_t = kdb_prev_t != t;
4783 kdb_prev_t = t;
4784 if (!task_is_running(t) && new_t) {
4785 spin_unlock(&t->sighand->siglock);
4786 kdb_printf("Process is not RUNNING, sending a signal from "
4787 "kdb risks deadlock\n"
4788 "on the run queue locks. "
4789 "The signal has _not_ been sent.\n"
4790 "Reissue the kill command if you want to risk "
4791 "the deadlock.\n");
4792 return;
4793 }
4794 ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4795 spin_unlock(&t->sighand->siglock);
4796 if (ret)
4797 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4798 sig, t->pid);
4799 else
4800 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4801}
4802#endif /* CONFIG_KGDB_KDB */
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/kernel/signal.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 *
7 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
8 *
9 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
10 * Changes to use preallocated sigqueue structures
11 * to allow signals to be sent reliably.
12 */
13
14#include <linux/slab.h>
15#include <linux/export.h>
16#include <linux/init.h>
17#include <linux/sched/mm.h>
18#include <linux/sched/user.h>
19#include <linux/sched/debug.h>
20#include <linux/sched/task.h>
21#include <linux/sched/task_stack.h>
22#include <linux/sched/cputime.h>
23#include <linux/file.h>
24#include <linux/fs.h>
25#include <linux/mm.h>
26#include <linux/proc_fs.h>
27#include <linux/tty.h>
28#include <linux/binfmts.h>
29#include <linux/coredump.h>
30#include <linux/security.h>
31#include <linux/syscalls.h>
32#include <linux/ptrace.h>
33#include <linux/signal.h>
34#include <linux/signalfd.h>
35#include <linux/ratelimit.h>
36#include <linux/task_work.h>
37#include <linux/capability.h>
38#include <linux/freezer.h>
39#include <linux/pid_namespace.h>
40#include <linux/nsproxy.h>
41#include <linux/user_namespace.h>
42#include <linux/uprobes.h>
43#include <linux/compat.h>
44#include <linux/cn_proc.h>
45#include <linux/compiler.h>
46#include <linux/posix-timers.h>
47#include <linux/cgroup.h>
48#include <linux/audit.h>
49#include <linux/sysctl.h>
50
51#define CREATE_TRACE_POINTS
52#include <trace/events/signal.h>
53
54#include <asm/param.h>
55#include <linux/uaccess.h>
56#include <asm/unistd.h>
57#include <asm/siginfo.h>
58#include <asm/cacheflush.h>
59#include <asm/syscall.h> /* for syscall_get_* */
60
61/*
62 * SLAB caches for signal bits.
63 */
64
65static struct kmem_cache *sigqueue_cachep;
66
67int print_fatal_signals __read_mostly;
68
69static void __user *sig_handler(struct task_struct *t, int sig)
70{
71 return t->sighand->action[sig - 1].sa.sa_handler;
72}
73
74static inline bool sig_handler_ignored(void __user *handler, int sig)
75{
76 /* Is it explicitly or implicitly ignored? */
77 return handler == SIG_IGN ||
78 (handler == SIG_DFL && sig_kernel_ignore(sig));
79}
80
81static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
82{
83 void __user *handler;
84
85 handler = sig_handler(t, sig);
86
87 /* SIGKILL and SIGSTOP may not be sent to the global init */
88 if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
89 return true;
90
91 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
92 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
93 return true;
94
95 /* Only allow kernel generated signals to this kthread */
96 if (unlikely((t->flags & PF_KTHREAD) &&
97 (handler == SIG_KTHREAD_KERNEL) && !force))
98 return true;
99
100 return sig_handler_ignored(handler, sig);
101}
102
103static bool sig_ignored(struct task_struct *t, int sig, bool force)
104{
105 /*
106 * Blocked signals are never ignored, since the
107 * signal handler may change by the time it is
108 * unblocked.
109 */
110 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
111 return false;
112
113 /*
114 * Tracers may want to know about even ignored signal unless it
115 * is SIGKILL which can't be reported anyway but can be ignored
116 * by SIGNAL_UNKILLABLE task.
117 */
118 if (t->ptrace && sig != SIGKILL)
119 return false;
120
121 return sig_task_ignored(t, sig, force);
122}
123
124/*
125 * Re-calculate pending state from the set of locally pending
126 * signals, globally pending signals, and blocked signals.
127 */
128static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
129{
130 unsigned long ready;
131 long i;
132
133 switch (_NSIG_WORDS) {
134 default:
135 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
136 ready |= signal->sig[i] &~ blocked->sig[i];
137 break;
138
139 case 4: ready = signal->sig[3] &~ blocked->sig[3];
140 ready |= signal->sig[2] &~ blocked->sig[2];
141 ready |= signal->sig[1] &~ blocked->sig[1];
142 ready |= signal->sig[0] &~ blocked->sig[0];
143 break;
144
145 case 2: ready = signal->sig[1] &~ blocked->sig[1];
146 ready |= signal->sig[0] &~ blocked->sig[0];
147 break;
148
149 case 1: ready = signal->sig[0] &~ blocked->sig[0];
150 }
151 return ready != 0;
152}
153
154#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
155
156static bool recalc_sigpending_tsk(struct task_struct *t)
157{
158 if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
159 PENDING(&t->pending, &t->blocked) ||
160 PENDING(&t->signal->shared_pending, &t->blocked) ||
161 cgroup_task_frozen(t)) {
162 set_tsk_thread_flag(t, TIF_SIGPENDING);
163 return true;
164 }
165
166 /*
167 * We must never clear the flag in another thread, or in current
168 * when it's possible the current syscall is returning -ERESTART*.
169 * So we don't clear it here, and only callers who know they should do.
170 */
171 return false;
172}
173
174void recalc_sigpending(void)
175{
176 if (!recalc_sigpending_tsk(current) && !freezing(current))
177 clear_thread_flag(TIF_SIGPENDING);
178
179}
180EXPORT_SYMBOL(recalc_sigpending);
181
182void calculate_sigpending(void)
183{
184 /* Have any signals or users of TIF_SIGPENDING been delayed
185 * until after fork?
186 */
187 spin_lock_irq(¤t->sighand->siglock);
188 set_tsk_thread_flag(current, TIF_SIGPENDING);
189 recalc_sigpending();
190 spin_unlock_irq(¤t->sighand->siglock);
191}
192
193/* Given the mask, find the first available signal that should be serviced. */
194
195#define SYNCHRONOUS_MASK \
196 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
197 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
198
199int next_signal(struct sigpending *pending, sigset_t *mask)
200{
201 unsigned long i, *s, *m, x;
202 int sig = 0;
203
204 s = pending->signal.sig;
205 m = mask->sig;
206
207 /*
208 * Handle the first word specially: it contains the
209 * synchronous signals that need to be dequeued first.
210 */
211 x = *s &~ *m;
212 if (x) {
213 if (x & SYNCHRONOUS_MASK)
214 x &= SYNCHRONOUS_MASK;
215 sig = ffz(~x) + 1;
216 return sig;
217 }
218
219 switch (_NSIG_WORDS) {
220 default:
221 for (i = 1; i < _NSIG_WORDS; ++i) {
222 x = *++s &~ *++m;
223 if (!x)
224 continue;
225 sig = ffz(~x) + i*_NSIG_BPW + 1;
226 break;
227 }
228 break;
229
230 case 2:
231 x = s[1] &~ m[1];
232 if (!x)
233 break;
234 sig = ffz(~x) + _NSIG_BPW + 1;
235 break;
236
237 case 1:
238 /* Nothing to do */
239 break;
240 }
241
242 return sig;
243}
244
245static inline void print_dropped_signal(int sig)
246{
247 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
248
249 if (!print_fatal_signals)
250 return;
251
252 if (!__ratelimit(&ratelimit_state))
253 return;
254
255 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
256 current->comm, current->pid, sig);
257}
258
259/**
260 * task_set_jobctl_pending - set jobctl pending bits
261 * @task: target task
262 * @mask: pending bits to set
263 *
264 * Clear @mask from @task->jobctl. @mask must be subset of
265 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
266 * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
267 * cleared. If @task is already being killed or exiting, this function
268 * becomes noop.
269 *
270 * CONTEXT:
271 * Must be called with @task->sighand->siglock held.
272 *
273 * RETURNS:
274 * %true if @mask is set, %false if made noop because @task was dying.
275 */
276bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
277{
278 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
279 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
280 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
281
282 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
283 return false;
284
285 if (mask & JOBCTL_STOP_SIGMASK)
286 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
287
288 task->jobctl |= mask;
289 return true;
290}
291
292/**
293 * task_clear_jobctl_trapping - clear jobctl trapping bit
294 * @task: target task
295 *
296 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
297 * Clear it and wake up the ptracer. Note that we don't need any further
298 * locking. @task->siglock guarantees that @task->parent points to the
299 * ptracer.
300 *
301 * CONTEXT:
302 * Must be called with @task->sighand->siglock held.
303 */
304void task_clear_jobctl_trapping(struct task_struct *task)
305{
306 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
307 task->jobctl &= ~JOBCTL_TRAPPING;
308 smp_mb(); /* advised by wake_up_bit() */
309 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
310 }
311}
312
313/**
314 * task_clear_jobctl_pending - clear jobctl pending bits
315 * @task: target task
316 * @mask: pending bits to clear
317 *
318 * Clear @mask from @task->jobctl. @mask must be subset of
319 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
320 * STOP bits are cleared together.
321 *
322 * If clearing of @mask leaves no stop or trap pending, this function calls
323 * task_clear_jobctl_trapping().
324 *
325 * CONTEXT:
326 * Must be called with @task->sighand->siglock held.
327 */
328void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
329{
330 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
331
332 if (mask & JOBCTL_STOP_PENDING)
333 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
334
335 task->jobctl &= ~mask;
336
337 if (!(task->jobctl & JOBCTL_PENDING_MASK))
338 task_clear_jobctl_trapping(task);
339}
340
341/**
342 * task_participate_group_stop - participate in a group stop
343 * @task: task participating in a group stop
344 *
345 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
346 * Group stop states are cleared and the group stop count is consumed if
347 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
348 * stop, the appropriate `SIGNAL_*` flags are set.
349 *
350 * CONTEXT:
351 * Must be called with @task->sighand->siglock held.
352 *
353 * RETURNS:
354 * %true if group stop completion should be notified to the parent, %false
355 * otherwise.
356 */
357static bool task_participate_group_stop(struct task_struct *task)
358{
359 struct signal_struct *sig = task->signal;
360 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
361
362 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
363
364 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
365
366 if (!consume)
367 return false;
368
369 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
370 sig->group_stop_count--;
371
372 /*
373 * Tell the caller to notify completion iff we are entering into a
374 * fresh group stop. Read comment in do_signal_stop() for details.
375 */
376 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
377 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
378 return true;
379 }
380 return false;
381}
382
383void task_join_group_stop(struct task_struct *task)
384{
385 unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
386 struct signal_struct *sig = current->signal;
387
388 if (sig->group_stop_count) {
389 sig->group_stop_count++;
390 mask |= JOBCTL_STOP_CONSUME;
391 } else if (!(sig->flags & SIGNAL_STOP_STOPPED))
392 return;
393
394 /* Have the new thread join an on-going signal group stop */
395 task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
396}
397
398/*
399 * allocate a new signal queue record
400 * - this may be called without locks if and only if t == current, otherwise an
401 * appropriate lock must be held to stop the target task from exiting
402 */
403static struct sigqueue *
404__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
405 int override_rlimit, const unsigned int sigqueue_flags)
406{
407 struct sigqueue *q = NULL;
408 struct ucounts *ucounts;
409 long sigpending;
410
411 /*
412 * Protect access to @t credentials. This can go away when all
413 * callers hold rcu read lock.
414 *
415 * NOTE! A pending signal will hold on to the user refcount,
416 * and we get/put the refcount only when the sigpending count
417 * changes from/to zero.
418 */
419 rcu_read_lock();
420 ucounts = task_ucounts(t);
421 sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
422 rcu_read_unlock();
423 if (!sigpending)
424 return NULL;
425
426 if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
427 q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
428 } else {
429 print_dropped_signal(sig);
430 }
431
432 if (unlikely(q == NULL)) {
433 dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
434 } else {
435 INIT_LIST_HEAD(&q->list);
436 q->flags = sigqueue_flags;
437 q->ucounts = ucounts;
438 }
439 return q;
440}
441
442static void __sigqueue_free(struct sigqueue *q)
443{
444 if (q->flags & SIGQUEUE_PREALLOC)
445 return;
446 if (q->ucounts) {
447 dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
448 q->ucounts = NULL;
449 }
450 kmem_cache_free(sigqueue_cachep, q);
451}
452
453void flush_sigqueue(struct sigpending *queue)
454{
455 struct sigqueue *q;
456
457 sigemptyset(&queue->signal);
458 while (!list_empty(&queue->list)) {
459 q = list_entry(queue->list.next, struct sigqueue , list);
460 list_del_init(&q->list);
461 __sigqueue_free(q);
462 }
463}
464
465/*
466 * Flush all pending signals for this kthread.
467 */
468void flush_signals(struct task_struct *t)
469{
470 unsigned long flags;
471
472 spin_lock_irqsave(&t->sighand->siglock, flags);
473 clear_tsk_thread_flag(t, TIF_SIGPENDING);
474 flush_sigqueue(&t->pending);
475 flush_sigqueue(&t->signal->shared_pending);
476 spin_unlock_irqrestore(&t->sighand->siglock, flags);
477}
478EXPORT_SYMBOL(flush_signals);
479
480#ifdef CONFIG_POSIX_TIMERS
481static void __flush_itimer_signals(struct sigpending *pending)
482{
483 sigset_t signal, retain;
484 struct sigqueue *q, *n;
485
486 signal = pending->signal;
487 sigemptyset(&retain);
488
489 list_for_each_entry_safe(q, n, &pending->list, list) {
490 int sig = q->info.si_signo;
491
492 if (likely(q->info.si_code != SI_TIMER)) {
493 sigaddset(&retain, sig);
494 } else {
495 sigdelset(&signal, sig);
496 list_del_init(&q->list);
497 __sigqueue_free(q);
498 }
499 }
500
501 sigorsets(&pending->signal, &signal, &retain);
502}
503
504void flush_itimer_signals(void)
505{
506 struct task_struct *tsk = current;
507 unsigned long flags;
508
509 spin_lock_irqsave(&tsk->sighand->siglock, flags);
510 __flush_itimer_signals(&tsk->pending);
511 __flush_itimer_signals(&tsk->signal->shared_pending);
512 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
513}
514#endif
515
516void ignore_signals(struct task_struct *t)
517{
518 int i;
519
520 for (i = 0; i < _NSIG; ++i)
521 t->sighand->action[i].sa.sa_handler = SIG_IGN;
522
523 flush_signals(t);
524}
525
526/*
527 * Flush all handlers for a task.
528 */
529
530void
531flush_signal_handlers(struct task_struct *t, int force_default)
532{
533 int i;
534 struct k_sigaction *ka = &t->sighand->action[0];
535 for (i = _NSIG ; i != 0 ; i--) {
536 if (force_default || ka->sa.sa_handler != SIG_IGN)
537 ka->sa.sa_handler = SIG_DFL;
538 ka->sa.sa_flags = 0;
539#ifdef __ARCH_HAS_SA_RESTORER
540 ka->sa.sa_restorer = NULL;
541#endif
542 sigemptyset(&ka->sa.sa_mask);
543 ka++;
544 }
545}
546
547bool unhandled_signal(struct task_struct *tsk, int sig)
548{
549 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
550 if (is_global_init(tsk))
551 return true;
552
553 if (handler != SIG_IGN && handler != SIG_DFL)
554 return false;
555
556 /* If dying, we handle all new signals by ignoring them */
557 if (fatal_signal_pending(tsk))
558 return false;
559
560 /* if ptraced, let the tracer determine */
561 return !tsk->ptrace;
562}
563
564static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
565 bool *resched_timer)
566{
567 struct sigqueue *q, *first = NULL;
568
569 /*
570 * Collect the siginfo appropriate to this signal. Check if
571 * there is another siginfo for the same signal.
572 */
573 list_for_each_entry(q, &list->list, list) {
574 if (q->info.si_signo == sig) {
575 if (first)
576 goto still_pending;
577 first = q;
578 }
579 }
580
581 sigdelset(&list->signal, sig);
582
583 if (first) {
584still_pending:
585 list_del_init(&first->list);
586 copy_siginfo(info, &first->info);
587
588 *resched_timer =
589 (first->flags & SIGQUEUE_PREALLOC) &&
590 (info->si_code == SI_TIMER) &&
591 (info->si_sys_private);
592
593 __sigqueue_free(first);
594 } else {
595 /*
596 * Ok, it wasn't in the queue. This must be
597 * a fast-pathed signal or we must have been
598 * out of queue space. So zero out the info.
599 */
600 clear_siginfo(info);
601 info->si_signo = sig;
602 info->si_errno = 0;
603 info->si_code = SI_USER;
604 info->si_pid = 0;
605 info->si_uid = 0;
606 }
607}
608
609static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
610 kernel_siginfo_t *info, bool *resched_timer)
611{
612 int sig = next_signal(pending, mask);
613
614 if (sig)
615 collect_signal(sig, pending, info, resched_timer);
616 return sig;
617}
618
619/*
620 * Dequeue a signal and return the element to the caller, which is
621 * expected to free it.
622 *
623 * All callers have to hold the siglock.
624 */
625int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
626 kernel_siginfo_t *info, enum pid_type *type)
627{
628 bool resched_timer = false;
629 int signr;
630
631 /* We only dequeue private signals from ourselves, we don't let
632 * signalfd steal them
633 */
634 *type = PIDTYPE_PID;
635 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
636 if (!signr) {
637 *type = PIDTYPE_TGID;
638 signr = __dequeue_signal(&tsk->signal->shared_pending,
639 mask, info, &resched_timer);
640#ifdef CONFIG_POSIX_TIMERS
641 /*
642 * itimer signal ?
643 *
644 * itimers are process shared and we restart periodic
645 * itimers in the signal delivery path to prevent DoS
646 * attacks in the high resolution timer case. This is
647 * compliant with the old way of self-restarting
648 * itimers, as the SIGALRM is a legacy signal and only
649 * queued once. Changing the restart behaviour to
650 * restart the timer in the signal dequeue path is
651 * reducing the timer noise on heavy loaded !highres
652 * systems too.
653 */
654 if (unlikely(signr == SIGALRM)) {
655 struct hrtimer *tmr = &tsk->signal->real_timer;
656
657 if (!hrtimer_is_queued(tmr) &&
658 tsk->signal->it_real_incr != 0) {
659 hrtimer_forward(tmr, tmr->base->get_time(),
660 tsk->signal->it_real_incr);
661 hrtimer_restart(tmr);
662 }
663 }
664#endif
665 }
666
667 recalc_sigpending();
668 if (!signr)
669 return 0;
670
671 if (unlikely(sig_kernel_stop(signr))) {
672 /*
673 * Set a marker that we have dequeued a stop signal. Our
674 * caller might release the siglock and then the pending
675 * stop signal it is about to process is no longer in the
676 * pending bitmasks, but must still be cleared by a SIGCONT
677 * (and overruled by a SIGKILL). So those cases clear this
678 * shared flag after we've set it. Note that this flag may
679 * remain set after the signal we return is ignored or
680 * handled. That doesn't matter because its only purpose
681 * is to alert stop-signal processing code when another
682 * processor has come along and cleared the flag.
683 */
684 current->jobctl |= JOBCTL_STOP_DEQUEUED;
685 }
686#ifdef CONFIG_POSIX_TIMERS
687 if (resched_timer) {
688 /*
689 * Release the siglock to ensure proper locking order
690 * of timer locks outside of siglocks. Note, we leave
691 * irqs disabled here, since the posix-timers code is
692 * about to disable them again anyway.
693 */
694 spin_unlock(&tsk->sighand->siglock);
695 posixtimer_rearm(info);
696 spin_lock(&tsk->sighand->siglock);
697
698 /* Don't expose the si_sys_private value to userspace */
699 info->si_sys_private = 0;
700 }
701#endif
702 return signr;
703}
704EXPORT_SYMBOL_GPL(dequeue_signal);
705
706static int dequeue_synchronous_signal(kernel_siginfo_t *info)
707{
708 struct task_struct *tsk = current;
709 struct sigpending *pending = &tsk->pending;
710 struct sigqueue *q, *sync = NULL;
711
712 /*
713 * Might a synchronous signal be in the queue?
714 */
715 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
716 return 0;
717
718 /*
719 * Return the first synchronous signal in the queue.
720 */
721 list_for_each_entry(q, &pending->list, list) {
722 /* Synchronous signals have a positive si_code */
723 if ((q->info.si_code > SI_USER) &&
724 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
725 sync = q;
726 goto next;
727 }
728 }
729 return 0;
730next:
731 /*
732 * Check if there is another siginfo for the same signal.
733 */
734 list_for_each_entry_continue(q, &pending->list, list) {
735 if (q->info.si_signo == sync->info.si_signo)
736 goto still_pending;
737 }
738
739 sigdelset(&pending->signal, sync->info.si_signo);
740 recalc_sigpending();
741still_pending:
742 list_del_init(&sync->list);
743 copy_siginfo(info, &sync->info);
744 __sigqueue_free(sync);
745 return info->si_signo;
746}
747
748/*
749 * Tell a process that it has a new active signal..
750 *
751 * NOTE! we rely on the previous spin_lock to
752 * lock interrupts for us! We can only be called with
753 * "siglock" held, and the local interrupt must
754 * have been disabled when that got acquired!
755 *
756 * No need to set need_resched since signal event passing
757 * goes through ->blocked
758 */
759void signal_wake_up_state(struct task_struct *t, unsigned int state)
760{
761 lockdep_assert_held(&t->sighand->siglock);
762
763 set_tsk_thread_flag(t, TIF_SIGPENDING);
764
765 /*
766 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
767 * case. We don't check t->state here because there is a race with it
768 * executing another processor and just now entering stopped state.
769 * By using wake_up_state, we ensure the process will wake up and
770 * handle its death signal.
771 */
772 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
773 kick_process(t);
774}
775
776/*
777 * Remove signals in mask from the pending set and queue.
778 * Returns 1 if any signals were found.
779 *
780 * All callers must be holding the siglock.
781 */
782static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
783{
784 struct sigqueue *q, *n;
785 sigset_t m;
786
787 sigandsets(&m, mask, &s->signal);
788 if (sigisemptyset(&m))
789 return;
790
791 sigandnsets(&s->signal, &s->signal, mask);
792 list_for_each_entry_safe(q, n, &s->list, list) {
793 if (sigismember(mask, q->info.si_signo)) {
794 list_del_init(&q->list);
795 __sigqueue_free(q);
796 }
797 }
798}
799
800static inline int is_si_special(const struct kernel_siginfo *info)
801{
802 return info <= SEND_SIG_PRIV;
803}
804
805static inline bool si_fromuser(const struct kernel_siginfo *info)
806{
807 return info == SEND_SIG_NOINFO ||
808 (!is_si_special(info) && SI_FROMUSER(info));
809}
810
811/*
812 * called with RCU read lock from check_kill_permission()
813 */
814static bool kill_ok_by_cred(struct task_struct *t)
815{
816 const struct cred *cred = current_cred();
817 const struct cred *tcred = __task_cred(t);
818
819 return uid_eq(cred->euid, tcred->suid) ||
820 uid_eq(cred->euid, tcred->uid) ||
821 uid_eq(cred->uid, tcred->suid) ||
822 uid_eq(cred->uid, tcred->uid) ||
823 ns_capable(tcred->user_ns, CAP_KILL);
824}
825
826/*
827 * Bad permissions for sending the signal
828 * - the caller must hold the RCU read lock
829 */
830static int check_kill_permission(int sig, struct kernel_siginfo *info,
831 struct task_struct *t)
832{
833 struct pid *sid;
834 int error;
835
836 if (!valid_signal(sig))
837 return -EINVAL;
838
839 if (!si_fromuser(info))
840 return 0;
841
842 error = audit_signal_info(sig, t); /* Let audit system see the signal */
843 if (error)
844 return error;
845
846 if (!same_thread_group(current, t) &&
847 !kill_ok_by_cred(t)) {
848 switch (sig) {
849 case SIGCONT:
850 sid = task_session(t);
851 /*
852 * We don't return the error if sid == NULL. The
853 * task was unhashed, the caller must notice this.
854 */
855 if (!sid || sid == task_session(current))
856 break;
857 fallthrough;
858 default:
859 return -EPERM;
860 }
861 }
862
863 return security_task_kill(t, info, sig, NULL);
864}
865
866/**
867 * ptrace_trap_notify - schedule trap to notify ptracer
868 * @t: tracee wanting to notify tracer
869 *
870 * This function schedules sticky ptrace trap which is cleared on the next
871 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
872 * ptracer.
873 *
874 * If @t is running, STOP trap will be taken. If trapped for STOP and
875 * ptracer is listening for events, tracee is woken up so that it can
876 * re-trap for the new event. If trapped otherwise, STOP trap will be
877 * eventually taken without returning to userland after the existing traps
878 * are finished by PTRACE_CONT.
879 *
880 * CONTEXT:
881 * Must be called with @task->sighand->siglock held.
882 */
883static void ptrace_trap_notify(struct task_struct *t)
884{
885 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
886 lockdep_assert_held(&t->sighand->siglock);
887
888 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
889 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
890}
891
892/*
893 * Handle magic process-wide effects of stop/continue signals. Unlike
894 * the signal actions, these happen immediately at signal-generation
895 * time regardless of blocking, ignoring, or handling. This does the
896 * actual continuing for SIGCONT, but not the actual stopping for stop
897 * signals. The process stop is done as a signal action for SIG_DFL.
898 *
899 * Returns true if the signal should be actually delivered, otherwise
900 * it should be dropped.
901 */
902static bool prepare_signal(int sig, struct task_struct *p, bool force)
903{
904 struct signal_struct *signal = p->signal;
905 struct task_struct *t;
906 sigset_t flush;
907
908 if (signal->flags & SIGNAL_GROUP_EXIT) {
909 if (signal->core_state)
910 return sig == SIGKILL;
911 /*
912 * The process is in the middle of dying, drop the signal.
913 */
914 return false;
915 } else if (sig_kernel_stop(sig)) {
916 /*
917 * This is a stop signal. Remove SIGCONT from all queues.
918 */
919 siginitset(&flush, sigmask(SIGCONT));
920 flush_sigqueue_mask(&flush, &signal->shared_pending);
921 for_each_thread(p, t)
922 flush_sigqueue_mask(&flush, &t->pending);
923 } else if (sig == SIGCONT) {
924 unsigned int why;
925 /*
926 * Remove all stop signals from all queues, wake all threads.
927 */
928 siginitset(&flush, SIG_KERNEL_STOP_MASK);
929 flush_sigqueue_mask(&flush, &signal->shared_pending);
930 for_each_thread(p, t) {
931 flush_sigqueue_mask(&flush, &t->pending);
932 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
933 if (likely(!(t->ptrace & PT_SEIZED))) {
934 t->jobctl &= ~JOBCTL_STOPPED;
935 wake_up_state(t, __TASK_STOPPED);
936 } else
937 ptrace_trap_notify(t);
938 }
939
940 /*
941 * Notify the parent with CLD_CONTINUED if we were stopped.
942 *
943 * If we were in the middle of a group stop, we pretend it
944 * was already finished, and then continued. Since SIGCHLD
945 * doesn't queue we report only CLD_STOPPED, as if the next
946 * CLD_CONTINUED was dropped.
947 */
948 why = 0;
949 if (signal->flags & SIGNAL_STOP_STOPPED)
950 why |= SIGNAL_CLD_CONTINUED;
951 else if (signal->group_stop_count)
952 why |= SIGNAL_CLD_STOPPED;
953
954 if (why) {
955 /*
956 * The first thread which returns from do_signal_stop()
957 * will take ->siglock, notice SIGNAL_CLD_MASK, and
958 * notify its parent. See get_signal().
959 */
960 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
961 signal->group_stop_count = 0;
962 signal->group_exit_code = 0;
963 }
964 }
965
966 return !sig_ignored(p, sig, force);
967}
968
/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG. Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals. Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * Try the suggested task first (may or may not be the main thread).
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread. If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			__for_each_thread(signal, t) {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			}
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

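/*
 * Illustration (not kernel code): legacy_queue() is why standard signals
 * coalesce while realtime signals accumulate. A hedged userspace sketch,
 * assuming SA_SIGINFO handlers are installed for both signals:
 *
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigaddset(&set, SIGRTMIN);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	kill(getpid(), SIGUSR1);	// becomes pending
 *	kill(getpid(), SIGUSR1);	// coalesced with the first
 *	sigqueue(getpid(), SIGRTMIN, (union sigval){ .sival_int = 1 });
 *	sigqueue(getpid(), SIGRTMIN, (union sigval){ .sival_int = 2 });
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);
 *
 * On unblock the handler runs once for SIGUSR1 but twice for SIGRTMIN,
 * in sival_int order, because only sig < SIGRTMIN takes the early return.
 */
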
static int __send_signal_locked(int sig, struct kernel_siginfo *info,
				struct task_struct *t, enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	lockdep_assert_held(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism. It is implementation
	 * defined whether kill() does so. We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort. We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information. We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

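/*
 * Note on the override_rlimit logic above, from the sending side: a
 * userspace sigqueue() call carries si_code == SI_QUEUE (< 0), so it
 * never overrides RLIMIT_SIGPENDING, and for realtime signals the
 * failure surfaces as -EAGAIN via the overflow path. A hedged sketch
 * of probing that limit (assumes SIGRTMIN is blocked so the first
 * entry stays queued, and that no other signals hold slots):
 *
 *	struct rlimit rl = { .rlim_cur = 1, .rlim_max = 1 };
 *	setrlimit(RLIMIT_SIGPENDING, &rl);
 *	sigqueue(getpid(), SIGRTMIN, (union sigval){0});	// ok
 *	if (sigqueue(getpid(), SIGRTMIN, (union sigval){0}) < 0)
 *		perror("sigqueue");	// expected: EAGAIN
 */
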
static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

int send_signal_locked(int sig, struct kernel_siginfo *info,
		       struct task_struct *t, enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* The siginfo layout carries pid and uid fields */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal_locked(sig, info, t, type, force);
}

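/*
 * Observable effect of the translation above: when the sender lives in
 * an ancestor pid namespace, the receiver has no pid to name it by, so
 * it sees si_pid == 0 while si_uid is translated into its user
 * namespace. A hedged sketch of the receiving side:
 *
 *	void handler(int sig, siginfo_t *info, void *ctx)
 *	{
 *		// A signal sent from outside our pid namespace (e.g.
 *		// from the host into a container) arrives with
 *		// info->si_pid == 0.
 *	}
 *
 * Installed with sigaction() and SA_SIGINFO set in sa_flags.
 */
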
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = task_pt_regs(current);
	struct file *exe_file;

	exe_file = get_task_exe_file(current);
	if (exe_file) {
		pr_info("%pD: %s: potentially unexpected fatal signal %d.\n",
			exe_file, current->comm, signr);
		fput(exe_file);
	} else {
		pr_info("%s: potentially unexpected fatal signal %d.\n",
			current->comm, signr);
	}

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

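/*
 * Usage note: print_fatal_signals can be enabled at boot with the
 * "print-fatal-signals=1" parameter handled above, or (on typical
 * configurations) at runtime via procfs:
 *
 *	# echo 1 > /proc/sys/kernel/print-fatal-signals
 *
 * After that, a fatal signal logs one of the "potentially unexpected
 * fatal signal" lines printed by print_fatal_signal() above.
 */
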
int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
		     enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal_locked(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

enum sig_handler {
	HANDLER_CURRENT, /* If reachable use the current handler */
	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
	HANDLER_EXIT,	 /* Only visible as the process exit code */
};

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
		       enum sig_handler handler)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
		action->sa.sa_handler = SIG_DFL;
		if (handler == HANDLER_EXIT)
			action->sa.sa_flags |= SA_IMMUTABLE;
		if (blocked)
			sigdelset(&t->blocked, sig);
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
	 */
	if (action->sa.sa_handler == SIG_DFL &&
	    (!t->ptrace || (handler == HANDLER_EXIT)))
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
	/* This can happen if the signal was already pending and blocked */
	if (!task_sigpending(t))
		signal_wake_up(t, 0);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;
	int count = 0;

	p->signal->group_stop_count = 0;

	for_other_threads(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		/* Don't require de_thread to wait for the vhost_worker */
		if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)
			count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

#ifdef CONFIG_LOCKDEP
void lockdep_assert_task_sighand_held(struct task_struct *task)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	sighand = rcu_dereference(task->sighand);
	if (sighand)
		lockdep_assert_held(&sighand->siglock);
	else
		WARN_ON_ONCE(1);
	rcu_read_unlock();
}
#endif

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int ret = -ESRCH;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		/*
		 * If group_send_sig_info() succeeds at least once ret
		 * becomes 0 and after that the code below has no effect.
		 * Otherwise we return the last err or -ESRCH if this
		 * process group is empty.
		 */
		if (ret)
			ret = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return ret;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again. If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong. The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace. As the 32bit address will be encoded in the
 * low 32 bits of the pointer, those low 32 bits will be stored at a
 * higher address than a 32 bit pointer expects. So userspace will not
 * see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong. Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

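/*
 * The pid conventions decoded above mirror kill(2) as seen from
 * userspace. A hedged summary sketch:
 *
 *	kill(1234, SIGTERM);	// pid > 0: that process only
 *	kill(0, SIGTERM);	// pid == 0: caller's process group
 *	kill(-1234, SIGTERM);	// pid < -1: process group 1234
 *	kill(-1, SIGTERM);	// pid == -1: every process the caller may
 *				// signal, except init and the caller's
 *				// own thread group (see the loop above)
 */
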
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

void force_fatal_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
}

void force_exit_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_EXIT);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
	if (sig == SIGSEGV)
		force_fatal_sig(SIGSEGV);
	else
		force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr,
			    struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
}

int force_sig_fault(int sig, int code, void __user *addr)
{
	return force_sig_fault_to_task(sig, code, addr, current);
}

int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_BNDERR;
	info.si_addr = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_PKUERR;
	info.si_addr = addr;
	info.si_pkey = pkey;
	return force_sig_info(&info);
}
#endif

int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_PERF;
	info.si_addr = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	/*
	 * Signals generated by perf events should not terminate the whole
	 * process if SIGTRAP is blocked, however, delivering the signal
	 * asynchronously is better than not delivering at all. But tell user
	 * space if the signal was asynchronous, so it can clearly be
	 * distinguished from normal synchronous ones.
	 */
	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
				     TRAP_PERF_FLAG_ASYNC :
				     0;

	return send_sig_info(info.si_signo, &info, current);
}

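/*
 * Receiving side of the flag set above, as a hedged userspace sketch
 * (si_perf_* field names from the uapi siginfo on recent kernels):
 *
 *	void handler(int sig, siginfo_t *info, void *ctx)
 *	{
 *		if (info->si_code == TRAP_PERF &&
 *		    (info->si_perf_flags & TRAP_PERF_FLAG_ASYNC))
 *			;	// delivered late because SIGTRAP was blocked
 *	}
 *
 * si_perf_data carries the sig_data attached to the perf event and
 * si_perf_type its attr.type, letting one handler demux several events.
 */
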
/**
 * force_sig_seccomp - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 * @force_coredump: true to trigger a coredump
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
int force_sig_seccomp(int syscall, int reason, bool force_coredump)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch(current);
	info.si_syscall = syscall;
	return force_sig_info_to_task(&info, current,
		force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
}

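/*
 * Userspace counterpart, as a hedged sketch: a seccomp filter returning
 * SECCOMP_RET_TRAP lands here with force_coredump == false, and a SIGSYS
 * handler can emulate the syscall using the fields filled in above:
 *
 *	void sys_handler(int sig, siginfo_t *info, void *ctx)
 *	{
 *		if (info->si_code != SYS_SECCOMP)
 *			return;
 *		int nr = info->si_syscall;	// from info.si_syscall
 *		int data = info->si_errno;	// filter's SECCOMP_RET_DATA
 *		// ... emulate syscall nr, patch the result via ctx
 *	}
 */
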
/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_trapno = trapno;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			  struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_trapno = trapno;
	return send_sig_info(info.si_signo, &info, t);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures. This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions". In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create. If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}

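/*
 * Consequence of preallocating here, as a hedged userspace sketch: the
 * sigqueue for a POSIX timer is reserved at creation time, so resource
 * exhaustion surfaces as EAGAIN from timer_create() rather than as a
 * silently lost expiration later:
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGRTMIN,
 *	};
 *	timer_t tid;
 *	if (timer_create(CLOCK_MONOTONIC, &sev, &tid) < 0)
 *		perror("timer_create");	// may report EAGAIN
 */
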
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();

	/*
	 * This function is used by POSIX timers to deliver a timer signal.
	 * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID
	 * set), the signal must be delivered to the specific thread (queues
	 * into t->pending).
	 *
	 * Where type is not PIDTYPE_PID, signals must be delivered to the
	 * process. In this case, prefer to deliver to current if it is in
	 * the same thread group as the target process, which avoids
	 * unnecessarily waking up a potentially idle task.
	 */
	t = pid_task(pid, type);
	if (!t)
		goto ret;
	if (type != PIDTYPE_PID && same_thread_group(t, current))
		t = current;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}

static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	WARN_ON_ONCE(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	WARN_ON_ONCE(task_is_stopped_or_traced(tsk));

	WARN_ON_ONCE(!tsk->ptrace &&
		     (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care. POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie. Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

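/*
 * The autoreaping decided above, from the parent's point of view; a
 * hedged userspace sketch:
 *
 *	struct sigaction sa = { .sa_handler = SIG_IGN };
 *	sigaction(SIGCHLD, &sa, NULL);	// or set SA_NOCLDWAIT in sa_flags
 *	if (fork() == 0)
 *		_exit(0);		// child is reaped automatically
 *	if (wait(NULL) < 0)
 *		perror("wait");		// expected: ECHILD, no zombie left
 *
 * With SIG_IGN no SIGCHLD is sent at all; with SA_NOCLDWAIT alone this
 * implementation still sends one, as the comment above spells out.
 */
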
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed. If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * Returns the signal the ptracer requested the code resume
 * with. If the code did not stop because the tracer is gone,
 * the stop signal remains unchanged unless clear_code.
 */
static int ptrace_stop(int exit_code, int why, unsigned long message,
		       kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed()) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop. This is allowed to block, e.g. for faults
		 * on user stack pages. We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop();
		spin_lock_irq(&current->sighand->siglock);
	}

	/*
	 * After this point ptrace_signal_wake_up or signal_wake_up
	 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
	 * signal comes in. Handle previous ptrace_unlinks and fatal
	 * signals here to prevent ptrace_stop sleeping in schedule.
	 */
	if (!current->ptrace || __fatal_signal_pending(current))
		return exit_code;

	set_special_state(TASK_TRACED);
	current->jobctl |= JOBCTL_TRACED;

	/*
	 * We're committing to trapping. TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()		    smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->ptrace_message = message;
	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now. We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	/*
	 * Notify parents of the stop.
	 *
	 * While ptraced, there are two parents - the ptracer and
	 * the real_parent of the group_leader. The ptracer should
	 * know about every stop while the real parent is only
	 * interested in the completion of group stop. The states
	 * for the two don't interact with each other. Notify
	 * separately unless they're gonna be duplicates.
	 */
	if (current->ptrace)
		do_notify_parent_cldstop(current, true, why);
	if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
		do_notify_parent_cldstop(current, false, why);

	/*
	 * The previous do_notify_parent_cldstop() invocation woke ptracer.
	 * On a PREEMPTION kernel this can result in a preemption requirement
	 * which will be fulfilled after read_unlock() and the ptracer will be
	 * put on the CPU.
	 * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
	 * this task to wait in schedule(). If this task gets preempted then
	 * it remains enqueued on the runqueue. The ptracer will observe this
	 * and then sleep for a delay of one HZ tick. In the meantime this
	 * task gets scheduled, enters schedule() and will wait for the
	 * ptracer.
	 *
	 * This preemption point is not bad from a correctness point of
	 * view but extends the runtime by one HZ tick time due to the
	 * ptracer's sleep. The preempt-disable section ensures that there
	 * will be no preemption between unlock and schedule() and so
	 * improving the performance since the ptracer will observe that
	 * the tracee is scheduled out once it gets on the CPU.
	 *
	 * On PREEMPT_RT locking tasklist_lock does not disable preemption.
	 * Therefore the task can be preempted after do_notify_parent_cldstop()
	 * before unlocking tasklist_lock so there is no benefit in doing this.
	 *
	 * In fact disabling preemption is harmful on PREEMPT_RT because
	 * the spinlock_t in cgroup_enter_frozen() must not be acquired
	 * with preemption disabled due to the 'sleeping' spinlock
	 * substitution of RT.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();
	read_unlock(&tasklist_lock);
	cgroup_enter_frozen();
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable_no_resched();
	schedule();
	cgroup_leave_frozen(true);

	/*
	 * We are back. Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	exit_code = current->exit_code;
	current->last_siginfo = NULL;
	current->ptrace_message = 0;
	current->exit_code = 0;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
	return exit_code;
}

static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
{
	kernel_siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run. */
	return ptrace_stop(exit_code, why, message, &info);
}

int ptrace_notify(int exit_code, unsigned long message)
{
	int signr;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(task_work_pending(current)))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}

/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it. If already set, participate in the existing
 * group stop. If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself. Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched. The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
		    unlikely(sig->group_exec_task))
			return false;
		/*
		 * There is no group stop already in progress. We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop. This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced. That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set. Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;
		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		for_other_threads(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		current->jobctl |= JOBCTL_STOPPED;
		set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion. Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach in between; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader. The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		cgroup_enter_frozen();
		schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}

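/*
 * The group stop implemented above as seen by a job-control parent; a
 * hedged userspace sketch:
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		for (;;)
 *			pause();
 *	}
 *	kill(pid, SIGTSTP);			// initiates the group stop
 *	int status;
 *	waitpid(pid, &status, WUNTRACED);	// woken by CLD_STOPPED
 *	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTSTP)
 *		kill(pid, SIGCONT);		// resume the whole group
 */
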
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED, 0);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
	}
}

/**
 * do_freezer_trap - handle the freezer jobctl trap
 *
 * Puts the task into frozen state, unless the task is about to quit,
 * in which case JOBCTL_TRAP_FREEZE is dropped.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held,
 * which is always released before returning.
 */
static void do_freezer_trap(void)
	__releases(&current->sighand->siglock)
{
	/*
	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
	 * let's make another loop to give it a chance to be handled.
	 * In any case, we'll return back.
	 */
	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
	     JOBCTL_TRAP_FREEZE) {
		spin_unlock_irq(&current->sighand->siglock);
		return;
	}

	/*
	 * Now we're sure that there is no pending fatal signal and no
	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
	 * immediately (if there is a non-fatal signal pending), and
	 * put the task into sleep.
	 */
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
	clear_thread_flag(TIF_SIGPENDING);
	spin_unlock_irq(&current->sighand->siglock);
	cgroup_enter_frozen();
	schedule();
}

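/*
 * This trap is entered when the task's cgroup is frozen; with cgroup2
 * that is typically driven from userspace like the following (a hedged
 * sketch; paths depend on where cgroup2 is mounted):
 *
 *	# echo 1 > /sys/fs/cgroup/<group>/cgroup.freeze
 *	# grep frozen /sys/fs/cgroup/<group>/cgroup.events
 *
 * Thawing with "echo 0" wakes the task out of the schedule() call above.
 */
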
static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
{
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back. Did the debugger cancel the sig? */
	if (signr == 0)
		return signr;

	/*
	 * Update the siginfo structure if the signal has
	 * changed. If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		clear_siginfo(info);
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it. */
	if (sigismember(&current->blocked, signr) ||
	    fatal_signal_pending(current)) {
		send_signal_locked(signr, info, current, type);
		signr = 0;
	}

	return signr;
}

static void hide_si_addr_tag_bits(struct ksignal *ksig)
{
	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
		ksig->info.si_addr = arch_untagged_si_addr(
			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
		break;
	case SIL_KILL:
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_CHLD:
	case SIL_RT:
	case SIL_SYS:
		break;
	}
}

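/*
 * The stripping above is skipped when a handler opts in to tag bits; a
 * hedged userspace sketch (SA_EXPOSE_TAGBITS, relevant e.g. for arm64
 * MTE/TBI fault addresses, see its use in get_signal() below):
 *
 *	struct sigaction sa = {
 *		.sa_sigaction = segv_handler,
 *		.sa_flags     = SA_SIGINFO | SA_EXPOSE_TAGBITS,
 *	};
 *	sigaction(SIGSEGV, &sa, NULL);	// si_addr keeps its tag bits
 */
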
bool get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	clear_notify_signal();
	if (unlikely(task_work_pending(current)))
		task_work_run();

	if (!task_sigpending(current))
		return false;

	if (unlikely(uprobe_deny_signal()))
		return false;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);

	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing. This event is
		 * always per-process and doesn't make a whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						 true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;
		enum pid_type type;

		/* Has this task already been marked for death? */
		if ((signal->flags & SIGNAL_GROUP_EXIT) ||
		     signal->group_exec_task) {
			clear_siginfo(&ksig->info);
			ksig->info.si_signo = signr = SIGKILL;
			sigdelset(&current->pending.signal, SIGKILL);
			trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
					     &sighand->action[SIGKILL - 1]);
			recalc_sigpending();
			goto fatal;
		}

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl &
			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
			if (current->jobctl & JOBCTL_TRAP_MASK) {
				do_jobctl_trap();
				spin_unlock_irq(&sighand->siglock);
			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
				do_freezer_trap();

			goto relock;
		}

		/*
		 * If the task is leaving the frozen state, let's update
		 * cgroup counters and reset the frozen bit.
		 */
		if (unlikely(cgroup_task_frozen(current))) {
			spin_unlock_irq(&sighand->siglock);
			cgroup_leave_frozen(false);
			goto relock;
		}

		/*
		 * Signals generated by the execution of an instruction
		 * need to be delivered before any other pending signals
		 * so that the instruction pointer in the signal stack
		 * frame points to the faulting instruction.
		 */
		type = PIDTYPE_PID;
		signr = dequeue_synchronous_signal(&ksig->info);
		if (!signr)
			signr = dequeue_signal(current, &current->blocked,
					       &ksig->info, &type);

		if (!signr)
			break; /* will return 0 */

		if (unlikely(current->ptrace) && (signr != SIGKILL) &&
		    !(sighand->action[signr - 1].sa.sa_flags & SA_IMMUTABLE)) {
			signr = ptrace_signal(signr, &ksig->info, type);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler. */
			ksig->ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
		    !sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group. The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works. Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock. */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

	fatal:
		spin_unlock_irq(&sighand->siglock);
		if (unlikely(cgroup_task_frozen(current)))
			cgroup_leave_frozen(true);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise. If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * PF_USER_WORKER threads will catch and exit on fatal signals
		 * themselves. They have cleanup that must be performed, so
		 * we cannot call do_exit() on their behalf.
		 */
		if (current->flags & PF_USER_WORKER)
			goto out;

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
out:
	ksig->sig = signr;

	if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
		hide_si_addr_tag_bits(ksig);

	return ksig->sig > 0;
}

2906/**
2907 * signal_delivered - called after signal delivery to update blocked signals
2908 * @ksig: kernel signal struct
2909 * @stepping: nonzero if debugger single-step or block-step in use
2910 *
2911 * This function should be called when a signal has successfully been
2912 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2913 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
2914 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
2915 */
2916static void signal_delivered(struct ksignal *ksig, int stepping)
2917{
2918 sigset_t blocked;
2919
2920 /* A signal was successfully delivered, and the
2921 saved sigmask was stored on the signal frame,
2922 and will be restored by sigreturn. So we can
2923 simply clear the restore sigmask flag. */
2924 clear_restore_sigmask();
2925
2926	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2927 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2928 sigaddset(&blocked, ksig->sig);
2929 set_current_blocked(&blocked);
2930 if (current->sas_ss_flags & SS_AUTODISARM)
2931 sas_ss_reset(current);
2932 if (stepping)
2933 ptrace_notify(SIGTRAP, 0);
2934}
2935
2936void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2937{
2938 if (failed)
2939 force_sigsegv(ksig->sig);
2940 else
2941 signal_delivered(ksig, stepping);
2942}
2943
2944/*
2945 * It could be that complete_signal() picked us to notify about the
2946 * group-wide signal. Other threads should be notified now to take
2947 * the shared signals in @which since we will not.
2948 */
2949static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2950{
2951 sigset_t retarget;
2952 struct task_struct *t;
2953
2954 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2955 if (sigisemptyset(&retarget))
2956 return;
2957
2958 for_other_threads(tsk, t) {
2959 if (t->flags & PF_EXITING)
2960 continue;
2961
2962 if (!has_pending_signals(&retarget, &t->blocked))
2963 continue;
2964 /* Remove the signals this thread can handle. */
2965 sigandsets(&retarget, &retarget, &t->blocked);
2966
2967 if (!task_sigpending(t))
2968 signal_wake_up(t, 0);
2969
2970 if (sigisemptyset(&retarget))
2971 break;
2972 }
2973}
2974
2975void exit_signals(struct task_struct *tsk)
2976{
2977 int group_stop = 0;
2978 sigset_t unblocked;
2979
2980 /*
2981 * @tsk is about to have PF_EXITING set - lock out users which
2982 * expect stable threadgroup.
2983 */
2984 cgroup_threadgroup_change_begin(tsk);
2985
2986 if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
2987 sched_mm_cid_exit_signals(tsk);
2988 tsk->flags |= PF_EXITING;
2989 cgroup_threadgroup_change_end(tsk);
2990 return;
2991 }
2992
2993 spin_lock_irq(&tsk->sighand->siglock);
2994 /*
2995	 * From now on, this task is not visible to group-wide signals,
2996 * see wants_signal(), do_signal_stop().
2997 */
2998 sched_mm_cid_exit_signals(tsk);
2999 tsk->flags |= PF_EXITING;
3000
3001 cgroup_threadgroup_change_end(tsk);
3002
3003 if (!task_sigpending(tsk))
3004 goto out;
3005
3006 unblocked = tsk->blocked;
3007 signotset(&unblocked);
3008 retarget_shared_pending(tsk, &unblocked);
3009
3010 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
3011 task_participate_group_stop(tsk))
3012 group_stop = CLD_STOPPED;
3013out:
3014 spin_unlock_irq(&tsk->sighand->siglock);
3015
3016 /*
3017 * If group stop has completed, deliver the notification. This
3018 * should always go to the real parent of the group leader.
3019 */
3020 if (unlikely(group_stop)) {
3021 read_lock(&tasklist_lock);
3022 do_notify_parent_cldstop(tsk, false, group_stop);
3023 read_unlock(&tasklist_lock);
3024 }
3025}
3026
3027/*
3028 * System call entry points.
3029 */
3030
3031/**
3032 * sys_restart_syscall - restart a system call
3033 */
3034SYSCALL_DEFINE0(restart_syscall)
3035{
3036	struct restart_block *restart = &current->restart_block;
3037 return restart->fn(restart);
3038}
3039
3040long do_no_restart_syscall(struct restart_block *param)
3041{
3042 return -EINTR;
3043}
3044
3045static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3046{
3047 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3048 sigset_t newblocked;
3049 /* A set of now blocked but previously unblocked signals. */
3050		sigandnsets(&newblocked, newset, &current->blocked);
3051 retarget_shared_pending(tsk, &newblocked);
3052 }
3053 tsk->blocked = *newset;
3054 recalc_sigpending();
3055}
3056
3057/**
3058 * set_current_blocked - change current->blocked mask
3059 * @newset: new mask
3060 *
3061 * It is wrong to change ->blocked directly; this helper should be used
3062 * to ensure the process can't miss a shared signal we are going to block.
3063 */
3064void set_current_blocked(sigset_t *newset)
3065{
3066 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3067 __set_current_blocked(newset);
3068}
3069
3070void __set_current_blocked(const sigset_t *newset)
3071{
3072 struct task_struct *tsk = current;
3073
3074 /*
3075 * In case the signal mask hasn't changed, there is nothing we need
3076	 * to do. current->blocked shouldn't be modified by another task.
3077 */
3078 if (sigequalsets(&tsk->blocked, newset))
3079 return;
3080
3081 spin_lock_irq(&tsk->sighand->siglock);
3082 __set_task_blocked(tsk, newset);
3083 spin_unlock_irq(&tsk->sighand->siglock);
3084}
3085
3086/*
3087 * This is also useful for kernel threads that want to temporarily
3088 * (or permanently) block certain signals.
3089 *
3090 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3091 * interface happily blocks "unblockable" signals like SIGKILL
3092 * and friends.
3093 */
3094int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3095{
3096 struct task_struct *tsk = current;
3097 sigset_t newset;
3098
3099 /* Lockless, only current can change ->blocked, never from irq */
3100 if (oldset)
3101 *oldset = tsk->blocked;
3102
3103 switch (how) {
3104 case SIG_BLOCK:
3105 sigorsets(&newset, &tsk->blocked, set);
3106 break;
3107 case SIG_UNBLOCK:
3108 sigandnsets(&newset, &tsk->blocked, set);
3109 break;
3110 case SIG_SETMASK:
3111 newset = *set;
3112 break;
3113 default:
3114 return -EINVAL;
3115 }
3116
3117 __set_current_blocked(&newset);
3118 return 0;
3119}
3120EXPORT_SYMBOL(sigprocmask);
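
/*
 * Illustrative in-kernel usage (a sketch, not a caller in this file):
 * temporarily block SIGHUP around a critical section, then restore the
 * previous mask. The critical section itself is hypothetical.
 *
 *	sigset_t mask, oldmask;
 *
 *	siginitset(&mask, sigmask(SIGHUP));
 *	sigprocmask(SIG_BLOCK, &mask, &oldmask);
 *	...critical section...
 *	sigprocmask(SIG_SETMASK, &oldmask, NULL);
 */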
3121
3122/*
3123 * This API helps set app-provided sigmasks.
3124 *
3125 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3126 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3127 *
3128 * Note that it does set_restore_sigmask() in advance, so it must always
3129 * be paired with restore_saved_sigmask_unless() before return from syscall.
3130 */
3131int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3132{
3133 sigset_t kmask;
3134
3135 if (!umask)
3136 return 0;
3137 if (sigsetsize != sizeof(sigset_t))
3138 return -EINVAL;
3139 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3140 return -EFAULT;
3141
3142 set_restore_sigmask();
3143 current->saved_sigmask = current->blocked;
3144 set_current_blocked(&kmask);
3145
3146 return 0;
3147}
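
/*
 * Sketch of the calling convention described above (the wait helper is
 * hypothetical; ppoll-style syscalls follow this shape):
 *
 *	ret = set_user_sigmask(umask, sigsetsize);
 *	if (ret)
 *		return ret;
 *	ret = do_the_actual_wait(...);
 *	restore_saved_sigmask_unless(ret == -EINTR);
 *	return ret;
 */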
3148
3149#ifdef CONFIG_COMPAT
3150int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3151 size_t sigsetsize)
3152{
3153 sigset_t kmask;
3154
3155 if (!umask)
3156 return 0;
3157 if (sigsetsize != sizeof(compat_sigset_t))
3158 return -EINVAL;
3159 if (get_compat_sigset(&kmask, umask))
3160 return -EFAULT;
3161
3162 set_restore_sigmask();
3163 current->saved_sigmask = current->blocked;
3164 set_current_blocked(&kmask);
3165
3166 return 0;
3167}
3168#endif
3169
3170/**
3171 * sys_rt_sigprocmask - change the list of currently blocked signals
3172 * @how: whether to add, remove, or set signals
3173 * @nset: signals to add, remove, or set (if non-null)
3174 * @oset: previous value of signal mask if non-null
3175 * @sigsetsize: size of sigset_t type
3176 */
3177SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3178 sigset_t __user *, oset, size_t, sigsetsize)
3179{
3180 sigset_t old_set, new_set;
3181 int error;
3182
3183 /* XXX: Don't preclude handling different sized sigset_t's. */
3184 if (sigsetsize != sizeof(sigset_t))
3185 return -EINVAL;
3186
3187 old_set = current->blocked;
3188
3189 if (nset) {
3190 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3191 return -EFAULT;
3192 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3193
3194 error = sigprocmask(how, &new_set, NULL);
3195 if (error)
3196 return error;
3197 }
3198
3199 if (oset) {
3200 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3201 return -EFAULT;
3202 }
3203
3204 return 0;
3205}
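
/*
 * Illustrative userspace view of this syscall (glibc's sigprocmask()
 * wraps rt_sigprocmask; not part of this file):
 *
 *	#include <signal.h>
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);	SIGINT now stays pending
 *	sigprocmask(SIG_SETMASK, &old, NULL);	previous mask restored
 */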
3206
3207#ifdef CONFIG_COMPAT
3208COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3209 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3210{
3211 sigset_t old_set = current->blocked;
3212
3213 /* XXX: Don't preclude handling different sized sigset_t's. */
3214 if (sigsetsize != sizeof(sigset_t))
3215 return -EINVAL;
3216
3217 if (nset) {
3218 sigset_t new_set;
3219 int error;
3220 if (get_compat_sigset(&new_set, nset))
3221 return -EFAULT;
3222 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3223
3224 error = sigprocmask(how, &new_set, NULL);
3225 if (error)
3226 return error;
3227 }
3228 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3229}
3230#endif
3231
3232static void do_sigpending(sigset_t *set)
3233{
3234	spin_lock_irq(&current->sighand->siglock);
3235	sigorsets(set, &current->pending.signal,
3236		  &current->signal->shared_pending.signal);
3237	spin_unlock_irq(&current->sighand->siglock);
3238
3239 /* Outside the lock because only this thread touches it. */
3240	sigandsets(set, &current->blocked, set);
3241}
3242
3243/**
3244 * sys_rt_sigpending - examine pending signals that have been raised
3245 *			while blocked
3246 * @uset: stores pending signals
3247 * @sigsetsize: size of sigset_t type or smaller
3248 */
3249SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3250{
3251 sigset_t set;
3252
3253 if (sigsetsize > sizeof(*uset))
3254 return -EINVAL;
3255
3256 do_sigpending(&set);
3257
3258 if (copy_to_user(uset, &set, sigsetsize))
3259 return -EFAULT;
3260
3261 return 0;
3262}
3263
3264#ifdef CONFIG_COMPAT
3265COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3266 compat_size_t, sigsetsize)
3267{
3268 sigset_t set;
3269
3270 if (sigsetsize > sizeof(*uset))
3271 return -EINVAL;
3272
3273 do_sigpending(&set);
3274
3275 return put_compat_sigset(uset, &set, sigsetsize);
3276}
3277#endif
3278
3279static const struct {
3280 unsigned char limit, layout;
3281} sig_sicodes[] = {
3282 [SIGILL] = { NSIGILL, SIL_FAULT },
3283 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3284 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3285 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3286 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3287#if defined(SIGEMT)
3288 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3289#endif
3290 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3291 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3292 [SIGSYS] = { NSIGSYS, SIL_SYS },
3293};
3294
3295static bool known_siginfo_layout(unsigned sig, int si_code)
3296{
3297 if (si_code == SI_KERNEL)
3298 return true;
3299	else if (si_code > SI_USER) {
3300 if (sig_specific_sicodes(sig)) {
3301 if (si_code <= sig_sicodes[sig].limit)
3302 return true;
3303 }
3304 else if (si_code <= NSIGPOLL)
3305 return true;
3306 }
3307 else if (si_code >= SI_DETHREAD)
3308 return true;
3309 else if (si_code == SI_ASYNCNL)
3310 return true;
3311 return false;
3312}
3313
3314enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3315{
3316 enum siginfo_layout layout = SIL_KILL;
3317 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3318 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3319 (si_code <= sig_sicodes[sig].limit)) {
3320 layout = sig_sicodes[sig].layout;
3321 /* Handle the exceptions */
3322 if ((sig == SIGBUS) &&
3323 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3324 layout = SIL_FAULT_MCEERR;
3325 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3326 layout = SIL_FAULT_BNDERR;
3327#ifdef SEGV_PKUERR
3328 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3329 layout = SIL_FAULT_PKUERR;
3330#endif
3331 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3332 layout = SIL_FAULT_PERF_EVENT;
3333 else if (IS_ENABLED(CONFIG_SPARC) &&
3334 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3335 layout = SIL_FAULT_TRAPNO;
3336 else if (IS_ENABLED(CONFIG_ALPHA) &&
3337 ((sig == SIGFPE) ||
3338 ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3339 layout = SIL_FAULT_TRAPNO;
3340 }
3341 else if (si_code <= NSIGPOLL)
3342 layout = SIL_POLL;
3343 } else {
3344 if (si_code == SI_TIMER)
3345 layout = SIL_TIMER;
3346 else if (si_code == SI_SIGIO)
3347 layout = SIL_POLL;
3348 else if (si_code < 0)
3349 layout = SIL_RT;
3350 }
3351 return layout;
3352}
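
/*
 * Informative examples of the mapping above (assuming the usual si_code
 * constants):
 *
 *	siginfo_layout(SIGSEGV, SEGV_MAPERR) == SIL_FAULT
 *	siginfo_layout(SIGSEGV, SEGV_BNDERR) == SIL_FAULT_BNDERR
 *	siginfo_layout(SIGCHLD, CLD_EXITED)  == SIL_CHLD
 *	siginfo_layout(SIGUSR1, SI_QUEUE)    == SIL_RT	(si_code < 0)
 */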
3353
3354static inline char __user *si_expansion(const siginfo_t __user *info)
3355{
3356 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3357}
3358
3359int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3360{
3361 char __user *expansion = si_expansion(to);
3362	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3363 return -EFAULT;
3364 if (clear_user(expansion, SI_EXPANSION_SIZE))
3365 return -EFAULT;
3366 return 0;
3367}
3368
3369static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3370 const siginfo_t __user *from)
3371{
3372 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3373 char __user *expansion = si_expansion(from);
3374 char buf[SI_EXPANSION_SIZE];
3375 int i;
3376 /*
3377 * An unknown si_code might need more than
3378 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3379 * extra bytes are 0. This guarantees copy_siginfo_to_user
3380 * will return this data to userspace exactly.
3381 */
3382 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3383 return -EFAULT;
3384 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3385 if (buf[i] != 0)
3386 return -E2BIG;
3387 }
3388 }
3389 return 0;
3390}
3391
3392static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3393 const siginfo_t __user *from)
3394{
3395 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3396 return -EFAULT;
3397 to->si_signo = signo;
3398 return post_copy_siginfo_from_user(to, from);
3399}
3400
3401int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3402{
3403 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3404 return -EFAULT;
3405 return post_copy_siginfo_from_user(to, from);
3406}
3407
3408#ifdef CONFIG_COMPAT
3409/**
3410 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3411 * @to: compat siginfo destination
3412 * @from: kernel siginfo source
3413 *
3414 * Note: This function does not work properly for the SIGCHLD on x32, but
3415 * fortunately it doesn't have to. The only valid callers for this function are
3416 * copy_siginfo_to_user32, which is overridden for x32 and the coredump code.
3417 * The latter does not care because SIGCHLD will never cause a coredump.
3418 */
3419void copy_siginfo_to_external32(struct compat_siginfo *to,
3420 const struct kernel_siginfo *from)
3421{
3422 memset(to, 0, sizeof(*to));
3423
3424 to->si_signo = from->si_signo;
3425 to->si_errno = from->si_errno;
3426 to->si_code = from->si_code;
3427	switch (siginfo_layout(from->si_signo, from->si_code)) {
3428 case SIL_KILL:
3429 to->si_pid = from->si_pid;
3430 to->si_uid = from->si_uid;
3431 break;
3432 case SIL_TIMER:
3433 to->si_tid = from->si_tid;
3434 to->si_overrun = from->si_overrun;
3435 to->si_int = from->si_int;
3436 break;
3437 case SIL_POLL:
3438 to->si_band = from->si_band;
3439 to->si_fd = from->si_fd;
3440 break;
3441 case SIL_FAULT:
3442 to->si_addr = ptr_to_compat(from->si_addr);
3443 break;
3444 case SIL_FAULT_TRAPNO:
3445 to->si_addr = ptr_to_compat(from->si_addr);
3446 to->si_trapno = from->si_trapno;
3447 break;
3448 case SIL_FAULT_MCEERR:
3449 to->si_addr = ptr_to_compat(from->si_addr);
3450 to->si_addr_lsb = from->si_addr_lsb;
3451 break;
3452 case SIL_FAULT_BNDERR:
3453 to->si_addr = ptr_to_compat(from->si_addr);
3454 to->si_lower = ptr_to_compat(from->si_lower);
3455 to->si_upper = ptr_to_compat(from->si_upper);
3456 break;
3457 case SIL_FAULT_PKUERR:
3458 to->si_addr = ptr_to_compat(from->si_addr);
3459 to->si_pkey = from->si_pkey;
3460 break;
3461 case SIL_FAULT_PERF_EVENT:
3462 to->si_addr = ptr_to_compat(from->si_addr);
3463 to->si_perf_data = from->si_perf_data;
3464 to->si_perf_type = from->si_perf_type;
3465 to->si_perf_flags = from->si_perf_flags;
3466 break;
3467 case SIL_CHLD:
3468 to->si_pid = from->si_pid;
3469 to->si_uid = from->si_uid;
3470 to->si_status = from->si_status;
3471 to->si_utime = from->si_utime;
3472 to->si_stime = from->si_stime;
3473 break;
3474 case SIL_RT:
3475 to->si_pid = from->si_pid;
3476 to->si_uid = from->si_uid;
3477 to->si_int = from->si_int;
3478 break;
3479 case SIL_SYS:
3480 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3481 to->si_syscall = from->si_syscall;
3482 to->si_arch = from->si_arch;
3483 break;
3484 }
3485}
3486
3487int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3488 const struct kernel_siginfo *from)
3489{
3490 struct compat_siginfo new;
3491
3492 copy_siginfo_to_external32(&new, from);
3493 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3494 return -EFAULT;
3495 return 0;
3496}
3497
3498static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3499 const struct compat_siginfo *from)
3500{
3501 clear_siginfo(to);
3502 to->si_signo = from->si_signo;
3503 to->si_errno = from->si_errno;
3504 to->si_code = from->si_code;
3505	switch (siginfo_layout(from->si_signo, from->si_code)) {
3506 case SIL_KILL:
3507 to->si_pid = from->si_pid;
3508 to->si_uid = from->si_uid;
3509 break;
3510 case SIL_TIMER:
3511 to->si_tid = from->si_tid;
3512 to->si_overrun = from->si_overrun;
3513 to->si_int = from->si_int;
3514 break;
3515 case SIL_POLL:
3516 to->si_band = from->si_band;
3517 to->si_fd = from->si_fd;
3518 break;
3519 case SIL_FAULT:
3520 to->si_addr = compat_ptr(from->si_addr);
3521 break;
3522 case SIL_FAULT_TRAPNO:
3523 to->si_addr = compat_ptr(from->si_addr);
3524 to->si_trapno = from->si_trapno;
3525 break;
3526 case SIL_FAULT_MCEERR:
3527 to->si_addr = compat_ptr(from->si_addr);
3528 to->si_addr_lsb = from->si_addr_lsb;
3529 break;
3530 case SIL_FAULT_BNDERR:
3531 to->si_addr = compat_ptr(from->si_addr);
3532 to->si_lower = compat_ptr(from->si_lower);
3533 to->si_upper = compat_ptr(from->si_upper);
3534 break;
3535 case SIL_FAULT_PKUERR:
3536 to->si_addr = compat_ptr(from->si_addr);
3537 to->si_pkey = from->si_pkey;
3538 break;
3539 case SIL_FAULT_PERF_EVENT:
3540 to->si_addr = compat_ptr(from->si_addr);
3541 to->si_perf_data = from->si_perf_data;
3542 to->si_perf_type = from->si_perf_type;
3543 to->si_perf_flags = from->si_perf_flags;
3544 break;
3545 case SIL_CHLD:
3546 to->si_pid = from->si_pid;
3547 to->si_uid = from->si_uid;
3548 to->si_status = from->si_status;
3549#ifdef CONFIG_X86_X32_ABI
3550 if (in_x32_syscall()) {
3551 to->si_utime = from->_sifields._sigchld_x32._utime;
3552 to->si_stime = from->_sifields._sigchld_x32._stime;
3553 } else
3554#endif
3555 {
3556 to->si_utime = from->si_utime;
3557 to->si_stime = from->si_stime;
3558 }
3559 break;
3560 case SIL_RT:
3561 to->si_pid = from->si_pid;
3562 to->si_uid = from->si_uid;
3563 to->si_int = from->si_int;
3564 break;
3565 case SIL_SYS:
3566 to->si_call_addr = compat_ptr(from->si_call_addr);
3567 to->si_syscall = from->si_syscall;
3568 to->si_arch = from->si_arch;
3569 break;
3570 }
3571 return 0;
3572}
3573
3574static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3575 const struct compat_siginfo __user *ufrom)
3576{
3577 struct compat_siginfo from;
3578
3579 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3580 return -EFAULT;
3581
3582 from.si_signo = signo;
3583 return post_copy_siginfo_from_user32(to, &from);
3584}
3585
3586int copy_siginfo_from_user32(struct kernel_siginfo *to,
3587 const struct compat_siginfo __user *ufrom)
3588{
3589 struct compat_siginfo from;
3590
3591 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3592 return -EFAULT;
3593
3594 return post_copy_siginfo_from_user32(to, &from);
3595}
3596#endif /* CONFIG_COMPAT */
3597
3598/**
3599 * do_sigtimedwait - wait for queued signals specified in @which
3600 * @which: queued signals to wait for
3601 * @info: if non-null, the signal's siginfo is returned here
3602 * @ts: upper bound on process suspension time
3603 */
3604static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3605 const struct timespec64 *ts)
3606{
3607 ktime_t *to = NULL, timeout = KTIME_MAX;
3608 struct task_struct *tsk = current;
3609 sigset_t mask = *which;
3610 enum pid_type type;
3611 int sig, ret = 0;
3612
3613 if (ts) {
3614 if (!timespec64_valid(ts))
3615 return -EINVAL;
3616 timeout = timespec64_to_ktime(*ts);
3617 to = &timeout;
3618 }
3619
3620 /*
3621 * Invert the set of allowed signals to get those we want to block.
3622 */
3623 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3624 signotset(&mask);
3625
3626 spin_lock_irq(&tsk->sighand->siglock);
3627 sig = dequeue_signal(tsk, &mask, info, &type);
3628 if (!sig && timeout) {
3629 /*
3630		 * None ready, temporarily unblock those we're interested
3631		 * in while we are sleeping so that we'll be awakened when
3632		 * they arrive. Unblocking is always fine, we can avoid
3633		 * set_current_blocked().
3634 */
3635 tsk->real_blocked = tsk->blocked;
3636 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3637 recalc_sigpending();
3638 spin_unlock_irq(&tsk->sighand->siglock);
3639
3640 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
3641 ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3642 HRTIMER_MODE_REL);
3643 spin_lock_irq(&tsk->sighand->siglock);
3644 __set_task_blocked(tsk, &tsk->real_blocked);
3645 sigemptyset(&tsk->real_blocked);
3646 sig = dequeue_signal(tsk, &mask, info, &type);
3647 }
3648 spin_unlock_irq(&tsk->sighand->siglock);
3649
3650 if (sig)
3651 return sig;
3652 return ret ? -EINTR : -EAGAIN;
3653}
3654
3655/**
3656 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3657 * in @uthese
3658 * @uthese: queued signals to wait for
3659 * @uinfo: if non-null, the signal's siginfo is returned here
3660 * @uts: upper bound on process suspension time
3661 * @sigsetsize: size of sigset_t type
3662 */
3663SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3664 siginfo_t __user *, uinfo,
3665 const struct __kernel_timespec __user *, uts,
3666 size_t, sigsetsize)
3667{
3668 sigset_t these;
3669 struct timespec64 ts;
3670 kernel_siginfo_t info;
3671 int ret;
3672
3673 /* XXX: Don't preclude handling different sized sigset_t's. */
3674 if (sigsetsize != sizeof(sigset_t))
3675 return -EINVAL;
3676
3677 if (copy_from_user(&these, uthese, sizeof(these)))
3678 return -EFAULT;
3679
3680 if (uts) {
3681 if (get_timespec64(&ts, uts))
3682 return -EFAULT;
3683 }
3684
3685 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3686
3687 if (ret > 0 && uinfo) {
3688 if (copy_siginfo_to_user(uinfo, &info))
3689 ret = -EFAULT;
3690 }
3691
3692 return ret;
3693}
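
/*
 * Illustrative userspace usage (glibc's sigtimedwait() wraps this
 * syscall; not part of this file). The signal must be blocked first so
 * it stays queued instead of being delivered:
 *
 *	#include <signal.h>
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *		...handle it...
 *
 * A return of -1 with errno set to EAGAIN means the timeout expired.
 */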
3694
3695#ifdef CONFIG_COMPAT_32BIT_TIME
3696SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3697 siginfo_t __user *, uinfo,
3698 const struct old_timespec32 __user *, uts,
3699 size_t, sigsetsize)
3700{
3701 sigset_t these;
3702 struct timespec64 ts;
3703 kernel_siginfo_t info;
3704 int ret;
3705
3706 if (sigsetsize != sizeof(sigset_t))
3707 return -EINVAL;
3708
3709 if (copy_from_user(&these, uthese, sizeof(these)))
3710 return -EFAULT;
3711
3712 if (uts) {
3713 if (get_old_timespec32(&ts, uts))
3714 return -EFAULT;
3715 }
3716
3717 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3718
3719 if (ret > 0 && uinfo) {
3720 if (copy_siginfo_to_user(uinfo, &info))
3721 ret = -EFAULT;
3722 }
3723
3724 return ret;
3725}
3726#endif
3727
3728#ifdef CONFIG_COMPAT
3729COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3730 struct compat_siginfo __user *, uinfo,
3731 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3732{
3733 sigset_t s;
3734 struct timespec64 t;
3735 kernel_siginfo_t info;
3736 long ret;
3737
3738 if (sigsetsize != sizeof(sigset_t))
3739 return -EINVAL;
3740
3741 if (get_compat_sigset(&s, uthese))
3742 return -EFAULT;
3743
3744 if (uts) {
3745 if (get_timespec64(&t, uts))
3746 return -EFAULT;
3747 }
3748
3749 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3750
3751 if (ret > 0 && uinfo) {
3752 if (copy_siginfo_to_user32(uinfo, &info))
3753 ret = -EFAULT;
3754 }
3755
3756 return ret;
3757}
3758
3759#ifdef CONFIG_COMPAT_32BIT_TIME
3760COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3761 struct compat_siginfo __user *, uinfo,
3762 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3763{
3764 sigset_t s;
3765 struct timespec64 t;
3766 kernel_siginfo_t info;
3767 long ret;
3768
3769 if (sigsetsize != sizeof(sigset_t))
3770 return -EINVAL;
3771
3772 if (get_compat_sigset(&s, uthese))
3773 return -EFAULT;
3774
3775 if (uts) {
3776 if (get_old_timespec32(&t, uts))
3777 return -EFAULT;
3778 }
3779
3780 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3781
3782 if (ret > 0 && uinfo) {
3783 if (copy_siginfo_to_user32(uinfo, &info))
3784 ret = -EFAULT;
3785 }
3786
3787 return ret;
3788}
3789#endif
3790#endif
3791
3792static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3793{
3794 clear_siginfo(info);
3795 info->si_signo = sig;
3796 info->si_errno = 0;
3797 info->si_code = SI_USER;
3798 info->si_pid = task_tgid_vnr(current);
3799 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3800}
3801
3802/**
3803 * sys_kill - send a signal to a process
3804 * @pid: the PID of the process
3805 * @sig: signal to be sent
3806 */
3807SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3808{
3809 struct kernel_siginfo info;
3810
3811 prepare_kill_siginfo(sig, &info);
3812
3813 return kill_something_info(sig, &info, pid);
3814}
3815
3816/*
3817 * Verify that the signaler and signalee either are in the same pid namespace
3818 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3819 * namespace.
3820 */
3821static bool access_pidfd_pidns(struct pid *pid)
3822{
3823 struct pid_namespace *active = task_active_pid_ns(current);
3824 struct pid_namespace *p = ns_of_pid(pid);
3825
3826 for (;;) {
3827 if (!p)
3828 return false;
3829 if (p == active)
3830 break;
3831 p = p->parent;
3832 }
3833
3834 return true;
3835}
3836
3837static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3838 siginfo_t __user *info)
3839{
3840#ifdef CONFIG_COMPAT
3841 /*
3842 * Avoid hooking up compat syscalls and instead handle necessary
3843 * conversions here. Note, this is a stop-gap measure and should not be
3844 * considered a generic solution.
3845 */
3846 if (in_compat_syscall())
3847 return copy_siginfo_from_user32(
3848 kinfo, (struct compat_siginfo __user *)info);
3849#endif
3850 return copy_siginfo_from_user(kinfo, info);
3851}
3852
3853static struct pid *pidfd_to_pid(const struct file *file)
3854{
3855 struct pid *pid;
3856
3857 pid = pidfd_pid(file);
3858 if (!IS_ERR(pid))
3859 return pid;
3860
3861 return tgid_pidfd_to_pid(file);
3862}
3863
3864/**
3865 * sys_pidfd_send_signal - Signal a process through a pidfd
3866 * @pidfd: file descriptor of the process
3867 * @sig: signal to send
3868 * @info: signal info
3869 * @flags: future flags
3870 *
3871 * The syscall currently only signals via PIDTYPE_PID which covers
3872 * kill(<positive-pid>, <signal>). It does not signal threads or process
3873 * groups.
3874 * In order to extend the syscall to threads and process groups the @flags
3875 * argument should be used. In essence, the @flags argument will determine
3876 * what is signaled and not the file descriptor itself. Put in other words,
3877 * grouping is a property of the flags argument not a property of the file
3878 * descriptor.
3879 *
3880 * Return: 0 on success, negative errno on failure
3881 */
3882SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3883 siginfo_t __user *, info, unsigned int, flags)
3884{
3885 int ret;
3886 struct fd f;
3887 struct pid *pid;
3888 kernel_siginfo_t kinfo;
3889
3890	/* Enforce that flags is 0 until we add an extension. */
3891 if (flags)
3892 return -EINVAL;
3893
3894 f = fdget(pidfd);
3895 if (!f.file)
3896 return -EBADF;
3897
3898 /* Is this a pidfd? */
3899 pid = pidfd_to_pid(f.file);
3900 if (IS_ERR(pid)) {
3901 ret = PTR_ERR(pid);
3902 goto err;
3903 }
3904
3905 ret = -EINVAL;
3906 if (!access_pidfd_pidns(pid))
3907 goto err;
3908
3909 if (info) {
3910 ret = copy_siginfo_from_user_any(&kinfo, info);
3911 if (unlikely(ret))
3912 goto err;
3913
3914 ret = -EINVAL;
3915 if (unlikely(sig != kinfo.si_signo))
3916 goto err;
3917
3918 /* Only allow sending arbitrary signals to yourself. */
3919 ret = -EPERM;
3920 if ((task_pid(current) != pid) &&
3921 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3922 goto err;
3923 } else {
3924 prepare_kill_siginfo(sig, &kinfo);
3925 }
3926
3927 ret = kill_pid_info(sig, &kinfo, pid);
3928
3929err:
3930 fdput(f);
3931 return ret;
3932}
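
/*
 * Illustrative userspace usage (a sketch using raw syscall(2), since a
 * libc wrapper is not assumed; not part of this file):
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *	if (pidfd >= 0) {
 *		syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *	}
 */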
3933
3934static int
3935do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3936{
3937 struct task_struct *p;
3938 int error = -ESRCH;
3939
3940 rcu_read_lock();
3941 p = find_task_by_vpid(pid);
3942 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3943 error = check_kill_permission(sig, info, p);
3944 /*
3945 * The null signal is a permissions and process existence
3946 * probe. No signal is actually delivered.
3947 */
3948 if (!error && sig) {
3949 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3950 /*
3951 * If lock_task_sighand() failed we pretend the task
3952 * dies after receiving the signal. The window is tiny,
3953 * and the signal is private anyway.
3954 */
3955 if (unlikely(error == -ESRCH))
3956 error = 0;
3957 }
3958 }
3959 rcu_read_unlock();
3960
3961 return error;
3962}
3963
3964static int do_tkill(pid_t tgid, pid_t pid, int sig)
3965{
3966 struct kernel_siginfo info;
3967
3968 clear_siginfo(&info);
3969 info.si_signo = sig;
3970 info.si_errno = 0;
3971 info.si_code = SI_TKILL;
3972 info.si_pid = task_tgid_vnr(current);
3973 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3974
3975 return do_send_specific(tgid, pid, sig, &info);
3976}
3977
3978/**
3979 * sys_tgkill - send signal to one specific thread
3980 * @tgid: the thread group ID of the thread
3981 * @pid: the PID of the thread
3982 * @sig: signal to be sent
3983 *
3984 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3985 * exists but no longer belongs to the target process. This solves the
3986 * problem of threads exiting and their PIDs being reused.
3987 */
3988SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3989{
3990 /* This is only valid for single tasks */
3991 if (pid <= 0 || tgid <= 0)
3992 return -EINVAL;
3993
3994 return do_tkill(tgid, pid, sig);
3995}
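
/*
 * Illustrative userspace usage (this is the primitive glibc's
 * pthread_kill() is built on; the tid shown is hypothetical):
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */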
3996
3997/**
3998 * sys_tkill - send signal to one specific task
3999 * @pid: the PID of the task
4000 * @sig: signal to be sent
4001 *
4002 * Send a signal to only one task, even if it's a CLONE_THREAD task.
4003 */
4004SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
4005{
4006 /* This is only valid for single tasks */
4007 if (pid <= 0)
4008 return -EINVAL;
4009
4010 return do_tkill(0, pid, sig);
4011}
4012
4013static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
4014{
4015 /* Not even root can pretend to send signals from the kernel.
4016 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4017 */
4018 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4019 (task_pid_vnr(current) != pid))
4020 return -EPERM;
4021
4022 /* POSIX.1b doesn't mention process groups. */
4023 return kill_proc_info(sig, info, pid);
4024}
4025
4026/**
4027 * sys_rt_sigqueueinfo - send signal information to a process
4028 * @pid: the PID of the process
4029 * @sig: signal to be sent
4030 * @uinfo: signal info to be sent
4031 */
4032SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4033 siginfo_t __user *, uinfo)
4034{
4035 kernel_siginfo_t info;
4036 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4037 if (unlikely(ret))
4038 return ret;
4039 return do_rt_sigqueueinfo(pid, sig, &info);
4040}
4041
4042#ifdef CONFIG_COMPAT
4043COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4044 compat_pid_t, pid,
4045 int, sig,
4046 struct compat_siginfo __user *, uinfo)
4047{
4048 kernel_siginfo_t info;
4049 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4050 if (unlikely(ret))
4051 return ret;
4052 return do_rt_sigqueueinfo(pid, sig, &info);
4053}
4054#endif
4055
4056static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4057{
4058 /* This is only valid for single tasks */
4059 if (pid <= 0 || tgid <= 0)
4060 return -EINVAL;
4061
4062 /* Not even root can pretend to send signals from the kernel.
4063 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4064 */
4065 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4066 (task_pid_vnr(current) != pid))
4067 return -EPERM;
4068
4069 return do_send_specific(tgid, pid, sig, info);
4070}
4071
4072SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4073 siginfo_t __user *, uinfo)
4074{
4075 kernel_siginfo_t info;
4076 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4077 if (unlikely(ret))
4078 return ret;
4079 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4080}
4081
4082#ifdef CONFIG_COMPAT
4083COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4084 compat_pid_t, tgid,
4085 compat_pid_t, pid,
4086 int, sig,
4087 struct compat_siginfo __user *, uinfo)
4088{
4089 kernel_siginfo_t info;
4090 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4091 if (unlikely(ret))
4092 return ret;
4093 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4094}
4095#endif
4096
4097/*
4098 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4099 */
4100void kernel_sigaction(int sig, __sighandler_t action)
4101{
4102	spin_lock_irq(&current->sighand->siglock);
4103 current->sighand->action[sig - 1].sa.sa_handler = action;
4104 if (action == SIG_IGN) {
4105 sigset_t mask;
4106
4107 sigemptyset(&mask);
4108 sigaddset(&mask, sig);
4109
4110		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4111		flush_sigqueue_mask(&mask, &current->pending);
4112 recalc_sigpending();
4113 }
4114	spin_unlock_irq(&current->sighand->siglock);
4115}
4116EXPORT_SYMBOL(kernel_sigaction);
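
/*
 * Sketch of typical kthread usage (illustrative; in-tree callers
 * normally go through the allow_signal()/disallow_signal() helpers,
 * which are thin wrappers around this function):
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		...do work...
 *		if (signal_pending(current))
 *			flush_signals(current);
 *	}
 */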
4117
4118void __weak sigaction_compat_abi(struct k_sigaction *act,
4119 struct k_sigaction *oact)
4120{
4121}
4122
4123int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4124{
4125 struct task_struct *p = current, *t;
4126 struct k_sigaction *k;
4127 sigset_t mask;
4128
4129 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4130 return -EINVAL;
4131
4132 k = &p->sighand->action[sig-1];
4133
4134 spin_lock_irq(&p->sighand->siglock);
4135 if (k->sa.sa_flags & SA_IMMUTABLE) {
4136 spin_unlock_irq(&p->sighand->siglock);
4137 return -EINVAL;
4138 }
4139 if (oact)
4140 *oact = *k;
4141
4142 /*
4143 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4144 * e.g. by having an architecture use the bit in their uapi.
4145 */
4146 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4147
4148 /*
4149 * Clear unknown flag bits in order to allow userspace to detect missing
4150 * support for flag bits and to allow the kernel to use non-uapi bits
4151 * internally.
4152 */
4153 if (act)
4154 act->sa.sa_flags &= UAPI_SA_FLAGS;
4155 if (oact)
4156 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4157
4158 sigaction_compat_abi(act, oact);
4159
4160 if (act) {
4161 sigdelsetmask(&act->sa.sa_mask,
4162 sigmask(SIGKILL) | sigmask(SIGSTOP));
4163 *k = *act;
4164 /*
4165 * POSIX 3.3.1.3:
4166 * "Setting a signal action to SIG_IGN for a signal that is
4167 * pending shall cause the pending signal to be discarded,
4168 * whether or not it is blocked."
4169 *
4170 * "Setting a signal action to SIG_DFL for a signal that is
4171 * pending and whose default action is to ignore the signal
4172 * (for example, SIGCHLD), shall cause the pending signal to
4173 * be discarded, whether or not it is blocked"
4174 */
4175 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4176 sigemptyset(&mask);
4177 sigaddset(&mask, sig);
4178 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4179 for_each_thread(p, t)
4180 flush_sigqueue_mask(&mask, &t->pending);
4181 }
4182 }
4183
4184 spin_unlock_irq(&p->sighand->siglock);
4185 return 0;
4186}
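
/*
 * Illustrative userspace consequence of the POSIX rule quoted above
 * (not part of this file): setting SIG_IGN discards an already-pending
 * blocked signal, so nothing is delivered on unblock.
 *
 *	sigset_t set;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	raise(SIGUSR1);				now pending and blocked
 *	signal(SIGUSR1, SIG_IGN);		pending SIGUSR1 discarded
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);	nothing is delivered
 */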
4187
4188#ifdef CONFIG_DYNAMIC_SIGFRAME
4189static inline void sigaltstack_lock(void)
4190	__acquires(&current->sighand->siglock)
4191{
4192	spin_lock_irq(&current->sighand->siglock);
4193}
4194
4195static inline void sigaltstack_unlock(void)
4196	__releases(&current->sighand->siglock)
4197{
4198	spin_unlock_irq(&current->sighand->siglock);
4199}
4200#else
4201static inline void sigaltstack_lock(void) { }
4202static inline void sigaltstack_unlock(void) { }
4203#endif
4204
4205static int
4206do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
4207 size_t min_ss_size)
4208{
4209 struct task_struct *t = current;
4210 int ret = 0;
4211
4212 if (oss) {
4213 memset(oss, 0, sizeof(stack_t));
4214 oss->ss_sp = (void __user *) t->sas_ss_sp;
4215 oss->ss_size = t->sas_ss_size;
4216 oss->ss_flags = sas_ss_flags(sp) |
4217 (current->sas_ss_flags & SS_FLAG_BITS);
4218 }
4219
4220 if (ss) {
4221 void __user *ss_sp = ss->ss_sp;
4222 size_t ss_size = ss->ss_size;
4223 unsigned ss_flags = ss->ss_flags;
4224 int ss_mode;
4225
4226 if (unlikely(on_sig_stack(sp)))
4227 return -EPERM;
4228
4229 ss_mode = ss_flags & ~SS_FLAG_BITS;
4230 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4231 ss_mode != 0))
4232 return -EINVAL;
4233
4234 /*
4235 * Return before taking any locks if no actual
4236 * sigaltstack changes were requested.
4237 */
4238 if (t->sas_ss_sp == (unsigned long)ss_sp &&
4239 t->sas_ss_size == ss_size &&
4240 t->sas_ss_flags == ss_flags)
4241 return 0;
4242
4243 sigaltstack_lock();
4244 if (ss_mode == SS_DISABLE) {
4245 ss_size = 0;
4246 ss_sp = NULL;
4247 } else {
4248 if (unlikely(ss_size < min_ss_size))
4249 ret = -ENOMEM;
4250 if (!sigaltstack_size_valid(ss_size))
4251 ret = -ENOMEM;
4252 }
4253 if (!ret) {
4254 t->sas_ss_sp = (unsigned long) ss_sp;
4255 t->sas_ss_size = ss_size;
4256 t->sas_ss_flags = ss_flags;
4257 }
4258 sigaltstack_unlock();
4259 }
4260 return ret;
4261}
4262
4263SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
4264{
4265 stack_t new, old;
4266 int err;
4267 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4268 return -EFAULT;
4269 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4270 current_user_stack_pointer(),
4271 MINSIGSTKSZ);
4272 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4273 err = -EFAULT;
4274 return err;
4275}
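
/*
 * Illustrative userspace usage (glibc's sigaltstack() wraps this
 * syscall; not part of this file). Paired with SA_ONSTACK it lets a
 * SIGSEGV handler run even after the main stack overflows; the handler
 * name is hypothetical:
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = {
 *		.ss_sp    = malloc(SIGSTKSZ),
 *		.ss_size  = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = {
 *		.sa_handler = segv_handler,
 *		.sa_flags   = SA_ONSTACK,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 */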
4276
4277int restore_altstack(const stack_t __user *uss)
4278{
4279 stack_t new;
4280 if (copy_from_user(&new, uss, sizeof(stack_t)))
4281 return -EFAULT;
4282 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4283 MINSIGSTKSZ);
4284 /* squash all but EFAULT for now */
4285 return 0;
4286}
4287
4288int __save_altstack(stack_t __user *uss, unsigned long sp)
4289{
4290 struct task_struct *t = current;
4291 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4292 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4293 __put_user(t->sas_ss_size, &uss->ss_size);
4294 return err;
4295}
4296
4297#ifdef CONFIG_COMPAT
4298static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4299 compat_stack_t __user *uoss_ptr)
4300{
4301 stack_t uss, uoss;
4302 int ret;
4303
4304 if (uss_ptr) {
4305 compat_stack_t uss32;
4306 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4307 return -EFAULT;
4308 uss.ss_sp = compat_ptr(uss32.ss_sp);
4309 uss.ss_flags = uss32.ss_flags;
4310 uss.ss_size = uss32.ss_size;
4311 }
4312 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4313 compat_user_stack_pointer(),
4314 COMPAT_MINSIGSTKSZ);
4315 if (ret >= 0 && uoss_ptr) {
4316 compat_stack_t old;
4317 memset(&old, 0, sizeof(old));
4318 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4319 old.ss_flags = uoss.ss_flags;
4320 old.ss_size = uoss.ss_size;
4321 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4322 ret = -EFAULT;
4323 }
4324 return ret;
4325}
4326
4327COMPAT_SYSCALL_DEFINE2(sigaltstack,
4328 const compat_stack_t __user *, uss_ptr,
4329 compat_stack_t __user *, uoss_ptr)
4330{
4331 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4332}
4333
4334int compat_restore_altstack(const compat_stack_t __user *uss)
4335{
4336 int err = do_compat_sigaltstack(uss, NULL);
4337 /* squash all but -EFAULT for now */
4338 return err == -EFAULT ? err : 0;
4339}
4340
4341int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4342{
4343 int err;
4344 struct task_struct *t = current;
4345 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4346 &uss->ss_sp) |
4347 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4348 __put_user(t->sas_ss_size, &uss->ss_size);
4349 return err;
4350}
4351#endif
4352
4353#ifdef __ARCH_WANT_SYS_SIGPENDING
4354
4355/**
4356 * sys_sigpending - examine pending signals
4357 * @uset: where the mask of pending signals is returned
4358 */
4359SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4360{
4361 sigset_t set;
4362
4363 if (sizeof(old_sigset_t) > sizeof(*uset))
4364 return -EINVAL;
4365
4366 do_sigpending(&set);
4367
4368 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4369 return -EFAULT;
4370
4371 return 0;
4372}
4373
4374#ifdef CONFIG_COMPAT
4375COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4376{
4377 sigset_t set;
4378
4379 do_sigpending(&set);
4380
4381 return put_user(set.sig[0], set32);
4382}
4383#endif
4384
4385#endif
4386
4387#ifdef __ARCH_WANT_SYS_SIGPROCMASK
4388/**
4389 * sys_sigprocmask - examine and change blocked signals
4390 * @how: whether to add, remove, or set signals
4391 * @nset: signals to add or remove (if non-null)
4392 * @oset: previous value of signal mask if non-null
4393 *
4394 * Some platforms have their own version with special arguments;
4395 * others support only sys_rt_sigprocmask.
4396 */
4397
4398SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4399 old_sigset_t __user *, oset)
4400{
4401 old_sigset_t old_set, new_set;
4402 sigset_t new_blocked;
4403
4404 old_set = current->blocked.sig[0];
4405
4406 if (nset) {
4407 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4408 return -EFAULT;
4409
4410 new_blocked = current->blocked;
4411
4412 switch (how) {
4413 case SIG_BLOCK:
4414 sigaddsetmask(&new_blocked, new_set);
4415 break;
4416 case SIG_UNBLOCK:
4417 sigdelsetmask(&new_blocked, new_set);
4418 break;
4419 case SIG_SETMASK:
4420 new_blocked.sig[0] = new_set;
4421 break;
4422 default:
4423 return -EINVAL;
4424 }
4425
4426 set_current_blocked(&new_blocked);
4427 }
4428
4429 if (oset) {
4430 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4431 return -EFAULT;
4432 }
4433
4434 return 0;
4435}
4436#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4437
4438#ifndef CONFIG_ODD_RT_SIGACTION
4439/**
4440 * sys_rt_sigaction - alter an action taken by a process
4441 * @sig: signal to be sent
4442 * @act: new sigaction
4443 * @oact: used to save the previous sigaction
4444 * @sigsetsize: size of sigset_t type
4445 */
4446SYSCALL_DEFINE4(rt_sigaction, int, sig,
4447 const struct sigaction __user *, act,
4448 struct sigaction __user *, oact,
4449 size_t, sigsetsize)
4450{
4451 struct k_sigaction new_sa, old_sa;
4452 int ret;
4453
4454 /* XXX: Don't preclude handling different sized sigset_t's. */
4455 if (sigsetsize != sizeof(sigset_t))
4456 return -EINVAL;
4457
4458 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4459 return -EFAULT;
4460
4461 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4462 if (ret)
4463 return ret;
4464
4465 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4466 return -EFAULT;
4467
4468 return 0;
4469}
4470#ifdef CONFIG_COMPAT
4471COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4472 const struct compat_sigaction __user *, act,
4473 struct compat_sigaction __user *, oact,
4474 compat_size_t, sigsetsize)
4475{
4476 struct k_sigaction new_ka, old_ka;
4477#ifdef __ARCH_HAS_SA_RESTORER
4478 compat_uptr_t restorer;
4479#endif
4480 int ret;
4481
4482 /* XXX: Don't preclude handling different sized sigset_t's. */
4483 if (sigsetsize != sizeof(compat_sigset_t))
4484 return -EINVAL;
4485
4486 if (act) {
4487 compat_uptr_t handler;
4488 ret = get_user(handler, &act->sa_handler);
4489 new_ka.sa.sa_handler = compat_ptr(handler);
4490#ifdef __ARCH_HAS_SA_RESTORER
4491 ret |= get_user(restorer, &act->sa_restorer);
4492 new_ka.sa.sa_restorer = compat_ptr(restorer);
4493#endif
4494 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4495 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4496 if (ret)
4497 return -EFAULT;
4498 }
4499
4500 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4501 if (!ret && oact) {
4502 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4503 &oact->sa_handler);
4504 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4505 sizeof(oact->sa_mask));
4506 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4507#ifdef __ARCH_HAS_SA_RESTORER
4508 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4509 &oact->sa_restorer);
4510#endif
4511 }
4512 return ret;
4513}
4514#endif
4515#endif /* !CONFIG_ODD_RT_SIGACTION */
4516
4517#ifdef CONFIG_OLD_SIGACTION
4518SYSCALL_DEFINE3(sigaction, int, sig,
4519 const struct old_sigaction __user *, act,
4520 struct old_sigaction __user *, oact)
4521{
4522 struct k_sigaction new_ka, old_ka;
4523 int ret;
4524
4525 if (act) {
4526 old_sigset_t mask;
4527 if (!access_ok(act, sizeof(*act)) ||
4528 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4529 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4530 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4531 __get_user(mask, &act->sa_mask))
4532 return -EFAULT;
4533#ifdef __ARCH_HAS_KA_RESTORER
4534 new_ka.ka_restorer = NULL;
4535#endif
4536 siginitset(&new_ka.sa.sa_mask, mask);
4537 }
4538
4539 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4540
4541 if (!ret && oact) {
4542 if (!access_ok(oact, sizeof(*oact)) ||
4543 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4544 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4545 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4546 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4547 return -EFAULT;
4548 }
4549
4550 return ret;
4551}
4552#endif
4553#ifdef CONFIG_COMPAT_OLD_SIGACTION
4554COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4555 const struct compat_old_sigaction __user *, act,
4556 struct compat_old_sigaction __user *, oact)
4557{
4558 struct k_sigaction new_ka, old_ka;
4559 int ret;
4560 compat_old_sigset_t mask;
4561 compat_uptr_t handler, restorer;
4562
4563 if (act) {
4564 if (!access_ok(act, sizeof(*act)) ||
4565 __get_user(handler, &act->sa_handler) ||
4566 __get_user(restorer, &act->sa_restorer) ||
4567 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4568 __get_user(mask, &act->sa_mask))
4569 return -EFAULT;
4570
4571#ifdef __ARCH_HAS_KA_RESTORER
4572 new_ka.ka_restorer = NULL;
4573#endif
4574 new_ka.sa.sa_handler = compat_ptr(handler);
4575 new_ka.sa.sa_restorer = compat_ptr(restorer);
4576 siginitset(&new_ka.sa.sa_mask, mask);
4577 }
4578
4579 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4580
4581 if (!ret && oact) {
4582 if (!access_ok(oact, sizeof(*oact)) ||
4583 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4584 &oact->sa_handler) ||
4585 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4586 &oact->sa_restorer) ||
4587 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4588 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4589 return -EFAULT;
4590 }
4591 return ret;
4592}
4593#endif
4594
4595#ifdef CONFIG_SGETMASK_SYSCALL
4596
4597/*
4598 * For backwards compatibility. Functionality superseded by sigprocmask.
4599 */
4600SYSCALL_DEFINE0(sgetmask)
4601{
4602 /* SMP safe */
4603 return current->blocked.sig[0];
4604}
4605
4606SYSCALL_DEFINE1(ssetmask, int, newmask)
4607{
4608 int old = current->blocked.sig[0];
4609 sigset_t newset;
4610
4611 siginitset(&newset, newmask);
4612 set_current_blocked(&newset);
4613
4614 return old;
4615}
4616#endif /* CONFIG_SGETMASK_SYSCALL */
4617
4618#ifdef __ARCH_WANT_SYS_SIGNAL
4619/*
4620 * For backwards compatibility. Functionality superseded by sigaction.
4621 */
4622SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4623{
4624 struct k_sigaction new_sa, old_sa;
4625 int ret;
4626
4627 new_sa.sa.sa_handler = handler;
4628 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4629 sigemptyset(&new_sa.sa.sa_mask);
4630
4631 ret = do_sigaction(sig, &new_sa, &old_sa);
4632
4633 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4634}
4635#endif /* __ARCH_WANT_SYS_SIGNAL */
4636
4637#ifdef __ARCH_WANT_SYS_PAUSE
4638
4639SYSCALL_DEFINE0(pause)
4640{
4641 while (!signal_pending(current)) {
4642 __set_current_state(TASK_INTERRUPTIBLE);
4643 schedule();
4644 }
4645 return -ERESTARTNOHAND;
4646}
4647
4648#endif
4649
4650static int sigsuspend(sigset_t *set)
4651{
4652 current->saved_sigmask = current->blocked;
4653 set_current_blocked(set);
4654
4655 while (!signal_pending(current)) {
4656 __set_current_state(TASK_INTERRUPTIBLE);
4657 schedule();
4658 }
4659 set_restore_sigmask();
4660 return -ERESTARTNOHAND;
4661}
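
/*
 * Illustrative userspace pattern this primitive enables (the flag and
 * handler names are hypothetical; not part of this file): test a
 * condition and sleep atomically, with no window where the signal can
 * arrive between the test and the sleep.
 *
 *	static volatile sig_atomic_t got_usr1;
 *	static void usr1_handler(int sig) { got_usr1 = 1; }
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!got_usr1)
 *		sigsuspend(&old);	atomically unblock and sleep
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */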
4662
4663/**
4664 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
4665 *	until a signal is received
4666 * @unewset: new signal mask value
4667 * @sigsetsize: size of sigset_t type
4668 */
4669SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4670{
4671 sigset_t newset;
4672
4673 /* XXX: Don't preclude handling different sized sigset_t's. */
4674 if (sigsetsize != sizeof(sigset_t))
4675 return -EINVAL;
4676
4677 if (copy_from_user(&newset, unewset, sizeof(newset)))
4678 return -EFAULT;
4679 return sigsuspend(&newset);
4680}
4681
4682#ifdef CONFIG_COMPAT
4683COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4684{
4685 sigset_t newset;
4686
4687 /* XXX: Don't preclude handling different sized sigset_t's. */
4688 if (sigsetsize != sizeof(sigset_t))
4689 return -EINVAL;
4690
4691 if (get_compat_sigset(&newset, unewset))
4692 return -EFAULT;
4693 return sigsuspend(&newset);
4694}
4695#endif
4696
4697#ifdef CONFIG_OLD_SIGSUSPEND
4698SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4699{
4700 sigset_t blocked;
4701 siginitset(&blocked, mask);
4702 return sigsuspend(&blocked);
4703}
4704#endif
4705#ifdef CONFIG_OLD_SIGSUSPEND3
4706SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4707{
4708 sigset_t blocked;
4709 siginitset(&blocked, mask);
4710 return sigsuspend(&blocked);
4711}
4712#endif
4713
4714__weak const char *arch_vma_name(struct vm_area_struct *vma)
4715{
4716 return NULL;
4717}
4718
4719static inline void siginfo_buildtime_checks(void)
4720{
4721 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4722
4723 /* Verify the offsets in the two siginfos match */
4724#define CHECK_OFFSET(field) \
4725 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4726
4727 /* kill */
4728 CHECK_OFFSET(si_pid);
4729 CHECK_OFFSET(si_uid);
4730
4731 /* timer */
4732 CHECK_OFFSET(si_tid);
4733 CHECK_OFFSET(si_overrun);
4734 CHECK_OFFSET(si_value);
4735
4736 /* rt */
4737 CHECK_OFFSET(si_pid);
4738 CHECK_OFFSET(si_uid);
4739 CHECK_OFFSET(si_value);
4740
4741 /* sigchld */
4742 CHECK_OFFSET(si_pid);
4743 CHECK_OFFSET(si_uid);
4744 CHECK_OFFSET(si_status);
4745 CHECK_OFFSET(si_utime);
4746 CHECK_OFFSET(si_stime);
4747
4748 /* sigfault */
4749 CHECK_OFFSET(si_addr);
4750 CHECK_OFFSET(si_trapno);
4751 CHECK_OFFSET(si_addr_lsb);
4752 CHECK_OFFSET(si_lower);
4753 CHECK_OFFSET(si_upper);
4754 CHECK_OFFSET(si_pkey);
4755 CHECK_OFFSET(si_perf_data);
4756 CHECK_OFFSET(si_perf_type);
4757 CHECK_OFFSET(si_perf_flags);
4758
4759 /* sigpoll */
4760 CHECK_OFFSET(si_band);
4761 CHECK_OFFSET(si_fd);
4762
4763 /* sigsys */
4764 CHECK_OFFSET(si_call_addr);
4765 CHECK_OFFSET(si_syscall);
4766 CHECK_OFFSET(si_arch);
4767#undef CHECK_OFFSET
4768
4769 /* usb asyncio */
4770 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4771 offsetof(struct siginfo, si_addr));
4772 if (sizeof(int) == sizeof(void __user *)) {
4773 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4774 sizeof(void __user *));
4775 } else {
4776 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4777 sizeof_field(struct siginfo, si_uid)) !=
4778 sizeof(void __user *));
4779 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4780 offsetof(struct siginfo, si_uid));
4781 }
4782#ifdef CONFIG_COMPAT
4783 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4784 offsetof(struct compat_siginfo, si_addr));
4785 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4786 sizeof(compat_uptr_t));
4787 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4788 sizeof_field(struct siginfo, si_pid));
4789#endif
4790}
4791
4792#if defined(CONFIG_SYSCTL)
4793static struct ctl_table signal_debug_table[] = {
4794#ifdef CONFIG_SYSCTL_EXCEPTION_TRACE
4795 {
4796 .procname = "exception-trace",
4797 .data = &show_unhandled_signals,
4798 .maxlen = sizeof(int),
4799 .mode = 0644,
4800 .proc_handler = proc_dointvec
4801 },
4802#endif
4803 { }
4804};
4805
4806static int __init init_signal_sysctls(void)
4807{
4808 register_sysctl_init("debug", signal_debug_table);
4809 return 0;
4810}
4811early_initcall(init_signal_sysctls);
4812#endif /* CONFIG_SYSCTL */
4813
4814void __init signals_init(void)
4815{
4816 siginfo_buildtime_checks();
4817
4818 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4819}
4820
4821#ifdef CONFIG_KGDB_KDB
4822#include <linux/kdb.h>
4823/*
4824 * kdb_send_sig - Allows kdb to send signals without exposing
4825 * signal internals. This function checks if the required locks are
4826 * available before calling the main signal code, to avoid kdb
4827 * deadlocks.
4828 */
4829void kdb_send_sig(struct task_struct *t, int sig)
4830{
4831 static struct task_struct *kdb_prev_t;
4832 int new_t, ret;
4833 if (!spin_trylock(&t->sighand->siglock)) {
4834 kdb_printf("Can't do kill command now.\n"
4835 "The sigmask lock is held somewhere else in "
4836 "kernel, try again later\n");
4837 return;
4838 }
4839 new_t = kdb_prev_t != t;
4840 kdb_prev_t = t;
4841 if (!task_is_running(t) && new_t) {
4842 spin_unlock(&t->sighand->siglock);
4843 kdb_printf("Process is not RUNNING, sending a signal from "
4844 "kdb risks deadlock\n"
4845 "on the run queue locks. "
4846 "The signal has _not_ been sent.\n"
4847 "Reissue the kill command if you want to risk "
4848 "the deadlock.\n");
4849 return;
4850 }
4851 ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4852 spin_unlock(&t->sighand->siglock);
4853 if (ret)
4854		kdb_printf("Failed to deliver signal %d to process %d.\n",
4855			   sig, t->pid);
4856 else
4857 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4858}
4859#endif /* CONFIG_KGDB_KDB */