// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/task_work.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>
#include <linux/sysctl.h>
#include <uapi/linux/pidfd.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>	/* for syscall_get_* */

#include "time/posix-timers.h"

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless the
	 * signal is SIGKILL, which can't be reported anyway but can be
	 * ignored by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only the callers who know they
	 * should clear it do so.
	 */
	return false;
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

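/*
 * Worked example (editor's illustration, not from the original source):
 * assume SIGSEGV (11) and SIGTERM (15) are both pending in the first
 * word and neither is blocked.  Since sigmask(sig) is 1 << (sig - 1):
 *
 *	x = *s &~ *m		has bits 10 and 14 set
 *	x & SYNCHRONOUS_MASK	keeps only bit 10 (SIGSEGV)
 *	ffz(~x) + 1		= 10 + 1 = 11
 *
 * so the synchronous fault signal is dequeued before the asynchronous
 * SIGTERM, even though SIGTERM has the higher signal number.
 */
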
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->sighand->siglock guarantees that @task->parent points
 * to the ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read the comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

static struct ucounts *sig_get_ucounts(struct task_struct *t, int sig,
				       int override_rlimit)
{
	struct ucounts *ucounts;
	long sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	ucounts = task_ucounts(t);
	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING,
					    override_rlimit);
	rcu_read_unlock();
	if (!sigpending)
		return NULL;

	if (unlikely(!override_rlimit && sigpending > task_rlimit(t, RLIMIT_SIGPENDING))) {
		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
		print_dropped_signal(sig);
		return NULL;
	}

	return ucounts;
}

static void __sigqueue_init(struct sigqueue *q, struct ucounts *ucounts,
			    const unsigned int sigqueue_flags)
{
	INIT_LIST_HEAD(&q->list);
	q->flags = sigqueue_flags;
	q->ucounts = ucounts;
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
				       int override_rlimit)
{
	struct ucounts *ucounts = sig_get_ucounts(t, sig, override_rlimit);
	struct sigqueue *q;

	if (!ucounts)
		return NULL;

	q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	if (!q) {
		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
		return NULL;
	}

	__sigqueue_init(q, ucounts, 0);
	return q;
}

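/*
 * Lifecycle sketch (editor's illustration, not from the original source):
 * every successful sigqueue_alloc() charges one slot against the sender's
 * RLIMIT_SIGPENDING ucount, and the matching __sigqueue_free() releases it,
 * so a queued-but-never-delivered signal keeps the slot pinned:
 *
 *	struct sigqueue *q = sigqueue_alloc(sig, t, GFP_ATOMIC, 0);
 *
 *	if (q) {
 *		...			// queue q on a struct sigpending
 *		__sigqueue_free(q);	// on dequeue/flush: uncharges the slot
 *	}
 */
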
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC) {
		posixtimer_sigqueue_putref(q);
		return;
	}
	if (q->ucounts) {
		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
		q->ucounts = NULL;
	}
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* If dying, we handle all new signals by ignoring them */
	if (fatal_signal_pending(tsk))
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   struct sigqueue **timer_sigq)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		/*
		 * posix-timer signals are preallocated and freed when the last
		 * reference count is dropped in posixtimer_deliver_signal() or
		 * immediately on timer deletion when the signal is not pending.
		 * Spare the extra round through __sigqueue_free() which is
		 * ignoring preallocated signals.
		 */
		if (unlikely((first->flags & SIGQUEUE_PREALLOC) && (info->si_code == SI_TIMER)))
			*timer_sigq = first;
		else
			__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, struct sigqueue **timer_sigq)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, timer_sigq);
	return sig;
}

/*
 * Try to dequeue a signal. If a deliverable signal is found fill in the
 * caller provided siginfo and return the signal number. Otherwise return
 * 0.
 */
int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type)
{
	struct task_struct *tsk = current;
	struct sigqueue *timer_sigq;
	int signr;

	lockdep_assert_held(&tsk->sighand->siglock);

again:
	*type = PIDTYPE_PID;
	timer_sigq = NULL;
	signr = __dequeue_signal(&tsk->pending, mask, info, &timer_sigq);
	if (!signr) {
		*type = PIDTYPE_TGID;
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &timer_sigq);

		if (unlikely(signr == SIGALRM))
			posixtimer_rearm_itimer(tsk);
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}

	if (IS_ENABLED(CONFIG_POSIX_TIMERS) && unlikely(timer_sigq)) {
		if (!posixtimer_deliver_signal(info, timer_sigq))
			goto again;
	}

	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);

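/*
 * Usage sketch (editor's illustration, not from the original source):
 * @mask names the signals that must *not* be dequeued, so a caller that
 * waits for SIGUSR1 alone would, with the siglock held:
 *
 *	sigset_t mask;
 *	kernel_siginfo_t info;
 *	enum pid_type type;
 *	int signr;
 *
 *	sigfillset(&mask);
 *	sigdelset(&mask, SIGUSR1);
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(&mask, &info, &type);
 *	spin_unlock_irq(&current->sighand->siglock);
 *
 * This mirrors how do_sigtimedwait() builds its mask from the inverse of
 * the caller-supplied set before dequeueing.
 */
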
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked.
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	lockdep_assert_held(&t->sighand->siglock);

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q);

static void sigqueue_free_ignored(struct task_struct *tsk, struct sigqueue *q)
{
	if (likely(!(q->flags & SIGQUEUE_PREALLOC) || q->info.si_code != SI_TIMER))
		__sigqueue_free(q);
	else
		posixtimer_sig_ignore(tsk, q);
}

/* Remove signals in mask from the pending set and queue. */
static void flush_sigqueue_mask(struct task_struct *p, sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	lockdep_assert_held(&p->sighand->siglock);

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			sigqueue_free_ignored(p, q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

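/*
 * Example (editor's illustration, not from the original source): a sender
 * whose real or effective uid is 1000 may signal a target whose real or
 * saved set-user-ID is 1000.  This matches the kill(2) permission rule:
 * sender {uid, euid} must intersect target {uid, suid}, unless the sender
 * holds CAP_KILL in the target's user namespace.
 */
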
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify the ptracer of an event.  @t must have been seized by
 * the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it
 * can re-trap for the new event.  If trapped otherwise, the STOP trap will
 * eventually be taken without returning to userland after the existing
 * traps are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	lockdep_assert_held(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & SIGNAL_GROUP_EXIT) {
		if (signal->core_state)
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, drop the signal.
		 */
		return false;
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(p, &flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(p, &flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(p, &flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(p, &flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED))) {
				t->jobctl &= ~JOBCTL_STOPPED;
				wake_up_state(t, __TASK_STOPPED);
			} else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

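/*
 * Observable effect (editor's illustration, not from the original source):
 * because these side effects run at signal-generation time, a userspace
 * sequence such as
 *
 *	kill(pid, SIGSTOP);	// target group-stops
 *	kill(pid, SIGCONT);	// target resumes, even if SIGCONT is
 *				// ignored or blocked by the target
 *
 * resumes the target regardless of its SIGCONT disposition; only the
 * delivery of SIGCONT, not the continue itself, depends on the handler.
 */
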
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * Try the suggested task first (may or may not be the main thread).
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			__for_each_thread(signal, t) {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			}
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

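/*
 * Coalescing example (editor's illustration, not from the original source):
 * legacy (non-realtime) signals are represented by a single pending bit, so
 * a second instance sent while the first is still pending is dropped,
 * whereas realtime signals queue once per send:
 *
 *	kill(pid, SIGUSR1);		// pending bit set, siginfo queued
 *	kill(pid, SIGUSR1);		// already pending: coalesced away
 *	sigqueue(pid, SIGRTMIN, val);	// queued
 *	sigqueue(pid, SIGRTMIN, val);	// queued again, delivered twice
 */
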
static int __send_signal_locked(int sig, struct kernel_siginfo *info,
				struct task_struct *t, enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	lockdep_assert_held(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;

	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

int send_signal_locked(int sig, struct kernel_siginfo *info,
		       struct task_struct *t, enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special, or the siginfo has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal_locked(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = task_pt_regs(current);
	struct file *exe_file;

	exe_file = get_task_exe_file(current);
	if (exe_file) {
		pr_info("%pD: %s: potentially unexpected fatal signal %d.\n",
			exe_file, current->comm, signr);
		fput(exe_file);
	} else {
		pr_info("%s: potentially unexpected fatal signal %d.\n",
			current->comm, signr);
	}

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;

		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
		     enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal_locked(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

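/*
 * Usage sketch (editor's illustration, not from the original source): a
 * driver holding a task reference could send a queued SIGIO to a single
 * thread like this; "task" is an illustrative name here:
 *
 *	struct kernel_siginfo info;
 *
 *	clear_siginfo(&info);
 *	info.si_signo = SIGIO;
 *	info.si_code  = SI_KERNEL;
 *	do_send_sig_info(SIGIO, &info, task, PIDTYPE_PID);
 *
 * lock_task_sighand() inside takes care of racing with task exit, so the
 * caller only needs to keep its task_struct reference alive.
 */
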
enum sig_handler {
	HANDLER_CURRENT, /* If reachable use the current handler */
	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
	HANDLER_EXIT,	 /* Only visible as the process exit code */
};

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
		       enum sig_handler handler)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
		action->sa.sa_handler = SIG_DFL;
		if (handler == HANDLER_EXIT)
			action->sa.sa_flags |= SA_IMMUTABLE;
		if (blocked)
			sigdelset(&t->blocked, sig);
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
	 */
	if (action->sa.sa_handler == SIG_DFL &&
	    (!t->ptrace || (handler == HANDLER_EXIT)))
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
	/* This can happen if the signal was already pending and blocked */
	if (!task_sigpending(t))
		signal_wake_up(t, 0);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;
	int count = 0;

	p->signal->group_stop_count = 0;

	for_other_threads(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal().  In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

#ifdef CONFIG_LOCKDEP
void lockdep_assert_task_sighand_held(struct task_struct *task)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	sighand = rcu_dereference(task->sighand);
	if (sighand)
		lockdep_assert_held(&sighand->siglock);
	else
		WARN_ON_ONCE(1);
	rcu_read_unlock();
}
#endif

/*
 * send signal info to all the members of a thread group or to the
 * individual thread if type == PIDTYPE_PID.
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int ret = -ESRCH;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		/*
		 * If group_send_sig_info() succeeds at least once ret
		 * becomes 0 and after that the code below has no effect.
		 * Otherwise we return the last err or -ESRCH if this
		 * process group is empty.
		 */
		if (ret)
			ret = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return ret;
}

static int kill_pid_info_type(int sig, struct kernel_siginfo *info,
			      struct pid *pid, enum pid_type type)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, type);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;
		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	return kill_pid_info_type(sig, info, pid, PIDTYPE_TGID);
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace.  As the 32bit address will be encoded in the
 * low 32bits of the pointer, those low 32bits will be stored at a
 * higher address than they would appear in a 32bit pointer.  So
 * userspace will not see the address it was expecting for its
 * completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

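/*
 * Argument semantics (editor's illustration, mirroring kill(2)):
 *
 *	kill_something_info(sig, info, 1234)	-> the process with pid 1234
 *	kill_something_info(sig, info, 0)	-> the caller's process group
 *	kill_something_info(sig, info, -1234)	-> process group 1234
 *	kill_something_info(sig, info, -1)	-> every process the caller
 *						   may signal, except init and
 *						   the caller's own thread group
 */
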
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

void force_fatal_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
}

void force_exit_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_EXIT);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
	if (sig == SIGSEGV)
		force_fatal_sig(SIGSEGV);
	else
		force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr,
			    struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
}

int force_sig_fault(int sig, int code, void __user *addr)
{
	return force_sig_fault_to_task(sig, code, addr, current);
}

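/*
 * Usage sketch (editor's illustration, not from the original source): an
 * architecture page-fault handler that cannot resolve a user access would
 * typically report it to the faulting task as
 *
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)fault_address);
 *
 * where fault_address is an illustrative name for the faulting address
 * taken from the exception frame.  The "force" path guarantees the task
 * cannot simply block or ignore the synchronous fault away.
 */
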
int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_BNDERR;
	info.si_addr = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_PKUERR;
	info.si_addr = addr;
	info.si_pkey = pkey;
	return force_sig_info(&info);
}
#endif

int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_PERF;
	info.si_addr = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	/*
	 * Signals generated by perf events should not terminate the whole
	 * process if SIGTRAP is blocked, however, delivering the signal
	 * asynchronously is better than not delivering at all. But tell user
	 * space if the signal was asynchronous, so it can clearly be
	 * distinguished from normal synchronous ones.
	 */
	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
				     TRAP_PERF_FLAG_ASYNC :
				     0;

	return send_sig_info(info.si_signo, &info, current);
}

/**
 * force_sig_seccomp - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 * @force_coredump: true to trigger a coredump
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
int force_sig_seccomp(int syscall, int reason, bool force_coredump)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch(current);
	info.si_syscall = syscall;
	return force_sig_info_to_task(&info, current,
				      force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
}

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_trapno = trapno;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			  struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_trapno = trapno;
	return send_sig_info(info.si_signo, &info, t);
}

static int kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);
	return ret;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	return kill_pgrp_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

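/*
 * Usage sketch (editor's illustration, not from the original source): a
 * caller that only has a numeric pid takes a struct pid reference first,
 * so the target can't be recycled between lookup and kill:
 *
 *	struct pid *pid = find_get_pid(nr);	// nr is illustrative
 *
 *	if (pid) {
 *		kill_pid(pid, SIGTERM, 1);	// priv=1: SEND_SIG_PRIV
 *		put_pid(pid);
 *	}
 */
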
#ifdef CONFIG_POSIX_TIMERS
/*
 * These functions handle POSIX timer signals. POSIX timers use
 * preallocated sigqueue structs for sending signals.
 */
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;

	guard(spinlock_irqsave)(&tsk->sighand->siglock);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
}

bool posixtimer_init_sigqueue(struct sigqueue *q)
{
	struct ucounts *ucounts = sig_get_ucounts(current, -1, 0);

	if (!ucounts)
		return false;
	clear_siginfo(&q->info);
	__sigqueue_init(q, ucounts, SIGQUEUE_PREALLOC);
	return true;
}

static void posixtimer_queue_sigqueue(struct sigqueue *q, struct task_struct *t, enum pid_type type)
{
	struct sigpending *pending;
	int sig = q->info.si_signo;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
}

1954/*
1955 * This function is used by POSIX timers to deliver a timer signal.
1956 * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID
1957 * set), the signal must be delivered to the specific thread (queues
1958 * into t->pending).
1959 *
1960 * Where type is not PIDTYPE_PID, signals must be delivered to the
1961 * process. In this case, prefer to deliver to current if it is in
1962 * the same thread group as the target process and its sighand is
1963 * stable, which avoids unnecessarily waking up a potentially idle task.
1964 */
1965static inline struct task_struct *posixtimer_get_target(struct k_itimer *tmr)
1966{
1967 struct task_struct *t = pid_task(tmr->it_pid, tmr->it_pid_type);
1968
1969 if (t && tmr->it_pid_type != PIDTYPE_PID &&
1970 same_thread_group(t, current) && !current->exit_state)
1971 t = current;
1972 return t;
1973}
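
/*
 * Example: a minimal userspace sketch of the delivery semantics described
 * above, assuming the POSIX timer API (timer_create()/timer_settime(),
 * historically -lrt). SIGEV_SIGNAL sends a process-directed signal (the
 * !PIDTYPE_PID case); SIGEV_THREAD_ID would queue to one specific thread
 * (the PIDTYPE_PID case) instead.
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	static void handler(int sig, siginfo_t *si, void *uctx)
 *	{
 *		// si->si_code is SI_TIMER for timer signals
 *	}
 *
 *	int setup_timer(void)
 *	{
 *		struct sigaction sa = { .sa_sigaction = handler,
 *					.sa_flags = SA_SIGINFO };
 *		struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
 *					.sigev_signo = SIGRTMIN };
 *		struct itimerspec its = { .it_value.tv_sec = 1,
 *					  .it_interval.tv_sec = 1 };
 *		timer_t tid;
 *
 *		if (sigaction(SIGRTMIN, &sa, NULL))
 *			return -1;
 *		if (timer_create(CLOCK_MONOTONIC, &sev, &tid))
 *			return -1;
 *		return timer_settime(tid, 0, &its, NULL);
 *	}
 */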
1974
1975void posixtimer_send_sigqueue(struct k_itimer *tmr)
1976{
1977 struct sigqueue *q = &tmr->sigq;
1978 int sig = q->info.si_signo;
1979 struct task_struct *t;
1980 unsigned long flags;
1981 int result;
1982
1983 guard(rcu)();
1984
1985 t = posixtimer_get_target(tmr);
1986 if (!t)
1987 return;
1988
1989 if (!likely(lock_task_sighand(t, &flags)))
1990 return;
1991
1992 /*
1993 * Update @tmr::sigqueue_seq for posix timer signals with sighand
1994 * locked to prevent a race against dequeue_signal().
1995	 * Update @tmr::it_sigqueue_seq for posix timer signals with sighand
1996 tmr->it_sigqueue_seq = tmr->it_signal_seq;
1997
1998 /*
1999 * Set the signal delivery status under sighand lock, so that the
2000 * ignored signal handling can distinguish between a periodic and a
2001 * non-periodic timer.
2002 */
2003 tmr->it_sig_periodic = tmr->it_status == POSIX_TIMER_REQUEUE_PENDING;
2004
2005 if (!prepare_signal(sig, t, false)) {
2006 result = TRACE_SIGNAL_IGNORED;
2007
2008 if (!list_empty(&q->list)) {
2009 /*
2010 * The signal was ignored and blocked. The timer
2011 * expiry queued it because blocked signals are
2012 * queued independent of the ignored state.
2013 *
2014 * The unblocking set SIGPENDING, but the signal
2015 * was not yet dequeued from the pending list.
2016 * So prepare_signal() sees unblocked and ignored,
2017 * which ends up here. Leave it queued like a
2018 * regular signal.
2019 *
2020 * The same happens when the task group is exiting
2021 * and the signal is already queued.
2022 * prepare_signal() treats SIGNAL_GROUP_EXIT as
2023 * ignored independent of its queued state. This
2024 * gets cleaned up in __exit_signal().
2025 */
2026 goto out;
2027 }
2028
2029 /* Periodic timers with SIG_IGN are queued on the ignored list */
2030 if (tmr->it_sig_periodic) {
2031 /*
2032 * Already queued means the timer was rearmed after
2033 * the previous expiry got it on the ignore list.
2034 * Nothing to do for that case.
2035 */
2036 if (hlist_unhashed(&tmr->ignored_list)) {
2037 /*
2038 * Take a signal reference and queue it on
2039 * the ignored list.
2040 */
2041 posixtimer_sigqueue_getref(q);
2042 posixtimer_sig_ignore(t, q);
2043 }
2044 } else if (!hlist_unhashed(&tmr->ignored_list)) {
2045 /*
2046 * Covers the case where a timer was periodic and
2047 * then the signal was ignored. Later it was rearmed
2048 * as oneshot timer. The previous signal is invalid
2049 * now, and this oneshot signal has to be dropped.
2050 * Remove it from the ignored list and drop the
2051			 * reference count as the signal is no longer
2052 * queued.
2053 */
2054 hlist_del_init(&tmr->ignored_list);
2055 posixtimer_putref(tmr);
2056 }
2057 goto out;
2058 }
2059
2060 if (unlikely(!list_empty(&q->list))) {
2061 /* This holds a reference count already */
2062 result = TRACE_SIGNAL_ALREADY_PENDING;
2063 goto out;
2064 }
2065
2066 /*
2067 * If the signal is on the ignore list, it got blocked after it was
2068 * ignored earlier. But nothing lifted the ignore. Move it back to
2069 * the pending list to be consistent with the regular signal
2070 * handling. This already holds a reference count.
2071 *
2072 * If it's not on the ignore list acquire a reference count.
2073 */
2074 if (likely(hlist_unhashed(&tmr->ignored_list)))
2075 posixtimer_sigqueue_getref(q);
2076 else
2077 hlist_del_init(&tmr->ignored_list);
2078
2079 posixtimer_queue_sigqueue(q, t, tmr->it_pid_type);
2080 result = TRACE_SIGNAL_DELIVERED;
2081out:
2082 trace_signal_generate(sig, &q->info, t, tmr->it_pid_type != PIDTYPE_PID, result);
2083 unlock_task_sighand(t, &flags);
2084}
2085
2086static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q)
2087{
2088 struct k_itimer *tmr = container_of(q, struct k_itimer, sigq);
2089
2090 /*
2091 * If the timer is marked deleted already or the signal originates
2092 * from a non-periodic timer, then just drop the reference
2093 * count. Otherwise queue it on the ignored list.
2094 */
2095 if (tmr->it_signal && tmr->it_sig_periodic)
2096 hlist_add_head(&tmr->ignored_list, &tsk->signal->ignored_posix_timers);
2097 else
2098 posixtimer_putref(tmr);
2099}
2100
2101static void posixtimer_sig_unignore(struct task_struct *tsk, int sig)
2102{
2103 struct hlist_head *head = &tsk->signal->ignored_posix_timers;
2104 struct hlist_node *tmp;
2105 struct k_itimer *tmr;
2106
2107 if (likely(hlist_empty(head)))
2108 return;
2109
2110 /*
2111 * Rearming a timer with sighand lock held is not possible due to
2112 * lock ordering vs. tmr::it_lock. Just stick the sigqueue back and
2113 * let the signal delivery path deal with it whether it needs to be
2114 * rearmed or not. This cannot be decided here w/o dropping sighand
2115 * lock and creating a loop retry horror show.
2116 */
2117	hlist_for_each_entry_safe(tmr, tmp, head, ignored_list) {
2118 struct task_struct *target;
2119
2120 /*
2121 * tmr::sigq.info.si_signo is immutable, so accessing it
2122 * without holding tmr::it_lock is safe.
2123 */
2124 if (tmr->sigq.info.si_signo != sig)
2125 continue;
2126
2127 hlist_del_init(&tmr->ignored_list);
2128
2129 /* This should never happen and leaks a reference count */
2130 if (WARN_ON_ONCE(!list_empty(&tmr->sigq.list)))
2131 continue;
2132
2133 /*
2134 * Get the target for the signal. If target is a thread and
2135 * has exited by now, drop the reference count.
2136 */
2137 guard(rcu)();
2138 target = posixtimer_get_target(tmr);
2139 if (target)
2140 posixtimer_queue_sigqueue(&tmr->sigq, target, tmr->it_pid_type);
2141 else
2142 posixtimer_putref(tmr);
2143 }
2144}
2145#else /* CONFIG_POSIX_TIMERS */
2146static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q) { }
2147static inline void posixtimer_sig_unignore(struct task_struct *tsk, int sig) { }
2148#endif /* !CONFIG_POSIX_TIMERS */
2149
2150void do_notify_pidfd(struct task_struct *task)
2151{
2152 struct pid *pid = task_pid(task);
2153
2154 WARN_ON(task->exit_state == 0);
2155
2156 __wake_up(&pid->wait_pidfd, TASK_NORMAL, 0,
2157 poll_to_key(EPOLLIN | EPOLLRDNORM));
2158}
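
/*
 * Example: the userspace side of this wakeup, a sketch assuming a libc
 * that exposes SYS_pidfd_open; poll() reports POLLIN once the watched
 * task has exited.
 *
 *	#include <poll.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int wait_for_exit(pid_t pid)
 *	{
 *		struct pollfd pfd = { .events = POLLIN };
 *		int ret;
 *
 *		pfd.fd = syscall(SYS_pidfd_open, pid, 0);
 *		if (pfd.fd < 0)
 *			return -1;
 *		ret = poll(&pfd, 1, -1);	// blocks until exit
 *		close(pfd.fd);
 *		return ret == 1 ? 0 : -1;
 *	}
 */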
2159
2160/*
2161 * Let a parent know about the death of a child.
2162 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2163 *
2164 * Returns true if our parent ignored us and so we've switched to
2165 * self-reaping.
2166 */
2167bool do_notify_parent(struct task_struct *tsk, int sig)
2168{
2169 struct kernel_siginfo info;
2170 unsigned long flags;
2171 struct sighand_struct *psig;
2172 bool autoreap = false;
2173 u64 utime, stime;
2174
2175 WARN_ON_ONCE(sig == -1);
2176
2177 /* do_notify_parent_cldstop should have been called instead. */
2178 WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
2179
2180 WARN_ON_ONCE(!tsk->ptrace &&
2181 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2182 /*
2183	 * If tsk is a group leader with no remaining threads, wake up the
2184	 * non-PIDFD_THREAD waiters.
2185 */
2186 if (thread_group_empty(tsk))
2187 do_notify_pidfd(tsk);
2188
2189 if (sig != SIGCHLD) {
2190 /*
2191 * This is only possible if parent == real_parent.
2192 * Check if it has changed security domain.
2193 */
2194 if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2195 sig = SIGCHLD;
2196 }
2197
2198 clear_siginfo(&info);
2199 info.si_signo = sig;
2200 info.si_errno = 0;
2201 /*
2202 * We are under tasklist_lock here so our parent is tied to
2203 * us and cannot change.
2204 *
2205 * task_active_pid_ns will always return the same pid namespace
2206 * until a task passes through release_task.
2207 *
2208 * write_lock() currently calls preempt_disable() which is the
2209	 * same as rcu_read_lock(), but according to Oleg it is not
2210	 * correct to rely on this.
2211 */
2212 rcu_read_lock();
2213 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2214 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2215 task_uid(tsk));
2216 rcu_read_unlock();
2217
2218 task_cputime(tsk, &utime, &stime);
2219 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2220 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2221
2222 info.si_status = tsk->exit_code & 0x7f;
2223 if (tsk->exit_code & 0x80)
2224 info.si_code = CLD_DUMPED;
2225 else if (tsk->exit_code & 0x7f)
2226 info.si_code = CLD_KILLED;
2227 else {
2228 info.si_code = CLD_EXITED;
2229 info.si_status = tsk->exit_code >> 8;
2230 }
2231
2232 psig = tsk->parent->sighand;
2233 spin_lock_irqsave(&psig->siglock, flags);
2234 if (!tsk->ptrace && sig == SIGCHLD &&
2235 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2236 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2237 /*
2238 * We are exiting and our parent doesn't care. POSIX.1
2239 * defines special semantics for setting SIGCHLD to SIG_IGN
2240 * or setting the SA_NOCLDWAIT flag: we should be reaped
2241 * automatically and not left for our parent's wait4 call.
2242 * Rather than having the parent do it as a magic kind of
2243 * signal handler, we just set this to tell do_exit that we
2244 * can be cleaned up without becoming a zombie. Note that
2245 * we still call __wake_up_parent in this case, because a
2246 * blocked sys_wait4 might now return -ECHILD.
2247 *
2248 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2249 * is implementation-defined: we do (if you don't want
2250 * it, just use SIG_IGN instead).
2251 */
2252 autoreap = true;
2253 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2254 sig = 0;
2255 }
2256 /*
2257 * Send with __send_signal as si_pid and si_uid are in the
2258 * parent's namespaces.
2259 */
2260 if (valid_signal(sig) && sig)
2261 __send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2262 __wake_up_parent(tsk, tsk->parent);
2263 spin_unlock_irqrestore(&psig->siglock, flags);
2264
2265 return autoreap;
2266}
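
/*
 * Example: a userspace sketch of the autoreap semantics above. With
 * SIGCHLD set to SIG_IGN, terminated children are reaped automatically
 * and a blocked wait() fails with ECHILD once all of them are gone.
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int autoreap_demo(void)
 *	{
 *		signal(SIGCHLD, SIG_IGN);
 *		if (fork() == 0)
 *			_exit(0);	// child leaves no zombie behind
 *		// wait() blocks until the child is autoreaped, then ECHILD
 *		return wait(NULL) == -1 && errno == ECHILD;
 *	}
 */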
2267
2268/**
2269 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2270 * @tsk: task reporting the state change
2271 * @for_ptracer: the notification is for ptracer
2272 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2273 *
2274 * Notify @tsk's parent that the stopped/continued state has changed. If
2275 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2276 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2277 *
2278 * CONTEXT:
2279 * Must be called with tasklist_lock at least read locked.
2280 */
2281static void do_notify_parent_cldstop(struct task_struct *tsk,
2282 bool for_ptracer, int why)
2283{
2284 struct kernel_siginfo info;
2285 unsigned long flags;
2286 struct task_struct *parent;
2287 struct sighand_struct *sighand;
2288 u64 utime, stime;
2289
2290 if (for_ptracer) {
2291 parent = tsk->parent;
2292 } else {
2293 tsk = tsk->group_leader;
2294 parent = tsk->real_parent;
2295 }
2296
2297 clear_siginfo(&info);
2298 info.si_signo = SIGCHLD;
2299 info.si_errno = 0;
2300 /*
2301 * see comment in do_notify_parent() about the following 4 lines
2302 */
2303 rcu_read_lock();
2304 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2305 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2306 rcu_read_unlock();
2307
2308 task_cputime(tsk, &utime, &stime);
2309 info.si_utime = nsec_to_clock_t(utime);
2310 info.si_stime = nsec_to_clock_t(stime);
2311
2312 info.si_code = why;
2313 switch (why) {
2314 case CLD_CONTINUED:
2315 info.si_status = SIGCONT;
2316 break;
2317 case CLD_STOPPED:
2318 info.si_status = tsk->signal->group_exit_code & 0x7f;
2319 break;
2320 case CLD_TRAPPED:
2321 info.si_status = tsk->exit_code & 0x7f;
2322 break;
2323 default:
2324 BUG();
2325 }
2326
2327 sighand = parent->sighand;
2328 spin_lock_irqsave(&sighand->siglock, flags);
2329 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2330 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2331 send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
2332 /*
2333 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2334 */
2335 __wake_up_parent(tsk, parent);
2336 spin_unlock_irqrestore(&sighand->siglock, flags);
2337}
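
/*
 * Example: a userspace observer for these notifications, sketched with
 * waitid(2); WSTOPPED/WCONTINUED surface the CLD_STOPPED/CLD_CONTINUED
 * si_code and si_status values filled in above.
 *
 *	#include <signal.h>
 *	#include <sys/wait.h>
 *
 *	int watch_child(pid_t pid)
 *	{
 *		siginfo_t si;
 *
 *		if (waitid(P_PID, pid, &si, WSTOPPED | WCONTINUED) < 0)
 *			return -1;
 *		// si.si_code: CLD_STOPPED or CLD_CONTINUED;
 *		// si.si_status: the stop signal, or SIGCONT
 *		return si.si_code;
 *	}
 */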
2338
2339/*
2340 * This must be called with current->sighand->siglock held.
2341 *
2342 * This should be the path for all ptrace stops.
2343 * We always set current->last_siginfo while stopped here.
2344 * That makes it a way to test a stopped process for
2345 * being ptrace-stopped vs being job-control-stopped.
2346 *
2347 * Returns the signal the ptracer requested the code resume
2348 * with. If the code did not stop because the tracer is gone,
2349 * the stop signal is returned unchanged.
2350 */
2351static int ptrace_stop(int exit_code, int why, unsigned long message,
2352 kernel_siginfo_t *info)
2353	__releases(&current->sighand->siglock)
2354	__acquires(&current->sighand->siglock)
2355{
2356 bool gstop_done = false;
2357
2358 if (arch_ptrace_stop_needed()) {
2359 /*
2360 * The arch code has something special to do before a
2361 * ptrace stop. This is allowed to block, e.g. for faults
2362 * on user stack pages. We can't keep the siglock while
2363 * calling arch_ptrace_stop, so we must release it now.
2364 * To preserve proper semantics, we must do this before
2365 * any signal bookkeeping like checking group_stop_count.
2366 */
2367		spin_unlock_irq(&current->sighand->siglock);
2368		arch_ptrace_stop();
2369		spin_lock_irq(&current->sighand->siglock);
2370 }
2371
2372 /*
2373 * After this point ptrace_signal_wake_up or signal_wake_up
2374 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2375 * signal comes in. Handle previous ptrace_unlinks and fatal
2376 * signals here to prevent ptrace_stop sleeping in schedule.
2377 */
2378 if (!current->ptrace || __fatal_signal_pending(current))
2379 return exit_code;
2380
2381 set_special_state(TASK_TRACED);
2382 current->jobctl |= JOBCTL_TRACED;
2383
2384 /*
2385 * We're committing to trapping. TRACED should be visible before
2386 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2387 * Also, transition to TRACED and updates to ->jobctl should be
2388 * atomic with respect to siglock and should be done after the arch
2389 * hook as siglock is released and regrabbed across it.
2390 *
2391 * TRACER TRACEE
2392 *
2393 * ptrace_attach()
2394 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2395 * do_wait()
2396 * set_current_state() smp_wmb();
2397 * ptrace_do_wait()
2398 * wait_task_stopped()
2399 * task_stopped_code()
2400 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2401 */
2402 smp_wmb();
2403
2404 current->ptrace_message = message;
2405 current->last_siginfo = info;
2406 current->exit_code = exit_code;
2407
2408 /*
2409 * If @why is CLD_STOPPED, we're trapping to participate in a group
2410	 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
2411 * across siglock relocks since INTERRUPT was scheduled, PENDING
2412 * could be clear now. We act as if SIGCONT is received after
2413 * TASK_TRACED is entered - ignore it.
2414 */
2415 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2416 gstop_done = task_participate_group_stop(current);
2417
2418 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2419 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2420 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2421 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2422
2423 /* entering a trap, clear TRAPPING */
2424 task_clear_jobctl_trapping(current);
2425
2426	spin_unlock_irq(&current->sighand->siglock);
2427 read_lock(&tasklist_lock);
2428 /*
2429 * Notify parents of the stop.
2430 *
2431 * While ptraced, there are two parents - the ptracer and
2432 * the real_parent of the group_leader. The ptracer should
2433 * know about every stop while the real parent is only
2434 * interested in the completion of group stop. The states
2435 * for the two don't interact with each other. Notify
2436 * separately unless they're gonna be duplicates.
2437 */
2438 if (current->ptrace)
2439 do_notify_parent_cldstop(current, true, why);
2440 if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2441 do_notify_parent_cldstop(current, false, why);
2442
2443 /*
2444	 * The previous do_notify_parent_cldstop() invocation woke the ptracer.
2445	 * On a PREEMPTION kernel this can result in a preemption requirement
2446	 * which will be fulfilled after read_unlock() and the ptracer will be
2447	 * put on the CPU.
2448	 * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
2449	 * this task to wait in schedule(). If this task gets preempted then it
2450	 * remains enqueued on the runqueue. The ptracer will observe this and
2451	 * then sleep for a delay of one HZ tick. In the meantime this task
2452	 * gets scheduled, enters schedule() and will wait for the ptracer.
2453	 *
2454	 * This preemption point is not bad from a correctness point of
2455	 * view but extends the runtime by one HZ tick time due to the
2456	 * ptracer's sleep. The preempt-disable section ensures that there
2457	 * will be no preemption between unlock and schedule() and so
2458	 * improves performance since the ptracer will observe that the
2459	 * tracee is scheduled out once it gets on the CPU.
2460 *
2461 * On PREEMPT_RT locking tasklist_lock does not disable preemption.
2462 * Therefore the task can be preempted after do_notify_parent_cldstop()
2463 * before unlocking tasklist_lock so there is no benefit in doing this.
2464 *
2465 * In fact disabling preemption is harmful on PREEMPT_RT because
2466 * the spinlock_t in cgroup_enter_frozen() must not be acquired
2467 * with preemption disabled due to the 'sleeping' spinlock
2468 * substitution of RT.
2469 */
2470 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2471 preempt_disable();
2472 read_unlock(&tasklist_lock);
2473 cgroup_enter_frozen();
2474 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2475 preempt_enable_no_resched();
2476 schedule();
2477 cgroup_leave_frozen(true);
2478
2479 /*
2480 * We are back. Now reacquire the siglock before touching
2481 * last_siginfo, so that we are sure to have synchronized with
2482 * any signal-sending on another CPU that wants to examine it.
2483 */
2484	spin_lock_irq(&current->sighand->siglock);
2485 exit_code = current->exit_code;
2486 current->last_siginfo = NULL;
2487 current->ptrace_message = 0;
2488 current->exit_code = 0;
2489
2490 /* LISTENING can be set only during STOP traps, clear it */
2491 current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2492
2493 /*
2494 * Queued signals ignored us while we were stopped for tracing.
2495 * So check for any that we should take before resuming user mode.
2496 * This sets TIF_SIGPENDING, but never clears it.
2497 */
2498 recalc_sigpending_tsk(current);
2499 return exit_code;
2500}
2501
2502static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2503{
2504 kernel_siginfo_t info;
2505
2506 clear_siginfo(&info);
2507 info.si_signo = signr;
2508 info.si_code = exit_code;
2509 info.si_pid = task_pid_vnr(current);
2510 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2511
2512 /* Let the debugger run. */
2513 return ptrace_stop(exit_code, why, message, &info);
2514}
2515
2516int ptrace_notify(int exit_code, unsigned long message)
2517{
2518 int signr;
2519
2520 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2521 if (unlikely(task_work_pending(current)))
2522 task_work_run();
2523
2524	spin_lock_irq(&current->sighand->siglock);
2525	signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2526	spin_unlock_irq(&current->sighand->siglock);
2527 return signr;
2528}
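
/*
 * Example: the tracer-side decoding of the exit_code produced here, a
 * sketch that assumes PTRACE_O_TRACEEXEC was set on the tracee; the event
 * is recovered from the upper bits of the wait status.
 *
 *	#include <signal.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *
 *	int wait_for_exec_event(pid_t pid)
 *	{
 *		int status;
 *
 *		if (waitpid(pid, &status, 0) < 0)
 *			return -1;
 *		if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP &&
 *		    status >> 8 == (SIGTRAP | (PTRACE_EVENT_EXEC << 8)))
 *			return 0;	// this is the exec event stop
 *		return -1;
 *	}
 */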
2529
2530/**
2531 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2532 * @signr: signr causing group stop if initiating
2533 *
2534 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2535 * and participate in it. If already set, participate in the existing
2536 * group stop. If participated in a group stop (and thus slept), %true is
2537 * returned with siglock released.
2538 *
2539 * If ptraced, this function doesn't handle stop itself. Instead,
2540 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2541 * untouched. The caller must ensure that INTERRUPT trap handling takes
2542 * place afterwards.
2543 *
2544 * CONTEXT:
2545 * Must be called with @current->sighand->siglock held, which is released
2546 * on %true return.
2547 *
2548 * RETURNS:
2549 * %false if group stop is already cancelled or ptrace trap is scheduled.
2550 * %true if participated in group stop.
2551 */
2552static bool do_signal_stop(int signr)
2553	__releases(&current->sighand->siglock)
2554{
2555 struct signal_struct *sig = current->signal;
2556
2557 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2558 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2559 struct task_struct *t;
2560
2561 /* signr will be recorded in task->jobctl for retries */
2562 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2563
2564 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2565 unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2566 unlikely(sig->group_exec_task))
2567 return false;
2568 /*
2569 * There is no group stop already in progress. We must
2570 * initiate one now.
2571 *
2572 * While ptraced, a task may be resumed while group stop is
2573 * still in effect and then receive a stop signal and
2574 * initiate another group stop. This deviates from the
2575 * usual behavior as two consecutive stop signals can't
2576 * cause two group stops when !ptraced. That is why we
2577 * also check !task_is_stopped(t) below.
2578 *
2579 * The condition can be distinguished by testing whether
2580 * SIGNAL_STOP_STOPPED is already set. Don't generate
2581 * group_exit_code in such case.
2582 *
2583 * This is not necessary for SIGNAL_STOP_CONTINUED because
2584 * an intervening stop signal is required to cause two
2585 * continued events regardless of ptrace.
2586 */
2587 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2588 sig->group_exit_code = signr;
2589
2590 sig->group_stop_count = 0;
2591 if (task_set_jobctl_pending(current, signr | gstop))
2592 sig->group_stop_count++;
2593
2594 for_other_threads(current, t) {
2595 /*
2596 * Setting state to TASK_STOPPED for a group
2597 * stop is always done with the siglock held,
2598 * so this check has no races.
2599 */
2600 if (!task_is_stopped(t) &&
2601 task_set_jobctl_pending(t, signr | gstop)) {
2602 sig->group_stop_count++;
2603 if (likely(!(t->ptrace & PT_SEIZED)))
2604 signal_wake_up(t, 0);
2605 else
2606 ptrace_trap_notify(t);
2607 }
2608 }
2609 }
2610
2611 if (likely(!current->ptrace)) {
2612 int notify = 0;
2613
2614 /*
2615 * If there are no other threads in the group, or if there
2616 * is a group stop in progress and we are the last to stop,
2617 * report to the parent.
2618 */
2619 if (task_participate_group_stop(current))
2620 notify = CLD_STOPPED;
2621
2622 current->jobctl |= JOBCTL_STOPPED;
2623 set_special_state(TASK_STOPPED);
2624		spin_unlock_irq(&current->sighand->siglock);
2625
2626 /*
2627 * Notify the parent of the group stop completion. Because
2628 * we're not holding either the siglock or tasklist_lock
2629		 * here, the ptracer may attach in between; however, this is for
2630 * group stop and should always be delivered to the real
2631 * parent of the group leader. The new ptracer will get
2632 * its notification when this task transitions into
2633 * TASK_TRACED.
2634 */
2635 if (notify) {
2636 read_lock(&tasklist_lock);
2637 do_notify_parent_cldstop(current, false, notify);
2638 read_unlock(&tasklist_lock);
2639 }
2640
2641 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2642 cgroup_enter_frozen();
2643 schedule();
2644 return true;
2645 } else {
2646 /*
2647 * While ptraced, group stop is handled by STOP trap.
2648 * Schedule it and let the caller deal with it.
2649 */
2650 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2651 return false;
2652 }
2653}
2654
2655/**
2656 * do_jobctl_trap - take care of ptrace jobctl traps
2657 *
2658 * When PT_SEIZED, it's used for both group stop and explicit
2659 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2660 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2661 * the stop signal; otherwise, %SIGTRAP.
2662 *
2663 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2664 * number as exit_code and no siginfo.
2665 *
2666 * CONTEXT:
2667 * Must be called with @current->sighand->siglock held, which may be
2668 * released and re-acquired before returning with intervening sleep.
2669 */
2670static void do_jobctl_trap(void)
2671{
2672 struct signal_struct *signal = current->signal;
2673 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2674
2675 if (current->ptrace & PT_SEIZED) {
2676 if (!signal->group_stop_count &&
2677 !(signal->flags & SIGNAL_STOP_STOPPED))
2678 signr = SIGTRAP;
2679 WARN_ON_ONCE(!signr);
2680 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2681 CLD_STOPPED, 0);
2682 } else {
2683 WARN_ON_ONCE(!signr);
2684 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2685 }
2686}
2687
2688/**
2689 * do_freezer_trap - handle the freezer jobctl trap
2690 *
2691 * Puts the task into the frozen state, unless the task is about to quit,
2692 * in which case JOBCTL_TRAP_FREEZE is dropped.
2693 *
2694 * CONTEXT:
2695 * Must be called with @current->sighand->siglock held,
2696 * which is always released before returning.
2697 */
2698static void do_freezer_trap(void)
2699	__releases(&current->sighand->siglock)
2700{
2701 /*
2702 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2703 * let's make another loop to give it a chance to be handled.
2704 * In any case, we'll return back.
2705 */
2706 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2707 JOBCTL_TRAP_FREEZE) {
2708		spin_unlock_irq(&current->sighand->siglock);
2709 return;
2710 }
2711
2712 /*
2713 * Now we're sure that there is no pending fatal signal and no
2714 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2715 * immediately (if there is a non-fatal signal pending), and
2716 * put the task into sleep.
2717 */
2718 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2719 clear_thread_flag(TIF_SIGPENDING);
2720	spin_unlock_irq(&current->sighand->siglock);
2721 cgroup_enter_frozen();
2722 schedule();
2723
2724 /*
2725 * We could've been woken by task_work, run it to clear
2726 * TIF_NOTIFY_SIGNAL. The caller will retry if necessary.
2727 */
2728 clear_notify_signal();
2729 if (unlikely(task_work_pending(current)))
2730 task_work_run();
2731}
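
/*
 * Example: the userspace trigger for this trap, a sketch assuming cgroup
 * v2; writing "1" to a (hypothetical) group's cgroup.freeze file makes
 * its member tasks take JOBCTL_TRAP_FREEZE and park here.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	// e.g. path = "/sys/fs/cgroup/demo/cgroup.freeze"
 *	int freeze_cgroup(const char *path)
 *	{
 *		int fd = open(path, O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, "1", 1) != 1) {
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 */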
2732
2733static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2734{
2735 /*
2736 * We do not check sig_kernel_stop(signr) but set this marker
2737 * unconditionally because we do not know whether debugger will
2738 * change signr. This flag has no meaning unless we are going
2739 * to stop after return from ptrace_stop(). In this case it will
2740 * be checked in do_signal_stop(), we should only stop if it was
2741 * not cleared by SIGCONT while we were sleeping. See also the
2742 * comment in dequeue_signal().
2743 */
2744 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2745 signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2746
2747 /* We're back. Did the debugger cancel the sig? */
2748 if (signr == 0)
2749 return signr;
2750
2751 /*
2752 * Update the siginfo structure if the signal has
2753 * changed. If the debugger wanted something
2754 * specific in the siginfo structure then it should
2755 * have updated *info via PTRACE_SETSIGINFO.
2756 */
2757 if (signr != info->si_signo) {
2758 clear_siginfo(info);
2759 info->si_signo = signr;
2760 info->si_errno = 0;
2761 info->si_code = SI_USER;
2762 rcu_read_lock();
2763 info->si_pid = task_pid_vnr(current->parent);
2764 info->si_uid = from_kuid_munged(current_user_ns(),
2765 task_uid(current->parent));
2766 rcu_read_unlock();
2767 }
2768
2769 /* If the (new) signal is now blocked, requeue it. */
2770	if (sigismember(&current->blocked, signr) ||
2771 fatal_signal_pending(current)) {
2772 send_signal_locked(signr, info, current, type);
2773 signr = 0;
2774 }
2775
2776 return signr;
2777}
2778
2779static void hide_si_addr_tag_bits(struct ksignal *ksig)
2780{
2781 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2782 case SIL_FAULT:
2783 case SIL_FAULT_TRAPNO:
2784 case SIL_FAULT_MCEERR:
2785 case SIL_FAULT_BNDERR:
2786 case SIL_FAULT_PKUERR:
2787 case SIL_FAULT_PERF_EVENT:
2788 ksig->info.si_addr = arch_untagged_si_addr(
2789 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2790 break;
2791 case SIL_KILL:
2792 case SIL_TIMER:
2793 case SIL_POLL:
2794 case SIL_CHLD:
2795 case SIL_RT:
2796 case SIL_SYS:
2797 break;
2798 }
2799}
2800
2801bool get_signal(struct ksignal *ksig)
2802{
2803 struct sighand_struct *sighand = current->sighand;
2804 struct signal_struct *signal = current->signal;
2805 int signr;
2806
2807 clear_notify_signal();
2808 if (unlikely(task_work_pending(current)))
2809 task_work_run();
2810
2811 if (!task_sigpending(current))
2812 return false;
2813
2814 if (unlikely(uprobe_deny_signal()))
2815 return false;
2816
2817 /*
2818 * Do this once, we can't return to user-mode if freezing() == T.
2819 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2820 * thus do not need another check after return.
2821 */
2822 try_to_freeze();
2823
2824relock:
2825 spin_lock_irq(&sighand->siglock);
2826
2827 /*
2828 * Every stopped thread goes here after wakeup. Check to see if
2829 * we should notify the parent, prepare_signal(SIGCONT) encodes
2830 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2831 */
2832 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2833 int why;
2834
2835 if (signal->flags & SIGNAL_CLD_CONTINUED)
2836 why = CLD_CONTINUED;
2837 else
2838 why = CLD_STOPPED;
2839
2840 signal->flags &= ~SIGNAL_CLD_MASK;
2841
2842 spin_unlock_irq(&sighand->siglock);
2843
2844 /*
2845 * Notify the parent that we're continuing. This event is
2846		 * always per-process and doesn't make a whole lot of sense
2847 * for ptracers, who shouldn't consume the state via
2848 * wait(2) either, but, for backward compatibility, notify
2849 * the ptracer of the group leader too unless it's gonna be
2850 * a duplicate.
2851 */
2852 read_lock(&tasklist_lock);
2853 do_notify_parent_cldstop(current, false, why);
2854
2855 if (ptrace_reparented(current->group_leader))
2856 do_notify_parent_cldstop(current->group_leader,
2857 true, why);
2858 read_unlock(&tasklist_lock);
2859
2860 goto relock;
2861 }
2862
2863 for (;;) {
2864 struct k_sigaction *ka;
2865 enum pid_type type;
2866
2867 /* Has this task already been marked for death? */
2868 if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2869 signal->group_exec_task) {
2870 signr = SIGKILL;
2871			sigdelset(&current->pending.signal, SIGKILL);
2872 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2873 &sighand->action[SIGKILL-1]);
2874 recalc_sigpending();
2875 /*
2876 * implies do_group_exit() or return to PF_USER_WORKER,
2877 * no need to initialize ksig->info/etc.
2878 */
2879 goto fatal;
2880 }
2881
2882 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2883 do_signal_stop(0))
2884 goto relock;
2885
2886 if (unlikely(current->jobctl &
2887 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2888 if (current->jobctl & JOBCTL_TRAP_MASK) {
2889 do_jobctl_trap();
2890 spin_unlock_irq(&sighand->siglock);
2891 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2892 do_freezer_trap();
2893
2894 goto relock;
2895 }
2896
2897 /*
2898 * If the task is leaving the frozen state, let's update
2899 * cgroup counters and reset the frozen bit.
2900 */
2901 if (unlikely(cgroup_task_frozen(current))) {
2902 spin_unlock_irq(&sighand->siglock);
2903 cgroup_leave_frozen(false);
2904 goto relock;
2905 }
2906
2907 /*
2908 * Signals generated by the execution of an instruction
2909 * need to be delivered before any other pending signals
2910 * so that the instruction pointer in the signal stack
2911 * frame points to the faulting instruction.
2912 */
2913 type = PIDTYPE_PID;
2914 signr = dequeue_synchronous_signal(&ksig->info);
2915 if (!signr)
2916			signr = dequeue_signal(&current->blocked, &ksig->info, &type);
2917
2918 if (!signr)
2919 break; /* will return 0 */
2920
2921 if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2922		    !(sighand->action[signr-1].sa.sa_flags & SA_IMMUTABLE)) {
2923 signr = ptrace_signal(signr, &ksig->info, type);
2924 if (!signr)
2925 continue;
2926 }
2927
2928 ka = &sighand->action[signr-1];
2929
2930 /* Trace actually delivered signals. */
2931 trace_signal_deliver(signr, &ksig->info, ka);
2932
2933 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2934 continue;
2935 if (ka->sa.sa_handler != SIG_DFL) {
2936 /* Run the handler. */
2937 ksig->ka = *ka;
2938
2939 if (ka->sa.sa_flags & SA_ONESHOT)
2940 ka->sa.sa_handler = SIG_DFL;
2941
2942 break; /* will return non-zero "signr" value */
2943 }
2944
2945 /*
2946 * Now we are doing the default action for this signal.
2947 */
2948 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2949 continue;
2950
2951 /*
2952 * Global init gets no signals it doesn't want.
2953 * Container-init gets no signals it doesn't want from same
2954 * container.
2955 *
2956 * Note that if global/container-init sees a sig_kernel_only()
2957 * signal here, the signal must have been generated internally
2958 * or must have come from an ancestor namespace. In either
2959 * case, the signal cannot be dropped.
2960 */
2961 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2962 !sig_kernel_only(signr))
2963 continue;
2964
2965 if (sig_kernel_stop(signr)) {
2966 /*
2967 * The default action is to stop all threads in
2968 * the thread group. The job control signals
2969 * do nothing in an orphaned pgrp, but SIGSTOP
2970 * always works. Note that siglock needs to be
2971 * dropped during the call to is_orphaned_pgrp()
2972 * because of lock ordering with tasklist_lock.
2973 * This allows an intervening SIGCONT to be posted.
2974 * We need to check for that and bail out if necessary.
2975 */
2976 if (signr != SIGSTOP) {
2977 spin_unlock_irq(&sighand->siglock);
2978
2979 /* signals can be posted during this window */
2980
2981 if (is_current_pgrp_orphaned())
2982 goto relock;
2983
2984 spin_lock_irq(&sighand->siglock);
2985 }
2986
2987 if (likely(do_signal_stop(signr))) {
2988 /* It released the siglock. */
2989 goto relock;
2990 }
2991
2992 /*
2993 * We didn't actually stop, due to a race
2994 * with SIGCONT or something like that.
2995 */
2996 continue;
2997 }
2998
2999 fatal:
3000 spin_unlock_irq(&sighand->siglock);
3001 if (unlikely(cgroup_task_frozen(current)))
3002 cgroup_leave_frozen(true);
3003
3004 /*
3005 * Anything else is fatal, maybe with a core dump.
3006 */
3007 current->flags |= PF_SIGNALED;
3008
3009 if (sig_kernel_coredump(signr)) {
3010 if (print_fatal_signals)
3011 print_fatal_signal(signr);
3012 proc_coredump_connector(current);
3013 /*
3014 * If it was able to dump core, this kills all
3015 * other threads in the group and synchronizes with
3016 * their demise. If we lost the race with another
3017 * thread getting here, it set group_exit_code
3018 * first and our do_group_exit call below will use
3019 * that value and ignore the one we pass it.
3020 */
3021 do_coredump(&ksig->info);
3022 }
3023
3024 /*
3025 * PF_USER_WORKER threads will catch and exit on fatal signals
3026 * themselves. They have cleanup that must be performed, so we
3027 * cannot call do_exit() on their behalf. Note that ksig won't
3028 * be properly initialized, PF_USER_WORKER's shouldn't use it.
3029 */
3030 if (current->flags & PF_USER_WORKER)
3031 goto out;
3032
3033 /*
3034 * Death signals, no core dump.
3035 */
3036 do_group_exit(signr);
3037 /* NOTREACHED */
3038 }
3039 spin_unlock_irq(&sighand->siglock);
3040
3041 ksig->sig = signr;
3042
3043 if (signr && !(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
3044 hide_si_addr_tag_bits(ksig);
3045out:
3046 return signr > 0;
3047}
3048
3049/**
3050 * signal_delivered - called after signal delivery to update blocked signals
3051 * @ksig: kernel signal struct
3052 * @stepping: nonzero if debugger single-step or block-step in use
3053 *
3054 * This function should be called when a signal has successfully been
3055 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
3056 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
3057 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
3058 */
3059static void signal_delivered(struct ksignal *ksig, int stepping)
3060{
3061 sigset_t blocked;
3062
3063	/* A signal was successfully delivered, and the saved sigmask was
3064	 * stored on the signal frame, and will be restored by sigreturn.
3065	 * So we can simply clear the restore sigmask flag.
3066	 */
3067 clear_restore_sigmask();
3068
3069	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
3070 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
3071 sigaddset(&blocked, ksig->sig);
3072 set_current_blocked(&blocked);
3073 if (current->sas_ss_flags & SS_AUTODISARM)
3074 sas_ss_reset(current);
3075 if (stepping)
3076 ptrace_notify(SIGTRAP, 0);
3077}
3078
3079void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
3080{
3081 if (failed)
3082 force_sigsegv(ksig->sig);
3083 else
3084 signal_delivered(ksig, stepping);
3085}
3086
3087/*
3088 * It could be that complete_signal() picked us to notify about the
3089 * group-wide signal. Other threads should be notified now to take
3090 * the shared signals in @which since we will not.
3091 */
3092static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
3093{
3094 sigset_t retarget;
3095 struct task_struct *t;
3096
3097 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
3098 if (sigisemptyset(&retarget))
3099 return;
3100
3101 for_other_threads(tsk, t) {
3102 if (t->flags & PF_EXITING)
3103 continue;
3104
3105 if (!has_pending_signals(&retarget, &t->blocked))
3106 continue;
3107 /* Remove the signals this thread can handle. */
3108 sigandsets(&retarget, &retarget, &t->blocked);
3109
3110 if (!task_sigpending(t))
3111 signal_wake_up(t, 0);
3112
3113 if (sigisemptyset(&retarget))
3114 break;
3115 }
3116}
3117
3118void exit_signals(struct task_struct *tsk)
3119{
3120 int group_stop = 0;
3121 sigset_t unblocked;
3122
3123 /*
3124 * @tsk is about to have PF_EXITING set - lock out users which
3125 * expect stable threadgroup.
3126 */
3127 cgroup_threadgroup_change_begin(tsk);
3128
3129 if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
3130 sched_mm_cid_exit_signals(tsk);
3131 tsk->flags |= PF_EXITING;
3132 cgroup_threadgroup_change_end(tsk);
3133 return;
3134 }
3135
3136 spin_lock_irq(&tsk->sighand->siglock);
3137 /*
3138 * From now this task is not visible for group-wide signals,
3139 * see wants_signal(), do_signal_stop().
3140 */
3141 sched_mm_cid_exit_signals(tsk);
3142 tsk->flags |= PF_EXITING;
3143
3144 cgroup_threadgroup_change_end(tsk);
3145
3146 if (!task_sigpending(tsk))
3147 goto out;
3148
3149 unblocked = tsk->blocked;
3150 signotset(&unblocked);
3151 retarget_shared_pending(tsk, &unblocked);
3152
3153 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
3154 task_participate_group_stop(tsk))
3155 group_stop = CLD_STOPPED;
3156out:
3157 spin_unlock_irq(&tsk->sighand->siglock);
3158
3159 /*
3160 * If group stop has completed, deliver the notification. This
3161 * should always go to the real parent of the group leader.
3162 */
3163 if (unlikely(group_stop)) {
3164 read_lock(&tasklist_lock);
3165 do_notify_parent_cldstop(tsk, false, group_stop);
3166 read_unlock(&tasklist_lock);
3167 }
3168}
3169
3170/*
3171 * System call entry points.
3172 */
3173
3174/**
3175 * sys_restart_syscall - restart a system call
3176 */
3177SYSCALL_DEFINE0(restart_syscall)
3178{
3179	struct restart_block *restart = &current->restart_block;
3180 return restart->fn(restart);
3181}
3182
3183long do_no_restart_syscall(struct restart_block *param)
3184{
3185 return -EINTR;
3186}
3187
3188static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3189{
3190 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3191 sigset_t newblocked;
3192 /* A set of now blocked but previously unblocked signals. */
3193		sigandnsets(&newblocked, newset, &current->blocked);
3194 retarget_shared_pending(tsk, &newblocked);
3195 }
3196 tsk->blocked = *newset;
3197 recalc_sigpending();
3198}
3199
3200/**
3201 * set_current_blocked - change current->blocked mask
3202 * @newset: new mask
3203 *
3204 * It is wrong to change ->blocked directly, this helper should be used
3205 * to ensure the process can't miss a shared signal we are going to block.
3206 */
3207void set_current_blocked(sigset_t *newset)
3208{
3209 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3210 __set_current_blocked(newset);
3211}
3212
3213void __set_current_blocked(const sigset_t *newset)
3214{
3215 struct task_struct *tsk = current;
3216
3217 /*
3218 * In case the signal mask hasn't changed, there is nothing we need
3219	 * to do. The current->blocked shouldn't be modified by another task.
3220 */
3221 if (sigequalsets(&tsk->blocked, newset))
3222 return;
3223
3224 spin_lock_irq(&tsk->sighand->siglock);
3225 __set_task_blocked(tsk, newset);
3226 spin_unlock_irq(&tsk->sighand->siglock);
3227}
3228
3229/*
3230 * This is also useful for kernel threads that want to temporarily
3231 * (or permanently) block certain signals.
3232 *
3233 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3234 * interface happily blocks "unblockable" signals like SIGKILL
3235 * and friends.
3236 */
3237int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3238{
3239 struct task_struct *tsk = current;
3240 sigset_t newset;
3241
3242 /* Lockless, only current can change ->blocked, never from irq */
3243 if (oldset)
3244 *oldset = tsk->blocked;
3245
3246 switch (how) {
3247 case SIG_BLOCK:
3248 sigorsets(&newset, &tsk->blocked, set);
3249 break;
3250 case SIG_UNBLOCK:
3251 sigandnsets(&newset, &tsk->blocked, set);
3252 break;
3253 case SIG_SETMASK:
3254 newset = *set;
3255 break;
3256 default:
3257 return -EINVAL;
3258 }
3259
3260 __set_current_blocked(&newset);
3261 return 0;
3262}
3263EXPORT_SYMBOL(sigprocmask);
3264
3265/*
3266 * This API helps set app-provided sigmasks.
3267 *
3268 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3269 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3270 *
3271 * Note that it does set_restore_sigmask() in advance, so it must always be
3272 * paired with restore_saved_sigmask_unless() before return from syscall.
3273 */
3274int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3275{
3276 sigset_t kmask;
3277
3278 if (!umask)
3279 return 0;
3280 if (sigsetsize != sizeof(sigset_t))
3281 return -EINVAL;
3282 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3283 return -EFAULT;
3284
3285 set_restore_sigmask();
3286 current->saved_sigmask = current->blocked;
3287 set_current_blocked(&kmask);
3288
3289 return 0;
3290}
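
/*
 * Example: the userspace pattern this serves, a sketch using pselect(2)
 * to atomically unblock SIGCHLD only while sleeping, which closes the
 * race between checking for a signal and blocking.
 *
 *	#include <signal.h>
 *	#include <sys/select.h>
 *
 *	int wait_readable(int fd, const sigset_t *mask_without_sigchld)
 *	{
 *		fd_set rfds;
 *
 *		FD_ZERO(&rfds);
 *		FD_SET(fd, &rfds);
 *		// SIGCHLD can be delivered only while pselect() sleeps
 *		return pselect(fd + 1, &rfds, NULL, NULL, NULL,
 *			       mask_without_sigchld);
 *	}
 */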
3291
3292#ifdef CONFIG_COMPAT
3293int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3294 size_t sigsetsize)
3295{
3296 sigset_t kmask;
3297
3298 if (!umask)
3299 return 0;
3300 if (sigsetsize != sizeof(compat_sigset_t))
3301 return -EINVAL;
3302 if (get_compat_sigset(&kmask, umask))
3303 return -EFAULT;
3304
3305 set_restore_sigmask();
3306 current->saved_sigmask = current->blocked;
3307 set_current_blocked(&kmask);
3308
3309 return 0;
3310}
3311#endif
3312
3313/**
3314 * sys_rt_sigprocmask - change the list of currently blocked signals
3315 * @how: whether to add, remove, or set signals
3316 * @nset: new signal mask to apply, or NULL to leave the mask unchanged
3317 * @oset: previous value of signal mask if non-null
3318 * @sigsetsize: size of sigset_t type
3319 */
3320SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3321 sigset_t __user *, oset, size_t, sigsetsize)
3322{
3323 sigset_t old_set, new_set;
3324 int error;
3325
3326 /* XXX: Don't preclude handling different sized sigset_t's. */
3327 if (sigsetsize != sizeof(sigset_t))
3328 return -EINVAL;
3329
3330 old_set = current->blocked;
3331
3332 if (nset) {
3333 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3334 return -EFAULT;
3335 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3336
3337 error = sigprocmask(how, &new_set, NULL);
3338 if (error)
3339 return error;
3340 }
3341
3342 if (oset) {
3343 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3344 return -EFAULT;
3345 }
3346
3347 return 0;
3348}
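
/*
 * Example: a userspace sketch showing the effect of the sigdelsetmask()
 * above - requests to block SIGKILL/SIGSTOP are silently dropped rather
 * than rejected.
 *
 *	#include <signal.h>
 *
 *	int try_block_sigkill(void)
 *	{
 *		sigset_t set, cur;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGKILL);
 *		sigprocmask(SIG_BLOCK, &set, NULL);	// reports success...
 *		sigprocmask(SIG_SETMASK, NULL, &cur);
 *		return sigismember(&cur, SIGKILL);	// ...but returns 0
 *	}
 */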
3349
3350#ifdef CONFIG_COMPAT
3351COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3352 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3353{
3354 sigset_t old_set = current->blocked;
3355
3356 /* XXX: Don't preclude handling different sized sigset_t's. */
3357 if (sigsetsize != sizeof(sigset_t))
3358 return -EINVAL;
3359
3360 if (nset) {
3361 sigset_t new_set;
3362 int error;
3363 if (get_compat_sigset(&new_set, nset))
3364 return -EFAULT;
3365 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3366
3367 error = sigprocmask(how, &new_set, NULL);
3368 if (error)
3369 return error;
3370 }
3371 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3372}
3373#endif
3374
3375static void do_sigpending(sigset_t *set)
3376{
3377	spin_lock_irq(&current->sighand->siglock);
3378	sigorsets(set, &current->pending.signal,
3379		  &current->signal->shared_pending.signal);
3380	spin_unlock_irq(&current->sighand->siglock);
3381
3382 /* Outside the lock because only this thread touches it. */
3383	sigandsets(set, &current->blocked, set);
3384}
3385
3386/**
3387 * sys_rt_sigpending - examine a pending signal that has been raised
3388 * while blocked
3389 * @uset: stores pending signals
3390 * @sigsetsize: size of sigset_t type or larger
3391 */
3392SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3393{
3394 sigset_t set;
3395
3396 if (sigsetsize > sizeof(*uset))
3397 return -EINVAL;
3398
3399 do_sigpending(&set);
3400
3401 if (copy_to_user(uset, &set, sigsetsize))
3402 return -EFAULT;
3403
3404 return 0;
3405}
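
/*
 * Example: a userspace sketch of inspecting the pending-while-blocked
 * set via sigpending(2), which lands in do_sigpending() above.
 *
 *	#include <signal.h>
 *
 *	int is_sigint_pending(void)
 *	{
 *		sigset_t pend;
 *
 *		if (sigpending(&pend) < 0)
 *			return -1;
 *		return sigismember(&pend, SIGINT);
 *	}
 */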
3406
3407#ifdef CONFIG_COMPAT
3408COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3409 compat_size_t, sigsetsize)
3410{
3411 sigset_t set;
3412
3413 if (sigsetsize > sizeof(*uset))
3414 return -EINVAL;
3415
3416 do_sigpending(&set);
3417
3418 return put_compat_sigset(uset, &set, sigsetsize);
3419}
3420#endif
3421
3422static const struct {
3423 unsigned char limit, layout;
3424} sig_sicodes[] = {
3425 [SIGILL] = { NSIGILL, SIL_FAULT },
3426 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3427 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3428 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3429 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3430#if defined(SIGEMT)
3431 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3432#endif
3433 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3434 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3435 [SIGSYS] = { NSIGSYS, SIL_SYS },
3436};
3437
3438static bool known_siginfo_layout(unsigned sig, int si_code)
3439{
3440 if (si_code == SI_KERNEL)
3441 return true;
3442	else if (si_code > SI_USER) {
3443 if (sig_specific_sicodes(sig)) {
3444 if (si_code <= sig_sicodes[sig].limit)
3445 return true;
3446 }
3447 else if (si_code <= NSIGPOLL)
3448 return true;
3449 }
3450 else if (si_code >= SI_DETHREAD)
3451 return true;
3452 else if (si_code == SI_ASYNCNL)
3453 return true;
3454 return false;
3455}
3456
3457enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3458{
3459 enum siginfo_layout layout = SIL_KILL;
3460 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3461 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3462 (si_code <= sig_sicodes[sig].limit)) {
3463 layout = sig_sicodes[sig].layout;
3464 /* Handle the exceptions */
3465 if ((sig == SIGBUS) &&
3466 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3467 layout = SIL_FAULT_MCEERR;
3468 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3469 layout = SIL_FAULT_BNDERR;
3470#ifdef SEGV_PKUERR
3471 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3472 layout = SIL_FAULT_PKUERR;
3473#endif
3474 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3475 layout = SIL_FAULT_PERF_EVENT;
3476 else if (IS_ENABLED(CONFIG_SPARC) &&
3477 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3478 layout = SIL_FAULT_TRAPNO;
3479 else if (IS_ENABLED(CONFIG_ALPHA) &&
3480 ((sig == SIGFPE) ||
3481 ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3482 layout = SIL_FAULT_TRAPNO;
3483 }
3484 else if (si_code <= NSIGPOLL)
3485 layout = SIL_POLL;
3486 } else {
3487 if (si_code == SI_TIMER)
3488 layout = SIL_TIMER;
3489 else if (si_code == SI_SIGIO)
3490 layout = SIL_POLL;
3491 else if (si_code < 0)
3492 layout = SIL_RT;
3493 }
3494 return layout;
3495}
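
/*
 * Example: the userspace view of these layouts, a sketch of an SA_SIGINFO
 * handler consuming the SIL_FAULT-style fields for SIGSEGV.
 *
 *	#include <signal.h>
 *
 *	static void segv_handler(int sig, siginfo_t *si, void *uctx)
 *	{
 *		// SIL_FAULT layout: si_addr is the faulting address and
 *		// si_code distinguishes SEGV_MAPERR from SEGV_ACCERR
 *		void *fault_addr = si->si_addr;
 *		(void)fault_addr;
 *	}
 *
 *	int install_segv_handler(void)
 *	{
 *		struct sigaction sa = { .sa_sigaction = segv_handler,
 *					.sa_flags = SA_SIGINFO };
 *		return sigaction(SIGSEGV, &sa, NULL);
 *	}
 */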
3496
3497static inline char __user *si_expansion(const siginfo_t __user *info)
3498{
3499 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3500}
3501
3502int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3503{
3504 char __user *expansion = si_expansion(to);
3505	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3506 return -EFAULT;
3507 if (clear_user(expansion, SI_EXPANSION_SIZE))
3508 return -EFAULT;
3509 return 0;
3510}
3511
3512static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3513 const siginfo_t __user *from)
3514{
3515 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3516 char __user *expansion = si_expansion(from);
3517 char buf[SI_EXPANSION_SIZE];
3518 int i;
3519 /*
3520 * An unknown si_code might need more than
3521 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3522 * extra bytes are 0. This guarantees copy_siginfo_to_user
3523 * will return this data to userspace exactly.
3524 */
3525 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3526 return -EFAULT;
3527 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3528 if (buf[i] != 0)
3529 return -E2BIG;
3530 }
3531 }
3532 return 0;
3533}
3534
3535static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3536 const siginfo_t __user *from)
3537{
3538 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3539 return -EFAULT;
3540 to->si_signo = signo;
3541 return post_copy_siginfo_from_user(to, from);
3542}
3543
3544int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3545{
3546 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3547 return -EFAULT;
3548 return post_copy_siginfo_from_user(to, from);
3549}
3550
3551#ifdef CONFIG_COMPAT
3552/**
3553 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3554 * @to: compat siginfo destination
3555 * @from: kernel siginfo source
3556 *
3557 * Note: This function does not work properly for SIGCHLD on x32, but
3558 * fortunately it doesn't have to. The only valid callers of this function are
3559 * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3560 * The latter does not care because SIGCHLD will never cause a coredump.
3561 */
3562void copy_siginfo_to_external32(struct compat_siginfo *to,
3563 const struct kernel_siginfo *from)
3564{
3565 memset(to, 0, sizeof(*to));
3566
3567 to->si_signo = from->si_signo;
3568 to->si_errno = from->si_errno;
3569 to->si_code = from->si_code;
3570 switch(siginfo_layout(from->si_signo, from->si_code)) {
3571 case SIL_KILL:
3572 to->si_pid = from->si_pid;
3573 to->si_uid = from->si_uid;
3574 break;
3575 case SIL_TIMER:
3576 to->si_tid = from->si_tid;
3577 to->si_overrun = from->si_overrun;
3578 to->si_int = from->si_int;
3579 break;
3580 case SIL_POLL:
3581 to->si_band = from->si_band;
3582 to->si_fd = from->si_fd;
3583 break;
3584 case SIL_FAULT:
3585 to->si_addr = ptr_to_compat(from->si_addr);
3586 break;
3587 case SIL_FAULT_TRAPNO:
3588 to->si_addr = ptr_to_compat(from->si_addr);
3589 to->si_trapno = from->si_trapno;
3590 break;
3591 case SIL_FAULT_MCEERR:
3592 to->si_addr = ptr_to_compat(from->si_addr);
3593 to->si_addr_lsb = from->si_addr_lsb;
3594 break;
3595 case SIL_FAULT_BNDERR:
3596 to->si_addr = ptr_to_compat(from->si_addr);
3597 to->si_lower = ptr_to_compat(from->si_lower);
3598 to->si_upper = ptr_to_compat(from->si_upper);
3599 break;
3600 case SIL_FAULT_PKUERR:
3601 to->si_addr = ptr_to_compat(from->si_addr);
3602 to->si_pkey = from->si_pkey;
3603 break;
3604 case SIL_FAULT_PERF_EVENT:
3605 to->si_addr = ptr_to_compat(from->si_addr);
3606 to->si_perf_data = from->si_perf_data;
3607 to->si_perf_type = from->si_perf_type;
3608 to->si_perf_flags = from->si_perf_flags;
3609 break;
3610 case SIL_CHLD:
3611 to->si_pid = from->si_pid;
3612 to->si_uid = from->si_uid;
3613 to->si_status = from->si_status;
3614 to->si_utime = from->si_utime;
3615 to->si_stime = from->si_stime;
3616 break;
3617 case SIL_RT:
3618 to->si_pid = from->si_pid;
3619 to->si_uid = from->si_uid;
3620 to->si_int = from->si_int;
3621 break;
3622 case SIL_SYS:
3623 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3624 to->si_syscall = from->si_syscall;
3625 to->si_arch = from->si_arch;
3626 break;
3627 }
3628}
3629
3630int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3631 const struct kernel_siginfo *from)
3632{
3633 struct compat_siginfo new;
3634
3635 copy_siginfo_to_external32(&new, from);
3636 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3637 return -EFAULT;
3638 return 0;
3639}
3640
static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
					 const struct compat_siginfo *from)
{
	clear_siginfo(to);
	to->si_signo = from->si_signo;
	to->si_errno = from->si_errno;
	to->si_code = from->si_code;
	switch (siginfo_layout(from->si_signo, from->si_code)) {
	case SIL_KILL:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		break;
	case SIL_TIMER:
		to->si_tid = from->si_tid;
		to->si_overrun = from->si_overrun;
		to->si_int = from->si_int;
		break;
	case SIL_POLL:
		to->si_band = from->si_band;
		to->si_fd = from->si_fd;
		break;
	case SIL_FAULT:
		to->si_addr = compat_ptr(from->si_addr);
		break;
	case SIL_FAULT_TRAPNO:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_trapno = from->si_trapno;
		break;
	case SIL_FAULT_MCEERR:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_addr_lsb = from->si_addr_lsb;
		break;
	case SIL_FAULT_BNDERR:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_lower = compat_ptr(from->si_lower);
		to->si_upper = compat_ptr(from->si_upper);
		break;
	case SIL_FAULT_PKUERR:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_pkey = from->si_pkey;
		break;
	case SIL_FAULT_PERF_EVENT:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_perf_data = from->si_perf_data;
		to->si_perf_type = from->si_perf_type;
		to->si_perf_flags = from->si_perf_flags;
		break;
	case SIL_CHLD:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		to->si_status = from->si_status;
#ifdef CONFIG_X86_X32_ABI
		if (in_x32_syscall()) {
			to->si_utime = from->_sifields._sigchld_x32._utime;
			to->si_stime = from->_sifields._sigchld_x32._stime;
		} else
#endif
		{
			to->si_utime = from->si_utime;
			to->si_stime = from->si_stime;
		}
		break;
	case SIL_RT:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		to->si_int = from->si_int;
		break;
	case SIL_SYS:
		to->si_call_addr = compat_ptr(from->si_call_addr);
		to->si_syscall = from->si_syscall;
		to->si_arch = from->si_arch;
		break;
	}
	return 0;
}

static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
				      const struct compat_siginfo __user *ufrom)
{
	struct compat_siginfo from;

	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
		return -EFAULT;

	from.si_signo = signo;
	return post_copy_siginfo_from_user32(to, &from);
}

int copy_siginfo_from_user32(struct kernel_siginfo *to,
			     const struct compat_siginfo __user *ufrom)
{
	struct compat_siginfo from;

	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
		return -EFAULT;

	return post_copy_siginfo_from_user32(to, &from);
}
#endif /* CONFIG_COMPAT */

/**
 * do_sigtimedwait - wait for queued signals specified in @which
 * @which: queued signals to wait for
 * @info: if non-null, the signal's siginfo is returned here
 * @ts: upper bound on process time suspension
 */
static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
			   const struct timespec64 *ts)
{
	ktime_t *to = NULL, timeout = KTIME_MAX;
	struct task_struct *tsk = current;
	sigset_t mask = *which;
	enum pid_type type;
	int sig, ret = 0;

	if (ts) {
		if (!timespec64_valid(ts))
			return -EINVAL;
		timeout = timespec64_to_ktime(*ts);
		to = &timeout;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(&mask, info, &type);
	if (!sig && timeout) {
		/*
		 * None ready, temporarily unblock the signals we're
		 * interested in while we sleep, so that we'll be woken
		 * when one arrives. Unblocking is always fine, we can
		 * avoid set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
		ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
					       HRTIMER_MODE_REL);
		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(&mask, info, &type);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return ret ? -EINTR : -EAGAIN;
}

/**
 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			 in @uthese
 * @uthese: queued signals to wait for
 * @uinfo: if non-null, the signal's siginfo is returned here
 * @uts: upper bound on process time suspension
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo,
		const struct __kernel_timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec64 ts;
	kernel_siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (get_timespec64(&ts, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}

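/*
 * Example (illustrative only, not kernel code): a minimal userspace
 * sketch of the intended usage of this syscall, via the sigtimedwait(3)
 * wrapper. The signal must be blocked first so it stays queued instead
 * of invoking a handler; sigtimedwait() then dequeues it, or fails with
 * EAGAIN when the timeout expires. Compile with a POSIX feature test
 * macro (e.g. -D_POSIX_C_SOURCE=199309L) if your libc requires one.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);	// keep it queued
 *
 *		if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
 *			printf("SIGUSR1 from pid %d\n", (int)info.si_pid);
 *		else
 *			perror("sigtimedwait");		// EAGAIN on timeout
 *		return 0;
 *	}
 */
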
#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo,
		const struct old_timespec32 __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec64 ts;
	kernel_siginfo_t info;
	int ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (get_old_timespec32(&ts, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec64 t;
	kernel_siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (get_timespec64(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec64 t;
	kernel_siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (get_old_timespec32(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif
#endif

static void prepare_kill_siginfo(int sig, struct kernel_siginfo *info,
				 enum pid_type type)
{
	clear_siginfo(info);
	info->si_signo = sig;
	info->si_errno = 0;
	info->si_code = (type == PIDTYPE_PID) ? SI_TKILL : SI_USER;
	info->si_pid = task_tgid_vnr(current);
	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
}

/**
 * sys_kill - send a signal to a process
 * @pid: the PID of the process
 * @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct kernel_siginfo info;

	prepare_kill_siginfo(sig, &info, PIDTYPE_TGID);

	return kill_something_info(sig, &info, pid);
}

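/*
 * Example (illustrative only, not kernel code): the classic userspace
 * existence/permission probe built on this syscall. Signal 0 runs all
 * the permission checks but delivers nothing, as the null-signal
 * comment in do_send_specific() below notes.
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *
 *	int process_exists(pid_t pid)
 *	{
 *		if (kill(pid, 0) == 0)
 *			return 1;		// exists, and we may signal it
 *		return errno == EPERM;		// EPERM: exists; ESRCH: gone
 *	}
 */
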
/*
 * Verify that the signaler and signalee either are in the same pid namespace
 * or that the signaler's pid namespace is an ancestor of the signalee's pid
 * namespace.
 */
static bool access_pidfd_pidns(struct pid *pid)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *p = ns_of_pid(pid);

	for (;;) {
		if (!p)
			return false;
		if (p == active)
			break;
		p = p->parent;
	}

	return true;
}

static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
		siginfo_t __user *info)
{
#ifdef CONFIG_COMPAT
	/*
	 * Avoid hooking up compat syscalls and instead handle necessary
	 * conversions here. Note, this is a stop-gap measure and should not be
	 * considered a generic solution.
	 */
	if (in_compat_syscall())
		return copy_siginfo_from_user32(
			kinfo, (struct compat_siginfo __user *)info);
#endif
	return copy_siginfo_from_user(kinfo, info);
}

static struct pid *pidfd_to_pid(const struct file *file)
{
	struct pid *pid;

	pid = pidfd_pid(file);
	if (!IS_ERR(pid))
		return pid;

	return tgid_pidfd_to_pid(file);
}

#define PIDFD_SEND_SIGNAL_FLAGS                            \
	(PIDFD_SIGNAL_THREAD | PIDFD_SIGNAL_THREAD_GROUP | \
	 PIDFD_SIGNAL_PROCESS_GROUP)

/**
 * sys_pidfd_send_signal - Signal a process through a pidfd
 * @pidfd: file descriptor of the process
 * @sig: signal to send
 * @info: signal info
 * @flags: signal scope flags
 *
 * Send the signal to the thread group or to the individual thread depending
 * on PIDFD_THREAD.
 * A future extension to @flags may be used to override the default scope
 * of @pidfd.
 *
 * Return: 0 on success, negative errno on failure
 */
SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
		siginfo_t __user *, info, unsigned int, flags)
{
	int ret;
	struct pid *pid;
	kernel_siginfo_t kinfo;
	enum pid_type type;

	/* Reject any flags other than the supported signal scope flags. */
	if (flags & ~PIDFD_SEND_SIGNAL_FLAGS)
		return -EINVAL;

	/* Ensure that only a single signal scope determining flag is set. */
	if (hweight32(flags & PIDFD_SEND_SIGNAL_FLAGS) > 1)
		return -EINVAL;

	CLASS(fd, f)(pidfd);
	if (fd_empty(f))
		return -EBADF;

	/* Is this a pidfd? */
	pid = pidfd_to_pid(fd_file(f));
	if (IS_ERR(pid))
		return PTR_ERR(pid);

	if (!access_pidfd_pidns(pid))
		return -EINVAL;

	switch (flags) {
	case 0:
		/* Infer scope from the type of pidfd. */
		if (fd_file(f)->f_flags & PIDFD_THREAD)
			type = PIDTYPE_PID;
		else
			type = PIDTYPE_TGID;
		break;
	case PIDFD_SIGNAL_THREAD:
		type = PIDTYPE_PID;
		break;
	case PIDFD_SIGNAL_THREAD_GROUP:
		type = PIDTYPE_TGID;
		break;
	case PIDFD_SIGNAL_PROCESS_GROUP:
		type = PIDTYPE_PGID;
		break;
	}

	if (info) {
		ret = copy_siginfo_from_user_any(&kinfo, info);
		if (unlikely(ret))
			return ret;

		if (unlikely(sig != kinfo.si_signo))
			return -EINVAL;

		/* Only allow sending arbitrary signals to yourself. */
		if ((task_pid(current) != pid || type > PIDTYPE_TGID) &&
		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
			return -EPERM;
	} else {
		prepare_kill_siginfo(sig, &kinfo, type);
	}

	if (type == PIDTYPE_PGID)
		return kill_pgrp_info(sig, &kinfo, pid);
	else
		return kill_pid_info_type(sig, &kinfo, pid, type);
}

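/*
 * Example (illustrative only, not kernel code): signalling a process
 * through a pidfd from userspace. This assumes recent kernel headers
 * providing SYS_pidfd_open and SYS_pidfd_send_signal, and uses raw
 * syscall(2) in case the libc lacks wrappers. Passing a NULL siginfo
 * asks the kernel to fill in ordinary SI_USER information.
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int signal_via_pidfd(pid_t pid, int sig)
 *	{
 *		int ret, pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *		if (pidfd < 0)
 *			return -1;
 *		ret = syscall(SYS_pidfd_send_signal, pidfd, sig, NULL, 0);
 *		close(pidfd);
 *		return ret;
 *	}
 */
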
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe. No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}

static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct kernel_siginfo info;

	prepare_kill_siginfo(sig, &info, PIDTYPE_PID);

	return do_send_specific(tgid, pid, sig, &info);
}

/**
 * sys_tgkill - send signal to one specific thread
 * @tgid: the thread group ID of the thread
 * @pid: the PID of the thread
 * @sig: signal to be sent
 *
 * This syscall also checks the @tgid and returns -ESRCH even if the PID
 * exists but no longer belongs to the target thread group. This solves
 * the race of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

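/*
 * Example (illustrative only, not kernel code): directing a signal at
 * one thread of the calling process. glibc exposes tgkill() directly
 * (and pthread_kill() builds on it); the raw-syscall form is shown in
 * case the wrapper is unavailable.
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int signal_own_thread(pid_t tid, int sig)
 *	{
 *		return syscall(SYS_tgkill, getpid(), tid, sig);
 *	}
 */
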
/**
 * sys_tkill - send signal to one specific task
 * @pid: the PID of the task
 * @sig: signal to be sent
 *
 * Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
{
	/*
	 * Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, info, pid);
}

/**
 * sys_rt_sigqueueinfo - send signal information to a process
 * @pid: the PID of the process
 * @sig: signal to be sent
 * @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}

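/*
 * Example (illustrative only, not kernel code): userspace reaches this
 * syscall through sigqueue(3), which builds the SI_QUEUE siginfo and
 * payload for the caller. The receiver sees the value in si_value of
 * an SA_SIGINFO handler.
 *
 *	#include <signal.h>
 *
 *	int send_value(pid_t pid, int value)
 *	{
 *		union sigval sv = { .sival_int = value };
 *
 *		return sigqueue(pid, SIGRTMIN, sv);
 *	}
 */
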
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif

static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/*
	 * Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif

/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(current, &mask, &current->signal->shared_pending);
		flush_sigqueue_mask(current, &mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);

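/*
 * Example (illustrative sketch): kthreads normally reach
 * kernel_sigaction() through the allow_signal()/disallow_signal()
 * wrappers rather than calling it directly. A minimal signal-aware
 * kthread loop might look like this:
 *
 *	static int my_kthread(void *data)
 *	{
 *		allow_signal(SIGTERM);
 *
 *		while (!kthread_should_stop()) {
 *			if (signal_pending(current)) {
 *				flush_signals(current);
 *				break;
 *			}
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */
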
void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (k->sa.sa_flags & SA_IMMUTABLE) {
		spin_unlock_irq(&p->sighand->siglock);
		return -EINVAL;
	}
	if (oact)
		*oact = *k;

	/*
	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
	 * e.g. by having an architecture use the bit in their uapi.
	 */
	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);

	/*
	 * Clear unknown flag bits in order to allow userspace to detect missing
	 * support for flag bits and to allow the kernel to use non-uapi bits
	 * internally.
	 */
	if (act)
		act->sa.sa_flags &= UAPI_SA_FLAGS;
	if (oact)
		oact->sa.sa_flags &= UAPI_SA_FLAGS;

	sigaction_compat_abi(act, oact);

	if (act) {
		bool was_ignored = k->sa.sa_handler == SIG_IGN;

		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 * "Setting a signal action to SIG_IGN for a signal that is
		 *  pending shall cause the pending signal to be discarded,
		 *  whether or not it is blocked."
		 *
		 * "Setting a signal action to SIG_DFL for a signal that is
		 *  pending and whose default action is to ignore the signal
		 *  (for example, SIGCHLD), shall cause the pending signal to
		 *  be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			flush_sigqueue_mask(p, &mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(p, &mask, &t->pending);
		} else if (was_ignored) {
			posixtimer_sig_unignore(p, sig);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}

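/*
 * Example (illustrative only, not kernel code): observing the POSIX
 * discard rule quoted above from userspace. A blocked, pending SIGUSR1
 * disappears once the action is set to SIG_IGN.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set, pend;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		raise(SIGUSR1);			// blocked, so it stays pending
 *
 *		sigpending(&pend);
 *		printf("before: %d\n", sigismember(&pend, SIGUSR1));	// 1
 *
 *		signal(SIGUSR1, SIG_IGN);	// pending signal is discarded
 *		sigpending(&pend);
 *		printf("after: %d\n", sigismember(&pend, SIGUSR1));	// 0
 *		return 0;
 *	}
 */
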
#ifdef CONFIG_DYNAMIC_SIGFRAME
static inline void sigaltstack_lock(void)
	__acquires(&current->sighand->siglock)
{
	spin_lock_irq(&current->sighand->siglock);
}

static inline void sigaltstack_unlock(void)
	__releases(&current->sighand->siglock)
{
	spin_unlock_irq(&current->sighand->siglock);
}
#else
static inline void sigaltstack_lock(void) { }
static inline void sigaltstack_unlock(void) { }
#endif

static int
do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
	       size_t min_ss_size)
{
	struct task_struct *t = current;
	int ret = 0;

	if (oss) {
		memset(oss, 0, sizeof(stack_t));
		oss->ss_sp = (void __user *) t->sas_ss_sp;
		oss->ss_size = t->sas_ss_size;
		oss->ss_flags = sas_ss_flags(sp) |
			(current->sas_ss_flags & SS_FLAG_BITS);
	}

	if (ss) {
		void __user *ss_sp = ss->ss_sp;
		size_t ss_size = ss->ss_size;
		unsigned ss_flags = ss->ss_flags;
		int ss_mode;

		if (unlikely(on_sig_stack(sp)))
			return -EPERM;

		ss_mode = ss_flags & ~SS_FLAG_BITS;
		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0))
			return -EINVAL;

		/*
		 * Return before taking any locks if no actual
		 * sigaltstack changes were requested.
		 */
		if (t->sas_ss_sp == (unsigned long)ss_sp &&
		    t->sas_ss_size == ss_size &&
		    t->sas_ss_flags == ss_flags)
			return 0;

		sigaltstack_lock();
		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < min_ss_size))
				ret = -ENOMEM;
			if (!sigaltstack_size_valid(ss_size))
				ret = -ENOMEM;
		}
		if (!ret) {
			t->sas_ss_sp = (unsigned long) ss_sp;
			t->sas_ss_size = ss_size;
			t->sas_ss_flags = ss_flags;
		}
		sigaltstack_unlock();
	}
	return ret;
}

SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
{
	stack_t new, old;
	int err;
	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
			     current_user_stack_pointer(),
			     MINSIGSTKSZ);
	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
		err = -EFAULT;
	return err;
}

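/*
 * Example (illustrative only, not kernel code): installing an alternate
 * stack so a SIGSEGV handler can still run after the main stack has
 * overflowed. The handler must be registered with SA_ONSTACK or the
 * alternate stack is never used.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	static void on_segv(int sig)
 *	{
 *		_exit(1);	// async-signal-safe bail-out
 *	}
 *
 *	void setup_altstack(void)
 *	{
 *		stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ };
 *		struct sigaction sa = { .sa_handler = on_segv,
 *					.sa_flags = SA_ONSTACK };
 *
 *		sigaltstack(&ss, NULL);
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGSEGV, &sa, NULL);
 *	}
 */
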
int restore_altstack(const stack_t __user *uss)
{
	stack_t new;
	if (copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
			     MINSIGSTKSZ);
	/* squash all but EFAULT for now */
	return 0;
}

int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	return err;
}

#ifdef CONFIG_COMPAT
static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
				 compat_stack_t __user *uoss_ptr)
{
	stack_t uss, uoss;
	int ret;

	if (uss_ptr) {
		compat_stack_t uss32;
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
			     compat_user_stack_pointer(),
			     COMPAT_MINSIGSTKSZ);
	if (ret >= 0 && uoss_ptr) {
		compat_stack_t old;
		memset(&old, 0, sizeof(old));
		old.ss_sp = ptr_to_compat(uoss.ss_sp);
		old.ss_flags = uoss.ss_flags;
		old.ss_size = uoss.ss_size;
		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
			ret = -EFAULT;
	}
	return ret;
}

COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = do_compat_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;
	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	return err;
}
#endif

#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 * sys_sigpending - examine pending signals
 * @uset: where mask of pending signal is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
{
	sigset_t set;

	if (sizeof(old_sigset_t) > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
	sigset_t set;

	do_sigpending(&set);

	return put_user(set.sig[0], set32);
}
#endif

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 * sys_sigprocmask - examine and change blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: signals to add or remove (if non-null)
 * @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

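/*
 * Example (illustrative only, not kernel code): blocking SIGINT around
 * a critical section. Modern libcs route sigprocmask(3) to
 * rt_sigprocmask(2), but the semantics shown match this legacy entry
 * point as well.
 *
 *	#include <signal.h>
 *
 *	void critical_section(void (*fn)(void))
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGINT);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *		fn();				// runs with SIGINT blocked
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */
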
#ifndef CONFIG_ODD_RT_SIGACTION
/**
 * sys_rt_sigaction - alter an action taken by a process
 * @sig: signal to be sent
 * @act: new sigaction
 * @oact: used to save the previous sigaction
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
		return -EFAULT;

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
	if (ret)
		return ret;

	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
		return -EFAULT;

	return 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
					 sizeof(oact->sa_mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */

#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		       const struct compat_old_sigaction __user *, act,
		       struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif

#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility. Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif

static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

/**
 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
 *	until a signal is received
 * @unewset: new signal mask value
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}

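/*
 * Example (illustrative only, not kernel code): the race-free wait
 * idiom sigsuspend exists for. Because the signal stays blocked between
 * the flag test and the call, delivery cannot slip into the gap; the
 * unblock and the sleep happen atomically inside the syscall.
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *	static void handler(int sig) { got_usr1 = 1; }
 *
 *	void wait_for_usr1(void)
 *	{
 *		sigset_t block, old;
 *
 *		signal(SIGUSR1, handler);
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *
 *		while (!got_usr1)
 *			sigsuspend(&old);	// atomically unblock + sleep
 *
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */
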
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;
	return sigsuspend(&newset);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline void siginfo_buildtime_checks(void)
{
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	/* Verify the offsets in the two siginfos match */
#define CHECK_OFFSET(field) \
	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))

	/* kill */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);

	/* timer */
	CHECK_OFFSET(si_tid);
	CHECK_OFFSET(si_overrun);
	CHECK_OFFSET(si_value);

	/* rt */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_value);

	/* sigchld */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_status);
	CHECK_OFFSET(si_utime);
	CHECK_OFFSET(si_stime);

	/* sigfault */
	CHECK_OFFSET(si_addr);
	CHECK_OFFSET(si_trapno);
	CHECK_OFFSET(si_addr_lsb);
	CHECK_OFFSET(si_lower);
	CHECK_OFFSET(si_upper);
	CHECK_OFFSET(si_pkey);
	CHECK_OFFSET(si_perf_data);
	CHECK_OFFSET(si_perf_type);
	CHECK_OFFSET(si_perf_flags);

	/* sigpoll */
	CHECK_OFFSET(si_band);
	CHECK_OFFSET(si_fd);

	/* sigsys */
	CHECK_OFFSET(si_call_addr);
	CHECK_OFFSET(si_syscall);
	CHECK_OFFSET(si_arch);
#undef CHECK_OFFSET

	/* usb asyncio */
	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
		     offsetof(struct siginfo, si_addr));
	if (sizeof(int) == sizeof(void __user *)) {
		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
			     sizeof(void __user *));
	} else {
		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
			      sizeof_field(struct siginfo, si_uid)) !=
			     sizeof(void __user *));
		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
			     offsetof(struct siginfo, si_uid));
	}
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
		     offsetof(struct compat_siginfo, si_addr));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof(compat_uptr_t));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof_field(struct siginfo, si_pid));
#endif
}

#if defined(CONFIG_SYSCTL)
static struct ctl_table signal_debug_table[] = {
#ifdef CONFIG_SYSCTL_EXCEPTION_TRACE
	{
		.procname	= "exception-trace",
		.data		= &show_unhandled_signals,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
#endif
};

static int __init init_signal_sysctls(void)
{
	register_sysctl_init("debug", signal_debug_table);
	return 0;
}
early_initcall(init_signal_sysctls);
#endif /* CONFIG_SYSCTL */

void __init signals_init(void)
{
	siginfo_buildtime_checks();

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	static struct task_struct *kdb_prev_t;
	int new_t, ret;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "the kernel, try again later\n");
		return;
	}
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (!task_is_running(t) && new_t) {
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif /* CONFIG_KGDB_KDB */
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/kernel/signal.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 *
7 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
8 *
9 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
10 * Changes to use preallocated sigqueue structures
11 * to allow signals to be sent reliably.
12 */
13
14#include <linux/slab.h>
15#include <linux/export.h>
16#include <linux/init.h>
17#include <linux/sched/mm.h>
18#include <linux/sched/user.h>
19#include <linux/sched/debug.h>
20#include <linux/sched/task.h>
21#include <linux/sched/task_stack.h>
22#include <linux/sched/cputime.h>
23#include <linux/file.h>
24#include <linux/fs.h>
25#include <linux/proc_fs.h>
26#include <linux/tty.h>
27#include <linux/binfmts.h>
28#include <linux/coredump.h>
29#include <linux/security.h>
30#include <linux/syscalls.h>
31#include <linux/ptrace.h>
32#include <linux/signal.h>
33#include <linux/signalfd.h>
34#include <linux/ratelimit.h>
35#include <linux/tracehook.h>
36#include <linux/capability.h>
37#include <linux/freezer.h>
38#include <linux/pid_namespace.h>
39#include <linux/nsproxy.h>
40#include <linux/user_namespace.h>
41#include <linux/uprobes.h>
42#include <linux/compat.h>
43#include <linux/cn_proc.h>
44#include <linux/compiler.h>
45#include <linux/posix-timers.h>
46#include <linux/livepatch.h>
47#include <linux/cgroup.h>
48#include <linux/audit.h>
49
50#define CREATE_TRACE_POINTS
51#include <trace/events/signal.h>
52
53#include <asm/param.h>
54#include <linux/uaccess.h>
55#include <asm/unistd.h>
56#include <asm/siginfo.h>
57#include <asm/cacheflush.h>
58
59/*
60 * SLAB caches for signal bits.
61 */
62
63static struct kmem_cache *sigqueue_cachep;
64
65int print_fatal_signals __read_mostly;
66
67static void __user *sig_handler(struct task_struct *t, int sig)
68{
69 return t->sighand->action[sig - 1].sa.sa_handler;
70}
71
72static inline bool sig_handler_ignored(void __user *handler, int sig)
73{
74 /* Is it explicitly or implicitly ignored? */
75 return handler == SIG_IGN ||
76 (handler == SIG_DFL && sig_kernel_ignore(sig));
77}
78
79static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
80{
81 void __user *handler;
82
83 handler = sig_handler(t, sig);
84
85 /* SIGKILL and SIGSTOP may not be sent to the global init */
86 if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
87 return true;
88
89 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
90 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
91 return true;
92
93 /* Only allow kernel generated signals to this kthread */
94 if (unlikely((t->flags & PF_KTHREAD) &&
95 (handler == SIG_KTHREAD_KERNEL) && !force))
96 return true;
97
98 return sig_handler_ignored(handler, sig);
99}
100
101static bool sig_ignored(struct task_struct *t, int sig, bool force)
102{
103 /*
104 * Blocked signals are never ignored, since the
105 * signal handler may change by the time it is
106 * unblocked.
107 */
108 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
109 return false;
110
111 /*
112 * Tracers may want to know about even ignored signal unless it
113 * is SIGKILL which can't be reported anyway but can be ignored
114 * by SIGNAL_UNKILLABLE task.
115 */
116 if (t->ptrace && sig != SIGKILL)
117 return false;
118
119 return sig_task_ignored(t, sig, force);
120}
121
122/*
123 * Re-calculate pending state from the set of locally pending
124 * signals, globally pending signals, and blocked signals.
125 */
126static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
127{
128 unsigned long ready;
129 long i;
130
131 switch (_NSIG_WORDS) {
132 default:
133 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
134 ready |= signal->sig[i] &~ blocked->sig[i];
135 break;
136
137 case 4: ready = signal->sig[3] &~ blocked->sig[3];
138 ready |= signal->sig[2] &~ blocked->sig[2];
139 ready |= signal->sig[1] &~ blocked->sig[1];
140 ready |= signal->sig[0] &~ blocked->sig[0];
141 break;
142
143 case 2: ready = signal->sig[1] &~ blocked->sig[1];
144 ready |= signal->sig[0] &~ blocked->sig[0];
145 break;
146
147 case 1: ready = signal->sig[0] &~ blocked->sig[0];
148 }
149 return ready != 0;
150}
151
152#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
153
154static bool recalc_sigpending_tsk(struct task_struct *t)
155{
156 if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
157 PENDING(&t->pending, &t->blocked) ||
158 PENDING(&t->signal->shared_pending, &t->blocked) ||
159 cgroup_task_frozen(t)) {
160 set_tsk_thread_flag(t, TIF_SIGPENDING);
161 return true;
162 }
163
164 /*
165 * We must never clear the flag in another thread, or in current
166 * when it's possible the current syscall is returning -ERESTART*.
167 * So we don't clear it here, and only callers who know they should do.
168 */
169 return false;
170}
171
172/*
173 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
174 * This is superfluous when called on current, the wakeup is a harmless no-op.
175 */
176void recalc_sigpending_and_wake(struct task_struct *t)
177{
178 if (recalc_sigpending_tsk(t))
179 signal_wake_up(t, 0);
180}
181
182void recalc_sigpending(void)
183{
184 if (!recalc_sigpending_tsk(current) && !freezing(current) &&
185 !klp_patch_pending(current))
186 clear_thread_flag(TIF_SIGPENDING);
187
188}
189EXPORT_SYMBOL(recalc_sigpending);
190
191void calculate_sigpending(void)
192{
193 /* Have any signals or users of TIF_SIGPENDING been delayed
194 * until after fork?
195 */
196 spin_lock_irq(¤t->sighand->siglock);
197 set_tsk_thread_flag(current, TIF_SIGPENDING);
198 recalc_sigpending();
199 spin_unlock_irq(¤t->sighand->siglock);
200}
201
202/* Given the mask, find the first available signal that should be serviced. */
203
204#define SYNCHRONOUS_MASK \
205 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
206 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
207
208int next_signal(struct sigpending *pending, sigset_t *mask)
209{
210 unsigned long i, *s, *m, x;
211 int sig = 0;
212
213 s = pending->signal.sig;
214 m = mask->sig;
215
216 /*
217 * Handle the first word specially: it contains the
218 * synchronous signals that need to be dequeued first.
219 */
220 x = *s &~ *m;
221 if (x) {
222 if (x & SYNCHRONOUS_MASK)
223 x &= SYNCHRONOUS_MASK;
224 sig = ffz(~x) + 1;
225 return sig;
226 }
227
228 switch (_NSIG_WORDS) {
229 default:
230 for (i = 1; i < _NSIG_WORDS; ++i) {
231 x = *++s &~ *++m;
232 if (!x)
233 continue;
234 sig = ffz(~x) + i*_NSIG_BPW + 1;
235 break;
236 }
237 break;
238
239 case 2:
240 x = s[1] &~ m[1];
241 if (!x)
242 break;
243 sig = ffz(~x) + _NSIG_BPW + 1;
244 break;
245
246 case 1:
247 /* Nothing to do */
248 break;
249 }
250
251 return sig;
252}
253
254static inline void print_dropped_signal(int sig)
255{
256 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
257
258 if (!print_fatal_signals)
259 return;
260
261 if (!__ratelimit(&ratelimit_state))
262 return;
263
264 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
265 current->comm, current->pid, sig);
266}
267
268/**
269 * task_set_jobctl_pending - set jobctl pending bits
270 * @task: target task
271 * @mask: pending bits to set
272 *
273 * Clear @mask from @task->jobctl. @mask must be subset of
274 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
275 * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
276 * cleared. If @task is already being killed or exiting, this function
277 * becomes noop.
278 *
279 * CONTEXT:
280 * Must be called with @task->sighand->siglock held.
281 *
282 * RETURNS:
283 * %true if @mask is set, %false if made noop because @task was dying.
284 */
285bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
286{
287 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
288 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
289 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
290
291 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
292 return false;
293
294 if (mask & JOBCTL_STOP_SIGMASK)
295 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
296
297 task->jobctl |= mask;
298 return true;
299}
300
301/**
302 * task_clear_jobctl_trapping - clear jobctl trapping bit
303 * @task: target task
304 *
305 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
306 * Clear it and wake up the ptracer. Note that we don't need any further
307 * locking. @task->siglock guarantees that @task->parent points to the
308 * ptracer.
309 *
310 * CONTEXT:
311 * Must be called with @task->sighand->siglock held.
312 */
313void task_clear_jobctl_trapping(struct task_struct *task)
314{
315 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
316 task->jobctl &= ~JOBCTL_TRAPPING;
317 smp_mb(); /* advised by wake_up_bit() */
318 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
319 }
320}
321
322/**
323 * task_clear_jobctl_pending - clear jobctl pending bits
324 * @task: target task
325 * @mask: pending bits to clear
326 *
327 * Clear @mask from @task->jobctl. @mask must be subset of
328 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
329 * STOP bits are cleared together.
330 *
331 * If clearing of @mask leaves no stop or trap pending, this function calls
332 * task_clear_jobctl_trapping().
333 *
334 * CONTEXT:
335 * Must be called with @task->sighand->siglock held.
336 */
337void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
338{
339 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
340
341 if (mask & JOBCTL_STOP_PENDING)
342 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
343
344 task->jobctl &= ~mask;
345
346 if (!(task->jobctl & JOBCTL_PENDING_MASK))
347 task_clear_jobctl_trapping(task);
348}
349
350/**
351 * task_participate_group_stop - participate in a group stop
352 * @task: task participating in a group stop
353 *
354 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
355 * Group stop states are cleared and the group stop count is consumed if
356 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
357 * stop, the appropriate `SIGNAL_*` flags are set.
358 *
359 * CONTEXT:
360 * Must be called with @task->sighand->siglock held.
361 *
362 * RETURNS:
363 * %true if group stop completion should be notified to the parent, %false
364 * otherwise.
365 */
366static bool task_participate_group_stop(struct task_struct *task)
367{
368 struct signal_struct *sig = task->signal;
369 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
370
371 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
372
373 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
374
375 if (!consume)
376 return false;
377
378 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
379 sig->group_stop_count--;
380
381 /*
382 * Tell the caller to notify completion iff we are entering into a
383 * fresh group stop. Read comment in do_signal_stop() for details.
384 */
385 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
386 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
387 return true;
388 }
389 return false;
390}
391
392void task_join_group_stop(struct task_struct *task)
393{
394 /* Have the new thread join an on-going signal group stop */
395 unsigned long jobctl = current->jobctl;
396 if (jobctl & JOBCTL_STOP_PENDING) {
397 struct signal_struct *sig = current->signal;
398 unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
399 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
400 if (task_set_jobctl_pending(task, signr | gstop)) {
401 sig->group_stop_count++;
402 }
403 }
404}
405
406/*
407 * allocate a new signal queue record
408 * - this may be called without locks if and only if t == current, otherwise an
409 * appropriate lock must be held to stop the target task from exiting
410 */
411static struct sigqueue *
412__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
413{
414 struct sigqueue *q = NULL;
415 struct user_struct *user;
416 int sigpending;
417
418 /*
419 * Protect access to @t credentials. This can go away when all
420 * callers hold rcu read lock.
421 *
422 * NOTE! A pending signal will hold on to the user refcount,
423 * and we get/put the refcount only when the sigpending count
424 * changes from/to zero.
425 */
426 rcu_read_lock();
427 user = __task_cred(t)->user;
428 sigpending = atomic_inc_return(&user->sigpending);
429 if (sigpending == 1)
430 get_uid(user);
431 rcu_read_unlock();
432
433 if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
434 q = kmem_cache_alloc(sigqueue_cachep, flags);
435 } else {
436 print_dropped_signal(sig);
437 }
438
439 if (unlikely(q == NULL)) {
440 if (atomic_dec_and_test(&user->sigpending))
441 free_uid(user);
442 } else {
443 INIT_LIST_HEAD(&q->list);
444 q->flags = 0;
445 q->user = user;
446 }
447
448 return q;
449}
450
451static void __sigqueue_free(struct sigqueue *q)
452{
453 if (q->flags & SIGQUEUE_PREALLOC)
454 return;
455 if (atomic_dec_and_test(&q->user->sigpending))
456 free_uid(q->user);
457 kmem_cache_free(sigqueue_cachep, q);
458}
459
460void flush_sigqueue(struct sigpending *queue)
461{
462 struct sigqueue *q;
463
464 sigemptyset(&queue->signal);
465 while (!list_empty(&queue->list)) {
466 q = list_entry(queue->list.next, struct sigqueue , list);
467 list_del_init(&q->list);
468 __sigqueue_free(q);
469 }
470}
471
472/*
473 * Flush all pending signals for this kthread.
474 */
475void flush_signals(struct task_struct *t)
476{
477 unsigned long flags;
478
479 spin_lock_irqsave(&t->sighand->siglock, flags);
480 clear_tsk_thread_flag(t, TIF_SIGPENDING);
481 flush_sigqueue(&t->pending);
482 flush_sigqueue(&t->signal->shared_pending);
483 spin_unlock_irqrestore(&t->sighand->siglock, flags);
484}
485EXPORT_SYMBOL(flush_signals);
486
487#ifdef CONFIG_POSIX_TIMERS
488static void __flush_itimer_signals(struct sigpending *pending)
489{
490 sigset_t signal, retain;
491 struct sigqueue *q, *n;
492
493 signal = pending->signal;
494 sigemptyset(&retain);
495
496 list_for_each_entry_safe(q, n, &pending->list, list) {
497 int sig = q->info.si_signo;
498
499 if (likely(q->info.si_code != SI_TIMER)) {
500 sigaddset(&retain, sig);
501 } else {
502 sigdelset(&signal, sig);
503 list_del_init(&q->list);
504 __sigqueue_free(q);
505 }
506 }
507
508 sigorsets(&pending->signal, &signal, &retain);
509}
510
511void flush_itimer_signals(void)
512{
513 struct task_struct *tsk = current;
514 unsigned long flags;
515
516 spin_lock_irqsave(&tsk->sighand->siglock, flags);
517 __flush_itimer_signals(&tsk->pending);
518 __flush_itimer_signals(&tsk->signal->shared_pending);
519 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
520}
521#endif
522
523void ignore_signals(struct task_struct *t)
524{
525 int i;
526
527 for (i = 0; i < _NSIG; ++i)
528 t->sighand->action[i].sa.sa_handler = SIG_IGN;
529
530 flush_signals(t);
531}
532
533/*
534 * Flush all handlers for a task.
535 */
536
537void
538flush_signal_handlers(struct task_struct *t, int force_default)
539{
540 int i;
541 struct k_sigaction *ka = &t->sighand->action[0];
542 for (i = _NSIG ; i != 0 ; i--) {
543 if (force_default || ka->sa.sa_handler != SIG_IGN)
544 ka->sa.sa_handler = SIG_DFL;
545 ka->sa.sa_flags = 0;
546#ifdef __ARCH_HAS_SA_RESTORER
547 ka->sa.sa_restorer = NULL;
548#endif
549 sigemptyset(&ka->sa.sa_mask);
550 ka++;
551 }
552}
553
554bool unhandled_signal(struct task_struct *tsk, int sig)
555{
556 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
557 if (is_global_init(tsk))
558 return true;
559
560 if (handler != SIG_IGN && handler != SIG_DFL)
561 return false;
562
563 /* if ptraced, let the tracer determine */
564 return !tsk->ptrace;
565}
566
567static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
568 bool *resched_timer)
569{
570 struct sigqueue *q, *first = NULL;
571
572 /*
573 * Collect the siginfo appropriate to this signal. Check if
574 * there is another siginfo for the same signal.
575 */
576 list_for_each_entry(q, &list->list, list) {
577 if (q->info.si_signo == sig) {
578 if (first)
579 goto still_pending;
580 first = q;
581 }
582 }
583
584 sigdelset(&list->signal, sig);
585
586 if (first) {
587still_pending:
588 list_del_init(&first->list);
589 copy_siginfo(info, &first->info);
590
591 *resched_timer =
592 (first->flags & SIGQUEUE_PREALLOC) &&
593 (info->si_code == SI_TIMER) &&
594 (info->si_sys_private);
595
596 __sigqueue_free(first);
597 } else {
598 /*
599 * Ok, it wasn't in the queue. This must be
600 * a fast-pathed signal or we must have been
601 * out of queue space. So zero out the info.
602 */
603 clear_siginfo(info);
604 info->si_signo = sig;
605 info->si_errno = 0;
606 info->si_code = SI_USER;
607 info->si_pid = 0;
608 info->si_uid = 0;
609 }
610}
611
612static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
613 kernel_siginfo_t *info, bool *resched_timer)
614{
615 int sig = next_signal(pending, mask);
616
617 if (sig)
618 collect_signal(sig, pending, info, resched_timer);
619 return sig;
620}
621
622/*
623 * Dequeue a signal and return the element to the caller, which is
624 * expected to free it.
625 *
626 * All callers have to hold the siglock.
627 */
628int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
629{
630 bool resched_timer = false;
631 int signr;
632
633 /* We only dequeue private signals from ourselves, we don't let
634 * signalfd steal them
635 */
636 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
637 if (!signr) {
638 signr = __dequeue_signal(&tsk->signal->shared_pending,
639 mask, info, &resched_timer);
640#ifdef CONFIG_POSIX_TIMERS
641 /*
642 * itimer signal ?
643 *
644 * itimers are process shared and we restart periodic
645 * itimers in the signal delivery path to prevent DoS
646 * attacks in the high resolution timer case. This is
647 * compliant with the old way of self-restarting
648 * itimers, as the SIGALRM is a legacy signal and only
649 * queued once. Changing the restart behaviour to
650 * restart the timer in the signal dequeue path is
651 * reducing the timer noise on heavy loaded !highres
652 * systems too.
653 */
654 if (unlikely(signr == SIGALRM)) {
655 struct hrtimer *tmr = &tsk->signal->real_timer;
656
657 if (!hrtimer_is_queued(tmr) &&
658 tsk->signal->it_real_incr != 0) {
659 hrtimer_forward(tmr, tmr->base->get_time(),
660 tsk->signal->it_real_incr);
661 hrtimer_restart(tmr);
662 }
663 }
664#endif
665 }
666
667 recalc_sigpending();
668 if (!signr)
669 return 0;
670
671 if (unlikely(sig_kernel_stop(signr))) {
672 /*
673 * Set a marker that we have dequeued a stop signal. Our
674 * caller might release the siglock and then the pending
675 * stop signal it is about to process is no longer in the
676 * pending bitmasks, but must still be cleared by a SIGCONT
677 * (and overruled by a SIGKILL). So those cases clear this
678 * shared flag after we've set it. Note that this flag may
679 * remain set after the signal we return is ignored or
680 * handled. That doesn't matter because its only purpose
681 * is to alert stop-signal processing code when another
682 * processor has come along and cleared the flag.
683 */
684 current->jobctl |= JOBCTL_STOP_DEQUEUED;
685 }
686#ifdef CONFIG_POSIX_TIMERS
687 if (resched_timer) {
688 /*
689 * Release the siglock to ensure proper locking order
690 * of timer locks outside of siglocks. Note, we leave
691 * irqs disabled here, since the posix-timers code is
692 * about to disable them again anyway.
693 */
694 spin_unlock(&tsk->sighand->siglock);
695 posixtimer_rearm(info);
696 spin_lock(&tsk->sighand->siglock);
697
698 /* Don't expose the si_sys_private value to userspace */
699 info->si_sys_private = 0;
700 }
701#endif
702 return signr;
703}
704EXPORT_SYMBOL_GPL(dequeue_signal);
705
706static int dequeue_synchronous_signal(kernel_siginfo_t *info)
707{
708 struct task_struct *tsk = current;
709 struct sigpending *pending = &tsk->pending;
710 struct sigqueue *q, *sync = NULL;
711
712 /*
713 * Might a synchronous signal be in the queue?
714 */
715 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
716 return 0;
717
718 /*
719 * Return the first synchronous signal in the queue.
720 */
721 list_for_each_entry(q, &pending->list, list) {
722 /* Synchronous signals have a positive si_code */
723 if ((q->info.si_code > SI_USER) &&
724 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
725 sync = q;
726 goto next;
727 }
728 }
729 return 0;
730next:
731 /*
732 * Check if there is another siginfo for the same signal.
733 */
734 list_for_each_entry_continue(q, &pending->list, list) {
735 if (q->info.si_signo == sync->info.si_signo)
736 goto still_pending;
737 }
738
739 sigdelset(&pending->signal, sync->info.si_signo);
740 recalc_sigpending();
741still_pending:
742 list_del_init(&sync->list);
743 copy_siginfo(info, &sync->info);
744 __sigqueue_free(sync);
745 return info->si_signo;
746}
747

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! We rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and local interrupts must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked.
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken. If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event. If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling. This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal. Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG. Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals. Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread. If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
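
/*
 * Illustration (userspace view, hypothetical pid): because of
 * legacy_queue(), a second instance of the same not-yet-dequeued non-RT
 * signal coalesces, while RT signals keep queuing (up to the
 * pending-signal rlimit):
 *
 *	kill(pid, SIGUSR1);		// queued
 *	kill(pid, SIGUSR1);		// coalesced: still one pending
 *	sigqueue(pid, SIGRTMIN, val);	// queued
 *	sigqueue(pid, SIGRTMIN, val);	// queued again: two pending
 */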

static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism. It is implementation
	 * defined whether kill() does so. We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort. We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information. We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special, or the siginfo has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
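
/*
 * Usage note (illustrative): enable the diagnostic above by booting with
 * "print-fatal-signals=1" on the kernel command line, or at runtime via
 * the matching sysctl:
 *
 *	echo 1 > /proc/sys/kernel/print-fatal-signals
 */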

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again. If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong. The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace, as the 32bit address will be encoded in the
 * low 32bits of the pointer. Those low 32bits will be stored at a
 * higher address than they appear in a 32bit pointer, so userspace
 * will not see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_ptr, of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
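
/*
 * Illustrative caller-side fixup for the 32bit-on-64bit case described
 * above (the "uurb_ptr" pointer, the compat test and the surrounding
 * driver code are hypothetical):
 *
 *	sigval_t addr;
 *
 *	if (opened_by_compat_task)
 *		addr.sival_int = (int)(unsigned long)uurb_ptr;
 *	else
 *		addr.sival_ptr = uurb_ptr;
 *	kill_pid_usb_asyncio(signr, errno, addr, pid, cred);
 */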

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong. Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);
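
/*
 * Illustrative sketch ("tsk" is a hypothetical task pointer): priv == 0
 * sends as if from the current task (SEND_SIG_NOINFO, si_code SI_USER,
 * si_pid/si_uid filled in), while priv != 0 sends a kernel-generated
 * signal (SEND_SIG_PRIV, si_code SI_KERNEL), which e.g. a pid-namespace
 * init cannot simply ignore:
 *
 *	send_sig(SIGHUP, tsk, 0);	// attributed to current
 *	send_sig(SIGKILL, tsk, 1);	// kernel-generated
 */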

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_TRAPNO(trapno)
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}
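
/*
 * Illustrative arch-side use (the surrounding fault handler is
 * hypothetical; on most architectures the ___ARCH_SI_TRAPNO and
 * ___ARCH_SI_IA64 extra arguments expand to nothing): report a bad user
 * access from a page-fault path running in the faulting task's context:
 *
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)address);
 */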

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);
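
/*
 * Illustrative use, modelled on the memory-failure path (exact caller
 * details vary): report an action-required machine check on a poisoned
 * user address, with si_addr_lsb describing the granularity of the
 * corruption (here a whole page):
 *
 *	force_sig_mceerr(BUS_MCEERR_AR, (void __user *)addr, PAGE_SHIFT);
 */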

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_BNDERR;
	info.si_addr = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_PKUERR;
	info.si_addr = addr;
	info.si_pkey = pkey;
	return force_sig_info(&info);
}
#endif

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
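
/*
 * Illustrative sketch of the classic tty-style use: hang up and then
 * continue a foreground process group, kernel-originated ("pgrp" is a
 * hypothetical struct pid, e.g. obtained via tty_get_pgrp()):
 *
 *	kill_pgrp(pgrp, SIGHUP, 1);
 *	kill_pgrp(pgrp, SIGCONT, 1);
 */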

/*
 * These functions support sending signals using preallocated sigqueue
 * structures. This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions". In the case of POSIX Timers
 * we allocate the sigqueue structure at timer_create() time: if that
 * allocation fails, we can report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
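
/*
 * Illustrative lifecycle of a preallocated sigqueue, modelled on the
 * POSIX timer code (names hypothetical, error handling elided):
 *
 *	struct sigqueue *q = sigqueue_alloc();	// at timer_create() time
 *
 *	q->info.si_signo = SIGALRM;		// fill in before sending
 *	q->info.si_code = SI_TIMER;
 *	send_sigqueue(q, pid, PIDTYPE_TGID);	// per expiry, no allocation
 *	...
 *	sigqueue_free(q);			// at timer_delete() time
 */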

static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}
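
/*
 * What the wakeup above unblocks, seen from userspace (illustrative):
 * a pidfd from pidfd_open(2) becomes readable once the process exits:
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);	// returns when the process has exited
 */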

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care. POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie. Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed. If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump, stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated: a task with a pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}

/*
 * Return true if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop. This is allowed to block, e.g. for faults
		 * on user stack pages. We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock. That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping. TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()		    smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now. We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader. The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop. The states
		 * for the two don't interact with each other. Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		cgroup_enter_frozen();
		preempt_enable_no_resched();
		freezable_schedule();
		cgroup_leave_frozen(true);
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here. During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back. Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

static void ptrace_do_notify(int signr, int exit_code, int why)
{
	kernel_siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run. */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
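
/*
 * Illustrative caller: this is the shape of exit_code that ptrace_event()
 * in linux/ptrace.h packs when reporting, say, an exec to the tracer -
 * the event number lands in the bits the BUG_ON above masks out:
 *
 *	ptrace_notify(SIGTRAP | (PTRACE_EVENT_EXEC << 8));
 */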

/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it. If already set, participate in the existing
 * group stop. If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself. Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched. The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress. We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop. This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced. That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set. Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion. Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, a ptracer may attach in between; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader. The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		cgroup_enter_frozen();
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}

/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}

/**
 * do_freezer_trap - handle the freezer jobctl trap
 *
 * Puts the task into the frozen state, unless the task is about to quit,
 * in which case it drops JOBCTL_TRAP_FREEZE.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held,
 * which is always released before returning.
 */
static void do_freezer_trap(void)
	__releases(&current->sighand->siglock)
{
	/*
	 * If there are other trap bits pending besides JOBCTL_TRAP_FREEZE,
	 * let's make another loop to give them a chance to be handled.
	 * In any case, we'll return.
	 */
	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
	     JOBCTL_TRAP_FREEZE) {
		spin_unlock_irq(&current->sighand->siglock);
		return;
	}

	/*
	 * Now we're sure that there is no pending fatal signal and no
	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
	 * immediately (if there is a non-fatal signal pending), and
	 * put the task into sleep.
	 */
	__set_current_state(TASK_INTERRUPTIBLE);
	clear_thread_flag(TIF_SIGPENDING);
	spin_unlock_irq(&current->sighand->siglock);
	cgroup_enter_frozen();
	freezable_schedule();
}

static int ptrace_signal(int signr, kernel_siginfo_t *info)
{
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back. Did the debugger cancel the sig? */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed. If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		clear_siginfo(info);
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it. */
	if (sigismember(&current->blocked, signr)) {
		send_signal(signr, info, current, PIDTYPE_PID);
		signr = 0;
	}

	return signr;
}

bool get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	if (unlikely(uprobe_deny_signal()))
		return false;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);
	/*
	 * Make sure we can safely read ->jobctl in task_work_add(). As
	 * Oleg states:
	 *
	 * It pairs with mb (implied by cmpxchg) before READ_ONCE. So we
	 * roughly have
	 *
	 *	task_work_add:				get_signal:
	 *	STORE(task->task_works, new_work);	STORE(task->jobctl);
	 *	mb();					mb();
	 *	LOAD(task->jobctl);			LOAD(task->task_works);
	 *
	 * and we can rely on STORE-MB-LOAD [in task_work_add()].
	 */
	smp_store_mb(current->jobctl, current->jobctl & ~JOBCTL_TASK_WORK);
	if (unlikely(current->task_works)) {
		spin_unlock_irq(&sighand->siglock);
		task_work_run();
		goto relock;
	}

	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing. This event is
		 * always per-process and doesn't make a whole lot of sense
2585 * for ptracers, who shouldn't consume the state via
2586 * wait(2) either, but, for backward compatibility, notify
2587 * the ptracer of the group leader too unless it's gonna be
2588 * a duplicate.
2589 */
2590 read_lock(&tasklist_lock);
2591 do_notify_parent_cldstop(current, false, why);
2592
2593 if (ptrace_reparented(current->group_leader))
2594 do_notify_parent_cldstop(current->group_leader,
2595 true, why);
2596 read_unlock(&tasklist_lock);
2597
2598 goto relock;
2599 }
2600
2601 /* Has this task already been marked for death? */
2602 if (signal_group_exit(signal)) {
2603 ksig->info.si_signo = signr = SIGKILL;
2604 sigdelset(¤t->pending.signal, SIGKILL);
2605 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2606 &sighand->action[SIGKILL - 1]);
2607 recalc_sigpending();
2608 goto fatal;
2609 }
2610
2611 for (;;) {
2612 struct k_sigaction *ka;
2613
2614 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2615 do_signal_stop(0))
2616 goto relock;
2617
2618 if (unlikely(current->jobctl &
2619 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2620 if (current->jobctl & JOBCTL_TRAP_MASK) {
2621 do_jobctl_trap();
2622 spin_unlock_irq(&sighand->siglock);
2623 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2624 do_freezer_trap();
2625
2626 goto relock;
2627 }
2628
2629 /*
2630 * If the task is leaving the frozen state, let's update
2631 * cgroup counters and reset the frozen bit.
2632 */
2633 if (unlikely(cgroup_task_frozen(current))) {
2634 spin_unlock_irq(&sighand->siglock);
2635 cgroup_leave_frozen(false);
2636 goto relock;
2637 }
2638
2639 /*
2640 * Signals generated by the execution of an instruction
2641 * need to be delivered before any other pending signals
2642 * so that the instruction pointer in the signal stack
2643 * frame points to the faulting instruction.
2644 */
2645 signr = dequeue_synchronous_signal(&ksig->info);
2646 if (!signr)
2647 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2648
2649 if (!signr)
2650 break; /* will return 0 */
2651
2652 if (unlikely(current->ptrace) && signr != SIGKILL) {
2653 signr = ptrace_signal(signr, &ksig->info);
2654 if (!signr)
2655 continue;
2656 }
2657
2658 ka = &sighand->action[signr-1];
2659
2660 /* Trace actually delivered signals. */
2661 trace_signal_deliver(signr, &ksig->info, ka);
2662
2663 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2664 continue;
2665 if (ka->sa.sa_handler != SIG_DFL) {
2666 /* Run the handler. */
2667 ksig->ka = *ka;
2668
2669 if (ka->sa.sa_flags & SA_ONESHOT)
2670 ka->sa.sa_handler = SIG_DFL;
2671
2672 break; /* will return non-zero "signr" value */
2673 }
2674
2675 /*
2676 * Now we are doing the default action for this signal.
2677 */
2678 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2679 continue;
2680
2681 /*
2682 * Global init gets no signals it doesn't want.
2683 * Container-init gets no signals it doesn't want from same
2684 * container.
2685 *
2686 * Note that if global/container-init sees a sig_kernel_only()
2687 * signal here, the signal must have been generated internally
2688 * or must have come from an ancestor namespace. In either
2689 * case, the signal cannot be dropped.
2690 */
2691 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2692 !sig_kernel_only(signr))
2693 continue;
2694
2695 if (sig_kernel_stop(signr)) {
2696 /*
2697 * The default action is to stop all threads in
2698 * the thread group. The job control signals
2699 * do nothing in an orphaned pgrp, but SIGSTOP
2700 * always works. Note that siglock needs to be
2701 * dropped during the call to is_orphaned_pgrp()
2702 * because of lock ordering with tasklist_lock.
2703 * This allows an intervening SIGCONT to be posted.
2704 * We need to check for that and bail out if necessary.
2705 */
2706 if (signr != SIGSTOP) {
2707 spin_unlock_irq(&sighand->siglock);
2708
2709 /* signals can be posted during this window */
2710
2711 if (is_current_pgrp_orphaned())
2712 goto relock;
2713
2714 spin_lock_irq(&sighand->siglock);
2715 }
2716
2717 if (likely(do_signal_stop(ksig->info.si_signo))) {
2718 /* It released the siglock. */
2719 goto relock;
2720 }
2721
2722 /*
2723 * We didn't actually stop, due to a race
2724 * with SIGCONT or something like that.
2725 */
2726 continue;
2727 }
2728
2729 fatal:
2730 spin_unlock_irq(&sighand->siglock);
2731 if (unlikely(cgroup_task_frozen(current)))
2732 cgroup_leave_frozen(true);
2733
2734 /*
2735 * Anything else is fatal, maybe with a core dump.
2736 */
2737 current->flags |= PF_SIGNALED;
2738
2739 if (sig_kernel_coredump(signr)) {
2740 if (print_fatal_signals)
2741 print_fatal_signal(ksig->info.si_signo);
2742 proc_coredump_connector(current);
2743 /*
2744 * If it was able to dump core, this kills all
2745 * other threads in the group and synchronizes with
2746 * their demise. If we lost the race with another
2747 * thread getting here, it set group_exit_code
2748 * first and our do_group_exit call below will use
2749 * that value and ignore the one we pass it.
2750 */
2751 do_coredump(&ksig->info);
2752 }
2753
2754 /*
2755 * Death signals, no core dump.
2756 */
2757 do_group_exit(ksig->info.si_signo);
2758 /* NOTREACHED */
2759 }
2760 spin_unlock_irq(&sighand->siglock);
2761
2762 ksig->sig = signr;
2763 return ksig->sig > 0;
2764}
2765
2766/**
2767 * signal_delivered - update state after a signal has been delivered
2768 * @ksig: kernel signal struct
2769 * @stepping: nonzero if debugger single-step or block-step in use
2770 *
2771 * This function should be called when a signal has successfully been
2772 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2773 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
2774 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
2775 */
2776static void signal_delivered(struct ksignal *ksig, int stepping)
2777{
2778 sigset_t blocked;
2779
2780 /* A signal was successfully delivered, and the saved sigmask
2781 * was stored on the signal frame; it will be restored by sigreturn.
2782 * So we can simply clear the restore-sigmask flag.
2783 */
2784 clear_restore_sigmask();
2785
2786 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2787 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2788 sigaddset(&blocked, ksig->sig);
2789 set_current_blocked(&blocked);
2790 tracehook_signal_handler(stepping);
2791}
2792
2793void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2794{
2795 if (failed)
2796 force_sigsegv(ksig->sig);
2797 else
2798 signal_delivered(ksig, stepping);
2799}
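/*
 * Illustrative sketch (not part of this file): architecture code drives
 * get_signal() and signal_setup_done() from its return-to-user path.
 * The names arch_do_signal() and setup_rt_frame() below are hypothetical
 * stand-ins for the per-arch entry point and signal-frame builder, and
 * the single-step flag is arch-specific.
 *
 *	void arch_do_signal(struct pt_regs *regs)
 *	{
 *		struct ksignal ksig;
 *
 *		if (get_signal(&ksig)) {
 *			// Build the user-space signal frame; a non-zero
 *			// return means the frame could not be written.
 *			int failed = setup_rt_frame(&ksig, regs);
 *
 *			signal_setup_done(failed, &ksig,
 *					  test_thread_flag(TIF_SINGLESTEP));
 *			return;
 *		}
 *
 *		// No handler ran: put any saved sigmask back.
 *		restore_saved_sigmask();
 *	}
 */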
2800
2801/*
2802 * It could be that complete_signal() picked us to notify about the
2803 * group-wide signal. Other threads should be notified now to take
2804 * the shared signals in @which since we will not.
2805 */
2806static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2807{
2808 sigset_t retarget;
2809 struct task_struct *t;
2810
2811 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2812 if (sigisemptyset(&retarget))
2813 return;
2814
2815 t = tsk;
2816 while_each_thread(tsk, t) {
2817 if (t->flags & PF_EXITING)
2818 continue;
2819
2820 if (!has_pending_signals(&retarget, &t->blocked))
2821 continue;
2822 /* Remove the signals this thread can handle. */
2823 sigandsets(&retarget, &retarget, &t->blocked);
2824
2825 if (!signal_pending(t))
2826 signal_wake_up(t, 0);
2827
2828 if (sigisemptyset(&retarget))
2829 break;
2830 }
2831}
2832
2833void exit_signals(struct task_struct *tsk)
2834{
2835 int group_stop = 0;
2836 sigset_t unblocked;
2837
2838 /*
2839 * @tsk is about to have PF_EXITING set - lock out users which
2840 * expect stable threadgroup.
2841 */
2842 cgroup_threadgroup_change_begin(tsk);
2843
2844 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2845 tsk->flags |= PF_EXITING;
2846 cgroup_threadgroup_change_end(tsk);
2847 return;
2848 }
2849
2850 spin_lock_irq(&tsk->sighand->siglock);
2851 /*
2852 * From now this task is not visible for group-wide signals,
2853 * see wants_signal(), do_signal_stop().
2854 */
2855 tsk->flags |= PF_EXITING;
2856
2857 cgroup_threadgroup_change_end(tsk);
2858
2859 if (!signal_pending(tsk))
2860 goto out;
2861
2862 unblocked = tsk->blocked;
2863 signotset(&unblocked);
2864 retarget_shared_pending(tsk, &unblocked);
2865
2866 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2867 task_participate_group_stop(tsk))
2868 group_stop = CLD_STOPPED;
2869out:
2870 spin_unlock_irq(&tsk->sighand->siglock);
2871
2872 /*
2873 * If group stop has completed, deliver the notification. This
2874 * should always go to the real parent of the group leader.
2875 */
2876 if (unlikely(group_stop)) {
2877 read_lock(&tasklist_lock);
2878 do_notify_parent_cldstop(tsk, false, group_stop);
2879 read_unlock(&tasklist_lock);
2880 }
2881}
2882
2883/*
2884 * System call entry points.
2885 */
2886
2887/**
2888 * sys_restart_syscall - restart a system call
2889 */
2890SYSCALL_DEFINE0(restart_syscall)
2891{
2892 struct restart_block *restart = &current->restart_block;
2893 return restart->fn(restart);
2894}
2895
2896long do_no_restart_syscall(struct restart_block *param)
2897{
2898 return -EINTR;
2899}
2900
2901static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2902{
2903 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2904 sigset_t newblocked;
2905 /* A set of now blocked but previously unblocked signals. */
2906 sigandnsets(&newblocked, newset, &current->blocked);
2907 retarget_shared_pending(tsk, &newblocked);
2908 }
2909 tsk->blocked = *newset;
2910 recalc_sigpending();
2911}
2912
2913/**
2914 * set_current_blocked - change current->blocked mask
2915 * @newset: new mask
2916 *
2917 * It is wrong to change ->blocked directly; this helper should be used
2918 * to ensure the process can't miss a shared signal we are going to block.
2919 */
2920void set_current_blocked(sigset_t *newset)
2921{
2922 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2923 __set_current_blocked(newset);
2924}
2925
2926void __set_current_blocked(const sigset_t *newset)
2927{
2928 struct task_struct *tsk = current;
2929
2930 /*
2931 * If the signal mask hasn't changed, there is nothing we need
2932 * to do. current->blocked must not be modified by any other task.
2933 */
2934 if (sigequalsets(&tsk->blocked, newset))
2935 return;
2936
2937 spin_lock_irq(&tsk->sighand->siglock);
2938 __set_task_blocked(tsk, newset);
2939 spin_unlock_irq(&tsk->sighand->siglock);
2940}
2941
2942/*
2943 * This is also useful for kernel threads that want to temporarily
2944 * (or permanently) block certain signals.
2945 *
2946 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2947 * interface happily blocks "unblockable" signals like SIGKILL
2948 * and friends.
2949 */
2950int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2951{
2952 struct task_struct *tsk = current;
2953 sigset_t newset;
2954
2955 /* Lockless, only current can change ->blocked, never from irq */
2956 if (oldset)
2957 *oldset = tsk->blocked;
2958
2959 switch (how) {
2960 case SIG_BLOCK:
2961 sigorsets(&newset, &tsk->blocked, set);
2962 break;
2963 case SIG_UNBLOCK:
2964 sigandnsets(&newset, &tsk->blocked, set);
2965 break;
2966 case SIG_SETMASK:
2967 newset = *set;
2968 break;
2969 default:
2970 return -EINVAL;
2971 }
2972
2973 __set_current_blocked(&newset);
2974 return 0;
2975}
2976EXPORT_SYMBOL(sigprocmask);
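/*
 * A minimal in-kernel sketch (illustrative, not a caller in this file):
 * a kernel thread that wants to block everything except SIGKILL around a
 * critical section might do something like:
 *
 *	sigset_t all, old;
 *
 *	sigfillset(&all);
 *	sigdelset(&all, SIGKILL);
 *	sigprocmask(SIG_BLOCK, &all, &old);	// block the rest
 *	...					// critical section
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// restore the old mask
 *
 * Note that, per the comment above, leaving SIGKILL in the set would have
 * blocked it too; user-space sigprocmask(2) silently refuses that.
 */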
2977
2978/*
2979 * This API helps to set app-provided sigmasks.
2980 *
2981 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
2982 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
2983 *
2984 * Note that it does set_restore_sigmask() in advance, so it must always be
2985 * paired with restore_saved_sigmask_unless() before returning from the syscall.
2986 */
2987int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
2988{
2989 sigset_t kmask;
2990
2991 if (!umask)
2992 return 0;
2993 if (sigsetsize != sizeof(sigset_t))
2994 return -EINVAL;
2995 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
2996 return -EFAULT;
2997
2998 set_restore_sigmask();
2999 current->saved_sigmask = current->blocked;
3000 set_current_blocked(&kmask);
3001
3002 return 0;
3003}
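/*
 * Illustrative pairing (hypothetical syscall body, modelled on the ppoll
 * style of caller): the temporary mask is installed before the wait and
 * conditionally restored afterwards. do_the_interruptible_wait() is a
 * stand-in for the actual wait core.
 *
 *	ret = set_user_sigmask(umask, sigsetsize);
 *	if (ret)
 *		return ret;
 *
 *	ret = do_the_interruptible_wait(...);
 *
 *	restore_saved_sigmask_unless(ret == -EINTR);
 *
 * When the wait was interrupted, the temporary mask is deliberately left
 * in place so the signal is delivered with the caller-supplied mask; the
 * saved mask is then restored by the signal-delivery path itself.
 */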
3004
3005#ifdef CONFIG_COMPAT
3006int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3007 size_t sigsetsize)
3008{
3009 sigset_t kmask;
3010
3011 if (!umask)
3012 return 0;
3013 if (sigsetsize != sizeof(compat_sigset_t))
3014 return -EINVAL;
3015 if (get_compat_sigset(&kmask, umask))
3016 return -EFAULT;
3017
3018 set_restore_sigmask();
3019 current->saved_sigmask = current->blocked;
3020 set_current_blocked(&kmask);
3021
3022 return 0;
3023}
3024#endif
3025
3026/**
3027 * sys_rt_sigprocmask - change the list of currently blocked signals
3028 * @how: whether to add, remove, or set signals
3029 * @nset: new set of blocked signals, or NULL to leave the mask unchanged
3030 * @oset: previous value of signal mask if non-null
3031 * @sigsetsize: size of sigset_t type
3032 */
3033SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3034 sigset_t __user *, oset, size_t, sigsetsize)
3035{
3036 sigset_t old_set, new_set;
3037 int error;
3038
3039 /* XXX: Don't preclude handling different sized sigset_t's. */
3040 if (sigsetsize != sizeof(sigset_t))
3041 return -EINVAL;
3042
3043 old_set = current->blocked;
3044
3045 if (nset) {
3046 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3047 return -EFAULT;
3048 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3049
3050 error = sigprocmask(how, &new_set, NULL);
3051 if (error)
3052 return error;
3053 }
3054
3055 if (oset) {
3056 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3057 return -EFAULT;
3058 }
3059
3060 return 0;
3061}
3062
3063#ifdef CONFIG_COMPAT
3064COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3065 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3066{
3067 sigset_t old_set = current->blocked;
3068
3069 /* XXX: Don't preclude handling different sized sigset_t's. */
3070 if (sigsetsize != sizeof(sigset_t))
3071 return -EINVAL;
3072
3073 if (nset) {
3074 sigset_t new_set;
3075 int error;
3076 if (get_compat_sigset(&new_set, nset))
3077 return -EFAULT;
3078 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3079
3080 error = sigprocmask(how, &new_set, NULL);
3081 if (error)
3082 return error;
3083 }
3084 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3085}
3086#endif
3087
3088static void do_sigpending(sigset_t *set)
3089{
3090 spin_lock_irq(&current->sighand->siglock);
3091 sigorsets(set, &current->pending.signal,
3092 &current->signal->shared_pending.signal);
3093 spin_unlock_irq(&current->sighand->siglock);
3094
3095 /* Outside the lock because only this thread touches it. */
3096 sigandsets(set, &current->blocked, set);
3097}
3098
3099/**
3100 * sys_rt_sigpending - examine pending signals that have been raised
3101 * while blocked
3102 * @uset: stores pending signals
3103 * @sigsetsize: size of sigset_t type or smaller
3104 */
3105SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3106{
3107 sigset_t set;
3108
3109 if (sigsetsize > sizeof(*uset))
3110 return -EINVAL;
3111
3112 do_sigpending(&set);
3113
3114 if (copy_to_user(uset, &set, sigsetsize))
3115 return -EFAULT;
3116
3117 return 0;
3118}
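/*
 * Hypothetical user-space view (illustrative): the libc sigpending()
 * wrapper funnels into this syscall. Blocking a signal, raising it and
 * then observing it as pending looks like:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		sigset_t block, pending;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, NULL);
 *		raise(SIGUSR1);			// queued, not delivered
 *
 *		sigpending(&pending);
 *		printf("SIGUSR1 pending: %d\n",
 *		       sigismember(&pending, SIGUSR1));	// prints 1
 *		return 0;
 *	}
 */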
3119
3120#ifdef CONFIG_COMPAT
3121COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3122 compat_size_t, sigsetsize)
3123{
3124 sigset_t set;
3125
3126 if (sigsetsize > sizeof(*uset))
3127 return -EINVAL;
3128
3129 do_sigpending(&set);
3130
3131 return put_compat_sigset(uset, &set, sigsetsize);
3132}
3133#endif
3134
3135static const struct {
3136 unsigned char limit, layout;
3137} sig_sicodes[] = {
3138 [SIGILL] = { NSIGILL, SIL_FAULT },
3139 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3140 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3141 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3142 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3143#if defined(SIGEMT)
3144 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3145#endif
3146 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3147 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3148 [SIGSYS] = { NSIGSYS, SIL_SYS },
3149};
3150
3151static bool known_siginfo_layout(unsigned sig, int si_code)
3152{
3153 if (si_code == SI_KERNEL)
3154 return true;
3155 else if (si_code > SI_USER) {
3156 if (sig_specific_sicodes(sig)) {
3157 if (si_code <= sig_sicodes[sig].limit)
3158 return true;
3159 }
3160 else if (si_code <= NSIGPOLL)
3161 return true;
3162 }
3163 else if (si_code >= SI_DETHREAD)
3164 return true;
3165 else if (si_code == SI_ASYNCNL)
3166 return true;
3167 return false;
3168}
3169
3170enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3171{
3172 enum siginfo_layout layout = SIL_KILL;
3173 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3174 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3175 (si_code <= sig_sicodes[sig].limit)) {
3176 layout = sig_sicodes[sig].layout;
3177 /* Handle the exceptions */
3178 if ((sig == SIGBUS) &&
3179 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3180 layout = SIL_FAULT_MCEERR;
3181 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3182 layout = SIL_FAULT_BNDERR;
3183#ifdef SEGV_PKUERR
3184 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3185 layout = SIL_FAULT_PKUERR;
3186#endif
3187 }
3188 else if (si_code <= NSIGPOLL)
3189 layout = SIL_POLL;
3190 } else {
3191 if (si_code == SI_TIMER)
3192 layout = SIL_TIMER;
3193 else if (si_code == SI_SIGIO)
3194 layout = SIL_POLL;
3195 else if (si_code < 0)
3196 layout = SIL_RT;
3197 }
3198 return layout;
3199}
3200
3201static inline char __user *si_expansion(const siginfo_t __user *info)
3202{
3203 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3204}
3205
3206int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3207{
3208 char __user *expansion = si_expansion(to);
3209 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3210 return -EFAULT;
3211 if (clear_user(expansion, SI_EXPANSION_SIZE))
3212 return -EFAULT;
3213 return 0;
3214}
3215
3216static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3217 const siginfo_t __user *from)
3218{
3219 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3220 char __user *expansion = si_expansion(from);
3221 char buf[SI_EXPANSION_SIZE];
3222 int i;
3223 /*
3224 * An unknown si_code might need more than
3225 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3226 * extra bytes are 0. This guarantees copy_siginfo_to_user
3227 * will return this data to userspace exactly.
3228 */
3229 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3230 return -EFAULT;
3231 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3232 if (buf[i] != 0)
3233 return -E2BIG;
3234 }
3235 }
3236 return 0;
3237}
3238
3239static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3240 const siginfo_t __user *from)
3241{
3242 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3243 return -EFAULT;
3244 to->si_signo = signo;
3245 return post_copy_siginfo_from_user(to, from);
3246}
3247
3248int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3249{
3250 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3251 return -EFAULT;
3252 return post_copy_siginfo_from_user(to, from);
3253}
3254
3255#ifdef CONFIG_COMPAT
3256/**
3257 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3258 * @to: compat siginfo destination
3259 * @from: kernel siginfo source
3260 *
3261 * Note: This function does not work properly for SIGCHLD on x32, but
3262 * fortunately it doesn't have to. The only valid callers of this function are
3263 * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3264 * The latter does not care because SIGCHLD will never cause a coredump.
3265 */
3266void copy_siginfo_to_external32(struct compat_siginfo *to,
3267 const struct kernel_siginfo *from)
3268{
3269 memset(to, 0, sizeof(*to));
3270
3271 to->si_signo = from->si_signo;
3272 to->si_errno = from->si_errno;
3273 to->si_code = from->si_code;
3274 switch (siginfo_layout(from->si_signo, from->si_code)) {
3275 case SIL_KILL:
3276 to->si_pid = from->si_pid;
3277 to->si_uid = from->si_uid;
3278 break;
3279 case SIL_TIMER:
3280 to->si_tid = from->si_tid;
3281 to->si_overrun = from->si_overrun;
3282 to->si_int = from->si_int;
3283 break;
3284 case SIL_POLL:
3285 to->si_band = from->si_band;
3286 to->si_fd = from->si_fd;
3287 break;
3288 case SIL_FAULT:
3289 to->si_addr = ptr_to_compat(from->si_addr);
3290#ifdef __ARCH_SI_TRAPNO
3291 to->si_trapno = from->si_trapno;
3292#endif
3293 break;
3294 case SIL_FAULT_MCEERR:
3295 to->si_addr = ptr_to_compat(from->si_addr);
3296#ifdef __ARCH_SI_TRAPNO
3297 to->si_trapno = from->si_trapno;
3298#endif
3299 to->si_addr_lsb = from->si_addr_lsb;
3300 break;
3301 case SIL_FAULT_BNDERR:
3302 to->si_addr = ptr_to_compat(from->si_addr);
3303#ifdef __ARCH_SI_TRAPNO
3304 to->si_trapno = from->si_trapno;
3305#endif
3306 to->si_lower = ptr_to_compat(from->si_lower);
3307 to->si_upper = ptr_to_compat(from->si_upper);
3308 break;
3309 case SIL_FAULT_PKUERR:
3310 to->si_addr = ptr_to_compat(from->si_addr);
3311#ifdef __ARCH_SI_TRAPNO
3312 to->si_trapno = from->si_trapno;
3313#endif
3314 to->si_pkey = from->si_pkey;
3315 break;
3316 case SIL_CHLD:
3317 to->si_pid = from->si_pid;
3318 to->si_uid = from->si_uid;
3319 to->si_status = from->si_status;
3320 to->si_utime = from->si_utime;
3321 to->si_stime = from->si_stime;
3322 break;
3323 case SIL_RT:
3324 to->si_pid = from->si_pid;
3325 to->si_uid = from->si_uid;
3326 to->si_int = from->si_int;
3327 break;
3328 case SIL_SYS:
3329 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3330 to->si_syscall = from->si_syscall;
3331 to->si_arch = from->si_arch;
3332 break;
3333 }
3334}
3335
3336int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3337 const struct kernel_siginfo *from)
3338{
3339 struct compat_siginfo new;
3340
3341 copy_siginfo_to_external32(&new, from);
3342 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3343 return -EFAULT;
3344 return 0;
3345}
3346
3347static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3348 const struct compat_siginfo *from)
3349{
3350 clear_siginfo(to);
3351 to->si_signo = from->si_signo;
3352 to->si_errno = from->si_errno;
3353 to->si_code = from->si_code;
3354 switch (siginfo_layout(from->si_signo, from->si_code)) {
3355 case SIL_KILL:
3356 to->si_pid = from->si_pid;
3357 to->si_uid = from->si_uid;
3358 break;
3359 case SIL_TIMER:
3360 to->si_tid = from->si_tid;
3361 to->si_overrun = from->si_overrun;
3362 to->si_int = from->si_int;
3363 break;
3364 case SIL_POLL:
3365 to->si_band = from->si_band;
3366 to->si_fd = from->si_fd;
3367 break;
3368 case SIL_FAULT:
3369 to->si_addr = compat_ptr(from->si_addr);
3370#ifdef __ARCH_SI_TRAPNO
3371 to->si_trapno = from->si_trapno;
3372#endif
3373 break;
3374 case SIL_FAULT_MCEERR:
3375 to->si_addr = compat_ptr(from->si_addr);
3376#ifdef __ARCH_SI_TRAPNO
3377 to->si_trapno = from->si_trapno;
3378#endif
3379 to->si_addr_lsb = from->si_addr_lsb;
3380 break;
3381 case SIL_FAULT_BNDERR:
3382 to->si_addr = compat_ptr(from->si_addr);
3383#ifdef __ARCH_SI_TRAPNO
3384 to->si_trapno = from->si_trapno;
3385#endif
3386 to->si_lower = compat_ptr(from->si_lower);
3387 to->si_upper = compat_ptr(from->si_upper);
3388 break;
3389 case SIL_FAULT_PKUERR:
3390 to->si_addr = compat_ptr(from->si_addr);
3391#ifdef __ARCH_SI_TRAPNO
3392 to->si_trapno = from->si_trapno;
3393#endif
3394 to->si_pkey = from->si_pkey;
3395 break;
3396 case SIL_CHLD:
3397 to->si_pid = from->si_pid;
3398 to->si_uid = from->si_uid;
3399 to->si_status = from->si_status;
3400#ifdef CONFIG_X86_X32_ABI
3401 if (in_x32_syscall()) {
3402 to->si_utime = from->_sifields._sigchld_x32._utime;
3403 to->si_stime = from->_sifields._sigchld_x32._stime;
3404 } else
3405#endif
3406 {
3407 to->si_utime = from->si_utime;
3408 to->si_stime = from->si_stime;
3409 }
3410 break;
3411 case SIL_RT:
3412 to->si_pid = from->si_pid;
3413 to->si_uid = from->si_uid;
3414 to->si_int = from->si_int;
3415 break;
3416 case SIL_SYS:
3417 to->si_call_addr = compat_ptr(from->si_call_addr);
3418 to->si_syscall = from->si_syscall;
3419 to->si_arch = from->si_arch;
3420 break;
3421 }
3422 return 0;
3423}
3424
3425static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3426 const struct compat_siginfo __user *ufrom)
3427{
3428 struct compat_siginfo from;
3429
3430 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3431 return -EFAULT;
3432
3433 from.si_signo = signo;
3434 return post_copy_siginfo_from_user32(to, &from);
3435}
3436
3437int copy_siginfo_from_user32(struct kernel_siginfo *to,
3438 const struct compat_siginfo __user *ufrom)
3439{
3440 struct compat_siginfo from;
3441
3442 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3443 return -EFAULT;
3444
3445 return post_copy_siginfo_from_user32(to, &from);
3446}
3447#endif /* CONFIG_COMPAT */
3448
3449/**
3450 * do_sigtimedwait - wait for queued signals specified in @which
3451 * @which: queued signals to wait for
3452 * @info: if non-null, the signal's siginfo is returned here
3453 * @ts: upper bound on process time suspension
3454 */
3455static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3456 const struct timespec64 *ts)
3457{
3458 ktime_t *to = NULL, timeout = KTIME_MAX;
3459 struct task_struct *tsk = current;
3460 sigset_t mask = *which;
3461 int sig, ret = 0;
3462
3463 if (ts) {
3464 if (!timespec64_valid(ts))
3465 return -EINVAL;
3466 timeout = timespec64_to_ktime(*ts);
3467 to = &timeout;
3468 }
3469
3470 /*
3471 * Invert the set of allowed signals to get those we want to block.
3472 */
3473 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3474 signotset(&mask);
3475
3476 spin_lock_irq(&tsk->sighand->siglock);
3477 sig = dequeue_signal(tsk, &mask, info);
3478 if (!sig && timeout) {
3479 /*
3480 * None ready; temporarily unblock the signals we're interested
3481 * in while we sleep, so that we'll be awakened when they
3482 * arrive. Unblocking is always fine; we can avoid
3483 * set_current_blocked().
3484 */
3485 tsk->real_blocked = tsk->blocked;
3486 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3487 recalc_sigpending();
3488 spin_unlock_irq(&tsk->sighand->siglock);
3489
3490 __set_current_state(TASK_INTERRUPTIBLE);
3491 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3492 HRTIMER_MODE_REL);
3493 spin_lock_irq(&tsk->sighand->siglock);
3494 __set_task_blocked(tsk, &tsk->real_blocked);
3495 sigemptyset(&tsk->real_blocked);
3496 sig = dequeue_signal(tsk, &mask, info);
3497 }
3498 spin_unlock_irq(&tsk->sighand->siglock);
3499
3500 if (sig)
3501 return sig;
3502 return ret ? -EINTR : -EAGAIN;
3503}
3504
3505/**
3506 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3507 * in @uthese
3508 * @uthese: queued signals to wait for
3509 * @uinfo: if non-null, the signal's siginfo is returned here
3510 * @uts: upper bound on process time suspension
3511 * @sigsetsize: size of sigset_t type
3512 */
3513SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3514 siginfo_t __user *, uinfo,
3515 const struct __kernel_timespec __user *, uts,
3516 size_t, sigsetsize)
3517{
3518 sigset_t these;
3519 struct timespec64 ts;
3520 kernel_siginfo_t info;
3521 int ret;
3522
3523 /* XXX: Don't preclude handling different sized sigset_t's. */
3524 if (sigsetsize != sizeof(sigset_t))
3525 return -EINVAL;
3526
3527 if (copy_from_user(&these, uthese, sizeof(these)))
3528 return -EFAULT;
3529
3530 if (uts) {
3531 if (get_timespec64(&ts, uts))
3532 return -EFAULT;
3533 }
3534
3535 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3536
3537 if (ret > 0 && uinfo) {
3538 if (copy_siginfo_to_user(uinfo, &info))
3539 ret = -EFAULT;
3540 }
3541
3542 return ret;
3543}
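/*
 * Hypothetical user-space usage (illustrative): the libc sigtimedwait()
 * wrapper ends up here. The canonical pattern is to block the signal
 * first so it stays queued instead of invoking a handler:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec ts = { .tv_sec = 5 };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);	// keep it queued
 *
 *		if (sigtimedwait(&set, &info, &ts) < 0)
 *			perror("sigtimedwait");	// EAGAIN on timeout
 *		else
 *			printf("signal from pid %d\n", (int)info.si_pid);
 *		return 0;
 *	}
 */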
3544
3545#ifdef CONFIG_COMPAT_32BIT_TIME
3546SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3547 siginfo_t __user *, uinfo,
3548 const struct old_timespec32 __user *, uts,
3549 size_t, sigsetsize)
3550{
3551 sigset_t these;
3552 struct timespec64 ts;
3553 kernel_siginfo_t info;
3554 int ret;
3555
3556 if (sigsetsize != sizeof(sigset_t))
3557 return -EINVAL;
3558
3559 if (copy_from_user(&these, uthese, sizeof(these)))
3560 return -EFAULT;
3561
3562 if (uts) {
3563 if (get_old_timespec32(&ts, uts))
3564 return -EFAULT;
3565 }
3566
3567 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3568
3569 if (ret > 0 && uinfo) {
3570 if (copy_siginfo_to_user(uinfo, &info))
3571 ret = -EFAULT;
3572 }
3573
3574 return ret;
3575}
3576#endif
3577
3578#ifdef CONFIG_COMPAT
3579COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3580 struct compat_siginfo __user *, uinfo,
3581 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3582{
3583 sigset_t s;
3584 struct timespec64 t;
3585 kernel_siginfo_t info;
3586 long ret;
3587
3588 if (sigsetsize != sizeof(sigset_t))
3589 return -EINVAL;
3590
3591 if (get_compat_sigset(&s, uthese))
3592 return -EFAULT;
3593
3594 if (uts) {
3595 if (get_timespec64(&t, uts))
3596 return -EFAULT;
3597 }
3598
3599 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3600
3601 if (ret > 0 && uinfo) {
3602 if (copy_siginfo_to_user32(uinfo, &info))
3603 ret = -EFAULT;
3604 }
3605
3606 return ret;
3607}
3608
3609#ifdef CONFIG_COMPAT_32BIT_TIME
3610COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3611 struct compat_siginfo __user *, uinfo,
3612 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3613{
3614 sigset_t s;
3615 struct timespec64 t;
3616 kernel_siginfo_t info;
3617 long ret;
3618
3619 if (sigsetsize != sizeof(sigset_t))
3620 return -EINVAL;
3621
3622 if (get_compat_sigset(&s, uthese))
3623 return -EFAULT;
3624
3625 if (uts) {
3626 if (get_old_timespec32(&t, uts))
3627 return -EFAULT;
3628 }
3629
3630 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3631
3632 if (ret > 0 && uinfo) {
3633 if (copy_siginfo_to_user32(uinfo, &info))
3634 ret = -EFAULT;
3635 }
3636
3637 return ret;
3638}
3639#endif
3640#endif
3641
3642static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3643{
3644 clear_siginfo(info);
3645 info->si_signo = sig;
3646 info->si_errno = 0;
3647 info->si_code = SI_USER;
3648 info->si_pid = task_tgid_vnr(current);
3649 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3650}
3651
3652/**
3653 * sys_kill - send a signal to a process
3654 * @pid: the PID of the process
3655 * @sig: signal to be sent
3656 */
3657SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3658{
3659 struct kernel_siginfo info;
3660
3661 prepare_kill_siginfo(sig, &info);
3662
3663 return kill_something_info(sig, &info, pid);
3664}
3665
3666/*
3667 * Verify that the signaler and signalee either are in the same pid namespace
3668 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3669 * namespace.
3670 */
3671static bool access_pidfd_pidns(struct pid *pid)
3672{
3673 struct pid_namespace *active = task_active_pid_ns(current);
3674 struct pid_namespace *p = ns_of_pid(pid);
3675
3676 for (;;) {
3677 if (!p)
3678 return false;
3679 if (p == active)
3680 break;
3681 p = p->parent;
3682 }
3683
3684 return true;
3685}
3686
3687static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
3688{
3689#ifdef CONFIG_COMPAT
3690 /*
3691 * Avoid hooking up compat syscalls and instead handle necessary
3692 * conversions here. Note, this is a stop-gap measure and should not be
3693 * considered a generic solution.
3694 */
3695 if (in_compat_syscall())
3696 return copy_siginfo_from_user32(
3697 kinfo, (struct compat_siginfo __user *)info);
3698#endif
3699 return copy_siginfo_from_user(kinfo, info);
3700}
3701
3702static struct pid *pidfd_to_pid(const struct file *file)
3703{
3704 struct pid *pid;
3705
3706 pid = pidfd_pid(file);
3707 if (!IS_ERR(pid))
3708 return pid;
3709
3710 return tgid_pidfd_to_pid(file);
3711}
3712
3713/**
3714 * sys_pidfd_send_signal - Signal a process through a pidfd
3715 * @pidfd: file descriptor of the process
3716 * @sig: signal to send
3717 * @info: signal info
3718 * @flags: future flags
3719 *
3720 * The syscall currently only signals via PIDTYPE_PID which covers
3721 * kill(<positive-pid>, <signal>). It does not signal threads or process
3722 * groups.
3723 * In order to extend the syscall to threads and process groups the @flags
3724 * argument should be used. In essence, the @flags argument will determine
3725 * what is signaled and not the file descriptor itself. In other words,
3726 * grouping is a property of the flags argument, not a property of the file
3727 * descriptor.
3728 *
3729 * Return: 0 on success, negative errno on failure
3730 */
3731SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3732 siginfo_t __user *, info, unsigned int, flags)
3733{
3734 int ret;
3735 struct fd f;
3736 struct pid *pid;
3737 kernel_siginfo_t kinfo;
3738
3739 /* Enforce that flags is 0 until we add an extension. */
3740 if (flags)
3741 return -EINVAL;
3742
3743 f = fdget(pidfd);
3744 if (!f.file)
3745 return -EBADF;
3746
3747 /* Is this a pidfd? */
3748 pid = pidfd_to_pid(f.file);
3749 if (IS_ERR(pid)) {
3750 ret = PTR_ERR(pid);
3751 goto err;
3752 }
3753
3754 ret = -EINVAL;
3755 if (!access_pidfd_pidns(pid))
3756 goto err;
3757
3758 if (info) {
3759 ret = copy_siginfo_from_user_any(&kinfo, info);
3760 if (unlikely(ret))
3761 goto err;
3762
3763 ret = -EINVAL;
3764 if (unlikely(sig != kinfo.si_signo))
3765 goto err;
3766
3767 /* Only allow sending arbitrary signals to yourself. */
3768 ret = -EPERM;
3769 if ((task_pid(current) != pid) &&
3770 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3771 goto err;
3772 } else {
3773 prepare_kill_siginfo(sig, &kinfo);
3774 }
3775
3776 ret = kill_pid_info(sig, &kinfo, pid);
3777
3778err:
3779 fdput(f);
3780 return ret;
3781}
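/*
 * Hypothetical user-space usage (illustrative): where libc has no wrapper
 * yet, the raw syscall can be combined with pidfd_open(2). The syscall
 * numbers are assumed to be exposed by the toolchain headers.
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int send_sig_via_pidfd(pid_t pid, int sig)
 *	{
 *		int ret, pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *		if (pidfd < 0)
 *			return -1;
 *		// NULL info: the kernel fills in SI_USER data, as for kill(2).
 *		ret = syscall(SYS_pidfd_send_signal, pidfd, sig, NULL, 0);
 *		close(pidfd);
 *		return ret;
 *	}
 *
 * Because the pidfd pins the struct pid, this cannot race with PID reuse
 * the way a bare kill(pid, sig) can.
 */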
3782
3783static int
3784do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3785{
3786 struct task_struct *p;
3787 int error = -ESRCH;
3788
3789 rcu_read_lock();
3790 p = find_task_by_vpid(pid);
3791 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3792 error = check_kill_permission(sig, info, p);
3793 /*
3794 * The null signal is a permissions and process existence
3795 * probe. No signal is actually delivered.
3796 */
3797 if (!error && sig) {
3798 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3799 /*
3800 * If lock_task_sighand() failed we pretend the task
3801 * dies after receiving the signal. The window is tiny,
3802 * and the signal is private anyway.
3803 */
3804 if (unlikely(error == -ESRCH))
3805 error = 0;
3806 }
3807 }
3808 rcu_read_unlock();
3809
3810 return error;
3811}
3812
3813static int do_tkill(pid_t tgid, pid_t pid, int sig)
3814{
3815 struct kernel_siginfo info;
3816
3817 clear_siginfo(&info);
3818 info.si_signo = sig;
3819 info.si_errno = 0;
3820 info.si_code = SI_TKILL;
3821 info.si_pid = task_tgid_vnr(current);
3822 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3823
3824 return do_send_specific(tgid, pid, sig, &info);
3825}
3826
3827/**
3828 * sys_tgkill - send signal to one specific thread
3829 * @tgid: the thread group ID of the thread
3830 * @pid: the PID of the thread
3831 * @sig: signal to be sent
3832 *
3833 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3834 * exists but no longer belongs to the target process. This
3835 * method solves the problem of threads exiting and PIDs getting reused.
3836 */
3837SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3838{
3839 /* This is only valid for single tasks */
3840 if (pid <= 0 || tgid <= 0)
3841 return -EINVAL;
3842
3843 return do_tkill(tgid, pid, sig);
3844}
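/*
 * Hypothetical user-space usage (illustrative): glibc exposes this as
 * tgkill(), and pthread_kill() is built on top of it:
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// Signal one specific thread of the current process.
 *	static int poke_thread(pid_t tid)
 *	{
 *		return syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 *	}
 *
 * Passing the tgid alongside the tid is what keeps the signal from
 * landing on an unrelated task if the tid has been recycled.
 */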
3845
3846/**
3847 * sys_tkill - send signal to one specific task
3848 * @pid: the PID of the task
3849 * @sig: signal to be sent
3850 *
3851 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3852 */
3853SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3854{
3855 /* This is only valid for single tasks */
3856 if (pid <= 0)
3857 return -EINVAL;
3858
3859 return do_tkill(0, pid, sig);
3860}
3861
3862static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3863{
3864 /* Not even root can pretend to send signals from the kernel.
3865 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3866 */
3867 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3868 (task_pid_vnr(current) != pid))
3869 return -EPERM;
3870
3871 /* POSIX.1b doesn't mention process groups. */
3872 return kill_proc_info(sig, info, pid);
3873}
3874
3875/**
3876 * sys_rt_sigqueueinfo - queue a signal and its payload data to a process
3877 * @pid: the PID of the process
3878 * @sig: signal to be sent
3879 * @uinfo: signal info to be sent
3880 */
3881SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3882 siginfo_t __user *, uinfo)
3883{
3884 kernel_siginfo_t info;
3885 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3886 if (unlikely(ret))
3887 return ret;
3888 return do_rt_sigqueueinfo(pid, sig, &info);
3889}
3890
3891#ifdef CONFIG_COMPAT
3892COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3893 compat_pid_t, pid,
3894 int, sig,
3895 struct compat_siginfo __user *, uinfo)
3896{
3897 kernel_siginfo_t info;
3898 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3899 if (unlikely(ret))
3900 return ret;
3901 return do_rt_sigqueueinfo(pid, sig, &info);
3902}
3903#endif
3904
3905static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3906{
3907 /* This is only valid for single tasks */
3908 if (pid <= 0 || tgid <= 0)
3909 return -EINVAL;
3910
3911 /* Not even root can pretend to send signals from the kernel.
3912 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3913 */
3914 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3915 (task_pid_vnr(current) != pid))
3916 return -EPERM;
3917
3918 return do_send_specific(tgid, pid, sig, info);
3919}
3920
3921SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3922 siginfo_t __user *, uinfo)
3923{
3924 kernel_siginfo_t info;
3925 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3926 if (unlikely(ret))
3927 return ret;
3928 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3929}
3930
3931#ifdef CONFIG_COMPAT
3932COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3933 compat_pid_t, tgid,
3934 compat_pid_t, pid,
3935 int, sig,
3936 struct compat_siginfo __user *, uinfo)
3937{
3938 kernel_siginfo_t info;
3939 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3940 if (unlikely(ret))
3941 return ret;
3942 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3943}
3944#endif
3945
3946/*
3947 * For kthreads only; must not be used if the thread was cloned with CLONE_SIGHAND.
3948 */
3949void kernel_sigaction(int sig, __sighandler_t action)
3950{
3951 spin_lock_irq(&current->sighand->siglock);
3952 current->sighand->action[sig - 1].sa.sa_handler = action;
3953 if (action == SIG_IGN) {
3954 sigset_t mask;
3955
3956 sigemptyset(&mask);
3957 sigaddset(&mask, sig);
3958
3959 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3960 flush_sigqueue_mask(&mask, &current->pending);
3961 recalc_sigpending();
3962 }
3963 spin_unlock_irq(&current->sighand->siglock);
3964}
3965EXPORT_SYMBOL(kernel_sigaction);
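/*
 * A minimal in-kernel sketch (illustrative): kthreads normally reach this
 * through the allow_signal()/disallow_signal() wrappers rather than
 * calling kernel_sigaction() directly, e.g.
 *
 *	allow_signal(SIGTERM);		// wrapper around kernel_sigaction()
 *	while (!kthread_should_stop()) {
 *		...
 *		if (signal_pending(current))
 *			flush_signals(current);	// consume and carry on
 *	}
 *	disallow_signal(SIGTERM);	// back to ignoring it
 */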
3966
3967void __weak sigaction_compat_abi(struct k_sigaction *act,
3968 struct k_sigaction *oact)
3969{
3970}
3971
3972int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3973{
3974 struct task_struct *p = current, *t;
3975 struct k_sigaction *k;
3976 sigset_t mask;
3977
3978 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3979 return -EINVAL;
3980
3981 k = &p->sighand->action[sig-1];
3982
3983 spin_lock_irq(&p->sighand->siglock);
3984 if (oact)
3985 *oact = *k;
3986
3987 sigaction_compat_abi(act, oact);
3988
3989 if (act) {
3990 sigdelsetmask(&act->sa.sa_mask,
3991 sigmask(SIGKILL) | sigmask(SIGSTOP));
3992 *k = *act;
3993 /*
3994 * POSIX 3.3.1.3:
3995 * "Setting a signal action to SIG_IGN for a signal that is
3996 * pending shall cause the pending signal to be discarded,
3997 * whether or not it is blocked."
3998 *
3999 * "Setting a signal action to SIG_DFL for a signal that is
4000 * pending and whose default action is to ignore the signal
4001 * (for example, SIGCHLD), shall cause the pending signal to
4002 * be discarded, whether or not it is blocked"
4003 */
4004 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4005 sigemptyset(&mask);
4006 sigaddset(&mask, sig);
4007 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4008 for_each_thread(p, t)
4009 flush_sigqueue_mask(&mask, &t->pending);
4010 }
4011 }
4012
4013 spin_unlock_irq(&p->sighand->siglock);
4014 return 0;
4015}
4016
4017static int
4018do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4019 size_t min_ss_size)
4020{
4021 struct task_struct *t = current;
4022
4023 if (oss) {
4024 memset(oss, 0, sizeof(stack_t));
4025 oss->ss_sp = (void __user *) t->sas_ss_sp;
4026 oss->ss_size = t->sas_ss_size;
4027 oss->ss_flags = sas_ss_flags(sp) |
4028 (current->sas_ss_flags & SS_FLAG_BITS);
4029 }
4030
4031 if (ss) {
4032 void __user *ss_sp = ss->ss_sp;
4033 size_t ss_size = ss->ss_size;
4034 unsigned ss_flags = ss->ss_flags;
4035 int ss_mode;
4036
4037 if (unlikely(on_sig_stack(sp)))
4038 return -EPERM;
4039
4040 ss_mode = ss_flags & ~SS_FLAG_BITS;
4041 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4042 ss_mode != 0))
4043 return -EINVAL;
4044
4045 if (ss_mode == SS_DISABLE) {
4046 ss_size = 0;
4047 ss_sp = NULL;
4048 } else {
4049 if (unlikely(ss_size < min_ss_size))
4050 return -ENOMEM;
4051 }
4052
4053 t->sas_ss_sp = (unsigned long) ss_sp;
4054 t->sas_ss_size = ss_size;
4055 t->sas_ss_flags = ss_flags;
4056 }
4057 return 0;
4058}
4059
4060SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4061{
4062 stack_t new, old;
4063 int err;
4064 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4065 return -EFAULT;
4066 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4067 current_user_stack_pointer(),
4068 MINSIGSTKSZ);
4069 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4070 err = -EFAULT;
4071 return err;
4072}
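/*
 * Hypothetical user-space usage (illustrative): an alternate stack is the
 * standard way to survive a stack-overflow SIGSEGV, paired with SA_ONSTACK:
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	static void on_segv(int sig) { abort(); }
 *
 *	int main(void)
 *	{
 *		stack_t ss = {
 *			.ss_sp = malloc(SIGSTKSZ),
 *			.ss_size = SIGSTKSZ,
 *			.ss_flags = 0,
 *		};
 *		struct sigaction sa = {
 *			.sa_handler = on_segv,
 *			.sa_flags = SA_ONSTACK,	// run the handler on ss
 *		};
 *
 *		sigaltstack(&ss, NULL);
 *		sigaction(SIGSEGV, &sa, NULL);
 *		...
 *	}
 */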
4073
4074int restore_altstack(const stack_t __user *uss)
4075{
4076 stack_t new;
4077 if (copy_from_user(&new, uss, sizeof(stack_t)))
4078 return -EFAULT;
4079 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4080 MINSIGSTKSZ);
4081 /* squash all but EFAULT for now */
4082 return 0;
4083}
4084
4085int __save_altstack(stack_t __user *uss, unsigned long sp)
4086{
4087 struct task_struct *t = current;
4088 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4089 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4090 __put_user(t->sas_ss_size, &uss->ss_size);
4091 if (err)
4092 return err;
4093 if (t->sas_ss_flags & SS_AUTODISARM)
4094 sas_ss_reset(t);
4095 return 0;
4096}
4097
4098#ifdef CONFIG_COMPAT
4099static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4100 compat_stack_t __user *uoss_ptr)
4101{
4102 stack_t uss, uoss;
4103 int ret;
4104
4105 if (uss_ptr) {
4106 compat_stack_t uss32;
4107 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4108 return -EFAULT;
4109 uss.ss_sp = compat_ptr(uss32.ss_sp);
4110 uss.ss_flags = uss32.ss_flags;
4111 uss.ss_size = uss32.ss_size;
4112 }
4113 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4114 compat_user_stack_pointer(),
4115 COMPAT_MINSIGSTKSZ);
4116 if (ret >= 0 && uoss_ptr) {
4117 compat_stack_t old;
4118 memset(&old, 0, sizeof(old));
4119 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4120 old.ss_flags = uoss.ss_flags;
4121 old.ss_size = uoss.ss_size;
4122 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4123 ret = -EFAULT;
4124 }
4125 return ret;
4126}
4127
4128COMPAT_SYSCALL_DEFINE2(sigaltstack,
4129 const compat_stack_t __user *, uss_ptr,
4130 compat_stack_t __user *, uoss_ptr)
4131{
4132 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4133}
4134
4135int compat_restore_altstack(const compat_stack_t __user *uss)
4136{
4137 int err = do_compat_sigaltstack(uss, NULL);
4138 /* squash all but -EFAULT for now */
4139 return err == -EFAULT ? err : 0;
4140}
4141
4142int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4143{
4144 int err;
4145 struct task_struct *t = current;
4146 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4147 &uss->ss_sp) |
4148 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4149 __put_user(t->sas_ss_size, &uss->ss_size);
4150 if (err)
4151 return err;
4152 if (t->sas_ss_flags & SS_AUTODISARM)
4153 sas_ss_reset(t);
4154 return 0;
4155}
4156#endif
4157
4158#ifdef __ARCH_WANT_SYS_SIGPENDING
4159
4160/**
4161 * sys_sigpending - examine pending signals
4162 * @uset: where the mask of pending signals is returned
4163 */
4164SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4165{
4166 sigset_t set;
4167
4168 if (sizeof(old_sigset_t) > sizeof(*uset))
4169 return -EINVAL;
4170
4171 do_sigpending(&set);
4172
4173 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4174 return -EFAULT;
4175
4176 return 0;
4177}
4178
4179#ifdef CONFIG_COMPAT
4180COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4181{
4182 sigset_t set;
4183
4184 do_sigpending(&set);
4185
4186 return put_user(set.sig[0], set32);
4187}
4188#endif
4189
4190#endif
4191
4192#ifdef __ARCH_WANT_SYS_SIGPROCMASK
4193/**
4194 * sys_sigprocmask - examine and change blocked signals
4195 * @how: whether to add, remove, or set signals
4196 * @nset: signals to add or remove (if non-null)
4197 * @oset: previous value of signal mask if non-null
4198 *
4199 * Some platforms have their own version with special arguments;
4200 * others support only sys_rt_sigprocmask.
4201 */
4202
4203SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4204 old_sigset_t __user *, oset)
4205{
4206 old_sigset_t old_set, new_set;
4207 sigset_t new_blocked;
4208
4209 old_set = current->blocked.sig[0];
4210
4211 if (nset) {
4212 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4213 return -EFAULT;
4214
4215 new_blocked = current->blocked;
4216
4217 switch (how) {
4218 case SIG_BLOCK:
4219 sigaddsetmask(&new_blocked, new_set);
4220 break;
4221 case SIG_UNBLOCK:
4222 sigdelsetmask(&new_blocked, new_set);
4223 break;
4224 case SIG_SETMASK:
4225 new_blocked.sig[0] = new_set;
4226 break;
4227 default:
4228 return -EINVAL;
4229 }
4230
4231 set_current_blocked(&new_blocked);
4232 }
4233
4234 if (oset) {
4235 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4236 return -EFAULT;
4237 }
4238
4239 return 0;
4240}
4241#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4242
4243#ifndef CONFIG_ODD_RT_SIGACTION
4244/**
4245 * sys_rt_sigaction - alter an action taken by a process
4246 * @sig: signal whose action is to be altered
4247 * @act: new sigaction
4248 * @oact: used to save the previous sigaction
4249 * @sigsetsize: size of sigset_t type
4250 */
4251SYSCALL_DEFINE4(rt_sigaction, int, sig,
4252 const struct sigaction __user *, act,
4253 struct sigaction __user *, oact,
4254 size_t, sigsetsize)
4255{
4256 struct k_sigaction new_sa, old_sa;
4257 int ret;
4258
4259 /* XXX: Don't preclude handling different sized sigset_t's. */
4260 if (sigsetsize != sizeof(sigset_t))
4261 return -EINVAL;
4262
4263 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4264 return -EFAULT;
4265
4266 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4267 if (ret)
4268 return ret;
4269
4270 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4271 return -EFAULT;
4272
4273 return 0;
4274}
4275#ifdef CONFIG_COMPAT
4276COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4277 const struct compat_sigaction __user *, act,
4278 struct compat_sigaction __user *, oact,
4279 compat_size_t, sigsetsize)
4280{
4281 struct k_sigaction new_ka, old_ka;
4282#ifdef __ARCH_HAS_SA_RESTORER
4283 compat_uptr_t restorer;
4284#endif
4285 int ret;
4286
4287 /* XXX: Don't preclude handling different sized sigset_t's. */
4288 if (sigsetsize != sizeof(compat_sigset_t))
4289 return -EINVAL;
4290
4291 if (act) {
4292 compat_uptr_t handler;
4293 ret = get_user(handler, &act->sa_handler);
4294 new_ka.sa.sa_handler = compat_ptr(handler);
4295#ifdef __ARCH_HAS_SA_RESTORER
4296 ret |= get_user(restorer, &act->sa_restorer);
4297 new_ka.sa.sa_restorer = compat_ptr(restorer);
4298#endif
4299 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4300 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4301 if (ret)
4302 return -EFAULT;
4303 }
4304
4305 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4306 if (!ret && oact) {
4307 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4308 &oact->sa_handler);
4309 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4310 sizeof(oact->sa_mask));
4311 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4312#ifdef __ARCH_HAS_SA_RESTORER
4313 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4314 &oact->sa_restorer);
4315#endif
4316 }
4317 return ret;
4318}
4319#endif
4320#endif /* !CONFIG_ODD_RT_SIGACTION */
4321
4322#ifdef CONFIG_OLD_SIGACTION
4323SYSCALL_DEFINE3(sigaction, int, sig,
4324 const struct old_sigaction __user *, act,
4325 struct old_sigaction __user *, oact)
4326{
4327 struct k_sigaction new_ka, old_ka;
4328 int ret;
4329
4330 if (act) {
4331 old_sigset_t mask;
4332 if (!access_ok(act, sizeof(*act)) ||
4333 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4334 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4335 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4336 __get_user(mask, &act->sa_mask))
4337 return -EFAULT;
4338#ifdef __ARCH_HAS_KA_RESTORER
4339 new_ka.ka_restorer = NULL;
4340#endif
4341 siginitset(&new_ka.sa.sa_mask, mask);
4342 }
4343
4344 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4345
4346 if (!ret && oact) {
4347 if (!access_ok(oact, sizeof(*oact)) ||
4348 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4349 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4350 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4351 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4352 return -EFAULT;
4353 }
4354
4355 return ret;
4356}
4357#endif
4358#ifdef CONFIG_COMPAT_OLD_SIGACTION
4359COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4360 const struct compat_old_sigaction __user *, act,
4361 struct compat_old_sigaction __user *, oact)
4362{
4363 struct k_sigaction new_ka, old_ka;
4364 int ret;
4365 compat_old_sigset_t mask;
4366 compat_uptr_t handler, restorer;
4367
4368 if (act) {
4369 if (!access_ok(act, sizeof(*act)) ||
4370 __get_user(handler, &act->sa_handler) ||
4371 __get_user(restorer, &act->sa_restorer) ||
4372 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4373 __get_user(mask, &act->sa_mask))
4374 return -EFAULT;
4375
4376#ifdef __ARCH_HAS_KA_RESTORER
4377 new_ka.ka_restorer = NULL;
4378#endif
4379 new_ka.sa.sa_handler = compat_ptr(handler);
4380 new_ka.sa.sa_restorer = compat_ptr(restorer);
4381 siginitset(&new_ka.sa.sa_mask, mask);
4382 }
4383
4384 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4385
4386 if (!ret && oact) {
4387 if (!access_ok(oact, sizeof(*oact)) ||
4388 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4389 &oact->sa_handler) ||
4390 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4391 &oact->sa_restorer) ||
4392 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4393 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4394 return -EFAULT;
4395 }
4396 return ret;
4397}
4398#endif
4399
4400#ifdef CONFIG_SGETMASK_SYSCALL
4401
4402/*
4403 * For backwards compatibility. Functionality superseded by sigprocmask.
4404 */
4405SYSCALL_DEFINE0(sgetmask)
4406{
4407 /* SMP safe */
4408 return current->blocked.sig[0];
4409}
4410
4411SYSCALL_DEFINE1(ssetmask, int, newmask)
4412{
4413 int old = current->blocked.sig[0];
4414 sigset_t newset;
4415
4416 siginitset(&newset, newmask);
4417 set_current_blocked(&newset);
4418
4419 return old;
4420}
4421#endif /* CONFIG_SGETMASK_SYSCALL */
4422
4423#ifdef __ARCH_WANT_SYS_SIGNAL
4424/*
4425 * For backwards compatibility. Functionality superseded by sigaction.
4426 */
4427SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4428{
4429 struct k_sigaction new_sa, old_sa;
4430 int ret;
4431
4432 new_sa.sa.sa_handler = handler;
4433 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4434 sigemptyset(&new_sa.sa.sa_mask);
4435
4436 ret = do_sigaction(sig, &new_sa, &old_sa);
4437
4438 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4439}
4440#endif /* __ARCH_WANT_SYS_SIGNAL */
4441
4442#ifdef __ARCH_WANT_SYS_PAUSE
4443
4444SYSCALL_DEFINE0(pause)
4445{
4446 while (!signal_pending(current)) {
4447 __set_current_state(TASK_INTERRUPTIBLE);
4448 schedule();
4449 }
4450 return -ERESTARTNOHAND;
4451}
4452
4453#endif
4454
4455static int sigsuspend(sigset_t *set)
4456{
4457 current->saved_sigmask = current->blocked;
4458 set_current_blocked(set);
4459
4460 while (!signal_pending(current)) {
4461 __set_current_state(TASK_INTERRUPTIBLE);
4462 schedule();
4463 }
4464 set_restore_sigmask();
4465 return -ERESTARTNOHAND;
4466}
4467
4468/**
4469 * sys_rt_sigsuspend - replace the signal mask with the @unewset
4470 * value until a signal is received
4471 * @unewset: new signal mask value
4472 * @sigsetsize: size of sigset_t type
4473 */
4474SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4475{
4476 sigset_t newset;
4477
4478 /* XXX: Don't preclude handling different sized sigset_t's. */
4479 if (sigsetsize != sizeof(sigset_t))
4480 return -EINVAL;
4481
4482 if (copy_from_user(&newset, unewset, sizeof(newset)))
4483 return -EFAULT;
4484 return sigsuspend(&newset);
4485}
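/*
 * Hypothetical user-space usage (illustrative): sigsuspend() exists to
 * close the classic check-then-wait race. The flag is tested while the
 * signal is blocked, and unblocking and sleeping happen atomically
 * (got_usr1 is assumed to be set by an installed SIGUSR1 handler):
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *
 *	static void wait_for_usr1(void)
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *
 *		while (!got_usr1)
 *			sigsuspend(&old);	// atomically unblock + sleep
 *
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 *
 * The naive "while (!got_usr1) pause();" loses a wakeup if the signal
 * arrives between the test and the pause(2) call.
 */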
4486
4487#ifdef CONFIG_COMPAT
4488COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4489{
4490 sigset_t newset;
4491
4492 /* XXX: Don't preclude handling different sized sigset_t's. */
4493 if (sigsetsize != sizeof(sigset_t))
4494 return -EINVAL;
4495
4496 if (get_compat_sigset(&newset, unewset))
4497 return -EFAULT;
4498 return sigsuspend(&newset);
4499}
4500#endif
4501
4502#ifdef CONFIG_OLD_SIGSUSPEND
4503SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4504{
4505 sigset_t blocked;
4506 siginitset(&blocked, mask);
4507 return sigsuspend(&blocked);
4508}
4509#endif
4510#ifdef CONFIG_OLD_SIGSUSPEND3
4511SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4512{
4513 sigset_t blocked;
4514 siginitset(&blocked, mask);
4515 return sigsuspend(&blocked);
4516}
4517#endif
4518
4519__weak const char *arch_vma_name(struct vm_area_struct *vma)
4520{
4521 return NULL;
4522}
4523
4524static inline void siginfo_buildtime_checks(void)
4525{
4526 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4527
4528 /* Verify the offsets in the two siginfos match */
4529#define CHECK_OFFSET(field) \
4530 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4531
4532 /* kill */
4533 CHECK_OFFSET(si_pid);
4534 CHECK_OFFSET(si_uid);
4535
4536 /* timer */
4537 CHECK_OFFSET(si_tid);
4538 CHECK_OFFSET(si_overrun);
4539 CHECK_OFFSET(si_value);
4540
4541 /* rt */
4542 CHECK_OFFSET(si_pid);
4543 CHECK_OFFSET(si_uid);
4544 CHECK_OFFSET(si_value);
4545
4546 /* sigchld */
4547 CHECK_OFFSET(si_pid);
4548 CHECK_OFFSET(si_uid);
4549 CHECK_OFFSET(si_status);
4550 CHECK_OFFSET(si_utime);
4551 CHECK_OFFSET(si_stime);
4552
4553 /* sigfault */
4554 CHECK_OFFSET(si_addr);
4555 CHECK_OFFSET(si_addr_lsb);
4556 CHECK_OFFSET(si_lower);
4557 CHECK_OFFSET(si_upper);
4558 CHECK_OFFSET(si_pkey);
4559
4560 /* sigpoll */
4561 CHECK_OFFSET(si_band);
4562 CHECK_OFFSET(si_fd);
4563
4564 /* sigsys */
4565 CHECK_OFFSET(si_call_addr);
4566 CHECK_OFFSET(si_syscall);
4567 CHECK_OFFSET(si_arch);
4568#undef CHECK_OFFSET
4569
4570 /* usb asyncio */
4571 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4572 offsetof(struct siginfo, si_addr));
4573 if (sizeof(int) == sizeof(void __user *)) {
4574 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4575 sizeof(void __user *));
4576 } else {
4577 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4578 sizeof_field(struct siginfo, si_uid)) !=
4579 sizeof(void __user *));
4580 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4581 offsetof(struct siginfo, si_uid));
4582 }
4583#ifdef CONFIG_COMPAT
4584 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4585 offsetof(struct compat_siginfo, si_addr));
4586 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4587 sizeof(compat_uptr_t));
4588 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4589 sizeof_field(struct siginfo, si_pid));
4590#endif
4591}
4592
4593void __init signals_init(void)
4594{
4595 siginfo_buildtime_checks();
4596
4597 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4598}
4599
4600#ifdef CONFIG_KGDB_KDB
4601#include <linux/kdb.h>
4602/*
4603 * kdb_send_sig - Allows kdb to send signals without exposing
4604 * signal internals. This function checks if the required locks are
4605 * available before calling the main signal code, to avoid kdb
4606 * deadlocks.
4607 */
4608void kdb_send_sig(struct task_struct *t, int sig)
4609{
4610 static struct task_struct *kdb_prev_t;
4611 int new_t, ret;
4612 if (!spin_trylock(&t->sighand->siglock)) {
4613 kdb_printf("Can't do kill command now.\n"
4614 "The sigmask lock is held somewhere else in "
4615 "kernel, try again later\n");
4616 return;
4617 }
4618 new_t = kdb_prev_t != t;
4619 kdb_prev_t = t;
4620 if (t->state != TASK_RUNNING && new_t) {
4621 spin_unlock(&t->sighand->siglock);
4622 kdb_printf("Process is not RUNNING, sending a signal from "
4623 "kdb risks deadlock\n"
4624 "on the run queue locks. "
4625 "The signal has _not_ been sent.\n"
4626 "Reissue the kill command if you want to risk "
4627 "the deadlock.\n");
4628 return;
4629 }
4630 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4631 spin_unlock(&t->sighand->siglock);
4632 if (ret)
4633 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4634 sig, t->pid);
4635 else
4636 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4637}
4638#endif /* CONFIG_KGDB_KDB */