// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
 * Changes to use preallocated sigqueue structures
 * to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}
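
/*
 * Illustrative userspace sketch (not part of the kernel build): the
 * "explicitly or implicitly ignored" distinction handled above. With the
 * default disposition, sig_kernel_ignore() signals such as SIGCHLD are
 * discarded, while an explicit SIG_IGN discards any catchable signal:
 *
 *	#include <signal.h>
 *
 *	int main(void)
 *	{
 *		signal(SIGCHLD, SIG_DFL);	// implicitly ignored: SIG_DFL + sig_kernel_ignore()
 *		signal(SIGUSR1, SIG_IGN);	// explicitly ignored
 *		raise(SIGCHLD);			// dropped by the kernel
 *		raise(SIGUSR1);			// dropped as well
 *		return 0;			// neither signal is delivered
 *	}
 */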

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers who know they should
	 * clear it do so.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
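
/*
 * Illustrative sketch (not kernel code): the SYNCHRONOUS_MASK special case
 * above means a pending fault signal is dequeued before ordinary signals in
 * the same word. E.g. with SIGINT (2) and SIGSEGV (11) both pending and
 * nothing blocked, next_signal() reports SIGSEGV first even though SIGINT
 * has the lower number:
 *
 *	x = pending & ~blocked;		// bit 1 (SIGINT) and bit 10 (SIGSEGV) set
 *	x &= SYNCHRONOUS_MASK;		// only bit 10 survives
 *	sig = ffz(~x) + 1;		// ffz(~x) == 10, so sig == 11 == SIGSEGV
 */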

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING. If a stop signo is being set, the existing signo is
 * cleared. If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer. Note that we don't need any further
 * locking. @task->sighand->siglock guarantees that @task->parent points
 * to the ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop. Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an ongoing signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct ucounts *ucounts = NULL;
	long sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	ucounts = task_ucounts(t);
	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	rcu_read_unlock();
	if (!sigpending)
		return NULL;

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->ucounts = ucounts;
	}
	return q;
}
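
/*
 * Illustrative userspace sketch (not kernel code): RLIMIT_SIGPENDING,
 * enforced above, is per-user and observable from userspace. A queued but
 * undelivered real-time signal consumes one slot until it is delivered or
 * flushed:
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *	#include <sys/resource.h>
 *
 *	struct rlimit rl;
 *	getrlimit(RLIMIT_SIGPENDING, &rl);	// max queued signals for this user
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGRTMIN);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	// keep SIGRTMIN queued
 *	union sigval v = { .sival_int = 1 };
 *	sigqueue(getpid(), SIGRTMIN, v);	// fails with EAGAIN once
 *						// rl.rlim_cur slots are exhausted
 */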

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (q->ucounts) {
		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
		q->ucounts = NULL;
	}
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal. Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue. This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space. So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal. Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL). So those cases clear this
		 * shared flag after we've set it. Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled. That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks. Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
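
/*
 * Minimal in-kernel usage sketch (illustrative only), matching the locking
 * rule documented above dequeue_signal():
 *
 *	kernel_siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *	// signr is 0 if nothing was pending and deliverable
 */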

static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}
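
/*
 * Illustrative userspace sketch (not kernel code): the credential checks
 * above are what makes an unprivileged kill(2) of another user's process
 * fail:
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *
 *	if (kill(other_users_pid, SIGTERM) < 0 && errno == EPERM) {
 *		// none of the euid/uid vs. suid/uid pairs matched and the
 *		// caller lacks CAP_KILL in the target's user namespace
 *	}
 *
 * other_users_pid is a placeholder for a pid owned by a different user.
 */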

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap, which is cleared on the
 * next TRAP_STOP, to notify the ptracer of an event. @t must have been
 * seized by a ptracer.
 *
 * If @t is running, a STOP trap will be taken. If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it
 * can re-trap for the new event. If trapped otherwise, the STOP trap will
 * eventually be taken without returning to userland after the existing
 * traps are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling. This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal. Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
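
/*
 * Illustrative userspace sketch (not kernel code): the flushing above is
 * why a stop and a continue can never both stay pending. From a parent:
 *
 *	#include <signal.h>
 *	#include <sys/wait.h>
 *
 *	kill(child, SIGSTOP);			// child stops
 *	kill(child, SIGCONT);			// pending stop signals are flushed,
 *						// child resumes immediately
 *	waitpid(child, &status, WCONTINUED);	// WIFCONTINUED(status) is true
 *
 * child is a placeholder pid of a previously forked child process.
 */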

/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG. Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals. Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread. If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			 enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism. It is implementation
	 * defined whether kill() does so. We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort. We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information. We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after ongoing forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}
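
/*
 * Illustrative userspace sketch (not kernel code): the legacy_queue()
 * check above coalesces classic signals, while real-time signals really
 * queue:
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigaddset(&set, SIGRTMIN);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *
 *	kill(getpid(), SIGUSR1);		// pending
 *	kill(getpid(), SIGUSR1);		// merged with the first one
 *	union sigval v = { .sival_int = 0 };
 *	sigqueue(getpid(), SIGRTMIN, v);
 *	sigqueue(getpid(), SIGRTMIN, v);	// queued separately
 *
 *	// Unblocking delivers SIGUSR1 once, but SIGRTMIN twice.
 */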

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* The siginfo carries pid and uid, which may need translating */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGVs etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again. If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong. The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace. As the 32bit address will be encoded in the
 * low 32bits of the pointer, those low 32bits will be stored at a
 * higher address than they would appear in a 32bit pointer. So
 * userspace will not see the address it was expecting for its
 * completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
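
/*
 * Illustrative sketch (not real driver code) of the caller-side convention
 * the comment above requires: a caller signalling a 32bit process from a
 * 64bit big endian kernel puts the compat pointer in sival_int rather than
 * sival_ptr:
 *
 *	sigval_t addr;
 *
 *	if (target_is_compat)			// hypothetical flag kept by the caller
 *		addr.sival_int = (int)(long)user_ptr;
 *	else
 *		addr.sival_ptr = user_ptr;
 *	kill_pid_usb_asyncio(sig, err, addr, pid, cred);
 */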

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong. Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
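
/*
 * Illustrative userspace sketch (not kernel code): the pid interpretation
 * above is exactly what kill(2) documents for its first argument:
 *
 *	kill(1234, SIGTERM);	// pid > 0: that process only
 *	kill(0, SIGTERM);	// pid == 0: the caller's process group
 *	kill(-1, SIGTERM);	// pid == -1: every process we may signal,
 *				// except init and our own thread group
 *	kill(-5678, SIGTERM);	// pid < -1: process group 5678
 *
 * 1234 and 5678 are placeholder ids.
 */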

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_TRAPNO(trapno)
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_BNDERR;
	info.si_addr = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_PKUERR;
	info.si_addr = addr;
	info.si_pkey = pkey;
	return force_sig_info(&info);
}
#endif

int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_PERF;
	info.si_addr = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	return force_sig_info(&info);
}

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures. This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions". In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create. If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}
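
/*
 * Illustrative userspace sketch (not kernel code): the path that reaches
 * sigqueue_alloc() above is timer_create(2) with SIGEV_SIGNAL. The
 * preallocation is why a successfully created timer can always deliver
 * its signal:
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	timer_t timerid;
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo = SIGRTMIN,
 *	};
 *	// fails with EAGAIN if the sigqueue cannot be preallocated
 *	if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) == 0) {
 *		// arm it with timer_settime(timerid, ...)
 *	}
 */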

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
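
/*
 * Illustrative userspace sketch (not kernel code): the si_overrun
 * accounting above surfaces as timer_getoverrun(2). If the timer fires
 * again while its one preallocated sigqueue entry is still pending, the
 * extra expirations are counted instead of queued:
 *
 *	#include <time.h>
 *
 *	// in the SIGRTMIN handler, with timerid from timer_create():
 *	int missed = timer_getoverrun(timerid);
 *	// missed > 0 means expirations were coalesced into si_overrun
 */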

static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, it is not
	 * correct to rely on this.
	 */
1961 rcu_read_lock();
1962 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1963 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1964 task_uid(tsk));
1965 rcu_read_unlock();
1966
1967 task_cputime(tsk, &utime, &stime);
1968 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
1969 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
1970
1971 info.si_status = tsk->exit_code & 0x7f;
1972 if (tsk->exit_code & 0x80)
1973 info.si_code = CLD_DUMPED;
1974 else if (tsk->exit_code & 0x7f)
1975 info.si_code = CLD_KILLED;
1976 else {
1977 info.si_code = CLD_EXITED;
1978 info.si_status = tsk->exit_code >> 8;
1979 }
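	/*
	 * Illustrative examples of the wait(2)-style encoding decoded
	 * above (not from the original source): exit_code 9 (killed by
	 * SIGKILL) yields CLD_KILLED with si_status == 9; exit_code
	 * 11 | 0x80 (SIGSEGV with a core dump) yields CLD_DUMPED; and
	 * exit_code 23 << 8 (a plain exit(23)) yields CLD_EXITED with
	 * si_status == 23.
	 */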

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care. POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie. Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed. If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so, and our tracer is also part of the coredump, stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop. This is allowed to block, e.g. for faults
		 * on user stack pages. We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock. That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping. TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()		    smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now. We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader. The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop. The states
		 * for the two don't interact with each other. Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		cgroup_enter_frozen();
		preempt_enable_no_resched();
		freezable_schedule();
		cgroup_leave_frozen(true);
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here. During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back. Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

static void ptrace_do_notify(int signr, int exit_code, int why)
{
	kernel_siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run. */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
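	/*
	 * Sanity-check sketch for the BUG_ON above (not from the original
	 * source): ptrace event stops carry SIGTRAP in the low seven bits
	 * with the event number in bits 8-15, as in
	 * ptrace_notify((PTRACE_EVENT_CLONE << 8) | SIGTRAP), so nothing
	 * may be set above bit 15 and the low bits must read SIGTRAP.
	 */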
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}

/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it. If already set, participate in the existing
 * group stop. If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself. Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched. The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress. We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop. This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced. That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set. Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion. Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach in between; however, this is
		 * for group stop and should always be delivered to the
		 * real parent of the group leader. The new ptracer will
		 * get its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		cgroup_enter_frozen();
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}

/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}

/**
 * do_freezer_trap - handle the freezer jobctl trap
 *
 * Puts the task into frozen state, unless the task is about to quit,
 * in which case JOBCTL_TRAP_FREEZE is simply dropped.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held,
 * which is always released before returning.
 */
static void do_freezer_trap(void)
	__releases(&current->sighand->siglock)
{
	/*
	 * If there are other trap bits pending besides JOBCTL_TRAP_FREEZE,
	 * let's make another loop to give them a chance to be handled.
	 * In any case, we'll end up back here.
	 */
	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
	     JOBCTL_TRAP_FREEZE) {
		spin_unlock_irq(&current->sighand->siglock);
		return;
	}

	/*
	 * Now we're sure that there is no pending fatal signal and no
	 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
	 * immediately (if there is a non-fatal signal pending), and
	 * put the task into sleep.
	 */
	__set_current_state(TASK_INTERRUPTIBLE);
	clear_thread_flag(TIF_SIGPENDING);
	spin_unlock_irq(&current->sighand->siglock);
	cgroup_enter_frozen();
	freezable_schedule();
}

static int ptrace_signal(int signr, kernel_siginfo_t *info)
{
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back. Did the debugger cancel the sig? */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed. If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		clear_siginfo(info);
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it. */
	if (sigismember(&current->blocked, signr)) {
		send_signal(signr, info, current, PIDTYPE_PID);
		signr = 0;
	}

	return signr;
}

static void hide_si_addr_tag_bits(struct ksignal *ksig)
{
	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_PERF_EVENT:
		ksig->info.si_addr = arch_untagged_si_addr(
			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
		break;
	case SIL_KILL:
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_CHLD:
	case SIL_RT:
	case SIL_SYS:
		break;
	}
}

bool get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	if (unlikely(current->task_works))
		task_work_run();

	/*
	 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
	 * that the arch handlers don't all have to do it. If we get here
	 * without TIF_SIGPENDING, just exit after running signal work.
	 */
	if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
		if (test_thread_flag(TIF_NOTIFY_SIGNAL))
			tracehook_notify_signal();
		if (!task_sigpending(current))
			return false;
	}

	if (unlikely(uprobe_deny_signal()))
		return false;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);

	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing. This event is
		 * always per-process and doesn't make a whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						 true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	/* Has this task already been marked for death? */
	if (signal_group_exit(signal)) {
		ksig->info.si_signo = signr = SIGKILL;
		sigdelset(&current->pending.signal, SIGKILL);
		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
				     &sighand->action[SIGKILL - 1]);
		recalc_sigpending();
		goto fatal;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl &
			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
			if (current->jobctl & JOBCTL_TRAP_MASK) {
				do_jobctl_trap();
				spin_unlock_irq(&sighand->siglock);
			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
				do_freezer_trap();

			goto relock;
		}

		/*
		 * If the task is leaving the frozen state, let's update
		 * cgroup counters and reset the frozen bit.
		 */
		if (unlikely(cgroup_task_frozen(current))) {
			spin_unlock_irq(&sighand->siglock);
			cgroup_leave_frozen(false);
			goto relock;
		}

		/*
		 * Signals generated by the execution of an instruction
		 * need to be delivered before any other pending signals
		 * so that the instruction pointer in the signal stack
		 * frame points to the faulting instruction.
		 */
		signr = dequeue_synchronous_signal(&ksig->info);
		if (!signr)
			signr = dequeue_signal(current, &current->blocked, &ksig->info);

		if (!signr)
			break; /* will return 0 */

		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, &ksig->info);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler. */
			ksig->ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
		    !sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group. The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works. Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock. */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

	fatal:
		spin_unlock_irq(&sighand->siglock);
		if (unlikely(cgroup_task_frozen(current)))
			cgroup_leave_frozen(true);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise. If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * PF_IO_WORKER threads will catch and exit on fatal signals
		 * themselves. They have cleanup that must be performed, so
		 * we cannot call do_exit() on their behalf.
		 */
		if (current->flags & PF_IO_WORKER)
			goto out;

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
out:
	ksig->sig = signr;

	if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
		hide_si_addr_tag_bits(ksig);

	return ksig->sig > 0;
}

/**
 * signal_delivered -
 * @ksig: kernel signal struct
 * @stepping: nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
 */
static void signal_delivered(struct ksignal *ksig, int stepping)
{
	sigset_t blocked;

	/*
	 * A signal was successfully delivered, and the saved sigmask was
	 * stored on the signal frame and will be restored by sigreturn.
	 * So we can simply clear the restore sigmask flag.
	 */
	clear_restore_sigmask();

	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, ksig->sig);
	set_current_blocked(&blocked);
	if (current->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(current);
	tracehook_signal_handler(stepping);
}

void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
{
	if (failed)
		force_sigsegv(ksig->sig);
	else
		signal_delivered(ksig, stepping);
}

/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!task_sigpending(t))
			signal_wake_up(t, 0);

		if (sigisemptyset(&retarget))
			break;
	}
}

void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	cgroup_threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		cgroup_threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	cgroup_threadgroup_change_end(tsk);

	if (!task_sigpending(tsk))
		goto out;

	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification. This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}

/*
 * System call entry points.
 */

/**
 * sys_restart_syscall - restart a system call
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}

/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(sigset_t *newset)
{
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}

void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do. The current->blocked shouldn't be modified by other tasks.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	struct task_struct *tsk = current;
	sigset_t newset;

	/* Lockless, only current can change ->blocked, never from irq */
	if (oldset)
		*oldset = tsk->blocked;

	switch (how) {
	case SIG_BLOCK:
		sigorsets(&newset, &tsk->blocked, set);
		break;
	case SIG_UNBLOCK:
		sigandnsets(&newset, &tsk->blocked, set);
		break;
	case SIG_SETMASK:
		newset = *set;
		break;
	default:
		return -EINVAL;
	}

	__set_current_blocked(&newset);
	return 0;
}
EXPORT_SYMBOL(sigprocmask);
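
/*
 * Illustrative in-kernel usage sketch (not part of the original file):
 * a caller that wants to block SIGUSR1 around a section could do
 *
 *	sigset_t set, old;
 *
 *	siginitset(&set, sigmask(SIGUSR1));
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	...			SIGUSR1 stays pending here ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 *
 * Unlike the userspace syscall, this interface would also happily block
 * SIGKILL, as the NOTE above warns.
 */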

/*
 * This API helps set app-provided sigmasks.
 *
 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
 *
 * Note that it does set_restore_sigmask() in advance, so it must always
 * be paired with restore_saved_sigmask_unless() before return from syscall.
 */
int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
{
	sigset_t kmask;

	if (!umask)
		return 0;
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;
	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
		return -EFAULT;

	set_restore_sigmask();
	current->saved_sigmask = current->blocked;
	set_current_blocked(&kmask);

	return 0;
}
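
/*
 * Pairing sketch (illustrative, not part of the original file): a caller
 * such as ppoll() roughly does
 *
 *	ret = set_user_sigmask(sigmask, sigsetsize);
 *	if (ret)
 *		return ret;
 *	ret = do_sys_poll(...);
 *	restore_saved_sigmask_unless(ret == -EINTR);
 *
 * so the temporary mask is undone on return, unless a signal must first
 * be delivered with the app-provided mask still in effect.
 */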

#ifdef CONFIG_COMPAT
int set_compat_user_sigmask(const compat_sigset_t __user *umask,
			    size_t sigsetsize)
{
	sigset_t kmask;

	if (!umask)
		return 0;
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;
	if (get_compat_sigset(&kmask, umask))
		return -EFAULT;

	set_restore_sigmask();
	current->saved_sigmask = current->blocked;
	set_current_blocked(&kmask);

	return 0;
}
#endif

/**
 * sys_rt_sigprocmask - change the list of currently blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: the new signal mask to apply, or NULL to leave the mask unchanged
 * @oset: previous value of signal mask if non-null
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		sigset_t new_set;
		int error;
		if (get_compat_sigset(&new_set, nset))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
}
#endif

static void do_sigpending(sigset_t *set)
{
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it. */
	sigandsets(set, &current->blocked, set);
}

/**
 * sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 * @uset: stores pending signals
 * @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
{
	sigset_t set;

	if (sigsetsize > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sigsetsize))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
	sigset_t set;

	if (sigsetsize > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	return put_compat_sigset(uset, &set, sigsetsize);
}
#endif

static const struct {
	unsigned char limit, layout;
} sig_sicodes[] = {
	[SIGILL]  = { NSIGILL,  SIL_FAULT },
	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
#if defined(SIGEMT)
	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
#endif
	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
};

static bool known_siginfo_layout(unsigned sig, int si_code)
{
	if (si_code == SI_KERNEL)
		return true;
	else if (si_code > SI_USER) {
		if (sig_specific_sicodes(sig)) {
			if (si_code <= sig_sicodes[sig].limit)
				return true;
		} else if (si_code <= NSIGPOLL)
			return true;
	} else if (si_code >= SI_DETHREAD)
		return true;
	else if (si_code == SI_ASYNCNL)
		return true;
	return false;
}

enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
{
	enum siginfo_layout layout = SIL_KILL;

	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
		    (si_code <= sig_sicodes[sig].limit)) {
			layout = sig_sicodes[sig].layout;
			/* Handle the exceptions */
			if ((sig == SIGBUS) &&
			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
				layout = SIL_FAULT_MCEERR;
			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
				layout = SIL_FAULT_BNDERR;
#ifdef SEGV_PKUERR
			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
				layout = SIL_FAULT_PKUERR;
#endif
			else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
				layout = SIL_PERF_EVENT;
#ifdef __ARCH_SI_TRAPNO
			else if (layout == SIL_FAULT)
				layout = SIL_FAULT_TRAPNO;
#endif
		} else if (si_code <= NSIGPOLL)
			layout = SIL_POLL;
	} else {
		if (si_code == SI_TIMER)
			layout = SIL_TIMER;
		else if (si_code == SI_SIGIO)
			layout = SIL_POLL;
		else if (si_code < 0)
			layout = SIL_RT;
	}
	return layout;
}
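
/*
 * Illustrative mapping (not part of the original file): for SIGSEGV,
 * si_code SEGV_MAPERR selects SIL_FAULT while SEGV_BNDERR selects
 * SIL_FAULT_BNDERR; a negative si_code of SI_TIMER on any signal
 * selects SIL_TIMER. The layout is a function of both the signal
 * number and si_code, never the signal number alone.
 */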

static inline char __user *si_expansion(const siginfo_t __user *info)
{
	return ((char __user *)info) + sizeof(struct kernel_siginfo);
}
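
/*
 * Background sketch (assumption, not from the original file): userspace
 * siginfo_t is padded out to a fixed size (128 bytes) while struct
 * kernel_siginfo only covers the fields the kernel actually uses, so
 * SI_EXPANSION_SIZE is the trailing pad that must be zeroed on the way
 * out (clear_user below) and verified zero on the way in
 * (post_copy_siginfo_from_user).
 */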

int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
{
	char __user *expansion = si_expansion(to);

	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
		return -EFAULT;
	if (clear_user(expansion, SI_EXPANSION_SIZE))
		return -EFAULT;
	return 0;
}

static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
				       const siginfo_t __user *from)
{
	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
		char __user *expansion = si_expansion(from);
		char buf[SI_EXPANSION_SIZE];
		int i;
		/*
		 * An unknown si_code might need more than
		 * sizeof(struct kernel_siginfo) bytes. Verify all of the
		 * extra bytes are 0. This guarantees copy_siginfo_to_user
		 * will return this data to userspace exactly.
		 */
		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
			return -EFAULT;
		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
			if (buf[i] != 0)
				return -E2BIG;
		}
	}
	return 0;
}

static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
				    const siginfo_t __user *from)
{
	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
		return -EFAULT;
	to->si_signo = signo;
	return post_copy_siginfo_from_user(to, from);
}

int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
{
	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
		return -EFAULT;
	return post_copy_siginfo_from_user(to, from);
}

#ifdef CONFIG_COMPAT
/**
 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
 * @to: compat siginfo destination
 * @from: kernel siginfo source
 *
 * Note: This function does not work properly for the SIGCHLD on x32, but
 * fortunately it doesn't have to. The only valid callers for this function
 * are copy_siginfo_to_user32, which is overridden for x32, and the coredump
 * code. The latter does not care because SIGCHLD will never cause a
 * coredump.
 */
void copy_siginfo_to_external32(struct compat_siginfo *to,
		const struct kernel_siginfo *from)
{
	memset(to, 0, sizeof(*to));

	to->si_signo = from->si_signo;
	to->si_errno = from->si_errno;
	to->si_code = from->si_code;
	switch (siginfo_layout(from->si_signo, from->si_code)) {
	case SIL_KILL:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		break;
	case SIL_TIMER:
		to->si_tid = from->si_tid;
		to->si_overrun = from->si_overrun;
		to->si_int = from->si_int;
		break;
	case SIL_POLL:
		to->si_band = from->si_band;
		to->si_fd = from->si_fd;
		break;
	case SIL_FAULT:
		to->si_addr = ptr_to_compat(from->si_addr);
		break;
	case SIL_FAULT_TRAPNO:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_trapno = from->si_trapno;
		break;
	case SIL_FAULT_MCEERR:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_addr_lsb = from->si_addr_lsb;
		break;
	case SIL_FAULT_BNDERR:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_lower = ptr_to_compat(from->si_lower);
		to->si_upper = ptr_to_compat(from->si_upper);
		break;
	case SIL_FAULT_PKUERR:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_pkey = from->si_pkey;
		break;
	case SIL_PERF_EVENT:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_perf_data = from->si_perf_data;
		to->si_perf_type = from->si_perf_type;
		break;
	case SIL_CHLD:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		to->si_status = from->si_status;
		to->si_utime = from->si_utime;
		to->si_stime = from->si_stime;
		break;
	case SIL_RT:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		to->si_int = from->si_int;
		break;
	case SIL_SYS:
		to->si_call_addr = ptr_to_compat(from->si_call_addr);
		to->si_syscall = from->si_syscall;
		to->si_arch = from->si_arch;
		break;
	}
}

int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
			     const struct kernel_siginfo *from)
{
	struct compat_siginfo new;

	copy_siginfo_to_external32(&new, from);
	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
		return -EFAULT;
	return 0;
}

static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
					 const struct compat_siginfo *from)
{
	clear_siginfo(to);
	to->si_signo = from->si_signo;
	to->si_errno = from->si_errno;
	to->si_code = from->si_code;
	switch (siginfo_layout(from->si_signo, from->si_code)) {
	case SIL_KILL:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		break;
	case SIL_TIMER:
		to->si_tid = from->si_tid;
		to->si_overrun = from->si_overrun;
		to->si_int = from->si_int;
		break;
	case SIL_POLL:
		to->si_band = from->si_band;
		to->si_fd = from->si_fd;
		break;
	case SIL_FAULT:
		to->si_addr = compat_ptr(from->si_addr);
		break;
	case SIL_FAULT_TRAPNO:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_trapno = from->si_trapno;
		break;
	case SIL_FAULT_MCEERR:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_addr_lsb = from->si_addr_lsb;
		break;
	case SIL_FAULT_BNDERR:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_lower = compat_ptr(from->si_lower);
		to->si_upper = compat_ptr(from->si_upper);
		break;
	case SIL_FAULT_PKUERR:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_pkey = from->si_pkey;
		break;
	case SIL_PERF_EVENT:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_perf_data = from->si_perf_data;
		to->si_perf_type = from->si_perf_type;
		break;
	case SIL_CHLD:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		to->si_status = from->si_status;
#ifdef CONFIG_X86_X32_ABI
		if (in_x32_syscall()) {
			to->si_utime = from->_sifields._sigchld_x32._utime;
			to->si_stime = from->_sifields._sigchld_x32._stime;
		} else
#endif
		{
			to->si_utime = from->si_utime;
			to->si_stime = from->si_stime;
		}
		break;
	case SIL_RT:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		to->si_int = from->si_int;
		break;
	case SIL_SYS:
		to->si_call_addr = compat_ptr(from->si_call_addr);
		to->si_syscall = from->si_syscall;
		to->si_arch = from->si_arch;
		break;
	}
	return 0;
}

static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
				      const struct compat_siginfo __user *ufrom)
{
	struct compat_siginfo from;

	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
		return -EFAULT;

	from.si_signo = signo;
	return post_copy_siginfo_from_user32(to, &from);
}

int copy_siginfo_from_user32(struct kernel_siginfo *to,
			     const struct compat_siginfo __user *ufrom)
{
	struct compat_siginfo from;

	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
		return -EFAULT;

	return post_copy_siginfo_from_user32(to, &from);
}
#endif /* CONFIG_COMPAT */

/**
 * do_sigtimedwait - wait for queued signals specified in @which
 * @which: queued signals to wait for
 * @info: if non-null, the signal's siginfo is returned here
 * @ts: upper bound on process time suspension
 */
static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
			   const struct timespec64 *ts)
{
	ktime_t *to = NULL, timeout = KTIME_MAX;
	struct task_struct *tsk = current;
	sigset_t mask = *which;
	int sig, ret = 0;

	if (ts) {
		if (!timespec64_valid(ts))
			return -EINVAL;
		timeout = timespec64_to_ktime(*ts);
		to = &timeout;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);
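	/*
	 * Worked example (illustrative, not from the original source):
	 * if *which contains only SIGUSR1, the two lines above turn mask
	 * into "every signal except SIGUSR1", which then serves both as
	 * the dequeue filter and as the temporary blocked set below.
	 * SIGKILL and SIGSTOP are deleted from the wait set first, so
	 * after the inversion they remain in mask and are never returned
	 * by sigtimedwait().
	 */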

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	if (!sig && timeout) {
		/*
		 * None ready; temporarily unblock those we're interested
		 * in while we are sleeping, so that we'll be awakened when
		 * they arrive. Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		__set_current_state(TASK_INTERRUPTIBLE);
		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
							 HRTIMER_MODE_REL);
		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return ret ? -EINTR : -EAGAIN;
}

/**
 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			 in @uthese
 * @uthese: queued signals to wait for
 * @uinfo: if non-null, the signal's siginfo is returned here
 * @uts: upper bound on process time suspension
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo,
		const struct __kernel_timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec64 ts;
	kernel_siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (get_timespec64(&ts, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo,
		const struct old_timespec32 __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec64 ts;
	kernel_siginfo_t info;
	int ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (get_old_timespec32(&ts, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec64 t;
	kernel_siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (get_timespec64(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec64 t;
	kernel_siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (get_old_timespec32(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif
#endif

static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
{
	clear_siginfo(info);
	info->si_signo = sig;
	info->si_errno = 0;
	info->si_code = SI_USER;
	info->si_pid = task_tgid_vnr(current);
	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
}

/**
 * sys_kill - send a signal to a process
 * @pid: the PID of the process
 * @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct kernel_siginfo info;

	prepare_kill_siginfo(sig, &info);

	return kill_something_info(sig, &info, pid);
}

/*
 * Verify that the signaler and signalee either are in the same pid namespace
 * or that the signaler's pid namespace is an ancestor of the signalee's pid
 * namespace.
 */
static bool access_pidfd_pidns(struct pid *pid)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *p = ns_of_pid(pid);

	for (;;) {
		if (!p)
			return false;
		if (p == active)
			break;
		p = p->parent;
	}

	return true;
}
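
/*
 * Illustrative cases (not from the original file): a signaler in the
 * initial pid namespace may signal a pidfd for a task in any nested
 * namespace, since the walk up p->parent from the signalee's namespace
 * eventually reaches the signaler's. For two sibling namespaces the
 * walk climbs past their common ancestor to a NULL parent without ever
 * matching, so the kill is refused.
 */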

static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
		siginfo_t __user *info)
{
#ifdef CONFIG_COMPAT
	/*
	 * Avoid hooking up compat syscalls and instead handle necessary
	 * conversions here. Note, this is a stop-gap measure and should not be
	 * considered a generic solution.
	 */
	if (in_compat_syscall())
		return copy_siginfo_from_user32(
			kinfo, (struct compat_siginfo __user *)info);
#endif
	return copy_siginfo_from_user(kinfo, info);
}

static struct pid *pidfd_to_pid(const struct file *file)
{
	struct pid *pid;

	pid = pidfd_pid(file);
	if (!IS_ERR(pid))
		return pid;

	return tgid_pidfd_to_pid(file);
}

/**
 * sys_pidfd_send_signal - Signal a process through a pidfd
 * @pidfd: file descriptor of the process
 * @sig: signal to send
 * @info: signal info
 * @flags: future flags
 *
 * The syscall currently only signals via PIDTYPE_PID which covers
 * kill(<positive-pid>, <signal>). It does not signal threads or process
 * groups.
 * In order to extend the syscall to threads and process groups the @flags
 * argument should be used. In essence, the @flags argument will determine
 * what is signaled and not the file descriptor itself. Put differently,
 * grouping is a property of the flags argument, not a property of the
 * file descriptor.
 *
 * Return: 0 on success, negative errno on failure
 */
SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
		siginfo_t __user *, info, unsigned int, flags)
{
	int ret;
	struct fd f;
	struct pid *pid;
	kernel_siginfo_t kinfo;

	/* Enforce flags be set to 0 until we add an extension. */
	if (flags)
		return -EINVAL;

	f = fdget(pidfd);
	if (!f.file)
		return -EBADF;

	/* Is this a pidfd? */
	pid = pidfd_to_pid(f.file);
	if (IS_ERR(pid)) {
		ret = PTR_ERR(pid);
		goto err;
	}

	ret = -EINVAL;
	if (!access_pidfd_pidns(pid))
		goto err;

	if (info) {
		ret = copy_siginfo_from_user_any(&kinfo, info);
		if (unlikely(ret))
			goto err;

		ret = -EINVAL;
		if (unlikely(sig != kinfo.si_signo))
			goto err;

		/* Only allow sending arbitrary signals to yourself. */
		ret = -EPERM;
		if ((task_pid(current) != pid) &&
		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
			goto err;
	} else {
		prepare_kill_siginfo(sig, &kinfo);
	}

	ret = kill_pid_info(sig, &kinfo, pid);

err:
	fdput(f);
	return ret;
}
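
/*
 * Userspace usage sketch (illustrative, not part of the original file):
 *
 *	int pidfd = syscall(__NR_pidfd_open, pid, 0);
 *	if (pidfd >= 0)
 *		syscall(__NR_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *
 * Passing info == NULL takes the prepare_kill_siginfo() path above, so
 * the target sees an ordinary SI_USER-coded SIGTERM.
 */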

static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe. No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}
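
/*
 * Aside (illustrative, not part of the original file): the null-signal
 * probe noted above is what lets userspace test for a live thread
 * without disturbing it - e.g. tgkill(tgid, tid, 0) returns 0 if the
 * thread exists and the caller may signal it, and -ESRCH if it does
 * not exist.
 */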

static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}

/**
 * sys_tgkill - send signal to one specific thread
 * @tgid: the thread group ID of the thread
 * @pid: the PID of the thread
 * @sig: signal to be sent
 *
 * This syscall also checks the @tgid and returns -ESRCH even if the PID
 * exists but no longer belongs to the target process. This method solves
 * the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
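
/*
 * Editor's illustrative sketch (hypothetical userspace caller): tgkill()
 * is usually reached through syscall(2); the tid would come from gettid()
 * in the target thread. Because the tgid is checked too, a tid that has
 * been recycled into another process yields -ESRCH instead of signaling
 * the wrong task.
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int signal_thread(pid_t tgid, pid_t tid, int sig)
 *	{
 *		return syscall(SYS_tgkill, tgid, tid, sig);
 *	}
 */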

/**
 * sys_tkill - send signal to one specific task
 * @pid: the PID of the task
 * @sig: signal to be sent
 *
 * Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, info, pid);
}

/**
 * sys_rt_sigqueueinfo - send signal information to a process
 * @pid: the PID of the process
 * @sig: signal to be sent
 * @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif
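
/*
 * Editor's illustrative sketch: userspace rarely calls rt_sigqueueinfo()
 * directly; sigqueue(3) builds an SI_QUEUE siginfo carrying a payload and
 * ends up here. SI_QUEUE's negative si_code is what lets it pass the
 * impersonation check in do_rt_sigqueueinfo().
 *
 *	#include <signal.h>
 *
 *	static int queue_value(pid_t pid, int val)
 *	{
 *		union sigval sv = { .sival_int = val };
 *
 *		return sigqueue(pid, SIGRTMIN, sv);
 *	}
 */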

static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif

/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);
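
/*
 * Editor's illustrative sketch (hypothetical kthread body): a kernel
 * thread can drop a signal it no longer wants; the SIG_IGN branch above
 * also flushes anything already queued.
 *
 *	kernel_sigaction(SIGCHLD, SIG_IGN);
 */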

void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	/*
	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
	 * e.g. by having an architecture use the bit in their uapi.
	 */
	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);

	/*
	 * Clear unknown flag bits in order to allow userspace to detect missing
	 * support for flag bits and to allow the kernel to use non-uapi bits
	 * internally.
	 */
	if (act)
		act->sa.sa_flags &= UAPI_SA_FLAGS;
	if (oact)
		oact->sa.sa_flags &= UAPI_SA_FLAGS;

	sigaction_compat_abi(act, oact);

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 * "Setting a signal action to SIG_IGN for a signal that is
		 * pending shall cause the pending signal to be discarded,
		 * whether or not it is blocked."
		 *
		 * "Setting a signal action to SIG_DFL for a signal that is
		 * pending and whose default action is to ignore the signal
		 * (for example, SIGCHLD), shall cause the pending signal to
		 * be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
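
/*
 * Editor's illustrative sketch: the POSIX discard rule quoted above is
 * observable from userspace with sigpending(2) - a blocked, pending
 * signal disappears the moment its disposition becomes SIG_IGN.
 *
 *	#include <signal.h>
 *
 *	sigset_t set, pend;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	raise(SIGUSR1);			// pending and blocked
 *	signal(SIGUSR1, SIG_IGN);	// discarded per POSIX
 *	sigpending(&pend);		// SIGUSR1 is no longer a member
 */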

static int
do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
		size_t min_ss_size)
{
	struct task_struct *t = current;

	if (oss) {
		memset(oss, 0, sizeof(stack_t));
		oss->ss_sp = (void __user *) t->sas_ss_sp;
		oss->ss_size = t->sas_ss_size;
		oss->ss_flags = sas_ss_flags(sp) |
			(current->sas_ss_flags & SS_FLAG_BITS);
	}

	if (ss) {
		void __user *ss_sp = ss->ss_sp;
		size_t ss_size = ss->ss_size;
		unsigned ss_flags = ss->ss_flags;
		int ss_mode;

		if (unlikely(on_sig_stack(sp)))
			return -EPERM;

		ss_mode = ss_flags & ~SS_FLAG_BITS;
		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0))
			return -EINVAL;

		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < min_ss_size))
				return -ENOMEM;
		}

		t->sas_ss_sp = (unsigned long) ss_sp;
		t->sas_ss_size = ss_size;
		t->sas_ss_flags = ss_flags;
	}
	return 0;
}

SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
{
	stack_t new, old;
	int err;
	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
			     current_user_stack_pointer(),
			     MINSIGSTKSZ);
	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
		err = -EFAULT;
	return err;
}
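
/*
 * Editor's illustrative sketch (hypothetical userspace setup): the usual
 * pairing is sigaltstack(2) plus an SA_ONSTACK handler, e.g. so a SIGSEGV
 * caused by stack overflow can still be handled. "handler" is a
 * hypothetical function.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),	// >= MINSIGSTKSZ or -ENOMEM
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = {
 *		.sa_handler = handler,
 *		.sa_flags = SA_ONSTACK,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 */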

int restore_altstack(const stack_t __user *uss)
{
	stack_t new;
	if (copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
			     MINSIGSTKSZ);
	/* squash all but EFAULT for now */
	return 0;
}

int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	return err;
}

#ifdef CONFIG_COMPAT
static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
				 compat_stack_t __user *uoss_ptr)
{
	stack_t uss, uoss;
	int ret;

	if (uss_ptr) {
		compat_stack_t uss32;
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
			     compat_user_stack_pointer(),
			     COMPAT_MINSIGSTKSZ);
	if (ret >= 0 && uoss_ptr) {
		compat_stack_t old;
		memset(&old, 0, sizeof(old));
		old.ss_sp = ptr_to_compat(uoss.ss_sp);
		old.ss_flags = uoss.ss_flags;
		old.ss_size = uoss.ss_size;
		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
			ret = -EFAULT;
	}
	return ret;
}

COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = do_compat_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;
	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	return err;
}
#endif

#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 * sys_sigpending - examine pending signals
 * @uset: where mask of pending signal is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
{
	sigset_t set;

	if (sizeof(old_sigset_t) > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
	sigset_t set;

	do_sigpending(&set);

	return put_user(set.sig[0], set32);
}
#endif

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 * sys_sigprocmask - examine and change blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: signals to add or remove (if non-null)
 * @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
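
/*
 * Editor's illustrative sketch: whether the C library routes this call to
 * the legacy syscall above or to rt_sigprocmask, the @how modes map onto
 * the same set algebra.
 *
 *	#include <signal.h>
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);	// blocked |= set
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);	// blocked &= ~set
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// blocked = old
 */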

#ifndef CONFIG_ODD_RT_SIGACTION
/**
 * sys_rt_sigaction - alter an action taken by a process
 * @sig: signal to be sent
 * @act: new sigaction
 * @oact: used to save the previous sigaction
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
		return -EFAULT;

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
	if (ret)
		return ret;

	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
		return -EFAULT;

	return 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
					 sizeof(oact->sa_mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */

#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif

#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility. Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif

static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

/**
 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
 * until a signal is received
 * @unewset: new signal mask value
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}
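
/*
 * Editor's illustrative sketch: the classic race-free wait built on the
 * atomic mask swap that sigsuspend() performs. "got_signal" is a
 * hypothetical flag set by the handler.
 *
 *	#include <signal.h>
 *
 *	extern volatile sig_atomic_t got_signal;
 *	sigset_t block, orig;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &orig);	// close the race window
 *	while (!got_signal)
 *		sigsuspend(&orig);	// unblock and sleep atomically
 *	sigprocmask(SIG_SETMASK, &orig, NULL);
 */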

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;
	return sigsuspend(&newset);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline void siginfo_buildtime_checks(void)
{
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	/* Verify the offsets in the two siginfos match */
#define CHECK_OFFSET(field) \
	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))

	/* kill */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);

	/* timer */
	CHECK_OFFSET(si_tid);
	CHECK_OFFSET(si_overrun);
	CHECK_OFFSET(si_value);

	/* rt */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_value);

	/* sigchld */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_status);
	CHECK_OFFSET(si_utime);
	CHECK_OFFSET(si_stime);

	/* sigfault */
	CHECK_OFFSET(si_addr);
	CHECK_OFFSET(si_trapno);
	CHECK_OFFSET(si_addr_lsb);
	CHECK_OFFSET(si_lower);
	CHECK_OFFSET(si_upper);
	CHECK_OFFSET(si_pkey);
	CHECK_OFFSET(si_perf_data);
	CHECK_OFFSET(si_perf_type);

	/* sigpoll */
	CHECK_OFFSET(si_band);
	CHECK_OFFSET(si_fd);

	/* sigsys */
	CHECK_OFFSET(si_call_addr);
	CHECK_OFFSET(si_syscall);
	CHECK_OFFSET(si_arch);
#undef CHECK_OFFSET

	/* usb asyncio */
	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
		     offsetof(struct siginfo, si_addr));
	if (sizeof(int) == sizeof(void __user *)) {
		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
			     sizeof(void __user *));
	} else {
		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
			      sizeof_field(struct siginfo, si_uid)) !=
			     sizeof(void __user *));
		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
			     offsetof(struct siginfo, si_uid));
	}
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
		     offsetof(struct compat_siginfo, si_addr));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof(compat_uptr_t));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof_field(struct siginfo, si_pid));
#endif
}

void __init signals_init(void)
{
	siginfo_buildtime_checks();

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	static struct task_struct *kdb_prev_t;
	int new_t, ret;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "the kernel, try again later\n");
		return;
	}
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (!task_is_running(t) && new_t) {
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
}
#endif /* CONFIG_KGDB_KDB */
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/kernel/signal.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 *
7 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
8 *
9 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
10 * Changes to use preallocated sigqueue structures
11 * to allow signals to be sent reliably.
12 */
13
14#include <linux/slab.h>
15#include <linux/export.h>
16#include <linux/init.h>
17#include <linux/sched/mm.h>
18#include <linux/sched/user.h>
19#include <linux/sched/debug.h>
20#include <linux/sched/task.h>
21#include <linux/sched/task_stack.h>
22#include <linux/sched/cputime.h>
23#include <linux/file.h>
24#include <linux/fs.h>
25#include <linux/mm.h>
26#include <linux/proc_fs.h>
27#include <linux/tty.h>
28#include <linux/binfmts.h>
29#include <linux/coredump.h>
30#include <linux/security.h>
31#include <linux/syscalls.h>
32#include <linux/ptrace.h>
33#include <linux/signal.h>
34#include <linux/signalfd.h>
35#include <linux/ratelimit.h>
36#include <linux/task_work.h>
37#include <linux/capability.h>
38#include <linux/freezer.h>
39#include <linux/pid_namespace.h>
40#include <linux/nsproxy.h>
41#include <linux/user_namespace.h>
42#include <linux/uprobes.h>
43#include <linux/compat.h>
44#include <linux/cn_proc.h>
45#include <linux/compiler.h>
46#include <linux/posix-timers.h>
47#include <linux/cgroup.h>
48#include <linux/audit.h>
49#include <linux/sysctl.h>
50#include <uapi/linux/pidfd.h>
51
52#define CREATE_TRACE_POINTS
53#include <trace/events/signal.h>
54
55#include <asm/param.h>
56#include <linux/uaccess.h>
57#include <asm/unistd.h>
58#include <asm/siginfo.h>
59#include <asm/cacheflush.h>
60#include <asm/syscall.h> /* for syscall_get_* */
61
62/*
63 * SLAB caches for signal bits.
64 */
65
66static struct kmem_cache *sigqueue_cachep;
67
68int print_fatal_signals __read_mostly;
69
70static void __user *sig_handler(struct task_struct *t, int sig)
71{
72 return t->sighand->action[sig - 1].sa.sa_handler;
73}
74
75static inline bool sig_handler_ignored(void __user *handler, int sig)
76{
77 /* Is it explicitly or implicitly ignored? */
78 return handler == SIG_IGN ||
79 (handler == SIG_DFL && sig_kernel_ignore(sig));
80}
81
82static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
83{
84 void __user *handler;
85
86 handler = sig_handler(t, sig);
87
88 /* SIGKILL and SIGSTOP may not be sent to the global init */
89 if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
90 return true;
91
92 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
93 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
94 return true;
95
96 /* Only allow kernel generated signals to this kthread */
97 if (unlikely((t->flags & PF_KTHREAD) &&
98 (handler == SIG_KTHREAD_KERNEL) && !force))
99 return true;
100
101 return sig_handler_ignored(handler, sig);
102}
103
104static bool sig_ignored(struct task_struct *t, int sig, bool force)
105{
106 /*
107 * Blocked signals are never ignored, since the
108 * signal handler may change by the time it is
109 * unblocked.
110 */
111 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
112 return false;
113
114 /*
115 * Tracers may want to know about even ignored signal unless it
116 * is SIGKILL which can't be reported anyway but can be ignored
117 * by SIGNAL_UNKILLABLE task.
118 */
119 if (t->ptrace && sig != SIGKILL)
120 return false;
121
122 return sig_task_ignored(t, sig, force);
123}
124
125/*
126 * Re-calculate pending state from the set of locally pending
127 * signals, globally pending signals, and blocked signals.
128 */
129static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
130{
131 unsigned long ready;
132 long i;
133
134 switch (_NSIG_WORDS) {
135 default:
136 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
137 ready |= signal->sig[i] &~ blocked->sig[i];
138 break;
139
140 case 4: ready = signal->sig[3] &~ blocked->sig[3];
141 ready |= signal->sig[2] &~ blocked->sig[2];
142 ready |= signal->sig[1] &~ blocked->sig[1];
143 ready |= signal->sig[0] &~ blocked->sig[0];
144 break;
145
146 case 2: ready = signal->sig[1] &~ blocked->sig[1];
147 ready |= signal->sig[0] &~ blocked->sig[0];
148 break;
149
150 case 1: ready = signal->sig[0] &~ blocked->sig[0];
151 }
152 return ready != 0;
153}
154
155#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
156
157static bool recalc_sigpending_tsk(struct task_struct *t)
158{
159 if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
160 PENDING(&t->pending, &t->blocked) ||
161 PENDING(&t->signal->shared_pending, &t->blocked) ||
162 cgroup_task_frozen(t)) {
163 set_tsk_thread_flag(t, TIF_SIGPENDING);
164 return true;
165 }
166
167 /*
168 * We must never clear the flag in another thread, or in current
169 * when it's possible the current syscall is returning -ERESTART*.
170 * So we don't clear it here, and only callers who know they should do.
171 */
172 return false;
173}
174
175void recalc_sigpending(void)
176{
177 if (!recalc_sigpending_tsk(current) && !freezing(current))
178 clear_thread_flag(TIF_SIGPENDING);
179
180}
181EXPORT_SYMBOL(recalc_sigpending);
182
183void calculate_sigpending(void)
184{
185 /* Have any signals or users of TIF_SIGPENDING been delayed
186 * until after fork?
187 */
188 spin_lock_irq(¤t->sighand->siglock);
189 set_tsk_thread_flag(current, TIF_SIGPENDING);
190 recalc_sigpending();
191 spin_unlock_irq(¤t->sighand->siglock);
192}
193
194/* Given the mask, find the first available signal that should be serviced. */
195
196#define SYNCHRONOUS_MASK \
197 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
198 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
199
200int next_signal(struct sigpending *pending, sigset_t *mask)
201{
202 unsigned long i, *s, *m, x;
203 int sig = 0;
204
205 s = pending->signal.sig;
206 m = mask->sig;
207
208 /*
209 * Handle the first word specially: it contains the
210 * synchronous signals that need to be dequeued first.
211 */
212 x = *s &~ *m;
213 if (x) {
214 if (x & SYNCHRONOUS_MASK)
215 x &= SYNCHRONOUS_MASK;
216 sig = ffz(~x) + 1;
217 return sig;
218 }
219
220 switch (_NSIG_WORDS) {
221 default:
222 for (i = 1; i < _NSIG_WORDS; ++i) {
223 x = *++s &~ *++m;
224 if (!x)
225 continue;
226 sig = ffz(~x) + i*_NSIG_BPW + 1;
227 break;
228 }
229 break;
230
231 case 2:
232 x = s[1] &~ m[1];
233 if (!x)
234 break;
235 sig = ffz(~x) + _NSIG_BPW + 1;
236 break;
237
238 case 1:
239 /* Nothing to do */
240 break;
241 }
242
243 return sig;
244}
245
246static inline void print_dropped_signal(int sig)
247{
248 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
249
250 if (!print_fatal_signals)
251 return;
252
253 if (!__ratelimit(&ratelimit_state))
254 return;
255
256 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
257 current->comm, current->pid, sig);
258}
259
260/**
261 * task_set_jobctl_pending - set jobctl pending bits
262 * @task: target task
263 * @mask: pending bits to set
264 *
265 * Clear @mask from @task->jobctl. @mask must be subset of
266 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
267 * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
268 * cleared. If @task is already being killed or exiting, this function
269 * becomes noop.
270 *
271 * CONTEXT:
272 * Must be called with @task->sighand->siglock held.
273 *
274 * RETURNS:
275 * %true if @mask is set, %false if made noop because @task was dying.
276 */
277bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
278{
279 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
280 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
281 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
282
283 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
284 return false;
285
286 if (mask & JOBCTL_STOP_SIGMASK)
287 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
288
289 task->jobctl |= mask;
290 return true;
291}
292
293/**
294 * task_clear_jobctl_trapping - clear jobctl trapping bit
295 * @task: target task
296 *
297 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
298 * Clear it and wake up the ptracer. Note that we don't need any further
299 * locking. @task->siglock guarantees that @task->parent points to the
300 * ptracer.
301 *
302 * CONTEXT:
303 * Must be called with @task->sighand->siglock held.
304 */
305void task_clear_jobctl_trapping(struct task_struct *task)
306{
307 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
308 task->jobctl &= ~JOBCTL_TRAPPING;
309 smp_mb(); /* advised by wake_up_bit() */
310 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
311 }
312}
313
314/**
315 * task_clear_jobctl_pending - clear jobctl pending bits
316 * @task: target task
317 * @mask: pending bits to clear
318 *
319 * Clear @mask from @task->jobctl. @mask must be subset of
320 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
321 * STOP bits are cleared together.
322 *
323 * If clearing of @mask leaves no stop or trap pending, this function calls
324 * task_clear_jobctl_trapping().
325 *
326 * CONTEXT:
327 * Must be called with @task->sighand->siglock held.
328 */
329void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
330{
331 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
332
333 if (mask & JOBCTL_STOP_PENDING)
334 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
335
336 task->jobctl &= ~mask;
337
338 if (!(task->jobctl & JOBCTL_PENDING_MASK))
339 task_clear_jobctl_trapping(task);
340}
341
342/**
343 * task_participate_group_stop - participate in a group stop
344 * @task: task participating in a group stop
345 *
346 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
347 * Group stop states are cleared and the group stop count is consumed if
348 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
349 * stop, the appropriate `SIGNAL_*` flags are set.
350 *
351 * CONTEXT:
352 * Must be called with @task->sighand->siglock held.
353 *
354 * RETURNS:
355 * %true if group stop completion should be notified to the parent, %false
356 * otherwise.
357 */
358static bool task_participate_group_stop(struct task_struct *task)
359{
360 struct signal_struct *sig = task->signal;
361 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
362
363 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
364
365 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
366
367 if (!consume)
368 return false;
369
370 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
371 sig->group_stop_count--;
372
373 /*
374 * Tell the caller to notify completion iff we are entering into a
375 * fresh group stop. Read comment in do_signal_stop() for details.
376 */
377 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
378 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
379 return true;
380 }
381 return false;
382}
383
384void task_join_group_stop(struct task_struct *task)
385{
386 unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
387 struct signal_struct *sig = current->signal;
388
389 if (sig->group_stop_count) {
390 sig->group_stop_count++;
391 mask |= JOBCTL_STOP_CONSUME;
392 } else if (!(sig->flags & SIGNAL_STOP_STOPPED))
393 return;
394
395 /* Have the new thread join an on-going signal group stop */
396 task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
397}
398
399/*
400 * allocate a new signal queue record
401 * - this may be called without locks if and only if t == current, otherwise an
402 * appropriate lock must be held to stop the target task from exiting
403 */
404static struct sigqueue *
405__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
406 int override_rlimit, const unsigned int sigqueue_flags)
407{
408 struct sigqueue *q = NULL;
409 struct ucounts *ucounts;
410 long sigpending;
411
412 /*
413 * Protect access to @t credentials. This can go away when all
414 * callers hold rcu read lock.
415 *
416 * NOTE! A pending signal will hold on to the user refcount,
417 * and we get/put the refcount only when the sigpending count
418 * changes from/to zero.
419 */
420 rcu_read_lock();
421 ucounts = task_ucounts(t);
422 sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
423 rcu_read_unlock();
424 if (!sigpending)
425 return NULL;
426
427 if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
428 q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
429 } else {
430 print_dropped_signal(sig);
431 }
432
433 if (unlikely(q == NULL)) {
434 dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
435 } else {
436 INIT_LIST_HEAD(&q->list);
437 q->flags = sigqueue_flags;
438 q->ucounts = ucounts;
439 }
440 return q;
441}
442
443static void __sigqueue_free(struct sigqueue *q)
444{
445 if (q->flags & SIGQUEUE_PREALLOC)
446 return;
447 if (q->ucounts) {
448 dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
449 q->ucounts = NULL;
450 }
451 kmem_cache_free(sigqueue_cachep, q);
452}
453
454void flush_sigqueue(struct sigpending *queue)
455{
456 struct sigqueue *q;
457
458 sigemptyset(&queue->signal);
459 while (!list_empty(&queue->list)) {
460 q = list_entry(queue->list.next, struct sigqueue , list);
461 list_del_init(&q->list);
462 __sigqueue_free(q);
463 }
464}
465
466/*
467 * Flush all pending signals for this kthread.
468 */
469void flush_signals(struct task_struct *t)
470{
471 unsigned long flags;
472
473 spin_lock_irqsave(&t->sighand->siglock, flags);
474 clear_tsk_thread_flag(t, TIF_SIGPENDING);
475 flush_sigqueue(&t->pending);
476 flush_sigqueue(&t->signal->shared_pending);
477 spin_unlock_irqrestore(&t->sighand->siglock, flags);
478}
479EXPORT_SYMBOL(flush_signals);
480
481#ifdef CONFIG_POSIX_TIMERS
482static void __flush_itimer_signals(struct sigpending *pending)
483{
484 sigset_t signal, retain;
485 struct sigqueue *q, *n;
486
487 signal = pending->signal;
488 sigemptyset(&retain);
489
490 list_for_each_entry_safe(q, n, &pending->list, list) {
491 int sig = q->info.si_signo;
492
493 if (likely(q->info.si_code != SI_TIMER)) {
494 sigaddset(&retain, sig);
495 } else {
496 sigdelset(&signal, sig);
497 list_del_init(&q->list);
498 __sigqueue_free(q);
499 }
500 }
501
502 sigorsets(&pending->signal, &signal, &retain);
503}
504
505void flush_itimer_signals(void)
506{
507 struct task_struct *tsk = current;
508 unsigned long flags;
509
510 spin_lock_irqsave(&tsk->sighand->siglock, flags);
511 __flush_itimer_signals(&tsk->pending);
512 __flush_itimer_signals(&tsk->signal->shared_pending);
513 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
514}
515#endif
516
517void ignore_signals(struct task_struct *t)
518{
519 int i;
520
521 for (i = 0; i < _NSIG; ++i)
522 t->sighand->action[i].sa.sa_handler = SIG_IGN;
523
524 flush_signals(t);
525}
526
527/*
528 * Flush all handlers for a task.
529 */
530
531void
532flush_signal_handlers(struct task_struct *t, int force_default)
533{
534 int i;
535 struct k_sigaction *ka = &t->sighand->action[0];
536 for (i = _NSIG ; i != 0 ; i--) {
537 if (force_default || ka->sa.sa_handler != SIG_IGN)
538 ka->sa.sa_handler = SIG_DFL;
539 ka->sa.sa_flags = 0;
540#ifdef __ARCH_HAS_SA_RESTORER
541 ka->sa.sa_restorer = NULL;
542#endif
543 sigemptyset(&ka->sa.sa_mask);
544 ka++;
545 }
546}
547
548bool unhandled_signal(struct task_struct *tsk, int sig)
549{
550 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
551 if (is_global_init(tsk))
552 return true;
553
554 if (handler != SIG_IGN && handler != SIG_DFL)
555 return false;
556
557 /* If dying, we handle all new signals by ignoring them */
558 if (fatal_signal_pending(tsk))
559 return false;
560
561 /* if ptraced, let the tracer determine */
562 return !tsk->ptrace;
563}
564
565static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
566 bool *resched_timer)
567{
568 struct sigqueue *q, *first = NULL;
569
570 /*
571 * Collect the siginfo appropriate to this signal. Check if
572 * there is another siginfo for the same signal.
573 */
574 list_for_each_entry(q, &list->list, list) {
575 if (q->info.si_signo == sig) {
576 if (first)
577 goto still_pending;
578 first = q;
579 }
580 }
581
582 sigdelset(&list->signal, sig);
583
584 if (first) {
585still_pending:
586 list_del_init(&first->list);
587 copy_siginfo(info, &first->info);
588
589 *resched_timer =
590 (first->flags & SIGQUEUE_PREALLOC) &&
591 (info->si_code == SI_TIMER) &&
592 (info->si_sys_private);
593
594 __sigqueue_free(first);
595 } else {
596 /*
597 * Ok, it wasn't in the queue. This must be
598 * a fast-pathed signal or we must have been
599 * out of queue space. So zero out the info.
600 */
601 clear_siginfo(info);
602 info->si_signo = sig;
603 info->si_errno = 0;
604 info->si_code = SI_USER;
605 info->si_pid = 0;
606 info->si_uid = 0;
607 }
608}
609
610static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
611 kernel_siginfo_t *info, bool *resched_timer)
612{
613 int sig = next_signal(pending, mask);
614
615 if (sig)
616 collect_signal(sig, pending, info, resched_timer);
617 return sig;
618}
619
620/*
621 * Dequeue a signal and return the element to the caller, which is
622 * expected to free it.
623 *
624 * All callers have to hold the siglock.
625 */
626int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
627 kernel_siginfo_t *info, enum pid_type *type)
628{
629 bool resched_timer = false;
630 int signr;
631
632 /* We only dequeue private signals from ourselves, we don't let
633 * signalfd steal them
634 */
635 *type = PIDTYPE_PID;
636 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
637 if (!signr) {
638 *type = PIDTYPE_TGID;
639 signr = __dequeue_signal(&tsk->signal->shared_pending,
640 mask, info, &resched_timer);
641#ifdef CONFIG_POSIX_TIMERS
642 /*
643 * itimer signal ?
644 *
645 * itimers are process shared and we restart periodic
646 * itimers in the signal delivery path to prevent DoS
647 * attacks in the high resolution timer case. This is
648 * compliant with the old way of self-restarting
649 * itimers, as the SIGALRM is a legacy signal and only
650 * queued once. Changing the restart behaviour to
651 * restart the timer in the signal dequeue path is
652 * reducing the timer noise on heavy loaded !highres
653 * systems too.
654 */
655 if (unlikely(signr == SIGALRM)) {
656 struct hrtimer *tmr = &tsk->signal->real_timer;
657
658 if (!hrtimer_is_queued(tmr) &&
659 tsk->signal->it_real_incr != 0) {
660 hrtimer_forward(tmr, tmr->base->get_time(),
661 tsk->signal->it_real_incr);
662 hrtimer_restart(tmr);
663 }
664 }
665#endif
666 }
667
668 recalc_sigpending();
669 if (!signr)
670 return 0;
671
672 if (unlikely(sig_kernel_stop(signr))) {
673 /*
674 * Set a marker that we have dequeued a stop signal. Our
675 * caller might release the siglock and then the pending
676 * stop signal it is about to process is no longer in the
677 * pending bitmasks, but must still be cleared by a SIGCONT
678 * (and overruled by a SIGKILL). So those cases clear this
679 * shared flag after we've set it. Note that this flag may
680 * remain set after the signal we return is ignored or
681 * handled. That doesn't matter because its only purpose
682 * is to alert stop-signal processing code when another
683 * processor has come along and cleared the flag.
684 */
685 current->jobctl |= JOBCTL_STOP_DEQUEUED;
686 }
687#ifdef CONFIG_POSIX_TIMERS
688 if (resched_timer) {
689 /*
690 * Release the siglock to ensure proper locking order
691 * of timer locks outside of siglocks. Note, we leave
692 * irqs disabled here, since the posix-timers code is
693 * about to disable them again anyway.
694 */
695 spin_unlock(&tsk->sighand->siglock);
696 posixtimer_rearm(info);
697 spin_lock(&tsk->sighand->siglock);
698
699 /* Don't expose the si_sys_private value to userspace */
700 info->si_sys_private = 0;
701 }
702#endif
703 return signr;
704}
705EXPORT_SYMBOL_GPL(dequeue_signal);
706
707static int dequeue_synchronous_signal(kernel_siginfo_t *info)
708{
709 struct task_struct *tsk = current;
710 struct sigpending *pending = &tsk->pending;
711 struct sigqueue *q, *sync = NULL;
712
713 /*
714 * Might a synchronous signal be in the queue?
715 */
716 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
717 return 0;
718
719 /*
720 * Return the first synchronous signal in the queue.
721 */
722 list_for_each_entry(q, &pending->list, list) {
723 /* Synchronous signals have a positive si_code */
724 if ((q->info.si_code > SI_USER) &&
725 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
726 sync = q;
727 goto next;
728 }
729 }
730 return 0;
731next:
732 /*
733 * Check if there is another siginfo for the same signal.
734 */
735 list_for_each_entry_continue(q, &pending->list, list) {
736 if (q->info.si_signo == sync->info.si_signo)
737 goto still_pending;
738 }
739
740 sigdelset(&pending->signal, sync->info.si_signo);
741 recalc_sigpending();
742still_pending:
743 list_del_init(&sync->list);
744 copy_siginfo(info, &sync->info);
745 __sigqueue_free(sync);
746 return info->si_signo;
747}
748
749/*
750 * Tell a process that it has a new active signal..
751 *
752 * NOTE! we rely on the previous spin_lock to
753 * lock interrupts for us! We can only be called with
754 * "siglock" held, and the local interrupt must
755 * have been disabled when that got acquired!
756 *
757 * No need to set need_resched since signal event passing
758 * goes through ->blocked
759 */
760void signal_wake_up_state(struct task_struct *t, unsigned int state)
761{
762 lockdep_assert_held(&t->sighand->siglock);
763
764 set_tsk_thread_flag(t, TIF_SIGPENDING);
765
766 /*
767 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
768 * case. We don't check t->state here because there is a race with it
769 * executing another processor and just now entering stopped state.
770 * By using wake_up_state, we ensure the process will wake up and
771 * handle its death signal.
772 */
773 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
774 kick_process(t);
775}
776
777/*
778 * Remove signals in mask from the pending set and queue.
779 * Returns 1 if any signals were found.
780 *
781 * All callers must be holding the siglock.
782 */
783static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
784{
785 struct sigqueue *q, *n;
786 sigset_t m;
787
788 sigandsets(&m, mask, &s->signal);
789 if (sigisemptyset(&m))
790 return;
791
792 sigandnsets(&s->signal, &s->signal, mask);
793 list_for_each_entry_safe(q, n, &s->list, list) {
794 if (sigismember(mask, q->info.si_signo)) {
795 list_del_init(&q->list);
796 __sigqueue_free(q);
797 }
798 }
799}
800
801static inline int is_si_special(const struct kernel_siginfo *info)
802{
803 return info <= SEND_SIG_PRIV;
804}
805
806static inline bool si_fromuser(const struct kernel_siginfo *info)
807{
808 return info == SEND_SIG_NOINFO ||
809 (!is_si_special(info) && SI_FROMUSER(info));
810}
811
812/*
813 * called with RCU read lock from check_kill_permission()
814 */
815static bool kill_ok_by_cred(struct task_struct *t)
816{
817 const struct cred *cred = current_cred();
818 const struct cred *tcred = __task_cred(t);
819
820 return uid_eq(cred->euid, tcred->suid) ||
821 uid_eq(cred->euid, tcred->uid) ||
822 uid_eq(cred->uid, tcred->suid) ||
823 uid_eq(cred->uid, tcred->uid) ||
824 ns_capable(tcred->user_ns, CAP_KILL);
825}
826
827/*
828 * Bad permissions for sending the signal
829 * - the caller must hold the RCU read lock
830 */
831static int check_kill_permission(int sig, struct kernel_siginfo *info,
832 struct task_struct *t)
833{
834 struct pid *sid;
835 int error;
836
837 if (!valid_signal(sig))
838 return -EINVAL;
839
840 if (!si_fromuser(info))
841 return 0;
842
843 error = audit_signal_info(sig, t); /* Let audit system see the signal */
844 if (error)
845 return error;
846
847 if (!same_thread_group(current, t) &&
848 !kill_ok_by_cred(t)) {
849 switch (sig) {
850 case SIGCONT:
851 sid = task_session(t);
852 /*
853 * We don't return the error if sid == NULL. The
854 * task was unhashed, the caller must notice this.
855 */
856 if (!sid || sid == task_session(current))
857 break;
858 fallthrough;
859 default:
860 return -EPERM;
861 }
862 }
863
864 return security_task_kill(t, info, sig, NULL);
865}
866
867/**
868 * ptrace_trap_notify - schedule trap to notify ptracer
869 * @t: tracee wanting to notify tracer
870 *
871 * This function schedules sticky ptrace trap which is cleared on the next
872 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
873 * ptracer.
874 *
875 * If @t is running, STOP trap will be taken. If trapped for STOP and
876 * ptracer is listening for events, tracee is woken up so that it can
877 * re-trap for the new event. If trapped otherwise, STOP trap will be
878 * eventually taken without returning to userland after the existing traps
879 * are finished by PTRACE_CONT.
880 *
881 * CONTEXT:
882 * Must be called with @task->sighand->siglock held.
883 */
884static void ptrace_trap_notify(struct task_struct *t)
885{
886 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
887 lockdep_assert_held(&t->sighand->siglock);
888
889 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
890 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
891}
892
893/*
894 * Handle magic process-wide effects of stop/continue signals. Unlike
895 * the signal actions, these happen immediately at signal-generation
896 * time regardless of blocking, ignoring, or handling. This does the
897 * actual continuing for SIGCONT, but not the actual stopping for stop
898 * signals. The process stop is done as a signal action for SIG_DFL.
899 *
900 * Returns true if the signal should be actually delivered, otherwise
901 * it should be dropped.
902 */
903static bool prepare_signal(int sig, struct task_struct *p, bool force)
904{
905 struct signal_struct *signal = p->signal;
906 struct task_struct *t;
907 sigset_t flush;
908
909 if (signal->flags & SIGNAL_GROUP_EXIT) {
910 if (signal->core_state)
911 return sig == SIGKILL;
912 /*
913 * The process is in the middle of dying, drop the signal.
914 */
915 return false;
916 } else if (sig_kernel_stop(sig)) {
917 /*
918 * This is a stop signal. Remove SIGCONT from all queues.
919 */
920 siginitset(&flush, sigmask(SIGCONT));
921 flush_sigqueue_mask(&flush, &signal->shared_pending);
922 for_each_thread(p, t)
923 flush_sigqueue_mask(&flush, &t->pending);
924 } else if (sig == SIGCONT) {
925 unsigned int why;
926 /*
927 * Remove all stop signals from all queues, wake all threads.
928 */
929 siginitset(&flush, SIG_KERNEL_STOP_MASK);
930 flush_sigqueue_mask(&flush, &signal->shared_pending);
931 for_each_thread(p, t) {
932 flush_sigqueue_mask(&flush, &t->pending);
933 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
934 if (likely(!(t->ptrace & PT_SEIZED))) {
935 t->jobctl &= ~JOBCTL_STOPPED;
936 wake_up_state(t, __TASK_STOPPED);
937 } else
938 ptrace_trap_notify(t);
939 }
940
941 /*
942 * Notify the parent with CLD_CONTINUED if we were stopped.
943 *
944 * If we were in the middle of a group stop, we pretend it
945 * was already finished, and then continued. Since SIGCHLD
946 * doesn't queue we report only CLD_STOPPED, as if the next
947 * CLD_CONTINUED was dropped.
948 */
949 why = 0;
950 if (signal->flags & SIGNAL_STOP_STOPPED)
951 why |= SIGNAL_CLD_CONTINUED;
952 else if (signal->group_stop_count)
953 why |= SIGNAL_CLD_STOPPED;
954
955 if (why) {
956 /*
957 * The first thread which returns from do_signal_stop()
958 * will take ->siglock, notice SIGNAL_CLD_MASK, and
959 * notify its parent. See get_signal().
960 */
961 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
962 signal->group_stop_count = 0;
963 signal->group_exit_code = 0;
964 }
965 }
966
967 return !sig_ignored(p, sig, force);
968}
969
970/*
971 * Test if P wants to take SIG. After we've checked all threads with this,
972 * it's equivalent to finding no threads not blocking SIG. Any threads not
973 * blocking SIG were ruled out because they are not running and already
974 * have pending signals. Such threads will dequeue from the shared queue
975 * as soon as they're available, so putting the signal on the shared queue
976 * will be equivalent to sending it to one such thread.
977 */
978static inline bool wants_signal(int sig, struct task_struct *p)
979{
980 if (sigismember(&p->blocked, sig))
981 return false;
982
983 if (p->flags & PF_EXITING)
984 return false;
985
986 if (sig == SIGKILL)
987 return true;
988
989 if (task_is_stopped_or_traced(p))
990 return false;
991
992 return task_curr(p) || !task_sigpending(p);
993}
994
995static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
996{
997 struct signal_struct *signal = p->signal;
998 struct task_struct *t;
999
1000 /*
1001 * Now find a thread we can wake up to take the signal off the queue.
1002 *
1003 * Try the suggested task first (may or may not be the main thread).
1004 */
1005 if (wants_signal(sig, p))
1006 t = p;
1007 else if ((type == PIDTYPE_PID) || thread_group_empty(p))
1008 /*
1009 * There is just one thread and it does not need to be woken.
1010 * It will dequeue unblocked signals before it runs again.
1011 */
1012 return;
1013 else {
1014 /*
1015 * Otherwise try to find a suitable thread.
1016 */
1017 t = signal->curr_target;
1018 while (!wants_signal(sig, t)) {
1019 t = next_thread(t);
1020 if (t == signal->curr_target)
1021 /*
1022 * No thread needs to be woken.
1023 * Any eligible threads will see
1024 * the signal in the queue soon.
1025 */
1026 return;
1027 }
1028 signal->curr_target = t;
1029 }
1030
1031 /*
1032 * Found a killable thread. If the signal will be fatal,
1033 * then start taking the whole group down immediately.
1034 */
1035 if (sig_fatal(p, sig) &&
1036 (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
1037 !sigismember(&t->real_blocked, sig) &&
1038 (sig == SIGKILL || !p->ptrace)) {
1039 /*
1040 * This signal will be fatal to the whole group.
1041 */
1042 if (!sig_kernel_coredump(sig)) {
1043 /*
1044 * Start a group exit and wake everybody up.
1045 * This way we don't have other threads
1046 * running and doing things after a slower
1047 * thread has the fatal signal pending.
1048 */
1049 signal->flags = SIGNAL_GROUP_EXIT;
1050 signal->group_exit_code = sig;
1051 signal->group_stop_count = 0;
1052 __for_each_thread(signal, t) {
1053 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1054 sigaddset(&t->pending.signal, SIGKILL);
1055 signal_wake_up(t, 1);
1056 }
1057 return;
1058 }
1059 }
1060
1061 /*
1062 * The signal is already in the shared-pending queue.
1063 * Tell the chosen thread to wake up and dequeue it.
1064 */
1065 signal_wake_up(t, sig == SIGKILL);
1066 return;
1067}
1068
1069static inline bool legacy_queue(struct sigpending *signals, int sig)
1070{
1071 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1072}
1073
1074static int __send_signal_locked(int sig, struct kernel_siginfo *info,
1075 struct task_struct *t, enum pid_type type, bool force)
1076{
1077 struct sigpending *pending;
1078 struct sigqueue *q;
1079 int override_rlimit;
1080 int ret = 0, result;
1081
1082 lockdep_assert_held(&t->sighand->siglock);
1083
1084 result = TRACE_SIGNAL_IGNORED;
1085 if (!prepare_signal(sig, t, force))
1086 goto ret;
1087
1088 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1089 /*
1090 * Short-circuit ignored signals and support queuing
1091 * exactly one non-rt signal, so that we can get more
1092 * detailed information about the cause of the signal.
1093 */
1094 result = TRACE_SIGNAL_ALREADY_PENDING;
1095 if (legacy_queue(pending, sig))
1096 goto ret;
1097
1098 result = TRACE_SIGNAL_DELIVERED;
1099 /*
1100 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1101 */
1102 if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1103 goto out_set;
1104
1105 /*
1106 * Real-time signals must be queued if sent by sigqueue, or
1107 * some other real-time mechanism. It is implementation
1108 * defined whether kill() does so. We attempt to do so, on
1109 * the principle of least surprise, but since kill is not
1110 * allowed to fail with EAGAIN when low on memory we just
1111 * make sure at least one signal gets delivered and don't
1112 * pass on the info struct.
1113 */
1114 if (sig < SIGRTMIN)
1115 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1116 else
1117 override_rlimit = 0;
1118
1119 q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);
1120
1121 if (q) {
1122 list_add_tail(&q->list, &pending->list);
1123 switch ((unsigned long) info) {
1124 case (unsigned long) SEND_SIG_NOINFO:
1125 clear_siginfo(&q->info);
1126 q->info.si_signo = sig;
1127 q->info.si_errno = 0;
1128 q->info.si_code = SI_USER;
1129 q->info.si_pid = task_tgid_nr_ns(current,
1130 task_active_pid_ns(t));
1131 rcu_read_lock();
1132 q->info.si_uid =
1133 from_kuid_munged(task_cred_xxx(t, user_ns),
1134 current_uid());
1135 rcu_read_unlock();
1136 break;
1137 case (unsigned long) SEND_SIG_PRIV:
1138 clear_siginfo(&q->info);
1139 q->info.si_signo = sig;
1140 q->info.si_errno = 0;
1141 q->info.si_code = SI_KERNEL;
1142 q->info.si_pid = 0;
1143 q->info.si_uid = 0;
1144 break;
1145 default:
1146 copy_siginfo(&q->info, info);
1147 break;
1148 }
1149 } else if (!is_si_special(info) &&
1150 sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort. We may only abort if the
		 * signal was rt and sent by a user using something
		 * other than kill().
		 */
1156 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1157 ret = -EAGAIN;
1158 goto ret;
1159 } else {
1160 /*
1161 * This is a silent loss of information. We still
1162 * send the signal, but the *info bits are lost.
1163 */
1164 result = TRACE_SIGNAL_LOSE_INFO;
1165 }
1166
1167out_set:
1168 signalfd_notify(t, sig);
1169 sigaddset(&pending->signal, sig);
1170
	/* Let multiprocess signals appear after ongoing forks */
1172 if (type > PIDTYPE_TGID) {
1173 struct multiprocess_signals *delayed;
1174 hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1175 sigset_t *signal = &delayed->signal;
1176 /* Can't queue both a stop and a continue signal */
1177 if (sig == SIGCONT)
1178 sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1179 else if (sig_kernel_stop(sig))
1180 sigdelset(signal, SIGCONT);
1181 sigaddset(signal, sig);
1182 }
1183 }
1184
1185 complete_signal(sig, t, type);
1186ret:
1187 trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1188 return ret;
1189}
1190
1191static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1192{
1193 bool ret = false;
1194 switch (siginfo_layout(info->si_signo, info->si_code)) {
1195 case SIL_KILL:
1196 case SIL_CHLD:
1197 case SIL_RT:
1198 ret = true;
1199 break;
1200 case SIL_TIMER:
1201 case SIL_POLL:
1202 case SIL_FAULT:
1203 case SIL_FAULT_TRAPNO:
1204 case SIL_FAULT_MCEERR:
1205 case SIL_FAULT_BNDERR:
1206 case SIL_FAULT_PKUERR:
1207 case SIL_FAULT_PERF_EVENT:
1208 case SIL_SYS:
1209 ret = false;
1210 break;
1211 }
1212 return ret;
1213}
1214
1215int send_signal_locked(int sig, struct kernel_siginfo *info,
1216 struct task_struct *t, enum pid_type type)
1217{
1218 /* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1219 bool force = false;
1220
1221 if (info == SEND_SIG_NOINFO) {
1222 /* Force if sent from an ancestor pid namespace */
1223 force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1224 } else if (info == SEND_SIG_PRIV) {
1225 /* Don't ignore kernel generated signals */
1226 force = true;
1227 } else if (has_si_pid_and_uid(info)) {
		/* The siginfo layout carries si_pid and si_uid to translate */
1229 struct user_namespace *t_user_ns;
1230
1231 rcu_read_lock();
1232 t_user_ns = task_cred_xxx(t, user_ns);
1233 if (current_user_ns() != t_user_ns) {
1234 kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1235 info->si_uid = from_kuid_munged(t_user_ns, uid);
1236 }
1237 rcu_read_unlock();
1238
1239 /* A kernel generated signal? */
1240 force = (info->si_code == SI_KERNEL);
1241
1242 /* From an ancestor pid namespace? */
1243 if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1244 info->si_pid = 0;
1245 force = true;
1246 }
1247 }
1248 return __send_signal_locked(sig, info, t, type, force);
1249}
1250
1251static void print_fatal_signal(int signr)
1252{
1253 struct pt_regs *regs = task_pt_regs(current);
1254 struct file *exe_file;
1255
1256 exe_file = get_task_exe_file(current);
1257 if (exe_file) {
1258 pr_info("%pD: %s: potentially unexpected fatal signal %d.\n",
1259 exe_file, current->comm, signr);
1260 fput(exe_file);
1261 } else {
1262 pr_info("%s: potentially unexpected fatal signal %d.\n",
1263 current->comm, signr);
1264 }
1265
1266#if defined(__i386__) && !defined(__arch_um__)
1267 pr_info("code at %08lx: ", regs->ip);
1268 {
1269 int i;
1270 for (i = 0; i < 16; i++) {
1271 unsigned char insn;
1272
1273 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1274 break;
1275 pr_cont("%02x ", insn);
1276 }
1277 }
1278 pr_cont("\n");
1279#endif
1280 preempt_disable();
1281 show_regs(regs);
1282 preempt_enable();
1283}
1284
1285static int __init setup_print_fatal_signals(char *str)
1286{
	get_option(&str, &print_fatal_signals);
1288
1289 return 1;
1290}
1291
1292__setup("print-fatal-signals=", setup_print_fatal_signals);
1293
1294int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1295 enum pid_type type)
1296{
1297 unsigned long flags;
1298 int ret = -ESRCH;
1299
1300 if (lock_task_sighand(p, &flags)) {
1301 ret = send_signal_locked(sig, info, p, type);
1302 unlock_task_sighand(p, &flags);
1303 }
1304
1305 return ret;
1306}
1307
1308enum sig_handler {
1309 HANDLER_CURRENT, /* If reachable use the current handler */
1310 HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
1311 HANDLER_EXIT, /* Only visible as the process exit code */
1312};
1313
1314/*
1315 * Force a signal that the process can't ignore: if necessary
1316 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1317 *
1318 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1319 * since we do not want to have a signal handler that was blocked
1320 * be invoked when user space had explicitly blocked it.
 *
 * We don't want recursive SIGSEGVs and the like; that is why
 * we also clear SIGNAL_UNKILLABLE.
1324 */
1325static int
1326force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
1327 enum sig_handler handler)
1328{
1329 unsigned long int flags;
1330 int ret, blocked, ignored;
1331 struct k_sigaction *action;
1332 int sig = info->si_signo;
1333
1334 spin_lock_irqsave(&t->sighand->siglock, flags);
1335 action = &t->sighand->action[sig-1];
1336 ignored = action->sa.sa_handler == SIG_IGN;
1337 blocked = sigismember(&t->blocked, sig);
1338 if (blocked || ignored || (handler != HANDLER_CURRENT)) {
1339 action->sa.sa_handler = SIG_DFL;
1340 if (handler == HANDLER_EXIT)
1341 action->sa.sa_flags |= SA_IMMUTABLE;
1342 if (blocked)
1343 sigdelset(&t->blocked, sig);
1344 }
1345 /*
1346 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1347 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
1348 */
1349 if (action->sa.sa_handler == SIG_DFL &&
1350 (!t->ptrace || (handler == HANDLER_EXIT)))
1351 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1352 ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
1353 /* This can happen if the signal was already pending and blocked */
1354 if (!task_sigpending(t))
1355 signal_wake_up(t, 0);
1356 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1357
1358 return ret;
1359}
1360
1361int force_sig_info(struct kernel_siginfo *info)
1362{
1363 return force_sig_info_to_task(info, current, HANDLER_CURRENT);
1364}
1365
1366/*
1367 * Nuke all other threads in the group.
1368 */
1369int zap_other_threads(struct task_struct *p)
1370{
1371 struct task_struct *t;
1372 int count = 0;
1373
1374 p->signal->group_stop_count = 0;
1375
1376 for_other_threads(p, t) {
1377 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1378 /* Don't require de_thread to wait for the vhost_worker */
1379 if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)
1380 count++;
1381
1382 /* Don't bother with already dead threads */
1383 if (t->exit_state)
1384 continue;
1385 sigaddset(&t->pending.signal, SIGKILL);
1386 signal_wake_up(t, 1);
1387 }
1388
1389 return count;
1390}
1391
1392struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1393 unsigned long *flags)
1394{
1395 struct sighand_struct *sighand;
1396
1397 rcu_read_lock();
1398 for (;;) {
1399 sighand = rcu_dereference(tsk->sighand);
1400 if (unlikely(sighand == NULL))
1401 break;
1402
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock; we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
1414 spin_lock_irqsave(&sighand->siglock, *flags);
1415 if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1416 break;
1417 spin_unlock_irqrestore(&sighand->siglock, *flags);
1418 }
1419 rcu_read_unlock();
1420
1421 return sighand;
1422}
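
/*
 * Illustrative caller pattern (a sketch, not a new API): most users go
 * through the lock_task_sighand()/unlock_task_sighand() wrappers, as
 * do_send_sig_info() above does:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		... ->sighand is pinned and ->siglock is held here ...
 *		unlock_task_sighand(p, &flags);
 *	}
 */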
1423
1424#ifdef CONFIG_LOCKDEP
1425void lockdep_assert_task_sighand_held(struct task_struct *task)
1426{
1427 struct sighand_struct *sighand;
1428
1429 rcu_read_lock();
1430 sighand = rcu_dereference(task->sighand);
1431 if (sighand)
1432 lockdep_assert_held(&sighand->siglock);
1433 else
1434 WARN_ON_ONCE(1);
1435 rcu_read_unlock();
1436}
1437#endif
1438
1439/*
1440 * send signal info to all the members of a thread group or to the
1441 * individual thread if type == PIDTYPE_PID.
1442 */
1443int group_send_sig_info(int sig, struct kernel_siginfo *info,
1444 struct task_struct *p, enum pid_type type)
1445{
1446 int ret;
1447
1448 rcu_read_lock();
1449 ret = check_kill_permission(sig, info, p);
1450 rcu_read_unlock();
1451
1452 if (!ret && sig)
1453 ret = do_send_sig_info(sig, info, p, type);
1454
1455 return ret;
1456}
1457
1458/*
1459 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1460 * control characters do (^C, ^Z etc)
1461 * - the caller must hold at least a readlock on tasklist_lock
1462 */
1463int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1464{
1465 struct task_struct *p = NULL;
1466 int ret = -ESRCH;
1467
1468 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1469 int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1470 /*
1471 * If group_send_sig_info() succeeds at least once ret
1472 * becomes 0 and after that the code below has no effect.
1473 * Otherwise we return the last err or -ESRCH if this
1474 * process group is empty.
1475 */
1476 if (ret)
1477 ret = err;
1478 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1479
1480 return ret;
1481}
1482
1483static int kill_pid_info_type(int sig, struct kernel_siginfo *info,
1484 struct pid *pid, enum pid_type type)
1485{
1486 int error = -ESRCH;
1487 struct task_struct *p;
1488
1489 for (;;) {
1490 rcu_read_lock();
1491 p = pid_task(pid, PIDTYPE_PID);
1492 if (p)
1493 error = group_send_sig_info(sig, info, p, type);
1494 rcu_read_unlock();
1495 if (likely(!p || error != -ESRCH))
1496 return error;
1497 /*
		 * The task was unhashed in between, try again. If it
		 * is dead, pid_task() will return NULL; if we race with
		 * de_thread() it will find the new leader.
1501 */
1502 }
1503}
1504
1505int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1506{
1507 return kill_pid_info_type(sig, info, pid, PIDTYPE_TGID);
1508}
1509
1510static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1511{
1512 int error;
1513 rcu_read_lock();
1514 error = kill_pid_info(sig, info, find_vpid(pid));
1515 rcu_read_unlock();
1516 return error;
1517}
1518
1519static inline bool kill_as_cred_perm(const struct cred *cred,
1520 struct task_struct *target)
1521{
1522 const struct cred *pcred = __task_cred(target);
1523
1524 return uid_eq(cred->euid, pcred->suid) ||
1525 uid_eq(cred->euid, pcred->uid) ||
1526 uid_eq(cred->uid, pcred->suid) ||
1527 uid_eq(cred->uid, pcred->uid);
1528}
1529
/*
 * The usb asyncio usage of siginfo is wrong. The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace: the 32bit address is encoded in the low
 * 32bits of the pointer, and those low 32bits are stored at a higher
 * byte address than a 32bit pointer occupies. So userspace will not
 * see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_ptr, of the sigval_t addr
 * parameter.
 */
1555int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1556 struct pid *pid, const struct cred *cred)
1557{
1558 struct kernel_siginfo info;
1559 struct task_struct *p;
1560 unsigned long flags;
1561 int ret = -EINVAL;
1562
1563 if (!valid_signal(sig))
1564 return ret;
1565
1566 clear_siginfo(&info);
1567 info.si_signo = sig;
1568 info.si_errno = errno;
1569 info.si_code = SI_ASYNCIO;
1570 *((sigval_t *)&info.si_pid) = addr;
1571
1572 rcu_read_lock();
1573 p = pid_task(pid, PIDTYPE_PID);
1574 if (!p) {
1575 ret = -ESRCH;
1576 goto out_unlock;
1577 }
1578 if (!kill_as_cred_perm(cred, p)) {
1579 ret = -EPERM;
1580 goto out_unlock;
1581 }
1582 ret = security_task_kill(p, &info, sig, cred);
1583 if (ret)
1584 goto out_unlock;
1585
1586 if (sig) {
1587 if (lock_task_sighand(p, &flags)) {
1588 ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
1589 unlock_task_sighand(p, &flags);
1590 } else
1591 ret = -ESRCH;
1592 }
1593out_unlock:
1594 rcu_read_unlock();
1595 return ret;
1596}
1597EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
1598
/*
 * kill_something_info() interprets pid the same way kill(2) does:
 * pid > 0 signals the process with that pid, pid == 0 signals the
 * caller's process group, pid < -1 signals the process group -pid,
 * and pid == -1 signals every process the caller is permitted to
 * signal, except init and the caller's own thread group.
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong. Should make it like BSD or SYSV.
 */
1605
1606static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1607{
1608 int ret;
1609
1610 if (pid > 0)
1611 return kill_proc_info(sig, info, pid);
1612
1613 /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
1614 if (pid == INT_MIN)
1615 return -ESRCH;
1616
1617 read_lock(&tasklist_lock);
1618 if (pid != -1) {
1619 ret = __kill_pgrp_info(sig, info,
1620 pid ? find_vpid(-pid) : task_pgrp(current));
1621 } else {
1622 int retval = 0, count = 0;
1623 struct task_struct * p;
1624
1625 for_each_process(p) {
1626 if (task_pid_vnr(p) > 1 &&
1627 !same_thread_group(p, current)) {
1628 int err = group_send_sig_info(sig, info, p,
1629 PIDTYPE_MAX);
1630 ++count;
1631 if (err != -EPERM)
1632 retval = err;
1633 }
1634 }
1635 ret = count ? retval : -ESRCH;
1636 }
1637 read_unlock(&tasklist_lock);
1638
1639 return ret;
1640}
1641
1642/*
1643 * These are for backward compatibility with the rest of the kernel source.
1644 */
1645
1646int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1647{
1648 /*
1649 * Make sure legacy kernel users don't send in bad values
1650 * (normal paths check this in check_kill_permission).
1651 */
1652 if (!valid_signal(sig))
1653 return -EINVAL;
1654
1655 return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1656}
1657EXPORT_SYMBOL(send_sig_info);
1658
1659#define __si_special(priv) \
1660 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1661
1662int
1663send_sig(int sig, struct task_struct *p, int priv)
1664{
1665 return send_sig_info(sig, __si_special(priv), p);
1666}
1667EXPORT_SYMBOL(send_sig);
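
/*
 * Example usage (an illustrative sketch, not a real caller): a driver
 * delivering SIGHUP with explicit siginfo could do
 *
 *	struct kernel_siginfo info;
 *
 *	clear_siginfo(&info);
 *	info.si_signo = SIGHUP;
 *	info.si_code = SI_KERNEL;
 *	send_sig_info(SIGHUP, &info, task);
 *
 * or, when no extra detail is needed, simply
 *
 *	send_sig(SIGHUP, task, 1);
 *
 * where priv=1 maps to SEND_SIG_PRIV via __si_special() above.
 */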
1668
1669void force_sig(int sig)
1670{
1671 struct kernel_siginfo info;
1672
1673 clear_siginfo(&info);
1674 info.si_signo = sig;
1675 info.si_errno = 0;
1676 info.si_code = SI_KERNEL;
1677 info.si_pid = 0;
1678 info.si_uid = 0;
1679 force_sig_info(&info);
1680}
1681EXPORT_SYMBOL(force_sig);
1682
1683void force_fatal_sig(int sig)
1684{
1685 struct kernel_siginfo info;
1686
1687 clear_siginfo(&info);
1688 info.si_signo = sig;
1689 info.si_errno = 0;
1690 info.si_code = SI_KERNEL;
1691 info.si_pid = 0;
1692 info.si_uid = 0;
1693 force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
1694}
1695
1696void force_exit_sig(int sig)
1697{
1698 struct kernel_siginfo info;
1699
1700 clear_siginfo(&info);
1701 info.si_signo = sig;
1702 info.si_errno = 0;
1703 info.si_code = SI_KERNEL;
1704 info.si_pid = 0;
1705 info.si_uid = 0;
1706 force_sig_info_to_task(&info, current, HANDLER_EXIT);
1707}
1708
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
1715void force_sigsegv(int sig)
1716{
1717 if (sig == SIGSEGV)
1718 force_fatal_sig(SIGSEGV);
1719 else
1720 force_sig(SIGSEGV);
1721}
1722
1723int force_sig_fault_to_task(int sig, int code, void __user *addr,
1724 struct task_struct *t)
1725{
1726 struct kernel_siginfo info;
1727
1728 clear_siginfo(&info);
1729 info.si_signo = sig;
1730 info.si_errno = 0;
1731 info.si_code = code;
1732 info.si_addr = addr;
1733 return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
1734}
1735
1736int force_sig_fault(int sig, int code, void __user *addr)
1737{
1738 return force_sig_fault_to_task(sig, code, addr, current);
1739}
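
/*
 * Typical use (a sketch): an architecture's page fault handler
 * reporting an unmapped user address would do, roughly,
 *
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)address);
 *
 * with "address" being the faulting address taken from the exception.
 */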
1740
1741int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t)
1742{
1743 struct kernel_siginfo info;
1744
1745 clear_siginfo(&info);
1746 info.si_signo = sig;
1747 info.si_errno = 0;
1748 info.si_code = code;
1749 info.si_addr = addr;
1750 return send_sig_info(info.si_signo, &info, t);
1751}
1752
1753int force_sig_mceerr(int code, void __user *addr, short lsb)
1754{
1755 struct kernel_siginfo info;
1756
1757 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1758 clear_siginfo(&info);
1759 info.si_signo = SIGBUS;
1760 info.si_errno = 0;
1761 info.si_code = code;
1762 info.si_addr = addr;
1763 info.si_addr_lsb = lsb;
1764 return force_sig_info(&info);
1765}
1766
1767int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1768{
1769 struct kernel_siginfo info;
1770
1771 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1772 clear_siginfo(&info);
1773 info.si_signo = SIGBUS;
1774 info.si_errno = 0;
1775 info.si_code = code;
1776 info.si_addr = addr;
1777 info.si_addr_lsb = lsb;
1778 return send_sig_info(info.si_signo, &info, t);
1779}
1780EXPORT_SYMBOL(send_sig_mceerr);
1781
1782int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1783{
1784 struct kernel_siginfo info;
1785
1786 clear_siginfo(&info);
1787 info.si_signo = SIGSEGV;
1788 info.si_errno = 0;
1789 info.si_code = SEGV_BNDERR;
1790 info.si_addr = addr;
1791 info.si_lower = lower;
1792 info.si_upper = upper;
1793 return force_sig_info(&info);
1794}
1795
1796#ifdef SEGV_PKUERR
1797int force_sig_pkuerr(void __user *addr, u32 pkey)
1798{
1799 struct kernel_siginfo info;
1800
1801 clear_siginfo(&info);
1802 info.si_signo = SIGSEGV;
1803 info.si_errno = 0;
1804 info.si_code = SEGV_PKUERR;
1805 info.si_addr = addr;
1806 info.si_pkey = pkey;
1807 return force_sig_info(&info);
1808}
1809#endif
1810
1811int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
1812{
1813 struct kernel_siginfo info;
1814
1815 clear_siginfo(&info);
1816 info.si_signo = SIGTRAP;
1817 info.si_errno = 0;
1818 info.si_code = TRAP_PERF;
1819 info.si_addr = addr;
1820 info.si_perf_data = sig_data;
1821 info.si_perf_type = type;
1822
1823 /*
1824 * Signals generated by perf events should not terminate the whole
1825 * process if SIGTRAP is blocked, however, delivering the signal
1826 * asynchronously is better than not delivering at all. But tell user
1827 * space if the signal was asynchronous, so it can clearly be
1828 * distinguished from normal synchronous ones.
1829 */
	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
1831 TRAP_PERF_FLAG_ASYNC :
1832 0;
1833
1834 return send_sig_info(info.si_signo, &info, current);
1835}
1836
1837/**
1838 * force_sig_seccomp - signals the task to allow in-process syscall emulation
1839 * @syscall: syscall number to send to userland
1840 * @reason: filter-supplied reason code to send to userland (via si_errno)
1841 * @force_coredump: true to trigger a coredump
1842 *
1843 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
1844 */
1845int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1846{
1847 struct kernel_siginfo info;
1848
1849 clear_siginfo(&info);
1850 info.si_signo = SIGSYS;
1851 info.si_code = SYS_SECCOMP;
1852 info.si_call_addr = (void __user *)KSTK_EIP(current);
1853 info.si_errno = reason;
1854 info.si_arch = syscall_get_arch(current);
1855 info.si_syscall = syscall;
1856 return force_sig_info_to_task(&info, current,
1857 force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
1858}
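
/*
 * The expected caller is kernel/seccomp.c: SECCOMP_RET_TRAP passes
 * force_coredump == false, while the kill actions pass true when the
 * SIGSYS should produce a core dump. HANDLER_EXIT then makes the
 * signal fatal even if SIGSYS is caught, blocked or ignored.
 */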
1859
1860/* For the crazy architectures that include trap information in
1861 * the errno field, instead of an actual errno value.
1862 */
1863int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1864{
1865 struct kernel_siginfo info;
1866
1867 clear_siginfo(&info);
1868 info.si_signo = SIGTRAP;
1869 info.si_errno = errno;
1870 info.si_code = TRAP_HWBKPT;
1871 info.si_addr = addr;
1872 return force_sig_info(&info);
1873}
1874
1875/* For the rare architectures that include trap information using
1876 * si_trapno.
1877 */
1878int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1879{
1880 struct kernel_siginfo info;
1881
1882 clear_siginfo(&info);
1883 info.si_signo = sig;
1884 info.si_errno = 0;
1885 info.si_code = code;
1886 info.si_addr = addr;
1887 info.si_trapno = trapno;
1888 return force_sig_info(&info);
1889}
1890
1891/* For the rare architectures that include trap information using
1892 * si_trapno.
1893 */
1894int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1895 struct task_struct *t)
1896{
1897 struct kernel_siginfo info;
1898
1899 clear_siginfo(&info);
1900 info.si_signo = sig;
1901 info.si_errno = 0;
1902 info.si_code = code;
1903 info.si_addr = addr;
1904 info.si_trapno = trapno;
1905 return send_sig_info(info.si_signo, &info, t);
1906}
1907
1908static int kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1909{
1910 int ret;
1911 read_lock(&tasklist_lock);
1912 ret = __kill_pgrp_info(sig, info, pgrp);
1913 read_unlock(&tasklist_lock);
1914 return ret;
1915}
1916
1917int kill_pgrp(struct pid *pid, int sig, int priv)
1918{
1919 return kill_pgrp_info(sig, __si_special(priv), pid);
1920}
1921EXPORT_SYMBOL(kill_pgrp);
1922
1923int kill_pid(struct pid *pid, int sig, int priv)
1924{
1925 return kill_pid_info(sig, __si_special(priv), pid);
1926}
1927EXPORT_SYMBOL(kill_pid);
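
/*
 * Example (a sketch): the tty layer uses kill_pgrp() to implement the
 * ^C/^Z control characters, roughly
 *
 *	kill_pgrp(tty_pgrp, SIGINT, 1);
 *
 * where "tty_pgrp" stands for the foreground process group's struct
 * pid and priv=1 marks the signal as kernel-generated.
 */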
1928
/*
 * These functions support sending signals using preallocated sigqueue
 * structures. This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions". In the case of POSIX timers
 * we allocate the sigqueue structure from timer_create(). If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */
1938struct sigqueue *sigqueue_alloc(void)
1939{
1940 return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
1941}
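
/*
 * Lifecycle sketch (the real user is the POSIX timer code): the
 * sigqueue is preallocated at timer_create() time so that a later
 * expiry can never fail with EAGAIN:
 *
 *	q = sigqueue_alloc();
 *	if (!q)
 *		return -EAGAIN;
 *
 * After that, send_sigqueue(q, pid, type) is called on every expiry
 * and sigqueue_free(q) when the timer is deleted.
 */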
1942
1943void sigqueue_free(struct sigqueue *q)
1944{
1945 unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;
1947
1948 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1949 /*
1950 * We must hold ->siglock while testing q->list
1951 * to serialize with collect_signal() or with
1952 * __exit_signal()->flush_sigqueue().
1953 */
1954 spin_lock_irqsave(lock, flags);
1955 q->flags &= ~SIGQUEUE_PREALLOC;
1956 /*
1957 * If it is queued it will be freed when dequeued,
1958 * like the "regular" sigqueue.
1959 */
1960 if (!list_empty(&q->list))
1961 q = NULL;
1962 spin_unlock_irqrestore(lock, flags);
1963
1964 if (q)
1965 __sigqueue_free(q);
1966}
1967
1968int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1969{
1970 int sig = q->info.si_signo;
1971 struct sigpending *pending;
1972 struct task_struct *t;
1973 unsigned long flags;
1974 int ret, result;
1975
1976 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1977
1978 ret = -1;
1979 rcu_read_lock();
1980
1981 /*
1982 * This function is used by POSIX timers to deliver a timer signal.
1983 * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID
1984 * set), the signal must be delivered to the specific thread (queues
1985 * into t->pending).
1986 *
1987 * Where type is not PIDTYPE_PID, signals must be delivered to the
1988 * process. In this case, prefer to deliver to current if it is in
1989 * the same thread group as the target process, which avoids
1990 * unnecessarily waking up a potentially idle task.
1991 */
1992 t = pid_task(pid, type);
1993 if (!t)
1994 goto ret;
1995 if (type != PIDTYPE_PID && same_thread_group(t, current))
1996 t = current;
1997 if (!likely(lock_task_sighand(t, &flags)))
1998 goto ret;
1999
2000 ret = 1; /* the signal is ignored */
2001 result = TRACE_SIGNAL_IGNORED;
2002 if (!prepare_signal(sig, t, false))
2003 goto out;
2004
2005 ret = 0;
2006 if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
2011 BUG_ON(q->info.si_code != SI_TIMER);
2012 q->info.si_overrun++;
2013 result = TRACE_SIGNAL_ALREADY_PENDING;
2014 goto out;
2015 }
2016 q->info.si_overrun = 0;
2017
2018 signalfd_notify(t, sig);
2019 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
2020 list_add_tail(&q->list, &pending->list);
2021 sigaddset(&pending->signal, sig);
2022 complete_signal(sig, t, type);
2023 result = TRACE_SIGNAL_DELIVERED;
2024out:
2025 trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
2026 unlock_task_sighand(t, &flags);
2027ret:
2028 rcu_read_unlock();
2029 return ret;
2030}
2031
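/*
 * Wake up pollers of a pidfd: once the task has an exit state, a
 * poll(2) or epoll wait on the pidfd reports EPOLLIN.
 */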
2032void do_notify_pidfd(struct task_struct *task)
2033{
2034 struct pid *pid = task_pid(task);
2035
2036 WARN_ON(task->exit_state == 0);
2037
2038 __wake_up(&pid->wait_pidfd, TASK_NORMAL, 0,
2039 poll_to_key(EPOLLIN | EPOLLRDNORM));
2040}
2041
2042/*
2043 * Let a parent know about the death of a child.
2044 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2045 *
2046 * Returns true if our parent ignored us and so we've switched to
2047 * self-reaping.
2048 */
2049bool do_notify_parent(struct task_struct *tsk, int sig)
2050{
2051 struct kernel_siginfo info;
2052 unsigned long flags;
2053 struct sighand_struct *psig;
2054 bool autoreap = false;
2055 u64 utime, stime;
2056
2057 WARN_ON_ONCE(sig == -1);
2058
2059 /* do_notify_parent_cldstop should have been called instead. */
2060 WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
2061
2062 WARN_ON_ONCE(!tsk->ptrace &&
2063 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
	/*
	 * tsk is a group leader with no threads left; wake up the
	 * non-PIDFD_THREAD waiters.
	 */
2068 if (thread_group_empty(tsk))
2069 do_notify_pidfd(tsk);
2070
2071 if (sig != SIGCHLD) {
2072 /*
2073 * This is only possible if parent == real_parent.
2074 * Check if it has changed security domain.
2075 */
2076 if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2077 sig = SIGCHLD;
2078 }
2079
2080 clear_siginfo(&info);
2081 info.si_signo = sig;
2082 info.si_errno = 0;
2083 /*
2084 * We are under tasklist_lock here so our parent is tied to
2085 * us and cannot change.
2086 *
2087 * task_active_pid_ns will always return the same pid namespace
2088 * until a task passes through release_task.
2089 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg it is not
	 * correct to rely on this.
2093 */
2094 rcu_read_lock();
2095 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2096 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2097 task_uid(tsk));
2098 rcu_read_unlock();
2099
2100 task_cputime(tsk, &utime, &stime);
2101 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2102 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2103
2104 info.si_status = tsk->exit_code & 0x7f;
2105 if (tsk->exit_code & 0x80)
2106 info.si_code = CLD_DUMPED;
2107 else if (tsk->exit_code & 0x7f)
2108 info.si_code = CLD_KILLED;
2109 else {
2110 info.si_code = CLD_EXITED;
2111 info.si_status = tsk->exit_code >> 8;
2112 }
2113
2114 psig = tsk->parent->sighand;
2115 spin_lock_irqsave(&psig->siglock, flags);
2116 if (!tsk->ptrace && sig == SIGCHLD &&
2117 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2118 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2119 /*
2120 * We are exiting and our parent doesn't care. POSIX.1
2121 * defines special semantics for setting SIGCHLD to SIG_IGN
2122 * or setting the SA_NOCLDWAIT flag: we should be reaped
2123 * automatically and not left for our parent's wait4 call.
2124 * Rather than having the parent do it as a magic kind of
2125 * signal handler, we just set this to tell do_exit that we
2126 * can be cleaned up without becoming a zombie. Note that
2127 * we still call __wake_up_parent in this case, because a
2128 * blocked sys_wait4 might now return -ECHILD.
2129 *
2130 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2131 * is implementation-defined: we do (if you don't want
2132 * it, just use SIG_IGN instead).
2133 */
2134 autoreap = true;
2135 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2136 sig = 0;
2137 }
2138 /*
2139 * Send with __send_signal as si_pid and si_uid are in the
2140 * parent's namespaces.
2141 */
2142 if (valid_signal(sig) && sig)
2143 __send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2144 __wake_up_parent(tsk, tsk->parent);
2145 spin_unlock_irqrestore(&psig->siglock, flags);
2146
2147 return autoreap;
2148}
2149
2150/**
2151 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2152 * @tsk: task reporting the state change
2153 * @for_ptracer: the notification is for ptracer
2154 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2155 *
2156 * Notify @tsk's parent that the stopped/continued state has changed. If
2157 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
2158 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2159 *
2160 * CONTEXT:
2161 * Must be called with tasklist_lock at least read locked.
2162 */
2163static void do_notify_parent_cldstop(struct task_struct *tsk,
2164 bool for_ptracer, int why)
2165{
2166 struct kernel_siginfo info;
2167 unsigned long flags;
2168 struct task_struct *parent;
2169 struct sighand_struct *sighand;
2170 u64 utime, stime;
2171
2172 if (for_ptracer) {
2173 parent = tsk->parent;
2174 } else {
2175 tsk = tsk->group_leader;
2176 parent = tsk->real_parent;
2177 }
2178
2179 clear_siginfo(&info);
2180 info.si_signo = SIGCHLD;
2181 info.si_errno = 0;
2182 /*
2183 * see comment in do_notify_parent() about the following 4 lines
2184 */
2185 rcu_read_lock();
2186 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2187 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2188 rcu_read_unlock();
2189
2190 task_cputime(tsk, &utime, &stime);
2191 info.si_utime = nsec_to_clock_t(utime);
2192 info.si_stime = nsec_to_clock_t(stime);
2193
2194 info.si_code = why;
2195 switch (why) {
2196 case CLD_CONTINUED:
2197 info.si_status = SIGCONT;
2198 break;
2199 case CLD_STOPPED:
2200 info.si_status = tsk->signal->group_exit_code & 0x7f;
2201 break;
2202 case CLD_TRAPPED:
2203 info.si_status = tsk->exit_code & 0x7f;
2204 break;
2205 default:
2206 BUG();
2207 }
2208
2209 sighand = parent->sighand;
2210 spin_lock_irqsave(&sighand->siglock, flags);
2211 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2212 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2213 send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
2214 /*
2215 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2216 */
2217 __wake_up_parent(tsk, parent);
2218 spin_unlock_irqrestore(&sighand->siglock, flags);
2219}
2220
2221/*
2222 * This must be called with current->sighand->siglock held.
2223 *
2224 * This should be the path for all ptrace stops.
2225 * We always set current->last_siginfo while stopped here.
2226 * That makes it a way to test a stopped process for
2227 * being ptrace-stopped vs being job-control-stopped.
2228 *
 * Returns the signal the ptracer requested the code resume
 * with. If the code did not stop because the tracer is gone or
 * a fatal signal is already pending, the stop signal is
 * returned unchanged.
2232 */
2233static int ptrace_stop(int exit_code, int why, unsigned long message,
2234 kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
2237{
2238 bool gstop_done = false;
2239
2240 if (arch_ptrace_stop_needed()) {
2241 /*
2242 * The arch code has something special to do before a
2243 * ptrace stop. This is allowed to block, e.g. for faults
2244 * on user stack pages. We can't keep the siglock while
2245 * calling arch_ptrace_stop, so we must release it now.
2246 * To preserve proper semantics, we must do this before
2247 * any signal bookkeeping like checking group_stop_count.
2248 */
		spin_unlock_irq(&current->sighand->siglock);
2250 arch_ptrace_stop();
		spin_lock_irq(&current->sighand->siglock);
2252 }
2253
2254 /*
2255 * After this point ptrace_signal_wake_up or signal_wake_up
2256 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2257 * signal comes in. Handle previous ptrace_unlinks and fatal
2258 * signals here to prevent ptrace_stop sleeping in schedule.
2259 */
2260 if (!current->ptrace || __fatal_signal_pending(current))
2261 return exit_code;
2262
2263 set_special_state(TASK_TRACED);
2264 current->jobctl |= JOBCTL_TRACED;
2265
2266 /*
2267 * We're committing to trapping. TRACED should be visible before
2268 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2269 * Also, transition to TRACED and updates to ->jobctl should be
2270 * atomic with respect to siglock and should be done after the arch
2271 * hook as siglock is released and regrabbed across it.
2272 *
2273 * TRACER TRACEE
2274 *
2275 * ptrace_attach()
2276 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2277 * do_wait()
2278 * set_current_state() smp_wmb();
2279 * ptrace_do_wait()
2280 * wait_task_stopped()
2281 * task_stopped_code()
2282 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2283 */
2284 smp_wmb();
2285
2286 current->ptrace_message = message;
2287 current->last_siginfo = info;
2288 current->exit_code = exit_code;
2289
2290 /*
2291 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
2293 * across siglock relocks since INTERRUPT was scheduled, PENDING
2294 * could be clear now. We act as if SIGCONT is received after
2295 * TASK_TRACED is entered - ignore it.
2296 */
2297 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2298 gstop_done = task_participate_group_stop(current);
2299
2300 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2301 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2302 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2303 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2304
2305 /* entering a trap, clear TRAPPING */
2306 task_clear_jobctl_trapping(current);
2307
	spin_unlock_irq(&current->sighand->siglock);
2309 read_lock(&tasklist_lock);
2310 /*
2311 * Notify parents of the stop.
2312 *
2313 * While ptraced, there are two parents - the ptracer and
2314 * the real_parent of the group_leader. The ptracer should
2315 * know about every stop while the real parent is only
2316 * interested in the completion of group stop. The states
2317 * for the two don't interact with each other. Notify
2318 * separately unless they're gonna be duplicates.
2319 */
2320 if (current->ptrace)
2321 do_notify_parent_cldstop(current, true, why);
2322 if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2323 do_notify_parent_cldstop(current, false, why);
2324
	/*
	 * The previous do_notify_parent_cldstop() invocation woke the ptracer.
	 * On a PREEMPTION kernel this can result in a preemption requirement
	 * which will be fulfilled after read_unlock() and the ptracer will be
	 * put on the CPU.
	 * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
	 * this task to wait in schedule(). If this task gets preempted then it
	 * remains enqueued on the runqueue. The ptracer will observe this and
	 * then sleep for a delay of one HZ tick. In the meantime this task
	 * gets scheduled, enters schedule() and will wait for the ptracer.
	 *
	 * This preemption point is not bad from a correctness point of
	 * view but extends the runtime by one HZ tick due to the
	 * ptracer's sleep. The preempt-disable section ensures that there
	 * will be no preemption between unlock and schedule(), improving
	 * performance since the ptracer will observe that the tracee is
	 * scheduled out once it gets on the CPU.
	 *
	 * On PREEMPT_RT locking tasklist_lock does not disable preemption.
	 * Therefore the task can be preempted after do_notify_parent_cldstop()
	 * before unlocking tasklist_lock, so there is no benefit in doing this.
	 *
	 * In fact disabling preemption is harmful on PREEMPT_RT because
	 * the spinlock_t in cgroup_enter_frozen() must not be acquired
	 * with preemption disabled due to the 'sleeping' spinlock
	 * substitution of RT.
	 */
2352 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2353 preempt_disable();
2354 read_unlock(&tasklist_lock);
2355 cgroup_enter_frozen();
2356 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2357 preempt_enable_no_resched();
2358 schedule();
2359 cgroup_leave_frozen(true);
2360
2361 /*
2362 * We are back. Now reacquire the siglock before touching
2363 * last_siginfo, so that we are sure to have synchronized with
2364 * any signal-sending on another CPU that wants to examine it.
2365 */
	spin_lock_irq(&current->sighand->siglock);
2367 exit_code = current->exit_code;
2368 current->last_siginfo = NULL;
2369 current->ptrace_message = 0;
2370 current->exit_code = 0;
2371
2372 /* LISTENING can be set only during STOP traps, clear it */
2373 current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2374
2375 /*
2376 * Queued signals ignored us while we were stopped for tracing.
2377 * So check for any that we should take before resuming user mode.
2378 * This sets TIF_SIGPENDING, but never clears it.
2379 */
2380 recalc_sigpending_tsk(current);
2381 return exit_code;
2382}
2383
2384static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2385{
2386 kernel_siginfo_t info;
2387
2388 clear_siginfo(&info);
2389 info.si_signo = signr;
2390 info.si_code = exit_code;
2391 info.si_pid = task_pid_vnr(current);
2392 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2393
2394 /* Let the debugger run. */
2395 return ptrace_stop(exit_code, why, message, &info);
2396}
2397
2398int ptrace_notify(int exit_code, unsigned long message)
2399{
2400 int signr;
2401
2402 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2403 if (unlikely(task_work_pending(current)))
2404 task_work_run();
2405
	spin_lock_irq(&current->sighand->siglock);
2407 signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
	spin_unlock_irq(&current->sighand->siglock);
2409 return signr;
2410}
2411
2412/**
2413 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2414 * @signr: signr causing group stop if initiating
2415 *
2416 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2417 * and participate in it. If already set, participate in the existing
2418 * group stop. If participated in a group stop (and thus slept), %true is
2419 * returned with siglock released.
2420 *
2421 * If ptraced, this function doesn't handle stop itself. Instead,
2422 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched. The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
2425 *
2426 * CONTEXT:
2427 * Must be called with @current->sighand->siglock held, which is released
2428 * on %true return.
2429 *
2430 * RETURNS:
2431 * %false if group stop is already cancelled or ptrace trap is scheduled.
2432 * %true if participated in group stop.
2433 */
2434static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
2436{
2437 struct signal_struct *sig = current->signal;
2438
2439 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2440 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2441 struct task_struct *t;
2442
2443 /* signr will be recorded in task->jobctl for retries */
2444 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2445
2446 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2447 unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2448 unlikely(sig->group_exec_task))
2449 return false;
2450 /*
2451 * There is no group stop already in progress. We must
2452 * initiate one now.
2453 *
2454 * While ptraced, a task may be resumed while group stop is
2455 * still in effect and then receive a stop signal and
2456 * initiate another group stop. This deviates from the
2457 * usual behavior as two consecutive stop signals can't
2458 * cause two group stops when !ptraced. That is why we
2459 * also check !task_is_stopped(t) below.
2460 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set. Don't generate
		 * group_exit_code in that case.
2464 *
2465 * This is not necessary for SIGNAL_STOP_CONTINUED because
2466 * an intervening stop signal is required to cause two
2467 * continued events regardless of ptrace.
2468 */
2469 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2470 sig->group_exit_code = signr;
2471
2472 sig->group_stop_count = 0;
2473 if (task_set_jobctl_pending(current, signr | gstop))
2474 sig->group_stop_count++;
2475
2476 for_other_threads(current, t) {
2477 /*
2478 * Setting state to TASK_STOPPED for a group
2479 * stop is always done with the siglock held,
2480 * so this check has no races.
2481 */
2482 if (!task_is_stopped(t) &&
2483 task_set_jobctl_pending(t, signr | gstop)) {
2484 sig->group_stop_count++;
2485 if (likely(!(t->ptrace & PT_SEIZED)))
2486 signal_wake_up(t, 0);
2487 else
2488 ptrace_trap_notify(t);
2489 }
2490 }
2491 }
2492
2493 if (likely(!current->ptrace)) {
2494 int notify = 0;
2495
2496 /*
2497 * If there are no other threads in the group, or if there
2498 * is a group stop in progress and we are the last to stop,
2499 * report to the parent.
2500 */
2501 if (task_participate_group_stop(current))
2502 notify = CLD_STOPPED;
2503
2504 current->jobctl |= JOBCTL_STOPPED;
2505 set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);
2507
2508 /*
2509 * Notify the parent of the group stop completion. Because
2510 * we're not holding either the siglock or tasklist_lock
		 * here, a ptracer may attach in between; however, this is for
2512 * group stop and should always be delivered to the real
2513 * parent of the group leader. The new ptracer will get
2514 * its notification when this task transitions into
2515 * TASK_TRACED.
2516 */
2517 if (notify) {
2518 read_lock(&tasklist_lock);
2519 do_notify_parent_cldstop(current, false, notify);
2520 read_unlock(&tasklist_lock);
2521 }
2522
2523 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2524 cgroup_enter_frozen();
2525 schedule();
2526 return true;
2527 } else {
2528 /*
2529 * While ptraced, group stop is handled by STOP trap.
2530 * Schedule it and let the caller deal with it.
2531 */
2532 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2533 return false;
2534 }
2535}
2536
2537/**
2538 * do_jobctl_trap - take care of ptrace jobctl traps
2539 *
2540 * When PT_SEIZED, it's used for both group stop and explicit
2541 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2542 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2543 * the stop signal; otherwise, %SIGTRAP.
2544 *
2545 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2546 * number as exit_code and no siginfo.
2547 *
2548 * CONTEXT:
2549 * Must be called with @current->sighand->siglock held, which may be
2550 * released and re-acquired before returning with intervening sleep.
2551 */
2552static void do_jobctl_trap(void)
2553{
2554 struct signal_struct *signal = current->signal;
2555 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2556
2557 if (current->ptrace & PT_SEIZED) {
2558 if (!signal->group_stop_count &&
2559 !(signal->flags & SIGNAL_STOP_STOPPED))
2560 signr = SIGTRAP;
2561 WARN_ON_ONCE(!signr);
2562 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2563 CLD_STOPPED, 0);
2564 } else {
2565 WARN_ON_ONCE(!signr);
2566 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2567 }
2568}
2569
2570/**
2571 * do_freezer_trap - handle the freezer jobctl trap
2572 *
 * Puts the task into the frozen state, provided the task is not about
 * to quit; in that case it just drops JOBCTL_TRAP_FREEZE.
2575 *
2576 * CONTEXT:
2577 * Must be called with @current->sighand->siglock held,
2578 * which is always released before returning.
2579 */
2580static void do_freezer_trap(void)
	__releases(&current->sighand->siglock)
2582{
2583 /*
	 * If there are other trap bits pending besides JOBCTL_TRAP_FREEZE,
	 * let's make another loop to give them a chance to be handled.
	 * In any case, we'll return.
2587 */
2588 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2589 JOBCTL_TRAP_FREEZE) {
		spin_unlock_irq(&current->sighand->siglock);
2591 return;
2592 }
2593
2594 /*
2595 * Now we're sure that there is no pending fatal signal and no
2596 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
	 * immediately (if there is a non-fatal signal pending), and
	 * put the task to sleep.
2599 */
2600 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2601 clear_thread_flag(TIF_SIGPENDING);
	spin_unlock_irq(&current->sighand->siglock);
2603 cgroup_enter_frozen();
2604 schedule();
2605}
2606
2607static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2608{
2609 /*
2610 * We do not check sig_kernel_stop(signr) but set this marker
2611 * unconditionally because we do not know whether debugger will
2612 * change signr. This flag has no meaning unless we are going
2613 * to stop after return from ptrace_stop(). In this case it will
2614 * be checked in do_signal_stop(), we should only stop if it was
2615 * not cleared by SIGCONT while we were sleeping. See also the
2616 * comment in dequeue_signal().
2617 */
2618 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2619 signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2620
2621 /* We're back. Did the debugger cancel the sig? */
2622 if (signr == 0)
2623 return signr;
2624
2625 /*
2626 * Update the siginfo structure if the signal has
2627 * changed. If the debugger wanted something
2628 * specific in the siginfo structure then it should
2629 * have updated *info via PTRACE_SETSIGINFO.
2630 */
2631 if (signr != info->si_signo) {
2632 clear_siginfo(info);
2633 info->si_signo = signr;
2634 info->si_errno = 0;
2635 info->si_code = SI_USER;
2636 rcu_read_lock();
2637 info->si_pid = task_pid_vnr(current->parent);
2638 info->si_uid = from_kuid_munged(current_user_ns(),
2639 task_uid(current->parent));
2640 rcu_read_unlock();
2641 }
2642
2643 /* If the (new) signal is now blocked, requeue it. */
	if (sigismember(&current->blocked, signr) ||
2645 fatal_signal_pending(current)) {
2646 send_signal_locked(signr, info, current, type);
2647 signr = 0;
2648 }
2649
2650 return signr;
2651}
2652
2653static void hide_si_addr_tag_bits(struct ksignal *ksig)
2654{
2655 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2656 case SIL_FAULT:
2657 case SIL_FAULT_TRAPNO:
2658 case SIL_FAULT_MCEERR:
2659 case SIL_FAULT_BNDERR:
2660 case SIL_FAULT_PKUERR:
2661 case SIL_FAULT_PERF_EVENT:
2662 ksig->info.si_addr = arch_untagged_si_addr(
2663 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2664 break;
2665 case SIL_KILL:
2666 case SIL_TIMER:
2667 case SIL_POLL:
2668 case SIL_CHLD:
2669 case SIL_RT:
2670 case SIL_SYS:
2671 break;
2672 }
2673}
2674
2675bool get_signal(struct ksignal *ksig)
2676{
2677 struct sighand_struct *sighand = current->sighand;
2678 struct signal_struct *signal = current->signal;
2679 int signr;
2680
2681 clear_notify_signal();
2682 if (unlikely(task_work_pending(current)))
2683 task_work_run();
2684
2685 if (!task_sigpending(current))
2686 return false;
2687
2688 if (unlikely(uprobe_deny_signal()))
2689 return false;
2690
2691 /*
2692 * Do this once, we can't return to user-mode if freezing() == T.
2693 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2694 * thus do not need another check after return.
2695 */
2696 try_to_freeze();
2697
2698relock:
2699 spin_lock_irq(&sighand->siglock);
2700
2701 /*
2702 * Every stopped thread goes here after wakeup. Check to see if
2703 * we should notify the parent, prepare_signal(SIGCONT) encodes
2704 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2705 */
2706 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2707 int why;
2708
2709 if (signal->flags & SIGNAL_CLD_CONTINUED)
2710 why = CLD_CONTINUED;
2711 else
2712 why = CLD_STOPPED;
2713
2714 signal->flags &= ~SIGNAL_CLD_MASK;
2715
2716 spin_unlock_irq(&sighand->siglock);
2717
2718 /*
		 * Notify the parent that we're continuing. This event is
		 * always per-process and doesn't make a whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it would be
		 * a duplicate.
2725 */
2726 read_lock(&tasklist_lock);
2727 do_notify_parent_cldstop(current, false, why);
2728
2729 if (ptrace_reparented(current->group_leader))
2730 do_notify_parent_cldstop(current->group_leader,
2731 true, why);
2732 read_unlock(&tasklist_lock);
2733
2734 goto relock;
2735 }
2736
2737 for (;;) {
2738 struct k_sigaction *ka;
2739 enum pid_type type;
2740
2741 /* Has this task already been marked for death? */
2742 if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2743 signal->group_exec_task) {
2744 signr = SIGKILL;
			sigdelset(&current->pending.signal, SIGKILL);
2746 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2747 &sighand->action[SIGKILL-1]);
2748 recalc_sigpending();
2749 /*
2750 * implies do_group_exit() or return to PF_USER_WORKER,
2751 * no need to initialize ksig->info/etc.
2752 */
2753 goto fatal;
2754 }
2755
2756 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2757 do_signal_stop(0))
2758 goto relock;
2759
2760 if (unlikely(current->jobctl &
2761 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2762 if (current->jobctl & JOBCTL_TRAP_MASK) {
2763 do_jobctl_trap();
2764 spin_unlock_irq(&sighand->siglock);
2765 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2766 do_freezer_trap();
2767
2768 goto relock;
2769 }
2770
2771 /*
2772 * If the task is leaving the frozen state, let's update
2773 * cgroup counters and reset the frozen bit.
2774 */
2775 if (unlikely(cgroup_task_frozen(current))) {
2776 spin_unlock_irq(&sighand->siglock);
2777 cgroup_leave_frozen(false);
2778 goto relock;
2779 }
2780
2781 /*
2782 * Signals generated by the execution of an instruction
2783 * need to be delivered before any other pending signals
2784 * so that the instruction pointer in the signal stack
2785 * frame points to the faulting instruction.
2786 */
2787 type = PIDTYPE_PID;
2788 signr = dequeue_synchronous_signal(&ksig->info);
2789 if (!signr)
			signr = dequeue_signal(current, &current->blocked,
2791 &ksig->info, &type);
2792
2793 if (!signr)
2794 break; /* will return 0 */
2795
2796 if (unlikely(current->ptrace) && (signr != SIGKILL) &&
		    !(sighand->action[signr - 1].sa.sa_flags & SA_IMMUTABLE)) {
2798 signr = ptrace_signal(signr, &ksig->info, type);
2799 if (!signr)
2800 continue;
2801 }
2802
2803 ka = &sighand->action[signr-1];
2804
2805 /* Trace actually delivered signals. */
2806 trace_signal_deliver(signr, &ksig->info, ka);
2807
2808 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2809 continue;
2810 if (ka->sa.sa_handler != SIG_DFL) {
2811 /* Run the handler. */
2812 ksig->ka = *ka;
2813
2814 if (ka->sa.sa_flags & SA_ONESHOT)
2815 ka->sa.sa_handler = SIG_DFL;
2816
2817 break; /* will return non-zero "signr" value */
2818 }
2819
2820 /*
2821 * Now we are doing the default action for this signal.
2822 */
2823 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2824 continue;
2825
2826 /*
2827 * Global init gets no signals it doesn't want.
2828 * Container-init gets no signals it doesn't want from same
2829 * container.
2830 *
2831 * Note that if global/container-init sees a sig_kernel_only()
2832 * signal here, the signal must have been generated internally
2833 * or must have come from an ancestor namespace. In either
2834 * case, the signal cannot be dropped.
2835 */
2836 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2837 !sig_kernel_only(signr))
2838 continue;
2839
2840 if (sig_kernel_stop(signr)) {
2841 /*
2842 * The default action is to stop all threads in
2843 * the thread group. The job control signals
2844 * do nothing in an orphaned pgrp, but SIGSTOP
2845 * always works. Note that siglock needs to be
2846 * dropped during the call to is_orphaned_pgrp()
2847 * because of lock ordering with tasklist_lock.
2848 * This allows an intervening SIGCONT to be posted.
2849 * We need to check for that and bail out if necessary.
2850 */
2851 if (signr != SIGSTOP) {
2852 spin_unlock_irq(&sighand->siglock);
2853
2854 /* signals can be posted during this window */
2855
2856 if (is_current_pgrp_orphaned())
2857 goto relock;
2858
2859 spin_lock_irq(&sighand->siglock);
2860 }
2861
2862 if (likely(do_signal_stop(signr))) {
2863 /* It released the siglock. */
2864 goto relock;
2865 }
2866
2867 /*
2868 * We didn't actually stop, due to a race
2869 * with SIGCONT or something like that.
2870 */
2871 continue;
2872 }
2873
2874 fatal:
2875 spin_unlock_irq(&sighand->siglock);
2876 if (unlikely(cgroup_task_frozen(current)))
2877 cgroup_leave_frozen(true);
2878
2879 /*
2880 * Anything else is fatal, maybe with a core dump.
2881 */
2882 current->flags |= PF_SIGNALED;
2883
2884 if (sig_kernel_coredump(signr)) {
2885 if (print_fatal_signals)
2886 print_fatal_signal(signr);
2887 proc_coredump_connector(current);
2888 /*
2889 * If it was able to dump core, this kills all
2890 * other threads in the group and synchronizes with
2891 * their demise. If we lost the race with another
2892 * thread getting here, it set group_exit_code
2893 * first and our do_group_exit call below will use
2894 * that value and ignore the one we pass it.
2895 */
2896 do_coredump(&ksig->info);
2897 }
2898
2899 /*
2900 * PF_USER_WORKER threads will catch and exit on fatal signals
2901 * themselves. They have cleanup that must be performed, so we
		 * cannot call do_exit() on their behalf. Note that ksig won't
		 * be properly initialized, so PF_USER_WORKER threads must not
		 * use it.
2904 */
2905 if (current->flags & PF_USER_WORKER)
2906 goto out;
2907
2908 /*
2909 * Death signals, no core dump.
2910 */
2911 do_group_exit(signr);
2912 /* NOTREACHED */
2913 }
2914 spin_unlock_irq(&sighand->siglock);
2915
2916 ksig->sig = signr;
2917
2918 if (signr && !(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2919 hide_si_addr_tag_bits(ksig);
2920out:
2921 return signr > 0;
2922}
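
/*
 * Sketch of the arch-side consumer (simplified; the real loop lives in
 * each architecture's signal code, e.g. arch_do_signal_or_restart()):
 *
 *	struct ksignal ksig;
 *
 *	if (get_signal(&ksig))
 *		handle_signal(&ksig, regs);	// build the user frame
 *	else
 *		restore_saved_sigmask();	// nothing to deliver
 *
 * handle_signal() here stands for the arch helper that sets up the
 * signal frame and finishes by calling signal_setup_done() below.
 */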
2923
2924/**
2925 * signal_delivered - called after signal delivery to update blocked signals
2926 * @ksig: kernel signal struct
2927 * @stepping: nonzero if debugger single-step or block-step in use
2928 *
2929 * This function should be called when a signal has successfully been
2930 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2931 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
2932 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
2933 */
2934static void signal_delivered(struct ksignal *ksig, int stepping)
2935{
2936 sigset_t blocked;
2937
	/*
	 * A signal was successfully delivered, and the saved sigmask was
	 * stored on the signal frame, and will be restored by sigreturn.
	 * So we can simply clear the restore sigmask flag.
	 */
2942 clear_restore_sigmask();
2943
	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2945 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2946 sigaddset(&blocked, ksig->sig);
2947 set_current_blocked(&blocked);
2948 if (current->sas_ss_flags & SS_AUTODISARM)
2949 sas_ss_reset(current);
2950 if (stepping)
2951 ptrace_notify(SIGTRAP, 0);
2952}
2953
2954void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2955{
2956 if (failed)
2957 force_sigsegv(ksig->sig);
2958 else
2959 signal_delivered(ksig, stepping);
2960}
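
/*
 * A hypothetical arch caller (sketch):
 *
 *	failed = setup_rt_frame(&ksig, regs);
 *	signal_setup_done(failed, &ksig, stepping);
 *
 * setup_rt_frame() stands in for the arch-specific frame builder; on
 * failure, signal_setup_done() forces a SIGSEGV via force_sigsegv()
 * above.
 */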
2961
2962/*
2963 * It could be that complete_signal() picked us to notify about the
2964 * group-wide signal. Other threads should be notified now to take
2965 * the shared signals in @which since we will not.
2966 */
2967static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2968{
2969 sigset_t retarget;
2970 struct task_struct *t;
2971
2972 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2973 if (sigisemptyset(&retarget))
2974 return;
2975
2976 for_other_threads(tsk, t) {
2977 if (t->flags & PF_EXITING)
2978 continue;
2979
2980 if (!has_pending_signals(&retarget, &t->blocked))
2981 continue;
2982 /* Remove the signals this thread can handle. */
2983 sigandsets(&retarget, &retarget, &t->blocked);
2984
2985 if (!task_sigpending(t))
2986 signal_wake_up(t, 0);
2987
2988 if (sigisemptyset(&retarget))
2989 break;
2990 }
2991}
2992
2993void exit_signals(struct task_struct *tsk)
2994{
2995 int group_stop = 0;
2996 sigset_t unblocked;
2997
2998 /*
2999 * @tsk is about to have PF_EXITING set - lock out users which
3000 * expect stable threadgroup.
3001 */
3002 cgroup_threadgroup_change_begin(tsk);
3003
3004 if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
3005 sched_mm_cid_exit_signals(tsk);
3006 tsk->flags |= PF_EXITING;
3007 cgroup_threadgroup_change_end(tsk);
3008 return;
3009 }
3010
3011 spin_lock_irq(&tsk->sighand->siglock);
3012 /*
3013 * From now this task is not visible for group-wide signals,
3014 * see wants_signal(), do_signal_stop().
3015 */
3016 sched_mm_cid_exit_signals(tsk);
3017 tsk->flags |= PF_EXITING;
3018
3019 cgroup_threadgroup_change_end(tsk);
3020
3021 if (!task_sigpending(tsk))
3022 goto out;
3023
3024 unblocked = tsk->blocked;
3025 signotset(&unblocked);
3026 retarget_shared_pending(tsk, &unblocked);
3027
3028 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
3029 task_participate_group_stop(tsk))
3030 group_stop = CLD_STOPPED;
3031out:
3032 spin_unlock_irq(&tsk->sighand->siglock);
3033
3034 /*
3035 * If group stop has completed, deliver the notification. This
3036 * should always go to the real parent of the group leader.
3037 */
3038 if (unlikely(group_stop)) {
3039 read_lock(&tasklist_lock);
3040 do_notify_parent_cldstop(tsk, false, group_stop);
3041 read_unlock(&tasklist_lock);
3042 }
3043}
3044
3045/*
3046 * System call entry points.
3047 */
3048
3049/**
3050 * sys_restart_syscall - restart a system call
3051 */
3052SYSCALL_DEFINE0(restart_syscall)
3053{
3054	struct restart_block *restart = &current->restart_block;
3055 return restart->fn(restart);
3056}
3057
3058long do_no_restart_syscall(struct restart_block *param)
3059{
3060 return -EINTR;
3061}
3062
3063static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3064{
3065 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3066 sigset_t newblocked;
3067 /* A set of now blocked but previously unblocked signals. */
3068		sigandnsets(&newblocked, newset, &current->blocked);
3069 retarget_shared_pending(tsk, &newblocked);
3070 }
3071 tsk->blocked = *newset;
3072 recalc_sigpending();
3073}
3074
3075/**
3076 * set_current_blocked - change current->blocked mask
3077 * @newset: new mask
3078 *
3079 * It is wrong to change ->blocked directly, this helper should be used
3080 * to ensure the process can't miss a shared signal we are going to block.
3081 */
3082void set_current_blocked(sigset_t *newset)
3083{
3084 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3085 __set_current_blocked(newset);
3086}
3087
3088void __set_current_blocked(const sigset_t *newset)
3089{
3090 struct task_struct *tsk = current;
3091
3092 /*
3093 * In case the signal mask hasn't changed, there is nothing we need
3094 * to do. The current->blocked shouldn't be modified by other task.
3095 */
3096 if (sigequalsets(&tsk->blocked, newset))
3097 return;
3098
3099 spin_lock_irq(&tsk->sighand->siglock);
3100 __set_task_blocked(tsk, newset);
3101 spin_unlock_irq(&tsk->sighand->siglock);
3102}
3103
3104/*
3105 * This is also useful for kernel threads that want to temporarily
3106 * (or permanently) block certain signals.
3107 *
3108 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3109 * interface happily blocks "unblockable" signals like SIGKILL
3110 * and friends.
3111 */
3112int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3113{
3114 struct task_struct *tsk = current;
3115 sigset_t newset;
3116
3117 /* Lockless, only current can change ->blocked, never from irq */
3118 if (oldset)
3119 *oldset = tsk->blocked;
3120
3121 switch (how) {
3122 case SIG_BLOCK:
3123 sigorsets(&newset, &tsk->blocked, set);
3124 break;
3125 case SIG_UNBLOCK:
3126 sigandnsets(&newset, &tsk->blocked, set);
3127 break;
3128 case SIG_SETMASK:
3129 newset = *set;
3130 break;
3131 default:
3132 return -EINVAL;
3133 }
3134
3135 __set_current_blocked(&newset);
3136 return 0;
3137}
3138EXPORT_SYMBOL(sigprocmask);
3139
3140/*
3141 * This API helps syscalls set app-provided sigmasks.
3142 *
3143 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3144 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3145 *
3146 * Note that it does set_restore_sigmask() in advance, so it must always be
3147 * paired with restore_saved_sigmask_unless() before returning from the syscall.
3148 */
3149int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3150{
3151 sigset_t kmask;
3152
3153 if (!umask)
3154 return 0;
3155 if (sigsetsize != sizeof(sigset_t))
3156 return -EINVAL;
3157 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3158 return -EFAULT;
3159
3160 set_restore_sigmask();
3161 current->saved_sigmask = current->blocked;
3162 set_current_blocked(&kmask);
3163
3164 return 0;
3165}
3166
3167#ifdef CONFIG_COMPAT
3168int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3169 size_t sigsetsize)
3170{
3171 sigset_t kmask;
3172
3173 if (!umask)
3174 return 0;
3175 if (sigsetsize != sizeof(compat_sigset_t))
3176 return -EINVAL;
3177 if (get_compat_sigset(&kmask, umask))
3178 return -EFAULT;
3179
3180 set_restore_sigmask();
3181 current->saved_sigmask = current->blocked;
3182 set_current_blocked(&kmask);
3183
3184 return 0;
3185}
3186#endif
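
/*
 * Example (userspace, illustrative sketch): the atomic mask swap that
 * set_user_sigmask()/restore_saved_sigmask_unless() implement is what makes
 * ppoll() race-free where a separate poll() + sigprocmask() pair is not.
 * SIGCHLD is an arbitrary choice for the example.
 *
 *	#include <poll.h>
 *	#include <signal.h>
 *
 *	sigset_t block, during;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, &during);  // normally keep it blocked
 *	sigdelset(&during, SIGCHLD);              // but let it in during ppoll
 *
 *	struct pollfd pfd = { .fd = 0, .events = POLLIN };
 *	// The kernel installs "during" before sleeping and restores the old
 *	// mask on return, so a SIGCHLD cannot slip in between the two steps.
 *	int n = ppoll(&pfd, 1, NULL, &during);
 */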
3187
3188/**
3189 * sys_rt_sigprocmask - change the list of currently blocked signals
3190 * @how: whether to add, remove, or set signals
3191 * @nset: signals to add, remove, or set (if non-null)
3192 * @oset: previous value of signal mask if non-null
3193 * @sigsetsize: size of sigset_t type
3194 */
3195SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3196 sigset_t __user *, oset, size_t, sigsetsize)
3197{
3198 sigset_t old_set, new_set;
3199 int error;
3200
3201 /* XXX: Don't preclude handling different sized sigset_t's. */
3202 if (sigsetsize != sizeof(sigset_t))
3203 return -EINVAL;
3204
3205 old_set = current->blocked;
3206
3207 if (nset) {
3208 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3209 return -EFAULT;
3210 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3211
3212 error = sigprocmask(how, &new_set, NULL);
3213 if (error)
3214 return error;
3215 }
3216
3217 if (oset) {
3218 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3219 return -EFAULT;
3220 }
3221
3222 return 0;
3223}
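
/*
 * Example (userspace, illustrative sketch): the three @how modes as seen
 * through glibc's sigprocmask(), which calls rt_sigprocmask() underneath.
 * Note that attempts to block SIGKILL/SIGSTOP are silently dropped by
 * set_current_blocked() above rather than reported as errors.
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);	// blocked |= set
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);	// blocked &= ~set
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// blocked = old
 */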
3224
3225#ifdef CONFIG_COMPAT
3226COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3227 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3228{
3229 sigset_t old_set = current->blocked;
3230
3231 /* XXX: Don't preclude handling different sized sigset_t's. */
3232 if (sigsetsize != sizeof(sigset_t))
3233 return -EINVAL;
3234
3235 if (nset) {
3236 sigset_t new_set;
3237 int error;
3238 if (get_compat_sigset(&new_set, nset))
3239 return -EFAULT;
3240 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3241
3242 error = sigprocmask(how, &new_set, NULL);
3243 if (error)
3244 return error;
3245 }
3246 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3247}
3248#endif
3249
3250static void do_sigpending(sigset_t *set)
3251{
3252	spin_lock_irq(&current->sighand->siglock);
3253	sigorsets(set, &current->pending.signal,
3254		  &current->signal->shared_pending.signal);
3255	spin_unlock_irq(&current->sighand->siglock);
3256
3257	/* Outside the lock because only this thread touches it. */
3258	sigandsets(set, &current->blocked, set);
3259}
3260
3261/**
3262 * sys_rt_sigpending - examine the set of pending signals that have been
3263 * raised while blocked
3264 * @uset: stores pending signals
3265 * @sigsetsize: size of sigset_t type or larger
3266 */
3267SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3268{
3269 sigset_t set;
3270
3271 if (sigsetsize > sizeof(*uset))
3272 return -EINVAL;
3273
3274 do_sigpending(&set);
3275
3276 if (copy_to_user(uset, &set, sigsetsize))
3277 return -EFAULT;
3278
3279 return 0;
3280}
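
/*
 * Example (userspace, illustrative sketch): because do_sigpending() ANDs
 * the result with ->blocked, sigpending() reports only signals that are
 * both raised and currently blocked.
 *
 *	sigset_t blk, pend;
 *
 *	sigemptyset(&blk);
 *	sigaddset(&blk, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &blk, NULL);
 *	raise(SIGUSR1);			// stays queued, not delivered
 *	sigpending(&pend);
 *	// sigismember(&pend, SIGUSR1) is now 1
 */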
3281
3282#ifdef CONFIG_COMPAT
3283COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3284 compat_size_t, sigsetsize)
3285{
3286 sigset_t set;
3287
3288 if (sigsetsize > sizeof(*uset))
3289 return -EINVAL;
3290
3291 do_sigpending(&set);
3292
3293 return put_compat_sigset(uset, &set, sigsetsize);
3294}
3295#endif
3296
3297static const struct {
3298 unsigned char limit, layout;
3299} sig_sicodes[] = {
3300 [SIGILL] = { NSIGILL, SIL_FAULT },
3301 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3302 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3303 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3304 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3305#if defined(SIGEMT)
3306 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3307#endif
3308 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3309 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3310 [SIGSYS] = { NSIGSYS, SIL_SYS },
3311};
3312
3313static bool known_siginfo_layout(unsigned sig, int si_code)
3314{
3315 if (si_code == SI_KERNEL)
3316 return true;
3317	else if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3318 if (sig_specific_sicodes(sig)) {
3319 if (si_code <= sig_sicodes[sig].limit)
3320 return true;
3321 }
3322 else if (si_code <= NSIGPOLL)
3323 return true;
3324 }
3325 else if (si_code >= SI_DETHREAD)
3326 return true;
3327 else if (si_code == SI_ASYNCNL)
3328 return true;
3329 return false;
3330}
3331
3332enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3333{
3334 enum siginfo_layout layout = SIL_KILL;
3335 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3336 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3337 (si_code <= sig_sicodes[sig].limit)) {
3338 layout = sig_sicodes[sig].layout;
3339 /* Handle the exceptions */
3340 if ((sig == SIGBUS) &&
3341 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3342 layout = SIL_FAULT_MCEERR;
3343 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3344 layout = SIL_FAULT_BNDERR;
3345#ifdef SEGV_PKUERR
3346 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3347 layout = SIL_FAULT_PKUERR;
3348#endif
3349 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3350 layout = SIL_FAULT_PERF_EVENT;
3351 else if (IS_ENABLED(CONFIG_SPARC) &&
3352 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3353 layout = SIL_FAULT_TRAPNO;
3354 else if (IS_ENABLED(CONFIG_ALPHA) &&
3355 ((sig == SIGFPE) ||
3356 ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3357 layout = SIL_FAULT_TRAPNO;
3358 }
3359 else if (si_code <= NSIGPOLL)
3360 layout = SIL_POLL;
3361 } else {
3362 if (si_code == SI_TIMER)
3363 layout = SIL_TIMER;
3364 else if (si_code == SI_SIGIO)
3365 layout = SIL_POLL;
3366 else if (si_code < 0)
3367 layout = SIL_RT;
3368 }
3369 return layout;
3370}
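
/*
 * Example (userspace, illustrative sketch): the layout chosen above decides
 * which siginfo fields a handler may trust. SIGSEGV with SEGV_MAPERR maps
 * to SIL_FAULT, so si_addr is meaningful (fprintf in a handler is used only
 * for clarity; it is not async-signal-safe).
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static void on_segv(int sig, siginfo_t *si, void *ctx)
 *	{
 *		// SIL_FAULT layout: si_addr holds the faulting address
 *		fprintf(stderr, "si_code=%d addr=%p\n", si->si_code, si->si_addr);
 *		_exit(1);
 *	}
 *
 *	int main(void)
 *	{
 *		struct sigaction sa = { .sa_sigaction = on_segv,
 *					.sa_flags = SA_SIGINFO };
 *
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGSEGV, &sa, NULL);
 *		*(volatile int *)0 = 0;		// faults with SEGV_MAPERR
 *	}
 */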
3371
3372static inline char __user *si_expansion(const siginfo_t __user *info)
3373{
3374 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3375}
3376
3377int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3378{
3379 char __user *expansion = si_expansion(to);
3380	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3381 return -EFAULT;
3382 if (clear_user(expansion, SI_EXPANSION_SIZE))
3383 return -EFAULT;
3384 return 0;
3385}
3386
3387static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3388 const siginfo_t __user *from)
3389{
3390 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3391 char __user *expansion = si_expansion(from);
3392 char buf[SI_EXPANSION_SIZE];
3393 int i;
3394 /*
3395 * An unknown si_code might need more than
3396 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3397 * extra bytes are 0. This guarantees copy_siginfo_to_user
3398 * will return this data to userspace exactly.
3399 */
3400 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3401 return -EFAULT;
3402 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3403 if (buf[i] != 0)
3404 return -E2BIG;
3405 }
3406 }
3407 return 0;
3408}
3409
3410static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3411 const siginfo_t __user *from)
3412{
3413 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3414 return -EFAULT;
3415 to->si_signo = signo;
3416 return post_copy_siginfo_from_user(to, from);
3417}
3418
3419int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3420{
3421 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3422 return -EFAULT;
3423 return post_copy_siginfo_from_user(to, from);
3424}
3425
3426#ifdef CONFIG_COMPAT
3427/**
3428 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3429 * @to: compat siginfo destination
3430 * @from: kernel siginfo source
3431 *
3432 * Note: This function does not work properly for SIGCHLD on x32, but
3433 * fortunately it doesn't have to. The only valid callers for this function are
3434 * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3435 * The latter does not care because SIGCHLD will never cause a coredump.
3436 */
3437void copy_siginfo_to_external32(struct compat_siginfo *to,
3438 const struct kernel_siginfo *from)
3439{
3440 memset(to, 0, sizeof(*to));
3441
3442 to->si_signo = from->si_signo;
3443 to->si_errno = from->si_errno;
3444 to->si_code = from->si_code;
3445	switch (siginfo_layout(from->si_signo, from->si_code)) {
3446 case SIL_KILL:
3447 to->si_pid = from->si_pid;
3448 to->si_uid = from->si_uid;
3449 break;
3450 case SIL_TIMER:
3451 to->si_tid = from->si_tid;
3452 to->si_overrun = from->si_overrun;
3453 to->si_int = from->si_int;
3454 break;
3455 case SIL_POLL:
3456 to->si_band = from->si_band;
3457 to->si_fd = from->si_fd;
3458 break;
3459 case SIL_FAULT:
3460 to->si_addr = ptr_to_compat(from->si_addr);
3461 break;
3462 case SIL_FAULT_TRAPNO:
3463 to->si_addr = ptr_to_compat(from->si_addr);
3464 to->si_trapno = from->si_trapno;
3465 break;
3466 case SIL_FAULT_MCEERR:
3467 to->si_addr = ptr_to_compat(from->si_addr);
3468 to->si_addr_lsb = from->si_addr_lsb;
3469 break;
3470 case SIL_FAULT_BNDERR:
3471 to->si_addr = ptr_to_compat(from->si_addr);
3472 to->si_lower = ptr_to_compat(from->si_lower);
3473 to->si_upper = ptr_to_compat(from->si_upper);
3474 break;
3475 case SIL_FAULT_PKUERR:
3476 to->si_addr = ptr_to_compat(from->si_addr);
3477 to->si_pkey = from->si_pkey;
3478 break;
3479 case SIL_FAULT_PERF_EVENT:
3480 to->si_addr = ptr_to_compat(from->si_addr);
3481 to->si_perf_data = from->si_perf_data;
3482 to->si_perf_type = from->si_perf_type;
3483 to->si_perf_flags = from->si_perf_flags;
3484 break;
3485 case SIL_CHLD:
3486 to->si_pid = from->si_pid;
3487 to->si_uid = from->si_uid;
3488 to->si_status = from->si_status;
3489 to->si_utime = from->si_utime;
3490 to->si_stime = from->si_stime;
3491 break;
3492 case SIL_RT:
3493 to->si_pid = from->si_pid;
3494 to->si_uid = from->si_uid;
3495 to->si_int = from->si_int;
3496 break;
3497 case SIL_SYS:
3498 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3499 to->si_syscall = from->si_syscall;
3500 to->si_arch = from->si_arch;
3501 break;
3502 }
3503}
3504
3505int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3506 const struct kernel_siginfo *from)
3507{
3508 struct compat_siginfo new;
3509
3510 copy_siginfo_to_external32(&new, from);
3511 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3512 return -EFAULT;
3513 return 0;
3514}
3515
3516static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3517 const struct compat_siginfo *from)
3518{
3519 clear_siginfo(to);
3520 to->si_signo = from->si_signo;
3521 to->si_errno = from->si_errno;
3522 to->si_code = from->si_code;
3523	switch (siginfo_layout(from->si_signo, from->si_code)) {
3524 case SIL_KILL:
3525 to->si_pid = from->si_pid;
3526 to->si_uid = from->si_uid;
3527 break;
3528 case SIL_TIMER:
3529 to->si_tid = from->si_tid;
3530 to->si_overrun = from->si_overrun;
3531 to->si_int = from->si_int;
3532 break;
3533 case SIL_POLL:
3534 to->si_band = from->si_band;
3535 to->si_fd = from->si_fd;
3536 break;
3537 case SIL_FAULT:
3538 to->si_addr = compat_ptr(from->si_addr);
3539 break;
3540 case SIL_FAULT_TRAPNO:
3541 to->si_addr = compat_ptr(from->si_addr);
3542 to->si_trapno = from->si_trapno;
3543 break;
3544 case SIL_FAULT_MCEERR:
3545 to->si_addr = compat_ptr(from->si_addr);
3546 to->si_addr_lsb = from->si_addr_lsb;
3547 break;
3548 case SIL_FAULT_BNDERR:
3549 to->si_addr = compat_ptr(from->si_addr);
3550 to->si_lower = compat_ptr(from->si_lower);
3551 to->si_upper = compat_ptr(from->si_upper);
3552 break;
3553 case SIL_FAULT_PKUERR:
3554 to->si_addr = compat_ptr(from->si_addr);
3555 to->si_pkey = from->si_pkey;
3556 break;
3557 case SIL_FAULT_PERF_EVENT:
3558 to->si_addr = compat_ptr(from->si_addr);
3559 to->si_perf_data = from->si_perf_data;
3560 to->si_perf_type = from->si_perf_type;
3561 to->si_perf_flags = from->si_perf_flags;
3562 break;
3563 case SIL_CHLD:
3564 to->si_pid = from->si_pid;
3565 to->si_uid = from->si_uid;
3566 to->si_status = from->si_status;
3567#ifdef CONFIG_X86_X32_ABI
3568 if (in_x32_syscall()) {
3569 to->si_utime = from->_sifields._sigchld_x32._utime;
3570 to->si_stime = from->_sifields._sigchld_x32._stime;
3571 } else
3572#endif
3573 {
3574 to->si_utime = from->si_utime;
3575 to->si_stime = from->si_stime;
3576 }
3577 break;
3578 case SIL_RT:
3579 to->si_pid = from->si_pid;
3580 to->si_uid = from->si_uid;
3581 to->si_int = from->si_int;
3582 break;
3583 case SIL_SYS:
3584 to->si_call_addr = compat_ptr(from->si_call_addr);
3585 to->si_syscall = from->si_syscall;
3586 to->si_arch = from->si_arch;
3587 break;
3588 }
3589 return 0;
3590}
3591
3592static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3593 const struct compat_siginfo __user *ufrom)
3594{
3595 struct compat_siginfo from;
3596
3597 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3598 return -EFAULT;
3599
3600 from.si_signo = signo;
3601 return post_copy_siginfo_from_user32(to, &from);
3602}
3603
3604int copy_siginfo_from_user32(struct kernel_siginfo *to,
3605 const struct compat_siginfo __user *ufrom)
3606{
3607 struct compat_siginfo from;
3608
3609 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3610 return -EFAULT;
3611
3612 return post_copy_siginfo_from_user32(to, &from);
3613}
3614#endif /* CONFIG_COMPAT */
3615
3616/**
3617 * do_sigtimedwait - wait for queued signals specified in @which
3618 * @which: queued signals to wait for
3619 * @info: if non-null, the signal's siginfo is returned here
3620 * @ts: upper bound on process time suspension
3621 */
3622static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3623 const struct timespec64 *ts)
3624{
3625 ktime_t *to = NULL, timeout = KTIME_MAX;
3626 struct task_struct *tsk = current;
3627 sigset_t mask = *which;
3628 enum pid_type type;
3629 int sig, ret = 0;
3630
3631 if (ts) {
3632 if (!timespec64_valid(ts))
3633 return -EINVAL;
3634 timeout = timespec64_to_ktime(*ts);
3635 to = &timeout;
3636 }
3637
3638 /*
3639 * Invert the set of allowed signals to get those we want to block.
3640 */
3641 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3642 signotset(&mask);
3643
3644 spin_lock_irq(&tsk->sighand->siglock);
3645 sig = dequeue_signal(tsk, &mask, info, &type);
3646 if (!sig && timeout) {
3647 /*
3648		 * None ready, temporarily unblock those we're interested in
3649		 * while we are sleeping, so that we'll be awakened when
3650 * they arrive. Unblocking is always fine, we can avoid
3651 * set_current_blocked().
3652 */
3653 tsk->real_blocked = tsk->blocked;
3654 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3655 recalc_sigpending();
3656 spin_unlock_irq(&tsk->sighand->siglock);
3657
3658 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
3659 ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3660 HRTIMER_MODE_REL);
3661 spin_lock_irq(&tsk->sighand->siglock);
3662 __set_task_blocked(tsk, &tsk->real_blocked);
3663 sigemptyset(&tsk->real_blocked);
3664 sig = dequeue_signal(tsk, &mask, info, &type);
3665 }
3666 spin_unlock_irq(&tsk->sighand->siglock);
3667
3668 if (sig)
3669 return sig;
3670 return ret ? -EINTR : -EAGAIN;
3671}
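
/*
 * Example (userspace, illustrative sketch): sigtimedwait() maps onto the
 * return convention above -- the signal number on success, EAGAIN on
 * timeout, EINTR if an unblocked signal interrupted the wait. The awaited
 * signal must already be blocked or it may be delivered the usual way.
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	// must be blocked first
 *	int sig = sigtimedwait(&set, &si, &ts);
 *	if (sig == SIGUSR1)
 *		printf("from pid %d\n", (int)si.si_pid);
 *	else if (errno == EAGAIN)
 *		printf("timed out\n");
 */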
3672
3673/**
3674 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3675 * in @uthese
3676 * @uthese: queued signals to wait for
3677 * @uinfo: if non-null, the signal's siginfo is returned here
3678 * @uts: upper bound on process time suspension
3679 * @sigsetsize: size of sigset_t type
3680 */
3681SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3682 siginfo_t __user *, uinfo,
3683 const struct __kernel_timespec __user *, uts,
3684 size_t, sigsetsize)
3685{
3686 sigset_t these;
3687 struct timespec64 ts;
3688 kernel_siginfo_t info;
3689 int ret;
3690
3691 /* XXX: Don't preclude handling different sized sigset_t's. */
3692 if (sigsetsize != sizeof(sigset_t))
3693 return -EINVAL;
3694
3695 if (copy_from_user(&these, uthese, sizeof(these)))
3696 return -EFAULT;
3697
3698 if (uts) {
3699 if (get_timespec64(&ts, uts))
3700 return -EFAULT;
3701 }
3702
3703 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3704
3705 if (ret > 0 && uinfo) {
3706 if (copy_siginfo_to_user(uinfo, &info))
3707 ret = -EFAULT;
3708 }
3709
3710 return ret;
3711}
3712
3713#ifdef CONFIG_COMPAT_32BIT_TIME
3714SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3715 siginfo_t __user *, uinfo,
3716 const struct old_timespec32 __user *, uts,
3717 size_t, sigsetsize)
3718{
3719 sigset_t these;
3720 struct timespec64 ts;
3721 kernel_siginfo_t info;
3722 int ret;
3723
3724 if (sigsetsize != sizeof(sigset_t))
3725 return -EINVAL;
3726
3727 if (copy_from_user(&these, uthese, sizeof(these)))
3728 return -EFAULT;
3729
3730 if (uts) {
3731 if (get_old_timespec32(&ts, uts))
3732 return -EFAULT;
3733 }
3734
3735 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3736
3737 if (ret > 0 && uinfo) {
3738 if (copy_siginfo_to_user(uinfo, &info))
3739 ret = -EFAULT;
3740 }
3741
3742 return ret;
3743}
3744#endif
3745
3746#ifdef CONFIG_COMPAT
3747COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3748 struct compat_siginfo __user *, uinfo,
3749 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3750{
3751 sigset_t s;
3752 struct timespec64 t;
3753 kernel_siginfo_t info;
3754 long ret;
3755
3756 if (sigsetsize != sizeof(sigset_t))
3757 return -EINVAL;
3758
3759 if (get_compat_sigset(&s, uthese))
3760 return -EFAULT;
3761
3762 if (uts) {
3763 if (get_timespec64(&t, uts))
3764 return -EFAULT;
3765 }
3766
3767 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3768
3769 if (ret > 0 && uinfo) {
3770 if (copy_siginfo_to_user32(uinfo, &info))
3771 ret = -EFAULT;
3772 }
3773
3774 return ret;
3775}
3776
3777#ifdef CONFIG_COMPAT_32BIT_TIME
3778COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3779 struct compat_siginfo __user *, uinfo,
3780 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3781{
3782 sigset_t s;
3783 struct timespec64 t;
3784 kernel_siginfo_t info;
3785 long ret;
3786
3787 if (sigsetsize != sizeof(sigset_t))
3788 return -EINVAL;
3789
3790 if (get_compat_sigset(&s, uthese))
3791 return -EFAULT;
3792
3793 if (uts) {
3794 if (get_old_timespec32(&t, uts))
3795 return -EFAULT;
3796 }
3797
3798 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3799
3800 if (ret > 0 && uinfo) {
3801 if (copy_siginfo_to_user32(uinfo, &info))
3802 ret = -EFAULT;
3803 }
3804
3805 return ret;
3806}
3807#endif
3808#endif
3809
3810static void prepare_kill_siginfo(int sig, struct kernel_siginfo *info,
3811 enum pid_type type)
3812{
3813 clear_siginfo(info);
3814 info->si_signo = sig;
3815 info->si_errno = 0;
3816 info->si_code = (type == PIDTYPE_PID) ? SI_TKILL : SI_USER;
3817 info->si_pid = task_tgid_vnr(current);
3818 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3819}
3820
3821/**
3822 * sys_kill - send a signal to a process
3823 * @pid: the PID of the process
3824 * @sig: signal to be sent
3825 */
3826SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3827{
3828 struct kernel_siginfo info;
3829
3830 prepare_kill_siginfo(sig, &info, PIDTYPE_TGID);
3831
3832 return kill_something_info(sig, &info, pid);
3833}
3834
3835/*
3836 * Verify that the signaler and signalee either are in the same pid namespace
3837 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3838 * namespace.
3839 */
3840static bool access_pidfd_pidns(struct pid *pid)
3841{
3842 struct pid_namespace *active = task_active_pid_ns(current);
3843 struct pid_namespace *p = ns_of_pid(pid);
3844
3845 for (;;) {
3846 if (!p)
3847 return false;
3848 if (p == active)
3849 break;
3850 p = p->parent;
3851 }
3852
3853 return true;
3854}
3855
3856static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3857 siginfo_t __user *info)
3858{
3859#ifdef CONFIG_COMPAT
3860 /*
3861 * Avoid hooking up compat syscalls and instead handle necessary
3862 * conversions here. Note, this is a stop-gap measure and should not be
3863 * considered a generic solution.
3864 */
3865 if (in_compat_syscall())
3866 return copy_siginfo_from_user32(
3867 kinfo, (struct compat_siginfo __user *)info);
3868#endif
3869 return copy_siginfo_from_user(kinfo, info);
3870}
3871
3872static struct pid *pidfd_to_pid(const struct file *file)
3873{
3874 struct pid *pid;
3875
3876 pid = pidfd_pid(file);
3877 if (!IS_ERR(pid))
3878 return pid;
3879
3880 return tgid_pidfd_to_pid(file);
3881}
3882
3883#define PIDFD_SEND_SIGNAL_FLAGS \
3884 (PIDFD_SIGNAL_THREAD | PIDFD_SIGNAL_THREAD_GROUP | \
3885 PIDFD_SIGNAL_PROCESS_GROUP)
3886
3887/**
3888 * sys_pidfd_send_signal - Signal a process through a pidfd
3889 * @pidfd: file descriptor of the process
3890 * @sig: signal to send
3891 * @info: signal info
3892 * @flags: future flags
3893 *
3894 * Send the signal to the thread group or to the individual thread depending
3895 * on PIDFD_THREAD.
3896 * In the future, an extension to @flags may be used to override the default
3897 * scope of @pidfd.
3898 *
3899 * Return: 0 on success, negative errno on failure
3900 */
3901SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3902 siginfo_t __user *, info, unsigned int, flags)
3903{
3904 int ret;
3905 struct fd f;
3906 struct pid *pid;
3907 kernel_siginfo_t kinfo;
3908 enum pid_type type;
3909
3910	/* Reject any flags outside the supported signal-scope set. */
3911 if (flags & ~PIDFD_SEND_SIGNAL_FLAGS)
3912 return -EINVAL;
3913
3914 /* Ensure that only a single signal scope determining flag is set. */
3915 if (hweight32(flags & PIDFD_SEND_SIGNAL_FLAGS) > 1)
3916 return -EINVAL;
3917
3918 f = fdget(pidfd);
3919 if (!f.file)
3920 return -EBADF;
3921
3922 /* Is this a pidfd? */
3923 pid = pidfd_to_pid(f.file);
3924 if (IS_ERR(pid)) {
3925 ret = PTR_ERR(pid);
3926 goto err;
3927 }
3928
3929 ret = -EINVAL;
3930 if (!access_pidfd_pidns(pid))
3931 goto err;
3932
3933 switch (flags) {
3934 case 0:
3935 /* Infer scope from the type of pidfd. */
3936 if (f.file->f_flags & PIDFD_THREAD)
3937 type = PIDTYPE_PID;
3938 else
3939 type = PIDTYPE_TGID;
3940 break;
3941 case PIDFD_SIGNAL_THREAD:
3942 type = PIDTYPE_PID;
3943 break;
3944 case PIDFD_SIGNAL_THREAD_GROUP:
3945 type = PIDTYPE_TGID;
3946 break;
3947 case PIDFD_SIGNAL_PROCESS_GROUP:
3948 type = PIDTYPE_PGID;
3949 break;
3950 }
3951
3952 if (info) {
3953 ret = copy_siginfo_from_user_any(&kinfo, info);
3954 if (unlikely(ret))
3955 goto err;
3956
3957 ret = -EINVAL;
3958 if (unlikely(sig != kinfo.si_signo))
3959 goto err;
3960
3961 /* Only allow sending arbitrary signals to yourself. */
3962 ret = -EPERM;
3963 if ((task_pid(current) != pid || type > PIDTYPE_TGID) &&
3964 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3965 goto err;
3966 } else {
3967 prepare_kill_siginfo(sig, &kinfo, type);
3968 }
3969
3970 if (type == PIDTYPE_PGID)
3971 ret = kill_pgrp_info(sig, &kinfo, pid);
3972 else
3973 ret = kill_pid_info_type(sig, &kinfo, pid, type);
3974err:
3975 fdput(f);
3976 return ret;
3977}
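
/*
 * Example (userspace, illustrative sketch): signalling through a pidfd is
 * immune to PID reuse -- if the target died and its PID was recycled, the
 * pidfd still refers to the old process and the syscall fails with ESRCH.
 * Raw syscall() is used in case the libc lacks wrappers.
 *
 *	#include <sys/syscall.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	if (pidfd >= 0) {
 *		// info == NULL: the kernel builds SI_USER siginfo, as for kill()
 *		syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *	}
 */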
3978
3979static int
3980do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3981{
3982 struct task_struct *p;
3983 int error = -ESRCH;
3984
3985 rcu_read_lock();
3986 p = find_task_by_vpid(pid);
3987 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3988 error = check_kill_permission(sig, info, p);
3989 /*
3990 * The null signal is a permissions and process existence
3991 * probe. No signal is actually delivered.
3992 */
3993 if (!error && sig) {
3994 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3995 /*
3996 * If lock_task_sighand() failed we pretend the task
3997 * dies after receiving the signal. The window is tiny,
3998 * and the signal is private anyway.
3999 */
4000 if (unlikely(error == -ESRCH))
4001 error = 0;
4002 }
4003 }
4004 rcu_read_unlock();
4005
4006 return error;
4007}
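
/*
 * Example (userspace, illustrative sketch): the null-signal probe mentioned
 * above works the same way through plain kill() -- permission and existence
 * are checked, nothing is queued.
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	if (kill(pid, 0) == 0)
 *		puts("pid exists and is signalable");
 *	else if (errno == ESRCH)
 *		puts("no such process");
 *	else if (errno == EPERM)
 *		puts("exists, but we lack permission");
 */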
4008
4009static int do_tkill(pid_t tgid, pid_t pid, int sig)
4010{
4011 struct kernel_siginfo info;
4012
4013 prepare_kill_siginfo(sig, &info, PIDTYPE_PID);
4014
4015 return do_send_specific(tgid, pid, sig, &info);
4016}
4017
4018/**
4019 * sys_tgkill - send signal to one specific thread
4020 * @tgid: the thread group ID of the thread
4021 * @pid: the PID of the thread
4022 * @sig: signal to be sent
4023 *
4024 * This syscall also checks the @tgid and returns -ESRCH even if the PID
4025 * exists but no longer belongs to the target process. This
4026 * method solves the problem of threads exiting and PIDs getting reused.
4027 */
4028SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
4029{
4030 /* This is only valid for single tasks */
4031 if (pid <= 0 || tgid <= 0)
4032 return -EINVAL;
4033
4034 return do_tkill(tgid, pid, sig);
4035}
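
/*
 * Example (userspace, illustrative sketch): directing a signal at a single
 * thread. Passing the tgid as well lets the kernel fail with ESRCH if the
 * tid has been recycled into a different process, exactly as described
 * above.
 *
 *	#include <sys/syscall.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	pid_t tgid = getpid();
 *	pid_t tid = syscall(SYS_gettid);	// current thread's id
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */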
4036
4037/**
4038 * sys_tkill - send signal to one specific task
4039 * @pid: the PID of the task
4040 * @sig: signal to be sent
4041 *
4042 * Send a signal to only one task, even if it's a CLONE_THREAD task.
4043 */
4044SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
4045{
4046 /* This is only valid for single tasks */
4047 if (pid <= 0)
4048 return -EINVAL;
4049
4050 return do_tkill(0, pid, sig);
4051}
4052
4053static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
4054{
4055 /* Not even root can pretend to send signals from the kernel.
4056 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4057 */
4058 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4059 (task_pid_vnr(current) != pid))
4060 return -EPERM;
4061
4062 /* POSIX.1b doesn't mention process groups. */
4063 return kill_proc_info(sig, info, pid);
4064}
4065
4066/**
4067 * sys_rt_sigqueueinfo - queue a signal and associated info to a process
4068 * @pid: the PID of the thread
4069 * @sig: signal to be sent
4070 * @uinfo: signal info to be sent
4071 */
4072SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4073 siginfo_t __user *, uinfo)
4074{
4075 kernel_siginfo_t info;
4076 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4077 if (unlikely(ret))
4078 return ret;
4079 return do_rt_sigqueueinfo(pid, sig, &info);
4080}
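
/*
 * Example (userspace, illustrative sketch, two fragments): glibc's
 * sigqueue() is the usual front end for rt_sigqueueinfo(); it fills in
 * si_code = SI_QUEUE (negative, hence the SIL_RT layout), so the EPERM
 * check above does not fire.
 *
 *	#include <signal.h>
 *
 *	// sender:
 *	union sigval v = { .sival_int = 42 };
 *	sigqueue(pid, SIGRTMIN, v);
 *
 *	// receiver, handler installed with SA_SIGINFO:
 *	static void h(int sig, siginfo_t *si, void *ctx)
 *	{
 *		int payload = si->si_value.sival_int;	// 42
 *	}
 */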
4081
4082#ifdef CONFIG_COMPAT
4083COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4084 compat_pid_t, pid,
4085 int, sig,
4086 struct compat_siginfo __user *, uinfo)
4087{
4088 kernel_siginfo_t info;
4089 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4090 if (unlikely(ret))
4091 return ret;
4092 return do_rt_sigqueueinfo(pid, sig, &info);
4093}
4094#endif
4095
4096static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4097{
4098 /* This is only valid for single tasks */
4099 if (pid <= 0 || tgid <= 0)
4100 return -EINVAL;
4101
4102 /* Not even root can pretend to send signals from the kernel.
4103 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4104 */
4105 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4106 (task_pid_vnr(current) != pid))
4107 return -EPERM;
4108
4109 return do_send_specific(tgid, pid, sig, info);
4110}
4111
4112SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4113 siginfo_t __user *, uinfo)
4114{
4115 kernel_siginfo_t info;
4116 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4117 if (unlikely(ret))
4118 return ret;
4119 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4120}
4121
4122#ifdef CONFIG_COMPAT
4123COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4124 compat_pid_t, tgid,
4125 compat_pid_t, pid,
4126 int, sig,
4127 struct compat_siginfo __user *, uinfo)
4128{
4129 kernel_siginfo_t info;
4130 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4131 if (unlikely(ret))
4132 return ret;
4133 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4134}
4135#endif
4136
4137/*
4138 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4139 */
4140void kernel_sigaction(int sig, __sighandler_t action)
4141{
4142	spin_lock_irq(&current->sighand->siglock);
4143	current->sighand->action[sig - 1].sa.sa_handler = action;
4144	if (action == SIG_IGN) {
4145		sigset_t mask;
4146
4147		sigemptyset(&mask);
4148		sigaddset(&mask, sig);
4149
4150		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4151		flush_sigqueue_mask(&mask, &current->pending);
4152		recalc_sigpending();
4153	}
4154	spin_unlock_irq(&current->sighand->siglock);
4155}
4156EXPORT_SYMBOL(kernel_sigaction);
4157
4158void __weak sigaction_compat_abi(struct k_sigaction *act,
4159 struct k_sigaction *oact)
4160{
4161}
4162
4163int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4164{
4165 struct task_struct *p = current, *t;
4166 struct k_sigaction *k;
4167 sigset_t mask;
4168
4169 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4170 return -EINVAL;
4171
4172 k = &p->sighand->action[sig-1];
4173
4174 spin_lock_irq(&p->sighand->siglock);
4175 if (k->sa.sa_flags & SA_IMMUTABLE) {
4176 spin_unlock_irq(&p->sighand->siglock);
4177 return -EINVAL;
4178 }
4179 if (oact)
4180 *oact = *k;
4181
4182 /*
4183 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4184 * e.g. by having an architecture use the bit in their uapi.
4185 */
4186 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4187
4188 /*
4189 * Clear unknown flag bits in order to allow userspace to detect missing
4190 * support for flag bits and to allow the kernel to use non-uapi bits
4191 * internally.
4192 */
4193 if (act)
4194 act->sa.sa_flags &= UAPI_SA_FLAGS;
4195 if (oact)
4196 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4197
4198 sigaction_compat_abi(act, oact);
4199
4200 if (act) {
4201 sigdelsetmask(&act->sa.sa_mask,
4202 sigmask(SIGKILL) | sigmask(SIGSTOP));
4203 *k = *act;
4204 /*
4205 * POSIX 3.3.1.3:
4206 * "Setting a signal action to SIG_IGN for a signal that is
4207 * pending shall cause the pending signal to be discarded,
4208 * whether or not it is blocked."
4209 *
4210 * "Setting a signal action to SIG_DFL for a signal that is
4211 * pending and whose default action is to ignore the signal
4212 * (for example, SIGCHLD), shall cause the pending signal to
4213 * be discarded, whether or not it is blocked"
4214 */
4215 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4216 sigemptyset(&mask);
4217 sigaddset(&mask, sig);
4218 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4219 for_each_thread(p, t)
4220 flush_sigqueue_mask(&mask, &t->pending);
4221 }
4222 }
4223
4224 spin_unlock_irq(&p->sighand->siglock);
4225 return 0;
4226}
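
/*
 * Example (userspace, illustrative sketch): the POSIX rule quoted above is
 * observable -- a pending, blocked signal disappears the moment its action
 * becomes SIG_IGN.
 *
 *	#include <signal.h>
 *
 *	sigset_t s;
 *
 *	sigemptyset(&s);
 *	sigaddset(&s, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &s, NULL);
 *	raise(SIGUSR1);			// now pending and blocked
 *	signal(SIGUSR1, SIG_IGN);	// pending instance is discarded
 *	signal(SIGUSR1, SIG_DFL);
 *	sigprocmask(SIG_UNBLOCK, &s, NULL);	// nothing is delivered
 */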
4227
4228#ifdef CONFIG_DYNAMIC_SIGFRAME
4229static inline void sigaltstack_lock(void)
4230	__acquires(&current->sighand->siglock)
4231 {
4232	spin_lock_irq(&current->sighand->siglock);
4233 }
4234
4235 static inline void sigaltstack_unlock(void)
4236	__releases(&current->sighand->siglock)
4237 {
4238	spin_unlock_irq(&current->sighand->siglock);
4239}
4240#else
4241static inline void sigaltstack_lock(void) { }
4242static inline void sigaltstack_unlock(void) { }
4243#endif
4244
4245static int
4246do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
4247 size_t min_ss_size)
4248{
4249 struct task_struct *t = current;
4250 int ret = 0;
4251
4252 if (oss) {
4253 memset(oss, 0, sizeof(stack_t));
4254 oss->ss_sp = (void __user *) t->sas_ss_sp;
4255 oss->ss_size = t->sas_ss_size;
4256 oss->ss_flags = sas_ss_flags(sp) |
4257 (current->sas_ss_flags & SS_FLAG_BITS);
4258 }
4259
4260 if (ss) {
4261 void __user *ss_sp = ss->ss_sp;
4262 size_t ss_size = ss->ss_size;
4263 unsigned ss_flags = ss->ss_flags;
4264 int ss_mode;
4265
4266 if (unlikely(on_sig_stack(sp)))
4267 return -EPERM;
4268
4269 ss_mode = ss_flags & ~SS_FLAG_BITS;
4270 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4271 ss_mode != 0))
4272 return -EINVAL;
4273
4274 /*
4275 * Return before taking any locks if no actual
4276 * sigaltstack changes were requested.
4277 */
4278 if (t->sas_ss_sp == (unsigned long)ss_sp &&
4279 t->sas_ss_size == ss_size &&
4280 t->sas_ss_flags == ss_flags)
4281 return 0;
4282
4283 sigaltstack_lock();
4284 if (ss_mode == SS_DISABLE) {
4285 ss_size = 0;
4286 ss_sp = NULL;
4287 } else {
4288 if (unlikely(ss_size < min_ss_size))
4289 ret = -ENOMEM;
4290 if (!sigaltstack_size_valid(ss_size))
4291 ret = -ENOMEM;
4292 }
4293 if (!ret) {
4294 t->sas_ss_sp = (unsigned long) ss_sp;
4295 t->sas_ss_size = ss_size;
4296 t->sas_ss_flags = ss_flags;
4297 }
4298 sigaltstack_unlock();
4299 }
4300 return ret;
4301}
4302
4303SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
4304{
4305 stack_t new, old;
4306 int err;
4307 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4308 return -EFAULT;
4309 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4310 current_user_stack_pointer(),
4311 MINSIGSTKSZ);
4312 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4313 err = -EFAULT;
4314 return err;
4315}
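
/*
 * Example (userspace, illustrative sketch): the classic use of sigaltstack()
 * is handling stack-overflow SIGSEGVs, which cannot run on the overflowed
 * stack. on_segv is assumed to be a SA_SIGINFO handler as sketched earlier.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = { .ss_flags = 0, .ss_size = SIGSTKSZ };
 *	ss.ss_sp = malloc(ss.ss_size);
 *	sigaltstack(&ss, NULL);
 *
 *	struct sigaction sa = { .sa_sigaction = on_segv,
 *				.sa_flags = SA_SIGINFO | SA_ONSTACK };
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);	// handler now runs on ss.ss_sp
 */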
4316
4317int restore_altstack(const stack_t __user *uss)
4318{
4319 stack_t new;
4320 if (copy_from_user(&new, uss, sizeof(stack_t)))
4321 return -EFAULT;
4322 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4323 MINSIGSTKSZ);
4324 /* squash all but EFAULT for now */
4325 return 0;
4326}
4327
4328int __save_altstack(stack_t __user *uss, unsigned long sp)
4329{
4330 struct task_struct *t = current;
4331 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4332 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4333 __put_user(t->sas_ss_size, &uss->ss_size);
4334 return err;
4335}
4336
4337#ifdef CONFIG_COMPAT
4338static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4339 compat_stack_t __user *uoss_ptr)
4340{
4341 stack_t uss, uoss;
4342 int ret;
4343
4344 if (uss_ptr) {
4345 compat_stack_t uss32;
4346 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4347 return -EFAULT;
4348 uss.ss_sp = compat_ptr(uss32.ss_sp);
4349 uss.ss_flags = uss32.ss_flags;
4350 uss.ss_size = uss32.ss_size;
4351 }
4352 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4353 compat_user_stack_pointer(),
4354 COMPAT_MINSIGSTKSZ);
4355 if (ret >= 0 && uoss_ptr) {
4356 compat_stack_t old;
4357 memset(&old, 0, sizeof(old));
4358 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4359 old.ss_flags = uoss.ss_flags;
4360 old.ss_size = uoss.ss_size;
4361 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4362 ret = -EFAULT;
4363 }
4364 return ret;
4365}
4366
4367COMPAT_SYSCALL_DEFINE2(sigaltstack,
4368 const compat_stack_t __user *, uss_ptr,
4369 compat_stack_t __user *, uoss_ptr)
4370{
4371 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4372}
4373
4374int compat_restore_altstack(const compat_stack_t __user *uss)
4375{
4376 int err = do_compat_sigaltstack(uss, NULL);
4377 /* squash all but -EFAULT for now */
4378 return err == -EFAULT ? err : 0;
4379}
4380
4381int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4382{
4383 int err;
4384 struct task_struct *t = current;
4385 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4386 &uss->ss_sp) |
4387 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4388 __put_user(t->sas_ss_size, &uss->ss_size);
4389 return err;
4390}
4391#endif
4392
4393#ifdef __ARCH_WANT_SYS_SIGPENDING
4394
4395/**
4396 * sys_sigpending - examine pending signals
4397 * @uset: where the mask of pending signals is returned
4398 */
4399SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4400{
4401 sigset_t set;
4402
4403 if (sizeof(old_sigset_t) > sizeof(*uset))
4404 return -EINVAL;
4405
4406 do_sigpending(&set);
4407
4408 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4409 return -EFAULT;
4410
4411 return 0;
4412}
4413
4414#ifdef CONFIG_COMPAT
4415COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4416{
4417 sigset_t set;
4418
4419 do_sigpending(&set);
4420
4421 return put_user(set.sig[0], set32);
4422}
4423#endif
4424
4425#endif
4426
4427#ifdef __ARCH_WANT_SYS_SIGPROCMASK
4428/**
4429 * sys_sigprocmask - examine and change blocked signals
4430 * @how: whether to add, remove, or set signals
4431 * @nset: signals to add or remove (if non-null)
4432 * @oset: previous value of signal mask if non-null
4433 *
4434 * Some platforms have their own version with special arguments;
4435 * others support only sys_rt_sigprocmask.
4436 */
4437
4438SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4439 old_sigset_t __user *, oset)
4440{
4441 old_sigset_t old_set, new_set;
4442 sigset_t new_blocked;
4443
4444 old_set = current->blocked.sig[0];
4445
4446 if (nset) {
4447 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4448 return -EFAULT;
4449
4450 new_blocked = current->blocked;
4451
4452 switch (how) {
4453 case SIG_BLOCK:
4454 sigaddsetmask(&new_blocked, new_set);
4455 break;
4456 case SIG_UNBLOCK:
4457 sigdelsetmask(&new_blocked, new_set);
4458 break;
4459 case SIG_SETMASK:
4460 new_blocked.sig[0] = new_set;
4461 break;
4462 default:
4463 return -EINVAL;
4464 }
4465
4466 set_current_blocked(&new_blocked);
4467 }
4468
4469 if (oset) {
4470 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4471 return -EFAULT;
4472 }
4473
4474 return 0;
4475}
4476#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4477
4478#ifndef CONFIG_ODD_RT_SIGACTION
4479/**
4480 * sys_rt_sigaction - alter an action taken by a process
4481 * @sig: signal to be sent
4482 * @act: new sigaction
4483 * @oact: used to save the previous sigaction
4484 * @sigsetsize: size of sigset_t type
4485 */
4486SYSCALL_DEFINE4(rt_sigaction, int, sig,
4487 const struct sigaction __user *, act,
4488 struct sigaction __user *, oact,
4489 size_t, sigsetsize)
4490{
4491 struct k_sigaction new_sa, old_sa;
4492 int ret;
4493
4494 /* XXX: Don't preclude handling different sized sigset_t's. */
4495 if (sigsetsize != sizeof(sigset_t))
4496 return -EINVAL;
4497
4498 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4499 return -EFAULT;
4500
4501 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4502 if (ret)
4503 return ret;
4504
4505 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4506 return -EFAULT;
4507
4508 return 0;
4509}
4510#ifdef CONFIG_COMPAT
4511COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4512 const struct compat_sigaction __user *, act,
4513 struct compat_sigaction __user *, oact,
4514 compat_size_t, sigsetsize)
4515{
4516 struct k_sigaction new_ka, old_ka;
4517#ifdef __ARCH_HAS_SA_RESTORER
4518 compat_uptr_t restorer;
4519#endif
4520 int ret;
4521
4522 /* XXX: Don't preclude handling different sized sigset_t's. */
4523 if (sigsetsize != sizeof(compat_sigset_t))
4524 return -EINVAL;
4525
4526 if (act) {
4527 compat_uptr_t handler;
4528 ret = get_user(handler, &act->sa_handler);
4529 new_ka.sa.sa_handler = compat_ptr(handler);
4530#ifdef __ARCH_HAS_SA_RESTORER
4531 ret |= get_user(restorer, &act->sa_restorer);
4532 new_ka.sa.sa_restorer = compat_ptr(restorer);
4533#endif
4534 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4535 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4536 if (ret)
4537 return -EFAULT;
4538 }
4539
4540 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4541 if (!ret && oact) {
4542 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4543 &oact->sa_handler);
4544 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4545 sizeof(oact->sa_mask));
4546 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4547#ifdef __ARCH_HAS_SA_RESTORER
4548 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4549 &oact->sa_restorer);
4550#endif
4551 }
4552 return ret;
4553}
4554#endif
4555#endif /* !CONFIG_ODD_RT_SIGACTION */
4556
4557#ifdef CONFIG_OLD_SIGACTION
4558SYSCALL_DEFINE3(sigaction, int, sig,
4559 const struct old_sigaction __user *, act,
4560 struct old_sigaction __user *, oact)
4561{
4562 struct k_sigaction new_ka, old_ka;
4563 int ret;
4564
4565 if (act) {
4566 old_sigset_t mask;
4567 if (!access_ok(act, sizeof(*act)) ||
4568 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4569 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4570 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4571 __get_user(mask, &act->sa_mask))
4572 return -EFAULT;
4573#ifdef __ARCH_HAS_KA_RESTORER
4574 new_ka.ka_restorer = NULL;
4575#endif
4576 siginitset(&new_ka.sa.sa_mask, mask);
4577 }
4578
4579 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4580
4581 if (!ret && oact) {
4582 if (!access_ok(oact, sizeof(*oact)) ||
4583 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4584 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4585 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4586 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4587 return -EFAULT;
4588 }
4589
4590 return ret;
4591}
4592#endif
4593#ifdef CONFIG_COMPAT_OLD_SIGACTION
4594COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4595 const struct compat_old_sigaction __user *, act,
4596 struct compat_old_sigaction __user *, oact)
4597{
4598 struct k_sigaction new_ka, old_ka;
4599 int ret;
4600 compat_old_sigset_t mask;
4601 compat_uptr_t handler, restorer;
4602
4603 if (act) {
4604 if (!access_ok(act, sizeof(*act)) ||
4605 __get_user(handler, &act->sa_handler) ||
4606 __get_user(restorer, &act->sa_restorer) ||
4607 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4608 __get_user(mask, &act->sa_mask))
4609 return -EFAULT;
4610
4611#ifdef __ARCH_HAS_KA_RESTORER
4612 new_ka.ka_restorer = NULL;
4613#endif
4614 new_ka.sa.sa_handler = compat_ptr(handler);
4615 new_ka.sa.sa_restorer = compat_ptr(restorer);
4616 siginitset(&new_ka.sa.sa_mask, mask);
4617 }
4618
4619 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4620
4621 if (!ret && oact) {
4622 if (!access_ok(oact, sizeof(*oact)) ||
4623 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4624 &oact->sa_handler) ||
4625 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4626 &oact->sa_restorer) ||
4627 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4628 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4629 return -EFAULT;
4630 }
4631 return ret;
4632}
4633#endif
4634
4635#ifdef CONFIG_SGETMASK_SYSCALL
4636
4637/*
4638 * For backwards compatibility. Functionality superseded by sigprocmask.
4639 */
4640SYSCALL_DEFINE0(sgetmask)
4641{
4642 /* SMP safe */
4643 return current->blocked.sig[0];
4644}
4645
4646SYSCALL_DEFINE1(ssetmask, int, newmask)
4647{
4648 int old = current->blocked.sig[0];
4649 sigset_t newset;
4650
4651 siginitset(&newset, newmask);
4652 set_current_blocked(&newset);
4653
4654 return old;
4655}
4656#endif /* CONFIG_SGETMASK_SYSCALL */
4657
4658#ifdef __ARCH_WANT_SYS_SIGNAL
4659/*
4660 * For backwards compatibility. Functionality superseded by sigaction.
4661 */
4662SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4663{
4664 struct k_sigaction new_sa, old_sa;
4665 int ret;
4666
4667 new_sa.sa.sa_handler = handler;
4668 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4669 sigemptyset(&new_sa.sa.sa_mask);
4670
4671 ret = do_sigaction(sig, &new_sa, &old_sa);
4672
4673 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4674}
4675#endif /* __ARCH_WANT_SYS_SIGNAL */
4676
4677#ifdef __ARCH_WANT_SYS_PAUSE
4678
4679SYSCALL_DEFINE0(pause)
4680{
4681 while (!signal_pending(current)) {
4682 __set_current_state(TASK_INTERRUPTIBLE);
4683 schedule();
4684 }
4685 return -ERESTARTNOHAND;
4686}
4687
4688#endif
4689
4690static int sigsuspend(sigset_t *set)
4691{
4692 current->saved_sigmask = current->blocked;
4693 set_current_blocked(set);
4694
4695 while (!signal_pending(current)) {
4696 __set_current_state(TASK_INTERRUPTIBLE);
4697 schedule();
4698 }
4699 set_restore_sigmask();
4700 return -ERESTARTNOHAND;
4701}
4702
4703/**
4704 * sys_rt_sigsuspend - replace the signal mask with @unewset until a
4705 * signal is received
4706 * @unewset: new signal mask value
4707 * @sigsetsize: size of sigset_t type
4708 */
4709SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4710{
4711 sigset_t newset;
4712
4713 /* XXX: Don't preclude handling different sized sigset_t's. */
4714 if (sigsetsize != sizeof(sigset_t))
4715 return -EINVAL;
4716
4717 if (copy_from_user(&newset, unewset, sizeof(newset)))
4718 return -EFAULT;
4719 return sigsuspend(&newset);
4720}
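
/*
 * Example (userspace, illustrative sketch): the canonical race-free wait.
 * The signal is kept blocked while the flag is tested; sigsuspend()
 * atomically installs a mask with it unblocked and sleeps, so a wakeup
 * cannot be lost between the test and the sleep. got_usr1 is assumed to be
 * set by the SIGUSR1 handler.
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_usr1;	// set to 1 by the handler
 *
 *	sigset_t block, waitmask;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &waitmask);
 *	sigdelset(&waitmask, SIGUSR1);
 *	while (!got_usr1)
 *		sigsuspend(&waitmask);	// returns -1/EINTR after the handler
 */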
4721
4722#ifdef CONFIG_COMPAT
4723COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4724{
4725 sigset_t newset;
4726
4727 /* XXX: Don't preclude handling different sized sigset_t's. */
4728 if (sigsetsize != sizeof(sigset_t))
4729 return -EINVAL;
4730
4731 if (get_compat_sigset(&newset, unewset))
4732 return -EFAULT;
4733 return sigsuspend(&newset);
4734}
4735#endif
4736
4737#ifdef CONFIG_OLD_SIGSUSPEND
4738SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4739{
4740 sigset_t blocked;
4741 siginitset(&blocked, mask);
4742 return sigsuspend(&blocked);
4743}
4744#endif
4745#ifdef CONFIG_OLD_SIGSUSPEND3
4746SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4747{
4748 sigset_t blocked;
4749 siginitset(&blocked, mask);
4750 return sigsuspend(&blocked);
4751}
4752#endif
4753
4754__weak const char *arch_vma_name(struct vm_area_struct *vma)
4755{
4756 return NULL;
4757}
4758
4759static inline void siginfo_buildtime_checks(void)
4760{
4761 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4762
4763 /* Verify the offsets in the two siginfos match */
4764#define CHECK_OFFSET(field) \
4765 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4766
4767 /* kill */
4768 CHECK_OFFSET(si_pid);
4769 CHECK_OFFSET(si_uid);
4770
4771 /* timer */
4772 CHECK_OFFSET(si_tid);
4773 CHECK_OFFSET(si_overrun);
4774 CHECK_OFFSET(si_value);
4775
4776 /* rt */
4777 CHECK_OFFSET(si_pid);
4778 CHECK_OFFSET(si_uid);
4779 CHECK_OFFSET(si_value);
4780
4781 /* sigchld */
4782 CHECK_OFFSET(si_pid);
4783 CHECK_OFFSET(si_uid);
4784 CHECK_OFFSET(si_status);
4785 CHECK_OFFSET(si_utime);
4786 CHECK_OFFSET(si_stime);
4787
4788 /* sigfault */
4789 CHECK_OFFSET(si_addr);
4790 CHECK_OFFSET(si_trapno);
4791 CHECK_OFFSET(si_addr_lsb);
4792 CHECK_OFFSET(si_lower);
4793 CHECK_OFFSET(si_upper);
4794 CHECK_OFFSET(si_pkey);
4795 CHECK_OFFSET(si_perf_data);
4796 CHECK_OFFSET(si_perf_type);
4797 CHECK_OFFSET(si_perf_flags);
4798
4799 /* sigpoll */
4800 CHECK_OFFSET(si_band);
4801 CHECK_OFFSET(si_fd);
4802
4803 /* sigsys */
4804 CHECK_OFFSET(si_call_addr);
4805 CHECK_OFFSET(si_syscall);
4806 CHECK_OFFSET(si_arch);
4807#undef CHECK_OFFSET
4808
4809 /* usb asyncio */
4810 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4811 offsetof(struct siginfo, si_addr));
4812 if (sizeof(int) == sizeof(void __user *)) {
4813 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4814 sizeof(void __user *));
4815 } else {
4816 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4817 sizeof_field(struct siginfo, si_uid)) !=
4818 sizeof(void __user *));
4819 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4820 offsetof(struct siginfo, si_uid));
4821 }
4822#ifdef CONFIG_COMPAT
4823 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4824 offsetof(struct compat_siginfo, si_addr));
4825 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4826 sizeof(compat_uptr_t));
4827 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4828 sizeof_field(struct siginfo, si_pid));
4829#endif
4830}
4831
4832#if defined(CONFIG_SYSCTL)
4833static struct ctl_table signal_debug_table[] = {
4834#ifdef CONFIG_SYSCTL_EXCEPTION_TRACE
4835 {
4836 .procname = "exception-trace",
4837 .data = &show_unhandled_signals,
4838 .maxlen = sizeof(int),
4839 .mode = 0644,
4840 .proc_handler = proc_dointvec
4841 },
4842#endif
4843 { }
4844};
4845
4846static int __init init_signal_sysctls(void)
4847{
4848 register_sysctl_init("debug", signal_debug_table);
4849 return 0;
4850}
4851early_initcall(init_signal_sysctls);
4852#endif /* CONFIG_SYSCTL */
4853
4854void __init signals_init(void)
4855{
4856 siginfo_buildtime_checks();
4857
4858 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4859}
4860
4861#ifdef CONFIG_KGDB_KDB
4862#include <linux/kdb.h>
4863/*
4864 * kdb_send_sig - Allows kdb to send signals without exposing
4865 * signal internals. This function checks if the required locks are
4866 * available before calling the main signal code, to avoid kdb
4867 * deadlocks.
4868 */
4869void kdb_send_sig(struct task_struct *t, int sig)
4870{
4871 static struct task_struct *kdb_prev_t;
4872 int new_t, ret;
4873 if (!spin_trylock(&t->sighand->siglock)) {
4874		kdb_printf("Can't do kill command now.\n"
4875			   "The sigmask lock is held elsewhere in the kernel; "
4876			   "try again later\n");
4877 return;
4878 }
4879 new_t = kdb_prev_t != t;
4880 kdb_prev_t = t;
4881 if (!task_is_running(t) && new_t) {
4882 spin_unlock(&t->sighand->siglock);
4883 kdb_printf("Process is not RUNNING, sending a signal from "
4884 "kdb risks deadlock\n"
4885 "on the run queue locks. "
4886 "The signal has _not_ been sent.\n"
4887 "Reissue the kill command if you want to risk "
4888 "the deadlock.\n");
4889 return;
4890 }
4891 ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4892 spin_unlock(&t->sighand->siglock);
4893 if (ret)
4894		kdb_printf("Failed to deliver signal %d to process %d.\n",
4895			   sig, t->pid);
4896 else
4897		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
4898}
4899#endif /* CONFIG_KGDB_KDB */