// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/task_work.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>	/* for syscall_get_* */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

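/* Return the handler currently installed for @sig in @t's sighand table. */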
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even an ignored signal, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

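/*
 * Set TIF_SIGPENDING on @t if jobctl work is pending, an unblocked signal
 * is pending (private or shared), or the cgroup freezer wants @t frozen.
 * Returns true if the flag was set.
 */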
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers who know it is safe to
	 * clear it do so themselves.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

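/*
 * Synchronous signals are generated by a fault in the running thread
 * (e.g. a SIGSEGV from a bad memory access); they are dequeued ahead of
 * other pending signals so the handler sees the trap that just occurred.
 */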
#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an ongoing signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct ucounts *ucounts = NULL;
	long sigpending;

	/*
	 * Protect access to @t credentials.  This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	ucounts = task_ucounts(t);
	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	rcu_read_unlock();
	if (!sigpending)
		return NULL;

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->ucounts = ucounts;
	}
	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (q->ucounts) {
		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
		q->ucounts = NULL;
	}
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* If ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
		   kernel_siginfo_t *info, enum pid_type *type)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	*type = PIDTYPE_PID;
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		*type = PIDTYPE_TGID;
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case.  This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once.  Changing the restart behaviour to
		 * restart the timer in the signal dequeue path also
		 * reduces the timer noise on heavily loaded !highres
		 * systems.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);

static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	lockdep_assert_held(&t->sighand->siglock);

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case.  We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

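/*
 * SEND_SIG_NOINFO and SEND_SIG_PRIV are small constant cookies, not
 * pointers to real siginfo, hence the pointer-value comparison below.
 */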
static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL.  The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap, which is cleared on the
 * next TRAP_STOP, to notify the ptracer of an event.  @t must have been
 * seized by the ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	lockdep_assert_held(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals.  Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & SIGNAL_GROUP_EXIT) {
		if (signal->core_state)
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, drop the signal.
		 */
		return false;
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED))) {
				t->jobctl &= ~JOBCTL_STOPPED;
				wake_up_state(t, __TASK_STOPPED);
			} else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued.  Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent.  See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

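/*
 * Pick a thread to service @sig and wake it up.  If the signal is fatal
 * and will not be handled, start bringing down the whole thread group.
 */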
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

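/*
 * Queue @sig with @info on @t's private or shared pending list (depending
 * on @type) and pick a thread to service it.  Caller holds @t's siglock.
 */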
static int __send_signal_locked(int sig, struct kernel_siginfo *info,
				struct task_struct *t, enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	lockdep_assert_held(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

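/*
 * Does the siginfo layout for this si_signo/si_code pair carry si_pid and
 * si_uid fields that may need translating across user/pid namespaces?
 */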
static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;

	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

int send_signal_locked(int sig, struct kernel_siginfo *info,
		       struct task_struct *t, enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* The siginfo carries pid/uid ids that may need translation */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal_locked(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = task_pt_regs(current);

	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;

		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
		     enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal_locked(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

enum sig_handler {
	HANDLER_CURRENT, /* If reachable use the current handler */
	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
	HANDLER_EXIT,	 /* Only visible as the process exit code */
};

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
	enum sig_handler handler)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
		action->sa.sa_handler = SIG_DFL;
		if (handler == HANDLER_EXIT)
			action->sa.sa_flags |= SA_IMMUTABLE;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.  But HANDLER_EXIT is always fatal.
	 */
	if (action->sa.sa_handler == SIG_DFL &&
	    (!t->ptrace || (handler == HANDLER_EXIT)))
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

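/*
 * Pin and lock @tsk's sighand.  Returns the sighand with ->siglock held,
 * or NULL if the task is exiting and the sighand is already detached.
 */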
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal().  In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

#ifdef CONFIG_LOCKDEP
void lockdep_assert_task_sighand_held(struct task_struct *task)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	sighand = rcu_dereference(task->sighand);
	if (sighand)
		lockdep_assert_held(&sighand->siglock);
	else
		WARN_ON_ONCE(1);
	rcu_read_unlock();
}
#endif

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

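/* May the caller's credentials @cred signal @target under the uid rules? */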
static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace.  As the 32bit address will be encoded in the
 * low 32bits of the pointer, those low 32bits will be stored at a
 * higher address than where a 32bit pointer would be read.  So
 * userspace will not see the address it was expecting for its
 * completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

void force_fatal_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
}

void force_exit_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_EXIT);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV.  And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
	if (sig == SIGSEGV)
		force_fatal_sig(SIGSEGV);
	else
		force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_BNDERR;
	info.si_addr = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_PKUERR;
	info.si_addr = addr;
	info.si_pkey = pkey;
	return force_sig_info(&info);
}
#endif

int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_PERF;
	info.si_addr = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	/*
	 * Signals generated by perf events should not terminate the whole
	 * process if SIGTRAP is blocked, however, delivering the signal
	 * asynchronously is better than not delivering at all.  But tell user
	 * space if the signal was asynchronous, so it can clearly be
	 * distinguished from normal synchronous ones.
	 */
	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
				     TRAP_PERF_FLAG_ASYNC :
				     0;

	return send_sig_info(info.si_signo, &info, current);
}

/**
 * force_sig_seccomp - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 * @force_coredump: true to trigger a coredump
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
int force_sig_seccomp(int syscall, int reason, bool force_coredump)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch(current);
	info.si_syscall = syscall;
	return force_sig_info_to_task(&info, current,
		force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
}

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_trapno = trapno;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			  struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_trapno = trapno;
	return send_sig_info(info.si_signo, &info, t);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

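/*
 * Queue a preallocated sigqueue, typically on behalf of a POSIX timer.
 * Returns 0 if queued, 1 if the signal was ignored, -1 if the target died.
 */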
1961int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1962{
1963 int sig = q->info.si_signo;
1964 struct sigpending *pending;
1965 struct task_struct *t;
1966 unsigned long flags;
1967 int ret, result;
1968
1969 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1970
1971 ret = -1;
1972 rcu_read_lock();
1973 t = pid_task(pid, type);
1974 if (!t || !likely(lock_task_sighand(t, &flags)))
1975 goto ret;
1976
1977 ret = 1; /* the signal is ignored */
1978 result = TRACE_SIGNAL_IGNORED;
1979 if (!prepare_signal(sig, t, false))
1980 goto out;
1981
1982 ret = 0;
1983 if (unlikely(!list_empty(&q->list))) {
1984 /*
1985 * If an SI_TIMER entry is already queued, just increment
1986 * the overrun count.
1987 */
1988 BUG_ON(q->info.si_code != SI_TIMER);
1989 q->info.si_overrun++;
1990 result = TRACE_SIGNAL_ALREADY_PENDING;
1991 goto out;
1992 }
1993 q->info.si_overrun = 0;
1994
1995 signalfd_notify(t, sig);
1996 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1997 list_add_tail(&q->list, &pending->list);
1998 sigaddset(&pending->signal, sig);
1999 complete_signal(sig, t, type);
2000 result = TRACE_SIGNAL_DELIVERED;
2001out:
2002 trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
2003 unlock_task_sighand(t, &flags);
2004ret:
2005 rcu_read_unlock();
2006 return ret;
2007}
2008
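/*
 * Wake up all waiters polling a pidfd on @task. @task must already
 * have an exit state set, which the WARN_ON below asserts.
 */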
2009static void do_notify_pidfd(struct task_struct *task)
2010{
2011 struct pid *pid;
2012
2013 WARN_ON(task->exit_state == 0);
2014 pid = task_pid(task);
2015 wake_up_all(&pid->wait_pidfd);
2016}
2017
2018/*
2019 * Let a parent know about the death of a child.
2020 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2021 *
2022 * Returns true if our parent ignored us and so we've switched to
2023 * self-reaping.
2024 */
2025bool do_notify_parent(struct task_struct *tsk, int sig)
2026{
2027 struct kernel_siginfo info;
2028 unsigned long flags;
2029 struct sighand_struct *psig;
2030 bool autoreap = false;
2031 u64 utime, stime;
2032
2033 WARN_ON_ONCE(sig == -1);
2034
2035 /* do_notify_parent_cldstop should have been called instead. */
2036 WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
2037
2038 WARN_ON_ONCE(!tsk->ptrace &&
2039 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2040
2041 /* Wake up all pidfd waiters */
2042 do_notify_pidfd(tsk);
2043
2044 if (sig != SIGCHLD) {
2045 /*
2046 * This is only possible if parent == real_parent.
2047 * Check if it has changed security domain.
2048 */
2049 if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2050 sig = SIGCHLD;
2051 }
2052
2053 clear_siginfo(&info);
2054 info.si_signo = sig;
2055 info.si_errno = 0;
2056 /*
2057 * We are under tasklist_lock here so our parent is tied to
2058 * us and cannot change.
2059 *
2060 * task_active_pid_ns will always return the same pid namespace
2061 * until a task passes through release_task.
2062 *
2063 * write_lock() currently calls preempt_disable() which is the
2064 * same as rcu_read_lock(), but according to Oleg, it is not
2065 * correct to rely on this.
2066 */
2067 rcu_read_lock();
2068 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2069 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2070 task_uid(tsk));
2071 rcu_read_unlock();
2072
2073 task_cputime(tsk, &utime, &stime);
2074 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2075 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2076
2077 info.si_status = tsk->exit_code & 0x7f;
2078 if (tsk->exit_code & 0x80)
2079 info.si_code = CLD_DUMPED;
2080 else if (tsk->exit_code & 0x7f)
2081 info.si_code = CLD_KILLED;
2082 else {
2083 info.si_code = CLD_EXITED;
2084 info.si_status = tsk->exit_code >> 8;
2085 }
2086
2087 psig = tsk->parent->sighand;
2088 spin_lock_irqsave(&psig->siglock, flags);
2089 if (!tsk->ptrace && sig == SIGCHLD &&
2090 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2091 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2092 /*
2093 * We are exiting and our parent doesn't care. POSIX.1
2094 * defines special semantics for setting SIGCHLD to SIG_IGN
2095 * or setting the SA_NOCLDWAIT flag: we should be reaped
2096 * automatically and not left for our parent's wait4 call.
2097 * Rather than having the parent do it as a magic kind of
2098 * signal handler, we just set this to tell do_exit that we
2099 * can be cleaned up without becoming a zombie. Note that
2100 * we still call __wake_up_parent in this case, because a
2101 * blocked sys_wait4 might now return -ECHILD.
2102 *
2103 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2104 * is implementation-defined: we do (if you don't want
2105 * it, just use SIG_IGN instead).
2106 */
2107 autoreap = true;
2108 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2109 sig = 0;
2110 }
2111 /*
2112 * Send with __send_signal as si_pid and si_uid are in the
2113 * parent's namespaces.
2114 */
2115 if (valid_signal(sig) && sig)
2116 __send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2117 __wake_up_parent(tsk, tsk->parent);
2118 spin_unlock_irqrestore(&psig->siglock, flags);
2119
2120 return autoreap;
2121}
2122
2123/**
2124 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2125 * @tsk: task reporting the state change
2126 * @for_ptracer: the notification is for ptracer
2127 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2128 *
2129 * Notify @tsk's parent that the stopped/continued state has changed. If
2130 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2131 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2132 *
2133 * CONTEXT:
2134 * Must be called with tasklist_lock at least read locked.
2135 */
2136static void do_notify_parent_cldstop(struct task_struct *tsk,
2137 bool for_ptracer, int why)
2138{
2139 struct kernel_siginfo info;
2140 unsigned long flags;
2141 struct task_struct *parent;
2142 struct sighand_struct *sighand;
2143 u64 utime, stime;
2144
2145 if (for_ptracer) {
2146 parent = tsk->parent;
2147 } else {
2148 tsk = tsk->group_leader;
2149 parent = tsk->real_parent;
2150 }
2151
2152 clear_siginfo(&info);
2153 info.si_signo = SIGCHLD;
2154 info.si_errno = 0;
2155 /*
2156 * see comment in do_notify_parent() about the following 4 lines
2157 */
2158 rcu_read_lock();
2159 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2160 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2161 rcu_read_unlock();
2162
2163 task_cputime(tsk, &utime, &stime);
2164 info.si_utime = nsec_to_clock_t(utime);
2165 info.si_stime = nsec_to_clock_t(stime);
2166
2167 info.si_code = why;
2168 switch (why) {
2169 case CLD_CONTINUED:
2170 info.si_status = SIGCONT;
2171 break;
2172 case CLD_STOPPED:
2173 info.si_status = tsk->signal->group_exit_code & 0x7f;
2174 break;
2175 case CLD_TRAPPED:
2176 info.si_status = tsk->exit_code & 0x7f;
2177 break;
2178 default:
2179 BUG();
2180 }
2181
2182 sighand = parent->sighand;
2183 spin_lock_irqsave(&sighand->siglock, flags);
2184 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2185 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2186 send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
2187 /*
2188 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2189 */
2190 __wake_up_parent(tsk, parent);
2191 spin_unlock_irqrestore(&sighand->siglock, flags);
2192}
2193
2194/*
2195 * This must be called with current->sighand->siglock held.
2196 *
2197 * This should be the path for all ptrace stops.
2198 * We always set current->last_siginfo while stopped here.
2199 * That makes it a way to test a stopped process for
2200 * being ptrace-stopped vs being job-control-stopped.
2201 *
2202 * Returns the signal the ptracer requested the code resume
2203 * with. If the code did not stop because the tracer is gone,
2204 * the stop signal remains unchanged.
2205 */
2206static int ptrace_stop(int exit_code, int why, unsigned long message,
2207 kernel_siginfo_t *info)
2208 __releases(&current->sighand->siglock)
2209 __acquires(&current->sighand->siglock)
2210{
2211 bool gstop_done = false;
2212
2213 if (arch_ptrace_stop_needed()) {
2214 /*
2215 * The arch code has something special to do before a
2216 * ptrace stop. This is allowed to block, e.g. for faults
2217 * on user stack pages. We can't keep the siglock while
2218 * calling arch_ptrace_stop, so we must release it now.
2219 * To preserve proper semantics, we must do this before
2220 * any signal bookkeeping like checking group_stop_count.
2221 */
2222 spin_unlock_irq(&current->sighand->siglock);
2223 arch_ptrace_stop();
2224 spin_lock_irq(&current->sighand->siglock);
2225 }
2226
2227 /*
2228 * After this point ptrace_signal_wake_up or signal_wake_up
2229 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2230 * signal comes in. Handle previous ptrace_unlinks and fatal
2231 * signals here to prevent ptrace_stop sleeping in schedule.
2232 */
2233 if (!current->ptrace || __fatal_signal_pending(current))
2234 return exit_code;
2235
2236 set_special_state(TASK_TRACED);
2237 current->jobctl |= JOBCTL_TRACED;
2238
2239 /*
2240 * We're committing to trapping. TRACED should be visible before
2241 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2242 * Also, transition to TRACED and updates to ->jobctl should be
2243 * atomic with respect to siglock and should be done after the arch
2244 * hook as siglock is released and regrabbed across it.
2245 *
2246 * TRACER TRACEE
2247 *
2248 * ptrace_attach()
2249 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2250 * do_wait()
2251 * set_current_state() smp_wmb();
2252 * ptrace_do_wait()
2253 * wait_task_stopped()
2254 * task_stopped_code()
2255 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2256 */
2257 smp_wmb();
2258
2259 current->ptrace_message = message;
2260 current->last_siginfo = info;
2261 current->exit_code = exit_code;
2262
2263 /*
2264 * If @why is CLD_STOPPED, we're trapping to participate in a group
2265 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
2266 * across siglock relocks since INTERRUPT was scheduled, PENDING
2267 * could be clear now. We act as if SIGCONT is received after
2268 * TASK_TRACED is entered - ignore it.
2269 */
2270 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2271 gstop_done = task_participate_group_stop(current);
2272
2273 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2274 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2275 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2276 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2277
2278 /* entering a trap, clear TRAPPING */
2279 task_clear_jobctl_trapping(current);
2280
2281 spin_unlock_irq(&current->sighand->siglock);
2282 read_lock(&tasklist_lock);
2283 /*
2284 * Notify parents of the stop.
2285 *
2286 * While ptraced, there are two parents - the ptracer and
2287 * the real_parent of the group_leader. The ptracer should
2288 * know about every stop while the real parent is only
2289 * interested in the completion of group stop. The states
2290 * for the two don't interact with each other. Notify
2291 * separately unless they're gonna be duplicates.
2292 */
2293 if (current->ptrace)
2294 do_notify_parent_cldstop(current, true, why);
2295 if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2296 do_notify_parent_cldstop(current, false, why);
2297
2298 /*
2299 * Don't want to allow preemption here, because
2300 * sys_ptrace() needs this task to be inactive.
2301 *
2302 * XXX: implement read_unlock_no_resched().
2303 */
2304 preempt_disable();
2305 read_unlock(&tasklist_lock);
2306 cgroup_enter_frozen();
2307 preempt_enable_no_resched();
2308 schedule();
2309 cgroup_leave_frozen(true);
2310
2311 /*
2312 * We are back. Now reacquire the siglock before touching
2313 * last_siginfo, so that we are sure to have synchronized with
2314 * any signal-sending on another CPU that wants to examine it.
2315 */
2316 spin_lock_irq(&current->sighand->siglock);
2317 exit_code = current->exit_code;
2318 current->last_siginfo = NULL;
2319 current->ptrace_message = 0;
2320 current->exit_code = 0;
2321
2322 /* LISTENING can be set only during STOP traps, clear it */
2323 current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2324
2325 /*
2326 * Queued signals ignored us while we were stopped for tracing.
2327 * So check for any that we should take before resuming user mode.
2328 * This sets TIF_SIGPENDING, but never clears it.
2329 */
2330 recalc_sigpending_tsk(current);
2331 return exit_code;
2332}
2333
2334static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2335{
2336 kernel_siginfo_t info;
2337
2338 clear_siginfo(&info);
2339 info.si_signo = signr;
2340 info.si_code = exit_code;
2341 info.si_pid = task_pid_vnr(current);
2342 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2343
2344 /* Let the debugger run. */
2345 return ptrace_stop(exit_code, why, message, &info);
2346}
2347
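/*
 * ptrace_notify - trap to the tracer with an event-encoded exit code
 *
 * @exit_code carries SIGTRAP in its low seven bits and a PTRACE_EVENT_*
 * value in bits 8-15, which the BUG_ON below asserts.
 */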
2348int ptrace_notify(int exit_code, unsigned long message)
2349{
2350 int signr;
2351
2352 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2353 if (unlikely(task_work_pending(current)))
2354 task_work_run();
2355
2356 spin_lock_irq(&current->sighand->siglock);
2357 signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2358 spin_unlock_irq(&current->sighand->siglock);
2359 return signr;
2360}
2361
2362/**
2363 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2364 * @signr: signr causing group stop if initiating
2365 *
2366 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2367 * and participate in it. If already set, participate in the existing
2368 * group stop. If participated in a group stop (and thus slept), %true is
2369 * returned with siglock released.
2370 *
2371 * If ptraced, this function doesn't handle stop itself. Instead,
2372 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2373 * untouched. The caller must ensure that INTERRUPT trap handling takes
2374 * place afterwards.
2375 *
2376 * CONTEXT:
2377 * Must be called with @current->sighand->siglock held, which is released
2378 * on %true return.
2379 *
2380 * RETURNS:
2381 * %false if group stop is already cancelled or ptrace trap is scheduled.
2382 * %true if participated in group stop.
2383 */
2384static bool do_signal_stop(int signr)
2385 __releases(&current->sighand->siglock)
2386{
2387 struct signal_struct *sig = current->signal;
2388
2389 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2390 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2391 struct task_struct *t;
2392
2393 /* signr will be recorded in task->jobctl for retries */
2394 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2395
2396 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2397 unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2398 unlikely(sig->group_exec_task))
2399 return false;
2400 /*
2401 * There is no group stop already in progress. We must
2402 * initiate one now.
2403 *
2404 * While ptraced, a task may be resumed while group stop is
2405 * still in effect and then receive a stop signal and
2406 * initiate another group stop. This deviates from the
2407 * usual behavior as two consecutive stop signals can't
2408 * cause two group stops when !ptraced. That is why we
2409 * also check !task_is_stopped(t) below.
2410 *
2411 * The condition can be distinguished by testing whether
2412 * SIGNAL_STOP_STOPPED is already set. Don't generate
2413 * group_exit_code in such case.
2414 *
2415 * This is not necessary for SIGNAL_STOP_CONTINUED because
2416 * an intervening stop signal is required to cause two
2417 * continued events regardless of ptrace.
2418 */
2419 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2420 sig->group_exit_code = signr;
2421
2422 sig->group_stop_count = 0;
2423
2424 if (task_set_jobctl_pending(current, signr | gstop))
2425 sig->group_stop_count++;
2426
2427 t = current;
2428 while_each_thread(current, t) {
2429 /*
2430 * Setting state to TASK_STOPPED for a group
2431 * stop is always done with the siglock held,
2432 * so this check has no races.
2433 */
2434 if (!task_is_stopped(t) &&
2435 task_set_jobctl_pending(t, signr | gstop)) {
2436 sig->group_stop_count++;
2437 if (likely(!(t->ptrace & PT_SEIZED)))
2438 signal_wake_up(t, 0);
2439 else
2440 ptrace_trap_notify(t);
2441 }
2442 }
2443 }
2444
2445 if (likely(!current->ptrace)) {
2446 int notify = 0;
2447
2448 /*
2449 * If there are no other threads in the group, or if there
2450 * is a group stop in progress and we are the last to stop,
2451 * report to the parent.
2452 */
2453 if (task_participate_group_stop(current))
2454 notify = CLD_STOPPED;
2455
2456 current->jobctl |= JOBCTL_STOPPED;
2457 set_special_state(TASK_STOPPED);
2458 spin_unlock_irq(&current->sighand->siglock);
2459
2460 /*
2461 * Notify the parent of the group stop completion. Because
2462 * we're not holding either the siglock or tasklist_lock
2463 * here, a ptracer may attach in between; however, this is for
2464 * group stop and should always be delivered to the real
2465 * parent of the group leader. The new ptracer will get
2466 * its notification when this task transitions into
2467 * TASK_TRACED.
2468 */
2469 if (notify) {
2470 read_lock(&tasklist_lock);
2471 do_notify_parent_cldstop(current, false, notify);
2472 read_unlock(&tasklist_lock);
2473 }
2474
2475 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2476 cgroup_enter_frozen();
2477 schedule();
2478 return true;
2479 } else {
2480 /*
2481 * While ptraced, group stop is handled by STOP trap.
2482 * Schedule it and let the caller deal with it.
2483 */
2484 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2485 return false;
2486 }
2487}
2488
2489/**
2490 * do_jobctl_trap - take care of ptrace jobctl traps
2491 *
2492 * When PT_SEIZED, it's used for both group stop and explicit
2493 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2494 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2495 * the stop signal; otherwise, %SIGTRAP.
2496 *
2497 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2498 * number as exit_code and no siginfo.
2499 *
2500 * CONTEXT:
2501 * Must be called with @current->sighand->siglock held, which may be
2502 * released and re-acquired before returning with intervening sleep.
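 *
 * For example, under PT_SEIZED a group stop by SIGTSTP traps with
 * exit_code == SIGTSTP | (PTRACE_EVENT_STOP << 8) and an accompanying
 * siginfo, whereas a bare SEIZE/INTERRUPT trap uses SIGTRAP.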
2503 */
2504static void do_jobctl_trap(void)
2505{
2506 struct signal_struct *signal = current->signal;
2507 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2508
2509 if (current->ptrace & PT_SEIZED) {
2510 if (!signal->group_stop_count &&
2511 !(signal->flags & SIGNAL_STOP_STOPPED))
2512 signr = SIGTRAP;
2513 WARN_ON_ONCE(!signr);
2514 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2515 CLD_STOPPED, 0);
2516 } else {
2517 WARN_ON_ONCE(!signr);
2518 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2519 }
2520}
2521
2522/**
2523 * do_freezer_trap - handle the freezer jobctl trap
2524 *
2525 * Puts the task into the frozen state, unless the task is about to quit;
2526 * in that case it drops JOBCTL_TRAP_FREEZE instead.
2527 *
2528 * CONTEXT:
2529 * Must be called with @current->sighand->siglock held,
2530 * which is always released before returning.
2531 */
2532static void do_freezer_trap(void)
2533 __releases(&current->sighand->siglock)
2534{
2535 /*
2536 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2537 * let's make another loop to give it a chance to be handled.
2538 * In any case, we'll come back here.
2539 */
2540 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2541 JOBCTL_TRAP_FREEZE) {
2542 spin_unlock_irq(&current->sighand->siglock);
2543 return;
2544 }
2545
2546 /*
2547 * Now we're sure that there is no pending fatal signal and no
2548 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2549 * immediately (if there is a non-fatal signal pending), and
2550 * put the task into sleep.
2551 */
2552 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2553 clear_thread_flag(TIF_SIGPENDING);
2554 spin_unlock_irq(&current->sighand->siglock);
2555 cgroup_enter_frozen();
2556 schedule();
2557}
2558
2559static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2560{
2561 /*
2562 * We do not check sig_kernel_stop(signr) but set this marker
2563 * unconditionally because we do not know whether debugger will
2564 * change signr. This flag has no meaning unless we are going
2565 * to stop after return from ptrace_stop(). In this case it will
2566 * be checked in do_signal_stop(), we should only stop if it was
2567 * not cleared by SIGCONT while we were sleeping. See also the
2568 * comment in dequeue_signal().
2569 */
2570 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2571 signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2572
2573 /* We're back. Did the debugger cancel the sig? */
2574 if (signr == 0)
2575 return signr;
2576
2577 /*
2578 * Update the siginfo structure if the signal has
2579 * changed. If the debugger wanted something
2580 * specific in the siginfo structure then it should
2581 * have updated *info via PTRACE_SETSIGINFO.
2582 */
2583 if (signr != info->si_signo) {
2584 clear_siginfo(info);
2585 info->si_signo = signr;
2586 info->si_errno = 0;
2587 info->si_code = SI_USER;
2588 rcu_read_lock();
2589 info->si_pid = task_pid_vnr(current->parent);
2590 info->si_uid = from_kuid_munged(current_user_ns(),
2591 task_uid(current->parent));
2592 rcu_read_unlock();
2593 }
2594
2595 /* If the (new) signal is now blocked, requeue it. */
2596 if (sigismember(&current->blocked, signr) ||
2597 fatal_signal_pending(current)) {
2598 send_signal_locked(signr, info, current, type);
2599 signr = 0;
2600 }
2601
2602 return signr;
2603}
2604
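/*
 * For fault layouts, strip any architecture-specific tag bits (for
 * example pointer tags on arm64) from the reported si_addr. Handlers
 * installed with SA_EXPOSE_TAGBITS skip this (see get_signal()).
 */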
2605static void hide_si_addr_tag_bits(struct ksignal *ksig)
2606{
2607 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2608 case SIL_FAULT:
2609 case SIL_FAULT_TRAPNO:
2610 case SIL_FAULT_MCEERR:
2611 case SIL_FAULT_BNDERR:
2612 case SIL_FAULT_PKUERR:
2613 case SIL_FAULT_PERF_EVENT:
2614 ksig->info.si_addr = arch_untagged_si_addr(
2615 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2616 break;
2617 case SIL_KILL:
2618 case SIL_TIMER:
2619 case SIL_POLL:
2620 case SIL_CHLD:
2621 case SIL_RT:
2622 case SIL_SYS:
2623 break;
2624 }
2625}
2626
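/*
 * get_signal - dequeue the next signal for the current task
 * @ksig: filled with the signal and its k_sigaction on return
 *
 * Runs pending task_work, handles job control stops, jobctl traps and
 * fatal signals, and returns true when a signal with a user handler
 * must be delivered (ksig->sig > 0); false when the caller may return
 * to user mode without delivering anything.
 */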
2627bool get_signal(struct ksignal *ksig)
2628{
2629 struct sighand_struct *sighand = current->sighand;
2630 struct signal_struct *signal = current->signal;
2631 int signr;
2632
2633 clear_notify_signal();
2634 if (unlikely(task_work_pending(current)))
2635 task_work_run();
2636
2637 if (!task_sigpending(current))
2638 return false;
2639
2640 if (unlikely(uprobe_deny_signal()))
2641 return false;
2642
2643 /*
2644 * Do this once, we can't return to user-mode if freezing() == T.
2645 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2646 * thus do not need another check after return.
2647 */
2648 try_to_freeze();
2649
2650relock:
2651 spin_lock_irq(&sighand->siglock);
2652
2653 /*
2654 * Every stopped thread goes here after wakeup. Check to see if
2655 * we should notify the parent, prepare_signal(SIGCONT) encodes
2656 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2657 */
2658 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2659 int why;
2660
2661 if (signal->flags & SIGNAL_CLD_CONTINUED)
2662 why = CLD_CONTINUED;
2663 else
2664 why = CLD_STOPPED;
2665
2666 signal->flags &= ~SIGNAL_CLD_MASK;
2667
2668 spin_unlock_irq(&sighand->siglock);
2669
2670 /*
2671 * Notify the parent that we're continuing. This event is
2672 * always per-process and doesn't make a whole lot of sense
2673 * for ptracers, who shouldn't consume the state via
2674 * wait(2) either, but, for backward compatibility, notify
2675 * the ptracer of the group leader too unless it's gonna be
2676 * a duplicate.
2677 */
2678 read_lock(&tasklist_lock);
2679 do_notify_parent_cldstop(current, false, why);
2680
2681 if (ptrace_reparented(current->group_leader))
2682 do_notify_parent_cldstop(current->group_leader,
2683 true, why);
2684 read_unlock(&tasklist_lock);
2685
2686 goto relock;
2687 }
2688
2689 for (;;) {
2690 struct k_sigaction *ka;
2691 enum pid_type type;
2692
2693 /* Has this task already been marked for death? */
2694 if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2695 signal->group_exec_task) {
2696 clear_siginfo(&ksig->info);
2697 ksig->info.si_signo = signr = SIGKILL;
2698 sigdelset(&current->pending.signal, SIGKILL);
2699 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2700 &sighand->action[SIGKILL - 1]);
2701 recalc_sigpending();
2702 goto fatal;
2703 }
2704
2705 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2706 do_signal_stop(0))
2707 goto relock;
2708
2709 if (unlikely(current->jobctl &
2710 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2711 if (current->jobctl & JOBCTL_TRAP_MASK) {
2712 do_jobctl_trap();
2713 spin_unlock_irq(&sighand->siglock);
2714 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2715 do_freezer_trap();
2716
2717 goto relock;
2718 }
2719
2720 /*
2721 * If the task is leaving the frozen state, let's update
2722 * cgroup counters and reset the frozen bit.
2723 */
2724 if (unlikely(cgroup_task_frozen(current))) {
2725 spin_unlock_irq(&sighand->siglock);
2726 cgroup_leave_frozen(false);
2727 goto relock;
2728 }
2729
2730 /*
2731 * Signals generated by the execution of an instruction
2732 * need to be delivered before any other pending signals
2733 * so that the instruction pointer in the signal stack
2734 * frame points to the faulting instruction.
2735 */
2736 type = PIDTYPE_PID;
2737 signr = dequeue_synchronous_signal(&ksig->info);
2738 if (!signr)
2739 signr = dequeue_signal(current, &current->blocked,
2740 &ksig->info, &type);
2741
2742 if (!signr)
2743 break; /* will return 0 */
2744
2745 if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2746 !(sighand->action[signr-1].sa.sa_flags & SA_IMMUTABLE)) {
2747 signr = ptrace_signal(signr, &ksig->info, type);
2748 if (!signr)
2749 continue;
2750 }
2751
2752 ka = &sighand->action[signr-1];
2753
2754 /* Trace actually delivered signals. */
2755 trace_signal_deliver(signr, &ksig->info, ka);
2756
2757 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2758 continue;
2759 if (ka->sa.sa_handler != SIG_DFL) {
2760 /* Run the handler. */
2761 ksig->ka = *ka;
2762
2763 if (ka->sa.sa_flags & SA_ONESHOT)
2764 ka->sa.sa_handler = SIG_DFL;
2765
2766 break; /* will return non-zero "signr" value */
2767 }
2768
2769 /*
2770 * Now we are doing the default action for this signal.
2771 */
2772 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2773 continue;
2774
2775 /*
2776 * Global init gets no signals it doesn't want.
2777 * Container-init gets no signals it doesn't want from same
2778 * container.
2779 *
2780 * Note that if global/container-init sees a sig_kernel_only()
2781 * signal here, the signal must have been generated internally
2782 * or must have come from an ancestor namespace. In either
2783 * case, the signal cannot be dropped.
2784 */
2785 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2786 !sig_kernel_only(signr))
2787 continue;
2788
2789 if (sig_kernel_stop(signr)) {
2790 /*
2791 * The default action is to stop all threads in
2792 * the thread group. The job control signals
2793 * do nothing in an orphaned pgrp, but SIGSTOP
2794 * always works. Note that siglock needs to be
2795 * dropped during the call to is_orphaned_pgrp()
2796 * because of lock ordering with tasklist_lock.
2797 * This allows an intervening SIGCONT to be posted.
2798 * We need to check for that and bail out if necessary.
2799 */
2800 if (signr != SIGSTOP) {
2801 spin_unlock_irq(&sighand->siglock);
2802
2803 /* signals can be posted during this window */
2804
2805 if (is_current_pgrp_orphaned())
2806 goto relock;
2807
2808 spin_lock_irq(&sighand->siglock);
2809 }
2810
2811 if (likely(do_signal_stop(ksig->info.si_signo))) {
2812 /* It released the siglock. */
2813 goto relock;
2814 }
2815
2816 /*
2817 * We didn't actually stop, due to a race
2818 * with SIGCONT or something like that.
2819 */
2820 continue;
2821 }
2822
2823 fatal:
2824 spin_unlock_irq(&sighand->siglock);
2825 if (unlikely(cgroup_task_frozen(current)))
2826 cgroup_leave_frozen(true);
2827
2828 /*
2829 * Anything else is fatal, maybe with a core dump.
2830 */
2831 current->flags |= PF_SIGNALED;
2832
2833 if (sig_kernel_coredump(signr)) {
2834 if (print_fatal_signals)
2835 print_fatal_signal(ksig->info.si_signo);
2836 proc_coredump_connector(current);
2837 /*
2838 * If it was able to dump core, this kills all
2839 * other threads in the group and synchronizes with
2840 * their demise. If we lost the race with another
2841 * thread getting here, it set group_exit_code
2842 * first and our do_group_exit call below will use
2843 * that value and ignore the one we pass it.
2844 */
2845 do_coredump(&ksig->info);
2846 }
2847
2848 /*
2849 * PF_IO_WORKER threads will catch and exit on fatal signals
2850 * themselves. They have cleanup that must be performed, so
2851 * we cannot call do_exit() on their behalf.
2852 */
2853 if (current->flags & PF_IO_WORKER)
2854 goto out;
2855
2856 /*
2857 * Death signals, no core dump.
2858 */
2859 do_group_exit(ksig->info.si_signo);
2860 /* NOTREACHED */
2861 }
2862 spin_unlock_irq(&sighand->siglock);
2863out:
2864 ksig->sig = signr;
2865
2866 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2867 hide_si_addr_tag_bits(ksig);
2868
2869 return ksig->sig > 0;
2870}
2871
2872/**
2873 * signal_delivered - called after signal delivery to update blocked signals
2874 * @ksig: kernel signal struct
2875 * @stepping: nonzero if debugger single-step or block-step in use
2876 *
2877 * This function should be called when a signal has successfully been
2878 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2879 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
2880 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
2881 */
2882static void signal_delivered(struct ksignal *ksig, int stepping)
2883{
2884 sigset_t blocked;
2885
2886 /* A signal was successfully delivered, and the
2887 saved sigmask was stored on the signal frame,
2888 and will be restored by sigreturn. So we can
2889 simply clear the restore sigmask flag. */
2890 clear_restore_sigmask();
2891
2892 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2893 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2894 sigaddset(&blocked, ksig->sig);
2895 set_current_blocked(&blocked);
2896 if (current->sas_ss_flags & SS_AUTODISARM)
2897 sas_ss_reset(current);
2898 if (stepping)
2899 ptrace_notify(SIGTRAP, 0);
2900}
2901
2902void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2903{
2904 if (failed)
2905 force_sigsegv(ksig->sig);
2906 else
2907 signal_delivered(ksig, stepping);
2908}
2909
2910/*
2911 * It could be that complete_signal() picked us to notify about the
2912 * group-wide signal. Other threads should be notified now to take
2913 * the shared signals in @which since we will not.
2914 */
2915static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2916{
2917 sigset_t retarget;
2918 struct task_struct *t;
2919
2920 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2921 if (sigisemptyset(&retarget))
2922 return;
2923
2924 t = tsk;
2925 while_each_thread(tsk, t) {
2926 if (t->flags & PF_EXITING)
2927 continue;
2928
2929 if (!has_pending_signals(&retarget, &t->blocked))
2930 continue;
2931 /* Remove the signals this thread can handle. */
2932 sigandsets(&retarget, &retarget, &t->blocked);
2933
2934 if (!task_sigpending(t))
2935 signal_wake_up(t, 0);
2936
2937 if (sigisemptyset(&retarget))
2938 break;
2939 }
2940}
2941
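/*
 * exit_signals - signal bookkeeping for a task setting PF_EXITING
 *
 * Retargets shared pending signals that only @tsk could have serviced
 * to other live threads, and participates in a pending group stop on
 * its behalf, notifying the parent if @tsk was the last to stop.
 */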
2942void exit_signals(struct task_struct *tsk)
2943{
2944 int group_stop = 0;
2945 sigset_t unblocked;
2946
2947 /*
2948 * @tsk is about to have PF_EXITING set - lock out users which
2949 * expect stable threadgroup.
2950 */
2951 cgroup_threadgroup_change_begin(tsk);
2952
2953 if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
2954 tsk->flags |= PF_EXITING;
2955 cgroup_threadgroup_change_end(tsk);
2956 return;
2957 }
2958
2959 spin_lock_irq(&tsk->sighand->siglock);
2960 /*
2961 * From now this task is not visible for group-wide signals,
2962 * see wants_signal(), do_signal_stop().
2963 */
2964 tsk->flags |= PF_EXITING;
2965
2966 cgroup_threadgroup_change_end(tsk);
2967
2968 if (!task_sigpending(tsk))
2969 goto out;
2970
2971 unblocked = tsk->blocked;
2972 signotset(&unblocked);
2973 retarget_shared_pending(tsk, &unblocked);
2974
2975 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2976 task_participate_group_stop(tsk))
2977 group_stop = CLD_STOPPED;
2978out:
2979 spin_unlock_irq(&tsk->sighand->siglock);
2980
2981 /*
2982 * If group stop has completed, deliver the notification. This
2983 * should always go to the real parent of the group leader.
2984 */
2985 if (unlikely(group_stop)) {
2986 read_lock(&tasklist_lock);
2987 do_notify_parent_cldstop(tsk, false, group_stop);
2988 read_unlock(&tasklist_lock);
2989 }
2990}
2991
2992/*
2993 * System call entry points.
2994 */
2995
2996/**
2997 * sys_restart_syscall - restart a system call
2998 */
2999SYSCALL_DEFINE0(restart_syscall)
3000{
3001 struct restart_block *restart = &current->restart_block;
3002 return restart->fn(restart);
3003}
3004
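/*
 * Installed as restart_block.fn when a syscall must not be restarted
 * transparently; a stale sys_restart_syscall() then returns -EINTR.
 */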
3005long do_no_restart_syscall(struct restart_block *param)
3006{
3007 return -EINTR;
3008}
3009
3010static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3011{
3012 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3013 sigset_t newblocked;
3014 /* A set of now blocked but previously unblocked signals. */
3015 sigandnsets(&newblocked, newset, &current->blocked);
3016 retarget_shared_pending(tsk, &newblocked);
3017 }
3018 tsk->blocked = *newset;
3019 recalc_sigpending();
3020}
3021
3022/**
3023 * set_current_blocked - change current->blocked mask
3024 * @newset: new mask
3025 *
3026 * It is wrong to change ->blocked directly, this helper should be used
3027 * to ensure the process can't miss a shared signal we are going to block.
3028 */
3029void set_current_blocked(sigset_t *newset)
3030{
3031 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3032 __set_current_blocked(newset);
3033}
3034
3035void __set_current_blocked(const sigset_t *newset)
3036{
3037 struct task_struct *tsk = current;
3038
3039 /*
3040 * In case the signal mask hasn't changed, there is nothing we need
3041 * to do. The current->blocked shouldn't be modified by other task.
3042 */
3043 if (sigequalsets(&tsk->blocked, newset))
3044 return;
3045
3046 spin_lock_irq(&tsk->sighand->siglock);
3047 __set_task_blocked(tsk, newset);
3048 spin_unlock_irq(&tsk->sighand->siglock);
3049}
3050
3051/*
3052 * This is also useful for kernel threads that want to temporarily
3053 * (or permanently) block certain signals.
3054 *
3055 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3056 * interface happily blocks "unblockable" signals like SIGKILL
3057 * and friends.
3058 */
3059int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3060{
3061 struct task_struct *tsk = current;
3062 sigset_t newset;
3063
3064 /* Lockless, only current can change ->blocked, never from irq */
3065 if (oldset)
3066 *oldset = tsk->blocked;
3067
3068 switch (how) {
3069 case SIG_BLOCK:
3070 sigorsets(&newset, &tsk->blocked, set);
3071 break;
3072 case SIG_UNBLOCK:
3073 sigandnsets(&newset, &tsk->blocked, set);
3074 break;
3075 case SIG_SETMASK:
3076 newset = *set;
3077 break;
3078 default:
3079 return -EINVAL;
3080 }
3081
3082 __set_current_blocked(&newset);
3083 return 0;
3084}
3085EXPORT_SYMBOL(sigprocmask);
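
/*
 * A sketch of the kernel-internal usage described above (hypothetical
 * kthread, not code from this file):
 *
 *	sigset_t all, old;
 *
 *	sigfillset(&all);
 *	sigprocmask(SIG_SETMASK, &all, &old);	(blocks SIGKILL too)
 *	...
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */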
3086
3087/*
3088 * This API helps set app-provided sigmasks.
3089 *
3090 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3091 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3092 *
3093 * Note that it does set_restore_sigmask() in advance, so it must always be
3094 * paired with restore_saved_sigmask_unless() before returning from the syscall.
3095 */
3096int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3097{
3098 sigset_t kmask;
3099
3100 if (!umask)
3101 return 0;
3102 if (sigsetsize != sizeof(sigset_t))
3103 return -EINVAL;
3104 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3105 return -EFAULT;
3106
3107 set_restore_sigmask();
3108 current->saved_sigmask = current->blocked;
3109 set_current_blocked(&kmask);
3110
3111 return 0;
3112}
3113
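/*
 * Typical caller pattern (a sketch modeled on ppoll-style syscalls;
 * do_the_wait() is a hypothetical stand-in for the actual wait):
 *
 *	ret = set_user_sigmask(sigmask, sigsetsize);
 *	if (ret)
 *		return ret;
 *	ret = do_the_wait(...);
 *	restore_saved_sigmask_unless(ret == -EINTR);
 */
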
3114#ifdef CONFIG_COMPAT
3115int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3116 size_t sigsetsize)
3117{
3118 sigset_t kmask;
3119
3120 if (!umask)
3121 return 0;
3122 if (sigsetsize != sizeof(compat_sigset_t))
3123 return -EINVAL;
3124 if (get_compat_sigset(&kmask, umask))
3125 return -EFAULT;
3126
3127 set_restore_sigmask();
3128 current->saved_sigmask = current->blocked;
3129 set_current_blocked(&kmask);
3130
3131 return 0;
3132}
3133#endif
3134
3135/**
3136 * sys_rt_sigprocmask - change the list of currently blocked signals
3137 * @how: whether to add, remove, or set signals
3138 * @nset: new signal mask, if non-null
3139 * @oset: previous value of signal mask if non-null
3140 * @sigsetsize: size of sigset_t type
3141 */
3142SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3143 sigset_t __user *, oset, size_t, sigsetsize)
3144{
3145 sigset_t old_set, new_set;
3146 int error;
3147
3148 /* XXX: Don't preclude handling different sized sigset_t's. */
3149 if (sigsetsize != sizeof(sigset_t))
3150 return -EINVAL;
3151
3152 old_set = current->blocked;
3153
3154 if (nset) {
3155 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3156 return -EFAULT;
3157 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3158
3159 error = sigprocmask(how, &new_set, NULL);
3160 if (error)
3161 return error;
3162 }
3163
3164 if (oset) {
3165 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3166 return -EFAULT;
3167 }
3168
3169 return 0;
3170}
3171
3172#ifdef CONFIG_COMPAT
3173COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3174 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3175{
3176 sigset_t old_set = current->blocked;
3177
3178 /* XXX: Don't preclude handling different sized sigset_t's. */
3179 if (sigsetsize != sizeof(sigset_t))
3180 return -EINVAL;
3181
3182 if (nset) {
3183 sigset_t new_set;
3184 int error;
3185 if (get_compat_sigset(&new_set, nset))
3186 return -EFAULT;
3187 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3188
3189 error = sigprocmask(how, &new_set, NULL);
3190 if (error)
3191 return error;
3192 }
3193 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3194}
3195#endif
3196
3197static void do_sigpending(sigset_t *set)
3198{
3199 spin_lock_irq(&current->sighand->siglock);
3200 sigorsets(set, &current->pending.signal,
3201 &current->signal->shared_pending.signal);
3202 spin_unlock_irq(&current->sighand->siglock);
3203
3204 /* Outside the lock because only this thread touches it. */
3205 sigandsets(set, &current->blocked, set);
3206}
3207
3208/**
3209 * sys_rt_sigpending - examine a pending signal that has been raised
3210 * while blocked
3211 * @uset: stores pending signals
3212 * @sigsetsize: size of sigset_t type or smaller
3213 */
3214SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3215{
3216 sigset_t set;
3217
3218 if (sigsetsize > sizeof(*uset))
3219 return -EINVAL;
3220
3221 do_sigpending(&set);
3222
3223 if (copy_to_user(uset, &set, sigsetsize))
3224 return -EFAULT;
3225
3226 return 0;
3227}
3228
3229#ifdef CONFIG_COMPAT
3230COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3231 compat_size_t, sigsetsize)
3232{
3233 sigset_t set;
3234
3235 if (sigsetsize > sizeof(*uset))
3236 return -EINVAL;
3237
3238 do_sigpending(&set);
3239
3240 return put_compat_sigset(uset, &set, sigsetsize);
3241}
3242#endif
3243
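/*
 * For each signal with signal-specific si_codes: the highest valid
 * si_code (limit) and the siginfo layout those codes use.
 */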
3244static const struct {
3245 unsigned char limit, layout;
3246} sig_sicodes[] = {
3247 [SIGILL] = { NSIGILL, SIL_FAULT },
3248 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3249 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3250 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3251 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3252#if defined(SIGEMT)
3253 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3254#endif
3255 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3256 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3257 [SIGSYS] = { NSIGSYS, SIL_SYS },
3258};
3259
3260static bool known_siginfo_layout(unsigned sig, int si_code)
3261{
3262 if (si_code == SI_KERNEL)
3263 return true;
3264 else if (si_code > SI_USER) {
3265 if (sig_specific_sicodes(sig)) {
3266 if (si_code <= sig_sicodes[sig].limit)
3267 return true;
3268 }
3269 else if (si_code <= NSIGPOLL)
3270 return true;
3271 }
3272 else if (si_code >= SI_DETHREAD)
3273 return true;
3274 else if (si_code == SI_ASYNCNL)
3275 return true;
3276 return false;
3277}
3278
3279enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3280{
3281 enum siginfo_layout layout = SIL_KILL;
3282 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3283 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3284 (si_code <= sig_sicodes[sig].limit)) {
3285 layout = sig_sicodes[sig].layout;
3286 /* Handle the exceptions */
3287 if ((sig == SIGBUS) &&
3288 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3289 layout = SIL_FAULT_MCEERR;
3290 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3291 layout = SIL_FAULT_BNDERR;
3292#ifdef SEGV_PKUERR
3293 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3294 layout = SIL_FAULT_PKUERR;
3295#endif
3296 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3297 layout = SIL_FAULT_PERF_EVENT;
3298 else if (IS_ENABLED(CONFIG_SPARC) &&
3299 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3300 layout = SIL_FAULT_TRAPNO;
3301 else if (IS_ENABLED(CONFIG_ALPHA) &&
3302 ((sig == SIGFPE) ||
3303 ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3304 layout = SIL_FAULT_TRAPNO;
3305 }
3306 else if (si_code <= NSIGPOLL)
3307 layout = SIL_POLL;
3308 } else {
3309 if (si_code == SI_TIMER)
3310 layout = SIL_TIMER;
3311 else if (si_code == SI_SIGIO)
3312 layout = SIL_POLL;
3313 else if (si_code < 0)
3314 layout = SIL_RT;
3315 }
3316 return layout;
3317}
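
/*
 * For example, SIGSEGV with si_code SEGV_MAPERR resolves to SIL_FAULT
 * through the sig_sicodes table, while SEGV_BNDERR and SEGV_PKUERR are
 * special-cased above to SIL_FAULT_BNDERR and SIL_FAULT_PKUERR.
 */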
3318
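/*
 * Userspace's siginfo_t is larger than struct kernel_siginfo;
 * si_expansion() points at the extra bytes past the kernel-visible
 * part (SI_EXPANSION_SIZE of them).
 */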
3319static inline char __user *si_expansion(const siginfo_t __user *info)
3320{
3321 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3322}
3323
3324int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3325{
3326 char __user *expansion = si_expansion(to);
3327 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3328 return -EFAULT;
3329 if (clear_user(expansion, SI_EXPANSION_SIZE))
3330 return -EFAULT;
3331 return 0;
3332}
3333
3334static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3335 const siginfo_t __user *from)
3336{
3337 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3338 char __user *expansion = si_expansion(from);
3339 char buf[SI_EXPANSION_SIZE];
3340 int i;
3341 /*
3342 * An unknown si_code might need more than
3343 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3344 * extra bytes are 0. This guarantees copy_siginfo_to_user
3345 * will return this data to userspace exactly.
3346 */
3347 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3348 return -EFAULT;
3349 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3350 if (buf[i] != 0)
3351 return -E2BIG;
3352 }
3353 }
3354 return 0;
3355}
3356
3357static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3358 const siginfo_t __user *from)
3359{
3360 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3361 return -EFAULT;
3362 to->si_signo = signo;
3363 return post_copy_siginfo_from_user(to, from);
3364}
3365
3366int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3367{
3368 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3369 return -EFAULT;
3370 return post_copy_siginfo_from_user(to, from);
3371}
3372
3373#ifdef CONFIG_COMPAT
3374/**
3375 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3376 * @to: compat siginfo destination
3377 * @from: kernel siginfo source
3378 *
3379 * Note: This function does not work properly for SIGCHLD on x32, but
3380 * fortunately it doesn't have to. The only valid callers for this function are
3381 * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3382 * The latter does not care because SIGCHLD will never cause a coredump.
3383 */
3384void copy_siginfo_to_external32(struct compat_siginfo *to,
3385 const struct kernel_siginfo *from)
3386{
3387 memset(to, 0, sizeof(*to));
3388
3389 to->si_signo = from->si_signo;
3390 to->si_errno = from->si_errno;
3391 to->si_code = from->si_code;
3392 switch(siginfo_layout(from->si_signo, from->si_code)) {
3393 case SIL_KILL:
3394 to->si_pid = from->si_pid;
3395 to->si_uid = from->si_uid;
3396 break;
3397 case SIL_TIMER:
3398 to->si_tid = from->si_tid;
3399 to->si_overrun = from->si_overrun;
3400 to->si_int = from->si_int;
3401 break;
3402 case SIL_POLL:
3403 to->si_band = from->si_band;
3404 to->si_fd = from->si_fd;
3405 break;
3406 case SIL_FAULT:
3407 to->si_addr = ptr_to_compat(from->si_addr);
3408 break;
3409 case SIL_FAULT_TRAPNO:
3410 to->si_addr = ptr_to_compat(from->si_addr);
3411 to->si_trapno = from->si_trapno;
3412 break;
3413 case SIL_FAULT_MCEERR:
3414 to->si_addr = ptr_to_compat(from->si_addr);
3415 to->si_addr_lsb = from->si_addr_lsb;
3416 break;
3417 case SIL_FAULT_BNDERR:
3418 to->si_addr = ptr_to_compat(from->si_addr);
3419 to->si_lower = ptr_to_compat(from->si_lower);
3420 to->si_upper = ptr_to_compat(from->si_upper);
3421 break;
3422 case SIL_FAULT_PKUERR:
3423 to->si_addr = ptr_to_compat(from->si_addr);
3424 to->si_pkey = from->si_pkey;
3425 break;
3426 case SIL_FAULT_PERF_EVENT:
3427 to->si_addr = ptr_to_compat(from->si_addr);
3428 to->si_perf_data = from->si_perf_data;
3429 to->si_perf_type = from->si_perf_type;
3430 to->si_perf_flags = from->si_perf_flags;
3431 break;
3432 case SIL_CHLD:
3433 to->si_pid = from->si_pid;
3434 to->si_uid = from->si_uid;
3435 to->si_status = from->si_status;
3436 to->si_utime = from->si_utime;
3437 to->si_stime = from->si_stime;
3438 break;
3439 case SIL_RT:
3440 to->si_pid = from->si_pid;
3441 to->si_uid = from->si_uid;
3442 to->si_int = from->si_int;
3443 break;
3444 case SIL_SYS:
3445 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3446 to->si_syscall = from->si_syscall;
3447 to->si_arch = from->si_arch;
3448 break;
3449 }
3450}
3451
3452int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3453 const struct kernel_siginfo *from)
3454{
3455 struct compat_siginfo new;
3456
3457 copy_siginfo_to_external32(&new, from);
3458 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3459 return -EFAULT;
3460 return 0;
3461}
3462
3463static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3464 const struct compat_siginfo *from)
3465{
3466 clear_siginfo(to);
3467 to->si_signo = from->si_signo;
3468 to->si_errno = from->si_errno;
3469 to->si_code = from->si_code;
3470 switch(siginfo_layout(from->si_signo, from->si_code)) {
3471 case SIL_KILL:
3472 to->si_pid = from->si_pid;
3473 to->si_uid = from->si_uid;
3474 break;
3475 case SIL_TIMER:
3476 to->si_tid = from->si_tid;
3477 to->si_overrun = from->si_overrun;
3478 to->si_int = from->si_int;
3479 break;
3480 case SIL_POLL:
3481 to->si_band = from->si_band;
3482 to->si_fd = from->si_fd;
3483 break;
3484 case SIL_FAULT:
3485 to->si_addr = compat_ptr(from->si_addr);
3486 break;
3487 case SIL_FAULT_TRAPNO:
3488 to->si_addr = compat_ptr(from->si_addr);
3489 to->si_trapno = from->si_trapno;
3490 break;
3491 case SIL_FAULT_MCEERR:
3492 to->si_addr = compat_ptr(from->si_addr);
3493 to->si_addr_lsb = from->si_addr_lsb;
3494 break;
3495 case SIL_FAULT_BNDERR:
3496 to->si_addr = compat_ptr(from->si_addr);
3497 to->si_lower = compat_ptr(from->si_lower);
3498 to->si_upper = compat_ptr(from->si_upper);
3499 break;
3500 case SIL_FAULT_PKUERR:
3501 to->si_addr = compat_ptr(from->si_addr);
3502 to->si_pkey = from->si_pkey;
3503 break;
3504 case SIL_FAULT_PERF_EVENT:
3505 to->si_addr = compat_ptr(from->si_addr);
3506 to->si_perf_data = from->si_perf_data;
3507 to->si_perf_type = from->si_perf_type;
3508 to->si_perf_flags = from->si_perf_flags;
3509 break;
3510 case SIL_CHLD:
3511 to->si_pid = from->si_pid;
3512 to->si_uid = from->si_uid;
3513 to->si_status = from->si_status;
3514#ifdef CONFIG_X86_X32_ABI
3515 if (in_x32_syscall()) {
3516 to->si_utime = from->_sifields._sigchld_x32._utime;
3517 to->si_stime = from->_sifields._sigchld_x32._stime;
3518 } else
3519#endif
3520 {
3521 to->si_utime = from->si_utime;
3522 to->si_stime = from->si_stime;
3523 }
3524 break;
3525 case SIL_RT:
3526 to->si_pid = from->si_pid;
3527 to->si_uid = from->si_uid;
3528 to->si_int = from->si_int;
3529 break;
3530 case SIL_SYS:
3531 to->si_call_addr = compat_ptr(from->si_call_addr);
3532 to->si_syscall = from->si_syscall;
3533 to->si_arch = from->si_arch;
3534 break;
3535 }
3536 return 0;
3537}
3538
3539static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3540 const struct compat_siginfo __user *ufrom)
3541{
3542 struct compat_siginfo from;
3543
3544 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3545 return -EFAULT;
3546
3547 from.si_signo = signo;
3548 return post_copy_siginfo_from_user32(to, &from);
3549}
3550
3551int copy_siginfo_from_user32(struct kernel_siginfo *to,
3552 const struct compat_siginfo __user *ufrom)
3553{
3554 struct compat_siginfo from;
3555
3556 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3557 return -EFAULT;
3558
3559 return post_copy_siginfo_from_user32(to, &from);
3560}
3561#endif /* CONFIG_COMPAT */
3562
3563/**
3564 * do_sigtimedwait - wait for queued signals specified in @which
3565 * @which: queued signals to wait for
3566 * @info: if non-null, the signal's siginfo is returned here
3567 * @ts: upper bound on process time suspension
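 *
 * Returns: the dequeued signal number on success, -EAGAIN if no signal
 * arrived within the timeout, -EINTR if the sleep was interrupted, or
 * -EINVAL for an invalid @ts.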
3568 */
3569static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3570 const struct timespec64 *ts)
3571{
3572 ktime_t *to = NULL, timeout = KTIME_MAX;
3573 struct task_struct *tsk = current;
3574 sigset_t mask = *which;
3575 enum pid_type type;
3576 int sig, ret = 0;
3577
3578 if (ts) {
3579 if (!timespec64_valid(ts))
3580 return -EINVAL;
3581 timeout = timespec64_to_ktime(*ts);
3582 to = &timeout;
3583 }
3584
3585 /*
3586 * Invert the set of allowed signals to get those we want to block.
3587 */
3588 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3589 signotset(&mask);
3590
3591 spin_lock_irq(&tsk->sighand->siglock);
3592 sig = dequeue_signal(tsk, &mask, info, &type);
3593 if (!sig && timeout) {
3594 /*
3595 * None ready, temporarily unblock those we're interested in
3596 * while we are sleeping, so that we'll be awakened when
3597 * they arrive. Unblocking is always fine, we can avoid
3598 * set_current_blocked().
3599 */
3600 tsk->real_blocked = tsk->blocked;
3601 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3602 recalc_sigpending();
3603 spin_unlock_irq(&tsk->sighand->siglock);
3604
3605 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
3606 ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3607 HRTIMER_MODE_REL);
3608 spin_lock_irq(&tsk->sighand->siglock);
3609 __set_task_blocked(tsk, &tsk->real_blocked);
3610 sigemptyset(&tsk->real_blocked);
3611 sig = dequeue_signal(tsk, &mask, info, &type);
3612 }
3613 spin_unlock_irq(&tsk->sighand->siglock);
3614
3615 if (sig)
3616 return sig;
3617 return ret ? -EINTR : -EAGAIN;
3618}
3619
3620/**
3621 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3622 * in @uthese
3623 * @uthese: queued signals to wait for
3624 * @uinfo: if non-null, the signal's siginfo is returned here
3625 * @uts: upper bound on process time suspension
3626 * @sigsetsize: size of sigset_t type
3627 */
3628SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3629 siginfo_t __user *, uinfo,
3630 const struct __kernel_timespec __user *, uts,
3631 size_t, sigsetsize)
3632{
3633 sigset_t these;
3634 struct timespec64 ts;
3635 kernel_siginfo_t info;
3636 int ret;
3637
3638 /* XXX: Don't preclude handling different sized sigset_t's. */
3639 if (sigsetsize != sizeof(sigset_t))
3640 return -EINVAL;
3641
3642 if (copy_from_user(&these, uthese, sizeof(these)))
3643 return -EFAULT;
3644
3645 if (uts) {
3646 if (get_timespec64(&ts, uts))
3647 return -EFAULT;
3648 }
3649
3650 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3651
3652 if (ret > 0 && uinfo) {
3653 if (copy_siginfo_to_user(uinfo, &info))
3654 ret = -EFAULT;
3655 }
3656
3657 return ret;
3658}
3659
3660#ifdef CONFIG_COMPAT_32BIT_TIME
3661SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3662 siginfo_t __user *, uinfo,
3663 const struct old_timespec32 __user *, uts,
3664 size_t, sigsetsize)
3665{
3666 sigset_t these;
3667 struct timespec64 ts;
3668 kernel_siginfo_t info;
3669 int ret;
3670
3671 if (sigsetsize != sizeof(sigset_t))
3672 return -EINVAL;
3673
3674 if (copy_from_user(&these, uthese, sizeof(these)))
3675 return -EFAULT;
3676
3677 if (uts) {
3678 if (get_old_timespec32(&ts, uts))
3679 return -EFAULT;
3680 }
3681
3682 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3683
3684 if (ret > 0 && uinfo) {
3685 if (copy_siginfo_to_user(uinfo, &info))
3686 ret = -EFAULT;
3687 }
3688
3689 return ret;
3690}
3691#endif
3692
3693#ifdef CONFIG_COMPAT
3694COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3695 struct compat_siginfo __user *, uinfo,
3696 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3697{
3698 sigset_t s;
3699 struct timespec64 t;
3700 kernel_siginfo_t info;
3701 long ret;
3702
3703 if (sigsetsize != sizeof(sigset_t))
3704 return -EINVAL;
3705
3706 if (get_compat_sigset(&s, uthese))
3707 return -EFAULT;
3708
3709 if (uts) {
3710 if (get_timespec64(&t, uts))
3711 return -EFAULT;
3712 }
3713
3714 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3715
3716 if (ret > 0 && uinfo) {
3717 if (copy_siginfo_to_user32(uinfo, &info))
3718 ret = -EFAULT;
3719 }
3720
3721 return ret;
3722}
3723
3724#ifdef CONFIG_COMPAT_32BIT_TIME
3725COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3726 struct compat_siginfo __user *, uinfo,
3727 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3728{
3729 sigset_t s;
3730 struct timespec64 t;
3731 kernel_siginfo_t info;
3732 long ret;
3733
3734 if (sigsetsize != sizeof(sigset_t))
3735 return -EINVAL;
3736
3737 if (get_compat_sigset(&s, uthese))
3738 return -EFAULT;
3739
3740 if (uts) {
3741 if (get_old_timespec32(&t, uts))
3742 return -EFAULT;
3743 }
3744
3745 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3746
3747 if (ret > 0 && uinfo) {
3748 if (copy_siginfo_to_user32(uinfo, &info))
3749 ret = -EFAULT;
3750 }
3751
3752 return ret;
3753}
3754#endif
3755#endif
3756
3757static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3758{
3759 clear_siginfo(info);
3760 info->si_signo = sig;
3761 info->si_errno = 0;
3762 info->si_code = SI_USER;
3763 info->si_pid = task_tgid_vnr(current);
3764 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3765}
3766
3767/**
3768 * sys_kill - send a signal to a process
3769 * @pid: the PID of the process
3770 * @sig: signal to be sent
3771 */
3772SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3773{
3774 struct kernel_siginfo info;
3775
3776 prepare_kill_siginfo(sig, &info);
3777
3778 return kill_something_info(sig, &info, pid);
3779}
3780
3781/*
3782 * Verify that the signaler and signalee either are in the same pid namespace
3783 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3784 * namespace.
3785 */
3786static bool access_pidfd_pidns(struct pid *pid)
3787{
3788 struct pid_namespace *active = task_active_pid_ns(current);
3789 struct pid_namespace *p = ns_of_pid(pid);
3790
3791 for (;;) {
3792 if (!p)
3793 return false;
3794 if (p == active)
3795 break;
3796 p = p->parent;
3797 }
3798
3799 return true;
3800}
3801
3802static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3803 siginfo_t __user *info)
3804{
3805#ifdef CONFIG_COMPAT
3806 /*
3807 * Avoid hooking up compat syscalls and instead handle necessary
3808 * conversions here. Note, this is a stop-gap measure and should not be
3809 * considered a generic solution.
3810 */
3811 if (in_compat_syscall())
3812 return copy_siginfo_from_user32(
3813 kinfo, (struct compat_siginfo __user *)info);
3814#endif
3815 return copy_siginfo_from_user(kinfo, info);
3816}
3817
3818static struct pid *pidfd_to_pid(const struct file *file)
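/*
 * Resolve the struct pid behind @file: a pidfd proper via pidfd_pid(),
 * otherwise fall back to tgid_pidfd_to_pid() (which handles /proc/<pid>
 * directory fds).
 */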
3819{
3820 struct pid *pid;
3821
3822 pid = pidfd_pid(file);
3823 if (!IS_ERR(pid))
3824 return pid;
3825
3826 return tgid_pidfd_to_pid(file);
3827}
3828
3829/**
3830 * sys_pidfd_send_signal - Signal a process through a pidfd
3831 * @pidfd: file descriptor of the process
3832 * @sig: signal to send
3833 * @info: signal info
3834 * @flags: future flags
3835 *
3836 * The syscall currently only signals via PIDTYPE_PID which covers
3837 * kill(<positive-pid>, <signal>). It does not signal threads or process
3838 * groups.
3839 * In order to extend the syscall to threads and process groups the @flags
3840 * argument should be used. In essence, the @flags argument will determine
3841 * what is signaled and not the file descriptor itself. Put in other words,
3842 * grouping is a property of the flags argument not a property of the file
3843 * descriptor.
3844 *
3845 * Return: 0 on success, negative errno on failure
3846 */
3847SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3848 siginfo_t __user *, info, unsigned int, flags)
3849{
3850 int ret;
3851 struct fd f;
3852 struct pid *pid;
3853 kernel_siginfo_t kinfo;
3854
3855 /* Enforce flags be set to 0 until we add an extension. */
3856 if (flags)
3857 return -EINVAL;
3858
3859 f = fdget(pidfd);
3860 if (!f.file)
3861 return -EBADF;
3862
3863 /* Is this a pidfd? */
3864 pid = pidfd_to_pid(f.file);
3865 if (IS_ERR(pid)) {
3866 ret = PTR_ERR(pid);
3867 goto err;
3868 }
3869
3870 ret = -EINVAL;
3871 if (!access_pidfd_pidns(pid))
3872 goto err;
3873
3874 if (info) {
3875 ret = copy_siginfo_from_user_any(&kinfo, info);
3876 if (unlikely(ret))
3877 goto err;
3878
3879 ret = -EINVAL;
3880 if (unlikely(sig != kinfo.si_signo))
3881 goto err;
3882
3883 /* Only allow sending arbitrary signals to yourself. */
3884 ret = -EPERM;
3885 if ((task_pid(current) != pid) &&
3886 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3887 goto err;
3888 } else {
3889 prepare_kill_siginfo(sig, &kinfo);
3890 }
3891
3892 ret = kill_pid_info(sig, &kinfo, pid);
3893
3894err:
3895 fdput(f);
3896 return ret;
3897}
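
/*
 * Editorial note, not kernel code: a userspace sketch of driving the
 * syscall above. Raw syscall(2) is used on the assumption that the
 * local C library has no wrappers; error handling is elided.
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, target_pid, 0);
 *	if (pidfd >= 0) {
 *		syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *	}
 */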

static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe. No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}

static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}

/**
 * sys_tgkill - send signal to one specific thread
 * @tgid: the thread group ID of the thread
 * @pid: the PID of the thread
 * @sig: signal to be sent
 *
 * This syscall also checks the @tgid and returns -ESRCH even if the PID
 * exists but no longer belongs to the target thread group. This solves
 * the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
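
/*
 * Editorial note, not kernel code: a userspace sketch of tgkill(2).
 * Supplying the thread group ID lets the kernel reject a TID that has
 * been recycled into another process with -ESRCH instead of
 * misdirecting the signal. Raw syscall(2) is used since older C
 * libraries lack a wrapper.
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */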

/**
 * sys_tkill - send signal to one specific task
 * @pid: the PID of the task
 * @sig: signal to be sent
 *
 * Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, info, pid);
}

/**
 * sys_rt_sigqueueinfo - send signal information to a process
 * @pid: the PID of the process
 * @sig: signal to be sent
 * @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
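
/*
 * Editorial note, not kernel code: sigqueue(3) is the usual userspace
 * entry point for this syscall. It queues a (typically real-time)
 * signal with an accompanying value; forged si_code values are
 * rejected by do_rt_sigqueueinfo() above.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	union sigval value = { .sival_int = 42 };
 *	if (sigqueue(target_pid, SIGRTMIN, value) == -1)
 *		perror("sigqueue");
 */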

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif

static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif

/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);
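
/*
 * Editorial note, an illustrative in-kernel sketch: kthreads normally
 * reach kernel_sigaction() through the allow_signal()/disallow_signal()
 * wrappers and then poll signal_pending() in their main loop, roughly:
 *
 *	allow_signal(SIGKILL);
 *	while (!kthread_should_stop()) {
 *		if (signal_pending(current))
 *			break;
 *		do_work();
 *	}
 *
 * where do_work() stands in for the thread's real job.
 */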

void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (k->sa.sa_flags & SA_IMMUTABLE) {
		spin_unlock_irq(&p->sighand->siglock);
		return -EINVAL;
	}
	if (oact)
		*oact = *k;

	/*
	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
	 * e.g. by having an architecture use the bit in their uapi.
	 */
	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);

	/*
	 * Clear unknown flag bits in order to allow userspace to detect missing
	 * support for flag bits and to allow the kernel to use non-uapi bits
	 * internally.
	 */
	if (act)
		act->sa.sa_flags &= UAPI_SA_FLAGS;
	if (oact)
		oact->sa.sa_flags &= UAPI_SA_FLAGS;

	sigaction_compat_abi(act, oact);

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 * "Setting a signal action to SIG_IGN for a signal that is
		 * pending shall cause the pending signal to be discarded,
		 * whether or not it is blocked."
		 *
		 * "Setting a signal action to SIG_DFL for a signal that is
		 * pending and whose default action is to ignore the signal
		 * (for example, SIGCHLD), shall cause the pending signal to
		 * be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}

#ifdef CONFIG_DYNAMIC_SIGFRAME
static inline void sigaltstack_lock(void)
	__acquires(&current->sighand->siglock)
{
	spin_lock_irq(&current->sighand->siglock);
}

static inline void sigaltstack_unlock(void)
	__releases(&current->sighand->siglock)
{
	spin_unlock_irq(&current->sighand->siglock);
}
#else
static inline void sigaltstack_lock(void) { }
static inline void sigaltstack_unlock(void) { }
#endif

static int
do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
		size_t min_ss_size)
{
	struct task_struct *t = current;
	int ret = 0;

	if (oss) {
		memset(oss, 0, sizeof(stack_t));
		oss->ss_sp = (void __user *) t->sas_ss_sp;
		oss->ss_size = t->sas_ss_size;
		oss->ss_flags = sas_ss_flags(sp) |
			(current->sas_ss_flags & SS_FLAG_BITS);
	}

	if (ss) {
		void __user *ss_sp = ss->ss_sp;
		size_t ss_size = ss->ss_size;
		unsigned ss_flags = ss->ss_flags;
		int ss_mode;

		if (unlikely(on_sig_stack(sp)))
			return -EPERM;

		ss_mode = ss_flags & ~SS_FLAG_BITS;
		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0))
			return -EINVAL;

		/*
		 * Return before taking any locks if no actual
		 * sigaltstack changes were requested.
		 */
		if (t->sas_ss_sp == (unsigned long)ss_sp &&
		    t->sas_ss_size == ss_size &&
		    t->sas_ss_flags == ss_flags)
			return 0;

		sigaltstack_lock();
		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < min_ss_size))
				ret = -ENOMEM;
			if (!sigaltstack_size_valid(ss_size))
				ret = -ENOMEM;
		}
		if (!ret) {
			t->sas_ss_sp = (unsigned long) ss_sp;
			t->sas_ss_size = ss_size;
			t->sas_ss_flags = ss_flags;
		}
		sigaltstack_unlock();
	}
	return ret;
}

SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
{
	stack_t new, old;
	int err;
	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
			      current_user_stack_pointer(),
			      MINSIGSTKSZ);
	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
		err = -EFAULT;
	return err;
}
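
/*
 * Editorial note, not kernel code: a userspace sketch of installing an
 * alternate signal stack so a SIGSEGV handler can still run after the
 * main stack overflows. The handler must be registered with SA_ONSTACK
 * or the alternate stack is ignored; on_segv() is a placeholder.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = { 0 };
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;
 *	sigaltstack(&ss, NULL);
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_handler = on_segv;
 *	sa.sa_flags = SA_ONSTACK;
 *	sigaction(SIGSEGV, &sa, NULL);
 */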

int restore_altstack(const stack_t __user *uss)
{
	stack_t new;
	if (copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
			     MINSIGSTKSZ);
	/* squash all but EFAULT for now */
	return 0;
}

int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	return err;
}

#ifdef CONFIG_COMPAT
static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
				 compat_stack_t __user *uoss_ptr)
{
	stack_t uss, uoss;
	int ret;

	if (uss_ptr) {
		compat_stack_t uss32;
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
			     compat_user_stack_pointer(),
			     COMPAT_MINSIGSTKSZ);
	if (ret >= 0 && uoss_ptr) {
		compat_stack_t old;
		memset(&old, 0, sizeof(old));
		old.ss_sp = ptr_to_compat(uoss.ss_sp);
		old.ss_flags = uoss.ss_flags;
		old.ss_size = uoss.ss_size;
		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
			ret = -EFAULT;
	}
	return ret;
}

COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = do_compat_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;
	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	return err;
}
#endif

#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 * sys_sigpending - examine pending signals
 * @uset: where mask of pending signal is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
{
	sigset_t set;

	if (sizeof(old_sigset_t) > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
		return -EFAULT;

	return 0;
}
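
/*
 * Editorial note, not kernel code: the userspace view. sigpending(2)
 * reports signals that were raised while blocked and are still
 * awaiting delivery.
 *
 *	#include <signal.h>
 *
 *	sigset_t pending;
 *	sigpending(&pending);
 *	int sigint_is_pending = sigismember(&pending, SIGINT);
 */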

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
	sigset_t set;

	do_sigpending(&set);

	return put_user(set.sig[0], set32);
}
#endif

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 * sys_sigprocmask - examine and change blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: signals to add or remove (if non-null)
 * @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
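
/*
 * Editorial note, not kernel code: modern programs reach the
 * rt_sigprocmask path through the sigprocmask(2) wrapper. A common
 * pattern blocks a signal around a critical section and restores the
 * previous mask afterwards:
 *
 *	#include <signal.h>
 *
 *	sigset_t block, old;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGINT);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	(critical section runs here)
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */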
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifndef CONFIG_ODD_RT_SIGACTION
/**
 * sys_rt_sigaction - alter an action taken by a process
 * @sig: signal whose action is being changed
 * @act: new sigaction
 * @oact: used to save the previous sigaction
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
		return -EFAULT;

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
	if (ret)
		return ret;

	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
		return -EFAULT;

	return 0;
}
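
/*
 * Editorial note, not kernel code: the C library's sigaction(2)
 * wrapper routes here, passing the kernel's sigset size so the check
 * above succeeds. A typical handler installation:
 *
 *	#include <signal.h>
 *
 *	static void on_term(int sig) { (void)sig; }
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_handler = on_term;
 *	sigemptyset(&sa.sa_mask);
 *	sa.sa_flags = SA_RESTART;
 *	sigaction(SIGTERM, &sa, NULL);
 */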
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
					 sizeof(oact->sa_mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */

#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif

#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility. Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif

static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

/**
 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
 * until a signal is received
 * @unewset: new signal mask value
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}
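
/*
 * Editorial note, not kernel code: the classic race-free wait built on
 * sigsuspend(2). The signal stays blocked while the flag is tested;
 * sigsuspend() swaps in the wait mask and sleeps atomically, so a
 * signal arriving between the test and the sleep cannot be lost.
 * got_usr1 stands in for a volatile sig_atomic_t set by the handler.
 *
 *	#include <signal.h>
 *
 *	sigset_t block, wait_mask;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &wait_mask);
 *	sigdelset(&wait_mask, SIGUSR1);
 *	while (!got_usr1)
 *		sigsuspend(&wait_mask);
 */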

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;
	return sigsuspend(&newset);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline void siginfo_buildtime_checks(void)
{
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	/* Verify the offsets in the two siginfos match */
#define CHECK_OFFSET(field) \
	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))

	/* kill */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);

	/* timer */
	CHECK_OFFSET(si_tid);
	CHECK_OFFSET(si_overrun);
	CHECK_OFFSET(si_value);

	/* rt */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_value);

	/* sigchld */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_status);
	CHECK_OFFSET(si_utime);
	CHECK_OFFSET(si_stime);

	/* sigfault */
	CHECK_OFFSET(si_addr);
	CHECK_OFFSET(si_trapno);
	CHECK_OFFSET(si_addr_lsb);
	CHECK_OFFSET(si_lower);
	CHECK_OFFSET(si_upper);
	CHECK_OFFSET(si_pkey);
	CHECK_OFFSET(si_perf_data);
	CHECK_OFFSET(si_perf_type);
	CHECK_OFFSET(si_perf_flags);

	/* sigpoll */
	CHECK_OFFSET(si_band);
	CHECK_OFFSET(si_fd);

	/* sigsys */
	CHECK_OFFSET(si_call_addr);
	CHECK_OFFSET(si_syscall);
	CHECK_OFFSET(si_arch);
#undef CHECK_OFFSET

	/* usb asyncio */
	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
		     offsetof(struct siginfo, si_addr));
	if (sizeof(int) == sizeof(void __user *)) {
		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
			     sizeof(void __user *));
	} else {
		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
			      sizeof_field(struct siginfo, si_uid)) !=
			     sizeof(void __user *));
		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
			     offsetof(struct siginfo, si_uid));
	}
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
		     offsetof(struct compat_siginfo, si_addr));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof(compat_uptr_t));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof_field(struct siginfo, si_pid));
#endif
}

void __init signals_init(void)
{
	siginfo_buildtime_checks();

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	static struct task_struct *kdb_prev_t;
	int new_t, ret;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (!task_is_running(t) && new_t) {
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif /* CONFIG_KGDB_KDB */
1179 printk("\n");
1180 preempt_disable();
1181 show_regs(regs);
1182 preempt_enable();
1183}
1184
1185static int __init setup_print_fatal_signals(char *str)
1186{
1187 get_option (&str, &print_fatal_signals);
1188
1189 return 1;
1190}
1191
1192__setup("print-fatal-signals=", setup_print_fatal_signals);
1193
1194int
1195__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1196{
1197 return send_signal(sig, info, p, 1);
1198}
1199
1200static int
1201specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1202{
1203 return send_signal(sig, info, t, 0);
1204}
1205
1206int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1207 bool group)
1208{
1209 unsigned long flags;
1210 int ret = -ESRCH;
1211
1212 if (lock_task_sighand(p, &flags)) {
1213 ret = send_signal(sig, info, p, group);
1214 unlock_task_sighand(p, &flags);
1215 }
1216
1217 return ret;
1218}
1219
1220/*
1221 * Force a signal that the process can't ignore: if necessary
1222 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1223 *
1224 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1225 * since we do not want to have a signal handler that was blocked
1226 * be invoked when user space had explicitly blocked it.
1227 *
1228 * We don't want to have recursive SIGSEGVs and the like; that is
1229 * also why we clear SIGNAL_UNKILLABLE.
1230 */
1231int
1232force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1233{
1234 unsigned long int flags;
1235 int ret, blocked, ignored;
1236 struct k_sigaction *action;
1237
1238 spin_lock_irqsave(&t->sighand->siglock, flags);
1239 action = &t->sighand->action[sig-1];
1240 ignored = action->sa.sa_handler == SIG_IGN;
1241 blocked = sigismember(&t->blocked, sig);
1242 if (blocked || ignored) {
1243 action->sa.sa_handler = SIG_DFL;
1244 if (blocked) {
1245 sigdelset(&t->blocked, sig);
1246 recalc_sigpending_and_wake(t);
1247 }
1248 }
1249 if (action->sa.sa_handler == SIG_DFL)
1250 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1251 ret = specific_send_sig_info(sig, info, t);
1252 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1253
1254 return ret;
1255}
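
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): the typical caller of force_sig_info() in this era is an
 * architecture fault handler delivering a synchronous signal.  The
 * fault_address below is a hypothetical placeholder.
 *
 *	siginfo_t info;
 *
 *	info.si_signo = SIGSEGV;
 *	info.si_errno = 0;
 *	info.si_code  = SEGV_MAPERR;
 *	info.si_addr  = (void __user *)fault_address;
 *	force_sig_info(SIGSEGV, &info, current);
 */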
1256
1257/*
1258 * Nuke all other threads in the group.
1259 */
1260int zap_other_threads(struct task_struct *p)
1261{
1262 struct task_struct *t = p;
1263 int count = 0;
1264
1265 p->signal->group_stop_count = 0;
1266
1267 while_each_thread(p, t) {
1268 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1269 count++;
1270
1271 /* Don't bother with already dead threads */
1272 if (t->exit_state)
1273 continue;
1274 sigaddset(&t->pending.signal, SIGKILL);
1275 signal_wake_up(t, 1);
1276 }
1277
1278 return count;
1279}
1280
1281struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1282 unsigned long *flags)
1283{
1284 struct sighand_struct *sighand;
1285
1286 for (;;) {
1287 local_irq_save(*flags);
1288 rcu_read_lock();
1289 sighand = rcu_dereference(tsk->sighand);
1290 if (unlikely(sighand == NULL)) {
1291 rcu_read_unlock();
1292 local_irq_restore(*flags);
1293 break;
1294 }
1295
1296 spin_lock(&sighand->siglock);
1297 if (likely(sighand == tsk->sighand)) {
1298 rcu_read_unlock();
1299 break;
1300 }
1301 spin_unlock(&sighand->siglock);
1302 rcu_read_unlock();
1303 local_irq_restore(*flags);
1304 }
1305
1306 return sighand;
1307}
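
/*
 * Usage sketch (editor's addition): lock_task_sighand() wraps the
 * helper above and must be paired with unlock_task_sighand(); a
 * false return means the task has no sighand left, i.e. it is dead.
 * do_send_sig_info() above is the canonical pattern:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		... p->sighand and p->signal are stable here ...
 *		unlock_task_sighand(p, &flags);
 *	} else {
 *		... task already exited, typically return -ESRCH ...
 *	}
 */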
1308
1309/*
1310 * send signal info to all the members of a group
1311 */
1312int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1313{
1314 int ret;
1315
1316 rcu_read_lock();
1317 ret = check_kill_permission(sig, info, p);
1318 rcu_read_unlock();
1319
1320 if (!ret && sig)
1321 ret = do_send_sig_info(sig, info, p, true);
1322
1323 return ret;
1324}
1325
1326/*
1327 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1328 * control characters do (^C, ^Z etc)
1329 * - the caller must hold at least a readlock on tasklist_lock
1330 */
1331int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1332{
1333 struct task_struct *p = NULL;
1334 int retval, success;
1335
1336 success = 0;
1337 retval = -ESRCH;
1338 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1339 int err = group_send_sig_info(sig, info, p);
1340 success |= !err;
1341 retval = err;
1342 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1343 return success ? 0 : retval;
1344}
1345
1346int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1347{
1348 int error = -ESRCH;
1349 struct task_struct *p;
1350
1351 rcu_read_lock();
1352retry:
1353 p = pid_task(pid, PIDTYPE_PID);
1354 if (p) {
1355 error = group_send_sig_info(sig, info, p);
1356 if (unlikely(error == -ESRCH))
1357 /*
1358 * The task was unhashed in between, try again.
1359 * If it is dead, pid_task() will return NULL,
1360 * if we race with de_thread() it will find the
1361 * new leader.
1362 */
1363 goto retry;
1364 }
1365 rcu_read_unlock();
1366
1367 return error;
1368}
1369
1370int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1371{
1372 int error;
1373 rcu_read_lock();
1374 error = kill_pid_info(sig, info, find_vpid(pid));
1375 rcu_read_unlock();
1376 return error;
1377}
1378
1379static int kill_as_cred_perm(const struct cred *cred,
1380 struct task_struct *target)
1381{
1382 const struct cred *pcred = __task_cred(target);
1383 if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1384 !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
1385 return 0;
1386 return 1;
1387}
1388
1389/* like kill_pid_info(), but doesn't use uid/euid of "current" */
1390int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
1391 const struct cred *cred, u32 secid)
1392{
1393 int ret = -EINVAL;
1394 struct task_struct *p;
1395 unsigned long flags;
1396
1397 if (!valid_signal(sig))
1398 return ret;
1399
1400 rcu_read_lock();
1401 p = pid_task(pid, PIDTYPE_PID);
1402 if (!p) {
1403 ret = -ESRCH;
1404 goto out_unlock;
1405 }
1406 if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
1407 ret = -EPERM;
1408 goto out_unlock;
1409 }
1410 ret = security_task_kill(p, info, sig, secid);
1411 if (ret)
1412 goto out_unlock;
1413
1414 if (sig) {
1415 if (lock_task_sighand(p, &flags)) {
1416 ret = __send_signal(sig, info, p, 1, 0);
1417 unlock_task_sighand(p, &flags);
1418 } else
1419 ret = -ESRCH;
1420 }
1421out_unlock:
1422 rcu_read_unlock();
1423 return ret;
1424}
1425EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1426
1427/*
1428 * kill_something_info() interprets pid in interesting ways just like kill(2).
1429 *
1430 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1431 * is probably wrong. Should make it like BSD or SYSV.
1432 */
1433
1434static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1435{
1436 int ret;
1437
1438 if (pid > 0) {
1439 rcu_read_lock();
1440 ret = kill_pid_info(sig, info, find_vpid(pid));
1441 rcu_read_unlock();
1442 return ret;
1443 }
1444
1445 read_lock(&tasklist_lock);
1446 if (pid != -1) {
1447 ret = __kill_pgrp_info(sig, info,
1448 pid ? find_vpid(-pid) : task_pgrp(current));
1449 } else {
1450 int retval = 0, count = 0;
1451 struct task_struct * p;
1452
1453 for_each_process(p) {
1454 if (task_pid_vnr(p) > 1 &&
1455 !same_thread_group(p, current)) {
1456 int err = group_send_sig_info(sig, info, p);
1457 ++count;
1458 if (err != -EPERM)
1459 retval = err;
1460 }
1461 }
1462 ret = count ? retval : -ESRCH;
1463 }
1464 read_unlock(&tasklist_lock);
1465
1466 return ret;
1467}
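
/*
 * Illustrative user-space summary (editor's addition) of the pid
 * encodings dispatched above; error handling omitted:
 *
 *	kill(1234, SIGTERM);	pid > 0:   exactly one process
 *	kill(0, SIGTERM);	pid == 0:  the caller's process group
 *	kill(-1234, SIGTERM);	pid < -1:  process group 1234
 *	kill(-1, SIGTERM);	pid == -1: every process the caller may
 *				signal, except init and the caller's
 *				own thread group
 */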
1468
1469/*
1470 * These are for backward compatibility with the rest of the kernel source.
1471 */
1472
1473int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1474{
1475 /*
1476 * Make sure legacy kernel users don't send in bad values
1477 * (normal paths check this in check_kill_permission).
1478 */
1479 if (!valid_signal(sig))
1480 return -EINVAL;
1481
1482 return do_send_sig_info(sig, info, p, false);
1483}
1484
1485#define __si_special(priv) \
1486 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1487
1488int
1489send_sig(int sig, struct task_struct *p, int priv)
1490{
1491 return send_sig_info(sig, __si_special(priv), p);
1492}
1493
1494void
1495force_sig(int sig, struct task_struct *p)
1496{
1497 force_sig_info(sig, SEND_SIG_PRIV, p);
1498}
1499
1500/*
1501 * When things go south during signal handling, we
1502 * will force a SIGSEGV. And if the signal that caused
1503 * the problem was already a SIGSEGV, we'll want to
1504 * make sure we don't even try to deliver the signal..
1505 */
1506int
1507force_sigsegv(int sig, struct task_struct *p)
1508{
1509 if (sig == SIGSEGV) {
1510 unsigned long flags;
1511 spin_lock_irqsave(&p->sighand->siglock, flags);
1512 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1513 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1514 }
1515 force_sig(SIGSEGV, p);
1516 return 0;
1517}
1518
1519int kill_pgrp(struct pid *pid, int sig, int priv)
1520{
1521 int ret;
1522
1523 read_lock(&tasklist_lock);
1524 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1525 read_unlock(&tasklist_lock);
1526
1527 return ret;
1528}
1529EXPORT_SYMBOL(kill_pgrp);
1530
1531int kill_pid(struct pid *pid, int sig, int priv)
1532{
1533 return kill_pid_info(sig, __si_special(priv), pid);
1534}
1535EXPORT_SYMBOL(kill_pid);
1536
1537/*
1538 * These functions support sending signals using preallocated sigqueue
1539 * structures. This is needed "because realtime applications cannot
1540 * afford to lose notifications of asynchronous events, like timer
1541 * expirations or I/O completions". In the case of POSIX timers
1542 * we allocate the sigqueue structure at timer_create() time. If this
1543 * allocation fails we are able to report the failure to the application
1544 * with an EAGAIN error.
1545 */
1546struct sigqueue *sigqueue_alloc(void)
1547{
1548 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1549
1550 if (q)
1551 q->flags |= SIGQUEUE_PREALLOC;
1552
1553 return q;
1554}
1555
1556void sigqueue_free(struct sigqueue *q)
1557{
1558 unsigned long flags;
1559 spinlock_t *lock = &current->sighand->siglock;
1560
1561 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1562 /*
1563 * We must hold ->siglock while testing q->list
1564 * to serialize with collect_signal() or with
1565 * __exit_signal()->flush_sigqueue().
1566 */
1567 spin_lock_irqsave(lock, flags);
1568 q->flags &= ~SIGQUEUE_PREALLOC;
1569 /*
1570 * If it is queued it will be freed when dequeued,
1571 * like the "regular" sigqueue.
1572 */
1573 if (!list_empty(&q->list))
1574 q = NULL;
1575 spin_unlock_irqrestore(lock, flags);
1576
1577 if (q)
1578 __sigqueue_free(q);
1579}
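
/*
 * Illustrative user-space view (editor's sketch): the preallocation
 * described above is why timer_create(2) with SIGEV_SIGNAL can fail
 * up front instead of dropping an expiry notification later.  An
 * EAGAIN here may indicate, among other limits, that the sigqueue
 * could not be preallocated.
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGRTMIN,
 *	};
 *	timer_t tid;
 *
 *	if (timer_create(CLOCK_MONOTONIC, &sev, &tid) < 0)
 *		perror("timer_create");		(possibly EAGAIN)
 */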
1580
1581int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1582{
1583 int sig = q->info.si_signo;
1584 struct sigpending *pending;
1585 unsigned long flags;
1586 int ret, result;
1587
1588 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1589
1590 ret = -1;
1591 if (!likely(lock_task_sighand(t, &flags)))
1592 goto ret;
1593
1594 ret = 1; /* the signal is ignored */
1595 result = TRACE_SIGNAL_IGNORED;
1596 if (!prepare_signal(sig, t, false))
1597 goto out;
1598
1599 ret = 0;
1600 if (unlikely(!list_empty(&q->list))) {
1601 /*
1602 * If an SI_TIMER entry is already queued, just increment
1603 * the overrun count.
1604 */
1605 BUG_ON(q->info.si_code != SI_TIMER);
1606 q->info.si_overrun++;
1607 result = TRACE_SIGNAL_ALREADY_PENDING;
1608 goto out;
1609 }
1610 q->info.si_overrun = 0;
1611
1612 signalfd_notify(t, sig);
1613 pending = group ? &t->signal->shared_pending : &t->pending;
1614 list_add_tail(&q->list, &pending->list);
1615 sigaddset(&pending->signal, sig);
1616 complete_signal(sig, t, group);
1617 result = TRACE_SIGNAL_DELIVERED;
1618out:
1619 trace_signal_generate(sig, &q->info, t, group, result);
1620 unlock_task_sighand(t, &flags);
1621ret:
1622 return ret;
1623}
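
/*
 * Illustrative user-space counterpart (editor's sketch): the
 * si_overrun bump above surfaces through timer_getoverrun(2) when a
 * periodic timer expires again while its previous signal is still
 * queued.
 *
 *	void handler(int sig, siginfo_t *si, void *uctx)
 *	{
 *		timer_t *tidp = si->si_value.sival_ptr;
 *		int missed = timer_getoverrun(*tidp);
 *
 *		"missed" expirations were folded into this single
 *		delivery rather than queued separately.
 *	}
 *
 * The sival_ptr convention assumes the timer was created with
 * sigev_value.sival_ptr pointing at its own timer_t.
 */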
1624
1625/*
1626 * Let a parent know about the death of a child.
1627 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1628 *
1629 * Returns true if our parent ignored us and so we've switched to
1630 * self-reaping.
1631 */
1632bool do_notify_parent(struct task_struct *tsk, int sig)
1633{
1634 struct siginfo info;
1635 unsigned long flags;
1636 struct sighand_struct *psig;
1637 bool autoreap = false;
1638
1639 BUG_ON(sig == -1);
1640
1641 /* do_notify_parent_cldstop should have been called instead. */
1642 BUG_ON(task_is_stopped_or_traced(tsk));
1643
1644 BUG_ON(!tsk->ptrace &&
1645 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1646
1647 if (sig != SIGCHLD) {
1648 /*
1649 * This is only possible if parent == real_parent.
1650 * Check if it has changed security domain.
1651 */
1652 if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1653 sig = SIGCHLD;
1654 }
1655
1656 info.si_signo = sig;
1657 info.si_errno = 0;
1658 /*
1659 * We are under tasklist_lock here so our parent is tied to
1660 * us and cannot change.
1661 *
1662 * task_active_pid_ns will always return the same pid namespace
1663 * until a task passes through release_task.
1664 *
1665 * write_lock() currently calls preempt_disable() which is the
1666 * same as rcu_read_lock(), but according to Oleg it is not
1667 * correct to rely on this.
1668 */
1669 rcu_read_lock();
1670 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1671 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1672 task_uid(tsk));
1673 rcu_read_unlock();
1674
1675 info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime);
1676 info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime);
1677
1678 info.si_status = tsk->exit_code & 0x7f;
1679 if (tsk->exit_code & 0x80)
1680 info.si_code = CLD_DUMPED;
1681 else if (tsk->exit_code & 0x7f)
1682 info.si_code = CLD_KILLED;
1683 else {
1684 info.si_code = CLD_EXITED;
1685 info.si_status = tsk->exit_code >> 8;
1686 }
1687
1688 psig = tsk->parent->sighand;
1689 spin_lock_irqsave(&psig->siglock, flags);
1690 if (!tsk->ptrace && sig == SIGCHLD &&
1691 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1692 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1693 /*
1694 * We are exiting and our parent doesn't care. POSIX.1
1695 * defines special semantics for setting SIGCHLD to SIG_IGN
1696 * or setting the SA_NOCLDWAIT flag: we should be reaped
1697 * automatically and not left for our parent's wait4 call.
1698 * Rather than having the parent do it as a magic kind of
1699 * signal handler, we just set this to tell do_exit that we
1700 * can be cleaned up without becoming a zombie. Note that
1701 * we still call __wake_up_parent in this case, because a
1702 * blocked sys_wait4 might now return -ECHILD.
1703 *
1704 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1705 * is implementation-defined: we do (if you don't want
1706 * it, just use SIG_IGN instead).
1707 */
1708 autoreap = true;
1709 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1710 sig = 0;
1711 }
1712 if (valid_signal(sig) && sig)
1713 __group_send_sig_info(sig, &info, tsk->parent);
1714 __wake_up_parent(tsk, tsk->parent);
1715 spin_unlock_irqrestore(&psig->siglock, flags);
1716
1717 return autoreap;
1718}
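
/*
 * Illustrative user-space consequence (editor's sketch): the autoreap
 * path above is what makes ignoring SIGCHLD (or setting SA_NOCLDWAIT)
 * prevent zombies, at the cost of wait(2) returning -ECHILD, exactly
 * as the comment above describes.
 *
 *	signal(SIGCHLD, SIG_IGN);
 *	if (fork() == 0)
 *		_exit(0);		(child is reaped automatically)
 *	wait(NULL);			(fails with errno == ECHILD once
 *					 no unreaped children remain)
 */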
1719
1720/**
1721 * do_notify_parent_cldstop - notify parent of stopped/continued state change
1722 * @tsk: task reporting the state change
1723 * @for_ptracer: the notification is for ptracer
1724 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1725 *
1726 * Notify @tsk's parent that the stopped/continued state has changed. If
1727 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
1728 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1729 *
1730 * CONTEXT:
1731 * Must be called with tasklist_lock at least read locked.
1732 */
1733static void do_notify_parent_cldstop(struct task_struct *tsk,
1734 bool for_ptracer, int why)
1735{
1736 struct siginfo info;
1737 unsigned long flags;
1738 struct task_struct *parent;
1739 struct sighand_struct *sighand;
1740
1741 if (for_ptracer) {
1742 parent = tsk->parent;
1743 } else {
1744 tsk = tsk->group_leader;
1745 parent = tsk->real_parent;
1746 }
1747
1748 info.si_signo = SIGCHLD;
1749 info.si_errno = 0;
1750 /*
1751 * see comment in do_notify_parent() about the following 4 lines
1752 */
1753 rcu_read_lock();
1754 info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
1755 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1756 rcu_read_unlock();
1757
1758 info.si_utime = cputime_to_clock_t(tsk->utime);
1759 info.si_stime = cputime_to_clock_t(tsk->stime);
1760
1761 info.si_code = why;
1762 switch (why) {
1763 case CLD_CONTINUED:
1764 info.si_status = SIGCONT;
1765 break;
1766 case CLD_STOPPED:
1767 info.si_status = tsk->signal->group_exit_code & 0x7f;
1768 break;
1769 case CLD_TRAPPED:
1770 info.si_status = tsk->exit_code & 0x7f;
1771 break;
1772 default:
1773 BUG();
1774 }
1775
1776 sighand = parent->sighand;
1777 spin_lock_irqsave(&sighand->siglock, flags);
1778 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1779 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1780 __group_send_sig_info(SIGCHLD, &info, parent);
1781 /*
1782 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1783 */
1784 __wake_up_parent(tsk, parent);
1785 spin_unlock_irqrestore(&sighand->siglock, flags);
1786}
1787
1788static inline int may_ptrace_stop(void)
1789{
1790 if (!likely(current->ptrace))
1791 return 0;
1792 /*
1793 * Are we in the middle of do_coredump?
1794 * If so, and our tracer is also part of the coredump, stopping
1795 * is a deadlock situation and pointless because our tracer
1796 * is dead, so don't allow us to stop.
1797 * If SIGKILL was already sent before the caller unlocked
1798 * ->siglock we must see ->core_state != NULL. Otherwise it
1799 * is safe to enter schedule().
1800 */
1801 if (unlikely(current->mm->core_state) &&
1802 unlikely(current->mm == current->parent->mm))
1803 return 0;
1804
1805 return 1;
1806}
1807
1808/*
1809 * Return non-zero if there is a SIGKILL that should be waking us up.
1810 * Called with the siglock held.
1811 */
1812static int sigkill_pending(struct task_struct *tsk)
1813{
1814 return sigismember(&tsk->pending.signal, SIGKILL) ||
1815 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1816}
1817
1818/*
1819 * This must be called with current->sighand->siglock held.
1820 *
1821 * This should be the path for all ptrace stops.
1822 * We always set current->last_siginfo while stopped here.
1823 * That makes it a way to test a stopped process for
1824 * being ptrace-stopped vs being job-control-stopped.
1825 *
1826 * If we actually decide not to stop at all because the tracer
1827 * is gone, we keep current->exit_code unless clear_code.
1828 */
1829static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1830 __releases(&current->sighand->siglock)
1831 __acquires(&current->sighand->siglock)
1832{
1833 bool gstop_done = false;
1834
1835 if (arch_ptrace_stop_needed(exit_code, info)) {
1836 /*
1837 * The arch code has something special to do before a
1838 * ptrace stop. This is allowed to block, e.g. for faults
1839 * on user stack pages. We can't keep the siglock while
1840 * calling arch_ptrace_stop, so we must release it now.
1841 * To preserve proper semantics, we must do this before
1842 * any signal bookkeeping like checking group_stop_count.
1843 * Meanwhile, a SIGKILL could come in before we retake the
1844 * siglock. That must prevent us from sleeping in TASK_TRACED.
1845 * So after regaining the lock, we must check for SIGKILL.
1846 */
1847 spin_unlock_irq(&current->sighand->siglock);
1848 arch_ptrace_stop(exit_code, info);
1849 spin_lock_irq(&current->sighand->siglock);
1850 if (sigkill_pending(current))
1851 return;
1852 }
1853
1854 /*
1855 * We're committing to trapping. TRACED should be visible before
1856 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
1857 * Also, transition to TRACED and updates to ->jobctl should be
1858 * atomic with respect to siglock and should be done after the arch
1859 * hook as siglock is released and regrabbed across it.
1860 */
1861 set_current_state(TASK_TRACED);
1862
1863 current->last_siginfo = info;
1864 current->exit_code = exit_code;
1865
1866 /*
1867 * If @why is CLD_STOPPED, we're trapping to participate in a group
1868 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
1869 * across siglock relocks since INTERRUPT was scheduled, PENDING
1870 * could be clear now. We act as if SIGCONT is received after
1871 * TASK_TRACED is entered - ignore it.
1872 */
1873 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1874 gstop_done = task_participate_group_stop(current);
1875
1876 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
1877 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1878 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1879 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1880
1881 /* entering a trap, clear TRAPPING */
1882 task_clear_jobctl_trapping(current);
1883
1884 spin_unlock_irq(&current->sighand->siglock);
1885 read_lock(&tasklist_lock);
1886 if (may_ptrace_stop()) {
1887 /*
1888 * Notify parents of the stop.
1889 *
1890 * While ptraced, there are two parents - the ptracer and
1891 * the real_parent of the group_leader. The ptracer should
1892 * know about every stop while the real parent is only
1893 * interested in the completion of group stop. The states
1894 * for the two don't interact with each other. Notify
1895 * separately unless they're gonna be duplicates.
1896 */
1897 do_notify_parent_cldstop(current, true, why);
1898 if (gstop_done && ptrace_reparented(current))
1899 do_notify_parent_cldstop(current, false, why);
1900
1901 /*
1902 * Don't want to allow preemption here, because
1903 * sys_ptrace() needs this task to be inactive.
1904 *
1905 * XXX: implement read_unlock_no_resched().
1906 */
1907 preempt_disable();
1908 read_unlock(&tasklist_lock);
1909 preempt_enable_no_resched();
1910 schedule();
1911 } else {
1912 /*
1913 * By the time we got the lock, our tracer went away.
1914 * Don't drop the lock yet, another tracer may come.
1915 *
1916 * If @gstop_done, the ptracer went away between group stop
1917 * completion and here. During detach, it would have set
1918 * JOBCTL_STOP_PENDING on us and we'll re-enter
1919 * TASK_STOPPED in do_signal_stop() on return, so notifying
1920 * the real parent of the group stop completion is enough.
1921 */
1922 if (gstop_done)
1923 do_notify_parent_cldstop(current, false, why);
1924
1925 __set_current_state(TASK_RUNNING);
1926 if (clear_code)
1927 current->exit_code = 0;
1928 read_unlock(&tasklist_lock);
1929 }
1930
1931 /*
1932 * While in TASK_TRACED, we were considered "frozen enough".
1933 * Now that we woke up, it's crucial if we're supposed to be
1934 * frozen that we freeze now before running anything substantial.
1935 */
1936 try_to_freeze();
1937
1938 /*
1939 * We are back. Now reacquire the siglock before touching
1940 * last_siginfo, so that we are sure to have synchronized with
1941 * any signal-sending on another CPU that wants to examine it.
1942 */
1943 spin_lock_irq(&current->sighand->siglock);
1944 current->last_siginfo = NULL;
1945
1946 /* LISTENING can be set only during STOP traps, clear it */
1947 current->jobctl &= ~JOBCTL_LISTENING;
1948
1949 /*
1950 * Queued signals ignored us while we were stopped for tracing.
1951 * So check for any that we should take before resuming user mode.
1952 * This sets TIF_SIGPENDING, but never clears it.
1953 */
1954 recalc_sigpending_tsk(current);
1955}
1956
1957static void ptrace_do_notify(int signr, int exit_code, int why)
1958{
1959 siginfo_t info;
1960
1961 memset(&info, 0, sizeof info);
1962 info.si_signo = signr;
1963 info.si_code = exit_code;
1964 info.si_pid = task_pid_vnr(current);
1965 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1966
1967 /* Let the debugger run. */
1968 ptrace_stop(exit_code, why, 1, &info);
1969}
1970
1971void ptrace_notify(int exit_code)
1972{
1973 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1974
1975 spin_lock_irq(&current->sighand->siglock);
1976 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1977 spin_unlock_irq(&current->sighand->siglock);
1978}
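
/*
 * Tracer-side sketch (editor's addition): the exit_code checked above
 * is (PTRACE_EVENT_foo << 8) | SIGTRAP, which a debugger recovers
 * from the wait status:
 *
 *	int status;
 *
 *	waitpid(child, &status, 0);
 *	if (WIFSTOPPED(status) &&
 *	    status >> 8 == (SIGTRAP | (PTRACE_EVENT_FORK << 8)))
 *		... the child stopped at a PTRACE_EVENT_FORK report ...
 */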
1979
1980/**
1981 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
1982 * @signr: signr causing group stop if initiating
1983 *
1984 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
1985 * and participate in it. If already set, participate in the existing
1986 * group stop. If participated in a group stop (and thus slept), %true is
1987 * returned with siglock released.
1988 *
1989 * If ptraced, this function doesn't handle stop itself. Instead,
1990 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
1991 * untouched. The caller must ensure that INTERRUPT trap handling takes
1992 * place afterwards.
1993 *
1994 * CONTEXT:
1995 * Must be called with @current->sighand->siglock held, which is released
1996 * on %true return.
1997 *
1998 * RETURNS:
1999 * %false if group stop is already cancelled or ptrace trap is scheduled.
2000 * %true if participated in group stop.
2001 */
2002static bool do_signal_stop(int signr)
2003 __releases(&current->sighand->siglock)
2004{
2005 struct signal_struct *sig = current->signal;
2006
2007 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2008 unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2009 struct task_struct *t;
2010
2011 /* signr will be recorded in task->jobctl for retries */
2012 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2013
2014 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2015 unlikely(signal_group_exit(sig)))
2016 return false;
2017 /*
2018 * There is no group stop already in progress. We must
2019 * initiate one now.
2020 *
2021 * While ptraced, a task may be resumed while group stop is
2022 * still in effect and then receive a stop signal and
2023 * initiate another group stop. This deviates from the
2024 * usual behavior as two consecutive stop signals can't
2025 * cause two group stops when !ptraced. That is why we
2026 * also check !task_is_stopped(t) below.
2027 *
2028 * The condition can be distinguished by testing whether
2029 * SIGNAL_STOP_STOPPED is already set. Don't generate
2030 * group_exit_code in such case.
2031 *
2032 * This is not necessary for SIGNAL_STOP_CONTINUED because
2033 * an intervening stop signal is required to cause two
2034 * continued events regardless of ptrace.
2035 */
2036 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2037 sig->group_exit_code = signr;
2038
2039 sig->group_stop_count = 0;
2040
2041 if (task_set_jobctl_pending(current, signr | gstop))
2042 sig->group_stop_count++;
2043
2044 for (t = next_thread(current); t != current;
2045 t = next_thread(t)) {
2046 /*
2047 * Setting state to TASK_STOPPED for a group
2048 * stop is always done with the siglock held,
2049 * so this check has no races.
2050 */
2051 if (!task_is_stopped(t) &&
2052 task_set_jobctl_pending(t, signr | gstop)) {
2053 sig->group_stop_count++;
2054 if (likely(!(t->ptrace & PT_SEIZED)))
2055 signal_wake_up(t, 0);
2056 else
2057 ptrace_trap_notify(t);
2058 }
2059 }
2060 }
2061
2062 if (likely(!current->ptrace)) {
2063 int notify = 0;
2064
2065 /*
2066 * If there are no other threads in the group, or if there
2067 * is a group stop in progress and we are the last to stop,
2068 * report to the parent.
2069 */
2070 if (task_participate_group_stop(current))
2071 notify = CLD_STOPPED;
2072
2073 __set_current_state(TASK_STOPPED);
2074 spin_unlock_irq(&current->sighand->siglock);
2075
2076 /*
2077 * Notify the parent of the group stop completion. Because
2078 * we're not holding either the siglock or tasklist_lock
2079 * here, a ptracer may attach in between; however, this is for
2080 * group stop and should always be delivered to the real
2081 * parent of the group leader. The new ptracer will get
2082 * its notification when this task transitions into
2083 * TASK_TRACED.
2084 */
2085 if (notify) {
2086 read_lock(&tasklist_lock);
2087 do_notify_parent_cldstop(current, false, notify);
2088 read_unlock(&tasklist_lock);
2089 }
2090
2091 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2092 schedule();
2093 return true;
2094 } else {
2095 /*
2096 * While ptraced, group stop is handled by STOP trap.
2097 * Schedule it and let the caller deal with it.
2098 */
2099 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2100 return false;
2101 }
2102}
2103
2104/**
2105 * do_jobctl_trap - take care of ptrace jobctl traps
2106 *
2107 * When PT_SEIZED, it's used for both group stop and explicit
2108 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2109 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2110 * the stop signal; otherwise, %SIGTRAP.
2111 *
2112 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2113 * number as exit_code and no siginfo.
2114 *
2115 * CONTEXT:
2116 * Must be called with @current->sighand->siglock held, which may be
2117 * released and re-acquired before returning with intervening sleep.
2118 */
2119static void do_jobctl_trap(void)
2120{
2121 struct signal_struct *signal = current->signal;
2122 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2123
2124 if (current->ptrace & PT_SEIZED) {
2125 if (!signal->group_stop_count &&
2126 !(signal->flags & SIGNAL_STOP_STOPPED))
2127 signr = SIGTRAP;
2128 WARN_ON_ONCE(!signr);
2129 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2130 CLD_STOPPED);
2131 } else {
2132 WARN_ON_ONCE(!signr);
2133 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2134 current->exit_code = 0;
2135 }
2136}
2137
2138static int ptrace_signal(int signr, siginfo_t *info,
2139 struct pt_regs *regs, void *cookie)
2140{
2141 ptrace_signal_deliver(regs, cookie);
2142 /*
2143 * We do not check sig_kernel_stop(signr) but set this marker
2144 * unconditionally because we do not know whether debugger will
2145 * change signr. This flag has no meaning unless we are going
2146 * to stop after return from ptrace_stop(). In this case it will
2147 * be checked in do_signal_stop(), we should only stop if it was
2148 * not cleared by SIGCONT while we were sleeping. See also the
2149 * comment in dequeue_signal().
2150 */
2151 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2152 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2153
2154 /* We're back. Did the debugger cancel the sig? */
2155 signr = current->exit_code;
2156 if (signr == 0)
2157 return signr;
2158
2159 current->exit_code = 0;
2160
2161 /*
2162 * Update the siginfo structure if the signal has
2163 * changed. If the debugger wanted something
2164 * specific in the siginfo structure then it should
2165 * have updated *info via PTRACE_SETSIGINFO.
2166 */
2167 if (signr != info->si_signo) {
2168 info->si_signo = signr;
2169 info->si_errno = 0;
2170 info->si_code = SI_USER;
2171 rcu_read_lock();
2172 info->si_pid = task_pid_vnr(current->parent);
2173 info->si_uid = from_kuid_munged(current_user_ns(),
2174 task_uid(current->parent));
2175 rcu_read_unlock();
2176 }
2177
2178 /* If the (new) signal is now blocked, requeue it. */
2179 if (sigismember(&current->blocked, signr)) {
2180 specific_send_sig_info(signr, info, current);
2181 signr = 0;
2182 }
2183
2184 return signr;
2185}
2186
2187int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
2188 struct pt_regs *regs, void *cookie)
2189{
2190 struct sighand_struct *sighand = current->sighand;
2191 struct signal_struct *signal = current->signal;
2192 int signr;
2193
2194 if (unlikely(uprobe_deny_signal()))
2195 return 0;
2196
2197relock:
2198 /*
2199 * We'll jump back here after any time we were stopped in TASK_STOPPED.
2200 * While in TASK_STOPPED, we were considered "frozen enough".
2201 * Now that we woke up, it's crucial if we're supposed to be
2202 * frozen that we freeze now before running anything substantial.
2203 */
2204 try_to_freeze();
2205
2206 spin_lock_irq(&sighand->siglock);
2207 /*
2208 * Every stopped thread goes here after wakeup. Check to see if
2209 * we should notify the parent, prepare_signal(SIGCONT) encodes
2210 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2211 */
2212 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2213 int why;
2214
2215 if (signal->flags & SIGNAL_CLD_CONTINUED)
2216 why = CLD_CONTINUED;
2217 else
2218 why = CLD_STOPPED;
2219
2220 signal->flags &= ~SIGNAL_CLD_MASK;
2221
2222 spin_unlock_irq(&sighand->siglock);
2223
2224 /*
2225 * Notify the parent that we're continuing. This event is
2226 * always per-process and doesn't make a whole lot of sense
2227 * for ptracers, who shouldn't consume the state via
2228 * wait(2) either, but, for backward compatibility, notify
2229 * the ptracer of the group leader too unless it's gonna be
2230 * a duplicate.
2231 */
2232 read_lock(&tasklist_lock);
2233 do_notify_parent_cldstop(current, false, why);
2234
2235 if (ptrace_reparented(current->group_leader))
2236 do_notify_parent_cldstop(current->group_leader,
2237 true, why);
2238 read_unlock(&tasklist_lock);
2239
2240 goto relock;
2241 }
2242
2243 for (;;) {
2244 struct k_sigaction *ka;
2245
2246 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2247 do_signal_stop(0))
2248 goto relock;
2249
2250 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2251 do_jobctl_trap();
2252 spin_unlock_irq(&sighand->siglock);
2253 goto relock;
2254 }
2255
2256 signr = dequeue_signal(current, &current->blocked, info);
2257
2258 if (!signr)
2259 break; /* will return 0 */
2260
2261 if (unlikely(current->ptrace) && signr != SIGKILL) {
2262 signr = ptrace_signal(signr, info,
2263 regs, cookie);
2264 if (!signr)
2265 continue;
2266 }
2267
2268 ka = &sighand->action[signr-1];
2269
2270 /* Trace actually delivered signals. */
2271 trace_signal_deliver(signr, info, ka);
2272
2273 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2274 continue;
2275 if (ka->sa.sa_handler != SIG_DFL) {
2276 /* Run the handler. */
2277 *return_ka = *ka;
2278
2279 if (ka->sa.sa_flags & SA_ONESHOT)
2280 ka->sa.sa_handler = SIG_DFL;
2281
2282 break; /* will return non-zero "signr" value */
2283 }
2284
2285 /*
2286 * Now we are doing the default action for this signal.
2287 */
2288 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2289 continue;
2290
2291 /*
2292 * Global init gets no signals it doesn't want.
2293 * Container-init gets no signals it doesn't want from same
2294 * container.
2295 *
2296 * Note that if global/container-init sees a sig_kernel_only()
2297 * signal here, the signal must have been generated internally
2298 * or must have come from an ancestor namespace. In either
2299 * case, the signal cannot be dropped.
2300 */
2301 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2302 !sig_kernel_only(signr))
2303 continue;
2304
2305 if (sig_kernel_stop(signr)) {
2306 /*
2307 * The default action is to stop all threads in
2308 * the thread group. The job control signals
2309 * do nothing in an orphaned pgrp, but SIGSTOP
2310 * always works. Note that siglock needs to be
2311 * dropped during the call to is_orphaned_pgrp()
2312 * because of lock ordering with tasklist_lock.
2313 * This allows an intervening SIGCONT to be posted.
2314 * We need to check for that and bail out if necessary.
2315 */
2316 if (signr != SIGSTOP) {
2317 spin_unlock_irq(&sighand->siglock);
2318
2319 /* signals can be posted during this window */
2320
2321 if (is_current_pgrp_orphaned())
2322 goto relock;
2323
2324 spin_lock_irq(&sighand->siglock);
2325 }
2326
2327 if (likely(do_signal_stop(info->si_signo))) {
2328 /* It released the siglock. */
2329 goto relock;
2330 }
2331
2332 /*
2333 * We didn't actually stop, due to a race
2334 * with SIGCONT or something like that.
2335 */
2336 continue;
2337 }
2338
2339 spin_unlock_irq(&sighand->siglock);
2340
2341 /*
2342 * Anything else is fatal, maybe with a core dump.
2343 */
2344 current->flags |= PF_SIGNALED;
2345
2346 if (sig_kernel_coredump(signr)) {
2347 if (print_fatal_signals)
2348 print_fatal_signal(regs, info->si_signo);
2349 /*
2350 * If it was able to dump core, this kills all
2351 * other threads in the group and synchronizes with
2352 * their demise. If we lost the race with another
2353 * thread getting here, it set group_exit_code
2354 * first and our do_group_exit call below will use
2355 * that value and ignore the one we pass it.
2356 */
2357 do_coredump(info->si_signo, info->si_signo, regs);
2358 }
2359
2360 /*
2361 * Death signals, no core dump.
2362 */
2363 do_group_exit(info->si_signo);
2364 /* NOTREACHED */
2365 }
2366 spin_unlock_irq(&sighand->siglock);
2367 return signr;
2368}
2369
2370/**
2371 * signal_delivered -
2372 * @sig: number of signal being delivered
2373 * @info: siginfo_t of signal being delivered
2374 * @ka: sigaction setting that chose the handler
2375 * @regs: user register state
2376 * @stepping: nonzero if debugger single-step or block-step in use
2377 *
2378 * This function should be called when a signal has successfully been
2379 * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask
2380 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2381 * is set in @ka->sa.sa_flags). Tracing is notified.
2382 */
2383void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka,
2384 struct pt_regs *regs, int stepping)
2385{
2386 sigset_t blocked;
2387
2388 /* A signal was successfully delivered, and the
2389 saved sigmask was stored on the signal frame,
2390 and will be restored by sigreturn. So we can
2391 simply clear the restore sigmask flag. */
2392 clear_restore_sigmask();
2393
2394 sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
2395 if (!(ka->sa.sa_flags & SA_NODEFER))
2396 sigaddset(&blocked, sig);
2397 set_current_blocked(&blocked);
2398 tracehook_signal_handler(sig, info, ka, regs, stepping);
2399}
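
/*
 * Illustrative user-space view (editor's sketch): the mask update
 * above implements the sigaction(2) contract -- while the handler
 * runs, sa_mask plus the signal itself are blocked unless SA_NODEFER
 * is set.  "handler" below is a placeholder.
 *
 *	struct sigaction sa = { .sa_handler = handler };
 *
 *	sigemptyset(&sa.sa_mask);
 *	sigaddset(&sa.sa_mask, SIGUSR2);	(also blocked in handler)
 *	sa.sa_flags = 0;			(SIGUSR1 itself blocked too;
 *						 SA_NODEFER would allow it)
 *	sigaction(SIGUSR1, &sa, NULL);
 */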
2400
2401/*
2402 * It could be that complete_signal() picked us to notify about the
2403 * group-wide signal. Other threads should be notified now to take
2404 * the shared signals in @which since we will not.
2405 */
2406static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2407{
2408 sigset_t retarget;
2409 struct task_struct *t;
2410
2411 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2412 if (sigisemptyset(&retarget))
2413 return;
2414
2415 t = tsk;
2416 while_each_thread(tsk, t) {
2417 if (t->flags & PF_EXITING)
2418 continue;
2419
2420 if (!has_pending_signals(&retarget, &t->blocked))
2421 continue;
2422 /* Remove the signals this thread can handle. */
2423 sigandsets(&retarget, &retarget, &t->blocked);
2424
2425 if (!signal_pending(t))
2426 signal_wake_up(t, 0);
2427
2428 if (sigisemptyset(&retarget))
2429 break;
2430 }
2431}
2432
2433void exit_signals(struct task_struct *tsk)
2434{
2435 int group_stop = 0;
2436 sigset_t unblocked;
2437
2438 /*
2439 * @tsk is about to have PF_EXITING set - lock out users which
2440 * expect stable threadgroup.
2441 */
2442 threadgroup_change_begin(tsk);
2443
2444 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2445 tsk->flags |= PF_EXITING;
2446 threadgroup_change_end(tsk);
2447 return;
2448 }
2449
2450 spin_lock_irq(&tsk->sighand->siglock);
2451 /*
2452 * From now this task is not visible for group-wide signals,
2453 * see wants_signal(), do_signal_stop().
2454 */
2455 tsk->flags |= PF_EXITING;
2456
2457 threadgroup_change_end(tsk);
2458
2459 if (!signal_pending(tsk))
2460 goto out;
2461
2462 unblocked = tsk->blocked;
2463 signotset(&unblocked);
2464 retarget_shared_pending(tsk, &unblocked);
2465
2466 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2467 task_participate_group_stop(tsk))
2468 group_stop = CLD_STOPPED;
2469out:
2470 spin_unlock_irq(&tsk->sighand->siglock);
2471
2472 /*
2473 * If group stop has completed, deliver the notification. This
2474 * should always go to the real parent of the group leader.
2475 */
2476 if (unlikely(group_stop)) {
2477 read_lock(&tasklist_lock);
2478 do_notify_parent_cldstop(tsk, false, group_stop);
2479 read_unlock(&tasklist_lock);
2480 }
2481}
2482
2483EXPORT_SYMBOL(recalc_sigpending);
2484EXPORT_SYMBOL_GPL(dequeue_signal);
2485EXPORT_SYMBOL(flush_signals);
2486EXPORT_SYMBOL(force_sig);
2487EXPORT_SYMBOL(send_sig);
2488EXPORT_SYMBOL(send_sig_info);
2489EXPORT_SYMBOL(sigprocmask);
2490EXPORT_SYMBOL(block_all_signals);
2491EXPORT_SYMBOL(unblock_all_signals);
2492
2493
2494/*
2495 * System call entry points.
2496 */
2497
2498/**
2499 * sys_restart_syscall - restart a system call
2500 */
2501SYSCALL_DEFINE0(restart_syscall)
2502{
2503 struct restart_block *restart = &current_thread_info()->restart_block;
2504 return restart->fn(restart);
2505}
2506
2507long do_no_restart_syscall(struct restart_block *param)
2508{
2509 return -EINTR;
2510}
2511
2512static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2513{
2514 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2515 sigset_t newblocked;
2516 /* A set of now blocked but previously unblocked signals. */
2517 sigandnsets(&newblocked, newset, &current->blocked);
2518 retarget_shared_pending(tsk, &newblocked);
2519 }
2520 tsk->blocked = *newset;
2521 recalc_sigpending();
2522}
2523
2524/**
2525 * set_current_blocked - change current->blocked mask
2526 * @newset: new mask
2527 *
2528 * It is wrong to change ->blocked directly, this helper should be used
2529 * to ensure the process can't miss a shared signal we are going to block.
2530 */
2531void set_current_blocked(sigset_t *newset)
2532{
2533 struct task_struct *tsk = current;
2534 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2535 spin_lock_irq(&tsk->sighand->siglock);
2536 __set_task_blocked(tsk, newset);
2537 spin_unlock_irq(&tsk->sighand->siglock);
2538}
2539
2540void __set_current_blocked(const sigset_t *newset)
2541{
2542 struct task_struct *tsk = current;
2543
2544 spin_lock_irq(&tsk->sighand->siglock);
2545 __set_task_blocked(tsk, newset);
2546 spin_unlock_irq(&tsk->sighand->siglock);
2547}
2548
2549/*
2550 * This is also useful for kernel threads that want to temporarily
2551 * (or permanently) block certain signals.
2552 *
2553 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2554 * interface happily blocks "unblockable" signals like SIGKILL
2555 * and friends.
2556 */
2557int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2558{
2559 struct task_struct *tsk = current;
2560 sigset_t newset;
2561
2562 /* Lockless, only current can change ->blocked, never from irq */
2563 if (oldset)
2564 *oldset = tsk->blocked;
2565
2566 switch (how) {
2567 case SIG_BLOCK:
2568 sigorsets(&newset, &tsk->blocked, set);
2569 break;
2570 case SIG_UNBLOCK:
2571 sigandnsets(&newset, &tsk->blocked, set);
2572 break;
2573 case SIG_SETMASK:
2574 newset = *set;
2575 break;
2576 default:
2577 return -EINVAL;
2578 }
2579
2580 __set_current_blocked(&newset);
2581 return 0;
2582}
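
/*
 * Illustrative summary of the three modes (editor's sketch, shown as
 * the equivalent user-space calls):
 *
 *	sigset_t set;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *
 *	sigprocmask(SIG_BLOCK, &set, NULL);	blocked |= set
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);	blocked &= ~set
 *	sigprocmask(SIG_SETMASK, &set, NULL);	blocked = set
 */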
2583
2584/**
2585 * sys_rt_sigprocmask - change the list of currently blocked signals
2586 * @how: whether to add, remove, or set signals
2587 * @nset: new set of blocked signals, or NULL to leave the mask unchanged
2588 * @oset: previous value of signal mask if non-null
2589 * @sigsetsize: size of sigset_t type
2590 */
2591SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2592 sigset_t __user *, oset, size_t, sigsetsize)
2593{
2594 sigset_t old_set, new_set;
2595 int error;
2596
2597 /* XXX: Don't preclude handling different sized sigset_t's. */
2598 if (sigsetsize != sizeof(sigset_t))
2599 return -EINVAL;
2600
2601 old_set = current->blocked;
2602
2603 if (nset) {
2604 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2605 return -EFAULT;
2606 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2607
2608 error = sigprocmask(how, &new_set, NULL);
2609 if (error)
2610 return error;
2611 }
2612
2613 if (oset) {
2614 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2615 return -EFAULT;
2616 }
2617
2618 return 0;
2619}
2620
2621long do_sigpending(void __user *set, unsigned long sigsetsize)
2622{
2623 long error = -EINVAL;
2624 sigset_t pending;
2625
2626 if (sigsetsize > sizeof(sigset_t))
2627 goto out;
2628
2629 spin_lock_irq(&current->sighand->siglock);
2630 sigorsets(&pending, &current->pending.signal,
2631 &current->signal->shared_pending.signal);
2632 spin_unlock_irq(&current->sighand->siglock);
2633
2634 /* Outside the lock because only this thread touches it. */
2635 sigandsets(&pending, &current->blocked, &pending);
2636
2637 error = -EFAULT;
2638 if (!copy_to_user(set, &pending, sigsetsize))
2639 error = 0;
2640
2641out:
2642 return error;
2643}
2644
2645/**
2646 * sys_rt_sigpending - examine a pending signal that has been raised
2647 * while blocked
2648 * @set: stores pending signals
2649 * @sigsetsize: size of sigset_t type or larger
2650 */
2651SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2652{
2653 return do_sigpending(set, sigsetsize);
2654}
2655
2656#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2657
2658int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2659{
2660 int err;
2661
2662 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2663 return -EFAULT;
2664 if (from->si_code < 0)
2665 return __copy_to_user(to, from, sizeof(siginfo_t))
2666 ? -EFAULT : 0;
2667 /*
2668 * If you change siginfo_t structure, please be sure
2669 * this code is fixed accordingly.
2670 * Please remember to update the signalfd_copyinfo() function
2671 * inside fs/signalfd.c too, in case siginfo_t changes.
2672 * It should never copy any pad contained in the structure
2673 * to avoid security leaks, but must copy the generic
2674 * 3 ints plus the relevant union member.
2675 */
2676 err = __put_user(from->si_signo, &to->si_signo);
2677 err |= __put_user(from->si_errno, &to->si_errno);
2678 err |= __put_user((short)from->si_code, &to->si_code);
2679 switch (from->si_code & __SI_MASK) {
2680 case __SI_KILL:
2681 err |= __put_user(from->si_pid, &to->si_pid);
2682 err |= __put_user(from->si_uid, &to->si_uid);
2683 break;
2684 case __SI_TIMER:
2685 err |= __put_user(from->si_tid, &to->si_tid);
2686 err |= __put_user(from->si_overrun, &to->si_overrun);
2687 err |= __put_user(from->si_ptr, &to->si_ptr);
2688 break;
2689 case __SI_POLL:
2690 err |= __put_user(from->si_band, &to->si_band);
2691 err |= __put_user(from->si_fd, &to->si_fd);
2692 break;
2693 case __SI_FAULT:
2694 err |= __put_user(from->si_addr, &to->si_addr);
2695#ifdef __ARCH_SI_TRAPNO
2696 err |= __put_user(from->si_trapno, &to->si_trapno);
2697#endif
2698#ifdef BUS_MCEERR_AO
2699 /*
2700 * Other callers might not initialize the si_lsb field,
2701 * so check explicitly for the right codes here.
2702 */
2703 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2704 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2705#endif
2706 break;
2707 case __SI_CHLD:
2708 err |= __put_user(from->si_pid, &to->si_pid);
2709 err |= __put_user(from->si_uid, &to->si_uid);
2710 err |= __put_user(from->si_status, &to->si_status);
2711 err |= __put_user(from->si_utime, &to->si_utime);
2712 err |= __put_user(from->si_stime, &to->si_stime);
2713 break;
2714 case __SI_RT: /* This is not generated by the kernel as of now. */
2715 case __SI_MESGQ: /* But this is */
2716 err |= __put_user(from->si_pid, &to->si_pid);
2717 err |= __put_user(from->si_uid, &to->si_uid);
2718 err |= __put_user(from->si_ptr, &to->si_ptr);
2719 break;
2720#ifdef __ARCH_SIGSYS
2721 case __SI_SYS:
2722 err |= __put_user(from->si_call_addr, &to->si_call_addr);
2723 err |= __put_user(from->si_syscall, &to->si_syscall);
2724 err |= __put_user(from->si_arch, &to->si_arch);
2725 break;
2726#endif
2727 default: /* this is just in case for now ... */
2728 err |= __put_user(from->si_pid, &to->si_pid);
2729 err |= __put_user(from->si_uid, &to->si_uid);
2730 break;
2731 }
2732 return err;
2733}
2734
2735#endif
2736
2737/**
2738 * do_sigtimedwait - wait for queued signals specified in @which
2739 * @which: queued signals to wait for
2740 * @info: if non-null, the signal's siginfo is returned here
2741 * @ts: upper bound on process time suspension
2742 */
2743int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2744 const struct timespec *ts)
2745{
2746 struct task_struct *tsk = current;
2747 long timeout = MAX_SCHEDULE_TIMEOUT;
2748 sigset_t mask = *which;
2749 int sig;
2750
2751 if (ts) {
2752 if (!timespec_valid(ts))
2753 return -EINVAL;
2754 timeout = timespec_to_jiffies(ts);
2755 /*
2756 * We can be close to the next tick, add another one
2757 * to ensure we will wait at least the time asked for.
2758 */
2759 if (ts->tv_sec || ts->tv_nsec)
2760 timeout++;
2761 }
2762
2763 /*
2764 * Invert the set of allowed signals to get those we want to block.
2765 */
2766 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2767 signotset(&mask);
2768
2769 spin_lock_irq(&tsk->sighand->siglock);
2770 sig = dequeue_signal(tsk, &mask, info);
2771 if (!sig && timeout) {
2772 /*
2773 * None ready, temporarily unblock those we're interested
2774 * in while we are sleeping, so that we'll be awakened when
2775 * they arrive. Unblocking is always fine, we can avoid
2776 * set_current_blocked().
2777 */
2778 tsk->real_blocked = tsk->blocked;
2779 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2780 recalc_sigpending();
2781 spin_unlock_irq(&tsk->sighand->siglock);
2782
2783 timeout = schedule_timeout_interruptible(timeout);
2784
2785 spin_lock_irq(&tsk->sighand->siglock);
2786 __set_task_blocked(tsk, &tsk->real_blocked);
2787 siginitset(&tsk->real_blocked, 0);
2788 sig = dequeue_signal(tsk, &mask, info);
2789 }
2790 spin_unlock_irq(&tsk->sighand->siglock);
2791
2792 if (sig)
2793 return sig;
2794 return timeout ? -EINTR : -EAGAIN;
2795}
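
/*
 * Illustrative user-space use (editor's sketch): block the signal
 * first, or it may be delivered the ordinary way instead of being
 * picked up here.
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
 *	int sig;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	sig = sigtimedwait(&set, &si, &ts);
 *
 * On success sig is the signal number; on timeout the call fails
 * with EAGAIN, matching the return values computed above.
 */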
2796
2797/**
2798 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
2799 * in @uthese
2800 * @uthese: queued signals to wait for
2801 * @uinfo: if non-null, the signal's siginfo is returned here
2802 * @uts: upper bound on process time suspension
2803 * @sigsetsize: size of sigset_t type
2804 */
2805SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2806 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2807 size_t, sigsetsize)
2808{
2809 sigset_t these;
2810 struct timespec ts;
2811 siginfo_t info;
2812 int ret;
2813
2814 /* XXX: Don't preclude handling different sized sigset_t's. */
2815 if (sigsetsize != sizeof(sigset_t))
2816 return -EINVAL;
2817
2818 if (copy_from_user(&these, uthese, sizeof(these)))
2819 return -EFAULT;
2820
2821 if (uts) {
2822 if (copy_from_user(&ts, uts, sizeof(ts)))
2823 return -EFAULT;
2824 }
2825
2826 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2827
2828 if (ret > 0 && uinfo) {
2829 if (copy_siginfo_to_user(uinfo, &info))
2830 ret = -EFAULT;
2831 }
2832
2833 return ret;
2834}
2835
2836/**
2837 * sys_kill - send a signal to a process
2838 * @pid: the PID of the process
2839 * @sig: signal to be sent
2840 */
2841SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2842{
2843 struct siginfo info;
2844
2845 info.si_signo = sig;
2846 info.si_errno = 0;
2847 info.si_code = SI_USER;
2848 info.si_pid = task_tgid_vnr(current);
2849 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2850
2851 return kill_something_info(sig, &info, pid);
2852}
2853
2854static int
2855do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2856{
2857 struct task_struct *p;
2858 int error = -ESRCH;
2859
2860 rcu_read_lock();
2861 p = find_task_by_vpid(pid);
2862 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2863 error = check_kill_permission(sig, info, p);
2864 /*
2865 * The null signal is a permissions and process existence
2866 * probe. No signal is actually delivered.
2867 */
2868 if (!error && sig) {
2869 error = do_send_sig_info(sig, info, p, false);
2870 /*
2871 * If lock_task_sighand() failed we pretend the task
2872 * dies after receiving the signal. The window is tiny,
2873 * and the signal is private anyway.
2874 */
2875 if (unlikely(error == -ESRCH))
2876 error = 0;
2877 }
2878 }
2879 rcu_read_unlock();
2880
2881 return error;
2882}
2883
2884static int do_tkill(pid_t tgid, pid_t pid, int sig)
2885{
2886 struct siginfo info;
2887
2888 info.si_signo = sig;
2889 info.si_errno = 0;
2890 info.si_code = SI_TKILL;
2891 info.si_pid = task_tgid_vnr(current);
2892 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2893
2894 return do_send_specific(tgid, pid, sig, &info);
2895}
2896
2897/**
2898 * sys_tgkill - send signal to one specific thread
2899 * @tgid: the thread group ID of the thread
2900 * @pid: the PID of the thread
2901 * @sig: signal to be sent
2902 *
2903 * This syscall also checks the @tgid and returns -ESRCH even if the PID
2904 * exists but no longer belongs to the target thread group. This
2905 * solves the problem of threads exiting and PIDs getting reused.
2906 */
2907SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2908{
2909 /* This is only valid for single tasks */
2910 if (pid <= 0 || tgid <= 0)
2911 return -EINVAL;
2912
2913 return do_tkill(tgid, pid, sig);
2914}

/**
 * sys_tkill - send signal to one specific task
 * @pid: the PID of the task
 * @sig: signal to be sent
 *
 * Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

/**
 * sys_rt_sigqueueinfo - send signal information to a process
 * @pid: the PID of the process
 * @sig: signal to be sent
 * @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/*
	 * Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if (info.si_code >= 0 || info.si_code == SI_TKILL) {
		/* We used to allow any < 0 si_code */
		WARN_ON_ONCE(info.si_code < 0);
		return -EPERM;
	}
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, &info, pid);
}
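
/*
 * Userspace sketch: glibc's sigqueue(3) is the usual front end to this
 * syscall. It fills in si_code = SI_QUEUE, which is negative and thus
 * allowed; kernel-generated codes (>= 0) and SI_TKILL are rejected by
 * the check above.
 *
 *	#include <signal.h>
 *
 *	union sigval v = { .sival_int = 42 };
 *	sigqueue(pid, SIGUSR1, v);	// receiver reads si_value.sival_int
 *					// in an SA_SIGINFO handler
 */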

long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/*
	 * Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if (info->si_code >= 0 || info->si_code == SI_TKILL) {
		/* We used to allow any < 0 si_code */
		WARN_ON_ONCE(info->si_code < 0);
		return -EPERM;
	}
	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
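
/*
 * Userspace sketch of the POSIX 3.3.1.3 rule quoted above: setting the
 * disposition to SIG_IGN discards an already-pending instance, so the
 * signal raised below is never delivered, even once it is unblocked.
 *
 *	#include <signal.h>
 *
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	raise(SIGUSR1);			// pending and blocked
 *	signal(SIGUSR1, SIG_IGN);	// pending instance discarded
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);	// nothing arrives
 */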

int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	oss.ss_sp = (void __user *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp);

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			goto out;
		error = __get_user(ss_sp, &uss->ss_sp) |
			__get_user(ss_flags, &uss->ss_flags) |
			__get_user(ss_size, &uss->ss_size);
		if (error)
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 *	  old code may have been written using ss_flags==0
		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
		 *	  way that worked) - this fix preserves that older
		 *	  mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}
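
/*
 * Userspace sketch (segv_handler is a hypothetical handler supplied by
 * the caller): the main consumer of an alternate stack is a SIGSEGV
 * handler that must run after the normal stack has overflowed.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = { .ss_sp = malloc(SIGSTKSZ),
 *		       .ss_size = SIGSTKSZ,
 *		       .ss_flags = 0 };
 *	struct sigaction sa = { .sa_handler = segv_handler,
 *				.sa_flags = SA_ONSTACK };
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 */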

#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 * sys_sigpending - examine pending signals
 * @set: where the mask of pending signals is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 * sys_sigprocmask - examine and change blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: signals to add or remove (if non-null)
 * @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		__set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
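
/*
 * Userspace sketch: the usual way to shield a critical section with
 * the mask manipulation implemented above (modern libcs route this
 * through rt_sigprocmask, but the semantics are the same).
 *
 *	sigset_t set, old;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);	// SIGINT held pending
 *	// ... critical section ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// restore previous mask
 */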
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifdef __ARCH_WANT_SYS_RT_SIGACTION
/**
 * sys_rt_sigaction - alter an action taken by a process
 * @sig: signal whose action is to be altered
 * @act: new sigaction
 * @oact: used to save the previous sigaction
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
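
/*
 * Userspace note: because of SA_ONESHOT above, a handler installed via
 * this legacy syscall is reset to SIG_DFL once it fires (System V
 * semantics), so classic code re-arms it inside the handler:
 *
 *	void handler(int sig)
 *	{
 *		signal(sig, handler);	// re-arm under SysV semantics
 *	}
 *
 * glibc's signal(3) normally provides BSD semantics via sigaction()
 * and does not reach this syscall.
 */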
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif

int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
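
/*
 * Userspace sketch ("flag" is a hypothetical sig_atomic_t set by the
 * caller's SIGUSR1 handler): sigsuspend() exists to close the wake-up
 * race in the test-then-pause() pattern, where a signal arriving
 * between the test and the call is lost. Swapping the mask and
 * sleeping atomically avoids that:
 *
 *	sigset_t block, old;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!flag)
 *		sigsuspend(&old);	// atomically unblock and sleep
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */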

#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
/**
 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
 *	until a signal is received
 * @unewset: new signal mask value
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */

__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "the kernel; try again later.\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
}
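
/*
 * Usage note (assuming the standard kdb shell syntax): from the kdb
 * prompt, "kill -9 1234" sends SIGKILL to PID 1234 through this helper
 * rather than through the regular syscall path.
 */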
#endif /* CONFIG_KGDB_KDB */