// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/exit.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h>	/* for audit_free() */
#include <linux/resource.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/task_work.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/kmsan.h>
#include <linux/random.h>
#include <linux/rcuwait.h>
#include <linux/compat.h>
#include <linux/io_uring.h>
#include <linux/kprobes.h>
#include <linux/rethook.h>
#include <linux/sysfs.h>
#include <linux/user_events.h>
#include <linux/uaccess.h>

#include <uapi/linux/wait.h>

#include <asm/unistd.h>
#include <asm/mmu_context.h>

#include "exit.h"

/*
 * The default value should be high enough to not crash a system that randomly
 * crashes its kernel from time to time, but low enough to at least not permit
 * overflowing 32-bit refcounts or the ldsem writer count.
 */
static unsigned int oops_limit = 10000;

#ifdef CONFIG_SYSCTL
static struct ctl_table kern_exit_table[] = {
	{
		.procname	= "oops_limit",
		.data		= &oops_limit,
		.maxlen		= sizeof(oops_limit),
		.mode		= 0644,
		.proc_handler	= proc_douintvec,
	},
};

static __init int kernel_exit_sysctls_init(void)
{
	register_sysctl_init("kernel", kern_exit_table);
	return 0;
}
late_initcall(kernel_exit_sysctls_init);
#endif

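/*
 * Count of kernel oopses since boot; exposed read-only via
 * /sys/kernel/oops_count below and compared against oops_limit in
 * make_task_dead().
 */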
static atomic_t oops_count = ATOMIC_INIT(0);

#ifdef CONFIG_SYSFS
static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr,
			       char *page)
{
	return sysfs_emit(page, "%d\n", atomic_read(&oops_count));
}

static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count);

static __init int kernel_exit_sysfs_init(void)
{
	sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL);
	return 0;
}
late_initcall(kernel_exit_sysfs_init);
#endif

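/*
 * Unhash @p from the pid hashes and the task/thread lists. Called from
 * __exit_signal() with tasklist_lock write-held and @p's siglock held.
 */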
static void __unhash_process(struct task_struct *p, bool group_dead)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (group_dead) {
		detach_pid(p, PIDTYPE_TGID);
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		list_del_init(&p->sibling);
		__this_cpu_dec(process_counts);
	}
	list_del_rcu(&p->thread_node);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	bool group_dead = thread_group_leader(tsk);
	struct sighand_struct *sighand;
	struct tty_struct *tty;
	u64 utime, stime;

	sighand = rcu_dereference_check(tsk->sighand,
					lockdep_tasklist_lock_is_held());
	spin_lock(&sighand->siglock);

#ifdef CONFIG_POSIX_TIMERS
	posix_cpu_timers_exit(tsk);
	if (group_dead)
		posix_cpu_timers_exit_group(tsk);
#endif

	if (group_dead) {
		tty = sig->tty;
		sig->tty = NULL;
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->notify_count > 0 && !--sig->notify_count)
			wake_up_process(sig->group_exec_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
	}

	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
			      sizeof(unsigned long long));

	/*
	 * Accumulate here the counters for all threads as they die. We could
	 * skip the group leader because it is the last user of signal_struct,
	 * but we want to avoid the race with thread_group_cputime() which can
	 * see the empty ->thread_head list.
	 */
	task_cputime(tsk, &utime, &stime);
	write_seqlock(&sig->stats_lock);
	sig->utime += utime;
	sig->stime += stime;
	sig->gtime += task_gtime(tsk);
	sig->min_flt += tsk->min_flt;
	sig->maj_flt += tsk->maj_flt;
	sig->nvcsw += tsk->nvcsw;
	sig->nivcsw += tsk->nivcsw;
	sig->inblock += task_io_get_inblock(tsk);
	sig->oublock += task_io_get_oublock(tsk);
	task_io_accounting_add(&sig->ioac, &tsk->ioac);
	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
	sig->nr_threads--;
	__unhash_process(tsk, group_dead);
	write_sequnlock(&sig->stats_lock);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (group_dead) {
		flush_sigqueue(&sig->shared_pending);
		tty_kref_put(tty);
	}
}

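/*
 * RCU callback performing the final put of a task_struct once its
 * ->rcu_users count has dropped to zero; see put_task_struct_rcu_user()
 * below. Flushes per-task kprobe/rethook/perf state first.
 */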
static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	kprobe_flush_task(tsk);
	rethook_flush_task(tsk);
	perf_event_delayed_put(tsk);
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}

void put_task_struct_rcu_user(struct task_struct *task)
{
	if (refcount_dec_and_test(&task->rcu_users))
		call_rcu(&task->rcu, delayed_put_task_struct);
}

void __weak release_thread(struct task_struct *dead_task)
{
}

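/*
 * Reap a dead task: detach it from the remaining kernel data structures
 * and drop the reference pinning its task_struct. In this file it is
 * reached from the wait paths (wait_task_zombie()) and for autoreaped
 * tasks collected on the @dead lists.
 */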
void release_task(struct task_struct *p)
{
	struct task_struct *leader;
	struct pid *thread_pid;
	int zap_leader;
repeat:
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials. But shut RCU-lockdep up */
	rcu_read_lock();
	dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
	rcu_read_unlock();

	cgroup_release(p);

	write_lock_irq(&tasklist_lock);
	ptrace_release_task(p);
	thread_pid = get_pid(p->thread_pid);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader)
			&& leader->exit_state == EXIT_ZOMBIE) {
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 */
		zap_leader = do_notify_parent(leader, leader->exit_signal);
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	proc_flush_pid(thread_pid);
	put_pid(thread_pid);
	release_thread(p);
	put_task_struct_rcu_user(p);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

int rcuwait_wake_up(struct rcuwait *w)
{
	int ret = 0;
	struct task_struct *task;

	rcu_read_lock();

	/*
	 * Order condition vs @task, such that everything prior to the load
	 * of @task is visible. This is the condition as to why the user called
	 * rcuwait_wake() in the first place. Pairs with set_current_state()
	 * barrier (A) in rcuwait_wait_event().
	 *
	 *    WAIT                WAKE
	 *    [S] tsk = current	  [S] cond = true
	 *        MB (A)	      MB (B)
	 *    [L] cond		  [L] tsk
	 */
	smp_mb(); /* (B) */

	task = rcu_dereference(w->task);
	if (task)
		ret = wake_up_process(task);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rcuwait_wake_up);

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp,
					struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static bool has_stopped_jobs(struct pid *pgrp)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return true;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}

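/*
 * Synchronize the exiting thread with a coredump in progress: mark it
 * PF_POSTCOREDUMP, and if it was killed by the core-inducing signal
 * (PF_SIGNALED) join the dumper list and park here until coredump_finish()
 * clears self.task.
 */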
static void coredump_task_exit(struct task_struct *tsk)
{
	struct core_state *core_state;

	/*
	 * Serialize with any possible pending coredump.
	 * We must hold siglock around checking core_state
	 * and setting PF_POSTCOREDUMP.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group without PF_POSTCOREDUMP set.
	 */
	spin_lock_irq(&tsk->sighand->siglock);
	tsk->flags |= PF_POSTCOREDUMP;
	core_state = tsk->signal->core_state;
	spin_unlock_irq(&tsk->sighand->siglock);
	if (core_state) {
		struct core_thread self;

		self.task = current;
		if (self.task->flags & PF_SIGNALED)
			self.next = xchg(&core_state->dumper.next, &self);
		else
			self.task = NULL;
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_current_state(TASK_IDLE|TASK_FREEZABLE);
			if (!self.task) /* see coredump_finish() */
				break;
			schedule();
		}
		__set_current_state(TASK_RUNNING);
	}
}

#ifdef CONFIG_MEMCG
/* drops tasklist_lock if succeeds */
static bool __try_to_set_owner(struct task_struct *tsk, struct mm_struct *mm)
{
	bool ret = false;

	task_lock(tsk);
	if (likely(tsk->mm == mm)) {
		/* tsk can't pass exit_mm/exec_mmap and exit */
		read_unlock(&tasklist_lock);
		WRITE_ONCE(mm->owner, tsk);
		lru_gen_migrate_mm(mm);
		ret = true;
	}
	task_unlock(tsk);
	return ret;
}

static bool try_to_set_owner(struct task_struct *g, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(g, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm == mm) {
			if (__try_to_set_owner(t, mm))
				return true;
		} else if (t_mm)
			break;
	}

	return false;
}

/*
 * A task is exiting.   If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *g, *p = current;

	/*
	 * If the exiting or execing task is not the owner, it's
	 * someone else's problem.
	 */
	if (mm->owner != p)
		return;
	/*
	 * The current owner is exiting/execing and there are no other
	 * candidates.  Do not leave the mm pointing to a possibly
	 * freed task structure.
	 */
	if (atomic_read(&mm->mm_users) <= 1) {
		WRITE_ONCE(mm->owner, NULL);
		return;
	}

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(g, &p->children, sibling) {
		if (try_to_set_owner(g, mm))
			goto ret;
	}
	/*
	 * Search in the siblings
	 */
	list_for_each_entry(g, &p->real_parent->children, sibling) {
		if (try_to_set_owner(g, mm))
			goto ret;
	}
	/*
	 * Search through everything else, we should not get here often.
	 */
	for_each_process(g) {
		if (atomic_read(&mm->mm_users) <= 1)
			break;
		if (g->flags & PF_KTHREAD)
			continue;
		if (try_to_set_owner(g, mm))
			goto ret;
	}
	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
	 */
	WRITE_ONCE(mm->owner, NULL);
ret:
	return;

}
#endif /* CONFIG_MEMCG */

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(void)
{
	struct mm_struct *mm = current->mm;

	exit_mm_release(current, mm);
	if (!mm)
		return;
	mmap_read_lock(mm);
	mmgrab_lazy_tlb(mm);
	BUG_ON(mm != current->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(current);
	/*
	 * When a thread stops operating on an address space, the loop
	 * in membarrier_private_expedited() may not observe that
	 * tsk->mm, and the loop in membarrier_global_expedited() may
	 * not observe a MEMBARRIER_STATE_GLOBAL_EXPEDITED
	 * rq->membarrier_state, so those would not issue an IPI.
	 * Membarrier requires a memory barrier after accessing
	 * user-space memory, before clearing tsk->mm or the
	 * rq->membarrier_state.
	 */
	smp_mb__after_spinlock();
	local_irq_disable();
	current->mm = NULL;
	membarrier_update_current_mm(NULL);
	enter_lazy_tlb(mm, current);
	local_irq_enable();
	task_unlock(current);
	mmap_read_unlock(mm);
	mm_update_next_owner(mm);
	mmput(mm);
	if (test_thread_flag(TIF_MEMDIE))
		exit_oom_victim();
}

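/*
 * Return the first thread in @p's thread group that is not already
 * exiting (PF_EXITING), or NULL if there is none.
 */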
static struct task_struct *find_alive_thread(struct task_struct *p)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		if (!(t->flags & PF_EXITING))
			return t;
	}
	return NULL;
}

static struct task_struct *find_child_reaper(struct task_struct *father,
						struct list_head *dead)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *reaper = pid_ns->child_reaper;
	struct task_struct *p, *n;

	if (likely(reaper != father))
		return reaper;

	reaper = find_alive_thread(father);
	if (reaper) {
		pid_ns->child_reaper = reaper;
		return reaper;
	}

	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	zap_pid_ns_processes(pid_ns);
	write_lock_irq(&tasklist_lock);

	return father;
}

/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give it to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give it to the init process (PID 1) in our pid namespace
 */
static struct task_struct *find_new_reaper(struct task_struct *father,
					   struct task_struct *child_reaper)
{
	struct task_struct *thread, *reaper;

	thread = find_alive_thread(father);
	if (thread)
		return thread;

	if (father->signal->has_child_subreaper) {
		unsigned int ns_level = task_pid(father)->level;
		/*
		 * Find the first ->is_child_subreaper ancestor in our pid_ns.
		 * We can't check reaper != child_reaper to ensure we do not
		 * cross the namespaces, the exiting parent could be injected
		 * by setns() + fork().
		 * We check pid->level, this is slightly more efficient than
		 * task_active_pid_ns(reaper) != task_active_pid_ns(father).
		 */
		for (reaper = father->real_parent;
		     task_pid(reaper)->level == ns_level;
		     reaper = reaper->real_parent) {
			if (reaper == &init_task)
				break;
			if (!reaper->signal->is_child_subreaper)
				continue;
			thread = find_alive_thread(reaper);
			if (thread)
				return thread;
		}
	}

	return child_reaper;
}

/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
				struct list_head *dead)
{
	if (unlikely(p->exit_state == EXIT_DEAD))
		return;

	/* We don't want people slaying init. */
	p->exit_signal = SIGCHLD;

	/* If it has exited notify the new parent about this child's death. */
	if (!p->ptrace &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		if (do_notify_parent(p, p->exit_signal)) {
			p->exit_state = EXIT_DEAD;
			list_add(&p->ptrace_entry, dead);
		}
	}

	kill_orphaned_pgrp(p, father);
}

/*
 * This does two things:
 *
 * A.  Make init inherit all the child processes
 * B.  Check to see if any process groups have become orphaned
 *	as a result of our exiting, and if they have any stopped
 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void forget_original_parent(struct task_struct *father,
					struct list_head *dead)
{
	struct task_struct *p, *t, *reaper;

	if (unlikely(!list_empty(&father->ptraced)))
		exit_ptrace(father, dead);

	/* Can drop and reacquire tasklist_lock */
	reaper = find_child_reaper(father, dead);
	if (list_empty(&father->children))
		return;

	reaper = find_new_reaper(father, reaper);
	list_for_each_entry(p, &father->children, sibling) {
		for_each_thread(p, t) {
			RCU_INIT_POINTER(t->real_parent, reaper);
			BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
			if (likely(!t->ptrace))
				t->parent = t->real_parent;
			if (t->pdeath_signal)
				group_send_sig_info(t->pdeath_signal,
						    SEND_SIG_NOINFO, t,
						    PIDTYPE_TGID);
		}
		/*
		 * If this is a threaded reparent there is no need to
		 * notify anyone anything has happened.
		 */
		if (!same_thread_group(reaper, father))
			reparent_leader(father, p, dead);
	}
	list_splice_tail_init(&father->children, &reaper->children);
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	bool autoreap;
	struct task_struct *p, *n;
	LIST_HEAD(dead);

	write_lock_irq(&tasklist_lock);
	forget_original_parent(tsk, &dead);

	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	tsk->exit_state = EXIT_ZOMBIE;
	/*
	 * sub-thread or delay_group_leader(), wake up the
	 * PIDFD_THREAD waiters.
	 */
	if (!thread_group_empty(tsk))
		do_notify_pidfd(tsk);

	if (unlikely(tsk->ptrace)) {
		int sig = thread_group_leader(tsk) &&
				thread_group_empty(tsk) &&
				!ptrace_reparented(tsk) ?
			tsk->exit_signal : SIGCHLD;
		autoreap = do_notify_parent(tsk, sig);
	} else if (thread_group_leader(tsk)) {
		autoreap = thread_group_empty(tsk) &&
			   do_notify_parent(tsk, tsk->exit_signal);
	} else {
		autoreap = true;
	}

	if (autoreap) {
		tsk->exit_state = EXIT_DEAD;
		list_add(&tsk->ptrace_entry, &dead);
	}

	/* mt-exec, de_thread() is waiting for group leader */
	if (unlikely(tsk->signal->notify_count < 0))
		wake_up_process(tsk->signal->group_exec_task);
	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}
}

#ifdef CONFIG_DEBUG_STACK_USAGE
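/*
 * Return how many bytes at the far end of @p's kernel stack were never
 * written (still zero), by scanning inward from end_of_stack(), past the
 * canary, for the first non-zero word.
 */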
unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do {	/* Skip over canary */
# ifdef CONFIG_STACK_GROWSUP
		n--;
# else
		n++;
# endif
	} while (!*n);

# ifdef CONFIG_STACK_GROWSUP
	return (unsigned long)end_of_stack(p) - (unsigned long)n;
# else
	return (unsigned long)n - (unsigned long)end_of_stack(p);
# endif
}

/* Count the maximum pages reached in kernel stacks */
static inline void kstack_histogram(unsigned long used_stack)
{
#ifdef CONFIG_VM_EVENT_COUNTERS
	if (used_stack <= 1024)
		count_vm_event(KSTACK_1K);
#if THREAD_SIZE > 1024
	else if (used_stack <= 2048)
		count_vm_event(KSTACK_2K);
#endif
#if THREAD_SIZE > 2048
	else if (used_stack <= 4096)
		count_vm_event(KSTACK_4K);
#endif
#if THREAD_SIZE > 4096
	else if (used_stack <= 8192)
		count_vm_event(KSTACK_8K);
#endif
#if THREAD_SIZE > 8192
	else if (used_stack <= 16384)
		count_vm_event(KSTACK_16K);
#endif
#if THREAD_SIZE > 16384
	else if (used_stack <= 32768)
		count_vm_event(KSTACK_32K);
#endif
#if THREAD_SIZE > 32768
	else if (used_stack <= 65536)
		count_vm_event(KSTACK_64K);
#endif
#if THREAD_SIZE > 65536
	else
		count_vm_event(KSTACK_REST);
#endif
#endif /* CONFIG_VM_EVENT_COUNTERS */
}

static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);
	kstack_histogram(THREAD_SIZE - free);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
			current->comm, task_pid_nr(current), free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

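/*
 * Account this thread's exit against signal->quick_threads; if it is the
 * last thread out and no group exit was already signalled, record @code
 * as the group exit code.
 */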
static void synchronize_group_exit(struct task_struct *tsk, long code)
{
	struct sighand_struct *sighand = tsk->sighand;
	struct signal_struct *signal = tsk->signal;

	spin_lock_irq(&sighand->siglock);
	signal->quick_threads--;
	if ((signal->quick_threads == 0) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT)) {
		signal->flags = SIGNAL_GROUP_EXIT;
		signal->group_exit_code = code;
		signal->group_stop_count = 0;
	}
	spin_unlock_irq(&sighand->siglock);
}

void __noreturn do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	WARN_ON(irqs_disabled());

	synchronize_group_exit(tsk, code);

	WARN_ON(tsk->plug);

	kcov_task_exit(tsk);
	kmsan_task_exit(tsk);

	coredump_task_exit(tsk);
	ptrace_event(PTRACE_EVENT_EXIT, code);
	user_events_exit(tsk);

	io_uring_files_cancel();
	exit_signals(tsk);  /* sets PF_EXITING */

	seccomp_filter_release(tsk);

	acct_update_integrals(tsk);
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		/*
		 * If the last thread of global init has exited, panic
		 * immediately to get a useable coredump.
		 */
		if (unlikely(is_global_init(tsk)))
			panic("Attempted to kill init! exitcode=0x%08x\n",
				tsk->signal->group_exit_code ?: (int)code);

#ifdef CONFIG_POSIX_TIMERS
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk);
#endif
		if (tsk->mm)
			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm();

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_shm(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	if (group_dead)
		disassociate_ctty(1);
	exit_task_namespaces(tsk);
	exit_task_work(tsk);
	exit_thread(tsk);

	/*
	 * Flush inherited counters to the parent - before the parent
	 * gets woken up by child-exit notifications.
	 *
	 * because of cgroup mode, must be called before cgroup_exit()
	 */
	perf_event_exit_task(tsk);

	sched_autogroup_exit_task(tsk);
	cgroup_exit(tsk);

	/*
	 * FIXME: do that only when needed, using sched_exit tracepoint
	 */
	flush_ptrace_hw_breakpoint(tsk);

	exit_tasks_rcu_start();
	exit_notify(tsk, group_dead);
	proc_exit_connector(tsk);
	mpol_put_task_policy(tsk);
#ifdef CONFIG_FUTEX
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held();

	if (tsk->io_context)
		exit_io_context(tsk);

	if (tsk->splice_pipe)
		free_pipe_info(tsk->splice_pipe);

	if (tsk->task_frag.page)
		put_page(tsk->task_frag.page);

	exit_task_stack_account(tsk);

	check_stack_usage();
	preempt_disable();
	if (tsk->nr_dirtied)
		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
	exit_rcu();
	exit_tasks_rcu_finish();

	lockdep_free_task(tsk);
	do_task_dead();
}

void __noreturn make_task_dead(int signr)
{
	/*
	 * Take the task off the cpu after something catastrophic has
	 * happened.
	 *
	 * We can get here from a kernel oops, sometimes with preemption off.
	 * Start by checking for critical errors.
	 * Then fix up important state like USER_DS and preemption.
	 * Then do everything else.
	 */
	struct task_struct *tsk = current;
	unsigned int limit;

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	if (unlikely(irqs_disabled())) {
		pr_info("note: %s[%d] exited with irqs disabled\n",
			current->comm, task_pid_nr(current));
		local_irq_enable();
	}
	if (unlikely(in_atomic())) {
		pr_info("note: %s[%d] exited with preempt_count %d\n",
			current->comm, task_pid_nr(current),
			preempt_count());
		preempt_count_set(PREEMPT_ENABLED);
	}

	/*
	 * Every time the system oopses, if the oops happens while a reference
	 * to an object was held, the reference leaks.
	 * If the oops doesn't also leak memory, repeated oopsing can cause
	 * reference counters to wrap around (if they're not using refcount_t).
	 * This means that repeated oopsing can make unexploitable-looking bugs
	 * exploitable through repeated oopsing.
	 * To make sure this can't happen, place an upper bound on how often the
	 * kernel may oops without panic().
	 */
	limit = READ_ONCE(oops_limit);
	if (atomic_inc_return(&oops_count) >= limit && limit)
		panic("Oopsed too often (kernel.oops_limit is %d)", limit);

	/*
	 * We're taking recursive faults here in make_task_dead. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		pr_alert("Fixing recursive fault but reboot is needed!\n");
		futex_exit_recursive(tsk);
		tsk->exit_state = EXIT_DEAD;
		refcount_inc(&tsk->rcu_users);
		do_task_dead();
	}

	do_exit(signr);
}

SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code&0xff)<<8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void __noreturn
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		exit_code = sig->group_exit_code;
	else if (sig->group_exec_task)
		exit_code = 0;
	else {
		struct sighand_struct *const sighand = current->sighand;

		spin_lock_irq(&sighand->siglock);
		if (sig->flags & SIGNAL_GROUP_EXIT)
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else if (sig->group_exec_task)
			exit_code = 0;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
	do_group_exit((error_code & 0xff) << 8);
	/* NOTREACHED */
	return 0;
}

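/*
 * True if @p matches the pid this wait targets, or if the caller is
 * waiting on all of its children (wo_type == PIDTYPE_MAX).
 */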
static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
	return	wo->wo_type == PIDTYPE_MAX ||
		task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int
eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
{
	if (!eligible_pid(wo, p))
		return 0;

	/*
	 * Wait for all children (clone and not) if __WALL is set or
	 * if it is traced by us.
	 */
	if (ptrace || (wo->wo_flags & __WALL))
		return 1;

	/*
	 * Otherwise, wait for clone children *only* if __WCLONE is set;
	 * otherwise, wait for non-clone children *only*.
	 *
	 * Note: a "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD, or a non-leader thread which
	 * we can only see if it is traced by us.
	 */
	if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
		return 0;

	return 1;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
	int state, status;
	pid_t pid = task_pid_vnr(p);
	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
	struct waitid_info *infop;

	if (!likely(wo->wo_flags & WEXITED))
		return 0;

	if (unlikely(wo->wo_flags & WNOWAIT)) {
		status = (p->signal->flags & SIGNAL_GROUP_EXIT)
			? p->signal->group_exit_code : p->exit_code;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		sched_annotate_sleep();
		if (wo->wo_rusage)
			getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
		put_task_struct(p);
		goto out_info;
	}
	/*
	 * Move the task's state to DEAD/TRACE, only one thread can do this.
	 */
	state = (ptrace_reparented(p) && thread_group_leader(p)) ?
		EXIT_TRACE : EXIT_DEAD;
	if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
		return 0;
	/*
	 * We own this thread, nobody else can reap it.
	 */
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();

	/*
	 * Check thread_group_leader() to exclude the traced sub-threads.
	 */
	if (state == EXIT_DEAD && thread_group_leader(p)) {
		struct signal_struct *sig = p->signal;
		struct signal_struct *psig = current->signal;
		unsigned long maxrss;
		u64 tgutime, tgstime;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields because the whole thread group is dead
		 * and nobody can change them.
		 *
		 * psig->stats_lock also protects us from our sub-threads
		 * which can reap other children at the same time.
		 *
		 * We use thread_group_cputime_adjusted() to get times for
		 * the thread group, which consolidates times for all threads
		 * in the group including the group leader.
		 */
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		write_seqlock_irq(&psig->stats_lock);
		psig->cutime += tgutime + sig->cutime;
		psig->cstime += tgstime + sig->cstime;
		psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		maxrss = max(sig->maxrss, sig->cmaxrss);
		if (psig->cmaxrss < maxrss)
			psig->cmaxrss = maxrss;
		task_io_accounting_add(&psig->ioac, &p->ioac);
		task_io_accounting_add(&psig->ioac, &sig->ioac);
		write_sequnlock_irq(&psig->stats_lock);
	}

	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	wo->wo_stat = status;

	if (state == EXIT_TRACE) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);

		/* If parent wants a zombie, don't release it now */
		state = EXIT_ZOMBIE;
		if (do_notify_parent(p, p->exit_signal))
			state = EXIT_DEAD;
		p->exit_state = state;
		write_unlock_irq(&tasklist_lock);
	}
	if (state == EXIT_DEAD)
		release_task(p);

out_info:
	infop = wo->wo_info;
	if (infop) {
		if ((status & 0x7f) == 0) {
			infop->cause = CLD_EXITED;
			infop->status = status >> 8;
		} else {
			infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			infop->status = status & 0x7f;
		}
		infop->pid = pid;
		infop->uid = uid;
	}

	return pid;
}

static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
	if (ptrace) {
		if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
			return &p->exit_code;
	} else {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return &p->signal->group_exit_code;
	}
	return NULL;
}

/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero.  Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue.  Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
				int ptrace, struct task_struct *p)
{
	struct waitid_info *infop;
	int exit_code, *p_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	/*
	 * Traditionally we see ptrace'd stopped tasks regardless of options.
	 */
	if (!ptrace && !(wo->wo_flags & WUNTRACED))
		return 0;

	if (!task_stopped_code(p, ptrace))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	p_code = task_stopped_code(p, ptrace);
	if (unlikely(!p_code))
		goto unlock_sig;

	exit_code = *p_code;
	if (!exit_code)
		goto unlock_sig;

	if (!unlikely(wo->wo_flags & WNOWAIT))
		*p_code = 0;

	uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	if (likely(!(wo->wo_flags & WNOWAIT)))
		wo->wo_stat = (exit_code << 8) | 0x7f;

	infop = wo->wo_info;
	if (infop) {
		infop->cause = why;
		infop->status = exit_code;
		infop->pid = pid;
		infop->uid = uid;
	}
	return pid;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
	struct waitid_info *infop;
	pid_t pid;
	uid_t uid;

	if (!unlikely(wo->wo_flags & WCONTINUED))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!unlikely(wo->wo_flags & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	uid = from_kuid_munged(current_user_ns(), task_uid(p));
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	get_task_struct(p);
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	infop = wo->wo_info;
	if (!infop) {
		wo->wo_stat = 0xffff;
	} else {
		infop->cause = CLD_CONTINUED;
		infop->pid = pid;
		infop->uid = uid;
		infop->status = SIGCONT;
	}
	return pid;
}

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
				struct task_struct *p)
{
	/*
	 * We can race with wait_task_zombie() from another thread.
	 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
	 * can't confuse the checks below.
	 */
	int exit_state = READ_ONCE(p->exit_state);
	int ret;

	if (unlikely(exit_state == EXIT_DEAD))
		return 0;

	ret = eligible_child(wo, ptrace, p);
	if (!ret)
		return ret;

	if (unlikely(exit_state == EXIT_TRACE)) {
		/*
		 * ptrace == 0 means we are the natural parent. In this case
		 * we should clear notask_error, debugger will notify us.
		 */
		if (likely(!ptrace))
			wo->notask_error = 0;
		return 0;
	}

	if (likely(!ptrace) && unlikely(p->ptrace)) {
		/*
		 * If it is traced by its real parent's group, just pretend
		 * the caller is ptrace_do_wait() and reap this child if it
		 * is zombie.
		 *
		 * This also hides group stop state from real parent; otherwise
		 * a single stop can be reported twice as group and ptrace stop.
		 * If a ptracer wants to distinguish these two events for its
		 * own children it should create a separate process which takes
		 * the role of real parent.
		 */
		if (!ptrace_reparented(p))
			ptrace = 1;
	}

	/* slay zombie? */
	if (exit_state == EXIT_ZOMBIE) {
		/* we don't reap group leaders with subthreads */
		if (!delay_group_leader(p)) {
			/*
			 * A zombie ptracee is only visible to its ptracer.
			 * Notification and reaping will be cascaded to the
			 * real parent when the ptracer detaches.
			 */
			if (unlikely(ptrace) || likely(!p->ptrace))
				return wait_task_zombie(wo, p);
		}

		/*
		 * Allow access to stopped/continued state via zombie by
		 * falling through.  Clearing of notask_error is complex.
		 *
		 * When !@ptrace:
		 *
		 * If WEXITED is set, notask_error should naturally be
		 * cleared.  If not, subset of WSTOPPED|WCONTINUED is set,
		 * so, if there are live subthreads, there are events to
		 * wait for.  If all subthreads are dead, it's still safe
		 * to clear - this function will be called again in finite
		 * amount time once all the subthreads are released and
		 * will then return without clearing.
		 *
		 * When @ptrace:
		 *
		 * Stopped state is per-task and thus can't change once the
		 * target task dies.  Only continued and exited can happen.
		 * Clear notask_error if WCONTINUED | WEXITED.
		 */
		if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
			wo->notask_error = 0;
	} else {
		/*
		 * @p is alive and it's gonna stop, continue or exit, so
		 * there always is something to wait for.
		 */
		wo->notask_error = 0;
	}

	/*
	 * Wait for stopped.  Depending on @ptrace, different stopped state
	 * is used and the two don't interact with each other.
	 */
	ret = wait_task_stopped(wo, ptrace, p);
	if (ret)
		return ret;

	/*
	 * Wait for continued.  There's only one continued state and the
	 * ptracer can consume it which can confuse the real parent.  Don't
	 * use WCONTINUED from ptracer.  You don't need or want it.
	 */
	return wait_task_continued(wo, p);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->children, sibling) {
		int ret = wait_consider_task(wo, 0, p);

		if (ret)
			return ret;
	}

	return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
		int ret = wait_consider_task(wo, 1, p);

		if (ret)
			return ret;
	}

	return 0;
}

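/*
 * Decide whether a wakeup for child @p is of interest to this waiter:
 * the pid must match and, with __WNOTHREAD, the waiting thread
 * (child_wait.private) must be @p's own parent.
 */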
bool pid_child_should_wake(struct wait_opts *wo, struct task_struct *p)
{
	if (!eligible_pid(wo, p))
		return false;

	if ((wo->wo_flags & __WNOTHREAD) && wo->child_wait.private != p->parent)
		return false;

	return true;
}

static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
				int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts,
						child_wait);
	struct task_struct *p = key;

	if (pid_child_should_wake(wo, p))
		return default_wake_function(wait, mode, sync, key);

	return 0;
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
	__wake_up_sync_key(&parent->signal->wait_chldexit,
			   TASK_INTERRUPTIBLE, p);
}

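/*
 * True if current may wait on @target: it is the (ptrace-)parent itself
 * or, unless __WNOTHREAD was passed, any thread in that parent's group.
 */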
static bool is_effectively_child(struct wait_opts *wo, bool ptrace,
				 struct task_struct *target)
{
	struct task_struct *parent =
		!ptrace ? target->real_parent : target->parent;

	return current == parent || (!(wo->wo_flags & __WNOTHREAD) &&
				     same_thread_group(current, parent));
}

/*
 * Optimization for waiting on PIDTYPE_PID. No need to iterate through child
 * and tracee lists to find the target task.
 */
static int do_wait_pid(struct wait_opts *wo)
{
	bool ptrace;
	struct task_struct *target;
	int retval;

	ptrace = false;
	target = pid_task(wo->wo_pid, PIDTYPE_TGID);
	if (target && is_effectively_child(wo, ptrace, target)) {
		retval = wait_consider_task(wo, ptrace, target);
		if (retval)
			return retval;
	}

	ptrace = true;
	target = pid_task(wo->wo_pid, PIDTYPE_PID);
	if (target && target->ptrace &&
	    is_effectively_child(wo, ptrace, target)) {
		retval = wait_consider_task(wo, ptrace, target);
		if (retval)
			return retval;
	}

	return 0;
}

long __do_wait(struct wait_opts *wo)
{
	long retval;

	/*
	 * If there is nothing that can match our criteria, just get out.
	 * We will clear ->notask_error to zero if we see any child that
	 * might later match our criteria, even if we are not able to reap
	 * it yet.
	 */
	wo->notask_error = -ECHILD;
	if ((wo->wo_type < PIDTYPE_MAX) &&
	   (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type)))
		goto notask;

	read_lock(&tasklist_lock);

	if (wo->wo_type == PIDTYPE_PID) {
		retval = do_wait_pid(wo);
		if (retval)
			return retval;
	} else {
		struct task_struct *tsk = current;

		do {
			retval = do_wait_thread(wo, tsk);
			if (retval)
				return retval;

			retval = ptrace_do_wait(wo, tsk);
			if (retval)
				return retval;

			if (wo->wo_flags & __WNOTHREAD)
				break;
		} while_each_thread(current, tsk);
	}
	read_unlock(&tasklist_lock);

notask:
	retval = wo->notask_error;
	if (!retval && !(wo->wo_flags & WNOHANG))
		return -ERESTARTSYS;

	return retval;
}

static long do_wait(struct wait_opts *wo)
{
	int retval;

	trace_sched_process_wait(wo->wo_pid);

	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
	wo->child_wait.private = current;
	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		retval = __do_wait(wo);
		if (retval != -ERESTARTSYS)
			break;
		if (signal_pending(current))
			break;
		schedule();
	} while (1);

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
	return retval;
}

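/*
 * Validate @options and translate @which/@upid into a referenced struct
 * pid in @wo. For P_PIDFD, a pidfd opened with O_NONBLOCK makes the wait
 * non-blocking by adding WNOHANG.
 */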
int kernel_waitid_prepare(struct wait_opts *wo, int which, pid_t upid,
			  struct waitid_info *infop, int options,
			  struct rusage *ru)
{
	unsigned int f_flags = 0;
	struct pid *pid = NULL;
	enum pid_type type;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;

		pid = find_get_pid(upid);
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid < 0)
			return -EINVAL;

		if (upid)
			pid = find_get_pid(upid);
		else
			pid = get_task_pid(current, PIDTYPE_PGID);
		break;
	case P_PIDFD:
		type = PIDTYPE_PID;
		if (upid < 0)
			return -EINVAL;

		pid = pidfd_get_pid(upid, &f_flags);
		if (IS_ERR(pid))
			return PTR_ERR(pid);

		break;
	default:
		return -EINVAL;
	}

	wo->wo_type	= type;
	wo->wo_pid	= pid;
	wo->wo_flags	= options;
	wo->wo_info	= infop;
	wo->wo_rusage	= ru;
	if (f_flags & O_NONBLOCK)
		wo->wo_flags |= WNOHANG;

	return 0;
}

static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
			  int options, struct rusage *ru)
{
	struct wait_opts wo;
	long ret;

	ret = kernel_waitid_prepare(&wo, which, upid, infop, options, ru);
	if (ret)
		return ret;

	ret = do_wait(&wo);
	if (!ret && !(options & WNOHANG) && (wo.wo_flags & WNOHANG))
		ret = -EAGAIN;

	put_pid(wo.wo_pid);
	return ret;
}

SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
		infop, int, options, struct rusage __user *, ru)
{
	struct rusage r;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
	int signo = 0;

	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	if (!infop)
		return err;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return -EFAULT;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_write_access_end();
	return err;
Efault:
	user_write_access_end();
	return -EFAULT;
}

long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
		  struct rusage *ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	/* -INT_MIN is not defined */
	if (upid == INT_MIN)
		return -ESRCH;

	if (upid == -1)
		type = PIDTYPE_MAX;
	else if (upid < 0) {
		type = PIDTYPE_PGID;
		pid = find_get_pid(-upid);
	} else if (upid == 0) {
		type = PIDTYPE_PGID;
		pid = get_task_pid(current, PIDTYPE_PGID);
	} else /* upid > 0 */ {
		type = PIDTYPE_PID;
		pid = find_get_pid(upid);
	}

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options | WEXITED;
	wo.wo_info	= NULL;
	wo.wo_stat	= 0;
	wo.wo_rusage	= ru;
	ret = do_wait(&wo);
	put_pid(pid);
	if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
		ret = -EFAULT;

	return ret;
}

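/*
 * Wait for the exit of a specific pid on behalf of in-kernel callers;
 * implies WEXITED and stores the raw wait status in *@stat when a
 * non-zero one was collected.
 */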
int kernel_wait(pid_t pid, int *stat)
{
	struct wait_opts wo = {
		.wo_type	= PIDTYPE_PID,
		.wo_pid		= find_get_pid(pid),
		.wo_flags	= WEXITED,
	};
	int ret;

	ret = do_wait(&wo);
	if (ret > 0 && wo.wo_stat)
		*stat = wo.wo_stat;
	put_pid(wo.wo_pid);
	return ret;
}

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
		int, options, struct rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL);

	if (err > 0) {
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	return err;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
	return kernel_wait4(pid, stat_addr, options, NULL);
}

#endif

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(wait4,
	compat_pid_t, pid,
	compat_uint_t __user *, stat_addr,
	int, options,
	struct compat_rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL);
	if (err > 0) {
		if (ru && put_compat_rusage(&r, ru))
			return -EFAULT;
	}
	return err;
}

COMPAT_SYSCALL_DEFINE5(waitid,
		int, which, compat_pid_t, pid,
		struct compat_siginfo __user *, infop, int, options,
		struct compat_rusage __user *, uru)
{
	struct rusage ru;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
	int signo = 0;
	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
		if (uru) {
			/* kernel_waitid() overwrites everything in ru */
			if (COMPAT_USE_64BIT_TIME)
				err = copy_to_user(uru, &ru, sizeof(ru));
			else
				err = put_compat_rusage(&ru, uru);
			if (err)
				return -EFAULT;
		}
	}

	if (!infop)
		return err;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return -EFAULT;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_write_access_end();
	return err;
Efault:
	user_write_access_end();
	return -EFAULT;
}
#endif

/*
 * This needs to be __function_aligned as GCC implicitly makes any
 * implementation of abort() cold and drops alignment specified by
 * -falign-functions=N.
 *
 * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345#c11
 */
__weak __function_aligned void abort(void)
{
	BUG();

	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);
186 * see the empty ->thread_head list.
187 */
188 task_cputime(tsk, &utime, &stime);
189 write_seqlock(&sig->stats_lock);
190 sig->utime += utime;
191 sig->stime += stime;
192 sig->gtime += task_gtime(tsk);
193 sig->min_flt += tsk->min_flt;
194 sig->maj_flt += tsk->maj_flt;
195 sig->nvcsw += tsk->nvcsw;
196 sig->nivcsw += tsk->nivcsw;
197 sig->inblock += task_io_get_inblock(tsk);
198 sig->oublock += task_io_get_oublock(tsk);
199 task_io_accounting_add(&sig->ioac, &tsk->ioac);
200 sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
201 sig->nr_threads--;
202 __unhash_process(tsk, group_dead);
203 write_sequnlock(&sig->stats_lock);
204
205 /*
206 * Do this under ->siglock, we can race with another thread
207 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
208 */
209 flush_sigqueue(&tsk->pending);
210 tsk->sighand = NULL;
211 spin_unlock(&sighand->siglock);
212
213 __cleanup_sighand(sighand);
214 clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
215 if (group_dead) {
216 flush_sigqueue(&sig->shared_pending);
217 tty_kref_put(tty);
218 }
219}
220
221static void delayed_put_task_struct(struct rcu_head *rhp)
222{
223 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
224
225 kprobe_flush_task(tsk);
226 rethook_flush_task(tsk);
227 perf_event_delayed_put(tsk);
228 trace_sched_process_free(tsk);
229 put_task_struct(tsk);
230}
231
232void put_task_struct_rcu_user(struct task_struct *task)
233{
234 if (refcount_dec_and_test(&task->rcu_users))
235 call_rcu(&task->rcu, delayed_put_task_struct);
236}
237
238void __weak release_thread(struct task_struct *dead_task)
239{
240}
241
242void release_task(struct task_struct *p)
243{
244 struct task_struct *leader;
245 struct pid *thread_pid;
246 int zap_leader;
247repeat:
248 /* don't need to get the RCU readlock here - the process is dead and
249 * can't be modifying its own credentials. But shut RCU-lockdep up */
250 rcu_read_lock();
251 dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
252 rcu_read_unlock();
253
254 cgroup_release(p);
255
256 write_lock_irq(&tasklist_lock);
257 ptrace_release_task(p);
258 thread_pid = get_pid(p->thread_pid);
259 __exit_signal(p);
260
261 /*
262 * If we are the last non-leader member of the thread
263 * group, and the leader is zombie, then notify the
264 * group leader's parent process. (if it wants notification.)
265 */
266 zap_leader = 0;
267 leader = p->group_leader;
268 if (leader != p && thread_group_empty(leader)
269 && leader->exit_state == EXIT_ZOMBIE) {
270 /*
271 * If we were the last child thread and the leader has
272 * exited already, and the leader's parent ignores SIGCHLD,
273 * then we are the one who should release the leader.
274 */
275 zap_leader = do_notify_parent(leader, leader->exit_signal);
276 if (zap_leader)
277 leader->exit_state = EXIT_DEAD;
278 }
279
280 write_unlock_irq(&tasklist_lock);
281 seccomp_filter_release(p);
282 proc_flush_pid(thread_pid);
283 put_pid(thread_pid);
284 release_thread(p);
285 put_task_struct_rcu_user(p);
286
287 p = leader;
288 if (unlikely(zap_leader))
289 goto repeat;
290}
291
292int rcuwait_wake_up(struct rcuwait *w)
293{
294 int ret = 0;
295 struct task_struct *task;
296
297 rcu_read_lock();
298
299 /*
300 * Order condition vs @task, such that everything prior to the load
301 * of @task is visible. This is the condition as to why the user called
302 * rcuwait_wake() in the first place. Pairs with set_current_state()
303 * barrier (A) in rcuwait_wait_event().
304 *
305 * WAIT WAKE
306 * [S] tsk = current [S] cond = true
307 * MB (A) MB (B)
308 * [L] cond [L] tsk
309 */
310 smp_mb(); /* (B) */
311
312 task = rcu_dereference(w->task);
313 if (task)
314 ret = wake_up_process(task);
315 rcu_read_unlock();
316
317 return ret;
318}
319EXPORT_SYMBOL_GPL(rcuwait_wake_up);
320
321/*
322 * Determine if a process group is "orphaned", according to the POSIX
323 * definition in 2.2.2.52. Orphaned process groups are not to be affected
324 * by terminal-generated stop signals. Newly orphaned process groups are
325 * to receive a SIGHUP and a SIGCONT.
326 *
327 * "I ask you, have you ever known what it is to be an orphan?"
328 */
329static int will_become_orphaned_pgrp(struct pid *pgrp,
330 struct task_struct *ignored_task)
331{
332 struct task_struct *p;
333
334 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
335 if ((p == ignored_task) ||
336 (p->exit_state && thread_group_empty(p)) ||
337 is_global_init(p->real_parent))
338 continue;
339
340 if (task_pgrp(p->real_parent) != pgrp &&
341 task_session(p->real_parent) == task_session(p))
342 return 0;
343 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
344
345 return 1;
346}
347
348int is_current_pgrp_orphaned(void)
349{
350 int retval;
351
352 read_lock(&tasklist_lock);
353 retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
354 read_unlock(&tasklist_lock);
355
356 return retval;
357}
358
359static bool has_stopped_jobs(struct pid *pgrp)
360{
361 struct task_struct *p;
362
363 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
364 if (p->signal->flags & SIGNAL_STOP_STOPPED)
365 return true;
366 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
367
368 return false;
369}
370
371/*
372 * Check to see if any process groups have become orphaned as
373 * a result of our exiting, and if they have any stopped jobs,
374 * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
375 */
376static void
377kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
378{
379 struct pid *pgrp = task_pgrp(tsk);
380 struct task_struct *ignored_task = tsk;
381
382 if (!parent)
383 /* exit: our father is in a different pgrp than
384 * we are and we were the only connection outside.
385 */
386 parent = tsk->real_parent;
387 else
388 /* reparent: our child is in a different pgrp than
389 * we are, and it was the only connection outside.
390 */
391 ignored_task = NULL;
392
393 if (task_pgrp(parent) != pgrp &&
394 task_session(parent) == task_session(tsk) &&
395 will_become_orphaned_pgrp(pgrp, ignored_task) &&
396 has_stopped_jobs(pgrp)) {
397 __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
398 __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
399 }
400}
401
402static void coredump_task_exit(struct task_struct *tsk)
403{
404 struct core_state *core_state;
405
406 /*
407 * Serialize with any possible pending coredump.
408 * We must hold siglock around checking core_state
409 * and setting PF_POSTCOREDUMP. The core-inducing thread
410 * will increment ->nr_threads for each thread in the
411 * group without PF_POSTCOREDUMP set.
412 */
413 spin_lock_irq(&tsk->sighand->siglock);
414 tsk->flags |= PF_POSTCOREDUMP;
415 core_state = tsk->signal->core_state;
416 spin_unlock_irq(&tsk->sighand->siglock);
417
418 /* The vhost_worker does not particpate in coredumps */
419 if (core_state &&
420 ((tsk->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)) {
421 struct core_thread self;
422
423 self.task = current;
424 if (self.task->flags & PF_SIGNALED)
425 self.next = xchg(&core_state->dumper.next, &self);
426 else
427 self.task = NULL;
428 /*
429 * Implies mb(), the result of xchg() must be visible
430 * to core_state->dumper.
431 */
432 if (atomic_dec_and_test(&core_state->nr_threads))
433 complete(&core_state->startup);
434
435 for (;;) {
436 set_current_state(TASK_UNINTERRUPTIBLE|TASK_FREEZABLE);
437 if (!self.task) /* see coredump_finish() */
438 break;
439 schedule();
440 }
441 __set_current_state(TASK_RUNNING);
442 }
443}
444
445#ifdef CONFIG_MEMCG
446/*
447 * A task is exiting. If it owned this mm, find a new owner for the mm.
448 */
449void mm_update_next_owner(struct mm_struct *mm)
450{
451 struct task_struct *c, *g, *p = current;
452
453retry:
454 /*
455 * If the exiting or execing task is not the owner, it's
456 * someone else's problem.
457 */
458 if (mm->owner != p)
459 return;
460 /*
461 * The current owner is exiting/execing and there are no other
462 * candidates. Do not leave the mm pointing to a possibly
463 * freed task structure.
464 */
465 if (atomic_read(&mm->mm_users) <= 1) {
466 WRITE_ONCE(mm->owner, NULL);
467 return;
468 }
469
470 read_lock(&tasklist_lock);
471 /*
472 * Search in the children
473 */
474 list_for_each_entry(c, &p->children, sibling) {
475 if (c->mm == mm)
476 goto assign_new_owner;
477 }
478
479 /*
480 * Search in the siblings
481 */
482 list_for_each_entry(c, &p->real_parent->children, sibling) {
483 if (c->mm == mm)
484 goto assign_new_owner;
485 }
486
487 /*
488 * Search through everything else, we should not get here often.
489 */
490 for_each_process(g) {
491 if (g->flags & PF_KTHREAD)
492 continue;
493 for_each_thread(g, c) {
494 if (c->mm == mm)
495 goto assign_new_owner;
496 if (c->mm)
497 break;
498 }
499 }
500 read_unlock(&tasklist_lock);
501 /*
502 * We found no owner yet mm_users > 1: this implies that we are
503 * most likely racing with swapoff (try_to_unuse()) or /proc or
504 * ptrace or page migration (get_task_mm()). Mark owner as NULL.
505 */
506 WRITE_ONCE(mm->owner, NULL);
507 return;
508
509assign_new_owner:
510 BUG_ON(c == p);
511 get_task_struct(c);
512 /*
513 * The task_lock protects c->mm from changing.
514 * We always want mm->owner->mm == mm
515 */
516 task_lock(c);
517 /*
518 * Delay read_unlock() till we have the task_lock()
519 * to ensure that c does not slip away underneath us
520 */
521 read_unlock(&tasklist_lock);
522 if (c->mm != mm) {
523 task_unlock(c);
524 put_task_struct(c);
525 goto retry;
526 }
527 WRITE_ONCE(mm->owner, c);
528 lru_gen_migrate_mm(mm);
529 task_unlock(c);
530 put_task_struct(c);
531}
532#endif /* CONFIG_MEMCG */
533
534/*
535 * Turn us into a lazy TLB process if we
536 * aren't already..
537 */
538static void exit_mm(void)
539{
540 struct mm_struct *mm = current->mm;
541
542 exit_mm_release(current, mm);
543 if (!mm)
544 return;
545 mmap_read_lock(mm);
546 mmgrab_lazy_tlb(mm);
547 BUG_ON(mm != current->active_mm);
548 /* more a memory barrier than a real lock */
549 task_lock(current);
550 /*
551 * When a thread stops operating on an address space, the loop
552 * in membarrier_private_expedited() may not observe that
553 * tsk->mm, and the loop in membarrier_global_expedited() may
554 * not observe a MEMBARRIER_STATE_GLOBAL_EXPEDITED
555 * rq->membarrier_state, so those would not issue an IPI.
556 * Membarrier requires a memory barrier after accessing
557 * user-space memory, before clearing tsk->mm or the
558 * rq->membarrier_state.
559 */
560 smp_mb__after_spinlock();
561 local_irq_disable();
562 current->mm = NULL;
563 membarrier_update_current_mm(NULL);
564 enter_lazy_tlb(mm, current);
565 local_irq_enable();
566 task_unlock(current);
567 mmap_read_unlock(mm);
568 mm_update_next_owner(mm);
569 mmput(mm);
570 if (test_thread_flag(TIF_MEMDIE))
571 exit_oom_victim();
572}
573
574static struct task_struct *find_alive_thread(struct task_struct *p)
575{
576 struct task_struct *t;
577
578 for_each_thread(p, t) {
579 if (!(t->flags & PF_EXITING))
580 return t;
581 }
582 return NULL;
583}
584
585static struct task_struct *find_child_reaper(struct task_struct *father,
586 struct list_head *dead)
587 __releases(&tasklist_lock)
588 __acquires(&tasklist_lock)
589{
590 struct pid_namespace *pid_ns = task_active_pid_ns(father);
591 struct task_struct *reaper = pid_ns->child_reaper;
592 struct task_struct *p, *n;
593
594 if (likely(reaper != father))
595 return reaper;
596
597 reaper = find_alive_thread(father);
598 if (reaper) {
599 pid_ns->child_reaper = reaper;
600 return reaper;
601 }
602
603 write_unlock_irq(&tasklist_lock);
604
605 list_for_each_entry_safe(p, n, dead, ptrace_entry) {
606 list_del_init(&p->ptrace_entry);
607 release_task(p);
608 }
609
610 zap_pid_ns_processes(pid_ns);
611 write_lock_irq(&tasklist_lock);
612
613 return father;
614}
615
616/*
617 * When we die, we re-parent all our children, and try to:
618 * 1. give them to another thread in our thread group, if such a member exists
619 * 2. give it to the first ancestor process which prctl'd itself as a
620 * child_subreaper for its children (like a service manager)
621 * 3. give it to the init process (PID 1) in our pid namespace
622 */
623static struct task_struct *find_new_reaper(struct task_struct *father,
624 struct task_struct *child_reaper)
625{
626 struct task_struct *thread, *reaper;
627
628 thread = find_alive_thread(father);
629 if (thread)
630 return thread;
631
632 if (father->signal->has_child_subreaper) {
633 unsigned int ns_level = task_pid(father)->level;
634 /*
635 * Find the first ->is_child_subreaper ancestor in our pid_ns.
636 * We can't check reaper != child_reaper to ensure we do not
637 * cross the namespaces, the exiting parent could be injected
638 * by setns() + fork().
639 * We check pid->level, this is slightly more efficient than
640 * task_active_pid_ns(reaper) != task_active_pid_ns(father).
641 */
642 for (reaper = father->real_parent;
643 task_pid(reaper)->level == ns_level;
644 reaper = reaper->real_parent) {
645 if (reaper == &init_task)
646 break;
647 if (!reaper->signal->is_child_subreaper)
648 continue;
649 thread = find_alive_thread(reaper);
650 if (thread)
651 return thread;
652 }
653 }
654
655 return child_reaper;
656}
657
658/*
659* Any that need to be release_task'd are put on the @dead list.
660 */
661static void reparent_leader(struct task_struct *father, struct task_struct *p,
662 struct list_head *dead)
663{
664 if (unlikely(p->exit_state == EXIT_DEAD))
665 return;
666
667 /* We don't want people slaying init. */
668 p->exit_signal = SIGCHLD;
669
670 /* If it has exited notify the new parent about this child's death. */
671 if (!p->ptrace &&
672 p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
673 if (do_notify_parent(p, p->exit_signal)) {
674 p->exit_state = EXIT_DEAD;
675 list_add(&p->ptrace_entry, dead);
676 }
677 }
678
679 kill_orphaned_pgrp(p, father);
680}
681
682/*
683 * This does two things:
684 *
685 * A. Make init inherit all the child processes
686 * B. Check to see if any process groups have become orphaned
687 * as a result of our exiting, and if they have any stopped
688 * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
689 */
690static void forget_original_parent(struct task_struct *father,
691 struct list_head *dead)
692{
693 struct task_struct *p, *t, *reaper;
694
695 if (unlikely(!list_empty(&father->ptraced)))
696 exit_ptrace(father, dead);
697
698 /* Can drop and reacquire tasklist_lock */
699 reaper = find_child_reaper(father, dead);
700 if (list_empty(&father->children))
701 return;
702
703 reaper = find_new_reaper(father, reaper);
704 list_for_each_entry(p, &father->children, sibling) {
705 for_each_thread(p, t) {
706 RCU_INIT_POINTER(t->real_parent, reaper);
707 BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
708 if (likely(!t->ptrace))
709 t->parent = t->real_parent;
710 if (t->pdeath_signal)
711 group_send_sig_info(t->pdeath_signal,
712 SEND_SIG_NOINFO, t,
713 PIDTYPE_TGID);
714 }
715 /*
716 * If this is a threaded reparent there is no need to
717 * notify anyone anything has happened.
718 */
719 if (!same_thread_group(reaper, father))
720 reparent_leader(father, p, dead);
721 }
722 list_splice_tail_init(&father->children, &reaper->children);
723}
724
725/*
726 * Send signals to all our closest relatives so that they know
727 * to properly mourn us..
728 */
729static void exit_notify(struct task_struct *tsk, int group_dead)
730{
731 bool autoreap;
732 struct task_struct *p, *n;
733 LIST_HEAD(dead);
734
735 write_lock_irq(&tasklist_lock);
736 forget_original_parent(tsk, &dead);
737
738 if (group_dead)
739 kill_orphaned_pgrp(tsk->group_leader, NULL);
740
741 tsk->exit_state = EXIT_ZOMBIE;
742 if (unlikely(tsk->ptrace)) {
743 int sig = thread_group_leader(tsk) &&
744 thread_group_empty(tsk) &&
745 !ptrace_reparented(tsk) ?
746 tsk->exit_signal : SIGCHLD;
747 autoreap = do_notify_parent(tsk, sig);
748 } else if (thread_group_leader(tsk)) {
749 autoreap = thread_group_empty(tsk) &&
750 do_notify_parent(tsk, tsk->exit_signal);
751 } else {
752 autoreap = true;
753 }
754
755 if (autoreap) {
756 tsk->exit_state = EXIT_DEAD;
757 list_add(&tsk->ptrace_entry, &dead);
758 }
759
760 /* mt-exec, de_thread() is waiting for group leader */
761 if (unlikely(tsk->signal->notify_count < 0))
762 wake_up_process(tsk->signal->group_exec_task);
763 write_unlock_irq(&tasklist_lock);
764
765 list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
766 list_del_init(&p->ptrace_entry);
767 release_task(p);
768 }
769}
770
771#ifdef CONFIG_DEBUG_STACK_USAGE
772static void check_stack_usage(void)
773{
774 static DEFINE_SPINLOCK(low_water_lock);
775 static int lowest_to_date = THREAD_SIZE;
776 unsigned long free;
777
778 free = stack_not_used(current);
779
780 if (free >= lowest_to_date)
781 return;
782
783 spin_lock(&low_water_lock);
784 if (free < lowest_to_date) {
785 pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
786 current->comm, task_pid_nr(current), free);
787 lowest_to_date = free;
788 }
789 spin_unlock(&low_water_lock);
790}
791#else
792static inline void check_stack_usage(void) {}
793#endif
794
795static void synchronize_group_exit(struct task_struct *tsk, long code)
796{
797 struct sighand_struct *sighand = tsk->sighand;
798 struct signal_struct *signal = tsk->signal;
799
800 spin_lock_irq(&sighand->siglock);
801 signal->quick_threads--;
802 if ((signal->quick_threads == 0) &&
803 !(signal->flags & SIGNAL_GROUP_EXIT)) {
804 signal->flags = SIGNAL_GROUP_EXIT;
805 signal->group_exit_code = code;
806 signal->group_stop_count = 0;
807 }
808 spin_unlock_irq(&sighand->siglock);
809}
810
811void __noreturn do_exit(long code)
812{
813 struct task_struct *tsk = current;
814 int group_dead;
815
816 WARN_ON(irqs_disabled());
817
818 synchronize_group_exit(tsk, code);
819
820 WARN_ON(tsk->plug);
821
822 kcov_task_exit(tsk);
823 kmsan_task_exit(tsk);
824
825 coredump_task_exit(tsk);
826 ptrace_event(PTRACE_EVENT_EXIT, code);
827 user_events_exit(tsk);
828
829 io_uring_files_cancel();
830 exit_signals(tsk); /* sets PF_EXITING */
831
832 acct_update_integrals(tsk);
833 group_dead = atomic_dec_and_test(&tsk->signal->live);
834 if (group_dead) {
835 /*
836 * If the last thread of global init has exited, panic
837 * immediately to get a useable coredump.
838 */
839 if (unlikely(is_global_init(tsk)))
840 panic("Attempted to kill init! exitcode=0x%08x\n",
841 tsk->signal->group_exit_code ?: (int)code);
842
843#ifdef CONFIG_POSIX_TIMERS
844 hrtimer_cancel(&tsk->signal->real_timer);
845 exit_itimers(tsk);
846#endif
847 if (tsk->mm)
848 setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
849 }
850 acct_collect(code, group_dead);
851 if (group_dead)
852 tty_audit_exit();
853 audit_free(tsk);
854
855 tsk->exit_code = code;
856 taskstats_exit(tsk, group_dead);
857
858 exit_mm();
859
860 if (group_dead)
861 acct_process();
862 trace_sched_process_exit(tsk);
863
864 exit_sem(tsk);
865 exit_shm(tsk);
866 exit_files(tsk);
867 exit_fs(tsk);
868 if (group_dead)
869 disassociate_ctty(1);
870 exit_task_namespaces(tsk);
871 exit_task_work(tsk);
872 exit_thread(tsk);
873
874 /*
875 * Flush inherited counters to the parent - before the parent
876 * gets woken up by child-exit notifications.
877 *
878 * because of cgroup mode, must be called before cgroup_exit()
879 */
880 perf_event_exit_task(tsk);
881
882 sched_autogroup_exit_task(tsk);
883 cgroup_exit(tsk);
884
885 /*
886 * FIXME: do that only when needed, using sched_exit tracepoint
887 */
888 flush_ptrace_hw_breakpoint(tsk);
889
890 exit_tasks_rcu_start();
891 exit_notify(tsk, group_dead);
892 proc_exit_connector(tsk);
893 mpol_put_task_policy(tsk);
894#ifdef CONFIG_FUTEX
895 if (unlikely(current->pi_state_cache))
896 kfree(current->pi_state_cache);
897#endif
898 /*
899 * Make sure we are holding no locks:
900 */
901 debug_check_no_locks_held();
902
903 if (tsk->io_context)
904 exit_io_context(tsk);
905
906 if (tsk->splice_pipe)
907 free_pipe_info(tsk->splice_pipe);
908
909 if (tsk->task_frag.page)
910 put_page(tsk->task_frag.page);
911
912 exit_task_stack_account(tsk);
913
914 check_stack_usage();
915 preempt_disable();
916 if (tsk->nr_dirtied)
917 __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
918 exit_rcu();
919 exit_tasks_rcu_finish();
920
921 lockdep_free_task(tsk);
922 do_task_dead();
923}
924
925void __noreturn make_task_dead(int signr)
926{
927 /*
928 * Take the task off the cpu after something catastrophic has
929 * happened.
930 *
931 * We can get here from a kernel oops, sometimes with preemption off.
932 * Start by checking for critical errors.
933 * Then fix up important state like USER_DS and preemption.
934 * Then do everything else.
935 */
936 struct task_struct *tsk = current;
937 unsigned int limit;
938
939 if (unlikely(in_interrupt()))
940 panic("Aiee, killing interrupt handler!");
941 if (unlikely(!tsk->pid))
942 panic("Attempted to kill the idle task!");
943
944 if (unlikely(irqs_disabled())) {
945 pr_info("note: %s[%d] exited with irqs disabled\n",
946 current->comm, task_pid_nr(current));
947 local_irq_enable();
948 }
949 if (unlikely(in_atomic())) {
950 pr_info("note: %s[%d] exited with preempt_count %d\n",
951 current->comm, task_pid_nr(current),
952 preempt_count());
953 preempt_count_set(PREEMPT_ENABLED);
954 }
955
956 /*
957 * Every time the system oopses, if the oops happens while a reference
958 * to an object was held, the reference leaks.
959 * If the oops doesn't also leak memory, repeated oopsing can cause
960 * reference counters to wrap around (if they're not using refcount_t).
961 * This means that repeated oopsing can make unexploitable-looking bugs
962 * exploitable through repeated oopsing.
963 * To make sure this can't happen, place an upper bound on how often the
964 * kernel may oops without panic().
965 */
966 limit = READ_ONCE(oops_limit);
967 if (atomic_inc_return(&oops_count) >= limit && limit)
968 panic("Oopsed too often (kernel.oops_limit is %d)", limit);
969
970 /*
971 * We're taking recursive faults here in make_task_dead. Safest is to just
972 * leave this task alone and wait for reboot.
973 */
974 if (unlikely(tsk->flags & PF_EXITING)) {
975 pr_alert("Fixing recursive fault but reboot is needed!\n");
976 futex_exit_recursive(tsk);
977 tsk->exit_state = EXIT_DEAD;
978 refcount_inc(&tsk->rcu_users);
979 do_task_dead();
980 }
981
982 do_exit(signr);
983}
984
985SYSCALL_DEFINE1(exit, int, error_code)
986{
987 do_exit((error_code&0xff)<<8);
988}
989
990/*
991 * Take down every thread in the group. This is called by fatal signals
992 * as well as by sys_exit_group (below).
993 */
994void __noreturn
995do_group_exit(int exit_code)
996{
997 struct signal_struct *sig = current->signal;
998
999 if (sig->flags & SIGNAL_GROUP_EXIT)
1000 exit_code = sig->group_exit_code;
1001 else if (sig->group_exec_task)
1002 exit_code = 0;
1003 else {
1004 struct sighand_struct *const sighand = current->sighand;
1005
1006 spin_lock_irq(&sighand->siglock);
1007 if (sig->flags & SIGNAL_GROUP_EXIT)
1008 /* Another thread got here before we took the lock. */
1009 exit_code = sig->group_exit_code;
1010 else if (sig->group_exec_task)
1011 exit_code = 0;
1012 else {
1013 sig->group_exit_code = exit_code;
1014 sig->flags = SIGNAL_GROUP_EXIT;
1015 zap_other_threads(current);
1016 }
1017 spin_unlock_irq(&sighand->siglock);
1018 }
1019
1020 do_exit(exit_code);
1021 /* NOTREACHED */
1022}
1023
1024/*
1025 * this kills every thread in the thread group. Note that any externally
1026 * wait4()-ing process will get the correct exit code - even if this
1027 * thread is not the thread group leader.
1028 */
1029SYSCALL_DEFINE1(exit_group, int, error_code)
1030{
1031 do_group_exit((error_code & 0xff) << 8);
1032 /* NOTREACHED */
1033 return 0;
1034}
1035
1036static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
1037{
1038 return wo->wo_type == PIDTYPE_MAX ||
1039 task_pid_type(p, wo->wo_type) == wo->wo_pid;
1040}
1041
1042static int
1043eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
1044{
1045 if (!eligible_pid(wo, p))
1046 return 0;
1047
1048 /*
1049 * Wait for all children (clone and not) if __WALL is set or
1050 * if it is traced by us.
1051 */
1052 if (ptrace || (wo->wo_flags & __WALL))
1053 return 1;
1054
1055 /*
1056 * Otherwise, wait for clone children *only* if __WCLONE is set;
1057 * otherwise, wait for non-clone children *only*.
1058 *
1059 * Note: a "clone" child here is one that reports to its parent
1060 * using a signal other than SIGCHLD, or a non-leader thread which
1061 * we can only see if it is traced by us.
1062 */
1063 if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
1064 return 0;
1065
1066 return 1;
1067}
1068
1069/*
1070 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold
1071 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
1072 * the lock and this task is uninteresting. If we return nonzero, we have
1073 * released the lock and the system call should return.
1074 */
1075static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
1076{
1077 int state, status;
1078 pid_t pid = task_pid_vnr(p);
1079 uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
1080 struct waitid_info *infop;
1081
1082 if (!likely(wo->wo_flags & WEXITED))
1083 return 0;
1084
1085 if (unlikely(wo->wo_flags & WNOWAIT)) {
1086 status = (p->signal->flags & SIGNAL_GROUP_EXIT)
1087 ? p->signal->group_exit_code : p->exit_code;
1088 get_task_struct(p);
1089 read_unlock(&tasklist_lock);
1090 sched_annotate_sleep();
1091 if (wo->wo_rusage)
1092 getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
1093 put_task_struct(p);
1094 goto out_info;
1095 }
1096 /*
1097 * Move the task's state to DEAD/TRACE, only one thread can do this.
1098 */
1099 state = (ptrace_reparented(p) && thread_group_leader(p)) ?
1100 EXIT_TRACE : EXIT_DEAD;
1101 if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
1102 return 0;
1103 /*
1104 * We own this thread, nobody else can reap it.
1105 */
1106 read_unlock(&tasklist_lock);
1107 sched_annotate_sleep();
1108
1109 /*
1110 * Check thread_group_leader() to exclude the traced sub-threads.
1111 */
1112 if (state == EXIT_DEAD && thread_group_leader(p)) {
1113 struct signal_struct *sig = p->signal;
1114 struct signal_struct *psig = current->signal;
1115 unsigned long maxrss;
1116 u64 tgutime, tgstime;
1117
1118 /*
1119 * The resource counters for the group leader are in its
1120 * own task_struct. Those for dead threads in the group
1121 * are in its signal_struct, as are those for the child
1122 * processes it has previously reaped. All these
1123 * accumulate in the parent's signal_struct c* fields.
1124 *
1125 * We don't bother to take a lock here to protect these
1126 * p->signal fields because the whole thread group is dead
1127 * and nobody can change them.
1128 *
1129 * psig->stats_lock also protects us from our sub-threads
1130 * which can reap other children at the same time.
1131 *
1132 * We use thread_group_cputime_adjusted() to get times for
1133 * the thread group, which consolidates times for all threads
1134 * in the group including the group leader.
1135 */
1136 thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1137 write_seqlock_irq(&psig->stats_lock);
1138 psig->cutime += tgutime + sig->cutime;
1139 psig->cstime += tgstime + sig->cstime;
1140 psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
1141 psig->cmin_flt +=
1142 p->min_flt + sig->min_flt + sig->cmin_flt;
1143 psig->cmaj_flt +=
1144 p->maj_flt + sig->maj_flt + sig->cmaj_flt;
1145 psig->cnvcsw +=
1146 p->nvcsw + sig->nvcsw + sig->cnvcsw;
1147 psig->cnivcsw +=
1148 p->nivcsw + sig->nivcsw + sig->cnivcsw;
1149 psig->cinblock +=
1150 task_io_get_inblock(p) +
1151 sig->inblock + sig->cinblock;
1152 psig->coublock +=
1153 task_io_get_oublock(p) +
1154 sig->oublock + sig->coublock;
1155 maxrss = max(sig->maxrss, sig->cmaxrss);
1156 if (psig->cmaxrss < maxrss)
1157 psig->cmaxrss = maxrss;
1158 task_io_accounting_add(&psig->ioac, &p->ioac);
1159 task_io_accounting_add(&psig->ioac, &sig->ioac);
1160 write_sequnlock_irq(&psig->stats_lock);
1161 }
1162
1163 if (wo->wo_rusage)
1164 getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
1165 status = (p->signal->flags & SIGNAL_GROUP_EXIT)
1166 ? p->signal->group_exit_code : p->exit_code;
1167 wo->wo_stat = status;
1168
1169 if (state == EXIT_TRACE) {
1170 write_lock_irq(&tasklist_lock);
1171 /* We dropped tasklist, ptracer could die and untrace */
1172 ptrace_unlink(p);
1173
1174 /* If parent wants a zombie, don't release it now */
1175 state = EXIT_ZOMBIE;
1176 if (do_notify_parent(p, p->exit_signal))
1177 state = EXIT_DEAD;
1178 p->exit_state = state;
1179 write_unlock_irq(&tasklist_lock);
1180 }
1181 if (state == EXIT_DEAD)
1182 release_task(p);
1183
1184out_info:
1185 infop = wo->wo_info;
1186 if (infop) {
1187 if ((status & 0x7f) == 0) {
1188 infop->cause = CLD_EXITED;
1189 infop->status = status >> 8;
1190 } else {
1191 infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
1192 infop->status = status & 0x7f;
1193 }
1194 infop->pid = pid;
1195 infop->uid = uid;
1196 }
1197
1198 return pid;
1199}
1200
1201static int *task_stopped_code(struct task_struct *p, bool ptrace)
1202{
1203 if (ptrace) {
1204 if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
1205 return &p->exit_code;
1206 } else {
1207 if (p->signal->flags & SIGNAL_STOP_STOPPED)
1208 return &p->signal->group_exit_code;
1209 }
1210 return NULL;
1211}
1212
1213/**
1214 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
1215 * @wo: wait options
1216 * @ptrace: is the wait for ptrace
1217 * @p: task to wait for
1218 *
1219 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
1220 *
1221 * CONTEXT:
1222 * read_lock(&tasklist_lock), which is released if return value is
1223 * non-zero. Also, grabs and releases @p->sighand->siglock.
1224 *
1225 * RETURNS:
1226 * 0 if wait condition didn't exist and search for other wait conditions
1227 * should continue. Non-zero return, -errno on failure and @p's pid on
1228 * success, implies that tasklist_lock is released and wait condition
1229 * search should terminate.
1230 */
1231static int wait_task_stopped(struct wait_opts *wo,
1232 int ptrace, struct task_struct *p)
1233{
1234 struct waitid_info *infop;
1235 int exit_code, *p_code, why;
1236 uid_t uid = 0; /* unneeded, required by compiler */
1237 pid_t pid;
1238
1239 /*
1240 * Traditionally we see ptrace'd stopped tasks regardless of options.
1241 */
1242 if (!ptrace && !(wo->wo_flags & WUNTRACED))
1243 return 0;
1244
1245 if (!task_stopped_code(p, ptrace))
1246 return 0;
1247
1248 exit_code = 0;
1249 spin_lock_irq(&p->sighand->siglock);
1250
1251 p_code = task_stopped_code(p, ptrace);
1252 if (unlikely(!p_code))
1253 goto unlock_sig;
1254
1255 exit_code = *p_code;
1256 if (!exit_code)
1257 goto unlock_sig;
1258
1259 if (!unlikely(wo->wo_flags & WNOWAIT))
1260 *p_code = 0;
1261
1262 uid = from_kuid_munged(current_user_ns(), task_uid(p));
1263unlock_sig:
1264 spin_unlock_irq(&p->sighand->siglock);
1265 if (!exit_code)
1266 return 0;
1267
1268 /*
1269 * Now we are pretty sure this task is interesting.
1270 * Make sure it doesn't get reaped out from under us while we
1271 * give up the lock and then examine it below. We don't want to
1272 * keep holding onto the tasklist_lock while we call getrusage and
1273 * possibly take page faults for user memory.
1274 */
1275 get_task_struct(p);
1276 pid = task_pid_vnr(p);
1277 why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
1278 read_unlock(&tasklist_lock);
1279 sched_annotate_sleep();
1280 if (wo->wo_rusage)
1281 getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
1282 put_task_struct(p);
1283
1284 if (likely(!(wo->wo_flags & WNOWAIT)))
1285 wo->wo_stat = (exit_code << 8) | 0x7f;
1286
1287 infop = wo->wo_info;
1288 if (infop) {
1289 infop->cause = why;
1290 infop->status = exit_code;
1291 infop->pid = pid;
1292 infop->uid = uid;
1293 }
1294 return pid;
1295}
1296
1297/*
1298 * Handle do_wait work for one task in a live, non-stopped state.
1299 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
1300 * the lock and this task is uninteresting. If we return nonzero, we have
1301 * released the lock and the system call should return.
1302 */
1303static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
1304{
1305 struct waitid_info *infop;
1306 pid_t pid;
1307 uid_t uid;
1308
1309 if (!unlikely(wo->wo_flags & WCONTINUED))
1310 return 0;
1311
1312 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
1313 return 0;
1314
1315 spin_lock_irq(&p->sighand->siglock);
1316 /* Re-check with the lock held. */
1317 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
1318 spin_unlock_irq(&p->sighand->siglock);
1319 return 0;
1320 }
1321 if (!unlikely(wo->wo_flags & WNOWAIT))
1322 p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
1323 uid = from_kuid_munged(current_user_ns(), task_uid(p));
1324 spin_unlock_irq(&p->sighand->siglock);
1325
1326 pid = task_pid_vnr(p);
1327 get_task_struct(p);
1328 read_unlock(&tasklist_lock);
1329 sched_annotate_sleep();
1330 if (wo->wo_rusage)
1331 getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
1332 put_task_struct(p);
1333
1334 infop = wo->wo_info;
1335 if (!infop) {
1336 wo->wo_stat = 0xffff;
1337 } else {
1338 infop->cause = CLD_CONTINUED;
1339 infop->pid = pid;
1340 infop->uid = uid;
1341 infop->status = SIGCONT;
1342 }
1343 return pid;
1344}
1345
1346/*
1347 * Consider @p for a wait by @parent.
1348 *
1349 * -ECHILD should be in ->notask_error before the first call.
1350 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
1351 * Returns zero if the search for a child should continue;
1352 * then ->notask_error is 0 if @p is an eligible child,
1353 * or still -ECHILD.
1354 */
1355static int wait_consider_task(struct wait_opts *wo, int ptrace,
1356 struct task_struct *p)
1357{
1358 /*
1359 * We can race with wait_task_zombie() from another thread.
1360 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
1361 * can't confuse the checks below.
1362 */
1363 int exit_state = READ_ONCE(p->exit_state);
1364 int ret;
1365
1366 if (unlikely(exit_state == EXIT_DEAD))
1367 return 0;
1368
1369 ret = eligible_child(wo, ptrace, p);
1370 if (!ret)
1371 return ret;
1372
1373 if (unlikely(exit_state == EXIT_TRACE)) {
1374 /*
1375 * ptrace == 0 means we are the natural parent. In this case
1376 * we should clear notask_error, debugger will notify us.
1377 */
1378 if (likely(!ptrace))
1379 wo->notask_error = 0;
1380 return 0;
1381 }
1382
1383 if (likely(!ptrace) && unlikely(p->ptrace)) {
1384 /*
1385 * If it is traced by its real parent's group, just pretend
1386 * the caller is ptrace_do_wait() and reap this child if it
1387 * is zombie.
1388 *
1389 * This also hides group stop state from real parent; otherwise
1390 * a single stop can be reported twice as group and ptrace stop.
1391 * If a ptracer wants to distinguish these two events for its
1392 * own children it should create a separate process which takes
1393 * the role of real parent.
1394 */
1395 if (!ptrace_reparented(p))
1396 ptrace = 1;
1397 }
1398
1399 /* slay zombie? */
1400 if (exit_state == EXIT_ZOMBIE) {
1401 /* we don't reap group leaders with subthreads */
1402 if (!delay_group_leader(p)) {
1403 /*
1404 * A zombie ptracee is only visible to its ptracer.
1405 * Notification and reaping will be cascaded to the
1406 * real parent when the ptracer detaches.
1407 */
1408 if (unlikely(ptrace) || likely(!p->ptrace))
1409 return wait_task_zombie(wo, p);
1410 }
1411
1412 /*
1413 * Allow access to stopped/continued state via zombie by
1414 * falling through. Clearing of notask_error is complex.
1415 *
1416 * When !@ptrace:
1417 *
1418 * If WEXITED is set, notask_error should naturally be
1419 * cleared. If not, subset of WSTOPPED|WCONTINUED is set,
1420 * so, if there are live subthreads, there are events to
1421 * wait for. If all subthreads are dead, it's still safe
1422 * to clear - this function will be called again in finite
1423 * amount time once all the subthreads are released and
1424 * will then return without clearing.
1425 *
1426 * When @ptrace:
1427 *
1428 * Stopped state is per-task and thus can't change once the
1429 * target task dies. Only continued and exited can happen.
1430 * Clear notask_error if WCONTINUED | WEXITED.
1431 */
1432 if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
1433 wo->notask_error = 0;
1434 } else {
1435 /*
1436 * @p is alive and it's gonna stop, continue or exit, so
1437 * there always is something to wait for.
1438 */
1439 wo->notask_error = 0;
1440 }
1441
1442 /*
1443 * Wait for stopped. Depending on @ptrace, different stopped state
1444 * is used and the two don't interact with each other.
1445 */
1446 ret = wait_task_stopped(wo, ptrace, p);
1447 if (ret)
1448 return ret;
1449
1450 /*
1451 * Wait for continued. There's only one continued state and the
1452 * ptracer can consume it which can confuse the real parent. Don't
1453 * use WCONTINUED from ptracer. You don't need or want it.
1454 */
1455 return wait_task_continued(wo, p);
1456}
1457
1458/*
1459 * Do the work of do_wait() for one thread in the group, @tsk.
1460 *
1461 * -ECHILD should be in ->notask_error before the first call.
1462 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
1463 * Returns zero if the search for a child should continue; then
1464 * ->notask_error is 0 if there were any eligible children,
1465 * or still -ECHILD.
1466 */
1467static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
1468{
1469 struct task_struct *p;
1470
1471 list_for_each_entry(p, &tsk->children, sibling) {
1472 int ret = wait_consider_task(wo, 0, p);
1473
1474 if (ret)
1475 return ret;
1476 }
1477
1478 return 0;
1479}
1480
1481static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
1482{
1483 struct task_struct *p;
1484
1485 list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
1486 int ret = wait_consider_task(wo, 1, p);
1487
1488 if (ret)
1489 return ret;
1490 }
1491
1492 return 0;
1493}
1494
1495bool pid_child_should_wake(struct wait_opts *wo, struct task_struct *p)
1496{
1497 if (!eligible_pid(wo, p))
1498 return false;
1499
1500 if ((wo->wo_flags & __WNOTHREAD) && wo->child_wait.private != p->parent)
1501 return false;
1502
1503 return true;
1504}
1505
1506static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
1507 int sync, void *key)
1508{
1509 struct wait_opts *wo = container_of(wait, struct wait_opts,
1510 child_wait);
1511 struct task_struct *p = key;
1512
1513 if (pid_child_should_wake(wo, p))
1514 return default_wake_function(wait, mode, sync, key);
1515
1516 return 0;
1517}
1518
1519void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
1520{
1521 __wake_up_sync_key(&parent->signal->wait_chldexit,
1522 TASK_INTERRUPTIBLE, p);
1523}
1524
1525static bool is_effectively_child(struct wait_opts *wo, bool ptrace,
1526 struct task_struct *target)
1527{
1528 struct task_struct *parent =
1529 !ptrace ? target->real_parent : target->parent;
1530
1531 return current == parent || (!(wo->wo_flags & __WNOTHREAD) &&
1532 same_thread_group(current, parent));
1533}
1534
1535/*
1536 * Optimization for waiting on PIDTYPE_PID. No need to iterate through child
1537 * and tracee lists to find the target task.
1538 */
1539static int do_wait_pid(struct wait_opts *wo)
1540{
1541 bool ptrace;
1542 struct task_struct *target;
1543 int retval;
1544
1545 ptrace = false;
1546 target = pid_task(wo->wo_pid, PIDTYPE_TGID);
1547 if (target && is_effectively_child(wo, ptrace, target)) {
1548 retval = wait_consider_task(wo, ptrace, target);
1549 if (retval)
1550 return retval;
1551 }
1552
1553 ptrace = true;
1554 target = pid_task(wo->wo_pid, PIDTYPE_PID);
1555 if (target && target->ptrace &&
1556 is_effectively_child(wo, ptrace, target)) {
1557 retval = wait_consider_task(wo, ptrace, target);
1558 if (retval)
1559 return retval;
1560 }
1561
1562 return 0;
1563}
1564
1565long __do_wait(struct wait_opts *wo)
1566{
1567 long retval;
1568
1569 /*
1570 * If there is nothing that can match our criteria, just get out.
1571 * We will clear ->notask_error to zero if we see any child that
1572 * might later match our criteria, even if we are not able to reap
1573 * it yet.
1574 */
1575 wo->notask_error = -ECHILD;
1576 if ((wo->wo_type < PIDTYPE_MAX) &&
1577 (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type)))
1578 goto notask;
1579
1580 read_lock(&tasklist_lock);
1581
1582 if (wo->wo_type == PIDTYPE_PID) {
1583 retval = do_wait_pid(wo);
1584 if (retval)
1585 return retval;
1586 } else {
1587 struct task_struct *tsk = current;
1588
1589 do {
1590 retval = do_wait_thread(wo, tsk);
1591 if (retval)
1592 return retval;
1593
1594 retval = ptrace_do_wait(wo, tsk);
1595 if (retval)
1596 return retval;
1597
1598 if (wo->wo_flags & __WNOTHREAD)
1599 break;
1600 } while_each_thread(current, tsk);
1601 }
1602 read_unlock(&tasklist_lock);
1603
1604notask:
1605 retval = wo->notask_error;
1606 if (!retval && !(wo->wo_flags & WNOHANG))
1607 return -ERESTARTSYS;
1608
1609 return retval;
1610}
1611
1612static long do_wait(struct wait_opts *wo)
1613{
1614 int retval;
1615
1616 trace_sched_process_wait(wo->wo_pid);
1617
1618 init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
1619 wo->child_wait.private = current;
1620 add_wait_queue(¤t->signal->wait_chldexit, &wo->child_wait);
1621
1622 do {
1623 set_current_state(TASK_INTERRUPTIBLE);
1624 retval = __do_wait(wo);
1625 if (retval != -ERESTARTSYS)
1626 break;
1627 if (signal_pending(current))
1628 break;
1629 schedule();
1630 } while (1);
1631
1632 __set_current_state(TASK_RUNNING);
1633 remove_wait_queue(¤t->signal->wait_chldexit, &wo->child_wait);
1634 return retval;
1635}
1636
1637int kernel_waitid_prepare(struct wait_opts *wo, int which, pid_t upid,
1638 struct waitid_info *infop, int options,
1639 struct rusage *ru)
1640{
1641 unsigned int f_flags = 0;
1642 struct pid *pid = NULL;
1643 enum pid_type type;
1644
1645 if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
1646 __WNOTHREAD|__WCLONE|__WALL))
1647 return -EINVAL;
1648 if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
1649 return -EINVAL;
1650
1651 switch (which) {
1652 case P_ALL:
1653 type = PIDTYPE_MAX;
1654 break;
1655 case P_PID:
1656 type = PIDTYPE_PID;
1657 if (upid <= 0)
1658 return -EINVAL;
1659
1660 pid = find_get_pid(upid);
1661 break;
1662 case P_PGID:
1663 type = PIDTYPE_PGID;
1664 if (upid < 0)
1665 return -EINVAL;
1666
1667 if (upid)
1668 pid = find_get_pid(upid);
1669 else
1670 pid = get_task_pid(current, PIDTYPE_PGID);
1671 break;
1672 case P_PIDFD:
1673 type = PIDTYPE_PID;
1674 if (upid < 0)
1675 return -EINVAL;
1676
1677 pid = pidfd_get_pid(upid, &f_flags);
1678 if (IS_ERR(pid))
1679 return PTR_ERR(pid);
1680
1681 break;
1682 default:
1683 return -EINVAL;
1684 }
1685
1686 wo->wo_type = type;
1687 wo->wo_pid = pid;
1688 wo->wo_flags = options;
1689 wo->wo_info = infop;
1690 wo->wo_rusage = ru;
1691 if (f_flags & O_NONBLOCK)
1692 wo->wo_flags |= WNOHANG;
1693
1694 return 0;
1695}
1696
1697static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
1698 int options, struct rusage *ru)
1699{
1700 struct wait_opts wo;
1701 long ret;
1702
1703 ret = kernel_waitid_prepare(&wo, which, upid, infop, options, ru);
1704 if (ret)
1705 return ret;
1706
1707 ret = do_wait(&wo);
1708 if (!ret && !(options & WNOHANG) && (wo.wo_flags & WNOHANG))
1709 ret = -EAGAIN;
1710
1711 put_pid(wo.wo_pid);
1712 return ret;
1713}
1714
1715SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
1716 infop, int, options, struct rusage __user *, ru)
1717{
1718 struct rusage r;
1719 struct waitid_info info = {.status = 0};
1720 long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
1721 int signo = 0;
1722
1723 if (err > 0) {
1724 signo = SIGCHLD;
1725 err = 0;
1726 if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
1727 return -EFAULT;
1728 }
1729 if (!infop)
1730 return err;
1731
1732 if (!user_write_access_begin(infop, sizeof(*infop)))
1733 return -EFAULT;
1734
1735 unsafe_put_user(signo, &infop->si_signo, Efault);
1736 unsafe_put_user(0, &infop->si_errno, Efault);
1737 unsafe_put_user(info.cause, &infop->si_code, Efault);
1738 unsafe_put_user(info.pid, &infop->si_pid, Efault);
1739 unsafe_put_user(info.uid, &infop->si_uid, Efault);
1740 unsafe_put_user(info.status, &infop->si_status, Efault);
1741 user_write_access_end();
1742 return err;
1743Efault:
1744 user_write_access_end();
1745 return -EFAULT;
1746}
1747
1748long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
1749 struct rusage *ru)
1750{
1751 struct wait_opts wo;
1752 struct pid *pid = NULL;
1753 enum pid_type type;
1754 long ret;
1755
1756 if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
1757 __WNOTHREAD|__WCLONE|__WALL))
1758 return -EINVAL;
1759
1760 /* -INT_MIN is not defined */
1761 if (upid == INT_MIN)
1762 return -ESRCH;
1763
1764 if (upid == -1)
1765 type = PIDTYPE_MAX;
1766 else if (upid < 0) {
1767 type = PIDTYPE_PGID;
1768 pid = find_get_pid(-upid);
1769 } else if (upid == 0) {
1770 type = PIDTYPE_PGID;
1771 pid = get_task_pid(current, PIDTYPE_PGID);
1772 } else /* upid > 0 */ {
1773 type = PIDTYPE_PID;
1774 pid = find_get_pid(upid);
1775 }
1776
1777 wo.wo_type = type;
1778 wo.wo_pid = pid;
1779 wo.wo_flags = options | WEXITED;
1780 wo.wo_info = NULL;
1781 wo.wo_stat = 0;
1782 wo.wo_rusage = ru;
1783 ret = do_wait(&wo);
1784 put_pid(pid);
1785 if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
1786 ret = -EFAULT;
1787
1788 return ret;
1789}
1790
1791int kernel_wait(pid_t pid, int *stat)
1792{
1793 struct wait_opts wo = {
1794 .wo_type = PIDTYPE_PID,
1795 .wo_pid = find_get_pid(pid),
1796 .wo_flags = WEXITED,
1797 };
1798 int ret;
1799
1800 ret = do_wait(&wo);
1801 if (ret > 0 && wo.wo_stat)
1802 *stat = wo.wo_stat;
1803 put_pid(wo.wo_pid);
1804 return ret;
1805}
1806
1807SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
1808 int, options, struct rusage __user *, ru)
1809{
1810 struct rusage r;
1811 long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL);
1812
1813 if (err > 0) {
1814 if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
1815 return -EFAULT;
1816 }
1817 return err;
1818}
1819
1820#ifdef __ARCH_WANT_SYS_WAITPID
1821
1822/*
1823 * sys_waitpid() remains for compatibility. waitpid() should be
1824 * implemented by calling sys_wait4() from libc.a.
1825 */
1826SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
1827{
1828 return kernel_wait4(pid, stat_addr, options, NULL);
1829}
1830
1831#endif
1832
1833#ifdef CONFIG_COMPAT
1834COMPAT_SYSCALL_DEFINE4(wait4,
1835 compat_pid_t, pid,
1836 compat_uint_t __user *, stat_addr,
1837 int, options,
1838 struct compat_rusage __user *, ru)
1839{
1840 struct rusage r;
1841 long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL);
1842 if (err > 0) {
1843 if (ru && put_compat_rusage(&r, ru))
1844 return -EFAULT;
1845 }
1846 return err;
1847}
1848
1849COMPAT_SYSCALL_DEFINE5(waitid,
1850 int, which, compat_pid_t, pid,
1851 struct compat_siginfo __user *, infop, int, options,
1852 struct compat_rusage __user *, uru)
1853{
1854 struct rusage ru;
1855 struct waitid_info info = {.status = 0};
1856 long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
1857 int signo = 0;
1858 if (err > 0) {
1859 signo = SIGCHLD;
1860 err = 0;
1861 if (uru) {
1862 /* kernel_waitid() overwrites everything in ru */
1863 if (COMPAT_USE_64BIT_TIME)
1864 err = copy_to_user(uru, &ru, sizeof(ru));
1865 else
1866 err = put_compat_rusage(&ru, uru);
1867 if (err)
1868 return -EFAULT;
1869 }
1870 }
1871
1872 if (!infop)
1873 return err;
1874
1875 if (!user_write_access_begin(infop, sizeof(*infop)))
1876 return -EFAULT;
1877
1878 unsafe_put_user(signo, &infop->si_signo, Efault);
1879 unsafe_put_user(0, &infop->si_errno, Efault);
1880 unsafe_put_user(info.cause, &infop->si_code, Efault);
1881 unsafe_put_user(info.pid, &infop->si_pid, Efault);
1882 unsafe_put_user(info.uid, &infop->si_uid, Efault);
1883 unsafe_put_user(info.status, &infop->si_status, Efault);
1884 user_write_access_end();
1885 return err;
1886Efault:
1887 user_write_access_end();
1888 return -EFAULT;
1889}
1890#endif
1891
1892/**
1893 * thread_group_exited - check that a thread group has exited
1894 * @pid: tgid of thread group to be checked.
1895 *
1896 * Test if the thread group represented by tgid has exited (all
1897 * threads are zombies, dead or completely gone).
1898 *
1899 * Return: true if the thread group has exited. false otherwise.
1900 */
1901bool thread_group_exited(struct pid *pid)
1902{
1903 struct task_struct *task;
1904 bool exited;
1905
1906 rcu_read_lock();
1907 task = pid_task(pid, PIDTYPE_PID);
1908 exited = !task ||
1909 (READ_ONCE(task->exit_state) && thread_group_empty(task));
1910 rcu_read_unlock();
1911
1912 return exited;
1913}
1914EXPORT_SYMBOL(thread_group_exited);
1915
1916/*
1917 * This needs to be __function_aligned as GCC implicitly makes any
1918 * implementation of abort() cold and drops alignment specified by
1919 * -falign-functions=N.
1920 *
1921 * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345#c11
1922 */
1923__weak __function_aligned void abort(void)
1924{
1925 BUG();
1926
1927 /* if that doesn't kill us, halt */
1928 panic("Oops failed to kill thread");
1929}
1930EXPORT_SYMBOL(abort);