// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/oom_kill.c
 *
 * Copyright (C) 1998,2000 Rik van Riel
 * Thanks go out to Claus Fischer for some serious inspiration and
 * for goading me into coding this file...
 * Copyright (C) 2010 Google, Inc.
 * Rewritten by David Rientjes
 *
 * The routines in this file are used to kill a process when
 * we're seriously out of memory. This gets called from __alloc_pages()
 * in mm/page_alloc.c when we really run out of memory.
 *
 * Since we won't call these routines often (on a well-configured
 * machine) this file will double as a 'coding guide' and a signpost
 * for newbie kernel hackers. It features several pointers to major
 * kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>
#include "internal.h"
#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;

/*
 * Serializes oom killer invocations (out_of_memory()) from all contexts to
 * prevent overeager oom killing (e.g. when the oom killer is invoked
 * from different domains).
 *
 * oom_killer_disable() relies on this lock to stabilize oom_killer_disabled
 * and mark_oom_victim().
 */
DEFINE_MUTEX(oom_lock);
/* Serializes oom_score_adj and oom_score_adj_min updates */
DEFINE_MUTEX(oom_adj_mutex);

static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}

#ifdef CONFIG_NUMA
/**
 * oom_cpuset_eligible() - check task eligibility for kill
 * @start: task struct of the task to consider
 * @oc: pointer to struct oom_control
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 *
 * This function assumes oom-killer context and that 'current' has triggered
 * the oom-killer.
 */
static bool oom_cpuset_eligible(struct task_struct *start,
				struct oom_control *oc)
{
	struct task_struct *tsk;
	bool ret = false;
	const nodemask_t *mask = oc->nodemask;

	if (is_memcg_oom(oc))
		return true;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant. Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_in_oom_domain(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * kthread_use_mm(), but one or more of its subthreads may still have a valid
 * pointer. Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}

/*
 * order == -1 means the oom kill was requested by sysrq; otherwise the
 * order is used only for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

/* return true if the task is not adequate as candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;
	return false;
}

/*
 * Check whether the amount of unreclaimable slab is greater than
 * all user memory (LRU pages).
 * dump_unreclaimable_slab() can help in the case where the oom is due
 * to too much unreclaimable slab used by the kernel.
 */
static bool should_dump_unreclaim_slab(void)
{
	unsigned long nr_lru;

	nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
		 global_node_page_state(NR_INACTIVE_ANON) +
		 global_node_page_state(NR_ACTIVE_FILE) +
		 global_node_page_state(NR_INACTIVE_FILE) +
		 global_node_page_state(NR_ISOLATED_ANON) +
		 global_node_page_state(NR_ISOLATED_FILE) +
		 global_node_page_state(NR_UNEVICTABLE);

	return (global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B) > nr_lru);
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible. The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
long oom_badness(struct task_struct *p, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p))
		return LONG_MIN;

	p = find_lock_task_mm(p);
	if (!p)
		return LONG_MIN;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable, have already been oom reaped, or are in the middle
	 * of a vfork.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return LONG_MIN;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		mm_pgtables_bytes(p->mm) / PAGE_SIZE;
	task_unlock(p);

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	return points;
}
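
/*
 * Worked example of the arithmetic above (illustrative numbers, not taken
 * from this file): with totalpages = 4,000,000 (roughly 16 GiB of RAM plus
 * swap at 4 KiB pages), a task with 262,144 pages of rss, 1,024 swap
 * entries and 2 MiB of page tables scores
 *
 *	points = 262144 + 1024 + (2 << 20) / 4096 = 263680
 *
 * while an oom_score_adj of 300 adds 300 * (4000000 / 1000) = 1,200,000
 * points on top. The adj term easily dominates the memory footprint, which
 * is why oom_score_adj is the knob of choice for steering victim selection.
 */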

static const char * const oom_constraint_text[] = {
	[CONSTRAINT_NONE] = "CONSTRAINT_NONE",
	[CONSTRAINT_CPUSET] = "CONSTRAINT_CPUSET",
	[CONSTRAINT_MEMORY_POLICY] = "CONSTRAINT_MEMORY_POLICY",
	[CONSTRAINT_MEMCG] = "CONSTRAINT_MEMCG",
};

/*
 * Determine the type of allocation constraint.
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type highest_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	if (is_memcg_oom(oc)) {
		oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
		return CONSTRAINT_MEMCG;
	}

	/* Default to all available memory */
	oc->totalpages = totalram_pages() + total_swap_pages;

	if (!IS_ENABLED(CONFIG_NUMA))
		return CONSTRAINT_NONE;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * We reach here only when __GFP_NOFAIL is used, so we should avoid
	 * killing current; a random task has to be killed in this case.
	 * CONSTRAINT_THISNODE would be the right answer, but there is no
	 * way to handle it for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect. Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check this allocation failure is caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			highest_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}
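
/*
 * In short: the constraint is classified in strict precedence order --
 * memcg OOMs first, then mempolicy (a nodemask narrower than all memory
 * nodes), then cpuset limits, and CONSTRAINT_NONE otherwise. In each case
 * oc->totalpages is sized to the memory domain that actually ran out, so
 * that oom_badness() normalizes scores against the right total.
 */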

static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	long points;

	if (oom_unkillable_task(task))
		goto next;

	/* p may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_SKIP set, because the chance that it would
	 * release any memory is quite low.
	 */
	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
			goto next;
		goto abort;
	}

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = LONG_MAX;
		goto select;
	}

	points = oom_badness(task, oc->totalpages);
	if (points == LONG_MIN || points < oc->chosen_points)
		goto next;

select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen = (void *)-1UL;
	return 1;
}

/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. If the scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
	oc->chosen_points = LONG_MIN;

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}
}

static int dump_task(struct task_struct *p, void *arg)
{
	struct oom_control *oc = arg;
	struct task_struct *task;

	if (oom_unkillable_task(p))
		return 0;

	/* p may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(p, oc))
		return 0;

	task = find_lock_task_mm(p);
	if (!task) {
		/*
		 * All of p's threads have already detached their mm's. There's
		 * no need to report them; they can't be oom killed anyway.
		 */
		return 0;
	}

	pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
		task->pid, from_kuid(&init_user_ns, task_uid(task)),
		task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
		mm_pgtables_bytes(task->mm),
		get_mm_counter(task->mm, MM_SWAPENTS),
		task->signal->oom_score_adj, task->comm);
	task_unlock(task);

	return 0;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @oc: pointer to struct oom_control
 *
 * Dumps the current memory state of all eligible tasks. Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss,
 * pgtables_bytes, swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct oom_control *oc)
{
	pr_info("Tasks state (memory values in pages):\n");
	pr_info("[  pid  ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name\n");

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			dump_task(p, oc);
		rcu_read_unlock();
	}
}

static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
{
	/* one line summary of the oom killer context. */
	pr_info("oom-kill:constraint=%s,nodemask=%*pbl",
			oom_constraint_text[oc->constraint],
			nodemask_pr_args(oc->nodemask));
	cpuset_print_current_mems_allowed();
	mem_cgroup_print_oom_context(oc->memcg, victim);
	pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid,
		from_kuid(&init_user_ns, task_uid(victim)));
}

static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
		current->signal->oom_score_adj);
	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
		pr_warn("COMPACTION is disabled!!!\n");

	dump_stack();
	if (is_memcg_oom(oc))
		mem_cgroup_print_oom_meminfo(oc->memcg);
	else {
		show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
		if (should_dump_unreclaim_slab())
			dump_unreclaimable_slab();
	}
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc);
	if (p)
		dump_oom_summary(oc, p);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;

#define K(x) ((x) << (PAGE_SHIFT-10))
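/*
 * K() converts a page count to KiB by shifting left by PAGE_SHIFT - 10;
 * with the common PAGE_SHIFT of 12 (4 KiB pages), K(x) == x * 4. All of
 * the "-rss:%lukB" figures printed in this file go through it.
 */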

/*
 * task->mm can be NULL if the task is the exited group leader. So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}

#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

bool __oom_reap_task_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	bool ret = true;

	/*
	 * Tell all users of get_user/copy_from_user etc... that the content
	 * is no longer stable. No barriers are really needed because unmapping
	 * should imply barriers already, and the reader would hit a page fault
	 * if it stumbled over reaped memory.
	 */
	set_bit(MMF_UNSTABLE, &mm->flags);

	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
		if (!can_madv_lru_vma(vma))
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
			struct mmu_notifier_range range;
			struct mmu_gather tlb;

			mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
						vma, mm, vma->vm_start,
						vma->vm_end);
			tlb_gather_mmu(&tlb, mm);
			if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
				tlb_finish_mmu(&tlb);
				ret = false;
				continue;
			}
			unmap_page_range(&tlb, vma, range.start, range.end, NULL);
			mmu_notifier_invalidate_range_end(&range);
			tlb_finish_mmu(&tlb);
		}
	}

	return ret;
}

/*
 * Reaps the address space of the given task.
 *
 * Returns true on success and false if none or part of the address space
 * has been reclaimed and the caller should retry later.
 */
static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	bool ret = true;

	if (!mmap_read_trylock(mm)) {
		trace_skip_task_reaping(tsk->pid);
		return false;
	}

	/*
	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
	 * under mmap_lock for reading because it serializes against the
	 * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap().
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		trace_skip_task_reaping(tsk->pid);
		goto out_unlock;
	}

	trace_start_task_reaping(tsk->pid);

	/* failed to reap part of the address space. Try again later */
	ret = __oom_reap_task_mm(mm);
	if (!ret)
		goto out_finish;

	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
out_finish:
	trace_finish_task_reaping(tsk->pid);
out_unlock:
	mmap_read_unlock(mm);

	return ret;
}

#define MAX_OOM_REAP_RETRIES 10
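/*
 * With schedule_timeout_idle(HZ/10) between attempts, the loop below gives
 * up on a persistently unavailable mmap_lock after roughly one second
 * (10 retries x 100 ms).
 */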
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the mmap_read_trylock(mm) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES ||
	    test_bit(MMF_OOM_SKIP, &mm->flags))
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	sched_show_task(tsk);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from the OOM killer because it has either been reaped
	 * or somebody holding mmap_lock cannot call mmap_write_unlock(mm).
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}

static int oom_reaper(void *unused)
{
	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}

static void wake_oom_reaper(struct task_struct *tsk)
{
	/* mm is already queued? */
	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	trace_wake_reaper(tsk->pid);
	wake_up(&oom_reaper_wait);
}

static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	return 0;
}
subsys_initcall(oom_init)
#else
static inline void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non NULL and the caller has to guarantee it is stable
 * (either under task_lock or by operating on current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
		mmgrab(tsk->signal->oom_mm);
		set_bit(MMF_OOM_VICTIM, &mm->flags);
	}

	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen, because the OOM killer wouldn't be able to free
	 * any memory and would livelock. freezing_slow_path will tell the
	 * freezer that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
	trace_mark_victim(tsk->pid);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
	pr_info("OOM killer enabled.\n");
}

/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result. Any
 * new usage of this function should be discussed with MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
	signed long ret;

	/*
	 * Make sure to not race with an ongoing OOM killer. Check that the
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	ret = wait_event_interruptible_timeout(oom_victims_wait,
			!atomic_read(&oom_victims), timeout);
	if (ret <= 0) {
		oom_killer_enable();
		return false;
	}
	pr_info("OOM killer disabled.\n");

	return true;
}

static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	if (sig->flags & SIGNAL_GROUP_COREDUMP)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}

/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * The caller has to make sure that task->mm is stable (hold task_lock or
 * operate on current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without mm, because they might have passed exit_mm and
	 * exit_oom_victim. oom_reaper could have rescued that but do not rely
	 * on that for now. We can consider find_lock_task_mm in the future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there are
	 * only small chances it will free some more
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well to make sure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}

static void __oom_kill_process(struct task_struct *victim, const char *message)
{
	struct task_struct *p;
	struct mm_struct *mm;
	bool can_oom_reap = true;

	p = find_lock_task_mm(victim);
	if (!p) {
		pr_info("%s: OOM victim %d (%s) is already exiting. Skip killing the task\n",
			message, task_pid_nr(victim), victim->comm);
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	mmgrab(mm);

	/* Raise event before sending signal: task reaper must see this */
	count_vm_event(OOM_KILL);
	memcg_memory_event_mm(mm, MEMCG_OOM_KILL);

	/*
	 * We should send SIGKILL before granting access to memory reserves
	 * in order to prevent the OOM victim from depleting the memory
	 * reserves from the user space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
	mark_oom_victim(victim);
	pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB, UID:%u pgtables:%lukB oom_score_adj:%hd\n",
		message, task_pid_nr(victim), victim->comm, K(mm->total_vm),
		K(get_mm_counter(mm, MM_ANONPAGES)),
		K(get_mm_counter(mm, MM_FILEPAGES)),
		K(get_mm_counter(mm, MM_SHMEMPAGES)),
		from_kuid(&init_user_ns, task_uid(victim)),
		mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups,
	 * if any. They don't get access to memory reserves, though, to avoid
	 * depletion of all memory. This prevents mm->mmap_lock livelock when
	 * an oom killed thread cannot exit because it requires the semaphore
	 * and it's contended by another thread trying to allocate memory
	 * itself. That thread will now get access to memory reserves since
	 * it has a pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			set_bit(MMF_OOM_SKIP, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No kthread_use_mm() user needs to read from the userspace,
		 * so we are ok to reap it.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		wake_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
#undef K

/*
 * Kill the provided task unless it is protected by having its
 * oom_score_adj set to OOM_SCORE_ADJ_MIN.
 */
static int oom_kill_memcg_member(struct task_struct *task, void *message)
{
	if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
	    !is_global_init(task)) {
		get_task_struct(task);
		__oom_kill_process(task, message);
	}
	return 0;
}

static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *victim = oc->chosen;
	struct mem_cgroup *oom_group;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads; just give it access to memory reserves
	 * so it can die quickly.
	 */
	task_lock(victim);
	if (task_will_free_mem(victim)) {
		mark_oom_victim(victim);
		wake_oom_reaper(victim);
		task_unlock(victim);
		put_task_struct(victim);
		return;
	}
	task_unlock(victim);

	if (__ratelimit(&oom_rs))
		dump_header(oc, victim);

	/*
	 * Do we need to kill the entire memory cgroup?
	 * Or even one of the ancestor memory cgroups?
	 * Check this out before killing the victim task.
	 */
	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);

	__oom_kill_process(victim, message);

	/*
	 * If necessary, kill all tasks in the selected memory cgroup.
	 */
	if (oom_group) {
		mem_cgroup_print_oom_group(oom_group);
		mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
				      (void *)message);
		mem_cgroup_put(oom_group);
	}
}

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (oc->constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
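
/*
 * A minimal sketch of how a driver could hook this chain (my_oom_notify,
 * my_oom_nb and my_driver_shrink_caches are hypothetical names, not part
 * of this file). The callback receives a pointer to the 'freed' counter
 * used by out_of_memory() and should add the number of pages it released:
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += my_driver_shrink_caches();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);
 *
 * If the chain reports any progress, out_of_memory() skips the kill for
 * that invocation (see the freed > 0 check below).
 */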

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	unsigned long freed = 0;

	if (oom_killer_disabled)
		return false;

	if (!is_memcg_oom(oc)) {
		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
		if (freed > 0)
			/* Got some memory back in the last second. */
			return true;
	}

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it. The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		wake_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory lost its gfp context so we have to
	 * make sure to exclude the 0 mask - all other users should have at
	 * least ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has
	 * to invoke the OOM killer even if it is a GFP_NOFS allocation.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
	oc->constraint = constrained_alloc(oc);
	if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc);

	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
	    current->mm && !oom_unkillable_task(current) &&
	    oom_cpuset_eligible(current, oc) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oc->chosen = current;
		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	select_bad_process(oc);
	/* Found nothing?!?! */
	if (!oc->chosen) {
		dump_header(oc, NULL);
		pr_warn("Out of memory and no killable processes...\n");
		/*
		 * If we got here due to an actual allocation at the
		 * system level, we cannot survive this and will enter
		 * an endless loop in the allocator. Bail out now.
		 */
		if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
			panic("System is deadlocked on memory\n");
	}
	if (oc->chosen && oc->chosen != (void *)-1UL)
		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
				 "Memory cgroup out of memory");
	return !!oc->chosen;
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
 * killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = NULL,
		.gfp_mask = 0,
		.order = 0,
	};

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (!mutex_trylock(&oom_lock))
		return;
	out_of_memory(&oc);
	mutex_unlock(&oom_lock);
}