// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>
#include "internal.h"
#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;

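/*
 * All three knobs are exposed under /proc/sys/vm/ (documented in
 * Documentation/admin-guide/sysctl/vm.rst).  An illustrative shell sketch:
 *
 *	echo 1 > /proc/sys/vm/panic_on_oom             # panic instead of killing
 *	echo 1 > /proc/sys/vm/oom_kill_allocating_task # kill the allocating task
 *	echo 0 > /proc/sys/vm/oom_dump_tasks           # suppress the task dump
 */
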
/*
 * Serializes oom killer invocations (out_of_memory()) from all contexts to
 * prevent overeager oom killing (e.g. when the oom killer is invoked
 * from different domains).
 *
 * oom_killer_disable() relies on this lock to stabilize oom_killer_disabled
 * and mark_oom_victim().
 */
DEFINE_MUTEX(oom_lock);

static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}

#ifdef CONFIG_NUMA
/**
 * oom_cpuset_eligible() - check task eligibility for kill
 * @start: task struct of the task to consider
 * @oc: pointer to struct oom_control
 *
 * Task eligibility is determined by whether or not a candidate task, @start,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 *
 * This function assumes oom-killer context and that 'current' has triggered
 * the oom-killer.
 */
static bool oom_cpuset_eligible(struct task_struct *start,
				struct oom_control *oc)
{
	struct task_struct *tsk;
	bool ret = false;
	const nodemask_t *mask = oc->nodemask;

	if (is_memcg_oom(oc))
		return true;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy-constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current's, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_nodemask_intersects(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy-constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * kthread_use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}

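/*
 * Usage sketch (illustrative, not part of the kernel source): a successful
 * return comes with task_lock() held on the returned thread, so callers
 * must pair it with task_unlock(), as oom_badness() below does:
 *
 *	struct task_struct *t = find_lock_task_mm(p);
 *
 *	if (t) {
 *		unsigned long rss = get_mm_rss(t->mm);	// t->mm stable under task_lock
 *		task_unlock(t);
 *	}
 */
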
/*
 * order == -1 means the oom kill was requested by sysrq; otherwise the
 * order is only used for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

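/*
 * For reference (a usage note, assuming a kernel with CONFIG_MAGIC_SYSRQ):
 * the manual oom kill handler in drivers/tty/sysrq.c queues an oom_control
 * with .order = -1, so a forced kill can be triggered from userspace with:
 *
 *	echo f > /proc/sysrq-trigger
 */
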
/* return true if the task is not adequate as a candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;
	return false;
}

/*
 * Print out unreclaimable slabs info when the amount of unreclaimable slab
 * memory is greater than all user memory (LRU pages).
 */
static bool is_dump_unreclaim_slabs(void)
{
	unsigned long nr_lru;

	nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
		 global_node_page_state(NR_INACTIVE_ANON) +
		 global_node_page_state(NR_ACTIVE_FILE) +
		 global_node_page_state(NR_INACTIVE_FILE) +
		 global_node_page_state(NR_ISOLATED_ANON) +
		 global_node_page_state(NR_ISOLATED_FILE) +
		 global_node_page_state(NR_UNEVICTABLE);

	return (global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B) > nr_lru);
}

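/*
 * Worked example (illustrative numbers): with 4 KiB pages, 2 GiB of
 * unreclaimable slab is 524288 pages.  If the LRU lists above only add up
 * to, say, 393216 pages (~1.5 GiB), the check fires and the slab dump is
 * included in the OOM report.
 */
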
/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness score we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
long oom_badness(struct task_struct *p, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p))
		return LONG_MIN;

	p = find_lock_task_mm(p);
	if (!p)
		return LONG_MIN;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable or have already been oom reaped or are in
	 * the middle of a vfork.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return LONG_MIN;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		mm_pgtables_bytes(p->mm) / PAGE_SIZE;
	task_unlock(p);

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	return points;
}

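/*
 * Worked example (illustrative numbers): on a machine where totalpages is
 * 1,000,000 (~4 GiB with 4 KiB pages), each oom_score_adj unit is worth
 * totalpages / 1000 = 1000 points.  A task using 100,000 pages of
 * rss+swap+pagetables with oom_score_adj = 500 scores
 * 100000 + 500 * 1000 = 600000, while the same task with
 * oom_score_adj = -500 scores -400000.  Userspace can protect a task
 * entirely via the per-process knob:
 *
 *	echo -1000 > /proc/$PID/oom_score_adj	# OOM_SCORE_ADJ_MIN
 */
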
static const char * const oom_constraint_text[] = {
	[CONSTRAINT_NONE] = "CONSTRAINT_NONE",
	[CONSTRAINT_CPUSET] = "CONSTRAINT_CPUSET",
	[CONSTRAINT_MEMORY_POLICY] = "CONSTRAINT_MEMORY_POLICY",
	[CONSTRAINT_MEMCG] = "CONSTRAINT_MEMCG",
};

/*
 * Determine the type of allocation constraint.
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type highest_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	if (is_memcg_oom(oc)) {
		oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
		return CONSTRAINT_MEMCG;
	}

	/* Default to all available memory */
	oc->totalpages = totalram_pages() + total_swap_pages;

	if (!IS_ENABLED(CONFIG_NUMA))
		return CONSTRAINT_NONE;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * We reach here only when __GFP_NOFAIL is used, so we should avoid
	 * killing current.  We have to fall back to a random task kill in
	 * this case.  Ideally this would be CONSTRAINT_THISNODE, but there
	 * is no way to handle that for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check whether this allocation failure is caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			highest_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}

static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	long points;

	if (oom_unkillable_task(task))
		goto next;

	/* p may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_SKIP, because the chances that it would release
	 * any memory are quite low.
	 */
	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
			goto next;
		goto abort;
	}

	/*
	 * If the task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = LONG_MAX;
		goto select;
	}

	points = oom_badness(task, oc->totalpages);
	if (points == LONG_MIN || points < oc->chosen_points)
		goto next;

select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen = (void *)-1UL;
	return 1;
}

/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. If the scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
	oc->chosen_points = LONG_MIN;

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}
}

static int dump_task(struct task_struct *p, void *arg)
{
	struct oom_control *oc = arg;
	struct task_struct *task;

	if (oom_unkillable_task(p))
		return 0;

	/* p may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(p, oc))
		return 0;

	task = find_lock_task_mm(p);
	if (!task) {
		/*
		 * This is a kthread or all of p's threads have already
		 * detached their mm's.  There's no need to report
		 * them; they can't be oom killed anyway.
		 */
		return 0;
	}

	pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu         %5hd %s\n",
		task->pid, from_kuid(&init_user_ns, task_uid(task)),
		task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
		mm_pgtables_bytes(task->mm),
		get_mm_counter(task->mm, MM_SWAPENTS),
		task->signal->oom_score_adj, task->comm);
	task_unlock(task);

	return 0;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @oc: pointer to struct oom_control
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss,
 * pgtables_bytes, swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct oom_control *oc)
{
	pr_info("Tasks state (memory values in pages):\n");
	pr_info("[  pid  ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name\n");

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			dump_task(p, oc);
		rcu_read_unlock();
	}
}

static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
{
	/* one line summary of the oom killer context. */
	pr_info("oom-kill:constraint=%s,nodemask=%*pbl",
			oom_constraint_text[oc->constraint],
			nodemask_pr_args(oc->nodemask));
	cpuset_print_current_mems_allowed();
	mem_cgroup_print_oom_context(oc->memcg, victim);
	pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid,
		from_kuid(&init_user_ns, task_uid(victim)));
}

static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
		current->signal->oom_score_adj);
	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
		pr_warn("COMPACTION is disabled!!!\n");

	dump_stack();
	if (is_memcg_oom(oc))
		mem_cgroup_print_oom_meminfo(oc->memcg);
	else {
		show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
		if (is_dump_unreclaim_slabs())
			dump_unreclaimable_slab();
	}
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc);
	if (p)
		dump_oom_summary(oc, p);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;

#define K(x) ((x) << (PAGE_SHIFT-10))

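/*
 * K() converts a page count to KiB: with PAGE_SHIFT == 12 (4 KiB pages),
 * K(x) == x << 2, so e.g. K(256) == 1024 kB.  It is used for the
 * human-readable rss numbers printed below.
 */
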
/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}

#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

bool __oom_reap_task_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	bool ret = true;

	/*
	 * Tell all users of get_user/copy_from_user etc... that the content
	 * is no longer stable. No barriers really needed because unmapping
	 * should imply barriers already and the reader would hit a page fault
	 * if it stumbled over reaped memory.
	 */
	set_bit(MMF_UNSTABLE, &mm->flags);

	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
		if (!can_madv_lru_vma(vma))
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
			struct mmu_notifier_range range;
			struct mmu_gather tlb;

			mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
						vma, mm, vma->vm_start,
						vma->vm_end);
			tlb_gather_mmu(&tlb, mm, range.start, range.end);
			if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
				tlb_finish_mmu(&tlb, range.start, range.end);
				ret = false;
				continue;
			}
			unmap_page_range(&tlb, vma, range.start, range.end, NULL);
			mmu_notifier_invalidate_range_end(&range);
			tlb_finish_mmu(&tlb, range.start, range.end);
		}
	}

	return ret;
}

/*
 * Reaps the address space of the given task.
 *
 * Returns true on success and false if none or only part of the address space
 * could be reclaimed, in which case the caller should retry later.
 */
static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	bool ret = true;

	if (!mmap_read_trylock(mm)) {
		trace_skip_task_reaping(tsk->pid);
		return false;
	}

	/*
	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
	 * under mmap_lock for reading because it serializes against the
	 * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap().
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		trace_skip_task_reaping(tsk->pid);
		goto out_unlock;
	}

	trace_start_task_reaping(tsk->pid);

	/* Failed to reap part of the address space. Try again later. */
	ret = __oom_reap_task_mm(mm);
	if (!ret)
		goto out_finish;

	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
out_finish:
	trace_finish_task_reaping(tsk->pid);
out_unlock:
	mmap_read_unlock(mm);

	return ret;
}

#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the mmap_read_trylock(mm) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES ||
	    test_bit(MMF_OOM_SKIP, &mm->flags))
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	sched_show_task(tsk);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from the OOM killer because it has either been reaped
	 * or somebody is stuck and cannot call mmap_write_unlock(mm).
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}

static int oom_reaper(void *unused)
{
	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}

static void wake_oom_reaper(struct task_struct *tsk)
{
	/* mm is already queued? */
	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	trace_wake_reaper(tsk->pid);
	wake_up(&oom_reaper_wait);
}

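/*
 * wake_oom_reaper() and oom_reaper() form a minimal single-consumer work
 * queue: a spinlock-protected intrusive singly-linked list plus a wait
 * queue.  A standalone sketch of the same pattern (illustrative only;
 * "struct work_item", "pending" and "pending_lock" are hypothetical names):
 *
 *	struct work_item { struct work_item *next; };
 *	static struct work_item *pending;
 *	static DEFINE_SPINLOCK(pending_lock);
 *	static DECLARE_WAIT_QUEUE_HEAD(pending_wait);
 *
 *	static void push(struct work_item *w)
 *	{
 *		spin_lock(&pending_lock);
 *		w->next = pending;		// push onto the list head
 *		pending = w;
 *		spin_unlock(&pending_lock);
 *		wake_up(&pending_wait);		// kick the consumer thread
 *	}
 */
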
static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	return 0;
}
subsys_initcall(oom_init)
#else
static inline void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non-NULL and the caller has to guarantee it is stable
 * (either under task_lock or operating on current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
		mmgrab(tsk->signal->oom_mm);
		set_bit(MMF_OOM_VICTIM, &mm->flags);
	}

	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen, because otherwise the OOM killer would not be able
	 * to free any memory and would livelock. freezing_slow_path will tell
	 * the freezer that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
	trace_mark_victim(tsk->pid);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
	pr_info("OOM killer enabled.\n");
}

/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result. Any
 * new usage of this function should be discussed with MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
	signed long ret;

	/*
	 * Make sure to not race with an ongoing OOM killer. Check that
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	ret = wait_event_interruptible_timeout(oom_victims_wait,
			!atomic_read(&oom_victims), timeout);
	if (ret <= 0) {
		oom_killer_enable();
		return false;
	}
	pr_info("OOM killer disabled.\n");

	return true;
}

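/*
 * Usage note (hedged): the suspend/hibernation freezer is the main caller;
 * kernel/power/process.c disables the OOM killer after freezing user tasks
 * and re-enables it on thaw.  A sketch of the calling pattern:
 *
 *	if (!oom_killer_disable(msecs_to_jiffies(20 * MSEC_PER_SEC)))
 *		return -EBUSY;		// victims did not exit in time
 *	...
 *	oom_killer_enable();
 */
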
static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	if (sig->flags & SIGNAL_GROUP_COREDUMP)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}

/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * Caller has to make sure that task->mm is stable (hold task_lock or
 * operate on current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without mm, because they might have passed exit_mm and
	 * exit_oom_victim. oom_reaper could have rescued that but do not rely
	 * on that for now. We can consider find_lock_task_mm in future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there is
	 * only a small chance it will free any more.
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well, to make sure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}

static void __oom_kill_process(struct task_struct *victim, const char *message)
{
	struct task_struct *p;
	struct mm_struct *mm;
	bool can_oom_reap = true;

	p = find_lock_task_mm(victim);
	if (!p) {
		pr_info("%s: OOM victim %d (%s) is already exiting. Skip killing the task\n",
			message, task_pid_nr(victim), victim->comm);
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	mmgrab(mm);

	/* Raise event before sending signal: task reaper must see this */
	count_vm_event(OOM_KILL);
	memcg_memory_event_mm(mm, MEMCG_OOM_KILL);

	/*
	 * We should send SIGKILL before granting access to memory reserves
	 * in order to prevent the OOM victim from depleting the memory
	 * reserves from the user space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
	mark_oom_victim(victim);
	pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB, UID:%u pgtables:%lukB oom_score_adj:%hd\n",
		message, task_pid_nr(victim), victim->comm, K(mm->total_vm),
		K(get_mm_counter(mm, MM_ANONPAGES)),
		K(get_mm_counter(mm, MM_FILEPAGES)),
		K(get_mm_counter(mm, MM_SHMEMPAGES)),
		from_kuid(&init_user_ns, task_uid(victim)),
		mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups,
	 * if any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_lock livelock when
	 * an oom killed thread cannot exit because it requires the semaphore
	 * and it's contended by another thread trying to allocate memory
	 * itself.  That thread will now get access to memory reserves since
	 * it has a pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			set_bit(MMF_OOM_SKIP, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No kthread_use_mm() user needs to read from the userspace,
		 * so it is ok to reap the mm.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		wake_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
#undef K

/*
 * Kill the provided task unless it is protected by setting
 * oom_score_adj to OOM_SCORE_ADJ_MIN.
 */
static int oom_kill_memcg_member(struct task_struct *task, void *message)
{
	if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
	    !is_global_init(task)) {
		get_task_struct(task);
		__oom_kill_process(task, message);
	}
	return 0;
}

static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *victim = oc->chosen;
	struct mem_cgroup *oom_group;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just give it access to memory reserves
	 * so it can die quickly.
	 */
	task_lock(victim);
	if (task_will_free_mem(victim)) {
		mark_oom_victim(victim);
		wake_oom_reaper(victim);
		task_unlock(victim);
		put_task_struct(victim);
		return;
	}
	task_unlock(victim);

	if (__ratelimit(&oom_rs))
		dump_header(oc, victim);

	/*
	 * Do we need to kill the entire memory cgroup?
	 * Or even one of the ancestor memory cgroups?
	 * Check this out before killing the victim task.
	 */
	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);

	__oom_kill_process(victim, message);

	/*
	 * If necessary, kill all tasks in the selected memory cgroup.
	 */
	if (oom_group) {
		mem_cgroup_print_oom_group(oom_group);
		mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
				      (void *)message);
		mem_cgroup_put(oom_group);
	}
}

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (oc->constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

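/*
 * Example (an illustrative sketch, not from the kernel tree): a driver can
 * register on oom_notify_list to drop a private cache before a victim is
 * chosen.  The callback receives a pointer to the "freed" counter used in
 * out_of_memory() below; "my_cache_shrink" is a hypothetical helper that
 * returns the number of pages it released.
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += my_cache_shrink();	// hypothetical cache dropper
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);	// e.g. from module init
 */
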
/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR trying to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	unsigned long freed = 0;

	if (oom_killer_disabled)
		return false;

	if (!is_memcg_oom(oc)) {
		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
		if (freed > 0)
			/* Got some memory back in the last second. */
			return true;
	}

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		wake_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory lost its gfp context so we have to
	 * make sure to exclude the 0 mask - all other users should have at
	 * least ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has
	 * to invoke the OOM killer even if it is a GFP_NOFS allocation.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
	oc->constraint = constrained_alloc(oc);
	if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc);

	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
	    current->mm && !oom_unkillable_task(current) &&
	    oom_cpuset_eligible(current, oc) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oc->chosen = current;
		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	select_bad_process(oc);
	/* Found nothing?!?! */
	if (!oc->chosen) {
		dump_header(oc, NULL);
		pr_warn("Out of memory and no killable processes...\n");
		/*
		 * If we got here due to an actual allocation at the
		 * system level, we cannot survive this and will enter
		 * an endless loop in the allocator. Bail out now.
		 */
		if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
			panic("System is deadlocked on memory\n");
	}
	if (oc->chosen && oc->chosen != (void *)-1UL)
		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
				 "Memory cgroup out of memory");
	return !!oc->chosen;
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
 * killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = NULL,
		.gfp_mask = 0,
		.order = 0,
	};

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (!mutex_trylock(&oom_lock))
		return;
	out_of_memory(&oc);
	mutex_unlock(&oom_lock);
}
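
/*
 * Calling-context note (hedged): architecture page-fault handlers reach
 * this function when handle_mm_fault() returns VM_FAULT_OOM, e.g. via
 * mm_fault_error() on x86.  The zeroed gfp_mask in the oom_control above
 * is what the "exclude the 0 mask" comment in out_of_memory() refers to.
 */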
v4.10.11
 
   1/*
   2 *  linux/mm/oom_kill.c
   3 * 
   4 *  Copyright (C)  1998,2000  Rik van Riel
   5 *	Thanks go out to Claus Fischer for some serious inspiration and
   6 *	for goading me into coding this file...
   7 *  Copyright (C)  2010  Google, Inc.
   8 *	Rewritten by David Rientjes
   9 *
  10 *  The routines in this file are used to kill a process when
  11 *  we're seriously out of memory. This gets called from __alloc_pages()
  12 *  in mm/page_alloc.c when we really run out of memory.
  13 *
  14 *  Since we won't call these routines often (on a well-configured
  15 *  machine) this file will double as a 'coding guide' and a signpost
  16 *  for newbie kernel hackers. It features several pointers to major
  17 *  kernel subsystems and hints as to where to find out what things do.
  18 */
  19
  20#include <linux/oom.h>
  21#include <linux/mm.h>
  22#include <linux/err.h>
  23#include <linux/gfp.h>
  24#include <linux/sched.h>
 
 
 
 
  25#include <linux/swap.h>
  26#include <linux/timex.h>
  27#include <linux/jiffies.h>
  28#include <linux/cpuset.h>
  29#include <linux/export.h>
  30#include <linux/notifier.h>
  31#include <linux/memcontrol.h>
  32#include <linux/mempolicy.h>
  33#include <linux/security.h>
  34#include <linux/ptrace.h>
  35#include <linux/freezer.h>
  36#include <linux/ftrace.h>
  37#include <linux/ratelimit.h>
  38#include <linux/kthread.h>
  39#include <linux/init.h>
 
  40
  41#include <asm/tlb.h>
  42#include "internal.h"
 
  43
  44#define CREATE_TRACE_POINTS
  45#include <trace/events/oom.h>
  46
  47int sysctl_panic_on_oom;
  48int sysctl_oom_kill_allocating_task;
  49int sysctl_oom_dump_tasks = 1;
  50
 
 
 
 
 
 
 
 
  51DEFINE_MUTEX(oom_lock);
  52
 
 
 
 
 
  53#ifdef CONFIG_NUMA
  54/**
  55 * has_intersects_mems_allowed() - check task eligiblity for kill
  56 * @start: task struct of which task to consider
  57 * @mask: nodemask passed to page allocator for mempolicy ooms
  58 *
  59 * Task eligibility is determined by whether or not a candidate task, @tsk,
  60 * shares the same mempolicy nodes as current if it is bound by such a policy
  61 * and whether or not it has the same set of allowed cpuset nodes.
 
 
 
  62 */
  63static bool has_intersects_mems_allowed(struct task_struct *start,
  64					const nodemask_t *mask)
  65{
  66	struct task_struct *tsk;
  67	bool ret = false;
 
 
 
 
  68
  69	rcu_read_lock();
  70	for_each_thread(start, tsk) {
  71		if (mask) {
  72			/*
  73			 * If this is a mempolicy constrained oom, tsk's
  74			 * cpuset is irrelevant.  Only return true if its
  75			 * mempolicy intersects current, otherwise it may be
  76			 * needlessly killed.
  77			 */
  78			ret = mempolicy_nodemask_intersects(tsk, mask);
  79		} else {
  80			/*
  81			 * This is not a mempolicy constrained oom, so only
  82			 * check the mems of tsk's cpuset.
  83			 */
  84			ret = cpuset_mems_allowed_intersects(current, tsk);
  85		}
  86		if (ret)
  87			break;
  88	}
  89	rcu_read_unlock();
  90
  91	return ret;
  92}
  93#else
  94static bool has_intersects_mems_allowed(struct task_struct *tsk,
  95					const nodemask_t *mask)
  96{
  97	return true;
  98}
  99#endif /* CONFIG_NUMA */
 100
 101/*
 102 * The process p may have detached its own ->mm while exiting or through
 103 * use_mm(), but one or more of its subthreads may still have a valid
 104 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 105 * task_lock() held.
 106 */
 107struct task_struct *find_lock_task_mm(struct task_struct *p)
 108{
 109	struct task_struct *t;
 110
 111	rcu_read_lock();
 112
 113	for_each_thread(p, t) {
 114		task_lock(t);
 115		if (likely(t->mm))
 116			goto found;
 117		task_unlock(t);
 118	}
 119	t = NULL;
 120found:
 121	rcu_read_unlock();
 122
 123	return t;
 124}
 125
 126/*
 127 * order == -1 means the oom kill is required by sysrq, otherwise only
 128 * for display purposes.
 129 */
 130static inline bool is_sysrq_oom(struct oom_control *oc)
 131{
 132	return oc->order == -1;
 133}
 134
 135static inline bool is_memcg_oom(struct oom_control *oc)
 136{
 137	return oc->memcg != NULL;
 138}
 139
 140/* return true if the task is not adequate as candidate victim task. */
 141static bool oom_unkillable_task(struct task_struct *p,
 142		struct mem_cgroup *memcg, const nodemask_t *nodemask)
 143{
 144	if (is_global_init(p))
 145		return true;
 146	if (p->flags & PF_KTHREAD)
 147		return true;
 
 
 148
 149	/* When mem_cgroup_out_of_memory() and p is not member of the group */
 150	if (memcg && !task_in_mem_cgroup(p, memcg))
 151		return true;
 
 
 
 
 152
 153	/* p may not have freeable memory in nodemask */
 154	if (!has_intersects_mems_allowed(p, nodemask))
 155		return true;
 
 
 
 
 156
 157	return false;
 158}
 159
 160/**
 161 * oom_badness - heuristic function to determine which candidate task to kill
 162 * @p: task struct of which task we should calculate
 163 * @totalpages: total present RAM allowed for page allocation
 164 *
 165 * The heuristic for determining which task to kill is made to be as simple and
 166 * predictable as possible.  The goal is to return the highest value for the
 167 * task consuming the most memory to avoid subsequent oom failures.
 168 */
 169unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
 170			  const nodemask_t *nodemask, unsigned long totalpages)
 171{
 172	long points;
 173	long adj;
 174
 175	if (oom_unkillable_task(p, memcg, nodemask))
 176		return 0;
 177
 178	p = find_lock_task_mm(p);
 179	if (!p)
 180		return 0;
 181
 182	/*
 183	 * Do not even consider tasks which are explicitly marked oom
 184	 * unkillable or have been already oom reaped or the are in
 185	 * the middle of vfork
 186	 */
 187	adj = (long)p->signal->oom_score_adj;
 188	if (adj == OOM_SCORE_ADJ_MIN ||
 189			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
 190			in_vfork(p)) {
 191		task_unlock(p);
 192		return 0;
 193	}
 194
 195	/*
 196	 * The baseline for the badness score is the proportion of RAM that each
 197	 * task's rss, pagetable and swap space use.
 198	 */
 199	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
 200		atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
 201	task_unlock(p);
 202
 203	/*
 204	 * Root processes get 3% bonus, just like the __vm_enough_memory()
 205	 * implementation used by LSMs.
 206	 */
 207	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
 208		points -= (points * 3) / 100;
 209
 210	/* Normalize to oom_score_adj units */
 211	adj *= totalpages / 1000;
 212	points += adj;
 213
 214	/*
 215	 * Never return 0 for an eligible task regardless of the root bonus and
 216	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
 217	 */
 218	return points > 0 ? points : 1;
 219}
 220
 221enum oom_constraint {
 222	CONSTRAINT_NONE,
 223	CONSTRAINT_CPUSET,
 224	CONSTRAINT_MEMORY_POLICY,
 225	CONSTRAINT_MEMCG,
 226};
 227
 228/*
 229 * Determine the type of allocation constraint.
 230 */
 231static enum oom_constraint constrained_alloc(struct oom_control *oc)
 232{
 233	struct zone *zone;
 234	struct zoneref *z;
 235	enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
 236	bool cpuset_limited = false;
 237	int nid;
 238
 239	if (is_memcg_oom(oc)) {
 240		oc->totalpages = mem_cgroup_get_limit(oc->memcg) ?: 1;
 241		return CONSTRAINT_MEMCG;
 242	}
 243
 244	/* Default to all available memory */
 245	oc->totalpages = totalram_pages + total_swap_pages;
 246
 247	if (!IS_ENABLED(CONFIG_NUMA))
 248		return CONSTRAINT_NONE;
 249
 250	if (!oc->zonelist)
 251		return CONSTRAINT_NONE;
 252	/*
 253	 * Reach here only when __GFP_NOFAIL is used. So, we should avoid
 254	 * to kill current.We have to random task kill in this case.
 255	 * Hopefully, CONSTRAINT_THISNODE...but no way to handle it, now.
 256	 */
 257	if (oc->gfp_mask & __GFP_THISNODE)
 258		return CONSTRAINT_NONE;
 259
 260	/*
 261	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
 262	 * the page allocator means a mempolicy is in effect.  Cpuset policy
 263	 * is enforced in get_page_from_freelist().
 264	 */
 265	if (oc->nodemask &&
 266	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
 267		oc->totalpages = total_swap_pages;
 268		for_each_node_mask(nid, *oc->nodemask)
 269			oc->totalpages += node_spanned_pages(nid);
 270		return CONSTRAINT_MEMORY_POLICY;
 271	}
 272
 273	/* Check this allocation failure is caused by cpuset's wall function */
 274	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
 275			high_zoneidx, oc->nodemask)
 276		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
 277			cpuset_limited = true;
 278
 279	if (cpuset_limited) {
 280		oc->totalpages = total_swap_pages;
 281		for_each_node_mask(nid, cpuset_current_mems_allowed)
 282			oc->totalpages += node_spanned_pages(nid);
 283		return CONSTRAINT_CPUSET;
 284	}
 285	return CONSTRAINT_NONE;
 286}
 287
 288static int oom_evaluate_task(struct task_struct *task, void *arg)
 289{
 290	struct oom_control *oc = arg;
 291	unsigned long points;
 292
 293	if (oom_unkillable_task(task, NULL, oc->nodemask))
 
 
 
 
 294		goto next;
 295
 296	/*
 297	 * This task already has access to memory reserves and is being killed.
 298	 * Don't allow any other task to have access to the reserves unless
 299	 * the task has MMF_OOM_SKIP because chances that it would release
 300	 * any memory is quite low.
 301	 */
 302	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
 303		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
 304			goto next;
 305		goto abort;
 306	}
 307
 308	/*
 309	 * If task is allocating a lot of memory and has been marked to be
 310	 * killed first if it triggers an oom, then select it.
 311	 */
 312	if (oom_task_origin(task)) {
 313		points = ULONG_MAX;
 314		goto select;
 315	}
 316
 317	points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
 318	if (!points || points < oc->chosen_points)
 319		goto next;
 320
 321	/* Prefer thread group leaders for display purposes */
 322	if (points == oc->chosen_points && thread_group_leader(oc->chosen))
 323		goto next;
 324select:
 325	if (oc->chosen)
 326		put_task_struct(oc->chosen);
 327	get_task_struct(task);
 328	oc->chosen = task;
 329	oc->chosen_points = points;
 330next:
 331	return 0;
 332abort:
 333	if (oc->chosen)
 334		put_task_struct(oc->chosen);
 335	oc->chosen = (void *)-1UL;
 336	return 1;
 337}
 338
 339/*
 340 * Simple selection loop. We choose the process with the highest number of
 341 * 'points'. In case scan was aborted, oc->chosen is set to -1.
 342 */
 343static void select_bad_process(struct oom_control *oc)
 344{
 
 
 345	if (is_memcg_oom(oc))
 346		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
 347	else {
 348		struct task_struct *p;
 349
 350		rcu_read_lock();
 351		for_each_process(p)
 352			if (oom_evaluate_task(p, oc))
 353				break;
 354		rcu_read_unlock();
 355	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 356
 357	oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
 
 
 
 
 
 
 
 
 358}
 359
 360/**
 361 * dump_tasks - dump current memory state of all system tasks
 362 * @memcg: current's memory controller, if constrained
 363 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 364 *
 365 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 366 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 367 * are not shown.
 368 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 369 * swapents, oom_score_adj value, and name.
 370 */
 371static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
 372{
 373	struct task_struct *p;
 374	struct task_struct *task;
 375
 376	pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes nr_pmds swapents oom_score_adj name\n");
 377	rcu_read_lock();
 378	for_each_process(p) {
 379		if (oom_unkillable_task(p, memcg, nodemask))
 380			continue;
 381
 382		task = find_lock_task_mm(p);
 383		if (!task) {
 384			/*
 385			 * This is a kthread or all of p's threads have already
 386			 * detached their mm's.  There's no need to report
 387			 * them; they can't be oom killed anyway.
 388			 */
 389			continue;
 390		}
 391
 392		pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu         %5hd %s\n",
 393			task->pid, from_kuid(&init_user_ns, task_uid(task)),
 394			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
 395			atomic_long_read(&task->mm->nr_ptes),
 396			mm_nr_pmds(task->mm),
 397			get_mm_counter(task->mm, MM_SWAPENTS),
 398			task->signal->oom_score_adj, task->comm);
 399		task_unlock(task);
 400	}
 401	rcu_read_unlock();
 402}
 403
 404static void dump_header(struct oom_control *oc, struct task_struct *p)
 405{
 406	nodemask_t *nm = (oc->nodemask) ? oc->nodemask : &cpuset_current_mems_allowed;
 407
 408	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=%*pbl, order=%d, oom_score_adj=%hd\n",
 409		current->comm, oc->gfp_mask, &oc->gfp_mask,
 410		nodemask_pr_args(nm), oc->order,
 411		current->signal->oom_score_adj);
 412	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
 413		pr_warn("COMPACTION is disabled!!!\n");
 414
 415	cpuset_print_current_mems_allowed();
 416	dump_stack();
 417	if (oc->memcg)
 418		mem_cgroup_print_oom_info(oc->memcg, p);
 419	else
 420		show_mem(SHOW_MEM_FILTER_NODES);
 
 
 
 421	if (sysctl_oom_dump_tasks)
 422		dump_tasks(oc->memcg, oc->nodemask);
 
 
 423}
 424
 425/*
 426 * Number of OOM victims in flight
 427 */
 428static atomic_t oom_victims = ATOMIC_INIT(0);
 429static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);
 430
 431static bool oom_killer_disabled __read_mostly;
 432
 433#define K(x) ((x) << (PAGE_SHIFT-10))
 434
 435/*
 436 * task->mm can be NULL if the task is the exited group leader.  So to
 437 * determine whether the task is using a particular mm, we examine all the
 438 * task's threads: if one of those is using this mm then this task was also
 439 * using it.
 440 */
 441bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
 442{
 443	struct task_struct *t;
 444
 445	for_each_thread(p, t) {
 446		struct mm_struct *t_mm = READ_ONCE(t->mm);
 447		if (t_mm)
 448			return t_mm == mm;
 449	}
 450	return false;
 451}
 452
 453
 454#ifdef CONFIG_MMU
 455/*
 456 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 457 * victim (if that is possible) to help the OOM killer to move on.
 458 */
 459static struct task_struct *oom_reaper_th;
 460static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
 461static struct task_struct *oom_reaper_list;
 462static DEFINE_SPINLOCK(oom_reaper_lock);
 463
 464static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
 465{
 466	struct mmu_gather tlb;
 467	struct vm_area_struct *vma;
 468	struct zap_details details = {.check_swap_entries = true,
 469				      .ignore_dirty = true};
 470	bool ret = true;
 471
 472	/*
 473	 * We have to make sure to not race with the victim exit path
 474	 * and cause premature new oom victim selection:
 475	 * __oom_reap_task_mm		exit_mm
 476	 *   mmget_not_zero
 477	 *				  mmput
 478	 *				    atomic_dec_and_test
 479	 *				  exit_oom_victim
 480	 *				[...]
 481	 *				out_of_memory
 482	 *				  select_bad_process
 483	 *				    # no TIF_MEMDIE task selects new victim
 484	 *  unmap_page_range # frees some memory
 485	 */
 486	mutex_lock(&oom_lock);
 487
 488	if (!down_read_trylock(&mm->mmap_sem)) {
 489		ret = false;
 490		goto unlock_oom;
 491	}
 492
 493	/*
 494	 * increase mm_users only after we know we will reap something so
 495	 * that the mmput_async is called only when we have reaped something
 496	 * and delayed __mmput doesn't matter that much
 497	 */
 498	if (!mmget_not_zero(mm)) {
 499		up_read(&mm->mmap_sem);
 500		goto unlock_oom;
 501	}
 502
 503	/*
 504	 * Tell all users of get_user/copy_from_user etc... that the content
 505	 * is no longer stable. No barriers really needed because unmapping
 506	 * should imply barriers already and the reader would hit a page fault
 507	 * if it stumbled over a reaped memory.
 508	 */
 509	set_bit(MMF_UNSTABLE, &mm->flags);
 510
 511	tlb_gather_mmu(&tlb, mm, 0, -1);
 512	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
 513		if (is_vm_hugetlb_page(vma))
 514			continue;
 515
 516		/*
 517		 * mlocked VMAs require explicit munlocking before unmap.
 518		 * Let's keep it simple here and skip such VMAs.
 519		 */
 520		if (vma->vm_flags & VM_LOCKED)
 521			continue;
 522
 523		/*
 524		 * Only anonymous pages have a good chance to be dropped
 525		 * without additional steps which we cannot afford as we
 526		 * are OOM already.
 527		 *
 528		 * We do not even care about fs backed pages because all
 529		 * which are reclaimable have already been reclaimed and
 530		 * we do not want to block exit_mmap by keeping mm ref
 531		 * count elevated without a good reason.
 532		 */
 533		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
 534			unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
 535					 &details);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 536	}
 537	tlb_finish_mmu(&tlb, 0, -1);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 538	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
 539			task_pid_nr(tsk), tsk->comm,
 540			K(get_mm_counter(mm, MM_ANONPAGES)),
 541			K(get_mm_counter(mm, MM_FILEPAGES)),
 542			K(get_mm_counter(mm, MM_SHMEMPAGES)));
 543	up_read(&mm->mmap_sem);
 
 
 
 544
 545	/*
 546	 * Drop our reference but make sure the mmput slow path is called from a
 547	 * different context because we shouldn't risk we get stuck there and
 548	 * put the oom_reaper out of the way.
 549	 */
 550	mmput_async(mm);
 551unlock_oom:
 552	mutex_unlock(&oom_lock);
 553	return ret;
 554}
 555
 556#define MAX_OOM_REAP_RETRIES 10
 557static void oom_reap_task(struct task_struct *tsk)
 558{
 559	int attempts = 0;
 560	struct mm_struct *mm = tsk->signal->oom_mm;
 561
 562	/* Retry the down_read_trylock(mmap_sem) a few times */
 563	while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm))
 564		schedule_timeout_idle(HZ/10);
 565
 566	if (attempts <= MAX_OOM_REAP_RETRIES)
 
 567		goto done;
 568
 569
 570	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
 571		task_pid_nr(tsk), tsk->comm);
 
 572	debug_show_all_locks();
 573
 574done:
 575	tsk->oom_reaper_list = NULL;
 576
 577	/*
 578	 * Hide this mm from OOM killer because it has been either reaped or
 579	 * somebody can't call up_write(mmap_sem).
 580	 */
 581	set_bit(MMF_OOM_SKIP, &mm->flags);
 582
 583	/* Drop a reference taken by wake_oom_reaper */
 584	put_task_struct(tsk);
 585}
 586
 587static int oom_reaper(void *unused)
 588{
 589	while (true) {
 590		struct task_struct *tsk = NULL;
 591
 592		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
 593		spin_lock(&oom_reaper_lock);
 594		if (oom_reaper_list != NULL) {
 595			tsk = oom_reaper_list;
 596			oom_reaper_list = tsk->oom_reaper_list;
 597		}
 598		spin_unlock(&oom_reaper_lock);
 599
 600		if (tsk)
 601			oom_reap_task(tsk);
 602	}
 603
 604	return 0;
 605}
 606
 607static void wake_oom_reaper(struct task_struct *tsk)
 608{
 609	if (!oom_reaper_th)
 610		return;
 611
 612	/* tsk is already queued? */
 613	if (tsk == oom_reaper_list || tsk->oom_reaper_list)
 614		return;
 615
 616	get_task_struct(tsk);
 617
 618	spin_lock(&oom_reaper_lock);
 619	tsk->oom_reaper_list = oom_reaper_list;
 620	oom_reaper_list = tsk;
 621	spin_unlock(&oom_reaper_lock);
 
 622	wake_up(&oom_reaper_wait);
 623}
 624
 625static int __init oom_init(void)
 626{
 627	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
 628	if (IS_ERR(oom_reaper_th)) {
 629		pr_err("Unable to start OOM reaper %ld. Continuing regardless\n",
 630				PTR_ERR(oom_reaper_th));
 631		oom_reaper_th = NULL;
 632	}
 633	return 0;
 634}
 635subsys_initcall(oom_init)
 636#else
 637static inline void wake_oom_reaper(struct task_struct *tsk)
 638{
 639}
 640#endif /* CONFIG_MMU */
 641
 642/**
 643 * mark_oom_victim - mark the given task as OOM victim
 644 * @tsk: task to mark
 645 *
 646 * Has to be called with oom_lock held and never after
 647 * oom has been disabled already.
 648 *
 649 * tsk->mm has to be non NULL and caller has to guarantee it is stable (either
 650 * under task_lock or operate on the current).
 651 */
 652static void mark_oom_victim(struct task_struct *tsk)
 653{
 654	struct mm_struct *mm = tsk->mm;
 655
 656	WARN_ON(oom_killer_disabled);
 657	/* OOM killer might race with memcg OOM */
 658	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
 659		return;
 660
 661	/* oom_mm is bound to the signal struct life time. */
 662	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
 663		atomic_inc(&tsk->signal->oom_mm->mm_count);
 
 
 664
 665	/*
 666	 * Make sure that the task is woken up from uninterruptible sleep
 667	 * if it is frozen, because otherwise the OOM killer would not be
 668	 * able to free any memory and could livelock. freezing_slow_path
 669	 * will tell the freezer that TIF_MEMDIE tasks should be ignored.
 670	 */
 671	__thaw_task(tsk);
 672	atomic_inc(&oom_victims);
 673}
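/*
 * The cmpxchg() above implements install-once semantics: only the first
 * caller sees oom_mm == NULL, publishes the victim's mm, and takes the
 * mm_count reference that pins the mm_struct for the lifetime of the
 * signal struct.  The same idiom in isolation (global_mm is a hypothetical
 * example pointer):
 *
 *	static struct mm_struct *global_mm;
 *
 *	if (!cmpxchg(&global_mm, NULL, mm))
 *		atomic_inc(&mm->mm_count);
 */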
 674
 675/**
 676 * exit_oom_victim - note the exit of an OOM victim
 677 */
 678void exit_oom_victim(void)
 679{
 680	clear_thread_flag(TIF_MEMDIE);
 681
 682	if (!atomic_dec_return(&oom_victims))
 683		wake_up_all(&oom_victims_wait);
 684}
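/*
 * Dropping oom_victims to zero wakes any caller blocked in
 * oom_killer_disable() below, which waits for all victims to exit.
 */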
 685
 686/**
 687 * oom_killer_enable - enable OOM killer
 688 */
 689void oom_killer_enable(void)
 690{
 691	oom_killer_disabled = false;
 692}
 693
 694/**
 695 * oom_killer_disable - disable OOM killer
 696 * @timeout: maximum timeout to wait for oom victims in jiffies
 697 *
 698 * Forces all page allocations to fail rather than trigger the OOM killer.
 699 * Will block and wait until all OOM victims are killed or the given
 700 * timeout expires.
 701 *
 702 * The function cannot be called when there are runnable user tasks because
 703 * userspace would see unexpected allocation failures as a result. Any
 704 * new usage of this function should be discussed with the MM people.
 705 *
 706 * Returns true if successful and false if the OOM killer cannot be
 707 * disabled.
 708 */
 709bool oom_killer_disable(signed long timeout)
 710{
 711	signed long ret;
 712
 713	/*
 714	 * Make sure not to race with an ongoing OOM killer invocation. Check
 715	 * that current is not killed (possibly due to sharing the victim's memory).
 716	 */
 717	if (mutex_lock_killable(&oom_lock))
 718		return false;
 719	oom_killer_disabled = true;
 720	mutex_unlock(&oom_lock);
 721
 722	ret = wait_event_interruptible_timeout(oom_victims_wait,
 723			!atomic_read(&oom_victims), timeout);
 724	if (ret <= 0) {
 725		oom_killer_enable();
 726		return false;
 727	}
 728
 729	return true;
 730}
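/*
 * The expected caller is the suspend/hibernation path, which disables the
 * oom killer once userspace is frozen.  A sketch of that usage, assuming a
 * timeout in milliseconds (freeze_timeout_msecs mirrors the knob used by
 * kernel/power/process.c):
 *
 *	if (!oom_killer_disable(msecs_to_jiffies(freeze_timeout_msecs)))
 *		return -EBUSY;
 *	...
 *	oom_killer_enable();
 */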
 731
 732static inline bool __task_will_free_mem(struct task_struct *task)
 733{
 734	struct signal_struct *sig = task->signal;
 735
 736	/*
 737	 * A coredumping process may sleep for an extended period in exit_mm(),
 738	 * so the oom killer cannot assume that the process will promptly exit
 739	 * and release memory.
 740	 */
 741	if (sig->flags & SIGNAL_GROUP_COREDUMP)
 742		return false;
 743
 744	if (sig->flags & SIGNAL_GROUP_EXIT)
 745		return true;
 746
 747	if (thread_group_empty(task) && (task->flags & PF_EXITING))
 748		return true;
 749
 750	return false;
 751}
 752
 753/*
 754 * Checks whether the given task is dying or exiting and likely to
 755 * release its address space. This means that all threads and processes
 756 * sharing the same mm have to be killed or exiting.
 757 * The caller has to make sure that task->mm is stable (hold task_lock or
 758 * operate on current).
 759 */
 760static bool task_will_free_mem(struct task_struct *task)
 761{
 762	struct mm_struct *mm = task->mm;
 763	struct task_struct *p;
 764	bool ret = true;
 765
 766	/*
 767	 * Skip tasks without an mm: the task might already have passed exit_mm
 768	 * and exit_oom_victim. The oom_reaper could have rescued that, but do not
 769	 * rely on it for now. We can consider find_lock_task_mm in the future.
 770	 */
 771	if (!mm)
 772		return false;
 773
 774	if (!__task_will_free_mem(task))
 775		return false;
 776
 777	/*
 778	 * This task has already been drained by the oom reaper, so the
 779	 * chances that it will free any more memory are slim.
 780	 */
 781	if (test_bit(MMF_OOM_SKIP, &mm->flags))
 782		return false;
 783
 784	if (atomic_read(&mm->mm_users) <= 1)
 785		return true;
 786
 787	/*
 788	 * Make sure that all tasks which share the mm with the given task
 789	 * are dying as well, to make sure that a) nobody pins its mm and
 790	 * b) the task is also reapable by the oom reaper.
 791	 */
 792	rcu_read_lock();
 793	for_each_process(p) {
 794		if (!process_shares_mm(p, mm))
 795			continue;
 796		if (same_thread_group(task, p))
 797			continue;
 798		ret = __task_will_free_mem(p);
 799		if (!ret)
 800			break;
 801	}
 802	rcu_read_unlock();
 803
 804	return ret;
 805}
 806
 807static void oom_kill_process(struct oom_control *oc, const char *message)
 808{
 809	struct task_struct *p = oc->chosen;
 810	unsigned int points = oc->chosen_points;
 811	struct task_struct *victim = p;
 812	struct task_struct *child;
 813	struct task_struct *t;
 814	struct mm_struct *mm;
 815	unsigned int victim_points = 0;
 816	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
 817					      DEFAULT_RATELIMIT_BURST);
 818	bool can_oom_reap = true;
 819
 820	/*
 821	 * If the task is already exiting, don't alarm the sysadmin or kill
 822	 * its children or threads; just set TIF_MEMDIE so it can die quickly.
 823	 */
 824	task_lock(p);
 825	if (task_will_free_mem(p)) {
 826		mark_oom_victim(p);
 827		wake_oom_reaper(p);
 828		task_unlock(p);
 829		put_task_struct(p);
 830		return;
 831	}
 832	task_unlock(p);
 833
 834	if (__ratelimit(&oom_rs))
 835		dump_header(oc, p);
 836
 837	pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
 838		message, task_pid_nr(p), p->comm, points);
 839
 840	/*
 841	 * If any of p's children has a different mm and is eligible for kill,
 842	 * the one with the highest oom_badness() score is sacrificed for its
 843	 * parent.  This attempts to lose the minimal amount of work done while
 844	 * still freeing memory.
 845	 */
 846	read_lock(&tasklist_lock);
 847	for_each_thread(p, t) {
 848		list_for_each_entry(child, &t->children, sibling) {
 849			unsigned int child_points;
 850
 851			if (process_shares_mm(child, p->mm))
 852				continue;
 853			/*
 854			 * oom_badness() returns 0 if the thread is unkillable
 855			 */
 856			child_points = oom_badness(child,
 857				oc->memcg, oc->nodemask, oc->totalpages);
 858			if (child_points > victim_points) {
 859				put_task_struct(victim);
 860				victim = child;
 861				victim_points = child_points;
 862				get_task_struct(victim);
 863			}
 864		}
 865	}
 866	read_unlock(&tasklist_lock);
 867
 868	p = find_lock_task_mm(victim);
 869	if (!p) {
 870		put_task_struct(victim);
 871		return;
 872	} else if (victim != p) {
 873		get_task_struct(p);
 874		put_task_struct(victim);
 875		victim = p;
 876	}
 877
 878	/* Get a reference to safely compare mm after task_unlock(victim) */
 879	mm = victim->mm;
 880	atomic_inc(&mm->mm_count);
 881	/*
 882	 * We should send SIGKILL before setting TIF_MEMDIE in order to prevent
 883	 * the OOM victim from depleting the memory reserves from user space
 884	 * under its control.
 885	 */
 886	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
 887	mark_oom_victim(victim);
 888	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
 889		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
 890		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
 891		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
 892		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
 893	task_unlock(victim);
 894
 895	/*
 896	 * Kill all user processes sharing victim->mm in other thread groups, if
 897	 * any.  They don't get access to memory reserves, though, to avoid
 898	 * depletion of all memory.  This prevents mm->mmap_sem livelock when an
 899	 * oom-killed thread cannot exit because it requires the semaphore and
 900	 * it is contended by another thread trying to allocate memory itself.
 901	 * That thread will now get access to memory reserves since it has a
 902	 * pending fatal signal.
 903	 */
 904	rcu_read_lock();
 905	for_each_process(p) {
 906		if (!process_shares_mm(p, mm))
 907			continue;
 908		if (same_thread_group(p, victim))
 909			continue;
 910		if (is_global_init(p)) {
 911			can_oom_reap = false;
 912			set_bit(MMF_OOM_SKIP, &mm->flags);
 913			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
 914					task_pid_nr(victim), victim->comm,
 915					task_pid_nr(p), p->comm);
 916			continue;
 917		}
 918		/*
 919	 * No use_mm() user needs to read from userspace, so we are
 920	 * OK to reap it.
 921		 */
 922		if (unlikely(p->flags & PF_KTHREAD))
 923			continue;
 924		do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
 925	}
 926	rcu_read_unlock();
 927
 928	if (can_oom_reap)
 929		wake_oom_reaper(victim);
 930
 931	mmdrop(mm);
 932	put_task_struct(victim);
 933}
 934#undef K
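/*
 * K(), #undef'd above, converts a page count to KiB for the kill message;
 * it is defined earlier in this file as:
 *
 *	#define K(x) ((x) << (PAGE_SHIFT-10))
 */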
 935
 936/*
 937 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 938 */
 939static void check_panic_on_oom(struct oom_control *oc,
 940			       enum oom_constraint constraint)
 941{
 942	if (likely(!sysctl_panic_on_oom))
 943		return;
 944	if (sysctl_panic_on_oom != 2) {
 945		/*
 946		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
 947		 * does not panic for cpuset, mempolicy, or memcg allocation
 948		 * failures.
 949		 */
 950		if (constraint != CONSTRAINT_NONE)
 951			return;
 952	}
 953	/* Do not panic for oom kills triggered by sysrq */
 954	if (is_sysrq_oom(oc))
 955		return;
 956	dump_header(oc, NULL);
 957	panic("Out of memory: %s panic_on_oom is enabled\n",
 958		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
 959}
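/*
 * The policy implemented above is controlled via /proc/sys/vm/panic_on_oom:
 * 0 kills a task (the default), 1 panics unless the OOM is constrained by
 * cpuset/mempolicy/memcg, and 2 always panics.  For example:
 *
 *	echo 1 > /proc/sys/vm/panic_on_oom
 */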
 960
 961static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
 962
 963int register_oom_notifier(struct notifier_block *nb)
 964{
 965	return blocking_notifier_chain_register(&oom_notify_list, nb);
 966}
 967EXPORT_SYMBOL_GPL(register_oom_notifier);
 968
 969int unregister_oom_notifier(struct notifier_block *nb)
 970{
 971	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
 972}
 973EXPORT_SYMBOL_GPL(unregister_oom_notifier);
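/*
 * These hooks let other subsystems release memory before a victim is
 * chosen: out_of_memory() runs the chain first (for non-memcg OOMs) and
 * skips the kill when the callbacks report freed pages.  A minimal sketch
 * of a notifier that drains a cache (my_oom_notify, my_shrink_cache and
 * my_oom_nb are hypothetical names):
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += my_shrink_cache();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);
 */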
 974
 975/**
 976 * out_of_memory - kill the "best" process when we run out of memory
 977 * @oc: pointer to struct oom_control
 978 *
 979 * If we run out of memory, we have the choice between either
 980 * killing a random task (bad), letting the system crash (worse),
 981 * or trying to be smart about which process to kill. Note that we
 982 * don't have to be perfect here; we just have to be good.
 983 */
 984bool out_of_memory(struct oom_control *oc)
 985{
 986	unsigned long freed = 0;
 987	enum oom_constraint constraint = CONSTRAINT_NONE;
 988
 989	if (oom_killer_disabled)
 990		return false;
 991
 992	if (!is_memcg_oom(oc)) {
 993		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
 994		if (freed > 0)
 995			/* Got some memory back in the last second. */
 996			return true;
 997	}
 998
 999	/*
1000	 * If current has a pending SIGKILL or is exiting, then automatically
1001	 * select it.  The goal is to allow it to allocate so that it may
1002	 * quickly exit and free its memory.
1003	 */
1004	if (task_will_free_mem(current)) {
1005		mark_oom_victim(current);
1006		wake_oom_reaper(current);
1007		return true;
1008	}
1009
1010	/*
1011	 * The OOM killer does not compensate for IO-less reclaim.
1012	 * pagefault_out_of_memory has lost its gfp context, so we have to make
1013	 * sure to exclude the 0 mask; all other users should have at least
1014	 * ___GFP_DIRECT_RECLAIM to get here.
1015	 */
1016	if (oc->gfp_mask && !(oc->gfp_mask & (__GFP_FS|__GFP_NOFAIL)))
1017		return true;
1018
1019	/*
1020	 * Check if there were limitations on the allocation (only relevant for
1021	 * NUMA and memcg) that may require different handling.
1022	 */
1023	constraint = constrained_alloc(oc);
1024	if (constraint != CONSTRAINT_MEMORY_POLICY)
1025		oc->nodemask = NULL;
1026	check_panic_on_oom(oc, constraint);
1027
1028	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
1029	    current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
1030	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
1031		get_task_struct(current);
1032		oc->chosen = current;
1033		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
1034		return true;
1035	}
1036
1037	select_bad_process(oc);
1038	/* Found nothing?!?! Either we hang forever, or we panic. */
1039	if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
1040		dump_header(oc, NULL);
1041		panic("Out of memory and no killable processes...\n");
1042	}
1043	if (oc->chosen && oc->chosen != (void *)-1UL) {
1044		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
1045				 "Memory cgroup out of memory");
1046		/*
1047		 * Give the killed process a good chance to exit before trying
1048		 * to allocate memory again.
1049		 */
1050		schedule_timeout_killable(1);
1051	}
1052	return !!oc->chosen;
1053}
1054
1055/*
1056 * The pagefault handler calls here because it is out of memory, so kill a
1057 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
1058 * killing is already in progress so do nothing.
1059 */
1060void pagefault_out_of_memory(void)
1061{
1062	struct oom_control oc = {
1063		.zonelist = NULL,
1064		.nodemask = NULL,
1065		.memcg = NULL,
1066		.gfp_mask = 0,
1067		.order = 0,
1068	};
1069
1070	if (mem_cgroup_oom_synchronize(true))
1071		return;
1072
1073	if (!mutex_trylock(&oom_lock))
1074		return;
1075	out_of_memory(&oc);
1076	mutex_unlock(&oom_lock);
1077}