v3.1
  1/*
  2 *  linux/mm/oom_kill.c
  3 * 
  4 *  Copyright (C)  1998,2000  Rik van Riel
  5 *	Thanks go out to Claus Fischer for some serious inspiration and
  6 *	for goading me into coding this file...
  7 *  Copyright (C)  2010  Google, Inc.
  8 *	Rewritten by David Rientjes
  9 *
 10 *  The routines in this file are used to kill a process when
 11 *  we're seriously out of memory. This gets called from __alloc_pages()
 12 *  in mm/page_alloc.c when we really run out of memory.
 13 *
 14 *  Since we won't call these routines often (on a well-configured
 15 *  machine) this file will double as a 'coding guide' and a signpost
 16 *  for newbie kernel hackers. It features several pointers to major
 17 *  kernel subsystems and hints as to where to find out what things do.
 18 */
 19
 20#include <linux/oom.h>
 21#include <linux/mm.h>
 22#include <linux/err.h>
 23#include <linux/gfp.h>
 24#include <linux/sched.h>
 25#include <linux/swap.h>
 26#include <linux/timex.h>
 27#include <linux/jiffies.h>
 28#include <linux/cpuset.h>
 29#include <linux/module.h>
 30#include <linux/notifier.h>
 31#include <linux/memcontrol.h>
 32#include <linux/mempolicy.h>
 33#include <linux/security.h>
 34#include <linux/ptrace.h>
 35
 36int sysctl_panic_on_oom;
 37int sysctl_oom_kill_allocating_task;
 38int sysctl_oom_dump_tasks = 1;
 39static DEFINE_SPINLOCK(zone_scan_lock);
 40
 41/**
 42 * test_set_oom_score_adj() - set current's oom_score_adj and return old value
 43 * @new_val: new oom_score_adj value
 44 *
 45 * Sets the oom_score_adj value for current to @new_val with proper
 46 * synchronization and returns the old value.  Usually used to temporarily
 47 * set a value, save the old value in the caller, and then reinstate it later.
 48 */
 49int test_set_oom_score_adj(int new_val)
 50{
 51	struct sighand_struct *sighand = current->sighand;
 52	int old_val;
 53
 54	spin_lock_irq(&sighand->siglock);
 55	old_val = current->signal->oom_score_adj;
 56	if (new_val != old_val) {
 57		if (new_val == OOM_SCORE_ADJ_MIN)
 58			atomic_inc(&current->mm->oom_disable_count);
 59		else if (old_val == OOM_SCORE_ADJ_MIN)
 60			atomic_dec(&current->mm->oom_disable_count);
 61		current->signal->oom_score_adj = new_val;
 62	}
 63	spin_unlock_irq(&sighand->siglock);
 64
 65	return old_val;
 66}
 67
 68#ifdef CONFIG_NUMA
 69/**
 70 * has_intersects_mems_allowed() - check task eligibility for kill
 71 * @tsk: task struct of which task to consider
 72 * @mask: nodemask passed to page allocator for mempolicy ooms
 73 *
 74 * Task eligibility is determined by whether or not a candidate task, @tsk,
 75 * shares the same mempolicy nodes as current if it is bound by such a policy
 76 * and whether or not it has the same set of allowed cpuset nodes.
 77 */
 78static bool has_intersects_mems_allowed(struct task_struct *tsk,
 79					const nodemask_t *mask)
 80{
 81	struct task_struct *start = tsk;
 82
 83	do {
 84		if (mask) {
 85			/*
 86			 * If this is a mempolicy constrained oom, tsk's
 87			 * cpuset is irrelevant.  Only return true if its
 88			 * mempolicy intersects current, otherwise it may be
 89			 * needlessly killed.
 90			 */
 91			if (mempolicy_nodemask_intersects(tsk, mask))
 92				return true;
 93		} else {
 94			/*
 95			 * This is not a mempolicy constrained oom, so only
 96			 * check the mems of tsk's cpuset.
 97			 */
 98			if (cpuset_mems_allowed_intersects(current, tsk))
 99				return true;
100		}
101	} while_each_thread(start, tsk);
102
103	return false;
104}
105#else
106static bool has_intersects_mems_allowed(struct task_struct *tsk,
107					const nodemask_t *mask)
108{
109	return true;
110}
111#endif /* CONFIG_NUMA */
112
113/*
114 * The process p may have detached its own ->mm while exiting or through
115 * use_mm(), but one or more of its subthreads may still have a valid
116 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
117 * task_lock() held.
118 */
119struct task_struct *find_lock_task_mm(struct task_struct *p)
120{
121	struct task_struct *t = p;
122
123	do {
124		task_lock(t);
125		if (likely(t->mm))
126			return t;
127		task_unlock(t);
128	} while_each_thread(p, t);
129
130	return NULL;
131}
132
133/* return true if the task is not adequate as candidate victim task. */
134static bool oom_unkillable_task(struct task_struct *p,
135		const struct mem_cgroup *mem, const nodemask_t *nodemask)
136{
137	if (is_global_init(p))
138		return true;
139	if (p->flags & PF_KTHREAD)
140		return true;
141
 142	/* When mem_cgroup_out_of_memory() is called and p is not a member of the group */
143	if (mem && !task_in_mem_cgroup(p, mem))
144		return true;
145
146	/* p may not have freeable memory in nodemask */
147	if (!has_intersects_mems_allowed(p, nodemask))
148		return true;
149
150	return false;
151}
152
153/**
154 * oom_badness - heuristic function to determine which candidate task to kill
155 * @p: task struct of which task we should calculate
156 * @totalpages: total present RAM allowed for page allocation
157 *
158 * The heuristic for determining which task to kill is made to be as simple and
159 * predictable as possible.  The goal is to return the highest value for the
160 * task consuming the most memory to avoid subsequent oom failures.
161 */
162unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
163		      const nodemask_t *nodemask, unsigned long totalpages)
164{
165	int points;
166
167	if (oom_unkillable_task(p, mem, nodemask))
168		return 0;
169
170	p = find_lock_task_mm(p);
171	if (!p)
172		return 0;
173
174	/*
175	 * Shortcut check for a thread sharing p->mm that is OOM_SCORE_ADJ_MIN
176	 * so the entire heuristic doesn't need to be executed for something
177	 * that cannot be killed.
178	 */
179	if (atomic_read(&p->mm->oom_disable_count)) {
180		task_unlock(p);
181		return 0;
182	}
183
184	/*
185	 * The memory controller may have a limit of 0 bytes, so avoid a divide
186	 * by zero, if necessary.
187	 */
188	if (!totalpages)
189		totalpages = 1;
190
191	/*
192	 * The baseline for the badness score is the proportion of RAM that each
193	 * task's rss, pagetable and swap space use.
194	 */
195	points = get_mm_rss(p->mm) + p->mm->nr_ptes;
196	points += get_mm_counter(p->mm, MM_SWAPENTS);
197
198	points *= 1000;
199	points /= totalpages;
200	task_unlock(p);
201
202	/*
203	 * Root processes get 3% bonus, just like the __vm_enough_memory()
204	 * implementation used by LSMs.
205	 */
206	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
207		points -= 30;
208
209	/*
210	 * /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may
211	 * either completely disable oom killing or always prefer a certain
212	 * task.
213	 */
214	points += p->signal->oom_score_adj;
215
216	/*
217	 * Never return 0 for an eligible task that may be killed since it's
218	 * possible that no single user task uses more than 0.1% of memory and
 219	 * no single admin task uses more than 3.0%.
220	 */
221	if (points <= 0)
222		return 1;
223	return (points < 1000) ? points : 1000;
224}
225
226/*
227 * Determine the type of allocation constraint.
228 */
229#ifdef CONFIG_NUMA
230static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
231				gfp_t gfp_mask, nodemask_t *nodemask,
232				unsigned long *totalpages)
233{
234	struct zone *zone;
235	struct zoneref *z;
236	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
237	bool cpuset_limited = false;
238	int nid;
239
240	/* Default to all available memory */
241	*totalpages = totalram_pages + total_swap_pages;
242
243	if (!zonelist)
244		return CONSTRAINT_NONE;
245	/*
 246	 * We reach here only when __GFP_NOFAIL is used, so we should avoid
 247	 * killing current and instead fall back to a random task kill.
 248	 * Ideally this would be CONSTRAINT_THISNODE, but there is no way to handle it yet.
249	 */
250	if (gfp_mask & __GFP_THISNODE)
251		return CONSTRAINT_NONE;
252
253	/*
254	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
255	 * the page allocator means a mempolicy is in effect.  Cpuset policy
256	 * is enforced in get_page_from_freelist().
257	 */
258	if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask)) {
259		*totalpages = total_swap_pages;
260		for_each_node_mask(nid, *nodemask)
261			*totalpages += node_spanned_pages(nid);
262		return CONSTRAINT_MEMORY_POLICY;
263	}
264
265	/* Check this allocation failure is caused by cpuset's wall function */
266	for_each_zone_zonelist_nodemask(zone, z, zonelist,
267			high_zoneidx, nodemask)
268		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
269			cpuset_limited = true;
270
271	if (cpuset_limited) {
272		*totalpages = total_swap_pages;
273		for_each_node_mask(nid, cpuset_current_mems_allowed)
274			*totalpages += node_spanned_pages(nid);
275		return CONSTRAINT_CPUSET;
276	}
277	return CONSTRAINT_NONE;
278}
279#else
280static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
281				gfp_t gfp_mask, nodemask_t *nodemask,
282				unsigned long *totalpages)
283{
284	*totalpages = totalram_pages + total_swap_pages;
285	return CONSTRAINT_NONE;
286}
287#endif
288
289/*
 290 * Simple selection loop. We choose the process with the highest
291 * number of 'points'. We expect the caller will lock the tasklist.
292 *
293 * (not docbooked, we don't want this one cluttering up the manual)
294 */
295static struct task_struct *select_bad_process(unsigned int *ppoints,
296		unsigned long totalpages, struct mem_cgroup *mem,
297		const nodemask_t *nodemask)
298{
299	struct task_struct *g, *p;
300	struct task_struct *chosen = NULL;
301	*ppoints = 0;
302
303	do_each_thread(g, p) {
304		unsigned int points;
305
306		if (p->exit_state)
307			continue;
308		if (oom_unkillable_task(p, mem, nodemask))
309			continue;
310
311		/*
312		 * This task already has access to memory reserves and is
313		 * being killed. Don't allow any other task access to the
314		 * memory reserve.
315		 *
316		 * Note: this may have a chance of deadlock if it gets
317		 * blocked waiting for another task which itself is waiting
318		 * for memory. Is there a better alternative?
319		 */
320		if (test_tsk_thread_flag(p, TIF_MEMDIE))
321			return ERR_PTR(-1UL);
322		if (!p->mm)
323			continue;
324
325		if (p->flags & PF_EXITING) {
326			/*
327			 * If p is the current task and is in the process of
328			 * releasing memory, we allow the "kill" to set
329			 * TIF_MEMDIE, which will allow it to gain access to
330			 * memory reserves.  Otherwise, it may stall forever.
331			 *
332			 * The loop isn't broken here, however, in case other
333			 * threads are found to have already been oom killed.
334			 */
335			if (p == current) {
336				chosen = p;
337				*ppoints = 1000;
338			} else {
339				/*
340				 * If this task is not being ptraced on exit,
341				 * then wait for it to finish before killing
342				 * some other task unnecessarily.
343				 */
344				if (!(p->group_leader->ptrace & PT_TRACE_EXIT))
345					return ERR_PTR(-1UL);
346			}
347		}
348
349		points = oom_badness(p, mem, nodemask, totalpages);
350		if (points > *ppoints) {
351			chosen = p;
352			*ppoints = points;
353		}
354	} while_each_thread(g, p);
355
356	return chosen;
357}
358
359/**
360 * dump_tasks - dump current memory state of all system tasks
361 * @mem: current's memory controller, if constrained
362 * @nodemask: nodemask passed to page allocator for mempolicy ooms
363 *
364 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
365 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
366 * are not shown.
367 * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
368 * value, oom_score_adj value, and name.
369 *
370 * Call with tasklist_lock read-locked.
371 */
372static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask)
373{
374	struct task_struct *p;
375	struct task_struct *task;
376
377	pr_info("[ pid ]   uid  tgid total_vm      rss cpu oom_adj oom_score_adj name\n");
378	for_each_process(p) {
379		if (oom_unkillable_task(p, mem, nodemask))
380			continue;
381
382		task = find_lock_task_mm(p);
383		if (!task) {
384			/*
385			 * This is a kthread or all of p's threads have already
386			 * detached their mm's.  There's no need to report
387			 * them; they can't be oom killed anyway.
388			 */
389			continue;
390		}
391
392		pr_info("[%5d] %5d %5d %8lu %8lu %3u     %3d         %5d %s\n",
393			task->pid, task_uid(task), task->tgid,
394			task->mm->total_vm, get_mm_rss(task->mm),
395			task_cpu(task), task->signal->oom_adj,
396			task->signal->oom_score_adj, task->comm);
397		task_unlock(task);
398	}
399}
400
401static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
402			struct mem_cgroup *mem, const nodemask_t *nodemask)
403{
404	task_lock(current);
405	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
406		"oom_adj=%d, oom_score_adj=%d\n",
407		current->comm, gfp_mask, order, current->signal->oom_adj,
408		current->signal->oom_score_adj);
409	cpuset_print_task_mems_allowed(current);
410	task_unlock(current);
411	dump_stack();
412	mem_cgroup_print_oom_info(mem, p);
413	show_mem(SHOW_MEM_FILTER_NODES);
414	if (sysctl_oom_dump_tasks)
415		dump_tasks(mem, nodemask);
416}
417
418#define K(x) ((x) << (PAGE_SHIFT-10))
419static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
420{
421	struct task_struct *q;
422	struct mm_struct *mm;
423
424	p = find_lock_task_mm(p);
425	if (!p)
426		return 1;
427
428	/* mm cannot be safely dereferenced after task_unlock(p) */
429	mm = p->mm;
430
431	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
432		task_pid_nr(p), p->comm, K(p->mm->total_vm),
433		K(get_mm_counter(p->mm, MM_ANONPAGES)),
434		K(get_mm_counter(p->mm, MM_FILEPAGES)));
435	task_unlock(p);
436
437	/*
438	 * Kill all processes sharing p->mm in other thread groups, if any.
439	 * They don't get access to memory reserves or a higher scheduler
440	 * priority, though, to avoid depletion of all memory or task
441	 * starvation.  This prevents mm->mmap_sem livelock when an oom killed
 442	 * task cannot exit because it requires the semaphore and it's contended
443	 * by another thread trying to allocate memory itself.  That thread will
444	 * now get access to memory reserves since it has a pending fatal
445	 * signal.
446	 */
447	for_each_process(q)
448		if (q->mm == mm && !same_thread_group(q, p)) {
449			task_lock(q);	/* Protect ->comm from prctl() */
450			pr_err("Kill process %d (%s) sharing same memory\n",
451				task_pid_nr(q), q->comm);
452			task_unlock(q);
453			force_sig(SIGKILL, q);
454		}
455
456	set_tsk_thread_flag(p, TIF_MEMDIE);
457	force_sig(SIGKILL, p);
458
459	return 0;
460}
461#undef K
462
463static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
464			    unsigned int points, unsigned long totalpages,
465			    struct mem_cgroup *mem, nodemask_t *nodemask,
466			    const char *message)
467{
468	struct task_struct *victim = p;
469	struct task_struct *child;
470	struct task_struct *t = p;
471	unsigned int victim_points = 0;
472
473	if (printk_ratelimit())
474		dump_header(p, gfp_mask, order, mem, nodemask);
475
476	/*
477	 * If the task is already exiting, don't alarm the sysadmin or kill
478	 * its children or threads, just set TIF_MEMDIE so it can die quickly
479	 */
480	if (p->flags & PF_EXITING) {
481		set_tsk_thread_flag(p, TIF_MEMDIE);
482		return 0;
483	}
484
485	task_lock(p);
486	pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
487		message, task_pid_nr(p), p->comm, points);
488	task_unlock(p);
489
490	/*
491	 * If any of p's children has a different mm and is eligible for kill,
492	 * the one with the highest oom_badness() score is sacrificed for its
493	 * parent.  This attempts to lose the minimal amount of work done while
494	 * still freeing memory.
495	 */
496	do {
497		list_for_each_entry(child, &t->children, sibling) {
498			unsigned int child_points;
499
500			if (child->mm == p->mm)
501				continue;
502			/*
503			 * oom_badness() returns 0 if the thread is unkillable
504			 */
505			child_points = oom_badness(child, mem, nodemask,
506								totalpages);
507			if (child_points > victim_points) {
508				victim = child;
509				victim_points = child_points;
510			}
511		}
512	} while_each_thread(p, t);
513
514	return oom_kill_task(victim, mem);
515}
516
517/*
518 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
519 */
520static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
521				int order, const nodemask_t *nodemask)
522{
523	if (likely(!sysctl_panic_on_oom))
524		return;
525	if (sysctl_panic_on_oom != 2) {
526		/*
527		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
528		 * does not panic for cpuset, mempolicy, or memcg allocation
529		 * failures.
530		 */
531		if (constraint != CONSTRAINT_NONE)
532			return;
533	}
534	read_lock(&tasklist_lock);
535	dump_header(NULL, gfp_mask, order, NULL, nodemask);
536	read_unlock(&tasklist_lock);
537	panic("Out of memory: %s panic_on_oom is enabled\n",
538		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
539}
540
541#ifdef CONFIG_CGROUP_MEM_RES_CTLR
542void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
543{
544	unsigned long limit;
545	unsigned int points = 0;
546	struct task_struct *p;
547
548	/*
549	 * If current has a pending SIGKILL, then automatically select it.  The
550	 * goal is to allow it to allocate so that it may quickly exit and free
551	 * its memory.
552	 */
553	if (fatal_signal_pending(current)) {
554		set_thread_flag(TIF_MEMDIE);
555		return;
556	}
557
558	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
559	limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT;
560	read_lock(&tasklist_lock);
561retry:
562	p = select_bad_process(&points, limit, mem, NULL);
563	if (!p || PTR_ERR(p) == -1UL)
564		goto out;
565
566	if (oom_kill_process(p, gfp_mask, 0, points, limit, mem, NULL,
567				"Memory cgroup out of memory"))
568		goto retry;
569out:
570	read_unlock(&tasklist_lock);
571}
572#endif
573
574static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
575
576int register_oom_notifier(struct notifier_block *nb)
577{
578	return blocking_notifier_chain_register(&oom_notify_list, nb);
579}
580EXPORT_SYMBOL_GPL(register_oom_notifier);
581
582int unregister_oom_notifier(struct notifier_block *nb)
583{
584	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
585}
586EXPORT_SYMBOL_GPL(unregister_oom_notifier);
587
588/*
589 * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
590 * if a parallel OOM killing is already taking place that includes a zone in
591 * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
592 */
593int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
594{
595	struct zoneref *z;
596	struct zone *zone;
597	int ret = 1;
598
599	spin_lock(&zone_scan_lock);
600	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
601		if (zone_is_oom_locked(zone)) {
602			ret = 0;
603			goto out;
604		}
605	}
606
607	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
608		/*
609		 * Lock each zone in the zonelist under zone_scan_lock so a
610		 * parallel invocation of try_set_zonelist_oom() doesn't succeed
611		 * when it shouldn't.
612		 */
613		zone_set_flag(zone, ZONE_OOM_LOCKED);
614	}
615
616out:
617	spin_unlock(&zone_scan_lock);
618	return ret;
619}
620
621/*
622 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
623 * allocation attempts with zonelists containing them may now recall the OOM
624 * killer, if necessary.
625 */
626void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
627{
628	struct zoneref *z;
629	struct zone *zone;
630
631	spin_lock(&zone_scan_lock);
632	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
633		zone_clear_flag(zone, ZONE_OOM_LOCKED);
634	}
635	spin_unlock(&zone_scan_lock);
636}
637
638/*
639 * Try to acquire the oom killer lock for all system zones.  Returns zero if a
640 * parallel oom killing is taking place, otherwise locks all zones and returns
641 * non-zero.
642 */
643static int try_set_system_oom(void)
644{
645	struct zone *zone;
646	int ret = 1;
647
648	spin_lock(&zone_scan_lock);
649	for_each_populated_zone(zone)
650		if (zone_is_oom_locked(zone)) {
651			ret = 0;
652			goto out;
653		}
654	for_each_populated_zone(zone)
655		zone_set_flag(zone, ZONE_OOM_LOCKED);
656out:
657	spin_unlock(&zone_scan_lock);
658	return ret;
659}
660
661/*
662 * Clears ZONE_OOM_LOCKED for all system zones so that failed allocation
663 * attempts or page faults may now recall the oom killer, if necessary.
664 */
665static void clear_system_oom(void)
666{
667	struct zone *zone;
668
669	spin_lock(&zone_scan_lock);
670	for_each_populated_zone(zone)
671		zone_clear_flag(zone, ZONE_OOM_LOCKED);
672	spin_unlock(&zone_scan_lock);
673}
674
675/**
676 * out_of_memory - kill the "best" process when we run out of memory
677 * @zonelist: zonelist pointer
678 * @gfp_mask: memory allocation flags
679 * @order: amount of memory being requested as a power of 2
680 * @nodemask: nodemask passed to page allocator
681 *
682 * If we run out of memory, we have the choice between either
683 * killing a random task (bad), letting the system crash (worse)
684 * OR try to be smart about which process to kill. Note that we
685 * don't have to be perfect here, we just have to be good.
686 */
687void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
688		int order, nodemask_t *nodemask)
689{
690	const nodemask_t *mpol_mask;
691	struct task_struct *p;
692	unsigned long totalpages;
693	unsigned long freed = 0;
694	unsigned int points;
695	enum oom_constraint constraint = CONSTRAINT_NONE;
696	int killed = 0;
697
698	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
699	if (freed > 0)
700		/* Got some memory back in the last second. */
701		return;
702
703	/*
704	 * If current has a pending SIGKILL, then automatically select it.  The
705	 * goal is to allow it to allocate so that it may quickly exit and free
706	 * its memory.
707	 */
708	if (fatal_signal_pending(current)) {
709		set_thread_flag(TIF_MEMDIE);
710		return;
711	}
712
713	/*
714	 * Check if there were limitations on the allocation (only relevant for
715	 * NUMA) that may require different handling.
716	 */
717	constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
718						&totalpages);
719	mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
720	check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);
721
722	read_lock(&tasklist_lock);
723	if (sysctl_oom_kill_allocating_task &&
724	    !oom_unkillable_task(current, NULL, nodemask) &&
725	    current->mm && !atomic_read(&current->mm->oom_disable_count)) {
726		/*
727		 * oom_kill_process() needs tasklist_lock held.  If it returns
728		 * non-zero, current could not be killed so we must fallback to
729		 * the tasklist scan.
730		 */
731		if (!oom_kill_process(current, gfp_mask, order, 0, totalpages,
732				NULL, nodemask,
733				"Out of memory (oom_kill_allocating_task)"))
734			goto out;
735	}
736
737retry:
738	p = select_bad_process(&points, totalpages, NULL, mpol_mask);
739	if (PTR_ERR(p) == -1UL)
740		goto out;
741
742	/* Found nothing?!?! Either we hang forever, or we panic. */
743	if (!p) {
744		dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
745		read_unlock(&tasklist_lock);
746		panic("Out of memory and no killable processes...\n");
747	}
748
749	if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
750				nodemask, "Out of memory"))
751		goto retry;
752	killed = 1;
753out:
754	read_unlock(&tasklist_lock);
755
756	/*
757	 * Give "p" a good chance of killing itself before we
758	 * retry to allocate memory unless "p" is current
759	 */
760	if (killed && !test_thread_flag(TIF_MEMDIE))
761		schedule_timeout_uninterruptible(1);
762}
763
764/*
765 * The pagefault handler calls here because it is out of memory, so kill a
766 * memory-hogging task.  If a populated zone has ZONE_OOM_LOCKED set, a parallel
767 * oom killing is already in progress so do nothing.  If a task is found with
768 * TIF_MEMDIE set, it has been killed so do nothing and allow it to exit.
769 */
770void pagefault_out_of_memory(void)
771{
772	if (try_set_system_oom()) {
773		out_of_memory(NULL, 0, 0, NULL);
774		clear_system_oom();
775	}
776	if (!test_thread_flag(TIF_MEMDIE))
777		schedule_timeout_uninterruptible(1);
778}
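
For readers tracing the v3.1 heuristic above, the following standalone sketch restates the oom_badness() arithmetic (rss plus page-table pages plus swap entries scaled to 0..1000 of totalpages, a 3% root bonus, then oom_score_adj, clamped to 1..1000) with made-up numbers. The helper badness_v3_1() and every input value are hypothetical; this is an illustration, not kernel code, and it ignores locking and the oom_disable_count shortcut.

/*
 * Illustrative sketch of the v3.1 oom_badness() arithmetic above.
 * Not kernel code: plain userspace C with hypothetical inputs.
 */
#include <stdio.h>

static long badness_v3_1(unsigned long rss, unsigned long nr_ptes,
			 unsigned long swapents, unsigned long totalpages,
			 int is_root, int oom_score_adj)
{
	long points;

	if (!totalpages)
		totalpages = 1;		/* avoid divide by zero, as in the kernel */
	/* proportion of RAM plus swap used, scaled to 0..1000 */
	points = (rss + nr_ptes + swapents) * 1000 / totalpages;
	if (is_root)
		points -= 30;		/* 3% bonus for CAP_SYS_ADMIN */
	points += oom_score_adj;	/* -1000 .. +1000 */
	if (points <= 0)
		return 1;		/* eligible tasks never score 0 */
	return points < 1000 ? points : 1000;
}

int main(void)
{
	/* a task using about a quarter of 4 GiB worth of pages, adj = +100 */
	printf("badness = %ld\n",
	       badness_v3_1(250000, 500, 10000, 1048576, 0, 100));
	return 0;
}

Compiled as ordinary C this prints "badness = 348" for the example inputs: a proportional score of 248 plus the oom_score_adj of 100.
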
v4.10.11
   1/*
   2 *  linux/mm/oom_kill.c
   3 * 
   4 *  Copyright (C)  1998,2000  Rik van Riel
   5 *	Thanks go out to Claus Fischer for some serious inspiration and
   6 *	for goading me into coding this file...
   7 *  Copyright (C)  2010  Google, Inc.
   8 *	Rewritten by David Rientjes
   9 *
  10 *  The routines in this file are used to kill a process when
  11 *  we're seriously out of memory. This gets called from __alloc_pages()
  12 *  in mm/page_alloc.c when we really run out of memory.
  13 *
  14 *  Since we won't call these routines often (on a well-configured
  15 *  machine) this file will double as a 'coding guide' and a signpost
  16 *  for newbie kernel hackers. It features several pointers to major
  17 *  kernel subsystems and hints as to where to find out what things do.
  18 */
  19
  20#include <linux/oom.h>
  21#include <linux/mm.h>
  22#include <linux/err.h>
  23#include <linux/gfp.h>
  24#include <linux/sched.h>
  25#include <linux/swap.h>
  26#include <linux/timex.h>
  27#include <linux/jiffies.h>
  28#include <linux/cpuset.h>
  29#include <linux/export.h>
  30#include <linux/notifier.h>
  31#include <linux/memcontrol.h>
  32#include <linux/mempolicy.h>
  33#include <linux/security.h>
  34#include <linux/ptrace.h>
  35#include <linux/freezer.h>
  36#include <linux/ftrace.h>
  37#include <linux/ratelimit.h>
  38#include <linux/kthread.h>
  39#include <linux/init.h>
  40
  41#include <asm/tlb.h>
  42#include "internal.h"
  43
  44#define CREATE_TRACE_POINTS
  45#include <trace/events/oom.h>
  46
  47int sysctl_panic_on_oom;
  48int sysctl_oom_kill_allocating_task;
  49int sysctl_oom_dump_tasks = 1;
  50
  51DEFINE_MUTEX(oom_lock);
  52
  53#ifdef CONFIG_NUMA
  54/**
  55 * has_intersects_mems_allowed() - check task eligibility for kill
  56 * @start: task struct of which task to consider
  57 * @mask: nodemask passed to page allocator for mempolicy ooms
  58 *
  59 * Task eligibility is determined by whether or not a candidate task, @tsk,
  60 * shares the same mempolicy nodes as current if it is bound by such a policy
  61 * and whether or not it has the same set of allowed cpuset nodes.
  62 */
  63static bool has_intersects_mems_allowed(struct task_struct *start,
  64					const nodemask_t *mask)
  65{
  66	struct task_struct *tsk;
  67	bool ret = false;
  68
  69	rcu_read_lock();
  70	for_each_thread(start, tsk) {
  71		if (mask) {
  72			/*
  73			 * If this is a mempolicy constrained oom, tsk's
  74			 * cpuset is irrelevant.  Only return true if its
  75			 * mempolicy intersects current, otherwise it may be
  76			 * needlessly killed.
  77			 */
  78			ret = mempolicy_nodemask_intersects(tsk, mask);
  79		} else {
  80			/*
  81			 * This is not a mempolicy constrained oom, so only
  82			 * check the mems of tsk's cpuset.
  83			 */
  84			ret = cpuset_mems_allowed_intersects(current, tsk);
  85		}
  86		if (ret)
  87			break;
  88	}
  89	rcu_read_unlock();
  90
  91	return ret;
  92}
  93#else
  94static bool has_intersects_mems_allowed(struct task_struct *tsk,
  95					const nodemask_t *mask)
  96{
  97	return true;
  98}
  99#endif /* CONFIG_NUMA */
 100
 101/*
 102 * The process p may have detached its own ->mm while exiting or through
 103 * use_mm(), but one or more of its subthreads may still have a valid
 104 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 105 * task_lock() held.
 106 */
 107struct task_struct *find_lock_task_mm(struct task_struct *p)
 108{
 109	struct task_struct *t;
 110
 111	rcu_read_lock();
 112
 113	for_each_thread(p, t) {
 114		task_lock(t);
 115		if (likely(t->mm))
 116			goto found;
 117		task_unlock(t);
 118	}
 119	t = NULL;
 120found:
 121	rcu_read_unlock();
 122
 123	return t;
 124}
 125
 126/*
 127 * order == -1 means the oom kill was requested by sysrq; otherwise the
 128 * order is used only for display purposes.
 129 */
 130static inline bool is_sysrq_oom(struct oom_control *oc)
 131{
 132	return oc->order == -1;
 133}
 134
 135static inline bool is_memcg_oom(struct oom_control *oc)
 136{
 137	return oc->memcg != NULL;
 138}
 139
 140/* return true if the task is not adequate as candidate victim task. */
 141static bool oom_unkillable_task(struct task_struct *p,
 142		struct mem_cgroup *memcg, const nodemask_t *nodemask)
 143{
 144	if (is_global_init(p))
 145		return true;
 146	if (p->flags & PF_KTHREAD)
 147		return true;
 148
  149	/* When mem_cgroup_out_of_memory() is called and p is not a member of the group */
 150	if (memcg && !task_in_mem_cgroup(p, memcg))
 151		return true;
 152
 153	/* p may not have freeable memory in nodemask */
 154	if (!has_intersects_mems_allowed(p, nodemask))
 155		return true;
 156
 157	return false;
 158}
 159
 160/**
 161 * oom_badness - heuristic function to determine which candidate task to kill
 162 * @p: task struct of which task we should calculate
 163 * @totalpages: total present RAM allowed for page allocation
 164 *
 165 * The heuristic for determining which task to kill is made to be as simple and
 166 * predictable as possible.  The goal is to return the highest value for the
 167 * task consuming the most memory to avoid subsequent oom failures.
 168 */
 169unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
 170			  const nodemask_t *nodemask, unsigned long totalpages)
 171{
 172	long points;
 173	long adj;
 174
 175	if (oom_unkillable_task(p, memcg, nodemask))
 176		return 0;
 177
 178	p = find_lock_task_mm(p);
 179	if (!p)
 180		return 0;
 181
 182	/*
 183	 * Do not even consider tasks which are explicitly marked oom
  184	 * unkillable, have already been oom reaped, or are in
  185	 * the middle of vfork.
 186	 */
 187	adj = (long)p->signal->oom_score_adj;
 188	if (adj == OOM_SCORE_ADJ_MIN ||
 189			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
 190			in_vfork(p)) {
 191		task_unlock(p);
 192		return 0;
 193	}
 194
 195	/*
 196	 * The baseline for the badness score is the proportion of RAM that each
 197	 * task's rss, pagetable and swap space use.
 198	 */
 199	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
 200		atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
 201	task_unlock(p);
 202
 203	/*
 204	 * Root processes get 3% bonus, just like the __vm_enough_memory()
 205	 * implementation used by LSMs.
 206	 */
 207	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
 208		points -= (points * 3) / 100;
 209
 210	/* Normalize to oom_score_adj units */
 211	adj *= totalpages / 1000;
 212	points += adj;
 213
 214	/*
 215	 * Never return 0 for an eligible task regardless of the root bonus and
 216	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
 217	 */
 218	return points > 0 ? points : 1;
 219}
 220
 221enum oom_constraint {
 222	CONSTRAINT_NONE,
 223	CONSTRAINT_CPUSET,
 224	CONSTRAINT_MEMORY_POLICY,
 225	CONSTRAINT_MEMCG,
 226};
 227
 228/*
 229 * Determine the type of allocation constraint.
 230 */
 231static enum oom_constraint constrained_alloc(struct oom_control *oc)
 232{
 233	struct zone *zone;
 234	struct zoneref *z;
 235	enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
 236	bool cpuset_limited = false;
 237	int nid;
 238
 239	if (is_memcg_oom(oc)) {
 240		oc->totalpages = mem_cgroup_get_limit(oc->memcg) ?: 1;
 241		return CONSTRAINT_MEMCG;
 242	}
 243
 244	/* Default to all available memory */
 245	oc->totalpages = totalram_pages + total_swap_pages;
 246
 247	if (!IS_ENABLED(CONFIG_NUMA))
 248		return CONSTRAINT_NONE;
 249
 250	if (!oc->zonelist)
 251		return CONSTRAINT_NONE;
 252	/*
  253	 * We reach here only when __GFP_NOFAIL is used, so we should avoid
  254	 * killing current and instead fall back to a random task kill.
  255	 * Ideally this would be CONSTRAINT_THISNODE, but there is no way to handle it yet.
 256	 */
 257	if (oc->gfp_mask & __GFP_THISNODE)
 258		return CONSTRAINT_NONE;
 259
 260	/*
 261	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
 262	 * the page allocator means a mempolicy is in effect.  Cpuset policy
 263	 * is enforced in get_page_from_freelist().
 264	 */
 265	if (oc->nodemask &&
 266	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
 267		oc->totalpages = total_swap_pages;
 268		for_each_node_mask(nid, *oc->nodemask)
 269			oc->totalpages += node_spanned_pages(nid);
 270		return CONSTRAINT_MEMORY_POLICY;
 271	}
 272
 273	/* Check this allocation failure is caused by cpuset's wall function */
 274	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
 275			high_zoneidx, oc->nodemask)
 276		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
 277			cpuset_limited = true;
 278
 279	if (cpuset_limited) {
 280		oc->totalpages = total_swap_pages;
 281		for_each_node_mask(nid, cpuset_current_mems_allowed)
 282			oc->totalpages += node_spanned_pages(nid);
 283		return CONSTRAINT_CPUSET;
 284	}
 285	return CONSTRAINT_NONE;
 286}
 287
 288static int oom_evaluate_task(struct task_struct *task, void *arg)
 289{
 290	struct oom_control *oc = arg;
 291	unsigned long points;
 292
 293	if (oom_unkillable_task(task, NULL, oc->nodemask))
 294		goto next;
 295
 296	/*
 297	 * This task already has access to memory reserves and is being killed.
 298	 * Don't allow any other task to have access to the reserves unless
 299	 * the task has MMF_OOM_SKIP because chances that it would release
 300	 * any memory is quite low.
 301	 */
 302	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
 303		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
 304			goto next;
 305		goto abort;
 306	}
 307
 308	/*
 309	 * If task is allocating a lot of memory and has been marked to be
 310	 * killed first if it triggers an oom, then select it.
 311	 */
 312	if (oom_task_origin(task)) {
 313		points = ULONG_MAX;
 314		goto select;
 315	}
 316
 317	points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
 318	if (!points || points < oc->chosen_points)
 319		goto next;
 320
 321	/* Prefer thread group leaders for display purposes */
 322	if (points == oc->chosen_points && thread_group_leader(oc->chosen))
 323		goto next;
 324select:
 325	if (oc->chosen)
 326		put_task_struct(oc->chosen);
 327	get_task_struct(task);
 328	oc->chosen = task;
 329	oc->chosen_points = points;
 330next:
 331	return 0;
 332abort:
 333	if (oc->chosen)
 334		put_task_struct(oc->chosen);
 335	oc->chosen = (void *)-1UL;
 336	return 1;
 337}
 338
 339/*
 340 * Simple selection loop. We choose the process with the highest number of
 341 * 'points'. In case scan was aborted, oc->chosen is set to -1.
 342 */
 343static void select_bad_process(struct oom_control *oc)
 344{
 345	if (is_memcg_oom(oc))
 346		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
 347	else {
 348		struct task_struct *p;
 349
 350		rcu_read_lock();
 351		for_each_process(p)
 352			if (oom_evaluate_task(p, oc))
 353				break;
 354		rcu_read_unlock();
 355	}
 356
 357	oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
 358}
 359
 360/**
 361 * dump_tasks - dump current memory state of all system tasks
 362 * @memcg: current's memory controller, if constrained
 363 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 364 *
 365 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 366 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 367 * are not shown.
 368 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 369 * swapents, oom_score_adj value, and name.
 370 */
 371static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
 372{
 373	struct task_struct *p;
 374	struct task_struct *task;
 375
 376	pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes nr_pmds swapents oom_score_adj name\n");
 377	rcu_read_lock();
 378	for_each_process(p) {
 379		if (oom_unkillable_task(p, memcg, nodemask))
 380			continue;
 381
 382		task = find_lock_task_mm(p);
 383		if (!task) {
 384			/*
 385			 * This is a kthread or all of p's threads have already
 386			 * detached their mm's.  There's no need to report
 387			 * them; they can't be oom killed anyway.
 388			 */
 389			continue;
 390		}
 391
 392		pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu         %5hd %s\n",
 393			task->pid, from_kuid(&init_user_ns, task_uid(task)),
 394			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
 395			atomic_long_read(&task->mm->nr_ptes),
 396			mm_nr_pmds(task->mm),
 397			get_mm_counter(task->mm, MM_SWAPENTS),
 398			task->signal->oom_score_adj, task->comm);
 399		task_unlock(task);
 400	}
 401	rcu_read_unlock();
 402}
 403
 404static void dump_header(struct oom_control *oc, struct task_struct *p)
 405{
 406	nodemask_t *nm = (oc->nodemask) ? oc->nodemask : &cpuset_current_mems_allowed;
 407
 408	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=%*pbl, order=%d, oom_score_adj=%hd\n",
 409		current->comm, oc->gfp_mask, &oc->gfp_mask,
 410		nodemask_pr_args(nm), oc->order,
 411		current->signal->oom_score_adj);
 412	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
 413		pr_warn("COMPACTION is disabled!!!\n");
 414
 415	cpuset_print_current_mems_allowed();
 416	dump_stack();
 417	if (oc->memcg)
 418		mem_cgroup_print_oom_info(oc->memcg, p);
 419	else
 420		show_mem(SHOW_MEM_FILTER_NODES);
 421	if (sysctl_oom_dump_tasks)
 422		dump_tasks(oc->memcg, oc->nodemask);
 423}
 424
 425/*
 426 * Number of OOM victims in flight
 427 */
 428static atomic_t oom_victims = ATOMIC_INIT(0);
 429static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);
 430
 431static bool oom_killer_disabled __read_mostly;
 432
 433#define K(x) ((x) << (PAGE_SHIFT-10))
 434
 435/*
 436 * task->mm can be NULL if the task is the exited group leader.  So to
 437 * determine whether the task is using a particular mm, we examine all the
 438 * task's threads: if one of those is using this mm then this task was also
 439 * using it.
 440 */
 441bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
 442{
 443	struct task_struct *t;
 444
 445	for_each_thread(p, t) {
 446		struct mm_struct *t_mm = READ_ONCE(t->mm);
 447		if (t_mm)
 448			return t_mm == mm;
 449	}
 450	return false;
 451}
 452
 453
 454#ifdef CONFIG_MMU
 455/*
 456 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 457 * victim (if that is possible) to help the OOM killer to move on.
 458 */
 459static struct task_struct *oom_reaper_th;
 460static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
 461static struct task_struct *oom_reaper_list;
 462static DEFINE_SPINLOCK(oom_reaper_lock);
 463
 464static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
 465{
 466	struct mmu_gather tlb;
 467	struct vm_area_struct *vma;
 468	struct zap_details details = {.check_swap_entries = true,
 469				      .ignore_dirty = true};
 470	bool ret = true;
 471
 472	/*
 473	 * We have to make sure to not race with the victim exit path
 474	 * and cause premature new oom victim selection:
 475	 * __oom_reap_task_mm		exit_mm
 476	 *   mmget_not_zero
 477	 *				  mmput
 478	 *				    atomic_dec_and_test
 479	 *				  exit_oom_victim
 480	 *				[...]
 481	 *				out_of_memory
 482	 *				  select_bad_process
 483	 *				    # no TIF_MEMDIE task selects new victim
 484	 *  unmap_page_range # frees some memory
 485	 */
 486	mutex_lock(&oom_lock);
 487
 488	if (!down_read_trylock(&mm->mmap_sem)) {
 489		ret = false;
 490		goto unlock_oom;
 491	}
 492
 493	/*
 494	 * increase mm_users only after we know we will reap something so
 495	 * that the mmput_async is called only when we have reaped something
 496	 * and delayed __mmput doesn't matter that much
 497	 */
 498	if (!mmget_not_zero(mm)) {
 499		up_read(&mm->mmap_sem);
 500		goto unlock_oom;
 501	}
 502
 503	/*
 504	 * Tell all users of get_user/copy_from_user etc... that the content
 505	 * is no longer stable. No barriers really needed because unmapping
 506	 * should imply barriers already and the reader would hit a page fault
 507	 * if it stumbled over a reaped memory.
 508	 */
 509	set_bit(MMF_UNSTABLE, &mm->flags);
 510
 511	tlb_gather_mmu(&tlb, mm, 0, -1);
 512	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
 513		if (is_vm_hugetlb_page(vma))
 514			continue;
 515
 516		/*
 517		 * mlocked VMAs require explicit munlocking before unmap.
 518		 * Let's keep it simple here and skip such VMAs.
 519		 */
 520		if (vma->vm_flags & VM_LOCKED)
 521			continue;
 522
 523		/*
 524		 * Only anonymous pages have a good chance to be dropped
 525		 * without additional steps which we cannot afford as we
 526		 * are OOM already.
 527		 *
 528		 * We do not even care about fs backed pages because all
 529		 * which are reclaimable have already been reclaimed and
 530		 * we do not want to block exit_mmap by keeping mm ref
 531		 * count elevated without a good reason.
 532		 */
 533		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
 534			unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
 535					 &details);
 536	}
 537	tlb_finish_mmu(&tlb, 0, -1);
 538	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
 539			task_pid_nr(tsk), tsk->comm,
 540			K(get_mm_counter(mm, MM_ANONPAGES)),
 541			K(get_mm_counter(mm, MM_FILEPAGES)),
 542			K(get_mm_counter(mm, MM_SHMEMPAGES)));
 543	up_read(&mm->mmap_sem);
 544
 545	/*
 546	 * Drop our reference but make sure the mmput slow path is called from a
  547	 * different context because we shouldn't risk getting stuck there and
 548	 * put the oom_reaper out of the way.
 549	 */
 550	mmput_async(mm);
 551unlock_oom:
 552	mutex_unlock(&oom_lock);
 553	return ret;
 554}
 555
 556#define MAX_OOM_REAP_RETRIES 10
 557static void oom_reap_task(struct task_struct *tsk)
 558{
 559	int attempts = 0;
 560	struct mm_struct *mm = tsk->signal->oom_mm;
 561
 562	/* Retry the down_read_trylock(mmap_sem) a few times */
 563	while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm))
 564		schedule_timeout_idle(HZ/10);
 565
 566	if (attempts <= MAX_OOM_REAP_RETRIES)
 567		goto done;
 568
 569
 570	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
 571		task_pid_nr(tsk), tsk->comm);
 572	debug_show_all_locks();
 573
 574done:
 575	tsk->oom_reaper_list = NULL;
 576
 577	/*
 578	 * Hide this mm from OOM killer because it has been either reaped or
 579	 * somebody can't call up_write(mmap_sem).
 580	 */
 581	set_bit(MMF_OOM_SKIP, &mm->flags);
 582
 583	/* Drop a reference taken by wake_oom_reaper */
 584	put_task_struct(tsk);
 585}
 586
 587static int oom_reaper(void *unused)
 588{
 589	while (true) {
 590		struct task_struct *tsk = NULL;
 591
 592		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
 593		spin_lock(&oom_reaper_lock);
 594		if (oom_reaper_list != NULL) {
 595			tsk = oom_reaper_list;
 596			oom_reaper_list = tsk->oom_reaper_list;
 597		}
 598		spin_unlock(&oom_reaper_lock);
 599
 600		if (tsk)
 601			oom_reap_task(tsk);
 602	}
 603
 604	return 0;
 605}
 606
 607static void wake_oom_reaper(struct task_struct *tsk)
 608{
 609	if (!oom_reaper_th)
 610		return;
 611
 612	/* tsk is already queued? */
 613	if (tsk == oom_reaper_list || tsk->oom_reaper_list)
 614		return;
 615
 616	get_task_struct(tsk);
 617
 618	spin_lock(&oom_reaper_lock);
 619	tsk->oom_reaper_list = oom_reaper_list;
 620	oom_reaper_list = tsk;
 621	spin_unlock(&oom_reaper_lock);
 622	wake_up(&oom_reaper_wait);
 623}
 624
 625static int __init oom_init(void)
 626{
 627	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
 628	if (IS_ERR(oom_reaper_th)) {
 629		pr_err("Unable to start OOM reaper %ld. Continuing regardless\n",
 630				PTR_ERR(oom_reaper_th));
 631		oom_reaper_th = NULL;
 632	}
 633	return 0;
 634}
 635subsys_initcall(oom_init)
 636#else
 637static inline void wake_oom_reaper(struct task_struct *tsk)
 638{
 639}
 640#endif /* CONFIG_MMU */
 641
 642/**
 643 * mark_oom_victim - mark the given task as OOM victim
 644 * @tsk: task to mark
 645 *
 646 * Has to be called with oom_lock held and never after
 647 * oom has been disabled already.
 648 *
 649 * tsk->mm has to be non NULL and caller has to guarantee it is stable (either
 650 * under task_lock or operate on the current).
 651 */
 652static void mark_oom_victim(struct task_struct *tsk)
 653{
 654	struct mm_struct *mm = tsk->mm;
 655
 656	WARN_ON(oom_killer_disabled);
 657	/* OOM killer might race with memcg OOM */
 658	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
 659		return;
 660
 661	/* oom_mm is bound to the signal struct life time. */
 662	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
 663		atomic_inc(&tsk->signal->oom_mm->mm_count);
 664
 665	/*
 666	 * Make sure that the task is woken up from uninterruptible sleep
 667	 * if it is frozen because OOM killer wouldn't be able to free
 668	 * any memory and livelock. freezing_slow_path will tell the freezer
 669	 * that TIF_MEMDIE tasks should be ignored.
 670	 */
 671	__thaw_task(tsk);
 672	atomic_inc(&oom_victims);
 673}
 674
 675/**
 676 * exit_oom_victim - note the exit of an OOM victim
 677 */
 678void exit_oom_victim(void)
 679{
 680	clear_thread_flag(TIF_MEMDIE);
 681
 682	if (!atomic_dec_return(&oom_victims))
 683		wake_up_all(&oom_victims_wait);
 684}
 685
 686/**
 687 * oom_killer_enable - enable OOM killer
 688 */
 689void oom_killer_enable(void)
 690{
 691	oom_killer_disabled = false;
 692}
 693
 694/**
 695 * oom_killer_disable - disable OOM killer
 696 * @timeout: maximum timeout to wait for oom victims in jiffies
 697 *
 698 * Forces all page allocations to fail rather than trigger OOM killer.
 699 * Will block and wait until all OOM victims are killed or the given
 700 * timeout expires.
 701 *
 702 * The function cannot be called when there are runnable user tasks because
 703 * the userspace would see unexpected allocation failures as a result. Any
 704 * new usage of this function should be consulted with MM people.
 705 *
 706 * Returns true if successful and false if the OOM killer cannot be
 707 * disabled.
 708 */
 709bool oom_killer_disable(signed long timeout)
 710{
 711	signed long ret;
 712
 713	/*
 714	 * Make sure to not race with an ongoing OOM killer. Check that the
 715	 * current is not killed (possibly due to sharing the victim's memory).
 716	 */
 717	if (mutex_lock_killable(&oom_lock))
 718		return false;
 719	oom_killer_disabled = true;
 720	mutex_unlock(&oom_lock);
 721
 722	ret = wait_event_interruptible_timeout(oom_victims_wait,
 723			!atomic_read(&oom_victims), timeout);
 724	if (ret <= 0) {
 725		oom_killer_enable();
 726		return false;
 727	}
 728
 729	return true;
 730}
 731
 732static inline bool __task_will_free_mem(struct task_struct *task)
 733{
 734	struct signal_struct *sig = task->signal;
 735
 736	/*
 737	 * A coredumping process may sleep for an extended period in exit_mm(),
 738	 * so the oom killer cannot assume that the process will promptly exit
 739	 * and release memory.
 740	 */
 741	if (sig->flags & SIGNAL_GROUP_COREDUMP)
 742		return false;
 743
 744	if (sig->flags & SIGNAL_GROUP_EXIT)
 745		return true;
 746
 747	if (thread_group_empty(task) && (task->flags & PF_EXITING))
 748		return true;
 749
 750	return false;
 751}
 752
 753/*
 754 * Checks whether the given task is dying or exiting and likely to
 755 * release its address space. This means that all threads and processes
 756 * sharing the same mm have to be killed or exiting.
 757 * Caller has to make sure that task->mm is stable (hold task_lock or
 758 * it operates on the current).
 759 */
 760static bool task_will_free_mem(struct task_struct *task)
 761{
 762	struct mm_struct *mm = task->mm;
 763	struct task_struct *p;
 764	bool ret = true;
 765
 766	/*
 767	 * Skip tasks without mm because it might have passed its exit_mm and
 768	 * exit_oom_victim. oom_reaper could have rescued that but do not rely
 769	 * on that for now. We can consider find_lock_task_mm in future.
 770	 */
 771	if (!mm)
 772		return false;
 773
 774	if (!__task_will_free_mem(task))
 775		return false;
 776
 777	/*
 778	 * This task has already been drained by the oom reaper so there are
 779	 * only small chances it will free some more
 780	 */
 781	if (test_bit(MMF_OOM_SKIP, &mm->flags))
 782		return false;
 783
 784	if (atomic_read(&mm->mm_users) <= 1)
 785		return true;
 786
 787	/*
  788	 * Make sure that all tasks which share the mm with the given task
 789	 * are dying as well to make sure that a) nobody pins its mm and
 790	 * b) the task is also reapable by the oom reaper.
 791	 */
 792	rcu_read_lock();
 793	for_each_process(p) {
 794		if (!process_shares_mm(p, mm))
 795			continue;
 796		if (same_thread_group(task, p))
 797			continue;
 798		ret = __task_will_free_mem(p);
 799		if (!ret)
 800			break;
 801	}
 802	rcu_read_unlock();
 803
 804	return ret;
 805}
 806
 807static void oom_kill_process(struct oom_control *oc, const char *message)
 808{
 809	struct task_struct *p = oc->chosen;
 810	unsigned int points = oc->chosen_points;
 811	struct task_struct *victim = p;
 812	struct task_struct *child;
 813	struct task_struct *t;
 814	struct mm_struct *mm;
 815	unsigned int victim_points = 0;
 816	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
 817					      DEFAULT_RATELIMIT_BURST);
 818	bool can_oom_reap = true;
 819
 820	/*
 821	 * If the task is already exiting, don't alarm the sysadmin or kill
 822	 * its children or threads, just set TIF_MEMDIE so it can die quickly
 823	 */
 824	task_lock(p);
 825	if (task_will_free_mem(p)) {
 826		mark_oom_victim(p);
 827		wake_oom_reaper(p);
 828		task_unlock(p);
 829		put_task_struct(p);
 830		return;
 831	}
 832	task_unlock(p);
 833
 834	if (__ratelimit(&oom_rs))
 835		dump_header(oc, p);
 836
 837	pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
 838		message, task_pid_nr(p), p->comm, points);
 839
 840	/*
 841	 * If any of p's children has a different mm and is eligible for kill,
 842	 * the one with the highest oom_badness() score is sacrificed for its
 843	 * parent.  This attempts to lose the minimal amount of work done while
 844	 * still freeing memory.
 845	 */
 846	read_lock(&tasklist_lock);
 847	for_each_thread(p, t) {
 848		list_for_each_entry(child, &t->children, sibling) {
 849			unsigned int child_points;
 850
 851			if (process_shares_mm(child, p->mm))
 852				continue;
 853			/*
 854			 * oom_badness() returns 0 if the thread is unkillable
 855			 */
 856			child_points = oom_badness(child,
 857				oc->memcg, oc->nodemask, oc->totalpages);
 858			if (child_points > victim_points) {
 859				put_task_struct(victim);
 860				victim = child;
 861				victim_points = child_points;
 862				get_task_struct(victim);
 863			}
 864		}
 865	}
 866	read_unlock(&tasklist_lock);
 867
 868	p = find_lock_task_mm(victim);
 869	if (!p) {
 870		put_task_struct(victim);
 871		return;
 872	} else if (victim != p) {
 873		get_task_struct(p);
 874		put_task_struct(victim);
 875		victim = p;
 876	}
 877
 878	/* Get a reference to safely compare mm after task_unlock(victim) */
 879	mm = victim->mm;
 880	atomic_inc(&mm->mm_count);
 881	/*
 882	 * We should send SIGKILL before setting TIF_MEMDIE in order to prevent
 883	 * the OOM victim from depleting the memory reserves from the user
 884	 * space under its control.
 885	 */
 886	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
 887	mark_oom_victim(victim);
 888	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
 889		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
 890		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
 891		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
 892		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
 893	task_unlock(victim);
 894
 895	/*
 896	 * Kill all user processes sharing victim->mm in other thread groups, if
 897	 * any.  They don't get access to memory reserves, though, to avoid
 898	 * depletion of all memory.  This prevents mm->mmap_sem livelock when an
 899	 * oom killed thread cannot exit because it requires the semaphore and
  900	 * it's contended by another thread trying to allocate memory itself.
 901	 * That thread will now get access to memory reserves since it has a
 902	 * pending fatal signal.
 903	 */
 904	rcu_read_lock();
 905	for_each_process(p) {
 906		if (!process_shares_mm(p, mm))
 907			continue;
 908		if (same_thread_group(p, victim))
 909			continue;
 910		if (is_global_init(p)) {
 911			can_oom_reap = false;
 912			set_bit(MMF_OOM_SKIP, &mm->flags);
 913			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
 914					task_pid_nr(victim), victim->comm,
 915					task_pid_nr(p), p->comm);
 916			continue;
 917		}
 918		/*
 919		 * No use_mm() user needs to read from the userspace so we are
 920		 * ok to reap it.
 921		 */
 922		if (unlikely(p->flags & PF_KTHREAD))
 923			continue;
 924		do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
 925	}
 926	rcu_read_unlock();
 927
 928	if (can_oom_reap)
 929		wake_oom_reaper(victim);
 930
 931	mmdrop(mm);
 932	put_task_struct(victim);
 933}
 934#undef K
 935
 936/*
 937 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 938 */
 939static void check_panic_on_oom(struct oom_control *oc,
 940			       enum oom_constraint constraint)
 941{
 942	if (likely(!sysctl_panic_on_oom))
 943		return;
 944	if (sysctl_panic_on_oom != 2) {
 945		/*
 946		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
 947		 * does not panic for cpuset, mempolicy, or memcg allocation
 948		 * failures.
 949		 */
 950		if (constraint != CONSTRAINT_NONE)
 951			return;
 952	}
 953	/* Do not panic for oom kills triggered by sysrq */
 954	if (is_sysrq_oom(oc))
 955		return;
 956	dump_header(oc, NULL);
 957	panic("Out of memory: %s panic_on_oom is enabled\n",
 958		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
 959}
 960
 961static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
 962
 963int register_oom_notifier(struct notifier_block *nb)
 964{
 965	return blocking_notifier_chain_register(&oom_notify_list, nb);
 966}
 967EXPORT_SYMBOL_GPL(register_oom_notifier);
 968
 969int unregister_oom_notifier(struct notifier_block *nb)
 970{
 971	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
 972}
 973EXPORT_SYMBOL_GPL(unregister_oom_notifier);
 974
 975/**
 976 * out_of_memory - kill the "best" process when we run out of memory
 977 * @oc: pointer to struct oom_control
 978 *
 979 * If we run out of memory, we have the choice between either
 980 * killing a random task (bad), letting the system crash (worse)
 981 * OR try to be smart about which process to kill. Note that we
 982 * don't have to be perfect here, we just have to be good.
 983 */
 984bool out_of_memory(struct oom_control *oc)
 985{
 986	unsigned long freed = 0;
 987	enum oom_constraint constraint = CONSTRAINT_NONE;
 988
 989	if (oom_killer_disabled)
 990		return false;
 991
 992	if (!is_memcg_oom(oc)) {
 993		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
 994		if (freed > 0)
 995			/* Got some memory back in the last second. */
 996			return true;
 997	}
 998
 999	/*
1000	 * If current has a pending SIGKILL or is exiting, then automatically
1001	 * select it.  The goal is to allow it to allocate so that it may
1002	 * quickly exit and free its memory.
1003	 */
1004	if (task_will_free_mem(current)) {
1005		mark_oom_victim(current);
1006		wake_oom_reaper(current);
1007		return true;
1008	}
1009
1010	/*
1011	 * The OOM killer does not compensate for IO-less reclaim.
1012	 * pagefault_out_of_memory lost its gfp context so we have to
 1013	 * make sure to exclude the 0 mask - all other users should have at least
1014	 * ___GFP_DIRECT_RECLAIM to get here.
1015	 */
1016	if (oc->gfp_mask && !(oc->gfp_mask & (__GFP_FS|__GFP_NOFAIL)))
1017		return true;
1018
1019	/*
1020	 * Check if there were limitations on the allocation (only relevant for
1021	 * NUMA and memcg) that may require different handling.
1022	 */
1023	constraint = constrained_alloc(oc);
1024	if (constraint != CONSTRAINT_MEMORY_POLICY)
1025		oc->nodemask = NULL;
1026	check_panic_on_oom(oc, constraint);
1027
1028	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
1029	    current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
1030	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
1031		get_task_struct(current);
1032		oc->chosen = current;
1033		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
1034		return true;
1035	}
1036
1037	select_bad_process(oc);
1038	/* Found nothing?!?! Either we hang forever, or we panic. */
1039	if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
1040		dump_header(oc, NULL);
1041		panic("Out of memory and no killable processes...\n");
1042	}
1043	if (oc->chosen && oc->chosen != (void *)-1UL) {
1044		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
1045				 "Memory cgroup out of memory");
1046		/*
1047		 * Give the killed process a good chance to exit before trying
1048		 * to allocate memory again.
1049		 */
1050		schedule_timeout_killable(1);
1051	}
1052	return !!oc->chosen;
1053}
1054
1055/*
1056 * The pagefault handler calls here because it is out of memory, so kill a
1057 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
1058 * killing is already in progress so do nothing.
1059 */
1060void pagefault_out_of_memory(void)
1061{
1062	struct oom_control oc = {
1063		.zonelist = NULL,
1064		.nodemask = NULL,
1065		.memcg = NULL,
1066		.gfp_mask = 0,
1067		.order = 0,
1068	};
1069
1070	if (mem_cgroup_oom_synchronize(true))
1071		return;
1072
1073	if (!mutex_trylock(&oom_lock))
1074		return;
1075	out_of_memory(&oc);
1076	mutex_unlock(&oom_lock);
1077}
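
Both versions drive their policy from /proc/<pid>/oom_score_adj (range -1000 to 1000, as the comments above note) and export the resulting heuristic as /proc/<pid>/oom_score. As a closing illustration, here is a minimal userspace sketch in which a process volunteers itself as a preferred OOM victim; the value 500 is arbitrary and error handling is kept to the bare minimum.

/*
 * Userspace sketch: raise this process's OOM kill priority by writing
 * /proc/self/oom_score_adj, then read back the kernel's oom_score.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/self/oom_score_adj", "w");

	if (!f) {
		perror("oom_score_adj");
		return EXIT_FAILURE;
	}
	fprintf(f, "%d\n", 500);	/* prefer killing us under OOM pressure */
	fclose(f);

	f = fopen("/proc/self/oom_score", "r");
	if (f) {
		int score;

		if (fscanf(f, "%d", &score) == 1)
			printf("current oom_score: %d\n", score);
		fclose(f);
	}
	return 0;
}

Raising the value needs no special privileges, while lowering it again generally requires CAP_SYS_RESOURCE.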