/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include "internal.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5
struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;

/* to remember the boot option */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata = 0;
#endif

#else
#define do_swap_account		(0)
#endif


/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,	/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,	/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_SWAPOUT,	/* # of pages, swapped out */
	MEM_CGROUP_STAT_DATA,	/* end of data requires synchronization */
	MEM_CGROUP_ON_MOVE,	/* someone is moving account between groups */
	MEM_CGROUP_STAT_NSTATS,
};

enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_COUNT,	/* # of pages paged in/out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page faults */
	MEM_CGROUP_EVENTS_NSTATS,
};
/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used to
 * trigger some periodic events. This is straightforward and better than
 * using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET (128)
#define SOFTLIMIT_EVENTS_TARGET (1024)
#define NUMAINFO_EVENTS_TARGET	(1024)

struct mem_cgroup_stat_cpu {
	long count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	struct list_head	lists[NR_LRU_LISTS];
	unsigned long		count[NR_LRU_LISTS];

	struct zone_reclaim_stat reclaim_stat;
	struct rb_node		tree_node;	/* RB tree node */
	unsigned long long	usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*mem;		/* Back pointer, we cannot */
						/* use container_of	   */
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
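
/*
 * Illustrative sketch (not part of the kernel code itself) of how the
 * accessor above is typically used; the variable names are hypothetical:
 *
 *	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
 *	unsigned long nr = MEM_CGROUP_ZSTAT(mz, LRU_INACTIVE_ANON);
 *
 * i.e. it reads the per-zone count of pages this memcg holds on one of
 * the NR_LRU_LISTS LRU lists.
 */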

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	u64 threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

static void mem_cgroup_threshold(struct mem_cgroup *mem);
static void mem_cgroup_oom_notify(struct mem_cgroup *mem);

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * the counter to account for mem+swap usage.
	 */
	struct res_counter memsw;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;
	/*
	 * While reclaiming in a hierarchy, we cache the last child we
	 * reclaimed from.
	 */
	int last_scanned_child;
	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	bool		oom_lock;
	atomic_t	under_oom;

	atomic_t	refcnt;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* set when res.limit == memsw.limit */
	bool		memsw_is_minimum;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long	move_charge_at_immigrate;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu *stat;
	/*
	 * used when a cpu is offlined or other synchronizations
	 * See mem_cgroup_read_stat().
	 */
	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;
};

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
 * left-shifted bitmap of these types.
 */
enum move_type {
	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
	MOVE_CHARGE_TYPE_FILE,	/* file page (including tmpfs) and swap of it */
	NR_MOVE_TYPE,
};

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

static bool move_anon(void)
{
	return test_bit(MOVE_CHARGE_TYPE_ANON,
					&mc.to->move_charge_at_immigrate);
}

static bool move_file(void)
{
	return test_bit(MOVE_CHARGE_TYPE_FILE,
					&mc.to->move_charge_at_immigrate);
}

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		(100)
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	(2)

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
#define _MEM			(0)
#define _MEMSWAP		(1)
#define _OOM_TYPE		(2)
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)
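
/*
 * Illustrative example of the encoding above: a control file for the
 * mem+swap limit would set
 *
 *	.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
 *
 * and its read/write handlers later recover the two halves with
 * MEMFILE_TYPE(cft->private) == _MEMSWAP and
 * MEMFILE_ATTR(cft->private) == RES_LIMIT. The counter type lives in the
 * upper 16 bits, the res_counter member in the lower 16.
 */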

/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
#define MEM_CGROUP_RECLAIM_SOFT_BIT	0x2
#define MEM_CGROUP_RECLAIM_SOFT		(1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
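
/*
 * Illustrative combination of the flags above (a hypothetical caller):
 * a limit-shrinking path that must not touch swap would pass
 *
 *	unsigned long reclaim_options =
 *		MEM_CGROUP_RECLAIM_SHRINK | MEM_CGROUP_RECLAIM_NOSWAP;
 *
 * while soft limit reclaim passes MEM_CGROUP_RECLAIM_SOFT instead.
 */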

static void mem_cgroup_get(struct mem_cgroup *mem);
static void mem_cgroup_put(struct mem_cgroup *mem);
static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
static void drain_all_stock_async(struct mem_cgroup *mem);

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
	return &mem->css;
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct mem_cgroup *mem, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return mem_cgroup_zoneinfo(mem, nid, zid);
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static void
__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz,
				unsigned long long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void
__mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void
mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	spin_lock(&mctz->lock);
	__mem_cgroup_remove_exceeded(mem, mz, mctz);
	spin_unlock(&mctz->lock);
}


static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
{
	unsigned long long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);
	mctz = soft_limit_tree_from_page(page);

	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; mem; mem = parent_mem_cgroup(mem)) {
		mz = mem_cgroup_zoneinfo(mem, nid, zid);
		excess = res_counter_soft_limit_excess(&mem->res);
		/*
		 * We have to update the tree if mz is on the RB-tree or
		 * mem is over its soft limit.
		 */
		if (excess || mz->on_tree) {
			spin_lock(&mctz->lock);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mem, mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
			spin_unlock(&mctz->lock);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
{
	int node, zone;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	for_each_node_state(node, N_POSSIBLE) {
		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			mz = mem_cgroup_zoneinfo(mem, node, zone);
			mctz = soft_limit_tree_node_zone(node, zone);
			mem_cgroup_remove_exceeded(mem, mz, mctz);
		}
	}
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
	if (!res_counter_soft_limit_excess(&mz->mem->res) ||
		!css_tryget(&mz->mem->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock(&mctz->lock);
	return mz;
}

/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use thresholds and periodic
 * synchronization to implement a "quick" read. There is a trade-off between
 * reading cost and precision of the value. So we may have a chance to
 * implement a similar periodic synchronization of memcg's counters.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact
 * value because he accounts memory. Even if we provided a quick-and-fuzzy
 * read, we would still have to visit all online cpus and compute the sum.
 * So, for now, the extra synchronization is not implemented (it is
 * implemented only for cpu hotplug).
 *
 * If there are kernel-internal actions which can make use of a not-exact
 * value, and reading all cpu values becomes a performance bottleneck in some
 * common workload, thresholds and synchronization as in vmstat[] should be
 * implemented.
 */
static long mem_cgroup_read_stat(struct mem_cgroup *mem,
				 enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(mem->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&mem->pcp_counter_lock);
	val += mem->nocpu_base.count[idx];
	spin_unlock(&mem->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}

static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
					 bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
}

void mem_cgroup_pgfault(struct mem_cgroup *mem, int val)
{
	this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val);
}

void mem_cgroup_pgmajfault(struct mem_cgroup *mem, int val)
{
	this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val);
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_online_cpu(cpu)
		val += per_cpu(mem->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&mem->pcp_counter_lock);
	val += mem->nocpu_base.events[idx];
	spin_unlock(&mem->pcp_counter_lock);
#endif
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
					 bool file, int nr_pages)
{
	preempt_disable();

	if (file)
		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages);
	else
		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages);

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);

	preempt_enable();
}

unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *mem, int nid, int zid,
			unsigned int lru_mask)
{
	struct mem_cgroup_per_zone *mz;
	enum lru_list l;
	unsigned long ret = 0;

	mz = mem_cgroup_zoneinfo(mem, nid, zid);

	for_each_lru(l) {
		if (BIT(l) & lru_mask)
			ret += MEM_CGROUP_ZSTAT(mz, l);
	}
	return ret;
}

static unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *mem,
			int nid, unsigned int lru_mask)
{
	u64 total = 0;
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		total += mem_cgroup_zone_nr_lru_pages(mem, nid, zid, lru_mask);

	return total;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *mem,
			unsigned int lru_mask)
{
	int nid;
	u64 total = 0;

	for_each_node_state(nid, N_HIGH_MEMORY)
		total += mem_cgroup_node_nr_lru_pages(mem, nid, lru_mask);
	return total;
}

static bool __memcg_event_check(struct mem_cgroup *mem, int target)
{
	unsigned long val, next;

	val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);
	next = this_cpu_read(mem->stat->targets[target]);
	/* from time_after() in jiffies.h */
	return ((long)next - (long)val < 0);
}

static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target)
{
	unsigned long val, next;

	val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);

	switch (target) {
	case MEM_CGROUP_TARGET_THRESH:
		next = val + THRESHOLDS_EVENTS_TARGET;
		break;
	case MEM_CGROUP_TARGET_SOFTLIMIT:
		next = val + SOFTLIMIT_EVENTS_TARGET;
		break;
	case MEM_CGROUP_TARGET_NUMAINFO:
		next = val + NUMAINFO_EVENTS_TARGET;
		break;
	default:
		return;
	}

	this_cpu_write(mem->stat->targets[target], next);
}
/*
 * Check events in order.
 *
 */
static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(__memcg_event_check(mem, MEM_CGROUP_TARGET_THRESH))) {
		mem_cgroup_threshold(mem);
		__mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH);
		if (unlikely(__memcg_event_check(mem,
			     MEM_CGROUP_TARGET_SOFTLIMIT))) {
			mem_cgroup_update_tree(mem, page);
			__mem_cgroup_target_update(mem,
						   MEM_CGROUP_TARGET_SOFTLIMIT);
		}
#if MAX_NUMNODES > 1
		if (unlikely(__memcg_event_check(mem,
			     MEM_CGROUP_TARGET_NUMAINFO))) {
			atomic_inc(&mem->numainfo_events);
			__mem_cgroup_target_update(mem,
						   MEM_CGROUP_TARGET_NUMAINFO);
		}
#endif
	}
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *mem = NULL;

	if (!mm)
		return NULL;
	/*
	 * Because we hold no locks, mm->owner may be moved to another
	 * cgroup. We use css_tryget() here even if this looks
	 * pessimistic (rather than adding locks here).
	 */
	rcu_read_lock();
	do {
		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!mem))
			break;
	} while (!css_tryget(&mem->css));
	rcu_read_unlock();
	return mem;
}

/* The caller has to guarantee "mem" exists before calling this */
static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem)
{
	struct cgroup_subsys_state *css;
	int found;

	if (!mem) /* ROOT cgroup has the smallest ID */
		return root_mem_cgroup; /*css_put/get against root is ignored*/
	if (!mem->use_hierarchy) {
		if (css_tryget(&mem->css))
			return mem;
		return NULL;
	}
	rcu_read_lock();
	/*
	 * searching a memory cgroup which has the smallest ID under given
	 * ROOT cgroup. (ID >= 1)
	 */
	css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found);
	if (css && css_tryget(css))
		mem = container_of(css, struct mem_cgroup, css);
	else
		mem = NULL;
	rcu_read_unlock();
	return mem;
}

static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter,
					struct mem_cgroup *root,
					bool cond)
{
	int nextid = css_id(&iter->css) + 1;
	int found;
	int hierarchy_used;
	struct cgroup_subsys_state *css;

	hierarchy_used = iter->use_hierarchy;

	css_put(&iter->css);
	/* If no ROOT, walk all, ignore hierarchy */
	if (!cond || (root && !hierarchy_used))
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	do {
		iter = NULL;
		rcu_read_lock();

		css = css_get_next(&mem_cgroup_subsys, nextid,
				&root->css, &found);
		if (css && css_tryget(css))
			iter = container_of(css, struct mem_cgroup, css);
		rcu_read_unlock();
		/* If css is NULL, no more cgroups will be found */
		nextid = found + 1;
	} while (css && !iter);

	return iter;
}
/*
 * for_each_mem_cgroup_tree() visits all cgroups under the tree. Be
 * careful: breaking out of the loop is not allowed because we hold a
 * reference count. Instead, set "cond" to false and "continue" to exit
 * the loop.
 */
#define for_each_mem_cgroup_tree_cond(iter, root, cond)	\
	for (iter = mem_cgroup_start_loop(root);\
	     iter != NULL;\
	     iter = mem_cgroup_get_next(iter, root, cond))

#define for_each_mem_cgroup_tree(iter, root) \
	for_each_mem_cgroup_tree_cond(iter, root, true)

#define for_each_mem_cgroup_all(iter) \
	for_each_mem_cgroup_tree_cond(iter, NULL, true)
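
/*
 * Illustrative sketch of the "cond" early-exit convention described above
 * (the loop body is hypothetical; see mem_cgroup_oom_lock() below for a
 * real user):
 *
 *	struct mem_cgroup *iter;
 *	bool cond = true;
 *
 *	for_each_mem_cgroup_tree_cond(iter, root, cond) {
 *		if (done_with(iter)) {	(hypothetical predicate)
 *			cond = false;	(do NOT "break" here)
 *			continue;	(next iteration drops iter's ref)
 *		}
 *	}
 */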


static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
{
	return (mem == root_mem_cgroup);
}

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
	struct mem_cgroup *mem;

	if (!mm)
		return;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!mem))
		goto out;

	switch (idx) {
	case PGMAJFAULT:
		mem_cgroup_pgmajfault(mem, 1);
		break;
	case PGFAULT:
		mem_cgroup_pgfault(mem, 1);
		break;
	default:
		BUG();
	}
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(mem_cgroup_count_vm_event);

/*
 * The following LRU functions are allowed to be used without PCG_LOCK.
 * Operations are called by routines of the global LRU independently from
 * memcg. What we have to take care of here is the validity of
 * pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happen when
 * 1. charge
 * 2. moving account
 * In the typical case, "charge" is done before add-to-lru. The exception is
 * SwapCache, which is added to the LRU before being charged.
 * If the PCG_USED bit is not set, page_cgroup is not added to this private
 * LRU. When moving account, the page is not on the LRU; it's isolated.
 */

void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return;
	pc = lookup_page_cgroup(page);
	/* can happen while we handle swapcache. */
	if (!TestClearPageCgroupAcctLRU(pc))
		return;
	VM_BUG_ON(!pc->mem_cgroup);
	/*
	 * We don't check PCG_USED bit. It's cleared when the "page" is finally
	 * removed from global LRU.
	 */
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	/* huge page split is done under lru_lock. so, we have no races. */
	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	VM_BUG_ON(list_empty(&pc->lru));
	list_del_init(&pc->lru);
}

void mem_cgroup_del_lru(struct page *page)
{
	mem_cgroup_del_lru_list(page, page_lru(page));
}

/*
 * Writeback is about to end against a page which has been marked for
 * immediate reclaim. If it still appears to be reclaimable, move it to the
 * tail of the inactive list.
 */
void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc;
	enum lru_list lru = page_lru(page);

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(page);
	/* unused or root page is not rotated. */
	if (!PageCgroupUsed(pc))
		return;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	list_move_tail(&pc->lru, &mz->lists[lru]);
}

void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc;

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(page);
	/* unused or root page is not rotated. */
	if (!PageCgroupUsed(pc))
		return;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	list_move(&pc->lru, &mz->lists[lru]);
}

void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return;
	pc = lookup_page_cgroup(page);
	VM_BUG_ON(PageCgroupAcctLRU(pc));
	if (!PageCgroupUsed(pc))
		return;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	/* huge page split is done under lru_lock. so, we have no races. */
	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
	SetPageCgroupAcctLRU(pc);
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	list_add(&pc->lru, &mz->lists[lru]);
}

/*
 * While handling SwapCache and other FUSE stuff, pc->mem_cgroup may be
 * changed while it's linked to the lru because the page may be reused after
 * it's fully uncharged. To handle that, unlink page_cgroup from the LRU when
 * charging it again. This is done under lock_page and expects that
 * zone->lru_lock is never held.
 */
static void mem_cgroup_lru_del_before_commit(struct page *page)
{
	unsigned long flags;
	struct zone *zone = page_zone(page);
	struct page_cgroup *pc = lookup_page_cgroup(page);

	/*
	 * Doing this check without taking ->lru_lock seems wrong but this
	 * is safe. Because if page_cgroup's USED bit is unset, the page
	 * will not be added to any memcg's LRU. If page_cgroup's USED bit is
	 * set, the commit after this will fail, anyway.
	 * All of this charge/uncharge is done under some mutual exclusion,
	 * so we don't need to take care of changes to the USED bit.
	 */
	if (likely(!PageLRU(page)))
		return;

	spin_lock_irqsave(&zone->lru_lock, flags);
	/*
	 * Forget old LRU when this page_cgroup is *not* used. This Used bit
	 * is guarded by lock_page() because the page is SwapCache.
	 */
	if (!PageCgroupUsed(pc))
		mem_cgroup_del_lru_list(page, page_lru(page));
	spin_unlock_irqrestore(&zone->lru_lock, flags);
}

static void mem_cgroup_lru_add_after_commit(struct page *page)
{
	unsigned long flags;
	struct zone *zone = page_zone(page);
	struct page_cgroup *pc = lookup_page_cgroup(page);

	/* take care that the page may be added to the LRU while we commit it */
	if (likely(!PageLRU(page)))
		return;
	spin_lock_irqsave(&zone->lru_lock, flags);
	/* link when the page is linked to LRU but page_cgroup isn't */
	if (PageLRU(page) && !PageCgroupAcctLRU(pc))
		mem_cgroup_add_lru_list(page, page_lru(page));
	spin_unlock_irqrestore(&zone->lru_lock, flags);
}


void mem_cgroup_move_lists(struct page *page,
			   enum lru_list from, enum lru_list to)
{
	if (mem_cgroup_disabled())
		return;
	mem_cgroup_del_lru_list(page, from);
	mem_cgroup_add_lru_list(page, to);
}

/*
 * Checks whether the given mem is the same as root_mem or lies in
 * root_mem's hierarchy subtree.
 */
static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_mem,
		struct mem_cgroup *mem)
{
	if (root_mem != mem) {
		return (root_mem->use_hierarchy &&
			css_is_ancestor(&mem->css, &root_mem->css));
	}

	return true;
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;
	struct mem_cgroup *curr = NULL;
	struct task_struct *p;

	p = find_lock_task_mm(task);
	if (!p)
		return 0;
	curr = try_get_mem_cgroup_from_mm(p->mm);
	task_unlock(p);
	if (!curr)
		return 0;
	/*
	 * We should check use_hierarchy of "mem", not "curr". Checking
	 * use_hierarchy of "curr" here would make this function return true
	 * whenever hierarchy is enabled in "curr" and "curr" is a child of
	 * "mem" in the *cgroup* hierarchy (even if use_hierarchy is disabled
	 * in "mem").
	 */
	ret = mem_cgroup_same_or_subtree(mem, curr);
	css_put(&curr->css);
	return ret;
}

static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long gb;
	unsigned long inactive_ratio;

	inactive = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON));
	active = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON));

	gb = (inactive + active) >> (30 - PAGE_SHIFT);
	if (gb)
		inactive_ratio = int_sqrt(10 * gb);
	else
		inactive_ratio = 1;

	if (present_pages) {
		present_pages[0] = inactive;
		present_pages[1] = active;
	}

	return inactive_ratio;
}

int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long present_pages[2];
	unsigned long inactive_ratio;

	inactive_ratio = calc_inactive_ratio(memcg, present_pages);

	inactive = present_pages[0];
	active = present_pages[1];

	if (inactive * inactive_ratio < active)
		return 1;

	return 0;
}

int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
	unsigned long active;
	unsigned long inactive;

	inactive = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE));
	active = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE));

	return (active > inactive);
}

struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone)
{
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);

	return &mz->reclaim_stat;
}

struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return NULL;

	pc = lookup_page_cgroup(page);
	if (!PageCgroupUsed(pc))
		return NULL;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	return &mz->reclaim_stat;
}

unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
	int nid = zone_to_nid(z);
	int zid = zone_idx(z);
	struct mem_cgroup_per_zone *mz;
	int lru = LRU_FILE * file + active;
	int ret;

	BUG_ON(!mem_cont);
	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
	src = &mz->lists[lru];

	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;

		if (unlikely(!PageCgroupUsed(pc)))
			continue;

		page = lookup_cgroup_page(pc);

		if (unlikely(!PageLRU(page)))
			continue;

		scan++;
		ret = __isolate_lru_page(page, mode, file);
		switch (ret) {
		case 0:
			list_move(&page->lru, dst);
			mem_cgroup_del_lru(page);
			nr_taken += hpage_nr_pages(page);
			break;
		case -EBUSY:
			/* we don't affect global LRU but rotate in our LRU */
			mem_cgroup_rotate_lru_list(page, page_lru(page));
			break;
		default:
			break;
		}
	}

	*scanned = scan;

	trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken,
				      0, 0, 0, mode);

	return nr_taken;
}

#define mem_cgroup_from_res_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @mem: the memory cgroup
 *
 * Returns the maximum amount of memory @mem can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *mem)
{
	unsigned long long margin;

	margin = res_counter_margin(&mem->res);
	if (do_swap_account)
		margin = min(margin, res_counter_margin(&mem->memsw));
	return margin >> PAGE_SHIFT;
}
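
/*
 * Worked example (hypothetical numbers): with res.limit = 100MB and
 * res.usage = 80MB the memory margin is 20MB, but with memsw.limit = 90MB
 * and memsw.usage = 85MB the mem+swap margin is only 5MB. With
 * do_swap_account enabled, the tighter of the two wins, so 5MB worth of
 * bytes (1280 pages with 4KB pages) is reported as chargeable.
 */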

int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	struct cgroup *cgrp = memcg->css.cgroup;

	/* root ? */
	if (cgrp->parent == NULL)
		return vm_swappiness;

	return memcg->swappiness;
}

static void mem_cgroup_start_move(struct mem_cgroup *mem)
{
	int cpu;

	get_online_cpus();
	spin_lock(&mem->pcp_counter_lock);
	for_each_online_cpu(cpu)
		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
	spin_unlock(&mem->pcp_counter_lock);
	put_online_cpus();

	synchronize_rcu();
}

static void mem_cgroup_end_move(struct mem_cgroup *mem)
{
	int cpu;

	if (!mem)
		return;
	get_online_cpus();
	spin_lock(&mem->pcp_counter_lock);
	for_each_online_cpu(cpu)
		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
	spin_unlock(&mem->pcp_counter_lock);
	put_online_cpus();
}
/*
 * Two routines for checking whether "mem" is under move_account() or not.
 *
 * mem_cgroup_stealed() - checks whether a cgroup is mc.from or not. This
 *			  is used to avoid races in accounting. If true,
 *			  pc->mem_cgroup may be overwritten.
 *
 * mem_cgroup_under_move() - checks whether a cgroup is mc.from, mc.to, or
 *			  under the hierarchy of a moving cgroup. This is
 *			  for waiting at high memory pressure caused by
 *			  "move".
 */

static bool mem_cgroup_stealed(struct mem_cgroup *mem)
{
	VM_BUG_ON(!rcu_read_lock_held());
	return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
}

static bool mem_cgroup_under_move(struct mem_cgroup *mem)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_same_or_subtree(mem, from)
		|| mem_cgroup_same_or_subtree(mem, to);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(mem)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

/**
 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	struct cgroup *task_cgrp;
	struct cgroup *mem_cgrp;
	/*
	 * Need a buffer in BSS, can't rely on allocations. The code relies
	 * on the assumption that OOM is serialized for memory controller.
	 * If this assumption is broken, revisit this code.
	 */
	static char memcg_name[PATH_MAX];
	int ret;

	if (!memcg || !p)
		return;


	rcu_read_lock();

	mem_cgrp = memcg->css.cgroup;
	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);

	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		/*
		 * Unfortunately, we are unable to convert to a useful name,
		 * but we'll still print out the usage information.
		 */
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	printk(KERN_INFO "Task in %s killed", memcg_name);

	rcu_read_lock();
	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	/*
	 * Continues from above, so we don't need a KERN_ level.
	 */
	printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
done:

	printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->res, RES_FAILCNT));
	printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
		"failcnt %llu\n",
		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
}

/*
 * This function returns the number of memcgs under the hierarchy tree.
 * Returns 1 (self count) if there are no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *mem)
{
	int num = 0;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, mem)
		num++;
	return num;
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	u64 limit;
	u64 memsw;

	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
	limit += total_swap_pages << PAGE_SHIFT;

	memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
	/*
	 * If memsw is finite and limits the amount of swap space available
	 * to this memcg, return that limit.
	 */
	return min(limit, memsw);
}
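
/*
 * Worked example (hypothetical numbers): with a 400MB memory limit, 1GB
 * of total swap and a 600MB mem+swap limit, the first candidate is
 * 400MB + 1GB = 1.4GB, but memsw caps it, so min(1.4GB, 600MB) = 600MB
 * is returned. With memsw effectively unlimited, the memory limit plus
 * total swap wins instead.
 */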

/*
 * Visit the first child (need not be the first child as per the ordering
 * of the cgroup list, since we track last_scanned_child) of @mem and use
 * that to reclaim free pages from.
 */
static struct mem_cgroup *
mem_cgroup_select_victim(struct mem_cgroup *root_mem)
{
	struct mem_cgroup *ret = NULL;
	struct cgroup_subsys_state *css;
	int nextid, found;

	if (!root_mem->use_hierarchy) {
		css_get(&root_mem->css);
		ret = root_mem;
	}

	while (!ret) {
		rcu_read_lock();
		nextid = root_mem->last_scanned_child + 1;
		css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
				   &found);
		if (css && css_tryget(css))
			ret = container_of(css, struct mem_cgroup, css);

		rcu_read_unlock();
		/* Updates scanning parameter */
		if (!css) {
			/* this means start scan from ID:1 */
			root_mem->last_scanned_child = 0;
		} else
			root_mem->last_scanned_child = found;
	}

	return ret;
}

/**
 * test_mem_cgroup_node_reclaimable
 * @mem: the target memcg
 * @nid: the node ID to be checked.
 * @noswap : specify true here if the user wants file only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *mem,
		int nid, bool noswap)
{
	if (mem_cgroup_node_nr_lru_pages(mem, nid, LRU_ALL_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (mem_cgroup_node_nr_lru_pages(mem, nid, LRU_ALL_ANON))
		return true;
	return false;
}
#if MAX_NUMNODES > 1

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 *
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem)
{
	int nid;
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&mem->numainfo_events))
		return;
	if (atomic_inc_return(&mem->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	mem->scan_nodes = node_states[N_HIGH_MEMORY];

	for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {

		if (!test_mem_cgroup_node_reclaimable(mem, nid, false))
			node_clear(nid, mem->scan_nodes);
	}

	atomic_set(&mem->numainfo_events, 0);
	atomic_set(&mem->numainfo_updating, 0);
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing the usage counter, starting from anywhere is OK. Considering
 * memory reclaim from the current node, there are pros and cons.
 *
 * Freeing memory from the current node means freeing memory from a node
 * which we'll use or have used. So, it may make the LRU bad. And if several
 * threads hit their limits, we will see contention on a node. But freeing
 * from a remote node means more costs for memory reclaim because of memory
 * latency.
 *
 * Now, we use round-robin. A better algorithm is welcomed.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
{
	int node;

	mem_cgroup_may_update_nodemask(mem);
	node = mem->last_scanned_node;

	node = next_node(node, mem->scan_nodes);
	if (node == MAX_NUMNODES)
		node = first_node(mem->scan_nodes);
	/*
	 * We call this when we hit the limit, not when pages are added to
	 * the LRU. No LRU may hold pages because all pages are UNEVICTABLE,
	 * or the memcg is too small and all pages are not on the LRU. In
	 * that case, we use the current node.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	mem->last_scanned_node = node;
	return node;
}

/*
 * Check all nodes whether they contain reclaimable pages or not.
 * For a quick scan, we make use of scan_nodes. This will allow us to skip
 * unused nodes. But scan_nodes is lazily updated and may not contain
 * enough new information. We need to do a double check.
 */
bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
{
	int nid;

	/*
	 * quick check...making use of scan_nodes.
	 * We can skip unused nodes.
	 */
	if (!nodes_empty(mem->scan_nodes)) {
		for (nid = first_node(mem->scan_nodes);
		     nid < MAX_NUMNODES;
		     nid = next_node(nid, mem->scan_nodes)) {

			if (test_mem_cgroup_node_reclaimable(mem, nid, noswap))
				return true;
		}
	}
	/*
	 * Check the rest of the nodes.
	 */
	for_each_node_state(nid, N_HIGH_MEMORY) {
		if (node_isset(nid, mem->scan_nodes))
			continue;
		if (test_mem_cgroup_node_reclaimable(mem, nid, noswap))
			return true;
	}
	return false;
}

#else
int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
{
	return 0;
}

bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
{
	return test_mem_cgroup_node_reclaimable(mem, 0, noswap);
}
#endif

/*
 * Scan the hierarchy if needed to reclaim memory. We remember the last child
 * we reclaimed from, so that we don't end up penalizing one child extensively
 * based on its position in the children list.
 *
 * root_mem is the original ancestor that we've been reclaiming from.
 *
 * We give up and return to the caller when we visit root_mem twice.
 * (other groups can be removed while we're walking....)
 *
 * If shrink==true, this returns immediately, to avoid freeing too much.
 */
static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
						struct zone *zone,
						gfp_t gfp_mask,
						unsigned long reclaim_options,
						unsigned long *total_scanned)
{
	struct mem_cgroup *victim;
	int ret, total = 0;
	int loop = 0;
	bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
	bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
	bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
	unsigned long excess;
	unsigned long nr_scanned;

	excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;

	/* If memsw_is_minimum==1, swap-out is of no use. */
	if (!check_soft && !shrink && root_mem->memsw_is_minimum)
		noswap = true;

	while (1) {
		victim = mem_cgroup_select_victim(root_mem);
		if (victim == root_mem) {
			loop++;
			/*
			 * We are not draining per cpu cached charges during
			 * soft limit reclaim because global reclaim doesn't
			 * care about charges. It tries to free some memory and
			 * charges will not give any.
			 */
			if (!check_soft && loop >= 1)
				drain_all_stock_async(root_mem);
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!check_soft || !total) {
					css_put(&victim->css);
					break;
				}
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too large, so we do not
				 * reclaim too much, nor too small, so we do
				 * not keep coming back to reclaim from this
				 * cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
					css_put(&victim->css);
					break;
				}
			}
		}
		if (!mem_cgroup_reclaimable(victim, noswap)) {
			/* this cgroup's local usage == 0 */
			css_put(&victim->css);
			continue;
		}
		/* we use swappiness of local cgroup */
		if (check_soft) {
			ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
				noswap, zone, &nr_scanned);
			*total_scanned += nr_scanned;
		} else
			ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
						noswap);
		css_put(&victim->css);
		/*
		 * When shrinking usage, we can't check whether we should stop
		 * here or reclaim more; that depends on the callers.
		 * last_scanned_child will work well enough for keeping
		 * fairness under the tree.
		 */
		if (shrink)
			return ret;
		total += ret;
		if (check_soft) {
			if (!res_counter_soft_limit_excess(&root_mem->res))
				return total;
		} else if (mem_cgroup_margin(root_mem))
			return total;
	}
	return total;
}

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 * Has to be called with memcg_oom_lock.
 */
static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
{
	struct mem_cgroup *iter, *failed = NULL;
	bool cond = true;

	for_each_mem_cgroup_tree_cond(iter, mem, cond) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot give a lock.
			 */
			failed = iter;
			cond = false;
		} else
			iter->oom_lock = true;
	}

	if (!failed)
		return true;

	/*
	 * OK, we failed to lock the whole subtree so we have to clean up
	 * what we set up to the failing subtree
	 */
	cond = true;
	for_each_mem_cgroup_tree_cond(iter, mem, cond) {
		if (iter == failed) {
			cond = false;
			continue;
		}
		iter->oom_lock = false;
	}
	return false;
}

/*
 * Has to be called with memcg_oom_lock
 */
static int mem_cgroup_oom_unlock(struct mem_cgroup *mem)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, mem)
		iter->oom_lock = false;
	return 0;
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *mem)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, mem)
		atomic_inc(&iter->under_oom);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *mem)
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
	 * mem_cgroup_oom_lock() may not be called. We have to use
	 * atomic_add_unless() here.
	 */
	for_each_mem_cgroup_tree(iter, mem)
		atomic_add_unless(&iter->under_oom, -1, 0);
}

static DEFINE_SPINLOCK(memcg_oom_lock);
static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *mem;
	wait_queue_t	wait;
};

static int memcg_oom_wake_function(wait_queue_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg,
			  *oom_wait_mem;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_mem = oom_wait_info->mem;

	/*
	 * Both oom_wait_info->mem and wake_mem are stable under us.
	 * Then we can use css_is_ancestor without taking care of RCU.
	 */
	if (!mem_cgroup_same_or_subtree(oom_wait_mem, wake_mem)
			&& !mem_cgroup_same_or_subtree(wake_mem, oom_wait_mem))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_wakeup_oom(struct mem_cgroup *mem)
{
	/* for filtering, pass "mem" as argument. */
	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
}

static void memcg_oom_recover(struct mem_cgroup *mem)
{
	if (mem && atomic_read(&mem->under_oom))
		memcg_wakeup_oom(mem);
}

/*
 * Try to call the OOM killer. Returns false if we should exit the
 * memory-reclaim loop.
 */
bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
{
	struct oom_wait_info owait;
	bool locked, need_to_kill;

	owait.mem = mem;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.task_list);
	need_to_kill = true;
	mem_cgroup_mark_under_oom(mem);

	/* At first, try to OOM lock hierarchy under mem.*/
	spin_lock(&memcg_oom_lock);
	locked = mem_cgroup_oom_lock(mem);
	/*
	 * Even if signal_pending(), we can't quit charge() loop without
	 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
	 * under OOM is always welcomed, use TASK_KILLABLE here.
	 */
	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	if (!locked || mem->oom_kill_disable)
		need_to_kill = false;
	if (locked)
		mem_cgroup_oom_notify(mem);
	spin_unlock(&memcg_oom_lock);

	if (need_to_kill) {
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(mem, mask);
	} else {
		schedule();
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}
	spin_lock(&memcg_oom_lock);
	if (locked)
		mem_cgroup_oom_unlock(mem);
	memcg_wakeup_oom(mem);
	spin_unlock(&memcg_oom_lock);

	mem_cgroup_unmark_under_oom(mem);

	if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
		return false;
	/* Give a chance to the dying process */
	schedule_timeout(1);
	return true;
}
1900
/*
 * Currently used to update mapped file statistics, but the routine can be
 * generalized to update other statistics as well.
 *
 * Notes: Race condition
 *
 * We usually use lock_page_cgroup() for accessing page_cgroup members, but
 * it tends to be costly. Under some conditions, we don't need to do so
 * _always_.
 *
 * Considering "charge", lock_page_cgroup() is not required because all
 * file-stat operations happen after a page is attached to the radix-tree.
 * There is no race with "charge".
 *
 * Considering "uncharge", we know that memcg intentionally doesn't clear
 * pc->mem_cgroup at "uncharge". So, we always see a valid pc->mem_cgroup
 * even if there is a race with "uncharge". The statistics themselves are
 * properly handled by flags.
 *
 * Considering "move", this is the only case where we see a race. To make
 * the race window small, we check the MEM_CGROUP_ON_MOVE percpu value and
 * detect the possibility of a race. If there is one, we take a lock.
 */
1924
1925void mem_cgroup_update_page_stat(struct page *page,
1926 enum mem_cgroup_page_stat_item idx, int val)
1927{
1928 struct mem_cgroup *mem;
1929 struct page_cgroup *pc = lookup_page_cgroup(page);
1930 bool need_unlock = false;
1931 unsigned long uninitialized_var(flags);
1932
1933 if (unlikely(!pc))
1934 return;
1935
1936 rcu_read_lock();
1937 mem = pc->mem_cgroup;
1938 if (unlikely(!mem || !PageCgroupUsed(pc)))
1939 goto out;
1940 /* pc->mem_cgroup is unstable ? */
1941 if (unlikely(mem_cgroup_stealed(mem)) || PageTransHuge(page)) {
		/* take a lock to stabilize pc->mem_cgroup */
1943 move_lock_page_cgroup(pc, &flags);
1944 need_unlock = true;
1945 mem = pc->mem_cgroup;
1946 if (!mem || !PageCgroupUsed(pc))
1947 goto out;
1948 }
1949
1950 switch (idx) {
1951 case MEMCG_NR_FILE_MAPPED:
1952 if (val > 0)
1953 SetPageCgroupFileMapped(pc);
1954 else if (!page_mapped(page))
1955 ClearPageCgroupFileMapped(pc);
1956 idx = MEM_CGROUP_STAT_FILE_MAPPED;
1957 break;
1958 default:
1959 BUG();
1960 }
1961
1962 this_cpu_add(mem->stat->count[idx], val);
1963
1964out:
1965 if (unlikely(need_unlock))
1966 move_unlock_page_cgroup(pc, &flags);
1967 rcu_read_unlock();
1968 return;
1969}
1970EXPORT_SYMBOL(mem_cgroup_update_page_stat);
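
/*
 * Usage sketch (illustrative only): a caller accounting a newly mapped
 * file page would pass a positive delta,
 *
 *	mem_cgroup_update_page_stat(page, MEMCG_NR_FILE_MAPPED, 1);
 *
 * and -1 when the page becomes unmapped. MEMCG_NR_FILE_MAPPED is the only
 * item handled by the switch above today.
 */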
1971
/*
 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: bigger machines may need a bigger batch.
 */
1976#define CHARGE_BATCH 32U
1977struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* this is never the root cgroup */
1979 unsigned int nr_pages;
1980 struct work_struct work;
1981 unsigned long flags;
1982#define FLUSHING_CACHED_CHARGE (0)
1983};
1984static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1985static DEFINE_MUTEX(percpu_charge_mutex);
1986
/*
 * Try to consume stocked charge on this cpu. On success, one page is
 * consumed from the local stock and true is returned. If the stock is
 * empty or holds charges from a cgroup other than the current target,
 * false is returned; the stock will be refilled later.
 */
1993static bool consume_stock(struct mem_cgroup *mem)
1994{
1995 struct memcg_stock_pcp *stock;
1996 bool ret = true;
1997
1998 stock = &get_cpu_var(memcg_stock);
1999 if (mem == stock->cached && stock->nr_pages)
2000 stock->nr_pages--;
2001 else /* need to call res_counter_charge */
2002 ret = false;
2003 put_cpu_var(memcg_stock);
2004 return ret;
2005}
2006
/*
 * Return the charges cached in the percpu stock to the res_counter and
 * reset the cached information.
 */
2010static void drain_stock(struct memcg_stock_pcp *stock)
2011{
2012 struct mem_cgroup *old = stock->cached;
2013
2014 if (stock->nr_pages) {
2015 unsigned long bytes = stock->nr_pages * PAGE_SIZE;
2016
2017 res_counter_uncharge(&old->res, bytes);
2018 if (do_swap_account)
2019 res_counter_uncharge(&old->memsw, bytes);
2020 stock->nr_pages = 0;
2021 }
2022 stock->cached = NULL;
2023}
2024
/*
 * This must be called with preemption disabled, or by a thread pinned to
 * the local cpu.
 */
2029static void drain_local_stock(struct work_struct *dummy)
2030{
2031 struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
2032 drain_stock(stock);
2033 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2034}
2035
/*
 * Cache charges taken from the res_counter in the local per-cpu area.
 * They will be consumed by consume_stock() later.
 */
2040static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
2041{
2042 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2043
2044 if (stock->cached != mem) { /* reset if necessary */
2045 drain_stock(stock);
2046 stock->cached = mem;
2047 }
2048 stock->nr_pages += nr_pages;
2049 put_cpu_var(memcg_stock);
2050}
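
/*
 * Illustrative lifecycle of the percpu stock (a summary of the helpers
 * above, not an additional interface): __mem_cgroup_try_charge() charges
 * CHARGE_BATCH pages against the res_counter in one go, consumes what it
 * needs, and parks the surplus here via refill_stock(). Subsequent
 * single-page charges by the same cgroup on this cpu are then satisfied
 * from consume_stock() without touching the res_counter, until the stock
 * runs dry or is drained.
 */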
2051
/*
 * Drain all per-CPU charge caches for the given root_mem and the
 * hierarchy subtree under it. The sync flag says whether we should block
 * until the work is done.
 */
2057static void drain_all_stock(struct mem_cgroup *root_mem, bool sync)
2058{
2059 int cpu, curcpu;
2060
2061 /* Notify other cpus that system-wide "drain" is running */
2062 get_online_cpus();
2063 curcpu = get_cpu();
2064 for_each_online_cpu(cpu) {
2065 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2066 struct mem_cgroup *mem;
2067
2068 mem = stock->cached;
2069 if (!mem || !stock->nr_pages)
2070 continue;
2071 if (!mem_cgroup_same_or_subtree(root_mem, mem))
2072 continue;
2073 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2074 if (cpu == curcpu)
2075 drain_local_stock(&stock->work);
2076 else
2077 schedule_work_on(cpu, &stock->work);
2078 }
2079 }
2080 put_cpu();
2081
2082 if (!sync)
2083 goto out;
2084
2085 for_each_online_cpu(cpu) {
2086 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2087 if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
2088 flush_work(&stock->work);
2089 }
2090out:
2091 put_online_cpus();
2092}
2093
/*
 * Try to drain stocked charges on other cpus. This function is
 * asynchronous and just schedules a work item per cpu to drain locally.
 * The caller can expect some charges to come back to the res_counter
 * later, but cannot wait for that.
 */
2100static void drain_all_stock_async(struct mem_cgroup *root_mem)
2101{
	/*
	 * If a drain is already in progress, avoid adding more kworker runs.
	 */
2105 if (!mutex_trylock(&percpu_charge_mutex))
2106 return;
2107 drain_all_stock(root_mem, false);
2108 mutex_unlock(&percpu_charge_mutex);
2109}
2110
2111/* This is a synchronous drain interface. */
2112static void drain_all_stock_sync(struct mem_cgroup *root_mem)
2113{
2114 /* called when force_empty is called */
2115 mutex_lock(&percpu_charge_mutex);
2116 drain_all_stock(root_mem, true);
2117 mutex_unlock(&percpu_charge_mutex);
2118}
2119
/*
 * This function drains the percpu counter values from a DEAD cpu and
 * moves them to the local cpu. Note that this function can be preempted.
 */
2124static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu)
2125{
2126 int i;
2127
2128 spin_lock(&mem->pcp_counter_lock);
2129 for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
2130 long x = per_cpu(mem->stat->count[i], cpu);
2131
2132 per_cpu(mem->stat->count[i], cpu) = 0;
2133 mem->nocpu_base.count[i] += x;
2134 }
2135 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
2136 unsigned long x = per_cpu(mem->stat->events[i], cpu);
2137
2138 per_cpu(mem->stat->events[i], cpu) = 0;
2139 mem->nocpu_base.events[i] += x;
2140 }
2141 /* need to clear ON_MOVE value, works as a kind of lock. */
2142 per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
2143 spin_unlock(&mem->pcp_counter_lock);
2144}
2145
2146static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu)
2147{
2148 int idx = MEM_CGROUP_ON_MOVE;
2149
2150 spin_lock(&mem->pcp_counter_lock);
2151 per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx];
2152 spin_unlock(&mem->pcp_counter_lock);
2153}
2154
2155static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
2156 unsigned long action,
2157 void *hcpu)
2158{
2159 int cpu = (unsigned long)hcpu;
2160 struct memcg_stock_pcp *stock;
2161 struct mem_cgroup *iter;
2162
	if (action == CPU_ONLINE) {
2164 for_each_mem_cgroup_all(iter)
2165 synchronize_mem_cgroup_on_move(iter, cpu);
2166 return NOTIFY_OK;
2167 }
2168
	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
2170 return NOTIFY_OK;
2171
2172 for_each_mem_cgroup_all(iter)
2173 mem_cgroup_drain_pcp_counter(iter, cpu);
2174
2175 stock = &per_cpu(memcg_stock, cpu);
2176 drain_stock(stock);
2177 return NOTIFY_OK;
2178}
2179
2180
2181/* See __mem_cgroup_try_charge() for details */
2182enum {
2183 CHARGE_OK, /* success */
2184 CHARGE_RETRY, /* need to retry but retry is not bad */
2185 CHARGE_NOMEM, /* we can't do more. return -ENOMEM */
	CHARGE_WOULDBLOCK, /* __GFP_WAIT wasn't set and not enough resources */
	CHARGE_OOM_DIE, /* the current task was killed because of OOM */
2188};
2189
2190static int mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
2191 unsigned int nr_pages, bool oom_check)
2192{
2193 unsigned long csize = nr_pages * PAGE_SIZE;
2194 struct mem_cgroup *mem_over_limit;
2195 struct res_counter *fail_res;
2196 unsigned long flags = 0;
2197 int ret;
2198
2199 ret = res_counter_charge(&mem->res, csize, &fail_res);
2200
2201 if (likely(!ret)) {
2202 if (!do_swap_account)
2203 return CHARGE_OK;
2204 ret = res_counter_charge(&mem->memsw, csize, &fail_res);
2205 if (likely(!ret))
2206 return CHARGE_OK;
2207
2208 res_counter_uncharge(&mem->res, csize);
2209 mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
2210 flags |= MEM_CGROUP_RECLAIM_NOSWAP;
2211 } else
2212 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
2213 /*
2214 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
2215 * of regular pages (CHARGE_BATCH), or a single regular page (1).
2216 *
2217 * Never reclaim on behalf of optional batching, retry with a
2218 * single page instead.
2219 */
2220 if (nr_pages == CHARGE_BATCH)
2221 return CHARGE_RETRY;
2222
2223 if (!(gfp_mask & __GFP_WAIT))
2224 return CHARGE_WOULDBLOCK;
2225
2226 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
2227 gfp_mask, flags, NULL);
2228 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2229 return CHARGE_RETRY;
2230 /*
2231 * Even though the limit is exceeded at this point, reclaim
2232 * may have been able to free some pages. Retry the charge
2233 * before killing the task.
2234 *
2235 * Only for regular pages, though: huge pages are rather
2236 * unlikely to succeed so close to the limit, and we fall back
2237 * to regular pages anyway in case of failure.
2238 */
2239 if (nr_pages == 1 && ret)
2240 return CHARGE_RETRY;
2241
	/*
	 * During a task move, charges can be doubly counted. So, it's
	 * better to wait until the end of the task move if one is going on.
	 */
2246 if (mem_cgroup_wait_acct_move(mem_over_limit))
2247 return CHARGE_RETRY;
2248
	/* If we don't need to invoke the OOM killer at all, return immediately */
2250 if (!oom_check)
2251 return CHARGE_NOMEM;
2252 /* check OOM */
2253 if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
2254 return CHARGE_OOM_DIE;
2255
2256 return CHARGE_RETRY;
2257}
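
/*
 * Illustrative summary (derived from the code above) of how the caller,
 * __mem_cgroup_try_charge(), reacts to each result:
 *
 *	CHARGE_OK		charge succeeded, surplus goes to the stock
 *	CHARGE_RETRY		retry, dropping back to a single-page charge
 *	CHARGE_WOULDBLOCK	fail with -ENOMEM (!__GFP_WAIT)
 *	CHARGE_NOMEM		fail with -ENOMEM unless oom == true
 *	CHARGE_OOM_DIE		task is dying, bypass the charge entirely
 */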
2258
/*
 * Unlike the exported interface, an "oom" parameter is added. If
 * oom==true, the OOM killer can be invoked.
 */
2263static int __mem_cgroup_try_charge(struct mm_struct *mm,
2264 gfp_t gfp_mask,
2265 unsigned int nr_pages,
2266 struct mem_cgroup **memcg,
2267 bool oom)
2268{
2269 unsigned int batch = max(CHARGE_BATCH, nr_pages);
2270 int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2271 struct mem_cgroup *mem = NULL;
2272 int ret;
2273
	/*
	 * Unlike the global VM's OOM kill, we're not under a system-level
	 * memory shortage here. So, let dying processes go ahead, in
	 * addition to MEMDIE processes.
	 */
2279 if (unlikely(test_thread_flag(TIF_MEMDIE)
2280 || fatal_signal_pending(current)))
2281 goto bypass;
2282
2283 /*
2284 * We always charge the cgroup the mm_struct belongs to.
2285 * The mm_struct's mem_cgroup changes on task migration if the
2286 * thread group leader migrates. It's possible that mm is not
2287 * set, if so charge the init_mm (happens for pagecache usage).
2288 */
2289 if (!*memcg && !mm)
2290 goto bypass;
2291again:
2292 if (*memcg) { /* css should be a valid one */
2293 mem = *memcg;
2294 VM_BUG_ON(css_is_removed(&mem->css));
2295 if (mem_cgroup_is_root(mem))
2296 goto done;
2297 if (nr_pages == 1 && consume_stock(mem))
2298 goto done;
2299 css_get(&mem->css);
2300 } else {
2301 struct task_struct *p;
2302
2303 rcu_read_lock();
2304 p = rcu_dereference(mm->owner);
		/*
		 * Because we don't have task_lock(), "p" can exit.
		 * In that case, "mem" can point to root, or p can be NULL
		 * due to a race with swapoff. Then, we have a small risk of
		 * mis-accounting. But such mis-accounting by races always
		 * happens because we don't hold cgroup_mutex(). Taking it
		 * would be overkill, so we allow that small race here.
		 * (*) swapoff and friends charge against the mm_struct, not
		 * the task_struct. So, mm->owner can be NULL.
		 */
2315 mem = mem_cgroup_from_task(p);
2316 if (!mem || mem_cgroup_is_root(mem)) {
2317 rcu_read_unlock();
2318 goto done;
2319 }
2320 if (nr_pages == 1 && consume_stock(mem)) {
			/*
			 * It seems dangerous to access memcg without
			 * css_get(). But considering how consume_stock
			 * works, it's not necessary. If consume_stock
			 * succeeds, some charges from this memcg are cached
			 * on this cpu. So, we don't need to call
			 * css_get()/css_tryget() before calling
			 * consume_stock().
			 */
2329 rcu_read_unlock();
2330 goto done;
2331 }
		/* After this point we may block, so we need to get a refcnt. */
2333 if (!css_tryget(&mem->css)) {
2334 rcu_read_unlock();
2335 goto again;
2336 }
2337 rcu_read_unlock();
2338 }
2339
2340 do {
2341 bool oom_check;
2342
2343 /* If killed, bypass charge */
2344 if (fatal_signal_pending(current)) {
2345 css_put(&mem->css);
2346 goto bypass;
2347 }
2348
2349 oom_check = false;
2350 if (oom && !nr_oom_retries) {
2351 oom_check = true;
2352 nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2353 }
2354
2355 ret = mem_cgroup_do_charge(mem, gfp_mask, batch, oom_check);
2356 switch (ret) {
2357 case CHARGE_OK:
2358 break;
2359 case CHARGE_RETRY: /* not in OOM situation but retry */
2360 batch = nr_pages;
2361 css_put(&mem->css);
2362 mem = NULL;
2363 goto again;
2364 case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
2365 css_put(&mem->css);
2366 goto nomem;
2367 case CHARGE_NOMEM: /* OOM routine works */
2368 if (!oom) {
2369 css_put(&mem->css);
2370 goto nomem;
2371 }
2372 /* If oom, we never return -ENOMEM */
2373 nr_oom_retries--;
2374 break;
2375 case CHARGE_OOM_DIE: /* Killed by OOM Killer */
2376 css_put(&mem->css);
2377 goto bypass;
2378 }
2379 } while (ret != CHARGE_OK);
2380
2381 if (batch > nr_pages)
2382 refill_stock(mem, batch - nr_pages);
2383 css_put(&mem->css);
2384done:
2385 *memcg = mem;
2386 return 0;
2387nomem:
2388 *memcg = NULL;
2389 return -ENOMEM;
2390bypass:
2391 *memcg = NULL;
2392 return 0;
2393}
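
/*
 * Usage sketch (illustrative only): charging follows a try_charge ->
 * commit (or cancel) pattern, as mem_cgroup_charge_common() below does:
 *
 *	struct mem_cgroup *mem = NULL;
 *
 *	if (__mem_cgroup_try_charge(mm, gfp_mask, 1, &mem, true))
 *		return -ENOMEM;
 *	if (mem)
 *		__mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
 *
 * with __mem_cgroup_cancel_charge() undoing the charge if the operation
 * is aborted between the two steps.
 */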
2394
/*
 * Sometimes we have to undo a charge we got by try_charge().
 * This function is for that: it uncharges the res_counters that
 * try_charge() charged.
 */
2400static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
2401 unsigned int nr_pages)
2402{
2403 if (!mem_cgroup_is_root(mem)) {
2404 unsigned long bytes = nr_pages * PAGE_SIZE;
2405
2406 res_counter_uncharge(&mem->res, bytes);
2407 if (do_swap_account)
2408 res_counter_uncharge(&mem->memsw, bytes);
2409 }
2410}
2411
/*
 * A helper function to get a mem_cgroup from an ID. Must be called under
 * rcu_read_lock(). The caller is responsible for calling css_is_removed()
 * or the like if that is a concern. (Dropping a refcnt from swap can be
 * called against a removed memcg.)
 */
2418static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2419{
2420 struct cgroup_subsys_state *css;
2421
2422 /* ID 0 is unused ID */
2423 if (!id)
2424 return NULL;
2425 css = css_lookup(&mem_cgroup_subsys, id);
2426 if (!css)
2427 return NULL;
2428 return container_of(css, struct mem_cgroup, css);
2429}
2430
2431struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
2432{
2433 struct mem_cgroup *mem = NULL;
2434 struct page_cgroup *pc;
2435 unsigned short id;
2436 swp_entry_t ent;
2437
2438 VM_BUG_ON(!PageLocked(page));
2439
2440 pc = lookup_page_cgroup(page);
2441 lock_page_cgroup(pc);
2442 if (PageCgroupUsed(pc)) {
2443 mem = pc->mem_cgroup;
2444 if (mem && !css_tryget(&mem->css))
2445 mem = NULL;
2446 } else if (PageSwapCache(page)) {
2447 ent.val = page_private(page);
2448 id = lookup_swap_cgroup(ent);
2449 rcu_read_lock();
2450 mem = mem_cgroup_lookup(id);
2451 if (mem && !css_tryget(&mem->css))
2452 mem = NULL;
2453 rcu_read_unlock();
2454 }
2455 unlock_page_cgroup(pc);
2456 return mem;
2457}
2458
2459static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
2460 struct page *page,
2461 unsigned int nr_pages,
2462 struct page_cgroup *pc,
2463 enum charge_type ctype)
2464{
2465 lock_page_cgroup(pc);
2466 if (unlikely(PageCgroupUsed(pc))) {
2467 unlock_page_cgroup(pc);
2468 __mem_cgroup_cancel_charge(mem, nr_pages);
2469 return;
2470 }
	/*
	 * We don't need lock_page_cgroup() on tail pages, because they are
	 * not accessed by any other context at this point.
	 */
2475 pc->mem_cgroup = mem;
	/*
	 * We access a page_cgroup asynchronously without lock_page_cgroup().
	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
	 * is accessed after testing the USED bit. To make pc->mem_cgroup
	 * visible before the USED bit, we need a memory barrier here.
	 * See mem_cgroup_add_lru_list(), etc.
	 */
2483 smp_wmb();
2484 switch (ctype) {
2485 case MEM_CGROUP_CHARGE_TYPE_CACHE:
2486 case MEM_CGROUP_CHARGE_TYPE_SHMEM:
2487 SetPageCgroupCache(pc);
2488 SetPageCgroupUsed(pc);
2489 break;
2490 case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2491 ClearPageCgroupCache(pc);
2492 SetPageCgroupUsed(pc);
2493 break;
2494 default:
2495 break;
2496 }
2497
2498 mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), nr_pages);
2499 unlock_page_cgroup(pc);
	/*
	 * "charge_statistics" updated the event counter. Then, check it.
	 * Insert the ancestor (and the ancestor's ancestors) into the
	 * soft-limit RB-tree if they exceed their soft limit.
	 */
2505 memcg_check_events(mem, page);
2506}
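
/*
 * Note on ordering (a hedged summary, not new behavior): the smp_wmb()
 * in __mem_cgroup_commit_charge() is assumed to pair with a read barrier
 * on the paths that test the USED bit first and only then dereference
 * pc->mem_cgroup, such as the LRU code referenced in the comment above.
 */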
2507
2508#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2509
2510#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\
2511 (1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION))
/*
 * Because tail pages are not marked as "used", set that here. We're under
 * zone->lru_lock, 'splitting on pmd' and compound_lock.
 */
2516void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
2517{
2518 struct page_cgroup *head_pc = lookup_page_cgroup(head);
2519 struct page_cgroup *tail_pc = lookup_page_cgroup(tail);
2520 unsigned long flags;
2521
2522 if (mem_cgroup_disabled())
2523 return;
2524 /*
2525 * We have no races with charge/uncharge but will have races with
2526 * page state accounting.
2527 */
2528 move_lock_page_cgroup(head_pc, &flags);
2529
2530 tail_pc->mem_cgroup = head_pc->mem_cgroup;
2531 smp_wmb(); /* see __commit_charge() */
2532 if (PageCgroupAcctLRU(head_pc)) {
2533 enum lru_list lru;
2534 struct mem_cgroup_per_zone *mz;
2535
		/*
		 * LRU flags cannot be copied because we need to add the tail
		 * page to the LRU by the generic call, and our hook will be
		 * called. We hold lru_lock, so reduce the counter directly.
		 */
2541 lru = page_lru(head);
2542 mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head);
2543 MEM_CGROUP_ZSTAT(mz, lru) -= 1;
2544 }
2545 tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
2546 move_unlock_page_cgroup(head_pc, &flags);
2547}
2548#endif
2549
2550/**
2551 * mem_cgroup_move_account - move account of the page
2552 * @page: the page
2553 * @nr_pages: number of regular pages (>1 for huge pages)
2554 * @pc: page_cgroup of the page.
2555 * @from: mem_cgroup which the page is moved from.
2556 * @to: mem_cgroup which the page is moved to. @from != @to.
2557 * @uncharge: whether we should call uncharge and css_put against @from.
2558 *
 * The caller must confirm the following:
 * - the page is not on the LRU (isolate_page() is useful.)
 * - compound_lock is held when nr_pages > 1
 *
 * This function doesn't do "charge" or css_get on the new cgroup; that
 * should be done by the caller (__mem_cgroup_try_charge would be useful).
 * If @uncharge is true, this function does "uncharge" from the old cgroup;
 * if it is false, the caller should do the "uncharge" itself.
2567 */
2568static int mem_cgroup_move_account(struct page *page,
2569 unsigned int nr_pages,
2570 struct page_cgroup *pc,
2571 struct mem_cgroup *from,
2572 struct mem_cgroup *to,
2573 bool uncharge)
2574{
2575 unsigned long flags;
2576 int ret;
2577
2578 VM_BUG_ON(from == to);
2579 VM_BUG_ON(PageLRU(page));
	/*
	 * The page is isolated from the LRU, so the collapse function
	 * will not handle this page. But page splitting can happen, so
	 * do this check under the compound page lock, which the caller
	 * should hold.
	 */
2586 ret = -EBUSY;
2587 if (nr_pages > 1 && !PageTransHuge(page))
2588 goto out;
2589
2590 lock_page_cgroup(pc);
2591
2592 ret = -EINVAL;
2593 if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
2594 goto unlock;
2595
2596 move_lock_page_cgroup(pc, &flags);
2597
2598 if (PageCgroupFileMapped(pc)) {
2599 /* Update mapped_file data for mem_cgroup */
2600 preempt_disable();
2601 __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2602 __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2603 preempt_enable();
2604 }
2605 mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
2606 if (uncharge)
2607 /* This is not "cancel", but cancel_charge does all we need. */
2608 __mem_cgroup_cancel_charge(from, nr_pages);
2609
2610 /* caller should have done css_get */
2611 pc->mem_cgroup = to;
2612 mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages);
	/*
	 * We charge against "to", which may not have any tasks. Then "to"
	 * can be under rmdir(). But in the current implementation, the
	 * callers of this function are only force_empty() and move charge,
	 * so it's guaranteed that "to" is never removed. So, we don't
	 * check the rmdir status here.
	 */
2620 move_unlock_page_cgroup(pc, &flags);
2621 ret = 0;
2622unlock:
2623 unlock_page_cgroup(pc);
2624 /*
2625 * check events
2626 */
2627 memcg_check_events(to, page);
2628 memcg_check_events(from, page);
2629out:
2630 return ret;
2631}
2632
2633/*
2634 * move charges to its parent.
2635 */
2636
2637static int mem_cgroup_move_parent(struct page *page,
2638 struct page_cgroup *pc,
2639 struct mem_cgroup *child,
2640 gfp_t gfp_mask)
2641{
2642 struct cgroup *cg = child->css.cgroup;
2643 struct cgroup *pcg = cg->parent;
2644 struct mem_cgroup *parent;
2645 unsigned int nr_pages;
2646 unsigned long uninitialized_var(flags);
2647 int ret;
2648
2649 /* Is ROOT ? */
2650 if (!pcg)
2651 return -EINVAL;
2652
2653 ret = -EBUSY;
2654 if (!get_page_unless_zero(page))
2655 goto out;
2656 if (isolate_lru_page(page))
2657 goto put;
2658
2659 nr_pages = hpage_nr_pages(page);
2660
2661 parent = mem_cgroup_from_cont(pcg);
2662 ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
2663 if (ret || !parent)
2664 goto put_back;
2665
2666 if (nr_pages > 1)
2667 flags = compound_lock_irqsave(page);
2668
2669 ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
2670 if (ret)
2671 __mem_cgroup_cancel_charge(parent, nr_pages);
2672
2673 if (nr_pages > 1)
2674 compound_unlock_irqrestore(page, flags);
2675put_back:
2676 putback_lru_page(page);
2677put:
2678 put_page(page);
2679out:
2680 return ret;
2681}
2682
2683/*
2684 * Charge the memory controller for page usage.
2685 * Return
2686 * 0 if the charge was successful
2687 * < 0 if the cgroup is over its limit
2688 */
2689static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
2690 gfp_t gfp_mask, enum charge_type ctype)
2691{
2692 struct mem_cgroup *mem = NULL;
2693 unsigned int nr_pages = 1;
2694 struct page_cgroup *pc;
2695 bool oom = true;
2696 int ret;
2697
2698 if (PageTransHuge(page)) {
2699 nr_pages <<= compound_order(page);
2700 VM_BUG_ON(!PageTransHuge(page));
2701 /*
2702 * Never OOM-kill a process for a huge page. The
2703 * fault handler will fall back to regular pages.
2704 */
2705 oom = false;
2706 }
2707
2708 pc = lookup_page_cgroup(page);
2709 BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */
2710
2711 ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &mem, oom);
2712 if (ret || !mem)
2713 return ret;
2714
2715 __mem_cgroup_commit_charge(mem, page, nr_pages, pc, ctype);
2716 return 0;
2717}
2718
2719int mem_cgroup_newpage_charge(struct page *page,
2720 struct mm_struct *mm, gfp_t gfp_mask)
2721{
2722 if (mem_cgroup_disabled())
2723 return 0;
	/*
	 * If already mapped, we don't have to account.
	 * If page cache, page->mapping has its address_space.
	 * But page->mapping may hold an out-of-use anon_vma pointer;
	 * detect that with the PageAnon() check. A newly-mapped anon
	 * page's page->mapping is NULL.
	 */
2731 if (page_mapped(page) || (page->mapping && !PageAnon(page)))
2732 return 0;
2733 if (unlikely(!mm))
2734 mm = &init_mm;
2735 return mem_cgroup_charge_common(page, mm, gfp_mask,
2736 MEM_CGROUP_CHARGE_TYPE_MAPPED);
2737}
2738
2739static void
2740__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2741 enum charge_type ctype);
2742
2743static void
2744__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *mem,
2745 enum charge_type ctype)
2746{
2747 struct page_cgroup *pc = lookup_page_cgroup(page);
	/*
	 * In some cases, SwapCache and FUSE (splice_buf->radixtree), the
	 * page is already on the LRU. It means the page may be on some
	 * other page_cgroup's LRU. Take care of it.
	 */
2753 mem_cgroup_lru_del_before_commit(page);
2754 __mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
2755 mem_cgroup_lru_add_after_commit(page);
2756 return;
2757}
2758
2759int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
2760 gfp_t gfp_mask)
2761{
2762 struct mem_cgroup *mem = NULL;
2763 int ret;
2764
2765 if (mem_cgroup_disabled())
2766 return 0;
2767 if (PageCompound(page))
2768 return 0;
2769
2770 if (unlikely(!mm))
2771 mm = &init_mm;
2772
2773 if (page_is_file_cache(page)) {
2774 ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &mem, true);
2775 if (ret || !mem)
2776 return ret;
2777
		/*
		 * FUSE reuses pages without going through the final
		 * put that would remove them from the LRU list; make
		 * sure that they get relinked properly.
		 */
2783 __mem_cgroup_commit_charge_lrucare(page, mem,
2784 MEM_CGROUP_CHARGE_TYPE_CACHE);
2785 return ret;
2786 }
2787 /* shmem */
2788 if (PageSwapCache(page)) {
2789 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2790 if (!ret)
2791 __mem_cgroup_commit_charge_swapin(page, mem,
2792 MEM_CGROUP_CHARGE_TYPE_SHMEM);
2793 } else
2794 ret = mem_cgroup_charge_common(page, mm, gfp_mask,
2795 MEM_CGROUP_CHARGE_TYPE_SHMEM);
2796
2797 return ret;
2798}
2799
/*
 * During swap-in (try_charge -> commit or cancel), the page is locked.
 * When try_charge() returns successfully, one refcnt on the memcg, not
 * tied to a struct page_cgroup, has been acquired. This refcnt will be
 * consumed by "commit()" or dropped by "cancel()".
 */
2806int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
2807 struct page *page,
2808 gfp_t mask, struct mem_cgroup **ptr)
2809{
2810 struct mem_cgroup *mem;
2811 int ret;
2812
2813 *ptr = NULL;
2814
2815 if (mem_cgroup_disabled())
2816 return 0;
2817
2818 if (!do_swap_account)
2819 goto charge_cur_mm;
2820 /*
2821 * A racing thread's fault, or swapoff, may have already updated
2822 * the pte, and even removed page from swap cache: in those cases
2823 * do_swap_page()'s pte_same() test will fail; but there's also a
2824 * KSM case which does need to charge the page.
2825 */
2826 if (!PageSwapCache(page))
2827 goto charge_cur_mm;
2828 mem = try_get_mem_cgroup_from_page(page);
2829 if (!mem)
2830 goto charge_cur_mm;
2831 *ptr = mem;
2832 ret = __mem_cgroup_try_charge(NULL, mask, 1, ptr, true);
2833 css_put(&mem->css);
2834 return ret;
2835charge_cur_mm:
2836 if (unlikely(!mm))
2837 mm = &init_mm;
2838 return __mem_cgroup_try_charge(mm, mask, 1, ptr, true);
2839}
2840
2841static void
2842__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2843 enum charge_type ctype)
2844{
2845 if (mem_cgroup_disabled())
2846 return;
2847 if (!ptr)
2848 return;
2849 cgroup_exclude_rmdir(&ptr->css);
2850
2851 __mem_cgroup_commit_charge_lrucare(page, ptr, ctype);
	/*
	 * Now the swap is in memory. This means this page may be counted
	 * both as mem and swap: a double count. Fix it by uncharging from
	 * memsw. Basically, this SwapCache is stable under lock_page(),
	 * but in do_swap_page() (memory.c), reuse_swap_page() may call
	 * delete_from_swap_cache() before we reach here.
	 */
2859 if (do_swap_account && PageSwapCache(page)) {
2860 swp_entry_t ent = {.val = page_private(page)};
2861 unsigned short id;
2862 struct mem_cgroup *memcg;
2863
2864 id = swap_cgroup_record(ent, 0);
2865 rcu_read_lock();
2866 memcg = mem_cgroup_lookup(id);
2867 if (memcg) {
			/*
			 * The recorded memcg may be an obsolete one, so
			 * avoid calling css_tryget.
			 */
2872 if (!mem_cgroup_is_root(memcg))
2873 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2874 mem_cgroup_swap_statistics(memcg, false);
2875 mem_cgroup_put(memcg);
2876 }
2877 rcu_read_unlock();
2878 }
	/*
	 * At swapin, we may charge against a cgroup which has no tasks, so
	 * rmdir()->pre_destroy() can be called while we do this charge.
	 * In that case, we need to call pre_destroy() again; check it here.
	 */
2884 cgroup_release_and_wakeup_rmdir(&ptr->css);
2885}
2886
2887void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
2888{
2889 __mem_cgroup_commit_charge_swapin(page, ptr,
2890 MEM_CGROUP_CHARGE_TYPE_MAPPED);
2891}
2892
2893void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
2894{
2895 if (mem_cgroup_disabled())
2896 return;
2897 if (!mem)
2898 return;
2899 __mem_cgroup_cancel_charge(mem, 1);
2900}
2901
2902static void mem_cgroup_do_uncharge(struct mem_cgroup *mem,
2903 unsigned int nr_pages,
2904 const enum charge_type ctype)
2905{
2906 struct memcg_batch_info *batch = NULL;
2907 bool uncharge_memsw = true;
2908
2909 /* If swapout, usage of swap doesn't decrease */
2910 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2911 uncharge_memsw = false;
2912
	batch = &current->memcg_batch;
	/*
	 * Usually, we do css_get() when we remember a memcg pointer.
	 * But in this case, we keep res->usage until the end of a series
	 * of uncharges, so it's OK to ignore the memcg's refcnt.
	 */
2919 if (!batch->memcg)
2920 batch->memcg = mem;
	/*
	 * do_batch > 0 when unmapping pages or during inode
	 * invalidate/truncate. In those cases, all pages freed back to back
	 * can be expected to be in the same cgroup, and we have a chance to
	 * coalesce uncharges. But we do uncharge one by one if this task
	 * was killed by OOM (TIF_MEMDIE), because we want to uncharge as
	 * soon as possible.
	 */
2928
2929 if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
2930 goto direct_uncharge;
2931
2932 if (nr_pages > 1)
2933 goto direct_uncharge;
2934
	/*
	 * In the typical case, batch->memcg == mem. This means we can
	 * merge a series of uncharges into one uncharge of the res_counter.
	 * If not, we uncharge the res_counter one by one.
	 */
2940 if (batch->memcg != mem)
2941 goto direct_uncharge;
2942 /* remember freed charge and uncharge it later */
2943 batch->nr_pages++;
2944 if (uncharge_memsw)
2945 batch->memsw_nr_pages++;
2946 return;
2947direct_uncharge:
2948 res_counter_uncharge(&mem->res, nr_pages * PAGE_SIZE);
2949 if (uncharge_memsw)
2950 res_counter_uncharge(&mem->memsw, nr_pages * PAGE_SIZE);
2951 if (unlikely(batch->memcg != mem))
2952 memcg_oom_recover(mem);
2953 return;
2954}
2955
2956/*
2957 * uncharge if !page_mapped(page)
2958 */
2959static struct mem_cgroup *
2960__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
2961{
2962 struct mem_cgroup *mem = NULL;
2963 unsigned int nr_pages = 1;
2964 struct page_cgroup *pc;
2965
2966 if (mem_cgroup_disabled())
2967 return NULL;
2968
2969 if (PageSwapCache(page))
2970 return NULL;
2971
2972 if (PageTransHuge(page)) {
2973 nr_pages <<= compound_order(page);
2974 VM_BUG_ON(!PageTransHuge(page));
2975 }
2976 /*
2977 * Check if our page_cgroup is valid
2978 */
2979 pc = lookup_page_cgroup(page);
2980 if (unlikely(!pc || !PageCgroupUsed(pc)))
2981 return NULL;
2982
2983 lock_page_cgroup(pc);
2984
2985 mem = pc->mem_cgroup;
2986
2987 if (!PageCgroupUsed(pc))
2988 goto unlock_out;
2989
2990 switch (ctype) {
2991 case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2992 case MEM_CGROUP_CHARGE_TYPE_DROP:
2993 /* See mem_cgroup_prepare_migration() */
2994 if (page_mapped(page) || PageCgroupMigration(pc))
2995 goto unlock_out;
2996 break;
2997 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
2998 if (!PageAnon(page)) { /* Shared memory */
2999 if (page->mapping && !page_is_file_cache(page))
3000 goto unlock_out;
3001 } else if (page_mapped(page)) /* Anon */
3002 goto unlock_out;
3003 break;
3004 default:
3005 break;
3006 }
3007
3008 mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -nr_pages);
3009
3010 ClearPageCgroupUsed(pc);
	/*
	 * pc->mem_cgroup is not cleared here. It will be accessed when the
	 * page is freed from the LRU. This is safe because an uncharged
	 * page is expected not to be reused (it is freed soon). The
	 * exception is SwapCache, which is handled by special functions.
	 */
3017
3018 unlock_page_cgroup(pc);
	/*
	 * Even after unlock, we still hold mem->res.usage here, so this
	 * memcg will never be freed.
	 */
3023 memcg_check_events(mem, page);
3024 if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
3025 mem_cgroup_swap_statistics(mem, true);
3026 mem_cgroup_get(mem);
3027 }
3028 if (!mem_cgroup_is_root(mem))
3029 mem_cgroup_do_uncharge(mem, nr_pages, ctype);
3030
3031 return mem;
3032
3033unlock_out:
3034 unlock_page_cgroup(pc);
3035 return NULL;
3036}
3037
3038void mem_cgroup_uncharge_page(struct page *page)
3039{
3040 /* early check. */
3041 if (page_mapped(page))
3042 return;
3043 if (page->mapping && !PageAnon(page))
3044 return;
3045 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
3046}
3047
3048void mem_cgroup_uncharge_cache_page(struct page *page)
3049{
3050 VM_BUG_ON(page_mapped(page));
3051 VM_BUG_ON(page->mapping);
3052 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
3053}
3054
/*
 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
 * In those cases, pages are freed continuously and we can expect them to
 * be in the same memcg. All these callers themselves limit the number of
 * pages freed at once, so uncharge_start/end() is called properly.
 * This may be called multiple times in a context (nesting is allowed).
 */
3062
3063void mem_cgroup_uncharge_start(void)
3064{
3065 current->memcg_batch.do_batch++;
	/* Nesting is allowed. */
3067 if (current->memcg_batch.do_batch == 1) {
3068 current->memcg_batch.memcg = NULL;
3069 current->memcg_batch.nr_pages = 0;
3070 current->memcg_batch.memsw_nr_pages = 0;
3071 }
3072}
3073
3074void mem_cgroup_uncharge_end(void)
3075{
	struct memcg_batch_info *batch = &current->memcg_batch;
3077
3078 if (!batch->do_batch)
3079 return;
3080
3081 batch->do_batch--;
3082 if (batch->do_batch) /* If stacked, do nothing. */
3083 return;
3084
3085 if (!batch->memcg)
3086 return;
	/*
	 * This "batch->memcg" is valid without any css_get/put, etc.,
	 * because we hide the charges behind us.
	 */
3091 if (batch->nr_pages)
3092 res_counter_uncharge(&batch->memcg->res,
3093 batch->nr_pages * PAGE_SIZE);
3094 if (batch->memsw_nr_pages)
3095 res_counter_uncharge(&batch->memcg->memsw,
3096 batch->memsw_nr_pages * PAGE_SIZE);
3097 memcg_oom_recover(batch->memcg);
3098 /* forget this pointer (for sanity check) */
3099 batch->memcg = NULL;
3100}
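
/*
 * Usage sketch (illustrative only): a caller that frees many pages in a
 * row brackets the frees so that the res_counter is touched once per
 * batch instead of once per page:
 *
 *	mem_cgroup_uncharge_start();
 *	for each page being freed
 *		mem_cgroup_uncharge_page(page);
 *	mem_cgroup_uncharge_end();
 */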
3101
3102#ifdef CONFIG_SWAP
/*
 * Called after __delete_from_swap_cache() to drop the "page" account.
 * The memcg information is recorded in the swap_cgroup of "ent".
 */
3107void
3108mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
3109{
3110 struct mem_cgroup *memcg;
3111 int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
3112
3113 if (!swapout) /* this was a swap cache but the swap is unused ! */
3114 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
3115
3116 memcg = __mem_cgroup_uncharge_common(page, ctype);
3117
3118 /*
3119 * record memcg information, if swapout && memcg != NULL,
3120 * mem_cgroup_get() was called in uncharge().
3121 */
3122 if (do_swap_account && swapout && memcg)
3123 swap_cgroup_record(ent, css_id(&memcg->css));
3124}
3125#endif
3126
3127#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/*
 * Called from swap_entry_free(). Remove the record in swap_cgroup and
 * uncharge the "memsw" account.
 */
3132void mem_cgroup_uncharge_swap(swp_entry_t ent)
3133{
3134 struct mem_cgroup *memcg;
3135 unsigned short id;
3136
3137 if (!do_swap_account)
3138 return;
3139
3140 id = swap_cgroup_record(ent, 0);
3141 rcu_read_lock();
3142 memcg = mem_cgroup_lookup(id);
3143 if (memcg) {
		/*
		 * We uncharge this because the swap is freed. This memcg
		 * may be an obsolete one, so we avoid calling css_tryget.
		 */
3148 if (!mem_cgroup_is_root(memcg))
3149 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
3150 mem_cgroup_swap_statistics(memcg, false);
3151 mem_cgroup_put(memcg);
3152 }
3153 rcu_read_unlock();
3154}
3155
3156/**
3157 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3158 * @entry: swap entry to be moved
3159 * @from: mem_cgroup which the entry is moved from
3160 * @to: mem_cgroup which the entry is moved to
3161 * @need_fixup: whether we should fixup res_counters and refcounts.
3162 *
3163 * It succeeds only when the swap_cgroup's record for this entry is the same
3164 * as the mem_cgroup's id of @from.
3165 *
3166 * Returns 0 on success, -EINVAL on failure.
3167 *
3168 * The caller must have charged to @to, IOW, called res_counter_charge() about
3169 * both res and memsw, and called css_get().
3170 */
3171static int mem_cgroup_move_swap_account(swp_entry_t entry,
3172 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
3173{
3174 unsigned short old_id, new_id;
3175
3176 old_id = css_id(&from->css);
3177 new_id = css_id(&to->css);
3178
3179 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3180 mem_cgroup_swap_statistics(from, false);
3181 mem_cgroup_swap_statistics(to, true);
3182 /*
3183 * This function is only called from task migration context now.
3184 * It postpones res_counter and refcount handling till the end
3185 * of task migration(mem_cgroup_clear_mc()) for performance
3186 * improvement. But we cannot postpone mem_cgroup_get(to)
3187 * because if the process that has been moved to @to does
3188 * swap-in, the refcount of @to might be decreased to 0.
3189 */
3190 mem_cgroup_get(to);
3191 if (need_fixup) {
3192 if (!mem_cgroup_is_root(from))
3193 res_counter_uncharge(&from->memsw, PAGE_SIZE);
3194 mem_cgroup_put(from);
3195 /*
3196 * we charged both to->res and to->memsw, so we should
3197 * uncharge to->res.
3198 */
3199 if (!mem_cgroup_is_root(to))
3200 res_counter_uncharge(&to->res, PAGE_SIZE);
3201 }
3202 return 0;
3203 }
3204 return -EINVAL;
3205}
3206#else
3207static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3208 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
3209{
3210 return -EINVAL;
3211}
3212#endif
3213
/*
 * Before starting migration, account PAGE_SIZE to the mem_cgroup that the
 * old page belongs to.
 */
3218int mem_cgroup_prepare_migration(struct page *page,
3219 struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask)
3220{
3221 struct mem_cgroup *mem = NULL;
3222 struct page_cgroup *pc;
3223 enum charge_type ctype;
3224 int ret = 0;
3225
3226 *ptr = NULL;
3227
3228 VM_BUG_ON(PageTransHuge(page));
3229 if (mem_cgroup_disabled())
3230 return 0;
3231
3232 pc = lookup_page_cgroup(page);
3233 lock_page_cgroup(pc);
3234 if (PageCgroupUsed(pc)) {
3235 mem = pc->mem_cgroup;
3236 css_get(&mem->css);
		/*
		 * At migration of an anonymous page, its mapcount goes down
		 * to 0 and uncharge() will be called. But, even if it's fully
		 * unmapped, migration may fail and this page has to be
		 * charged again. We set the MIGRATION flag here and delay
		 * uncharge until end_migration() is called.
		 *
		 * Corner Case Thinking
		 * A)
		 * When the old page was mapped as Anon and it's unmapped
		 * and freed while migration was ongoing:
		 * If unmap finds the old page, uncharge() of it will be
		 * delayed until end_migration(). If unmap finds a new page,
		 * it's uncharged when its mapcount goes from 1 to 0. If
		 * unmap finds a swap migration entry, the new page will not
		 * be mapped and end_migration() will find it (mapcount==0).
		 *
		 * B)
		 * When the old page was mapped but migration fails, the
		 * kernel remaps it. A charge for it is kept by the MIGRATION
		 * flag even if its mapcount goes down to 0. We can remap it
		 * successfully without charging again.
		 *
		 * C)
		 * The "old" page is under lock_page() until the end of
		 * migration, so the old page itself will not be swapped out.
		 * If the new page is swapped out before end_migration, our
		 * hook into the usual swap-out path will catch the event.
		 */
3266 if (PageAnon(page))
3267 SetPageCgroupMigration(pc);
3268 }
3269 unlock_page_cgroup(pc);
3270 /*
3271 * If the page is not charged at this point,
3272 * we return here.
3273 */
3274 if (!mem)
3275 return 0;
3276
3277 *ptr = mem;
3278 ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, ptr, false);
3279 css_put(&mem->css);/* drop extra refcnt */
3280 if (ret || *ptr == NULL) {
3281 if (PageAnon(page)) {
3282 lock_page_cgroup(pc);
3283 ClearPageCgroupMigration(pc);
3284 unlock_page_cgroup(pc);
			/*
			 * The old page may have been fully unmapped while we
			 * kept it charged.
			 */
3288 mem_cgroup_uncharge_page(page);
3289 }
3290 return -ENOMEM;
3291 }
	/*
	 * We charge the new page before it's used/mapped. So, even if
	 * unlock_page() is called before end_migration, we can catch all
	 * events on this new page. If the new page is migrated but not
	 * remapped, its mapcount will finally be 0 and we call uncharge
	 * in end_migration().
	 */
3298 pc = lookup_page_cgroup(newpage);
3299 if (PageAnon(page))
3300 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
3301 else if (page_is_file_cache(page))
3302 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
3303 else
3304 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
3305 __mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
3306 return ret;
3307}
3308
/* remove the redundant charge if migration failed */
3310void mem_cgroup_end_migration(struct mem_cgroup *mem,
3311 struct page *oldpage, struct page *newpage, bool migration_ok)
3312{
3313 struct page *used, *unused;
3314 struct page_cgroup *pc;
3315
3316 if (!mem)
3317 return;
3318 /* blocks rmdir() */
3319 cgroup_exclude_rmdir(&mem->css);
3320 if (!migration_ok) {
3321 used = oldpage;
3322 unused = newpage;
3323 } else {
3324 used = newpage;
3325 unused = oldpage;
3326 }
	/*
	 * We disallowed uncharging pages under migration because the
	 * mapcount of the page goes down to zero, temporarily.
	 * Clear the flag and check whether the page should stay charged.
	 */
3332 pc = lookup_page_cgroup(oldpage);
3333 lock_page_cgroup(pc);
3334 ClearPageCgroupMigration(pc);
3335 unlock_page_cgroup(pc);
3336
3337 __mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
3338
	/*
	 * If a page is file cache, the radix-tree replacement is atomic
	 * and we can skip this check. When it was an Anon page, its
	 * mapcount went down to 0, but because we added the MIGRATION
	 * flag, it's not uncharged yet. There are several cases, but the
	 * page->mapcount check and the USED bit check in
	 * mem_cgroup_uncharge_page() will do enough checking.
	 * (see mem_cgroup_prepare_migration() also)
	 */
3347 if (PageAnon(used))
3348 mem_cgroup_uncharge_page(used);
	/*
	 * At migration, we may charge against a cgroup which has no tasks,
	 * so rmdir()->pre_destroy() can be called while we do this charge.
	 * In that case, we need to call pre_destroy() again; check it here.
	 */
3355 cgroup_release_and_wakeup_rmdir(&mem->css);
3356}
3357
3358#ifdef CONFIG_DEBUG_VM
3359static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
3360{
3361 struct page_cgroup *pc;
3362
3363 pc = lookup_page_cgroup(page);
3364 if (likely(pc) && PageCgroupUsed(pc))
3365 return pc;
3366 return NULL;
3367}
3368
3369bool mem_cgroup_bad_page_check(struct page *page)
3370{
3371 if (mem_cgroup_disabled())
3372 return false;
3373
3374 return lookup_page_cgroup_used(page) != NULL;
3375}
3376
3377void mem_cgroup_print_bad_page(struct page *page)
3378{
3379 struct page_cgroup *pc;
3380
3381 pc = lookup_page_cgroup_used(page);
3382 if (pc) {
3383 int ret = -1;
3384 char *path;
3385
3386 printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p",
3387 pc, pc->flags, pc->mem_cgroup);
3388
3389 path = kmalloc(PATH_MAX, GFP_KERNEL);
3390 if (path) {
3391 rcu_read_lock();
3392 ret = cgroup_path(pc->mem_cgroup->css.cgroup,
3393 path, PATH_MAX);
3394 rcu_read_unlock();
3395 }
3396
3397 printk(KERN_CONT "(%s)\n",
3398 (ret < 0) ? "cannot get the path" : path);
3399 kfree(path);
3400 }
3401}
3402#endif
3403
3404static DEFINE_MUTEX(set_limit_mutex);
3405
3406static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
3407 unsigned long long val)
3408{
3409 int retry_count;
3410 u64 memswlimit, memlimit;
3411 int ret = 0;
3412 int children = mem_cgroup_count_children(memcg);
3413 u64 curusage, oldusage;
3414 int enlarge;
3415
	/*
	 * To keep hierarchical_reclaim simple, how long we should retry
	 * depends on the caller. We set our retry-count to be a function
	 * of the number of children we have to visit in this loop.
	 */
3421 retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
3422
3423 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3424
3425 enlarge = 0;
3426 while (retry_count) {
3427 if (signal_pending(current)) {
3428 ret = -EINTR;
3429 break;
3430 }
		/*
		 * Rather than hiding it all in some function, this is done
		 * in an open-coded manner so you can see what it really does.
		 * We have to guarantee mem->res.limit <= mem->memsw.limit.
		 */
3436 mutex_lock(&set_limit_mutex);
3437 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3438 if (memswlimit < val) {
3439 ret = -EINVAL;
3440 mutex_unlock(&set_limit_mutex);
3441 break;
3442 }
3443
3444 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3445 if (memlimit < val)
3446 enlarge = 1;
3447
3448 ret = res_counter_set_limit(&memcg->res, val);
3449 if (!ret) {
3450 if (memswlimit == val)
3451 memcg->memsw_is_minimum = true;
3452 else
3453 memcg->memsw_is_minimum = false;
3454 }
3455 mutex_unlock(&set_limit_mutex);
3456
3457 if (!ret)
3458 break;
3459
3460 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
3461 MEM_CGROUP_RECLAIM_SHRINK,
3462 NULL);
3463 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
		/* Was the usage reduced? */
3465 if (curusage >= oldusage)
3466 retry_count--;
3467 else
3468 oldusage = curusage;
3469 }
3470 if (!ret && enlarge)
3471 memcg_oom_recover(memcg);
3472
3473 return ret;
3474}
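
/*
 * Illustrative note on the ordering constraint enforced above (the values
 * are examples only): with memory.limit_in_bytes == 256M and
 * memory.memsw.limit_in_bytes == 512M, raising the memory limit to 1G
 * fails with -EINVAL because memswlimit < val; userspace has to raise the
 * memsw limit to 1G first and only then the memory limit.
 */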
3475
3476static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
3477 unsigned long long val)
3478{
3479 int retry_count;
3480 u64 memlimit, memswlimit, oldusage, curusage;
3481 int children = mem_cgroup_count_children(memcg);
3482 int ret = -EBUSY;
3483 int enlarge = 0;
3484
	/* see mem_cgroup_resize_limit() */
3486 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
3487 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3488 while (retry_count) {
3489 if (signal_pending(current)) {
3490 ret = -EINTR;
3491 break;
3492 }
		/*
		 * Rather than hiding it all in some function, this is done
		 * in an open-coded manner so you can see what it really does.
		 * We have to guarantee mem->res.limit <= mem->memsw.limit.
		 */
3498 mutex_lock(&set_limit_mutex);
3499 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3500 if (memlimit > val) {
3501 ret = -EINVAL;
3502 mutex_unlock(&set_limit_mutex);
3503 break;
3504 }
3505 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3506 if (memswlimit < val)
3507 enlarge = 1;
3508 ret = res_counter_set_limit(&memcg->memsw, val);
3509 if (!ret) {
3510 if (memlimit == val)
3511 memcg->memsw_is_minimum = true;
3512 else
3513 memcg->memsw_is_minimum = false;
3514 }
3515 mutex_unlock(&set_limit_mutex);
3516
3517 if (!ret)
3518 break;
3519
3520 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
3521 MEM_CGROUP_RECLAIM_NOSWAP |
3522 MEM_CGROUP_RECLAIM_SHRINK,
3523 NULL);
3524 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
		/* Was the usage reduced? */
3526 if (curusage >= oldusage)
3527 retry_count--;
3528 else
3529 oldusage = curusage;
3530 }
3531 if (!ret && enlarge)
3532 memcg_oom_recover(memcg);
3533 return ret;
3534}
3535
3536unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3537 gfp_t gfp_mask,
3538 unsigned long *total_scanned)
3539{
3540 unsigned long nr_reclaimed = 0;
3541 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
3542 unsigned long reclaimed;
3543 int loop = 0;
3544 struct mem_cgroup_tree_per_zone *mctz;
3545 unsigned long long excess;
3546 unsigned long nr_scanned;
3547
3548 if (order > 0)
3549 return 0;
3550
3551 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
	/*
	 * This loop can run for a while, especially if mem_cgroups
	 * continuously keep exceeding their soft limit and putting the
	 * system under pressure.
	 */
3557 do {
3558 if (next_mz)
3559 mz = next_mz;
3560 else
3561 mz = mem_cgroup_largest_soft_limit_node(mctz);
3562 if (!mz)
3563 break;
3564
3565 nr_scanned = 0;
3566 reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
3567 gfp_mask,
3568 MEM_CGROUP_RECLAIM_SOFT,
3569 &nr_scanned);
3570 nr_reclaimed += reclaimed;
3571 *total_scanned += nr_scanned;
3572 spin_lock(&mctz->lock);
3573
		/*
		 * If we failed to reclaim anything from this memory cgroup,
		 * it is time to move on to the next cgroup.
		 */
3578 next_mz = NULL;
3579 if (!reclaimed) {
3580 do {
				/*
				 * Loop until we find yet another one.
				 *
				 * By the time we get the soft_limit lock
				 * again, someone might have added the
				 * group back on the RB tree. Iterate to
				 * make sure we get a different mem.
				 * mem_cgroup_largest_soft_limit_node returns
				 * NULL if no other cgroup is present on
				 * the tree.
				 */
3592 next_mz =
3593 __mem_cgroup_largest_soft_limit_node(mctz);
3594 if (next_mz == mz)
3595 css_put(&next_mz->mem->css);
3596 else /* next_mz == NULL or other memcg */
3597 break;
3598 } while (1);
3599 }
3600 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
3601 excess = res_counter_soft_limit_excess(&mz->mem->res);
		/*
		 * One school of thought says that we should not add
		 * back the node to the tree if reclaim returns 0.
		 * But our reclaim could return 0 simply because, due
		 * to priority, we are exposing a smaller subset of
		 * memory to reclaim from. Consider this a longer-term
		 * TODO.
		 */
3610 /* If excess == 0, no tree ops */
3611 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
3612 spin_unlock(&mctz->lock);
3613 css_put(&mz->mem->css);
3614 loop++;
3615 /*
3616 * Could not reclaim anything and there are no more
3617 * mem cgroups to try or we seem to be looping without
3618 * reclaiming anything.
3619 */
3620 if (!nr_reclaimed &&
3621 (next_mz == NULL ||
3622 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3623 break;
3624 } while (!nr_reclaimed);
3625 if (next_mz)
3626 css_put(&next_mz->mem->css);
3627 return nr_reclaimed;
3628}
3629
/*
 * This routine traverses the page_cgroups on the given list and drops them
 * all. *And* this routine doesn't reclaim the pages themselves, it just
 * removes the page_cgroups.
 */
3634static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
3635 int node, int zid, enum lru_list lru)
3636{
3637 struct zone *zone;
3638 struct mem_cgroup_per_zone *mz;
3639 struct page_cgroup *pc, *busy;
3640 unsigned long flags, loop;
3641 struct list_head *list;
3642 int ret = 0;
3643
3644 zone = &NODE_DATA(node)->node_zones[zid];
3645 mz = mem_cgroup_zoneinfo(mem, node, zid);
3646 list = &mz->lists[lru];
3647
3648 loop = MEM_CGROUP_ZSTAT(mz, lru);
	/* give some margin against -EBUSY etc. */
3650 loop += 256;
3651 busy = NULL;
3652 while (loop--) {
3653 struct page *page;
3654
3655 ret = 0;
3656 spin_lock_irqsave(&zone->lru_lock, flags);
3657 if (list_empty(list)) {
3658 spin_unlock_irqrestore(&zone->lru_lock, flags);
3659 break;
3660 }
3661 pc = list_entry(list->prev, struct page_cgroup, lru);
3662 if (busy == pc) {
3663 list_move(&pc->lru, list);
3664 busy = NULL;
3665 spin_unlock_irqrestore(&zone->lru_lock, flags);
3666 continue;
3667 }
3668 spin_unlock_irqrestore(&zone->lru_lock, flags);
3669
3670 page = lookup_cgroup_page(pc);
3671
3672 ret = mem_cgroup_move_parent(page, pc, mem, GFP_KERNEL);
3673 if (ret == -ENOMEM)
3674 break;
3675
3676 if (ret == -EBUSY || ret == -EINVAL) {
3677 /* found lock contention or "pc" is obsolete. */
3678 busy = pc;
3679 cond_resched();
3680 } else
3681 busy = NULL;
3682 }
3683
3684 if (!ret && !list_empty(list))
3685 return -EBUSY;
3686 return ret;
3687}
3688
/*
 * Make the mem_cgroup's charge 0 if there are no tasks.
 * This enables this mem_cgroup to be deleted.
 */
3693static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
3694{
3695 int ret;
3696 int node, zid, shrink;
3697 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
3698 struct cgroup *cgrp = mem->css.cgroup;
3699
3700 css_get(&mem->css);
3701
3702 shrink = 0;
3703 /* should free all ? */
3704 if (free_all)
3705 goto try_to_free;
3706move_account:
3707 do {
3708 ret = -EBUSY;
3709 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
3710 goto out;
3711 ret = -EINTR;
3712 if (signal_pending(current))
3713 goto out;
		/* This is to make sure all *used* pages are on the LRU. */
3715 lru_add_drain_all();
3716 drain_all_stock_sync(mem);
3717 ret = 0;
3718 mem_cgroup_start_move(mem);
3719 for_each_node_state(node, N_HIGH_MEMORY) {
3720 for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
3721 enum lru_list l;
3722 for_each_lru(l) {
3723 ret = mem_cgroup_force_empty_list(mem,
3724 node, zid, l);
3725 if (ret)
3726 break;
3727 }
3728 }
3729 if (ret)
3730 break;
3731 }
3732 mem_cgroup_end_move(mem);
3733 memcg_oom_recover(mem);
3734 /* it seems parent cgroup doesn't have enough mem */
3735 if (ret == -ENOMEM)
3736 goto try_to_free;
3737 cond_resched();
3738 /* "ret" should also be checked to ensure all lists are empty. */
3739 } while (mem->res.usage > 0 || ret);
3740out:
3741 css_put(&mem->css);
3742 return ret;
3743
3744try_to_free:
	/* returns -EBUSY if there is a task or if we come here twice. */
3746 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
3747 ret = -EBUSY;
3748 goto out;
3749 }
	/* we call try-to-free pages to make this cgroup empty */
3751 lru_add_drain_all();
3752 /* try to free all pages in this cgroup */
3753 shrink = 1;
3754 while (nr_retries && mem->res.usage > 0) {
3755 int progress;
3756
3757 if (signal_pending(current)) {
3758 ret = -EINTR;
3759 goto out;
3760 }
3761 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
3762 false);
3763 if (!progress) {
3764 nr_retries--;
3765 /* maybe some writeback is necessary */
3766 congestion_wait(BLK_RW_ASYNC, HZ/10);
3767 }
3768
3769 }
3770 lru_add_drain();
3771 /* try move_account...there may be some *locked* pages. */
3772 goto move_account;
3773}
3774
3775int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
3776{
3777 return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
3778}
3779
3780
3781static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
3782{
3783 return mem_cgroup_from_cont(cont)->use_hierarchy;
3784}
3785
3786static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
3787 u64 val)
3788{
3789 int retval = 0;
3790 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3791 struct cgroup *parent = cont->parent;
3792 struct mem_cgroup *parent_mem = NULL;
3793
3794 if (parent)
3795 parent_mem = mem_cgroup_from_cont(parent);
3796
3797 cgroup_lock();
3798 /*
3799 * If parent's use_hierarchy is set, we can't make any modifications
3800 * in the child subtrees. If it is unset, then the change can
3801 * occur, provided the current cgroup has no children.
3802 *
3803	 * For the root cgroup, parent_mem is NULL; we allow the value to be
3804	 * set if there are no children.
3805 */
3806 if ((!parent_mem || !parent_mem->use_hierarchy) &&
3807 (val == 1 || val == 0)) {
3808 if (list_empty(&cont->children))
3809 mem->use_hierarchy = val;
3810 else
3811 retval = -EBUSY;
3812 } else
3813 retval = -EINVAL;
3814 cgroup_unlock();
3815
3816 return retval;
3817}
3818
3819
3820static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *mem,
3821 enum mem_cgroup_stat_index idx)
3822{
3823 struct mem_cgroup *iter;
3824 long val = 0;
3825
3826 /* Per-cpu values can be negative, use a signed accumulator */
3827 for_each_mem_cgroup_tree(iter, mem)
3828 val += mem_cgroup_read_stat(iter, idx);
3829
3830 if (val < 0) /* race ? */
3831 val = 0;
3832 return val;
3833}
3834
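/*
 * Current usage of this memcg in bytes. For non-root groups this is just
 * the res_counter value. For the root group the res_counter is not
 * consulted; usage is reconstructed by summing the cache + rss (+ swap)
 * statistics over the whole hierarchy instead.
 */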
3835static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
3836{
3837 u64 val;
3838
3839 if (!mem_cgroup_is_root(mem)) {
3840 if (!swap)
3841 return res_counter_read_u64(&mem->res, RES_USAGE);
3842 else
3843 return res_counter_read_u64(&mem->memsw, RES_USAGE);
3844 }
3845
3846 val = mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_CACHE);
3847 val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_RSS);
3848
3849 if (swap)
3850 val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
3851
3852 return val << PAGE_SHIFT;
3853}
3854
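/*
 * Generic read handler for the memory.* control files. cft->private
 * packs a resource type (_MEM or _MEMSWAP) together with a res_counter
 * attribute (RES_USAGE, RES_LIMIT, ...) via MEMFILE_PRIVATE(); it is
 * unpacked here with MEMFILE_TYPE()/MEMFILE_ATTR().
 */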
3855static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
3856{
3857 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3858 u64 val;
3859 int type, name;
3860
3861 type = MEMFILE_TYPE(cft->private);
3862 name = MEMFILE_ATTR(cft->private);
3863 switch (type) {
3864 case _MEM:
3865 if (name == RES_USAGE)
3866 val = mem_cgroup_usage(mem, false);
3867 else
3868 val = res_counter_read_u64(&mem->res, name);
3869 break;
3870 case _MEMSWAP:
3871 if (name == RES_USAGE)
3872 val = mem_cgroup_usage(mem, true);
3873 else
3874 val = res_counter_read_u64(&mem->memsw, name);
3875 break;
3876 default:
3877 BUG();
3878 break;
3879 }
3880 return val;
3881}
3882/*
3883 * Write handler for the limit files,
3884 * i.e. RES_LIMIT and RES_SOFT_LIMIT.
3885 */
3886static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
3887 const char *buffer)
3888{
3889 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3890 int type, name;
3891 unsigned long long val;
3892 int ret;
3893
3894 type = MEMFILE_TYPE(cft->private);
3895 name = MEMFILE_ATTR(cft->private);
3896 switch (name) {
3897 case RES_LIMIT:
3898 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3899 ret = -EINVAL;
3900 break;
3901 }
3902		/* this function does all the necessary parsing; reuse it */
3903 ret = res_counter_memparse_write_strategy(buffer, &val);
3904 if (ret)
3905 break;
3906 if (type == _MEM)
3907 ret = mem_cgroup_resize_limit(memcg, val);
3908 else
3909 ret = mem_cgroup_resize_memsw_limit(memcg, val);
3910 break;
3911 case RES_SOFT_LIMIT:
3912 ret = res_counter_memparse_write_strategy(buffer, &val);
3913 if (ret)
3914 break;
3915 /*
3916		 * For memsw, soft limits are hard to implement in terms
3917		 * of semantics; for now, we only support soft limits on
3918		 * memory controlled without swap
3919 */
3920 if (type == _MEM)
3921 ret = res_counter_set_soft_limit(&memcg->res, val);
3922 else
3923 ret = -EINVAL;
3924 break;
3925 default:
3926 ret = -EINVAL; /* should be BUG() ? */
3927 break;
3928 }
3929 return ret;
3930}
3931
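/*
 * Compute the limits that effectively apply to @memcg: walk up the
 * hierarchy and keep the smallest memory and memory+swap limit seen on
 * the way. Reported as hierarchical_memory_limit/hierarchical_memsw_limit
 * in memory.stat.
 */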
3932static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
3933 unsigned long long *mem_limit, unsigned long long *memsw_limit)
3934{
3935 struct cgroup *cgroup;
3936 unsigned long long min_limit, min_memsw_limit, tmp;
3937
3938 min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3939 min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3940 cgroup = memcg->css.cgroup;
3941 if (!memcg->use_hierarchy)
3942 goto out;
3943
3944 while (cgroup->parent) {
3945 cgroup = cgroup->parent;
3946 memcg = mem_cgroup_from_cont(cgroup);
3947 if (!memcg->use_hierarchy)
3948 break;
3949 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
3950 min_limit = min(min_limit, tmp);
3951 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3952 min_memsw_limit = min(min_memsw_limit, tmp);
3953 }
3954out:
3955 *mem_limit = min_limit;
3956 *memsw_limit = min_memsw_limit;
3957 return;
3958}
3959
3960static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
3961{
3962 struct mem_cgroup *mem;
3963 int type, name;
3964
3965 mem = mem_cgroup_from_cont(cont);
3966 type = MEMFILE_TYPE(event);
3967 name = MEMFILE_ATTR(event);
3968 switch (name) {
3969 case RES_MAX_USAGE:
3970 if (type == _MEM)
3971 res_counter_reset_max(&mem->res);
3972 else
3973 res_counter_reset_max(&mem->memsw);
3974 break;
3975 case RES_FAILCNT:
3976 if (type == _MEM)
3977 res_counter_reset_failcnt(&mem->res);
3978 else
3979 res_counter_reset_failcnt(&mem->memsw);
3980 break;
3981 }
3982
3983 return 0;
3984}
3985
3986static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
3987 struct cftype *cft)
3988{
3989 return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
3990}
3991
3992#ifdef CONFIG_MMU
3993static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3994 struct cftype *cft, u64 val)
3995{
3996 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3997
3998 if (val >= (1 << NR_MOVE_TYPE))
3999 return -EINVAL;
4000 /*
4001	 * We check this value several times, both in can_attach() and
4002	 * attach(), so we need the cgroup lock to prevent this value from
4003	 * becoming inconsistent.
4004 */
4005 cgroup_lock();
4006 mem->move_charge_at_immigrate = val;
4007 cgroup_unlock();
4008
4009 return 0;
4010}
4011#else
4012static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
4013 struct cftype *cft, u64 val)
4014{
4015 return -ENOSYS;
4016}
4017#endif
4018
4019
4020/* Statistics shown in memory.stat */
4021enum {
4022 MCS_CACHE,
4023 MCS_RSS,
4024 MCS_FILE_MAPPED,
4025 MCS_PGPGIN,
4026 MCS_PGPGOUT,
4027 MCS_SWAP,
4028 MCS_PGFAULT,
4029 MCS_PGMAJFAULT,
4030 MCS_INACTIVE_ANON,
4031 MCS_ACTIVE_ANON,
4032 MCS_INACTIVE_FILE,
4033 MCS_ACTIVE_FILE,
4034 MCS_UNEVICTABLE,
4035 NR_MCS_STAT,
4036};
4037
4038struct mcs_total_stat {
4039 s64 stat[NR_MCS_STAT];
4040};
4041
4042struct {
4043 char *local_name;
4044 char *total_name;
4045} memcg_stat_strings[NR_MCS_STAT] = {
4046 {"cache", "total_cache"},
4047 {"rss", "total_rss"},
4048 {"mapped_file", "total_mapped_file"},
4049 {"pgpgin", "total_pgpgin"},
4050 {"pgpgout", "total_pgpgout"},
4051 {"swap", "total_swap"},
4052 {"pgfault", "total_pgfault"},
4053 {"pgmajfault", "total_pgmajfault"},
4054 {"inactive_anon", "total_inactive_anon"},
4055 {"active_anon", "total_active_anon"},
4056 {"inactive_file", "total_inactive_file"},
4057 {"active_file", "total_active_file"},
4058 {"unevictable", "total_unevictable"}
4059};
4060
4061
4062static void
4063mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
4064{
4065 s64 val;
4066
4067 /* per cpu stat */
4068 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
4069 s->stat[MCS_CACHE] += val * PAGE_SIZE;
4070 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
4071 s->stat[MCS_RSS] += val * PAGE_SIZE;
4072 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
4073 s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
4074 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGIN);
4075 s->stat[MCS_PGPGIN] += val;
4076 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGOUT);
4077 s->stat[MCS_PGPGOUT] += val;
4078 if (do_swap_account) {
4079 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
4080 s->stat[MCS_SWAP] += val * PAGE_SIZE;
4081 }
4082 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGFAULT);
4083 s->stat[MCS_PGFAULT] += val;
4084 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGMAJFAULT);
4085 s->stat[MCS_PGMAJFAULT] += val;
4086
4087 /* per zone stat */
4088 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_INACTIVE_ANON));
4089 s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
4090 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_ACTIVE_ANON));
4091 s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
4092 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_INACTIVE_FILE));
4093 s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
4094 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_ACTIVE_FILE));
4095 s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
4096 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_UNEVICTABLE));
4097 s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
4098}
4099
4100static void
4101mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
4102{
4103 struct mem_cgroup *iter;
4104
4105 for_each_mem_cgroup_tree(iter, mem)
4106 mem_cgroup_get_local_stat(iter, s);
4107}
4108
4109#ifdef CONFIG_NUMA
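/*
 * memory.numa_stat layout: one line per category, a total followed by a
 * per-node breakdown, for example (values are illustrative only):
 *
 *   total=365 N0=123 N1=242
 *   file=100 N0=30 N1=70
 *   anon=250 N0=85 N1=165
 *   unevictable=15 N0=8 N1=7
 */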
4110static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
4111{
4112 int nid;
4113 unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
4114 unsigned long node_nr;
4115 struct cgroup *cont = m->private;
4116 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
4117
4118 total_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL);
4119 seq_printf(m, "total=%lu", total_nr);
4120 for_each_node_state(nid, N_HIGH_MEMORY) {
4121 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, LRU_ALL);
4122 seq_printf(m, " N%d=%lu", nid, node_nr);
4123 }
4124 seq_putc(m, '\n');
4125
4126 file_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_FILE);
4127 seq_printf(m, "file=%lu", file_nr);
4128 for_each_node_state(nid, N_HIGH_MEMORY) {
4129 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
4130 LRU_ALL_FILE);
4131 seq_printf(m, " N%d=%lu", nid, node_nr);
4132 }
4133 seq_putc(m, '\n');
4134
4135 anon_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_ANON);
4136 seq_printf(m, "anon=%lu", anon_nr);
4137 for_each_node_state(nid, N_HIGH_MEMORY) {
4138 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
4139 LRU_ALL_ANON);
4140 seq_printf(m, " N%d=%lu", nid, node_nr);
4141 }
4142 seq_putc(m, '\n');
4143
4144 unevictable_nr = mem_cgroup_nr_lru_pages(mem_cont, BIT(LRU_UNEVICTABLE));
4145 seq_printf(m, "unevictable=%lu", unevictable_nr);
4146 for_each_node_state(nid, N_HIGH_MEMORY) {
4147 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
4148 BIT(LRU_UNEVICTABLE));
4149 seq_printf(m, " N%d=%lu", nid, node_nr);
4150 }
4151 seq_putc(m, '\n');
4152 return 0;
4153}
4154#endif /* CONFIG_NUMA */
4155
4156static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
4157 struct cgroup_map_cb *cb)
4158{
4159 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
4160 struct mcs_total_stat mystat;
4161 int i;
4162
4163 memset(&mystat, 0, sizeof(mystat));
4164 mem_cgroup_get_local_stat(mem_cont, &mystat);
4165
4166
4167 for (i = 0; i < NR_MCS_STAT; i++) {
4168 if (i == MCS_SWAP && !do_swap_account)
4169 continue;
4170 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
4171 }
4172
4173 /* Hierarchical information */
4174 {
4175 unsigned long long limit, memsw_limit;
4176 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
4177 cb->fill(cb, "hierarchical_memory_limit", limit);
4178 if (do_swap_account)
4179 cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
4180 }
4181
4182 memset(&mystat, 0, sizeof(mystat));
4183 mem_cgroup_get_total_stat(mem_cont, &mystat);
4184 for (i = 0; i < NR_MCS_STAT; i++) {
4185 if (i == MCS_SWAP && !do_swap_account)
4186 continue;
4187 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
4188 }
4189
4190#ifdef CONFIG_DEBUG_VM
4191 cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
4192
4193 {
4194 int nid, zid;
4195 struct mem_cgroup_per_zone *mz;
4196 unsigned long recent_rotated[2] = {0, 0};
4197 unsigned long recent_scanned[2] = {0, 0};
4198
4199 for_each_online_node(nid)
4200 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
4201 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
4202
4203 recent_rotated[0] +=
4204 mz->reclaim_stat.recent_rotated[0];
4205 recent_rotated[1] +=
4206 mz->reclaim_stat.recent_rotated[1];
4207 recent_scanned[0] +=
4208 mz->reclaim_stat.recent_scanned[0];
4209 recent_scanned[1] +=
4210 mz->reclaim_stat.recent_scanned[1];
4211 }
4212 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
4213 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
4214 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
4215 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
4216 }
4217#endif
4218
4219 return 0;
4220}
4221
4222static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
4223{
4224 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4225
4226 return mem_cgroup_swappiness(memcg);
4227}
4228
4229static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
4230 u64 val)
4231{
4232 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4233 struct mem_cgroup *parent;
4234
4235 if (val > 100)
4236 return -EINVAL;
4237
4238 if (cgrp->parent == NULL)
4239 return -EINVAL;
4240
4241 parent = mem_cgroup_from_cont(cgrp->parent);
4242
4243 cgroup_lock();
4244
4245	/* If under hierarchy, only a childless hierarchy root can set this value */
4246 if ((parent->use_hierarchy) ||
4247 (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
4248 cgroup_unlock();
4249 return -EINVAL;
4250 }
4251
4252 memcg->swappiness = val;
4253
4254 cgroup_unlock();
4255
4256 return 0;
4257}
4258
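/*
 * Signal every eventfd whose threshold has been crossed (in either
 * direction) since the last check, then update current_threshold.
 * The thresholds array is sorted and current_threshold caches the index
 * of the largest threshold below usage, so the common no-crossing case
 * touches only one array element on each side.
 */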
4259static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4260{
4261 struct mem_cgroup_threshold_ary *t;
4262 u64 usage;
4263 int i;
4264
4265 rcu_read_lock();
4266 if (!swap)
4267 t = rcu_dereference(memcg->thresholds.primary);
4268 else
4269 t = rcu_dereference(memcg->memsw_thresholds.primary);
4270
4271 if (!t)
4272 goto unlock;
4273
4274 usage = mem_cgroup_usage(memcg, swap);
4275
4276 /*
4277	 * current_threshold points to the threshold just below usage.
4278	 * If that no longer holds, a threshold was crossed after the
4279	 * last call of __mem_cgroup_threshold().
4280 */
4281 i = t->current_threshold;
4282
4283 /*
4284	 * Iterate backward over the array of thresholds starting from
4285	 * current_threshold and check if a threshold is crossed.
4286	 * If none of the thresholds below usage has been crossed, we
4287	 * read only one element of the array here.
4288 */
4289 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4290 eventfd_signal(t->entries[i].eventfd, 1);
4291
4292 /* i = current_threshold + 1 */
4293 i++;
4294
4295 /*
4296	 * Iterate forward over the array of thresholds starting from
4297	 * current_threshold+1 and check if a threshold is crossed.
4298	 * If none of the thresholds above usage has been crossed, we
4299	 * read only one element of the array here.
4300 */
4301 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4302 eventfd_signal(t->entries[i].eventfd, 1);
4303
4304 /* Update current_threshold */
4305 t->current_threshold = i - 1;
4306unlock:
4307 rcu_read_unlock();
4308}
4309
4310static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4311{
4312 while (memcg) {
4313 __mem_cgroup_threshold(memcg, false);
4314 if (do_swap_account)
4315 __mem_cgroup_threshold(memcg, true);
4316
4317 memcg = parent_mem_cgroup(memcg);
4318 }
4319}
4320
4321static int compare_thresholds(const void *a, const void *b)
4322{
4323 const struct mem_cgroup_threshold *_a = a;
4324 const struct mem_cgroup_threshold *_b = b;
4325	/* thresholds are u64; a subtraction truncated to int can misorder them */
4326	return (_a->threshold > _b->threshold) - (_a->threshold < _b->threshold);
4327}
4328
4329static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem)
4330{
4331 struct mem_cgroup_eventfd_list *ev;
4332
4333 list_for_each_entry(ev, &mem->oom_notify, list)
4334 eventfd_signal(ev->eventfd, 1);
4335 return 0;
4336}
4337
4338static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
4339{
4340 struct mem_cgroup *iter;
4341
4342 for_each_mem_cgroup_tree(iter, mem)
4343 mem_cgroup_oom_notify_cb(iter);
4344}
4345
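/*
 * Register @eventfd to be signalled when usage crosses @args (parsed as
 * a human-readable size, e.g. "1M"). The sorted array of thresholds is
 * updated with RCU copy-update: a new array is built and published with
 * rcu_assign_pointer() while readers may still scan the old one. From
 * userspace this is typically driven by writing
 * "<event_fd> <fd of memory.usage_in_bytes> <threshold>" to
 * cgroup.event_control; see Documentation/cgroups/memory.txt.
 */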
4346static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
4347 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4348{
4349 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4350 struct mem_cgroup_thresholds *thresholds;
4351 struct mem_cgroup_threshold_ary *new;
4352 int type = MEMFILE_TYPE(cft->private);
4353 u64 threshold, usage;
4354 int i, size, ret;
4355
4356 ret = res_counter_memparse_write_strategy(args, &threshold);
4357 if (ret)
4358 return ret;
4359
4360 mutex_lock(&memcg->thresholds_lock);
4361
4362 if (type == _MEM)
4363 thresholds = &memcg->thresholds;
4364 else if (type == _MEMSWAP)
4365 thresholds = &memcg->memsw_thresholds;
4366 else
4367 BUG();
4368
4369 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4370
4371 /* Check if a threshold crossed before adding a new one */
4372 if (thresholds->primary)
4373 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4374
4375 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4376
4377 /* Allocate memory for new array of thresholds */
4378 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
4379 GFP_KERNEL);
4380 if (!new) {
4381 ret = -ENOMEM;
4382 goto unlock;
4383 }
4384 new->size = size;
4385
4386 /* Copy thresholds (if any) to new array */
4387 if (thresholds->primary) {
4388 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
4389 sizeof(struct mem_cgroup_threshold));
4390 }
4391
4392 /* Add new threshold */
4393 new->entries[size - 1].eventfd = eventfd;
4394 new->entries[size - 1].threshold = threshold;
4395
4396	/* Sort thresholds. Registering a new threshold isn't time-critical */
4397 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
4398 compare_thresholds, NULL);
4399
4400 /* Find current threshold */
4401 new->current_threshold = -1;
4402 for (i = 0; i < size; i++) {
4403 if (new->entries[i].threshold < usage) {
4404 /*
4405 * new->current_threshold will not be used until
4406 * rcu_assign_pointer(), so it's safe to increment
4407 * it here.
4408 */
4409 ++new->current_threshold;
4410 }
4411 }
4412
4413 /* Free old spare buffer and save old primary buffer as spare */
4414 kfree(thresholds->spare);
4415 thresholds->spare = thresholds->primary;
4416
4417 rcu_assign_pointer(thresholds->primary, new);
4418
4419	/* Make sure nobody still uses the old thresholds array */
4420 synchronize_rcu();
4421
4422unlock:
4423 mutex_unlock(&memcg->thresholds_lock);
4424
4425 return ret;
4426}
4427
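/*
 * Unregister @eventfd from the thresholds array. The previous primary
 * array is kept around as a spare, so this path needs no allocation and
 * cannot fail.
 */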
4428static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
4429 struct cftype *cft, struct eventfd_ctx *eventfd)
4430{
4431 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4432 struct mem_cgroup_thresholds *thresholds;
4433 struct mem_cgroup_threshold_ary *new;
4434 int type = MEMFILE_TYPE(cft->private);
4435 u64 usage;
4436 int i, j, size;
4437
4438 mutex_lock(&memcg->thresholds_lock);
4439 if (type == _MEM)
4440 thresholds = &memcg->thresholds;
4441 else if (type == _MEMSWAP)
4442 thresholds = &memcg->memsw_thresholds;
4443 else
4444 BUG();
4445
4446 /*
4447	 * Something went wrong if we are trying to unregister a threshold
4448	 * when we don't have any thresholds
4449 */
4450 BUG_ON(!thresholds);
4451
4452 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4453
4454 /* Check if a threshold crossed before removing */
4455 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4456
4457	/* Calculate the new number of thresholds */
4458 size = 0;
4459 for (i = 0; i < thresholds->primary->size; i++) {
4460 if (thresholds->primary->entries[i].eventfd != eventfd)
4461 size++;
4462 }
4463
4464 new = thresholds->spare;
4465
4466 /* Set thresholds array to NULL if we don't have thresholds */
4467 if (!size) {
4468 kfree(new);
4469 new = NULL;
4470 goto swap_buffers;
4471 }
4472
4473 new->size = size;
4474
4475 /* Copy thresholds and find current threshold */
4476 new->current_threshold = -1;
4477 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4478 if (thresholds->primary->entries[i].eventfd == eventfd)
4479 continue;
4480
4481 new->entries[j] = thresholds->primary->entries[i];
4482 if (new->entries[j].threshold < usage) {
4483 /*
4484 * new->current_threshold will not be used
4485 * until rcu_assign_pointer(), so it's safe to increment
4486 * it here.
4487 */
4488 ++new->current_threshold;
4489 }
4490 j++;
4491 }
4492
4493swap_buffers:
4494 /* Swap primary and spare array */
4495 thresholds->spare = thresholds->primary;
4496 rcu_assign_pointer(thresholds->primary, new);
4497
4498	/* Make sure nobody still uses the old thresholds array */
4499 synchronize_rcu();
4500
4501 mutex_unlock(&memcg->thresholds_lock);
4502}
4503
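/*
 * OOM notification: eventfds registered here are signalled via
 * mem_cgroup_oom_notify() when the group goes under OOM. Registering
 * while the group is already under OOM signals the eventfd immediately.
 */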
4504static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
4505 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4506{
4507 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4508 struct mem_cgroup_eventfd_list *event;
4509 int type = MEMFILE_TYPE(cft->private);
4510
4511 BUG_ON(type != _OOM_TYPE);
4512 event = kmalloc(sizeof(*event), GFP_KERNEL);
4513 if (!event)
4514 return -ENOMEM;
4515
4516 spin_lock(&memcg_oom_lock);
4517
4518 event->eventfd = eventfd;
4519 list_add(&event->list, &memcg->oom_notify);
4520
4521 /* already in OOM ? */
4522 if (atomic_read(&memcg->under_oom))
4523 eventfd_signal(eventfd, 1);
4524 spin_unlock(&memcg_oom_lock);
4525
4526 return 0;
4527}
4528
4529static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
4530 struct cftype *cft, struct eventfd_ctx *eventfd)
4531{
4532 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4533 struct mem_cgroup_eventfd_list *ev, *tmp;
4534 int type = MEMFILE_TYPE(cft->private);
4535
4536 BUG_ON(type != _OOM_TYPE);
4537
4538 spin_lock(&memcg_oom_lock);
4539
4540 list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
4541 if (ev->eventfd == eventfd) {
4542 list_del(&ev->list);
4543 kfree(ev);
4544 }
4545 }
4546
4547 spin_unlock(&memcg_oom_lock);
4548}
4549
4550static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
4551 struct cftype *cft, struct cgroup_map_cb *cb)
4552{
4553 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4554
4555 cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);
4556
4557 if (atomic_read(&mem->under_oom))
4558 cb->fill(cb, "under_oom", 1);
4559 else
4560 cb->fill(cb, "under_oom", 0);
4561 return 0;
4562}
4563
4564static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
4565 struct cftype *cft, u64 val)
4566{
4567 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4568 struct mem_cgroup *parent;
4569
4570	/* cannot be set on the root cgroup; only 0 and 1 are allowed */
4571 if (!cgrp->parent || !((val == 0) || (val == 1)))
4572 return -EINVAL;
4573
4574 parent = mem_cgroup_from_cont(cgrp->parent);
4575
4576 cgroup_lock();
4577 /* oom-kill-disable is a flag for subhierarchy. */
4578 if ((parent->use_hierarchy) ||
4579 (mem->use_hierarchy && !list_empty(&cgrp->children))) {
4580 cgroup_unlock();
4581 return -EINVAL;
4582 }
4583 mem->oom_kill_disable = val;
4584 if (!val)
4585 memcg_oom_recover(mem);
4586 cgroup_unlock();
4587 return 0;
4588}
4589
4590#ifdef CONFIG_NUMA
4591static const struct file_operations mem_control_numa_stat_file_operations = {
4592 .read = seq_read,
4593 .llseek = seq_lseek,
4594 .release = single_release,
4595};
4596
4597static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
4598{
4599 struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
4600
4601 file->f_op = &mem_control_numa_stat_file_operations;
4602 return single_open(file, mem_control_numa_stat_show, cont);
4603}
4604#endif /* CONFIG_NUMA */
4605
4606static struct cftype mem_cgroup_files[] = {
4607 {
4608 .name = "usage_in_bytes",
4609 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4610 .read_u64 = mem_cgroup_read,
4611 .register_event = mem_cgroup_usage_register_event,
4612 .unregister_event = mem_cgroup_usage_unregister_event,
4613 },
4614 {
4615 .name = "max_usage_in_bytes",
4616 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4617 .trigger = mem_cgroup_reset,
4618 .read_u64 = mem_cgroup_read,
4619 },
4620 {
4621 .name = "limit_in_bytes",
4622 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4623 .write_string = mem_cgroup_write,
4624 .read_u64 = mem_cgroup_read,
4625 },
4626 {
4627 .name = "soft_limit_in_bytes",
4628 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4629 .write_string = mem_cgroup_write,
4630 .read_u64 = mem_cgroup_read,
4631 },
4632 {
4633 .name = "failcnt",
4634 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4635 .trigger = mem_cgroup_reset,
4636 .read_u64 = mem_cgroup_read,
4637 },
4638 {
4639 .name = "stat",
4640 .read_map = mem_control_stat_show,
4641 },
4642 {
4643 .name = "force_empty",
4644 .trigger = mem_cgroup_force_empty_write,
4645 },
4646 {
4647 .name = "use_hierarchy",
4648 .write_u64 = mem_cgroup_hierarchy_write,
4649 .read_u64 = mem_cgroup_hierarchy_read,
4650 },
4651 {
4652 .name = "swappiness",
4653 .read_u64 = mem_cgroup_swappiness_read,
4654 .write_u64 = mem_cgroup_swappiness_write,
4655 },
4656 {
4657 .name = "move_charge_at_immigrate",
4658 .read_u64 = mem_cgroup_move_charge_read,
4659 .write_u64 = mem_cgroup_move_charge_write,
4660 },
4661 {
4662 .name = "oom_control",
4663 .read_map = mem_cgroup_oom_control_read,
4664 .write_u64 = mem_cgroup_oom_control_write,
4665 .register_event = mem_cgroup_oom_register_event,
4666 .unregister_event = mem_cgroup_oom_unregister_event,
4667 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4668 },
4669#ifdef CONFIG_NUMA
4670 {
4671 .name = "numa_stat",
4672 .open = mem_control_numa_stat_open,
4673 .mode = S_IRUGO,
4674 },
4675#endif
4676};
4677
4678#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4679static struct cftype memsw_cgroup_files[] = {
4680 {
4681 .name = "memsw.usage_in_bytes",
4682 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
4683 .read_u64 = mem_cgroup_read,
4684 .register_event = mem_cgroup_usage_register_event,
4685 .unregister_event = mem_cgroup_usage_unregister_event,
4686 },
4687 {
4688 .name = "memsw.max_usage_in_bytes",
4689 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
4690 .trigger = mem_cgroup_reset,
4691 .read_u64 = mem_cgroup_read,
4692 },
4693 {
4694 .name = "memsw.limit_in_bytes",
4695 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
4696 .write_string = mem_cgroup_write,
4697 .read_u64 = mem_cgroup_read,
4698 },
4699 {
4700 .name = "memsw.failcnt",
4701 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
4702 .trigger = mem_cgroup_reset,
4703 .read_u64 = mem_cgroup_read,
4704 },
4705};
4706
4707static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4708{
4709 if (!do_swap_account)
4710 return 0;
4711 return cgroup_add_files(cont, ss, memsw_cgroup_files,
4712 ARRAY_SIZE(memsw_cgroup_files));
4713};
4714#else
4715static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4716{
4717 return 0;
4718}
4719#endif
4720
4721static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
4722{
4723 struct mem_cgroup_per_node *pn;
4724 struct mem_cgroup_per_zone *mz;
4725 enum lru_list l;
4726 int zone, tmp = node;
4727 /*
4728	 * This routine is called for each possible node,
4729	 * but it's a BUG to call kmalloc() on an offline node.
4730	 *
4731	 * TODO: this routine can waste a lot of memory on nodes which will
4732	 *       never be onlined. It would be better to use a memory-hotplug
4733	 *       callback function.
4734 */
4735 if (!node_state(node, N_NORMAL_MEMORY))
4736 tmp = -1;
4737 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4738 if (!pn)
4739 return 1;
4740
4741 mem->info.nodeinfo[node] = pn;
4742 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4743 mz = &pn->zoneinfo[zone];
4744 for_each_lru(l)
4745 INIT_LIST_HEAD(&mz->lists[l]);
4746 mz->usage_in_excess = 0;
4747 mz->on_tree = false;
4748 mz->mem = mem;
4749 }
4750 return 0;
4751}
4752
4753static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
4754{
4755 kfree(mem->info.nodeinfo[node]);
4756}
4757
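/*
 * Allocate a mem_cgroup and its percpu statistics. The kzalloc/vzalloc
 * decision made here is mirrored in __mem_cgroup_free(), which picks
 * kfree() or vfree() based on the same size test.
 */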
4758static struct mem_cgroup *mem_cgroup_alloc(void)
4759{
4760 struct mem_cgroup *mem;
4761 int size = sizeof(struct mem_cgroup);
4762
4763 /* Can be very big if MAX_NUMNODES is very big */
4764 if (size < PAGE_SIZE)
4765 mem = kzalloc(size, GFP_KERNEL);
4766 else
4767 mem = vzalloc(size);
4768
4769 if (!mem)
4770 return NULL;
4771
4772 mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4773 if (!mem->stat)
4774 goto out_free;
4775 spin_lock_init(&mem->pcp_counter_lock);
4776 return mem;
4777
4778out_free:
4779 if (size < PAGE_SIZE)
4780 kfree(mem);
4781 else
4782 vfree(mem);
4783 return NULL;
4784}
4785
4786/*
4787 * When destroying a mem_cgroup, references from swap_cgroup can remain
4788 * (scanning everything at force_empty would be too costly...).
4789 *
4790 * Instead of clearing all references at force_empty, we remember
4791 * the number of references from swap_cgroup and free the mem_cgroup
4792 * when it goes down to 0.
4793 *
4794 * Removal of cgroup itself succeeds regardless of refs from swap.
4795 */
4796
4797static void __mem_cgroup_free(struct mem_cgroup *mem)
4798{
4799 int node;
4800
4801 mem_cgroup_remove_from_trees(mem);
4802 free_css_id(&mem_cgroup_subsys, &mem->css);
4803
4804 for_each_node_state(node, N_POSSIBLE)
4805 free_mem_cgroup_per_zone_info(mem, node);
4806
4807 free_percpu(mem->stat);
4808 if (sizeof(struct mem_cgroup) < PAGE_SIZE)
4809 kfree(mem);
4810 else
4811 vfree(mem);
4812}
4813
4814static void mem_cgroup_get(struct mem_cgroup *mem)
4815{
4816 atomic_inc(&mem->refcnt);
4817}
4818
4819static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
4820{
4821 if (atomic_sub_and_test(count, &mem->refcnt)) {
4822 struct mem_cgroup *parent = parent_mem_cgroup(mem);
4823 __mem_cgroup_free(mem);
4824 if (parent)
4825 mem_cgroup_put(parent);
4826 }
4827}
4828
4829static void mem_cgroup_put(struct mem_cgroup *mem)
4830{
4831 __mem_cgroup_put(mem, 1);
4832}
4833
4834/*
4835 * Returns the parent mem_cgroup in the memcg hierarchy when hierarchy is enabled (NULL otherwise).
4836 */
4837static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
4838{
4839 if (!mem->res.parent)
4840 return NULL;
4841 return mem_cgroup_from_res_counter(mem->res.parent, res);
4842}
4843
4844#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4845static void __init enable_swap_cgroup(void)
4846{
4847 if (!mem_cgroup_disabled() && really_do_swap_account)
4848 do_swap_account = 1;
4849}
4850#else
4851static void __init enable_swap_cgroup(void)
4852{
4853}
4854#endif
4855
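/*
 * Set up the per-node, per-zone RB trees used to track memcgs whose
 * usage exceeds their soft limit. Done once, when the root cgroup is
 * created.
 */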
4856static int mem_cgroup_soft_limit_tree_init(void)
4857{
4858 struct mem_cgroup_tree_per_node *rtpn;
4859 struct mem_cgroup_tree_per_zone *rtpz;
4860 int tmp, node, zone;
4861
4862 for_each_node_state(node, N_POSSIBLE) {
4863 tmp = node;
4864 if (!node_state(node, N_NORMAL_MEMORY))
4865 tmp = -1;
4866 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
4867 if (!rtpn)
4868 return 1;
4869
4870 soft_limit_tree.rb_tree_per_node[node] = rtpn;
4871
4872 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4873 rtpz = &rtpn->rb_tree_per_zone[zone];
4874 rtpz->rb_root = RB_ROOT;
4875 spin_lock_init(&rtpz->lock);
4876 }
4877 }
4878 return 0;
4879}
4880
4881static struct cgroup_subsys_state * __ref
4882mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
4883{
4884 struct mem_cgroup *mem, *parent;
4885 long error = -ENOMEM;
4886 int node;
4887
4888 mem = mem_cgroup_alloc();
4889 if (!mem)
4890 return ERR_PTR(error);
4891
4892 for_each_node_state(node, N_POSSIBLE)
4893 if (alloc_mem_cgroup_per_zone_info(mem, node))
4894 goto free_out;
4895
4896 /* root ? */
4897 if (cont->parent == NULL) {
4898 int cpu;
4899 enable_swap_cgroup();
4900 parent = NULL;
4901 root_mem_cgroup = mem;
4902 if (mem_cgroup_soft_limit_tree_init())
4903 goto free_out;
4904 for_each_possible_cpu(cpu) {
4905 struct memcg_stock_pcp *stock =
4906 &per_cpu(memcg_stock, cpu);
4907 INIT_WORK(&stock->work, drain_local_stock);
4908 }
4909 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
4910 } else {
4911 parent = mem_cgroup_from_cont(cont->parent);
4912 mem->use_hierarchy = parent->use_hierarchy;
4913 mem->oom_kill_disable = parent->oom_kill_disable;
4914 }
4915
4916 if (parent && parent->use_hierarchy) {
4917 res_counter_init(&mem->res, &parent->res);
4918 res_counter_init(&mem->memsw, &parent->memsw);
4919 /*
4920 * We increment refcnt of the parent to ensure that we can
4921 * safely access it on res_counter_charge/uncharge.
4922 * This refcnt will be decremented when freeing this
4923		 * mem_cgroup (see mem_cgroup_put()).
4924 */
4925 mem_cgroup_get(parent);
4926 } else {
4927 res_counter_init(&mem->res, NULL);
4928 res_counter_init(&mem->memsw, NULL);
4929 }
4930 mem->last_scanned_child = 0;
4931 mem->last_scanned_node = MAX_NUMNODES;
4932 INIT_LIST_HEAD(&mem->oom_notify);
4933
4934 if (parent)
4935 mem->swappiness = mem_cgroup_swappiness(parent);
4936 atomic_set(&mem->refcnt, 1);
4937 mem->move_charge_at_immigrate = 0;
4938 mutex_init(&mem->thresholds_lock);
4939 return &mem->css;
4940free_out:
4941 __mem_cgroup_free(mem);
4942 root_mem_cgroup = NULL;
4943 return ERR_PTR(error);
4944}
4945
4946static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
4947 struct cgroup *cont)
4948{
4949 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
4950
4951 return mem_cgroup_force_empty(mem, false);
4952}
4953
4954static void mem_cgroup_destroy(struct cgroup_subsys *ss,
4955 struct cgroup *cont)
4956{
4957 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
4958
4959 mem_cgroup_put(mem);
4960}
4961
4962static int mem_cgroup_populate(struct cgroup_subsys *ss,
4963 struct cgroup *cont)
4964{
4965 int ret;
4966
4967 ret = cgroup_add_files(cont, ss, mem_cgroup_files,
4968 ARRAY_SIZE(mem_cgroup_files));
4969
4970 if (!ret)
4971 ret = register_memsw_files(cont, ss);
4972 return ret;
4973}
4974
4975#ifdef CONFIG_MMU
4976/* Handlers for move charge at task migration. */
4977#define PRECHARGE_COUNT_AT_ONCE 256
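/*
 * Charge @count pages to mc.to up front. The fast path charges the whole
 * batch against the res_counter(s) in one call; if that fails, fall back
 * to charging page by page, rescheduling every PRECHARGE_COUNT_AT_ONCE
 * pages.
 */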
4978static int mem_cgroup_do_precharge(unsigned long count)
4979{
4980 int ret = 0;
4981 int batch_count = PRECHARGE_COUNT_AT_ONCE;
4982 struct mem_cgroup *mem = mc.to;
4983
4984 if (mem_cgroup_is_root(mem)) {
4985 mc.precharge += count;
4986 /* we don't need css_get for root */
4987 return ret;
4988 }
4989 /* try to charge at once */
4990 if (count > 1) {
4991 struct res_counter *dummy;
4992 /*
4993 * "mem" cannot be under rmdir() because we've already checked
4994 * by cgroup_lock_live_cgroup() that it is not removed and we
4995 * are still under the same cgroup_mutex. So we can postpone
4996 * css_get().
4997 */
4998 if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
4999 goto one_by_one;
5000 if (do_swap_account && res_counter_charge(&mem->memsw,
5001 PAGE_SIZE * count, &dummy)) {
5002 res_counter_uncharge(&mem->res, PAGE_SIZE * count);
5003 goto one_by_one;
5004 }
5005 mc.precharge += count;
5006 return ret;
5007 }
5008one_by_one:
5009 /* fall back to one by one charge */
5010 while (count--) {
5011 if (signal_pending(current)) {
5012 ret = -EINTR;
5013 break;
5014 }
5015 if (!batch_count--) {
5016 batch_count = PRECHARGE_COUNT_AT_ONCE;
5017 cond_resched();
5018 }
5019 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, 1, &mem, false);
5020 if (ret || !mem)
5021 /* mem_cgroup_clear_mc() will do uncharge later */
5022 return -ENOMEM;
5023 mc.precharge++;
5024 }
5025 return ret;
5026}
5027
5028/**
5029 * is_target_pte_for_mc - check whether a pte is a valid target for move charge
5030 * @vma: the vma the pte to be checked belongs to
5031 * @addr: the address corresponding to the pte to be checked
5032 * @ptent: the pte to be checked
5033 * @target: pointer where the target page or swap entry is stored (can be NULL)
5034 *
5035 * Returns
5036 *   0 (MC_TARGET_NONE): if the pte is not a target for move charge.
5037 *   1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5038 *     move charge. If @target is not NULL, the page is stored in target->page
5039 *     with an extra refcount taken (callers should handle it).
5040 *   2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5041 *     target for charge migration. If @target is not NULL, the entry is stored
5042 *     in target->ent.
5043 *
5044 * Called with pte lock held.
5045 */
5046union mc_target {
5047 struct page *page;
5048 swp_entry_t ent;
5049};
5050
5051enum mc_target_type {
5052 MC_TARGET_NONE, /* not used */
5053 MC_TARGET_PAGE,
5054 MC_TARGET_SWAP,
5055};
5056
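/*
 * The mc_handle_*_pte() helpers below pick out a movable target for each
 * pte flavour: present ptes, swap ptes, and none/file ptes (which may
 * still reference page cache).
 */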
5057static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5058 unsigned long addr, pte_t ptent)
5059{
5060 struct page *page = vm_normal_page(vma, addr, ptent);
5061
5062 if (!page || !page_mapped(page))
5063 return NULL;
5064 if (PageAnon(page)) {
5065 /* we don't move shared anon */
5066 if (!move_anon() || page_mapcount(page) > 2)
5067 return NULL;
5068 } else if (!move_file())
5069 /* we ignore mapcount for file pages */
5070 return NULL;
5071 if (!get_page_unless_zero(page))
5072 return NULL;
5073
5074 return page;
5075}
5076
5077static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5078 unsigned long addr, pte_t ptent, swp_entry_t *entry)
5079{
5080 int usage_count;
5081 struct page *page = NULL;
5082 swp_entry_t ent = pte_to_swp_entry(ptent);
5083
5084 if (!move_anon() || non_swap_entry(ent))
5085 return NULL;
5086 usage_count = mem_cgroup_count_swap_user(ent, &page);
5087 if (usage_count > 1) { /* we don't move shared anon */
5088 if (page)
5089 put_page(page);
5090 return NULL;
5091 }
5092 if (do_swap_account)
5093 entry->val = ent.val;
5094
5095 return page;
5096}
5097
5098static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5099 unsigned long addr, pte_t ptent, swp_entry_t *entry)
5100{
5101 struct page *page = NULL;
5102 struct inode *inode;
5103 struct address_space *mapping;
5104 pgoff_t pgoff;
5105
5106 if (!vma->vm_file) /* anonymous vma */
5107 return NULL;
5108 if (!move_file())
5109 return NULL;
5110
5111 inode = vma->vm_file->f_path.dentry->d_inode;
5112 mapping = vma->vm_file->f_mapping;
5113 if (pte_none(ptent))
5114 pgoff = linear_page_index(vma, addr);
5115 else /* pte_file(ptent) is true */
5116 pgoff = pte_to_pgoff(ptent);
5117
5118	/* the page is moved even if it's not in this task's RSS (never page-faulted). */
5119 page = find_get_page(mapping, pgoff);
5120
5121#ifdef CONFIG_SWAP
5122 /* shmem/tmpfs may report page out on swap: account for that too. */
5123 if (radix_tree_exceptional_entry(page)) {
5124 swp_entry_t swap = radix_to_swp_entry(page);
5125 if (do_swap_account)
5126 *entry = swap;
5127 page = find_get_page(&swapper_space, swap.val);
5128 }
5129#endif
5130 return page;
5131}
5132
5133static int is_target_pte_for_mc(struct vm_area_struct *vma,
5134 unsigned long addr, pte_t ptent, union mc_target *target)
5135{
5136 struct page *page = NULL;
5137 struct page_cgroup *pc;
5138 int ret = 0;
5139 swp_entry_t ent = { .val = 0 };
5140
5141 if (pte_present(ptent))
5142 page = mc_handle_present_pte(vma, addr, ptent);
5143 else if (is_swap_pte(ptent))
5144 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
5145 else if (pte_none(ptent) || pte_file(ptent))
5146 page = mc_handle_file_pte(vma, addr, ptent, &ent);
5147
5148 if (!page && !ent.val)
5149 return 0;
5150 if (page) {
5151 pc = lookup_page_cgroup(page);
5152 /*
5153		 * Do only a loose check, without the page_cgroup lock;
5154		 * mem_cgroup_move_account() checks whether the pc is valid
5155		 * under the lock.
5156 */
5157 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
5158 ret = MC_TARGET_PAGE;
5159 if (target)
5160 target->page = page;
5161 }
5162 if (!ret || !target)
5163 put_page(page);
5164 }
5165 /* There is a swap entry and a page doesn't exist or isn't charged */
5166 if (ent.val && !ret &&
5167 css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
5168 ret = MC_TARGET_SWAP;
5169 if (target)
5170 target->ent = ent;
5171 }
5172 return ret;
5173}
5174
5175static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5176 unsigned long addr, unsigned long end,
5177 struct mm_walk *walk)
5178{
5179 struct vm_area_struct *vma = walk->private;
5180 pte_t *pte;
5181 spinlock_t *ptl;
5182
5183 split_huge_page_pmd(walk->mm, pmd);
5184
5185 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5186 for (; addr != end; pte++, addr += PAGE_SIZE)
5187 if (is_target_pte_for_mc(vma, addr, *pte, NULL))
5188 mc.precharge++; /* increment precharge temporarily */
5189 pte_unmap_unlock(pte - 1, ptl);
5190 cond_resched();
5191
5192 return 0;
5193}
5194
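/*
 * First pass of charge moving: walk every pte of the mm and count how
 * many pages/swap entries qualify as move targets. The count is then
 * used to precharge mc.to before anything is actually moved.
 */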
5195static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5196{
5197 unsigned long precharge;
5198 struct vm_area_struct *vma;
5199
5200 down_read(&mm->mmap_sem);
5201 for (vma = mm->mmap; vma; vma = vma->vm_next) {
5202 struct mm_walk mem_cgroup_count_precharge_walk = {
5203 .pmd_entry = mem_cgroup_count_precharge_pte_range,
5204 .mm = mm,
5205 .private = vma,
5206 };
5207 if (is_vm_hugetlb_page(vma))
5208 continue;
5209 walk_page_range(vma->vm_start, vma->vm_end,
5210 &mem_cgroup_count_precharge_walk);
5211 }
5212 up_read(&mm->mmap_sem);
5213
5214 precharge = mc.precharge;
5215 mc.precharge = 0;
5216
5217 return precharge;
5218}
5219
5220static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5221{
5222 unsigned long precharge = mem_cgroup_count_precharge(mm);
5223
5224 VM_BUG_ON(mc.moving_task);
5225 mc.moving_task = current;
5226 return mem_cgroup_do_precharge(precharge);
5227}
5228
5229/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5230static void __mem_cgroup_clear_mc(void)
5231{
5232 struct mem_cgroup *from = mc.from;
5233 struct mem_cgroup *to = mc.to;
5234
5235 /* we must uncharge all the leftover precharges from mc.to */
5236 if (mc.precharge) {
5237 __mem_cgroup_cancel_charge(mc.to, mc.precharge);
5238 mc.precharge = 0;
5239 }
5240 /*
5241 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5242 * we must uncharge here.
5243 */
5244 if (mc.moved_charge) {
5245 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
5246 mc.moved_charge = 0;
5247 }
5248 /* we must fixup refcnts and charges */
5249 if (mc.moved_swap) {
5250 /* uncharge swap account from the old cgroup */
5251 if (!mem_cgroup_is_root(mc.from))
5252 res_counter_uncharge(&mc.from->memsw,
5253 PAGE_SIZE * mc.moved_swap);
5254 __mem_cgroup_put(mc.from, mc.moved_swap);
5255
5256 if (!mem_cgroup_is_root(mc.to)) {
5257 /*
5258 * we charged both to->res and to->memsw, so we should
5259 * uncharge to->res.
5260 */
5261 res_counter_uncharge(&mc.to->res,
5262 PAGE_SIZE * mc.moved_swap);
5263 }
5264 /* we've already done mem_cgroup_get(mc.to) */
5265 mc.moved_swap = 0;
5266 }
5267 memcg_oom_recover(from);
5268 memcg_oom_recover(to);
5269 wake_up_all(&mc.waitq);
5270}
5271
5272static void mem_cgroup_clear_mc(void)
5273{
5274 struct mem_cgroup *from = mc.from;
5275
5276 /*
5277 * we must clear moving_task before waking up waiters at the end of
5278 * task migration.
5279 */
5280 mc.moving_task = NULL;
5281 __mem_cgroup_clear_mc();
5282 spin_lock(&mc.lock);
5283 mc.from = NULL;
5284 mc.to = NULL;
5285 spin_unlock(&mc.lock);
5286 mem_cgroup_end_move(from);
5287}
5288
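/*
 * cgroup can_attach() callback: if move_charge_at_immigrate is enabled
 * and @p owns its mm, record the source and destination memcgs in "mc"
 * and precharge the destination for every movable page found.
 */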
5289static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
5290 struct cgroup *cgroup,
5291 struct task_struct *p)
5292{
5293 int ret = 0;
5294 struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);
5295
5296 if (mem->move_charge_at_immigrate) {
5297 struct mm_struct *mm;
5298 struct mem_cgroup *from = mem_cgroup_from_task(p);
5299
5300 VM_BUG_ON(from == mem);
5301
5302 mm = get_task_mm(p);
5303 if (!mm)
5304 return 0;
5305		/* We move charges only when we move an owner of the mm */
5306 if (mm->owner == p) {
5307 VM_BUG_ON(mc.from);
5308 VM_BUG_ON(mc.to);
5309 VM_BUG_ON(mc.precharge);
5310 VM_BUG_ON(mc.moved_charge);
5311 VM_BUG_ON(mc.moved_swap);
5312 mem_cgroup_start_move(from);
5313 spin_lock(&mc.lock);
5314 mc.from = from;
5315 mc.to = mem;
5316 spin_unlock(&mc.lock);
5317 /* We set mc.moving_task later */
5318
5319 ret = mem_cgroup_precharge_mc(mm);
5320 if (ret)
5321 mem_cgroup_clear_mc();
5322 }
5323 mmput(mm);
5324 }
5325 return ret;
5326}
5327
5328static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
5329 struct cgroup *cgroup,
5330 struct task_struct *p)
5331{
5332 mem_cgroup_clear_mc();
5333}
5334
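/*
 * Second pass of charge moving: re-walk the ptes and move each target
 * page or swap entry from mc.from to mc.to, consuming one precharge per
 * move. If precharges run out mid-walk, drop the pte lock, precharge one
 * more page, and resume from the current address.
 */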
5335static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5336 unsigned long addr, unsigned long end,
5337 struct mm_walk *walk)
5338{
5339 int ret = 0;
5340 struct vm_area_struct *vma = walk->private;
5341 pte_t *pte;
5342 spinlock_t *ptl;
5343
5344 split_huge_page_pmd(walk->mm, pmd);
5345retry:
5346 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5347 for (; addr != end; addr += PAGE_SIZE) {
5348 pte_t ptent = *(pte++);
5349 union mc_target target;
5350 int type;
5351 struct page *page;
5352 struct page_cgroup *pc;
5353 swp_entry_t ent;
5354
5355 if (!mc.precharge)
5356 break;
5357
5358 type = is_target_pte_for_mc(vma, addr, ptent, &target);
5359 switch (type) {
5360 case MC_TARGET_PAGE:
5361 page = target.page;
5362 if (isolate_lru_page(page))
5363 goto put;
5364 pc = lookup_page_cgroup(page);
5365 if (!mem_cgroup_move_account(page, 1, pc,
5366 mc.from, mc.to, false)) {
5367 mc.precharge--;
5368 /* we uncharge from mc.from later. */
5369 mc.moved_charge++;
5370 }
5371 putback_lru_page(page);
5372put: /* is_target_pte_for_mc() gets the page */
5373 put_page(page);
5374 break;
5375 case MC_TARGET_SWAP:
5376 ent = target.ent;
5377 if (!mem_cgroup_move_swap_account(ent,
5378 mc.from, mc.to, false)) {
5379 mc.precharge--;
5380 /* we fixup refcnts and charges later. */
5381 mc.moved_swap++;
5382 }
5383 break;
5384 default:
5385 break;
5386 }
5387 }
5388 pte_unmap_unlock(pte - 1, ptl);
5389 cond_resched();
5390
5391 if (addr != end) {
5392 /*
5393 * We have consumed all precharges we got in can_attach().
5394		 * We try to charge one by one, but don't do any additional
5395		 * charges to mc.to once charging has failed during the attach()
5396		 * phase.
5397 */
5398 ret = mem_cgroup_do_precharge(1);
5399 if (!ret)
5400 goto retry;
5401 }
5402
5403 return ret;
5404}
5405
5406static void mem_cgroup_move_charge(struct mm_struct *mm)
5407{
5408 struct vm_area_struct *vma;
5409
5410 lru_add_drain_all();
5411retry:
5412 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
5413 /*
5414		 * Someone who is holding the mmap_sem might be waiting on the
5415		 * waitq. So we cancel all extra charges, wake up all waiters,
5416 * and retry. Because we cancel precharges, we might not be able
5417 * to move enough charges, but moving charge is a best-effort
5418 * feature anyway, so it wouldn't be a big problem.
5419 */
5420 __mem_cgroup_clear_mc();
5421 cond_resched();
5422 goto retry;
5423 }
5424 for (vma = mm->mmap; vma; vma = vma->vm_next) {
5425 int ret;
5426 struct mm_walk mem_cgroup_move_charge_walk = {
5427 .pmd_entry = mem_cgroup_move_charge_pte_range,
5428 .mm = mm,
5429 .private = vma,
5430 };
5431 if (is_vm_hugetlb_page(vma))
5432 continue;
5433 ret = walk_page_range(vma->vm_start, vma->vm_end,
5434 &mem_cgroup_move_charge_walk);
5435 if (ret)
5436 /*
5437			 * this means we have consumed all precharges and failed
5438			 * to do an additional charge. Just abandon here.
5439 */
5440 break;
5441 }
5442 up_read(&mm->mmap_sem);
5443}
5444
5445static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5446 struct cgroup *cont,
5447 struct cgroup *old_cont,
5448 struct task_struct *p)
5449{
5450 struct mm_struct *mm = get_task_mm(p);
5451
5452 if (mm) {
5453 if (mc.to)
5454 mem_cgroup_move_charge(mm);
5455 put_swap_token(mm);
5456 mmput(mm);
5457 }
5458 if (mc.to)
5459 mem_cgroup_clear_mc();
5460}
5461#else /* !CONFIG_MMU */
5462static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
5463 struct cgroup *cgroup,
5464 struct task_struct *p)
5465{
5466 return 0;
5467}
5468static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
5469 struct cgroup *cgroup,
5470 struct task_struct *p)
5471{
5472}
5473static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5474 struct cgroup *cont,
5475 struct cgroup *old_cont,
5476 struct task_struct *p)
5477{
5478}
5479#endif
5480
5481struct cgroup_subsys mem_cgroup_subsys = {
5482 .name = "memory",
5483 .subsys_id = mem_cgroup_subsys_id,
5484 .create = mem_cgroup_create,
5485 .pre_destroy = mem_cgroup_pre_destroy,
5486 .destroy = mem_cgroup_destroy,
5487 .populate = mem_cgroup_populate,
5488 .can_attach = mem_cgroup_can_attach,
5489 .cancel_attach = mem_cgroup_cancel_attach,
5490 .attach = mem_cgroup_move_task,
5491 .early_init = 0,
5492 .use_id = 1,
5493};
5494
5495#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
5496static int __init enable_swap_account(char *s)
5497{
5498	/* enable swap accounting if "1" is given, disable if "0" */
5499 if (!strcmp(s, "1"))
5500 really_do_swap_account = 1;
5501 else if (!strcmp(s, "0"))
5502 really_do_swap_account = 0;
5503 return 1;
5504}
5505__setup("swapaccount=", enable_swap_account);
5506
5507#endif
1// SPDX-License-Identifier: GPL-2.0-or-later
2/* memcontrol.c - Memory Controller
3 *
4 * Copyright IBM Corporation, 2007
5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6 *
7 * Copyright 2007 OpenVZ SWsoft Inc
8 * Author: Pavel Emelianov <xemul@openvz.org>
9 *
10 * Memory thresholds
11 * Copyright (C) 2009 Nokia Corporation
12 * Author: Kirill A. Shutemov
13 *
14 * Kernel Memory Controller
15 * Copyright (C) 2012 Parallels Inc. and Google Inc.
16 * Authors: Glauber Costa and Suleiman Souhlal
17 *
18 * Native page reclaim
19 * Charge lifetime sanitation
20 * Lockless page tracking & accounting
21 * Unified hierarchy configuration model
22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23 */
24
25#include <linux/page_counter.h>
26#include <linux/memcontrol.h>
27#include <linux/cgroup.h>
28#include <linux/pagewalk.h>
29#include <linux/sched/mm.h>
30#include <linux/shmem_fs.h>
31#include <linux/hugetlb.h>
32#include <linux/pagemap.h>
33#include <linux/vm_event_item.h>
34#include <linux/smp.h>
35#include <linux/page-flags.h>
36#include <linux/backing-dev.h>
37#include <linux/bit_spinlock.h>
38#include <linux/rcupdate.h>
39#include <linux/limits.h>
40#include <linux/export.h>
41#include <linux/mutex.h>
42#include <linux/rbtree.h>
43#include <linux/slab.h>
44#include <linux/swap.h>
45#include <linux/swapops.h>
46#include <linux/spinlock.h>
47#include <linux/eventfd.h>
48#include <linux/poll.h>
49#include <linux/sort.h>
50#include <linux/fs.h>
51#include <linux/seq_file.h>
52#include <linux/vmpressure.h>
53#include <linux/mm_inline.h>
54#include <linux/swap_cgroup.h>
55#include <linux/cpu.h>
56#include <linux/oom.h>
57#include <linux/lockdep.h>
58#include <linux/file.h>
59#include <linux/tracehook.h>
60#include <linux/psi.h>
61#include <linux/seq_buf.h>
62#include "internal.h"
63#include <net/sock.h>
64#include <net/ip.h>
65#include "slab.h"
66
67#include <linux/uaccess.h>
68
69#include <trace/events/vmscan.h>
70
71struct cgroup_subsys memory_cgrp_subsys __read_mostly;
72EXPORT_SYMBOL(memory_cgrp_subsys);
73
74struct mem_cgroup *root_mem_cgroup __read_mostly;
75
76/* Socket memory accounting disabled? */
77static bool cgroup_memory_nosocket;
78
79/* Kernel memory accounting disabled? */
80static bool cgroup_memory_nokmem;
81
82/* Whether the swap controller is active */
83#ifdef CONFIG_MEMCG_SWAP
84bool cgroup_memory_noswap __read_mostly;
85#else
86#define cgroup_memory_noswap 1
87#endif
88
89#ifdef CONFIG_CGROUP_WRITEBACK
90static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
91#endif
92
93/* Whether legacy memory+swap accounting is active */
94static bool do_memsw_account(void)
95{
96 return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
97}
98
99#define THRESHOLDS_EVENTS_TARGET 128
100#define SOFTLIMIT_EVENTS_TARGET 1024
101
102/*
103 * Cgroups above their limits are maintained in a RB-Tree, independent of
104 * their hierarchy representation
105 */
106
107struct mem_cgroup_tree_per_node {
108 struct rb_root rb_root;
109 struct rb_node *rb_rightmost;
110 spinlock_t lock;
111};
112
113struct mem_cgroup_tree {
114 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
115};
116
117static struct mem_cgroup_tree soft_limit_tree __read_mostly;
118
119/* for OOM */
120struct mem_cgroup_eventfd_list {
121 struct list_head list;
122 struct eventfd_ctx *eventfd;
123};
124
125/*
126 * cgroup_event represents events which userspace want to receive.
127 */
128struct mem_cgroup_event {
129 /*
130 * memcg which the event belongs to.
131 */
132 struct mem_cgroup *memcg;
133 /*
134 * eventfd to signal userspace about the event.
135 */
136 struct eventfd_ctx *eventfd;
137 /*
138 * Each of these stored in a list by the cgroup.
139 */
140 struct list_head list;
141 /*
142 * register_event() callback will be used to add new userspace
143 * waiter for changes related to this event. Use eventfd_signal()
144 * on eventfd to send notification to userspace.
145 */
146 int (*register_event)(struct mem_cgroup *memcg,
147 struct eventfd_ctx *eventfd, const char *args);
148 /*
149 * unregister_event() callback will be called when userspace closes
150 * the eventfd or on cgroup removing. This callback must be set,
151 * if you want provide notification functionality.
152 */
153 void (*unregister_event)(struct mem_cgroup *memcg,
154 struct eventfd_ctx *eventfd);
155 /*
156 * All fields below needed to unregister event when
157 * userspace closes eventfd.
158 */
159 poll_table pt;
160 wait_queue_head_t *wqh;
161 wait_queue_entry_t wait;
162 struct work_struct remove;
163};
164
165static void mem_cgroup_threshold(struct mem_cgroup *memcg);
166static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
167
168/* Stuffs for move charges at task migration. */
169/*
170 * Types of charges to be moved.
171 */
172#define MOVE_ANON 0x1U
173#define MOVE_FILE 0x2U
174#define MOVE_MASK (MOVE_ANON | MOVE_FILE)
175
176/* "mc" and its members are protected by cgroup_mutex */
177static struct move_charge_struct {
178 spinlock_t lock; /* for from, to */
179 struct mm_struct *mm;
180 struct mem_cgroup *from;
181 struct mem_cgroup *to;
182 unsigned long flags;
183 unsigned long precharge;
184 unsigned long moved_charge;
185 unsigned long moved_swap;
186 struct task_struct *moving_task; /* a task moving charges */
187 wait_queue_head_t waitq; /* a waitq for other context */
188} mc = {
189 .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
190 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
191};
192
193/*
194 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
195 * limit reclaim to prevent infinite loops, if they ever occur.
196 */
197#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
198#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
199
200enum charge_type {
201 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
202 MEM_CGROUP_CHARGE_TYPE_ANON,
203 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
204 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */
205 NR_CHARGE_TYPE,
206};
207
208/* for encoding cft->private value on file */
209enum res_type {
210 _MEM,
211 _MEMSWAP,
212 _OOM_TYPE,
213 _KMEM,
214 _TCP,
215};
216
217#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
218#define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
219#define MEMFILE_ATTR(val) ((val) & 0xffff)
220/* Used for OOM nofiier */
221#define OOM_CONTROL (0)
222
223/*
224 * Iteration constructs for visiting all cgroups (under a tree). If
225 * loops are exited prematurely (break), mem_cgroup_iter_break() must
226 * be used for reference counting.
227 */
228#define for_each_mem_cgroup_tree(iter, root) \
229 for (iter = mem_cgroup_iter(root, NULL, NULL); \
230 iter != NULL; \
231 iter = mem_cgroup_iter(root, iter, NULL))
232
233#define for_each_mem_cgroup(iter) \
234 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
235 iter != NULL; \
236 iter = mem_cgroup_iter(NULL, iter, NULL))
237
238static inline bool should_force_charge(void)
239{
240 return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
241 (current->flags & PF_EXITING);
242}
243
244/* Some nice accessors for the vmpressure. */
245struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
246{
247 if (!memcg)
248 memcg = root_mem_cgroup;
249 return &memcg->vmpressure;
250}
251
252struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
253{
254 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
255}
256
257#ifdef CONFIG_MEMCG_KMEM
258extern spinlock_t css_set_lock;
259
260static void obj_cgroup_release(struct percpu_ref *ref)
261{
262 struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
263 struct mem_cgroup *memcg;
264 unsigned int nr_bytes;
265 unsigned int nr_pages;
266 unsigned long flags;
267
268 /*
269 * At this point all allocated objects are freed, and
270 * objcg->nr_charged_bytes can't have an arbitrary byte value.
271 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
272 *
273 * The following sequence can lead to it:
274 * 1) CPU0: objcg == stock->cached_objcg
275 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
276 * PAGE_SIZE bytes are charged
277 * 3) CPU1: a process from another memcg is allocating something,
278 * the stock if flushed,
279 * objcg->nr_charged_bytes = PAGE_SIZE - 92
280 * 5) CPU0: we do release this object,
281 * 92 bytes are added to stock->nr_bytes
282 * 6) CPU0: stock is flushed,
283 * 92 bytes are added to objcg->nr_charged_bytes
284 *
285 * In the result, nr_charged_bytes == PAGE_SIZE.
286 * This page will be uncharged in obj_cgroup_release().
287 */
288 nr_bytes = atomic_read(&objcg->nr_charged_bytes);
289 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
290 nr_pages = nr_bytes >> PAGE_SHIFT;
291
292 spin_lock_irqsave(&css_set_lock, flags);
293 memcg = obj_cgroup_memcg(objcg);
294 if (nr_pages)
295 __memcg_kmem_uncharge(memcg, nr_pages);
296 list_del(&objcg->list);
297 mem_cgroup_put(memcg);
298 spin_unlock_irqrestore(&css_set_lock, flags);
299
300 percpu_ref_exit(ref);
301 kfree_rcu(objcg, rcu);
302}
303
304static struct obj_cgroup *obj_cgroup_alloc(void)
305{
306 struct obj_cgroup *objcg;
307 int ret;
308
309 objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
310 if (!objcg)
311 return NULL;
312
313 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
314 GFP_KERNEL);
315 if (ret) {
316 kfree(objcg);
317 return NULL;
318 }
319 INIT_LIST_HEAD(&objcg->list);
320 return objcg;
321}
322
323static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
324 struct mem_cgroup *parent)
325{
326 struct obj_cgroup *objcg, *iter;
327
328 objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
329
330 spin_lock_irq(&css_set_lock);
331
332 /* Move active objcg to the parent's list */
333 xchg(&objcg->memcg, parent);
334 css_get(&parent->css);
335 list_add(&objcg->list, &parent->objcg_list);
336
337 /* Move already reparented objcgs to the parent's list */
338 list_for_each_entry(iter, &memcg->objcg_list, list) {
339 css_get(&parent->css);
340 xchg(&iter->memcg, parent);
341 css_put(&memcg->css);
342 }
343 list_splice(&memcg->objcg_list, &parent->objcg_list);
344
345 spin_unlock_irq(&css_set_lock);
346
347 percpu_ref_kill(&objcg->refcnt);
348}
349
350/*
351 * This will be used as a shrinker list's index.
352 * The main reason for not using cgroup id for this:
353 * this works better in sparse environments, where we have a lot of memcgs,
354 * but only a few kmem-limited. Or also, if we have, for instance, 200
355 * memcgs, and none but the 200th is kmem-limited, we'd have to have a
356 * 200-entry array for that.
357 *
358 * The current size of the caches array is stored in memcg_nr_cache_ids. It
359 * will double each time we have to increase it.
360 */
361static DEFINE_IDA(memcg_cache_ida);
362int memcg_nr_cache_ids;
363
364/* Protects memcg_nr_cache_ids */
365static DECLARE_RWSEM(memcg_cache_ids_sem);
366
367void memcg_get_cache_ids(void)
368{
369 down_read(&memcg_cache_ids_sem);
370}
371
372void memcg_put_cache_ids(void)
373{
374 up_read(&memcg_cache_ids_sem);
375}
376
377/*
378 * MIN_SIZE is different from 1, because we would like to avoid going through
379 * the alloc/free process all the time. In a small machine, 4 kmem-limited
380 * cgroups is a reasonable guess. In the future, it could be a parameter or
381 * tunable, but that is strictly not necessary.
382 *
383 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
384 * this constant directly from cgroup, but it is understandable that this is
385 * better kept as an internal representation in cgroup.c. In any case, the
386 * cgrp_id space is not getting any smaller, and we don't have to necessarily
387 * increase ours as well if it increases.
388 */
389#define MEMCG_CACHES_MIN_SIZE 4
390#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
391
392/*
393 * A lot of the calls to the cache allocation functions are expected to be
394 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
395 * conditional to this static branch, we'll have to allow modules that do
396 * kmem_cache_alloc and the like to see this symbol as well
397 */
398DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
399EXPORT_SYMBOL(memcg_kmem_enabled_key);
400#endif
401
402static int memcg_shrinker_map_size;
403static DEFINE_MUTEX(memcg_shrinker_map_mutex);
404
405static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
406{
407 kvfree(container_of(head, struct memcg_shrinker_map, rcu));
408}
409
410static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
411 int size, int old_size)
412{
413 struct memcg_shrinker_map *new, *old;
414 int nid;
415
416 lockdep_assert_held(&memcg_shrinker_map_mutex);
417
418 for_each_node(nid) {
419 old = rcu_dereference_protected(
420 mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
421 /* Not yet online memcg */
422 if (!old)
423 return 0;
424
425 new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
426 if (!new)
427 return -ENOMEM;
428
429 /* Set all old bits, clear all new bits */
430 memset(new->map, (int)0xff, old_size);
431 memset((void *)new->map + old_size, 0, size - old_size);
432
433 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
434 call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
435 }
436
437 return 0;
438}
439
440static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
441{
442 struct mem_cgroup_per_node *pn;
443 struct memcg_shrinker_map *map;
444 int nid;
445
446 if (mem_cgroup_is_root(memcg))
447 return;
448
449 for_each_node(nid) {
450 pn = mem_cgroup_nodeinfo(memcg, nid);
451 map = rcu_dereference_protected(pn->shrinker_map, true);
452 if (map)
453 kvfree(map);
454 rcu_assign_pointer(pn->shrinker_map, NULL);
455 }
456}
457
458static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
459{
460 struct memcg_shrinker_map *map;
461 int nid, size, ret = 0;
462
463 if (mem_cgroup_is_root(memcg))
464 return 0;
465
466 mutex_lock(&memcg_shrinker_map_mutex);
467 size = memcg_shrinker_map_size;
468 for_each_node(nid) {
469 map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
470 if (!map) {
471 memcg_free_shrinker_maps(memcg);
472 ret = -ENOMEM;
473 break;
474 }
475 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
476 }
477 mutex_unlock(&memcg_shrinker_map_mutex);
478
479 return ret;
480}
481
482int memcg_expand_shrinker_maps(int new_id)
483{
484 int size, old_size, ret = 0;
485 struct mem_cgroup *memcg;
486
487 size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
488 old_size = memcg_shrinker_map_size;
489 if (size <= old_size)
490 return 0;
491
492 mutex_lock(&memcg_shrinker_map_mutex);
493 if (!root_mem_cgroup)
494 goto unlock;
495
496 for_each_mem_cgroup(memcg) {
497 if (mem_cgroup_is_root(memcg))
498 continue;
499 ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
500 if (ret) {
501 mem_cgroup_iter_break(NULL, memcg);
502 goto unlock;
503 }
504 }
505unlock:
506 if (!ret)
507 memcg_shrinker_map_size = size;
508 mutex_unlock(&memcg_shrinker_map_mutex);
509 return ret;
510}
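
/*
 * Worked example for the sizing above, assuming 64-bit longs: covering
 * shrinker id 100 needs bits 0..100, i.e. DIV_ROUND_UP(101, 64) == 2
 * longs, so size == 16 bytes; a map that already has room (old_size >=
 * size) is left untouched, smaller ones are reallocated and copied.
 */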
511
512void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
513{
514 if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
515 struct memcg_shrinker_map *map;
516
517 rcu_read_lock();
518 map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
519 /* Pairs with smp mb in shrink_slab() */
520 smp_mb__before_atomic();
521 set_bit(shrinker_id, map->map);
522 rcu_read_unlock();
523 }
524}
525
526/**
527 * mem_cgroup_css_from_page - css of the memcg associated with a page
528 * @page: page of interest
529 *
530 * If memcg is bound to the default hierarchy, css of the memcg associated
531 * with @page is returned. The returned css remains associated with @page
532 * until it is released.
533 *
534 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
535 * is returned.
536 */
537struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
538{
539 struct mem_cgroup *memcg;
540
541 memcg = page->mem_cgroup;
542
543 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
544 memcg = root_mem_cgroup;
545
546 return &memcg->css;
547}
548
549/**
550 * page_cgroup_ino - return inode number of the memcg a page is charged to
551 * @page: the page
552 *
553 * Look up the closest online ancestor of the memory cgroup @page is charged to
554 * and return its inode number or 0 if @page is not charged to any cgroup. It
555 * is safe to call this function without holding a reference to @page.
556 *
557 * Note, this function is inherently racy, because there is nothing to prevent
558 * the cgroup inode from getting torn down and potentially reallocated a moment
559 * after page_cgroup_ino() returns, so it should only be used by callers that
560 * do not care (such as procfs interfaces).
561 */
562ino_t page_cgroup_ino(struct page *page)
563{
564 struct mem_cgroup *memcg;
565 unsigned long ino = 0;
566
567 rcu_read_lock();
568 memcg = page->mem_cgroup;
569
570 /*
571 * The lowest bit set means that memcg isn't a valid
572	 * memcg pointer, but an obj_cgroups pointer.
573 * In this case the page is shared and doesn't belong
574 * to any specific memory cgroup.
575 */
576 if ((unsigned long) memcg & 0x1UL)
577 memcg = NULL;
578
579 while (memcg && !(memcg->css.flags & CSS_ONLINE))
580 memcg = parent_mem_cgroup(memcg);
581 if (memcg)
582 ino = cgroup_ino(memcg->css.cgroup);
583 rcu_read_unlock();
584 return ino;
585}
586
587static struct mem_cgroup_per_node *
588mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
589{
590 int nid = page_to_nid(page);
591
592 return memcg->nodeinfo[nid];
593}
594
595static struct mem_cgroup_tree_per_node *
596soft_limit_tree_node(int nid)
597{
598 return soft_limit_tree.rb_tree_per_node[nid];
599}
600
601static struct mem_cgroup_tree_per_node *
602soft_limit_tree_from_page(struct page *page)
603{
604 int nid = page_to_nid(page);
605
606 return soft_limit_tree.rb_tree_per_node[nid];
607}
608
609static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
610 struct mem_cgroup_tree_per_node *mctz,
611 unsigned long new_usage_in_excess)
612{
613 struct rb_node **p = &mctz->rb_root.rb_node;
614 struct rb_node *parent = NULL;
615 struct mem_cgroup_per_node *mz_node;
616 bool rightmost = true;
617
618 if (mz->on_tree)
619 return;
620
621 mz->usage_in_excess = new_usage_in_excess;
622 if (!mz->usage_in_excess)
623 return;
624 while (*p) {
625 parent = *p;
626 mz_node = rb_entry(parent, struct mem_cgroup_per_node,
627 tree_node);
628 if (mz->usage_in_excess < mz_node->usage_in_excess) {
629 p = &(*p)->rb_left;
630 rightmost = false;
631 }
632
633 /*
634 * We can't avoid mem cgroups that are over their soft
635 * limit by the same amount
636 */
637 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
638 p = &(*p)->rb_right;
639 }
640
641 if (rightmost)
642 mctz->rb_rightmost = &mz->tree_node;
643
644 rb_link_node(&mz->tree_node, parent, p);
645 rb_insert_color(&mz->tree_node, &mctz->rb_root);
646 mz->on_tree = true;
647}
648
649static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
650 struct mem_cgroup_tree_per_node *mctz)
651{
652 if (!mz->on_tree)
653 return;
654
655 if (&mz->tree_node == mctz->rb_rightmost)
656 mctz->rb_rightmost = rb_prev(&mz->tree_node);
657
658 rb_erase(&mz->tree_node, &mctz->rb_root);
659 mz->on_tree = false;
660}
661
662static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
663 struct mem_cgroup_tree_per_node *mctz)
664{
665 unsigned long flags;
666
667 spin_lock_irqsave(&mctz->lock, flags);
668 __mem_cgroup_remove_exceeded(mz, mctz);
669 spin_unlock_irqrestore(&mctz->lock, flags);
670}
671
672static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
673{
674 unsigned long nr_pages = page_counter_read(&memcg->memory);
675 unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
676 unsigned long excess = 0;
677
678 if (nr_pages > soft_limit)
679 excess = nr_pages - soft_limit;
680
681 return excess;
682}
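
/*
 * E.g. with 1536 pages charged and a soft limit of 1024 pages,
 * soft_limit_excess() returns 512; at or below the soft limit it
 * returns 0, which keeps the group off the soft-limit tree.
 */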
683
684static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
685{
686 unsigned long excess;
687 struct mem_cgroup_per_node *mz;
688 struct mem_cgroup_tree_per_node *mctz;
689
690 mctz = soft_limit_tree_from_page(page);
691 if (!mctz)
692 return;
693 /*
694	 * Necessary to update all ancestors when hierarchy is used,
695 * because their event counter is not touched.
696 */
697 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
698 mz = mem_cgroup_page_nodeinfo(memcg, page);
699 excess = soft_limit_excess(memcg);
700 /*
701 * We have to update the tree if mz is on RB-tree or
702 * mem is over its softlimit.
703 */
704 if (excess || mz->on_tree) {
705 unsigned long flags;
706
707 spin_lock_irqsave(&mctz->lock, flags);
708 /* if on-tree, remove it */
709 if (mz->on_tree)
710 __mem_cgroup_remove_exceeded(mz, mctz);
711 /*
712 * Insert again. mz->usage_in_excess will be updated.
713 * If excess is 0, no tree ops.
714 */
715 __mem_cgroup_insert_exceeded(mz, mctz, excess);
716 spin_unlock_irqrestore(&mctz->lock, flags);
717 }
718 }
719}
720
721static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
722{
723 struct mem_cgroup_tree_per_node *mctz;
724 struct mem_cgroup_per_node *mz;
725 int nid;
726
727 for_each_node(nid) {
728 mz = mem_cgroup_nodeinfo(memcg, nid);
729 mctz = soft_limit_tree_node(nid);
730 if (mctz)
731 mem_cgroup_remove_exceeded(mz, mctz);
732 }
733}
734
735static struct mem_cgroup_per_node *
736__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
737{
738 struct mem_cgroup_per_node *mz;
739
740retry:
741 mz = NULL;
742 if (!mctz->rb_rightmost)
743 goto done; /* Nothing to reclaim from */
744
745 mz = rb_entry(mctz->rb_rightmost,
746 struct mem_cgroup_per_node, tree_node);
747 /*
748 * Remove the node now but someone else can add it back,
749	 * we will add it back at the end of reclaim to its correct
750 * position in the tree.
751 */
752 __mem_cgroup_remove_exceeded(mz, mctz);
753 if (!soft_limit_excess(mz->memcg) ||
754 !css_tryget(&mz->memcg->css))
755 goto retry;
756done:
757 return mz;
758}
759
760static struct mem_cgroup_per_node *
761mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
762{
763 struct mem_cgroup_per_node *mz;
764
765 spin_lock_irq(&mctz->lock);
766 mz = __mem_cgroup_largest_soft_limit_node(mctz);
767 spin_unlock_irq(&mctz->lock);
768 return mz;
769}
770
771/**
772 * __mod_memcg_state - update cgroup memory statistics
773 * @memcg: the memory cgroup
774 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
775 * @val: delta to add to the counter, can be negative
776 */
777void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
778{
779 long x, threshold = MEMCG_CHARGE_BATCH;
780
781 if (mem_cgroup_disabled())
782 return;
783
784 if (memcg_stat_item_in_bytes(idx))
785 threshold <<= PAGE_SHIFT;
786
787 x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
788 if (unlikely(abs(x) > threshold)) {
789 struct mem_cgroup *mi;
790
791 /*
792 * Batch local counters to keep them in sync with
793 * the hierarchical ones.
794 */
795 __this_cpu_add(memcg->vmstats_local->stat[idx], x);
796 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
797 atomic_long_add(x, &mi->vmstats[idx]);
798 x = 0;
799 }
800 __this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
801}
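
/*
 * Sketch of the batching above: each CPU accumulates updates in its
 * percpu slot until the magnitude crosses the threshold, at which point
 * one atomic_long_add() per ancestor publishes the sum and the slot is
 * reset to 0; byte-sized stat items get a threshold shifted up by
 * PAGE_SHIFT.
 */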
802
803static struct mem_cgroup_per_node *
804parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
805{
806 struct mem_cgroup *parent;
807
808 parent = parent_mem_cgroup(pn->memcg);
809 if (!parent)
810 return NULL;
811 return mem_cgroup_nodeinfo(parent, nid);
812}
813
814void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
815 int val)
816{
817 struct mem_cgroup_per_node *pn;
818 struct mem_cgroup *memcg;
819 long x, threshold = MEMCG_CHARGE_BATCH;
820
821 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
822 memcg = pn->memcg;
823
824 /* Update memcg */
825 __mod_memcg_state(memcg, idx, val);
826
827 /* Update lruvec */
828 __this_cpu_add(pn->lruvec_stat_local->count[idx], val);
829
830 if (vmstat_item_in_bytes(idx))
831 threshold <<= PAGE_SHIFT;
832
833 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
834 if (unlikely(abs(x) > threshold)) {
835 pg_data_t *pgdat = lruvec_pgdat(lruvec);
836 struct mem_cgroup_per_node *pi;
837
838 for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
839 atomic_long_add(x, &pi->lruvec_stat[idx]);
840 x = 0;
841 }
842 __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
843}
844
845/**
846 * __mod_lruvec_state - update lruvec memory statistics
847 * @lruvec: the lruvec
848 * @idx: the stat item
849 * @val: delta to add to the counter, can be negative
850 *
851 * The lruvec is the intersection of the NUMA node and a cgroup. This
852 * function updates the all three counters that are affected by a
853 * change of state at this level: per-node, per-cgroup, per-lruvec.
854 */
855void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
856 int val)
857{
858 /* Update node */
859 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
860
861 /* Update memcg and lruvec */
862 if (!mem_cgroup_disabled())
863 __mod_memcg_lruvec_state(lruvec, idx, val);
864}
865
866void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
867{
868 pg_data_t *pgdat = page_pgdat(virt_to_page(p));
869 struct mem_cgroup *memcg;
870 struct lruvec *lruvec;
871
872 rcu_read_lock();
873 memcg = mem_cgroup_from_obj(p);
874
875 /* Untracked pages have no memcg, no lruvec. Update only the node */
876 if (!memcg || memcg == root_mem_cgroup) {
877 __mod_node_page_state(pgdat, idx, val);
878 } else {
879 lruvec = mem_cgroup_lruvec(memcg, pgdat);
880 __mod_lruvec_state(lruvec, idx, val);
881 }
882 rcu_read_unlock();
883}
884
885void mod_memcg_obj_state(void *p, int idx, int val)
886{
887 struct mem_cgroup *memcg;
888
889 rcu_read_lock();
890 memcg = mem_cgroup_from_obj(p);
891 if (memcg)
892 mod_memcg_state(memcg, idx, val);
893 rcu_read_unlock();
894}
895
896/**
897 * __count_memcg_events - account VM events in a cgroup
898 * @memcg: the memory cgroup
899 * @idx: the event item
900 * @count: the number of events that occurred
901 */
902void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
903 unsigned long count)
904{
905 unsigned long x;
906
907 if (mem_cgroup_disabled())
908 return;
909
910 x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
911 if (unlikely(x > MEMCG_CHARGE_BATCH)) {
912 struct mem_cgroup *mi;
913
914 /*
915 * Batch local counters to keep them in sync with
916 * the hierarchical ones.
917 */
918 __this_cpu_add(memcg->vmstats_local->events[idx], x);
919 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
920 atomic_long_add(x, &mi->vmevents[idx]);
921 x = 0;
922 }
923 __this_cpu_write(memcg->vmstats_percpu->events[idx], x);
924}
925
926static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
927{
928 return atomic_long_read(&memcg->vmevents[event]);
929}
930
931static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
932{
933 long x = 0;
934 int cpu;
935
936 for_each_possible_cpu(cpu)
937 x += per_cpu(memcg->vmstats_local->events[event], cpu);
938 return x;
939}
940
941static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
942 struct page *page,
943 int nr_pages)
944{
945	/* A pagein of a big page counts as one event, so ignore the page size */
946 if (nr_pages > 0)
947 __count_memcg_events(memcg, PGPGIN, 1);
948 else {
949 __count_memcg_events(memcg, PGPGOUT, 1);
950 nr_pages = -nr_pages; /* for event */
951 }
952
953 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
954}
955
956static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
957 enum mem_cgroup_events_target target)
958{
959 unsigned long val, next;
960
961 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
962 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
963 /* from time_after() in jiffies.h */
964 if ((long)(next - val) < 0) {
965 switch (target) {
966 case MEM_CGROUP_TARGET_THRESH:
967 next = val + THRESHOLDS_EVENTS_TARGET;
968 break;
969 case MEM_CGROUP_TARGET_SOFTLIMIT:
970 next = val + SOFTLIMIT_EVENTS_TARGET;
971 break;
972 default:
973 break;
974 }
975 __this_cpu_write(memcg->vmstats_percpu->targets[target], next);
976 return true;
977 }
978 return false;
979}
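
/*
 * The signed comparison above borrows the time_after() trick: e.g. with
 * next == 3 and val == 5, (long)(3 - 5) < 0, so the target is past due;
 * this stays correct even when the unsigned counters wrap around.
 */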
980
981/*
982 * Check events in order.
983 *
984 */
985static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
986{
987 /* threshold event is triggered in finer grain than soft limit */
988 if (unlikely(mem_cgroup_event_ratelimit(memcg,
989 MEM_CGROUP_TARGET_THRESH))) {
990 bool do_softlimit;
991
992 do_softlimit = mem_cgroup_event_ratelimit(memcg,
993 MEM_CGROUP_TARGET_SOFTLIMIT);
994 mem_cgroup_threshold(memcg);
995 if (unlikely(do_softlimit))
996 mem_cgroup_update_tree(memcg, page);
997 }
998}
999
1000struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
1001{
1002 /*
1003 * mm_update_next_owner() may clear mm->owner to NULL
1004 * if it races with swapoff, page migration, etc.
1005 * So this can be called with p == NULL.
1006 */
1007 if (unlikely(!p))
1008 return NULL;
1009
1010 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
1011}
1012EXPORT_SYMBOL(mem_cgroup_from_task);
1013
1014/**
1015 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
1016 * @mm: mm from which memcg should be extracted. It can be NULL.
1017 *
1018 * Obtain a reference on mm->memcg and return it if successful. Otherwise
1019 * root_mem_cgroup is returned. However if mem_cgroup is disabled, NULL is
1020 * returned.
1021 */
1022struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1023{
1024 struct mem_cgroup *memcg;
1025
1026 if (mem_cgroup_disabled())
1027 return NULL;
1028
1029 rcu_read_lock();
1030 do {
1031 /*
1032		 * Page cache insertions can happen without an
1033 * actual mm context, e.g. during disk probing
1034 * on boot, loopback IO, acct() writes etc.
1035 */
1036 if (unlikely(!mm))
1037 memcg = root_mem_cgroup;
1038 else {
1039 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1040 if (unlikely(!memcg))
1041 memcg = root_mem_cgroup;
1042 }
1043 } while (!css_tryget(&memcg->css));
1044 rcu_read_unlock();
1045 return memcg;
1046}
1047EXPORT_SYMBOL(get_mem_cgroup_from_mm);
1048
1049/**
1050 * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
1051 * @page: page from which memcg should be extracted.
1052 *
1053 * Obtain a reference on page->memcg and return it if successful. Otherwise
1054 * root_mem_cgroup is returned.
1055 */
1056struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
1057{
1058 struct mem_cgroup *memcg = page->mem_cgroup;
1059
1060 if (mem_cgroup_disabled())
1061 return NULL;
1062
1063 rcu_read_lock();
1064	/* The page should not get uncharged, nor its memcg freed, under us. */
1065 if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
1066 memcg = root_mem_cgroup;
1067 rcu_read_unlock();
1068 return memcg;
1069}
1070EXPORT_SYMBOL(get_mem_cgroup_from_page);
1071
1072/**
1073 * If current->active_memcg is non-NULL, do not fall back to current->mm->memcg.
1074 */
1075static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
1076{
1077 if (unlikely(current->active_memcg)) {
1078 struct mem_cgroup *memcg;
1079
1080 rcu_read_lock();
1081 /* current->active_memcg must hold a ref. */
1082		if (WARN_ON_ONCE(!css_tryget(&current->active_memcg->css)))
1083 memcg = root_mem_cgroup;
1084 else
1085 memcg = current->active_memcg;
1086 rcu_read_unlock();
1087 return memcg;
1088 }
1089 return get_mem_cgroup_from_mm(current->mm);
1090}
1091
1092/**
1093 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1094 * @root: hierarchy root
1095 * @prev: previously returned memcg, NULL on first invocation
1096 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1097 *
1098 * Returns references to children of the hierarchy below @root, or
1099 * @root itself, or %NULL after a full round-trip.
1100 *
1101 * Caller must pass the return value in @prev on subsequent
1102 * invocations for reference counting, or use mem_cgroup_iter_break()
1103 * to cancel a hierarchy walk before the round-trip is complete.
1104 *
1105 * Reclaimers can specify a node and a priority level in @reclaim to
1106 * divide up the memcgs in the hierarchy among all concurrent
1107 * reclaimers operating on the same node and priority.
1108 */
1109struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1110 struct mem_cgroup *prev,
1111 struct mem_cgroup_reclaim_cookie *reclaim)
1112{
1113 struct mem_cgroup_reclaim_iter *iter;
1114 struct cgroup_subsys_state *css = NULL;
1115 struct mem_cgroup *memcg = NULL;
1116 struct mem_cgroup *pos = NULL;
1117
1118 if (mem_cgroup_disabled())
1119 return NULL;
1120
1121 if (!root)
1122 root = root_mem_cgroup;
1123
1124 if (prev && !reclaim)
1125 pos = prev;
1126
1127 if (!root->use_hierarchy && root != root_mem_cgroup) {
1128 if (prev)
1129 goto out;
1130 return root;
1131 }
1132
1133 rcu_read_lock();
1134
1135 if (reclaim) {
1136 struct mem_cgroup_per_node *mz;
1137
1138 mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
1139 iter = &mz->iter;
1140
1141 if (prev && reclaim->generation != iter->generation)
1142 goto out_unlock;
1143
1144 while (1) {
1145 pos = READ_ONCE(iter->position);
1146 if (!pos || css_tryget(&pos->css))
1147 break;
1148 /*
1149 * css reference reached zero, so iter->position will
1150 * be cleared by ->css_released. However, we should not
1151 * rely on this happening soon, because ->css_released
1152 * is called from a work queue, and by busy-waiting we
1153 * might block it. So we clear iter->position right
1154 * away.
1155 */
1156 (void)cmpxchg(&iter->position, pos, NULL);
1157 }
1158 }
1159
1160 if (pos)
1161 css = &pos->css;
1162
1163 for (;;) {
1164 css = css_next_descendant_pre(css, &root->css);
1165 if (!css) {
1166 /*
1167 * Reclaimers share the hierarchy walk, and a
1168 * new one might jump in right at the end of
1169 * the hierarchy - make sure they see at least
1170 * one group and restart from the beginning.
1171 */
1172 if (!prev)
1173 continue;
1174 break;
1175 }
1176
1177 /*
1178 * Verify the css and acquire a reference. The root
1179 * is provided by the caller, so we know it's alive
1180 * and kicking, and don't take an extra reference.
1181 */
1182 memcg = mem_cgroup_from_css(css);
1183
1184 if (css == &root->css)
1185 break;
1186
1187 if (css_tryget(css))
1188 break;
1189
1190 memcg = NULL;
1191 }
1192
1193 if (reclaim) {
1194 /*
1195 * The position could have already been updated by a competing
1196 * thread, so check that the value hasn't changed since we read
1197 * it to avoid reclaiming from the same cgroup twice.
1198 */
1199 (void)cmpxchg(&iter->position, pos, memcg);
1200
1201 if (pos)
1202 css_put(&pos->css);
1203
1204 if (!memcg)
1205 iter->generation++;
1206 else if (!prev)
1207 reclaim->generation = iter->generation;
1208 }
1209
1210out_unlock:
1211 rcu_read_unlock();
1212out:
1213 if (prev && prev != root)
1214 css_put(&prev->css);
1215
1216 return memcg;
1217}
1218
1219/**
1220 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1221 * @root: hierarchy root
1222 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1223 */
1224void mem_cgroup_iter_break(struct mem_cgroup *root,
1225 struct mem_cgroup *prev)
1226{
1227 if (!root)
1228 root = root_mem_cgroup;
1229 if (prev && prev != root)
1230 css_put(&prev->css);
1231}
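
/*
 * Illustrative reclaim-walk sketch, condensed from how
 * mem_cgroup_soft_reclaim() below drives the iterator; the cookie lets
 * concurrent reclaimers share one position instead of each restarting
 * from @root (enough_reclaimed() is a hypothetical stand-in):
 *
 *	struct mem_cgroup_reclaim_cookie reclaim = { .pgdat = pgdat };
 *	struct mem_cgroup *memcg = NULL;
 *
 *	while ((memcg = mem_cgroup_iter(root, memcg, &reclaim))) {
 *		if (enough_reclaimed()) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *	}
 */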
1232
1233static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1234 struct mem_cgroup *dead_memcg)
1235{
1236 struct mem_cgroup_reclaim_iter *iter;
1237 struct mem_cgroup_per_node *mz;
1238 int nid;
1239
1240 for_each_node(nid) {
1241 mz = mem_cgroup_nodeinfo(from, nid);
1242 iter = &mz->iter;
1243 cmpxchg(&iter->position, dead_memcg, NULL);
1244 }
1245}
1246
1247static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1248{
1249 struct mem_cgroup *memcg = dead_memcg;
1250 struct mem_cgroup *last;
1251
1252 do {
1253 __invalidate_reclaim_iterators(memcg, dead_memcg);
1254 last = memcg;
1255 } while ((memcg = parent_mem_cgroup(memcg)));
1256
1257 /*
1258	 * When cgroup1 non-hierarchy mode is used,
1259 * parent_mem_cgroup() does not walk all the way up to the
1260 * cgroup root (root_mem_cgroup). So we have to handle
1261 * dead_memcg from cgroup root separately.
1262 */
1263 if (last != root_mem_cgroup)
1264 __invalidate_reclaim_iterators(root_mem_cgroup,
1265 dead_memcg);
1266}
1267
1268/**
1269 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1270 * @memcg: hierarchy root
1271 * @fn: function to call for each task
1272 * @arg: argument passed to @fn
1273 *
1274 * This function iterates over tasks attached to @memcg or to any of its
1275 * descendants and calls @fn for each task. If @fn returns a non-zero
1276 * value, the function breaks the iteration loop and returns the value.
1277 * Otherwise, it will iterate over all tasks and return 0.
1278 *
1279 * This function must not be called for the root memory cgroup.
1280 */
1281int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1282 int (*fn)(struct task_struct *, void *), void *arg)
1283{
1284 struct mem_cgroup *iter;
1285 int ret = 0;
1286
1287 BUG_ON(memcg == root_mem_cgroup);
1288
1289 for_each_mem_cgroup_tree(iter, memcg) {
1290 struct css_task_iter it;
1291 struct task_struct *task;
1292
1293 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1294 while (!ret && (task = css_task_iter_next(&it)))
1295 ret = fn(task, arg);
1296 css_task_iter_end(&it);
1297 if (ret) {
1298 mem_cgroup_iter_break(memcg, iter);
1299 break;
1300 }
1301 }
1302 return ret;
1303}
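
/*
 * Usage sketch with a hypothetical callback; returning non-zero from
 * the callback stops the scan and is passed back to the caller:
 *
 *	static int count_task(struct task_struct *task, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	mem_cgroup_scan_tasks(memcg, count_task, &nr);
 */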
1304
1305/**
1306 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
1307 * @page: the page
1308 * @pgdat: pgdat of the page
1309 *
1310 * This function relies on page->mem_cgroup being stable - see the
1311 * access rules in commit_charge().
1312 */
1313struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
1314{
1315 struct mem_cgroup_per_node *mz;
1316 struct mem_cgroup *memcg;
1317 struct lruvec *lruvec;
1318
1319 if (mem_cgroup_disabled()) {
1320 lruvec = &pgdat->__lruvec;
1321 goto out;
1322 }
1323
1324 memcg = page->mem_cgroup;
1325 /*
1326 * Swapcache readahead pages are added to the LRU - and
1327 * possibly migrated - before they are charged.
1328 */
1329 if (!memcg)
1330 memcg = root_mem_cgroup;
1331
1332 mz = mem_cgroup_page_nodeinfo(memcg, page);
1333 lruvec = &mz->lruvec;
1334out:
1335 /*
1336 * Since a node can be onlined after the mem_cgroup was created,
1337	 * we have to be prepared to initialize lruvec->pgdat here;
1338 * and if offlined then reonlined, we need to reinitialize it.
1339 */
1340 if (unlikely(lruvec->pgdat != pgdat))
1341 lruvec->pgdat = pgdat;
1342 return lruvec;
1343}
1344
1345/**
1346 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1347 * @lruvec: mem_cgroup per node lru vector
1348 * @lru: index of lru list the page is sitting on
1349 * @zid: zone id of the accounted pages
1350 * @nr_pages: positive when adding or negative when removing
1351 *
1352 * This function must be called under lru_lock, just before a page is added
1353 * to or just after a page is removed from an lru list (that ordering being
1354 * so as to allow it to check that lru_size 0 is consistent with list_empty).
1355 */
1356void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1357 int zid, int nr_pages)
1358{
1359 struct mem_cgroup_per_node *mz;
1360 unsigned long *lru_size;
1361 long size;
1362
1363 if (mem_cgroup_disabled())
1364 return;
1365
1366 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1367 lru_size = &mz->lru_zone_size[zid][lru];
1368
1369 if (nr_pages < 0)
1370 *lru_size += nr_pages;
1371
1372 size = *lru_size;
1373 if (WARN_ONCE(size < 0,
1374 "%s(%p, %d, %d): lru_size %ld\n",
1375 __func__, lruvec, lru, nr_pages, size)) {
1376 VM_BUG_ON(1);
1377 *lru_size = 0;
1378 }
1379
1380 if (nr_pages > 0)
1381 *lru_size += nr_pages;
1382}
1383
1384/**
1385 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1386 * @memcg: the memory cgroup
1387 *
1388 * Returns the maximum amount of memory @memcg can be charged with, in
1389 * pages.
1390 */
1391static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1392{
1393 unsigned long margin = 0;
1394 unsigned long count;
1395 unsigned long limit;
1396
1397 count = page_counter_read(&memcg->memory);
1398 limit = READ_ONCE(memcg->memory.max);
1399 if (count < limit)
1400 margin = limit - count;
1401
1402 if (do_memsw_account()) {
1403 count = page_counter_read(&memcg->memsw);
1404 limit = READ_ONCE(memcg->memsw.max);
1405 if (count < limit)
1406 margin = min(margin, limit - count);
1407 else
1408 margin = 0;
1409 }
1410
1411 return margin;
1412}
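
/*
 * E.g. memory.max == 1000 pages with 900 charged gives a margin of 100
 * pages; with memsw accounting on, memsw.max == 950 and 920 charged
 * shrinks it to min(100, 30) == 30 pages.
 */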
1413
1414/*
1415 * A routine for checking whether "memcg" is under move_account() or not.
1416 *
1417 * Checks whether a cgroup is mc.from, mc.to, or in the hierarchy of
1418 * the moving cgroups. This is for waiting at high memory pressure
1419 * caused by "move".
1420 */
1421static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1422{
1423 struct mem_cgroup *from;
1424 struct mem_cgroup *to;
1425 bool ret = false;
1426 /*
1427	 * Unlike task_move routines, we access mc.to and mc.from without
1428	 * mutual exclusion by cgroup_mutex. Here, we take the spinlock instead.
1429 */
1430 spin_lock(&mc.lock);
1431 from = mc.from;
1432 to = mc.to;
1433 if (!from)
1434 goto unlock;
1435
1436 ret = mem_cgroup_is_descendant(from, memcg) ||
1437 mem_cgroup_is_descendant(to, memcg);
1438unlock:
1439 spin_unlock(&mc.lock);
1440 return ret;
1441}
1442
1443static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1444{
1445 if (mc.moving_task && current != mc.moving_task) {
1446 if (mem_cgroup_under_move(memcg)) {
1447 DEFINE_WAIT(wait);
1448 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1449 /* moving charge context might have finished. */
1450 if (mc.moving_task)
1451 schedule();
1452 finish_wait(&mc.waitq, &wait);
1453 return true;
1454 }
1455 }
1456 return false;
1457}
1458
1459static char *memory_stat_format(struct mem_cgroup *memcg)
1460{
1461 struct seq_buf s;
1462 int i;
1463
1464 seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
1465 if (!s.buffer)
1466 return NULL;
1467
1468 /*
1469 * Provide statistics on the state of the memory subsystem as
1470 * well as cumulative event counters that show past behavior.
1471 *
1472 * This list is ordered following a combination of these gradients:
1473 * 1) generic big picture -> specifics and details
1474 * 2) reflecting userspace activity -> reflecting kernel heuristics
1475 *
1476 * Current memory state:
1477 */
1478
1479 seq_buf_printf(&s, "anon %llu\n",
1480 (u64)memcg_page_state(memcg, NR_ANON_MAPPED) *
1481 PAGE_SIZE);
1482 seq_buf_printf(&s, "file %llu\n",
1483 (u64)memcg_page_state(memcg, NR_FILE_PAGES) *
1484 PAGE_SIZE);
1485 seq_buf_printf(&s, "kernel_stack %llu\n",
1486 (u64)memcg_page_state(memcg, NR_KERNEL_STACK_KB) *
1487 1024);
1488 seq_buf_printf(&s, "slab %llu\n",
1489 (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) +
1490 memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B)));
1491 seq_buf_printf(&s, "percpu %llu\n",
1492 (u64)memcg_page_state(memcg, MEMCG_PERCPU_B));
1493 seq_buf_printf(&s, "sock %llu\n",
1494 (u64)memcg_page_state(memcg, MEMCG_SOCK) *
1495 PAGE_SIZE);
1496
1497 seq_buf_printf(&s, "shmem %llu\n",
1498 (u64)memcg_page_state(memcg, NR_SHMEM) *
1499 PAGE_SIZE);
1500 seq_buf_printf(&s, "file_mapped %llu\n",
1501 (u64)memcg_page_state(memcg, NR_FILE_MAPPED) *
1502 PAGE_SIZE);
1503 seq_buf_printf(&s, "file_dirty %llu\n",
1504 (u64)memcg_page_state(memcg, NR_FILE_DIRTY) *
1505 PAGE_SIZE);
1506 seq_buf_printf(&s, "file_writeback %llu\n",
1507 (u64)memcg_page_state(memcg, NR_WRITEBACK) *
1508 PAGE_SIZE);
1509
1510#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1511 seq_buf_printf(&s, "anon_thp %llu\n",
1512 (u64)memcg_page_state(memcg, NR_ANON_THPS) *
1513 HPAGE_PMD_SIZE);
1514#endif
1515
1516 for (i = 0; i < NR_LRU_LISTS; i++)
1517 seq_buf_printf(&s, "%s %llu\n", lru_list_name(i),
1518 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
1519 PAGE_SIZE);
1520
1521 seq_buf_printf(&s, "slab_reclaimable %llu\n",
1522 (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B));
1523 seq_buf_printf(&s, "slab_unreclaimable %llu\n",
1524 (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B));
1525
1526 /* Accumulated memory events */
1527
1528 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
1529 memcg_events(memcg, PGFAULT));
1530 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
1531 memcg_events(memcg, PGMAJFAULT));
1532
1533 seq_buf_printf(&s, "workingset_refault_anon %lu\n",
1534 memcg_page_state(memcg, WORKINGSET_REFAULT_ANON));
1535 seq_buf_printf(&s, "workingset_refault_file %lu\n",
1536 memcg_page_state(memcg, WORKINGSET_REFAULT_FILE));
1537 seq_buf_printf(&s, "workingset_activate_anon %lu\n",
1538 memcg_page_state(memcg, WORKINGSET_ACTIVATE_ANON));
1539 seq_buf_printf(&s, "workingset_activate_file %lu\n",
1540 memcg_page_state(memcg, WORKINGSET_ACTIVATE_FILE));
1541 seq_buf_printf(&s, "workingset_restore_anon %lu\n",
1542 memcg_page_state(memcg, WORKINGSET_RESTORE_ANON));
1543 seq_buf_printf(&s, "workingset_restore_file %lu\n",
1544 memcg_page_state(memcg, WORKINGSET_RESTORE_FILE));
1545 seq_buf_printf(&s, "workingset_nodereclaim %lu\n",
1546 memcg_page_state(memcg, WORKINGSET_NODERECLAIM));
1547
1548 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGREFILL),
1549 memcg_events(memcg, PGREFILL));
1550 seq_buf_printf(&s, "pgscan %lu\n",
1551 memcg_events(memcg, PGSCAN_KSWAPD) +
1552 memcg_events(memcg, PGSCAN_DIRECT));
1553 seq_buf_printf(&s, "pgsteal %lu\n",
1554 memcg_events(memcg, PGSTEAL_KSWAPD) +
1555 memcg_events(memcg, PGSTEAL_DIRECT));
1556 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
1557 memcg_events(memcg, PGACTIVATE));
1558 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
1559 memcg_events(memcg, PGDEACTIVATE));
1560 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
1561 memcg_events(memcg, PGLAZYFREE));
1562 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
1563 memcg_events(memcg, PGLAZYFREED));
1564
1565#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1566 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
1567 memcg_events(memcg, THP_FAULT_ALLOC));
1568 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
1569 memcg_events(memcg, THP_COLLAPSE_ALLOC));
1570#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1571
1572 /* The above should easily fit into one page */
1573 WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1574
1575 return s.buffer;
1576}
1577
1578#define K(x) ((x) << (PAGE_SHIFT-10))
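/* e.g. with 4K pages, K(x) == x * 4: K(256) pages == 1024 kB */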
1579/**
1580 * mem_cgroup_print_oom_context: Print OOM information relevant to
1581 * memory controller.
1582 * @memcg: The memory cgroup that went over limit
1583 * @p: Task that is going to be killed
1584 *
1585 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1586 * enabled
1587 */
1588void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1589{
1590 rcu_read_lock();
1591
1592 if (memcg) {
1593 pr_cont(",oom_memcg=");
1594 pr_cont_cgroup_path(memcg->css.cgroup);
1595 } else
1596 pr_cont(",global_oom");
1597 if (p) {
1598 pr_cont(",task_memcg=");
1599 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1600 }
1601 rcu_read_unlock();
1602}
1603
1604/**
1605 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1606 * memory controller.
1607 * @memcg: The memory cgroup that went over limit
1608 */
1609void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1610{
1611 char *buf;
1612
1613 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1614 K((u64)page_counter_read(&memcg->memory)),
1615 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1616 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1617 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1618 K((u64)page_counter_read(&memcg->swap)),
1619 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1620 else {
1621 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1622 K((u64)page_counter_read(&memcg->memsw)),
1623 K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1624 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1625 K((u64)page_counter_read(&memcg->kmem)),
1626 K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1627 }
1628
1629 pr_info("Memory cgroup stats for ");
1630 pr_cont_cgroup_path(memcg->css.cgroup);
1631 pr_cont(":");
1632 buf = memory_stat_format(memcg);
1633 if (!buf)
1634 return;
1635 pr_info("%s", buf);
1636 kfree(buf);
1637}
1638
1639/*
1640 * Return the memory (and swap, if configured) limit for a memcg.
1641 */
1642unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1643{
1644 unsigned long max;
1645
1646 max = READ_ONCE(memcg->memory.max);
1647 if (mem_cgroup_swappiness(memcg)) {
1648 unsigned long memsw_max;
1649 unsigned long swap_max;
1650
1651 memsw_max = memcg->memsw.max;
1652 swap_max = READ_ONCE(memcg->swap.max);
1653 swap_max = min(swap_max, (unsigned long)total_swap_pages);
1654 max = min(max + swap_max, memsw_max);
1655 }
1656 return max;
1657}
1658
1659unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1660{
1661 return page_counter_read(&memcg->memory);
1662}
1663
1664static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1665 int order)
1666{
1667 struct oom_control oc = {
1668 .zonelist = NULL,
1669 .nodemask = NULL,
1670 .memcg = memcg,
1671 .gfp_mask = gfp_mask,
1672 .order = order,
1673 };
1674 bool ret = true;
1675
1676 if (mutex_lock_killable(&oom_lock))
1677 return true;
1678
1679 if (mem_cgroup_margin(memcg) >= (1 << order))
1680 goto unlock;
1681
1682 /*
1683	 * A few threads that were not waiting at mutex_lock_killable() can
1684	 * fail to bail out. Therefore, check again while holding oom_lock.
1685 */
1686 ret = should_force_charge() || out_of_memory(&oc);
1687
1688unlock:
1689 mutex_unlock(&oom_lock);
1690 return ret;
1691}
1692
1693static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1694 pg_data_t *pgdat,
1695 gfp_t gfp_mask,
1696 unsigned long *total_scanned)
1697{
1698 struct mem_cgroup *victim = NULL;
1699 int total = 0;
1700 int loop = 0;
1701 unsigned long excess;
1702 unsigned long nr_scanned;
1703 struct mem_cgroup_reclaim_cookie reclaim = {
1704 .pgdat = pgdat,
1705 };
1706
1707 excess = soft_limit_excess(root_memcg);
1708
1709 while (1) {
1710 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1711 if (!victim) {
1712 loop++;
1713 if (loop >= 2) {
1714 /*
1715 * If we have not been able to reclaim
1716				 * anything, it might be because there are
1717 * no reclaimable pages under this hierarchy
1718 */
1719 if (!total)
1720 break;
1721 /*
1722 * We want to do more targeted reclaim.
1723				 * excess >> 2 is not too excessive, so we don't
1724				 * reclaim too much, nor so little that we keep
1725 * coming back to reclaim from this cgroup
1726 */
1727 if (total >= (excess >> 2) ||
1728 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1729 break;
1730 }
1731 continue;
1732 }
1733 total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1734 pgdat, &nr_scanned);
1735 *total_scanned += nr_scanned;
1736 if (!soft_limit_excess(root_memcg))
1737 break;
1738 }
1739 mem_cgroup_iter_break(root_memcg, victim);
1740 return total;
1741}
1742
1743#ifdef CONFIG_LOCKDEP
1744static struct lockdep_map memcg_oom_lock_dep_map = {
1745 .name = "memcg_oom_lock",
1746};
1747#endif
1748
1749static DEFINE_SPINLOCK(memcg_oom_lock);
1750
1751/*
1752 * Check whether the OOM killer is already running under our hierarchy.
1753 * If someone else is running it, return false.
1754 */
1755static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1756{
1757 struct mem_cgroup *iter, *failed = NULL;
1758
1759 spin_lock(&memcg_oom_lock);
1760
1761 for_each_mem_cgroup_tree(iter, memcg) {
1762 if (iter->oom_lock) {
1763 /*
1764 * this subtree of our hierarchy is already locked
1765 * so we cannot give a lock.
1766 */
1767 failed = iter;
1768 mem_cgroup_iter_break(memcg, iter);
1769 break;
1770 } else
1771 iter->oom_lock = true;
1772 }
1773
1774 if (failed) {
1775 /*
1776 * OK, we failed to lock the whole subtree so we have
1777		 * to clean up what we set up before the failing subtree
1778 */
1779 for_each_mem_cgroup_tree(iter, memcg) {
1780 if (iter == failed) {
1781 mem_cgroup_iter_break(memcg, iter);
1782 break;
1783 }
1784 iter->oom_lock = false;
1785 }
1786 } else
1787 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1788
1789 spin_unlock(&memcg_oom_lock);
1790
1791 return !failed;
1792}
1793
1794static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1795{
1796 struct mem_cgroup *iter;
1797
1798 spin_lock(&memcg_oom_lock);
1799 mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1800 for_each_mem_cgroup_tree(iter, memcg)
1801 iter->oom_lock = false;
1802 spin_unlock(&memcg_oom_lock);
1803}
1804
1805static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1806{
1807 struct mem_cgroup *iter;
1808
1809 spin_lock(&memcg_oom_lock);
1810 for_each_mem_cgroup_tree(iter, memcg)
1811 iter->under_oom++;
1812 spin_unlock(&memcg_oom_lock);
1813}
1814
1815static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1816{
1817 struct mem_cgroup *iter;
1818
1819 /*
1820 * When a new child is created while the hierarchy is under oom,
1821 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
1822 */
1823 spin_lock(&memcg_oom_lock);
1824 for_each_mem_cgroup_tree(iter, memcg)
1825 if (iter->under_oom > 0)
1826 iter->under_oom--;
1827 spin_unlock(&memcg_oom_lock);
1828}
1829
1830static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1831
1832struct oom_wait_info {
1833 struct mem_cgroup *memcg;
1834 wait_queue_entry_t wait;
1835};
1836
1837static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1838 unsigned mode, int sync, void *arg)
1839{
1840 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1841 struct mem_cgroup *oom_wait_memcg;
1842 struct oom_wait_info *oom_wait_info;
1843
1844 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1845 oom_wait_memcg = oom_wait_info->memcg;
1846
1847 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1848 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1849 return 0;
1850 return autoremove_wake_function(wait, mode, sync, arg);
1851}
1852
1853static void memcg_oom_recover(struct mem_cgroup *memcg)
1854{
1855 /*
1856 * For the following lockless ->under_oom test, the only required
1857 * guarantee is that it must see the state asserted by an OOM when
1858 * this function is called as a result of userland actions
1859 * triggered by the notification of the OOM. This is trivially
1860 * achieved by invoking mem_cgroup_mark_under_oom() before
1861 * triggering notification.
1862 */
1863 if (memcg && memcg->under_oom)
1864 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1865}
1866
1867enum oom_status {
1868 OOM_SUCCESS,
1869 OOM_FAILED,
1870 OOM_ASYNC,
1871 OOM_SKIPPED
1872};
1873
1874static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1875{
1876 enum oom_status ret;
1877 bool locked;
1878
1879 if (order > PAGE_ALLOC_COSTLY_ORDER)
1880 return OOM_SKIPPED;
1881
1882 memcg_memory_event(memcg, MEMCG_OOM);
1883
1884 /*
1885 * We are in the middle of the charge context here, so we
1886 * don't want to block when potentially sitting on a callstack
1887 * that holds all kinds of filesystem and mm locks.
1888 *
1889 * cgroup1 allows disabling the OOM killer and waiting for outside
1890 * handling until the charge can succeed; remember the context and put
1891 * the task to sleep at the end of the page fault when all locks are
1892 * released.
1893 *
1894 * On the other hand, in-kernel OOM killer allows for an async victim
1895 * memory reclaim (oom_reaper) and that means that we are not solely
1896 * relying on the oom victim to make a forward progress and we can
1897 * invoke the oom killer here.
1898 *
1899 * Please note that mem_cgroup_out_of_memory might fail to find a
1900 * victim and then we have to bail out from the charge path.
1901 */
1902 if (memcg->oom_kill_disable) {
1903 if (!current->in_user_fault)
1904 return OOM_SKIPPED;
1905 css_get(&memcg->css);
1906 current->memcg_in_oom = memcg;
1907 current->memcg_oom_gfp_mask = mask;
1908 current->memcg_oom_order = order;
1909
1910 return OOM_ASYNC;
1911 }
1912
1913 mem_cgroup_mark_under_oom(memcg);
1914
1915 locked = mem_cgroup_oom_trylock(memcg);
1916
1917 if (locked)
1918 mem_cgroup_oom_notify(memcg);
1919
1920 mem_cgroup_unmark_under_oom(memcg);
1921 if (mem_cgroup_out_of_memory(memcg, mask, order))
1922 ret = OOM_SUCCESS;
1923 else
1924 ret = OOM_FAILED;
1925
1926 if (locked)
1927 mem_cgroup_oom_unlock(memcg);
1928
1929 return ret;
1930}
1931
1932/**
1933 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1934 * @handle: actually kill/wait or just clean up the OOM state
1935 *
1936 * This has to be called at the end of a page fault if the memcg OOM
1937 * handler was enabled.
1938 *
1939 * Memcg supports userspace OOM handling where failed allocations must
1940 * sleep on a waitqueue until the userspace task resolves the
1941 * situation. Sleeping directly in the charge context with all kinds
1942 * of locks held is not a good idea, instead we remember an OOM state
1943 * in the task and mem_cgroup_oom_synchronize() has to be called at
1944 * the end of the page fault to complete the OOM handling.
1945 *
1946 * Returns %true if an ongoing memcg OOM situation was detected and
1947 * completed, %false otherwise.
1948 */
1949bool mem_cgroup_oom_synchronize(bool handle)
1950{
1951 struct mem_cgroup *memcg = current->memcg_in_oom;
1952 struct oom_wait_info owait;
1953 bool locked;
1954
1955 /* OOM is global, do not handle */
1956 if (!memcg)
1957 return false;
1958
1959 if (!handle)
1960 goto cleanup;
1961
1962 owait.memcg = memcg;
1963 owait.wait.flags = 0;
1964 owait.wait.func = memcg_oom_wake_function;
1965 owait.wait.private = current;
1966 INIT_LIST_HEAD(&owait.wait.entry);
1967
1968 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1969 mem_cgroup_mark_under_oom(memcg);
1970
1971 locked = mem_cgroup_oom_trylock(memcg);
1972
1973 if (locked)
1974 mem_cgroup_oom_notify(memcg);
1975
1976 if (locked && !memcg->oom_kill_disable) {
1977 mem_cgroup_unmark_under_oom(memcg);
1978 finish_wait(&memcg_oom_waitq, &owait.wait);
1979 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1980 current->memcg_oom_order);
1981 } else {
1982 schedule();
1983 mem_cgroup_unmark_under_oom(memcg);
1984 finish_wait(&memcg_oom_waitq, &owait.wait);
1985 }
1986
1987 if (locked) {
1988 mem_cgroup_oom_unlock(memcg);
1989 /*
1990 * There is no guarantee that an OOM-lock contender
1991 * sees the wakeups triggered by the OOM kill
1992		 * uncharges. Wake any sleepers explicitly.
1993 */
1994 memcg_oom_recover(memcg);
1995 }
1996cleanup:
1997 current->memcg_in_oom = NULL;
1998 css_put(&memcg->css);
1999 return true;
2000}
2001
2002/**
2003 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2004 * @victim: task to be killed by the OOM killer
2005 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2006 *
2007 * Returns a pointer to a memory cgroup, which has to be cleaned up
2008 * by killing all belonging OOM-killable tasks.
2009 *
2010 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2011 */
2012struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2013 struct mem_cgroup *oom_domain)
2014{
2015 struct mem_cgroup *oom_group = NULL;
2016 struct mem_cgroup *memcg;
2017
2018 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2019 return NULL;
2020
2021 if (!oom_domain)
2022 oom_domain = root_mem_cgroup;
2023
2024 rcu_read_lock();
2025
2026 memcg = mem_cgroup_from_task(victim);
2027 if (memcg == root_mem_cgroup)
2028 goto out;
2029
2030 /*
2031 * If the victim task has been asynchronously moved to a different
2032 * memory cgroup, we might end up killing tasks outside oom_domain.
2033 * In this case it's better to ignore memory.group.oom.
2034 */
2035 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2036 goto out;
2037
2038 /*
2039 * Traverse the memory cgroup hierarchy from the victim task's
2040 * cgroup up to the OOMing cgroup (or root) to find the
2041 * highest-level memory cgroup with oom.group set.
2042 */
2043 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2044 if (memcg->oom_group)
2045 oom_group = memcg;
2046
2047 if (memcg == oom_domain)
2048 break;
2049 }
2050
2051 if (oom_group)
2052 css_get(&oom_group->css);
2053out:
2054 rcu_read_unlock();
2055
2056 return oom_group;
2057}
2058
2059void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2060{
2061 pr_info("Tasks in ");
2062 pr_cont_cgroup_path(memcg->css.cgroup);
2063 pr_cont(" are going to be killed due to memory.oom.group set\n");
2064}
2065
2066/**
2067 * lock_page_memcg - lock a page->mem_cgroup binding
2068 * @page: the page
2069 *
2070 * This function protects unlocked LRU pages from being moved to
2071 * another cgroup.
2072 *
2073 * It ensures lifetime of the returned memcg. Caller is responsible
2074 * for the lifetime of the page; __unlock_page_memcg() is available
2075 * when @page might get freed inside the locked section.
2076 */
2077struct mem_cgroup *lock_page_memcg(struct page *page)
2078{
2079 struct page *head = compound_head(page); /* rmap on tail pages */
2080 struct mem_cgroup *memcg;
2081 unsigned long flags;
2082
2083 /*
2084 * The RCU lock is held throughout the transaction. The fast
2085 * path can get away without acquiring the memcg->move_lock
2086 * because page moving starts with an RCU grace period.
2087 *
2088 * The RCU lock also protects the memcg from being freed when
2089 * the page state that is going to change is the only thing
2090 * preventing the page itself from being freed. E.g. writeback
2091 * doesn't hold a page reference and relies on PG_writeback to
2092 * keep off truncation, migration and so forth.
2093 */
2094 rcu_read_lock();
2095
2096 if (mem_cgroup_disabled())
2097 return NULL;
2098again:
2099 memcg = head->mem_cgroup;
2100 if (unlikely(!memcg))
2101 return NULL;
2102
2103 if (atomic_read(&memcg->moving_account) <= 0)
2104 return memcg;
2105
2106 spin_lock_irqsave(&memcg->move_lock, flags);
2107 if (memcg != head->mem_cgroup) {
2108 spin_unlock_irqrestore(&memcg->move_lock, flags);
2109 goto again;
2110 }
2111
2112 /*
2113 * When charge migration first begins, we can have locked and
2114 * unlocked page stat updates happening concurrently. Track
2115	 * the task that holds the lock for unlock_page_memcg().
2116 */
2117 memcg->move_lock_task = current;
2118 memcg->move_lock_flags = flags;
2119
2120 return memcg;
2121}
2122EXPORT_SYMBOL(lock_page_memcg);
2123
2124/**
2125 * __unlock_page_memcg - unlock and unpin a memcg
2126 * @memcg: the memcg
2127 *
2128 * Unlock and unpin a memcg returned by lock_page_memcg().
2129 */
2130void __unlock_page_memcg(struct mem_cgroup *memcg)
2131{
2132 if (memcg && memcg->move_lock_task == current) {
2133 unsigned long flags = memcg->move_lock_flags;
2134
2135 memcg->move_lock_task = NULL;
2136 memcg->move_lock_flags = 0;
2137
2138 spin_unlock_irqrestore(&memcg->move_lock, flags);
2139 }
2140
2141 rcu_read_unlock();
2142}
2143
2144/**
2145 * unlock_page_memcg - unlock a page->mem_cgroup binding
2146 * @page: the page
2147 */
2148void unlock_page_memcg(struct page *page)
2149{
2150 struct page *head = compound_head(page);
2151
2152 __unlock_page_memcg(head->mem_cgroup);
2153}
2154EXPORT_SYMBOL(unlock_page_memcg);
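
/*
 * Illustrative pairing sketch for the API above, modeled on dirty-state
 * accounting (the exact stat plumbing elsewhere in mm may differ):
 *
 *	lock_page_memcg(page);
 *	if (TestClearPageDirty(page))
 *		dec_lruvec_page_state(page, NR_FILE_DIRTY);
 *	unlock_page_memcg(page);
 */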
2155
2156struct memcg_stock_pcp {
2157	struct mem_cgroup *cached; /* this must never be a root cgroup */
2158 unsigned int nr_pages;
2159
2160#ifdef CONFIG_MEMCG_KMEM
2161 struct obj_cgroup *cached_objcg;
2162 unsigned int nr_bytes;
2163#endif
2164
2165 struct work_struct work;
2166 unsigned long flags;
2167#define FLUSHING_CACHED_CHARGE 0
2168};
2169static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2170static DEFINE_MUTEX(percpu_charge_mutex);
2171
2172#ifdef CONFIG_MEMCG_KMEM
2173static void drain_obj_stock(struct memcg_stock_pcp *stock);
2174static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2175 struct mem_cgroup *root_memcg);
2176
2177#else
2178static inline void drain_obj_stock(struct memcg_stock_pcp *stock)
2179{
2180}
2181static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2182 struct mem_cgroup *root_memcg)
2183{
2184 return false;
2185}
2186#endif
2187
/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock.  Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;
	bool ret = false;

	if (nr_pages > MEMCG_CHARGE_BATCH)
		return ret;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
		stock->nr_pages -= nr_pages;
		ret = true;
	}

	local_irq_restore(flags);

	return ret;
}

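/*
 * Example of the fast path above (a sketch with made-up numbers, not new
 * kernel behaviour): if this CPU's stock caches 32 pre-charged pages for
 * memcg A, then consume_stock(A, 1) simply decrements nr_pages to 31 and
 * returns true, with no atomic page_counter operations at all. A request
 * for a different memcg, or for more pages than are stocked, returns
 * false and falls through to the slow path in try_charge().
 */
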
/*
 * Drain the stocks cached in the percpu area and reset the cached
 * information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (!old)
		return;

	if (stock->nr_pages) {
		page_counter_uncharge(&old->memory, stock->nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&old->memsw, stock->nr_pages);
		stock->nr_pages = 0;
	}

	css_put(&old->css);
	stock->cached = NULL;
}

static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	/*
	 * The only protection from memory hotplug vs. drain_stock races is
	 * that we always operate on local CPU stock here with IRQ disabled
	 */
	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	drain_obj_stock(stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

	local_irq_restore(flags);
}

/*
 * Cache @nr_pages charges to the local per-cpu area.
 * They will be consumed by consume_stock() later.
 */
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (stock->cached != memcg) { /* reset if necessary */
		drain_stock(stock);
		css_get(&memcg->css);
		stock->cached = memcg;
	}
	stock->nr_pages += nr_pages;

	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
		drain_stock(stock);

	local_irq_restore(flags);
}

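/*
 * Worked example (illustrative): try_charge() below charges a full
 * MEMCG_CHARGE_BATCH (32 pages) against the page counters even for a
 * single-page request, and then calls refill_stock(memcg, 31) with the
 * surplus. The next 31 single-page charges from this CPU are then served
 * by consume_stock() without touching the counters at all.
 */
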
/*
 * Drains all per-CPU charge caches for given root_memcg resp. subtree
 * of the hierarchy under it.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid adding more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/*
	 * Notify other cpus that system-wide "drain" is running.
	 * We do not care about races with the cpu hotplug because cpu down
	 * as well as workers from this path always operate on the local
	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
	 */
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;
		bool flush = false;

		rcu_read_lock();
		memcg = stock->cached;
		if (memcg && stock->nr_pages &&
		    mem_cgroup_is_descendant(memcg, root_memcg))
			flush = true;
		if (obj_stock_flush_required(stock, root_memcg))
			flush = true;
		rcu_read_unlock();

		if (flush &&
		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
	}
	put_cpu();
	mutex_unlock(&percpu_charge_mutex);
}

static int memcg_hotplug_cpu_dead(unsigned int cpu)
{
	struct memcg_stock_pcp *stock;
	struct mem_cgroup *memcg, *mi;

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);

	for_each_mem_cgroup(memcg) {
		int i;

		for (i = 0; i < MEMCG_NR_STAT; i++) {
			int nid;
			long x;

			x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
			if (x)
				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
					atomic_long_add(x, &mi->vmstats[i]);

			if (i >= NR_VM_NODE_STAT_ITEMS)
				continue;

			for_each_node(nid) {
				struct mem_cgroup_per_node *pn;

				pn = mem_cgroup_nodeinfo(memcg, nid);
				x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
				if (x)
					do {
						atomic_long_add(x, &pn->lruvec_stat[i]);
					} while ((pn = parent_nodeinfo(pn, nid)));
			}
		}

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
			long x;

			x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
			if (x)
				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
					atomic_long_add(x, &mi->vmevents[i]);
		}
	}

	return 0;
}

static unsigned long reclaim_high(struct mem_cgroup *memcg,
				  unsigned int nr_pages,
				  gfp_t gfp_mask)
{
	unsigned long nr_reclaimed = 0;

	do {
		unsigned long pflags;

		if (page_counter_read(&memcg->memory) <=
		    READ_ONCE(memcg->memory.high))
			continue;

		memcg_memory_event(memcg, MEMCG_HIGH);

		psi_memstall_enter(&pflags);
		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
							     gfp_mask, true);
		psi_memstall_leave(&pflags);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return nr_reclaimed;
}

static void high_work_func(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, high_work);
	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
}

/*
 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
 * long enough to cause a significant slowdown in most cases, while still
 * allowing diagnostics and tracing to proceed without becoming stuck.
 */
#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)

/*
 * When calculating the delay, we use these either side of the exponentiation to
 * maintain precision and scale to a reasonable number of jiffies (see the table
 * below).
 *
 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
 *   overage ratio to a delay.
 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
 *   proposed penalty in order to reduce to a reasonable number of jiffies, and
 *   to produce a reasonable delay curve.
 *
 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
 * reasonable delay curve compared to precision-adjusted overage, not
 * penalising heavily at first, but still making sure that growth beyond the
 * limit penalises misbehaving cgroups by slowing them down exponentially. For
 * example, with a high of 100 megabytes:
 *
 *  +-------+------------------------+
 *  | usage | time to allocate in ms |
 *  +-------+------------------------+
 *  | 100M  |                      0 |
 *  | 101M  |                      6 |
 *  | 102M  |                     25 |
 *  | 103M  |                     57 |
 *  | 104M  |                    102 |
 *  | 105M  |                    159 |
 *  | 106M  |                    230 |
 *  | 107M  |                    313 |
 *  | 108M  |                    409 |
 *  | 109M  |                    518 |
 *  | 110M  |                    639 |
 *  | 111M  |                    774 |
 *  | 112M  |                    921 |
 *  | 113M  |                   1081 |
 *  | 114M  |                   1254 |
 *  | 115M  |                   1439 |
 *  | 116M  |                   1638 |
 *  | 117M  |                   1849 |
 *  | 118M  |                   2000 |
 *  | 119M  |                   2000 |
 *  | 120M  |                   2000 |
 *  +-------+------------------------+
 */
#define MEMCG_DELAY_PRECISION_SHIFT 20
#define MEMCG_DELAY_SCALING_SHIFT 14
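
/*
 * Worked example for one row of the table above (assuming HZ == 1000, so
 * one jiffy is one millisecond): with high = 100M and usage = 110M,
 *
 *	overage = ((110M - 100M) << 20) / 100M	= ~104857
 *	penalty = overage * overage * HZ	= ~1.1e13
 *	penalty >>= 20; penalty >>= 14		= 639 jiffies
 *
 * which matches the 639 ms entry for 110M, before the per-batch scaling
 * in calculate_high_delay() and the 2 second clamp are applied.
 */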

static u64 calculate_overage(unsigned long usage, unsigned long high)
{
	u64 overage;

	if (usage <= high)
		return 0;

	/*
	 * Prevent division by 0 in overage calculation by acting as if
	 * it was a threshold of 1 page
	 */
	high = max(high, 1UL);

	overage = usage - high;
	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
	return div64_u64(overage, high);
}

static u64 mem_find_max_overage(struct mem_cgroup *memcg)
{
	u64 overage, max_overage = 0;

	do {
		overage = calculate_overage(page_counter_read(&memcg->memory),
					    READ_ONCE(memcg->memory.high));
		max_overage = max(overage, max_overage);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return max_overage;
}

static u64 swap_find_max_overage(struct mem_cgroup *memcg)
{
	u64 overage, max_overage = 0;

	do {
		overage = calculate_overage(page_counter_read(&memcg->swap),
					    READ_ONCE(memcg->swap.high));
		if (overage)
			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
		max_overage = max(overage, max_overage);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return max_overage;
}

/*
 * Get the number of jiffies that we should penalise a mischievous cgroup which
 * is exceeding its memory.high by checking both it and its ancestors.
 */
static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
					  unsigned int nr_pages,
					  u64 max_overage)
{
	unsigned long penalty_jiffies;

	if (!max_overage)
		return 0;

	/*
	 * We use overage compared to memory.high to calculate the number of
	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
	 * fairly lenient on small overages, and increasingly harsh when the
	 * memcg in question makes it clear that it has no intention of stopping
	 * its crazy behaviour, so we exponentially increase the delay based on
	 * overage amount.
	 */
	penalty_jiffies = max_overage * max_overage * HZ;
	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;

	/*
	 * Factor in the task's own contribution to the overage, such that four
	 * N-sized allocations are throttled approximately the same as one
	 * 4N-sized allocation.
	 *
	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
	 * larger the current charge batch is than that.
	 */
	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
}

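/*
 * Scaling example (illustrative): with a computed max_overage worth 639
 * jiffies of penalty, a task whose charge batch is 16 pages sleeps ~319
 * jiffies, while one that charged 64 pages sleeps ~1278 jiffies, so the
 * delay tracks how much each task actually contributed to the overage.
 */
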
/*
 * Scheduled by try_charge() to be executed from the userland return path
 * and reclaims memory over the high limit.
 */
void mem_cgroup_handle_over_high(void)
{
	unsigned long penalty_jiffies;
	unsigned long pflags;
	unsigned long nr_reclaimed;
	unsigned int nr_pages = current->memcg_nr_pages_over_high;
	int nr_retries = MAX_RECLAIM_RETRIES;
	struct mem_cgroup *memcg;
	bool in_retry = false;

	if (likely(!nr_pages))
		return;

	memcg = get_mem_cgroup_from_mm(current->mm);
	current->memcg_nr_pages_over_high = 0;

retry_reclaim:
	/*
	 * The allocating task should reclaim at least the batch size, but for
	 * subsequent retries we only want to do what's necessary to prevent oom
	 * or breaching resource isolation.
	 *
	 * This is distinct from memory.max or page allocator behaviour because
	 * memory.high is currently batched, whereas memory.max and the page
	 * allocator run every time an allocation is made.
	 */
	nr_reclaimed = reclaim_high(memcg,
				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
				    GFP_KERNEL);

	/*
	 * memory.high is breached and reclaim is unable to keep up. Throttle
	 * allocators proactively to slow down excessive growth.
	 */
	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
					       mem_find_max_overage(memcg));

	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
						swap_find_max_overage(memcg));

	/*
	 * Clamp the max delay per usermode return so as to still keep the
	 * application moving forwards and also permit diagnostics, albeit
	 * extremely slowly.
	 */
	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);

	/*
	 * Don't sleep if the amount of jiffies this memcg owes us is so low
	 * that it's not even worth doing, in an attempt to be nice to those who
	 * go only a small amount over their memory.high value and maybe haven't
	 * been aggressively reclaimed enough yet.
	 */
	if (penalty_jiffies <= HZ / 100)
		goto out;

	/*
	 * If reclaim is making forward progress but we're still over
	 * memory.high, we want to encourage that rather than doing allocator
	 * throttling.
	 */
	if (nr_reclaimed || nr_retries--) {
		in_retry = true;
		goto retry_reclaim;
	}

	/*
	 * If we exit early, we're guaranteed to die (since
	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
	 * need to account for any ill-begotten jiffies to pay them off later.
	 */
	psi_memstall_enter(&pflags);
	schedule_timeout_killable(penalty_jiffies);
	psi_memstall_leave(&pflags);

out:
	css_put(&memcg->css);
}

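/*
 * Charge path overview (descriptive summary of the function below, not
 * new behaviour): the ladder tries, in order,
 *
 *   1. the per-cpu stock (consume_stock),
 *   2. an optimistic batched page_counter charge,
 *   3. direct reclaim, draining of all stocks, and bounded retries,
 *   4. the memcg OOM killer,
 *
 * and only then either fails with -ENOMEM or force-charges, the latter
 * for __GFP_NOFAIL, __GFP_ATOMIC, PF_MEMALLOC and dying tasks.
 */
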
static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
		      unsigned int nr_pages)
{
	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
	int nr_retries = MAX_RECLAIM_RETRIES;
	struct mem_cgroup *mem_over_limit;
	struct page_counter *counter;
	enum oom_status oom_status;
	unsigned long nr_reclaimed;
	bool may_swap = true;
	bool drained = false;
	unsigned long pflags;

	if (mem_cgroup_is_root(memcg))
		return 0;
retry:
	if (consume_stock(memcg, nr_pages))
		return 0;

	if (!do_memsw_account() ||
	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
		if (page_counter_try_charge(&memcg->memory, batch, &counter))
			goto done_restock;
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, batch);
		mem_over_limit = mem_cgroup_from_counter(counter, memory);
	} else {
		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
		may_swap = false;
	}

	if (batch > nr_pages) {
		batch = nr_pages;
		goto retry;
	}

	/*
	 * Memcg doesn't have a dedicated reserve for atomic
	 * allocations. But like the global atomic pool, we need to
	 * put the burden of reclaim on regular allocation requests
	 * and let these go through as privileged allocations.
	 */
	if (gfp_mask & __GFP_ATOMIC)
		goto force;

	/*
	 * Unlike in global OOM situations, memcg is not in a physical
	 * memory shortage. Allow dying and OOM-killed tasks to
	 * bypass the last charges so that they can exit quickly and
	 * free their memory.
	 */
	if (unlikely(should_force_charge()))
		goto force;

	/*
	 * Prevent unbounded recursion when reclaim operations need to
	 * allocate memory. This might exceed the limits temporarily,
	 * but we prefer facilitating memory reclaim and getting back
	 * under the limit over triggering OOM kills in these cases.
	 */
	if (unlikely(current->flags & PF_MEMALLOC))
		goto force;

	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

	if (!gfpflags_allow_blocking(gfp_mask))
		goto nomem;

	memcg_memory_event(mem_over_limit, MEMCG_MAX);

	psi_memstall_enter(&pflags);
	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
						    gfp_mask, may_swap);
	psi_memstall_leave(&pflags);

	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		goto retry;

	if (!drained) {
		drain_all_stock(mem_over_limit);
		drained = true;
		goto retry;
	}

	if (gfp_mask & __GFP_NORETRY)
		goto nomem;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages.  Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
		goto retry;
	/*
	 * At task move, charge accounts can be doubly counted. So, it's
	 * better to wait until the end of task_move if something is going on.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		goto retry;

	if (nr_retries--)
		goto retry;

	if (gfp_mask & __GFP_RETRY_MAYFAIL)
		goto nomem;

	if (gfp_mask & __GFP_NOFAIL)
		goto force;

	if (fatal_signal_pending(current))
		goto force;

	/*
	 * keep retrying as long as the memcg oom killer is able to make
	 * a forward progress or bypass the charge if the oom killer
	 * couldn't make any progress.
	 */
	oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
				    get_order(nr_pages * PAGE_SIZE));
	switch (oom_status) {
	case OOM_SUCCESS:
		nr_retries = MAX_RECLAIM_RETRIES;
		goto retry;
	case OOM_FAILED:
		goto force;
	default:
		goto nomem;
	}
nomem:
	if (!(gfp_mask & __GFP_NOFAIL))
		return -ENOMEM;
force:
	/*
	 * The allocation either can't fail or will lead to more memory
	 * being freed very soon.  Allow memory usage to go over the limit
	 * temporarily by force charging it.
	 */
	page_counter_charge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_charge(&memcg->memsw, nr_pages);

	return 0;

done_restock:
	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);

	/*
	 * If the hierarchy is above the normal consumption range, schedule
	 * reclaim on returning to userland.  We can perform reclaim here
	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
	 * not recorded as it most likely matches current's and won't
	 * change in the meantime.  As high limit is checked again before
	 * reclaim, the cost of mismatch is negligible.
	 */
	do {
		bool mem_high, swap_high;

		mem_high = page_counter_read(&memcg->memory) >
			READ_ONCE(memcg->memory.high);
		swap_high = page_counter_read(&memcg->swap) >
			READ_ONCE(memcg->swap.high);

		/* Don't bother a random interrupted task */
		if (in_interrupt()) {
			if (mem_high) {
				schedule_work(&memcg->high_work);
				break;
			}
			continue;
		}

		if (mem_high || swap_high) {
			/*
			 * The allocating tasks in this cgroup will need to do
			 * reclaim or be throttled to prevent further growth
			 * of the memory or swap footprints.
			 *
			 * Target some best-effort fairness between the tasks,
			 * and distribute reclaim work and delay penalties
			 * based on how much each task is actually allocating.
			 */
			current->memcg_nr_pages_over_high += batch;
			set_notify_resume(current);
			break;
		}
	} while ((memcg = parent_mem_cgroup(memcg)));

	return 0;
}

#if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (mem_cgroup_is_root(memcg))
		return;

	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);
}
#endif

static void commit_charge(struct page *page, struct mem_cgroup *memcg)
{
	VM_BUG_ON_PAGE(page->mem_cgroup, page);
	/*
	 * Any of the following ensures page->mem_cgroup stability:
	 *
	 * - the page lock
	 * - LRU isolation
	 * - lock_page_memcg()
	 * - exclusive reference
	 */
	page->mem_cgroup = memcg;
}

#ifdef CONFIG_MEMCG_KMEM
int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
				 gfp_t gfp)
{
	unsigned int objects = objs_per_slab_page(s, page);
	void *vec;

	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
			   page_to_nid(page));
	if (!vec)
		return -ENOMEM;

	if (cmpxchg(&page->obj_cgroups, NULL,
		    (struct obj_cgroup **) ((unsigned long)vec | 0x1UL)))
		kfree(vec);
	else
		kmemleak_not_leak(vec);

	return 0;
}

/*
 * Returns a pointer to the memory cgroup to which the kernel object is charged.
 *
 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
 * cgroup_mutex, etc.
 */
struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	struct page *page;

	if (mem_cgroup_disabled())
		return NULL;

	page = virt_to_head_page(p);

	/*
	 * Slab objects are accounted individually, not per-page.
	 * Memcg membership data for each individual object is saved in
	 * the page->obj_cgroups.
	 */
	if (page_has_obj_cgroups(page)) {
		struct obj_cgroup *objcg;
		unsigned int off;

		off = obj_to_index(page->slab_cache, page, p);
		objcg = page_obj_cgroups(page)[off];
		if (objcg)
			return obj_cgroup_memcg(objcg);

		return NULL;
	}

	/* All other pages use page->mem_cgroup */
	return page->mem_cgroup;
}

__always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
{
	struct obj_cgroup *objcg = NULL;
	struct mem_cgroup *memcg;

	if (unlikely(!current->mm && !current->active_memcg))
		return NULL;

	rcu_read_lock();
	if (unlikely(current->active_memcg))
		memcg = rcu_dereference(current->active_memcg);
	else
		memcg = mem_cgroup_from_task(current);

	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
		objcg = rcu_dereference(memcg->objcg);
		if (objcg && obj_cgroup_tryget(objcg))
			break;
	}
	rcu_read_unlock();

	return objcg;
}

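/*
 * Illustrative call sequence (a sketch, not an existing caller in this
 * file): a slab-style user pins an objcg once and then charges
 * object-sized amounts against it, e.g. for a hypothetical 128-byte
 * object:
 *
 *	struct obj_cgroup *objcg = get_obj_cgroup_from_current();
 *
 *	if (objcg && obj_cgroup_charge(objcg, GFP_KERNEL, 128))
 *		objcg = NULL;	// fall back to an uncharged allocation
 *	...
 *	obj_cgroup_uncharge(objcg, 128);
 *	obj_cgroup_put(objcg);
 */
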
static int memcg_alloc_cache_id(void)
{
	int id, size;
	int err;

	id = ida_simple_get(&memcg_cache_ida,
			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
	if (id < 0)
		return id;

	if (id < memcg_nr_cache_ids)
		return id;

	/*
	 * There's no space for the new id in memcg_caches arrays,
	 * so we have to grow them.
	 */
	down_write(&memcg_cache_ids_sem);

	size = 2 * (id + 1);
	if (size < MEMCG_CACHES_MIN_SIZE)
		size = MEMCG_CACHES_MIN_SIZE;
	else if (size > MEMCG_CACHES_MAX_SIZE)
		size = MEMCG_CACHES_MAX_SIZE;

	err = memcg_update_all_list_lrus(size);
	if (!err)
		memcg_nr_cache_ids = size;

	up_write(&memcg_cache_ids_sem);

	if (err) {
		ida_simple_remove(&memcg_cache_ida, id);
		return err;
	}
	return id;
}

static void memcg_free_cache_id(int id)
{
	ida_simple_remove(&memcg_cache_ida, id);
}

/**
 * __memcg_kmem_charge: charge a number of kernel pages to a memcg
 * @memcg: memory cgroup to charge
 * @gfp: reclaim mode
 * @nr_pages: number of pages to charge
 *
 * Returns 0 on success, an error code on failure.
 */
int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
			unsigned int nr_pages)
{
	struct page_counter *counter;
	int ret;

	ret = try_charge(memcg, gfp, nr_pages);
	if (ret)
		return ret;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {

		/*
		 * Enforce __GFP_NOFAIL allocation because callers are not
		 * prepared to see failures and likely do not have any failure
		 * handling code.
		 */
		if (gfp & __GFP_NOFAIL) {
			page_counter_charge(&memcg->kmem, nr_pages);
			return 0;
		}
		cancel_charge(memcg, nr_pages);
		return -ENOMEM;
	}
	return 0;
}

/**
 * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg
 * @memcg: memcg to uncharge
 * @nr_pages: number of pages to uncharge
 */
void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		page_counter_uncharge(&memcg->kmem, nr_pages);

	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);
}

/**
 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
 * @page: page to charge
 * @gfp: reclaim mode
 * @order: allocation order
 *
 * Returns 0 on success, an error code on failure.
 */
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
{
	struct mem_cgroup *memcg;
	int ret = 0;

	if (memcg_kmem_bypass())
		return 0;

	memcg = get_mem_cgroup_from_current();
	if (!mem_cgroup_is_root(memcg)) {
		ret = __memcg_kmem_charge(memcg, gfp, 1 << order);
		if (!ret) {
			page->mem_cgroup = memcg;
			__SetPageKmemcg(page);
			return 0;
		}
	}
	css_put(&memcg->css);
	return ret;
}

/**
 * __memcg_kmem_uncharge_page: uncharge a kmem page
 * @page: page to uncharge
 * @order: allocation order
 */
void __memcg_kmem_uncharge_page(struct page *page, int order)
{
	struct mem_cgroup *memcg = page->mem_cgroup;
	unsigned int nr_pages = 1 << order;

	if (!memcg)
		return;

	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
	__memcg_kmem_uncharge(memcg, nr_pages);
	page->mem_cgroup = NULL;
	css_put(&memcg->css);

	/* slab pages do not have PageKmemcg flag set */
	if (PageKmemcg(page))
		__ClearPageKmemcg(page);
}

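/*
 * Illustrative pairing (a sketch; the real callers are inline wrappers
 * around the two functions above, keyed off __GFP_ACCOUNT in the page
 * allocator):
 *
 *	page = alloc_pages(gfp, order);
 *	if (page && __memcg_kmem_charge_page(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *	...
 *	__memcg_kmem_uncharge_page(page, order);
 */
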
static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;
	bool ret = false;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
		stock->nr_bytes -= nr_bytes;
		ret = true;
	}

	local_irq_restore(flags);

	return ret;
}

static void drain_obj_stock(struct memcg_stock_pcp *stock)
{
	struct obj_cgroup *old = stock->cached_objcg;

	if (!old)
		return;

	if (stock->nr_bytes) {
		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);

		if (nr_pages) {
			rcu_read_lock();
			__memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages);
			rcu_read_unlock();
		}

		/*
		 * The leftover is flushed to the centralized per-memcg value.
		 * On the next attempt to refill obj stock it will be moved
		 * to a per-cpu stock (probably, on another CPU), see
		 * refill_obj_stock().
		 *
		 * How often it's flushed is a trade-off between the memory
		 * limit enforcement accuracy and potential CPU contention,
		 * so it might be changed in the future.
		 */
		atomic_add(nr_bytes, &old->nr_charged_bytes);
		stock->nr_bytes = 0;
	}

	obj_cgroup_put(old);
	stock->cached_objcg = NULL;
}

static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
				     struct mem_cgroup *root_memcg)
{
	struct mem_cgroup *memcg;

	if (stock->cached_objcg) {
		memcg = obj_cgroup_memcg(stock->cached_objcg);
		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
			return true;
	}

	return false;
}

static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (stock->cached_objcg != objcg) { /* reset if necessary */
		drain_obj_stock(stock);
		obj_cgroup_get(objcg);
		stock->cached_objcg = objcg;
		stock->nr_bytes = atomic_xchg(&objcg->nr_charged_bytes, 0);
	}
	stock->nr_bytes += nr_bytes;

	if (stock->nr_bytes > PAGE_SIZE)
		drain_obj_stock(stock);

	local_irq_restore(flags);
}

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
{
	struct mem_cgroup *memcg;
	unsigned int nr_pages, nr_bytes;
	int ret;

	if (consume_obj_stock(objcg, size))
		return 0;

	/*
	 * In theory, objcg->nr_charged_bytes can have enough
	 * pre-charged bytes to satisfy the allocation. However,
	 * flushing objcg->nr_charged_bytes requires two atomic
	 * operations, and objcg->nr_charged_bytes can't be big,
	 * so it's better to ignore it and try to grab some new pages.
	 * objcg->nr_charged_bytes will be flushed in
	 * refill_obj_stock(), called from this function or
	 * independently later.
	 */
	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	css_get(&memcg->css);
	rcu_read_unlock();

	nr_pages = size >> PAGE_SHIFT;
	nr_bytes = size & (PAGE_SIZE - 1);

	if (nr_bytes)
		nr_pages += 1;

	ret = __memcg_kmem_charge(memcg, gfp, nr_pages);
	if (!ret && nr_bytes)
		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes);

	css_put(&memcg->css);
	return ret;
}

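/*
 * Byte accounting example (made-up numbers): obj_cgroup_charge(objcg,
 * gfp, 700) that misses the percpu stock charges one full page (4096
 * bytes with 4K pages) via __memcg_kmem_charge() and then stocks the
 * 3396-byte remainder with refill_obj_stock(), so subsequent small
 * charges on this CPU are served from the stock without new page
 * charges.
 */
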
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
{
	refill_obj_stock(objcg, size);
}

#endif /* CONFIG_MEMCG_KMEM */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * Because tail pages are not marked as "used", set them. We're under
 * pgdat->lru_lock and migration entries are set up in all page mappings.
 */
void mem_cgroup_split_huge_fixup(struct page *head)
{
	struct mem_cgroup *memcg = head->mem_cgroup;
	int i;

	if (mem_cgroup_disabled())
		return;

	for (i = 1; i < HPAGE_PMD_NR; i++) {
		css_get(&memcg->css);
		head[i].mem_cgroup = memcg;
	}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_MEMCG_SWAP
/**
 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
 * @entry: swap entry to be moved
 * @from:  mem_cgroup which the entry is moved from
 * @to:  mem_cgroup which the entry is moved to
 *
 * It succeeds only when the swap_cgroup's record for this entry is the same
 * as the mem_cgroup's id of @from.
 *
 * Returns 0 on success, -EINVAL on failure.
 *
 * The caller must have charged to @to, IOW, called page_counter_charge() for
 * both res and memsw, and called css_get().
 */
static int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	unsigned short old_id, new_id;

	old_id = mem_cgroup_id(from);
	new_id = mem_cgroup_id(to);

	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
		mod_memcg_state(from, MEMCG_SWAP, -1);
		mod_memcg_state(to, MEMCG_SWAP, 1);
		return 0;
	}
	return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	return -EINVAL;
}
#endif

static DEFINE_MUTEX(memcg_max_mutex);

static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
				 unsigned long max, bool memsw)
{
	bool enlarge = false;
	bool drained = false;
	int ret;
	bool limits_invariant;
	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;

	do {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		mutex_lock(&memcg_max_mutex);
		/*
		 * Make sure that the new limit (memsw or memory limit) doesn't
		 * break our basic invariant rule memory.max <= memsw.max.
		 */
		limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
					   max <= memcg->memsw.max;
		if (!limits_invariant) {
			mutex_unlock(&memcg_max_mutex);
			ret = -EINVAL;
			break;
		}
		if (max > counter->max)
			enlarge = true;
		ret = page_counter_set_max(counter, max);
		mutex_unlock(&memcg_max_mutex);

		if (!ret)
			break;

		if (!drained) {
			drain_all_stock(memcg);
			drained = true;
			continue;
		}

		if (!try_to_free_mem_cgroup_pages(memcg, 1,
						  GFP_KERNEL, !memsw)) {
			ret = -EBUSY;
			break;
		}
	} while (true);

	if (!ret && enlarge)
		memcg_oom_recover(memcg);

	return ret;
}

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	unsigned long nr_reclaimed = 0;
	struct mem_cgroup_per_node *mz, *next_mz = NULL;
	unsigned long reclaimed;
	int loop = 0;
	struct mem_cgroup_tree_per_node *mctz;
	unsigned long excess;
	unsigned long nr_scanned;

	if (order > 0)
		return 0;

	mctz = soft_limit_tree_node(pgdat->node_id);

	/*
	 * Do not even bother to check the largest node if the root
	 * is empty. Do it lockless to prevent lock bouncing. Races
	 * are acceptable as soft limit is best effort anyway.
	 */
	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
		return 0;

	/*
	 * This loop can run a while, especially if mem_cgroups continuously
	 * keep exceeding their soft limit and putting the system under
	 * pressure
	 */
	do {
		if (next_mz)
			mz = next_mz;
		else
			mz = mem_cgroup_largest_soft_limit_node(mctz);
		if (!mz)
			break;

		nr_scanned = 0;
		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
						    gfp_mask, &nr_scanned);
		nr_reclaimed += reclaimed;
		*total_scanned += nr_scanned;
		spin_lock_irq(&mctz->lock);
		__mem_cgroup_remove_exceeded(mz, mctz);

		/*
		 * If we failed to reclaim anything from this memory cgroup
		 * it is time to move on to the next cgroup
		 */
		next_mz = NULL;
		if (!reclaimed)
			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);

		excess = soft_limit_excess(mz->memcg);
		/*
		 * One school of thought says that we should not add
		 * back the node to the tree if reclaim returns 0.
		 * But our reclaim could return 0, simply because due
		 * to priority we are exposing a smaller subset of
		 * memory to reclaim from. Consider this as a longer
		 * term TODO.
		 */
		/* If excess == 0, no tree ops */
		__mem_cgroup_insert_exceeded(mz, mctz, excess);
		spin_unlock_irq(&mctz->lock);
		css_put(&mz->memcg->css);
		loop++;
		/*
		 * Could not reclaim anything and there are no more
		 * mem cgroups to try or we seem to be looping without
		 * reclaiming anything.
		 */
		if (!nr_reclaimed &&
			(next_mz == NULL ||
			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
			break;
	} while (!nr_reclaimed);
	if (next_mz)
		css_put(&next_mz->memcg->css);
	return nr_reclaimed;
}

/*
 * Test whether @memcg has children, dead or alive.  Note that this
 * function doesn't care whether @memcg has use_hierarchy enabled and
 * returns %true if there are child csses according to the cgroup
 * hierarchy.  Testing use_hierarchy is the caller's responsibility.
 */
static inline bool memcg_has_children(struct mem_cgroup *memcg)
{
	bool ret;

	rcu_read_lock();
	ret = css_next_child(NULL, &memcg->css);
	rcu_read_unlock();
	return ret;
}

/*
 * Reclaims as many pages from the given memcg as possible.
 *
 * Caller is responsible for holding css reference for memcg.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
{
	int nr_retries = MAX_RECLAIM_RETRIES;

	/* we call try-to-free pages to make this cgroup empty */
	lru_add_drain_all();

	drain_all_stock(memcg);

	/* try to free all pages in this cgroup */
	while (nr_retries && page_counter_read(&memcg->memory)) {
		int progress;

		if (signal_pending(current))
			return -EINTR;

		progress = try_to_free_mem_cgroup_pages(memcg, 1,
							GFP_KERNEL, true);
		if (!progress) {
			nr_retries--;
			/* maybe some writeback is necessary */
			congestion_wait(BLK_RW_ASYNC, HZ/10);
		}

	}

	return 0;
}

static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));

	if (mem_cgroup_is_root(memcg))
		return -EINVAL;
	return mem_cgroup_force_empty(memcg) ?: nbytes;
}

static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
				     struct cftype *cft)
{
	return mem_cgroup_from_css(css)->use_hierarchy;
}

static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
				      struct cftype *cft, u64 val)
{
	int retval = 0;
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);

	if (memcg->use_hierarchy == val)
		return 0;

	/*
	 * If parent's use_hierarchy is set, we can't make any modifications
	 * in the child subtrees. If it is unset, then the change can
	 * occur, provided the current cgroup has no children.
	 *
	 * For the root cgroup, parent_memcg is NULL; we allow the value to
	 * be set if there are no children.
	 */
	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
	    (val == 1 || val == 0)) {
		if (!memcg_has_children(memcg))
			memcg->use_hierarchy = val;
		else
			retval = -EBUSY;
	} else
		retval = -EINVAL;

	return retval;
}

static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
{
	unsigned long val;

	if (mem_cgroup_is_root(memcg)) {
		val = memcg_page_state(memcg, NR_FILE_PAGES) +
			memcg_page_state(memcg, NR_ANON_MAPPED);
		if (swap)
			val += memcg_page_state(memcg, MEMCG_SWAP);
	} else {
		if (!swap)
			val = page_counter_read(&memcg->memory);
		else
			val = page_counter_read(&memcg->memsw);
	}
	return val;
}

enum {
	RES_USAGE,
	RES_LIMIT,
	RES_MAX_USAGE,
	RES_FAILCNT,
	RES_SOFT_LIMIT,
};

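/*
 * The cftype ->private values decoded by MEMFILE_TYPE()/MEMFILE_ATTR()
 * below pack a counter type (_MEM, _MEMSWAP, _KMEM, _TCP) together with
 * one of the RES_* attributes above. Assuming the usual encoding used
 * for these macros, MEMFILE_PRIVATE(type, attr) == (type << 16 | attr),
 * so e.g. memory.limit_in_bytes carries _MEM and RES_LIMIT.
 */
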
static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct page_counter *counter;

	switch (MEMFILE_TYPE(cft->private)) {
	case _MEM:
		counter = &memcg->memory;
		break;
	case _MEMSWAP:
		counter = &memcg->memsw;
		break;
	case _KMEM:
		counter = &memcg->kmem;
		break;
	case _TCP:
		counter = &memcg->tcpmem;
		break;
	default:
		BUG();
	}

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		if (counter == &memcg->memory)
			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
		if (counter == &memcg->memsw)
			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->max * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	case RES_SOFT_LIMIT:
		return (u64)memcg->soft_limit * PAGE_SIZE;
	default:
		BUG();
	}
}

static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
{
	unsigned long stat[MEMCG_NR_STAT] = {0};
	struct mem_cgroup *mi;
	int node, cpu, i;

	for_each_online_cpu(cpu)
		for (i = 0; i < MEMCG_NR_STAT; i++)
			stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);

	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
		for (i = 0; i < MEMCG_NR_STAT; i++)
			atomic_long_add(stat[i], &mi->vmstats[i]);

	for_each_node(node) {
		struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
		struct mem_cgroup_per_node *pi;

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
			stat[i] = 0;

		for_each_online_cpu(cpu)
			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
				stat[i] += per_cpu(
					pn->lruvec_stat_cpu->count[i], cpu);

		for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
				atomic_long_add(stat[i], &pi->lruvec_stat[i]);
	}
}

static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
{
	unsigned long events[NR_VM_EVENT_ITEMS];
	struct mem_cgroup *mi;
	int cpu, i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
		events[i] = 0;

	for_each_online_cpu(cpu)
		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			events[i] += per_cpu(memcg->vmstats_percpu->events[i],
					     cpu);

	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			atomic_long_add(events[i], &mi->vmevents[i]);
}

#ifdef CONFIG_MEMCG_KMEM
static int memcg_online_kmem(struct mem_cgroup *memcg)
{
	struct obj_cgroup *objcg;
	int memcg_id;

	if (cgroup_memory_nokmem)
		return 0;

	BUG_ON(memcg->kmemcg_id >= 0);
	BUG_ON(memcg->kmem_state);

	memcg_id = memcg_alloc_cache_id();
	if (memcg_id < 0)
		return memcg_id;

	objcg = obj_cgroup_alloc();
	if (!objcg) {
		memcg_free_cache_id(memcg_id);
		return -ENOMEM;
	}
	objcg->memcg = memcg;
	rcu_assign_pointer(memcg->objcg, objcg);

	static_branch_enable(&memcg_kmem_enabled_key);

	/*
	 * A memory cgroup is considered kmem-online as soon as it gets
	 * kmemcg_id. Setting the id after enabling static branching will
	 * guarantee no one starts accounting before all call sites are
	 * patched.
	 */
	memcg->kmemcg_id = memcg_id;
	memcg->kmem_state = KMEM_ONLINE;

	return 0;
}

static void memcg_offline_kmem(struct mem_cgroup *memcg)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *parent, *child;
	int kmemcg_id;

	if (memcg->kmem_state != KMEM_ONLINE)
		return;

	memcg->kmem_state = KMEM_ALLOCATED;

	parent = parent_mem_cgroup(memcg);
	if (!parent)
		parent = root_mem_cgroup;

	memcg_reparent_objcgs(memcg, parent);

	kmemcg_id = memcg->kmemcg_id;
	BUG_ON(kmemcg_id < 0);

	/*
	 * Change kmemcg_id of this cgroup and all its descendants to the
	 * parent's id, and then move all entries from this cgroup's list_lrus
	 * to ones of the parent. After we have finished, all list_lrus
	 * corresponding to this cgroup are guaranteed to remain empty. The
	 * ordering is imposed by list_lru_node->lock taken by
	 * memcg_drain_all_list_lrus().
	 */
	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
	css_for_each_descendant_pre(css, &memcg->css) {
		child = mem_cgroup_from_css(css);
		BUG_ON(child->kmemcg_id != kmemcg_id);
		child->kmemcg_id = parent->kmemcg_id;
		if (!memcg->use_hierarchy)
			break;
	}
	rcu_read_unlock();

	memcg_drain_all_list_lrus(kmemcg_id, parent);

	memcg_free_cache_id(kmemcg_id);
}

static void memcg_free_kmem(struct mem_cgroup *memcg)
{
	/* css_alloc() failed, offlining didn't happen */
	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
		memcg_offline_kmem(memcg);
}
#else
static int memcg_online_kmem(struct mem_cgroup *memcg)
{
	return 0;
}
static void memcg_offline_kmem(struct mem_cgroup *memcg)
{
}
static void memcg_free_kmem(struct mem_cgroup *memcg)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static int memcg_update_kmem_max(struct mem_cgroup *memcg,
				 unsigned long max)
{
	int ret;

	mutex_lock(&memcg_max_mutex);
	ret = page_counter_set_max(&memcg->kmem, max);
	mutex_unlock(&memcg_max_mutex);
	return ret;
}

static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
{
	int ret;

	mutex_lock(&memcg_max_mutex);

	ret = page_counter_set_max(&memcg->tcpmem, max);
	if (ret)
		goto out;

	if (!memcg->tcpmem_active) {
		/*
		 * The active flag needs to be written after the static_key
		 * update. This is what guarantees that the socket activation
		 * function is the last one to run. See mem_cgroup_sk_alloc()
		 * for details, and note that we don't mark any socket as
		 * belonging to this memcg until that flag is up.
		 *
		 * We need to do this, because static_keys will span multiple
		 * sites, but we can't control their order. If we mark a socket
		 * as accounted, but the accounting functions are not patched in
		 * yet, we'll lose accounting.
		 *
		 * We never race with the readers in mem_cgroup_sk_alloc(),
		 * because when this value changes, the code to process it is
		 * not patched in yet.
		 */
		static_branch_inc(&memcg_sockets_enabled_key);
		memcg->tcpmem_active = true;
	}
out:
	mutex_unlock(&memcg_max_mutex);
	return ret;
}

/*
 * The user of this function is...
 * RES_LIMIT.
 */
static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long nr_pages;
	int ret;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, "-1", &nr_pages);
	if (ret)
		return ret;

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_LIMIT:
		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		switch (MEMFILE_TYPE(of_cft(of)->private)) {
		case _MEM:
			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
			break;
		case _MEMSWAP:
			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
			break;
		case _KMEM:
			pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
				     "Please report your usecase to linux-mm@kvack.org if you "
				     "depend on this functionality.\n");
			ret = memcg_update_kmem_max(memcg, nr_pages);
			break;
		case _TCP:
			ret = memcg_update_tcp_max(memcg, nr_pages);
			break;
		}
		break;
	case RES_SOFT_LIMIT:
		memcg->soft_limit = nr_pages;
		ret = 0;
		break;
	}
	return ret ?: nbytes;
}

static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	struct page_counter *counter;

	switch (MEMFILE_TYPE(of_cft(of)->private)) {
	case _MEM:
		counter = &memcg->memory;
		break;
	case _MEMSWAP:
		counter = &memcg->memsw;
		break;
	case _KMEM:
		counter = &memcg->kmem;
		break;
	case _TCP:
		counter = &memcg->tcpmem;
		break;
	default:
		BUG();
	}

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	default:
		BUG();
	}

	return nbytes;
}

static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
				       struct cftype *cft)
{
	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
}

#ifdef CONFIG_MMU
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (val & ~MOVE_MASK)
		return -EINVAL;

	/*
	 * No kind of locking is needed in here, because ->can_attach() will
	 * check this value once in the beginning of the process, and then carry
	 * on with stale data. This means that changes to this value will only
	 * affect task migrations starting after the change.
	 */
	memcg->move_charge_at_immigrate = val;
	return 0;
}
#else
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	return -ENOSYS;
}
#endif

#ifdef CONFIG_NUMA

#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)

static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
				int nid, unsigned int lru_mask, bool tree)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
	unsigned long nr = 0;
	enum lru_list lru;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		if (tree)
			nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
		else
			nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
					     unsigned int lru_mask,
					     bool tree)
{
	unsigned long nr = 0;
	enum lru_list lru;

	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		if (tree)
			nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
		else
			nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
	}
	return nr;
}

static int memcg_numa_stat_show(struct seq_file *m, void *v)
{
	struct numa_stat {
		const char *name;
		unsigned int lru_mask;
	};

	static const struct numa_stat stats[] = {
		{ "total", LRU_ALL },
		{ "file", LRU_ALL_FILE },
		{ "anon", LRU_ALL_ANON },
		{ "unevictable", BIT(LRU_UNEVICTABLE) },
	};
	const struct numa_stat *stat;
	int nid;
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
		seq_printf(m, "%s=%lu", stat->name,
			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
						   false));
		for_each_node_state(nid, N_MEMORY)
			seq_printf(m, " N%d=%lu", nid,
				   mem_cgroup_node_nr_lru_pages(memcg, nid,
							stat->lru_mask, false));
		seq_putc(m, '\n');
	}

	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {

		seq_printf(m, "hierarchical_%s=%lu", stat->name,
			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
						   true));
		for_each_node_state(nid, N_MEMORY)
			seq_printf(m, " N%d=%lu", nid,
				   mem_cgroup_node_nr_lru_pages(memcg, nid,
							stat->lru_mask, true));
		seq_putc(m, '\n');
	}

	return 0;
}
#endif /* CONFIG_NUMA */

static const unsigned int memcg1_stats[] = {
	NR_FILE_PAGES,
	NR_ANON_MAPPED,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	NR_ANON_THPS,
#endif
	NR_SHMEM,
	NR_FILE_MAPPED,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	MEMCG_SWAP,
};

static const char *const memcg1_stat_names[] = {
	"cache",
	"rss",
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	"rss_huge",
#endif
	"shmem",
	"mapped_file",
	"dirty",
	"writeback",
	"swap",
};

/* Universal VM events cgroup1 shows, original sort order */
static const unsigned int memcg1_events[] = {
	PGPGIN,
	PGPGOUT,
	PGFAULT,
	PGMAJFAULT,
};

static int memcg_stat_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
	unsigned long memory, memsw;
	struct mem_cgroup *mi;
	unsigned int i;

	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));

	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
		unsigned long nr;

		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
			continue;
		nr = memcg_page_state_local(memcg, memcg1_stats[i]);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		if (memcg1_stats[i] == NR_ANON_THPS)
			nr *= HPAGE_PMD_NR;
#endif
		seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE);
	}

	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
		seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
			   memcg_events_local(memcg, memcg1_events[i]));

	for (i = 0; i < NR_LRU_LISTS; i++)
		seq_printf(m, "%s %lu\n", lru_list_name(i),
			   memcg_page_state_local(memcg, NR_LRU_BASE + i) *
			   PAGE_SIZE);

	/* Hierarchical information */
	memory = memsw = PAGE_COUNTER_MAX;
	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
		memory = min(memory, READ_ONCE(mi->memory.max));
		memsw = min(memsw, READ_ONCE(mi->memsw.max));
	}
	seq_printf(m, "hierarchical_memory_limit %llu\n",
		   (u64)memory * PAGE_SIZE);
	if (do_memsw_account())
		seq_printf(m, "hierarchical_memsw_limit %llu\n",
			   (u64)memsw * PAGE_SIZE);

	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
			continue;
		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
			   (u64)memcg_page_state(memcg, memcg1_stats[i]) *
			   PAGE_SIZE);
	}

	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
		seq_printf(m, "total_%s %llu\n",
			   vm_event_name(memcg1_events[i]),
			   (u64)memcg_events(memcg, memcg1_events[i]));

	for (i = 0; i < NR_LRU_LISTS; i++)
		seq_printf(m, "total_%s %llu\n", lru_list_name(i),
			   (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
			   PAGE_SIZE);

#ifdef CONFIG_DEBUG_VM
	{
		pg_data_t *pgdat;
		struct mem_cgroup_per_node *mz;
		unsigned long anon_cost = 0;
		unsigned long file_cost = 0;

		for_each_online_pgdat(pgdat) {
			mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);

			anon_cost += mz->lruvec.anon_cost;
			file_cost += mz->lruvec.file_cost;
		}
		seq_printf(m, "anon_cost %lu\n", anon_cost);
		seq_printf(m, "file_cost %lu\n", file_cost);
	}
#endif

	return 0;
}

static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return mem_cgroup_swappiness(memcg);
}

static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (val > 100)
		return -EINVAL;

	if (css->parent)
		memcg->swappiness = val;
	else
		vm_swappiness = val;

	return 0;
}

static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
{
	struct mem_cgroup_threshold_ary *t;
	unsigned long usage;
	int i;

	rcu_read_lock();
	if (!swap)
		t = rcu_dereference(memcg->thresholds.primary);
	else
		t = rcu_dereference(memcg->memsw_thresholds.primary);

	if (!t)
		goto unlock;

	usage = mem_cgroup_usage(memcg, swap);

	/*
	 * current_threshold points to the threshold just below or equal to
	 * usage. If that is no longer true, a threshold was crossed after
	 * the last call of __mem_cgroup_threshold().
	 */
	i = t->current_threshold;

	/*
	 * Iterate backward over the array of thresholds starting from
	 * current_threshold and check if a threshold is crossed.
	 * If none of the thresholds below usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* i = current_threshold + 1 */
	i++;

	/*
	 * Iterate forward over the array of thresholds starting from
	 * current_threshold+1 and check if a threshold is crossed.
	 * If none of the thresholds above usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* Update current_threshold */
	t->current_threshold = i - 1;
unlock:
	rcu_read_unlock();
}

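/*
 * Worked example (illustrative): with registered thresholds of 4M, 8M
 * and 16M and current_threshold pointing at 8M, a usage reading of 3M
 * makes the backward scan signal the 8M and 4M eventfds and leave
 * current_threshold at -1 (no threshold at or below usage); a reading
 * of 20M instead makes the forward scan signal the 16M eventfd and move
 * current_threshold to it.
 */
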
4173static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4174{
4175 while (memcg) {
4176 __mem_cgroup_threshold(memcg, false);
4177 if (do_memsw_account())
4178 __mem_cgroup_threshold(memcg, true);
4179
4180 memcg = parent_mem_cgroup(memcg);
4181 }
4182}
4183
4184static int compare_thresholds(const void *a, const void *b)
4185{
4186 const struct mem_cgroup_threshold *_a = a;
4187 const struct mem_cgroup_threshold *_b = b;
4188
4189 if (_a->threshold > _b->threshold)
4190 return 1;
4191
4192 if (_a->threshold < _b->threshold)
4193 return -1;
4194
4195 return 0;
4196}
4197
4198static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4199{
4200 struct mem_cgroup_eventfd_list *ev;
4201
4202 spin_lock(&memcg_oom_lock);
4203
4204 list_for_each_entry(ev, &memcg->oom_notify, list)
4205 eventfd_signal(ev->eventfd, 1);
4206
4207 spin_unlock(&memcg_oom_lock);
4208 return 0;
4209}
4210
4211static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4212{
4213 struct mem_cgroup *iter;
4214
4215 for_each_mem_cgroup_tree(iter, memcg)
4216 mem_cgroup_oom_notify_cb(iter);
4217}
4218
4219static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4220 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4221{
4222 struct mem_cgroup_thresholds *thresholds;
4223 struct mem_cgroup_threshold_ary *new;
4224 unsigned long threshold;
4225 unsigned long usage;
4226 int i, size, ret;
4227
4228 ret = page_counter_memparse(args, "-1", &threshold);
4229 if (ret)
4230 return ret;
4231
4232 mutex_lock(&memcg->thresholds_lock);
4233
4234 if (type == _MEM) {
4235 thresholds = &memcg->thresholds;
4236 usage = mem_cgroup_usage(memcg, false);
4237 } else if (type == _MEMSWAP) {
4238 thresholds = &memcg->memsw_thresholds;
4239 usage = mem_cgroup_usage(memcg, true);
4240 } else
4241 BUG();
4242
	/* Check if a threshold was crossed before adding a new one */
4244 if (thresholds->primary)
4245 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4246
4247 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4248
4249 /* Allocate memory for new array of thresholds */
4250 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4251 if (!new) {
4252 ret = -ENOMEM;
4253 goto unlock;
4254 }
4255 new->size = size;
4256
4257 /* Copy thresholds (if any) to new array */
4258 if (thresholds->primary) {
4259 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
4260 sizeof(struct mem_cgroup_threshold));
4261 }
4262
4263 /* Add new threshold */
4264 new->entries[size - 1].eventfd = eventfd;
4265 new->entries[size - 1].threshold = threshold;
4266
	/* Sort thresholds. Registering a new threshold isn't time-critical */
4268 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
4269 compare_thresholds, NULL);
4270
4271 /* Find current threshold */
4272 new->current_threshold = -1;
4273 for (i = 0; i < size; i++) {
4274 if (new->entries[i].threshold <= usage) {
4275 /*
4276 * new->current_threshold will not be used until
4277 * rcu_assign_pointer(), so it's safe to increment
4278 * it here.
4279 */
4280 ++new->current_threshold;
4281 } else
4282 break;
4283 }
4284
4285 /* Free old spare buffer and save old primary buffer as spare */
4286 kfree(thresholds->spare);
4287 thresholds->spare = thresholds->primary;
4288
4289 rcu_assign_pointer(thresholds->primary, new);
4290
4291 /* To be sure that nobody uses thresholds */
4292 synchronize_rcu();
4293
4294unlock:
4295 mutex_unlock(&memcg->thresholds_lock);
4296
4297 return ret;
4298}
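/*
 * The primary/spare pair is a simple RCU double buffer: readers in
 * __mem_cgroup_threshold() only ever dereference ->primary, so the
 * writer builds a new array, publishes it with rcu_assign_pointer(),
 * waits out existing readers with synchronize_rcu(), and keeps the
 * replaced array as ->spare for the next update to reuse.
 */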
4299
4300static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4301 struct eventfd_ctx *eventfd, const char *args)
4302{
4303 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4304}
4305
4306static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4307 struct eventfd_ctx *eventfd, const char *args)
4308{
4309 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4310}
4311
4312static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4313 struct eventfd_ctx *eventfd, enum res_type type)
4314{
4315 struct mem_cgroup_thresholds *thresholds;
4316 struct mem_cgroup_threshold_ary *new;
4317 unsigned long usage;
4318 int i, j, size, entries;
4319
4320 mutex_lock(&memcg->thresholds_lock);
4321
4322 if (type == _MEM) {
4323 thresholds = &memcg->thresholds;
4324 usage = mem_cgroup_usage(memcg, false);
4325 } else if (type == _MEMSWAP) {
4326 thresholds = &memcg->memsw_thresholds;
4327 usage = mem_cgroup_usage(memcg, true);
4328 } else
4329 BUG();
4330
4331 if (!thresholds->primary)
4332 goto unlock;
4333
	/* Check if a threshold was crossed before removing */
4335 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4336
	/* Calculate the new number of thresholds */
4338 size = entries = 0;
4339 for (i = 0; i < thresholds->primary->size; i++) {
4340 if (thresholds->primary->entries[i].eventfd != eventfd)
4341 size++;
4342 else
4343 entries++;
4344 }
4345
4346 new = thresholds->spare;
4347
4348 /* If no items related to eventfd have been cleared, nothing to do */
4349 if (!entries)
4350 goto unlock;
4351
4352 /* Set thresholds array to NULL if we don't have thresholds */
4353 if (!size) {
4354 kfree(new);
4355 new = NULL;
4356 goto swap_buffers;
4357 }
4358
4359 new->size = size;
4360
4361 /* Copy thresholds and find current threshold */
4362 new->current_threshold = -1;
4363 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4364 if (thresholds->primary->entries[i].eventfd == eventfd)
4365 continue;
4366
4367 new->entries[j] = thresholds->primary->entries[i];
4368 if (new->entries[j].threshold <= usage) {
4369 /*
4370 * new->current_threshold will not be used
4371 * until rcu_assign_pointer(), so it's safe to increment
4372 * it here.
4373 */
4374 ++new->current_threshold;
4375 }
4376 j++;
4377 }
4378
4379swap_buffers:
4380 /* Swap primary and spare array */
4381 thresholds->spare = thresholds->primary;
4382
4383 rcu_assign_pointer(thresholds->primary, new);
4384
4385 /* To be sure that nobody uses thresholds */
4386 synchronize_rcu();
4387
4388 /* If all events are unregistered, free the spare array */
4389 if (!new) {
4390 kfree(thresholds->spare);
4391 thresholds->spare = NULL;
4392 }
4393unlock:
4394 mutex_unlock(&memcg->thresholds_lock);
4395}
4396
4397static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4398 struct eventfd_ctx *eventfd)
4399{
4400 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4401}
4402
4403static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4404 struct eventfd_ctx *eventfd)
4405{
4406 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4407}
4408
4409static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4410 struct eventfd_ctx *eventfd, const char *args)
4411{
4412 struct mem_cgroup_eventfd_list *event;
4413
4414 event = kmalloc(sizeof(*event), GFP_KERNEL);
4415 if (!event)
4416 return -ENOMEM;
4417
4418 spin_lock(&memcg_oom_lock);
4419
4420 event->eventfd = eventfd;
4421 list_add(&event->list, &memcg->oom_notify);
4422
	/* already in OOM? */
4424 if (memcg->under_oom)
4425 eventfd_signal(eventfd, 1);
4426 spin_unlock(&memcg_oom_lock);
4427
4428 return 0;
4429}
4430
4431static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4432 struct eventfd_ctx *eventfd)
4433{
4434 struct mem_cgroup_eventfd_list *ev, *tmp;
4435
4436 spin_lock(&memcg_oom_lock);
4437
4438 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4439 if (ev->eventfd == eventfd) {
4440 list_del(&ev->list);
4441 kfree(ev);
4442 }
4443 }
4444
4445 spin_unlock(&memcg_oom_lock);
4446}
4447
4448static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4449{
4450 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4451
4452 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4453 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4454 seq_printf(sf, "oom_kill %lu\n",
4455 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4456 return 0;
4457}
4458
4459static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4460 struct cftype *cft, u64 val)
4461{
4462 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4463
	/* cannot be set on the root cgroup, and only 0 and 1 are allowed */
4465 if (!css->parent || !((val == 0) || (val == 1)))
4466 return -EINVAL;
4467
4468 memcg->oom_kill_disable = val;
4469 if (!val)
4470 memcg_oom_recover(memcg);
4471
4472 return 0;
4473}
4474
4475#ifdef CONFIG_CGROUP_WRITEBACK
4476
4477#include <trace/events/writeback.h>
4478
4479static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4480{
4481 return wb_domain_init(&memcg->cgwb_domain, gfp);
4482}
4483
4484static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4485{
4486 wb_domain_exit(&memcg->cgwb_domain);
4487}
4488
4489static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4490{
4491 wb_domain_size_changed(&memcg->cgwb_domain);
4492}
4493
4494struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4495{
4496 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4497
4498 if (!memcg->css.parent)
4499 return NULL;
4500
4501 return &memcg->cgwb_domain;
4502}
4503
4504/*
4505 * idx can be of type enum memcg_stat_item or node_stat_item.
4506 * Keep in sync with memcg_exact_page().
4507 */
4508static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
4509{
4510 long x = atomic_long_read(&memcg->vmstats[idx]);
4511 int cpu;
4512
4513 for_each_online_cpu(cpu)
4514 x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
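	/*
	 * The percpu deltas may be transiently out of sync with the
	 * atomic total, so the sum can briefly go negative; clamp it.
	 */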
4515 if (x < 0)
4516 x = 0;
4517 return x;
4518}
4519
4520/**
4521 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4522 * @wb: bdi_writeback in question
4523 * @pfilepages: out parameter for number of file pages
4524 * @pheadroom: out parameter for number of allocatable pages according to memcg
4525 * @pdirty: out parameter for number of dirty pages
4526 * @pwriteback: out parameter for number of pages under writeback
4527 *
4528 * Determine the numbers of file, headroom, dirty, and writeback pages in
4529 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
4530 * is a bit more involved.
4531 *
4532 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
4533 * headroom is calculated as the lowest headroom of itself and the
4534 * ancestors. Note that this doesn't consider the actual amount of
4535 * available memory in the system. The caller should further cap
4536 * *@pheadroom accordingly.
4537 */
4538void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4539 unsigned long *pheadroom, unsigned long *pdirty,
4540 unsigned long *pwriteback)
4541{
4542 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4543 struct mem_cgroup *parent;
4544
4545 *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
4546
4547 *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
4548 *pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) +
4549 memcg_exact_page_state(memcg, NR_ACTIVE_FILE);
4550 *pheadroom = PAGE_COUNTER_MAX;
4551
4552 while ((parent = parent_mem_cgroup(memcg))) {
4553 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4554 READ_ONCE(memcg->memory.high));
4555 unsigned long used = page_counter_read(&memcg->memory);
4556
4557 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4558 memcg = parent;
4559 }
4560}
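/*
 * Headroom example (hypothetical numbers): a memcg with max = 1G,
 * high = 512M and used = 300M contributes min(1G, 512M) - 300M = 212M;
 * if an ancestor has only 100M of headroom left, *pheadroom ends up as
 * that 100M, the minimum along the path towards the root.
 */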
4561
4562/*
4563 * Foreign dirty flushing
4564 *
4565 * There's an inherent mismatch between memcg and writeback. The former
 * tracks ownership per-page while the latter does so per-inode. This was a
4567 * deliberate design decision because honoring per-page ownership in the
 * writeback path is complicated, may lead to higher CPU and IO overheads,
 * and was deemed unnecessary given that write-sharing an inode across
4570 * different cgroups isn't a common use-case.
4571 *
4572 * Combined with inode majority-writer ownership switching, this works well
4573 * enough in most cases but there are some pathological cases. For
4574 * example, let's say there are two cgroups A and B which keep writing to
4575 * different but confined parts of the same inode. B owns the inode and
4576 * A's memory is limited far below B's. A's dirty ratio can rise enough to
4577 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4578 * triggering background writeback. A will be slowed down without a way to
4579 * make writeback of the dirty pages happen.
4580 *
 * Conditions like the above can lead to a cgroup getting repeatedly and
 * severely throttled after making some progress after each
 * dirty_expire_interval while the underlying IO device is almost
4584 * completely idle.
4585 *
4586 * Solving this problem completely requires matching the ownership tracking
4587 * granularities between memcg and writeback in either direction. However,
4588 * the more egregious behaviors can be avoided by simply remembering the
4589 * most recent foreign dirtying events and initiating remote flushes on
4590 * them when local writeback isn't enough to keep the memory clean enough.
4591 *
 * The following two functions implement such a mechanism. When a foreign
4593 * page - a page whose memcg and writeback ownerships don't match - is
4594 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4595 * bdi_writeback on the page owning memcg. When balance_dirty_pages()
4596 * decides that the memcg needs to sleep due to high dirty ratio, it calls
4597 * mem_cgroup_flush_foreign() which queues writeback on the recorded
4598 * foreign bdi_writebacks which haven't expired. Both the numbers of
4599 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4600 * limited to MEMCG_CGWB_FRN_CNT.
4601 *
4602 * The mechanism only remembers IDs and doesn't hold any object references.
4603 * As being wrong occasionally doesn't matter, updates and accesses to the
4604 * records are lockless and racy.
4605 */
4606void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
4607 struct bdi_writeback *wb)
4608{
4609 struct mem_cgroup *memcg = page->mem_cgroup;
4610 struct memcg_cgwb_frn *frn;
4611 u64 now = get_jiffies_64();
4612 u64 oldest_at = now;
4613 int oldest = -1;
4614 int i;
4615
4616 trace_track_foreign_dirty(page, wb);
4617
4618 /*
4619 * Pick the slot to use. If there is already a slot for @wb, keep
	 * using it. If not, replace the oldest one which isn't being
4621 * written out.
4622 */
4623 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4624 frn = &memcg->cgwb_frn[i];
4625 if (frn->bdi_id == wb->bdi->id &&
4626 frn->memcg_id == wb->memcg_css->id)
4627 break;
4628 if (time_before64(frn->at, oldest_at) &&
4629 atomic_read(&frn->done.cnt) == 1) {
4630 oldest = i;
4631 oldest_at = frn->at;
4632 }
4633 }
4634
4635 if (i < MEMCG_CGWB_FRN_CNT) {
4636 /*
		 * Re-using an existing one. Update the timestamp lazily to
		 * avoid making the cacheline hot. We want them to be
		 * reasonably up-to-date and significantly shorter than
		 * dirty_expire_interval as that's what expires the record.
		 * Use the shorter of 1s and dirty_expire_interval / 8
		 * (dirty_expire_interval is in centiseconds, hence the
		 * "* 10" below to convert to milliseconds).
4642 */
4643 unsigned long update_intv =
4644 min_t(unsigned long, HZ,
4645 msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4646
4647 if (time_before64(frn->at, now - update_intv))
4648 frn->at = now;
4649 } else if (oldest >= 0) {
4650 /* replace the oldest free one */
4651 frn = &memcg->cgwb_frn[oldest];
4652 frn->bdi_id = wb->bdi->id;
4653 frn->memcg_id = wb->memcg_css->id;
4654 frn->at = now;
4655 }
4656}
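/*
 * Slot-reuse example (hypothetical): with records for writebacks A-D
 * already present, another dirtying by A merely refreshes A's ->at (at
 * most once per update_intv), while a dirtying by a new writeback E
 * evicts whichever of A-D has the oldest ->at and no flush in flight
 * (done.cnt == 1).
 */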
4657
4658/* issue foreign writeback flushes for recorded foreign dirtying events */
4659void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4660{
4661 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4662 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4663 u64 now = jiffies_64;
4664 int i;
4665
4666 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4667 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4668
4669 /*
4670 * If the record is older than dirty_expire_interval,
4671 * writeback on it has already started. No need to kick it
4672 * off again. Also, don't start a new one if there's
4673 * already one in flight.
4674 */
4675 if (time_after64(frn->at, now - intv) &&
4676 atomic_read(&frn->done.cnt) == 1) {
4677 frn->at = 0;
4678 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4679 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0,
4680 WB_REASON_FOREIGN_FLUSH,
4681 &frn->done);
4682 }
4683 }
4684}
4685
4686#else /* CONFIG_CGROUP_WRITEBACK */
4687
4688static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4689{
4690 return 0;
4691}
4692
4693static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4694{
4695}
4696
4697static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4698{
4699}
4700
4701#endif /* CONFIG_CGROUP_WRITEBACK */
4702
4703/*
4704 * DO NOT USE IN NEW FILES.
4705 *
4706 * "cgroup.event_control" implementation.
4707 *
4708 * This is way over-engineered. It tries to support fully configurable
 * events for each user. Such a level of flexibility is completely
 * unnecessary, especially in light of the planned unified hierarchy.
4711 *
4712 * Please deprecate this and replace with something simpler if at all
4713 * possible.
4714 */
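/*
 * Illustrative userspace sequence (pseudocode, hypothetical fd names):
 *
 *	efd = eventfd(0, 0);
 *	cfd = open("memory.usage_in_bytes", O_RDONLY);
 *	write(event_control_fd, "<efd> <cfd> 104857600", len);
 *	read(efd, &cnt, 8);	// blocks until usage crosses ~100M
 *
 * memcg_write_event_control() below parses the "<efd> <cfd> <args>"
 * string and wires the eventfd up to the matching register_event
 * handler.
 */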
4715
4716/*
4717 * Unregister event and free resources.
4718 *
4719 * Gets called from workqueue.
4720 */
4721static void memcg_event_remove(struct work_struct *work)
4722{
4723 struct mem_cgroup_event *event =
4724 container_of(work, struct mem_cgroup_event, remove);
4725 struct mem_cgroup *memcg = event->memcg;
4726
4727 remove_wait_queue(event->wqh, &event->wait);
4728
4729 event->unregister_event(memcg, event->eventfd);
4730
4731 /* Notify userspace the event is going away. */
4732 eventfd_signal(event->eventfd, 1);
4733
4734 eventfd_ctx_put(event->eventfd);
4735 kfree(event);
4736 css_put(&memcg->css);
4737}
4738
4739/*
4740 * Gets called on EPOLLHUP on eventfd when user closes it.
4741 *
4742 * Called with wqh->lock held and interrupts disabled.
4743 */
4744static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4745 int sync, void *key)
4746{
4747 struct mem_cgroup_event *event =
4748 container_of(wait, struct mem_cgroup_event, wait);
4749 struct mem_cgroup *memcg = event->memcg;
4750 __poll_t flags = key_to_poll(key);
4751
4752 if (flags & EPOLLHUP) {
4753 /*
4754 * If the event has been detached at cgroup removal, we
4755 * can simply return knowing the other side will cleanup
4756 * for us.
4757 *
4758 * We can't race against event freeing since the other
4759 * side will require wqh->lock via remove_wait_queue(),
4760 * which we hold.
4761 */
4762 spin_lock(&memcg->event_list_lock);
4763 if (!list_empty(&event->list)) {
4764 list_del_init(&event->list);
4765 /*
4766 * We are in atomic context, but cgroup_event_remove()
			 * may sleep, so we have to call it in a workqueue.
4768 */
4769 schedule_work(&event->remove);
4770 }
4771 spin_unlock(&memcg->event_list_lock);
4772 }
4773
4774 return 0;
4775}
4776
4777static void memcg_event_ptable_queue_proc(struct file *file,
4778 wait_queue_head_t *wqh, poll_table *pt)
4779{
4780 struct mem_cgroup_event *event =
4781 container_of(pt, struct mem_cgroup_event, pt);
4782
4783 event->wqh = wqh;
4784 add_wait_queue(wqh, &event->wait);
4785}
4786
4787/*
4788 * DO NOT USE IN NEW FILES.
4789 *
4790 * Parse input and register new cgroup event handler.
4791 *
4792 * Input must be in format '<event_fd> <control_fd> <args>'.
4793 * Interpretation of args is defined by control file implementation.
4794 */
4795static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4796 char *buf, size_t nbytes, loff_t off)
4797{
4798 struct cgroup_subsys_state *css = of_css(of);
4799 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4800 struct mem_cgroup_event *event;
4801 struct cgroup_subsys_state *cfile_css;
4802 unsigned int efd, cfd;
4803 struct fd efile;
4804 struct fd cfile;
4805 const char *name;
4806 char *endp;
4807 int ret;
4808
4809 buf = strstrip(buf);
4810
4811 efd = simple_strtoul(buf, &endp, 10);
4812 if (*endp != ' ')
4813 return -EINVAL;
4814 buf = endp + 1;
4815
4816 cfd = simple_strtoul(buf, &endp, 10);
4817 if ((*endp != ' ') && (*endp != '\0'))
4818 return -EINVAL;
4819 buf = endp + 1;
4820
4821 event = kzalloc(sizeof(*event), GFP_KERNEL);
4822 if (!event)
4823 return -ENOMEM;
4824
4825 event->memcg = memcg;
4826 INIT_LIST_HEAD(&event->list);
4827 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4828 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4829 INIT_WORK(&event->remove, memcg_event_remove);
4830
4831 efile = fdget(efd);
4832 if (!efile.file) {
4833 ret = -EBADF;
4834 goto out_kfree;
4835 }
4836
4837 event->eventfd = eventfd_ctx_fileget(efile.file);
4838 if (IS_ERR(event->eventfd)) {
4839 ret = PTR_ERR(event->eventfd);
4840 goto out_put_efile;
4841 }
4842
4843 cfile = fdget(cfd);
4844 if (!cfile.file) {
4845 ret = -EBADF;
4846 goto out_put_eventfd;
4847 }
4848
	/* the process needs read permission on the control file */
4850 /* AV: shouldn't we check that it's been opened for read instead? */
4851 ret = inode_permission(file_inode(cfile.file), MAY_READ);
4852 if (ret < 0)
4853 goto out_put_cfile;
4854
4855 /*
4856 * Determine the event callbacks and set them in @event. This used
4857 * to be done via struct cftype but cgroup core no longer knows
4858 * about these events. The following is crude but the whole thing
4859 * is for compatibility anyway.
4860 *
4861 * DO NOT ADD NEW FILES.
4862 */
4863 name = cfile.file->f_path.dentry->d_name.name;
4864
4865 if (!strcmp(name, "memory.usage_in_bytes")) {
4866 event->register_event = mem_cgroup_usage_register_event;
4867 event->unregister_event = mem_cgroup_usage_unregister_event;
4868 } else if (!strcmp(name, "memory.oom_control")) {
4869 event->register_event = mem_cgroup_oom_register_event;
4870 event->unregister_event = mem_cgroup_oom_unregister_event;
4871 } else if (!strcmp(name, "memory.pressure_level")) {
4872 event->register_event = vmpressure_register_event;
4873 event->unregister_event = vmpressure_unregister_event;
4874 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4875 event->register_event = memsw_cgroup_usage_register_event;
4876 event->unregister_event = memsw_cgroup_usage_unregister_event;
4877 } else {
4878 ret = -EINVAL;
4879 goto out_put_cfile;
4880 }
4881
4882 /*
	 * Verify that @cfile belongs to @css. Also, remaining events are
4884 * automatically removed on cgroup destruction but the removal is
4885 * asynchronous, so take an extra ref on @css.
4886 */
4887 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
4888 &memory_cgrp_subsys);
4889 ret = -EINVAL;
4890 if (IS_ERR(cfile_css))
4891 goto out_put_cfile;
4892 if (cfile_css != css) {
4893 css_put(cfile_css);
4894 goto out_put_cfile;
4895 }
4896
4897 ret = event->register_event(memcg, event->eventfd, buf);
4898 if (ret)
4899 goto out_put_css;
4900
4901 vfs_poll(efile.file, &event->pt);
4902
4903 spin_lock(&memcg->event_list_lock);
4904 list_add(&event->list, &memcg->event_list);
4905 spin_unlock(&memcg->event_list_lock);
4906
4907 fdput(cfile);
4908 fdput(efile);
4909
4910 return nbytes;
4911
4912out_put_css:
4913 css_put(css);
4914out_put_cfile:
4915 fdput(cfile);
4916out_put_eventfd:
4917 eventfd_ctx_put(event->eventfd);
4918out_put_efile:
4919 fdput(efile);
4920out_kfree:
4921 kfree(event);
4922
4923 return ret;
4924}
4925
4926static struct cftype mem_cgroup_legacy_files[] = {
4927 {
4928 .name = "usage_in_bytes",
4929 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4930 .read_u64 = mem_cgroup_read_u64,
4931 },
4932 {
4933 .name = "max_usage_in_bytes",
4934 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4935 .write = mem_cgroup_reset,
4936 .read_u64 = mem_cgroup_read_u64,
4937 },
4938 {
4939 .name = "limit_in_bytes",
4940 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4941 .write = mem_cgroup_write,
4942 .read_u64 = mem_cgroup_read_u64,
4943 },
4944 {
4945 .name = "soft_limit_in_bytes",
4946 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4947 .write = mem_cgroup_write,
4948 .read_u64 = mem_cgroup_read_u64,
4949 },
4950 {
4951 .name = "failcnt",
4952 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4953 .write = mem_cgroup_reset,
4954 .read_u64 = mem_cgroup_read_u64,
4955 },
4956 {
4957 .name = "stat",
4958 .seq_show = memcg_stat_show,
4959 },
4960 {
4961 .name = "force_empty",
4962 .write = mem_cgroup_force_empty_write,
4963 },
4964 {
4965 .name = "use_hierarchy",
4966 .write_u64 = mem_cgroup_hierarchy_write,
4967 .read_u64 = mem_cgroup_hierarchy_read,
4968 },
4969 {
4970 .name = "cgroup.event_control", /* XXX: for compat */
4971 .write = memcg_write_event_control,
4972 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
4973 },
4974 {
4975 .name = "swappiness",
4976 .read_u64 = mem_cgroup_swappiness_read,
4977 .write_u64 = mem_cgroup_swappiness_write,
4978 },
4979 {
4980 .name = "move_charge_at_immigrate",
4981 .read_u64 = mem_cgroup_move_charge_read,
4982 .write_u64 = mem_cgroup_move_charge_write,
4983 },
4984 {
4985 .name = "oom_control",
4986 .seq_show = mem_cgroup_oom_control_read,
4987 .write_u64 = mem_cgroup_oom_control_write,
4988 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4989 },
4990 {
4991 .name = "pressure_level",
4992 },
4993#ifdef CONFIG_NUMA
4994 {
4995 .name = "numa_stat",
4996 .seq_show = memcg_numa_stat_show,
4997 },
4998#endif
4999 {
5000 .name = "kmem.limit_in_bytes",
5001 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5002 .write = mem_cgroup_write,
5003 .read_u64 = mem_cgroup_read_u64,
5004 },
5005 {
5006 .name = "kmem.usage_in_bytes",
5007 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5008 .read_u64 = mem_cgroup_read_u64,
5009 },
5010 {
5011 .name = "kmem.failcnt",
5012 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5013 .write = mem_cgroup_reset,
5014 .read_u64 = mem_cgroup_read_u64,
5015 },
5016 {
5017 .name = "kmem.max_usage_in_bytes",
5018 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5019 .write = mem_cgroup_reset,
5020 .read_u64 = mem_cgroup_read_u64,
5021 },
5022#if defined(CONFIG_MEMCG_KMEM) && \
5023 (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
5024 {
5025 .name = "kmem.slabinfo",
5026 .seq_show = memcg_slab_show,
5027 },
5028#endif
5029 {
5030 .name = "kmem.tcp.limit_in_bytes",
5031 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5032 .write = mem_cgroup_write,
5033 .read_u64 = mem_cgroup_read_u64,
5034 },
5035 {
5036 .name = "kmem.tcp.usage_in_bytes",
5037 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5038 .read_u64 = mem_cgroup_read_u64,
5039 },
5040 {
5041 .name = "kmem.tcp.failcnt",
5042 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5043 .write = mem_cgroup_reset,
5044 .read_u64 = mem_cgroup_read_u64,
5045 },
5046 {
5047 .name = "kmem.tcp.max_usage_in_bytes",
5048 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5049 .write = mem_cgroup_reset,
5050 .read_u64 = mem_cgroup_read_u64,
5051 },
5052 { }, /* terminate */
5053};
5054
5055/*
5056 * Private memory cgroup IDR
5057 *
5058 * Swap-out records and page cache shadow entries need to store memcg
5059 * references in constrained space, so we maintain an ID space that is
5060 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
5061 * memory-controlled cgroups to 64k.
5062 *
5063 * However, there usually are many references to the offline CSS after
5064 * the cgroup has been destroyed, such as page cache or reclaimable
 * slab objects, that don't need to hang on to the ID. We want to keep
 * those dead CSSes from occupying IDs, or we might quickly exhaust the
 * relatively small ID space and prevent the creation of new cgroups
 * even when there are far fewer than 64k cgroups - possibly none.
5069 *
5070 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5071 * be freed and recycled when it's no longer needed, which is usually
5072 * when the CSS is offlined.
5073 *
5074 * The only exception to that are records of swapped out tmpfs/shmem
5075 * pages that need to be attributed to live ancestors on swapin. But
5076 * those references are manageable from userspace.
5077 */
5078
5079static DEFINE_IDR(mem_cgroup_idr);
5080
5081static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5082{
5083 if (memcg->id.id > 0) {
5084 idr_remove(&mem_cgroup_idr, memcg->id.id);
5085 memcg->id.id = 0;
5086 }
5087}
5088
5089static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5090 unsigned int n)
5091{
5092 refcount_add(n, &memcg->id.ref);
5093}
5094
5095static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5096{
5097 if (refcount_sub_and_test(n, &memcg->id.ref)) {
5098 mem_cgroup_id_remove(memcg);
5099
5100 /* Memcg ID pins CSS */
5101 css_put(&memcg->css);
5102 }
5103}
5104
5105static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5106{
5107 mem_cgroup_id_put_many(memcg, 1);
5108}
5109
5110/**
5111 * mem_cgroup_from_id - look up a memcg from a memcg id
5112 * @id: the memcg id to look up
5113 *
5114 * Caller must hold rcu_read_lock().
5115 */
5116struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5117{
5118 WARN_ON_ONCE(!rcu_read_lock_held());
5119 return idr_find(&mem_cgroup_idr, id);
5120}
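/*
 * Typical lookup (sketch): a swap-out record keeps only the 16-bit
 * mem_cgroup_id() and resolves it on swapin under RCU, e.g.:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */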
5121
5122static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5123{
5124 struct mem_cgroup_per_node *pn;
5125 int tmp = node;
	/*
	 * This routine is called against possible nodes, but it's a
	 * BUG to call kmalloc() against an offline node.
	 *
	 * TODO: this routine can waste a lot of memory for nodes which
	 * will never be onlined. It would be better to use a memory
	 * hotplug callback function.
	 */
5134 if (!node_state(node, N_NORMAL_MEMORY))
5135 tmp = -1;
5136 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
5137 if (!pn)
5138 return 1;
5139
5140 pn->lruvec_stat_local = alloc_percpu_gfp(struct lruvec_stat,
5141 GFP_KERNEL_ACCOUNT);
5142 if (!pn->lruvec_stat_local) {
5143 kfree(pn);
5144 return 1;
5145 }
5146
5147 pn->lruvec_stat_cpu = alloc_percpu_gfp(struct lruvec_stat,
5148 GFP_KERNEL_ACCOUNT);
5149 if (!pn->lruvec_stat_cpu) {
5150 free_percpu(pn->lruvec_stat_local);
5151 kfree(pn);
5152 return 1;
5153 }
5154
5155 lruvec_init(&pn->lruvec);
5156 pn->usage_in_excess = 0;
5157 pn->on_tree = false;
5158 pn->memcg = memcg;
5159
5160 memcg->nodeinfo[node] = pn;
5161 return 0;
5162}
5163
5164static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5165{
5166 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5167
5168 if (!pn)
5169 return;
5170
5171 free_percpu(pn->lruvec_stat_cpu);
5172 free_percpu(pn->lruvec_stat_local);
5173 kfree(pn);
5174}
5175
5176static void __mem_cgroup_free(struct mem_cgroup *memcg)
5177{
5178 int node;
5179
5180 for_each_node(node)
5181 free_mem_cgroup_per_node_info(memcg, node);
5182 free_percpu(memcg->vmstats_percpu);
5183 free_percpu(memcg->vmstats_local);
5184 kfree(memcg);
5185}
5186
5187static void mem_cgroup_free(struct mem_cgroup *memcg)
5188{
5189 memcg_wb_domain_exit(memcg);
5190 /*
	 * Flush percpu vmstats and vmevents to guarantee value correctness
	 * on the parent's and all ancestor levels.
5193 */
5194 memcg_flush_percpu_vmstats(memcg);
5195 memcg_flush_percpu_vmevents(memcg);
5196 __mem_cgroup_free(memcg);
5197}
5198
5199static struct mem_cgroup *mem_cgroup_alloc(void)
5200{
5201 struct mem_cgroup *memcg;
5202 unsigned int size;
5203 int node;
5204 int __maybe_unused i;
5205 long error = -ENOMEM;
5206
5207 size = sizeof(struct mem_cgroup);
5208 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
5209
5210 memcg = kzalloc(size, GFP_KERNEL);
5211 if (!memcg)
5212 return ERR_PTR(error);
5213
5214 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5215 1, MEM_CGROUP_ID_MAX,
5216 GFP_KERNEL);
5217 if (memcg->id.id < 0) {
5218 error = memcg->id.id;
5219 goto fail;
5220 }
5221
5222 memcg->vmstats_local = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5223 GFP_KERNEL_ACCOUNT);
5224 if (!memcg->vmstats_local)
5225 goto fail;
5226
5227 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5228 GFP_KERNEL_ACCOUNT);
5229 if (!memcg->vmstats_percpu)
5230 goto fail;
5231
5232 for_each_node(node)
5233 if (alloc_mem_cgroup_per_node_info(memcg, node))
5234 goto fail;
5235
5236 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5237 goto fail;
5238
5239 INIT_WORK(&memcg->high_work, high_work_func);
5240 INIT_LIST_HEAD(&memcg->oom_notify);
5241 mutex_init(&memcg->thresholds_lock);
5242 spin_lock_init(&memcg->move_lock);
5243 vmpressure_init(&memcg->vmpressure);
5244 INIT_LIST_HEAD(&memcg->event_list);
5245 spin_lock_init(&memcg->event_list_lock);
5246 memcg->socket_pressure = jiffies;
5247#ifdef CONFIG_MEMCG_KMEM
5248 memcg->kmemcg_id = -1;
5249 INIT_LIST_HEAD(&memcg->objcg_list);
5250#endif
5251#ifdef CONFIG_CGROUP_WRITEBACK
5252 INIT_LIST_HEAD(&memcg->cgwb_list);
5253 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5254 memcg->cgwb_frn[i].done =
5255 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5256#endif
5257#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5258 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5259 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5260 memcg->deferred_split_queue.split_queue_len = 0;
5261#endif
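	/* publish to the IDR only now that the memcg is fully initialized */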
5262 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5263 return memcg;
5264fail:
5265 mem_cgroup_id_remove(memcg);
5266 __mem_cgroup_free(memcg);
5267 return ERR_PTR(error);
5268}
5269
5270static struct cgroup_subsys_state * __ref
5271mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5272{
5273 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5274 struct mem_cgroup *memcg;
5275 long error = -ENOMEM;
5276
5277 memalloc_use_memcg(parent);
5278 memcg = mem_cgroup_alloc();
5279 memalloc_unuse_memcg();
5280 if (IS_ERR(memcg))
5281 return ERR_CAST(memcg);
5282
5283 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5284 memcg->soft_limit = PAGE_COUNTER_MAX;
5285 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5286 if (parent) {
5287 memcg->swappiness = mem_cgroup_swappiness(parent);
5288 memcg->oom_kill_disable = parent->oom_kill_disable;
5289 }
5290 if (parent && parent->use_hierarchy) {
5291 memcg->use_hierarchy = true;
5292 page_counter_init(&memcg->memory, &parent->memory);
5293 page_counter_init(&memcg->swap, &parent->swap);
5294 page_counter_init(&memcg->memsw, &parent->memsw);
5295 page_counter_init(&memcg->kmem, &parent->kmem);
5296 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5297 } else {
5298 page_counter_init(&memcg->memory, NULL);
5299 page_counter_init(&memcg->swap, NULL);
5300 page_counter_init(&memcg->memsw, NULL);
5301 page_counter_init(&memcg->kmem, NULL);
5302 page_counter_init(&memcg->tcpmem, NULL);
5303 /*
		 * A deeper hierarchy with use_hierarchy == false doesn't
		 * make much sense, so let the cgroup subsystem know about
		 * this unfortunate state in our controller.
5307 */
5308 if (parent != root_mem_cgroup)
5309 memory_cgrp_subsys.broken_hierarchy = true;
5310 }
5311
5312 /* The following stuff does not apply to the root */
5313 if (!parent) {
5314 root_mem_cgroup = memcg;
5315 return &memcg->css;
5316 }
5317
5318 error = memcg_online_kmem(memcg);
5319 if (error)
5320 goto fail;
5321
5322 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5323 static_branch_inc(&memcg_sockets_enabled_key);
5324
5325 return &memcg->css;
5326fail:
5327 mem_cgroup_id_remove(memcg);
5328 mem_cgroup_free(memcg);
5329 return ERR_PTR(error);
5330}
5331
5332static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5333{
5334 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5335
5336 /*
5337 * A memcg must be visible for memcg_expand_shrinker_maps()
5338 * by the time the maps are allocated. So, we allocate maps
5339 * here, when for_each_mem_cgroup() can't skip it.
5340 */
5341 if (memcg_alloc_shrinker_maps(memcg)) {
5342 mem_cgroup_id_remove(memcg);
5343 return -ENOMEM;
5344 }
5345
5346 /* Online state pins memcg ID, memcg ID pins CSS */
5347 refcount_set(&memcg->id.ref, 1);
5348 css_get(css);
5349 return 0;
5350}
5351
5352static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5353{
5354 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5355 struct mem_cgroup_event *event, *tmp;
5356
5357 /*
5358 * Unregister events and notify userspace.
	 * Notify userspace about cgroup removal only after rmdir of the cgroup
	 * directory to avoid a race between userspace and kernelspace.
5361 */
5362 spin_lock(&memcg->event_list_lock);
5363 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5364 list_del_init(&event->list);
5365 schedule_work(&event->remove);
5366 }
5367 spin_unlock(&memcg->event_list_lock);
5368
5369 page_counter_set_min(&memcg->memory, 0);
5370 page_counter_set_low(&memcg->memory, 0);
5371
5372 memcg_offline_kmem(memcg);
5373 wb_memcg_offline(memcg);
5374
5375 drain_all_stock(memcg);
5376
5377 mem_cgroup_id_put(memcg);
5378}
5379
5380static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5381{
5382 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5383
5384 invalidate_reclaim_iterators(memcg);
5385}
5386
5387static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5388{
5389 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5390 int __maybe_unused i;
5391
5392#ifdef CONFIG_CGROUP_WRITEBACK
5393 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5394 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5395#endif
5396 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5397 static_branch_dec(&memcg_sockets_enabled_key);
5398
5399 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5400 static_branch_dec(&memcg_sockets_enabled_key);
5401
5402 vmpressure_cleanup(&memcg->vmpressure);
5403 cancel_work_sync(&memcg->high_work);
5404 mem_cgroup_remove_from_trees(memcg);
5405 memcg_free_shrinker_maps(memcg);
5406 memcg_free_kmem(memcg);
5407 mem_cgroup_free(memcg);
5408}
5409
5410/**
5411 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5412 * @css: the target css
5413 *
5414 * Reset the states of the mem_cgroup associated with @css. This is
5415 * invoked when the userland requests disabling on the default hierarchy
5416 * but the memcg is pinned through dependency. The memcg should stop
5417 * applying policies and should revert to the vanilla state as it may be
5418 * made visible again.
5419 *
5420 * The current implementation only resets the essential configurations.
5421 * This needs to be expanded to cover all the visible parts.
5422 */
5423static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5424{
5425 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5426
5427 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5428 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5429 page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX);
5430 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5431 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5432 page_counter_set_min(&memcg->memory, 0);
5433 page_counter_set_low(&memcg->memory, 0);
5434 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5435 memcg->soft_limit = PAGE_COUNTER_MAX;
5436 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5437 memcg_wb_domain_size_changed(memcg);
5438}
5439
5440#ifdef CONFIG_MMU
5441/* Handlers for move charge at task migration. */
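/*
 * Protocol sketch: can_attach() walks the source mm once to count
 * movable references and precharges that many pages to the destination
 * (mc.precharge); the post-attach walk then moves pages one by one,
 * consuming the precharge, and mem_cgroup_clear_mc() cancels whatever
 * is left over.
 */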
5442static int mem_cgroup_do_precharge(unsigned long count)
5443{
5444 int ret;
5445
5446 /* Try a single bulk charge without reclaim first, kswapd may wake */
5447 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5448 if (!ret) {
5449 mc.precharge += count;
5450 return ret;
5451 }
5452
5453 /* Try charges one by one with reclaim, but do not retry */
5454 while (count--) {
5455 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5456 if (ret)
5457 return ret;
5458 mc.precharge++;
5459 cond_resched();
5460 }
5461 return 0;
5462}
5463
5464union mc_target {
5465 struct page *page;
5466 swp_entry_t ent;
5467};
5468
5469enum mc_target_type {
5470 MC_TARGET_NONE = 0,
5471 MC_TARGET_PAGE,
5472 MC_TARGET_SWAP,
5473 MC_TARGET_DEVICE,
5474};
5475
5476static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5477 unsigned long addr, pte_t ptent)
5478{
5479 struct page *page = vm_normal_page(vma, addr, ptent);
5480
5481 if (!page || !page_mapped(page))
5482 return NULL;
5483 if (PageAnon(page)) {
5484 if (!(mc.flags & MOVE_ANON))
5485 return NULL;
5486 } else {
5487 if (!(mc.flags & MOVE_FILE))
5488 return NULL;
5489 }
5490 if (!get_page_unless_zero(page))
5491 return NULL;
5492
5493 return page;
5494}
5495
5496#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5497static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5498 pte_t ptent, swp_entry_t *entry)
5499{
5500 struct page *page = NULL;
5501 swp_entry_t ent = pte_to_swp_entry(ptent);
5502
5503 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
5504 return NULL;
5505
	/*
	 * Handle MEMORY_DEVICE_PRIVATE pages, which are ZONE_DEVICE pages
	 * belonging to a device. Because they are not accessible by the CPU,
	 * they are stored as special swap entries in the CPU page table.
	 */
5511 if (is_device_private_entry(ent)) {
5512 page = device_private_entry_to_page(ent);
		/*
		 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has
		 * a refcount of 1 when free (unlike a normal page).
		 */
5517 if (!page_ref_add_unless(page, 1, 1))
5518 return NULL;
5519 return page;
5520 }
5521
5522 /*
	 * Because lookup_swap_cache() updates some statistics counters,
5524 * we call find_get_page() with swapper_space directly.
5525 */
5526 page = find_get_page(swap_address_space(ent), swp_offset(ent));
5527 entry->val = ent.val;
5528
5529 return page;
5530}
5531#else
5532static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5533 pte_t ptent, swp_entry_t *entry)
5534{
5535 return NULL;
5536}
5537#endif
5538
5539static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5540 unsigned long addr, pte_t ptent, swp_entry_t *entry)
5541{
5542 struct page *page = NULL;
5543 struct address_space *mapping;
5544 pgoff_t pgoff;
5545
5546 if (!vma->vm_file) /* anonymous vma */
5547 return NULL;
5548 if (!(mc.flags & MOVE_FILE))
5549 return NULL;
5550
5551 mapping = vma->vm_file->f_mapping;
5552 pgoff = linear_page_index(vma, addr);
5553
	/* The page is moved even if it's not RSS of this task (page-faulted). */
5555#ifdef CONFIG_SWAP
5556 /* shmem/tmpfs may report page out on swap: account for that too. */
5557 if (shmem_mapping(mapping)) {
5558 page = find_get_entry(mapping, pgoff);
5559 if (xa_is_value(page)) {
5560 swp_entry_t swp = radix_to_swp_entry(page);
5561 *entry = swp;
5562 page = find_get_page(swap_address_space(swp),
5563 swp_offset(swp));
5564 }
5565 } else
5566 page = find_get_page(mapping, pgoff);
5567#else
5568 page = find_get_page(mapping, pgoff);
5569#endif
5570 return page;
5571}
5572
5573/**
5574 * mem_cgroup_move_account - move account of the page
5575 * @page: the page
5576 * @compound: charge the page as compound or small page
5577 * @from: mem_cgroup which the page is moved from.
5578 * @to: mem_cgroup which the page is moved to. @from != @to.
5579 *
 * The caller must make sure the page is not on the LRU
 * (isolate_lru_page() is useful.)
5581 *
5582 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
5583 * from old cgroup.
5584 */
5585static int mem_cgroup_move_account(struct page *page,
5586 bool compound,
5587 struct mem_cgroup *from,
5588 struct mem_cgroup *to)
5589{
5590 struct lruvec *from_vec, *to_vec;
5591 struct pglist_data *pgdat;
5592 unsigned int nr_pages = compound ? thp_nr_pages(page) : 1;
5593 int ret;
5594
5595 VM_BUG_ON(from == to);
5596 VM_BUG_ON_PAGE(PageLRU(page), page);
5597 VM_BUG_ON(compound && !PageTransHuge(page));
5598
5599 /*
5600 * Prevent mem_cgroup_migrate() from looking at
5601 * page->mem_cgroup of its source page while we change it.
5602 */
5603 ret = -EBUSY;
5604 if (!trylock_page(page))
5605 goto out;
5606
5607 ret = -EINVAL;
5608 if (page->mem_cgroup != from)
5609 goto out_unlock;
5610
5611 pgdat = page_pgdat(page);
5612 from_vec = mem_cgroup_lruvec(from, pgdat);
5613 to_vec = mem_cgroup_lruvec(to, pgdat);
5614
5615 lock_page_memcg(page);
5616
5617 if (PageAnon(page)) {
5618 if (page_mapped(page)) {
5619 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5620 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5621 if (PageTransHuge(page)) {
5622 __mod_lruvec_state(from_vec, NR_ANON_THPS,
5623 -nr_pages);
5624 __mod_lruvec_state(to_vec, NR_ANON_THPS,
5625 nr_pages);
5626 }
5627
5628 }
5629 } else {
5630 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5631 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5632
5633 if (PageSwapBacked(page)) {
5634 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5635 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5636 }
5637
5638 if (page_mapped(page)) {
5639 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5640 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5641 }
5642
5643 if (PageDirty(page)) {
5644 struct address_space *mapping = page_mapping(page);
5645
5646 if (mapping_cap_account_dirty(mapping)) {
5647 __mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5648 -nr_pages);
5649 __mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5650 nr_pages);
5651 }
5652 }
5653 }
5654
5655 if (PageWriteback(page)) {
5656 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5657 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5658 }
5659
5660 /*
5661 * All state has been migrated, let's switch to the new memcg.
5662 *
5663 * It is safe to change page->mem_cgroup here because the page
5664 * is referenced, charged, isolated, and locked: we can't race
5665 * with (un)charging, migration, LRU putback, or anything else
5666 * that would rely on a stable page->mem_cgroup.
5667 *
5668 * Note that lock_page_memcg is a memcg lock, not a page lock,
5669 * to save space. As soon as we switch page->mem_cgroup to a
5670 * new memcg that isn't locked, the above state can change
5671 * concurrently again. Make sure we're truly done with it.
5672 */
5673 smp_mb();
5674
5675 css_get(&to->css);
5676 css_put(&from->css);
5677
5678 page->mem_cgroup = to;
5679
5680 __unlock_page_memcg(from);
5681
5682 ret = 0;
5683
5684 local_irq_disable();
5685 mem_cgroup_charge_statistics(to, page, nr_pages);
5686 memcg_check_events(to, page);
5687 mem_cgroup_charge_statistics(from, page, -nr_pages);
5688 memcg_check_events(from, page);
5689 local_irq_enable();
5690out_unlock:
5691 unlock_page(page);
5692out:
5693 return ret;
5694}
5695
5696/**
5697 * get_mctgt_type - get target type of moving charge
 * @vma: the vma the pte to be checked belongs to
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer where the target page or swap entry will be
 *	stored (can be NULL)
 *
 * Returns
 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 *	move charge. If @target is not NULL, the page is stored in
 *	target->page with an extra refcount taken (callers should handle it).
 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *	target for charge migration. If @target is not NULL, the entry is
 *	stored in target->ent.
 * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is
 *	MEMORY_DEVICE_PRIVATE (so a ZONE_DEVICE page, and thus not on the
 *	LRU). For now such a page is charged like a regular page would be,
 *	as for all intents and purposes it is just special memory taking
 *	the place of a regular page.
 *
 * See Documentation/vm/hmm.rst and include/linux/hmm.h
 *
 * Called with pte lock held.
 */
5722static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5723 unsigned long addr, pte_t ptent, union mc_target *target)
5724{
5725 struct page *page = NULL;
5726 enum mc_target_type ret = MC_TARGET_NONE;
5727 swp_entry_t ent = { .val = 0 };
5728
5729 if (pte_present(ptent))
5730 page = mc_handle_present_pte(vma, addr, ptent);
5731 else if (is_swap_pte(ptent))
5732 page = mc_handle_swap_pte(vma, ptent, &ent);
5733 else if (pte_none(ptent))
5734 page = mc_handle_file_pte(vma, addr, ptent, &ent);
5735
5736 if (!page && !ent.val)
5737 return ret;
5738 if (page) {
5739 /*
		 * Do only a loose check without serialization.
		 * mem_cgroup_move_account() checks whether the page is valid
		 * under LRU exclusion.
5743 */
5744 if (page->mem_cgroup == mc.from) {
5745 ret = MC_TARGET_PAGE;
5746 if (is_device_private_page(page))
5747 ret = MC_TARGET_DEVICE;
5748 if (target)
5749 target->page = page;
5750 }
5751 if (!ret || !target)
5752 put_page(page);
5753 }
5754 /*
5755 * There is a swap entry and a page doesn't exist or isn't charged.
5756 * But we cannot move a tail-page in a THP.
5757 */
5758 if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5759 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5760 ret = MC_TARGET_SWAP;
5761 if (target)
5762 target->ent = ent;
5763 }
5764 return ret;
5765}
5766
5767#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5768/*
 * We don't consider PMD-mapped swapping or file-mapped pages because THP
 * does not support them for now.
5771 * Caller should make sure that pmd_trans_huge(pmd) is true.
5772 */
5773static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5774 unsigned long addr, pmd_t pmd, union mc_target *target)
5775{
5776 struct page *page = NULL;
5777 enum mc_target_type ret = MC_TARGET_NONE;
5778
5779 if (unlikely(is_swap_pmd(pmd))) {
5780 VM_BUG_ON(thp_migration_supported() &&
5781 !is_pmd_migration_entry(pmd));
5782 return ret;
5783 }
5784 page = pmd_page(pmd);
5785 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5786 if (!(mc.flags & MOVE_ANON))
5787 return ret;
5788 if (page->mem_cgroup == mc.from) {
5789 ret = MC_TARGET_PAGE;
5790 if (target) {
5791 get_page(page);
5792 target->page = page;
5793 }
5794 }
5795 return ret;
5796}
5797#else
5798static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5799 unsigned long addr, pmd_t pmd, union mc_target *target)
5800{
5801 return MC_TARGET_NONE;
5802}
5803#endif
5804
5805static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5806 unsigned long addr, unsigned long end,
5807 struct mm_walk *walk)
5808{
5809 struct vm_area_struct *vma = walk->vma;
5810 pte_t *pte;
5811 spinlock_t *ptl;
5812
5813 ptl = pmd_trans_huge_lock(pmd, vma);
5814 if (ptl) {
5815 /*
		 * Note there can not be MC_TARGET_DEVICE for now, as we do not
		 * support transparent huge pages with MEMORY_DEVICE_PRIVATE,
		 * but this might change.
5819 */
5820 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5821 mc.precharge += HPAGE_PMD_NR;
5822 spin_unlock(ptl);
5823 return 0;
5824 }
5825
5826 if (pmd_trans_unstable(pmd))
5827 return 0;
5828 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5829 for (; addr != end; pte++, addr += PAGE_SIZE)
5830 if (get_mctgt_type(vma, addr, *pte, NULL))
5831 mc.precharge++; /* increment precharge temporarily */
5832 pte_unmap_unlock(pte - 1, ptl);
5833 cond_resched();
5834
5835 return 0;
5836}
5837
5838static const struct mm_walk_ops precharge_walk_ops = {
5839 .pmd_entry = mem_cgroup_count_precharge_pte_range,
5840};
5841
5842static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5843{
5844 unsigned long precharge;
5845
5846 mmap_read_lock(mm);
5847 walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
5848 mmap_read_unlock(mm);
5849
5850 precharge = mc.precharge;
5851 mc.precharge = 0;
5852
5853 return precharge;
5854}
5855
5856static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5857{
5858 unsigned long precharge = mem_cgroup_count_precharge(mm);
5859
5860 VM_BUG_ON(mc.moving_task);
5861 mc.moving_task = current;
5862 return mem_cgroup_do_precharge(precharge);
5863}
5864
5865/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5866static void __mem_cgroup_clear_mc(void)
5867{
5868 struct mem_cgroup *from = mc.from;
5869 struct mem_cgroup *to = mc.to;
5870
5871 /* we must uncharge all the leftover precharges from mc.to */
5872 if (mc.precharge) {
5873 cancel_charge(mc.to, mc.precharge);
5874 mc.precharge = 0;
5875 }
5876 /*
5877 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5878 * we must uncharge here.
5879 */
5880 if (mc.moved_charge) {
5881 cancel_charge(mc.from, mc.moved_charge);
5882 mc.moved_charge = 0;
5883 }
5884 /* we must fixup refcnts and charges */
5885 if (mc.moved_swap) {
5886 /* uncharge swap account from the old cgroup */
5887 if (!mem_cgroup_is_root(mc.from))
5888 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5889
5890 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5891
5892 /*
5893 * we charged both to->memory and to->memsw, so we
5894 * should uncharge to->memory.
5895 */
5896 if (!mem_cgroup_is_root(mc.to))
5897 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
5898
5899 mc.moved_swap = 0;
5900 }
5901 memcg_oom_recover(from);
5902 memcg_oom_recover(to);
5903 wake_up_all(&mc.waitq);
5904}
5905
5906static void mem_cgroup_clear_mc(void)
5907{
5908 struct mm_struct *mm = mc.mm;
5909
5910 /*
5911 * we must clear moving_task before waking up waiters at the end of
5912 * task migration.
5913 */
5914 mc.moving_task = NULL;
5915 __mem_cgroup_clear_mc();
5916 spin_lock(&mc.lock);
5917 mc.from = NULL;
5918 mc.to = NULL;
5919 mc.mm = NULL;
5920 spin_unlock(&mc.lock);
5921
5922 mmput(mm);
5923}
5924
5925static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5926{
5927 struct cgroup_subsys_state *css;
5928 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
5929 struct mem_cgroup *from;
5930 struct task_struct *leader, *p;
5931 struct mm_struct *mm;
5932 unsigned long move_flags;
5933 int ret = 0;
5934
5935 /* charge immigration isn't supported on the default hierarchy */
5936 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5937 return 0;
5938
5939 /*
5940 * Multi-process migrations only happen on the default hierarchy
5941 * where charge immigration is not used. Perform charge
5942 * immigration if @tset contains a leader and whine if there are
5943 * multiple.
5944 */
5945 p = NULL;
5946 cgroup_taskset_for_each_leader(leader, css, tset) {
5947 WARN_ON_ONCE(p);
5948 p = leader;
5949 memcg = mem_cgroup_from_css(css);
5950 }
5951 if (!p)
5952 return 0;
5953
5954 /*
	 * We are now committed to this value whatever it is. Changes in this
5956 * tunable will only affect upcoming migrations, not the current one.
5957 * So we need to save it, and keep it going.
5958 */
5959 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
5960 if (!move_flags)
5961 return 0;
5962
5963 from = mem_cgroup_from_task(p);
5964
5965 VM_BUG_ON(from == memcg);
5966
5967 mm = get_task_mm(p);
5968 if (!mm)
5969 return 0;
	/* We move charges only when we move the owner of the mm */
5971 if (mm->owner == p) {
5972 VM_BUG_ON(mc.from);
5973 VM_BUG_ON(mc.to);
5974 VM_BUG_ON(mc.precharge);
5975 VM_BUG_ON(mc.moved_charge);
5976 VM_BUG_ON(mc.moved_swap);
5977
5978 spin_lock(&mc.lock);
5979 mc.mm = mm;
5980 mc.from = from;
5981 mc.to = memcg;
5982 mc.flags = move_flags;
5983 spin_unlock(&mc.lock);
5984 /* We set mc.moving_task later */
5985
5986 ret = mem_cgroup_precharge_mc(mm);
5987 if (ret)
5988 mem_cgroup_clear_mc();
5989 } else {
5990 mmput(mm);
5991 }
5992 return ret;
5993}
5994
5995static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5996{
5997 if (mc.to)
5998 mem_cgroup_clear_mc();
5999}
6000
6001static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6002 unsigned long addr, unsigned long end,
6003 struct mm_walk *walk)
6004{
6005 int ret = 0;
6006 struct vm_area_struct *vma = walk->vma;
6007 pte_t *pte;
6008 spinlock_t *ptl;
6009 enum mc_target_type target_type;
6010 union mc_target target;
6011 struct page *page;
6012
6013 ptl = pmd_trans_huge_lock(pmd, vma);
6014 if (ptl) {
6015 if (mc.precharge < HPAGE_PMD_NR) {
6016 spin_unlock(ptl);
6017 return 0;
6018 }
6019 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6020 if (target_type == MC_TARGET_PAGE) {
6021 page = target.page;
6022 if (!isolate_lru_page(page)) {
6023 if (!mem_cgroup_move_account(page, true,
6024 mc.from, mc.to)) {
6025 mc.precharge -= HPAGE_PMD_NR;
6026 mc.moved_charge += HPAGE_PMD_NR;
6027 }
6028 putback_lru_page(page);
6029 }
6030 put_page(page);
6031 } else if (target_type == MC_TARGET_DEVICE) {
6032 page = target.page;
6033 if (!mem_cgroup_move_account(page, true,
6034 mc.from, mc.to)) {
6035 mc.precharge -= HPAGE_PMD_NR;
6036 mc.moved_charge += HPAGE_PMD_NR;
6037 }
6038 put_page(page);
6039 }
6040 spin_unlock(ptl);
6041 return 0;
6042 }
6043
6044 if (pmd_trans_unstable(pmd))
6045 return 0;
6046retry:
6047 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6048 for (; addr != end; addr += PAGE_SIZE) {
6049 pte_t ptent = *(pte++);
6050 bool device = false;
6051 swp_entry_t ent;
6052
6053 if (!mc.precharge)
6054 break;
6055
6056 switch (get_mctgt_type(vma, addr, ptent, &target)) {
6057 case MC_TARGET_DEVICE:
6058 device = true;
6059 fallthrough;
6060 case MC_TARGET_PAGE:
6061 page = target.page;
6062 /*
			 * We can have a part of the split pmd here. Moving it
			 * can be done, but it would be too convoluted, so
			 * simply ignore such a partial THP and keep it in the
			 * original memcg. There should be somebody mapping
			 * the head.
6067 */
6068 if (PageTransCompound(page))
6069 goto put;
6070 if (!device && isolate_lru_page(page))
6071 goto put;
6072 if (!mem_cgroup_move_account(page, false,
6073 mc.from, mc.to)) {
6074 mc.precharge--;
6075 /* we uncharge from mc.from later. */
6076 mc.moved_charge++;
6077 }
6078 if (!device)
6079 putback_lru_page(page);
6080put: /* get_mctgt_type() gets the page */
6081 put_page(page);
6082 break;
6083 case MC_TARGET_SWAP:
6084 ent = target.ent;
6085 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6086 mc.precharge--;
6087 mem_cgroup_id_get_many(mc.to, 1);
6088 /* we fixup other refcnts and charges later. */
6089 mc.moved_swap++;
6090 }
6091 break;
6092 default:
6093 break;
6094 }
6095 }
6096 pte_unmap_unlock(pte - 1, ptl);
6097 cond_resched();
6098
6099 if (addr != end) {
6100 /*
6101 * We have consumed all precharges we got in can_attach().
6102		 * We try to charge one by one, but don't do any additional
6103		 * charges to mc.to if we have failed to charge once in the
6104		 * attach() phase.
6105 */
6106 ret = mem_cgroup_do_precharge(1);
6107 if (!ret)
6108 goto retry;
6109 }
6110
6111 return ret;
6112}
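/*
 * A note on the precharge accounting above (illustrative): a THP is moved
 * only if HPAGE_PMD_NR precharges (e.g. 512 with 2MiB THPs and 4KiB pages)
 * are already available, and is skipped otherwise; for normal PTEs an
 * exhausted pool is instead topped up one page at a time via
 * mem_cgroup_do_precharge(1) and the rest of the range is retried.
 */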
6113
6114static const struct mm_walk_ops charge_walk_ops = {
6115 .pmd_entry = mem_cgroup_move_charge_pte_range,
6116};
6117
6118static void mem_cgroup_move_charge(void)
6119{
6120 lru_add_drain_all();
6121 /*
6122 * Signal lock_page_memcg() to take the memcg's move_lock
6123 * while we're moving its pages to another memcg. Then wait
6124 * for already started RCU-only updates to finish.
6125 */
6126 atomic_inc(&mc.from->moving_account);
6127 synchronize_rcu();
6128retry:
6129 if (unlikely(!mmap_read_trylock(mc.mm))) {
6130 /*
6131		 * Someone who is holding the mmap_lock might be waiting on the
6132		 * waitq. So we cancel all extra charges, wake up all waiters,
6133 * and retry. Because we cancel precharges, we might not be able
6134 * to move enough charges, but moving charge is a best-effort
6135 * feature anyway, so it wouldn't be a big problem.
6136 */
6137 __mem_cgroup_clear_mc();
6138 cond_resched();
6139 goto retry;
6140 }
6141 /*
6142 * When we have consumed all precharges and failed in doing
6143 * additional charge, the page walk just aborts.
6144 */
6145 walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
6146 NULL);
6147
6148 mmap_read_unlock(mc.mm);
6149 atomic_dec(&mc.from->moving_account);
6150}
6151
6152static void mem_cgroup_move_task(void)
6153{
6154 if (mc.to) {
6155 mem_cgroup_move_charge();
6156 mem_cgroup_clear_mc();
6157 }
6158}
6159#else /* !CONFIG_MMU */
6160static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6161{
6162 return 0;
6163}
6164static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6165{
6166}
6167static void mem_cgroup_move_task(void)
6168{
6169}
6170#endif
6171
6172/*
6173 * Cgroup retains root cgroups across [un]mount cycles making it necessary
6174 * to verify whether we're attached to the default hierarchy on each mount
6175 * attempt.
6176 */
6177static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
6178{
6179 /*
6180 * use_hierarchy is forced on the default hierarchy. cgroup core
6181 * guarantees that @root doesn't have any children, so turning it
6182 * on for the root memcg is enough.
6183 */
6184 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6185 root_mem_cgroup->use_hierarchy = true;
6186 else
6187 root_mem_cgroup->use_hierarchy = false;
6188}
6189
6190static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6191{
6192 if (value == PAGE_COUNTER_MAX)
6193 seq_puts(m, "max\n");
6194 else
6195 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6196
6197 return 0;
6198}
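/*
 * For illustration (hypothetical numbers): with a 4KiB PAGE_SIZE, a
 * tunable holding 131072 pages is shown as "536870912\n" (512MiB),
 * while one left at PAGE_COUNTER_MAX is shown as "max\n".
 */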
6199
6200static u64 memory_current_read(struct cgroup_subsys_state *css,
6201 struct cftype *cft)
6202{
6203 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6204
6205 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6206}
6207
6208static int memory_min_show(struct seq_file *m, void *v)
6209{
6210 return seq_puts_memcg_tunable(m,
6211 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6212}
6213
6214static ssize_t memory_min_write(struct kernfs_open_file *of,
6215 char *buf, size_t nbytes, loff_t off)
6216{
6217 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6218 unsigned long min;
6219 int err;
6220
6221 buf = strstrip(buf);
6222 err = page_counter_memparse(buf, "max", &min);
6223 if (err)
6224 return err;
6225
6226 page_counter_set_min(&memcg->memory, min);
6227
6228 return nbytes;
6229}
6230
6231static int memory_low_show(struct seq_file *m, void *v)
6232{
6233 return seq_puts_memcg_tunable(m,
6234 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6235}
6236
6237static ssize_t memory_low_write(struct kernfs_open_file *of,
6238 char *buf, size_t nbytes, loff_t off)
6239{
6240 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6241 unsigned long low;
6242 int err;
6243
6244 buf = strstrip(buf);
6245 err = page_counter_memparse(buf, "max", &low);
6246 if (err)
6247 return err;
6248
6249 page_counter_set_low(&memcg->memory, low);
6250
6251 return nbytes;
6252}
6253
6254static int memory_high_show(struct seq_file *m, void *v)
6255{
6256 return seq_puts_memcg_tunable(m,
6257 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6258}
6259
6260static ssize_t memory_high_write(struct kernfs_open_file *of,
6261 char *buf, size_t nbytes, loff_t off)
6262{
6263 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6264 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6265 bool drained = false;
6266 unsigned long high;
6267 int err;
6268
6269 buf = strstrip(buf);
6270 err = page_counter_memparse(buf, "max", &high);
6271 if (err)
6272 return err;
6273
6274 for (;;) {
6275 unsigned long nr_pages = page_counter_read(&memcg->memory);
6276 unsigned long reclaimed;
6277
6278 if (nr_pages <= high)
6279 break;
6280
6281 if (signal_pending(current))
6282 break;
6283
6284 if (!drained) {
6285 drain_all_stock(memcg);
6286 drained = true;
6287 continue;
6288 }
6289
6290 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6291 GFP_KERNEL, true);
6292
6293 if (!reclaimed && !nr_retries--)
6294 break;
6295 }
6296
6297 page_counter_set_high(&memcg->memory, high);
6298
6299 memcg_wb_domain_size_changed(memcg);
6300
6301 return nbytes;
6302}
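/*
 * Sketch of the userspace side (hypothetical cgroup path):
 *
 *	echo 512M > /sys/fs/cgroup/foo/memory.high
 *
 * parses to 131072 pages above; usage in excess of the new high is
 * reclaimed synchronously in this write, bounded by MAX_RECLAIM_RETRIES
 * and interruptible by a pending signal.
 */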
6303
6304static int memory_max_show(struct seq_file *m, void *v)
6305{
6306 return seq_puts_memcg_tunable(m,
6307 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6308}
6309
6310static ssize_t memory_max_write(struct kernfs_open_file *of,
6311 char *buf, size_t nbytes, loff_t off)
6312{
6313 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6314 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6315 bool drained = false;
6316 unsigned long max;
6317 int err;
6318
6319 buf = strstrip(buf);
6320 err = page_counter_memparse(buf, "max", &max);
6321 if (err)
6322 return err;
6323
6324 xchg(&memcg->memory.max, max);
6325
6326 for (;;) {
6327 unsigned long nr_pages = page_counter_read(&memcg->memory);
6328
6329 if (nr_pages <= max)
6330 break;
6331
6332 if (signal_pending(current))
6333 break;
6334
6335 if (!drained) {
6336 drain_all_stock(memcg);
6337 drained = true;
6338 continue;
6339 }
6340
6341 if (nr_reclaims) {
6342 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6343 GFP_KERNEL, true))
6344 nr_reclaims--;
6345 continue;
6346 }
6347
6348 memcg_memory_event(memcg, MEMCG_OOM);
6349 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6350 break;
6351 }
6352
6353 memcg_wb_domain_size_changed(memcg);
6354 return nbytes;
6355}
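/*
 * Unlike memory.high, memory.max is a hard limit: the new value is
 * installed up front (the xchg above), and if reclaim cannot push usage
 * below it, the memcg OOM killer is invoked. For example, writing the
 * string "max" parses to PAGE_COUNTER_MAX and effectively removes the
 * limit.
 */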
6356
6357static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6358{
6359 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6360 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6361 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6362 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6363 seq_printf(m, "oom_kill %lu\n",
6364 atomic_long_read(&events[MEMCG_OOM_KILL]));
6365}
6366
6367static int memory_events_show(struct seq_file *m, void *v)
6368{
6369 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6370
6371 __memory_events_show(m, memcg->memory_events);
6372 return 0;
6373}
6374
6375static int memory_events_local_show(struct seq_file *m, void *v)
6376{
6377 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6378
6379 __memory_events_show(m, memcg->memory_events_local);
6380 return 0;
6381}
6382
6383static int memory_stat_show(struct seq_file *m, void *v)
6384{
6385 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6386 char *buf;
6387
6388 buf = memory_stat_format(memcg);
6389 if (!buf)
6390 return -ENOMEM;
6391 seq_puts(m, buf);
6392 kfree(buf);
6393 return 0;
6394}
6395
6396static int memory_oom_group_show(struct seq_file *m, void *v)
6397{
6398 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6399
6400 seq_printf(m, "%d\n", memcg->oom_group);
6401
6402 return 0;
6403}
6404
6405static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6406 char *buf, size_t nbytes, loff_t off)
6407{
6408 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6409 int ret, oom_group;
6410
6411 buf = strstrip(buf);
6412 if (!buf)
6413 return -EINVAL;
6414
6415 ret = kstrtoint(buf, 0, &oom_group);
6416 if (ret)
6417 return ret;
6418
6419 if (oom_group != 0 && oom_group != 1)
6420 return -EINVAL;
6421
6422 memcg->oom_group = oom_group;
6423
6424 return nbytes;
6425}
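/*
 * Example (hypothetical path): "echo 1 > memory.oom.group" marks the
 * memcg as an indivisible workload, so an OOM kill attributed to it
 * takes down all of its tasks rather than a single victim; writing 0
 * restores per-task selection. Any other value is rejected above.
 */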
6426
6427static struct cftype memory_files[] = {
6428 {
6429 .name = "current",
6430 .flags = CFTYPE_NOT_ON_ROOT,
6431 .read_u64 = memory_current_read,
6432 },
6433 {
6434 .name = "min",
6435 .flags = CFTYPE_NOT_ON_ROOT,
6436 .seq_show = memory_min_show,
6437 .write = memory_min_write,
6438 },
6439 {
6440 .name = "low",
6441 .flags = CFTYPE_NOT_ON_ROOT,
6442 .seq_show = memory_low_show,
6443 .write = memory_low_write,
6444 },
6445 {
6446 .name = "high",
6447 .flags = CFTYPE_NOT_ON_ROOT,
6448 .seq_show = memory_high_show,
6449 .write = memory_high_write,
6450 },
6451 {
6452 .name = "max",
6453 .flags = CFTYPE_NOT_ON_ROOT,
6454 .seq_show = memory_max_show,
6455 .write = memory_max_write,
6456 },
6457 {
6458 .name = "events",
6459 .flags = CFTYPE_NOT_ON_ROOT,
6460 .file_offset = offsetof(struct mem_cgroup, events_file),
6461 .seq_show = memory_events_show,
6462 },
6463 {
6464 .name = "events.local",
6465 .flags = CFTYPE_NOT_ON_ROOT,
6466 .file_offset = offsetof(struct mem_cgroup, events_local_file),
6467 .seq_show = memory_events_local_show,
6468 },
6469 {
6470 .name = "stat",
6471 .seq_show = memory_stat_show,
6472 },
6473 {
6474 .name = "oom.group",
6475 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6476 .seq_show = memory_oom_group_show,
6477 .write = memory_oom_group_write,
6478 },
6479 { } /* terminate */
6480};
6481
6482struct cgroup_subsys memory_cgrp_subsys = {
6483 .css_alloc = mem_cgroup_css_alloc,
6484 .css_online = mem_cgroup_css_online,
6485 .css_offline = mem_cgroup_css_offline,
6486 .css_released = mem_cgroup_css_released,
6487 .css_free = mem_cgroup_css_free,
6488 .css_reset = mem_cgroup_css_reset,
6489 .can_attach = mem_cgroup_can_attach,
6490 .cancel_attach = mem_cgroup_cancel_attach,
6491 .post_attach = mem_cgroup_move_task,
6492 .bind = mem_cgroup_bind,
6493 .dfl_cftypes = memory_files,
6494 .legacy_cftypes = mem_cgroup_legacy_files,
6495 .early_init = 0,
6496};
6497
6498/*
6499 * This function calculates an individual cgroup's effective
6500 * protection which is derived from its own memory.min/low, its
6501 * parent's and siblings' settings, as well as the actual memory
6502 * distribution in the tree.
6503 *
6504 * The following rules apply to the effective protection values:
6505 *
6506 * 1. At the first level of reclaim, effective protection is equal to
6507 * the declared protection in memory.min and memory.low.
6508 *
6509 * 2. To enable safe delegation of the protection configuration, at
6510 * subsequent levels the effective protection is capped to the
6511 * parent's effective protection.
6512 *
6513 * 3. To make complex and dynamic subtrees easier to configure, the
6514 * user is allowed to overcommit the declared protection at a given
6515 * level. If that is the case, the parent's effective protection is
6516 * distributed to the children in proportion to how much protection
6517 * they have declared and how much of it they are utilizing.
6518 *
6519 * This makes distribution proportional, but also work-conserving:
6520 *    if one cgroup claims much more protection than the memory it uses,
6521 * the unused remainder is available to its siblings.
6522 *
6523 * 4. Conversely, when the declared protection is undercommitted at a
6524 * given level, the distribution of the larger parental protection
6525 * budget is NOT proportional. A cgroup's protection from a sibling
6526 * is capped to its own memory.min/low setting.
6527 *
6528 * 5. However, to allow protecting recursive subtrees from each other
6529 * without having to declare each individual cgroup's fixed share
6530 * of the ancestor's claim to protection, any unutilized -
6531 * "floating" - protection from up the tree is distributed in
6532 * proportion to each cgroup's *usage*. This makes the protection
6533 * neutral wrt sibling cgroups and lets them compete freely over
6534 * the shared parental protection budget, but it protects the
6535 * subtree as a whole from neighboring subtrees.
6536 *
6537 * Note that 4. and 5. are not in conflict: 4. is about protecting
6538 * against immediate siblings whereas 5. is about protecting against
6539 * neighboring subtrees.
6540 */
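/*
 * A worked example of rule 3 with made-up numbers: a parent with an
 * effective protection of 100 pages has two children that claim and use
 * 150 and 50 pages respectively (siblings_protected = 200 > 100). Their
 * effective protections become 150 * 100 / 200 = 75 and
 * 50 * 100 / 200 = 25 pages. Rule 5 covers the opposite case: if the
 * children claim nothing themselves, the 100 "floating" pages are handed
 * out in proportion to each child's unprotected usage, as computed at
 * the end of effective_protection() below.
 */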
6541static unsigned long effective_protection(unsigned long usage,
6542 unsigned long parent_usage,
6543 unsigned long setting,
6544 unsigned long parent_effective,
6545 unsigned long siblings_protected)
6546{
6547 unsigned long protected;
6548 unsigned long ep;
6549
6550 protected = min(usage, setting);
6551 /*
6552 * If all cgroups at this level combined claim and use more
6553	 * protection than what the parent affords them, distribute
6554 * shares in proportion to utilization.
6555 *
6556 * We are using actual utilization rather than the statically
6557 * claimed protection in order to be work-conserving: claimed
6558 * but unused protection is available to siblings that would
6559 * otherwise get a smaller chunk than what they claimed.
6560 */
6561 if (siblings_protected > parent_effective)
6562 return protected * parent_effective / siblings_protected;
6563
6564 /*
6565 * Ok, utilized protection of all children is within what the
6566 * parent affords them, so we know whatever this child claims
6567 * and utilizes is effectively protected.
6568 *
6569 * If there is unprotected usage beyond this value, reclaim
6570 * will apply pressure in proportion to that amount.
6571 *
6572 * If there is unutilized protection, the cgroup will be fully
6573 * shielded from reclaim, but we do return a smaller value for
6574 * protection than what the group could enjoy in theory. This
6575 * is okay. With the overcommit distribution above, effective
6576 * protection is always dependent on how memory is actually
6577 * consumed among the siblings anyway.
6578 */
6579 ep = protected;
6580
6581 /*
6582 * If the children aren't claiming (all of) the protection
6583 * afforded to them by the parent, distribute the remainder in
6584 * proportion to the (unprotected) memory of each cgroup. That
6585 * way, cgroups that aren't explicitly prioritized wrt each
6586 * other compete freely over the allowance, but they are
6587 * collectively protected from neighboring trees.
6588 *
6589 * We're using unprotected memory for the weight so that if
6590 * some cgroups DO claim explicit protection, we don't protect
6591 * the same bytes twice.
6592 *
6593 * Check both usage and parent_usage against the respective
6594 * protected values. One should imply the other, but they
6595 * aren't read atomically - make sure the division is sane.
6596 */
6597 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6598 return ep;
6599 if (parent_effective > siblings_protected &&
6600 parent_usage > siblings_protected &&
6601 usage > protected) {
6602 unsigned long unclaimed;
6603
6604 unclaimed = parent_effective - siblings_protected;
6605 unclaimed *= usage - protected;
6606 unclaimed /= parent_usage - siblings_protected;
6607
6608 ep += unclaimed;
6609 }
6610
6611 return ep;
6612}
6613
6614/**
6615 * mem_cgroup_calculate_protection - calculate a memcg's effective min/low protection
6616 * @root: the top ancestor of the sub-tree being checked
6617 * @memcg: the memory cgroup to check
6618 *
6619 * WARNING: This function is not stateless! It can only be used as part
6620 * of a top-down tree iteration, not for isolated queries.
6621 */
6622void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6623 struct mem_cgroup *memcg)
6624{
6625 unsigned long usage, parent_usage;
6626 struct mem_cgroup *parent;
6627
6628 if (mem_cgroup_disabled())
6629 return;
6630
6631 if (!root)
6632 root = root_mem_cgroup;
6633
6634 /*
6635 * Effective values of the reclaim targets are ignored so they
6636 * can be stale. Have a look at mem_cgroup_protection for more
6637 * details.
6638 * TODO: calculation should be more robust so that we do not need
6639 * that special casing.
6640 */
6641 if (memcg == root)
6642 return;
6643
6644 usage = page_counter_read(&memcg->memory);
6645 if (!usage)
6646 return;
6647
6648 parent = parent_mem_cgroup(memcg);
6649 /* No parent means a non-hierarchical mode on v1 memcg */
6650 if (!parent)
6651 return;
6652
6653 if (parent == root) {
6654 memcg->memory.emin = READ_ONCE(memcg->memory.min);
6655 memcg->memory.elow = READ_ONCE(memcg->memory.low);
6656 return;
6657 }
6658
6659 parent_usage = page_counter_read(&parent->memory);
6660
6661 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6662 READ_ONCE(memcg->memory.min),
6663 READ_ONCE(parent->memory.emin),
6664 atomic_long_read(&parent->memory.children_min_usage)));
6665
6666 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6667 READ_ONCE(memcg->memory.low),
6668 READ_ONCE(parent->memory.elow),
6669 atomic_long_read(&parent->memory.children_low_usage)));
6670}
6671
6672/**
6673 * mem_cgroup_charge - charge a newly allocated page to a cgroup
6674 * @page: page to charge
6675 * @mm: mm context of the victim
6676 * @gfp_mask: reclaim mode
6677 *
6678 * Try to charge @page to the memcg that @mm belongs to, reclaiming
6679 * pages according to @gfp_mask if necessary.
6680 *
6681 * Returns 0 on success. Otherwise, an error code is returned.
6682 */
6683int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
6684{
6685 unsigned int nr_pages = thp_nr_pages(page);
6686 struct mem_cgroup *memcg = NULL;
6687 int ret = 0;
6688
6689 if (mem_cgroup_disabled())
6690 goto out;
6691
6692 if (PageSwapCache(page)) {
6693 swp_entry_t ent = { .val = page_private(page), };
6694 unsigned short id;
6695
6696 /*
6697 * Every swap fault against a single page tries to charge the
6698 * page, bail as early as possible. shmem_unuse() encounters
6699 * already charged pages, too. page->mem_cgroup is protected
6700 * by the page lock, which serializes swap cache removal, which
6701 * in turn serializes uncharging.
6702 */
6703 VM_BUG_ON_PAGE(!PageLocked(page), page);
6704 if (compound_head(page)->mem_cgroup)
6705 goto out;
6706
6707 id = lookup_swap_cgroup_id(ent);
6708 rcu_read_lock();
6709 memcg = mem_cgroup_from_id(id);
6710 if (memcg && !css_tryget_online(&memcg->css))
6711 memcg = NULL;
6712 rcu_read_unlock();
6713 }
6714
6715 if (!memcg)
6716 memcg = get_mem_cgroup_from_mm(mm);
6717
6718 ret = try_charge(memcg, gfp_mask, nr_pages);
6719 if (ret)
6720 goto out_put;
6721
6722 css_get(&memcg->css);
6723 commit_charge(page, memcg);
6724
6725 local_irq_disable();
6726 mem_cgroup_charge_statistics(memcg, page, nr_pages);
6727 memcg_check_events(memcg, page);
6728 local_irq_enable();
6729
6730 if (PageSwapCache(page)) {
6731 swp_entry_t entry = { .val = page_private(page) };
6732 /*
6733 * The swap entry might not get freed for a long time,
6734 * let's not wait for it. The page already received a
6735 * memory+swap charge, drop the swap entry duplicate.
6736 */
6737 mem_cgroup_uncharge_swap(entry, nr_pages);
6738 }
6739
6740out_put:
6741 css_put(&memcg->css);
6742out:
6743 return ret;
6744}
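/*
 * A minimal call-site sketch (illustrative, not lifted from any one
 * caller): a fault path that just allocated @page would charge it
 * before mapping and back out on failure:
 *
 *	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL)) {
 *		put_page(page);
 *		return VM_FAULT_OOM;
 *	}
 */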
6745
6746struct uncharge_gather {
6747 struct mem_cgroup *memcg;
6748 unsigned long nr_pages;
6749 unsigned long pgpgout;
6750 unsigned long nr_kmem;
6751 struct page *dummy_page;
6752};
6753
6754static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6755{
6756 memset(ug, 0, sizeof(*ug));
6757}
6758
6759static void uncharge_batch(const struct uncharge_gather *ug)
6760{
6761 unsigned long flags;
6762
6763 if (!mem_cgroup_is_root(ug->memcg)) {
6764 page_counter_uncharge(&ug->memcg->memory, ug->nr_pages);
6765 if (do_memsw_account())
6766 page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages);
6767 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
6768 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
6769 memcg_oom_recover(ug->memcg);
6770 }
6771
6772 local_irq_save(flags);
6773 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6774 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
6775 memcg_check_events(ug->memcg, ug->dummy_page);
6776 local_irq_restore(flags);
6777
6778 /* drop reference from uncharge_page */
6779 css_put(&ug->memcg->css);
6780}
6781
6782static void uncharge_page(struct page *page, struct uncharge_gather *ug)
6783{
6784 unsigned long nr_pages;
6785
6786 VM_BUG_ON_PAGE(PageLRU(page), page);
6787
6788 if (!page->mem_cgroup)
6789 return;
6790
6791 /*
6792 * Nobody should be changing or seriously looking at
6793 * page->mem_cgroup at this point, we have fully
6794 * exclusive access to the page.
6795 */
6796
6797 if (ug->memcg != page->mem_cgroup) {
6798 if (ug->memcg) {
6799 uncharge_batch(ug);
6800 uncharge_gather_clear(ug);
6801 }
6802 ug->memcg = page->mem_cgroup;
6803
6804 /* pairs with css_put in uncharge_batch */
6805 css_get(&ug->memcg->css);
6806 }
6807
6808 nr_pages = compound_nr(page);
6809 ug->nr_pages += nr_pages;
6810
6811 if (!PageKmemcg(page)) {
6812 ug->pgpgout++;
6813 } else {
6814 ug->nr_kmem += nr_pages;
6815 __ClearPageKmemcg(page);
6816 }
6817
6818 ug->dummy_page = page;
6819 page->mem_cgroup = NULL;
6820 css_put(&ug->memcg->css);
6821}
6822
6823static void uncharge_list(struct list_head *page_list)
6824{
6825 struct uncharge_gather ug;
6826 struct list_head *next;
6827
6828 uncharge_gather_clear(&ug);
6829
6830 /*
6831 * Note that the list can be a single page->lru; hence the
6832 * do-while loop instead of a simple list_for_each_entry().
6833 */
6834 next = page_list->next;
6835 do {
6836 struct page *page;
6837
6838 page = list_entry(next, struct page, lru);
6839 next = page->lru.next;
6840
6841 uncharge_page(page, &ug);
6842 } while (next != page_list);
6843
6844 if (ug.memcg)
6845 uncharge_batch(&ug);
6846}
6847
6848/**
6849 * mem_cgroup_uncharge - uncharge a page
6850 * @page: page to uncharge
6851 *
6852 * Uncharge a page previously charged with mem_cgroup_charge().
6853 */
6854void mem_cgroup_uncharge(struct page *page)
6855{
6856 struct uncharge_gather ug;
6857
6858 if (mem_cgroup_disabled())
6859 return;
6860
6861 /* Don't touch page->lru of any random page, pre-check: */
6862 if (!page->mem_cgroup)
6863 return;
6864
6865 uncharge_gather_clear(&ug);
6866 uncharge_page(page, &ug);
6867 uncharge_batch(&ug);
6868}
6869
6870/**
6871 * mem_cgroup_uncharge_list - uncharge a list of pages
6872 * @page_list: list of pages to uncharge
6873 *
6874 * Uncharge a list of pages previously charged with
6875 * mem_cgroup_charge().
6876 */
6877void mem_cgroup_uncharge_list(struct list_head *page_list)
6878{
6879 if (mem_cgroup_disabled())
6880 return;
6881
6882 if (!list_empty(page_list))
6883 uncharge_list(page_list);
6884}
6885
6886/**
6887 * mem_cgroup_migrate - charge a page's replacement
6888 * @oldpage: currently circulating page
6889 * @newpage: replacement page
6890 *
6891 * Charge @newpage as a replacement page for @oldpage. @oldpage will
6892 * be uncharged upon free.
6893 *
6894 * Both pages must be locked, @newpage->mapping must be set up.
6895 */
6896void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
6897{
6898 struct mem_cgroup *memcg;
6899 unsigned int nr_pages;
6900 unsigned long flags;
6901
6902 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
6903 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
6904 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
6905 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
6906 newpage);
6907
6908 if (mem_cgroup_disabled())
6909 return;
6910
6911 /* Page cache replacement: new page already charged? */
6912 if (newpage->mem_cgroup)
6913 return;
6914
6915 /* Swapcache readahead pages can get replaced before being charged */
6916 memcg = oldpage->mem_cgroup;
6917 if (!memcg)
6918 return;
6919
6920 /* Force-charge the new page. The old one will be freed soon */
6921 nr_pages = thp_nr_pages(newpage);
6922
6923 page_counter_charge(&memcg->memory, nr_pages);
6924 if (do_memsw_account())
6925 page_counter_charge(&memcg->memsw, nr_pages);
6926
6927 css_get(&memcg->css);
6928 commit_charge(newpage, memcg);
6929
6930 local_irq_save(flags);
6931 mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
6932 memcg_check_events(memcg, newpage);
6933 local_irq_restore(flags);
6934}
6935
6936DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
6937EXPORT_SYMBOL(memcg_sockets_enabled_key);
6938
6939void mem_cgroup_sk_alloc(struct sock *sk)
6940{
6941 struct mem_cgroup *memcg;
6942
6943 if (!mem_cgroup_sockets_enabled)
6944 return;
6945
6946	/* Do not associate the sock with an unrelated interrupted task's memcg. */
6947 if (in_interrupt())
6948 return;
6949
6950 rcu_read_lock();
6951 memcg = mem_cgroup_from_task(current);
6952 if (memcg == root_mem_cgroup)
6953 goto out;
6954 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
6955 goto out;
6956 if (css_tryget(&memcg->css))
6957 sk->sk_memcg = memcg;
6958out:
6959 rcu_read_unlock();
6960}
6961
6962void mem_cgroup_sk_free(struct sock *sk)
6963{
6964 if (sk->sk_memcg)
6965 css_put(&sk->sk_memcg->css);
6966}
6967
6968/**
6969 * mem_cgroup_charge_skmem - charge socket memory
6970 * @memcg: memcg to charge
6971 * @nr_pages: number of pages to charge
6972 *
6973 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
6974 * @memcg's configured limit, %false if the charge had to be forced.
6975 */
6976bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
6977{
6978 gfp_t gfp_mask = GFP_KERNEL;
6979
6980 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6981 struct page_counter *fail;
6982
6983 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
6984 memcg->tcpmem_pressure = 0;
6985 return true;
6986 }
6987 page_counter_charge(&memcg->tcpmem, nr_pages);
6988 memcg->tcpmem_pressure = 1;
6989 return false;
6990 }
6991
6992 /* Don't block in the packet receive path */
6993 if (in_softirq())
6994 gfp_mask = GFP_NOWAIT;
6995
6996 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
6997
6998 if (try_charge(memcg, gfp_mask, nr_pages) == 0)
6999 return true;
7000
7001 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
7002 return false;
7003}
7004
7005/**
7006 * mem_cgroup_uncharge_skmem - uncharge socket memory
7007 * @memcg: memcg to uncharge
7008 * @nr_pages: number of pages to uncharge
7009 */
7010void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7011{
7012 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7013 page_counter_uncharge(&memcg->tcpmem, nr_pages);
7014 return;
7015 }
7016
7017 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7018
7019 refill_stock(memcg, nr_pages);
7020}
7021
7022static int __init cgroup_memory(char *s)
7023{
7024 char *token;
7025
7026 while ((token = strsep(&s, ",")) != NULL) {
7027 if (!*token)
7028 continue;
7029 if (!strcmp(token, "nosocket"))
7030 cgroup_memory_nosocket = true;
7031 if (!strcmp(token, "nokmem"))
7032 cgroup_memory_nokmem = true;
7033 }
7034 return 0;
7035}
7036__setup("cgroup.memory=", cgroup_memory);
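/*
 * Example: booting with "cgroup.memory=nosocket,nokmem" disables both
 * socket and kernel memory accounting; empty and unrecognized tokens
 * are silently skipped by the parser above.
 */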
7037
7038/*
7039 * subsys_initcall() for memory controller.
7040 *
7041 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7042 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7043 * basically everything that doesn't depend on a specific mem_cgroup structure
7044 * should be initialized from here.
7045 */
7046static int __init mem_cgroup_init(void)
7047{
7048 int cpu, node;
7049
7050 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7051 memcg_hotplug_cpu_dead);
7052
7053 for_each_possible_cpu(cpu)
7054 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7055 drain_local_stock);
7056
7057 for_each_node(node) {
7058 struct mem_cgroup_tree_per_node *rtpn;
7059
7060 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
7061 node_online(node) ? node : NUMA_NO_NODE);
7062
7063 rtpn->rb_root = RB_ROOT;
7064 rtpn->rb_rightmost = NULL;
7065 spin_lock_init(&rtpn->lock);
7066 soft_limit_tree.rb_tree_per_node[node] = rtpn;
7067 }
7068
7069 return 0;
7070}
7071subsys_initcall(mem_cgroup_init);
7072
7073#ifdef CONFIG_MEMCG_SWAP
7074static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7075{
7076 while (!refcount_inc_not_zero(&memcg->id.ref)) {
7077 /*
7078		 * The root cgroup cannot be destroyed, so its refcount must
7079 * always be >= 1.
7080 */
7081 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
7082 VM_BUG_ON(1);
7083 break;
7084 }
7085 memcg = parent_mem_cgroup(memcg);
7086 if (!memcg)
7087 memcg = root_mem_cgroup;
7088 }
7089 return memcg;
7090}
7091
7092/**
7093 * mem_cgroup_swapout - transfer a memsw charge to swap
7094 * @page: page whose memsw charge to transfer
7095 * @entry: swap entry to move the charge to
7096 *
7097 * Transfer the memsw charge of @page to @entry.
7098 */
7099void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
7100{
7101 struct mem_cgroup *memcg, *swap_memcg;
7102 unsigned int nr_entries;
7103 unsigned short oldid;
7104
7105 VM_BUG_ON_PAGE(PageLRU(page), page);
7106 VM_BUG_ON_PAGE(page_count(page), page);
7107
7108 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7109 return;
7110
7111 memcg = page->mem_cgroup;
7112
7113 /* Readahead page, never charged */
7114 if (!memcg)
7115 return;
7116
7117 /*
7118 * In case the memcg owning these pages has been offlined and doesn't
7119 * have an ID allocated to it anymore, charge the closest online
7120 * ancestor for the swap instead and transfer the memory+swap charge.
7121 */
7122 swap_memcg = mem_cgroup_id_get_online(memcg);
7123 nr_entries = thp_nr_pages(page);
7124 /* Get references for the tail pages, too */
7125 if (nr_entries > 1)
7126 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7127 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7128 nr_entries);
7129 VM_BUG_ON_PAGE(oldid, page);
7130 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7131
7132 page->mem_cgroup = NULL;
7133
7134 if (!mem_cgroup_is_root(memcg))
7135 page_counter_uncharge(&memcg->memory, nr_entries);
7136
7137 if (!cgroup_memory_noswap && memcg != swap_memcg) {
7138 if (!mem_cgroup_is_root(swap_memcg))
7139 page_counter_charge(&swap_memcg->memsw, nr_entries);
7140 page_counter_uncharge(&memcg->memsw, nr_entries);
7141 }
7142
7143 /*
7144 * Interrupts should be disabled here because the caller holds the
7145 * i_pages lock which is taken with interrupts-off. It is
7146 * important here to have the interrupts disabled because it is the
7147 * only synchronisation we have for updating the per-CPU variables.
7148 */
7149 VM_BUG_ON(!irqs_disabled());
7150 mem_cgroup_charge_statistics(memcg, page, -nr_entries);
7151 memcg_check_events(memcg, page);
7152
7153 css_put(&memcg->css);
7154}
7155
7156/**
7157 * mem_cgroup_try_charge_swap - try charging swap space for a page
7158 * @page: page being added to swap
7159 * @entry: swap entry to charge
7160 *
7161 * Try to charge @page's memcg for the swap space at @entry.
7162 *
7163 * Returns 0 on success, -ENOMEM on failure.
7164 */
7165int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
7166{
7167 unsigned int nr_pages = thp_nr_pages(page);
7168 struct page_counter *counter;
7169 struct mem_cgroup *memcg;
7170 unsigned short oldid;
7171
7172 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7173 return 0;
7174
7175 memcg = page->mem_cgroup;
7176
7177 /* Readahead page, never charged */
7178 if (!memcg)
7179 return 0;
7180
7181 if (!entry.val) {
7182 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7183 return 0;
7184 }
7185
7186 memcg = mem_cgroup_id_get_online(memcg);
7187
7188 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) &&
7189 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7190 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7191 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7192 mem_cgroup_id_put(memcg);
7193 return -ENOMEM;
7194 }
7195
7196 /* Get references for the tail pages, too */
7197 if (nr_pages > 1)
7198 mem_cgroup_id_get_many(memcg, nr_pages - 1);
7199 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7200 VM_BUG_ON_PAGE(oldid, page);
7201 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7202
7203 return 0;
7204}
7205
7206/**
7207 * mem_cgroup_uncharge_swap - uncharge swap space
7208 * @entry: swap entry to uncharge
7209 * @nr_pages: the amount of swap space to uncharge
7210 */
7211void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7212{
7213 struct mem_cgroup *memcg;
7214 unsigned short id;
7215
7216 id = swap_cgroup_record(entry, 0, nr_pages);
7217 rcu_read_lock();
7218 memcg = mem_cgroup_from_id(id);
7219 if (memcg) {
7220 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) {
7221 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7222 page_counter_uncharge(&memcg->swap, nr_pages);
7223 else
7224 page_counter_uncharge(&memcg->memsw, nr_pages);
7225 }
7226 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7227 mem_cgroup_id_put_many(memcg, nr_pages);
7228 }
7229 rcu_read_unlock();
7230}
7231
7232long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7233{
7234 long nr_swap_pages = get_nr_swap_pages();
7235
7236 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7237 return nr_swap_pages;
7238 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7239 nr_swap_pages = min_t(long, nr_swap_pages,
7240 READ_ONCE(memcg->swap.max) -
7241 page_counter_read(&memcg->swap));
7242 return nr_swap_pages;
7243}
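/*
 * Illustration with made-up numbers: given 1000 free swap pages
 * globally, a memcg with swap.max = 300 and 100 pages of swap already
 * charged may swap out at most min(1000, 300 - 100) = 200 pages; the
 * loop applies the same clamp at every ancestor up to the root.
 */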
7244
7245bool mem_cgroup_swap_full(struct page *page)
7246{
7247 struct mem_cgroup *memcg;
7248
7249 VM_BUG_ON_PAGE(!PageLocked(page), page);
7250
7251 if (vm_swap_full())
7252 return true;
7253 if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7254 return false;
7255
7256 memcg = page->mem_cgroup;
7257 if (!memcg)
7258 return false;
7259
7260 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
7261 unsigned long usage = page_counter_read(&memcg->swap);
7262
7263 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7264 usage * 2 >= READ_ONCE(memcg->swap.max))
7265 return true;
7266 }
7267
7268 return false;
7269}
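/*
 * E.g. (hypothetical): with swap.high unset and swap.max = 400 pages, a
 * page owned by a memcg whose swap usage has reached 200 pages satisfies
 * usage * 2 >= max and is reported as "swap full", mirroring the
 * half-full heuristic of the global vm_swap_full() check.
 */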
7270
7271static int __init setup_swap_account(char *s)
7272{
7273 if (!strcmp(s, "1"))
7274 cgroup_memory_noswap = 0;
7275 else if (!strcmp(s, "0"))
7276 cgroup_memory_noswap = 1;
7277 return 1;
7278}
7279__setup("swapaccount=", setup_swap_account);
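/*
 * Example: "swapaccount=0" on the kernel command line sets
 * cgroup_memory_noswap and disables swap accounting entirely;
 * "swapaccount=1" (re-)enables it. Any other value is ignored.
 */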
7280
7281static u64 swap_current_read(struct cgroup_subsys_state *css,
7282 struct cftype *cft)
7283{
7284 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7285
7286 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7287}
7288
7289static int swap_high_show(struct seq_file *m, void *v)
7290{
7291 return seq_puts_memcg_tunable(m,
7292 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7293}
7294
7295static ssize_t swap_high_write(struct kernfs_open_file *of,
7296 char *buf, size_t nbytes, loff_t off)
7297{
7298 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7299 unsigned long high;
7300 int err;
7301
7302 buf = strstrip(buf);
7303 err = page_counter_memparse(buf, "max", &high);
7304 if (err)
7305 return err;
7306
7307 page_counter_set_high(&memcg->swap, high);
7308
7309 return nbytes;
7310}
7311
7312static int swap_max_show(struct seq_file *m, void *v)
7313{
7314 return seq_puts_memcg_tunable(m,
7315 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7316}
7317
7318static ssize_t swap_max_write(struct kernfs_open_file *of,
7319 char *buf, size_t nbytes, loff_t off)
7320{
7321 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7322 unsigned long max;
7323 int err;
7324
7325 buf = strstrip(buf);
7326 err = page_counter_memparse(buf, "max", &max);
7327 if (err)
7328 return err;
7329
7330 xchg(&memcg->swap.max, max);
7331
7332 return nbytes;
7333}
7334
7335static int swap_events_show(struct seq_file *m, void *v)
7336{
7337 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7338
7339 seq_printf(m, "high %lu\n",
7340 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
7341 seq_printf(m, "max %lu\n",
7342 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7343 seq_printf(m, "fail %lu\n",
7344 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7345
7346 return 0;
7347}
7348
7349static struct cftype swap_files[] = {
7350 {
7351 .name = "swap.current",
7352 .flags = CFTYPE_NOT_ON_ROOT,
7353 .read_u64 = swap_current_read,
7354 },
7355 {
7356 .name = "swap.high",
7357 .flags = CFTYPE_NOT_ON_ROOT,
7358 .seq_show = swap_high_show,
7359 .write = swap_high_write,
7360 },
7361 {
7362 .name = "swap.max",
7363 .flags = CFTYPE_NOT_ON_ROOT,
7364 .seq_show = swap_max_show,
7365 .write = swap_max_write,
7366 },
7367 {
7368 .name = "swap.events",
7369 .flags = CFTYPE_NOT_ON_ROOT,
7370 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
7371 .seq_show = swap_events_show,
7372 },
7373 { } /* terminate */
7374};
7375
7376static struct cftype memsw_files[] = {
7377 {
7378 .name = "memsw.usage_in_bytes",
7379 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7380 .read_u64 = mem_cgroup_read_u64,
7381 },
7382 {
7383 .name = "memsw.max_usage_in_bytes",
7384 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7385 .write = mem_cgroup_reset,
7386 .read_u64 = mem_cgroup_read_u64,
7387 },
7388 {
7389 .name = "memsw.limit_in_bytes",
7390 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7391 .write = mem_cgroup_write,
7392 .read_u64 = mem_cgroup_read_u64,
7393 },
7394 {
7395 .name = "memsw.failcnt",
7396 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7397 .write = mem_cgroup_reset,
7398 .read_u64 = mem_cgroup_read_u64,
7399 },
7400 { }, /* terminate */
7401};
7402
7403/*
7404 * If mem_cgroup_swap_init() were implemented as a subsys_initcall()
7405 * instead of a core_initcall(), cgroup_memory_noswap could remain
7406 * false even when memcg is disabled via the "cgroup_disable=memory"
7407 * boot parameter, which may result in a premature oops inside
7408 * mem_cgroup_get_nr_swap_pages() in corner cases.
7409 */
7410static int __init mem_cgroup_swap_init(void)
7411{
7412 /* No memory control -> no swap control */
7413 if (mem_cgroup_disabled())
7414 cgroup_memory_noswap = true;
7415
7416 if (cgroup_memory_noswap)
7417 return 0;
7418
7419 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
7420 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
7421
7422 return 0;
7423}
7424core_initcall(mem_cgroup_swap_init);
7425
7426#endif /* CONFIG_MEMCG_SWAP */