1/* memcontrol.c - Memory Controller
2 *
3 * Copyright IBM Corporation, 2007
4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5 *
6 * Copyright 2007 OpenVZ SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 *
9 * Memory thresholds
10 * Copyright (C) 2009 Nokia Corporation
11 * Author: Kirill A. Shutemov
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 */
23
24#include <linux/res_counter.h>
25#include <linux/memcontrol.h>
26#include <linux/cgroup.h>
27#include <linux/mm.h>
28#include <linux/hugetlb.h>
29#include <linux/pagemap.h>
30#include <linux/smp.h>
31#include <linux/page-flags.h>
32#include <linux/backing-dev.h>
33#include <linux/bit_spinlock.h>
34#include <linux/rcupdate.h>
35#include <linux/limits.h>
36#include <linux/mutex.h>
37#include <linux/rbtree.h>
38#include <linux/slab.h>
39#include <linux/swap.h>
40#include <linux/swapops.h>
41#include <linux/spinlock.h>
42#include <linux/eventfd.h>
43#include <linux/sort.h>
44#include <linux/fs.h>
45#include <linux/seq_file.h>
46#include <linux/vmalloc.h>
47#include <linux/mm_inline.h>
48#include <linux/page_cgroup.h>
49#include <linux/cpu.h>
50#include <linux/oom.h>
51#include "internal.h"
52
53#include <asm/uaccess.h>
54
55#include <trace/events/vmscan.h>
56
57struct cgroup_subsys mem_cgroup_subsys __read_mostly;
58#define MEM_CGROUP_RECLAIM_RETRIES 5
59struct mem_cgroup *root_mem_cgroup __read_mostly;
60
61#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
62/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
63int do_swap_account __read_mostly;
64
65/* for remembering the boot option */
66#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
67static int really_do_swap_account __initdata = 1;
68#else
69static int really_do_swap_account __initdata = 0;
70#endif
71
72#else
73#define do_swap_account (0)
74#endif
75
76
77/*
78 * Statistics for memory cgroup.
79 */
80enum mem_cgroup_stat_index {
81 /*
82 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
83 */
84 MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
85 MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
86 MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
87 MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
88 MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
89 MEM_CGROUP_ON_MOVE, /* someone is moving account between groups */
90 MEM_CGROUP_STAT_NSTATS,
91};
92
93enum mem_cgroup_events_index {
94 MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */
95 MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */
96 MEM_CGROUP_EVENTS_COUNT, /* # of pages paged in/out */
97 MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */
98 MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */
99 MEM_CGROUP_EVENTS_NSTATS,
100};
101/*
102 * Per memcg event counter is incremented at every pagein/pageout. With THP,
103 * it will be incremented by the number of pages. This counter is used
104 * to trigger some periodic events. This is straightforward and better
105 * than using jiffies etc. to handle periodic memcg events.
106 */
107enum mem_cgroup_events_target {
108 MEM_CGROUP_TARGET_THRESH,
109 MEM_CGROUP_TARGET_SOFTLIMIT,
110 MEM_CGROUP_TARGET_NUMAINFO,
111 MEM_CGROUP_NTARGETS,
112};
113#define THRESHOLDS_EVENTS_TARGET (128)
114#define SOFTLIMIT_EVENTS_TARGET (1024)
115#define NUMAINFO_EVENTS_TARGET (1024)
116
117struct mem_cgroup_stat_cpu {
118 long count[MEM_CGROUP_STAT_NSTATS];
119 unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
120 unsigned long targets[MEM_CGROUP_NTARGETS];
121};
122
123/*
124 * per-zone information in memory controller.
125 */
126struct mem_cgroup_per_zone {
127 /*
128 * spin_lock to protect the per cgroup LRU
129 */
130 struct list_head lists[NR_LRU_LISTS];
131 unsigned long count[NR_LRU_LISTS];
132
133 struct zone_reclaim_stat reclaim_stat;
134 struct rb_node tree_node; /* RB tree node */
135 unsigned long long usage_in_excess;/* Set to the value by which */
136 /* the soft limit is exceeded*/
137 bool on_tree;
138 struct mem_cgroup *mem; /* Back pointer, we cannot */
139 /* use container_of */
140};
141/* Macro for accessing counter */
142#define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)])
143
144struct mem_cgroup_per_node {
145 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
146};
147
148struct mem_cgroup_lru_info {
149 struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
150};
151
152/*
153 * Cgroups above their limits are maintained in a RB-Tree, independent of
154 * their hierarchy representation
155 */
156
157struct mem_cgroup_tree_per_zone {
158 struct rb_root rb_root;
159 spinlock_t lock;
160};
161
162struct mem_cgroup_tree_per_node {
163 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
164};
165
166struct mem_cgroup_tree {
167 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
168};
169
170static struct mem_cgroup_tree soft_limit_tree __read_mostly;
171
172struct mem_cgroup_threshold {
173 struct eventfd_ctx *eventfd;
174 u64 threshold;
175};
176
177/* For threshold */
178struct mem_cgroup_threshold_ary {
179 /* An array index points to threshold just below usage. */
180 int current_threshold;
181 /* Size of entries[] */
182 unsigned int size;
183 /* Array of thresholds */
184 struct mem_cgroup_threshold entries[0];
185};
186
187struct mem_cgroup_thresholds {
188 /* Primary thresholds array */
189 struct mem_cgroup_threshold_ary *primary;
190 /*
191 * Spare threshold array.
192 * This is needed to make mem_cgroup_unregister_event() "never fail".
193 * It must be able to store at least primary->size - 1 entries.
194 */
195 struct mem_cgroup_threshold_ary *spare;
196};
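
/*
 * Illustrative note (not in the original source): the unregister path copies
 * the surviving entries from "primary" into the preallocated "spare" array and
 * then swaps the two pointers under thresholds_lock, so no allocation (and
 * therefore no failure) happens on that path.
 */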
197
198/* for OOM */
199struct mem_cgroup_eventfd_list {
200 struct list_head list;
201 struct eventfd_ctx *eventfd;
202};
203
204static void mem_cgroup_threshold(struct mem_cgroup *mem);
205static void mem_cgroup_oom_notify(struct mem_cgroup *mem);
206
207/*
208 * The memory controller data structure. The memory controller controls both
209 * page cache and RSS per cgroup. We would eventually like to provide
210 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
211 * to help the administrator determine what knobs to tune.
212 *
213 * TODO: Add a water mark for the memory controller. Reclaim will begin when
214 * we hit the water mark. May be even add a low water mark, such that
215 * no reclaim occurs from a cgroup at its low water mark; this is
216 * a feature that will be implemented much later in the future.
217 */
218struct mem_cgroup {
219 struct cgroup_subsys_state css;
220 /*
221 * the counter to account for memory usage
222 */
223 struct res_counter res;
224 /*
225 * the counter to account for mem+swap usage.
226 */
227 struct res_counter memsw;
228 /*
229 * Per cgroup active and inactive list, similar to the
230 * per zone LRU lists.
231 */
232 struct mem_cgroup_lru_info info;
233 /*
234 * While reclaiming in a hierarchy, we cache the last child we
235 * reclaimed from.
236 */
237 int last_scanned_child;
238 int last_scanned_node;
239#if MAX_NUMNODES > 1
240 nodemask_t scan_nodes;
241 atomic_t numainfo_events;
242 atomic_t numainfo_updating;
243#endif
244 /*
245 * Should the accounting and control be hierarchical, per subtree?
246 */
247 bool use_hierarchy;
248
249 bool oom_lock;
250 atomic_t under_oom;
251
252 atomic_t refcnt;
253
254 int swappiness;
255 /* OOM-Killer disable */
256 int oom_kill_disable;
257
258 /* set when res.limit == memsw.limit */
259 bool memsw_is_minimum;
260
261 /* protect arrays of thresholds */
262 struct mutex thresholds_lock;
263
264 /* thresholds for memory usage. RCU-protected */
265 struct mem_cgroup_thresholds thresholds;
266
267 /* thresholds for mem+swap usage. RCU-protected */
268 struct mem_cgroup_thresholds memsw_thresholds;
269
270 /* For oom notifier event fd */
271 struct list_head oom_notify;
272
273 /*
274 * Should we move charges of a task when a task is moved into this
275 * mem_cgroup ? And what type of charges should we move ?
276 */
277 unsigned long move_charge_at_immigrate;
278 /*
279 * percpu counter.
280 */
281 struct mem_cgroup_stat_cpu *stat;
282 /*
283 * used when a cpu is offlined or other synchronizations
284 * See mem_cgroup_read_stat().
285 */
286 struct mem_cgroup_stat_cpu nocpu_base;
287 spinlock_t pcp_counter_lock;
288};
289
290/* Stuff for moving charges at task migration. */
291/*
292 * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
293 * left-shifted bitmap of these types.
294 */
295enum move_type {
296 MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */
297 MOVE_CHARGE_TYPE_FILE, /* file page(including tmpfs) and swap of it */
298 NR_MOVE_TYPE,
299};
300
301/* "mc" and its members are protected by cgroup_mutex */
302static struct move_charge_struct {
303 spinlock_t lock; /* for from, to */
304 struct mem_cgroup *from;
305 struct mem_cgroup *to;
306 unsigned long precharge;
307 unsigned long moved_charge;
308 unsigned long moved_swap;
309 struct task_struct *moving_task; /* a task moving charges */
310 wait_queue_head_t waitq; /* a waitq for other context */
311} mc = {
312 .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
313 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
314};
315
316static bool move_anon(void)
317{
318 return test_bit(MOVE_CHARGE_TYPE_ANON,
319 &mc.to->move_charge_at_immigrate);
320}
321
322static bool move_file(void)
323{
324 return test_bit(MOVE_CHARGE_TYPE_FILE,
325 &mc.to->move_charge_at_immigrate);
326}
327
328/*
329 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
330 * limit reclaim to prevent infinite loops, if they ever occur.
331 */
332#define MEM_CGROUP_MAX_RECLAIM_LOOPS (100)
333#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)
334
335enum charge_type {
336 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
337 MEM_CGROUP_CHARGE_TYPE_MAPPED,
338 MEM_CGROUP_CHARGE_TYPE_SHMEM, /* used by page migration of shmem */
339 MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */
340 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
341 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */
342 NR_CHARGE_TYPE,
343};
344
345/* for encoding cft->private value on file */
346#define _MEM (0)
347#define _MEMSWAP (1)
348#define _OOM_TYPE (2)
349#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
350#define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff)
351#define MEMFILE_ATTR(val) ((val) & 0xffff)
352/* Used for OOM notifier */
353#define OOM_CONTROL (0)
354
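/*
 * Illustrative sketch (not part of the original file): how a counter type and
 * a res_counter member round-trip through cft->private via the MEMFILE_*
 * helpers above. RES_LIMIT comes from <linux/res_counter.h>; the helper name
 * is made up for the example and is not used anywhere.
 */
static inline bool memfile_private_roundtrip_example(void)
{
	int priv = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT);

	/* the type lives in the high 16 bits, the attribute in the low 16 bits */
	return MEMFILE_TYPE(priv) == _MEMSWAP && MEMFILE_ATTR(priv) == RES_LIMIT;
}
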
355/*
356 * Reclaim flags for mem_cgroup_hierarchical_reclaim
357 */
358#define MEM_CGROUP_RECLAIM_NOSWAP_BIT 0x0
359#define MEM_CGROUP_RECLAIM_NOSWAP (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
360#define MEM_CGROUP_RECLAIM_SHRINK_BIT 0x1
361#define MEM_CGROUP_RECLAIM_SHRINK (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
362#define MEM_CGROUP_RECLAIM_SOFT_BIT 0x2
363#define MEM_CGROUP_RECLAIM_SOFT (1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
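
/*
 * Illustrative note (not in the original source): callers combine these flags;
 * for example, shrinking the mem+swap limit passes
 * MEM_CGROUP_RECLAIM_NOSWAP | MEM_CGROUP_RECLAIM_SHRINK so that reclaim does
 * not swap (swapping would not reduce mem+swap usage) and returns as soon as
 * it makes progress.
 */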
364
365static void mem_cgroup_get(struct mem_cgroup *mem);
366static void mem_cgroup_put(struct mem_cgroup *mem);
367static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
368static void drain_all_stock_async(struct mem_cgroup *mem);
369
370static struct mem_cgroup_per_zone *
371mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
372{
373 return &mem->info.nodeinfo[nid]->zoneinfo[zid];
374}
375
376struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
377{
378 return &mem->css;
379}
380
381static struct mem_cgroup_per_zone *
382page_cgroup_zoneinfo(struct mem_cgroup *mem, struct page *page)
383{
384 int nid = page_to_nid(page);
385 int zid = page_zonenum(page);
386
387 return mem_cgroup_zoneinfo(mem, nid, zid);
388}
389
390static struct mem_cgroup_tree_per_zone *
391soft_limit_tree_node_zone(int nid, int zid)
392{
393 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
394}
395
396static struct mem_cgroup_tree_per_zone *
397soft_limit_tree_from_page(struct page *page)
398{
399 int nid = page_to_nid(page);
400 int zid = page_zonenum(page);
401
402 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
403}
404
405static void
406__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
407 struct mem_cgroup_per_zone *mz,
408 struct mem_cgroup_tree_per_zone *mctz,
409 unsigned long long new_usage_in_excess)
410{
411 struct rb_node **p = &mctz->rb_root.rb_node;
412 struct rb_node *parent = NULL;
413 struct mem_cgroup_per_zone *mz_node;
414
415 if (mz->on_tree)
416 return;
417
418 mz->usage_in_excess = new_usage_in_excess;
419 if (!mz->usage_in_excess)
420 return;
421 while (*p) {
422 parent = *p;
423 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
424 tree_node);
425 if (mz->usage_in_excess < mz_node->usage_in_excess)
426 p = &(*p)->rb_left;
427 /*
428 * We can't avoid mem cgroups that are over their soft
429 * limit by the same amount
430 */
431 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
432 p = &(*p)->rb_right;
433 }
434 rb_link_node(&mz->tree_node, parent, p);
435 rb_insert_color(&mz->tree_node, &mctz->rb_root);
436 mz->on_tree = true;
437}
438
439static void
440__mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
441 struct mem_cgroup_per_zone *mz,
442 struct mem_cgroup_tree_per_zone *mctz)
443{
444 if (!mz->on_tree)
445 return;
446 rb_erase(&mz->tree_node, &mctz->rb_root);
447 mz->on_tree = false;
448}
449
450static void
451mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
452 struct mem_cgroup_per_zone *mz,
453 struct mem_cgroup_tree_per_zone *mctz)
454{
455 spin_lock(&mctz->lock);
456 __mem_cgroup_remove_exceeded(mem, mz, mctz);
457 spin_unlock(&mctz->lock);
458}
459
460
461static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
462{
463 unsigned long long excess;
464 struct mem_cgroup_per_zone *mz;
465 struct mem_cgroup_tree_per_zone *mctz;
466 int nid = page_to_nid(page);
467 int zid = page_zonenum(page);
468 mctz = soft_limit_tree_from_page(page);
469
470 /*
471 * Necessary to update all ancestors when hierarchy is used,
472 * because their event counters are not touched.
473 */
474 for (; mem; mem = parent_mem_cgroup(mem)) {
475 mz = mem_cgroup_zoneinfo(mem, nid, zid);
476 excess = res_counter_soft_limit_excess(&mem->res);
477 /*
478 * We have to update the tree if mz is on RB-tree or
479 * mem is over its softlimit.
480 */
481 if (excess || mz->on_tree) {
482 spin_lock(&mctz->lock);
483 /* if on-tree, remove it */
484 if (mz->on_tree)
485 __mem_cgroup_remove_exceeded(mem, mz, mctz);
486 /*
487 * Insert again. mz->usage_in_excess will be updated.
488 * If excess is 0, no tree ops.
489 */
490 __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
491 spin_unlock(&mctz->lock);
492 }
493 }
494}
495
496static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
497{
498 int node, zone;
499 struct mem_cgroup_per_zone *mz;
500 struct mem_cgroup_tree_per_zone *mctz;
501
502 for_each_node_state(node, N_POSSIBLE) {
503 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
504 mz = mem_cgroup_zoneinfo(mem, node, zone);
505 mctz = soft_limit_tree_node_zone(node, zone);
506 mem_cgroup_remove_exceeded(mem, mz, mctz);
507 }
508 }
509}
510
511static struct mem_cgroup_per_zone *
512__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
513{
514 struct rb_node *rightmost = NULL;
515 struct mem_cgroup_per_zone *mz;
516
517retry:
518 mz = NULL;
519 rightmost = rb_last(&mctz->rb_root);
520 if (!rightmost)
521 goto done; /* Nothing to reclaim from */
522
523 mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
524 /*
525 * Remove the node now but someone else can add it back;
526 * we will add it back at the end of reclaim to its correct
527 * position in the tree.
528 */
529 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
530 if (!res_counter_soft_limit_excess(&mz->mem->res) ||
531 !css_tryget(&mz->mem->css))
532 goto retry;
533done:
534 return mz;
535}
536
537static struct mem_cgroup_per_zone *
538mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
539{
540 struct mem_cgroup_per_zone *mz;
541
542 spin_lock(&mctz->lock);
543 mz = __mem_cgroup_largest_soft_limit_node(mctz);
544 spin_unlock(&mctz->lock);
545 return mz;
546}
547
548/*
549 * Implementation Note: reading percpu statistics for memcg.
550 *
551 * Both vmstat[] and percpu_counter use thresholds and do periodic
552 * synchronization to implement a "quick" read. There is a trade-off between
553 * reading cost and precision of the value, so we may have a chance to
554 * implement a similar periodic synchronization of the counters in memcg.
555 *
556 * But this _read() function is currently used for the user interface. The user
557 * accounts memory usage per memory cgroup and _always_ requires an exact value
558 * because it is used for accounting. Even with a quick-and-fuzzy read, we would
559 * still have to visit all online cpus and compute the sum. So, for now, the
560 * extra synchronization is not implemented (it exists only for cpu hotplug).
561 *
562 * If kernel-internal users appear that can make do with a not-exact value, and
563 * reading all cpu values becomes a performance bottleneck in some common
564 * workload, a threshold and synchronization scheme like vmstat[] should be
565 * implemented.
566 */
567static long mem_cgroup_read_stat(struct mem_cgroup *mem,
568 enum mem_cgroup_stat_index idx)
569{
570 long val = 0;
571 int cpu;
572
573 get_online_cpus();
574 for_each_online_cpu(cpu)
575 val += per_cpu(mem->stat->count[idx], cpu);
576#ifdef CONFIG_HOTPLUG_CPU
577 spin_lock(&mem->pcp_counter_lock);
578 val += mem->nocpu_base.count[idx];
579 spin_unlock(&mem->pcp_counter_lock);
580#endif
581 put_online_cpus();
582 return val;
583}
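
/*
 * Illustrative note (not in the original source): the sum above is accumulated
 * in a signed long because an individual cpu's delta can be negative (a page
 * may be charged on one cpu and uncharged on another); only the total across
 * all cpus is meaningful.
 */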
584
585static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
586 bool charge)
587{
588 int val = (charge) ? 1 : -1;
589 this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
590}
591
592void mem_cgroup_pgfault(struct mem_cgroup *mem, int val)
593{
594 this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val);
595}
596
597void mem_cgroup_pgmajfault(struct mem_cgroup *mem, int val)
598{
599 this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val);
600}
601
602static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem,
603 enum mem_cgroup_events_index idx)
604{
605 unsigned long val = 0;
606 int cpu;
607
608 for_each_online_cpu(cpu)
609 val += per_cpu(mem->stat->events[idx], cpu);
610#ifdef CONFIG_HOTPLUG_CPU
611 spin_lock(&mem->pcp_counter_lock);
612 val += mem->nocpu_base.events[idx];
613 spin_unlock(&mem->pcp_counter_lock);
614#endif
615 return val;
616}
617
618static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
619 bool file, int nr_pages)
620{
621 preempt_disable();
622
623 if (file)
624 __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages);
625 else
626 __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages);
627
628 /* pagein of a big page is an event. So, ignore page size */
629 if (nr_pages > 0)
630 __this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
631 else {
632 __this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
633 nr_pages = -nr_pages; /* for event */
634 }
635
636 __this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
637
638 preempt_enable();
639}
640
641unsigned long
642mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *mem, int nid, int zid,
643 unsigned int lru_mask)
644{
645 struct mem_cgroup_per_zone *mz;
646 enum lru_list l;
647 unsigned long ret = 0;
648
649 mz = mem_cgroup_zoneinfo(mem, nid, zid);
650
651 for_each_lru(l) {
652 if (BIT(l) & lru_mask)
653 ret += MEM_CGROUP_ZSTAT(mz, l);
654 }
655 return ret;
656}
657
658static unsigned long
659mem_cgroup_node_nr_lru_pages(struct mem_cgroup *mem,
660 int nid, unsigned int lru_mask)
661{
662 u64 total = 0;
663 int zid;
664
665 for (zid = 0; zid < MAX_NR_ZONES; zid++)
666 total += mem_cgroup_zone_nr_lru_pages(mem, nid, zid, lru_mask);
667
668 return total;
669}
670
671static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *mem,
672 unsigned int lru_mask)
673{
674 int nid;
675 u64 total = 0;
676
677 for_each_node_state(nid, N_HIGH_MEMORY)
678 total += mem_cgroup_node_nr_lru_pages(mem, nid, lru_mask);
679 return total;
680}
681
682static bool __memcg_event_check(struct mem_cgroup *mem, int target)
683{
684 unsigned long val, next;
685
686 val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);
687 next = this_cpu_read(mem->stat->targets[target]);
688 /* from time_after() in jiffies.h */
689 return ((long)next - (long)val < 0);
690}
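
/*
 * Illustrative note (not in the original source): the signed-difference test
 * above is the same idiom as time_after() and stays correct if the event
 * counter wraps. For example, with val == ULONG_MAX - 10 and a target of
 * next == val + 128 (which wraps to 117), (long)next - (long)val is still
 * +128, so the event does not fire early.
 */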
691
692static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target)
693{
694 unsigned long val, next;
695
696 val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);
697
698 switch (target) {
699 case MEM_CGROUP_TARGET_THRESH:
700 next = val + THRESHOLDS_EVENTS_TARGET;
701 break;
702 case MEM_CGROUP_TARGET_SOFTLIMIT:
703 next = val + SOFTLIMIT_EVENTS_TARGET;
704 break;
705 case MEM_CGROUP_TARGET_NUMAINFO:
706 next = val + NUMAINFO_EVENTS_TARGET;
707 break;
708 default:
709 return;
710 }
711
712 this_cpu_write(mem->stat->targets[target], next);
713}
714
715/*
716 * Check events in order.
717 *
718 */
719static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
720{
721 /* threshold event is triggered in finer grain than soft limit */
722 if (unlikely(__memcg_event_check(mem, MEM_CGROUP_TARGET_THRESH))) {
723 mem_cgroup_threshold(mem);
724 __mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH);
725 if (unlikely(__memcg_event_check(mem,
726 MEM_CGROUP_TARGET_SOFTLIMIT))) {
727 mem_cgroup_update_tree(mem, page);
728 __mem_cgroup_target_update(mem,
729 MEM_CGROUP_TARGET_SOFTLIMIT);
730 }
731#if MAX_NUMNODES > 1
732 if (unlikely(__memcg_event_check(mem,
733 MEM_CGROUP_TARGET_NUMAINFO))) {
734 atomic_inc(&mem->numainfo_events);
735 __mem_cgroup_target_update(mem,
736 MEM_CGROUP_TARGET_NUMAINFO);
737 }
738#endif
739 }
740}
741
742static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
743{
744 return container_of(cgroup_subsys_state(cont,
745 mem_cgroup_subsys_id), struct mem_cgroup,
746 css);
747}
748
749struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
750{
751 /*
752 * mm_update_next_owner() may clear mm->owner to NULL
753 * if it races with swapoff, page migration, etc.
754 * So this can be called with p == NULL.
755 */
756 if (unlikely(!p))
757 return NULL;
758
759 return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
760 struct mem_cgroup, css);
761}
762
763struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
764{
765 struct mem_cgroup *mem = NULL;
766
767 if (!mm)
768 return NULL;
769 /*
770 * Because we have no locks, mm->owner's may be being moved to other
771 * cgroup. We use css_tryget() here even if this looks
772 * pessimistic (rather than adding locks here).
773 */
774 rcu_read_lock();
775 do {
776 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
777 if (unlikely(!mem))
778 break;
779 } while (!css_tryget(&mem->css));
780 rcu_read_unlock();
781 return mem;
782}
783
784/* The caller has to guarantee "mem" exists before calling this */
785static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem)
786{
787 struct cgroup_subsys_state *css;
788 int found;
789
790 if (!mem) /* ROOT cgroup has the smallest ID */
791 return root_mem_cgroup; /*css_put/get against root is ignored*/
792 if (!mem->use_hierarchy) {
793 if (css_tryget(&mem->css))
794 return mem;
795 return NULL;
796 }
797 rcu_read_lock();
798 /*
799 * searching a memory cgroup which has the smallest ID under given
800 * ROOT cgroup. (ID >= 1)
801 */
802 css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found);
803 if (css && css_tryget(css))
804 mem = container_of(css, struct mem_cgroup, css);
805 else
806 mem = NULL;
807 rcu_read_unlock();
808 return mem;
809}
810
811static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter,
812 struct mem_cgroup *root,
813 bool cond)
814{
815 int nextid = css_id(&iter->css) + 1;
816 int found;
817 int hierarchy_used;
818 struct cgroup_subsys_state *css;
819
820 hierarchy_used = iter->use_hierarchy;
821
822 css_put(&iter->css);
823 /* If no ROOT, walk all, ignore hierarchy */
824 if (!cond || (root && !hierarchy_used))
825 return NULL;
826
827 if (!root)
828 root = root_mem_cgroup;
829
830 do {
831 iter = NULL;
832 rcu_read_lock();
833
834 css = css_get_next(&mem_cgroup_subsys, nextid,
835 &root->css, &found);
836 if (css && css_tryget(css))
837 iter = container_of(css, struct mem_cgroup, css);
838 rcu_read_unlock();
839 /* If css is NULL, no more cgroups will be found */
840 nextid = found + 1;
841 } while (css && !iter);
842
843 return iter;
844}
845/*
846 * for_each_mem_cgroup_tree() for visiting all cgroups under a tree. Please be
847 * careful that breaking out of the loop is not allowed; we hold a reference count.
848 * Instead, set "cond" to false and "continue" to exit the loop (see the sketch below the macros).
849 */
850#define for_each_mem_cgroup_tree_cond(iter, root, cond) \
851 for (iter = mem_cgroup_start_loop(root);\
852 iter != NULL;\
853 iter = mem_cgroup_get_next(iter, root, cond))
854
855#define for_each_mem_cgroup_tree(iter, root) \
856 for_each_mem_cgroup_tree_cond(iter, root, true)
857
858#define for_each_mem_cgroup_all(iter) \
859 for_each_mem_cgroup_tree_cond(iter, NULL, true)
860
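/*
 * Illustrative sketch (not in the original source): ending the walk early
 * without leaking the css reference held on "iter". A plain "break" would skip
 * the css_put() done by mem_cgroup_get_next(), so flip "cond" instead:
 *
 *	struct mem_cgroup *iter;
 *	bool cond = true;
 *
 *	for_each_mem_cgroup_tree_cond(iter, root, cond) {
 *		if (done_with(iter))	// done_with() is a hypothetical predicate
 *			cond = false;	// next step drops the ref and ends the loop
 *	}
 */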
861
862static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
863{
864 return (mem == root_mem_cgroup);
865}
866
867void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
868{
869 struct mem_cgroup *mem;
870
871 if (!mm)
872 return;
873
874 rcu_read_lock();
875 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
876 if (unlikely(!mem))
877 goto out;
878
879 switch (idx) {
880 case PGMAJFAULT:
881 mem_cgroup_pgmajfault(mem, 1);
882 break;
883 case PGFAULT:
884 mem_cgroup_pgfault(mem, 1);
885 break;
886 default:
887 BUG();
888 }
889out:
890 rcu_read_unlock();
891}
892EXPORT_SYMBOL(mem_cgroup_count_vm_event);
893
894/*
895 * Following LRU functions are allowed to be used without PCG_LOCK.
896 * Operations are called by the global LRU routines independently from memcg.
897 * What we have to take care of here is the validity of pc->mem_cgroup.
898 *
899 * Changes to pc->mem_cgroup happen when
900 * 1. charge
901 * 2. moving account
902 * In the typical case, "charge" is done before add-to-lru. The exception is
903 * SwapCache, which is added to the LRU before being charged.
904 * If the PCG_USED bit is not set, the page_cgroup is not added to this private LRU.
905 * When moving account, the page is not on the LRU; it is isolated.
906 */
907
908void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
909{
910 struct page_cgroup *pc;
911 struct mem_cgroup_per_zone *mz;
912
913 if (mem_cgroup_disabled())
914 return;
915 pc = lookup_page_cgroup(page);
916 /* can happen while we handle swapcache. */
917 if (!TestClearPageCgroupAcctLRU(pc))
918 return;
919 VM_BUG_ON(!pc->mem_cgroup);
920 /*
921 * We don't check PCG_USED bit. It's cleared when the "page" is finally
922 * removed from global LRU.
923 */
924 mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
925 /* huge page split is done under lru_lock. so, we have no races. */
926 MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
927 if (mem_cgroup_is_root(pc->mem_cgroup))
928 return;
929 VM_BUG_ON(list_empty(&pc->lru));
930 list_del_init(&pc->lru);
931}
932
933void mem_cgroup_del_lru(struct page *page)
934{
935 mem_cgroup_del_lru_list(page, page_lru(page));
936}
937
938/*
939 * Writeback is about to end against a page which has been marked for immediate
940 * reclaim. If it still appears to be reclaimable, move it to the tail of the
941 * inactive list.
942 */
943void mem_cgroup_rotate_reclaimable_page(struct page *page)
944{
945 struct mem_cgroup_per_zone *mz;
946 struct page_cgroup *pc;
947 enum lru_list lru = page_lru(page);
948
949 if (mem_cgroup_disabled())
950 return;
951
952 pc = lookup_page_cgroup(page);
953 /* unused or root page is not rotated. */
954 if (!PageCgroupUsed(pc))
955 return;
956 /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
957 smp_rmb();
958 if (mem_cgroup_is_root(pc->mem_cgroup))
959 return;
960 mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
961 list_move_tail(&pc->lru, &mz->lists[lru]);
962}
963
964void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
965{
966 struct mem_cgroup_per_zone *mz;
967 struct page_cgroup *pc;
968
969 if (mem_cgroup_disabled())
970 return;
971
972 pc = lookup_page_cgroup(page);
973 /* unused or root page is not rotated. */
974 if (!PageCgroupUsed(pc))
975 return;
976 /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
977 smp_rmb();
978 if (mem_cgroup_is_root(pc->mem_cgroup))
979 return;
980 mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
981 list_move(&pc->lru, &mz->lists[lru]);
982}
983
984void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
985{
986 struct page_cgroup *pc;
987 struct mem_cgroup_per_zone *mz;
988
989 if (mem_cgroup_disabled())
990 return;
991 pc = lookup_page_cgroup(page);
992 VM_BUG_ON(PageCgroupAcctLRU(pc));
993 if (!PageCgroupUsed(pc))
994 return;
995 /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
996 smp_rmb();
997 mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
998 /* huge page split is done under lru_lock. so, we have no races. */
999 MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
1000 SetPageCgroupAcctLRU(pc);
1001 if (mem_cgroup_is_root(pc->mem_cgroup))
1002 return;
1003 list_add(&pc->lru, &mz->lists[lru]);
1004}
1005
1006/*
1007 * When handling SwapCache and other FUSE stuff, pc->mem_cgroup may be changed
1008 * while it's linked to the LRU because the page may be reused after it's fully
1009 * uncharged. To handle that, unlink the page_cgroup from the LRU when charging it
1010 * again. This is done under lock_page and it is expected that zone->lru_lock is never held.
1011 */
1012static void mem_cgroup_lru_del_before_commit(struct page *page)
1013{
1014 unsigned long flags;
1015 struct zone *zone = page_zone(page);
1016 struct page_cgroup *pc = lookup_page_cgroup(page);
1017
1018 /*
1019 * Doing this check without taking ->lru_lock seems wrong but this
1020 * is safe, because if the page_cgroup's USED bit is unset, the page
1021 * will not be added to any memcg's LRU, and if the USED bit is
1022 * set, the commit after this will fail anyway.
1023 * All of this charge/uncharge is done under some mutual exclusion,
1024 * so we don't need to take care of changes in the USED bit.
1025 */
1026 if (likely(!PageLRU(page)))
1027 return;
1028
1029 spin_lock_irqsave(&zone->lru_lock, flags);
1030 /*
1031 * Forget old LRU when this page_cgroup is *not* used. This Used bit
1032 * is guarded by lock_page() because the page is SwapCache.
1033 */
1034 if (!PageCgroupUsed(pc))
1035 mem_cgroup_del_lru_list(page, page_lru(page));
1036 spin_unlock_irqrestore(&zone->lru_lock, flags);
1037}
1038
1039static void mem_cgroup_lru_add_after_commit(struct page *page)
1040{
1041 unsigned long flags;
1042 struct zone *zone = page_zone(page);
1043 struct page_cgroup *pc = lookup_page_cgroup(page);
1044
1045	/* take care of the case where the page is added to the LRU while we commit it */
1046 if (likely(!PageLRU(page)))
1047 return;
1048 spin_lock_irqsave(&zone->lru_lock, flags);
1049 /* link when the page is linked to LRU but page_cgroup isn't */
1050 if (PageLRU(page) && !PageCgroupAcctLRU(pc))
1051 mem_cgroup_add_lru_list(page, page_lru(page));
1052 spin_unlock_irqrestore(&zone->lru_lock, flags);
1053}
1054
1055
1056void mem_cgroup_move_lists(struct page *page,
1057 enum lru_list from, enum lru_list to)
1058{
1059 if (mem_cgroup_disabled())
1060 return;
1061 mem_cgroup_del_lru_list(page, from);
1062 mem_cgroup_add_lru_list(page, to);
1063}
1064
1065/*
1066 * Checks whether the given mem is the same as root_mem or lies within
1067 * root_mem's hierarchy subtree
1068 */
1069static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_mem,
1070 struct mem_cgroup *mem)
1071{
1072 if (root_mem != mem) {
1073 return (root_mem->use_hierarchy &&
1074 css_is_ancestor(&mem->css, &root_mem->css));
1075 }
1076
1077 return true;
1078}
1079
1080int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
1081{
1082 int ret;
1083 struct mem_cgroup *curr = NULL;
1084 struct task_struct *p;
1085
1086 p = find_lock_task_mm(task);
1087 if (!p)
1088 return 0;
1089 curr = try_get_mem_cgroup_from_mm(p->mm);
1090 task_unlock(p);
1091 if (!curr)
1092 return 0;
1093 /*
1094 * We should check use_hierarchy of "mem", not "curr", because checking
1095 * use_hierarchy of "curr" here would make this function return true if hierarchy
1096 * is enabled in "curr" and "curr" is a child of "mem" in the *cgroup*
1097 * hierarchy (even if use_hierarchy is disabled in "mem").
1098 */
1099 ret = mem_cgroup_same_or_subtree(mem, curr);
1100 css_put(&curr->css);
1101 return ret;
1102}
1103
1104static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
1105{
1106 unsigned long active;
1107 unsigned long inactive;
1108 unsigned long gb;
1109 unsigned long inactive_ratio;
1110
1111 inactive = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON));
1112 active = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON));
1113
1114 gb = (inactive + active) >> (30 - PAGE_SHIFT);
1115 if (gb)
1116 inactive_ratio = int_sqrt(10 * gb);
1117 else
1118 inactive_ratio = 1;
1119
1120 if (present_pages) {
1121 present_pages[0] = inactive;
1122 present_pages[1] = active;
1123 }
1124
1125 return inactive_ratio;
1126}
1127
1128int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
1129{
1130 unsigned long active;
1131 unsigned long inactive;
1132 unsigned long present_pages[2];
1133 unsigned long inactive_ratio;
1134
1135 inactive_ratio = calc_inactive_ratio(memcg, present_pages);
1136
1137 inactive = present_pages[0];
1138 active = present_pages[1];
1139
1140 if (inactive * inactive_ratio < active)
1141 return 1;
1142
1143 return 0;
1144}
1145
1146int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
1147{
1148 unsigned long active;
1149 unsigned long inactive;
1150
1151 inactive = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE));
1152 active = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE));
1153
1154 return (active > inactive);
1155}
1156
1157struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
1158 struct zone *zone)
1159{
1160 int nid = zone_to_nid(zone);
1161 int zid = zone_idx(zone);
1162 struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
1163
1164 return &mz->reclaim_stat;
1165}
1166
1167struct zone_reclaim_stat *
1168mem_cgroup_get_reclaim_stat_from_page(struct page *page)
1169{
1170 struct page_cgroup *pc;
1171 struct mem_cgroup_per_zone *mz;
1172
1173 if (mem_cgroup_disabled())
1174 return NULL;
1175
1176 pc = lookup_page_cgroup(page);
1177 if (!PageCgroupUsed(pc))
1178 return NULL;
1179 /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
1180 smp_rmb();
1181 mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
1182 return &mz->reclaim_stat;
1183}
1184
1185unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
1186 struct list_head *dst,
1187 unsigned long *scanned, int order,
1188 int mode, struct zone *z,
1189 struct mem_cgroup *mem_cont,
1190 int active, int file)
1191{
1192 unsigned long nr_taken = 0;
1193 struct page *page;
1194 unsigned long scan;
1195 LIST_HEAD(pc_list);
1196 struct list_head *src;
1197 struct page_cgroup *pc, *tmp;
1198 int nid = zone_to_nid(z);
1199 int zid = zone_idx(z);
1200 struct mem_cgroup_per_zone *mz;
1201 int lru = LRU_FILE * file + active;
1202 int ret;
1203
1204 BUG_ON(!mem_cont);
1205 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
1206 src = &mz->lists[lru];
1207
1208 scan = 0;
1209 list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
1210 if (scan >= nr_to_scan)
1211 break;
1212
1213 if (unlikely(!PageCgroupUsed(pc)))
1214 continue;
1215
1216 page = lookup_cgroup_page(pc);
1217
1218 if (unlikely(!PageLRU(page)))
1219 continue;
1220
1221 scan++;
1222 ret = __isolate_lru_page(page, mode, file);
1223 switch (ret) {
1224 case 0:
1225 list_move(&page->lru, dst);
1226 mem_cgroup_del_lru(page);
1227 nr_taken += hpage_nr_pages(page);
1228 break;
1229 case -EBUSY:
1230 /* we don't affect global LRU but rotate in our LRU */
1231 mem_cgroup_rotate_lru_list(page, page_lru(page));
1232 break;
1233 default:
1234 break;
1235 }
1236 }
1237
1238 *scanned = scan;
1239
1240 trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken,
1241 0, 0, 0, mode);
1242
1243 return nr_taken;
1244}
1245
1246#define mem_cgroup_from_res_counter(counter, member) \
1247 container_of(counter, struct mem_cgroup, member)
1248
1249/**
1250 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1251 * @mem: the memory cgroup
1252 *
1253 * Returns the maximum amount of memory @mem can be charged with, in
1254 * pages.
1255 */
1256static unsigned long mem_cgroup_margin(struct mem_cgroup *mem)
1257{
1258 unsigned long long margin;
1259
1260 margin = res_counter_margin(&mem->res);
1261 if (do_swap_account)
1262 margin = min(margin, res_counter_margin(&mem->memsw));
1263 return margin >> PAGE_SHIFT;
1264}
1265
1266int mem_cgroup_swappiness(struct mem_cgroup *memcg)
1267{
1268 struct cgroup *cgrp = memcg->css.cgroup;
1269
1270 /* root ? */
1271 if (cgrp->parent == NULL)
1272 return vm_swappiness;
1273
1274 return memcg->swappiness;
1275}
1276
1277static void mem_cgroup_start_move(struct mem_cgroup *mem)
1278{
1279 int cpu;
1280
1281 get_online_cpus();
1282 spin_lock(&mem->pcp_counter_lock);
1283 for_each_online_cpu(cpu)
1284 per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
1285 mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
1286 spin_unlock(&mem->pcp_counter_lock);
1287 put_online_cpus();
1288
1289 synchronize_rcu();
1290}
1291
1292static void mem_cgroup_end_move(struct mem_cgroup *mem)
1293{
1294 int cpu;
1295
1296 if (!mem)
1297 return;
1298 get_online_cpus();
1299 spin_lock(&mem->pcp_counter_lock);
1300 for_each_online_cpu(cpu)
1301 per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
1302 mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
1303 spin_unlock(&mem->pcp_counter_lock);
1304 put_online_cpus();
1305}
1306/*
1307 * Two routines for checking whether "mem" is under move_account() or not.
1308 *
1309 * mem_cgroup_stealed() - checks whether a cgroup is mc.from or not. This is
1310 * used to avoid a race in accounting. If true,
1311 * pc->mem_cgroup may be overwritten.
1312 *
1313 * mem_cgroup_under_move() - checks whether a cgroup is mc.from or mc.to or
1314 * under the hierarchy of moving cgroups. This is
1315 * for waiting at high memory pressure caused by "move".
1316 */
1317
1318static bool mem_cgroup_stealed(struct mem_cgroup *mem)
1319{
1320 VM_BUG_ON(!rcu_read_lock_held());
1321 return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
1322}
1323
1324static bool mem_cgroup_under_move(struct mem_cgroup *mem)
1325{
1326 struct mem_cgroup *from;
1327 struct mem_cgroup *to;
1328 bool ret = false;
1329 /*
1330 * Unlike task_move routines, we access mc.to, mc.from not under
1331 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1332 */
1333 spin_lock(&mc.lock);
1334 from = mc.from;
1335 to = mc.to;
1336 if (!from)
1337 goto unlock;
1338
1339 ret = mem_cgroup_same_or_subtree(mem, from)
1340 || mem_cgroup_same_or_subtree(mem, to);
1341unlock:
1342 spin_unlock(&mc.lock);
1343 return ret;
1344}
1345
1346static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem)
1347{
1348 if (mc.moving_task && current != mc.moving_task) {
1349 if (mem_cgroup_under_move(mem)) {
1350 DEFINE_WAIT(wait);
1351 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1352 /* moving charge context might have finished. */
1353 if (mc.moving_task)
1354 schedule();
1355 finish_wait(&mc.waitq, &wait);
1356 return true;
1357 }
1358 }
1359 return false;
1360}
1361
1362/**
1363 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
1364 * @memcg: The memory cgroup that went over limit
1365 * @p: Task that is going to be killed
1366 *
1367 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1368 * enabled
1369 */
1370void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1371{
1372 struct cgroup *task_cgrp;
1373 struct cgroup *mem_cgrp;
1374 /*
1375 * Need a buffer in BSS, can't rely on allocations. The code relies
1376 * on the assumption that OOM is serialized for memory controller.
1377 * If this assumption is broken, revisit this code.
1378 */
1379 static char memcg_name[PATH_MAX];
1380 int ret;
1381
1382 if (!memcg || !p)
1383 return;
1384
1385
1386 rcu_read_lock();
1387
1388 mem_cgrp = memcg->css.cgroup;
1389 task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
1390
1391 ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
1392 if (ret < 0) {
1393 /*
1394 * Unfortunately, we are unable to convert to a useful name,
1395 * but we'll still print out the usage information.
1396 */
1397 rcu_read_unlock();
1398 goto done;
1399 }
1400 rcu_read_unlock();
1401
1402 printk(KERN_INFO "Task in %s killed", memcg_name);
1403
1404 rcu_read_lock();
1405 ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
1406 if (ret < 0) {
1407 rcu_read_unlock();
1408 goto done;
1409 }
1410 rcu_read_unlock();
1411
1412 /*
1413 * Continues from above, so we don't need a KERN_ level
1414 */
1415 printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
1416done:
1417
1418 printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
1419 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1420 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1421 res_counter_read_u64(&memcg->res, RES_FAILCNT));
1422 printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
1423 "failcnt %llu\n",
1424 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1425 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1426 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1427}
1428
1429/*
1430 * This function returns the number of memcgs under the hierarchy tree. Returns
1431 * 1 (the self count) if there are no children.
1432 */
1433static int mem_cgroup_count_children(struct mem_cgroup *mem)
1434{
1435 int num = 0;
1436 struct mem_cgroup *iter;
1437
1438 for_each_mem_cgroup_tree(iter, mem)
1439 num++;
1440 return num;
1441}
1442
1443/*
1444 * Return the memory (and swap, if configured) limit for a memcg.
1445 */
1446u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
1447{
1448 u64 limit;
1449 u64 memsw;
1450
1451 limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1452 limit += total_swap_pages << PAGE_SHIFT;
1453
1454 memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1455 /*
1456 * If memsw is finite and limits the amount of swap space available
1457 * to this memcg, return that limit.
1458 */
1459 return min(limit, memsw);
1460}
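
/*
 * Illustrative note (not in the original source): with a 1 GiB memory limit,
 * 512 MiB of total swap and an unlimited mem+swap limit, the first term is
 * 1.5 GiB and min() returns it; if the mem+swap limit were 1.2 GiB instead,
 * min() would return that tighter bound.
 */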
1461
1462/*
1463 * Visit the first child (need not be the first child as per the ordering
1464 * of the cgroup list, since we track last_scanned_child) of @mem and use
1465 * that to reclaim free pages from.
1466 */
1467static struct mem_cgroup *
1468mem_cgroup_select_victim(struct mem_cgroup *root_mem)
1469{
1470 struct mem_cgroup *ret = NULL;
1471 struct cgroup_subsys_state *css;
1472 int nextid, found;
1473
1474 if (!root_mem->use_hierarchy) {
1475 css_get(&root_mem->css);
1476 ret = root_mem;
1477 }
1478
1479 while (!ret) {
1480 rcu_read_lock();
1481 nextid = root_mem->last_scanned_child + 1;
1482 css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
1483 &found);
1484 if (css && css_tryget(css))
1485 ret = container_of(css, struct mem_cgroup, css);
1486
1487 rcu_read_unlock();
1488 /* Updates scanning parameter */
1489 if (!css) {
1490 /* this means start scan from ID:1 */
1491 root_mem->last_scanned_child = 0;
1492 } else
1493 root_mem->last_scanned_child = found;
1494 }
1495
1496 return ret;
1497}
1498
1499/**
1500 * test_mem_cgroup_node_reclaimable
1501 * @mem: the target memcg
1502 * @nid: the node ID to be checked.
1503 * @noswap : specify true here if the user wants file-only information.
1504 *
1505 * This function returns whether the specified memcg contains any
1506 * reclaimable pages on a node. Returns true if there are any reclaimable
1507 * pages in the node.
1508 */
1509static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *mem,
1510 int nid, bool noswap)
1511{
1512 if (mem_cgroup_node_nr_lru_pages(mem, nid, LRU_ALL_FILE))
1513 return true;
1514 if (noswap || !total_swap_pages)
1515 return false;
1516 if (mem_cgroup_node_nr_lru_pages(mem, nid, LRU_ALL_ANON))
1517 return true;
1518 return false;
1519
1520}
1521#if MAX_NUMNODES > 1
1522
1523/*
1524 * Always updating the nodemask is not very good - even if we have an empty
1525 * list or the wrong list here, we can start from some node and traverse all
1526 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1527 *
1528 */
1529static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem)
1530{
1531 int nid;
1532 /*
1533 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1534 * pagein/pageout changes since the last update.
1535 */
1536 if (!atomic_read(&mem->numainfo_events))
1537 return;
1538 if (atomic_inc_return(&mem->numainfo_updating) > 1)
1539 return;
1540
1541 /* make a nodemask where this memcg uses memory from */
1542 mem->scan_nodes = node_states[N_HIGH_MEMORY];
1543
1544 for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {
1545
1546 if (!test_mem_cgroup_node_reclaimable(mem, nid, false))
1547 node_clear(nid, mem->scan_nodes);
1548 }
1549
1550 atomic_set(&mem->numainfo_events, 0);
1551 atomic_set(&mem->numainfo_updating, 0);
1552}
1553
1554/*
1555 * Select a node from which we start reclaim. Because all we need is to
1556 * reduce the usage counter, starting from anywhere is OK. Considering
1557 * memory reclaim from the current node, there are pros and cons.
1558 *
1559 * Freeing memory from the current node means freeing memory from a node which
1560 * we'll use or have used. So, it may make the LRU bad. And if several threads
1561 * hit limits, they will see contention on a node. But freeing from a remote
1562 * node means more cost for memory reclaim because of memory latency.
1563 *
1564 * Now, we use round-robin. Better algorithm is welcomed.
1565 */
1566int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
1567{
1568 int node;
1569
1570 mem_cgroup_may_update_nodemask(mem);
1571 node = mem->last_scanned_node;
1572
1573 node = next_node(node, mem->scan_nodes);
1574 if (node == MAX_NUMNODES)
1575 node = first_node(mem->scan_nodes);
1576 /*
1577 * We call this when we hit limit, not when pages are added to LRU.
1578 * No LRU may hold pages because all pages are UNEVICTABLE or
1579 * memcg is too small and all pages are not on LRU. In that case,
1580 * we use the current node.
1581 */
1582 if (unlikely(node == MAX_NUMNODES))
1583 node = numa_node_id();
1584
1585 mem->last_scanned_node = node;
1586 return node;
1587}
1588
1589/*
1590 * Check whether any node contains reclaimable pages.
1591 * For a quick scan, we make use of scan_nodes. This will allow us to skip
1592 * unused nodes. But scan_nodes is lazily updated and may not contain
1593 * enough new information. We need to double-check.
1594 */
1595bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
1596{
1597 int nid;
1598
1599 /*
1600 * quick check...making use of scan_nodes.
1601 * We can skip unused nodes.
1602 */
1603 if (!nodes_empty(mem->scan_nodes)) {
1604 for (nid = first_node(mem->scan_nodes);
1605 nid < MAX_NUMNODES;
1606 nid = next_node(nid, mem->scan_nodes)) {
1607
1608 if (test_mem_cgroup_node_reclaimable(mem, nid, noswap))
1609 return true;
1610 }
1611 }
1612 /*
1613 * Check rest of nodes.
1614 */
1615 for_each_node_state(nid, N_HIGH_MEMORY) {
1616 if (node_isset(nid, mem->scan_nodes))
1617 continue;
1618 if (test_mem_cgroup_node_reclaimable(mem, nid, noswap))
1619 return true;
1620 }
1621 return false;
1622}
1623
1624#else
1625int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
1626{
1627 return 0;
1628}
1629
1630bool mem_cgroup_reclaimable(struct mem_cgroup *mem, bool noswap)
1631{
1632 return test_mem_cgroup_node_reclaimable(mem, 0, noswap);
1633}
1634#endif
1635
1636/*
1637 * Scan the hierarchy if needed to reclaim memory. We remember the last child
1638 * we reclaimed from, so that we don't end up penalizing one child extensively
1639 * based on its position in the children list.
1640 *
1641 * root_mem is the original ancestor that we've been reclaiming from.
1642 *
1643 * We give up and return to the caller when we visit root_mem twice.
1644 * (other groups can be removed while we're walking....)
1645 *
1646 * If shrink==true, to avoid freeing too much, this returns immediately.
1647 */
1648static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1649 struct zone *zone,
1650 gfp_t gfp_mask,
1651 unsigned long reclaim_options,
1652 unsigned long *total_scanned)
1653{
1654 struct mem_cgroup *victim;
1655 int ret, total = 0;
1656 int loop = 0;
1657 bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
1658 bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
1659 bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
1660 unsigned long excess;
1661 unsigned long nr_scanned;
1662
1663 excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
1664
1665	/* If memsw_is_minimum==1, swap-out is of no use. */
1666 if (!check_soft && !shrink && root_mem->memsw_is_minimum)
1667 noswap = true;
1668
1669 while (1) {
1670 victim = mem_cgroup_select_victim(root_mem);
1671 if (victim == root_mem) {
1672 loop++;
1673 /*
1674 * We are not draining per-cpu cached charges during
1675 * soft limit reclaim because global reclaim doesn't
1676 * care about charges. It tries to free some memory,
1677 * and draining charges would not give it any.
1678 */
1679 if (!check_soft && loop >= 1)
1680 drain_all_stock_async(root_mem);
1681 if (loop >= 2) {
1682 /*
1683 * If we have not been able to reclaim
1684 * anything, it might be because there are
1685 * no reclaimable pages under this hierarchy
1686 */
1687 if (!check_soft || !total) {
1688 css_put(&victim->css);
1689 break;
1690 }
1691 /*
1692 * We want to do more targeted reclaim.
1693 * excess >> 2 is not so excessive that we
1694 * reclaim too much, nor so little that we keep
1695 * coming back to reclaim from this cgroup
1696 */
1697 if (total >= (excess >> 2) ||
1698 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
1699 css_put(&victim->css);
1700 break;
1701 }
1702 }
1703 }
1704 if (!mem_cgroup_reclaimable(victim, noswap)) {
1705 /* this cgroup's local usage == 0 */
1706 css_put(&victim->css);
1707 continue;
1708 }
1709 /* we use swappiness of local cgroup */
1710 if (check_soft) {
1711 ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
1712 noswap, zone, &nr_scanned);
1713 *total_scanned += nr_scanned;
1714 } else
1715 ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
1716 noswap);
1717 css_put(&victim->css);
1718 /*
1719 * When shrinking usage, we can't check whether we should stop here or
1720 * reclaim more; it depends on the callers. last_scanned_child
1721 * works well enough for keeping fairness under the tree.
1722 */
1723 if (shrink)
1724 return ret;
1725 total += ret;
1726 if (check_soft) {
1727 if (!res_counter_soft_limit_excess(&root_mem->res))
1728 return total;
1729 } else if (mem_cgroup_margin(root_mem))
1730 return total;
1731 }
1732 return total;
1733}
1734
1735/*
1736 * Check whether the OOM killer is already running under our hierarchy.
1737 * If someone is running it, return false.
1738 * Has to be called with memcg_oom_lock held.
1739 */
1740static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
1741{
1742 struct mem_cgroup *iter, *failed = NULL;
1743 bool cond = true;
1744
1745 for_each_mem_cgroup_tree_cond(iter, mem, cond) {
1746 if (iter->oom_lock) {
1747 /*
1748 * this subtree of our hierarchy is already locked,
1749 * so we cannot take the lock.
1750 */
1751 failed = iter;
1752 cond = false;
1753 } else
1754 iter->oom_lock = true;
1755 }
1756
1757 if (!failed)
1758 return true;
1759
1760 /*
1761 * OK, we failed to lock the whole subtree so we have to clean up
1762 * what we set up before reaching the failing subtree
1763 */
1764 cond = true;
1765 for_each_mem_cgroup_tree_cond(iter, mem, cond) {
1766 if (iter == failed) {
1767 cond = false;
1768 continue;
1769 }
1770 iter->oom_lock = false;
1771 }
1772 return false;
1773}
1774
1775/*
1776 * Has to be called with memcg_oom_lock held.
1777 */
1778static int mem_cgroup_oom_unlock(struct mem_cgroup *mem)
1779{
1780 struct mem_cgroup *iter;
1781
1782 for_each_mem_cgroup_tree(iter, mem)
1783 iter->oom_lock = false;
1784 return 0;
1785}
1786
1787static void mem_cgroup_mark_under_oom(struct mem_cgroup *mem)
1788{
1789 struct mem_cgroup *iter;
1790
1791 for_each_mem_cgroup_tree(iter, mem)
1792 atomic_inc(&iter->under_oom);
1793}
1794
1795static void mem_cgroup_unmark_under_oom(struct mem_cgroup *mem)
1796{
1797 struct mem_cgroup *iter;
1798
1799 /*
1800 * When a new child is created while the hierarchy is under oom,
1801 * mem_cgroup_oom_lock() may not be called. We have to use
1802 * atomic_add_unless() here.
1803 */
1804 for_each_mem_cgroup_tree(iter, mem)
1805 atomic_add_unless(&iter->under_oom, -1, 0);
1806}
1807
1808static DEFINE_SPINLOCK(memcg_oom_lock);
1809static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1810
1811struct oom_wait_info {
1812 struct mem_cgroup *mem;
1813 wait_queue_t wait;
1814};
1815
1816static int memcg_oom_wake_function(wait_queue_t *wait,
1817 unsigned mode, int sync, void *arg)
1818{
1819 struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg,
1820 *oom_wait_mem;
1821 struct oom_wait_info *oom_wait_info;
1822
1823 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1824 oom_wait_mem = oom_wait_info->mem;
1825
1826 /*
1827 * Both oom_wait_info->mem and wake_mem are stable under us,
1828 * so we can use css_is_ancestor() without taking care of RCU.
1829 */
1830 if (!mem_cgroup_same_or_subtree(oom_wait_mem, wake_mem)
1831 && !mem_cgroup_same_or_subtree(wake_mem, oom_wait_mem))
1832 return 0;
1833 return autoremove_wake_function(wait, mode, sync, arg);
1834}
1835
1836static void memcg_wakeup_oom(struct mem_cgroup *mem)
1837{
1838 /* for filtering, pass "mem" as argument. */
1839 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
1840}
1841
1842static void memcg_oom_recover(struct mem_cgroup *mem)
1843{
1844 if (mem && atomic_read(&mem->under_oom))
1845 memcg_wakeup_oom(mem);
1846}
1847
1848/*
1849 * Try to call the OOM killer. Returns false if we should exit the memory-reclaim loop.
1850 */
1851bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
1852{
1853 struct oom_wait_info owait;
1854 bool locked, need_to_kill;
1855
1856 owait.mem = mem;
1857 owait.wait.flags = 0;
1858 owait.wait.func = memcg_oom_wake_function;
1859 owait.wait.private = current;
1860 INIT_LIST_HEAD(&owait.wait.task_list);
1861 need_to_kill = true;
1862 mem_cgroup_mark_under_oom(mem);
1863
1864 /* At first, try to OOM lock hierarchy under mem.*/
1865 spin_lock(&memcg_oom_lock);
1866 locked = mem_cgroup_oom_lock(mem);
1867 /*
1868 * Even if signal_pending(), we can't quit charge() loop without
1869 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
1870 * under OOM is always welcome, so use TASK_KILLABLE here.
1871 */
1872 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1873 if (!locked || mem->oom_kill_disable)
1874 need_to_kill = false;
1875 if (locked)
1876 mem_cgroup_oom_notify(mem);
1877 spin_unlock(&memcg_oom_lock);
1878
1879 if (need_to_kill) {
1880 finish_wait(&memcg_oom_waitq, &owait.wait);
1881 mem_cgroup_out_of_memory(mem, mask);
1882 } else {
1883 schedule();
1884 finish_wait(&memcg_oom_waitq, &owait.wait);
1885 }
1886 spin_lock(&memcg_oom_lock);
1887 if (locked)
1888 mem_cgroup_oom_unlock(mem);
1889 memcg_wakeup_oom(mem);
1890 spin_unlock(&memcg_oom_lock);
1891
1892 mem_cgroup_unmark_under_oom(mem);
1893
1894 if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
1895 return false;
1896 /* Give chance to dying process */
1897 schedule_timeout(1);
1898 return true;
1899}
1900
1901/*
1902 * Currently used to update mapped file statistics, but the routine can be
1903 * generalized to update other statistics as well.
1904 *
1905 * Notes: Race condition
1906 *
1907 * We usually use page_cgroup_lock() for accessing page_cgroup member but
1908 * it tends to be costly. But considering some conditions, we doesn't need
1909 * to do so _always_.
1910 *
1911 * Considering "charge", lock_page_cgroup() is not required because all
1912 * file-stat operations happen after a page is attached to the radix-tree. There
1913 * is no race with "charge".
1914 *
1915 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
1916 * at "uncharge" intentionally. So, we always see valid pc->mem_cgroup even
1917 * if there is a race with "uncharge". The statistics themselves are properly
1918 * handled by flags.
1919 *
1920 * Considering "move", this is the only case where we see a race. To keep the
1921 * race window small, we check the MEM_CGROUP_ON_MOVE percpu value and detect
1922 * whether a race is possible. If it is, we take a lock.
1923 */
1924
1925void mem_cgroup_update_page_stat(struct page *page,
1926 enum mem_cgroup_page_stat_item idx, int val)
1927{
1928 struct mem_cgroup *mem;
1929 struct page_cgroup *pc = lookup_page_cgroup(page);
1930 bool need_unlock = false;
1931 unsigned long uninitialized_var(flags);
1932
1933 if (unlikely(!pc))
1934 return;
1935
1936 rcu_read_lock();
1937 mem = pc->mem_cgroup;
1938 if (unlikely(!mem || !PageCgroupUsed(pc)))
1939 goto out;
1940 /* pc->mem_cgroup is unstable ? */
1941 if (unlikely(mem_cgroup_stealed(mem)) || PageTransHuge(page)) {
1942		/* take a lock to safely access pc->mem_cgroup */
1943 move_lock_page_cgroup(pc, &flags);
1944 need_unlock = true;
1945 mem = pc->mem_cgroup;
1946 if (!mem || !PageCgroupUsed(pc))
1947 goto out;
1948 }
1949
1950 switch (idx) {
1951 case MEMCG_NR_FILE_MAPPED:
1952 if (val > 0)
1953 SetPageCgroupFileMapped(pc);
1954 else if (!page_mapped(page))
1955 ClearPageCgroupFileMapped(pc);
1956 idx = MEM_CGROUP_STAT_FILE_MAPPED;
1957 break;
1958 default:
1959 BUG();
1960 }
1961
1962 this_cpu_add(mem->stat->count[idx], val);
1963
1964out:
1965 if (unlikely(need_unlock))
1966 move_unlock_page_cgroup(pc, &flags);
1967 rcu_read_unlock();
1968 return;
1969}
1970EXPORT_SYMBOL(mem_cgroup_update_page_stat);
1971
1972/*
1973 * size of first charge trial. "32" comes from vmscan.c's magic value.
1974 * TODO: larger batches may be necessary on very large systems.
1975 */
1976#define CHARGE_BATCH 32U
1977struct memcg_stock_pcp {
1978	struct mem_cgroup *cached; /* this is never the root cgroup */
1979 unsigned int nr_pages;
1980 struct work_struct work;
1981 unsigned long flags;
1982#define FLUSHING_CACHED_CHARGE (0)
1983};
1984static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1985static DEFINE_MUTEX(percpu_charge_mutex);
1986
1987/*
1988 * Try to consume stocked charge on this cpu. On success, one page worth of
1989 * charge is consumed from the local stock and true is returned. If the stock
1990 * is empty or holds charges from a cgroup other than the current target,
1991 * false is returned; the stock will be refilled later.
1992 */
1993static bool consume_stock(struct mem_cgroup *mem)
1994{
1995 struct memcg_stock_pcp *stock;
1996 bool ret = true;
1997
1998 stock = &get_cpu_var(memcg_stock);
1999 if (mem == stock->cached && stock->nr_pages)
2000 stock->nr_pages--;
2001 else /* need to call res_counter_charge */
2002 ret = false;
2003 put_cpu_var(memcg_stock);
2004 return ret;
2005}
2006
2007/*
2008 * Return the charges cached in the percpu stock to the res_counter and reset the cached information.
2009 */
2010static void drain_stock(struct memcg_stock_pcp *stock)
2011{
2012 struct mem_cgroup *old = stock->cached;
2013
2014 if (stock->nr_pages) {
2015 unsigned long bytes = stock->nr_pages * PAGE_SIZE;
2016
2017 res_counter_uncharge(&old->res, bytes);
2018 if (do_swap_account)
2019 res_counter_uncharge(&old->memsw, bytes);
2020 stock->nr_pages = 0;
2021 }
2022 stock->cached = NULL;
2023}
2024
2025/*
2026 * This must be called with preemption disabled, or by a thread which is
2027 * pinned to the local cpu.
2028 */
2029static void drain_local_stock(struct work_struct *dummy)
2030{
2031 struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
2032 drain_stock(stock);
2033 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2034}
2035
2036/*
2037 * Cache charges obtained from the res_counter in the local per-cpu area.
2038 * They will be consumed by consume_stock() later.
2039 */
2040static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
2041{
2042 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2043
2044 if (stock->cached != mem) { /* reset if necessary */
2045 drain_stock(stock);
2046 stock->cached = mem;
2047 }
2048 stock->nr_pages += nr_pages;
2049 put_cpu_var(memcg_stock);
2050}
2051
2052/*
2053 * Drain all per-CPU charge caches for root_mem and the subtree of the
2054 * hierarchy under it. The sync flag says whether we should block until
2055 * the work is done.
2056 */
2057static void drain_all_stock(struct mem_cgroup *root_mem, bool sync)
2058{
2059 int cpu, curcpu;
2060
2061 /* Notify other cpus that system-wide "drain" is running */
2062 get_online_cpus();
2063 curcpu = get_cpu();
2064 for_each_online_cpu(cpu) {
2065 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2066 struct mem_cgroup *mem;
2067
2068 mem = stock->cached;
2069 if (!mem || !stock->nr_pages)
2070 continue;
2071 if (!mem_cgroup_same_or_subtree(root_mem, mem))
2072 continue;
2073 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2074 if (cpu == curcpu)
2075 drain_local_stock(&stock->work);
2076 else
2077 schedule_work_on(cpu, &stock->work);
2078 }
2079 }
2080 put_cpu();
2081
2082 if (!sync)
2083 goto out;
2084
2085 for_each_online_cpu(cpu) {
2086 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2087 if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
2088 flush_work(&stock->work);
2089 }
2090out:
2091 put_online_cpus();
2092}
2093
2094/*
2095 * Tries to drain stocked charges on other cpus. This function is asynchronous
2096 * and just queues one work item per cpu to drain locally on each cpu. The
2097 * caller can expect some charges to be returned to the res_counter later,
2098 * but cannot wait for that.
2099 */
2100static void drain_all_stock_async(struct mem_cgroup *root_mem)
2101{
2102 /*
2103 * If someone calls draining, avoid adding more kworker runs.
2104 */
2105 if (!mutex_trylock(&percpu_charge_mutex))
2106 return;
2107 drain_all_stock(root_mem, false);
2108 mutex_unlock(&percpu_charge_mutex);
2109}
2110
2111/* This is a synchronous drain interface. */
2112static void drain_all_stock_sync(struct mem_cgroup *root_mem)
2113{
2114 /* called when force_empty is called */
2115 mutex_lock(&percpu_charge_mutex);
2116 drain_all_stock(root_mem, true);
2117 mutex_unlock(&percpu_charge_mutex);
2118}
2119
2120/*
2121 * This function drains the percpu counter values of a DEAD cpu and folds
2122 * them into nocpu_base. Note that this function can be preempted.
2123 */
2124static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu)
2125{
2126 int i;
2127
2128 spin_lock(&mem->pcp_counter_lock);
2129 for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
2130 long x = per_cpu(mem->stat->count[i], cpu);
2131
2132 per_cpu(mem->stat->count[i], cpu) = 0;
2133 mem->nocpu_base.count[i] += x;
2134 }
2135 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
2136 unsigned long x = per_cpu(mem->stat->events[i], cpu);
2137
2138 per_cpu(mem->stat->events[i], cpu) = 0;
2139 mem->nocpu_base.events[i] += x;
2140 }
2141 /* need to clear ON_MOVE value, works as a kind of lock. */
2142 per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
2143 spin_unlock(&mem->pcp_counter_lock);
2144}
2145
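/*
 * Re-seed the MEM_CGROUP_ON_MOVE per-cpu counter of a newly onlined cpu
 * from the nocpu_base copy, so the "account move" check stays consistent.
 */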
2146static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu)
2147{
2148 int idx = MEM_CGROUP_ON_MOVE;
2149
2150 spin_lock(&mem->pcp_counter_lock);
2151 per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx];
2152 spin_unlock(&mem->pcp_counter_lock);
2153}
2154
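/*
 * CPU hotplug callback: when a cpu comes online, restore its ON_MOVE
 * counter; when a cpu dies, fold its per-cpu statistics into nocpu_base
 * and drain its cached charge stock.
 */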
2155static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
2156 unsigned long action,
2157 void *hcpu)
2158{
2159 int cpu = (unsigned long)hcpu;
2160 struct memcg_stock_pcp *stock;
2161 struct mem_cgroup *iter;
2162
2163	if (action == CPU_ONLINE) {
2164 for_each_mem_cgroup_all(iter)
2165 synchronize_mem_cgroup_on_move(iter, cpu);
2166 return NOTIFY_OK;
2167 }
2168
2169	if ((action != CPU_DEAD) && (action != CPU_DEAD_FROZEN))
2170 return NOTIFY_OK;
2171
2172 for_each_mem_cgroup_all(iter)
2173 mem_cgroup_drain_pcp_counter(iter, cpu);
2174
2175 stock = &per_cpu(memcg_stock, cpu);
2176 drain_stock(stock);
2177 return NOTIFY_OK;
2178}
2179
2180
2181/* See __mem_cgroup_try_charge() for details */
2182enum {
2183 CHARGE_OK, /* success */
2184 CHARGE_RETRY, /* need to retry but retry is not bad */
2185 CHARGE_NOMEM, /* we can't do more. return -ENOMEM */
2186	CHARGE_WOULDBLOCK, /* __GFP_WAIT wasn't set and not enough resources */
2187	CHARGE_OOM_DIE, /* the current task was killed because of OOM */
2188};
2189
2190static int mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
2191 unsigned int nr_pages, bool oom_check)
2192{
2193 unsigned long csize = nr_pages * PAGE_SIZE;
2194 struct mem_cgroup *mem_over_limit;
2195 struct res_counter *fail_res;
2196 unsigned long flags = 0;
2197 int ret;
2198
2199 ret = res_counter_charge(&mem->res, csize, &fail_res);
2200
2201 if (likely(!ret)) {
2202 if (!do_swap_account)
2203 return CHARGE_OK;
2204 ret = res_counter_charge(&mem->memsw, csize, &fail_res);
2205 if (likely(!ret))
2206 return CHARGE_OK;
2207
2208 res_counter_uncharge(&mem->res, csize);
2209 mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
2210 flags |= MEM_CGROUP_RECLAIM_NOSWAP;
2211 } else
2212 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
2213 /*
2214 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
2215 * of regular pages (CHARGE_BATCH), or a single regular page (1).
2216 *
2217 * Never reclaim on behalf of optional batching, retry with a
2218 * single page instead.
2219 */
2220 if (nr_pages == CHARGE_BATCH)
2221 return CHARGE_RETRY;
2222
2223 if (!(gfp_mask & __GFP_WAIT))
2224 return CHARGE_WOULDBLOCK;
2225
2226 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
2227 gfp_mask, flags, NULL);
2228 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2229 return CHARGE_RETRY;
2230 /*
2231 * Even though the limit is exceeded at this point, reclaim
2232 * may have been able to free some pages. Retry the charge
2233 * before killing the task.
2234 *
2235 * Only for regular pages, though: huge pages are rather
2236 * unlikely to succeed so close to the limit, and we fall back
2237 * to regular pages anyway in case of failure.
2238 */
2239 if (nr_pages == 1 && ret)
2240 return CHARGE_RETRY;
2241
2242 /*
2243	 * During a task move, charges can be counted twice. So, it's better
2244	 * to wait until the end of the task move if one is in progress.
2245 */
2246 if (mem_cgroup_wait_acct_move(mem_over_limit))
2247 return CHARGE_RETRY;
2248
2249	/* If we don't need to invoke the OOM killer at all, return immediately */
2250 if (!oom_check)
2251 return CHARGE_NOMEM;
2252 /* check OOM */
2253 if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
2254 return CHARGE_OOM_DIE;
2255
2256 return CHARGE_RETRY;
2257}
2258
2259/*
2260 * Unlike the exported interface, an "oom" parameter is added. If oom==true,
2261 * the OOM killer may be invoked.
2262 */
2263static int __mem_cgroup_try_charge(struct mm_struct *mm,
2264 gfp_t gfp_mask,
2265 unsigned int nr_pages,
2266 struct mem_cgroup **memcg,
2267 bool oom)
2268{
2269 unsigned int batch = max(CHARGE_BATCH, nr_pages);
2270 int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2271 struct mem_cgroup *mem = NULL;
2272 int ret;
2273
2274 /*
2275	 * Unlike the global VM's OOM kill, we are not under a system-wide
2276	 * memory shortage here. So, let dying processes proceed in addition to
2277	 * MEMDIE processes.
2278 */
2279 if (unlikely(test_thread_flag(TIF_MEMDIE)
2280 || fatal_signal_pending(current)))
2281 goto bypass;
2282
2283 /*
2284 * We always charge the cgroup the mm_struct belongs to.
2285 * The mm_struct's mem_cgroup changes on task migration if the
2286 * thread group leader migrates. It's possible that mm is not
2287 * set, if so charge the init_mm (happens for pagecache usage).
2288 */
2289 if (!*memcg && !mm)
2290 goto bypass;
2291again:
2292 if (*memcg) { /* css should be a valid one */
2293 mem = *memcg;
2294 VM_BUG_ON(css_is_removed(&mem->css));
2295 if (mem_cgroup_is_root(mem))
2296 goto done;
2297 if (nr_pages == 1 && consume_stock(mem))
2298 goto done;
2299 css_get(&mem->css);
2300 } else {
2301 struct task_struct *p;
2302
2303 rcu_read_lock();
2304 p = rcu_dereference(mm->owner);
2305 /*
2306 * Because we don't have task_lock(), "p" can exit.
2307 * In that case, "mem" can point to root or p can be NULL with
2308		 * race with swapoff. Then, there is a small risk of mis-accounting.
2309		 * But this kind of mis-accounting due to races always happens because
2310		 * we don't hold cgroup_mutex(). Taking it would be overkill, so we
2311		 * allow that small race here.
2312		 * (*) swapoff et al. will charge against the mm_struct, not against
2313		 * the task_struct. So, mm->owner can be NULL.
2314 */
2315 mem = mem_cgroup_from_task(p);
2316 if (!mem || mem_cgroup_is_root(mem)) {
2317 rcu_read_unlock();
2318 goto done;
2319 }
2320 if (nr_pages == 1 && consume_stock(mem)) {
2321 /*
2322			 * It seems dangerous to access memcg without css_get().
2323			 * But considering how consume_stock() works, it's not
2324			 * necessary. If consume_stock() succeeds, some charges
2325 * from this memcg are cached on this cpu. So, we
2326 * don't need to call css_get()/css_tryget() before
2327 * calling consume_stock().
2328 */
2329 rcu_read_unlock();
2330 goto done;
2331 }
2332 /* after here, we may be blocked. we need to get refcnt */
2333 if (!css_tryget(&mem->css)) {
2334 rcu_read_unlock();
2335 goto again;
2336 }
2337 rcu_read_unlock();
2338 }
2339
2340 do {
2341 bool oom_check;
2342
2343 /* If killed, bypass charge */
2344 if (fatal_signal_pending(current)) {
2345 css_put(&mem->css);
2346 goto bypass;
2347 }
2348
2349 oom_check = false;
2350 if (oom && !nr_oom_retries) {
2351 oom_check = true;
2352 nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2353 }
2354
2355 ret = mem_cgroup_do_charge(mem, gfp_mask, batch, oom_check);
2356 switch (ret) {
2357 case CHARGE_OK:
2358 break;
2359 case CHARGE_RETRY: /* not in OOM situation but retry */
2360 batch = nr_pages;
2361 css_put(&mem->css);
2362 mem = NULL;
2363 goto again;
2364 case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
2365 css_put(&mem->css);
2366 goto nomem;
2367 case CHARGE_NOMEM: /* OOM routine works */
2368 if (!oom) {
2369 css_put(&mem->css);
2370 goto nomem;
2371 }
2372 /* If oom, we never return -ENOMEM */
2373 nr_oom_retries--;
2374 break;
2375 case CHARGE_OOM_DIE: /* Killed by OOM Killer */
2376 css_put(&mem->css);
2377 goto bypass;
2378 }
2379 } while (ret != CHARGE_OK);
2380
2381 if (batch > nr_pages)
2382 refill_stock(mem, batch - nr_pages);
2383 css_put(&mem->css);
2384done:
2385 *memcg = mem;
2386 return 0;
2387nomem:
2388 *memcg = NULL;
2389 return -ENOMEM;
2390bypass:
2391 *memcg = NULL;
2392 return 0;
2393}
2394
2395/*
2396 * Sometimes we have to undo a charge we got by try_charge().
2397 * This function does that: it uncharges the res_counters that were
2398 * charged by try_charge().
2399 */
2400static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
2401 unsigned int nr_pages)
2402{
2403 if (!mem_cgroup_is_root(mem)) {
2404 unsigned long bytes = nr_pages * PAGE_SIZE;
2405
2406 res_counter_uncharge(&mem->res, bytes);
2407 if (do_swap_account)
2408 res_counter_uncharge(&mem->memsw, bytes);
2409 }
2410}
2411
2412/*
2413 * A helper function to get a mem_cgroup from its ID. Must be called under
2414 * rcu_read_lock(). The caller must check css_is_removed() or similar if that
2415 * is a concern. (Dropping a refcnt from swap can happen against a removed
2416 * memcg.)
2417 */
2418static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2419{
2420 struct cgroup_subsys_state *css;
2421
2422 /* ID 0 is unused ID */
2423 if (!id)
2424 return NULL;
2425 css = css_lookup(&mem_cgroup_subsys, id);
2426 if (!css)
2427 return NULL;
2428 return container_of(css, struct mem_cgroup, css);
2429}
2430
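/*
 * Look up the mem_cgroup charged for a locked page and take a css
 * reference on it. For swap cache pages the swap_cgroup record is
 * consulted instead. Returns NULL if no reference could be taken.
 */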
2431struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
2432{
2433 struct mem_cgroup *mem = NULL;
2434 struct page_cgroup *pc;
2435 unsigned short id;
2436 swp_entry_t ent;
2437
2438 VM_BUG_ON(!PageLocked(page));
2439
2440 pc = lookup_page_cgroup(page);
2441 lock_page_cgroup(pc);
2442 if (PageCgroupUsed(pc)) {
2443 mem = pc->mem_cgroup;
2444 if (mem && !css_tryget(&mem->css))
2445 mem = NULL;
2446 } else if (PageSwapCache(page)) {
2447 ent.val = page_private(page);
2448 id = lookup_swap_cgroup(ent);
2449 rcu_read_lock();
2450 mem = mem_cgroup_lookup(id);
2451 if (mem && !css_tryget(&mem->css))
2452 mem = NULL;
2453 rcu_read_unlock();
2454 }
2455 unlock_page_cgroup(pc);
2456 return mem;
2457}
2458
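/*
 * Bind the page_cgroup to the charged memcg, set the USED (and CACHE)
 * flags according to the charge type, and update the charge statistics.
 * If the page_cgroup is already in use, the pre-acquired charge is
 * cancelled instead.
 */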
2459static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
2460 struct page *page,
2461 unsigned int nr_pages,
2462 struct page_cgroup *pc,
2463 enum charge_type ctype)
2464{
2465 lock_page_cgroup(pc);
2466 if (unlikely(PageCgroupUsed(pc))) {
2467 unlock_page_cgroup(pc);
2468 __mem_cgroup_cancel_charge(mem, nr_pages);
2469 return;
2470 }
2471 /*
2472	 * we don't need page_cgroup_lock for tail pages, because they are not
2473 * accessed by any other context at this point.
2474 */
2475 pc->mem_cgroup = mem;
2476 /*
2477 * We access a page_cgroup asynchronously without lock_page_cgroup().
2478 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
2479 * is accessed after testing USED bit. To make pc->mem_cgroup visible
2480 * before USED bit, we need memory barrier here.
2481 * See mem_cgroup_add_lru_list(), etc.
2482 */
2483 smp_wmb();
2484 switch (ctype) {
2485 case MEM_CGROUP_CHARGE_TYPE_CACHE:
2486 case MEM_CGROUP_CHARGE_TYPE_SHMEM:
2487 SetPageCgroupCache(pc);
2488 SetPageCgroupUsed(pc);
2489 break;
2490 case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2491 ClearPageCgroupCache(pc);
2492 SetPageCgroupUsed(pc);
2493 break;
2494 default:
2495 break;
2496 }
2497
2498 mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), nr_pages);
2499 unlock_page_cgroup(pc);
2500 /*
2501	 * "charge_statistics" updated the event counter, so check it now.
2502	 * Insert the ancestors (and their ancestors) into the soft-limit RB-tree
2503	 * if they exceed their soft limit.
2504 */
2505 memcg_check_events(mem, page);
2506}
2507
2508#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2509
2510#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\
2511 (1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION))
2512/*
2513 * Because tail pages are not marked as "used", set that flag here. We're
2514 * under zone->lru_lock, 'splitting on pmd' and compound_lock.
2515 */
2516void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
2517{
2518 struct page_cgroup *head_pc = lookup_page_cgroup(head);
2519 struct page_cgroup *tail_pc = lookup_page_cgroup(tail);
2520 unsigned long flags;
2521
2522 if (mem_cgroup_disabled())
2523 return;
2524 /*
2525 * We have no races with charge/uncharge but will have races with
2526 * page state accounting.
2527 */
2528 move_lock_page_cgroup(head_pc, &flags);
2529
2530 tail_pc->mem_cgroup = head_pc->mem_cgroup;
2531 smp_wmb(); /* see __commit_charge() */
2532 if (PageCgroupAcctLRU(head_pc)) {
2533 enum lru_list lru;
2534 struct mem_cgroup_per_zone *mz;
2535
2536 /*
2537		 * LRU flags cannot be copied because we need to add the tail
2538		 * page to the LRU via the generic call, and our hook will be called.
2539		 * Since we hold lru_lock, reduce the counter directly.
2540 */
2541 lru = page_lru(head);
2542 mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head);
2543 MEM_CGROUP_ZSTAT(mz, lru) -= 1;
2544 }
2545 tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
2546 move_unlock_page_cgroup(head_pc, &flags);
2547}
2548#endif
2549
2550/**
2551 * mem_cgroup_move_account - move account of the page
2552 * @page: the page
2553 * @nr_pages: number of regular pages (>1 for huge pages)
2554 * @pc: page_cgroup of the page.
2555 * @from: mem_cgroup which the page is moved from.
2556 * @to: mem_cgroup which the page is moved to. @from != @to.
2557 * @uncharge: whether we should call uncharge and css_put against @from.
2558 *
2559 * The caller must confirm following.
2560 * - page is not on LRU (isolate_page() is useful.)
2561 * - compound_lock is held when nr_pages > 1
2562 *
2563 * This function doesn't do "charge" or css_get to the new cgroup. That should
2564 * be done by the caller (__mem_cgroup_try_charge would be useful). If @uncharge
2565 * is true, this function does "uncharge" from the old cgroup; if @uncharge is
2566 * false it doesn't, so the caller should do the "uncharge".
2567 */
2568static int mem_cgroup_move_account(struct page *page,
2569 unsigned int nr_pages,
2570 struct page_cgroup *pc,
2571 struct mem_cgroup *from,
2572 struct mem_cgroup *to,
2573 bool uncharge)
2574{
2575 unsigned long flags;
2576 int ret;
2577
2578 VM_BUG_ON(from == to);
2579 VM_BUG_ON(PageLRU(page));
2580 /*
2581 * The page is isolated from LRU. So, collapse function
2582 * will not handle this page. But page splitting can happen.
2583 * Do this check under compound_page_lock(). The caller should
2584 * hold it.
2585 */
2586 ret = -EBUSY;
2587 if (nr_pages > 1 && !PageTransHuge(page))
2588 goto out;
2589
2590 lock_page_cgroup(pc);
2591
2592 ret = -EINVAL;
2593 if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
2594 goto unlock;
2595
2596 move_lock_page_cgroup(pc, &flags);
2597
2598 if (PageCgroupFileMapped(pc)) {
2599 /* Update mapped_file data for mem_cgroup */
2600 preempt_disable();
2601 __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2602 __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2603 preempt_enable();
2604 }
2605 mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
2606 if (uncharge)
2607 /* This is not "cancel", but cancel_charge does all we need. */
2608 __mem_cgroup_cancel_charge(from, nr_pages);
2609
2610 /* caller should have done css_get */
2611 pc->mem_cgroup = to;
2612 mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages);
2613 /*
2614	 * We charge against "to", which may not have any tasks. Then, "to"
2615	 * can be under rmdir(). But in the current implementation, the callers of
2616	 * this function are just force_empty() and move charge, so it's
2617 * guaranteed that "to" is never removed. So, we don't check rmdir
2618 * status here.
2619 */
2620 move_unlock_page_cgroup(pc, &flags);
2621 ret = 0;
2622unlock:
2623 unlock_page_cgroup(pc);
2624 /*
2625 * check events
2626 */
2627 memcg_check_events(to, page);
2628 memcg_check_events(from, page);
2629out:
2630 return ret;
2631}
2632
2633/*
2634 * move charges to its parent.
2635 */
2636
2637static int mem_cgroup_move_parent(struct page *page,
2638 struct page_cgroup *pc,
2639 struct mem_cgroup *child,
2640 gfp_t gfp_mask)
2641{
2642 struct cgroup *cg = child->css.cgroup;
2643 struct cgroup *pcg = cg->parent;
2644 struct mem_cgroup *parent;
2645 unsigned int nr_pages;
2646 unsigned long uninitialized_var(flags);
2647 int ret;
2648
2649 /* Is ROOT ? */
2650 if (!pcg)
2651 return -EINVAL;
2652
2653 ret = -EBUSY;
2654 if (!get_page_unless_zero(page))
2655 goto out;
2656 if (isolate_lru_page(page))
2657 goto put;
2658
2659 nr_pages = hpage_nr_pages(page);
2660
2661 parent = mem_cgroup_from_cont(pcg);
2662 ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
2663 if (ret || !parent)
2664 goto put_back;
2665
2666 if (nr_pages > 1)
2667 flags = compound_lock_irqsave(page);
2668
2669 ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
2670 if (ret)
2671 __mem_cgroup_cancel_charge(parent, nr_pages);
2672
2673 if (nr_pages > 1)
2674 compound_unlock_irqrestore(page, flags);
2675put_back:
2676 putback_lru_page(page);
2677put:
2678 put_page(page);
2679out:
2680 return ret;
2681}
2682
2683/*
2684 * Charge the memory controller for page usage.
2685 * Return
2686 * 0 if the charge was successful
2687 * < 0 if the cgroup is over its limit
2688 */
2689static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
2690 gfp_t gfp_mask, enum charge_type ctype)
2691{
2692 struct mem_cgroup *mem = NULL;
2693 unsigned int nr_pages = 1;
2694 struct page_cgroup *pc;
2695 bool oom = true;
2696 int ret;
2697
2698 if (PageTransHuge(page)) {
2699 nr_pages <<= compound_order(page);
2700 VM_BUG_ON(!PageTransHuge(page));
2701 /*
2702 * Never OOM-kill a process for a huge page. The
2703 * fault handler will fall back to regular pages.
2704 */
2705 oom = false;
2706 }
2707
2708 pc = lookup_page_cgroup(page);
2709 BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */
2710
2711 ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &mem, oom);
2712 if (ret || !mem)
2713 return ret;
2714
2715 __mem_cgroup_commit_charge(mem, page, nr_pages, pc, ctype);
2716 return 0;
2717}
2718
2719int mem_cgroup_newpage_charge(struct page *page,
2720 struct mm_struct *mm, gfp_t gfp_mask)
2721{
2722 if (mem_cgroup_disabled())
2723 return 0;
2724 /*
2725 * If already mapped, we don't have to account.
2726 * If page cache, page->mapping has address_space.
2727 * But page->mapping may have out-of-use anon_vma pointer,
2728	 * detect it with the PageAnon() check. A newly-mapped anon page's page->mapping
2729 * is NULL.
2730 */
2731 if (page_mapped(page) || (page->mapping && !PageAnon(page)))
2732 return 0;
2733 if (unlikely(!mm))
2734 mm = &init_mm;
2735 return mem_cgroup_charge_common(page, mm, gfp_mask,
2736 MEM_CGROUP_CHARGE_TYPE_MAPPED);
2737}
2738
2739static void
2740__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2741 enum charge_type ctype);
2742
2743static void
2744__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *mem,
2745 enum charge_type ctype)
2746{
2747 struct page_cgroup *pc = lookup_page_cgroup(page);
2748 /*
2749	 * In some cases, e.g. SwapCache and FUSE (splice_buf->radixtree), the page
2750	 * is already on the LRU. That means the page may be on some other
2751	 * page_cgroup's LRU. Take care of it.
2752 */
2753 mem_cgroup_lru_del_before_commit(page);
2754 __mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
2755 mem_cgroup_lru_add_after_commit(page);
2756 return;
2757}
2758
2759int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
2760 gfp_t gfp_mask)
2761{
2762 struct mem_cgroup *mem = NULL;
2763 int ret;
2764
2765 if (mem_cgroup_disabled())
2766 return 0;
2767 if (PageCompound(page))
2768 return 0;
2769
2770 if (unlikely(!mm))
2771 mm = &init_mm;
2772
2773 if (page_is_file_cache(page)) {
2774 ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &mem, true);
2775 if (ret || !mem)
2776 return ret;
2777
2778 /*
2779 * FUSE reuses pages without going through the final
2780 * put that would remove them from the LRU list, make
2781 * sure that they get relinked properly.
2782 */
2783 __mem_cgroup_commit_charge_lrucare(page, mem,
2784 MEM_CGROUP_CHARGE_TYPE_CACHE);
2785 return ret;
2786 }
2787 /* shmem */
2788 if (PageSwapCache(page)) {
2789 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2790 if (!ret)
2791 __mem_cgroup_commit_charge_swapin(page, mem,
2792 MEM_CGROUP_CHARGE_TYPE_SHMEM);
2793 } else
2794 ret = mem_cgroup_charge_common(page, mm, gfp_mask,
2795 MEM_CGROUP_CHARGE_TYPE_SHMEM);
2796
2797 return ret;
2798}
2799
2800/*
2801 * While swap-in, try_charge -> commit or cancel, the page is locked.
2802 * And when try_charge() successfully returns, one refcnt to memcg without
2803 * struct page_cgroup is acquired. This refcnt will be consumed by
2804 * "commit()" or removed by "cancel()"
2805 */
2806int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
2807 struct page *page,
2808 gfp_t mask, struct mem_cgroup **ptr)
2809{
2810 struct mem_cgroup *mem;
2811 int ret;
2812
2813 *ptr = NULL;
2814
2815 if (mem_cgroup_disabled())
2816 return 0;
2817
2818 if (!do_swap_account)
2819 goto charge_cur_mm;
2820 /*
2821 * A racing thread's fault, or swapoff, may have already updated
2822 * the pte, and even removed page from swap cache: in those cases
2823 * do_swap_page()'s pte_same() test will fail; but there's also a
2824 * KSM case which does need to charge the page.
2825 */
2826 if (!PageSwapCache(page))
2827 goto charge_cur_mm;
2828 mem = try_get_mem_cgroup_from_page(page);
2829 if (!mem)
2830 goto charge_cur_mm;
2831 *ptr = mem;
2832 ret = __mem_cgroup_try_charge(NULL, mask, 1, ptr, true);
2833 css_put(&mem->css);
2834 return ret;
2835charge_cur_mm:
2836 if (unlikely(!mm))
2837 mm = &init_mm;
2838 return __mem_cgroup_try_charge(mm, mask, 1, ptr, true);
2839}
2840
2841static void
2842__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2843 enum charge_type ctype)
2844{
2845 if (mem_cgroup_disabled())
2846 return;
2847 if (!ptr)
2848 return;
2849 cgroup_exclude_rmdir(&ptr->css);
2850
2851 __mem_cgroup_commit_charge_lrucare(page, ptr, ctype);
2852 /*
2853	 * Now the swap entry is in memory. This means this page may be
2854	 * counted both as mem and swap -- a double count.
2855	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
2856	 * under lock_page(). But in do_swap_page() (memory.c), reuse_swap_page()
2857	 * may call delete_from_swap_cache() before we reach here.
2858 */
2859 if (do_swap_account && PageSwapCache(page)) {
2860 swp_entry_t ent = {.val = page_private(page)};
2861 unsigned short id;
2862 struct mem_cgroup *memcg;
2863
2864 id = swap_cgroup_record(ent, 0);
2865 rcu_read_lock();
2866 memcg = mem_cgroup_lookup(id);
2867 if (memcg) {
2868 /*
2869			 * This recorded memcg may be an obsolete one, so avoid
2870			 * calling css_tryget().
2871 */
2872 if (!mem_cgroup_is_root(memcg))
2873 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2874 mem_cgroup_swap_statistics(memcg, false);
2875 mem_cgroup_put(memcg);
2876 }
2877 rcu_read_unlock();
2878 }
2879 /*
2880	 * At swapin, we may charge against a cgroup which has no tasks.
2881	 * So, rmdir()->pre_destroy() can be called while we do this charge.
2882	 * In that case, we need to call pre_destroy() again; check that here.
2883 */
2884 cgroup_release_and_wakeup_rmdir(&ptr->css);
2885}
2886
2887void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
2888{
2889 __mem_cgroup_commit_charge_swapin(page, ptr,
2890 MEM_CGROUP_CHARGE_TYPE_MAPPED);
2891}
2892
2893void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
2894{
2895 if (mem_cgroup_disabled())
2896 return;
2897 if (!mem)
2898 return;
2899 __mem_cgroup_cancel_charge(mem, 1);
2900}
2901
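/*
 * Uncharge nr_pages from @mem. When a batch is active (see
 * mem_cgroup_uncharge_start/end), single-page uncharges for the same
 * memcg are accumulated in current->memcg_batch and returned to the
 * res_counter in one go; otherwise the res_counter is uncharged directly.
 */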
2902static void mem_cgroup_do_uncharge(struct mem_cgroup *mem,
2903 unsigned int nr_pages,
2904 const enum charge_type ctype)
2905{
2906 struct memcg_batch_info *batch = NULL;
2907 bool uncharge_memsw = true;
2908
2909 /* If swapout, usage of swap doesn't decrease */
2910 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2911 uncharge_memsw = false;
2912
2913	batch = &current->memcg_batch;
2914 /*
2915	 * Usually, we do css_get() when we remember a memcg pointer.
2916	 * But in this case, we keep res->usage until the end of a series of
2917	 * uncharges. So it's ok to ignore the memcg's refcnt here.
2918 */
2919 if (!batch->memcg)
2920 batch->memcg = mem;
2921 /*
2922 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
2923 * In those cases, all pages freed continuously can be expected to be in
2924	 * the same cgroup and we have a chance to coalesce uncharges.
2925	 * But we uncharge one by one if the task was killed by OOM (TIF_MEMDIE),
2926	 * because we want to uncharge as soon as possible.
2927 */
2928
2929 if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
2930 goto direct_uncharge;
2931
2932 if (nr_pages > 1)
2933 goto direct_uncharge;
2934
2935 /*
2936	 * In the typical case, batch->memcg == mem. This means we can
2937	 * merge a series of uncharges into one uncharge of the res_counter.
2938	 * If not, we uncharge the res_counter one by one.
2939 */
2940 if (batch->memcg != mem)
2941 goto direct_uncharge;
2942 /* remember freed charge and uncharge it later */
2943 batch->nr_pages++;
2944 if (uncharge_memsw)
2945 batch->memsw_nr_pages++;
2946 return;
2947direct_uncharge:
2948 res_counter_uncharge(&mem->res, nr_pages * PAGE_SIZE);
2949 if (uncharge_memsw)
2950 res_counter_uncharge(&mem->memsw, nr_pages * PAGE_SIZE);
2951 if (unlikely(batch->memcg != mem))
2952 memcg_oom_recover(mem);
2953 return;
2954}
2955
2956/*
2957 * uncharge if !page_mapped(page)
2958 */
2959static struct mem_cgroup *
2960__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
2961{
2962 struct mem_cgroup *mem = NULL;
2963 unsigned int nr_pages = 1;
2964 struct page_cgroup *pc;
2965
2966 if (mem_cgroup_disabled())
2967 return NULL;
2968
2969 if (PageSwapCache(page))
2970 return NULL;
2971
2972 if (PageTransHuge(page)) {
2973 nr_pages <<= compound_order(page);
2974 VM_BUG_ON(!PageTransHuge(page));
2975 }
2976 /*
2977 * Check if our page_cgroup is valid
2978 */
2979 pc = lookup_page_cgroup(page);
2980 if (unlikely(!pc || !PageCgroupUsed(pc)))
2981 return NULL;
2982
2983 lock_page_cgroup(pc);
2984
2985 mem = pc->mem_cgroup;
2986
2987 if (!PageCgroupUsed(pc))
2988 goto unlock_out;
2989
2990 switch (ctype) {
2991 case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2992 case MEM_CGROUP_CHARGE_TYPE_DROP:
2993 /* See mem_cgroup_prepare_migration() */
2994 if (page_mapped(page) || PageCgroupMigration(pc))
2995 goto unlock_out;
2996 break;
2997 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
2998 if (!PageAnon(page)) { /* Shared memory */
2999 if (page->mapping && !page_is_file_cache(page))
3000 goto unlock_out;
3001 } else if (page_mapped(page)) /* Anon */
3002 goto unlock_out;
3003 break;
3004 default:
3005 break;
3006 }
3007
3008 mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -nr_pages);
3009
3010 ClearPageCgroupUsed(pc);
3011 /*
3012 * pc->mem_cgroup is not cleared here. It will be accessed when it's
3013 * freed from LRU. This is safe because uncharged page is expected not
3014 * to be reused (freed soon). Exception is SwapCache, it's handled by
3015 * special functions.
3016 */
3017
3018 unlock_page_cgroup(pc);
3019 /*
3020 * even after unlock, we have mem->res.usage here and this memcg
3021 * will never be freed.
3022 */
3023 memcg_check_events(mem, page);
3024 if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
3025 mem_cgroup_swap_statistics(mem, true);
3026 mem_cgroup_get(mem);
3027 }
3028 if (!mem_cgroup_is_root(mem))
3029 mem_cgroup_do_uncharge(mem, nr_pages, ctype);
3030
3031 return mem;
3032
3033unlock_out:
3034 unlock_page_cgroup(pc);
3035 return NULL;
3036}
3037
3038void mem_cgroup_uncharge_page(struct page *page)
3039{
3040 /* early check. */
3041 if (page_mapped(page))
3042 return;
3043 if (page->mapping && !PageAnon(page))
3044 return;
3045 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
3046}
3047
3048void mem_cgroup_uncharge_cache_page(struct page *page)
3049{
3050 VM_BUG_ON(page_mapped(page));
3051 VM_BUG_ON(page->mapping);
3052 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
3053}
3054
3055/*
3056 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
3057 * In those cases, pages are freed continuously and we can expect them to be
3058 * in the same memcg. Each of these callers itself limits the number of pages
3059 * freed at once, so uncharge_start/end() pairs up properly.
3060 * This may be called several (nested) times in one context.
3061 */
3062
3063void mem_cgroup_uncharge_start(void)
3064{
3065 current->memcg_batch.do_batch++;
3066 /* We can do nest. */
3067 if (current->memcg_batch.do_batch == 1) {
3068 current->memcg_batch.memcg = NULL;
3069 current->memcg_batch.nr_pages = 0;
3070 current->memcg_batch.memsw_nr_pages = 0;
3071 }
3072}
3073
3074void mem_cgroup_uncharge_end(void)
3075{
3076	struct memcg_batch_info *batch = &current->memcg_batch;
3077
3078 if (!batch->do_batch)
3079 return;
3080
3081 batch->do_batch--;
3082 if (batch->do_batch) /* If stacked, do nothing. */
3083 return;
3084
3085 if (!batch->memcg)
3086 return;
3087 /*
3088	 * This "batch->memcg" is valid without any css_get/put etc.,
3089	 * because we hide the charges behind us.
3090 */
3091 if (batch->nr_pages)
3092 res_counter_uncharge(&batch->memcg->res,
3093 batch->nr_pages * PAGE_SIZE);
3094 if (batch->memsw_nr_pages)
3095 res_counter_uncharge(&batch->memcg->memsw,
3096 batch->memsw_nr_pages * PAGE_SIZE);
3097 memcg_oom_recover(batch->memcg);
3098 /* forget this pointer (for sanity check) */
3099 batch->memcg = NULL;
3100}
3101
3102#ifdef CONFIG_SWAP
3103/*
3104 * Called after __delete_from_swap_cache(); drops the "page" account.
3105 * The memcg information is recorded in the swap_cgroup of "ent".
3106 */
3107void
3108mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
3109{
3110 struct mem_cgroup *memcg;
3111 int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
3112
3113 if (!swapout) /* this was a swap cache but the swap is unused ! */
3114 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
3115
3116 memcg = __mem_cgroup_uncharge_common(page, ctype);
3117
3118 /*
3119 * record memcg information, if swapout && memcg != NULL,
3120 * mem_cgroup_get() was called in uncharge().
3121 */
3122 if (do_swap_account && swapout && memcg)
3123 swap_cgroup_record(ent, css_id(&memcg->css));
3124}
3125#endif
3126
3127#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3128/*
3129 * Called from swap_entry_free(). Removes the record in swap_cgroup and
3130 * uncharges the "memsw" account.
3131 */
3132void mem_cgroup_uncharge_swap(swp_entry_t ent)
3133{
3134 struct mem_cgroup *memcg;
3135 unsigned short id;
3136
3137 if (!do_swap_account)
3138 return;
3139
3140 id = swap_cgroup_record(ent, 0);
3141 rcu_read_lock();
3142 memcg = mem_cgroup_lookup(id);
3143 if (memcg) {
3144 /*
3145		 * We uncharge this because the swap entry is freed.
3146		 * This memcg may be an obsolete one; we avoid calling css_tryget().
3147 */
3148 if (!mem_cgroup_is_root(memcg))
3149 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
3150 mem_cgroup_swap_statistics(memcg, false);
3151 mem_cgroup_put(memcg);
3152 }
3153 rcu_read_unlock();
3154}
3155
3156/**
3157 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3158 * @entry: swap entry to be moved
3159 * @from: mem_cgroup which the entry is moved from
3160 * @to: mem_cgroup which the entry is moved to
3161 * @need_fixup: whether we should fixup res_counters and refcounts.
3162 *
3163 * It succeeds only when the swap_cgroup's record for this entry is the same
3164 * as the mem_cgroup's id of @from.
3165 *
3166 * Returns 0 on success, -EINVAL on failure.
3167 *
3168 * The caller must have charged to @to, IOW, called res_counter_charge() about
3169 * both res and memsw, and called css_get().
3170 */
3171static int mem_cgroup_move_swap_account(swp_entry_t entry,
3172 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
3173{
3174 unsigned short old_id, new_id;
3175
3176 old_id = css_id(&from->css);
3177 new_id = css_id(&to->css);
3178
3179 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3180 mem_cgroup_swap_statistics(from, false);
3181 mem_cgroup_swap_statistics(to, true);
3182 /*
3183 * This function is only called from task migration context now.
3184 * It postpones res_counter and refcount handling till the end
3185 * of task migration(mem_cgroup_clear_mc()) for performance
3186 * improvement. But we cannot postpone mem_cgroup_get(to)
3187 * because if the process that has been moved to @to does
3188 * swap-in, the refcount of @to might be decreased to 0.
3189 */
3190 mem_cgroup_get(to);
3191 if (need_fixup) {
3192 if (!mem_cgroup_is_root(from))
3193 res_counter_uncharge(&from->memsw, PAGE_SIZE);
3194 mem_cgroup_put(from);
3195 /*
3196 * we charged both to->res and to->memsw, so we should
3197 * uncharge to->res.
3198 */
3199 if (!mem_cgroup_is_root(to))
3200 res_counter_uncharge(&to->res, PAGE_SIZE);
3201 }
3202 return 0;
3203 }
3204 return -EINVAL;
3205}
3206#else
3207static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3208 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
3209{
3210 return -EINVAL;
3211}
3212#endif
3213
3214/*
3215 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
3216 * page belongs to.
3217 */
3218int mem_cgroup_prepare_migration(struct page *page,
3219 struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask)
3220{
3221 struct mem_cgroup *mem = NULL;
3222 struct page_cgroup *pc;
3223 enum charge_type ctype;
3224 int ret = 0;
3225
3226 *ptr = NULL;
3227
3228 VM_BUG_ON(PageTransHuge(page));
3229 if (mem_cgroup_disabled())
3230 return 0;
3231
3232 pc = lookup_page_cgroup(page);
3233 lock_page_cgroup(pc);
3234 if (PageCgroupUsed(pc)) {
3235 mem = pc->mem_cgroup;
3236 css_get(&mem->css);
3237 /*
3238		 * When migrating an anonymous page, its mapcount goes down
3239		 * to 0 and uncharge() will be called. But, even if it's fully
3240		 * unmapped, migration may fail and this page has to be
3241		 * charged again. We set the MIGRATION flag here and delay uncharge
3242		 * until end_migration() is called.
3243 *
3244 * Corner Case Thinking
3245 * A)
3246 * When the old page was mapped as Anon and it's unmap-and-freed
3247 * while migration was ongoing.
3248 * If unmap finds the old page, uncharge() of it will be delayed
3249 * until end_migration(). If unmap finds a new page, it's
3250		 * uncharged when its mapcount goes from 1 to 0. If the unmap code
3251 * finds swap_migration_entry, the new page will not be mapped
3252 * and end_migration() will find it(mapcount==0).
3253 *
3254 * B)
3255		 * When the old page was mapped but migration fails, the kernel
3256 * remaps it. A charge for it is kept by MIGRATION flag even
3257 * if mapcount goes down to 0. We can do remap successfully
3258 * without charging it again.
3259 *
3260 * C)
3261 * The "old" page is under lock_page() until the end of
3262 * migration, so, the old page itself will not be swapped-out.
3263		 * If the new page is swapped out before end_migration, our
3264 * hook to usual swap-out path will catch the event.
3265 */
3266 if (PageAnon(page))
3267 SetPageCgroupMigration(pc);
3268 }
3269 unlock_page_cgroup(pc);
3270 /*
3271 * If the page is not charged at this point,
3272 * we return here.
3273 */
3274 if (!mem)
3275 return 0;
3276
3277 *ptr = mem;
3278 ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, ptr, false);
3279 css_put(&mem->css);/* drop extra refcnt */
3280 if (ret || *ptr == NULL) {
3281 if (PageAnon(page)) {
3282 lock_page_cgroup(pc);
3283 ClearPageCgroupMigration(pc);
3284 unlock_page_cgroup(pc);
3285 /*
3286 * The old page may be fully unmapped while we kept it.
3287 */
3288 mem_cgroup_uncharge_page(page);
3289 }
3290 return -ENOMEM;
3291 }
3292 /*
3293 * We charge new page before it's used/mapped. So, even if unlock_page()
3294 * is called before end_migration, we can catch all events on this new
3295 * page. In the case new page is migrated but not remapped, new page's
3296 * mapcount will be finally 0 and we call uncharge in end_migration().
3297 */
3298 pc = lookup_page_cgroup(newpage);
3299 if (PageAnon(page))
3300 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
3301 else if (page_is_file_cache(page))
3302 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
3303 else
3304 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
3305 __mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
3306 return ret;
3307}
3308
3309/* remove redundant charge if migration failed */
3310void mem_cgroup_end_migration(struct mem_cgroup *mem,
3311 struct page *oldpage, struct page *newpage, bool migration_ok)
3312{
3313 struct page *used, *unused;
3314 struct page_cgroup *pc;
3315
3316 if (!mem)
3317 return;
3318 /* blocks rmdir() */
3319 cgroup_exclude_rmdir(&mem->css);
3320 if (!migration_ok) {
3321 used = oldpage;
3322 unused = newpage;
3323 } else {
3324 used = newpage;
3325 unused = oldpage;
3326 }
3327 /*
3328 * We disallowed uncharge of pages under migration because mapcount
3329	 * of the page temporarily goes down to zero.
3330	 * Clear the flag and check whether the page should still be charged.
3331 */
3332 pc = lookup_page_cgroup(oldpage);
3333 lock_page_cgroup(pc);
3334 ClearPageCgroupMigration(pc);
3335 unlock_page_cgroup(pc);
3336
3337 __mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
3338
3339 /*
3340	 * If a page is file cache, the radix-tree replacement is atomic
3341	 * and we can skip this check. When it was an anon page, its mapcount
3342	 * went down to 0, but because we added the MIGRATION flag, it's not
3343	 * uncharged yet. There are several cases, but the page->mapcount check
3344	 * and the USED bit check in mem_cgroup_uncharge_page() do enough
3345	 * checking. (see mem_cgroup_prepare_migration() also)
3346 */
3347 if (PageAnon(used))
3348 mem_cgroup_uncharge_page(used);
3349 /*
3350 * At migration, we may charge account against cgroup which has no
3351 * tasks.
3352 * So, rmdir()->pre_destroy() can be called while we do this charge.
3353 * In that case, we need to call pre_destroy() again. check it here.
3354 */
3355 cgroup_release_and_wakeup_rmdir(&mem->css);
3356}
3357
3358#ifdef CONFIG_DEBUG_VM
3359static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
3360{
3361 struct page_cgroup *pc;
3362
3363 pc = lookup_page_cgroup(page);
3364 if (likely(pc) && PageCgroupUsed(pc))
3365 return pc;
3366 return NULL;
3367}
3368
3369bool mem_cgroup_bad_page_check(struct page *page)
3370{
3371 if (mem_cgroup_disabled())
3372 return false;
3373
3374 return lookup_page_cgroup_used(page) != NULL;
3375}
3376
3377void mem_cgroup_print_bad_page(struct page *page)
3378{
3379 struct page_cgroup *pc;
3380
3381 pc = lookup_page_cgroup_used(page);
3382 if (pc) {
3383 int ret = -1;
3384 char *path;
3385
3386 printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p",
3387 pc, pc->flags, pc->mem_cgroup);
3388
3389 path = kmalloc(PATH_MAX, GFP_KERNEL);
3390 if (path) {
3391 rcu_read_lock();
3392 ret = cgroup_path(pc->mem_cgroup->css.cgroup,
3393 path, PATH_MAX);
3394 rcu_read_unlock();
3395 }
3396
3397 printk(KERN_CONT "(%s)\n",
3398 (ret < 0) ? "cannot get the path" : path);
3399 kfree(path);
3400 }
3401}
3402#endif
3403
3404static DEFINE_MUTEX(set_limit_mutex);
3405
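/*
 * Try to set a new "memory.limit_in_bytes" value. If the current usage is
 * above the new limit, reclaim from the hierarchy and retry until the limit
 * can be set or the retry budget (a multiple of the number of children) is
 * exhausted. The memsw limit must not be smaller than the new memory limit.
 */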
3406static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
3407 unsigned long long val)
3408{
3409 int retry_count;
3410 u64 memswlimit, memlimit;
3411 int ret = 0;
3412 int children = mem_cgroup_count_children(memcg);
3413 u64 curusage, oldusage;
3414 int enlarge;
3415
3416 /*
3417	 * To keep hierarchical_reclaim simple, how long we should retry
3418	 * depends on the caller. We set our retry count to be a function
3419	 * of the number of children we should visit in this loop.
3420 */
3421 retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
3422
3423 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3424
3425 enlarge = 0;
3426 while (retry_count) {
3427 if (signal_pending(current)) {
3428 ret = -EINTR;
3429 break;
3430 }
3431 /*
3432		 * Rather than hiding all of this in some function, do it in an
3433		 * open-coded manner so you can see what it really does.
3434		 * We have to guarantee mem->res.limit <= mem->memsw.limit.
3435 */
3436 mutex_lock(&set_limit_mutex);
3437 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3438 if (memswlimit < val) {
3439 ret = -EINVAL;
3440 mutex_unlock(&set_limit_mutex);
3441 break;
3442 }
3443
3444 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3445 if (memlimit < val)
3446 enlarge = 1;
3447
3448 ret = res_counter_set_limit(&memcg->res, val);
3449 if (!ret) {
3450 if (memswlimit == val)
3451 memcg->memsw_is_minimum = true;
3452 else
3453 memcg->memsw_is_minimum = false;
3454 }
3455 mutex_unlock(&set_limit_mutex);
3456
3457 if (!ret)
3458 break;
3459
3460 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
3461 MEM_CGROUP_RECLAIM_SHRINK,
3462 NULL);
3463 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3464 /* Usage is reduced ? */
3465 if (curusage >= oldusage)
3466 retry_count--;
3467 else
3468 oldusage = curusage;
3469 }
3470 if (!ret && enlarge)
3471 memcg_oom_recover(memcg);
3472
3473 return ret;
3474}
3475
3476static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
3477 unsigned long long val)
3478{
3479 int retry_count;
3480 u64 memlimit, memswlimit, oldusage, curusage;
3481 int children = mem_cgroup_count_children(memcg);
3482 int ret = -EBUSY;
3483 int enlarge = 0;
3484
3485	/* see mem_cgroup_resize_limit */
3486 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
3487 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3488 while (retry_count) {
3489 if (signal_pending(current)) {
3490 ret = -EINTR;
3491 break;
3492 }
3493 /*
3494		 * Rather than hiding all of this in some function, do it in an
3495		 * open-coded manner so you can see what it really does.
3496		 * We have to guarantee mem->res.limit <= mem->memsw.limit.
3497 */
3498 mutex_lock(&set_limit_mutex);
3499 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3500 if (memlimit > val) {
3501 ret = -EINVAL;
3502 mutex_unlock(&set_limit_mutex);
3503 break;
3504 }
3505 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3506 if (memswlimit < val)
3507 enlarge = 1;
3508 ret = res_counter_set_limit(&memcg->memsw, val);
3509 if (!ret) {
3510 if (memlimit == val)
3511 memcg->memsw_is_minimum = true;
3512 else
3513 memcg->memsw_is_minimum = false;
3514 }
3515 mutex_unlock(&set_limit_mutex);
3516
3517 if (!ret)
3518 break;
3519
3520 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
3521 MEM_CGROUP_RECLAIM_NOSWAP |
3522 MEM_CGROUP_RECLAIM_SHRINK,
3523 NULL);
3524 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3525 /* Usage is reduced ? */
3526 if (curusage >= oldusage)
3527 retry_count--;
3528 else
3529 oldusage = curusage;
3530 }
3531 if (!ret && enlarge)
3532 memcg_oom_recover(memcg);
3533 return ret;
3534}
3535
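/*
 * Reclaim pages from the memcgs that exceed their soft limit the most in
 * the given zone. Walks the per-zone soft-limit RB-tree, reclaims from the
 * worst offender, and re-inserts it with its updated excess. Returns the
 * number of pages reclaimed; *total_scanned is updated with pages scanned.
 */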
3536unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3537 gfp_t gfp_mask,
3538 unsigned long *total_scanned)
3539{
3540 unsigned long nr_reclaimed = 0;
3541 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
3542 unsigned long reclaimed;
3543 int loop = 0;
3544 struct mem_cgroup_tree_per_zone *mctz;
3545 unsigned long long excess;
3546 unsigned long nr_scanned;
3547
3548 if (order > 0)
3549 return 0;
3550
3551 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
3552 /*
3553	 * This loop can run for a while, especially if mem_cgroups continuously
3554	 * keep exceeding their soft limit and putting the system under
3555	 * pressure.
3556 */
3557 do {
3558 if (next_mz)
3559 mz = next_mz;
3560 else
3561 mz = mem_cgroup_largest_soft_limit_node(mctz);
3562 if (!mz)
3563 break;
3564
3565 nr_scanned = 0;
3566 reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
3567 gfp_mask,
3568 MEM_CGROUP_RECLAIM_SOFT,
3569 &nr_scanned);
3570 nr_reclaimed += reclaimed;
3571 *total_scanned += nr_scanned;
3572 spin_lock(&mctz->lock);
3573
3574 /*
3575 * If we failed to reclaim anything from this memory cgroup
3576 * it is time to move on to the next cgroup
3577 */
3578 next_mz = NULL;
3579 if (!reclaimed) {
3580 do {
3581 /*
3582 * Loop until we find yet another one.
3583 *
3584 * By the time we get the soft_limit lock
3585				 * again, someone might have added the
3586 * group back on the RB tree. Iterate to
3587 * make sure we get a different mem.
3588 * mem_cgroup_largest_soft_limit_node returns
3589 * NULL if no other cgroup is present on
3590 * the tree
3591 */
3592 next_mz =
3593 __mem_cgroup_largest_soft_limit_node(mctz);
3594 if (next_mz == mz)
3595 css_put(&next_mz->mem->css);
3596 else /* next_mz == NULL or other memcg */
3597 break;
3598 } while (1);
3599 }
3600 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
3601 excess = res_counter_soft_limit_excess(&mz->mem->res);
3602 /*
3603 * One school of thought says that we should not add
3604 * back the node to the tree if reclaim returns 0.
3605 * But our reclaim could return 0, simply because due
3606 * to priority we are exposing a smaller subset of
3607 * memory to reclaim from. Consider this as a longer
3608 * term TODO.
3609 */
3610 /* If excess == 0, no tree ops */
3611 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
3612 spin_unlock(&mctz->lock);
3613 css_put(&mz->mem->css);
3614 loop++;
3615 /*
3616 * Could not reclaim anything and there are no more
3617 * mem cgroups to try or we seem to be looping without
3618 * reclaiming anything.
3619 */
3620 if (!nr_reclaimed &&
3621 (next_mz == NULL ||
3622 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3623 break;
3624 } while (!nr_reclaimed);
3625 if (next_mz)
3626 css_put(&next_mz->mem->css);
3627 return nr_reclaimed;
3628}
3629
3630/*
3631 * This routine traverses the page_cgroups on the given list and drops them all.
3632 * *And* this routine doesn't reclaim the pages themselves, it just removes the page_cgroups.
3633 */
3634static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
3635 int node, int zid, enum lru_list lru)
3636{
3637 struct zone *zone;
3638 struct mem_cgroup_per_zone *mz;
3639 struct page_cgroup *pc, *busy;
3640 unsigned long flags, loop;
3641 struct list_head *list;
3642 int ret = 0;
3643
3644 zone = &NODE_DATA(node)->node_zones[zid];
3645 mz = mem_cgroup_zoneinfo(mem, node, zid);
3646 list = &mz->lists[lru];
3647
3648 loop = MEM_CGROUP_ZSTAT(mz, lru);
3649 /* give some margin against EBUSY etc...*/
3650 loop += 256;
3651 busy = NULL;
3652 while (loop--) {
3653 struct page *page;
3654
3655 ret = 0;
3656 spin_lock_irqsave(&zone->lru_lock, flags);
3657 if (list_empty(list)) {
3658 spin_unlock_irqrestore(&zone->lru_lock, flags);
3659 break;
3660 }
3661 pc = list_entry(list->prev, struct page_cgroup, lru);
3662 if (busy == pc) {
3663 list_move(&pc->lru, list);
3664 busy = NULL;
3665 spin_unlock_irqrestore(&zone->lru_lock, flags);
3666 continue;
3667 }
3668 spin_unlock_irqrestore(&zone->lru_lock, flags);
3669
3670 page = lookup_cgroup_page(pc);
3671
3672 ret = mem_cgroup_move_parent(page, pc, mem, GFP_KERNEL);
3673 if (ret == -ENOMEM)
3674 break;
3675
3676 if (ret == -EBUSY || ret == -EINVAL) {
3677 /* found lock contention or "pc" is obsolete. */
3678 busy = pc;
3679 cond_resched();
3680 } else
3681 busy = NULL;
3682 }
3683
3684 if (!ret && !list_empty(list))
3685 return -EBUSY;
3686 return ret;
3687}
3688
3689/*
3690 * Make the mem_cgroup's charge 0 if there are no tasks.
3691 * This enables deleting this mem_cgroup.
3692 */
3693static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
3694{
3695 int ret;
3696 int node, zid, shrink;
3697 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
3698 struct cgroup *cgrp = mem->css.cgroup;
3699
3700 css_get(&mem->css);
3701
3702 shrink = 0;
3703 /* should free all ? */
3704 if (free_all)
3705 goto try_to_free;
3706move_account:
3707 do {
3708 ret = -EBUSY;
3709 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
3710 goto out;
3711 ret = -EINTR;
3712 if (signal_pending(current))
3713 goto out;
3714 /* This is for making all *used* pages to be on LRU. */
3715 lru_add_drain_all();
3716 drain_all_stock_sync(mem);
3717 ret = 0;
3718 mem_cgroup_start_move(mem);
3719 for_each_node_state(node, N_HIGH_MEMORY) {
3720 for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
3721 enum lru_list l;
3722 for_each_lru(l) {
3723 ret = mem_cgroup_force_empty_list(mem,
3724 node, zid, l);
3725 if (ret)
3726 break;
3727 }
3728 }
3729 if (ret)
3730 break;
3731 }
3732 mem_cgroup_end_move(mem);
3733 memcg_oom_recover(mem);
3734 /* it seems parent cgroup doesn't have enough mem */
3735 if (ret == -ENOMEM)
3736 goto try_to_free;
3737 cond_resched();
3738 /* "ret" should also be checked to ensure all lists are empty. */
3739 } while (mem->res.usage > 0 || ret);
3740out:
3741 css_put(&mem->css);
3742 return ret;
3743
3744try_to_free:
3745 /* returns EBUSY if there is a task or if we come here twice. */
3746 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
3747 ret = -EBUSY;
3748 goto out;
3749 }
3750	/* call try-to-free pages to make this cgroup empty */
3751 lru_add_drain_all();
3752 /* try to free all pages in this cgroup */
3753 shrink = 1;
3754 while (nr_retries && mem->res.usage > 0) {
3755 int progress;
3756
3757 if (signal_pending(current)) {
3758 ret = -EINTR;
3759 goto out;
3760 }
3761 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
3762 false);
3763 if (!progress) {
3764 nr_retries--;
3765 /* maybe some writeback is necessary */
3766 congestion_wait(BLK_RW_ASYNC, HZ/10);
3767 }
3768
3769 }
3770 lru_add_drain();
3771 /* try move_account...there may be some *locked* pages. */
3772 goto move_account;
3773}
3774
3775int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
3776{
3777 return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
3778}
3779
3780
3781static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
3782{
3783 return mem_cgroup_from_cont(cont)->use_hierarchy;
3784}
3785
3786static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
3787 u64 val)
3788{
3789 int retval = 0;
3790 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3791 struct cgroup *parent = cont->parent;
3792 struct mem_cgroup *parent_mem = NULL;
3793
3794 if (parent)
3795 parent_mem = mem_cgroup_from_cont(parent);
3796
3797 cgroup_lock();
3798 /*
3799 * If parent's use_hierarchy is set, we can't make any modifications
3800 * in the child subtrees. If it is unset, then the change can
3801 * occur, provided the current cgroup has no children.
3802 *
3803	 * For the root cgroup, parent_mem is NULL; we allow the value to be
3804	 * set if there are no children.
3805 */
3806 if ((!parent_mem || !parent_mem->use_hierarchy) &&
3807 (val == 1 || val == 0)) {
3808 if (list_empty(&cont->children))
3809 mem->use_hierarchy = val;
3810 else
3811 retval = -EBUSY;
3812 } else
3813 retval = -EINVAL;
3814 cgroup_unlock();
3815
3816 return retval;
3817}
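/*
 * Example: "memory.use_hierarchy" is the file served by the two handlers
 * above.  Assuming a mount at /cgroups/memory, hierarchical accounting for
 * cgroup A would typically be enabled with
 *
 *	echo 1 > /cgroups/memory/A/memory.use_hierarchy
 *
 * which only succeeds while A has no children (-EBUSY otherwise) and A's
 * parent does not already have use_hierarchy set (-EINVAL otherwise).
 */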
3818
3819
3820static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *mem,
3821 enum mem_cgroup_stat_index idx)
3822{
3823 struct mem_cgroup *iter;
3824 long val = 0;
3825
3826 /* Per-cpu values can be negative, use a signed accumulator */
3827 for_each_mem_cgroup_tree(iter, mem)
3828 val += mem_cgroup_read_stat(iter, idx);
3829
3830 if (val < 0) /* race ? */
3831 val = 0;
3832 return val;
3833}
3834
3835static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
3836{
3837 u64 val;
3838
3839 if (!mem_cgroup_is_root(mem)) {
3840 if (!swap)
3841 return res_counter_read_u64(&mem->res, RES_USAGE);
3842 else
3843 return res_counter_read_u64(&mem->memsw, RES_USAGE);
3844 }
3845
3846 val = mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_CACHE);
3847 val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_RSS);
3848
3849 if (swap)
3850 val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
3851
3852 return val << PAGE_SHIFT;
3853}
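/*
 * Illustration of the root-cgroup path above: for the root memcg the usage
 * is reconstructed from the hierarchical statistics rather than read from
 * the res_counter.  For example, with 100 pages of cache and 50 pages of
 * anon rss accounted below root, mem_cgroup_usage(mem, false) returns
 * (100 + 50) << PAGE_SHIFT bytes; with swap accounting enabled, the
 * MEM_CGROUP_STAT_SWAPOUT pages are added in as well.
 */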
3854
3855static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
3856{
3857 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3858 u64 val;
3859 int type, name;
3860
3861 type = MEMFILE_TYPE(cft->private);
3862 name = MEMFILE_ATTR(cft->private);
3863 switch (type) {
3864 case _MEM:
3865 if (name == RES_USAGE)
3866 val = mem_cgroup_usage(mem, false);
3867 else
3868 val = res_counter_read_u64(&mem->res, name);
3869 break;
3870 case _MEMSWAP:
3871 if (name == RES_USAGE)
3872 val = mem_cgroup_usage(mem, true);
3873 else
3874 val = res_counter_read_u64(&mem->memsw, name);
3875 break;
3876 default:
3877 BUG();
3878 break;
3879 }
3880 return val;
3881}
3882/*
3883 * The user of this function is...
3884 * RES_LIMIT.
3885 */
3886static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
3887 const char *buffer)
3888{
3889 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3890 int type, name;
3891 unsigned long long val;
3892 int ret;
3893
3894 type = MEMFILE_TYPE(cft->private);
3895 name = MEMFILE_ATTR(cft->private);
3896 switch (name) {
3897 case RES_LIMIT:
3898 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3899 ret = -EINVAL;
3900 break;
3901 }
3902		/* This function does all the necessary parsing...reuse it */
3903 ret = res_counter_memparse_write_strategy(buffer, &val);
3904 if (ret)
3905 break;
3906 if (type == _MEM)
3907 ret = mem_cgroup_resize_limit(memcg, val);
3908 else
3909 ret = mem_cgroup_resize_memsw_limit(memcg, val);
3910 break;
3911 case RES_SOFT_LIMIT:
3912 ret = res_counter_memparse_write_strategy(buffer, &val);
3913 if (ret)
3914 break;
3915 /*
3916		 * For memsw, soft limits are hard to implement in terms
3917		 * of semantics. For now, we only support soft limits on
3918		 * memory controlled without swap.
3919 */
3920 if (type == _MEM)
3921 ret = res_counter_set_soft_limit(&memcg->res, val);
3922 else
3923 ret = -EINVAL;
3924 break;
3925 default:
3926 ret = -EINVAL; /* should be BUG() ? */
3927 break;
3928 }
3929 return ret;
3930}
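/*
 * Example: this write handler serves memory.limit_in_bytes,
 * memory.soft_limit_in_bytes and (with swap accounting) the memsw variants.
 * res_counter_memparse_write_strategy() accepts memparse()-style suffixes,
 * so, assuming a mount at /cgroups/memory, a 512 MiB hard limit would be
 * set with
 *
 *	echo 512M > /cgroups/memory/A/memory.limit_in_bytes
 *
 * Setting a limit on the root cgroup is rejected with -EINVAL, as above.
 */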
3931
3932static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
3933 unsigned long long *mem_limit, unsigned long long *memsw_limit)
3934{
3935 struct cgroup *cgroup;
3936 unsigned long long min_limit, min_memsw_limit, tmp;
3937
3938 min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3939 min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3940 cgroup = memcg->css.cgroup;
3941 if (!memcg->use_hierarchy)
3942 goto out;
3943
3944 while (cgroup->parent) {
3945 cgroup = cgroup->parent;
3946 memcg = mem_cgroup_from_cont(cgroup);
3947 if (!memcg->use_hierarchy)
3948 break;
3949 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
3950 min_limit = min(min_limit, tmp);
3951 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3952 min_memsw_limit = min(min_memsw_limit, tmp);
3953 }
3954out:
3955 *mem_limit = min_limit;
3956 *memsw_limit = min_memsw_limit;
3957 return;
3958}
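/*
 * Illustration: the helper above reports the tightest limit along the
 * hierarchical ancestry.  If cgroup A has limit_in_bytes = 4G but its
 * hierarchical parent is limited to 1G, A's "hierarchical_memory_limit"
 * in memory.stat is reported as 1G, since the walk takes the minimum at
 * each level for as long as use_hierarchy is set.
 */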
3959
3960static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
3961{
3962 struct mem_cgroup *mem;
3963 int type, name;
3964
3965 mem = mem_cgroup_from_cont(cont);
3966 type = MEMFILE_TYPE(event);
3967 name = MEMFILE_ATTR(event);
3968 switch (name) {
3969 case RES_MAX_USAGE:
3970 if (type == _MEM)
3971 res_counter_reset_max(&mem->res);
3972 else
3973 res_counter_reset_max(&mem->memsw);
3974 break;
3975 case RES_FAILCNT:
3976 if (type == _MEM)
3977 res_counter_reset_failcnt(&mem->res);
3978 else
3979 res_counter_reset_failcnt(&mem->memsw);
3980 break;
3981 }
3982
3983 return 0;
3984}
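/*
 * Example: the reset trigger above backs memory.max_usage_in_bytes and
 * memory.failcnt (and their memsw counterparts).  Writing any value resets
 * the corresponding counter; assuming the usual mount point,
 *
 *	echo 0 > /cgroups/memory/A/memory.max_usage_in_bytes
 *
 * clears the recorded usage watermark for A.
 */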
3985
3986static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
3987 struct cftype *cft)
3988{
3989 return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
3990}
3991
3992#ifdef CONFIG_MMU
3993static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3994 struct cftype *cft, u64 val)
3995{
3996 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3997
3998 if (val >= (1 << NR_MOVE_TYPE))
3999 return -EINVAL;
4000 /*
4001	 * We check this value several times, both in can_attach() and
4002	 * attach(), so we need the cgroup lock to prevent this value from
4003	 * becoming inconsistent.
4004 */
4005 cgroup_lock();
4006 mem->move_charge_at_immigrate = val;
4007 cgroup_unlock();
4008
4009 return 0;
4010}
4011#else
4012static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
4013 struct cftype *cft, u64 val)
4014{
4015 return -ENOSYS;
4016}
4017#endif
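/*
 * Example: memory.move_charge_at_immigrate is a bitmask of charge types to
 * move when a task migrates into the cgroup; per the memcg documentation,
 * bit 0 selects anonymous pages (move_anon()) and bit 1 file pages
 * (move_file()).  Assuming the usual mount point,
 *
 *	echo 3 > /cgroups/memory/A/memory.move_charge_at_immigrate
 *
 * should enable moving both types; values >= (1 << NR_MOVE_TYPE) are
 * rejected with -EINVAL, and the feature as a whole depends on CONFIG_MMU.
 */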
4018
4019
4020/* For read statistics */
4021enum {
4022 MCS_CACHE,
4023 MCS_RSS,
4024 MCS_FILE_MAPPED,
4025 MCS_PGPGIN,
4026 MCS_PGPGOUT,
4027 MCS_SWAP,
4028 MCS_PGFAULT,
4029 MCS_PGMAJFAULT,
4030 MCS_INACTIVE_ANON,
4031 MCS_ACTIVE_ANON,
4032 MCS_INACTIVE_FILE,
4033 MCS_ACTIVE_FILE,
4034 MCS_UNEVICTABLE,
4035 NR_MCS_STAT,
4036};
4037
4038struct mcs_total_stat {
4039 s64 stat[NR_MCS_STAT];
4040};
4041
4042struct {
4043 char *local_name;
4044 char *total_name;
4045} memcg_stat_strings[NR_MCS_STAT] = {
4046 {"cache", "total_cache"},
4047 {"rss", "total_rss"},
4048 {"mapped_file", "total_mapped_file"},
4049 {"pgpgin", "total_pgpgin"},
4050 {"pgpgout", "total_pgpgout"},
4051 {"swap", "total_swap"},
4052 {"pgfault", "total_pgfault"},
4053 {"pgmajfault", "total_pgmajfault"},
4054 {"inactive_anon", "total_inactive_anon"},
4055 {"active_anon", "total_active_anon"},
4056 {"inactive_file", "total_inactive_file"},
4057 {"active_file", "total_active_file"},
4058 {"unevictable", "total_unevictable"}
4059};
4060
4061
4062static void
4063mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
4064{
4065 s64 val;
4066
4067 /* per cpu stat */
4068 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
4069 s->stat[MCS_CACHE] += val * PAGE_SIZE;
4070 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
4071 s->stat[MCS_RSS] += val * PAGE_SIZE;
4072 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
4073 s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
4074 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGIN);
4075 s->stat[MCS_PGPGIN] += val;
4076 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGOUT);
4077 s->stat[MCS_PGPGOUT] += val;
4078 if (do_swap_account) {
4079 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
4080 s->stat[MCS_SWAP] += val * PAGE_SIZE;
4081 }
4082 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGFAULT);
4083 s->stat[MCS_PGFAULT] += val;
4084 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGMAJFAULT);
4085 s->stat[MCS_PGMAJFAULT] += val;
4086
4087 /* per zone stat */
4088 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_INACTIVE_ANON));
4089 s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
4090 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_ACTIVE_ANON));
4091 s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
4092 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_INACTIVE_FILE));
4093 s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
4094 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_ACTIVE_FILE));
4095 s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
4096 val = mem_cgroup_nr_lru_pages(mem, BIT(LRU_UNEVICTABLE));
4097 s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
4098}
4099
4100static void
4101mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
4102{
4103 struct mem_cgroup *iter;
4104
4105 for_each_mem_cgroup_tree(iter, mem)
4106 mem_cgroup_get_local_stat(iter, s);
4107}
4108
4109#ifdef CONFIG_NUMA
4110static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
4111{
4112 int nid;
4113 unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
4114 unsigned long node_nr;
4115 struct cgroup *cont = m->private;
4116 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
4117
4118 total_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL);
4119 seq_printf(m, "total=%lu", total_nr);
4120 for_each_node_state(nid, N_HIGH_MEMORY) {
4121 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, LRU_ALL);
4122 seq_printf(m, " N%d=%lu", nid, node_nr);
4123 }
4124 seq_putc(m, '\n');
4125
4126 file_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_FILE);
4127 seq_printf(m, "file=%lu", file_nr);
4128 for_each_node_state(nid, N_HIGH_MEMORY) {
4129 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
4130 LRU_ALL_FILE);
4131 seq_printf(m, " N%d=%lu", nid, node_nr);
4132 }
4133 seq_putc(m, '\n');
4134
4135 anon_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_ANON);
4136 seq_printf(m, "anon=%lu", anon_nr);
4137 for_each_node_state(nid, N_HIGH_MEMORY) {
4138 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
4139 LRU_ALL_ANON);
4140 seq_printf(m, " N%d=%lu", nid, node_nr);
4141 }
4142 seq_putc(m, '\n');
4143
4144 unevictable_nr = mem_cgroup_nr_lru_pages(mem_cont, BIT(LRU_UNEVICTABLE));
4145 seq_printf(m, "unevictable=%lu", unevictable_nr);
4146 for_each_node_state(nid, N_HIGH_MEMORY) {
4147 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
4148 BIT(LRU_UNEVICTABLE));
4149 seq_printf(m, " N%d=%lu", nid, node_nr);
4150 }
4151 seq_putc(m, '\n');
4152 return 0;
4153}
4154#endif /* CONFIG_NUMA */
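/*
 * The memory.numa_stat output produced above looks roughly like this
 * (two-node machine, values purely illustrative):
 *
 *	total=1200 N0=800 N1=400
 *	file=900 N0=600 N1=300
 *	anon=290 N0=195 N1=95
 *	unevictable=10 N0=5 N1=5
 *
 * i.e. one line per LRU class with a per-node breakdown, all counts in
 * pages.
 */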
4155
4156static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
4157 struct cgroup_map_cb *cb)
4158{
4159 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
4160 struct mcs_total_stat mystat;
4161 int i;
4162
4163 memset(&mystat, 0, sizeof(mystat));
4164 mem_cgroup_get_local_stat(mem_cont, &mystat);
4165
4166
4167 for (i = 0; i < NR_MCS_STAT; i++) {
4168 if (i == MCS_SWAP && !do_swap_account)
4169 continue;
4170 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
4171 }
4172
4173 /* Hierarchical information */
4174 {
4175 unsigned long long limit, memsw_limit;
4176 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
4177 cb->fill(cb, "hierarchical_memory_limit", limit);
4178 if (do_swap_account)
4179 cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
4180 }
4181
4182 memset(&mystat, 0, sizeof(mystat));
4183 mem_cgroup_get_total_stat(mem_cont, &mystat);
4184 for (i = 0; i < NR_MCS_STAT; i++) {
4185 if (i == MCS_SWAP && !do_swap_account)
4186 continue;
4187 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
4188 }
4189
4190#ifdef CONFIG_DEBUG_VM
4191 cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
4192
4193 {
4194 int nid, zid;
4195 struct mem_cgroup_per_zone *mz;
4196 unsigned long recent_rotated[2] = {0, 0};
4197 unsigned long recent_scanned[2] = {0, 0};
4198
4199 for_each_online_node(nid)
4200 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
4201 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
4202
4203 recent_rotated[0] +=
4204 mz->reclaim_stat.recent_rotated[0];
4205 recent_rotated[1] +=
4206 mz->reclaim_stat.recent_rotated[1];
4207 recent_scanned[0] +=
4208 mz->reclaim_stat.recent_scanned[0];
4209 recent_scanned[1] +=
4210 mz->reclaim_stat.recent_scanned[1];
4211 }
4212 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
4213 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
4214 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
4215 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
4216 }
4217#endif
4218
4219 return 0;
4220}
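/*
 * The memory.stat map built above therefore contains, in order: the local
 * counters ("cache", "rss", "mapped_file" and the LRU sizes scaled to
 * bytes, the pgpgin/pgpgout/pgfault counters as raw event counts, "swap"
 * only with swap accounting), the hierarchical limits, the same counters
 * prefixed with "total_" accumulated over the whole subtree, and, when
 * CONFIG_DEBUG_VM is enabled, the recent_rotated/recent_scanned reclaim
 * statistics.
 */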
4221
4222static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
4223{
4224 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4225
4226 return mem_cgroup_swappiness(memcg);
4227}
4228
4229static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
4230 u64 val)
4231{
4232 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4233 struct mem_cgroup *parent;
4234
4235 if (val > 100)
4236 return -EINVAL;
4237
4238 if (cgrp->parent == NULL)
4239 return -EINVAL;
4240
4241 parent = mem_cgroup_from_cont(cgrp->parent);
4242
4243 cgroup_lock();
4244
4245 /* If under hierarchy, only empty-root can set this value */
4246 if ((parent->use_hierarchy) ||
4247 (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
4248 cgroup_unlock();
4249 return -EINVAL;
4250 }
4251
4252 memcg->swappiness = val;
4253
4254 cgroup_unlock();
4255
4256 return 0;
4257}
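/*
 * Example: memory.swappiness overrides the global swappiness for reclaim
 * inside this cgroup.  Assuming the usual mount point,
 *
 *	echo 10 > /cgroups/memory/A/memory.swappiness
 *
 * is accepted only for values <= 100, never for the root cgroup, and is
 * rejected while A sits under a hierarchical parent or has children of its
 * own with use_hierarchy set (the -EINVAL cases above).
 */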
4258
4259static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4260{
4261 struct mem_cgroup_threshold_ary *t;
4262 u64 usage;
4263 int i;
4264
4265 rcu_read_lock();
4266 if (!swap)
4267 t = rcu_dereference(memcg->thresholds.primary);
4268 else
4269 t = rcu_dereference(memcg->memsw_thresholds.primary);
4270
4271 if (!t)
4272 goto unlock;
4273
4274 usage = mem_cgroup_usage(memcg, swap);
4275
4276 /*
4277	 * current_threshold points to the threshold just below usage.
4278	 * If that is not the case, a threshold was crossed after the last
4279	 * call of __mem_cgroup_threshold().
4280 */
4281 i = t->current_threshold;
4282
4283 /*
4284 * Iterate backward over array of thresholds starting from
4285 * current_threshold and check if a threshold is crossed.
4286	 * If none of the thresholds below usage is crossed, we read
4287 * only one element of the array here.
4288 */
4289 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4290 eventfd_signal(t->entries[i].eventfd, 1);
4291
4292 /* i = current_threshold + 1 */
4293 i++;
4294
4295 /*
4296 * Iterate forward over array of thresholds starting from
4297 * current_threshold+1 and check if a threshold is crossed.
4298	 * If none of the thresholds above usage is crossed, we read
4299 * only one element of the array here.
4300 */
4301 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4302 eventfd_signal(t->entries[i].eventfd, 1);
4303
4304 /* Update current_threshold */
4305 t->current_threshold = i - 1;
4306unlock:
4307 rcu_read_unlock();
4308}
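/*
 * Worked example of the two scans above (values illustrative): with
 * registered thresholds {4M, 8M, 16M} and current_threshold == 1 (usage
 * was ~10M), a drop of usage to 3M makes the backward loop signal the 8M
 * and 4M eventfds and leaves current_threshold == -1; a rise to 20M
 * instead makes the forward loop signal the 16M eventfd and leaves
 * current_threshold == 2.
 */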
4309
4310static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4311{
4312 while (memcg) {
4313 __mem_cgroup_threshold(memcg, false);
4314 if (do_swap_account)
4315 __mem_cgroup_threshold(memcg, true);
4316
4317 memcg = parent_mem_cgroup(memcg);
4318 }
4319}
4320
4321static int compare_thresholds(const void *a, const void *b)
4322{
4323 const struct mem_cgroup_threshold *_a = a;
4324 const struct mem_cgroup_threshold *_b = b;
4325
4326 return _a->threshold - _b->threshold;
4327}
4328
4329static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem)
4330{
4331 struct mem_cgroup_eventfd_list *ev;
4332
4333 list_for_each_entry(ev, &mem->oom_notify, list)
4334 eventfd_signal(ev->eventfd, 1);
4335 return 0;
4336}
4337
4338static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
4339{
4340 struct mem_cgroup *iter;
4341
4342 for_each_mem_cgroup_tree(iter, mem)
4343 mem_cgroup_oom_notify_cb(iter);
4344}
4345
4346static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
4347 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4348{
4349 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4350 struct mem_cgroup_thresholds *thresholds;
4351 struct mem_cgroup_threshold_ary *new;
4352 int type = MEMFILE_TYPE(cft->private);
4353 u64 threshold, usage;
4354 int i, size, ret;
4355
4356 ret = res_counter_memparse_write_strategy(args, &threshold);
4357 if (ret)
4358 return ret;
4359
4360 mutex_lock(&memcg->thresholds_lock);
4361
4362 if (type == _MEM)
4363 thresholds = &memcg->thresholds;
4364 else if (type == _MEMSWAP)
4365 thresholds = &memcg->memsw_thresholds;
4366 else
4367 BUG();
4368
4369 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4370
4371	/* Check if a threshold was crossed before adding a new one */
4372 if (thresholds->primary)
4373 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4374
4375 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4376
4377 /* Allocate memory for new array of thresholds */
4378 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
4379 GFP_KERNEL);
4380 if (!new) {
4381 ret = -ENOMEM;
4382 goto unlock;
4383 }
4384 new->size = size;
4385
4386 /* Copy thresholds (if any) to new array */
4387 if (thresholds->primary) {
4388 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
4389 sizeof(struct mem_cgroup_threshold));
4390 }
4391
4392 /* Add new threshold */
4393 new->entries[size - 1].eventfd = eventfd;
4394 new->entries[size - 1].threshold = threshold;
4395
4396	/* Sort thresholds. Registering a new threshold isn't time-critical */
4397 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
4398 compare_thresholds, NULL);
4399
4400 /* Find current threshold */
4401 new->current_threshold = -1;
4402 for (i = 0; i < size; i++) {
4403 if (new->entries[i].threshold < usage) {
4404 /*
4405 * new->current_threshold will not be used until
4406 * rcu_assign_pointer(), so it's safe to increment
4407 * it here.
4408 */
4409 ++new->current_threshold;
4410 }
4411 }
4412
4413 /* Free old spare buffer and save old primary buffer as spare */
4414 kfree(thresholds->spare);
4415 thresholds->spare = thresholds->primary;
4416
4417 rcu_assign_pointer(thresholds->primary, new);
4418
4419 /* To be sure that nobody uses thresholds */
4420 synchronize_rcu();
4421
4422unlock:
4423 mutex_unlock(&memcg->thresholds_lock);
4424
4425 return ret;
4426}
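/*
 * Example: thresholds are registered through the cgroup v1 eventfd
 * interface.  Userspace typically does something like the following
 * (names illustrative, error handling omitted):
 *
 *	evfd = eventfd(0, 0);
 *	ufd  = open(".../memory.usage_in_bytes", O_RDONLY);
 *	dprintf(ctrl_fd, "%d %d 64M\n", evfd, ufd);  (ctrl_fd: cgroup.event_control)
 *
 * after which a read on evfd returns whenever usage crosses 64M in either
 * direction; the "64M" argument string is parsed by
 * res_counter_memparse_write_strategy() above.
 */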
4427
4428static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
4429 struct cftype *cft, struct eventfd_ctx *eventfd)
4430{
4431 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4432 struct mem_cgroup_thresholds *thresholds;
4433 struct mem_cgroup_threshold_ary *new;
4434 int type = MEMFILE_TYPE(cft->private);
4435 u64 usage;
4436 int i, j, size;
4437
4438 mutex_lock(&memcg->thresholds_lock);
4439 if (type == _MEM)
4440 thresholds = &memcg->thresholds;
4441 else if (type == _MEMSWAP)
4442 thresholds = &memcg->memsw_thresholds;
4443 else
4444 BUG();
4445
4446 /*
4447	 * Something went wrong if we're trying to unregister a threshold
4448	 * when we don't have any thresholds.
4449 */
4450 BUG_ON(!thresholds);
4451
4452 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4453
4454	/* Check if a threshold was crossed before removing */
4455 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4456
4457	/* Calculate the new number of thresholds */
4458 size = 0;
4459 for (i = 0; i < thresholds->primary->size; i++) {
4460 if (thresholds->primary->entries[i].eventfd != eventfd)
4461 size++;
4462 }
4463
4464 new = thresholds->spare;
4465
4466 /* Set thresholds array to NULL if we don't have thresholds */
4467 if (!size) {
4468 kfree(new);
4469 new = NULL;
4470 goto swap_buffers;
4471 }
4472
4473 new->size = size;
4474
4475 /* Copy thresholds and find current threshold */
4476 new->current_threshold = -1;
4477 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4478 if (thresholds->primary->entries[i].eventfd == eventfd)
4479 continue;
4480
4481 new->entries[j] = thresholds->primary->entries[i];
4482 if (new->entries[j].threshold < usage) {
4483 /*
4484 * new->current_threshold will not be used
4485 * until rcu_assign_pointer(), so it's safe to increment
4486 * it here.
4487 */
4488 ++new->current_threshold;
4489 }
4490 j++;
4491 }
4492
4493swap_buffers:
4494 /* Swap primary and spare array */
4495 thresholds->spare = thresholds->primary;
4496 rcu_assign_pointer(thresholds->primary, new);
4497
4498 /* To be sure that nobody uses thresholds */
4499 synchronize_rcu();
4500
4501 mutex_unlock(&memcg->thresholds_lock);
4502}
4503
4504static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
4505 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4506{
4507 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4508 struct mem_cgroup_eventfd_list *event;
4509 int type = MEMFILE_TYPE(cft->private);
4510
4511 BUG_ON(type != _OOM_TYPE);
4512 event = kmalloc(sizeof(*event), GFP_KERNEL);
4513 if (!event)
4514 return -ENOMEM;
4515
4516 spin_lock(&memcg_oom_lock);
4517
4518 event->eventfd = eventfd;
4519 list_add(&event->list, &memcg->oom_notify);
4520
4521 /* already in OOM ? */
4522 if (atomic_read(&memcg->under_oom))
4523 eventfd_signal(eventfd, 1);
4524 spin_unlock(&memcg_oom_lock);
4525
4526 return 0;
4527}
4528
4529static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
4530 struct cftype *cft, struct eventfd_ctx *eventfd)
4531{
4532 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4533 struct mem_cgroup_eventfd_list *ev, *tmp;
4534 int type = MEMFILE_TYPE(cft->private);
4535
4536 BUG_ON(type != _OOM_TYPE);
4537
4538 spin_lock(&memcg_oom_lock);
4539
4540 list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
4541 if (ev->eventfd == eventfd) {
4542 list_del(&ev->list);
4543 kfree(ev);
4544 }
4545 }
4546
4547 spin_unlock(&memcg_oom_lock);
4548}
4549
4550static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
4551 struct cftype *cft, struct cgroup_map_cb *cb)
4552{
4553 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4554
4555 cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);
4556
4557 if (atomic_read(&mem->under_oom))
4558 cb->fill(cb, "under_oom", 1);
4559 else
4560 cb->fill(cb, "under_oom", 0);
4561 return 0;
4562}
4563
4564static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
4565 struct cftype *cft, u64 val)
4566{
4567 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4568 struct mem_cgroup *parent;
4569
4570	/* cannot be set on the root cgroup, and only 0 and 1 are allowed */
4571 if (!cgrp->parent || !((val == 0) || (val == 1)))
4572 return -EINVAL;
4573
4574 parent = mem_cgroup_from_cont(cgrp->parent);
4575
4576 cgroup_lock();
4577	/* oom-kill-disable is a flag for the whole subhierarchy. */
4578 if ((parent->use_hierarchy) ||
4579 (mem->use_hierarchy && !list_empty(&cgrp->children))) {
4580 cgroup_unlock();
4581 return -EINVAL;
4582 }
4583 mem->oom_kill_disable = val;
4584 if (!val)
4585 memcg_oom_recover(mem);
4586 cgroup_unlock();
4587 return 0;
4588}
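/*
 * Example: memory.oom_control both reports OOM state and lets the OOM
 * killer be disabled for a (sub)hierarchy.  Assuming the usual mount point,
 *
 *	echo 1 > /cgroups/memory/A/memory.oom_control
 *
 * makes tasks in A wait on the memcg OOM waitqueue instead of being killed
 * when the limit is hit; reading the file shows oom_kill_disable and
 * under_oom, and an eventfd can be attached via the _OOM_TYPE
 * register/unregister handlers above to be notified of OOM events.
 */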
4589
4590#ifdef CONFIG_NUMA
4591static const struct file_operations mem_control_numa_stat_file_operations = {
4592 .read = seq_read,
4593 .llseek = seq_lseek,
4594 .release = single_release,
4595};
4596
4597static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
4598{
4599 struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
4600
4601 file->f_op = &mem_control_numa_stat_file_operations;
4602 return single_open(file, mem_control_numa_stat_show, cont);
4603}
4604#endif /* CONFIG_NUMA */
4605
4606static struct cftype mem_cgroup_files[] = {
4607 {
4608 .name = "usage_in_bytes",
4609 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4610 .read_u64 = mem_cgroup_read,
4611 .register_event = mem_cgroup_usage_register_event,
4612 .unregister_event = mem_cgroup_usage_unregister_event,
4613 },
4614 {
4615 .name = "max_usage_in_bytes",
4616 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4617 .trigger = mem_cgroup_reset,
4618 .read_u64 = mem_cgroup_read,
4619 },
4620 {
4621 .name = "limit_in_bytes",
4622 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4623 .write_string = mem_cgroup_write,
4624 .read_u64 = mem_cgroup_read,
4625 },
4626 {
4627 .name = "soft_limit_in_bytes",
4628 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4629 .write_string = mem_cgroup_write,
4630 .read_u64 = mem_cgroup_read,
4631 },
4632 {
4633 .name = "failcnt",
4634 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4635 .trigger = mem_cgroup_reset,
4636 .read_u64 = mem_cgroup_read,
4637 },
4638 {
4639 .name = "stat",
4640 .read_map = mem_control_stat_show,
4641 },
4642 {
4643 .name = "force_empty",
4644 .trigger = mem_cgroup_force_empty_write,
4645 },
4646 {
4647 .name = "use_hierarchy",
4648 .write_u64 = mem_cgroup_hierarchy_write,
4649 .read_u64 = mem_cgroup_hierarchy_read,
4650 },
4651 {
4652 .name = "swappiness",
4653 .read_u64 = mem_cgroup_swappiness_read,
4654 .write_u64 = mem_cgroup_swappiness_write,
4655 },
4656 {
4657 .name = "move_charge_at_immigrate",
4658 .read_u64 = mem_cgroup_move_charge_read,
4659 .write_u64 = mem_cgroup_move_charge_write,
4660 },
4661 {
4662 .name = "oom_control",
4663 .read_map = mem_cgroup_oom_control_read,
4664 .write_u64 = mem_cgroup_oom_control_write,
4665 .register_event = mem_cgroup_oom_register_event,
4666 .unregister_event = mem_cgroup_oom_unregister_event,
4667 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4668 },
4669#ifdef CONFIG_NUMA
4670 {
4671 .name = "numa_stat",
4672 .open = mem_control_numa_stat_open,
4673 .mode = S_IRUGO,
4674 },
4675#endif
4676};
4677
4678#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4679static struct cftype memsw_cgroup_files[] = {
4680 {
4681 .name = "memsw.usage_in_bytes",
4682 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
4683 .read_u64 = mem_cgroup_read,
4684 .register_event = mem_cgroup_usage_register_event,
4685 .unregister_event = mem_cgroup_usage_unregister_event,
4686 },
4687 {
4688 .name = "memsw.max_usage_in_bytes",
4689 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
4690 .trigger = mem_cgroup_reset,
4691 .read_u64 = mem_cgroup_read,
4692 },
4693 {
4694 .name = "memsw.limit_in_bytes",
4695 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
4696 .write_string = mem_cgroup_write,
4697 .read_u64 = mem_cgroup_read,
4698 },
4699 {
4700 .name = "memsw.failcnt",
4701 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
4702 .trigger = mem_cgroup_reset,
4703 .read_u64 = mem_cgroup_read,
4704 },
4705};
4706
4707static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4708{
4709 if (!do_swap_account)
4710 return 0;
4711 return cgroup_add_files(cont, ss, memsw_cgroup_files,
4712 ARRAY_SIZE(memsw_cgroup_files));
4713};
4714#else
4715static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4716{
4717 return 0;
4718}
4719#endif
4720
4721static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
4722{
4723 struct mem_cgroup_per_node *pn;
4724 struct mem_cgroup_per_zone *mz;
4725 enum lru_list l;
4726 int zone, tmp = node;
4727 /*
4728	 * This routine is called against all possible nodes.
4729	 * But it's a BUG to call kmalloc() against an offline node.
4730	 *
4731	 * TODO: this routine can waste a lot of memory for nodes which will
4732	 * never be onlined. It's better to use a memory hotplug callback
4733	 * function.
4734 */
4735 if (!node_state(node, N_NORMAL_MEMORY))
4736 tmp = -1;
4737 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4738 if (!pn)
4739 return 1;
4740
4741 mem->info.nodeinfo[node] = pn;
4742 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4743 mz = &pn->zoneinfo[zone];
4744 for_each_lru(l)
4745 INIT_LIST_HEAD(&mz->lists[l]);
4746 mz->usage_in_excess = 0;
4747 mz->on_tree = false;
4748 mz->mem = mem;
4749 }
4750 return 0;
4751}
4752
4753static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
4754{
4755 kfree(mem->info.nodeinfo[node]);
4756}
4757
4758static struct mem_cgroup *mem_cgroup_alloc(void)
4759{
4760 struct mem_cgroup *mem;
4761 int size = sizeof(struct mem_cgroup);
4762
4763 /* Can be very big if MAX_NUMNODES is very big */
4764 if (size < PAGE_SIZE)
4765 mem = kzalloc(size, GFP_KERNEL);
4766 else
4767 mem = vzalloc(size);
4768
4769 if (!mem)
4770 return NULL;
4771
4772 mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4773 if (!mem->stat)
4774 goto out_free;
4775 spin_lock_init(&mem->pcp_counter_lock);
4776 return mem;
4777
4778out_free:
4779 if (size < PAGE_SIZE)
4780 kfree(mem);
4781 else
4782 vfree(mem);
4783 return NULL;
4784}
4785
4786/*
4787 * When destroying a mem_cgroup, references from swap_cgroup can remain.
4788 * (scanning them all at force_empty is too costly...)
4789 *
4790 * Instead of clearing all references at force_empty, we remember
4791 * the number of references from swap_cgroup and free the mem_cgroup when
4792 * it goes down to 0.
4793 *
4794 * Removal of cgroup itself succeeds regardless of refs from swap.
4795 */
4796
4797static void __mem_cgroup_free(struct mem_cgroup *mem)
4798{
4799 int node;
4800
4801 mem_cgroup_remove_from_trees(mem);
4802 free_css_id(&mem_cgroup_subsys, &mem->css);
4803
4804 for_each_node_state(node, N_POSSIBLE)
4805 free_mem_cgroup_per_zone_info(mem, node);
4806
4807 free_percpu(mem->stat);
4808 if (sizeof(struct mem_cgroup) < PAGE_SIZE)
4809 kfree(mem);
4810 else
4811 vfree(mem);
4812}
4813
4814static void mem_cgroup_get(struct mem_cgroup *mem)
4815{
4816 atomic_inc(&mem->refcnt);
4817}
4818
4819static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
4820{
4821 if (atomic_sub_and_test(count, &mem->refcnt)) {
4822 struct mem_cgroup *parent = parent_mem_cgroup(mem);
4823 __mem_cgroup_free(mem);
4824 if (parent)
4825 mem_cgroup_put(parent);
4826 }
4827}
4828
4829static void mem_cgroup_put(struct mem_cgroup *mem)
4830{
4831 __mem_cgroup_put(mem, 1);
4832}
4833
4834/*
4835 * Returns the parent mem_cgroup in the memcg hierarchy, with hierarchy enabled.
4836 */
4837static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
4838{
4839 if (!mem->res.parent)
4840 return NULL;
4841 return mem_cgroup_from_res_counter(mem->res.parent, res);
4842}
4843
4844#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4845static void __init enable_swap_cgroup(void)
4846{
4847 if (!mem_cgroup_disabled() && really_do_swap_account)
4848 do_swap_account = 1;
4849}
4850#else
4851static void __init enable_swap_cgroup(void)
4852{
4853}
4854#endif
4855
4856static int mem_cgroup_soft_limit_tree_init(void)
4857{
4858 struct mem_cgroup_tree_per_node *rtpn;
4859 struct mem_cgroup_tree_per_zone *rtpz;
4860 int tmp, node, zone;
4861
4862 for_each_node_state(node, N_POSSIBLE) {
4863 tmp = node;
4864 if (!node_state(node, N_NORMAL_MEMORY))
4865 tmp = -1;
4866 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
4867 if (!rtpn)
4868 return 1;
4869
4870 soft_limit_tree.rb_tree_per_node[node] = rtpn;
4871
4872 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4873 rtpz = &rtpn->rb_tree_per_zone[zone];
4874 rtpz->rb_root = RB_ROOT;
4875 spin_lock_init(&rtpz->lock);
4876 }
4877 }
4878 return 0;
4879}
4880
4881static struct cgroup_subsys_state * __ref
4882mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
4883{
4884 struct mem_cgroup *mem, *parent;
4885 long error = -ENOMEM;
4886 int node;
4887
4888 mem = mem_cgroup_alloc();
4889 if (!mem)
4890 return ERR_PTR(error);
4891
4892 for_each_node_state(node, N_POSSIBLE)
4893 if (alloc_mem_cgroup_per_zone_info(mem, node))
4894 goto free_out;
4895
4896 /* root ? */
4897 if (cont->parent == NULL) {
4898 int cpu;
4899 enable_swap_cgroup();
4900 parent = NULL;
4901 root_mem_cgroup = mem;
4902 if (mem_cgroup_soft_limit_tree_init())
4903 goto free_out;
4904 for_each_possible_cpu(cpu) {
4905 struct memcg_stock_pcp *stock =
4906 &per_cpu(memcg_stock, cpu);
4907 INIT_WORK(&stock->work, drain_local_stock);
4908 }
4909 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
4910 } else {
4911 parent = mem_cgroup_from_cont(cont->parent);
4912 mem->use_hierarchy = parent->use_hierarchy;
4913 mem->oom_kill_disable = parent->oom_kill_disable;
4914 }
4915
4916 if (parent && parent->use_hierarchy) {
4917 res_counter_init(&mem->res, &parent->res);
4918 res_counter_init(&mem->memsw, &parent->memsw);
4919 /*
4920		 * We increment the refcnt of the parent to ensure that we can
4921		 * safely access it on res_counter_charge/uncharge.
4922		 * This refcnt will be decremented when freeing this
4923		 * mem_cgroup (see mem_cgroup_put).
4924 */
4925 mem_cgroup_get(parent);
4926 } else {
4927 res_counter_init(&mem->res, NULL);
4928 res_counter_init(&mem->memsw, NULL);
4929 }
4930 mem->last_scanned_child = 0;
4931 mem->last_scanned_node = MAX_NUMNODES;
4932 INIT_LIST_HEAD(&mem->oom_notify);
4933
4934 if (parent)
4935 mem->swappiness = mem_cgroup_swappiness(parent);
4936 atomic_set(&mem->refcnt, 1);
4937 mem->move_charge_at_immigrate = 0;
4938 mutex_init(&mem->thresholds_lock);
4939 return &mem->css;
4940free_out:
4941 __mem_cgroup_free(mem);
4942 root_mem_cgroup = NULL;
4943 return ERR_PTR(error);
4944}
4945
4946static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
4947 struct cgroup *cont)
4948{
4949 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
4950
4951 return mem_cgroup_force_empty(mem, false);
4952}
4953
4954static void mem_cgroup_destroy(struct cgroup_subsys *ss,
4955 struct cgroup *cont)
4956{
4957 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
4958
4959 mem_cgroup_put(mem);
4960}
4961
4962static int mem_cgroup_populate(struct cgroup_subsys *ss,
4963 struct cgroup *cont)
4964{
4965 int ret;
4966
4967 ret = cgroup_add_files(cont, ss, mem_cgroup_files,
4968 ARRAY_SIZE(mem_cgroup_files));
4969
4970 if (!ret)
4971 ret = register_memsw_files(cont, ss);
4972 return ret;
4973}
4974
4975#ifdef CONFIG_MMU
4976/* Handlers for move charge at task migration. */
4977#define PRECHARGE_COUNT_AT_ONCE 256
4978static int mem_cgroup_do_precharge(unsigned long count)
4979{
4980 int ret = 0;
4981 int batch_count = PRECHARGE_COUNT_AT_ONCE;
4982 struct mem_cgroup *mem = mc.to;
4983
4984 if (mem_cgroup_is_root(mem)) {
4985 mc.precharge += count;
4986 /* we don't need css_get for root */
4987 return ret;
4988 }
4989 /* try to charge at once */
4990 if (count > 1) {
4991 struct res_counter *dummy;
4992 /*
4993 * "mem" cannot be under rmdir() because we've already checked
4994 * by cgroup_lock_live_cgroup() that it is not removed and we
4995 * are still under the same cgroup_mutex. So we can postpone
4996 * css_get().
4997 */
4998 if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
4999 goto one_by_one;
5000 if (do_swap_account && res_counter_charge(&mem->memsw,
5001 PAGE_SIZE * count, &dummy)) {
5002 res_counter_uncharge(&mem->res, PAGE_SIZE * count);
5003 goto one_by_one;
5004 }
5005 mc.precharge += count;
5006 return ret;
5007 }
5008one_by_one:
5009 /* fall back to one by one charge */
5010 while (count--) {
5011 if (signal_pending(current)) {
5012 ret = -EINTR;
5013 break;
5014 }
5015 if (!batch_count--) {
5016 batch_count = PRECHARGE_COUNT_AT_ONCE;
5017 cond_resched();
5018 }
5019 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, 1, &mem, false);
5020 if (ret || !mem)
5021 /* mem_cgroup_clear_mc() will do uncharge later */
5022 return -ENOMEM;
5023 mc.precharge++;
5024 }
5025 return ret;
5026}
5027
5028/**
5029 * is_target_pte_for_mc - check a pte whether it is valid for move charge
5030 * @vma: the vma the pte to be checked belongs
5031 * @addr: the address corresponding to the pte to be checked
5032 * @ptent: the pte to be checked
5033 * @target: the pointer in which the target page or swap entry will be stored (can be NULL)
5034 *
5035 * Returns
5036 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
5037 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5038 * move charge. If @target is not NULL, the page is stored in target->page
5039 * with an extra refcount taken (callers should handle it).
5040 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5041 * target for charge migration. If @target is not NULL, the entry is stored
5042 * in target->ent.
5043 *
5044 * Called with pte lock held.
5045 */
5046union mc_target {
5047 struct page *page;
5048 swp_entry_t ent;
5049};
5050
5051enum mc_target_type {
5052 MC_TARGET_NONE, /* not used */
5053 MC_TARGET_PAGE,
5054 MC_TARGET_SWAP,
5055};
5056
5057static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5058 unsigned long addr, pte_t ptent)
5059{
5060 struct page *page = vm_normal_page(vma, addr, ptent);
5061
5062 if (!page || !page_mapped(page))
5063 return NULL;
5064 if (PageAnon(page)) {
5065 /* we don't move shared anon */
5066 if (!move_anon() || page_mapcount(page) > 2)
5067 return NULL;
5068 } else if (!move_file())
5069 /* we ignore mapcount for file pages */
5070 return NULL;
5071 if (!get_page_unless_zero(page))
5072 return NULL;
5073
5074 return page;
5075}
5076
5077static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5078 unsigned long addr, pte_t ptent, swp_entry_t *entry)
5079{
5080 int usage_count;
5081 struct page *page = NULL;
5082 swp_entry_t ent = pte_to_swp_entry(ptent);
5083
5084 if (!move_anon() || non_swap_entry(ent))
5085 return NULL;
5086 usage_count = mem_cgroup_count_swap_user(ent, &page);
5087 if (usage_count > 1) { /* we don't move shared anon */
5088 if (page)
5089 put_page(page);
5090 return NULL;
5091 }
5092 if (do_swap_account)
5093 entry->val = ent.val;
5094
5095 return page;
5096}
5097
5098static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5099 unsigned long addr, pte_t ptent, swp_entry_t *entry)
5100{
5101 struct page *page = NULL;
5102 struct inode *inode;
5103 struct address_space *mapping;
5104 pgoff_t pgoff;
5105
5106 if (!vma->vm_file) /* anonymous vma */
5107 return NULL;
5108 if (!move_file())
5109 return NULL;
5110
5111 inode = vma->vm_file->f_path.dentry->d_inode;
5112 mapping = vma->vm_file->f_mapping;
5113 if (pte_none(ptent))
5114 pgoff = linear_page_index(vma, addr);
5115 else /* pte_file(ptent) is true */
5116 pgoff = pte_to_pgoff(ptent);
5117
5118	/* The page is moved even if it's not in this task's RSS (not page-faulted in). */
5119 page = find_get_page(mapping, pgoff);
5120
5121#ifdef CONFIG_SWAP
5122 /* shmem/tmpfs may report page out on swap: account for that too. */
5123 if (radix_tree_exceptional_entry(page)) {
5124 swp_entry_t swap = radix_to_swp_entry(page);
5125 if (do_swap_account)
5126 *entry = swap;
5127 page = find_get_page(&swapper_space, swap.val);
5128 }
5129#endif
5130 return page;
5131}
5132
5133static int is_target_pte_for_mc(struct vm_area_struct *vma,
5134 unsigned long addr, pte_t ptent, union mc_target *target)
5135{
5136 struct page *page = NULL;
5137 struct page_cgroup *pc;
5138 int ret = 0;
5139 swp_entry_t ent = { .val = 0 };
5140
5141 if (pte_present(ptent))
5142 page = mc_handle_present_pte(vma, addr, ptent);
5143 else if (is_swap_pte(ptent))
5144 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
5145 else if (pte_none(ptent) || pte_file(ptent))
5146 page = mc_handle_file_pte(vma, addr, ptent, &ent);
5147
5148 if (!page && !ent.val)
5149 return 0;
5150 if (page) {
5151 pc = lookup_page_cgroup(page);
5152 /*
5153		 * Do only a loose check, without taking the page_cgroup lock.
5154		 * mem_cgroup_move_account() checks whether the pc is valid under
5155 * the lock.
5156 */
5157 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
5158 ret = MC_TARGET_PAGE;
5159 if (target)
5160 target->page = page;
5161 }
5162 if (!ret || !target)
5163 put_page(page);
5164 }
5165 /* There is a swap entry and a page doesn't exist or isn't charged */
5166 if (ent.val && !ret &&
5167 css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
5168 ret = MC_TARGET_SWAP;
5169 if (target)
5170 target->ent = ent;
5171 }
5172 return ret;
5173}
5174
5175static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5176 unsigned long addr, unsigned long end,
5177 struct mm_walk *walk)
5178{
5179 struct vm_area_struct *vma = walk->private;
5180 pte_t *pte;
5181 spinlock_t *ptl;
5182
5183 split_huge_page_pmd(walk->mm, pmd);
5184
5185 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5186 for (; addr != end; pte++, addr += PAGE_SIZE)
5187 if (is_target_pte_for_mc(vma, addr, *pte, NULL))
5188 mc.precharge++; /* increment precharge temporarily */
5189 pte_unmap_unlock(pte - 1, ptl);
5190 cond_resched();
5191
5192 return 0;
5193}
5194
5195static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5196{
5197 unsigned long precharge;
5198 struct vm_area_struct *vma;
5199
5200 down_read(&mm->mmap_sem);
5201 for (vma = mm->mmap; vma; vma = vma->vm_next) {
5202 struct mm_walk mem_cgroup_count_precharge_walk = {
5203 .pmd_entry = mem_cgroup_count_precharge_pte_range,
5204 .mm = mm,
5205 .private = vma,
5206 };
5207 if (is_vm_hugetlb_page(vma))
5208 continue;
5209 walk_page_range(vma->vm_start, vma->vm_end,
5210 &mem_cgroup_count_precharge_walk);
5211 }
5212 up_read(&mm->mmap_sem);
5213
5214 precharge = mc.precharge;
5215 mc.precharge = 0;
5216
5217 return precharge;
5218}
5219
5220static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5221{
5222 unsigned long precharge = mem_cgroup_count_precharge(mm);
5223
5224 VM_BUG_ON(mc.moving_task);
5225 mc.moving_task = current;
5226 return mem_cgroup_do_precharge(precharge);
5227}
5228
5229/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5230static void __mem_cgroup_clear_mc(void)
5231{
5232 struct mem_cgroup *from = mc.from;
5233 struct mem_cgroup *to = mc.to;
5234
5235 /* we must uncharge all the leftover precharges from mc.to */
5236 if (mc.precharge) {
5237 __mem_cgroup_cancel_charge(mc.to, mc.precharge);
5238 mc.precharge = 0;
5239 }
5240 /*
5241 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5242 * we must uncharge here.
5243 */
5244 if (mc.moved_charge) {
5245 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
5246 mc.moved_charge = 0;
5247 }
5248 /* we must fixup refcnts and charges */
5249 if (mc.moved_swap) {
5250 /* uncharge swap account from the old cgroup */
5251 if (!mem_cgroup_is_root(mc.from))
5252 res_counter_uncharge(&mc.from->memsw,
5253 PAGE_SIZE * mc.moved_swap);
5254 __mem_cgroup_put(mc.from, mc.moved_swap);
5255
5256 if (!mem_cgroup_is_root(mc.to)) {
5257 /*
5258 * we charged both to->res and to->memsw, so we should
5259 * uncharge to->res.
5260 */
5261 res_counter_uncharge(&mc.to->res,
5262 PAGE_SIZE * mc.moved_swap);
5263 }
5264 /* we've already done mem_cgroup_get(mc.to) */
5265 mc.moved_swap = 0;
5266 }
5267 memcg_oom_recover(from);
5268 memcg_oom_recover(to);
5269 wake_up_all(&mc.waitq);
5270}
5271
5272static void mem_cgroup_clear_mc(void)
5273{
5274 struct mem_cgroup *from = mc.from;
5275
5276 /*
5277 * we must clear moving_task before waking up waiters at the end of
5278 * task migration.
5279 */
5280 mc.moving_task = NULL;
5281 __mem_cgroup_clear_mc();
5282 spin_lock(&mc.lock);
5283 mc.from = NULL;
5284 mc.to = NULL;
5285 spin_unlock(&mc.lock);
5286 mem_cgroup_end_move(from);
5287}
5288
5289static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
5290 struct cgroup *cgroup,
5291 struct task_struct *p)
5292{
5293 int ret = 0;
5294 struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);
5295
5296 if (mem->move_charge_at_immigrate) {
5297 struct mm_struct *mm;
5298 struct mem_cgroup *from = mem_cgroup_from_task(p);
5299
5300 VM_BUG_ON(from == mem);
5301
5302 mm = get_task_mm(p);
5303 if (!mm)
5304 return 0;
5305		/* We move charges only when we move the owner of the mm */
5306 if (mm->owner == p) {
5307 VM_BUG_ON(mc.from);
5308 VM_BUG_ON(mc.to);
5309 VM_BUG_ON(mc.precharge);
5310 VM_BUG_ON(mc.moved_charge);
5311 VM_BUG_ON(mc.moved_swap);
5312 mem_cgroup_start_move(from);
5313 spin_lock(&mc.lock);
5314 mc.from = from;
5315 mc.to = mem;
5316 spin_unlock(&mc.lock);
5317 /* We set mc.moving_task later */
5318
5319 ret = mem_cgroup_precharge_mc(mm);
5320 if (ret)
5321 mem_cgroup_clear_mc();
5322 }
5323 mmput(mm);
5324 }
5325 return ret;
5326}
5327
5328static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
5329 struct cgroup *cgroup,
5330 struct task_struct *p)
5331{
5332 mem_cgroup_clear_mc();
5333}
5334
5335static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5336 unsigned long addr, unsigned long end,
5337 struct mm_walk *walk)
5338{
5339 int ret = 0;
5340 struct vm_area_struct *vma = walk->private;
5341 pte_t *pte;
5342 spinlock_t *ptl;
5343
5344 split_huge_page_pmd(walk->mm, pmd);
5345retry:
5346 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5347 for (; addr != end; addr += PAGE_SIZE) {
5348 pte_t ptent = *(pte++);
5349 union mc_target target;
5350 int type;
5351 struct page *page;
5352 struct page_cgroup *pc;
5353 swp_entry_t ent;
5354
5355 if (!mc.precharge)
5356 break;
5357
5358 type = is_target_pte_for_mc(vma, addr, ptent, &target);
5359 switch (type) {
5360 case MC_TARGET_PAGE:
5361 page = target.page;
5362 if (isolate_lru_page(page))
5363 goto put;
5364 pc = lookup_page_cgroup(page);
5365 if (!mem_cgroup_move_account(page, 1, pc,
5366 mc.from, mc.to, false)) {
5367 mc.precharge--;
5368 /* we uncharge from mc.from later. */
5369 mc.moved_charge++;
5370 }
5371 putback_lru_page(page);
5372put: /* is_target_pte_for_mc() gets the page */
5373 put_page(page);
5374 break;
5375 case MC_TARGET_SWAP:
5376 ent = target.ent;
5377 if (!mem_cgroup_move_swap_account(ent,
5378 mc.from, mc.to, false)) {
5379 mc.precharge--;
5380 /* we fixup refcnts and charges later. */
5381 mc.moved_swap++;
5382 }
5383 break;
5384 default:
5385 break;
5386 }
5387 }
5388 pte_unmap_unlock(pte - 1, ptl);
5389 cond_resched();
5390
5391 if (addr != end) {
5392 /*
5393 * We have consumed all precharges we got in can_attach().
5394 * We try charge one by one, but don't do any additional
5395 * charges to mc.to if we have failed in charge once in attach()
5396 * phase.
5397 */
5398 ret = mem_cgroup_do_precharge(1);
5399 if (!ret)
5400 goto retry;
5401 }
5402
5403 return ret;
5404}
5405
5406static void mem_cgroup_move_charge(struct mm_struct *mm)
5407{
5408 struct vm_area_struct *vma;
5409
5410 lru_add_drain_all();
5411retry:
5412 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
5413 /*
5414		 * Someone who is holding the mmap_sem might be waiting on the
5415		 * waitq. So we cancel all extra charges, wake up all waiters,
5416 * and retry. Because we cancel precharges, we might not be able
5417 * to move enough charges, but moving charge is a best-effort
5418 * feature anyway, so it wouldn't be a big problem.
5419 */
5420 __mem_cgroup_clear_mc();
5421 cond_resched();
5422 goto retry;
5423 }
5424 for (vma = mm->mmap; vma; vma = vma->vm_next) {
5425 int ret;
5426 struct mm_walk mem_cgroup_move_charge_walk = {
5427 .pmd_entry = mem_cgroup_move_charge_pte_range,
5428 .mm = mm,
5429 .private = vma,
5430 };
5431 if (is_vm_hugetlb_page(vma))
5432 continue;
5433 ret = walk_page_range(vma->vm_start, vma->vm_end,
5434 &mem_cgroup_move_charge_walk);
5435 if (ret)
5436 /*
5437			 * This means we have consumed all precharges and failed to
5438			 * do an additional charge. Just abandon here.
5439 */
5440 break;
5441 }
5442 up_read(&mm->mmap_sem);
5443}
5444
5445static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5446 struct cgroup *cont,
5447 struct cgroup *old_cont,
5448 struct task_struct *p)
5449{
5450 struct mm_struct *mm = get_task_mm(p);
5451
5452 if (mm) {
5453 if (mc.to)
5454 mem_cgroup_move_charge(mm);
5455 put_swap_token(mm);
5456 mmput(mm);
5457 }
5458 if (mc.to)
5459 mem_cgroup_clear_mc();
5460}
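/*
 * Summary of the charge-moving flow implemented above:
 * mem_cgroup_can_attach() precharges mc.to for every movable pte of the
 * migrating task (counted by mem_cgroup_count_precharge()),
 * mem_cgroup_move_task() then walks the page tables again and converts
 * precharges into actual moves via mem_cgroup_move_account() and
 * mem_cgroup_move_swap_account(), and mem_cgroup_cancel_attach() or any
 * failure path ends up in mem_cgroup_clear_mc(), which returns the
 * leftover precharges.
 */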
5461#else /* !CONFIG_MMU */
5462static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
5463 struct cgroup *cgroup,
5464 struct task_struct *p)
5465{
5466 return 0;
5467}
5468static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
5469 struct cgroup *cgroup,
5470 struct task_struct *p)
5471{
5472}
5473static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5474 struct cgroup *cont,
5475 struct cgroup *old_cont,
5476 struct task_struct *p)
5477{
5478}
5479#endif
5480
5481struct cgroup_subsys mem_cgroup_subsys = {
5482 .name = "memory",
5483 .subsys_id = mem_cgroup_subsys_id,
5484 .create = mem_cgroup_create,
5485 .pre_destroy = mem_cgroup_pre_destroy,
5486 .destroy = mem_cgroup_destroy,
5487 .populate = mem_cgroup_populate,
5488 .can_attach = mem_cgroup_can_attach,
5489 .cancel_attach = mem_cgroup_cancel_attach,
5490 .attach = mem_cgroup_move_task,
5491 .early_init = 0,
5492 .use_id = 1,
5493};
5494
5495#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
5496static int __init enable_swap_account(char *s)
5497{
5498 /* consider enabled if no parameter or 1 is given */
5499 if (!strcmp(s, "1"))
5500 really_do_swap_account = 1;
5501 else if (!strcmp(s, "0"))
5502 really_do_swap_account = 0;
5503 return 1;
5504}
5505__setup("swapaccount=", enable_swap_account);
5506
5507#endif
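/*
 * Example: the __setup() above wires up the "swapaccount=" kernel command
 * line parameter, so booting with
 *
 *	swapaccount=0
 *
 * forces really_do_swap_account off regardless of the build-time default,
 * while swapaccount=1 enables memory+swap accounting when the default
 * leaves it off.
 */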
1/* memcontrol.c - Memory Controller
2 *
3 * Copyright IBM Corporation, 2007
4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5 *
6 * Copyright 2007 OpenVZ SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 *
9 * Memory thresholds
10 * Copyright (C) 2009 Nokia Corporation
11 * Author: Kirill A. Shutemov
12 *
13 * Kernel Memory Controller
14 * Copyright (C) 2012 Parallels Inc. and Google Inc.
15 * Authors: Glauber Costa and Suleiman Souhlal
16 *
17 * Native page reclaim
18 * Charge lifetime sanitation
19 * Lockless page tracking & accounting
20 * Unified hierarchy configuration model
21 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
22 *
23 * This program is free software; you can redistribute it and/or modify
24 * it under the terms of the GNU General Public License as published by
25 * the Free Software Foundation; either version 2 of the License, or
26 * (at your option) any later version.
27 *
28 * This program is distributed in the hope that it will be useful,
29 * but WITHOUT ANY WARRANTY; without even the implied warranty of
30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
31 * GNU General Public License for more details.
32 */
33
34#include <linux/page_counter.h>
35#include <linux/memcontrol.h>
36#include <linux/cgroup.h>
37#include <linux/mm.h>
38#include <linux/hugetlb.h>
39#include <linux/pagemap.h>
40#include <linux/smp.h>
41#include <linux/page-flags.h>
42#include <linux/backing-dev.h>
43#include <linux/bit_spinlock.h>
44#include <linux/rcupdate.h>
45#include <linux/limits.h>
46#include <linux/export.h>
47#include <linux/mutex.h>
48#include <linux/rbtree.h>
49#include <linux/slab.h>
50#include <linux/swap.h>
51#include <linux/swapops.h>
52#include <linux/spinlock.h>
53#include <linux/eventfd.h>
54#include <linux/poll.h>
55#include <linux/sort.h>
56#include <linux/fs.h>
57#include <linux/seq_file.h>
58#include <linux/vmpressure.h>
59#include <linux/mm_inline.h>
60#include <linux/swap_cgroup.h>
61#include <linux/cpu.h>
62#include <linux/oom.h>
63#include <linux/lockdep.h>
64#include <linux/file.h>
65#include <linux/tracehook.h>
66#include "internal.h"
67#include <net/sock.h>
68#include <net/ip.h>
69#include "slab.h"
70
71#include <linux/uaccess.h>
72
73#include <trace/events/vmscan.h>
74
75struct cgroup_subsys memory_cgrp_subsys __read_mostly;
76EXPORT_SYMBOL(memory_cgrp_subsys);
77
78struct mem_cgroup *root_mem_cgroup __read_mostly;
79
80#define MEM_CGROUP_RECLAIM_RETRIES 5
81
82/* Socket memory accounting disabled? */
83static bool cgroup_memory_nosocket;
84
85/* Kernel memory accounting disabled? */
86static bool cgroup_memory_nokmem;
87
88/* Whether the swap controller is active */
89#ifdef CONFIG_MEMCG_SWAP
90int do_swap_account __read_mostly;
91#else
92#define do_swap_account 0
93#endif
94
95/* Whether legacy memory+swap accounting is active */
96static bool do_memsw_account(void)
97{
98 return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
99}
100
101static const char * const mem_cgroup_stat_names[] = {
102 "cache",
103 "rss",
104 "rss_huge",
105 "mapped_file",
106 "dirty",
107 "writeback",
108 "swap",
109};
110
111static const char * const mem_cgroup_events_names[] = {
112 "pgpgin",
113 "pgpgout",
114 "pgfault",
115 "pgmajfault",
116};
117
118static const char * const mem_cgroup_lru_names[] = {
119 "inactive_anon",
120 "active_anon",
121 "inactive_file",
122 "active_file",
123 "unevictable",
124};
125
126#define THRESHOLDS_EVENTS_TARGET 128
127#define SOFTLIMIT_EVENTS_TARGET 1024
128#define NUMAINFO_EVENTS_TARGET 1024
129
130/*
131 * Cgroups above their limits are maintained in a RB-Tree, independent of
132 * their hierarchy representation
133 */
134
135struct mem_cgroup_tree_per_node {
136 struct rb_root rb_root;
137 spinlock_t lock;
138};
139
140struct mem_cgroup_tree {
141 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
142};
143
144static struct mem_cgroup_tree soft_limit_tree __read_mostly;
145
146/* for OOM */
147struct mem_cgroup_eventfd_list {
148 struct list_head list;
149 struct eventfd_ctx *eventfd;
150};
151
152/*
153 * cgroup_event represents events which userspace want to receive.
154 */
155struct mem_cgroup_event {
156 /*
157 * memcg which the event belongs to.
158 */
159 struct mem_cgroup *memcg;
160 /*
161 * eventfd to signal userspace about the event.
162 */
163 struct eventfd_ctx *eventfd;
164 /*
165 * Each of these stored in a list by the cgroup.
166 */
167 struct list_head list;
168 /*
169 * register_event() callback will be used to add new userspace
170 * waiter for changes related to this event. Use eventfd_signal()
171 * on eventfd to send notification to userspace.
172 */
173 int (*register_event)(struct mem_cgroup *memcg,
174 struct eventfd_ctx *eventfd, const char *args);
175 /*
176 * unregister_event() callback will be called when userspace closes
177 * the eventfd or on cgroup removing. This callback must be set,
178 * if you want provide notification functionality.
179 */
180 void (*unregister_event)(struct mem_cgroup *memcg,
181 struct eventfd_ctx *eventfd);
182 /*
183 * All fields below needed to unregister event when
184 * userspace closes eventfd.
185 */
186 poll_table pt;
187 wait_queue_head_t *wqh;
188 wait_queue_t wait;
189 struct work_struct remove;
190};
191
192static void mem_cgroup_threshold(struct mem_cgroup *memcg);
193static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
194
195/* Stuffs for move charges at task migration. */
196/*
197 * Types of charges to be moved.
198 */
199#define MOVE_ANON 0x1U
200#define MOVE_FILE 0x2U
201#define MOVE_MASK (MOVE_ANON | MOVE_FILE)
202
203/* "mc" and its members are protected by cgroup_mutex */
204static struct move_charge_struct {
205 spinlock_t lock; /* for from, to */
206 struct mm_struct *mm;
207 struct mem_cgroup *from;
208 struct mem_cgroup *to;
209 unsigned long flags;
210 unsigned long precharge;
211 unsigned long moved_charge;
212 unsigned long moved_swap;
213 struct task_struct *moving_task; /* a task moving charges */
214 wait_queue_head_t waitq; /* a waitq for other context */
215} mc = {
216 .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
217 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
218};
219
220/*
221 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
222 * limit reclaim to prevent infinite loops, if they ever occur.
223 */
224#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
225#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
226
227enum charge_type {
228 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
229 MEM_CGROUP_CHARGE_TYPE_ANON,
230 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
231 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */
232 NR_CHARGE_TYPE,
233};
234
235/* for encoding cft->private value on file */
236enum res_type {
237 _MEM,
238 _MEMSWAP,
239 _OOM_TYPE,
240 _KMEM,
241 _TCP,
242};
243
244#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
245#define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
246#define MEMFILE_ATTR(val) ((val) & 0xffff)
247/* Used for OOM nofiier */
248#define OOM_CONTROL (0)
249
250/* Some nice accessors for the vmpressure. */
251struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
252{
253 if (!memcg)
254 memcg = root_mem_cgroup;
255 return &memcg->vmpressure;
256}
257
258struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
259{
260 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
261}
262
263static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
264{
265 return (memcg == root_mem_cgroup);
266}
267
268#ifndef CONFIG_SLOB
269/*
270 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
271 * The main reason for not using cgroup id for this:
272 * this works better in sparse environments, where we have a lot of memcgs,
273 * but only a few kmem-limited. Or also, if we have, for instance, 200
274 * memcgs, and none but the 200th is kmem-limited, we'd have to have a
275 * 200 entry array for that.
276 *
277 * The current size of the caches array is stored in memcg_nr_cache_ids. It
278 * will double each time we have to increase it.
279 */
280static DEFINE_IDA(memcg_cache_ida);
281int memcg_nr_cache_ids;
282
283/* Protects memcg_nr_cache_ids */
284static DECLARE_RWSEM(memcg_cache_ids_sem);
285
286void memcg_get_cache_ids(void)
287{
288 down_read(&memcg_cache_ids_sem);
289}
290
291void memcg_put_cache_ids(void)
292{
293 up_read(&memcg_cache_ids_sem);
294}
295
296/*
297 * MIN_SIZE is different than 1, because we would like to avoid going through
298 * the alloc/free process all the time. In a small machine, 4 kmem-limited
299 * cgroups is a reasonable guess. In the future, it could be a parameter or
300 * tunable, but that is not strictly necessary.
301 *
302 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
303 * this constant directly from cgroup, but it is understandable that this is
304 * better kept as an internal representation in cgroup.c. In any case, the
305 * cgrp_id space is not getting any smaller, and we don't necessarily have
306 * to increase ours as well if it increases.
307 */
308#define MEMCG_CACHES_MIN_SIZE 4
309#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
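
/*
 * Illustrative growth: memcg_alloc_cache_id() below resizes the arrays to
 * 2 * (id + 1), clamped to the range above; e.g. allocating id 4 while the
 * arrays hold 4 entries grows them to 10.
 */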
310
311/*
312 * A lot of the calls to the cache allocation functions are expected to be
313 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
314 * conditional to this static branch, we'll have to allow modules that do
315 * kmem_cache_alloc and the like to see this symbol as well.
316 */
317DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
318EXPORT_SYMBOL(memcg_kmem_enabled_key);
319
320#endif /* !CONFIG_SLOB */
321
322/**
323 * mem_cgroup_css_from_page - css of the memcg associated with a page
324 * @page: page of interest
325 *
326 * If memcg is bound to the default hierarchy, css of the memcg associated
327 * with @page is returned. The returned css remains associated with @page
328 * until it is released.
329 *
330 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
331 * is returned.
332 */
333struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
334{
335 struct mem_cgroup *memcg;
336
337 memcg = page->mem_cgroup;
338
339 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
340 memcg = root_mem_cgroup;
341
342 return &memcg->css;
343}
344
345/**
346 * page_cgroup_ino - return inode number of the memcg a page is charged to
347 * @page: the page
348 *
349 * Look up the closest online ancestor of the memory cgroup @page is charged to
350 * and return its inode number or 0 if @page is not charged to any cgroup. It
351 * is safe to call this function without holding a reference to @page.
352 *
353 * Note, this function is inherently racy, because there is nothing to prevent
354 * the cgroup inode from getting torn down and potentially reallocated a moment
355 * after page_cgroup_ino() returns, so it should only be used by callers that
356 * do not care (such as procfs interfaces).
357 */
358ino_t page_cgroup_ino(struct page *page)
359{
360 struct mem_cgroup *memcg;
361 unsigned long ino = 0;
362
363 rcu_read_lock();
364 memcg = READ_ONCE(page->mem_cgroup);
365 while (memcg && !(memcg->css.flags & CSS_ONLINE))
366 memcg = parent_mem_cgroup(memcg);
367 if (memcg)
368 ino = cgroup_ino(memcg->css.cgroup);
369 rcu_read_unlock();
370 return ino;
371}
372
373static struct mem_cgroup_per_node *
374mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
375{
376 int nid = page_to_nid(page);
377
378 return memcg->nodeinfo[nid];
379}
380
381static struct mem_cgroup_tree_per_node *
382soft_limit_tree_node(int nid)
383{
384 return soft_limit_tree.rb_tree_per_node[nid];
385}
386
387static struct mem_cgroup_tree_per_node *
388soft_limit_tree_from_page(struct page *page)
389{
390 int nid = page_to_nid(page);
391
392 return soft_limit_tree.rb_tree_per_node[nid];
393}
394
395static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
396 struct mem_cgroup_tree_per_node *mctz,
397 unsigned long new_usage_in_excess)
398{
399 struct rb_node **p = &mctz->rb_root.rb_node;
400 struct rb_node *parent = NULL;
401 struct mem_cgroup_per_node *mz_node;
402
403 if (mz->on_tree)
404 return;
405
406 mz->usage_in_excess = new_usage_in_excess;
407 if (!mz->usage_in_excess)
408 return;
409 while (*p) {
410 parent = *p;
411 mz_node = rb_entry(parent, struct mem_cgroup_per_node,
412 tree_node);
413 if (mz->usage_in_excess < mz_node->usage_in_excess)
414 p = &(*p)->rb_left;
415 /*
416 * We can't avoid mem cgroups that are over their soft
417 * limit by the same amount
418 */
419 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
420 p = &(*p)->rb_right;
421 }
422 rb_link_node(&mz->tree_node, parent, p);
423 rb_insert_color(&mz->tree_node, &mctz->rb_root);
424 mz->on_tree = true;
425}
426
427static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
428 struct mem_cgroup_tree_per_node *mctz)
429{
430 if (!mz->on_tree)
431 return;
432 rb_erase(&mz->tree_node, &mctz->rb_root);
433 mz->on_tree = false;
434}
435
436static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
437 struct mem_cgroup_tree_per_node *mctz)
438{
439 unsigned long flags;
440
441 spin_lock_irqsave(&mctz->lock, flags);
442 __mem_cgroup_remove_exceeded(mz, mctz);
443 spin_unlock_irqrestore(&mctz->lock, flags);
444}
445
446static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
447{
448 unsigned long nr_pages = page_counter_read(&memcg->memory);
449 unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
450 unsigned long excess = 0;
451
452 if (nr_pages > soft_limit)
453 excess = nr_pages - soft_limit;
454
455 return excess;
456}
457
458static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
459{
460 unsigned long excess;
461 struct mem_cgroup_per_node *mz;
462 struct mem_cgroup_tree_per_node *mctz;
463
464 mctz = soft_limit_tree_from_page(page);
465 /*
466	 * Necessary to update all ancestors when hierarchy is used,
467	 * because their event counter is not touched.
468 */
469 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
470 mz = mem_cgroup_page_nodeinfo(memcg, page);
471 excess = soft_limit_excess(memcg);
472 /*
473 * We have to update the tree if mz is on RB-tree or
474 * mem is over its softlimit.
475 */
476 if (excess || mz->on_tree) {
477 unsigned long flags;
478
479 spin_lock_irqsave(&mctz->lock, flags);
480 /* if on-tree, remove it */
481 if (mz->on_tree)
482 __mem_cgroup_remove_exceeded(mz, mctz);
483 /*
484 * Insert again. mz->usage_in_excess will be updated.
485 * If excess is 0, no tree ops.
486 */
487 __mem_cgroup_insert_exceeded(mz, mctz, excess);
488 spin_unlock_irqrestore(&mctz->lock, flags);
489 }
490 }
491}
492
493static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
494{
495 struct mem_cgroup_tree_per_node *mctz;
496 struct mem_cgroup_per_node *mz;
497 int nid;
498
499 for_each_node(nid) {
500 mz = mem_cgroup_nodeinfo(memcg, nid);
501 mctz = soft_limit_tree_node(nid);
502 mem_cgroup_remove_exceeded(mz, mctz);
503 }
504}
505
506static struct mem_cgroup_per_node *
507__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
508{
509 struct rb_node *rightmost = NULL;
510 struct mem_cgroup_per_node *mz;
511
512retry:
513 mz = NULL;
514 rightmost = rb_last(&mctz->rb_root);
515 if (!rightmost)
516 goto done; /* Nothing to reclaim from */
517
518 mz = rb_entry(rightmost, struct mem_cgroup_per_node, tree_node);
519 /*
520	 * Remove the node now but someone else can add it back;
521	 * we will add it back at the end of reclaim to its correct
522 * position in the tree.
523 */
524 __mem_cgroup_remove_exceeded(mz, mctz);
525 if (!soft_limit_excess(mz->memcg) ||
526 !css_tryget_online(&mz->memcg->css))
527 goto retry;
528done:
529 return mz;
530}
531
532static struct mem_cgroup_per_node *
533mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
534{
535 struct mem_cgroup_per_node *mz;
536
537 spin_lock_irq(&mctz->lock);
538 mz = __mem_cgroup_largest_soft_limit_node(mctz);
539 spin_unlock_irq(&mctz->lock);
540 return mz;
541}
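
/*
 * The per-node tree is ordered by usage_in_excess, so the rightmost node
 * returned above is the memcg furthest over its soft limit, which is what
 * soft limit reclaim targets first.
 */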
542
543/*
544 * Return the page count for a single (non-recursive) @memcg.
545 *
546 * Implementation note: reading percpu statistics for memcg.
547 *
548 * Both vmstat[] and percpu_counter have thresholds and do periodic
549 * synchronization to implement a "quick" read. There is a trade-off between
550 * reading cost and precision of the value, so we might one day implement
551 * a similar periodic synchronization for memcg's counters.
552 *
553 * But this _read() function is currently used for the user interface. Users
554 * account memory usage by memory cgroup and _always_ require an exact value,
555 * and even a quick-and-fuzzy read would still have to visit all online cpus
556 * and compute the sum. So, for now, no extra synchronization is
557 * implemented (it exists only for cpu hotplug).
558 *
559 * If kernel-internal users appear that can make do with an inexact value,
560 * and reading every cpu's value becomes a bottleneck in some common
561 * workload, thresholds and synchronization like vmstat[] should be
562 * implemented.
563 */
564static unsigned long
565mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
566{
567 long val = 0;
568 int cpu;
569
570 /* Per-cpu values can be negative, use a signed accumulator */
571 for_each_possible_cpu(cpu)
572 val += per_cpu(memcg->stat->count[idx], cpu);
573 /*
574 * Summing races with updates, so val may be negative. Avoid exposing
575 * transient negative values.
576 */
577 if (val < 0)
578 val = 0;
579 return val;
580}
581
582static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
583 enum mem_cgroup_events_index idx)
584{
585 unsigned long val = 0;
586 int cpu;
587
588 for_each_possible_cpu(cpu)
589 val += per_cpu(memcg->stat->events[idx], cpu);
590 return val;
591}
592
593static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
594 struct page *page,
595 bool compound, int nr_pages)
596{
597 /*
598 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
599 * counted as CACHE even if it's on ANON LRU.
600 */
601 if (PageAnon(page))
602 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
603 nr_pages);
604 else
605 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
606 nr_pages);
607
608 if (compound) {
609 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
610 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
611 nr_pages);
612 }
613
614	/* a pagein of a big page is one event, so ignore the page size */
615 if (nr_pages > 0)
616 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
617 else {
618 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
619 nr_pages = -nr_pages; /* for event */
620 }
621
622 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
623}
624
625unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
626 int nid, unsigned int lru_mask)
627{
628 struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
629 unsigned long nr = 0;
630 enum lru_list lru;
631
632 VM_BUG_ON((unsigned)nid >= nr_node_ids);
633
634 for_each_lru(lru) {
635 if (!(BIT(lru) & lru_mask))
636 continue;
637 nr += mem_cgroup_get_lru_size(lruvec, lru);
638 }
639 return nr;
640}
641
642static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
643 unsigned int lru_mask)
644{
645 unsigned long nr = 0;
646 int nid;
647
648 for_each_node_state(nid, N_MEMORY)
649 nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
650 return nr;
651}
652
653static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
654 enum mem_cgroup_events_target target)
655{
656 unsigned long val, next;
657
658 val = __this_cpu_read(memcg->stat->nr_page_events);
659 next = __this_cpu_read(memcg->stat->targets[target]);
660 /* from time_after() in jiffies.h */
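	/* the signed subtraction keeps the comparison correct across wraparound */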
661 if ((long)next - (long)val < 0) {
662 switch (target) {
663 case MEM_CGROUP_TARGET_THRESH:
664 next = val + THRESHOLDS_EVENTS_TARGET;
665 break;
666 case MEM_CGROUP_TARGET_SOFTLIMIT:
667 next = val + SOFTLIMIT_EVENTS_TARGET;
668 break;
669 case MEM_CGROUP_TARGET_NUMAINFO:
670 next = val + NUMAINFO_EVENTS_TARGET;
671 break;
672 default:
673 break;
674 }
675 __this_cpu_write(memcg->stat->targets[target], next);
676 return true;
677 }
678 return false;
679}
680
681/*
682 * Check events in order: thresholds are checked on a finer grain than
683 * the soft limit tree update and the NUMA info refresh.
684 */
685static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
686{
687 /* threshold event is triggered in finer grain than soft limit */
688 if (unlikely(mem_cgroup_event_ratelimit(memcg,
689 MEM_CGROUP_TARGET_THRESH))) {
690 bool do_softlimit;
691 bool do_numainfo __maybe_unused;
692
693 do_softlimit = mem_cgroup_event_ratelimit(memcg,
694 MEM_CGROUP_TARGET_SOFTLIMIT);
695#if MAX_NUMNODES > 1
696 do_numainfo = mem_cgroup_event_ratelimit(memcg,
697 MEM_CGROUP_TARGET_NUMAINFO);
698#endif
699 mem_cgroup_threshold(memcg);
700 if (unlikely(do_softlimit))
701 mem_cgroup_update_tree(memcg, page);
702#if MAX_NUMNODES > 1
703 if (unlikely(do_numainfo))
704 atomic_inc(&memcg->numainfo_events);
705#endif
706 }
707}
708
709struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
710{
711 /*
712 * mm_update_next_owner() may clear mm->owner to NULL
713 * if it races with swapoff, page migration, etc.
714 * So this can be called with p == NULL.
715 */
716 if (unlikely(!p))
717 return NULL;
718
719 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
720}
721EXPORT_SYMBOL(mem_cgroup_from_task);
722
723static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
724{
725 struct mem_cgroup *memcg = NULL;
726
727 rcu_read_lock();
728 do {
729 /*
730		 * Page cache insertions can happen without an
731 * actual mm context, e.g. during disk probing
732 * on boot, loopback IO, acct() writes etc.
733 */
734 if (unlikely(!mm))
735 memcg = root_mem_cgroup;
736 else {
737 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
738 if (unlikely(!memcg))
739 memcg = root_mem_cgroup;
740 }
741 } while (!css_tryget_online(&memcg->css));
742 rcu_read_unlock();
743 return memcg;
744}
745
746/**
747 * mem_cgroup_iter - iterate over memory cgroup hierarchy
748 * @root: hierarchy root
749 * @prev: previously returned memcg, NULL on first invocation
750 * @reclaim: cookie for shared reclaim walks, NULL for full walks
751 *
752 * Returns references to children of the hierarchy below @root, or
753 * @root itself, or %NULL after a full round-trip.
754 *
755 * Caller must pass the return value in @prev on subsequent
756 * invocations for reference counting, or use mem_cgroup_iter_break()
757 * to cancel a hierarchy walk before the round-trip is complete.
758 *
759 * Reclaimers can specify a node and a priority level in @reclaim to
760 * divide up the memcgs in the hierarchy among all concurrent
761 * reclaimers operating on the same node and priority.
762 */
763struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
764 struct mem_cgroup *prev,
765 struct mem_cgroup_reclaim_cookie *reclaim)
766{
767 struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
768 struct cgroup_subsys_state *css = NULL;
769 struct mem_cgroup *memcg = NULL;
770 struct mem_cgroup *pos = NULL;
771
772 if (mem_cgroup_disabled())
773 return NULL;
774
775 if (!root)
776 root = root_mem_cgroup;
777
778 if (prev && !reclaim)
779 pos = prev;
780
781 if (!root->use_hierarchy && root != root_mem_cgroup) {
782 if (prev)
783 goto out;
784 return root;
785 }
786
787 rcu_read_lock();
788
789 if (reclaim) {
790 struct mem_cgroup_per_node *mz;
791
792 mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
793 iter = &mz->iter[reclaim->priority];
794
795 if (prev && reclaim->generation != iter->generation)
796 goto out_unlock;
797
798 while (1) {
799 pos = READ_ONCE(iter->position);
800 if (!pos || css_tryget(&pos->css))
801 break;
802 /*
803 * css reference reached zero, so iter->position will
804 * be cleared by ->css_released. However, we should not
805 * rely on this happening soon, because ->css_released
806 * is called from a work queue, and by busy-waiting we
807 * might block it. So we clear iter->position right
808 * away.
809 */
810 (void)cmpxchg(&iter->position, pos, NULL);
811 }
812 }
813
814 if (pos)
815 css = &pos->css;
816
817 for (;;) {
818 css = css_next_descendant_pre(css, &root->css);
819 if (!css) {
820 /*
821 * Reclaimers share the hierarchy walk, and a
822 * new one might jump in right at the end of
823 * the hierarchy - make sure they see at least
824 * one group and restart from the beginning.
825 */
826 if (!prev)
827 continue;
828 break;
829 }
830
831 /*
832 * Verify the css and acquire a reference. The root
833 * is provided by the caller, so we know it's alive
834 * and kicking, and don't take an extra reference.
835 */
836 memcg = mem_cgroup_from_css(css);
837
838 if (css == &root->css)
839 break;
840
841 if (css_tryget(css))
842 break;
843
844 memcg = NULL;
845 }
846
847 if (reclaim) {
848 /*
849 * The position could have already been updated by a competing
850 * thread, so check that the value hasn't changed since we read
851 * it to avoid reclaiming from the same cgroup twice.
852 */
853 (void)cmpxchg(&iter->position, pos, memcg);
854
855 if (pos)
856 css_put(&pos->css);
857
858 if (!memcg)
859 iter->generation++;
860 else if (!prev)
861 reclaim->generation = iter->generation;
862 }
863
864out_unlock:
865 rcu_read_unlock();
866out:
867 if (prev && prev != root)
868 css_put(&prev->css);
869
870 return memcg;
871}
872
873/**
874 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
875 * @root: hierarchy root
876 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
877 */
878void mem_cgroup_iter_break(struct mem_cgroup *root,
879 struct mem_cgroup *prev)
880{
881 if (!root)
882 root = root_mem_cgroup;
883 if (prev && prev != root)
884 css_put(&prev->css);
885}
886
887static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
888{
889 struct mem_cgroup *memcg = dead_memcg;
890 struct mem_cgroup_reclaim_iter *iter;
891 struct mem_cgroup_per_node *mz;
892 int nid;
893 int i;
894
895 while ((memcg = parent_mem_cgroup(memcg))) {
896 for_each_node(nid) {
897 mz = mem_cgroup_nodeinfo(memcg, nid);
898 for (i = 0; i <= DEF_PRIORITY; i++) {
899 iter = &mz->iter[i];
900 cmpxchg(&iter->position,
901 dead_memcg, NULL);
902 }
903 }
904 }
905}
906
907/*
908 * Iteration constructs for visiting all cgroups (under a tree). If
909 * loops are exited prematurely (break), mem_cgroup_iter_break() must
910 * be used for reference counting.
911 */
912#define for_each_mem_cgroup_tree(iter, root) \
913 for (iter = mem_cgroup_iter(root, NULL, NULL); \
914 iter != NULL; \
915 iter = mem_cgroup_iter(root, iter, NULL))
916
917#define for_each_mem_cgroup(iter) \
918 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
919 iter != NULL; \
920 iter = mem_cgroup_iter(NULL, iter, NULL))
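
/*
 * Illustrative use (should_stop() stands for any caller-specific test): a
 * walk that bails out early must hand the last returned memcg back via
 * mem_cgroup_iter_break() to drop its css reference:
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 *
 * mem_cgroup_scan_tasks() below follows exactly this pattern.
 */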
921
922/**
923 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
924 * @memcg: hierarchy root
925 * @fn: function to call for each task
926 * @arg: argument passed to @fn
927 *
928 * This function iterates over tasks attached to @memcg or to any of its
929 * descendants and calls @fn for each task. If @fn returns a non-zero
930 * value, the function breaks the iteration loop and returns the value.
931 * Otherwise, it will iterate over all tasks and return 0.
932 *
933 * This function must not be called for the root memory cgroup.
934 */
935int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
936 int (*fn)(struct task_struct *, void *), void *arg)
937{
938 struct mem_cgroup *iter;
939 int ret = 0;
940
941 BUG_ON(memcg == root_mem_cgroup);
942
943 for_each_mem_cgroup_tree(iter, memcg) {
944 struct css_task_iter it;
945 struct task_struct *task;
946
947 css_task_iter_start(&iter->css, &it);
948 while (!ret && (task = css_task_iter_next(&it)))
949 ret = fn(task, arg);
950 css_task_iter_end(&it);
951 if (ret) {
952 mem_cgroup_iter_break(memcg, iter);
953 break;
954 }
955 }
956 return ret;
957}
958
959/**
960 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
961 * @page: the page
962 * @pgdat: pgdat of the page
963 *
964 * This function is only safe when following the LRU page isolation
965 * and putback protocol: the LRU lock must be held, and the page must
966 * either be PageLRU() or the caller must have isolated/allocated it.
967 */
968struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
969{
970 struct mem_cgroup_per_node *mz;
971 struct mem_cgroup *memcg;
972 struct lruvec *lruvec;
973
974 if (mem_cgroup_disabled()) {
975 lruvec = &pgdat->lruvec;
976 goto out;
977 }
978
979 memcg = page->mem_cgroup;
980 /*
981 * Swapcache readahead pages are added to the LRU - and
982 * possibly migrated - before they are charged.
983 */
984 if (!memcg)
985 memcg = root_mem_cgroup;
986
987 mz = mem_cgroup_page_nodeinfo(memcg, page);
988 lruvec = &mz->lruvec;
989out:
990 /*
991 * Since a node can be onlined after the mem_cgroup was created,
992	 * we have to be prepared to initialize lruvec->pgdat here;
993	 * and if offlined then onlined again, we need to reinitialize it.
994 */
995 if (unlikely(lruvec->pgdat != pgdat))
996 lruvec->pgdat = pgdat;
997 return lruvec;
998}
999
1000/**
1001 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1002 * @lruvec: mem_cgroup per node lru vector
1003 * @lru: index of lru list the page is sitting on
1004 * @zid: zone id of the accounted pages
1005 * @nr_pages: positive when adding or negative when removing
1006 *
1007 * This function must be called under lru_lock, just before a page is added
1008 * to or just after a page is removed from an lru list (that ordering being
1009 * so as to allow it to check that lru_size 0 is consistent with list_empty).
1010 */
1011void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1012 int zid, int nr_pages)
1013{
1014 struct mem_cgroup_per_node *mz;
1015 unsigned long *lru_size;
1016 long size;
1017
1018 if (mem_cgroup_disabled())
1019 return;
1020
1021 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1022 lru_size = &mz->lru_zone_size[zid][lru];
1023
1024 if (nr_pages < 0)
1025 *lru_size += nr_pages;
1026
1027 size = *lru_size;
1028 if (WARN_ONCE(size < 0,
1029 "%s(%p, %d, %d): lru_size %ld\n",
1030 __func__, lruvec, lru, nr_pages, size)) {
1031 VM_BUG_ON(1);
1032 *lru_size = 0;
1033 }
1034
1035 if (nr_pages > 0)
1036 *lru_size += nr_pages;
1037}
1038
1039bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
1040{
1041 struct mem_cgroup *task_memcg;
1042 struct task_struct *p;
1043 bool ret;
1044
1045 p = find_lock_task_mm(task);
1046 if (p) {
1047 task_memcg = get_mem_cgroup_from_mm(p->mm);
1048 task_unlock(p);
1049 } else {
1050 /*
1051 * All threads may have already detached their mm's, but the oom
1052 * killer still needs to detect if they have already been oom
1053 * killed to prevent needlessly killing additional tasks.
1054 */
1055 rcu_read_lock();
1056 task_memcg = mem_cgroup_from_task(task);
1057 css_get(&task_memcg->css);
1058 rcu_read_unlock();
1059 }
1060 ret = mem_cgroup_is_descendant(task_memcg, memcg);
1061 css_put(&task_memcg->css);
1062 return ret;
1063}
1064
1065/**
1066 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1067 * @memcg: the memory cgroup
1068 *
1069 * Returns the maximum amount of memory @memcg can be charged with, in
1070 * pages.
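 *
 * Example (illustrative numbers): with usage at 900 pages of a 1000-page
 * limit the margin is 100 pages; if memsw accounting is enabled and
 * memory+swap usage is 980 of a 1000-page limit, the margin shrinks to 20.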
1071 */
1072static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1073{
1074 unsigned long margin = 0;
1075 unsigned long count;
1076 unsigned long limit;
1077
1078 count = page_counter_read(&memcg->memory);
1079 limit = READ_ONCE(memcg->memory.limit);
1080 if (count < limit)
1081 margin = limit - count;
1082
1083 if (do_memsw_account()) {
1084 count = page_counter_read(&memcg->memsw);
1085 limit = READ_ONCE(memcg->memsw.limit);
1086 if (count <= limit)
1087 margin = min(margin, limit - count);
1088 else
1089 margin = 0;
1090 }
1091
1092 return margin;
1093}
1094
1095/*
1096 * A routine for checking whether "memcg" is under move_account() or not.
1097 *
1098 * Checking whether a cgroup is mc.from or mc.to or under the hierarchy of
1099 * a moving cgroup. This is for waiting out the high memory pressure
1100 * caused by the "move".
1101 */
1102static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1103{
1104 struct mem_cgroup *from;
1105 struct mem_cgroup *to;
1106 bool ret = false;
1107 /*
1108 * Unlike task_move routines, we access mc.to, mc.from not under
1109 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1110 */
1111 spin_lock(&mc.lock);
1112 from = mc.from;
1113 to = mc.to;
1114 if (!from)
1115 goto unlock;
1116
1117 ret = mem_cgroup_is_descendant(from, memcg) ||
1118 mem_cgroup_is_descendant(to, memcg);
1119unlock:
1120 spin_unlock(&mc.lock);
1121 return ret;
1122}
1123
1124static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1125{
1126 if (mc.moving_task && current != mc.moving_task) {
1127 if (mem_cgroup_under_move(memcg)) {
1128 DEFINE_WAIT(wait);
1129 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1130 /* moving charge context might have finished. */
1131 if (mc.moving_task)
1132 schedule();
1133 finish_wait(&mc.waitq, &wait);
1134 return true;
1135 }
1136 }
1137 return false;
1138}
1139
1140#define K(x) ((x) << (PAGE_SHIFT-10))
1141/**
1142 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
1143 * @memcg: The memory cgroup that went over limit
1144 * @p: Task that is going to be killed
1145 *
1146 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1147 * enabled
1148 */
1149void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1150{
1151 struct mem_cgroup *iter;
1152 unsigned int i;
1153
1154 rcu_read_lock();
1155
1156 if (p) {
1157 pr_info("Task in ");
1158 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1159 pr_cont(" killed as a result of limit of ");
1160 } else {
1161 pr_info("Memory limit reached of cgroup ");
1162 }
1163
1164 pr_cont_cgroup_path(memcg->css.cgroup);
1165 pr_cont("\n");
1166
1167 rcu_read_unlock();
1168
1169 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1170 K((u64)page_counter_read(&memcg->memory)),
1171 K((u64)memcg->memory.limit), memcg->memory.failcnt);
1172 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1173 K((u64)page_counter_read(&memcg->memsw)),
1174 K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
1175 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1176 K((u64)page_counter_read(&memcg->kmem)),
1177 K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
1178
1179 for_each_mem_cgroup_tree(iter, memcg) {
1180 pr_info("Memory cgroup stats for ");
1181 pr_cont_cgroup_path(iter->css.cgroup);
1182 pr_cont(":");
1183
1184 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1185 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1186 continue;
1187 pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
1188 K(mem_cgroup_read_stat(iter, i)));
1189 }
1190
1191 for (i = 0; i < NR_LRU_LISTS; i++)
1192 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1193 K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1194
1195 pr_cont("\n");
1196 }
1197}
1198
1199/*
1200 * This function returns the number of memcgs under the hierarchy tree.
1201 * Returns 1 (self count) if there are no children.
1202 */
1203static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1204{
1205 int num = 0;
1206 struct mem_cgroup *iter;
1207
1208 for_each_mem_cgroup_tree(iter, memcg)
1209 num++;
1210 return num;
1211}
1212
1213/*
1214 * Return the memory (and swap, if configured) limit for a memcg.
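 *
 * Example (illustrative numbers, assuming non-zero swappiness): with
 * memory.limit at 512M worth of pages, swap.limit unlimited but only 256M
 * of swap present, and memsw.limit at 1G, the result corresponds to
 * min(512M + 256M, 1G) = 768M.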
1215 */
1216unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
1217{
1218 unsigned long limit;
1219
1220 limit = memcg->memory.limit;
1221 if (mem_cgroup_swappiness(memcg)) {
1222 unsigned long memsw_limit;
1223 unsigned long swap_limit;
1224
1225 memsw_limit = memcg->memsw.limit;
1226 swap_limit = memcg->swap.limit;
1227 swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
1228 limit = min(limit + swap_limit, memsw_limit);
1229 }
1230 return limit;
1231}
1232
1233static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1234 int order)
1235{
1236 struct oom_control oc = {
1237 .zonelist = NULL,
1238 .nodemask = NULL,
1239 .memcg = memcg,
1240 .gfp_mask = gfp_mask,
1241 .order = order,
1242 };
1243 bool ret;
1244
1245 mutex_lock(&oom_lock);
1246 ret = out_of_memory(&oc);
1247 mutex_unlock(&oom_lock);
1248 return ret;
1249}
1250
1251#if MAX_NUMNODES > 1
1252
1253/**
1254 * test_mem_cgroup_node_reclaimable
1255 * @memcg: the target memcg
1256 * @nid: the node ID to be checked.
1257 * @noswap: specify true here if the caller wants file-only information.
1258 *
1259 * This function returns whether the specified memcg contains any
1260 * reclaimable pages on a node. Returns true if there are any reclaimable
1261 * pages in the node.
1262 */
1263static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1264 int nid, bool noswap)
1265{
1266 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1267 return true;
1268 if (noswap || !total_swap_pages)
1269 return false;
1270 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1271 return true;
1272 return false;
1273
1274}
1275
1276/*
1277 * Always updating the nodemask is not very good - even if we have an empty
1278 * list or the wrong list here, we can start from some node and traverse all
1279 * nodes based on the zonelist. So the list is only updated loosely,
1280 * roughly once per 10 seconds.
1281 */
1282static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1283{
1284 int nid;
1285 /*
1286 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1287 * pagein/pageout changes since the last update.
1288 */
1289 if (!atomic_read(&memcg->numainfo_events))
1290 return;
1291 if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1292 return;
1293
1294 /* make a nodemask where this memcg uses memory from */
1295 memcg->scan_nodes = node_states[N_MEMORY];
1296
1297 for_each_node_mask(nid, node_states[N_MEMORY]) {
1298
1299 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1300 node_clear(nid, memcg->scan_nodes);
1301 }
1302
1303 atomic_set(&memcg->numainfo_events, 0);
1304 atomic_set(&memcg->numainfo_updating, 0);
1305}
1306
1307/*
1308 * Select a node to start reclaim from. Because all we need is to reduce the
1309 * usage counter, starting from anywhere is OK. Reclaiming from the current
1310 * node has both pros and cons.
1311 *
1312 * Freeing memory from the current node means freeing memory from a node which
1313 * we'll use or have used, so it may hurt the LRU. And if several threads
1314 * hit their limits, they will all contend on that one node. But freeing from
1315 * a remote node costs more for memory reclaim because of memory latency.
1316 *
1317 * For now, we use round-robin. A better algorithm is welcome.
1318 */
1319int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1320{
1321 int node;
1322
1323 mem_cgroup_may_update_nodemask(memcg);
1324 node = memcg->last_scanned_node;
1325
1326 node = next_node_in(node, memcg->scan_nodes);
1327 /*
1328	 * mem_cgroup_may_update_nodemask might have seen no reclaimable pages
1329 * last time it really checked all the LRUs due to rate limiting.
1330 * Fallback to the current node in that case for simplicity.
1331 */
1332 if (unlikely(node == MAX_NUMNODES))
1333 node = numa_node_id();
1334
1335 memcg->last_scanned_node = node;
1336 return node;
1337}
1338#else
1339int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1340{
1341 return 0;
1342}
1343#endif
1344
1345static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1346 pg_data_t *pgdat,
1347 gfp_t gfp_mask,
1348 unsigned long *total_scanned)
1349{
1350 struct mem_cgroup *victim = NULL;
1351 int total = 0;
1352 int loop = 0;
1353 unsigned long excess;
1354 unsigned long nr_scanned;
1355 struct mem_cgroup_reclaim_cookie reclaim = {
1356 .pgdat = pgdat,
1357 .priority = 0,
1358 };
1359
1360 excess = soft_limit_excess(root_memcg);
1361
1362 while (1) {
1363 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1364 if (!victim) {
1365 loop++;
1366 if (loop >= 2) {
1367 /*
1368 * If we have not been able to reclaim
1369				 * anything, it might be because there are
1370 * no reclaimable pages under this hierarchy
1371 */
1372 if (!total)
1373 break;
1374 /*
1375 * We want to do more targeted reclaim.
1376				 * excess >> 2 is not too excessive, so we don't
1377				 * reclaim too much, nor too little, which would keep
1378				 * us coming back to reclaim from this cgroup
1379 */
1380 if (total >= (excess >> 2) ||
1381 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1382 break;
1383 }
1384 continue;
1385 }
1386 total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1387 pgdat, &nr_scanned);
1388 *total_scanned += nr_scanned;
1389 if (!soft_limit_excess(root_memcg))
1390 break;
1391 }
1392 mem_cgroup_iter_break(root_memcg, victim);
1393 return total;
1394}
1395
1396#ifdef CONFIG_LOCKDEP
1397static struct lockdep_map memcg_oom_lock_dep_map = {
1398 .name = "memcg_oom_lock",
1399};
1400#endif
1401
1402static DEFINE_SPINLOCK(memcg_oom_lock);
1403
1404/*
1405 * Check whether the OOM killer is already running under our hierarchy.
1406 * If someone else is running it, return false.
1407 */
1408static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1409{
1410 struct mem_cgroup *iter, *failed = NULL;
1411
1412 spin_lock(&memcg_oom_lock);
1413
1414 for_each_mem_cgroup_tree(iter, memcg) {
1415 if (iter->oom_lock) {
1416 /*
1417 * this subtree of our hierarchy is already locked
1418			 * so we cannot take the lock.
1419 */
1420 failed = iter;
1421 mem_cgroup_iter_break(memcg, iter);
1422 break;
1423 } else
1424 iter->oom_lock = true;
1425 }
1426
1427 if (failed) {
1428 /*
1429 * OK, we failed to lock the whole subtree so we have
1430		 * to clean up what we already set up, up to the failing cgroup
1431 */
1432 for_each_mem_cgroup_tree(iter, memcg) {
1433 if (iter == failed) {
1434 mem_cgroup_iter_break(memcg, iter);
1435 break;
1436 }
1437 iter->oom_lock = false;
1438 }
1439 } else
1440 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1441
1442 spin_unlock(&memcg_oom_lock);
1443
1444 return !failed;
1445}
1446
1447static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1448{
1449 struct mem_cgroup *iter;
1450
1451 spin_lock(&memcg_oom_lock);
1452 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
1453 for_each_mem_cgroup_tree(iter, memcg)
1454 iter->oom_lock = false;
1455 spin_unlock(&memcg_oom_lock);
1456}
1457
1458static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1459{
1460 struct mem_cgroup *iter;
1461
1462 spin_lock(&memcg_oom_lock);
1463 for_each_mem_cgroup_tree(iter, memcg)
1464 iter->under_oom++;
1465 spin_unlock(&memcg_oom_lock);
1466}
1467
1468static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1469{
1470 struct mem_cgroup *iter;
1471
1472 /*
1473 * When a new child is created while the hierarchy is under oom,
1474 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
1475 */
1476 spin_lock(&memcg_oom_lock);
1477 for_each_mem_cgroup_tree(iter, memcg)
1478 if (iter->under_oom > 0)
1479 iter->under_oom--;
1480 spin_unlock(&memcg_oom_lock);
1481}
1482
1483static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1484
1485struct oom_wait_info {
1486 struct mem_cgroup *memcg;
1487 wait_queue_t wait;
1488};
1489
1490static int memcg_oom_wake_function(wait_queue_t *wait,
1491 unsigned mode, int sync, void *arg)
1492{
1493 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1494 struct mem_cgroup *oom_wait_memcg;
1495 struct oom_wait_info *oom_wait_info;
1496
1497 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1498 oom_wait_memcg = oom_wait_info->memcg;
1499
1500 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1501 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1502 return 0;
1503 return autoremove_wake_function(wait, mode, sync, arg);
1504}
1505
1506static void memcg_oom_recover(struct mem_cgroup *memcg)
1507{
1508 /*
1509 * For the following lockless ->under_oom test, the only required
1510 * guarantee is that it must see the state asserted by an OOM when
1511 * this function is called as a result of userland actions
1512 * triggered by the notification of the OOM. This is trivially
1513 * achieved by invoking mem_cgroup_mark_under_oom() before
1514 * triggering notification.
1515 */
1516 if (memcg && memcg->under_oom)
1517 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1518}
1519
1520static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1521{
1522 if (!current->memcg_may_oom)
1523 return;
1524 /*
1525 * We are in the middle of the charge context here, so we
1526 * don't want to block when potentially sitting on a callstack
1527 * that holds all kinds of filesystem and mm locks.
1528 *
1529 * Also, the caller may handle a failed allocation gracefully
1530 * (like optional page cache readahead) and so an OOM killer
1531 * invocation might not even be necessary.
1532 *
1533 * That's why we don't do anything here except remember the
1534 * OOM context and then deal with it at the end of the page
1535 * fault when the stack is unwound, the locks are released,
1536 * and when we know whether the fault was overall successful.
1537 */
1538 css_get(&memcg->css);
1539 current->memcg_in_oom = memcg;
1540 current->memcg_oom_gfp_mask = mask;
1541 current->memcg_oom_order = order;
1542}
1543
1544/**
1545 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1546 * @handle: actually kill/wait or just clean up the OOM state
1547 *
1548 * This has to be called at the end of a page fault if the memcg OOM
1549 * handler was enabled.
1550 *
1551 * Memcg supports userspace OOM handling where failed allocations must
1552 * sleep on a waitqueue until the userspace task resolves the
1553 * situation. Sleeping directly in the charge context with all kinds
1554 * of locks held is not a good idea, instead we remember an OOM state
1555 * in the task and mem_cgroup_oom_synchronize() has to be called at
1556 * the end of the page fault to complete the OOM handling.
1557 *
1558 * Returns %true if an ongoing memcg OOM situation was detected and
1559 * completed, %false otherwise.
1560 */
1561bool mem_cgroup_oom_synchronize(bool handle)
1562{
1563 struct mem_cgroup *memcg = current->memcg_in_oom;
1564 struct oom_wait_info owait;
1565 bool locked;
1566
1567 /* OOM is global, do not handle */
1568 if (!memcg)
1569 return false;
1570
1571 if (!handle)
1572 goto cleanup;
1573
1574 owait.memcg = memcg;
1575 owait.wait.flags = 0;
1576 owait.wait.func = memcg_oom_wake_function;
1577 owait.wait.private = current;
1578 INIT_LIST_HEAD(&owait.wait.task_list);
1579
1580 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1581 mem_cgroup_mark_under_oom(memcg);
1582
1583 locked = mem_cgroup_oom_trylock(memcg);
1584
1585 if (locked)
1586 mem_cgroup_oom_notify(memcg);
1587
1588 if (locked && !memcg->oom_kill_disable) {
1589 mem_cgroup_unmark_under_oom(memcg);
1590 finish_wait(&memcg_oom_waitq, &owait.wait);
1591 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1592 current->memcg_oom_order);
1593 } else {
1594 schedule();
1595 mem_cgroup_unmark_under_oom(memcg);
1596 finish_wait(&memcg_oom_waitq, &owait.wait);
1597 }
1598
1599 if (locked) {
1600 mem_cgroup_oom_unlock(memcg);
1601 /*
1602 * There is no guarantee that an OOM-lock contender
1603 * sees the wakeups triggered by the OOM kill
1604		 * uncharges. Wake any sleepers explicitly.
1605 */
1606 memcg_oom_recover(memcg);
1607 }
1608cleanup:
1609 current->memcg_in_oom = NULL;
1610 css_put(&memcg->css);
1611 return true;
1612}
1613
1614/**
1615 * lock_page_memcg - lock a page->mem_cgroup binding
1616 * @page: the page
1617 *
1618 * This function protects unlocked LRU pages from being moved to
1619 * another cgroup and stabilizes their page->mem_cgroup binding.
1620 */
1621void lock_page_memcg(struct page *page)
1622{
1623 struct mem_cgroup *memcg;
1624 unsigned long flags;
1625
1626 /*
1627 * The RCU lock is held throughout the transaction. The fast
1628 * path can get away without acquiring the memcg->move_lock
1629 * because page moving starts with an RCU grace period.
1630 */
1631 rcu_read_lock();
1632
1633 if (mem_cgroup_disabled())
1634 return;
1635again:
1636 memcg = page->mem_cgroup;
1637 if (unlikely(!memcg))
1638 return;
1639
1640 if (atomic_read(&memcg->moving_account) <= 0)
1641 return;
1642
1643 spin_lock_irqsave(&memcg->move_lock, flags);
1644 if (memcg != page->mem_cgroup) {
1645 spin_unlock_irqrestore(&memcg->move_lock, flags);
1646 goto again;
1647 }
1648
1649 /*
1650 * When charge migration first begins, we can have locked and
1651 * unlocked page stat updates happening concurrently. Track
1652	 * the task that has the lock for unlock_page_memcg().
1653 */
1654 memcg->move_lock_task = current;
1655 memcg->move_lock_flags = flags;
1656
1657 return;
1658}
1659EXPORT_SYMBOL(lock_page_memcg);
1660
1661/**
1662 * unlock_page_memcg - unlock a page->mem_cgroup binding
1663 * @page: the page
1664 */
1665void unlock_page_memcg(struct page *page)
1666{
1667 struct mem_cgroup *memcg = page->mem_cgroup;
1668
1669 if (memcg && memcg->move_lock_task == current) {
1670 unsigned long flags = memcg->move_lock_flags;
1671
1672 memcg->move_lock_task = NULL;
1673 memcg->move_lock_flags = 0;
1674
1675 spin_unlock_irqrestore(&memcg->move_lock, flags);
1676 }
1677
1678 rcu_read_unlock();
1679}
1680EXPORT_SYMBOL(unlock_page_memcg);
1681
1682/*
1683 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
1684 * TODO: it may be necessary to use bigger numbers on big iron.
1685 */
1686#define CHARGE_BATCH 32U
1687struct memcg_stock_pcp {
1688	struct mem_cgroup *cached; /* this is never the root cgroup */
1689 unsigned int nr_pages;
1690 struct work_struct work;
1691 unsigned long flags;
1692#define FLUSHING_CACHED_CHARGE 0
1693};
1694static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1695static DEFINE_MUTEX(percpu_charge_mutex);
1696
1697/**
1698 * consume_stock: Try to consume stocked charge on this cpu.
1699 * @memcg: memcg to consume from.
1700 * @nr_pages: how many pages to charge.
1701 *
1702 * The charges will only happen if @memcg matches the current cpu's memcg
1703 * stock, and at least @nr_pages are available in that stock. Failure to
1704 * service an allocation will refill the stock.
1705 *
1706 * returns true if successful, false otherwise.
1707 */
1708static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1709{
1710 struct memcg_stock_pcp *stock;
1711 unsigned long flags;
1712 bool ret = false;
1713
1714 if (nr_pages > CHARGE_BATCH)
1715 return ret;
1716
1717 local_irq_save(flags);
1718
1719 stock = this_cpu_ptr(&memcg_stock);
1720 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
1721 stock->nr_pages -= nr_pages;
1722 ret = true;
1723 }
1724
1725 local_irq_restore(flags);
1726
1727 return ret;
1728}
1729
1730/*
1731 * Return the stocked charges to the page counters and reset the cached information.
1732 */
1733static void drain_stock(struct memcg_stock_pcp *stock)
1734{
1735 struct mem_cgroup *old = stock->cached;
1736
1737 if (stock->nr_pages) {
1738 page_counter_uncharge(&old->memory, stock->nr_pages);
1739 if (do_memsw_account())
1740 page_counter_uncharge(&old->memsw, stock->nr_pages);
1741 css_put_many(&old->css, stock->nr_pages);
1742 stock->nr_pages = 0;
1743 }
1744 stock->cached = NULL;
1745}
1746
1747static void drain_local_stock(struct work_struct *dummy)
1748{
1749 struct memcg_stock_pcp *stock;
1750 unsigned long flags;
1751
1752 local_irq_save(flags);
1753
1754 stock = this_cpu_ptr(&memcg_stock);
1755 drain_stock(stock);
1756 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1757
1758 local_irq_restore(flags);
1759}
1760
1761/*
1762 * Cache charges (nr_pages) to the local per-cpu area.
1763 * They will be consumed by consume_stock() later.
1764 */
1765static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1766{
1767 struct memcg_stock_pcp *stock;
1768 unsigned long flags;
1769
1770 local_irq_save(flags);
1771
1772 stock = this_cpu_ptr(&memcg_stock);
1773 if (stock->cached != memcg) { /* reset if necessary */
1774 drain_stock(stock);
1775 stock->cached = memcg;
1776 }
1777 stock->nr_pages += nr_pages;
1778
1779 local_irq_restore(flags);
1780}
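
/*
 * Typical flow, as implemented by try_charge() below: a charge of nr_pages
 * is attempted as a full CHARGE_BATCH against the page counters and the
 * surplus (batch - nr_pages) is parked here via refill_stock(), so that
 * subsequent small charges on this cpu can be served by consume_stock()
 * without touching the counters.
 */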
1781
1782/*
1783 * Drain all per-CPU charge caches for the given root_memcg and the
1784 * whole subtree of the hierarchy under it.
1785 */
1786static void drain_all_stock(struct mem_cgroup *root_memcg)
1787{
1788 int cpu, curcpu;
1789
1790	/* If someone's already draining, avoid running more workers. */
1791 if (!mutex_trylock(&percpu_charge_mutex))
1792 return;
1793 /* Notify other cpus that system-wide "drain" is running */
1794 get_online_cpus();
1795 curcpu = get_cpu();
1796 for_each_online_cpu(cpu) {
1797 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1798 struct mem_cgroup *memcg;
1799
1800 memcg = stock->cached;
1801 if (!memcg || !stock->nr_pages)
1802 continue;
1803 if (!mem_cgroup_is_descendant(memcg, root_memcg))
1804 continue;
1805 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1806 if (cpu == curcpu)
1807 drain_local_stock(&stock->work);
1808 else
1809 schedule_work_on(cpu, &stock->work);
1810 }
1811 }
1812 put_cpu();
1813 put_online_cpus();
1814 mutex_unlock(&percpu_charge_mutex);
1815}
1816
1817static int memcg_hotplug_cpu_dead(unsigned int cpu)
1818{
1819 struct memcg_stock_pcp *stock;
1820
1821 stock = &per_cpu(memcg_stock, cpu);
1822 drain_stock(stock);
1823 return 0;
1824}
1825
1826static void reclaim_high(struct mem_cgroup *memcg,
1827 unsigned int nr_pages,
1828 gfp_t gfp_mask)
1829{
1830 do {
1831 if (page_counter_read(&memcg->memory) <= memcg->high)
1832 continue;
1833 mem_cgroup_events(memcg, MEMCG_HIGH, 1);
1834 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
1835 } while ((memcg = parent_mem_cgroup(memcg)));
1836}
1837
1838static void high_work_func(struct work_struct *work)
1839{
1840 struct mem_cgroup *memcg;
1841
1842 memcg = container_of(work, struct mem_cgroup, high_work);
1843 reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
1844}
1845
1846/*
1847 * Scheduled by try_charge() to be executed from the userland return path
1848 * and reclaims memory over the high limit.
1849 */
1850void mem_cgroup_handle_over_high(void)
1851{
1852 unsigned int nr_pages = current->memcg_nr_pages_over_high;
1853 struct mem_cgroup *memcg;
1854
1855 if (likely(!nr_pages))
1856 return;
1857
1858 memcg = get_mem_cgroup_from_mm(current->mm);
1859 reclaim_high(memcg, nr_pages, GFP_KERNEL);
1860 css_put(&memcg->css);
1861 current->memcg_nr_pages_over_high = 0;
1862}
1863
1864static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
1865 unsigned int nr_pages)
1866{
1867 unsigned int batch = max(CHARGE_BATCH, nr_pages);
1868 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1869 struct mem_cgroup *mem_over_limit;
1870 struct page_counter *counter;
1871 unsigned long nr_reclaimed;
1872 bool may_swap = true;
1873 bool drained = false;
1874
1875 if (mem_cgroup_is_root(memcg))
1876 return 0;
1877retry:
1878 if (consume_stock(memcg, nr_pages))
1879 return 0;
1880
1881 if (!do_memsw_account() ||
1882 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
1883 if (page_counter_try_charge(&memcg->memory, batch, &counter))
1884 goto done_restock;
1885 if (do_memsw_account())
1886 page_counter_uncharge(&memcg->memsw, batch);
1887 mem_over_limit = mem_cgroup_from_counter(counter, memory);
1888 } else {
1889 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
1890 may_swap = false;
1891 }
1892
1893 if (batch > nr_pages) {
1894 batch = nr_pages;
1895 goto retry;
1896 }
1897
1898 /*
1899 * Unlike in global OOM situations, memcg is not in a physical
1900 * memory shortage. Allow dying and OOM-killed tasks to
1901 * bypass the last charges so that they can exit quickly and
1902 * free their memory.
1903 */
1904 if (unlikely(test_thread_flag(TIF_MEMDIE) ||
1905 fatal_signal_pending(current) ||
1906 current->flags & PF_EXITING))
1907 goto force;
1908
1909 /*
1910 * Prevent unbounded recursion when reclaim operations need to
1911 * allocate memory. This might exceed the limits temporarily,
1912 * but we prefer facilitating memory reclaim and getting back
1913 * under the limit over triggering OOM kills in these cases.
1914 */
1915 if (unlikely(current->flags & PF_MEMALLOC))
1916 goto force;
1917
1918 if (unlikely(task_in_memcg_oom(current)))
1919 goto nomem;
1920
1921 if (!gfpflags_allow_blocking(gfp_mask))
1922 goto nomem;
1923
1924 mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);
1925
1926 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
1927 gfp_mask, may_swap);
1928
1929 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
1930 goto retry;
1931
1932 if (!drained) {
1933 drain_all_stock(mem_over_limit);
1934 drained = true;
1935 goto retry;
1936 }
1937
1938 if (gfp_mask & __GFP_NORETRY)
1939 goto nomem;
1940 /*
1941 * Even though the limit is exceeded at this point, reclaim
1942 * may have been able to free some pages. Retry the charge
1943 * before killing the task.
1944 *
1945 * Only for regular pages, though: huge pages are rather
1946 * unlikely to succeed so close to the limit, and we fall back
1947 * to regular pages anyway in case of failure.
1948 */
1949 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
1950 goto retry;
1951 /*
1952	 * During task move, charges can be double-counted. So it's better
1953	 * to wait until the end of the task move if one is in progress.
1954 */
1955 if (mem_cgroup_wait_acct_move(mem_over_limit))
1956 goto retry;
1957
1958 if (nr_retries--)
1959 goto retry;
1960
1961 if (gfp_mask & __GFP_NOFAIL)
1962 goto force;
1963
1964 if (fatal_signal_pending(current))
1965 goto force;
1966
1967 mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);
1968
1969 mem_cgroup_oom(mem_over_limit, gfp_mask,
1970 get_order(nr_pages * PAGE_SIZE));
1971nomem:
1972 if (!(gfp_mask & __GFP_NOFAIL))
1973 return -ENOMEM;
1974force:
1975 /*
1976 * The allocation either can't fail or will lead to more memory
1977	 * being freed very soon. Allow memory usage to go over the limit
1978 * temporarily by force charging it.
1979 */
1980 page_counter_charge(&memcg->memory, nr_pages);
1981 if (do_memsw_account())
1982 page_counter_charge(&memcg->memsw, nr_pages);
1983 css_get_many(&memcg->css, nr_pages);
1984
1985 return 0;
1986
1987done_restock:
1988 css_get_many(&memcg->css, batch);
1989 if (batch > nr_pages)
1990 refill_stock(memcg, batch - nr_pages);
1991
1992 /*
1993 * If the hierarchy is above the normal consumption range, schedule
1994 * reclaim on returning to userland. We can perform reclaim here
1995 * if __GFP_RECLAIM but let's always punt for simplicity and so that
1996 * GFP_KERNEL can consistently be used during reclaim. @memcg is
1997 * not recorded as it most likely matches current's and won't
1998 * change in the meantime. As high limit is checked again before
1999 * reclaim, the cost of mismatch is negligible.
2000 */
2001 do {
2002 if (page_counter_read(&memcg->memory) > memcg->high) {
2003 /* Don't bother a random interrupted task */
2004 if (in_interrupt()) {
2005 schedule_work(&memcg->high_work);
2006 break;
2007 }
2008 current->memcg_nr_pages_over_high += batch;
2009 set_notify_resume(current);
2010 break;
2011 }
2012 } while ((memcg = parent_mem_cgroup(memcg)));
2013
2014 return 0;
2015}
2016
2017static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2018{
2019 if (mem_cgroup_is_root(memcg))
2020 return;
2021
2022 page_counter_uncharge(&memcg->memory, nr_pages);
2023 if (do_memsw_account())
2024 page_counter_uncharge(&memcg->memsw, nr_pages);
2025
2026 css_put_many(&memcg->css, nr_pages);
2027}
2028
2029static void lock_page_lru(struct page *page, int *isolated)
2030{
2031 struct zone *zone = page_zone(page);
2032
2033 spin_lock_irq(zone_lru_lock(zone));
2034 if (PageLRU(page)) {
2035 struct lruvec *lruvec;
2036
2037 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
2038 ClearPageLRU(page);
2039 del_page_from_lru_list(page, lruvec, page_lru(page));
2040 *isolated = 1;
2041 } else
2042 *isolated = 0;
2043}
2044
2045static void unlock_page_lru(struct page *page, int isolated)
2046{
2047 struct zone *zone = page_zone(page);
2048
2049 if (isolated) {
2050 struct lruvec *lruvec;
2051
2052 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
2053 VM_BUG_ON_PAGE(PageLRU(page), page);
2054 SetPageLRU(page);
2055 add_page_to_lru_list(page, lruvec, page_lru(page));
2056 }
2057 spin_unlock_irq(zone_lru_lock(zone));
2058}
2059
2060static void commit_charge(struct page *page, struct mem_cgroup *memcg,
2061 bool lrucare)
2062{
2063 int isolated;
2064
2065 VM_BUG_ON_PAGE(page->mem_cgroup, page);
2066
2067 /*
2068	 * In some cases, e.g. SwapCache and FUSE (splice_buf->radixtree), the page
2069 * may already be on some other mem_cgroup's LRU. Take care of it.
2070 */
2071 if (lrucare)
2072 lock_page_lru(page, &isolated);
2073
2074 /*
2075 * Nobody should be changing or seriously looking at
2076 * page->mem_cgroup at this point:
2077 *
2078 * - the page is uncharged
2079 *
2080 * - the page is off-LRU
2081 *
2082 * - an anonymous fault has exclusive page access, except for
2083 * a locked page table
2084 *
2085 * - a page cache insertion, a swapin fault, or a migration
2086 * have the page locked
2087 */
2088 page->mem_cgroup = memcg;
2089
2090 if (lrucare)
2091 unlock_page_lru(page, isolated);
2092}
2093
2094#ifndef CONFIG_SLOB
2095static int memcg_alloc_cache_id(void)
2096{
2097 int id, size;
2098 int err;
2099
2100 id = ida_simple_get(&memcg_cache_ida,
2101 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2102 if (id < 0)
2103 return id;
2104
2105 if (id < memcg_nr_cache_ids)
2106 return id;
2107
2108 /*
2109 * There's no space for the new id in memcg_caches arrays,
2110 * so we have to grow them.
2111 */
2112 down_write(&memcg_cache_ids_sem);
2113
2114 size = 2 * (id + 1);
2115 if (size < MEMCG_CACHES_MIN_SIZE)
2116 size = MEMCG_CACHES_MIN_SIZE;
2117 else if (size > MEMCG_CACHES_MAX_SIZE)
2118 size = MEMCG_CACHES_MAX_SIZE;
2119
2120 err = memcg_update_all_caches(size);
2121 if (!err)
2122 err = memcg_update_all_list_lrus(size);
2123 if (!err)
2124 memcg_nr_cache_ids = size;
2125
2126 up_write(&memcg_cache_ids_sem);
2127
2128 if (err) {
2129 ida_simple_remove(&memcg_cache_ida, id);
2130 return err;
2131 }
2132 return id;
2133}
2134
2135static void memcg_free_cache_id(int id)
2136{
2137 ida_simple_remove(&memcg_cache_ida, id);
2138}
2139
2140struct memcg_kmem_cache_create_work {
2141 struct mem_cgroup *memcg;
2142 struct kmem_cache *cachep;
2143 struct work_struct work;
2144};
2145
2146static struct workqueue_struct *memcg_kmem_cache_create_wq;
2147
2148static void memcg_kmem_cache_create_func(struct work_struct *w)
2149{
2150 struct memcg_kmem_cache_create_work *cw =
2151 container_of(w, struct memcg_kmem_cache_create_work, work);
2152 struct mem_cgroup *memcg = cw->memcg;
2153 struct kmem_cache *cachep = cw->cachep;
2154
2155 memcg_create_kmem_cache(memcg, cachep);
2156
2157 css_put(&memcg->css);
2158 kfree(cw);
2159}
2160
2161/*
2162 * Enqueue the creation of a per-memcg kmem_cache.
2163 */
2164static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2165 struct kmem_cache *cachep)
2166{
2167 struct memcg_kmem_cache_create_work *cw;
2168
2169 cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
2170 if (!cw)
2171 return;
2172
2173 css_get(&memcg->css);
2174
2175 cw->memcg = memcg;
2176 cw->cachep = cachep;
2177 INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
2178
2179 queue_work(memcg_kmem_cache_create_wq, &cw->work);
2180}
2181
2182static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2183 struct kmem_cache *cachep)
2184{
2185 /*
2186 * We need to stop accounting when we kmalloc, because if the
2187 * corresponding kmalloc cache is not yet created, the first allocation
2188 * in __memcg_schedule_kmem_cache_create will recurse.
2189 *
2190 * However, it is better to enclose the whole function. Depending on
2191 * the debugging options enabled, INIT_WORK(), for instance, can
2192	 * trigger an allocation. This, too, will make us recurse. Because at
2193 * this point we can't allow ourselves back into memcg_kmem_get_cache,
2194 * the safest choice is to do it like this, wrapping the whole function.
2195 */
2196 current->memcg_kmem_skip_account = 1;
2197 __memcg_schedule_kmem_cache_create(memcg, cachep);
2198 current->memcg_kmem_skip_account = 0;
2199}
2200
2201static inline bool memcg_kmem_bypass(void)
2202{
2203 if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
2204 return true;
2205 return false;
2206}
2207
2208/**
2209 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
2210 * @cachep: the original global kmem cache
2211 *
2212 * Return the kmem_cache we're supposed to use for a slab allocation.
2213 * We try to use the current memcg's version of the cache.
2214 *
2215 * If the cache does not exist yet, if we are the first user of it, we
2216 * create it asynchronously in a workqueue and let the current allocation
2217 * go through with the original cache.
2218 *
2219 * This function takes a reference to the cache it returns to assure it
2220 * won't get destroyed while we are working with it. Once the caller is
2221 * done with it, memcg_kmem_put_cache() must be called to release the
2222 * reference.
2223 */
2224struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
2225{
2226 struct mem_cgroup *memcg;
2227 struct kmem_cache *memcg_cachep;
2228 int kmemcg_id;
2229
2230 VM_BUG_ON(!is_root_cache(cachep));
2231
2232 if (memcg_kmem_bypass())
2233 return cachep;
2234
2235 if (current->memcg_kmem_skip_account)
2236 return cachep;
2237
2238 memcg = get_mem_cgroup_from_mm(current->mm);
2239 kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2240 if (kmemcg_id < 0)
2241 goto out;
2242
2243 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
2244 if (likely(memcg_cachep))
2245 return memcg_cachep;
2246
2247 /*
2248 * If we are in a safe context (can wait, and not in interrupt
2249	 * context), we could be predictable and return right away.
2250	 * This would guarantee that the allocation being performed
2251	 * already belongs in the new cache.
2252	 *
2253	 * However, there are some clashes that can arise from locking.
2254 * For instance, because we acquire the slab_mutex while doing
2255 * memcg_create_kmem_cache, this means no further allocation
2256 * could happen with the slab_mutex held. So it's better to
2257 * defer everything.
2258 */
2259 memcg_schedule_kmem_cache_create(memcg, cachep);
2260out:
2261 css_put(&memcg->css);
2262 return cachep;
2263}
2264
2265/**
2266 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
2267 * @cachep: the cache returned by memcg_kmem_get_cache
2268 */
2269void memcg_kmem_put_cache(struct kmem_cache *cachep)
2270{
2271 if (!is_root_cache(cachep))
2272 css_put(&cachep->memcg_params.memcg->css);
2273}
2274
2275/**
2276 * memcg_kmem_charge_memcg: charge a kmem page to the given memory cgroup
2277 * @page: page to charge
2278 * @gfp: reclaim mode
2279 * @order: allocation order
2280 * @memcg: memory cgroup to charge
2281 *
2282 * Returns 0 on success, an error code on failure.
2283 */
2284int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2285 struct mem_cgroup *memcg)
2286{
2287 unsigned int nr_pages = 1 << order;
2288 struct page_counter *counter;
2289 int ret;
2290
2291 ret = try_charge(memcg, gfp, nr_pages);
2292 if (ret)
2293 return ret;
2294
2295 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2296 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2297 cancel_charge(memcg, nr_pages);
2298 return -ENOMEM;
2299 }
2300
2301 page->mem_cgroup = memcg;
2302
2303 return 0;
2304}
2305
2306/**
2307 * memcg_kmem_charge: charge a kmem page to the current memory cgroup
2308 * @page: page to charge
2309 * @gfp: reclaim mode
2310 * @order: allocation order
2311 *
2312 * Returns 0 on success, an error code on failure.
2313 */
2314int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
2315{
2316 struct mem_cgroup *memcg;
2317 int ret = 0;
2318
2319 if (memcg_kmem_bypass())
2320 return 0;
2321
2322 memcg = get_mem_cgroup_from_mm(current->mm);
2323 if (!mem_cgroup_is_root(memcg)) {
2324 ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
2325 if (!ret)
2326 __SetPageKmemcg(page);
2327 }
2328 css_put(&memcg->css);
2329 return ret;
2330}
2331/**
2332 * memcg_kmem_uncharge: uncharge a kmem page
2333 * @page: page to uncharge
2334 * @order: allocation order
2335 */
2336void memcg_kmem_uncharge(struct page *page, int order)
2337{
2338 struct mem_cgroup *memcg = page->mem_cgroup;
2339 unsigned int nr_pages = 1 << order;
2340
2341 if (!memcg)
2342 return;
2343
2344 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
2345
2346 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2347 page_counter_uncharge(&memcg->kmem, nr_pages);
2348
2349 page_counter_uncharge(&memcg->memory, nr_pages);
2350 if (do_memsw_account())
2351 page_counter_uncharge(&memcg->memsw, nr_pages);
2352
2353 page->mem_cgroup = NULL;
2354
2355 /* slab pages do not have PageKmemcg flag set */
2356 if (PageKmemcg(page))
2357 __ClearPageKmemcg(page);
2358
2359 css_put_many(&memcg->css, nr_pages);
2360}
2361#endif /* !CONFIG_SLOB */
2362
2363#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2364
2365/*
2366 * Because tail pages are not marked as "used", set them. We're under
2367 * zone_lru_lock and migration entries are set up in all page mappings.
2368 */
2369void mem_cgroup_split_huge_fixup(struct page *head)
2370{
2371 int i;
2372
2373 if (mem_cgroup_disabled())
2374 return;
2375
2376 for (i = 1; i < HPAGE_PMD_NR; i++)
2377 head[i].mem_cgroup = head->mem_cgroup;
2378
2379 __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
2380 HPAGE_PMD_NR);
2381}
2382#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2383
2384#ifdef CONFIG_MEMCG_SWAP
2385static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
2386 bool charge)
2387{
2388 int val = (charge) ? 1 : -1;
2389 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
2390}
2391
2392/**
2393 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2394 * @entry: swap entry to be moved
2395 * @from: mem_cgroup which the entry is moved from
2396 * @to: mem_cgroup which the entry is moved to
2397 *
2398 * It succeeds only when the swap_cgroup's record for this entry is the same
2399 * as the mem_cgroup's id of @from.
2400 *
2401 * Returns 0 on success, -EINVAL on failure.
2402 *
2403 * The caller must have charged to @to, IOW, called page_counter_charge() for
2404 * both memory and memsw, and called css_get().
2405 */
2406static int mem_cgroup_move_swap_account(swp_entry_t entry,
2407 struct mem_cgroup *from, struct mem_cgroup *to)
2408{
2409 unsigned short old_id, new_id;
2410
2411 old_id = mem_cgroup_id(from);
2412 new_id = mem_cgroup_id(to);
2413
2414 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2415 mem_cgroup_swap_statistics(from, false);
2416 mem_cgroup_swap_statistics(to, true);
2417 return 0;
2418 }
2419 return -EINVAL;
2420}
2421#else
2422static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2423 struct mem_cgroup *from, struct mem_cgroup *to)
2424{
2425 return -EINVAL;
2426}
2427#endif
2428
2429static DEFINE_MUTEX(memcg_limit_mutex);
2430
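/*
 * Try to install a new hard limit on memcg->memory. If current usage is
 * above the new limit, reclaim from the group and retry a bounded number
 * of times; the last error from page_counter_limit() is returned if usage
 * cannot be pushed below the requested limit.
 */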
2431static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2432 unsigned long limit)
2433{
2434 unsigned long curusage;
2435 unsigned long oldusage;
2436 bool enlarge = false;
2437 int retry_count;
2438 int ret;
2439
2440 /*
2441	 * To keep hierarchical reclaim simple, how long we should retry
2442	 * depends on the caller. We set our retry count to be a function
2443	 * of the number of children which we should visit in this loop.
2444 */
2445 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2446 mem_cgroup_count_children(memcg);
2447
2448 oldusage = page_counter_read(&memcg->memory);
2449
2450 do {
2451 if (signal_pending(current)) {
2452 ret = -EINTR;
2453 break;
2454 }
2455
2456 mutex_lock(&memcg_limit_mutex);
2457 if (limit > memcg->memsw.limit) {
2458 mutex_unlock(&memcg_limit_mutex);
2459 ret = -EINVAL;
2460 break;
2461 }
2462 if (limit > memcg->memory.limit)
2463 enlarge = true;
2464 ret = page_counter_limit(&memcg->memory, limit);
2465 mutex_unlock(&memcg_limit_mutex);
2466
2467 if (!ret)
2468 break;
2469
2470 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
2471
2472 curusage = page_counter_read(&memcg->memory);
2473 /* Usage is reduced ? */
2474 if (curusage >= oldusage)
2475 retry_count--;
2476 else
2477 oldusage = curusage;
2478 } while (retry_count);
2479
2480 if (!ret && enlarge)
2481 memcg_oom_recover(memcg);
2482
2483 return ret;
2484}
2485
2486static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2487 unsigned long limit)
2488{
2489 unsigned long curusage;
2490 unsigned long oldusage;
2491 bool enlarge = false;
2492 int retry_count;
2493 int ret;
2494
2495	/* see mem_cgroup_resize_limit */
2496 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2497 mem_cgroup_count_children(memcg);
2498
2499 oldusage = page_counter_read(&memcg->memsw);
2500
2501 do {
2502 if (signal_pending(current)) {
2503 ret = -EINTR;
2504 break;
2505 }
2506
2507 mutex_lock(&memcg_limit_mutex);
2508 if (limit < memcg->memory.limit) {
2509 mutex_unlock(&memcg_limit_mutex);
2510 ret = -EINVAL;
2511 break;
2512 }
2513 if (limit > memcg->memsw.limit)
2514 enlarge = true;
2515 ret = page_counter_limit(&memcg->memsw, limit);
2516 mutex_unlock(&memcg_limit_mutex);
2517
2518 if (!ret)
2519 break;
2520
2521 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
2522
2523 curusage = page_counter_read(&memcg->memsw);
2524 /* Usage is reduced ? */
2525 if (curusage >= oldusage)
2526 retry_count--;
2527 else
2528 oldusage = curusage;
2529 } while (retry_count);
2530
2531 if (!ret && enlarge)
2532 memcg_oom_recover(memcg);
2533
2534 return ret;
2535}
2536
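/*
 * Reclaim from the memcgs on @pgdat that exceed their soft limit, worst
 * offender (largest excess) first. Used only for order-0 allocations;
 * returns the number of pages reclaimed and adds the pages scanned to
 * *total_scanned.
 */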
2537unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
2538 gfp_t gfp_mask,
2539 unsigned long *total_scanned)
2540{
2541 unsigned long nr_reclaimed = 0;
2542 struct mem_cgroup_per_node *mz, *next_mz = NULL;
2543 unsigned long reclaimed;
2544 int loop = 0;
2545 struct mem_cgroup_tree_per_node *mctz;
2546 unsigned long excess;
2547 unsigned long nr_scanned;
2548
2549 if (order > 0)
2550 return 0;
2551
2552 mctz = soft_limit_tree_node(pgdat->node_id);
2553
2554 /*
2555 * Do not even bother to check the largest node if the root
2556 * is empty. Do it lockless to prevent lock bouncing. Races
2557 * are acceptable as soft limit is best effort anyway.
2558 */
2559 if (RB_EMPTY_ROOT(&mctz->rb_root))
2560 return 0;
2561
2562 /*
2563	 * This loop can run for a while, especially if mem_cgroups continuously
2564	 * keep exceeding their soft limit, putting the system under
2565	 * pressure.
2566 */
2567 do {
2568 if (next_mz)
2569 mz = next_mz;
2570 else
2571 mz = mem_cgroup_largest_soft_limit_node(mctz);
2572 if (!mz)
2573 break;
2574
2575 nr_scanned = 0;
2576 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
2577 gfp_mask, &nr_scanned);
2578 nr_reclaimed += reclaimed;
2579 *total_scanned += nr_scanned;
2580 spin_lock_irq(&mctz->lock);
2581 __mem_cgroup_remove_exceeded(mz, mctz);
2582
2583 /*
2584 * If we failed to reclaim anything from this memory cgroup
2585 * it is time to move on to the next cgroup
2586 */
2587 next_mz = NULL;
2588 if (!reclaimed)
2589 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2590
2591 excess = soft_limit_excess(mz->memcg);
2592 /*
2593 * One school of thought says that we should not add
2594		 * But our reclaim could return 0 simply because, due
2595		 * to priority, we are exposing a smaller subset of
2596		 * memory to reclaim from. Consider this a longer-term
2597		 * TODO.
2598 * term TODO.
2599 */
2600 /* If excess == 0, no tree ops */
2601 __mem_cgroup_insert_exceeded(mz, mctz, excess);
2602 spin_unlock_irq(&mctz->lock);
2603 css_put(&mz->memcg->css);
2604 loop++;
2605 /*
2606 * Could not reclaim anything and there are no more
2607 * mem cgroups to try or we seem to be looping without
2608 * reclaiming anything.
2609 */
2610 if (!nr_reclaimed &&
2611 (next_mz == NULL ||
2612 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2613 break;
2614 } while (!nr_reclaimed);
2615 if (next_mz)
2616 css_put(&next_mz->memcg->css);
2617 return nr_reclaimed;
2618}
2619
2620/*
2621 * Test whether @memcg has children, dead or alive. Note that this
2622 * function doesn't care whether @memcg has use_hierarchy enabled and
2623 * returns %true if there are child csses according to the cgroup
2624 * hierarchy. Testing use_hierarchy is the caller's responsibility.
2625 */
2626static inline bool memcg_has_children(struct mem_cgroup *memcg)
2627{
2628 bool ret;
2629
2630 rcu_read_lock();
2631 ret = css_next_child(NULL, &memcg->css);
2632 rcu_read_unlock();
2633 return ret;
2634}
2635
2636/*
2637 * Reclaims as many pages from the given memcg as possible.
2638 *
2639 * Caller is responsible for holding css reference for memcg.
2640 */
2641static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2642{
2643 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2644
2645	/* we call try-to-free pages to make this cgroup empty */
2646 lru_add_drain_all();
2647 /* try to free all pages in this cgroup */
2648 while (nr_retries && page_counter_read(&memcg->memory)) {
2649 int progress;
2650
2651 if (signal_pending(current))
2652 return -EINTR;
2653
2654 progress = try_to_free_mem_cgroup_pages(memcg, 1,
2655 GFP_KERNEL, true);
2656 if (!progress) {
2657 nr_retries--;
2658 /* maybe some writeback is necessary */
2659 congestion_wait(BLK_RW_ASYNC, HZ/10);
2660 }
2661
2662 }
2663
2664 return 0;
2665}
2666
2667static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2668 char *buf, size_t nbytes,
2669 loff_t off)
2670{
2671 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2672
2673 if (mem_cgroup_is_root(memcg))
2674 return -EINVAL;
2675 return mem_cgroup_force_empty(memcg) ?: nbytes;
2676}
2677
2678static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2679 struct cftype *cft)
2680{
2681 return mem_cgroup_from_css(css)->use_hierarchy;
2682}
2683
2684static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2685 struct cftype *cft, u64 val)
2686{
2687 int retval = 0;
2688 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2689 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
2690
2691 if (memcg->use_hierarchy == val)
2692 return 0;
2693
2694 /*
2695 * If parent's use_hierarchy is set, we can't make any modifications
2696 * in the child subtrees. If it is unset, then the change can
2697 * occur, provided the current cgroup has no children.
2698 *
2699	 * For the root cgroup, parent_memcg is NULL; we allow the value to be
2700	 * set if there are no children.
2701 */
2702 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
2703 (val == 1 || val == 0)) {
2704 if (!memcg_has_children(memcg))
2705 memcg->use_hierarchy = val;
2706 else
2707 retval = -EBUSY;
2708 } else
2709 retval = -EINVAL;
2710
2711 return retval;
2712}
2713
2714static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
2715{
2716 struct mem_cgroup *iter;
2717 int i;
2718
2719 memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT);
2720
2721 for_each_mem_cgroup_tree(iter, memcg) {
2722 for (i = 0; i < MEMCG_NR_STAT; i++)
2723 stat[i] += mem_cgroup_read_stat(iter, i);
2724 }
2725}
2726
2727static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
2728{
2729 struct mem_cgroup *iter;
2730 int i;
2731
2732 memset(events, 0, sizeof(*events) * MEMCG_NR_EVENTS);
2733
2734 for_each_mem_cgroup_tree(iter, memcg) {
2735 for (i = 0; i < MEMCG_NR_EVENTS; i++)
2736 events[i] += mem_cgroup_read_events(iter, i);
2737 }
2738}
2739
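/*
 * Return the memory (or memory+swap) usage of @memcg in pages. For the
 * root cgroup the value is assembled from hierarchical statistics rather
 * than read from the page counter.
 */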
2740static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
2741{
2742 unsigned long val = 0;
2743
2744 if (mem_cgroup_is_root(memcg)) {
2745 struct mem_cgroup *iter;
2746
2747 for_each_mem_cgroup_tree(iter, memcg) {
2748 val += mem_cgroup_read_stat(iter,
2749 MEM_CGROUP_STAT_CACHE);
2750 val += mem_cgroup_read_stat(iter,
2751 MEM_CGROUP_STAT_RSS);
2752 if (swap)
2753 val += mem_cgroup_read_stat(iter,
2754 MEM_CGROUP_STAT_SWAP);
2755 }
2756 } else {
2757 if (!swap)
2758 val = page_counter_read(&memcg->memory);
2759 else
2760 val = page_counter_read(&memcg->memsw);
2761 }
2762 return val;
2763}
2764
2765enum {
2766 RES_USAGE,
2767 RES_LIMIT,
2768 RES_MAX_USAGE,
2769 RES_FAILCNT,
2770 RES_SOFT_LIMIT,
2771};
2772
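/*
 * .read_u64 handler for the legacy control files: pick the page counter
 * selected by the cftype's private value and report the requested field
 * in bytes (failcnt is a plain count).
 */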
2773static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
2774 struct cftype *cft)
2775{
2776 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2777 struct page_counter *counter;
2778
2779 switch (MEMFILE_TYPE(cft->private)) {
2780 case _MEM:
2781 counter = &memcg->memory;
2782 break;
2783 case _MEMSWAP:
2784 counter = &memcg->memsw;
2785 break;
2786 case _KMEM:
2787 counter = &memcg->kmem;
2788 break;
2789 case _TCP:
2790 counter = &memcg->tcpmem;
2791 break;
2792 default:
2793 BUG();
2794 }
2795
2796 switch (MEMFILE_ATTR(cft->private)) {
2797 case RES_USAGE:
2798 if (counter == &memcg->memory)
2799 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
2800 if (counter == &memcg->memsw)
2801 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
2802 return (u64)page_counter_read(counter) * PAGE_SIZE;
2803 case RES_LIMIT:
2804 return (u64)counter->limit * PAGE_SIZE;
2805 case RES_MAX_USAGE:
2806 return (u64)counter->watermark * PAGE_SIZE;
2807 case RES_FAILCNT:
2808 return counter->failcnt;
2809 case RES_SOFT_LIMIT:
2810 return (u64)memcg->soft_limit * PAGE_SIZE;
2811 default:
2812 BUG();
2813 }
2814}
2815
2816#ifndef CONFIG_SLOB
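/*
 * Reserve a kmemcg_id and mark @memcg KMEM_ONLINE so that per-memcg slab
 * caches can be created for it. Skipped when kmem accounting has been
 * disabled (cgroup_memory_nokmem).
 */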
2817static int memcg_online_kmem(struct mem_cgroup *memcg)
2818{
2819 int memcg_id;
2820
2821 if (cgroup_memory_nokmem)
2822 return 0;
2823
2824 BUG_ON(memcg->kmemcg_id >= 0);
2825 BUG_ON(memcg->kmem_state);
2826
2827 memcg_id = memcg_alloc_cache_id();
2828 if (memcg_id < 0)
2829 return memcg_id;
2830
2831 static_branch_inc(&memcg_kmem_enabled_key);
2832 /*
2833 * A memory cgroup is considered kmem-online as soon as it gets
2834 * kmemcg_id. Setting the id after enabling static branching will
2835 * guarantee no one starts accounting before all call sites are
2836 * patched.
2837 */
2838 memcg->kmemcg_id = memcg_id;
2839 memcg->kmem_state = KMEM_ONLINE;
2840
2841 return 0;
2842}
2843
2844static void memcg_offline_kmem(struct mem_cgroup *memcg)
2845{
2846 struct cgroup_subsys_state *css;
2847 struct mem_cgroup *parent, *child;
2848 int kmemcg_id;
2849
2850 if (memcg->kmem_state != KMEM_ONLINE)
2851 return;
2852 /*
2853 * Clear the online state before clearing memcg_caches array
2854 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
2855 * guarantees that no cache will be created for this cgroup
2856 * after we are done (see memcg_create_kmem_cache()).
2857 */
2858 memcg->kmem_state = KMEM_ALLOCATED;
2859
2860 memcg_deactivate_kmem_caches(memcg);
2861
2862 kmemcg_id = memcg->kmemcg_id;
2863 BUG_ON(kmemcg_id < 0);
2864
2865 parent = parent_mem_cgroup(memcg);
2866 if (!parent)
2867 parent = root_mem_cgroup;
2868
2869 /*
2870 * Change kmemcg_id of this cgroup and all its descendants to the
2871 * parent's id, and then move all entries from this cgroup's list_lrus
2872 * to ones of the parent. After we have finished, all list_lrus
2873 * corresponding to this cgroup are guaranteed to remain empty. The
2874 * ordering is imposed by list_lru_node->lock taken by
2875 * memcg_drain_all_list_lrus().
2876 */
2877 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
2878 css_for_each_descendant_pre(css, &memcg->css) {
2879 child = mem_cgroup_from_css(css);
2880 BUG_ON(child->kmemcg_id != kmemcg_id);
2881 child->kmemcg_id = parent->kmemcg_id;
2882 if (!memcg->use_hierarchy)
2883 break;
2884 }
2885 rcu_read_unlock();
2886
2887 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
2888
2889 memcg_free_cache_id(kmemcg_id);
2890}
2891
2892static void memcg_free_kmem(struct mem_cgroup *memcg)
2893{
2894 /* css_alloc() failed, offlining didn't happen */
2895 if (unlikely(memcg->kmem_state == KMEM_ONLINE))
2896 memcg_offline_kmem(memcg);
2897
2898 if (memcg->kmem_state == KMEM_ALLOCATED) {
2899 memcg_destroy_kmem_caches(memcg);
2900 static_branch_dec(&memcg_kmem_enabled_key);
2901 WARN_ON(page_counter_read(&memcg->kmem));
2902 }
2903}
2904#else
2905static int memcg_online_kmem(struct mem_cgroup *memcg)
2906{
2907 return 0;
2908}
2909static void memcg_offline_kmem(struct mem_cgroup *memcg)
2910{
2911}
2912static void memcg_free_kmem(struct mem_cgroup *memcg)
2913{
2914}
2915#endif /* !CONFIG_SLOB */
2916
2917static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
2918 unsigned long limit)
2919{
2920 int ret;
2921
2922 mutex_lock(&memcg_limit_mutex);
2923 ret = page_counter_limit(&memcg->kmem, limit);
2924 mutex_unlock(&memcg_limit_mutex);
2925 return ret;
2926}
2927
2928static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
2929{
2930 int ret;
2931
2932 mutex_lock(&memcg_limit_mutex);
2933
2934 ret = page_counter_limit(&memcg->tcpmem, limit);
2935 if (ret)
2936 goto out;
2937
2938 if (!memcg->tcpmem_active) {
2939 /*
2940 * The active flag needs to be written after the static_key
2941 * update. This is what guarantees that the socket activation
2942 * function is the last one to run. See mem_cgroup_sk_alloc()
2943 * for details, and note that we don't mark any socket as
2944 * belonging to this memcg until that flag is up.
2945 *
2946 * We need to do this, because static_keys will span multiple
2947 * sites, but we can't control their order. If we mark a socket
2948 * as accounted, but the accounting functions are not patched in
2949 * yet, we'll lose accounting.
2950 *
2951 * We never race with the readers in mem_cgroup_sk_alloc(),
2952		 * because when this value changes, the code to process it is not
2953 * patched in yet.
2954 */
2955 static_branch_inc(&memcg_sockets_enabled_key);
2956 memcg->tcpmem_active = true;
2957 }
2958out:
2959 mutex_unlock(&memcg_limit_mutex);
2960 return ret;
2961}
2962
2963/*
2964 * The user of this function is...
2965 * RES_LIMIT.
2966 */
2967static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
2968 char *buf, size_t nbytes, loff_t off)
2969{
2970 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2971 unsigned long nr_pages;
2972 int ret;
2973
2974 buf = strstrip(buf);
2975 ret = page_counter_memparse(buf, "-1", &nr_pages);
2976 if (ret)
2977 return ret;
2978
2979 switch (MEMFILE_ATTR(of_cft(of)->private)) {
2980 case RES_LIMIT:
2981 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
2982 ret = -EINVAL;
2983 break;
2984 }
2985 switch (MEMFILE_TYPE(of_cft(of)->private)) {
2986 case _MEM:
2987 ret = mem_cgroup_resize_limit(memcg, nr_pages);
2988 break;
2989 case _MEMSWAP:
2990 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
2991 break;
2992 case _KMEM:
2993 ret = memcg_update_kmem_limit(memcg, nr_pages);
2994 break;
2995 case _TCP:
2996 ret = memcg_update_tcp_limit(memcg, nr_pages);
2997 break;
2998 }
2999 break;
3000 case RES_SOFT_LIMIT:
3001 memcg->soft_limit = nr_pages;
3002 ret = 0;
3003 break;
3004 }
3005 return ret ?: nbytes;
3006}
3007
3008static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3009 size_t nbytes, loff_t off)
3010{
3011 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3012 struct page_counter *counter;
3013
3014 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3015 case _MEM:
3016 counter = &memcg->memory;
3017 break;
3018 case _MEMSWAP:
3019 counter = &memcg->memsw;
3020 break;
3021 case _KMEM:
3022 counter = &memcg->kmem;
3023 break;
3024 case _TCP:
3025 counter = &memcg->tcpmem;
3026 break;
3027 default:
3028 BUG();
3029 }
3030
3031 switch (MEMFILE_ATTR(of_cft(of)->private)) {
3032 case RES_MAX_USAGE:
3033 page_counter_reset_watermark(counter);
3034 break;
3035 case RES_FAILCNT:
3036 counter->failcnt = 0;
3037 break;
3038 default:
3039 BUG();
3040 }
3041
3042 return nbytes;
3043}
3044
3045static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3046 struct cftype *cft)
3047{
3048 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3049}
3050
3051#ifdef CONFIG_MMU
3052static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3053 struct cftype *cft, u64 val)
3054{
3055 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3056
3057 if (val & ~MOVE_MASK)
3058 return -EINVAL;
3059
3060 /*
3061 * No kind of locking is needed in here, because ->can_attach() will
3062 * check this value once in the beginning of the process, and then carry
3063 * on with stale data. This means that changes to this value will only
3064 * affect task migrations starting after the change.
3065 */
3066 memcg->move_charge_at_immigrate = val;
3067 return 0;
3068}
3069#else
3070static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3071 struct cftype *cft, u64 val)
3072{
3073 return -ENOSYS;
3074}
3075#endif
3076
3077#ifdef CONFIG_NUMA
3078static int memcg_numa_stat_show(struct seq_file *m, void *v)
3079{
3080 struct numa_stat {
3081 const char *name;
3082 unsigned int lru_mask;
3083 };
3084
3085 static const struct numa_stat stats[] = {
3086 { "total", LRU_ALL },
3087 { "file", LRU_ALL_FILE },
3088 { "anon", LRU_ALL_ANON },
3089 { "unevictable", BIT(LRU_UNEVICTABLE) },
3090 };
3091 const struct numa_stat *stat;
3092 int nid;
3093 unsigned long nr;
3094 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3095
3096 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3097 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3098 seq_printf(m, "%s=%lu", stat->name, nr);
3099 for_each_node_state(nid, N_MEMORY) {
3100 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3101 stat->lru_mask);
3102 seq_printf(m, " N%d=%lu", nid, nr);
3103 }
3104 seq_putc(m, '\n');
3105 }
3106
3107 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3108 struct mem_cgroup *iter;
3109
3110 nr = 0;
3111 for_each_mem_cgroup_tree(iter, memcg)
3112 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3113 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3114 for_each_node_state(nid, N_MEMORY) {
3115 nr = 0;
3116 for_each_mem_cgroup_tree(iter, memcg)
3117 nr += mem_cgroup_node_nr_lru_pages(
3118 iter, nid, stat->lru_mask);
3119 seq_printf(m, " N%d=%lu", nid, nr);
3120 }
3121 seq_putc(m, '\n');
3122 }
3123
3124 return 0;
3125}
3126#endif /* CONFIG_NUMA */
3127
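/*
 * memory.stat handler: print the local counters, events and per-LRU page
 * counts, then the hierarchical limits and the "total_*" values aggregated
 * over the subtree; per-node reclaim statistics are appended under
 * CONFIG_DEBUG_VM.
 */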
3128static int memcg_stat_show(struct seq_file *m, void *v)
3129{
3130 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3131 unsigned long memory, memsw;
3132 struct mem_cgroup *mi;
3133 unsigned int i;
3134
3135 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
3136 MEM_CGROUP_STAT_NSTATS);
3137 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
3138 MEM_CGROUP_EVENTS_NSTATS);
3139 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3140
3141 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3142 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
3143 continue;
3144 seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
3145 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
3146 }
3147
3148 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
3149 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
3150 mem_cgroup_read_events(memcg, i));
3151
3152 for (i = 0; i < NR_LRU_LISTS; i++)
3153 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3154 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
3155
3156 /* Hierarchical information */
3157 memory = memsw = PAGE_COUNTER_MAX;
3158 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3159 memory = min(memory, mi->memory.limit);
3160 memsw = min(memsw, mi->memsw.limit);
3161 }
3162 seq_printf(m, "hierarchical_memory_limit %llu\n",
3163 (u64)memory * PAGE_SIZE);
3164 if (do_memsw_account())
3165 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3166 (u64)memsw * PAGE_SIZE);
3167
3168 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3169 unsigned long long val = 0;
3170
3171 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
3172 continue;
3173 for_each_mem_cgroup_tree(mi, memcg)
3174 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
3175 seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
3176 }
3177
3178 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
3179 unsigned long long val = 0;
3180
3181 for_each_mem_cgroup_tree(mi, memcg)
3182 val += mem_cgroup_read_events(mi, i);
3183 seq_printf(m, "total_%s %llu\n",
3184 mem_cgroup_events_names[i], val);
3185 }
3186
3187 for (i = 0; i < NR_LRU_LISTS; i++) {
3188 unsigned long long val = 0;
3189
3190 for_each_mem_cgroup_tree(mi, memcg)
3191 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3192 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
3193 }
3194
3195#ifdef CONFIG_DEBUG_VM
3196 {
3197 pg_data_t *pgdat;
3198 struct mem_cgroup_per_node *mz;
3199 struct zone_reclaim_stat *rstat;
3200 unsigned long recent_rotated[2] = {0, 0};
3201 unsigned long recent_scanned[2] = {0, 0};
3202
3203 for_each_online_pgdat(pgdat) {
3204 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
3205 rstat = &mz->lruvec.reclaim_stat;
3206
3207 recent_rotated[0] += rstat->recent_rotated[0];
3208 recent_rotated[1] += rstat->recent_rotated[1];
3209 recent_scanned[0] += rstat->recent_scanned[0];
3210 recent_scanned[1] += rstat->recent_scanned[1];
3211 }
3212 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3213 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3214 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3215 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
3216 }
3217#endif
3218
3219 return 0;
3220}
3221
3222static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3223 struct cftype *cft)
3224{
3225 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3226
3227 return mem_cgroup_swappiness(memcg);
3228}
3229
3230static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3231 struct cftype *cft, u64 val)
3232{
3233 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3234
3235 if (val > 100)
3236 return -EINVAL;
3237
3238 if (css->parent)
3239 memcg->swappiness = val;
3240 else
3241 vm_swappiness = val;
3242
3243 return 0;
3244}
3245
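/*
 * Signal every eventfd whose usage threshold has been crossed since the
 * last check, scanning from the cached current_threshold in both
 * directions, and remember the new position.
 */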
3246static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3247{
3248 struct mem_cgroup_threshold_ary *t;
3249 unsigned long usage;
3250 int i;
3251
3252 rcu_read_lock();
3253 if (!swap)
3254 t = rcu_dereference(memcg->thresholds.primary);
3255 else
3256 t = rcu_dereference(memcg->memsw_thresholds.primary);
3257
3258 if (!t)
3259 goto unlock;
3260
3261 usage = mem_cgroup_usage(memcg, swap);
3262
3263 /*
3264	 * current_threshold points to the threshold just below or equal to usage.
3265	 * If that is no longer true, a threshold was crossed after the last
3266	 * call of __mem_cgroup_threshold().
3267 */
3268 i = t->current_threshold;
3269
3270 /*
3271 * Iterate backward over array of thresholds starting from
3272 * current_threshold and check if a threshold is crossed.
3273	 * If none of the thresholds below usage is crossed, we read
3274 * only one element of the array here.
3275 */
3276 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3277 eventfd_signal(t->entries[i].eventfd, 1);
3278
3279 /* i = current_threshold + 1 */
3280 i++;
3281
3282 /*
3283 * Iterate forward over array of thresholds starting from
3284 * current_threshold+1 and check if a threshold is crossed.
3285	 * If none of the thresholds above usage is crossed, we read
3286 * only one element of the array here.
3287 */
3288 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3289 eventfd_signal(t->entries[i].eventfd, 1);
3290
3291 /* Update current_threshold */
3292 t->current_threshold = i - 1;
3293unlock:
3294 rcu_read_unlock();
3295}
3296
3297static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3298{
3299 while (memcg) {
3300 __mem_cgroup_threshold(memcg, false);
3301 if (do_memsw_account())
3302 __mem_cgroup_threshold(memcg, true);
3303
3304 memcg = parent_mem_cgroup(memcg);
3305 }
3306}
3307
3308static int compare_thresholds(const void *a, const void *b)
3309{
3310 const struct mem_cgroup_threshold *_a = a;
3311 const struct mem_cgroup_threshold *_b = b;
3312
3313 if (_a->threshold > _b->threshold)
3314 return 1;
3315
3316 if (_a->threshold < _b->threshold)
3317 return -1;
3318
3319 return 0;
3320}
3321
3322static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
3323{
3324 struct mem_cgroup_eventfd_list *ev;
3325
3326 spin_lock(&memcg_oom_lock);
3327
3328 list_for_each_entry(ev, &memcg->oom_notify, list)
3329 eventfd_signal(ev->eventfd, 1);
3330
3331 spin_unlock(&memcg_oom_lock);
3332 return 0;
3333}
3334
3335static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
3336{
3337 struct mem_cgroup *iter;
3338
3339 for_each_mem_cgroup_tree(iter, memcg)
3340 mem_cgroup_oom_notify_cb(iter);
3341}
3342
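/*
 * Register a usage (or memory+swap usage) threshold: build a larger copy
 * of the threshold array with the new entry, sort it and publish it under
 * RCU. The previous primary array is kept as a spare for unregistration.
 */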
3343static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3344 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
3345{
3346 struct mem_cgroup_thresholds *thresholds;
3347 struct mem_cgroup_threshold_ary *new;
3348 unsigned long threshold;
3349 unsigned long usage;
3350 int i, size, ret;
3351
3352 ret = page_counter_memparse(args, "-1", &threshold);
3353 if (ret)
3354 return ret;
3355
3356 mutex_lock(&memcg->thresholds_lock);
3357
3358 if (type == _MEM) {
3359 thresholds = &memcg->thresholds;
3360 usage = mem_cgroup_usage(memcg, false);
3361 } else if (type == _MEMSWAP) {
3362 thresholds = &memcg->memsw_thresholds;
3363 usage = mem_cgroup_usage(memcg, true);
3364 } else
3365 BUG();
3366
3367	/* Check if a threshold was crossed before adding a new one */
3368 if (thresholds->primary)
3369 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3370
3371 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3372
3373 /* Allocate memory for new array of thresholds */
3374 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3375 GFP_KERNEL);
3376 if (!new) {
3377 ret = -ENOMEM;
3378 goto unlock;
3379 }
3380 new->size = size;
3381
3382 /* Copy thresholds (if any) to new array */
3383 if (thresholds->primary) {
3384 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3385 sizeof(struct mem_cgroup_threshold));
3386 }
3387
3388 /* Add new threshold */
3389 new->entries[size - 1].eventfd = eventfd;
3390 new->entries[size - 1].threshold = threshold;
3391
3392	/* Sort thresholds. Registration of a new threshold isn't time-critical */
3393 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3394 compare_thresholds, NULL);
3395
3396 /* Find current threshold */
3397 new->current_threshold = -1;
3398 for (i = 0; i < size; i++) {
3399 if (new->entries[i].threshold <= usage) {
3400 /*
3401 * new->current_threshold will not be used until
3402 * rcu_assign_pointer(), so it's safe to increment
3403 * it here.
3404 */
3405 ++new->current_threshold;
3406 } else
3407 break;
3408 }
3409
3410 /* Free old spare buffer and save old primary buffer as spare */
3411 kfree(thresholds->spare);
3412 thresholds->spare = thresholds->primary;
3413
3414 rcu_assign_pointer(thresholds->primary, new);
3415
3416 /* To be sure that nobody uses thresholds */
3417 synchronize_rcu();
3418
3419unlock:
3420 mutex_unlock(&memcg->thresholds_lock);
3421
3422 return ret;
3423}
3424
3425static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3426 struct eventfd_ctx *eventfd, const char *args)
3427{
3428 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
3429}
3430
3431static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
3432 struct eventfd_ctx *eventfd, const char *args)
3433{
3434 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
3435}
3436
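/*
 * Remove every threshold registered with @eventfd. The spare array saved
 * by the register path is reused, so no allocation is needed here.
 */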
3437static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3438 struct eventfd_ctx *eventfd, enum res_type type)
3439{
3440 struct mem_cgroup_thresholds *thresholds;
3441 struct mem_cgroup_threshold_ary *new;
3442 unsigned long usage;
3443 int i, j, size;
3444
3445 mutex_lock(&memcg->thresholds_lock);
3446
3447 if (type == _MEM) {
3448 thresholds = &memcg->thresholds;
3449 usage = mem_cgroup_usage(memcg, false);
3450 } else if (type == _MEMSWAP) {
3451 thresholds = &memcg->memsw_thresholds;
3452 usage = mem_cgroup_usage(memcg, true);
3453 } else
3454 BUG();
3455
3456 if (!thresholds->primary)
3457 goto unlock;
3458
3459	/* Check if a threshold was crossed before removing */
3460 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3461
3462	/* Calculate the new number of thresholds */
3463 size = 0;
3464 for (i = 0; i < thresholds->primary->size; i++) {
3465 if (thresholds->primary->entries[i].eventfd != eventfd)
3466 size++;
3467 }
3468
3469 new = thresholds->spare;
3470
3471 /* Set thresholds array to NULL if we don't have thresholds */
3472 if (!size) {
3473 kfree(new);
3474 new = NULL;
3475 goto swap_buffers;
3476 }
3477
3478 new->size = size;
3479
3480 /* Copy thresholds and find current threshold */
3481 new->current_threshold = -1;
3482 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3483 if (thresholds->primary->entries[i].eventfd == eventfd)
3484 continue;
3485
3486 new->entries[j] = thresholds->primary->entries[i];
3487 if (new->entries[j].threshold <= usage) {
3488 /*
3489 * new->current_threshold will not be used
3490 * until rcu_assign_pointer(), so it's safe to increment
3491 * it here.
3492 */
3493 ++new->current_threshold;
3494 }
3495 j++;
3496 }
3497
3498swap_buffers:
3499 /* Swap primary and spare array */
3500 thresholds->spare = thresholds->primary;
3501
3502 rcu_assign_pointer(thresholds->primary, new);
3503
3504 /* To be sure that nobody uses thresholds */
3505 synchronize_rcu();
3506
3507 /* If all events are unregistered, free the spare array */
3508 if (!new) {
3509 kfree(thresholds->spare);
3510 thresholds->spare = NULL;
3511 }
3512unlock:
3513 mutex_unlock(&memcg->thresholds_lock);
3514}
3515
3516static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3517 struct eventfd_ctx *eventfd)
3518{
3519 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
3520}
3521
3522static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3523 struct eventfd_ctx *eventfd)
3524{
3525 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
3526}
3527
3528static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
3529 struct eventfd_ctx *eventfd, const char *args)
3530{
3531 struct mem_cgroup_eventfd_list *event;
3532
3533 event = kmalloc(sizeof(*event), GFP_KERNEL);
3534 if (!event)
3535 return -ENOMEM;
3536
3537 spin_lock(&memcg_oom_lock);
3538
3539 event->eventfd = eventfd;
3540 list_add(&event->list, &memcg->oom_notify);
3541
3542 /* already in OOM ? */
3543 if (memcg->under_oom)
3544 eventfd_signal(eventfd, 1);
3545 spin_unlock(&memcg_oom_lock);
3546
3547 return 0;
3548}
3549
3550static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
3551 struct eventfd_ctx *eventfd)
3552{
3553 struct mem_cgroup_eventfd_list *ev, *tmp;
3554
3555 spin_lock(&memcg_oom_lock);
3556
3557 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
3558 if (ev->eventfd == eventfd) {
3559 list_del(&ev->list);
3560 kfree(ev);
3561 }
3562 }
3563
3564 spin_unlock(&memcg_oom_lock);
3565}
3566
3567static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3568{
3569 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3570
3571 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
3572 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
3573 return 0;
3574}
3575
3576static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3577 struct cftype *cft, u64 val)
3578{
3579 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3580
3581 /* cannot set to root cgroup and only 0 and 1 are allowed */
3582 if (!css->parent || !((val == 0) || (val == 1)))
3583 return -EINVAL;
3584
3585 memcg->oom_kill_disable = val;
3586 if (!val)
3587 memcg_oom_recover(memcg);
3588
3589 return 0;
3590}
3591
3592#ifdef CONFIG_CGROUP_WRITEBACK
3593
3594struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
3595{
3596 return &memcg->cgwb_list;
3597}
3598
3599static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3600{
3601 return wb_domain_init(&memcg->cgwb_domain, gfp);
3602}
3603
3604static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3605{
3606 wb_domain_exit(&memcg->cgwb_domain);
3607}
3608
3609static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3610{
3611 wb_domain_size_changed(&memcg->cgwb_domain);
3612}
3613
3614struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3615{
3616 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3617
3618 if (!memcg->css.parent)
3619 return NULL;
3620
3621 return &memcg->cgwb_domain;
3622}
3623
3624/**
3625 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3626 * @wb: bdi_writeback in question
3627 * @pfilepages: out parameter for number of file pages
3628 * @pheadroom: out parameter for number of allocatable pages according to memcg
3629 * @pdirty: out parameter for number of dirty pages
3630 * @pwriteback: out parameter for number of pages under writeback
3631 *
3632 * Determine the numbers of file, headroom, dirty, and writeback pages in
3633 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3634 * is a bit more involved.
3635 *
3636 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3637 * headroom is calculated as the lowest headroom of itself and the
3638 * ancestors. Note that this doesn't consider the actual amount of
3639 * available memory in the system. The caller should further cap
3640 * *@pheadroom accordingly.
3641 */
3642void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3643 unsigned long *pheadroom, unsigned long *pdirty,
3644 unsigned long *pwriteback)
3645{
3646 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3647 struct mem_cgroup *parent;
3648
3649 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
3650
3651 /* this should eventually include NR_UNSTABLE_NFS */
3652 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
3653 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3654 (1 << LRU_ACTIVE_FILE));
3655 *pheadroom = PAGE_COUNTER_MAX;
3656
3657 while ((parent = parent_mem_cgroup(memcg))) {
3658 unsigned long ceiling = min(memcg->memory.limit, memcg->high);
3659 unsigned long used = page_counter_read(&memcg->memory);
3660
3661 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3662 memcg = parent;
3663 }
3664}
3665
3666#else /* CONFIG_CGROUP_WRITEBACK */
3667
3668static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3669{
3670 return 0;
3671}
3672
3673static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3674{
3675}
3676
3677static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3678{
3679}
3680
3681#endif /* CONFIG_CGROUP_WRITEBACK */
3682
3683/*
3684 * DO NOT USE IN NEW FILES.
3685 *
3686 * "cgroup.event_control" implementation.
3687 *
3688 * This is way over-engineered. It tries to support fully configurable
3689 * events for each user. Such a level of flexibility is completely
3690 * unnecessary, especially in light of the planned unified hierarchy.
3691 *
3692 * Please deprecate this and replace with something simpler if at all
3693 * possible.
3694 */
3695
3696/*
3697 * Unregister event and free resources.
3698 *
3699 * Gets called from workqueue.
3700 */
3701static void memcg_event_remove(struct work_struct *work)
3702{
3703 struct mem_cgroup_event *event =
3704 container_of(work, struct mem_cgroup_event, remove);
3705 struct mem_cgroup *memcg = event->memcg;
3706
3707 remove_wait_queue(event->wqh, &event->wait);
3708
3709 event->unregister_event(memcg, event->eventfd);
3710
3711 /* Notify userspace the event is going away. */
3712 eventfd_signal(event->eventfd, 1);
3713
3714 eventfd_ctx_put(event->eventfd);
3715 kfree(event);
3716 css_put(&memcg->css);
3717}
3718
3719/*
3720 * Gets called on POLLHUP on eventfd when user closes it.
3721 *
3722 * Called with wqh->lock held and interrupts disabled.
3723 */
3724static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
3725 int sync, void *key)
3726{
3727 struct mem_cgroup_event *event =
3728 container_of(wait, struct mem_cgroup_event, wait);
3729 struct mem_cgroup *memcg = event->memcg;
3730 unsigned long flags = (unsigned long)key;
3731
3732 if (flags & POLLHUP) {
3733 /*
3734 * If the event has been detached at cgroup removal, we
3735 * can simply return knowing the other side will cleanup
3736 * for us.
3737 *
3738 * We can't race against event freeing since the other
3739 * side will require wqh->lock via remove_wait_queue(),
3740 * which we hold.
3741 */
3742 spin_lock(&memcg->event_list_lock);
3743 if (!list_empty(&event->list)) {
3744 list_del_init(&event->list);
3745 /*
3746			 * We are in atomic context, but memcg_event_remove()
3747			 * may sleep, so we have to call it from a workqueue.
3748 */
3749 schedule_work(&event->remove);
3750 }
3751 spin_unlock(&memcg->event_list_lock);
3752 }
3753
3754 return 0;
3755}
3756
3757static void memcg_event_ptable_queue_proc(struct file *file,
3758 wait_queue_head_t *wqh, poll_table *pt)
3759{
3760 struct mem_cgroup_event *event =
3761 container_of(pt, struct mem_cgroup_event, pt);
3762
3763 event->wqh = wqh;
3764 add_wait_queue(wqh, &event->wait);
3765}
3766
3767/*
3768 * DO NOT USE IN NEW FILES.
3769 *
3770 * Parse input and register new cgroup event handler.
3771 *
3772 * Input must be in format '<event_fd> <control_fd> <args>'.
3773 * Interpretation of args is defined by control file implementation.
3774 */
3775static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
3776 char *buf, size_t nbytes, loff_t off)
3777{
3778 struct cgroup_subsys_state *css = of_css(of);
3779 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3780 struct mem_cgroup_event *event;
3781 struct cgroup_subsys_state *cfile_css;
3782 unsigned int efd, cfd;
3783 struct fd efile;
3784 struct fd cfile;
3785 const char *name;
3786 char *endp;
3787 int ret;
3788
3789 buf = strstrip(buf);
3790
3791 efd = simple_strtoul(buf, &endp, 10);
3792 if (*endp != ' ')
3793 return -EINVAL;
3794 buf = endp + 1;
3795
3796 cfd = simple_strtoul(buf, &endp, 10);
3797 if ((*endp != ' ') && (*endp != '\0'))
3798 return -EINVAL;
3799 buf = endp + 1;
3800
3801 event = kzalloc(sizeof(*event), GFP_KERNEL);
3802 if (!event)
3803 return -ENOMEM;
3804
3805 event->memcg = memcg;
3806 INIT_LIST_HEAD(&event->list);
3807 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
3808 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
3809 INIT_WORK(&event->remove, memcg_event_remove);
3810
3811 efile = fdget(efd);
3812 if (!efile.file) {
3813 ret = -EBADF;
3814 goto out_kfree;
3815 }
3816
3817 event->eventfd = eventfd_ctx_fileget(efile.file);
3818 if (IS_ERR(event->eventfd)) {
3819 ret = PTR_ERR(event->eventfd);
3820 goto out_put_efile;
3821 }
3822
3823 cfile = fdget(cfd);
3824 if (!cfile.file) {
3825 ret = -EBADF;
3826 goto out_put_eventfd;
3827 }
3828
3829	/* the process needs read permission on the control file */
3830 /* AV: shouldn't we check that it's been opened for read instead? */
3831 ret = inode_permission(file_inode(cfile.file), MAY_READ);
3832 if (ret < 0)
3833 goto out_put_cfile;
3834
3835 /*
3836 * Determine the event callbacks and set them in @event. This used
3837 * to be done via struct cftype but cgroup core no longer knows
3838 * about these events. The following is crude but the whole thing
3839 * is for compatibility anyway.
3840 *
3841 * DO NOT ADD NEW FILES.
3842 */
3843 name = cfile.file->f_path.dentry->d_name.name;
3844
3845 if (!strcmp(name, "memory.usage_in_bytes")) {
3846 event->register_event = mem_cgroup_usage_register_event;
3847 event->unregister_event = mem_cgroup_usage_unregister_event;
3848 } else if (!strcmp(name, "memory.oom_control")) {
3849 event->register_event = mem_cgroup_oom_register_event;
3850 event->unregister_event = mem_cgroup_oom_unregister_event;
3851 } else if (!strcmp(name, "memory.pressure_level")) {
3852 event->register_event = vmpressure_register_event;
3853 event->unregister_event = vmpressure_unregister_event;
3854 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
3855 event->register_event = memsw_cgroup_usage_register_event;
3856 event->unregister_event = memsw_cgroup_usage_unregister_event;
3857 } else {
3858 ret = -EINVAL;
3859 goto out_put_cfile;
3860 }
3861
3862 /*
3863 * Verify @cfile should belong to @css. Also, remaining events are
3864 * automatically removed on cgroup destruction but the removal is
3865 * asynchronous, so take an extra ref on @css.
3866 */
3867 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
3868 &memory_cgrp_subsys);
3869 ret = -EINVAL;
3870 if (IS_ERR(cfile_css))
3871 goto out_put_cfile;
3872 if (cfile_css != css) {
3873 css_put(cfile_css);
3874 goto out_put_cfile;
3875 }
3876
3877 ret = event->register_event(memcg, event->eventfd, buf);
3878 if (ret)
3879 goto out_put_css;
3880
3881 efile.file->f_op->poll(efile.file, &event->pt);
3882
3883 spin_lock(&memcg->event_list_lock);
3884 list_add(&event->list, &memcg->event_list);
3885 spin_unlock(&memcg->event_list_lock);
3886
3887 fdput(cfile);
3888 fdput(efile);
3889
3890 return nbytes;
3891
3892out_put_css:
3893 css_put(css);
3894out_put_cfile:
3895 fdput(cfile);
3896out_put_eventfd:
3897 eventfd_ctx_put(event->eventfd);
3898out_put_efile:
3899 fdput(efile);
3900out_kfree:
3901 kfree(event);
3902
3903 return ret;
3904}
3905
3906static struct cftype mem_cgroup_legacy_files[] = {
3907 {
3908 .name = "usage_in_bytes",
3909 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3910 .read_u64 = mem_cgroup_read_u64,
3911 },
3912 {
3913 .name = "max_usage_in_bytes",
3914 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
3915 .write = mem_cgroup_reset,
3916 .read_u64 = mem_cgroup_read_u64,
3917 },
3918 {
3919 .name = "limit_in_bytes",
3920 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
3921 .write = mem_cgroup_write,
3922 .read_u64 = mem_cgroup_read_u64,
3923 },
3924 {
3925 .name = "soft_limit_in_bytes",
3926 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3927 .write = mem_cgroup_write,
3928 .read_u64 = mem_cgroup_read_u64,
3929 },
3930 {
3931 .name = "failcnt",
3932 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
3933 .write = mem_cgroup_reset,
3934 .read_u64 = mem_cgroup_read_u64,
3935 },
3936 {
3937 .name = "stat",
3938 .seq_show = memcg_stat_show,
3939 },
3940 {
3941 .name = "force_empty",
3942 .write = mem_cgroup_force_empty_write,
3943 },
3944 {
3945 .name = "use_hierarchy",
3946 .write_u64 = mem_cgroup_hierarchy_write,
3947 .read_u64 = mem_cgroup_hierarchy_read,
3948 },
3949 {
3950 .name = "cgroup.event_control", /* XXX: for compat */
3951 .write = memcg_write_event_control,
3952 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
3953 },
3954 {
3955 .name = "swappiness",
3956 .read_u64 = mem_cgroup_swappiness_read,
3957 .write_u64 = mem_cgroup_swappiness_write,
3958 },
3959 {
3960 .name = "move_charge_at_immigrate",
3961 .read_u64 = mem_cgroup_move_charge_read,
3962 .write_u64 = mem_cgroup_move_charge_write,
3963 },
3964 {
3965 .name = "oom_control",
3966 .seq_show = mem_cgroup_oom_control_read,
3967 .write_u64 = mem_cgroup_oom_control_write,
3968 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3969 },
3970 {
3971 .name = "pressure_level",
3972 },
3973#ifdef CONFIG_NUMA
3974 {
3975 .name = "numa_stat",
3976 .seq_show = memcg_numa_stat_show,
3977 },
3978#endif
3979 {
3980 .name = "kmem.limit_in_bytes",
3981 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
3982 .write = mem_cgroup_write,
3983 .read_u64 = mem_cgroup_read_u64,
3984 },
3985 {
3986 .name = "kmem.usage_in_bytes",
3987 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
3988 .read_u64 = mem_cgroup_read_u64,
3989 },
3990 {
3991 .name = "kmem.failcnt",
3992 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
3993 .write = mem_cgroup_reset,
3994 .read_u64 = mem_cgroup_read_u64,
3995 },
3996 {
3997 .name = "kmem.max_usage_in_bytes",
3998 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
3999 .write = mem_cgroup_reset,
4000 .read_u64 = mem_cgroup_read_u64,
4001 },
4002#ifdef CONFIG_SLABINFO
4003 {
4004 .name = "kmem.slabinfo",
4005 .seq_start = slab_start,
4006 .seq_next = slab_next,
4007 .seq_stop = slab_stop,
4008 .seq_show = memcg_slab_show,
4009 },
4010#endif
4011 {
4012 .name = "kmem.tcp.limit_in_bytes",
4013 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4014 .write = mem_cgroup_write,
4015 .read_u64 = mem_cgroup_read_u64,
4016 },
4017 {
4018 .name = "kmem.tcp.usage_in_bytes",
4019 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4020 .read_u64 = mem_cgroup_read_u64,
4021 },
4022 {
4023 .name = "kmem.tcp.failcnt",
4024 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4025 .write = mem_cgroup_reset,
4026 .read_u64 = mem_cgroup_read_u64,
4027 },
4028 {
4029 .name = "kmem.tcp.max_usage_in_bytes",
4030 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4031 .write = mem_cgroup_reset,
4032 .read_u64 = mem_cgroup_read_u64,
4033 },
4034 { }, /* terminate */
4035};
4036
4037/*
4038 * Private memory cgroup IDR
4039 *
4040 * Swap-out records and page cache shadow entries need to store memcg
4041 * references in constrained space, so we maintain an ID space that is
4042 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
4043 * memory-controlled cgroups to 64k.
4044 *
4045 * However, there usually are many references to the offline CSS after
4046 * the cgroup has been destroyed, such as page cache or reclaimable
4047 * slab objects, which don't need to hang on to the ID. We want to keep
4048 * those dead CSS from occupying IDs, or we might quickly exhaust the
4049 * relatively small ID space and prevent the creation of new cgroups
4050 * even when there are much fewer than 64k cgroups - possibly none.
4051 *
4052 * Maintain a private 16-bit ID space for memcg, and allow the ID to
4053 * be freed and recycled when it's no longer needed, which is usually
4054 * when the CSS is offlined.
4055 *
4056 * The only exceptions to that are records of swapped-out tmpfs/shmem
4057 * pages that need to be attributed to live ancestors on swapin. But
4058 * those references are manageable from userspace.
4059 */
4060
4061static DEFINE_IDR(mem_cgroup_idr);
4062
4063static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
4064{
4065 VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0);
4066 atomic_add(n, &memcg->id.ref);
4067}
4068
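/*
 * Drop @n references to the memcg ID; once the last reference is gone the
 * ID is recycled and the css reference pinned by the ID is released.
 */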
4069static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
4070{
4071 VM_BUG_ON(atomic_read(&memcg->id.ref) < n);
4072 if (atomic_sub_and_test(n, &memcg->id.ref)) {
4073 idr_remove(&mem_cgroup_idr, memcg->id.id);
4074 memcg->id.id = 0;
4075
4076 /* Memcg ID pins CSS */
4077 css_put(&memcg->css);
4078 }
4079}
4080
4081static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
4082{
4083 mem_cgroup_id_get_many(memcg, 1);
4084}
4085
4086static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
4087{
4088 mem_cgroup_id_put_many(memcg, 1);
4089}
4090
4091/**
4092 * mem_cgroup_from_id - look up a memcg from a memcg id
4093 * @id: the memcg id to look up
4094 *
4095 * Caller must hold rcu_read_lock().
4096 */
4097struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
4098{
4099 WARN_ON_ONCE(!rcu_read_lock_held());
4100 return idr_find(&mem_cgroup_idr, id);
4101}
4102
4103static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4104{
4105 struct mem_cgroup_per_node *pn;
4106 int tmp = node;
4107 /*
4108 * This routine is called against possible nodes.
4109	 * But it's a BUG to call kmalloc() against an offline node.
4110	 *
4111	 * TODO: this routine can waste much memory for nodes which will
4112	 * never be onlined. It would be better to use a memory hotplug
4113	 * callback function.
4114 */
4115 if (!node_state(node, N_NORMAL_MEMORY))
4116 tmp = -1;
4117 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4118 if (!pn)
4119 return 1;
4120
4121 lruvec_init(&pn->lruvec);
4122 pn->usage_in_excess = 0;
4123 pn->on_tree = false;
4124 pn->memcg = memcg;
4125
4126 memcg->nodeinfo[node] = pn;
4127 return 0;
4128}
4129
4130static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4131{
4132 kfree(memcg->nodeinfo[node]);
4133}
4134
4135static void __mem_cgroup_free(struct mem_cgroup *memcg)
4136{
4137 int node;
4138
4139 for_each_node(node)
4140 free_mem_cgroup_per_node_info(memcg, node);
4141 free_percpu(memcg->stat);
4142 kfree(memcg);
4143}
4144
4145static void mem_cgroup_free(struct mem_cgroup *memcg)
4146{
4147 memcg_wb_domain_exit(memcg);
4148 __mem_cgroup_free(memcg);
4149}
4150
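/*
 * Allocate and minimally initialise a mem_cgroup: reserve a private ID,
 * allocate the per-cpu statistics and per-node info, and set up the
 * writeback domain. The ID is published via idr_replace() only once
 * initialisation has succeeded.
 */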
4151static struct mem_cgroup *mem_cgroup_alloc(void)
4152{
4153 struct mem_cgroup *memcg;
4154 size_t size;
4155 int node;
4156
4157 size = sizeof(struct mem_cgroup);
4158 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
4159
4160 memcg = kzalloc(size, GFP_KERNEL);
4161 if (!memcg)
4162 return NULL;
4163
4164 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
4165 1, MEM_CGROUP_ID_MAX,
4166 GFP_KERNEL);
4167 if (memcg->id.id < 0)
4168 goto fail;
4169
4170 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4171 if (!memcg->stat)
4172 goto fail;
4173
4174 for_each_node(node)
4175 if (alloc_mem_cgroup_per_node_info(memcg, node))
4176 goto fail;
4177
4178 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4179 goto fail;
4180
4181 INIT_WORK(&memcg->high_work, high_work_func);
4182 memcg->last_scanned_node = MAX_NUMNODES;
4183 INIT_LIST_HEAD(&memcg->oom_notify);
4184 mutex_init(&memcg->thresholds_lock);
4185 spin_lock_init(&memcg->move_lock);
4186 vmpressure_init(&memcg->vmpressure);
4187 INIT_LIST_HEAD(&memcg->event_list);
4188 spin_lock_init(&memcg->event_list_lock);
4189 memcg->socket_pressure = jiffies;
4190#ifndef CONFIG_SLOB
4191 memcg->kmemcg_id = -1;
4192#endif
4193#ifdef CONFIG_CGROUP_WRITEBACK
4194 INIT_LIST_HEAD(&memcg->cgwb_list);
4195#endif
4196 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
4197 return memcg;
4198fail:
4199 if (memcg->id.id > 0)
4200 idr_remove(&mem_cgroup_idr, memcg->id.id);
4201 __mem_cgroup_free(memcg);
4202 return NULL;
4203}
4204
4205static struct cgroup_subsys_state * __ref
4206mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4207{
4208 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4209 struct mem_cgroup *memcg;
4210 long error = -ENOMEM;
4211
4212 memcg = mem_cgroup_alloc();
4213 if (!memcg)
4214 return ERR_PTR(error);
4215
4216 memcg->high = PAGE_COUNTER_MAX;
4217 memcg->soft_limit = PAGE_COUNTER_MAX;
4218 if (parent) {
4219 memcg->swappiness = mem_cgroup_swappiness(parent);
4220 memcg->oom_kill_disable = parent->oom_kill_disable;
4221 }
4222 if (parent && parent->use_hierarchy) {
4223 memcg->use_hierarchy = true;
4224 page_counter_init(&memcg->memory, &parent->memory);
4225 page_counter_init(&memcg->swap, &parent->swap);
4226 page_counter_init(&memcg->memsw, &parent->memsw);
4227 page_counter_init(&memcg->kmem, &parent->kmem);
4228 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
4229 } else {
4230 page_counter_init(&memcg->memory, NULL);
4231 page_counter_init(&memcg->swap, NULL);
4232 page_counter_init(&memcg->memsw, NULL);
4233 page_counter_init(&memcg->kmem, NULL);
4234 page_counter_init(&memcg->tcpmem, NULL);
4235 /*
4236		 * A deeper hierarchy with use_hierarchy == false doesn't make
4237		 * much sense, so let the cgroup subsystem know about this
4238		 * unfortunate state in our controller.
4239 */
4240 if (parent != root_mem_cgroup)
4241 memory_cgrp_subsys.broken_hierarchy = true;
4242 }
4243
4244 /* The following stuff does not apply to the root */
4245 if (!parent) {
4246 root_mem_cgroup = memcg;
4247 return &memcg->css;
4248 }
4249
4250 error = memcg_online_kmem(memcg);
4251 if (error)
4252 goto fail;
4253
4254 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4255 static_branch_inc(&memcg_sockets_enabled_key);
4256
4257 return &memcg->css;
4258fail:
4259 mem_cgroup_free(memcg);
4260 return ERR_PTR(-ENOMEM);
4261}
4262
4263static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
4264{
4265 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4266
4267 /* Online state pins memcg ID, memcg ID pins CSS */
4268 atomic_set(&memcg->id.ref, 1);
4269 css_get(css);
4270 return 0;
4271}
4272
4273static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4274{
4275 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4276 struct mem_cgroup_event *event, *tmp;
4277
4278 /*
4279 * Unregister events and notify userspace.
4280	 * Notify userspace about cgroup removal only after rmdir of the cgroup
4281	 * directory to avoid races between userspace and kernelspace.
4282 */
4283 spin_lock(&memcg->event_list_lock);
4284 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
4285 list_del_init(&event->list);
4286 schedule_work(&event->remove);
4287 }
4288 spin_unlock(&memcg->event_list_lock);
4289
4290 memcg_offline_kmem(memcg);
4291 wb_memcg_offline(memcg);
4292
4293 mem_cgroup_id_put(memcg);
4294}
4295
4296static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4297{
4298 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4299
4300 invalidate_reclaim_iterators(memcg);
4301}
4302
4303static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
4304{
4305 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4306
4307 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4308 static_branch_dec(&memcg_sockets_enabled_key);
4309
4310 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
4311 static_branch_dec(&memcg_sockets_enabled_key);
4312
4313 vmpressure_cleanup(&memcg->vmpressure);
4314 cancel_work_sync(&memcg->high_work);
4315 mem_cgroup_remove_from_trees(memcg);
4316 memcg_free_kmem(memcg);
4317 mem_cgroup_free(memcg);
4318}
4319
4320/**
4321 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4322 * @css: the target css
4323 *
4324 * Reset the states of the mem_cgroup associated with @css. This is
4325 * invoked when the userland requests disabling on the default hierarchy
4326 * but the memcg is pinned through dependency. The memcg should stop
4327 * applying policies and should revert to the vanilla state as it may be
4328 * made visible again.
4329 *
4330 * The current implementation only resets the essential configurations.
4331 * This needs to be expanded to cover all the visible parts.
4332 */
4333static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4334{
4335 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4336
4337 page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX);
4338 page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX);
4339 page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX);
4340 page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX);
4341 page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX);
4342 memcg->low = 0;
4343 memcg->high = PAGE_COUNTER_MAX;
4344 memcg->soft_limit = PAGE_COUNTER_MAX;
4345 memcg_wb_domain_size_changed(memcg);
4346}
4347
4348#ifdef CONFIG_MMU
4349/* Handlers for move charge at task migration. */
4350static int mem_cgroup_do_precharge(unsigned long count)
4351{
4352 int ret;
4353
4354 /* Try a single bulk charge without reclaim first, kswapd may wake */
4355 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
4356 if (!ret) {
4357 mc.precharge += count;
4358 return ret;
4359 }
4360
4361 /* Try charges one by one with reclaim, but do not retry */
4362 while (count--) {
4363 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
4364 if (ret)
4365 return ret;
4366 mc.precharge++;
4367 cond_resched();
4368 }
4369 return 0;
4370}
4371
4372union mc_target {
4373 struct page *page;
4374 swp_entry_t ent;
4375};
4376
4377enum mc_target_type {
4378 MC_TARGET_NONE = 0,
4379 MC_TARGET_PAGE,
4380 MC_TARGET_SWAP,
4381};
4382
4383static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4384 unsigned long addr, pte_t ptent)
4385{
4386 struct page *page = vm_normal_page(vma, addr, ptent);
4387
4388 if (!page || !page_mapped(page))
4389 return NULL;
4390 if (PageAnon(page)) {
4391 if (!(mc.flags & MOVE_ANON))
4392 return NULL;
4393 } else {
4394 if (!(mc.flags & MOVE_FILE))
4395 return NULL;
4396 }
4397 if (!get_page_unless_zero(page))
4398 return NULL;
4399
4400 return page;
4401}
4402
4403#ifdef CONFIG_SWAP
4404static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4405 pte_t ptent, swp_entry_t *entry)
4406{
4407 struct page *page = NULL;
4408 swp_entry_t ent = pte_to_swp_entry(ptent);
4409
4410 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
4411 return NULL;
4412 /*
4413	 * Because lookup_swap_cache() updates some statistics counters,
4414	 * we call find_get_page() on the swapper address space directly.
4415 */
4416 page = find_get_page(swap_address_space(ent), swp_offset(ent));
4417 if (do_memsw_account())
4418 entry->val = ent.val;
4419
4420 return page;
4421}
4422#else
4423static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4424 pte_t ptent, swp_entry_t *entry)
4425{
4426 return NULL;
4427}
4428#endif
4429
4430static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4431 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4432{
4433 struct page *page = NULL;
4434 struct address_space *mapping;
4435 pgoff_t pgoff;
4436
4437 if (!vma->vm_file) /* anonymous vma */
4438 return NULL;
4439 if (!(mc.flags & MOVE_FILE))
4440 return NULL;
4441
4442 mapping = vma->vm_file->f_mapping;
4443 pgoff = linear_page_index(vma, addr);
4444
4445	/* The page is moved even if it's not in this task's RSS (not faulted in yet). */
4446#ifdef CONFIG_SWAP
4447 /* shmem/tmpfs may report page out on swap: account for that too. */
4448 if (shmem_mapping(mapping)) {
4449 page = find_get_entry(mapping, pgoff);
4450 if (radix_tree_exceptional_entry(page)) {
4451 swp_entry_t swp = radix_to_swp_entry(page);
4452 if (do_memsw_account())
4453 *entry = swp;
4454 page = find_get_page(swap_address_space(swp),
4455 swp_offset(swp));
4456 }
4457 } else
4458 page = find_get_page(mapping, pgoff);
4459#else
4460 page = find_get_page(mapping, pgoff);
4461#endif
4462 return page;
4463}
4464
4465/**
4466 * mem_cgroup_move_account - move account of the page
4467 * @page: the page
4468 * @compound: charge the page as compound or small page
4469 * @from: mem_cgroup which the page is moved from.
4470 * @to: mem_cgroup which the page is moved to. @from != @to.
4471 *
4472 * The caller must make sure the page is not on LRU (isolate_page() is useful.)
4473 *
4474 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
4475 * from old cgroup.
4476 */
4477static int mem_cgroup_move_account(struct page *page,
4478 bool compound,
4479 struct mem_cgroup *from,
4480 struct mem_cgroup *to)
4481{
4482 unsigned long flags;
4483 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
4484 int ret;
4485 bool anon;
4486
4487 VM_BUG_ON(from == to);
4488 VM_BUG_ON_PAGE(PageLRU(page), page);
4489 VM_BUG_ON(compound && !PageTransHuge(page));
4490
4491 /*
4492 * Prevent mem_cgroup_migrate() from looking at
4493 * page->mem_cgroup of its source page while we change it.
4494 */
4495 ret = -EBUSY;
4496 if (!trylock_page(page))
4497 goto out;
4498
4499 ret = -EINVAL;
4500 if (page->mem_cgroup != from)
4501 goto out_unlock;
4502
4503 anon = PageAnon(page);
4504
4505 spin_lock_irqsave(&from->move_lock, flags);
4506
4507 if (!anon && page_mapped(page)) {
4508 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4509 nr_pages);
4510 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4511 nr_pages);
4512 }
4513
4514 /*
4515	 * move_lock is grabbed above and the caller has set from->moving_account,
4516	 * so mem_cgroup_update_page_stat() will serialize updates to PageDirty.
4517	 * The mapping should therefore be stable for dirty pages.
4518 */
4519 if (!anon && PageDirty(page)) {
4520 struct address_space *mapping = page_mapping(page);
4521
4522 if (mapping_cap_account_dirty(mapping)) {
4523 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
4524 nr_pages);
4525 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
4526 nr_pages);
4527 }
4528 }
4529
4530 if (PageWriteback(page)) {
4531 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4532 nr_pages);
4533 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4534 nr_pages);
4535 }
4536
4537 /*
4538 * It is safe to change page->mem_cgroup here because the page
4539 * is referenced, charged, and isolated - we can't race with
4540 * uncharging, charging, migration, or LRU putback.
4541 */
4542
4543 /* caller should have done css_get */
4544 page->mem_cgroup = to;
4545 spin_unlock_irqrestore(&from->move_lock, flags);
4546
4547 ret = 0;
4548
4549 local_irq_disable();
4550 mem_cgroup_charge_statistics(to, page, compound, nr_pages);
4551 memcg_check_events(to, page);
4552 mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
4553 memcg_check_events(from, page);
4554 local_irq_enable();
4555out_unlock:
4556 unlock_page(page);
4557out:
4558 return ret;
4559}
4560
4561/**
4562 * get_mctgt_type - get target type of moving charge
4563 * @vma: the vma to which the pte to be checked belongs
4564 * @addr: the address corresponding to the pte to be checked
4565 * @ptent: the pte to be checked
4566 * @target: pointer where the target page or swap entry is stored (can be NULL)
4567 *
4568 * Returns
4569 *   0 (MC_TARGET_NONE): if the pte is not a target for move charge.
4570 *   1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4571 *     move charge. If @target is not NULL, the page is stored in target->page
4572 *     with an extra refcount taken (callers should handle it).
4573 *   2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4574 *     target for charge migration. If @target is not NULL, the entry is stored
4575 *     in target->ent.
4576 *
4577 * Called with pte lock held.
4578 */
4579
4580static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
4581 unsigned long addr, pte_t ptent, union mc_target *target)
4582{
4583 struct page *page = NULL;
4584 enum mc_target_type ret = MC_TARGET_NONE;
4585 swp_entry_t ent = { .val = 0 };
4586
4587 if (pte_present(ptent))
4588 page = mc_handle_present_pte(vma, addr, ptent);
4589 else if (is_swap_pte(ptent))
4590 page = mc_handle_swap_pte(vma, ptent, &ent);
4591 else if (pte_none(ptent))
4592 page = mc_handle_file_pte(vma, addr, ptent, &ent);
4593
4594 if (!page && !ent.val)
4595 return ret;
4596 if (page) {
4597 /*
4598		 * Do only a loose check without serialization.
4599		 * mem_cgroup_move_account() checks whether the page is valid
4600		 * under LRU exclusion.
4601 */
4602 if (page->mem_cgroup == mc.from) {
4603 ret = MC_TARGET_PAGE;
4604 if (target)
4605 target->page = page;
4606 }
4607 if (!ret || !target)
4608 put_page(page);
4609 }
4610	/* There is a swap entry, and the page doesn't exist or isn't charged */
4611 if (ent.val && !ret &&
4612 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
4613 ret = MC_TARGET_SWAP;
4614 if (target)
4615 target->ent = ent;
4616 }
4617 return ret;
4618}
4619
4620#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4621/*
4622 * We don't consider swapping or file mapped pages because THP does not
4623 * support them for now.
4624 * Caller should make sure that pmd_trans_huge(pmd) is true.
4625 */
4626static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4627 unsigned long addr, pmd_t pmd, union mc_target *target)
4628{
4629 struct page *page = NULL;
4630 enum mc_target_type ret = MC_TARGET_NONE;
4631
4632 page = pmd_page(pmd);
4633 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
4634 if (!(mc.flags & MOVE_ANON))
4635 return ret;
4636 if (page->mem_cgroup == mc.from) {
4637 ret = MC_TARGET_PAGE;
4638 if (target) {
4639 get_page(page);
4640 target->page = page;
4641 }
4642 }
4643 return ret;
4644}
4645#else
4646static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4647 unsigned long addr, pmd_t pmd, union mc_target *target)
4648{
4649 return MC_TARGET_NONE;
4650}
4651#endif
4652
4653static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4654 unsigned long addr, unsigned long end,
4655 struct mm_walk *walk)
4656{
4657 struct vm_area_struct *vma = walk->vma;
4658 pte_t *pte;
4659 spinlock_t *ptl;
4660
4661 ptl = pmd_trans_huge_lock(pmd, vma);
4662 if (ptl) {
4663 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
4664 mc.precharge += HPAGE_PMD_NR;
4665 spin_unlock(ptl);
4666 return 0;
4667 }
4668
4669 if (pmd_trans_unstable(pmd))
4670 return 0;
4671 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4672 for (; addr != end; pte++, addr += PAGE_SIZE)
4673 if (get_mctgt_type(vma, addr, *pte, NULL))
4674 mc.precharge++; /* increment precharge temporarily */
4675 pte_unmap_unlock(pte - 1, ptl);
4676 cond_resched();
4677
4678 return 0;
4679}
4680
4681static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4682{
4683 unsigned long precharge;
4684
4685 struct mm_walk mem_cgroup_count_precharge_walk = {
4686 .pmd_entry = mem_cgroup_count_precharge_pte_range,
4687 .mm = mm,
4688 };
4689 down_read(&mm->mmap_sem);
4690 walk_page_range(0, mm->highest_vm_end,
4691 &mem_cgroup_count_precharge_walk);
4692 up_read(&mm->mmap_sem);
4693
4694 precharge = mc.precharge;
4695 mc.precharge = 0;
4696
4697 return precharge;
4698}
4699
4700static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4701{
4702 unsigned long precharge = mem_cgroup_count_precharge(mm);
4703
4704 VM_BUG_ON(mc.moving_task);
4705 mc.moving_task = current;
4706 return mem_cgroup_do_precharge(precharge);
4707}
4708
4709/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
4710static void __mem_cgroup_clear_mc(void)
4711{
4712 struct mem_cgroup *from = mc.from;
4713 struct mem_cgroup *to = mc.to;
4714
4715 /* we must uncharge all the leftover precharges from mc.to */
4716 if (mc.precharge) {
4717 cancel_charge(mc.to, mc.precharge);
4718 mc.precharge = 0;
4719 }
4720 /*
4721 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4722 * we must uncharge here.
4723 */
4724 if (mc.moved_charge) {
4725 cancel_charge(mc.from, mc.moved_charge);
4726 mc.moved_charge = 0;
4727 }
4728 /* we must fixup refcnts and charges */
4729 if (mc.moved_swap) {
4730 /* uncharge swap account from the old cgroup */
4731 if (!mem_cgroup_is_root(mc.from))
4732 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
4733
4734 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
4735
4736 /*
4737 * we charged both to->memory and to->memsw, so we
4738 * should uncharge to->memory.
4739 */
4740 if (!mem_cgroup_is_root(mc.to))
4741 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
4742
4743 mem_cgroup_id_get_many(mc.to, mc.moved_swap);
4744 css_put_many(&mc.to->css, mc.moved_swap);
4745
4746 mc.moved_swap = 0;
4747 }
4748 memcg_oom_recover(from);
4749 memcg_oom_recover(to);
4750 wake_up_all(&mc.waitq);
4751}
4752
4753static void mem_cgroup_clear_mc(void)
4754{
4755 struct mm_struct *mm = mc.mm;
4756
4757 /*
4758 * we must clear moving_task before waking up waiters at the end of
4759 * task migration.
4760 */
4761 mc.moving_task = NULL;
4762 __mem_cgroup_clear_mc();
4763 spin_lock(&mc.lock);
4764 mc.from = NULL;
4765 mc.to = NULL;
4766 mc.mm = NULL;
4767 spin_unlock(&mc.lock);
4768
4769 mmput(mm);
4770}
4771
4772static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4773{
4774 struct cgroup_subsys_state *css;
4775 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
4776 struct mem_cgroup *from;
4777 struct task_struct *leader, *p;
4778 struct mm_struct *mm;
4779 unsigned long move_flags;
4780 int ret = 0;
4781
4782 /* charge immigration isn't supported on the default hierarchy */
4783 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4784 return 0;
4785
4786 /*
4787 * Multi-process migrations only happen on the default hierarchy
4788 * where charge immigration is not used. Perform charge
4789 * immigration if @tset contains a leader and whine if there are
4790 * multiple.
4791 */
4792 p = NULL;
4793 cgroup_taskset_for_each_leader(leader, css, tset) {
4794 WARN_ON_ONCE(p);
4795 p = leader;
4796 memcg = mem_cgroup_from_css(css);
4797 }
4798 if (!p)
4799 return 0;
4800
4801 /*
4802	 * We are now committed to this value, whatever it is. Changes to this
4803	 * tunable will only affect upcoming migrations, not the current one,
4804	 * so we need to save it and keep using it.
4805 */
4806 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4807 if (!move_flags)
4808 return 0;
4809
4810 from = mem_cgroup_from_task(p);
4811
4812 VM_BUG_ON(from == memcg);
4813
4814 mm = get_task_mm(p);
4815 if (!mm)
4816 return 0;
4817	/* We move charges only when we move the owner of the mm */
4818 if (mm->owner == p) {
4819 VM_BUG_ON(mc.from);
4820 VM_BUG_ON(mc.to);
4821 VM_BUG_ON(mc.precharge);
4822 VM_BUG_ON(mc.moved_charge);
4823 VM_BUG_ON(mc.moved_swap);
4824
4825 spin_lock(&mc.lock);
4826 mc.mm = mm;
4827 mc.from = from;
4828 mc.to = memcg;
4829 mc.flags = move_flags;
4830 spin_unlock(&mc.lock);
4831 /* We set mc.moving_task later */
4832
4833 ret = mem_cgroup_precharge_mc(mm);
4834 if (ret)
4835 mem_cgroup_clear_mc();
4836 } else {
4837 mmput(mm);
4838 }
4839 return ret;
4840}
4841
4842static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4843{
4844 if (mc.to)
4845 mem_cgroup_clear_mc();
4846}
4847
4848static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4849 unsigned long addr, unsigned long end,
4850 struct mm_walk *walk)
4851{
4852 int ret = 0;
4853 struct vm_area_struct *vma = walk->vma;
4854 pte_t *pte;
4855 spinlock_t *ptl;
4856 enum mc_target_type target_type;
4857 union mc_target target;
4858 struct page *page;
4859
4860 ptl = pmd_trans_huge_lock(pmd, vma);
4861 if (ptl) {
4862 if (mc.precharge < HPAGE_PMD_NR) {
4863 spin_unlock(ptl);
4864 return 0;
4865 }
4866 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
4867 if (target_type == MC_TARGET_PAGE) {
4868 page = target.page;
4869 if (!isolate_lru_page(page)) {
4870 if (!mem_cgroup_move_account(page, true,
4871 mc.from, mc.to)) {
4872 mc.precharge -= HPAGE_PMD_NR;
4873 mc.moved_charge += HPAGE_PMD_NR;
4874 }
4875 putback_lru_page(page);
4876 }
4877 put_page(page);
4878 }
4879 spin_unlock(ptl);
4880 return 0;
4881 }
4882
4883 if (pmd_trans_unstable(pmd))
4884 return 0;
4885retry:
4886 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4887 for (; addr != end; addr += PAGE_SIZE) {
4888 pte_t ptent = *(pte++);
4889 swp_entry_t ent;
4890
4891 if (!mc.precharge)
4892 break;
4893
4894 switch (get_mctgt_type(vma, addr, ptent, &target)) {
4895 case MC_TARGET_PAGE:
4896 page = target.page;
4897 /*
4898			 * We can have a part of the split pmd here. Moving it
4899			 * could be done, but it would be too convoluted, so simply
4900			 * ignore such a partial THP and keep it in the original
4901			 * memcg. There should be somebody mapping the head.
4902 */
4903 if (PageTransCompound(page))
4904 goto put;
4905 if (isolate_lru_page(page))
4906 goto put;
4907 if (!mem_cgroup_move_account(page, false,
4908 mc.from, mc.to)) {
4909 mc.precharge--;
4910 /* we uncharge from mc.from later. */
4911 mc.moved_charge++;
4912 }
4913 putback_lru_page(page);
4914put: /* get_mctgt_type() gets the page */
4915 put_page(page);
4916 break;
4917 case MC_TARGET_SWAP:
4918 ent = target.ent;
4919 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
4920 mc.precharge--;
4921 /* we fixup refcnts and charges later. */
4922 mc.moved_swap++;
4923 }
4924 break;
4925 default:
4926 break;
4927 }
4928 }
4929 pte_unmap_unlock(pte - 1, ptl);
4930 cond_resched();
4931
4932 if (addr != end) {
4933 /*
4934 * We have consumed all precharges we got in can_attach().
4935		 * We try to charge one by one, but don't do any additional
4936		 * charges to mc.to if we have already failed a charge once in
4937		 * the attach() phase.
4938 */
4939 ret = mem_cgroup_do_precharge(1);
4940 if (!ret)
4941 goto retry;
4942 }
4943
4944 return ret;
4945}
4946
4947static void mem_cgroup_move_charge(void)
4948{
4949 struct mm_walk mem_cgroup_move_charge_walk = {
4950 .pmd_entry = mem_cgroup_move_charge_pte_range,
4951 .mm = mc.mm,
4952 };
4953
4954 lru_add_drain_all();
4955 /*
4956 * Signal lock_page_memcg() to take the memcg's move_lock
4957 * while we're moving its pages to another memcg. Then wait
4958 * for already started RCU-only updates to finish.
4959 */
4960 atomic_inc(&mc.from->moving_account);
4961 synchronize_rcu();
4962retry:
4963 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
4964 /*
4965		 * Someone who is holding the mmap_sem might be waiting on the
4966		 * waitq. So we cancel all extra charges, wake up all waiters,
4967 * and retry. Because we cancel precharges, we might not be able
4968 * to move enough charges, but moving charge is a best-effort
4969 * feature anyway, so it wouldn't be a big problem.
4970 */
4971 __mem_cgroup_clear_mc();
4972 cond_resched();
4973 goto retry;
4974 }
4975 /*
4976 * When we have consumed all precharges and failed in doing
4977 * additional charge, the page walk just aborts.
4978 */
4979 walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk);
4980
4981 up_read(&mc.mm->mmap_sem);
4982 atomic_dec(&mc.from->moving_account);
4983}
4984
4985static void mem_cgroup_move_task(void)
4986{
4987 if (mc.to) {
4988 mem_cgroup_move_charge();
4989 mem_cgroup_clear_mc();
4990 }
4991}
4992#else /* !CONFIG_MMU */
4993static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4994{
4995 return 0;
4996}
4997static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4998{
4999}
5000static void mem_cgroup_move_task(void)
5001{
5002}
5003#endif
5004
5005/*
5006 * Cgroup retains root cgroups across [un]mount cycles, making it necessary
5007 * to verify whether we're attached to the default hierarchy on each mount
5008 * attempt.
5009 */
5010static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
5011{
5012 /*
5013 * use_hierarchy is forced on the default hierarchy. cgroup core
5014 * guarantees that @root doesn't have any children, so turning it
5015 * on for the root memcg is enough.
5016 */
5017 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5018 root_mem_cgroup->use_hierarchy = true;
5019 else
5020 root_mem_cgroup->use_hierarchy = false;
5021}
5022
5023static u64 memory_current_read(struct cgroup_subsys_state *css,
5024 struct cftype *cft)
5025{
5026 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5027
5028 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
5029}
5030
5031static int memory_low_show(struct seq_file *m, void *v)
5032{
5033 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5034 unsigned long low = READ_ONCE(memcg->low);
5035
5036 if (low == PAGE_COUNTER_MAX)
5037 seq_puts(m, "max\n");
5038 else
5039 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
5040
5041 return 0;
5042}
5043
5044static ssize_t memory_low_write(struct kernfs_open_file *of,
5045 char *buf, size_t nbytes, loff_t off)
5046{
5047 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5048 unsigned long low;
5049 int err;
5050
5051 buf = strstrip(buf);
5052 err = page_counter_memparse(buf, "max", &low);
5053 if (err)
5054 return err;
5055
5056 memcg->low = low;
5057
5058 return nbytes;
5059}
5060
5061static int memory_high_show(struct seq_file *m, void *v)
5062{
5063 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5064 unsigned long high = READ_ONCE(memcg->high);
5065
5066 if (high == PAGE_COUNTER_MAX)
5067 seq_puts(m, "max\n");
5068 else
5069 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
5070
5071 return 0;
5072}
5073
5074static ssize_t memory_high_write(struct kernfs_open_file *of,
5075 char *buf, size_t nbytes, loff_t off)
5076{
5077 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5078 unsigned long nr_pages;
5079 unsigned long high;
5080 int err;
5081
5082 buf = strstrip(buf);
5083 err = page_counter_memparse(buf, "max", &high);
5084 if (err)
5085 return err;
5086
5087 memcg->high = high;
5088
5089 nr_pages = page_counter_read(&memcg->memory);
5090 if (nr_pages > high)
5091 try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
5092 GFP_KERNEL, true);
5093
5094 memcg_wb_domain_size_changed(memcg);
5095 return nbytes;
5096}
5097
5098static int memory_max_show(struct seq_file *m, void *v)
5099{
5100 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5101 unsigned long max = READ_ONCE(memcg->memory.limit);
5102
5103 if (max == PAGE_COUNTER_MAX)
5104 seq_puts(m, "max\n");
5105 else
5106 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5107
5108 return 0;
5109}
5110
5111static ssize_t memory_max_write(struct kernfs_open_file *of,
5112 char *buf, size_t nbytes, loff_t off)
5113{
5114 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5115 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
5116 bool drained = false;
5117 unsigned long max;
5118 int err;
5119
5120 buf = strstrip(buf);
5121 err = page_counter_memparse(buf, "max", &max);
5122 if (err)
5123 return err;
5124
5125 xchg(&memcg->memory.limit, max);
5126
5127 for (;;) {
5128 unsigned long nr_pages = page_counter_read(&memcg->memory);
5129
5130 if (nr_pages <= max)
5131 break;
5132
5133 if (signal_pending(current)) {
5134 err = -EINTR;
5135 break;
5136 }
5137
5138 if (!drained) {
5139 drain_all_stock(memcg);
5140 drained = true;
5141 continue;
5142 }
5143
5144 if (nr_reclaims) {
5145 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
5146 GFP_KERNEL, true))
5147 nr_reclaims--;
5148 continue;
5149 }
5150
5151 mem_cgroup_events(memcg, MEMCG_OOM, 1);
5152 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
5153 break;
5154 }
5155
5156 memcg_wb_domain_size_changed(memcg);
5157 return nbytes;
5158}
5159
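/*
 * Interface sketch (the cgroup2 mount point below is an assumption,
 * not taken from this file): lowering memory.max synchronously
 * reclaims the excess and, if reclaim keeps failing, invokes the OOM
 * killer; writing "max" removes the limit again.
 *
 *	echo 512M > /sys/fs/cgroup/<group>/memory.max
 *	echo max  > /sys/fs/cgroup/<group>/memory.max
 */
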
5160static int memory_events_show(struct seq_file *m, void *v)
5161{
5162 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5163
5164 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
5165 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
5166 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
5167 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
5168
5169 return 0;
5170}
5171
5172static int memory_stat_show(struct seq_file *m, void *v)
5173{
5174 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5175 unsigned long stat[MEMCG_NR_STAT];
5176 unsigned long events[MEMCG_NR_EVENTS];
5177 int i;
5178
5179 /*
5180 * Provide statistics on the state of the memory subsystem as
5181 * well as cumulative event counters that show past behavior.
5182 *
5183 * This list is ordered following a combination of these gradients:
5184 * 1) generic big picture -> specifics and details
5185 * 2) reflecting userspace activity -> reflecting kernel heuristics
5186 *
5187 * Current memory state:
5188 */
5189
5190 tree_stat(memcg, stat);
5191 tree_events(memcg, events);
5192
5193 seq_printf(m, "anon %llu\n",
5194 (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
5195 seq_printf(m, "file %llu\n",
5196 (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
5197 seq_printf(m, "kernel_stack %llu\n",
5198 (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
5199 seq_printf(m, "slab %llu\n",
5200 (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
5201 stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
5202 seq_printf(m, "sock %llu\n",
5203 (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
5204
5205 seq_printf(m, "file_mapped %llu\n",
5206 (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE);
5207 seq_printf(m, "file_dirty %llu\n",
5208 (u64)stat[MEM_CGROUP_STAT_DIRTY] * PAGE_SIZE);
5209 seq_printf(m, "file_writeback %llu\n",
5210 (u64)stat[MEM_CGROUP_STAT_WRITEBACK] * PAGE_SIZE);
5211
5212 for (i = 0; i < NR_LRU_LISTS; i++) {
5213 struct mem_cgroup *mi;
5214 unsigned long val = 0;
5215
5216 for_each_mem_cgroup_tree(mi, memcg)
5217 val += mem_cgroup_nr_lru_pages(mi, BIT(i));
5218 seq_printf(m, "%s %llu\n",
5219 mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
5220 }
5221
5222 seq_printf(m, "slab_reclaimable %llu\n",
5223 (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE);
5224 seq_printf(m, "slab_unreclaimable %llu\n",
5225 (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
5226
5227 /* Accumulated memory events */
5228
5229 seq_printf(m, "pgfault %lu\n",
5230 events[MEM_CGROUP_EVENTS_PGFAULT]);
5231 seq_printf(m, "pgmajfault %lu\n",
5232 events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
5233
5234 return 0;
5235}
5236
5237static struct cftype memory_files[] = {
5238 {
5239 .name = "current",
5240 .flags = CFTYPE_NOT_ON_ROOT,
5241 .read_u64 = memory_current_read,
5242 },
5243 {
5244 .name = "low",
5245 .flags = CFTYPE_NOT_ON_ROOT,
5246 .seq_show = memory_low_show,
5247 .write = memory_low_write,
5248 },
5249 {
5250 .name = "high",
5251 .flags = CFTYPE_NOT_ON_ROOT,
5252 .seq_show = memory_high_show,
5253 .write = memory_high_write,
5254 },
5255 {
5256 .name = "max",
5257 .flags = CFTYPE_NOT_ON_ROOT,
5258 .seq_show = memory_max_show,
5259 .write = memory_max_write,
5260 },
5261 {
5262 .name = "events",
5263 .flags = CFTYPE_NOT_ON_ROOT,
5264 .file_offset = offsetof(struct mem_cgroup, events_file),
5265 .seq_show = memory_events_show,
5266 },
5267 {
5268 .name = "stat",
5269 .flags = CFTYPE_NOT_ON_ROOT,
5270 .seq_show = memory_stat_show,
5271 },
5272 { } /* terminate */
5273};
5274
5275struct cgroup_subsys memory_cgrp_subsys = {
5276 .css_alloc = mem_cgroup_css_alloc,
5277 .css_online = mem_cgroup_css_online,
5278 .css_offline = mem_cgroup_css_offline,
5279 .css_released = mem_cgroup_css_released,
5280 .css_free = mem_cgroup_css_free,
5281 .css_reset = mem_cgroup_css_reset,
5282 .can_attach = mem_cgroup_can_attach,
5283 .cancel_attach = mem_cgroup_cancel_attach,
5284 .post_attach = mem_cgroup_move_task,
5285 .bind = mem_cgroup_bind,
5286 .dfl_cftypes = memory_files,
5287 .legacy_cftypes = mem_cgroup_legacy_files,
5288 .early_init = 0,
5289};
5290
5291/**
5292 * mem_cgroup_low - check if memory consumption is below the normal range
5293 * @root: the highest ancestor to consider
5294 * @memcg: the memory cgroup to check
5295 *
5296 * Returns %true if memory consumption of @memcg, and that of all
5297 * configurable ancestors up to @root, is below the normal range.
5298 */
5299bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
5300{
5301 if (mem_cgroup_disabled())
5302 return false;
5303
5304 /*
5305 * The toplevel group doesn't have a configurable range, so
5306 * it's never low when looked at directly, and it is not
5307 * considered an ancestor when assessing the hierarchy.
5308 */
5309
5310 if (memcg == root_mem_cgroup)
5311 return false;
5312
5313 if (page_counter_read(&memcg->memory) >= memcg->low)
5314 return false;
5315
5316 while (memcg != root) {
5317 memcg = parent_mem_cgroup(memcg);
5318
5319 if (memcg == root_mem_cgroup)
5320 break;
5321
5322 if (page_counter_read(&memcg->memory) >= memcg->low)
5323 return false;
5324 }
5325 return true;
5326}
5327
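/*
 * A minimal usage sketch (not the actual vmscan code; the loop shape
 * is illustrative only): reclaim can use this check to skip cgroups
 * that are still within their memory.low protection:
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (mem_cgroup_low(root, iter))
 *			continue;
 *		... shrink that cgroup's LRU lists ...
 *	}
 */
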
5328/**
5329 * mem_cgroup_try_charge - try charging a page
5330 * @page: page to charge
5331 * @mm: mm context of the victim
5332 * @gfp_mask: reclaim mode
5333 * @memcgp: charged memcg return
5334 * @compound: charge the page as compound or small page
5335 *
5336 * Try to charge @page to the memcg that @mm belongs to, reclaiming
5337 * pages according to @gfp_mask if necessary.
5338 *
5339 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5340 * Otherwise, an error code is returned.
5341 *
5342 * After page->mapping has been set up, the caller must finalize the
5343 * charge with mem_cgroup_commit_charge(). Or abort the transaction
5344 * with mem_cgroup_cancel_charge() in case page instantiation fails.
5345 */
5346int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
5347 gfp_t gfp_mask, struct mem_cgroup **memcgp,
5348 bool compound)
5349{
5350 struct mem_cgroup *memcg = NULL;
5351 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5352 int ret = 0;
5353
5354 if (mem_cgroup_disabled())
5355 goto out;
5356
5357 if (PageSwapCache(page)) {
5358 /*
5359 * Every swap fault against a single page tries to charge the
5360		 * page, so bail as early as possible. shmem_unuse() encounters
5361 * already charged pages, too. The USED bit is protected by
5362 * the page lock, which serializes swap cache removal, which
5363 * in turn serializes uncharging.
5364 */
5365 VM_BUG_ON_PAGE(!PageLocked(page), page);
5366 if (page->mem_cgroup)
5367 goto out;
5368
5369 if (do_swap_account) {
5370 swp_entry_t ent = { .val = page_private(page), };
5371 unsigned short id = lookup_swap_cgroup_id(ent);
5372
5373 rcu_read_lock();
5374 memcg = mem_cgroup_from_id(id);
5375 if (memcg && !css_tryget_online(&memcg->css))
5376 memcg = NULL;
5377 rcu_read_unlock();
5378 }
5379 }
5380
5381 if (!memcg)
5382 memcg = get_mem_cgroup_from_mm(mm);
5383
5384 ret = try_charge(memcg, gfp_mask, nr_pages);
5385
5386 css_put(&memcg->css);
5387out:
5388 *memcgp = memcg;
5389 return ret;
5390}
5391
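/*
 * A hedged sketch of the charge transaction described above (the
 * error handling shape is illustrative; only the three memcg calls
 * are taken from this file):
 *
 *	struct mem_cgroup *memcg;
 *	int err;
 *
 *	err = mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false);
 *	if (err)
 *		return err;
 *	err = ... install the page into the page tables or page cache ...;
 *	if (err) {
 *		mem_cgroup_cancel_charge(page, memcg, false);
 *		return err;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 */
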
5392/**
5393 * mem_cgroup_commit_charge - commit a page charge
5394 * @page: page to charge
5395 * @memcg: memcg to charge the page to
5396 * @lrucare: page might be on LRU already
5397 * @compound: charge the page as compound or small page
5398 *
5399 * Finalize a charge transaction started by mem_cgroup_try_charge(),
5400 * after page->mapping has been set up. This must happen atomically
5401 * as part of the page instantiation, i.e. under the page table lock
5402 * for anonymous pages, under the page lock for page and swap cache.
5403 *
5404 * In addition, the page must not be on the LRU during the commit, to
5405 * prevent racing with task migration. If it might be, use @lrucare.
5406 *
5407 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
5408 */
5409void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
5410 bool lrucare, bool compound)
5411{
5412 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5413
5414 VM_BUG_ON_PAGE(!page->mapping, page);
5415 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5416
5417 if (mem_cgroup_disabled())
5418 return;
5419 /*
5420 * Swap faults will attempt to charge the same page multiple
5421 * times. But reuse_swap_page() might have removed the page
5422 * from swapcache already, so we can't check PageSwapCache().
5423 */
5424 if (!memcg)
5425 return;
5426
5427 commit_charge(page, memcg, lrucare);
5428
5429 local_irq_disable();
5430 mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
5431 memcg_check_events(memcg, page);
5432 local_irq_enable();
5433
5434 if (do_memsw_account() && PageSwapCache(page)) {
5435 swp_entry_t entry = { .val = page_private(page) };
5436 /*
5437 * The swap entry might not get freed for a long time,
5438		 * so let's not wait for it. The page already received a
5439		 * memory+swap charge, so drop the swap entry duplicate.
5440 */
5441 mem_cgroup_uncharge_swap(entry);
5442 }
5443}
5444
5445/**
5446 * mem_cgroup_cancel_charge - cancel a page charge
5447 * @page: page to charge
5448 * @memcg: memcg to charge the page to
5449 * @compound: charge the page as compound or small page
5450 *
5451 * Cancel a charge transaction started by mem_cgroup_try_charge().
5452 */
5453void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
5454 bool compound)
5455{
5456 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5457
5458 if (mem_cgroup_disabled())
5459 return;
5460 /*
5461 * Swap faults will attempt to charge the same page multiple
5462 * times. But reuse_swap_page() might have removed the page
5463 * from swapcache already, so we can't check PageSwapCache().
5464 */
5465 if (!memcg)
5466 return;
5467
5468 cancel_charge(memcg, nr_pages);
5469}
5470
5471static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
5472 unsigned long nr_anon, unsigned long nr_file,
5473 unsigned long nr_huge, unsigned long nr_kmem,
5474 struct page *dummy_page)
5475{
5476 unsigned long nr_pages = nr_anon + nr_file + nr_kmem;
5477 unsigned long flags;
5478
5479 if (!mem_cgroup_is_root(memcg)) {
5480 page_counter_uncharge(&memcg->memory, nr_pages);
5481 if (do_memsw_account())
5482 page_counter_uncharge(&memcg->memsw, nr_pages);
5483 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && nr_kmem)
5484 page_counter_uncharge(&memcg->kmem, nr_kmem);
5485 memcg_oom_recover(memcg);
5486 }
5487
5488 local_irq_save(flags);
5489 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
5490 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
5491 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
5492 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
5493 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
5494 memcg_check_events(memcg, dummy_page);
5495 local_irq_restore(flags);
5496
5497 if (!mem_cgroup_is_root(memcg))
5498 css_put_many(&memcg->css, nr_pages);
5499}
5500
5501static void uncharge_list(struct list_head *page_list)
5502{
5503 struct mem_cgroup *memcg = NULL;
5504 unsigned long nr_anon = 0;
5505 unsigned long nr_file = 0;
5506 unsigned long nr_huge = 0;
5507 unsigned long nr_kmem = 0;
5508 unsigned long pgpgout = 0;
5509 struct list_head *next;
5510 struct page *page;
5511
5512 /*
5513 * Note that the list can be a single page->lru; hence the
5514 * do-while loop instead of a simple list_for_each_entry().
5515 */
5516 next = page_list->next;
5517 do {
5518 page = list_entry(next, struct page, lru);
5519 next = page->lru.next;
5520
5521 VM_BUG_ON_PAGE(PageLRU(page), page);
5522 VM_BUG_ON_PAGE(page_count(page), page);
5523
5524 if (!page->mem_cgroup)
5525 continue;
5526
5527 /*
5528 * Nobody should be changing or seriously looking at
5529		 * page->mem_cgroup at this point; we have fully
5530 * exclusive access to the page.
5531 */
5532
5533 if (memcg != page->mem_cgroup) {
5534 if (memcg) {
5535 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5536 nr_huge, nr_kmem, page);
5537 pgpgout = nr_anon = nr_file =
5538 nr_huge = nr_kmem = 0;
5539 }
5540 memcg = page->mem_cgroup;
5541 }
5542
5543 if (!PageKmemcg(page)) {
5544 unsigned int nr_pages = 1;
5545
5546 if (PageTransHuge(page)) {
5547 nr_pages <<= compound_order(page);
5548 nr_huge += nr_pages;
5549 }
5550 if (PageAnon(page))
5551 nr_anon += nr_pages;
5552 else
5553 nr_file += nr_pages;
5554 pgpgout++;
5555 } else {
5556 nr_kmem += 1 << compound_order(page);
5557 __ClearPageKmemcg(page);
5558 }
5559
5560 page->mem_cgroup = NULL;
5561 } while (next != page_list);
5562
5563 if (memcg)
5564 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5565 nr_huge, nr_kmem, page);
5566}
5567
5568/**
5569 * mem_cgroup_uncharge - uncharge a page
5570 * @page: page to uncharge
5571 *
5572 * Uncharge a page previously charged with mem_cgroup_try_charge() and
5573 * mem_cgroup_commit_charge().
5574 */
5575void mem_cgroup_uncharge(struct page *page)
5576{
5577 if (mem_cgroup_disabled())
5578 return;
5579
5580 /* Don't touch page->lru of any random page, pre-check: */
5581 if (!page->mem_cgroup)
5582 return;
5583
5584 INIT_LIST_HEAD(&page->lru);
5585 uncharge_list(&page->lru);
5586}
5587
5588/**
5589 * mem_cgroup_uncharge_list - uncharge a list of page
5590 * @page_list: list of pages to uncharge
5591 *
5592 * Uncharge a list of pages previously charged with
5593 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
5594 */
5595void mem_cgroup_uncharge_list(struct list_head *page_list)
5596{
5597 if (mem_cgroup_disabled())
5598 return;
5599
5600 if (!list_empty(page_list))
5601 uncharge_list(page_list);
5602}
5603
5604/**
5605 * mem_cgroup_migrate - charge a page's replacement
5606 * @oldpage: currently circulating page
5607 * @newpage: replacement page
5608 *
5609 * Charge @newpage as a replacement page for @oldpage. @oldpage will
5610 * be uncharged upon free.
5611 *
5612 * Both pages must be locked, @newpage->mapping must be set up.
5613 */
5614void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
5615{
5616 struct mem_cgroup *memcg;
5617 unsigned int nr_pages;
5618 bool compound;
5619 unsigned long flags;
5620
5621 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
5622 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
5623 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
5624 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
5625 newpage);
5626
5627 if (mem_cgroup_disabled())
5628 return;
5629
5630 /* Page cache replacement: new page already charged? */
5631 if (newpage->mem_cgroup)
5632 return;
5633
5634 /* Swapcache readahead pages can get replaced before being charged */
5635 memcg = oldpage->mem_cgroup;
5636 if (!memcg)
5637 return;
5638
5639 /* Force-charge the new page. The old one will be freed soon */
5640 compound = PageTransHuge(newpage);
5641 nr_pages = compound ? hpage_nr_pages(newpage) : 1;
5642
5643 page_counter_charge(&memcg->memory, nr_pages);
5644 if (do_memsw_account())
5645 page_counter_charge(&memcg->memsw, nr_pages);
5646 css_get_many(&memcg->css, nr_pages);
5647
5648 commit_charge(newpage, memcg, false);
5649
5650 local_irq_save(flags);
5651 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
5652 memcg_check_events(memcg, newpage);
5653 local_irq_restore(flags);
5654}
5655
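/*
 * A hedged usage sketch (illustrative only, derived from the locking
 * rules above): a caller replacing @oldpage with @newpage charges the
 * replacement before the old page is freed:
 *
 *	lock_page(oldpage);
 *	lock_page(newpage);
 *	... hook newpage->mapping up in place of oldpage ...
 *	mem_cgroup_migrate(oldpage, newpage);
 *	unlock_page(newpage);
 *	unlock_page(oldpage);
 */
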
5656DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
5657EXPORT_SYMBOL(memcg_sockets_enabled_key);
5658
5659void mem_cgroup_sk_alloc(struct sock *sk)
5660{
5661 struct mem_cgroup *memcg;
5662
5663 if (!mem_cgroup_sockets_enabled)
5664 return;
5665
5666 /*
5667 * Socket cloning can throw us here with sk_memcg already
5668	 * filled. It won't, however, necessarily happen from
5669	 * process context, so the test for the root memcg based on
5670 * the current task's memcg won't help us in this case.
5671 *
5672 * Respecting the original socket's memcg is a better
5673 * decision in this case.
5674 */
5675 if (sk->sk_memcg) {
5676 BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
5677 css_get(&sk->sk_memcg->css);
5678 return;
5679 }
5680
5681 rcu_read_lock();
5682 memcg = mem_cgroup_from_task(current);
5683 if (memcg == root_mem_cgroup)
5684 goto out;
5685 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
5686 goto out;
5687 if (css_tryget_online(&memcg->css))
5688 sk->sk_memcg = memcg;
5689out:
5690 rcu_read_unlock();
5691}
5692
5693void mem_cgroup_sk_free(struct sock *sk)
5694{
5695 if (sk->sk_memcg)
5696 css_put(&sk->sk_memcg->css);
5697}
5698
5699/**
5700 * mem_cgroup_charge_skmem - charge socket memory
5701 * @memcg: memcg to charge
5702 * @nr_pages: number of pages to charge
5703 *
5704 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
5705 * @memcg's configured limit, %false if the charge had to be forced.
5706 */
5707bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5708{
5709 gfp_t gfp_mask = GFP_KERNEL;
5710
5711 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5712 struct page_counter *fail;
5713
5714 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
5715 memcg->tcpmem_pressure = 0;
5716 return true;
5717 }
5718 page_counter_charge(&memcg->tcpmem, nr_pages);
5719 memcg->tcpmem_pressure = 1;
5720 return false;
5721 }
5722
5723 /* Don't block in the packet receive path */
5724 if (in_softirq())
5725 gfp_mask = GFP_NOWAIT;
5726
5727 this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages);
5728
5729 if (try_charge(memcg, gfp_mask, nr_pages) == 0)
5730 return true;
5731
5732 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
5733 return false;
5734}
5735
5736/**
5737 * mem_cgroup_uncharge_skmem - uncharge socket memory
5738 * @memcg: memcg to uncharge
5739 * @nr_pages: number of pages to uncharge
5740 */
5741void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5742{
5743 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5744 page_counter_uncharge(&memcg->tcpmem, nr_pages);
5745 return;
5746 }
5747
5748 this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages);
5749
5750 page_counter_uncharge(&memcg->memory, nr_pages);
5751 css_put_many(&memcg->css, nr_pages);
5752}
5753
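/*
 * A hedged sketch of the typical caller pattern (the real callers
 * live in net/core; this is illustrative only): a socket that needs
 * more buffer memory charges its memcg and treats a forced charge as
 * memory pressure:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages))
 *		... enter socket memory pressure / back off ...
 *
 * The charge is dropped again with mem_cgroup_uncharge_skmem() when
 * the buffer memory is released.
 */
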
5754static int __init cgroup_memory(char *s)
5755{
5756 char *token;
5757
5758 while ((token = strsep(&s, ",")) != NULL) {
5759 if (!*token)
5760 continue;
5761 if (!strcmp(token, "nosocket"))
5762 cgroup_memory_nosocket = true;
5763 if (!strcmp(token, "nokmem"))
5764 cgroup_memory_nokmem = true;
5765 }
5766 return 0;
5767}
5768__setup("cgroup.memory=", cgroup_memory);
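
/*
 * Boot-time usage (derived from the parser above): options are
 * comma-separated, e.g. "cgroup.memory=nosocket,nokmem" disables both
 * socket memory accounting and kernel memory accounting.
 */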
5769
5770/*
5771 * subsys_initcall() for memory controller.
5772 *
5773 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
5774 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
5775 * basically everything that doesn't depend on a specific mem_cgroup structure
5776 * should be initialized from here.
5777 */
5778static int __init mem_cgroup_init(void)
5779{
5780 int cpu, node;
5781
5782#ifndef CONFIG_SLOB
5783 /*
5784 * Kmem cache creation is mostly done with the slab_mutex held,
5785 * so use a special workqueue to avoid stalling all worker
5786 * threads in case lots of cgroups are created simultaneously.
5787 */
5788 memcg_kmem_cache_create_wq =
5789 alloc_ordered_workqueue("memcg_kmem_cache_create", 0);
5790 BUG_ON(!memcg_kmem_cache_create_wq);
5791#endif
5792
5793 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
5794 memcg_hotplug_cpu_dead);
5795
5796 for_each_possible_cpu(cpu)
5797 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5798 drain_local_stock);
5799
5800 for_each_node(node) {
5801 struct mem_cgroup_tree_per_node *rtpn;
5802
5803 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
5804 node_online(node) ? node : NUMA_NO_NODE);
5805
5806 rtpn->rb_root = RB_ROOT;
5807 spin_lock_init(&rtpn->lock);
5808 soft_limit_tree.rb_tree_per_node[node] = rtpn;
5809 }
5810
5811 return 0;
5812}
5813subsys_initcall(mem_cgroup_init);
5814
5815#ifdef CONFIG_MEMCG_SWAP
5816static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
5817{
5818 while (!atomic_inc_not_zero(&memcg->id.ref)) {
5819 /*
5820		 * The root cgroup cannot be destroyed, so its refcount must
5821 * always be >= 1.
5822 */
5823 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
5824 VM_BUG_ON(1);
5825 break;
5826 }
5827 memcg = parent_mem_cgroup(memcg);
5828 if (!memcg)
5829 memcg = root_mem_cgroup;
5830 }
5831 return memcg;
5832}
5833
5834/**
5835 * mem_cgroup_swapout - transfer a memsw charge to swap
5836 * @page: page whose memsw charge to transfer
5837 * @entry: swap entry to move the charge to
5838 *
5839 * Transfer the memsw charge of @page to @entry.
5840 */
5841void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5842{
5843 struct mem_cgroup *memcg, *swap_memcg;
5844 unsigned short oldid;
5845
5846 VM_BUG_ON_PAGE(PageLRU(page), page);
5847 VM_BUG_ON_PAGE(page_count(page), page);
5848
5849 if (!do_memsw_account())
5850 return;
5851
5852 memcg = page->mem_cgroup;
5853
5854 /* Readahead page, never charged */
5855 if (!memcg)
5856 return;
5857
5858 /*
5859 * In case the memcg owning these pages has been offlined and doesn't
5860 * have an ID allocated to it anymore, charge the closest online
5861 * ancestor for the swap instead and transfer the memory+swap charge.
5862 */
5863 swap_memcg = mem_cgroup_id_get_online(memcg);
5864 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg));
5865 VM_BUG_ON_PAGE(oldid, page);
5866 mem_cgroup_swap_statistics(swap_memcg, true);
5867
5868 page->mem_cgroup = NULL;
5869
5870 if (!mem_cgroup_is_root(memcg))
5871 page_counter_uncharge(&memcg->memory, 1);
5872
5873 if (memcg != swap_memcg) {
5874 if (!mem_cgroup_is_root(swap_memcg))
5875 page_counter_charge(&swap_memcg->memsw, 1);
5876 page_counter_uncharge(&memcg->memsw, 1);
5877 }
5878
5879 /*
5880 * Interrupts should be disabled here because the caller holds the
5881	 * mapping->tree_lock, which is taken with interrupts off. It is
5882	 * important here to have the interrupts disabled because it is the
5883	 * only synchronisation we have for updating the per-CPU variables.
5884 */
5885 VM_BUG_ON(!irqs_disabled());
5886 mem_cgroup_charge_statistics(memcg, page, false, -1);
5887 memcg_check_events(memcg, page);
5888
5889 if (!mem_cgroup_is_root(memcg))
5890 css_put(&memcg->css);
5891}
5892
5893/*
5894 * mem_cgroup_try_charge_swap - try charging a swap entry
5895 * @page: page being added to swap
5896 * @entry: swap entry to charge
5897 *
5898 * Try to charge @entry to the memcg that @page belongs to.
5899 *
5900 * Returns 0 on success, -ENOMEM on failure.
5901 */
5902int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
5903{
5904 struct mem_cgroup *memcg;
5905 struct page_counter *counter;
5906 unsigned short oldid;
5907
5908 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
5909 return 0;
5910
5911 memcg = page->mem_cgroup;
5912
5913 /* Readahead page, never charged */
5914 if (!memcg)
5915 return 0;
5916
5917 memcg = mem_cgroup_id_get_online(memcg);
5918
5919 if (!mem_cgroup_is_root(memcg) &&
5920 !page_counter_try_charge(&memcg->swap, 1, &counter)) {
5921 mem_cgroup_id_put(memcg);
5922 return -ENOMEM;
5923 }
5924
5925 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5926 VM_BUG_ON_PAGE(oldid, page);
5927 mem_cgroup_swap_statistics(memcg, true);
5928
5929 return 0;
5930}
5931
5932/**
5933 * mem_cgroup_uncharge_swap - uncharge a swap entry
5934 * @entry: swap entry to uncharge
5935 *
5936 * Drop the swap charge associated with @entry.
5937 */
5938void mem_cgroup_uncharge_swap(swp_entry_t entry)
5939{
5940 struct mem_cgroup *memcg;
5941 unsigned short id;
5942
5943 if (!do_swap_account)
5944 return;
5945
5946 id = swap_cgroup_record(entry, 0);
5947 rcu_read_lock();
5948 memcg = mem_cgroup_from_id(id);
5949 if (memcg) {
5950 if (!mem_cgroup_is_root(memcg)) {
5951 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5952 page_counter_uncharge(&memcg->swap, 1);
5953 else
5954 page_counter_uncharge(&memcg->memsw, 1);
5955 }
5956 mem_cgroup_swap_statistics(memcg, false);
5957 mem_cgroup_id_put(memcg);
5958 }
5959 rcu_read_unlock();
5960}
5961
5962long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5963{
5964 long nr_swap_pages = get_nr_swap_pages();
5965
5966 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5967 return nr_swap_pages;
5968 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
5969 nr_swap_pages = min_t(long, nr_swap_pages,
5970 READ_ONCE(memcg->swap.limit) -
5971 page_counter_read(&memcg->swap));
5972 return nr_swap_pages;
5973}
5974
5975bool mem_cgroup_swap_full(struct page *page)
5976{
5977 struct mem_cgroup *memcg;
5978
5979 VM_BUG_ON_PAGE(!PageLocked(page), page);
5980
5981 if (vm_swap_full())
5982 return true;
5983 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5984 return false;
5985
5986 memcg = page->mem_cgroup;
5987 if (!memcg)
5988 return false;
5989
5990 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
5991 if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
5992 return true;
5993
5994 return false;
5995}
5996
5997/* for remembering the boot option */
5998#ifdef CONFIG_MEMCG_SWAP_ENABLED
5999static int really_do_swap_account __initdata = 1;
6000#else
6001static int really_do_swap_account __initdata;
6002#endif
6003
6004static int __init enable_swap_account(char *s)
6005{
6006 if (!strcmp(s, "1"))
6007 really_do_swap_account = 1;
6008 else if (!strcmp(s, "0"))
6009 really_do_swap_account = 0;
6010 return 1;
6011}
6012__setup("swapaccount=", enable_swap_account);
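
/*
 * Boot-time usage (derived from the parser above): "swapaccount=0"
 * disables swap accounting and "swapaccount=1" enables it; the
 * default depends on CONFIG_MEMCG_SWAP_ENABLED.
 */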
6013
6014static u64 swap_current_read(struct cgroup_subsys_state *css,
6015 struct cftype *cft)
6016{
6017 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6018
6019 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
6020}
6021
6022static int swap_max_show(struct seq_file *m, void *v)
6023{
6024 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
6025 unsigned long max = READ_ONCE(memcg->swap.limit);
6026
6027 if (max == PAGE_COUNTER_MAX)
6028 seq_puts(m, "max\n");
6029 else
6030 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
6031
6032 return 0;
6033}
6034
6035static ssize_t swap_max_write(struct kernfs_open_file *of,
6036 char *buf, size_t nbytes, loff_t off)
6037{
6038 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6039 unsigned long max;
6040 int err;
6041
6042 buf = strstrip(buf);
6043 err = page_counter_memparse(buf, "max", &max);
6044 if (err)
6045 return err;
6046
6047 mutex_lock(&memcg_limit_mutex);
6048 err = page_counter_limit(&memcg->swap, max);
6049 mutex_unlock(&memcg_limit_mutex);
6050 if (err)
6051 return err;
6052
6053 return nbytes;
6054}
6055
6056static struct cftype swap_files[] = {
6057 {
6058 .name = "swap.current",
6059 .flags = CFTYPE_NOT_ON_ROOT,
6060 .read_u64 = swap_current_read,
6061 },
6062 {
6063 .name = "swap.max",
6064 .flags = CFTYPE_NOT_ON_ROOT,
6065 .seq_show = swap_max_show,
6066 .write = swap_max_write,
6067 },
6068 { } /* terminate */
6069};
6070
6071static struct cftype memsw_cgroup_files[] = {
6072 {
6073 .name = "memsw.usage_in_bytes",
6074 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
6075 .read_u64 = mem_cgroup_read_u64,
6076 },
6077 {
6078 .name = "memsw.max_usage_in_bytes",
6079 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
6080 .write = mem_cgroup_reset,
6081 .read_u64 = mem_cgroup_read_u64,
6082 },
6083 {
6084 .name = "memsw.limit_in_bytes",
6085 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
6086 .write = mem_cgroup_write,
6087 .read_u64 = mem_cgroup_read_u64,
6088 },
6089 {
6090 .name = "memsw.failcnt",
6091 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
6092 .write = mem_cgroup_reset,
6093 .read_u64 = mem_cgroup_read_u64,
6094 },
6095 { }, /* terminate */
6096};
6097
6098static int __init mem_cgroup_swap_init(void)
6099{
6100 if (!mem_cgroup_disabled() && really_do_swap_account) {
6101 do_swap_account = 1;
6102 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
6103 swap_files));
6104 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
6105 memsw_cgroup_files));
6106 }
6107 return 0;
6108}
6109subsys_initcall(mem_cgroup_swap_init);
6110
6111#endif /* CONFIG_MEMCG_SWAP */