/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

#define MEM_CGROUP_RECLAIM_RETRIES	5

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
}

static const char *const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

/*
 * Cgroups above their limits are maintained in an RB-tree, independent of
 * their hierarchy representation.
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	struct rb_node *rb_rightmost;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on eventfd to send a notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or when the cgroup is removed. This callback must be
	 * set if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

#ifndef CONFIG_SLOB
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using the cgroup id for this is that it works
 * better in sparse environments, where we have a lot of memcgs but only
 * a few of them are kmem-limited. If we had, for instance, 200 memcgs and
 * none but the 200th were kmem-limited, we'd need a 200-entry array for
 * that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different from 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);

struct workqueue_struct *memcg_kmem_cache_wq;

#endif /* !CONFIG_SLOB */

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it should only be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = READ_ONCE(page->mem_cgroup);
	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);

	return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
	return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

	return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;
	bool rightmost = true;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess) {
			p = &(*p)->rb_left;
			rightmost = false;
		}

		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount.
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}

	if (rightmost)
		mctz->rb_rightmost = &mz->tree_node;

	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;

	if (&mz->tree_node == mctz->rb_rightmost)
		mctz->rb_rightmost = rb_prev(&mz->tree_node);

	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

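/*
 * Return the number of pages by which @memcg's usage exceeds its soft
 * limit, or 0 if it is within the limit.
 */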
static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	mctz = soft_limit_tree_from_page(page);
	if (!mctz)
		return;
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_nodeinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on the RB-tree or
		 * the memcg is over its soft limit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	if (!mctz->rb_rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(mctz->rb_rightmost,
		      struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now, but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

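/* Read a memcg's aggregated event counter (flushed from the per-cpu counters). */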
static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
				      int event)
{
	return atomic_long_read(&memcg->events[event]);
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool compound, int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (PageAnon(page))
		__mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
	else {
		__mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
		if (PageSwapBacked(page))
			__mod_memcg_state(memcg, NR_SHMEM, nr_pages);
	}

	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		__mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
	}

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__count_memcg_events(memcg, PGPGIN, 1);
	else {
		__count_memcg_events(memcg, PGPGOUT, 1);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat_cpu->nr_page_events, nr_pages);
}

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
	unsigned long nr = 0;
	enum lru_list lru;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		nr += mem_cgroup_get_lru_size(lruvec, lru);
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
					     unsigned int lru_mask)
{
	unsigned long nr = 0;
	int nid;

	for_each_node_state(nid, N_MEMORY)
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
}

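/*
 * Returns true once per @target's worth of page events, i.e. when the
 * per-cpu event counter has advanced past the next sampling target,
 * and arms the following target.
 */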
static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat_cpu->nr_page_events);
	next = __this_cpu_read(memcg->stat_cpu->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)(next - val) < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat_cpu->targets[target], next);
		return true;
	}
	return false;
}

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

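/*
 * Return the memcg of @mm's owner with a css reference held, falling
 * back to the root memcg when there is no mm or no owner memcg.
 * Never returns NULL.
 */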
static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same node and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference.  The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

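/*
 * Clear any cached reclaim iterator positions in the ancestors of
 * @dead_memcg so that shared hierarchy walks don't keep a stale
 * pointer to the dying cgroup.
 */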
static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;
	int i;

	while ((memcg = parent_mem_cgroup(memcg))) {
		for_each_node(nid) {
			mz = mem_cgroup_nodeinfo(memcg, nid);
			for (i = 0; i <= DEF_PRIORITY; i++) {
				iter = &mz->iter[i];
				cmpxchg(&iter->position,
					dead_memcg, NULL);
			}
		}
	}
}

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, 0, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @pgdat: pgdat of the page
 *
 * This function is only safe when following the LRU page isolation
 * and putback protocol: the LRU lock must be held, and the page must
 * either be PageLRU() or the caller must have isolated/allocated it.
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->lruvec;
		goto out;
	}

	memcg = page->mem_cgroup;
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */
	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_page_nodeinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

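/* Does @task belong to @memcg or to one of its descendants? */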
bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	struct task_struct *p;
	bool ret;

	p = find_lock_task_mm(task);
	if (p) {
		task_memcg = get_mem_cgroup_from_mm(p->mm);
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the oom
		 * killer still needs to detect if they have already been oom
		 * killed to prevent needlessly killing additional tasks.
		 */
		rcu_read_lock();
		task_memcg = mem_cgroup_from_task(task);
		css_get(&task_memcg->css);
		rcu_read_unlock();
	}
	ret = mem_cgroup_is_descendant(task_memcg, memcg);
	css_put(&task_memcg->css);
	return ret;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.limit);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.limit);
		if (count <= limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}

/*
 * A routine for checking whether "memcg" is under move_account() or not.
 *
 * Checking whether a cgroup is mc.from, mc.to, or under the hierarchy of
 * a moving cgroup. This is for waiting at high memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike the task_move routines, we access mc.to and mc.from not under
	 * mutual exclusion by cgroup_mutex; here we take the spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

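/*
 * If a charge-moving operation covering @memcg is in flight, sleep until
 * it completes so that charges aren't accounted twice. Returns true if
 * we waited.
 */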
static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

static const unsigned int memcg1_stats[] = {
	MEMCG_CACHE,
	MEMCG_RSS,
	MEMCG_RSS_HUGE,
	NR_SHMEM,
	NR_FILE_MAPPED,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	MEMCG_SWAP,
};

static const char *const memcg1_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"shmem",
	"mapped_file",
	"dirty",
	"writeback",
	"swap",
};

#define K(x) ((x) << (PAGE_SHIFT-10))
/**
 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled.
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	struct mem_cgroup *iter;
	unsigned int i;

	rcu_read_lock();

	if (p) {
		pr_info("Task in ");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
		pr_cont(" killed as a result of limit of ");
	} else {
		pr_info("Memory limit reached of cgroup ");
	}

	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont("\n");

	rcu_read_unlock();

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)memcg->memory.limit), memcg->memory.failcnt);
	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memsw)),
		K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
	pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->kmem)),
		K((u64)memcg->kmem.limit), memcg->kmem.failcnt);

	for_each_mem_cgroup_tree(iter, memcg) {
		pr_info("Memory cgroup stats for ");
		pr_cont_cgroup_path(iter->css.cgroup);
		pr_cont(":");

		for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
			if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account)
				continue;
			pr_cont(" %s:%luKB", memcg1_stat_names[i],
				K(memcg_page_state(iter, memcg1_stats[i])));
		}

		for (i = 0; i < NR_LRU_LISTS; i++)
			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));

		pr_cont("\n");
	}
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	unsigned long limit;

	limit = memcg->memory.limit;
	if (mem_cgroup_swappiness(memcg)) {
		unsigned long memsw_limit;
		unsigned long swap_limit;

		memsw_limit = memcg->memsw.limit;
		swap_limit = memcg->swap.limit;
		swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
		limit = min(limit + swap_limit, memsw_limit);
	}
	return limit;
}

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret;

	mutex_lock(&oom_lock);
	ret = out_of_memory(&oc);
	mutex_unlock(&oom_lock);
	return ret;
}

#if MAX_NUMNODES > 1

/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap: specify true here if the user wants file-only information.
 *
 * This function returns true if the specified memcg contains any
 * reclaimable pages on @nid.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
					     int nid, bool noswap)
{
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
		return true;
	return false;
}

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_MEMORY];

	for_each_node_mask(nid, node_states[N_MEMORY]) {
		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Selecting a node where we start reclaim from. Because all we need is to
 * reduce the usage counter, starting from anywhere is OK. Considering
 * memory reclaim from the current node, there are pros and cons.
 *
 * Freeing memory from the current node means freeing memory from a node which
 * we'll use or we've used. So, it may make the LRU bad. And if several threads
 * hit limits, they will see contention on a node. But freeing from a remote
 * node means higher costs for memory reclaim because of memory latency.
 *
 * Now, we use round-robin. A better algorithm is welcome.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node_in(node, memcg->scan_nodes);
	/*
	 * mem_cgroup_may_update_nodemask might have seen no reclaimable pages
	 * last time it really checked all the LRUs due to rate limiting.
	 * Fall back to the current node in that case for simplicity.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	memcg->last_scanned_node = node;
	return node;
}
#else
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	return 0;
}
#endif

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
		.priority = 0,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive, so we
				 * don't reclaim too much, nor so little that
				 * we keep coming back to reclaim from this
				 * cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
					pgdat, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * This subtree of our hierarchy is already locked,
			 * so we cannot take the lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree, so we have
		 * to clean up what we already set up, up to the failing
		 * subtree.
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
	 */
	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_entry_t	wait;
};

static int memcg_oom_wake_function(wait_queue_entry_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM.  This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	if (!current->memcg_may_oom || order > PAGE_ALLOC_COSTLY_ORDER)
		return;
	/*
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * Also, the caller may handle a failed allocation gracefully
	 * (like optional page cache readahead) and so an OOM killer
	 * invocation might not even be necessary.
	 *
	 * That's why we don't do anything here except remember the
	 * OOM context and then deal with it at the end of the page
	 * fault when the stack is unwound, the locks are released,
	 * and when we know whether the fault was overall successful.
	 */
	css_get(&memcg->css);
	current->memcg_in_oom = memcg;
	current->memcg_oom_gfp_mask = mask;
	current->memcg_oom_order = order;
}

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
 * @handle: actually kill/wait or just clean up the OOM state
 *
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
 *
 * Memcg supports userspace OOM handling where failed allocations must
 * sleep on a waitqueue until the userspace task resolves the
 * situation.  Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
 * the end of the page fault to complete the OOM handling.
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
 * completed, %false otherwise.
 */
bool mem_cgroup_oom_synchronize(bool handle)
{
	struct mem_cgroup *memcg = current->memcg_in_oom;
	struct oom_wait_info owait;
	bool locked;

	/* OOM is global, do not handle */
	if (!memcg)
		return false;

	if (!handle)
		goto cleanup;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.entry);

	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
					 current->memcg_oom_order);
	} else {
		schedule();
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges.  Wake any sleepers explicitly.
		 */
		memcg_oom_recover(memcg);
	}
cleanup:
	current->memcg_in_oom = NULL;
	css_put(&memcg->css);
	return true;
}

/**
 * lock_page_memcg - lock a page->mem_cgroup binding
 * @page: the page
 *
 * This function protects unlocked LRU pages from being moved to
 * another cgroup.
 *
 * It ensures lifetime of the returned memcg. Caller is responsible
 * for the lifetime of the page; __unlock_page_memcg() is available
 * when @page might get freed inside the locked section.
 */
struct mem_cgroup *lock_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;

	/*
	 * The RCU lock is held throughout the transaction.  The fast
	 * path can get away without acquiring the memcg->move_lock
	 * because page moving starts with an RCU grace period.
	 *
	 * The RCU lock also protects the memcg from being freed when
	 * the page state that is going to change is the only thing
	 * preventing the page itself from being freed. E.g. writeback
	 * doesn't hold a page reference and relies on PG_writeback to
	 * keep off truncation, migration and so forth.
	 */
	rcu_read_lock();

	if (mem_cgroup_disabled())
		return NULL;
again:
	memcg = page->mem_cgroup;
	if (unlikely(!memcg))
		return NULL;

	if (atomic_read(&memcg->moving_account) <= 0)
		return memcg;

	spin_lock_irqsave(&memcg->move_lock, flags);
	if (memcg != page->mem_cgroup) {
		spin_unlock_irqrestore(&memcg->move_lock, flags);
		goto again;
	}

	/*
	 * When charge migration first begins, we can have locked and
	 * unlocked page stat updates happening concurrently.  Track
	 * the task who has the lock for unlock_page_memcg().
	 */
	memcg->move_lock_task = current;
	memcg->move_lock_flags = flags;

	return memcg;
}
EXPORT_SYMBOL(lock_page_memcg);

/**
 * __unlock_page_memcg - unlock and unpin a memcg
 * @memcg: the memcg
 *
 * Unlock and unpin a memcg returned by lock_page_memcg().
 */
void __unlock_page_memcg(struct mem_cgroup *memcg)
{
	if (memcg && memcg->move_lock_task == current) {
		unsigned long flags = memcg->move_lock_flags;

		memcg->move_lock_task = NULL;
		memcg->move_lock_flags = 0;

		spin_unlock_irqrestore(&memcg->move_lock, flags);
	}

	rcu_read_unlock();
}

/**
 * unlock_page_memcg - unlock a page->mem_cgroup binding
 * @page: the page
 */
void unlock_page_memcg(struct page *page)
{
	__unlock_page_memcg(page->mem_cgroup);
}
EXPORT_SYMBOL(unlock_page_memcg);

struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* this is never the root cgroup */
	unsigned int nr_pages;
	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);

/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock.  Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;
	bool ret = false;

	if (nr_pages > MEMCG_CHARGE_BATCH)
		return ret;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
		stock->nr_pages -= nr_pages;
		ret = true;
	}

	local_irq_restore(flags);

	return ret;
}

/*
 * Return the charges cached in the percpu stock to the page counters
 * and reset the cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (stock->nr_pages) {
		page_counter_uncharge(&old->memory, stock->nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&old->memsw, stock->nr_pages);
		css_put_many(&old->css, stock->nr_pages);
		stock->nr_pages = 0;
	}
	stock->cached = NULL;
}

static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	/*
	 * The only protection from memory hotplug vs. drain_stock races is
	 * that we always operate on the local CPU stock here with IRQ
	 * disabled.
	 */
	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

	local_irq_restore(flags);
}

/*
 * Cache charges (nr_pages) in the local per-cpu area,
 * to be consumed by consume_stock() later.
 */
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (stock->cached != memcg) { /* reset if necessary */
		drain_stock(stock);
		stock->cached = memcg;
	}
	stock->nr_pages += nr_pages;

	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
		drain_stock(stock);

	local_irq_restore(flags);
}

/*
 * Drain all per-CPU charge caches for the given root_memcg and the
 * subtree of the hierarchy under it.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid adding more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/*
	 * Notify other cpus that the system-wide "drain" is running.
	 * We do not care about races with cpu hotplug because cpu down
	 * as well as workers from this path always operate on the local
	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
	 */
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;

		memcg = stock->cached;
		if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css))
			continue;
		if (!mem_cgroup_is_descendant(memcg, root_memcg)) {
			css_put(&memcg->css);
			continue;
		}
		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
		css_put(&memcg->css);
	}
	put_cpu();
	mutex_unlock(&percpu_charge_mutex);
}

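/*
 * CPU hotplug callback: drain the dead CPU's charge stock and fold its
 * per-cpu statistics and event counters into the atomic totals of every
 * memcg.
 */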
static int memcg_hotplug_cpu_dead(unsigned int cpu)
{
	struct memcg_stock_pcp *stock;
	struct mem_cgroup *memcg;

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);

	for_each_mem_cgroup(memcg) {
		int i;

		for (i = 0; i < MEMCG_NR_STAT; i++) {
			int nid;
			long x;

			x = this_cpu_xchg(memcg->stat_cpu->count[i], 0);
			if (x)
				atomic_long_add(x, &memcg->stat[i]);

			if (i >= NR_VM_NODE_STAT_ITEMS)
				continue;

			for_each_node(nid) {
				struct mem_cgroup_per_node *pn;

				pn = mem_cgroup_nodeinfo(memcg, nid);
				x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
				if (x)
					atomic_long_add(x, &pn->lruvec_stat[i]);
			}
		}

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
			long x;

			x = this_cpu_xchg(memcg->stat_cpu->events[i], 0);
			if (x)
				atomic_long_add(x, &memcg->events[i]);
		}
	}

	return 0;
}

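/*
 * Walk up the hierarchy and reclaim from every memcg whose usage is
 * above its high limit.
 */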
static void reclaim_high(struct mem_cgroup *memcg,
			 unsigned int nr_pages,
			 gfp_t gfp_mask)
{
	do {
		if (page_counter_read(&memcg->memory) <= memcg->high)
			continue;
		memcg_memory_event(memcg, MEMCG_HIGH);
		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
	} while ((memcg = parent_mem_cgroup(memcg)));
}

static void high_work_func(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, high_work);
	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
}

/*
 * Scheduled by try_charge() to be executed from the userland return path
 * and reclaims memory over the high limit.
 */
void mem_cgroup_handle_over_high(void)
{
	unsigned int nr_pages = current->memcg_nr_pages_over_high;
	struct mem_cgroup *memcg;

	if (likely(!nr_pages))
		return;

	memcg = get_mem_cgroup_from_mm(current->mm);
	reclaim_high(memcg, nr_pages, GFP_KERNEL);
	css_put(&memcg->css);
	current->memcg_nr_pages_over_high = 0;
}

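/*
 * The charge slow path: try the page counters, falling back to direct
 * reclaim and eventually the OOM path when the limit is hit. Charges
 * are batched in the per-cpu stock to amortize page_counter operations.
 */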
1892static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
1893 unsigned int nr_pages)
1894{
1895 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
1896 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1897 struct mem_cgroup *mem_over_limit;
1898 struct page_counter *counter;
1899 unsigned long nr_reclaimed;
1900 bool may_swap = true;
1901 bool drained = false;
1902
1903 if (mem_cgroup_is_root(memcg))
1904 return 0;
1905retry:
1906 if (consume_stock(memcg, nr_pages))
1907 return 0;
1908
1909 if (!do_memsw_account() ||
1910 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
1911 if (page_counter_try_charge(&memcg->memory, batch, &counter))
1912 goto done_restock;
1913 if (do_memsw_account())
1914 page_counter_uncharge(&memcg->memsw, batch);
1915 mem_over_limit = mem_cgroup_from_counter(counter, memory);
1916 } else {
1917 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
1918 may_swap = false;
1919 }
1920
1921 if (batch > nr_pages) {
1922 batch = nr_pages;
1923 goto retry;
1924 }
1925
1926 /*
1927 * Unlike in global OOM situations, memcg is not in a physical
1928 * memory shortage. Allow dying and OOM-killed tasks to
1929 * bypass the last charges so that they can exit quickly and
1930 * free their memory.
1931 */
1932 if (unlikely(tsk_is_oom_victim(current) ||
1933 fatal_signal_pending(current) ||
1934 current->flags & PF_EXITING))
1935 goto force;
1936
1937 /*
1938 * Prevent unbounded recursion when reclaim operations need to
1939 * allocate memory. This might exceed the limits temporarily,
1940 * but we prefer facilitating memory reclaim and getting back
1941 * under the limit over triggering OOM kills in these cases.
1942 */
1943 if (unlikely(current->flags & PF_MEMALLOC))
1944 goto force;
1945
1946 if (unlikely(task_in_memcg_oom(current)))
1947 goto nomem;
1948
1949 if (!gfpflags_allow_blocking(gfp_mask))
1950 goto nomem;
1951
1952 memcg_memory_event(mem_over_limit, MEMCG_MAX);
1953
1954 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
1955 gfp_mask, may_swap);
1956
1957 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
1958 goto retry;
1959
1960 if (!drained) {
1961 drain_all_stock(mem_over_limit);
1962 drained = true;
1963 goto retry;
1964 }
1965
1966 if (gfp_mask & __GFP_NORETRY)
1967 goto nomem;
1968 /*
1969 * Even though the limit is exceeded at this point, reclaim
1970 * may have been able to free some pages. Retry the charge
1971 * before killing the task.
1972 *
1973 * Only for regular pages, though: huge pages are rather
1974 * unlikely to succeed so close to the limit, and we fall back
1975 * to regular pages anyway in case of failure.
1976 */
1977 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
1978 goto retry;
1979 /*
1980	 * During task move, charges can be doubly counted, so it's better
1981	 * to wait until the end of the task move if one is in progress.
1982 */
1983 if (mem_cgroup_wait_acct_move(mem_over_limit))
1984 goto retry;
1985
1986 if (nr_retries--)
1987 goto retry;
1988
1989 if (gfp_mask & __GFP_NOFAIL)
1990 goto force;
1991
1992 if (fatal_signal_pending(current))
1993 goto force;
1994
1995 memcg_memory_event(mem_over_limit, MEMCG_OOM);
1996
1997 mem_cgroup_oom(mem_over_limit, gfp_mask,
1998 get_order(nr_pages * PAGE_SIZE));
1999nomem:
2000 if (!(gfp_mask & __GFP_NOFAIL))
2001 return -ENOMEM;
2002force:
2003 /*
2004 * The allocation either can't fail or will lead to more memory
2005 * being freed very soon. Allow memory usage go over the limit
2006 * temporarily by force charging it.
2007 */
2008 page_counter_charge(&memcg->memory, nr_pages);
2009 if (do_memsw_account())
2010 page_counter_charge(&memcg->memsw, nr_pages);
2011 css_get_many(&memcg->css, nr_pages);
2012
2013 return 0;
2014
2015done_restock:
2016 css_get_many(&memcg->css, batch);
2017 if (batch > nr_pages)
2018 refill_stock(memcg, batch - nr_pages);
2019
2020 /*
2021 * If the hierarchy is above the normal consumption range, schedule
2022	 * reclaim on returning to userland.  We could perform reclaim here
2023	 * if __GFP_RECLAIM is set, but let's always punt for simplicity and so that
2024 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2025 * not recorded as it most likely matches current's and won't
2026 * change in the meantime. As high limit is checked again before
2027 * reclaim, the cost of mismatch is negligible.
2028 */
2029 do {
2030 if (page_counter_read(&memcg->memory) > memcg->high) {
2031 /* Don't bother a random interrupted task */
2032 if (in_interrupt()) {
2033 schedule_work(&memcg->high_work);
2034 break;
2035 }
2036 current->memcg_nr_pages_over_high += batch;
2037 set_notify_resume(current);
2038 break;
2039 }
2040 } while ((memcg = parent_mem_cgroup(memcg)));
2041
2042 return 0;
2043}
2044
2045static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2046{
2047 if (mem_cgroup_is_root(memcg))
2048 return;
2049
2050 page_counter_uncharge(&memcg->memory, nr_pages);
2051 if (do_memsw_account())
2052 page_counter_uncharge(&memcg->memsw, nr_pages);
2053
2054 css_put_many(&memcg->css, nr_pages);
2055}
2056
2057static void lock_page_lru(struct page *page, int *isolated)
2058{
2059 struct zone *zone = page_zone(page);
2060
2061 spin_lock_irq(zone_lru_lock(zone));
2062 if (PageLRU(page)) {
2063 struct lruvec *lruvec;
2064
2065 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
2066 ClearPageLRU(page);
2067 del_page_from_lru_list(page, lruvec, page_lru(page));
2068 *isolated = 1;
2069 } else
2070 *isolated = 0;
2071}
2072
2073static void unlock_page_lru(struct page *page, int isolated)
2074{
2075 struct zone *zone = page_zone(page);
2076
2077 if (isolated) {
2078 struct lruvec *lruvec;
2079
2080 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
2081 VM_BUG_ON_PAGE(PageLRU(page), page);
2082 SetPageLRU(page);
2083 add_page_to_lru_list(page, lruvec, page_lru(page));
2084 }
2085 spin_unlock_irq(zone_lru_lock(zone));
2086}
2087
2088static void commit_charge(struct page *page, struct mem_cgroup *memcg,
2089 bool lrucare)
2090{
2091 int isolated;
2092
2093 VM_BUG_ON_PAGE(page->mem_cgroup, page);
2094
2095 /*
2096	 * In some cases, e.g. SwapCache and FUSE (splice_buf->radixtree), the
2097	 * page may already be on another mem_cgroup's LRU. Take care of it.
2098 */
2099 if (lrucare)
2100 lock_page_lru(page, &isolated);
2101
2102 /*
2103 * Nobody should be changing or seriously looking at
2104 * page->mem_cgroup at this point:
2105 *
2106 * - the page is uncharged
2107 *
2108 * - the page is off-LRU
2109 *
2110 * - an anonymous fault has exclusive page access, except for
2111 * a locked page table
2112 *
2113 * - a page cache insertion, a swapin fault, or a migration
2114 * have the page locked
2115 */
2116 page->mem_cgroup = memcg;
2117
2118 if (lrucare)
2119 unlock_page_lru(page, isolated);
2120}
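
/*
 * A minimal sketch of the charging protocol implemented by try_charge(),
 * commit_charge() and cancel_charge() above (the public entry points
 * appear later in this file); error handling and locking are elided:
 *
 *	if (try_charge(memcg, gfp_mask, nr_pages))	// reserve counters
 *		return -ENOMEM;
 *	...prepare the page...
 *	commit_charge(page, memcg, lrucare);	// bind page->mem_cgroup
 *
 *	// or, if the operation fails after the reservation:
 *	cancel_charge(memcg, nr_pages);		// give the charge back
 */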
2121
2122#ifndef CONFIG_SLOB
2123static int memcg_alloc_cache_id(void)
2124{
2125 int id, size;
2126 int err;
2127
2128 id = ida_simple_get(&memcg_cache_ida,
2129 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2130 if (id < 0)
2131 return id;
2132
2133 if (id < memcg_nr_cache_ids)
2134 return id;
2135
2136 /*
2137 * There's no space for the new id in memcg_caches arrays,
2138 * so we have to grow them.
2139 */
2140 down_write(&memcg_cache_ids_sem);
2141
2142 size = 2 * (id + 1);
2143 if (size < MEMCG_CACHES_MIN_SIZE)
2144 size = MEMCG_CACHES_MIN_SIZE;
2145 else if (size > MEMCG_CACHES_MAX_SIZE)
2146 size = MEMCG_CACHES_MAX_SIZE;
2147
2148 err = memcg_update_all_caches(size);
2149 if (!err)
2150 err = memcg_update_all_list_lrus(size);
2151 if (!err)
2152 memcg_nr_cache_ids = size;
2153
2154 up_write(&memcg_cache_ids_sem);
2155
2156 if (err) {
2157 ida_simple_remove(&memcg_cache_ida, id);
2158 return err;
2159 }
2160 return id;
2161}
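
/*
 * Worked example of the growth rule above: with memcg_nr_cache_ids == 32,
 * allocating id 32 grows the per-memcg arrays to 2 * (32 + 1) = 66
 * entries, clamped to [MEMCG_CACHES_MIN_SIZE, MEMCG_CACHES_MAX_SIZE], so
 * the arrays are resized only O(log n) times as ids are handed out.
 */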
2162
2163static void memcg_free_cache_id(int id)
2164{
2165 ida_simple_remove(&memcg_cache_ida, id);
2166}
2167
2168struct memcg_kmem_cache_create_work {
2169 struct mem_cgroup *memcg;
2170 struct kmem_cache *cachep;
2171 struct work_struct work;
2172};
2173
2174static void memcg_kmem_cache_create_func(struct work_struct *w)
2175{
2176 struct memcg_kmem_cache_create_work *cw =
2177 container_of(w, struct memcg_kmem_cache_create_work, work);
2178 struct mem_cgroup *memcg = cw->memcg;
2179 struct kmem_cache *cachep = cw->cachep;
2180
2181 memcg_create_kmem_cache(memcg, cachep);
2182
2183 css_put(&memcg->css);
2184 kfree(cw);
2185}
2186
2187/*
2188 * Enqueue the creation of a per-memcg kmem_cache.
2189 */
2190static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2191 struct kmem_cache *cachep)
2192{
2193 struct memcg_kmem_cache_create_work *cw;
2194
2195 cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
2196 if (!cw)
2197 return;
2198
2199 css_get(&memcg->css);
2200
2201 cw->memcg = memcg;
2202 cw->cachep = cachep;
2203 INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
2204
2205 queue_work(memcg_kmem_cache_wq, &cw->work);
2206}
2207
2208static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2209 struct kmem_cache *cachep)
2210{
2211 /*
2212 * We need to stop accounting when we kmalloc, because if the
2213 * corresponding kmalloc cache is not yet created, the first allocation
2214 * in __memcg_schedule_kmem_cache_create will recurse.
2215 *
2216 * However, it is better to enclose the whole function. Depending on
2217 * the debugging options enabled, INIT_WORK(), for instance, can
2218	 * trigger an allocation. This, too, will make us recurse. Because at
2219 * this point we can't allow ourselves back into memcg_kmem_get_cache,
2220 * the safest choice is to do it like this, wrapping the whole function.
2221 */
2222 current->memcg_kmem_skip_account = 1;
2223 __memcg_schedule_kmem_cache_create(memcg, cachep);
2224 current->memcg_kmem_skip_account = 0;
2225}
2226
2227static inline bool memcg_kmem_bypass(void)
2228{
2229 if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
2230 return true;
2231 return false;
2232}
2233
2234/**
2235 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
2236 * @cachep: the original global kmem cache
2237 *
2238 * Return the kmem_cache we're supposed to use for a slab allocation.
2239 * We try to use the current memcg's version of the cache.
2240 *
2241 * If the cache does not exist yet, i.e. we are the first user of it, we
2242 * create it asynchronously in a workqueue and let the current allocation
2243 * go through with the original cache.
2244 *
2245 * This function takes a reference to the cache it returns to ensure it
2246 * won't get destroyed while we are working with it. Once the caller is
2247 * done with it, memcg_kmem_put_cache() must be called to release the
2248 * reference.
2249 */
2250struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
2251{
2252 struct mem_cgroup *memcg;
2253 struct kmem_cache *memcg_cachep;
2254 int kmemcg_id;
2255
2256 VM_BUG_ON(!is_root_cache(cachep));
2257
2258 if (memcg_kmem_bypass())
2259 return cachep;
2260
2261 if (current->memcg_kmem_skip_account)
2262 return cachep;
2263
2264 memcg = get_mem_cgroup_from_mm(current->mm);
2265 kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2266 if (kmemcg_id < 0)
2267 goto out;
2268
2269 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
2270 if (likely(memcg_cachep))
2271 return memcg_cachep;
2272
2273 /*
2274 * If we are in a safe context (can wait, and not in interrupt
2275	 * context), we could be predictable and return right away.
2276 * This would guarantee that the allocation being performed
2277 * already belongs in the new cache.
2278 *
2279	 * However, there are some clashes that can arise from locking.
2280 * For instance, because we acquire the slab_mutex while doing
2281 * memcg_create_kmem_cache, this means no further allocation
2282 * could happen with the slab_mutex held. So it's better to
2283 * defer everything.
2284 */
2285 memcg_schedule_kmem_cache_create(memcg, cachep);
2286out:
2287 css_put(&memcg->css);
2288 return cachep;
2289}
2290
2291/**
2292 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
2293 * @cachep: the cache returned by memcg_kmem_get_cache
2294 */
2295void memcg_kmem_put_cache(struct kmem_cache *cachep)
2296{
2297 if (!is_root_cache(cachep))
2298 css_put(&cachep->memcg_params.memcg->css);
2299}
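
/*
 * Illustrative pairing of the two helpers above (a sketch, not the exact
 * slab fast path): every successful memcg_kmem_get_cache() must be
 * matched with a memcg_kmem_put_cache() once the allocation is done:
 *
 *	struct kmem_cache *s = memcg_kmem_get_cache(cachep);
 *	void *obj = kmem_cache_alloc(s, gfp);	// charged to s's memcg
 *	memcg_kmem_put_cache(s);		// drop the css reference
 */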
2300
2301/**
2302 * memcg_kmem_charge_memcg: charge a kmem page
2303 * @page: page to charge
2304 * @gfp: reclaim mode
2305 * @order: allocation order
2306 * @memcg: memory cgroup to charge
2307 *
2308 * Returns 0 on success, an error code on failure.
2309 */
2310int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2311 struct mem_cgroup *memcg)
2312{
2313 unsigned int nr_pages = 1 << order;
2314 struct page_counter *counter;
2315 int ret;
2316
2317 ret = try_charge(memcg, gfp, nr_pages);
2318 if (ret)
2319 return ret;
2320
2321 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2322 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2323 cancel_charge(memcg, nr_pages);
2324 return -ENOMEM;
2325 }
2326
2327 page->mem_cgroup = memcg;
2328
2329 return 0;
2330}
2331
2332/**
2333 * memcg_kmem_charge: charge a kmem page to the current memory cgroup
2334 * @page: page to charge
2335 * @gfp: reclaim mode
2336 * @order: allocation order
2337 *
2338 * Returns 0 on success, an error code on failure.
2339 */
2340int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
2341{
2342 struct mem_cgroup *memcg;
2343 int ret = 0;
2344
2345 if (memcg_kmem_bypass())
2346 return 0;
2347
2348 memcg = get_mem_cgroup_from_mm(current->mm);
2349 if (!mem_cgroup_is_root(memcg)) {
2350 ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
2351 if (!ret)
2352 __SetPageKmemcg(page);
2353 }
2354 css_put(&memcg->css);
2355 return ret;
2356}

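/*
 * Sketch of the main caller (the generic page allocator, assuming the
 * ~4.x layout): __GFP_ACCOUNT allocations are charged right after a page
 * is obtained, and the page is given back if the charge fails:
 *
 *	if (memcg_kmem_enabled() && page && (gfp & __GFP_ACCOUNT) &&
 *	    memcg_kmem_charge(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 */
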
2357/**
2358 * memcg_kmem_uncharge: uncharge a kmem page
2359 * @page: page to uncharge
2360 * @order: allocation order
2361 */
2362void memcg_kmem_uncharge(struct page *page, int order)
2363{
2364 struct mem_cgroup *memcg = page->mem_cgroup;
2365 unsigned int nr_pages = 1 << order;
2366
2367 if (!memcg)
2368 return;
2369
2370 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
2371
2372 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2373 page_counter_uncharge(&memcg->kmem, nr_pages);
2374
2375 page_counter_uncharge(&memcg->memory, nr_pages);
2376 if (do_memsw_account())
2377 page_counter_uncharge(&memcg->memsw, nr_pages);
2378
2379 page->mem_cgroup = NULL;
2380
2381 /* slab pages do not have PageKmemcg flag set */
2382 if (PageKmemcg(page))
2383 __ClearPageKmemcg(page);
2384
2385 css_put_many(&memcg->css, nr_pages);
2386}
2387#endif /* !CONFIG_SLOB */
2388
2389#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2390
2391/*
2392 * Because tail pages are not marked as "used", mark them now. We're under
2393 * zone_lru_lock, and migration entries are set up in all page mappings.
2394 */
2395void mem_cgroup_split_huge_fixup(struct page *head)
2396{
2397 int i;
2398
2399 if (mem_cgroup_disabled())
2400 return;
2401
2402 for (i = 1; i < HPAGE_PMD_NR; i++)
2403 head[i].mem_cgroup = head->mem_cgroup;
2404
2405 __mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR);
2406}
2407#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2408
2409#ifdef CONFIG_MEMCG_SWAP
2410/**
2411 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2412 * @entry: swap entry to be moved
2413 * @from: mem_cgroup which the entry is moved from
2414 * @to: mem_cgroup which the entry is moved to
2415 *
2416 * It succeeds only when the swap_cgroup's record for this entry is the same
2417 * as the mem_cgroup's id of @from.
2418 *
2419 * Returns 0 on success, -EINVAL on failure.
2420 *
2421 * The caller must have charged to @to, IOW, called page_counter_charge()
2422 * for both res and memsw, and called css_get().
2423 */
2424static int mem_cgroup_move_swap_account(swp_entry_t entry,
2425 struct mem_cgroup *from, struct mem_cgroup *to)
2426{
2427 unsigned short old_id, new_id;
2428
2429 old_id = mem_cgroup_id(from);
2430 new_id = mem_cgroup_id(to);
2431
2432 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2433 mod_memcg_state(from, MEMCG_SWAP, -1);
2434 mod_memcg_state(to, MEMCG_SWAP, 1);
2435 return 0;
2436 }
2437 return -EINVAL;
2438}
2439#else
2440static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2441 struct mem_cgroup *from, struct mem_cgroup *to)
2442{
2443 return -EINVAL;
2444}
2445#endif
2446
2447static DEFINE_MUTEX(memcg_limit_mutex);
2448
2449static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2450 unsigned long limit, bool memsw)
2451{
2452 bool enlarge = false;
2453 int ret;
2454 bool limits_invariant;
2455 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
2456
2457 do {
2458 if (signal_pending(current)) {
2459 ret = -EINTR;
2460 break;
2461 }
2462
2463 mutex_lock(&memcg_limit_mutex);
2464 /*
2465 * Make sure that the new limit (memsw or memory limit) doesn't
2466 * break our basic invariant rule memory.limit <= memsw.limit.
2467 */
2468 limits_invariant = memsw ? limit >= memcg->memory.limit :
2469 limit <= memcg->memsw.limit;
2470 if (!limits_invariant) {
2471 mutex_unlock(&memcg_limit_mutex);
2472 ret = -EINVAL;
2473 break;
2474 }
2475 if (limit > counter->limit)
2476 enlarge = true;
2477 ret = page_counter_limit(counter, limit);
2478 mutex_unlock(&memcg_limit_mutex);
2479
2480 if (!ret)
2481 break;
2482
2483 if (!try_to_free_mem_cgroup_pages(memcg, 1,
2484 GFP_KERNEL, !memsw)) {
2485 ret = -EBUSY;
2486 break;
2487 }
2488 } while (true);
2489
2490 if (!ret && enlarge)
2491 memcg_oom_recover(memcg);
2492
2493 return ret;
2494}
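
/*
 * Example (legacy cgroup v1 interface): shrinking the limit below current
 * usage makes the loop above reclaim until usage fits, or give up with
 * -EBUSY:
 *
 *	# echo 512M > /sys/fs/cgroup/memory/<group>/memory.limit_in_bytes
 *
 * The write is parsed by mem_cgroup_write() below, which calls
 * mem_cgroup_resize_limit() with memsw=false.
 */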
2495
2496unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
2497 gfp_t gfp_mask,
2498 unsigned long *total_scanned)
2499{
2500 unsigned long nr_reclaimed = 0;
2501 struct mem_cgroup_per_node *mz, *next_mz = NULL;
2502 unsigned long reclaimed;
2503 int loop = 0;
2504 struct mem_cgroup_tree_per_node *mctz;
2505 unsigned long excess;
2506 unsigned long nr_scanned;
2507
2508 if (order > 0)
2509 return 0;
2510
2511 mctz = soft_limit_tree_node(pgdat->node_id);
2512
2513 /*
2514 * Do not even bother to check the largest node if the root
2515 * is empty. Do it lockless to prevent lock bouncing. Races
2516 * are acceptable as soft limit is best effort anyway.
2517 */
2518 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
2519 return 0;
2520
2521 /*
2522	 * This loop can run for a while, especially if memory cgroups
2523	 * continuously keep exceeding their soft limit, putting the system
2524	 * under pressure.
2525 */
2526 do {
2527 if (next_mz)
2528 mz = next_mz;
2529 else
2530 mz = mem_cgroup_largest_soft_limit_node(mctz);
2531 if (!mz)
2532 break;
2533
2534 nr_scanned = 0;
2535 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
2536 gfp_mask, &nr_scanned);
2537 nr_reclaimed += reclaimed;
2538 *total_scanned += nr_scanned;
2539 spin_lock_irq(&mctz->lock);
2540 __mem_cgroup_remove_exceeded(mz, mctz);
2541
2542 /*
2543 * If we failed to reclaim anything from this memory cgroup
2544 * it is time to move on to the next cgroup
2545 */
2546 next_mz = NULL;
2547 if (!reclaimed)
2548 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2549
2550 excess = soft_limit_excess(mz->memcg);
2551 /*
2552 * One school of thought says that we should not add
2553 * back the node to the tree if reclaim returns 0.
2554		 * But our reclaim could return 0 simply because, due
2555		 * to priority, we are exposing a smaller subset of
2556 * memory to reclaim from. Consider this as a longer
2557 * term TODO.
2558 */
2559 /* If excess == 0, no tree ops */
2560 __mem_cgroup_insert_exceeded(mz, mctz, excess);
2561 spin_unlock_irq(&mctz->lock);
2562 css_put(&mz->memcg->css);
2563 loop++;
2564 /*
2565 * Could not reclaim anything and there are no more
2566 * mem cgroups to try or we seem to be looping without
2567 * reclaiming anything.
2568 */
2569 if (!nr_reclaimed &&
2570 (next_mz == NULL ||
2571 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2572 break;
2573 } while (!nr_reclaimed);
2574 if (next_mz)
2575 css_put(&next_mz->memcg->css);
2576 return nr_reclaimed;
2577}
2578
2579/*
2580 * Test whether @memcg has children, dead or alive. Note that this
2581 * function doesn't care whether @memcg has use_hierarchy enabled and
2582 * returns %true if there are child csses according to the cgroup
2583 * hierarchy. Testing use_hierarchy is the caller's responsibility.
2584 */
2585static inline bool memcg_has_children(struct mem_cgroup *memcg)
2586{
2587 bool ret;
2588
2589 rcu_read_lock();
2590 ret = css_next_child(NULL, &memcg->css);
2591 rcu_read_unlock();
2592 return ret;
2593}
2594
2595/*
2596 * Reclaims as many pages from the given memcg as possible.
2597 *
2598 * The caller is responsible for holding a css reference on @memcg.
2599 */
2600static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2601{
2602 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2603
2604	/* we call try-to-free pages to make this cgroup empty */
2605 lru_add_drain_all();
2606 /* try to free all pages in this cgroup */
2607 while (nr_retries && page_counter_read(&memcg->memory)) {
2608 int progress;
2609
2610 if (signal_pending(current))
2611 return -EINTR;
2612
2613 progress = try_to_free_mem_cgroup_pages(memcg, 1,
2614 GFP_KERNEL, true);
2615 if (!progress) {
2616 nr_retries--;
2617 /* maybe some writeback is necessary */
2618 congestion_wait(BLK_RW_ASYNC, HZ/10);
2619 }
2620
2621 }
2622
2623 return 0;
2624}
2625
2626static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2627 char *buf, size_t nbytes,
2628 loff_t off)
2629{
2630 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2631
2632 if (mem_cgroup_is_root(memcg))
2633 return -EINVAL;
2634 return mem_cgroup_force_empty(memcg) ?: nbytes;
2635}
2636
2637static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2638 struct cftype *cft)
2639{
2640 return mem_cgroup_from_css(css)->use_hierarchy;
2641}
2642
2643static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2644 struct cftype *cft, u64 val)
2645{
2646 int retval = 0;
2647 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2648 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
2649
2650 if (memcg->use_hierarchy == val)
2651 return 0;
2652
2653 /*
2654 * If parent's use_hierarchy is set, we can't make any modifications
2655 * in the child subtrees. If it is unset, then the change can
2656 * occur, provided the current cgroup has no children.
2657 *
2658	 * For the root cgroup, parent_memcg is NULL; we allow the value to
2659	 * be set if there are no children.
2660 */
2661 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
2662 (val == 1 || val == 0)) {
2663 if (!memcg_has_children(memcg))
2664 memcg->use_hierarchy = val;
2665 else
2666 retval = -EBUSY;
2667 } else
2668 retval = -EINVAL;
2669
2670 return retval;
2671}
2672
2673static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
2674{
2675 struct mem_cgroup *iter;
2676 int i;
2677
2678 memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT);
2679
2680 for_each_mem_cgroup_tree(iter, memcg) {
2681 for (i = 0; i < MEMCG_NR_STAT; i++)
2682 stat[i] += memcg_page_state(iter, i);
2683 }
2684}
2685
2686static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
2687{
2688 struct mem_cgroup *iter;
2689 int i;
2690
2691 memset(events, 0, sizeof(*events) * NR_VM_EVENT_ITEMS);
2692
2693 for_each_mem_cgroup_tree(iter, memcg) {
2694 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
2695 events[i] += memcg_sum_events(iter, i);
2696 }
2697}
2698
2699static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
2700{
2701 unsigned long val = 0;
2702
2703 if (mem_cgroup_is_root(memcg)) {
2704 struct mem_cgroup *iter;
2705
2706 for_each_mem_cgroup_tree(iter, memcg) {
2707 val += memcg_page_state(iter, MEMCG_CACHE);
2708 val += memcg_page_state(iter, MEMCG_RSS);
2709 if (swap)
2710 val += memcg_page_state(iter, MEMCG_SWAP);
2711 }
2712 } else {
2713 if (!swap)
2714 val = page_counter_read(&memcg->memory);
2715 else
2716 val = page_counter_read(&memcg->memsw);
2717 }
2718 return val;
2719}
2720
2721enum {
2722 RES_USAGE,
2723 RES_LIMIT,
2724 RES_MAX_USAGE,
2725 RES_FAILCNT,
2726 RES_SOFT_LIMIT,
2727};
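
/*
 * cftype->private packs a counter type and one of the RES_* attributes
 * into a single word; see the MEMFILE_* macros earlier in this file,
 * which encode roughly (type << 16 | attr). For example:
 *
 *	priv = MEMFILE_PRIVATE(_MEM, RES_LIMIT); // memory.limit_in_bytes
 *	MEMFILE_TYPE(priv);			 // -> _MEM
 *	MEMFILE_ATTR(priv);			 // -> RES_LIMIT
 */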
2728
2729static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
2730 struct cftype *cft)
2731{
2732 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2733 struct page_counter *counter;
2734
2735 switch (MEMFILE_TYPE(cft->private)) {
2736 case _MEM:
2737 counter = &memcg->memory;
2738 break;
2739 case _MEMSWAP:
2740 counter = &memcg->memsw;
2741 break;
2742 case _KMEM:
2743 counter = &memcg->kmem;
2744 break;
2745 case _TCP:
2746 counter = &memcg->tcpmem;
2747 break;
2748 default:
2749 BUG();
2750 }
2751
2752 switch (MEMFILE_ATTR(cft->private)) {
2753 case RES_USAGE:
2754 if (counter == &memcg->memory)
2755 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
2756 if (counter == &memcg->memsw)
2757 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
2758 return (u64)page_counter_read(counter) * PAGE_SIZE;
2759 case RES_LIMIT:
2760 return (u64)counter->limit * PAGE_SIZE;
2761 case RES_MAX_USAGE:
2762 return (u64)counter->watermark * PAGE_SIZE;
2763 case RES_FAILCNT:
2764 return counter->failcnt;
2765 case RES_SOFT_LIMIT:
2766 return (u64)memcg->soft_limit * PAGE_SIZE;
2767 default:
2768 BUG();
2769 }
2770}
2771
2772#ifndef CONFIG_SLOB
2773static int memcg_online_kmem(struct mem_cgroup *memcg)
2774{
2775 int memcg_id;
2776
2777 if (cgroup_memory_nokmem)
2778 return 0;
2779
2780 BUG_ON(memcg->kmemcg_id >= 0);
2781 BUG_ON(memcg->kmem_state);
2782
2783 memcg_id = memcg_alloc_cache_id();
2784 if (memcg_id < 0)
2785 return memcg_id;
2786
2787 static_branch_inc(&memcg_kmem_enabled_key);
2788 /*
2789 * A memory cgroup is considered kmem-online as soon as it gets
2790 * kmemcg_id. Setting the id after enabling static branching will
2791 * guarantee no one starts accounting before all call sites are
2792 * patched.
2793 */
2794 memcg->kmemcg_id = memcg_id;
2795 memcg->kmem_state = KMEM_ONLINE;
2796 INIT_LIST_HEAD(&memcg->kmem_caches);
2797
2798 return 0;
2799}
2800
2801static void memcg_offline_kmem(struct mem_cgroup *memcg)
2802{
2803 struct cgroup_subsys_state *css;
2804 struct mem_cgroup *parent, *child;
2805 int kmemcg_id;
2806
2807 if (memcg->kmem_state != KMEM_ONLINE)
2808 return;
2809 /*
2810 * Clear the online state before clearing memcg_caches array
2811 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
2812 * guarantees that no cache will be created for this cgroup
2813 * after we are done (see memcg_create_kmem_cache()).
2814 */
2815 memcg->kmem_state = KMEM_ALLOCATED;
2816
2817 memcg_deactivate_kmem_caches(memcg);
2818
2819 kmemcg_id = memcg->kmemcg_id;
2820 BUG_ON(kmemcg_id < 0);
2821
2822 parent = parent_mem_cgroup(memcg);
2823 if (!parent)
2824 parent = root_mem_cgroup;
2825
2826 /*
2827 * Change kmemcg_id of this cgroup and all its descendants to the
2828 * parent's id, and then move all entries from this cgroup's list_lrus
2829 * to ones of the parent. After we have finished, all list_lrus
2830	 * to those of the parent. After we have finished, all list_lrus
2831 * ordering is imposed by list_lru_node->lock taken by
2832 * memcg_drain_all_list_lrus().
2833 */
2834 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
2835 css_for_each_descendant_pre(css, &memcg->css) {
2836 child = mem_cgroup_from_css(css);
2837 BUG_ON(child->kmemcg_id != kmemcg_id);
2838 child->kmemcg_id = parent->kmemcg_id;
2839 if (!memcg->use_hierarchy)
2840 break;
2841 }
2842 rcu_read_unlock();
2843
2844 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
2845
2846 memcg_free_cache_id(kmemcg_id);
2847}
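
/*
 * Worked example of the reparenting above: grandparent G (kmemcg_id 0)
 * -> parent P (id 1) -> child C (id 3). Cgroups are offlined children
 * first, so when C dies it is switched to id 1 and id 3 is freed; when
 * P dies later, both P and the already-dead C are switched to id 0,
 * their list_lru entries are drained into G's lists, and id 1 is freed.
 * That is why the BUG_ON above can insist that every descendant still
 * carries this cgroup's kmemcg_id.
 */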
2848
2849static void memcg_free_kmem(struct mem_cgroup *memcg)
2850{
2851 /* css_alloc() failed, offlining didn't happen */
2852 if (unlikely(memcg->kmem_state == KMEM_ONLINE))
2853 memcg_offline_kmem(memcg);
2854
2855 if (memcg->kmem_state == KMEM_ALLOCATED) {
2856 memcg_destroy_kmem_caches(memcg);
2857 static_branch_dec(&memcg_kmem_enabled_key);
2858 WARN_ON(page_counter_read(&memcg->kmem));
2859 }
2860}
2861#else
2862static int memcg_online_kmem(struct mem_cgroup *memcg)
2863{
2864 return 0;
2865}
2866static void memcg_offline_kmem(struct mem_cgroup *memcg)
2867{
2868}
2869static void memcg_free_kmem(struct mem_cgroup *memcg)
2870{
2871}
2872#endif /* !CONFIG_SLOB */
2873
2874static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
2875 unsigned long limit)
2876{
2877 int ret;
2878
2879 mutex_lock(&memcg_limit_mutex);
2880 ret = page_counter_limit(&memcg->kmem, limit);
2881 mutex_unlock(&memcg_limit_mutex);
2882 return ret;
2883}
2884
2885static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
2886{
2887 int ret;
2888
2889 mutex_lock(&memcg_limit_mutex);
2890
2891 ret = page_counter_limit(&memcg->tcpmem, limit);
2892 if (ret)
2893 goto out;
2894
2895 if (!memcg->tcpmem_active) {
2896 /*
2897 * The active flag needs to be written after the static_key
2898 * update. This is what guarantees that the socket activation
2899 * function is the last one to run. See mem_cgroup_sk_alloc()
2900 * for details, and note that we don't mark any socket as
2901 * belonging to this memcg until that flag is up.
2902 *
2903 * We need to do this, because static_keys will span multiple
2904 * sites, but we can't control their order. If we mark a socket
2905 * as accounted, but the accounting functions are not patched in
2906 * yet, we'll lose accounting.
2907 *
2908 * We never race with the readers in mem_cgroup_sk_alloc(),
2909		 * because when this value changes, the code to process it is not
2910 * patched in yet.
2911 */
2912 static_branch_inc(&memcg_sockets_enabled_key);
2913 memcg->tcpmem_active = true;
2914 }
2915out:
2916 mutex_unlock(&memcg_limit_mutex);
2917 return ret;
2918}
2919
2920/*
2921 * This is the write handler for limit values:
2922 * RES_LIMIT and RES_SOFT_LIMIT.
2923 */
2924static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
2925 char *buf, size_t nbytes, loff_t off)
2926{
2927 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2928 unsigned long nr_pages;
2929 int ret;
2930
2931 buf = strstrip(buf);
2932 ret = page_counter_memparse(buf, "-1", &nr_pages);
2933 if (ret)
2934 return ret;
2935
2936 switch (MEMFILE_ATTR(of_cft(of)->private)) {
2937 case RES_LIMIT:
2938 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
2939 ret = -EINVAL;
2940 break;
2941 }
2942 switch (MEMFILE_TYPE(of_cft(of)->private)) {
2943 case _MEM:
2944 ret = mem_cgroup_resize_limit(memcg, nr_pages, false);
2945 break;
2946 case _MEMSWAP:
2947 ret = mem_cgroup_resize_limit(memcg, nr_pages, true);
2948 break;
2949 case _KMEM:
2950 ret = memcg_update_kmem_limit(memcg, nr_pages);
2951 break;
2952 case _TCP:
2953 ret = memcg_update_tcp_limit(memcg, nr_pages);
2954 break;
2955 }
2956 break;
2957 case RES_SOFT_LIMIT:
2958 memcg->soft_limit = nr_pages;
2959 ret = 0;
2960 break;
2961 }
2962 return ret ?: nbytes;
2963}
2964
2965static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
2966 size_t nbytes, loff_t off)
2967{
2968 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2969 struct page_counter *counter;
2970
2971 switch (MEMFILE_TYPE(of_cft(of)->private)) {
2972 case _MEM:
2973 counter = &memcg->memory;
2974 break;
2975 case _MEMSWAP:
2976 counter = &memcg->memsw;
2977 break;
2978 case _KMEM:
2979 counter = &memcg->kmem;
2980 break;
2981 case _TCP:
2982 counter = &memcg->tcpmem;
2983 break;
2984 default:
2985 BUG();
2986 }
2987
2988 switch (MEMFILE_ATTR(of_cft(of)->private)) {
2989 case RES_MAX_USAGE:
2990 page_counter_reset_watermark(counter);
2991 break;
2992 case RES_FAILCNT:
2993 counter->failcnt = 0;
2994 break;
2995 default:
2996 BUG();
2997 }
2998
2999 return nbytes;
3000}
3001
3002static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3003 struct cftype *cft)
3004{
3005 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3006}
3007
3008#ifdef CONFIG_MMU
3009static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3010 struct cftype *cft, u64 val)
3011{
3012 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3013
3014 if (val & ~MOVE_MASK)
3015 return -EINVAL;
3016
3017 /*
3018 * No kind of locking is needed in here, because ->can_attach() will
3019 * check this value once in the beginning of the process, and then carry
3020 * on with stale data. This means that changes to this value will only
3021 * affect task migrations starting after the change.
3022 */
3023 memcg->move_charge_at_immigrate = val;
3024 return 0;
3025}
3026#else
3027static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3028 struct cftype *cft, u64 val)
3029{
3030 return -ENOSYS;
3031}
3032#endif
3033
3034#ifdef CONFIG_NUMA
3035static int memcg_numa_stat_show(struct seq_file *m, void *v)
3036{
3037 struct numa_stat {
3038 const char *name;
3039 unsigned int lru_mask;
3040 };
3041
3042 static const struct numa_stat stats[] = {
3043 { "total", LRU_ALL },
3044 { "file", LRU_ALL_FILE },
3045 { "anon", LRU_ALL_ANON },
3046 { "unevictable", BIT(LRU_UNEVICTABLE) },
3047 };
3048 const struct numa_stat *stat;
3049 int nid;
3050 unsigned long nr;
3051 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3052
3053 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3054 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3055 seq_printf(m, "%s=%lu", stat->name, nr);
3056 for_each_node_state(nid, N_MEMORY) {
3057 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3058 stat->lru_mask);
3059 seq_printf(m, " N%d=%lu", nid, nr);
3060 }
3061 seq_putc(m, '\n');
3062 }
3063
3064 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3065 struct mem_cgroup *iter;
3066
3067 nr = 0;
3068 for_each_mem_cgroup_tree(iter, memcg)
3069 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3070 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3071 for_each_node_state(nid, N_MEMORY) {
3072 nr = 0;
3073 for_each_mem_cgroup_tree(iter, memcg)
3074 nr += mem_cgroup_node_nr_lru_pages(
3075 iter, nid, stat->lru_mask);
3076 seq_printf(m, " N%d=%lu", nid, nr);
3077 }
3078 seq_putc(m, '\n');
3079 }
3080
3081 return 0;
3082}
3083#endif /* CONFIG_NUMA */
3084
3085/* Universal VM events cgroup1 shows, original sort order */
3086unsigned int memcg1_events[] = {
3087 PGPGIN,
3088 PGPGOUT,
3089 PGFAULT,
3090 PGMAJFAULT,
3091};
3092
3093static const char *const memcg1_event_names[] = {
3094 "pgpgin",
3095 "pgpgout",
3096 "pgfault",
3097 "pgmajfault",
3098};
3099
3100static int memcg_stat_show(struct seq_file *m, void *v)
3101{
3102 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3103 unsigned long memory, memsw;
3104 struct mem_cgroup *mi;
3105 unsigned int i;
3106
3107 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
3108 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3109
3110 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3111 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3112 continue;
3113 seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
3114 memcg_page_state(memcg, memcg1_stats[i]) *
3115 PAGE_SIZE);
3116 }
3117
3118 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3119 seq_printf(m, "%s %lu\n", memcg1_event_names[i],
3120 memcg_sum_events(memcg, memcg1_events[i]));
3121
3122 for (i = 0; i < NR_LRU_LISTS; i++)
3123 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3124 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
3125
3126 /* Hierarchical information */
3127 memory = memsw = PAGE_COUNTER_MAX;
3128 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3129 memory = min(memory, mi->memory.limit);
3130 memsw = min(memsw, mi->memsw.limit);
3131 }
3132 seq_printf(m, "hierarchical_memory_limit %llu\n",
3133 (u64)memory * PAGE_SIZE);
3134 if (do_memsw_account())
3135 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3136 (u64)memsw * PAGE_SIZE);
3137
3138 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3139 unsigned long long val = 0;
3140
3141 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3142 continue;
3143 for_each_mem_cgroup_tree(mi, memcg)
3144 val += memcg_page_state(mi, memcg1_stats[i]) *
3145 PAGE_SIZE;
3146 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], val);
3147 }
3148
3149 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) {
3150 unsigned long long val = 0;
3151
3152 for_each_mem_cgroup_tree(mi, memcg)
3153 val += memcg_sum_events(mi, memcg1_events[i]);
3154 seq_printf(m, "total_%s %llu\n", memcg1_event_names[i], val);
3155 }
3156
3157 for (i = 0; i < NR_LRU_LISTS; i++) {
3158 unsigned long long val = 0;
3159
3160 for_each_mem_cgroup_tree(mi, memcg)
3161 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3162 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
3163 }
3164
3165#ifdef CONFIG_DEBUG_VM
3166 {
3167 pg_data_t *pgdat;
3168 struct mem_cgroup_per_node *mz;
3169 struct zone_reclaim_stat *rstat;
3170 unsigned long recent_rotated[2] = {0, 0};
3171 unsigned long recent_scanned[2] = {0, 0};
3172
3173 for_each_online_pgdat(pgdat) {
3174 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
3175 rstat = &mz->lruvec.reclaim_stat;
3176
3177 recent_rotated[0] += rstat->recent_rotated[0];
3178 recent_rotated[1] += rstat->recent_rotated[1];
3179 recent_scanned[0] += rstat->recent_scanned[0];
3180 recent_scanned[1] += rstat->recent_scanned[1];
3181 }
3182 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3183 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3184 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3185 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
3186 }
3187#endif
3188
3189 return 0;
3190}
3191
3192static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3193 struct cftype *cft)
3194{
3195 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3196
3197 return mem_cgroup_swappiness(memcg);
3198}
3199
3200static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3201 struct cftype *cft, u64 val)
3202{
3203 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3204
3205 if (val > 100)
3206 return -EINVAL;
3207
3208 if (css->parent)
3209 memcg->swappiness = val;
3210 else
3211 vm_swappiness = val;
3212
3213 return 0;
3214}
3215
3216static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3217{
3218 struct mem_cgroup_threshold_ary *t;
3219 unsigned long usage;
3220 int i;
3221
3222 rcu_read_lock();
3223 if (!swap)
3224 t = rcu_dereference(memcg->thresholds.primary);
3225 else
3226 t = rcu_dereference(memcg->memsw_thresholds.primary);
3227
3228 if (!t)
3229 goto unlock;
3230
3231 usage = mem_cgroup_usage(memcg, swap);
3232
3233 /*
3234	 * current_threshold points to the threshold just below or equal to
3235	 * usage. If that is not the case, a threshold was crossed after the
3236	 * last call of __mem_cgroup_threshold().
3237 */
3238 i = t->current_threshold;
3239
3240 /*
3241 * Iterate backward over array of thresholds starting from
3242 * current_threshold and check if a threshold is crossed.
3243	 * If none of the thresholds below usage is crossed, we read
3244 * only one element of the array here.
3245 */
3246 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3247 eventfd_signal(t->entries[i].eventfd, 1);
3248
3249 /* i = current_threshold + 1 */
3250 i++;
3251
3252 /*
3253 * Iterate forward over array of thresholds starting from
3254 * current_threshold+1 and check if a threshold is crossed.
3255	 * If none of the thresholds above usage is crossed, we read
3256 * only one element of the array here.
3257 */
3258 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3259 eventfd_signal(t->entries[i].eventfd, 1);
3260
3261 /* Update current_threshold */
3262 t->current_threshold = i - 1;
3263unlock:
3264 rcu_read_unlock();
3265}
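
/*
 * Worked example: with registered thresholds {4M, 8M, 16M} and a last
 * recorded usage of 10M, current_threshold points at 8M. If usage has
 * since grown to 20M, the forward walk signals the 16M eventfd and
 * leaves current_threshold there; if usage instead dropped to 3M, the
 * backward walk signals 8M and then 4M, and current_threshold ends up
 * at -1 (below the smallest threshold).
 */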
3266
3267static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3268{
3269 while (memcg) {
3270 __mem_cgroup_threshold(memcg, false);
3271 if (do_memsw_account())
3272 __mem_cgroup_threshold(memcg, true);
3273
3274 memcg = parent_mem_cgroup(memcg);
3275 }
3276}
3277
3278static int compare_thresholds(const void *a, const void *b)
3279{
3280 const struct mem_cgroup_threshold *_a = a;
3281 const struct mem_cgroup_threshold *_b = b;
3282
3283 if (_a->threshold > _b->threshold)
3284 return 1;
3285
3286 if (_a->threshold < _b->threshold)
3287 return -1;
3288
3289 return 0;
3290}
3291
3292static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
3293{
3294 struct mem_cgroup_eventfd_list *ev;
3295
3296 spin_lock(&memcg_oom_lock);
3297
3298 list_for_each_entry(ev, &memcg->oom_notify, list)
3299 eventfd_signal(ev->eventfd, 1);
3300
3301 spin_unlock(&memcg_oom_lock);
3302 return 0;
3303}
3304
3305static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
3306{
3307 struct mem_cgroup *iter;
3308
3309 for_each_mem_cgroup_tree(iter, memcg)
3310 mem_cgroup_oom_notify_cb(iter);
3311}
3312
3313static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3314 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
3315{
3316 struct mem_cgroup_thresholds *thresholds;
3317 struct mem_cgroup_threshold_ary *new;
3318 unsigned long threshold;
3319 unsigned long usage;
3320 int i, size, ret;
3321
3322 ret = page_counter_memparse(args, "-1", &threshold);
3323 if (ret)
3324 return ret;
3325
3326 mutex_lock(&memcg->thresholds_lock);
3327
3328 if (type == _MEM) {
3329 thresholds = &memcg->thresholds;
3330 usage = mem_cgroup_usage(memcg, false);
3331 } else if (type == _MEMSWAP) {
3332 thresholds = &memcg->memsw_thresholds;
3333 usage = mem_cgroup_usage(memcg, true);
3334 } else
3335 BUG();
3336
3337 /* Check if a threshold crossed before adding a new one */
3338 if (thresholds->primary)
3339 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3340
3341 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3342
3343 /* Allocate memory for new array of thresholds */
3344 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3345 GFP_KERNEL);
3346 if (!new) {
3347 ret = -ENOMEM;
3348 goto unlock;
3349 }
3350 new->size = size;
3351
3352 /* Copy thresholds (if any) to new array */
3353 if (thresholds->primary) {
3354 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3355 sizeof(struct mem_cgroup_threshold));
3356 }
3357
3358 /* Add new threshold */
3359 new->entries[size - 1].eventfd = eventfd;
3360 new->entries[size - 1].threshold = threshold;
3361
3362	/* Sort thresholds. Registering a new threshold isn't time-critical */
3363 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3364 compare_thresholds, NULL);
3365
3366 /* Find current threshold */
3367 new->current_threshold = -1;
3368 for (i = 0; i < size; i++) {
3369 if (new->entries[i].threshold <= usage) {
3370 /*
3371 * new->current_threshold will not be used until
3372 * rcu_assign_pointer(), so it's safe to increment
3373 * it here.
3374 */
3375 ++new->current_threshold;
3376 } else
3377 break;
3378 }
3379
3380 /* Free old spare buffer and save old primary buffer as spare */
3381 kfree(thresholds->spare);
3382 thresholds->spare = thresholds->primary;
3383
3384 rcu_assign_pointer(thresholds->primary, new);
3385
3386 /* To be sure that nobody uses thresholds */
3387 synchronize_rcu();
3388
3389unlock:
3390 mutex_unlock(&memcg->thresholds_lock);
3391
3392 return ret;
3393}
3394
3395static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3396 struct eventfd_ctx *eventfd, const char *args)
3397{
3398 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
3399}
3400
3401static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
3402 struct eventfd_ctx *eventfd, const char *args)
3403{
3404 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
3405}
3406
3407static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3408 struct eventfd_ctx *eventfd, enum res_type type)
3409{
3410 struct mem_cgroup_thresholds *thresholds;
3411 struct mem_cgroup_threshold_ary *new;
3412 unsigned long usage;
3413 int i, j, size;
3414
3415 mutex_lock(&memcg->thresholds_lock);
3416
3417 if (type == _MEM) {
3418 thresholds = &memcg->thresholds;
3419 usage = mem_cgroup_usage(memcg, false);
3420 } else if (type == _MEMSWAP) {
3421 thresholds = &memcg->memsw_thresholds;
3422 usage = mem_cgroup_usage(memcg, true);
3423 } else
3424 BUG();
3425
3426 if (!thresholds->primary)
3427 goto unlock;
3428
3429 /* Check if a threshold crossed before removing */
3430 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3431
3432	/* Calculate the new number of thresholds */
3433 size = 0;
3434 for (i = 0; i < thresholds->primary->size; i++) {
3435 if (thresholds->primary->entries[i].eventfd != eventfd)
3436 size++;
3437 }
3438
3439 new = thresholds->spare;
3440
3441 /* Set thresholds array to NULL if we don't have thresholds */
3442 if (!size) {
3443 kfree(new);
3444 new = NULL;
3445 goto swap_buffers;
3446 }
3447
3448 new->size = size;
3449
3450 /* Copy thresholds and find current threshold */
3451 new->current_threshold = -1;
3452 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3453 if (thresholds->primary->entries[i].eventfd == eventfd)
3454 continue;
3455
3456 new->entries[j] = thresholds->primary->entries[i];
3457 if (new->entries[j].threshold <= usage) {
3458 /*
3459 * new->current_threshold will not be used
3460 * until rcu_assign_pointer(), so it's safe to increment
3461 * it here.
3462 */
3463 ++new->current_threshold;
3464 }
3465 j++;
3466 }
3467
3468swap_buffers:
3469 /* Swap primary and spare array */
3470 thresholds->spare = thresholds->primary;
3471
3472 rcu_assign_pointer(thresholds->primary, new);
3473
3474 /* To be sure that nobody uses thresholds */
3475 synchronize_rcu();
3476
3477 /* If all events are unregistered, free the spare array */
3478 if (!new) {
3479 kfree(thresholds->spare);
3480 thresholds->spare = NULL;
3481 }
3482unlock:
3483 mutex_unlock(&memcg->thresholds_lock);
3484}
3485
3486static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3487 struct eventfd_ctx *eventfd)
3488{
3489 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
3490}
3491
3492static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3493 struct eventfd_ctx *eventfd)
3494{
3495 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
3496}
3497
3498static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
3499 struct eventfd_ctx *eventfd, const char *args)
3500{
3501 struct mem_cgroup_eventfd_list *event;
3502
3503 event = kmalloc(sizeof(*event), GFP_KERNEL);
3504 if (!event)
3505 return -ENOMEM;
3506
3507 spin_lock(&memcg_oom_lock);
3508
3509 event->eventfd = eventfd;
3510 list_add(&event->list, &memcg->oom_notify);
3511
3512 /* already in OOM ? */
3513 if (memcg->under_oom)
3514 eventfd_signal(eventfd, 1);
3515 spin_unlock(&memcg_oom_lock);
3516
3517 return 0;
3518}
3519
3520static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
3521 struct eventfd_ctx *eventfd)
3522{
3523 struct mem_cgroup_eventfd_list *ev, *tmp;
3524
3525 spin_lock(&memcg_oom_lock);
3526
3527 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
3528 if (ev->eventfd == eventfd) {
3529 list_del(&ev->list);
3530 kfree(ev);
3531 }
3532 }
3533
3534 spin_unlock(&memcg_oom_lock);
3535}
3536
3537static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3538{
3539 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3540
3541 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
3542 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
3543 seq_printf(sf, "oom_kill %lu\n", memcg_sum_events(memcg, OOM_KILL));
3544 return 0;
3545}
3546
3547static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3548 struct cftype *cft, u64 val)
3549{
3550 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3551
3552	/* cannot be set on the root cgroup, and only 0 and 1 are allowed */
3553 if (!css->parent || !((val == 0) || (val == 1)))
3554 return -EINVAL;
3555
3556 memcg->oom_kill_disable = val;
3557 if (!val)
3558 memcg_oom_recover(memcg);
3559
3560 return 0;
3561}
3562
3563#ifdef CONFIG_CGROUP_WRITEBACK
3564
3565struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
3566{
3567 return &memcg->cgwb_list;
3568}
3569
3570static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3571{
3572 return wb_domain_init(&memcg->cgwb_domain, gfp);
3573}
3574
3575static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3576{
3577 wb_domain_exit(&memcg->cgwb_domain);
3578}
3579
3580static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3581{
3582 wb_domain_size_changed(&memcg->cgwb_domain);
3583}
3584
3585struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3586{
3587 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3588
3589 if (!memcg->css.parent)
3590 return NULL;
3591
3592 return &memcg->cgwb_domain;
3593}
3594
3595/**
3596 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3597 * @wb: bdi_writeback in question
3598 * @pfilepages: out parameter for number of file pages
3599 * @pheadroom: out parameter for number of allocatable pages according to memcg
3600 * @pdirty: out parameter for number of dirty pages
3601 * @pwriteback: out parameter for number of pages under writeback
3602 *
3603 * Determine the numbers of file, headroom, dirty, and writeback pages in
3604 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3605 * is a bit more involved.
3606 *
3607 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3608 * headroom is calculated as the lowest headroom of itself and the
3609 * ancestors. Note that this doesn't consider the actual amount of
3610 * available memory in the system. The caller should further cap
3611 * *@pheadroom accordingly.
3612 */
3613void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3614 unsigned long *pheadroom, unsigned long *pdirty,
3615 unsigned long *pwriteback)
3616{
3617 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3618 struct mem_cgroup *parent;
3619
3620 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
3621
3622 /* this should eventually include NR_UNSTABLE_NFS */
3623 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
3624 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3625 (1 << LRU_ACTIVE_FILE));
3626 *pheadroom = PAGE_COUNTER_MAX;
3627
3628 while ((parent = parent_mem_cgroup(memcg))) {
3629 unsigned long ceiling = min(memcg->memory.limit, memcg->high);
3630 unsigned long used = page_counter_read(&memcg->memory);
3631
3632 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3633 memcg = parent;
3634 }
3635}
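
/*
 * Worked headroom example: a memcg with limit 1G, high unset
 * (PAGE_COUNTER_MAX) and 600M used contributes min(1G, max) - 600M =
 * 424M; if an ancestor has only 200M left under its own ceiling,
 * *pheadroom ends up as min(424M, 200M) = 200M (the code works in
 * pages, not bytes).
 */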
3636
3637#else /* CONFIG_CGROUP_WRITEBACK */
3638
3639static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3640{
3641 return 0;
3642}
3643
3644static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3645{
3646}
3647
3648static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3649{
3650}
3651
3652#endif /* CONFIG_CGROUP_WRITEBACK */
3653
3654/*
3655 * DO NOT USE IN NEW FILES.
3656 *
3657 * "cgroup.event_control" implementation.
3658 *
3659 * This is way over-engineered. It tries to support fully configurable
3660 * events for each user. Such a level of flexibility is completely
3661 * unnecessary, especially in light of the planned unified hierarchy.
3662 *
3663 * Please deprecate this and replace with something simpler if at all
3664 * possible.
3665 */
3666
3667/*
3668 * Unregister event and free resources.
3669 *
3670 * Gets called from workqueue.
3671 */
3672static void memcg_event_remove(struct work_struct *work)
3673{
3674 struct mem_cgroup_event *event =
3675 container_of(work, struct mem_cgroup_event, remove);
3676 struct mem_cgroup *memcg = event->memcg;
3677
3678 remove_wait_queue(event->wqh, &event->wait);
3679
3680 event->unregister_event(memcg, event->eventfd);
3681
3682 /* Notify userspace the event is going away. */
3683 eventfd_signal(event->eventfd, 1);
3684
3685 eventfd_ctx_put(event->eventfd);
3686 kfree(event);
3687 css_put(&memcg->css);
3688}
3689
3690/*
3691 * Gets called on EPOLLHUP on eventfd when user closes it.
3692 *
3693 * Called with wqh->lock held and interrupts disabled.
3694 */
3695static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
3696 int sync, void *key)
3697{
3698 struct mem_cgroup_event *event =
3699 container_of(wait, struct mem_cgroup_event, wait);
3700 struct mem_cgroup *memcg = event->memcg;
3701 __poll_t flags = key_to_poll(key);
3702
3703 if (flags & EPOLLHUP) {
3704 /*
3705 * If the event has been detached at cgroup removal, we
3706 * can simply return knowing the other side will cleanup
3707 * for us.
3708 *
3709 * We can't race against event freeing since the other
3710 * side will require wqh->lock via remove_wait_queue(),
3711 * which we hold.
3712 */
3713 spin_lock(&memcg->event_list_lock);
3714 if (!list_empty(&event->list)) {
3715 list_del_init(&event->list);
3716 /*
3717			 * We are in atomic context, but memcg_event_remove()
3718			 * may sleep, so we have to call it via a workqueue.
3719 */
3720 schedule_work(&event->remove);
3721 }
3722 spin_unlock(&memcg->event_list_lock);
3723 }
3724
3725 return 0;
3726}
3727
3728static void memcg_event_ptable_queue_proc(struct file *file,
3729 wait_queue_head_t *wqh, poll_table *pt)
3730{
3731 struct mem_cgroup_event *event =
3732 container_of(pt, struct mem_cgroup_event, pt);
3733
3734 event->wqh = wqh;
3735 add_wait_queue(wqh, &event->wait);
3736}
3737
3738/*
3739 * DO NOT USE IN NEW FILES.
3740 *
3741 * Parse input and register new cgroup event handler.
3742 *
3743 * Input must be in format '<event_fd> <control_fd> <args>'.
3744 * Interpretation of args is defined by control file implementation.
3745 */
3746static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
3747 char *buf, size_t nbytes, loff_t off)
3748{
3749 struct cgroup_subsys_state *css = of_css(of);
3750 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3751 struct mem_cgroup_event *event;
3752 struct cgroup_subsys_state *cfile_css;
3753 unsigned int efd, cfd;
3754 struct fd efile;
3755 struct fd cfile;
3756 const char *name;
3757 char *endp;
3758 int ret;
3759
3760 buf = strstrip(buf);
3761
3762 efd = simple_strtoul(buf, &endp, 10);
3763 if (*endp != ' ')
3764 return -EINVAL;
3765 buf = endp + 1;
3766
3767 cfd = simple_strtoul(buf, &endp, 10);
3768 if ((*endp != ' ') && (*endp != '\0'))
3769 return -EINVAL;
3770 buf = endp + 1;
3771
3772 event = kzalloc(sizeof(*event), GFP_KERNEL);
3773 if (!event)
3774 return -ENOMEM;
3775
3776 event->memcg = memcg;
3777 INIT_LIST_HEAD(&event->list);
3778 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
3779 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
3780 INIT_WORK(&event->remove, memcg_event_remove);
3781
3782 efile = fdget(efd);
3783 if (!efile.file) {
3784 ret = -EBADF;
3785 goto out_kfree;
3786 }
3787
3788 event->eventfd = eventfd_ctx_fileget(efile.file);
3789 if (IS_ERR(event->eventfd)) {
3790 ret = PTR_ERR(event->eventfd);
3791 goto out_put_efile;
3792 }
3793
3794 cfile = fdget(cfd);
3795 if (!cfile.file) {
3796 ret = -EBADF;
3797 goto out_put_eventfd;
3798 }
3799
3800	/* the process needs read permission on the control file */
3801 /* AV: shouldn't we check that it's been opened for read instead? */
3802 ret = inode_permission(file_inode(cfile.file), MAY_READ);
3803 if (ret < 0)
3804 goto out_put_cfile;
3805
3806 /*
3807 * Determine the event callbacks and set them in @event. This used
3808 * to be done via struct cftype but cgroup core no longer knows
3809 * about these events. The following is crude but the whole thing
3810 * is for compatibility anyway.
3811 *
3812 * DO NOT ADD NEW FILES.
3813 */
3814 name = cfile.file->f_path.dentry->d_name.name;
3815
3816 if (!strcmp(name, "memory.usage_in_bytes")) {
3817 event->register_event = mem_cgroup_usage_register_event;
3818 event->unregister_event = mem_cgroup_usage_unregister_event;
3819 } else if (!strcmp(name, "memory.oom_control")) {
3820 event->register_event = mem_cgroup_oom_register_event;
3821 event->unregister_event = mem_cgroup_oom_unregister_event;
3822 } else if (!strcmp(name, "memory.pressure_level")) {
3823 event->register_event = vmpressure_register_event;
3824 event->unregister_event = vmpressure_unregister_event;
3825 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
3826 event->register_event = memsw_cgroup_usage_register_event;
3827 event->unregister_event = memsw_cgroup_usage_unregister_event;
3828 } else {
3829 ret = -EINVAL;
3830 goto out_put_cfile;
3831 }
3832
3833 /*
3834	 * Verify that @cfile belongs to @css. Also, remaining events are
3835 * automatically removed on cgroup destruction but the removal is
3836 * asynchronous, so take an extra ref on @css.
3837 */
3838 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
3839 &memory_cgrp_subsys);
3840 ret = -EINVAL;
3841 if (IS_ERR(cfile_css))
3842 goto out_put_cfile;
3843 if (cfile_css != css) {
3844 css_put(cfile_css);
3845 goto out_put_cfile;
3846 }
3847
3848 ret = event->register_event(memcg, event->eventfd, buf);
3849 if (ret)
3850 goto out_put_css;
3851
3852 efile.file->f_op->poll(efile.file, &event->pt);
3853
3854 spin_lock(&memcg->event_list_lock);
3855 list_add(&event->list, &memcg->event_list);
3856 spin_unlock(&memcg->event_list_lock);
3857
3858 fdput(cfile);
3859 fdput(efile);
3860
3861 return nbytes;
3862
3863out_put_css:
3864 css_put(css);
3865out_put_cfile:
3866 fdput(cfile);
3867out_put_eventfd:
3868 eventfd_ctx_put(event->eventfd);
3869out_put_efile:
3870 fdput(efile);
3871out_kfree:
3872 kfree(event);
3873
3874 return ret;
3875}
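
/*
 * Example (legacy hierarchy only, a sketch): arm a usage threshold at
 * 50M. Assuming efd is an eventfd(2) descriptor and cfd is an open
 * memory.usage_in_bytes, userspace does roughly:
 *
 *	char buf[32];
 *	snprintf(buf, sizeof(buf), "%d %d 50M", efd, cfd);
 *	write(event_control_fd, buf, strlen(buf));
 *
 * after which every crossing of 50M makes a read() on efd return.
 */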
3876
3877static struct cftype mem_cgroup_legacy_files[] = {
3878 {
3879 .name = "usage_in_bytes",
3880 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3881 .read_u64 = mem_cgroup_read_u64,
3882 },
3883 {
3884 .name = "max_usage_in_bytes",
3885 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
3886 .write = mem_cgroup_reset,
3887 .read_u64 = mem_cgroup_read_u64,
3888 },
3889 {
3890 .name = "limit_in_bytes",
3891 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
3892 .write = mem_cgroup_write,
3893 .read_u64 = mem_cgroup_read_u64,
3894 },
3895 {
3896 .name = "soft_limit_in_bytes",
3897 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3898 .write = mem_cgroup_write,
3899 .read_u64 = mem_cgroup_read_u64,
3900 },
3901 {
3902 .name = "failcnt",
3903 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
3904 .write = mem_cgroup_reset,
3905 .read_u64 = mem_cgroup_read_u64,
3906 },
3907 {
3908 .name = "stat",
3909 .seq_show = memcg_stat_show,
3910 },
3911 {
3912 .name = "force_empty",
3913 .write = mem_cgroup_force_empty_write,
3914 },
3915 {
3916 .name = "use_hierarchy",
3917 .write_u64 = mem_cgroup_hierarchy_write,
3918 .read_u64 = mem_cgroup_hierarchy_read,
3919 },
3920 {
3921 .name = "cgroup.event_control", /* XXX: for compat */
3922 .write = memcg_write_event_control,
3923 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
3924 },
3925 {
3926 .name = "swappiness",
3927 .read_u64 = mem_cgroup_swappiness_read,
3928 .write_u64 = mem_cgroup_swappiness_write,
3929 },
3930 {
3931 .name = "move_charge_at_immigrate",
3932 .read_u64 = mem_cgroup_move_charge_read,
3933 .write_u64 = mem_cgroup_move_charge_write,
3934 },
3935 {
3936 .name = "oom_control",
3937 .seq_show = mem_cgroup_oom_control_read,
3938 .write_u64 = mem_cgroup_oom_control_write,
3939 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3940 },
3941 {
3942 .name = "pressure_level",
3943 },
3944#ifdef CONFIG_NUMA
3945 {
3946 .name = "numa_stat",
3947 .seq_show = memcg_numa_stat_show,
3948 },
3949#endif
3950 {
3951 .name = "kmem.limit_in_bytes",
3952 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
3953 .write = mem_cgroup_write,
3954 .read_u64 = mem_cgroup_read_u64,
3955 },
3956 {
3957 .name = "kmem.usage_in_bytes",
3958 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
3959 .read_u64 = mem_cgroup_read_u64,
3960 },
3961 {
3962 .name = "kmem.failcnt",
3963 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
3964 .write = mem_cgroup_reset,
3965 .read_u64 = mem_cgroup_read_u64,
3966 },
3967 {
3968 .name = "kmem.max_usage_in_bytes",
3969 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
3970 .write = mem_cgroup_reset,
3971 .read_u64 = mem_cgroup_read_u64,
3972 },
3973#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
3974 {
3975 .name = "kmem.slabinfo",
3976 .seq_start = memcg_slab_start,
3977 .seq_next = memcg_slab_next,
3978 .seq_stop = memcg_slab_stop,
3979 .seq_show = memcg_slab_show,
3980 },
3981#endif
3982 {
3983 .name = "kmem.tcp.limit_in_bytes",
3984 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
3985 .write = mem_cgroup_write,
3986 .read_u64 = mem_cgroup_read_u64,
3987 },
3988 {
3989 .name = "kmem.tcp.usage_in_bytes",
3990 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
3991 .read_u64 = mem_cgroup_read_u64,
3992 },
3993 {
3994 .name = "kmem.tcp.failcnt",
3995 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
3996 .write = mem_cgroup_reset,
3997 .read_u64 = mem_cgroup_read_u64,
3998 },
3999 {
4000 .name = "kmem.tcp.max_usage_in_bytes",
4001 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4002 .write = mem_cgroup_reset,
4003 .read_u64 = mem_cgroup_read_u64,
4004 },
4005 { }, /* terminate */
4006};
4007
4008/*
4009 * Private memory cgroup IDR
4010 *
4011 * Swap-out records and page cache shadow entries need to store memcg
4012 * references in constrained space, so we maintain an ID space that is
4013 * limited to 16 bits (MEM_CGROUP_ID_MAX), limiting the total number of
4014 * memory-controlled cgroups to 64k.
4015 *
4016 * However, there are usually many references to the offline CSS after
4017 * the cgroup has been destroyed, such as page cache or reclaimable
4018 * slab objects, that don't need to hang on to the ID. We want to keep
4019 * those dead CSS from occupying IDs, or we might quickly exhaust the
4020 * relatively small ID space and prevent the creation of new cgroups
4021 * even when there are far fewer than 64k cgroups - possibly none.
4022 *
4023 * Maintain a private 16-bit ID space for memcg, and allow the ID to
4024 * be freed and recycled when it's no longer needed, which is usually
4025 * when the CSS is offlined.
4026 *
4027 * The only exception to that are records of swapped out tmpfs/shmem
4028 * pages that need to be attributed to live ancestors on swapin. But
4029 * those references are manageable from userspace.
4030 */
4031
4032static DEFINE_IDR(mem_cgroup_idr);
4033
4034static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
4035{
4036 VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0);
4037 atomic_add(n, &memcg->id.ref);
4038}
4039
4040static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
4041{
4042 VM_BUG_ON(atomic_read(&memcg->id.ref) < n);
4043 if (atomic_sub_and_test(n, &memcg->id.ref)) {
4044 idr_remove(&mem_cgroup_idr, memcg->id.id);
4045 memcg->id.id = 0;
4046
4047 /* Memcg ID pins CSS */
4048 css_put(&memcg->css);
4049 }
4050}
4051
4052static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
4053{
4054 mem_cgroup_id_get_many(memcg, 1);
4055}
4056
4057static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
4058{
4059 mem_cgroup_id_put_many(memcg, 1);
4060}
4061
4062/**
4063 * mem_cgroup_from_id - look up a memcg from a memcg id
4064 * @id: the memcg id to look up
4065 *
4066 * Caller must hold rcu_read_lock().
4067 */
4068struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
4069{
4070 WARN_ON_ONCE(!rcu_read_lock_held());
4071 return idr_find(&mem_cgroup_idr, id);
4072}
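/*
 * Typical lookup pattern, mirroring the swapcache charge path further
 * below: the ID only keeps the structure from being freed under RCU,
 * not the cgroup online, so callers usually pin the css explicitly:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */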
4073
4074static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4075{
4076 struct mem_cgroup_per_node *pn;
4077 int tmp = node;
4078 /*
4079 * This routine is called against possible nodes, but it is a BUG
4080 * to call kmalloc() against an offline node.
4081 *
4082 * TODO: this routine can waste a lot of memory for nodes which
4083 * will never be onlined. It's better to use a memory hotplug
4084 * callback function.
4085 */
4086 if (!node_state(node, N_NORMAL_MEMORY))
4087 tmp = -1;
4088 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4089 if (!pn)
4090 return 1;
4091
4092 pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
4093 if (!pn->lruvec_stat_cpu) {
4094 kfree(pn);
4095 return 1;
4096 }
4097
4098 lruvec_init(&pn->lruvec);
4099 pn->usage_in_excess = 0;
4100 pn->on_tree = false;
4101 pn->memcg = memcg;
4102
4103 memcg->nodeinfo[node] = pn;
4104 return 0;
4105}
4106
4107static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4108{
4109 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
4110
4111 if (!pn)
4112 return;
4113
4114 free_percpu(pn->lruvec_stat_cpu);
4115 kfree(pn);
4116}
4117
4118static void __mem_cgroup_free(struct mem_cgroup *memcg)
4119{
4120 int node;
4121
4122 for_each_node(node)
4123 free_mem_cgroup_per_node_info(memcg, node);
4124 free_percpu(memcg->stat_cpu);
4125 kfree(memcg);
4126}
4127
4128static void mem_cgroup_free(struct mem_cgroup *memcg)
4129{
4130 memcg_wb_domain_exit(memcg);
4131 __mem_cgroup_free(memcg);
4132}
4133
4134static struct mem_cgroup *mem_cgroup_alloc(void)
4135{
4136 struct mem_cgroup *memcg;
4137 size_t size;
4138 int node;
4139
4140 size = sizeof(struct mem_cgroup);
4141 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
4142
4143 memcg = kzalloc(size, GFP_KERNEL);
4144 if (!memcg)
4145 return NULL;
4146
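	/*
	 * Reserve the ID but map it to NULL for now: a concurrent
	 * mem_cgroup_from_id() must not observe a half-initialized
	 * memcg. The real pointer is published with idr_replace()
	 * below, once setup has completed.
	 */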
4147 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
4148 1, MEM_CGROUP_ID_MAX,
4149 GFP_KERNEL);
4150 if (memcg->id.id < 0)
4151 goto fail;
4152
4153 memcg->stat_cpu = alloc_percpu(struct mem_cgroup_stat_cpu);
4154 if (!memcg->stat_cpu)
4155 goto fail;
4156
4157 for_each_node(node)
4158 if (alloc_mem_cgroup_per_node_info(memcg, node))
4159 goto fail;
4160
4161 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4162 goto fail;
4163
4164 INIT_WORK(&memcg->high_work, high_work_func);
4165 memcg->last_scanned_node = MAX_NUMNODES;
4166 INIT_LIST_HEAD(&memcg->oom_notify);
4167 mutex_init(&memcg->thresholds_lock);
4168 spin_lock_init(&memcg->move_lock);
4169 vmpressure_init(&memcg->vmpressure);
4170 INIT_LIST_HEAD(&memcg->event_list);
4171 spin_lock_init(&memcg->event_list_lock);
4172 memcg->socket_pressure = jiffies;
4173#ifndef CONFIG_SLOB
4174 memcg->kmemcg_id = -1;
4175#endif
4176#ifdef CONFIG_CGROUP_WRITEBACK
4177 INIT_LIST_HEAD(&memcg->cgwb_list);
4178#endif
4179 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
4180 return memcg;
4181fail:
4182 if (memcg->id.id > 0)
4183 idr_remove(&mem_cgroup_idr, memcg->id.id);
4184 __mem_cgroup_free(memcg);
4185 return NULL;
4186}
4187
4188static struct cgroup_subsys_state * __ref
4189mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4190{
4191 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4192 struct mem_cgroup *memcg;
4193 long error = -ENOMEM;
4194
4195 memcg = mem_cgroup_alloc();
4196 if (!memcg)
4197 return ERR_PTR(error);
4198
4199 memcg->high = PAGE_COUNTER_MAX;
4200 memcg->soft_limit = PAGE_COUNTER_MAX;
4201 if (parent) {
4202 memcg->swappiness = mem_cgroup_swappiness(parent);
4203 memcg->oom_kill_disable = parent->oom_kill_disable;
4204 }
4205 if (parent && parent->use_hierarchy) {
4206 memcg->use_hierarchy = true;
4207 page_counter_init(&memcg->memory, &parent->memory);
4208 page_counter_init(&memcg->swap, &parent->swap);
4209 page_counter_init(&memcg->memsw, &parent->memsw);
4210 page_counter_init(&memcg->kmem, &parent->kmem);
4211 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
4212 } else {
4213 page_counter_init(&memcg->memory, NULL);
4214 page_counter_init(&memcg->swap, NULL);
4215 page_counter_init(&memcg->memsw, NULL);
4216 page_counter_init(&memcg->kmem, NULL);
4217 page_counter_init(&memcg->tcpmem, NULL);
4218 /*
4219 * A deeper hierarchy with use_hierarchy == false doesn't make
4220 * much sense, so let the cgroup subsystem know about this
4221 * unfortunate state in our controller.
4222 */
4223 if (parent != root_mem_cgroup)
4224 memory_cgrp_subsys.broken_hierarchy = true;
4225 }
4226
4227 /* The following stuff does not apply to the root */
4228 if (!parent) {
4229 root_mem_cgroup = memcg;
4230 return &memcg->css;
4231 }
4232
4233 error = memcg_online_kmem(memcg);
4234 if (error)
4235 goto fail;
4236
4237 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4238 static_branch_inc(&memcg_sockets_enabled_key);
4239
4240 return &memcg->css;
4241fail:
4242 mem_cgroup_free(memcg);
4243 return ERR_PTR(-ENOMEM);
4244}
4245
4246static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
4247{
4248 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4249
4250 /* Online state pins memcg ID, memcg ID pins CSS */
4251 atomic_set(&memcg->id.ref, 1);
4252 css_get(css);
4253 return 0;
4254}
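/*
 * Lifetime of the ID reference set above, for reference: swap-out
 * records and moved swap charges take extra references via
 * mem_cgroup_id_get_many(), the online reference is dropped in
 * mem_cgroup_css_offline(), and the final mem_cgroup_id_put_many()
 * recycles the ID and releases the CSS pin taken here.
 */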
4255
4256static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4257{
4258 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4259 struct mem_cgroup_event *event, *tmp;
4260
4261 /*
4262 * Unregister events and notify userspace.
4263 * Notify userspace about cgroup removing only after rmdir of cgroup
4264 * directory to avoid race between userspace and kernelspace.
4265 */
4266 spin_lock(&memcg->event_list_lock);
4267 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
4268 list_del_init(&event->list);
4269 schedule_work(&event->remove);
4270 }
4271 spin_unlock(&memcg->event_list_lock);
4272
4273 memcg->low = 0;
4274
4275 memcg_offline_kmem(memcg);
4276 wb_memcg_offline(memcg);
4277
4278 mem_cgroup_id_put(memcg);
4279}
4280
4281static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4282{
4283 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4284
4285 invalidate_reclaim_iterators(memcg);
4286}
4287
4288static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
4289{
4290 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4291
4292 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4293 static_branch_dec(&memcg_sockets_enabled_key);
4294
4295 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
4296 static_branch_dec(&memcg_sockets_enabled_key);
4297
4298 vmpressure_cleanup(&memcg->vmpressure);
4299 cancel_work_sync(&memcg->high_work);
4300 mem_cgroup_remove_from_trees(memcg);
4301 memcg_free_kmem(memcg);
4302 mem_cgroup_free(memcg);
4303}
4304
4305/**
4306 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4307 * @css: the target css
4308 *
4309 * Reset the states of the mem_cgroup associated with @css. This is
4310 * invoked when the userland requests disabling on the default hierarchy
4311 * but the memcg is pinned through dependency. The memcg should stop
4312 * applying policies and should revert to the vanilla state as it may be
4313 * made visible again.
4314 *
4315 * The current implementation only resets the essential configurations.
4316 * This needs to be expanded to cover all the visible parts.
4317 */
4318static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4319{
4320 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4321
4322 page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX);
4323 page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX);
4324 page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX);
4325 page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX);
4326 page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX);
4327 memcg->low = 0;
4328 memcg->high = PAGE_COUNTER_MAX;
4329 memcg->soft_limit = PAGE_COUNTER_MAX;
4330 memcg_wb_domain_size_changed(memcg);
4331}
4332
4333#ifdef CONFIG_MMU
4334/* Handlers for move charge at task migration. */
4335static int mem_cgroup_do_precharge(unsigned long count)
4336{
4337 int ret;
4338
4339 /* Try a single bulk charge without reclaim first, kswapd may wake */
4340 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
4341 if (!ret) {
4342 mc.precharge += count;
4343 return ret;
4344 }
4345
4346 /* Try charges one by one with reclaim, but do not retry */
4347 while (count--) {
4348 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
4349 if (ret)
4350 return ret;
4351 mc.precharge++;
4352 cond_resched();
4353 }
4354 return 0;
4355}
4356
4357union mc_target {
4358 struct page *page;
4359 swp_entry_t ent;
4360};
4361
4362enum mc_target_type {
4363 MC_TARGET_NONE = 0,
4364 MC_TARGET_PAGE,
4365 MC_TARGET_SWAP,
4366 MC_TARGET_DEVICE,
4367};
4368
4369static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4370 unsigned long addr, pte_t ptent)
4371{
4372 struct page *page = _vm_normal_page(vma, addr, ptent, true);
4373
4374 if (!page || !page_mapped(page))
4375 return NULL;
4376 if (PageAnon(page)) {
4377 if (!(mc.flags & MOVE_ANON))
4378 return NULL;
4379 } else {
4380 if (!(mc.flags & MOVE_FILE))
4381 return NULL;
4382 }
4383 if (!get_page_unless_zero(page))
4384 return NULL;
4385
4386 return page;
4387}
4388
4389#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
4390static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4391 pte_t ptent, swp_entry_t *entry)
4392{
4393 struct page *page = NULL;
4394 swp_entry_t ent = pte_to_swp_entry(ptent);
4395
4396 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
4397 return NULL;
4398
4399 /*
4400 * Handle MEMORY_DEVICE_PRIVATE, i.e. ZONE_DEVICE pages belonging to
4401 * a device; because they are not accessible by the CPU, they are
4402 * stored as special swap entries in the CPU page table.
4403 */
4404 if (is_device_private_entry(ent)) {
4405 page = device_private_entry_to_page(ent);
4406 /*
4407 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has
4408 * a refcount of 1 when free (unlike a normal page)
4409 */
4410 if (!page_ref_add_unless(page, 1, 1))
4411 return NULL;
4412 return page;
4413 }
4414
4415 /*
4416 * Because lookup_swap_cache() updates some statistics counters,
4417 * we call find_get_page() with swapper_space directly.
4418 */
4419 page = find_get_page(swap_address_space(ent), swp_offset(ent));
4420 if (do_memsw_account())
4421 entry->val = ent.val;
4422
4423 return page;
4424}
4425#else
4426static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4427 pte_t ptent, swp_entry_t *entry)
4428{
4429 return NULL;
4430}
4431#endif
4432
4433static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4434 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4435{
4436 struct page *page = NULL;
4437 struct address_space *mapping;
4438 pgoff_t pgoff;
4439
4440 if (!vma->vm_file) /* anonymous vma */
4441 return NULL;
4442 if (!(mc.flags & MOVE_FILE))
4443 return NULL;
4444
4445 mapping = vma->vm_file->f_mapping;
4446 pgoff = linear_page_index(vma, addr);
4447
4448 /* The page is moved even if it's not RSS of this task (i.e. not yet faulted in). */
4449#ifdef CONFIG_SWAP
4450 /* shmem/tmpfs may report page out on swap: account for that too. */
4451 if (shmem_mapping(mapping)) {
4452 page = find_get_entry(mapping, pgoff);
4453 if (radix_tree_exceptional_entry(page)) {
4454 swp_entry_t swp = radix_to_swp_entry(page);
4455 if (do_memsw_account())
4456 *entry = swp;
4457 page = find_get_page(swap_address_space(swp),
4458 swp_offset(swp));
4459 }
4460 } else
4461 page = find_get_page(mapping, pgoff);
4462#else
4463 page = find_get_page(mapping, pgoff);
4464#endif
4465 return page;
4466}
4467
4468/**
4469 * mem_cgroup_move_account - move account of the page
4470 * @page: the page
4471 * @compound: charge the page as compound or small page
4472 * @from: mem_cgroup which the page is moved from.
4473 * @to: mem_cgroup which the page is moved to. @from != @to.
4474 *
4475 * The caller must make sure the page is not on the LRU (isolate_lru_page() is useful.)
4476 *
4477 * This function doesn't do "charge" to the new cgroup and doesn't do
4478 * "uncharge" from the old cgroup.
4479 */
4480static int mem_cgroup_move_account(struct page *page,
4481 bool compound,
4482 struct mem_cgroup *from,
4483 struct mem_cgroup *to)
4484{
4485 unsigned long flags;
4486 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
4487 int ret;
4488 bool anon;
4489
4490 VM_BUG_ON(from == to);
4491 VM_BUG_ON_PAGE(PageLRU(page), page);
4492 VM_BUG_ON(compound && !PageTransHuge(page));
4493
4494 /*
4495 * Prevent mem_cgroup_migrate() from looking at
4496 * page->mem_cgroup of its source page while we change it.
4497 */
4498 ret = -EBUSY;
4499 if (!trylock_page(page))
4500 goto out;
4501
4502 ret = -EINVAL;
4503 if (page->mem_cgroup != from)
4504 goto out_unlock;
4505
4506 anon = PageAnon(page);
4507
4508 spin_lock_irqsave(&from->move_lock, flags);
4509
4510 if (!anon && page_mapped(page)) {
4511 __mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages);
4512 __mod_memcg_state(to, NR_FILE_MAPPED, nr_pages);
4513 }
4514
4515 /*
4516 * move_lock is grabbed above and the caller set from->moving_account,
4517 * so mod_memcg_page_state() will serialize updates to PageDirty.
4518 * The mapping should therefore be stable for dirty pages.
4519 */
4520 if (!anon && PageDirty(page)) {
4521 struct address_space *mapping = page_mapping(page);
4522
4523 if (mapping_cap_account_dirty(mapping)) {
4524 __mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages);
4525 __mod_memcg_state(to, NR_FILE_DIRTY, nr_pages);
4526 }
4527 }
4528
4529 if (PageWriteback(page)) {
4530 __mod_memcg_state(from, NR_WRITEBACK, -nr_pages);
4531 __mod_memcg_state(to, NR_WRITEBACK, nr_pages);
4532 }
4533
4534 /*
4535 * It is safe to change page->mem_cgroup here because the page
4536 * is referenced, charged, and isolated - we can't race with
4537 * uncharging, charging, migration, or LRU putback.
4538 */
4539
4540 /* caller should have done css_get */
4541 page->mem_cgroup = to;
4542 spin_unlock_irqrestore(&from->move_lock, flags);
4543
4544 ret = 0;
4545
4546 local_irq_disable();
4547 mem_cgroup_charge_statistics(to, page, compound, nr_pages);
4548 memcg_check_events(to, page);
4549 mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
4550 memcg_check_events(from, page);
4551 local_irq_enable();
4552out_unlock:
4553 unlock_page(page);
4554out:
4555 return ret;
4556}
4557
4558/**
4559 * get_mctgt_type - get target type of moving charge
4560 * @vma: the vma the pte to be checked belongs to
4561 * @addr: the address corresponding to the pte to be checked
4562 * @ptent: the pte to be checked
4563 * @target: the pointer where the target page or swap entry is stored (can be NULL)
4564 *
4565 * Returns
4566 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
4567 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4568 * move charge. If @target is not NULL, the page is stored in target->page
4569 * with an extra refcount taken (callers should handle it).
4570 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4571 * target for charge migration. If @target is not NULL, the entry is stored
4572 * in target->ent.
4573 * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is MEMORY_DEVICE_PUBLIC
4574 * or MEMORY_DEVICE_PRIVATE (so a ZONE_DEVICE page and thus not on the LRU).
4575 * For now such a page is charged like a regular page would be, as for all
4576 * intents and purposes it is just special memory taking the place of a
4577 * regular page.
4578 *
4579 * See Documentation/vm/hmm.txt and include/linux/hmm.h
4580 *
4581 * Called with pte lock held.
4582 */
4583
4584static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
4585 unsigned long addr, pte_t ptent, union mc_target *target)
4586{
4587 struct page *page = NULL;
4588 enum mc_target_type ret = MC_TARGET_NONE;
4589 swp_entry_t ent = { .val = 0 };
4590
4591 if (pte_present(ptent))
4592 page = mc_handle_present_pte(vma, addr, ptent);
4593 else if (is_swap_pte(ptent))
4594 page = mc_handle_swap_pte(vma, ptent, &ent);
4595 else if (pte_none(ptent))
4596 page = mc_handle_file_pte(vma, addr, ptent, &ent);
4597
4598 if (!page && !ent.val)
4599 return ret;
4600 if (page) {
4601 /*
4602 * Do only a loose check w/o serialization.
4603 * mem_cgroup_move_account() checks whether the page is
4604 * valid under LRU exclusion.
4605 */
4606 if (page->mem_cgroup == mc.from) {
4607 ret = MC_TARGET_PAGE;
4608 if (is_device_private_page(page) ||
4609 is_device_public_page(page))
4610 ret = MC_TARGET_DEVICE;
4611 if (target)
4612 target->page = page;
4613 }
4614 if (!ret || !target)
4615 put_page(page);
4616 }
4617 /*
4618 * There is a swap entry and the page doesn't exist or isn't charged.
4619 * But we cannot move a tail page of a THP.
4620 */
4621 if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
4622 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
4623 ret = MC_TARGET_SWAP;
4624 if (target)
4625 target->ent = ent;
4626 }
4627 return ret;
4628}
4629
4630#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4631/*
4632 * We don't consider PMD mapped swapping or file mapped pages because THP does
4633 * not support them for now.
4634 * Caller should make sure that pmd_trans_huge(pmd) is true.
4635 */
4636static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4637 unsigned long addr, pmd_t pmd, union mc_target *target)
4638{
4639 struct page *page = NULL;
4640 enum mc_target_type ret = MC_TARGET_NONE;
4641
4642 if (unlikely(is_swap_pmd(pmd))) {
4643 VM_BUG_ON(thp_migration_supported() &&
4644 !is_pmd_migration_entry(pmd));
4645 return ret;
4646 }
4647 page = pmd_page(pmd);
4648 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
4649 if (!(mc.flags & MOVE_ANON))
4650 return ret;
4651 if (page->mem_cgroup == mc.from) {
4652 ret = MC_TARGET_PAGE;
4653 if (target) {
4654 get_page(page);
4655 target->page = page;
4656 }
4657 }
4658 return ret;
4659}
4660#else
4661static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4662 unsigned long addr, pmd_t pmd, union mc_target *target)
4663{
4664 return MC_TARGET_NONE;
4665}
4666#endif
4667
4668static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4669 unsigned long addr, unsigned long end,
4670 struct mm_walk *walk)
4671{
4672 struct vm_area_struct *vma = walk->vma;
4673 pte_t *pte;
4674 spinlock_t *ptl;
4675
4676 ptl = pmd_trans_huge_lock(pmd, vma);
4677 if (ptl) {
4678 /*
4679 * Note there cannot be MC_TARGET_DEVICE for now as we do not
4680 * support transparent huge pages with MEMORY_DEVICE_PUBLIC or
4681 * MEMORY_DEVICE_PRIVATE, but this might change.
4682 */
4683 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
4684 mc.precharge += HPAGE_PMD_NR;
4685 spin_unlock(ptl);
4686 return 0;
4687 }
4688
4689 if (pmd_trans_unstable(pmd))
4690 return 0;
4691 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4692 for (; addr != end; pte++, addr += PAGE_SIZE)
4693 if (get_mctgt_type(vma, addr, *pte, NULL))
4694 mc.precharge++; /* increment precharge temporarily */
4695 pte_unmap_unlock(pte - 1, ptl);
4696 cond_resched();
4697
4698 return 0;
4699}
4700
4701static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4702{
4703 unsigned long precharge;
4704
4705 struct mm_walk mem_cgroup_count_precharge_walk = {
4706 .pmd_entry = mem_cgroup_count_precharge_pte_range,
4707 .mm = mm,
4708 };
4709 down_read(&mm->mmap_sem);
4710 walk_page_range(0, mm->highest_vm_end,
4711 &mem_cgroup_count_precharge_walk);
4712 up_read(&mm->mmap_sem);
4713
4714 precharge = mc.precharge;
4715 mc.precharge = 0;
4716
4717 return precharge;
4718}
4719
4720static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4721{
4722 unsigned long precharge = mem_cgroup_count_precharge(mm);
4723
4724 VM_BUG_ON(mc.moving_task);
4725 mc.moving_task = current;
4726 return mem_cgroup_do_precharge(precharge);
4727}
4728
4729/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
4730static void __mem_cgroup_clear_mc(void)
4731{
4732 struct mem_cgroup *from = mc.from;
4733 struct mem_cgroup *to = mc.to;
4734
4735 /* we must uncharge all the leftover precharges from mc.to */
4736 if (mc.precharge) {
4737 cancel_charge(mc.to, mc.precharge);
4738 mc.precharge = 0;
4739 }
4740 /*
4741 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4742 * we must uncharge here.
4743 */
4744 if (mc.moved_charge) {
4745 cancel_charge(mc.from, mc.moved_charge);
4746 mc.moved_charge = 0;
4747 }
4748 /* we must fixup refcnts and charges */
4749 if (mc.moved_swap) {
4750 /* uncharge swap account from the old cgroup */
4751 if (!mem_cgroup_is_root(mc.from))
4752 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
4753
4754 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
4755
4756 /*
4757 * we charged both to->memory and to->memsw, so we
4758 * should uncharge to->memory.
4759 */
4760 if (!mem_cgroup_is_root(mc.to))
4761 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
4762
4763 mem_cgroup_id_get_many(mc.to, mc.moved_swap);
4764 css_put_many(&mc.to->css, mc.moved_swap);
4765
4766 mc.moved_swap = 0;
4767 }
4768 memcg_oom_recover(from);
4769 memcg_oom_recover(to);
4770 wake_up_all(&mc.waitq);
4771}
4772
4773static void mem_cgroup_clear_mc(void)
4774{
4775 struct mm_struct *mm = mc.mm;
4776
4777 /*
4778 * we must clear moving_task before waking up waiters at the end of
4779 * task migration.
4780 */
4781 mc.moving_task = NULL;
4782 __mem_cgroup_clear_mc();
4783 spin_lock(&mc.lock);
4784 mc.from = NULL;
4785 mc.to = NULL;
4786 mc.mm = NULL;
4787 spin_unlock(&mc.lock);
4788
4789 mmput(mm);
4790}
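/*
 * The move-charge state machine, for reference: mem_cgroup_can_attach()
 * fills mc.from/mc.to and precharges the expected number of pages,
 * post_attach (mem_cgroup_move_task()) walks the page tables and moves
 * the charges, and mem_cgroup_clear_mc() above undoes any leftover
 * precharges and wakes up waiters.
 */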
4791
4792static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4793{
4794 struct cgroup_subsys_state *css;
4795 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
4796 struct mem_cgroup *from;
4797 struct task_struct *leader, *p;
4798 struct mm_struct *mm;
4799 unsigned long move_flags;
4800 int ret = 0;
4801
4802 /* charge immigration isn't supported on the default hierarchy */
4803 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4804 return 0;
4805
4806 /*
4807 * Multi-process migrations only happen on the default hierarchy
4808 * where charge immigration is not used. Perform charge
4809 * immigration if @tset contains a leader and whine if there are
4810 * multiple.
4811 */
4812 p = NULL;
4813 cgroup_taskset_for_each_leader(leader, css, tset) {
4814 WARN_ON_ONCE(p);
4815 p = leader;
4816 memcg = mem_cgroup_from_css(css);
4817 }
4818 if (!p)
4819 return 0;
4820
4821 /*
4822 * We are now committed to this value whatever it is. Changes in this
4823 * tunable will only affect upcoming migrations, not the current one.
4824 * So we need to save it, and keep it going.
4825 */
4826 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4827 if (!move_flags)
4828 return 0;
4829
4830 from = mem_cgroup_from_task(p);
4831
4832 VM_BUG_ON(from == memcg);
4833
4834 mm = get_task_mm(p);
4835 if (!mm)
4836 return 0;
4837 /* We move charges only when we move an owner of the mm */
4838 if (mm->owner == p) {
4839 VM_BUG_ON(mc.from);
4840 VM_BUG_ON(mc.to);
4841 VM_BUG_ON(mc.precharge);
4842 VM_BUG_ON(mc.moved_charge);
4843 VM_BUG_ON(mc.moved_swap);
4844
4845 spin_lock(&mc.lock);
4846 mc.mm = mm;
4847 mc.from = from;
4848 mc.to = memcg;
4849 mc.flags = move_flags;
4850 spin_unlock(&mc.lock);
4851 /* We set mc.moving_task later */
4852
4853 ret = mem_cgroup_precharge_mc(mm);
4854 if (ret)
4855 mem_cgroup_clear_mc();
4856 } else {
4857 mmput(mm);
4858 }
4859 return ret;
4860}
4861
4862static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4863{
4864 if (mc.to)
4865 mem_cgroup_clear_mc();
4866}
4867
4868static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4869 unsigned long addr, unsigned long end,
4870 struct mm_walk *walk)
4871{
4872 int ret = 0;
4873 struct vm_area_struct *vma = walk->vma;
4874 pte_t *pte;
4875 spinlock_t *ptl;
4876 enum mc_target_type target_type;
4877 union mc_target target;
4878 struct page *page;
4879
4880 ptl = pmd_trans_huge_lock(pmd, vma);
4881 if (ptl) {
4882 if (mc.precharge < HPAGE_PMD_NR) {
4883 spin_unlock(ptl);
4884 return 0;
4885 }
4886 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
4887 if (target_type == MC_TARGET_PAGE) {
4888 page = target.page;
4889 if (!isolate_lru_page(page)) {
4890 if (!mem_cgroup_move_account(page, true,
4891 mc.from, mc.to)) {
4892 mc.precharge -= HPAGE_PMD_NR;
4893 mc.moved_charge += HPAGE_PMD_NR;
4894 }
4895 putback_lru_page(page);
4896 }
4897 put_page(page);
4898 } else if (target_type == MC_TARGET_DEVICE) {
4899 page = target.page;
4900 if (!mem_cgroup_move_account(page, true,
4901 mc.from, mc.to)) {
4902 mc.precharge -= HPAGE_PMD_NR;
4903 mc.moved_charge += HPAGE_PMD_NR;
4904 }
4905 put_page(page);
4906 }
4907 spin_unlock(ptl);
4908 return 0;
4909 }
4910
4911 if (pmd_trans_unstable(pmd))
4912 return 0;
4913retry:
4914 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4915 for (; addr != end; addr += PAGE_SIZE) {
4916 pte_t ptent = *(pte++);
4917 bool device = false;
4918 swp_entry_t ent;
4919
4920 if (!mc.precharge)
4921 break;
4922
4923 switch (get_mctgt_type(vma, addr, ptent, &target)) {
4924 case MC_TARGET_DEVICE:
4925 device = true;
4926 /* fall through */
4927 case MC_TARGET_PAGE:
4928 page = target.page;
4929 /*
4930 * We can have a part of the split pmd here. Moving it
4931 * can be done, but it would be too convoluted, so simply
4932 * ignore such a partial THP and keep it in the original
4933 * memcg. There should be somebody mapping the head.
4934 */
4935 if (PageTransCompound(page))
4936 goto put;
4937 if (!device && isolate_lru_page(page))
4938 goto put;
4939 if (!mem_cgroup_move_account(page, false,
4940 mc.from, mc.to)) {
4941 mc.precharge--;
4942 /* we uncharge from mc.from later. */
4943 mc.moved_charge++;
4944 }
4945 if (!device)
4946 putback_lru_page(page);
4947put: /* get_mctgt_type() gets the page */
4948 put_page(page);
4949 break;
4950 case MC_TARGET_SWAP:
4951 ent = target.ent;
4952 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
4953 mc.precharge--;
4954 /* we fixup refcnts and charges later. */
4955 mc.moved_swap++;
4956 }
4957 break;
4958 default:
4959 break;
4960 }
4961 }
4962 pte_unmap_unlock(pte - 1, ptl);
4963 cond_resched();
4964
4965 if (addr != end) {
4966 /*
4967 * We have consumed all precharges we got in can_attach().
4968 * We try to charge one by one, but don't do any additional
4969 * charges to mc.to if we have failed to charge once in the
4970 * attach() phase.
4971 */
4972 ret = mem_cgroup_do_precharge(1);
4973 if (!ret)
4974 goto retry;
4975 }
4976
4977 return ret;
4978}
4979
4980static void mem_cgroup_move_charge(void)
4981{
4982 struct mm_walk mem_cgroup_move_charge_walk = {
4983 .pmd_entry = mem_cgroup_move_charge_pte_range,
4984 .mm = mc.mm,
4985 };
4986
4987 lru_add_drain_all();
4988 /*
4989 * Signal lock_page_memcg() to take the memcg's move_lock
4990 * while we're moving its pages to another memcg. Then wait
4991 * for already started RCU-only updates to finish.
4992 */
4993 atomic_inc(&mc.from->moving_account);
4994 synchronize_rcu();
4995retry:
4996 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
4997 /*
4998 * Someone who is holding the mmap_sem might be waiting on the
4999 * waitq. So we cancel all extra charges, wake up all waiters,
5000 * and retry. Because we cancel precharges, we might not be able
5001 * to move enough charges, but moving charge is a best-effort
5002 * feature anyway, so it wouldn't be a big problem.
5003 */
5004 __mem_cgroup_clear_mc();
5005 cond_resched();
5006 goto retry;
5007 }
5008 /*
5009 * When we have consumed all precharges and failed in doing
5010 * additional charge, the page walk just aborts.
5011 */
5012 walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk);
5013
5014 up_read(&mc.mm->mmap_sem);
5015 atomic_dec(&mc.from->moving_account);
5016}
5017
5018static void mem_cgroup_move_task(void)
5019{
5020 if (mc.to) {
5021 mem_cgroup_move_charge();
5022 mem_cgroup_clear_mc();
5023 }
5024}
5025#else /* !CONFIG_MMU */
5026static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5027{
5028 return 0;
5029}
5030static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5031{
5032}
5033static void mem_cgroup_move_task(void)
5034{
5035}
5036#endif
5037
5038/*
5039 * Cgroup retains root cgroups across [un]mount cycles making it necessary
5040 * to verify whether we're attached to the default hierarchy on each mount
5041 * attempt.
5042 */
5043static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
5044{
5045 /*
5046 * use_hierarchy is forced on the default hierarchy. cgroup core
5047 * guarantees that @root doesn't have any children, so turning it
5048 * on for the root memcg is enough.
5049 */
5050 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5051 root_mem_cgroup->use_hierarchy = true;
5052 else
5053 root_mem_cgroup->use_hierarchy = false;
5054}
5055
5056static u64 memory_current_read(struct cgroup_subsys_state *css,
5057 struct cftype *cft)
5058{
5059 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5060
5061 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
5062}
5063
5064static int memory_low_show(struct seq_file *m, void *v)
5065{
5066 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5067 unsigned long low = READ_ONCE(memcg->low);
5068
5069 if (low == PAGE_COUNTER_MAX)
5070 seq_puts(m, "max\n");
5071 else
5072 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
5073
5074 return 0;
5075}
5076
5077static ssize_t memory_low_write(struct kernfs_open_file *of,
5078 char *buf, size_t nbytes, loff_t off)
5079{
5080 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5081 unsigned long low;
5082 int err;
5083
5084 buf = strstrip(buf);
5085 err = page_counter_memparse(buf, "max", &low);
5086 if (err)
5087 return err;
5088
5089 memcg->low = low;
5090
5091 return nbytes;
5092}
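/*
 * Illustrative usage from the shell (the cgroup path is an example):
 *
 *	echo 512M > /sys/fs/cgroup/foo/memory.low
 *	echo max > /sys/fs/cgroup/foo/memory.low
 *
 * page_counter_memparse() accepts memparse() byte suffixes (K, M, G)
 * as well as the literal "max".
 */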
5093
5094static int memory_high_show(struct seq_file *m, void *v)
5095{
5096 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5097 unsigned long high = READ_ONCE(memcg->high);
5098
5099 if (high == PAGE_COUNTER_MAX)
5100 seq_puts(m, "max\n");
5101 else
5102 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
5103
5104 return 0;
5105}
5106
5107static ssize_t memory_high_write(struct kernfs_open_file *of,
5108 char *buf, size_t nbytes, loff_t off)
5109{
5110 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5111 unsigned long nr_pages;
5112 unsigned long high;
5113 int err;
5114
5115 buf = strstrip(buf);
5116 err = page_counter_memparse(buf, "max", &high);
5117 if (err)
5118 return err;
5119
5120 memcg->high = high;
5121
5122 nr_pages = page_counter_read(&memcg->memory);
5123 if (nr_pages > high)
5124 try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
5125 GFP_KERNEL, true);
5126
5127 memcg_wb_domain_size_changed(memcg);
5128 return nbytes;
5129}
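/*
 * Note that "high" is a reclaim target rather than a hard limit:
 * lowering it above reclaims the current excess once, while ongoing
 * enforcement happens from the charge path, which throttles and
 * reclaims when usage crosses the high boundary instead of failing
 * the allocation.
 */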
5130
5131static int memory_max_show(struct seq_file *m, void *v)
5132{
5133 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5134 unsigned long max = READ_ONCE(memcg->memory.limit);
5135
5136 if (max == PAGE_COUNTER_MAX)
5137 seq_puts(m, "max\n");
5138 else
5139 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5140
5141 return 0;
5142}
5143
5144static ssize_t memory_max_write(struct kernfs_open_file *of,
5145 char *buf, size_t nbytes, loff_t off)
5146{
5147 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5148 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
5149 bool drained = false;
5150 unsigned long max;
5151 int err;
5152
5153 buf = strstrip(buf);
5154 err = page_counter_memparse(buf, "max", &max);
5155 if (err)
5156 return err;
5157
5158 xchg(&memcg->memory.limit, max);
5159
5160 for (;;) {
5161 unsigned long nr_pages = page_counter_read(&memcg->memory);
5162
5163 if (nr_pages <= max)
5164 break;
5165
5166 if (signal_pending(current)) {
5167 err = -EINTR;
5168 break;
5169 }
5170
5171 if (!drained) {
5172 drain_all_stock(memcg);
5173 drained = true;
5174 continue;
5175 }
5176
5177 if (nr_reclaims) {
5178 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
5179 GFP_KERNEL, true))
5180 nr_reclaims--;
5181 continue;
5182 }
5183
5184 memcg_memory_event(memcg, MEMCG_OOM);
5185 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
5186 break;
5187 }
5188
5189 memcg_wb_domain_size_changed(memcg);
5190 return nbytes;
5191}
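/*
 * The enforcement loop above escalates in stages when the new limit
 * is below current usage: drain the per-cpu charge caches first, then
 * try direct reclaim up to MEM_CGROUP_RECLAIM_RETRIES times, and
 * finally fall back to the OOM killer until usage fits or no further
 * progress is made.
 */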
5192
5193static int memory_events_show(struct seq_file *m, void *v)
5194{
5195 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5196
5197 seq_printf(m, "low %lu\n",
5198 atomic_long_read(&memcg->memory_events[MEMCG_LOW]));
5199 seq_printf(m, "high %lu\n",
5200 atomic_long_read(&memcg->memory_events[MEMCG_HIGH]));
5201 seq_printf(m, "max %lu\n",
5202 atomic_long_read(&memcg->memory_events[MEMCG_MAX]));
5203 seq_printf(m, "oom %lu\n",
5204 atomic_long_read(&memcg->memory_events[MEMCG_OOM]));
5205 seq_printf(m, "oom_kill %lu\n", memcg_sum_events(memcg, OOM_KILL));
5206
5207 return 0;
5208}
5209
5210static int memory_stat_show(struct seq_file *m, void *v)
5211{
5212 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5213 unsigned long stat[MEMCG_NR_STAT];
5214 unsigned long events[NR_VM_EVENT_ITEMS];
5215 int i;
5216
5217 /*
5218 * Provide statistics on the state of the memory subsystem as
5219 * well as cumulative event counters that show past behavior.
5220 *
5221 * This list is ordered following a combination of these gradients:
5222 * 1) generic big picture -> specifics and details
5223 * 2) reflecting userspace activity -> reflecting kernel heuristics
5224 *
5225 * Current memory state:
5226 */
5227
5228 tree_stat(memcg, stat);
5229 tree_events(memcg, events);
5230
5231 seq_printf(m, "anon %llu\n",
5232 (u64)stat[MEMCG_RSS] * PAGE_SIZE);
5233 seq_printf(m, "file %llu\n",
5234 (u64)stat[MEMCG_CACHE] * PAGE_SIZE);
5235 seq_printf(m, "kernel_stack %llu\n",
5236 (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
5237 seq_printf(m, "slab %llu\n",
5238 (u64)(stat[NR_SLAB_RECLAIMABLE] +
5239 stat[NR_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
5240 seq_printf(m, "sock %llu\n",
5241 (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
5242
5243 seq_printf(m, "shmem %llu\n",
5244 (u64)stat[NR_SHMEM] * PAGE_SIZE);
5245 seq_printf(m, "file_mapped %llu\n",
5246 (u64)stat[NR_FILE_MAPPED] * PAGE_SIZE);
5247 seq_printf(m, "file_dirty %llu\n",
5248 (u64)stat[NR_FILE_DIRTY] * PAGE_SIZE);
5249 seq_printf(m, "file_writeback %llu\n",
5250 (u64)stat[NR_WRITEBACK] * PAGE_SIZE);
5251
5252 for (i = 0; i < NR_LRU_LISTS; i++) {
5253 struct mem_cgroup *mi;
5254 unsigned long val = 0;
5255
5256 for_each_mem_cgroup_tree(mi, memcg)
5257 val += mem_cgroup_nr_lru_pages(mi, BIT(i));
5258 seq_printf(m, "%s %llu\n",
5259 mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
5260 }
5261
5262 seq_printf(m, "slab_reclaimable %llu\n",
5263 (u64)stat[NR_SLAB_RECLAIMABLE] * PAGE_SIZE);
5264 seq_printf(m, "slab_unreclaimable %llu\n",
5265 (u64)stat[NR_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
5266
5267 /* Accumulated memory events */
5268
5269 seq_printf(m, "pgfault %lu\n", events[PGFAULT]);
5270 seq_printf(m, "pgmajfault %lu\n", events[PGMAJFAULT]);
5271
5272 seq_printf(m, "pgrefill %lu\n", events[PGREFILL]);
5273 seq_printf(m, "pgscan %lu\n", events[PGSCAN_KSWAPD] +
5274 events[PGSCAN_DIRECT]);
5275 seq_printf(m, "pgsteal %lu\n", events[PGSTEAL_KSWAPD] +
5276 events[PGSTEAL_DIRECT]);
5277 seq_printf(m, "pgactivate %lu\n", events[PGACTIVATE]);
5278 seq_printf(m, "pgdeactivate %lu\n", events[PGDEACTIVATE]);
5279 seq_printf(m, "pglazyfree %lu\n", events[PGLAZYFREE]);
5280 seq_printf(m, "pglazyfreed %lu\n", events[PGLAZYFREED]);
5281
5282 seq_printf(m, "workingset_refault %lu\n",
5283 stat[WORKINGSET_REFAULT]);
5284 seq_printf(m, "workingset_activate %lu\n",
5285 stat[WORKINGSET_ACTIVATE]);
5286 seq_printf(m, "workingset_nodereclaim %lu\n",
5287 stat[WORKINGSET_NODERECLAIM]);
5288
5289 return 0;
5290}
5291
5292static struct cftype memory_files[] = {
5293 {
5294 .name = "current",
5295 .flags = CFTYPE_NOT_ON_ROOT,
5296 .read_u64 = memory_current_read,
5297 },
5298 {
5299 .name = "low",
5300 .flags = CFTYPE_NOT_ON_ROOT,
5301 .seq_show = memory_low_show,
5302 .write = memory_low_write,
5303 },
5304 {
5305 .name = "high",
5306 .flags = CFTYPE_NOT_ON_ROOT,
5307 .seq_show = memory_high_show,
5308 .write = memory_high_write,
5309 },
5310 {
5311 .name = "max",
5312 .flags = CFTYPE_NOT_ON_ROOT,
5313 .seq_show = memory_max_show,
5314 .write = memory_max_write,
5315 },
5316 {
5317 .name = "events",
5318 .flags = CFTYPE_NOT_ON_ROOT,
5319 .file_offset = offsetof(struct mem_cgroup, events_file),
5320 .seq_show = memory_events_show,
5321 },
5322 {
5323 .name = "stat",
5324 .flags = CFTYPE_NOT_ON_ROOT,
5325 .seq_show = memory_stat_show,
5326 },
5327 { } /* terminate */
5328};
5329
5330struct cgroup_subsys memory_cgrp_subsys = {
5331 .css_alloc = mem_cgroup_css_alloc,
5332 .css_online = mem_cgroup_css_online,
5333 .css_offline = mem_cgroup_css_offline,
5334 .css_released = mem_cgroup_css_released,
5335 .css_free = mem_cgroup_css_free,
5336 .css_reset = mem_cgroup_css_reset,
5337 .can_attach = mem_cgroup_can_attach,
5338 .cancel_attach = mem_cgroup_cancel_attach,
5339 .post_attach = mem_cgroup_move_task,
5340 .bind = mem_cgroup_bind,
5341 .dfl_cftypes = memory_files,
5342 .legacy_cftypes = mem_cgroup_legacy_files,
5343 .early_init = 0,
5344};
5345
5346/**
5347 * mem_cgroup_low - check if memory consumption is below the normal range
5348 * @root: the top ancestor of the sub-tree being checked
5349 * @memcg: the memory cgroup to check
5350 *
5351 * Returns %true if memory consumption of @memcg, and that of all
5352 * ancestors up to (but not including) @root, is below the normal range.
5353 *
5354 * @root is exclusive; it is never low when looked at directly and isn't
5355 * checked when traversing the hierarchy.
5356 *
5357 * Excluding @root enables using memory.low to prioritize memory usage
5358 * between cgroups within a subtree of the hierarchy that is limited by
5359 * memory.high or memory.max.
5360 *
5361 * For example, given cgroup A with children B and C:
5362 *
5363 * A
5364 * / \
5365 * B C
5366 *
5367 * and
5368 *
5369 * 1. A/memory.current > A/memory.high
5370 * 2. A/B/memory.current < A/B/memory.low
5371 * 3. A/C/memory.current >= A/C/memory.low
5372 *
5373 * As 'A' is high, i.e. triggers reclaim from 'A', and 'B' is low, we
5374 * should reclaim from 'C' until 'A' is no longer high or until we can
5375 * no longer reclaim from 'C'. If 'A', i.e. @root, isn't excluded by
5376 * mem_cgroup_low when reclaiming from 'A', then 'B' won't be considered
5377 * low and we will reclaim indiscriminately from both 'B' and 'C'.
5378 */
5379bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
5380{
5381 if (mem_cgroup_disabled())
5382 return false;
5383
5384 if (!root)
5385 root = root_mem_cgroup;
5386 if (memcg == root)
5387 return false;
5388
5389 for (; memcg != root; memcg = parent_mem_cgroup(memcg)) {
5390 if (page_counter_read(&memcg->memory) >= memcg->low)
5391 return false;
5392 }
5393
5394 return true;
5395}
5396
5397/**
5398 * mem_cgroup_try_charge - try charging a page
5399 * @page: page to charge
5400 * @mm: mm context of the victim
5401 * @gfp_mask: reclaim mode
5402 * @memcgp: charged memcg return
5403 * @compound: charge the page as compound or small page
5404 *
5405 * Try to charge @page to the memcg that @mm belongs to, reclaiming
5406 * pages according to @gfp_mask if necessary.
5407 *
5408 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5409 * Otherwise, an error code is returned.
5410 *
5411 * After page->mapping has been set up, the caller must finalize the
5412 * charge with mem_cgroup_commit_charge(). Or abort the transaction
5413 * with mem_cgroup_cancel_charge() in case page instantiation fails.
5414 */
5415int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
5416 gfp_t gfp_mask, struct mem_cgroup **memcgp,
5417 bool compound)
5418{
5419 struct mem_cgroup *memcg = NULL;
5420 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5421 int ret = 0;
5422
5423 if (mem_cgroup_disabled())
5424 goto out;
5425
5426 if (PageSwapCache(page)) {
5427 /*
5428 * Every swap fault against a single page tries to charge the
5429 * page, so bail out as early as possible. shmem_unuse() encounters
5430 * already charged pages, too. The USED bit is protected by
5431 * the page lock, which serializes swap cache removal, which
5432 * in turn serializes uncharging.
5433 */
5434 VM_BUG_ON_PAGE(!PageLocked(page), page);
5435 if (compound_head(page)->mem_cgroup)
5436 goto out;
5437
5438 if (do_swap_account) {
5439 swp_entry_t ent = { .val = page_private(page), };
5440 unsigned short id = lookup_swap_cgroup_id(ent);
5441
5442 rcu_read_lock();
5443 memcg = mem_cgroup_from_id(id);
5444 if (memcg && !css_tryget_online(&memcg->css))
5445 memcg = NULL;
5446 rcu_read_unlock();
5447 }
5448 }
5449
5450 if (!memcg)
5451 memcg = get_mem_cgroup_from_mm(mm);
5452
5453 ret = try_charge(memcg, gfp_mask, nr_pages);
5454
5455 css_put(&memcg->css);
5456out:
5457 *memcgp = memcg;
5458 return ret;
5459}
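/*
 * A minimal caller-side sketch of the charge transaction (labels and
 * error paths are placeholders):
 *
 *	if (mem_cgroup_try_charge(page, mm, gfp_mask, &memcg, false))
 *		goto fail;
 *	...set up page->mapping / page tables...
 *	if (error) {
 *		mem_cgroup_cancel_charge(page, memcg, false);
 *		goto fail;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 */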
5460
5461/**
5462 * mem_cgroup_commit_charge - commit a page charge
5463 * @page: page to charge
5464 * @memcg: memcg to charge the page to
5465 * @lrucare: page might be on LRU already
5466 * @compound: charge the page as compound or small page
5467 *
5468 * Finalize a charge transaction started by mem_cgroup_try_charge(),
5469 * after page->mapping has been set up. This must happen atomically
5470 * as part of the page instantiation, i.e. under the page table lock
5471 * for anonymous pages, under the page lock for page and swap cache.
5472 *
5473 * In addition, the page must not be on the LRU during the commit, to
5474 * prevent racing with task migration. If it might be, use @lrucare.
5475 *
5476 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
5477 */
5478void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
5479 bool lrucare, bool compound)
5480{
5481 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5482
5483 VM_BUG_ON_PAGE(!page->mapping, page);
5484 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5485
5486 if (mem_cgroup_disabled())
5487 return;
5488 /*
5489 * Swap faults will attempt to charge the same page multiple
5490 * times. But reuse_swap_page() might have removed the page
5491 * from swapcache already, so we can't check PageSwapCache().
5492 */
5493 if (!memcg)
5494 return;
5495
5496 commit_charge(page, memcg, lrucare);
5497
5498 local_irq_disable();
5499 mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
5500 memcg_check_events(memcg, page);
5501 local_irq_enable();
5502
5503 if (do_memsw_account() && PageSwapCache(page)) {
5504 swp_entry_t entry = { .val = page_private(page) };
5505 /*
5506 * The swap entry might not get freed for a long time,
5507 * let's not wait for it. The page already received a
5508 * memory+swap charge, drop the swap entry duplicate.
5509 */
5510 mem_cgroup_uncharge_swap(entry, nr_pages);
5511 }
5512}
5513
5514/**
5515 * mem_cgroup_cancel_charge - cancel a page charge
5516 * @page: page to charge
5517 * @memcg: memcg to charge the page to
5518 * @compound: charge the page as compound or small page
5519 *
5520 * Cancel a charge transaction started by mem_cgroup_try_charge().
5521 */
5522void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
5523 bool compound)
5524{
5525 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5526
5527 if (mem_cgroup_disabled())
5528 return;
5529 /*
5530 * Swap faults will attempt to charge the same page multiple
5531 * times. But reuse_swap_page() might have removed the page
5532 * from swapcache already, so we can't check PageSwapCache().
5533 */
5534 if (!memcg)
5535 return;
5536
5537 cancel_charge(memcg, nr_pages);
5538}
5539
5540struct uncharge_gather {
5541 struct mem_cgroup *memcg;
5542 unsigned long pgpgout;
5543 unsigned long nr_anon;
5544 unsigned long nr_file;
5545 unsigned long nr_kmem;
5546 unsigned long nr_huge;
5547 unsigned long nr_shmem;
5548 struct page *dummy_page;
5549};
5550
5551static inline void uncharge_gather_clear(struct uncharge_gather *ug)
5552{
5553 memset(ug, 0, sizeof(*ug));
5554}
5555
5556static void uncharge_batch(const struct uncharge_gather *ug)
5557{
5558 unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem;
5559 unsigned long flags;
5560
5561 if (!mem_cgroup_is_root(ug->memcg)) {
5562 page_counter_uncharge(&ug->memcg->memory, nr_pages);
5563 if (do_memsw_account())
5564 page_counter_uncharge(&ug->memcg->memsw, nr_pages);
5565 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
5566 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
5567 memcg_oom_recover(ug->memcg);
5568 }
5569
5570 local_irq_save(flags);
5571 __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
5572 __mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
5573 __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
5574 __mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
5575 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
5576 __this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
5577 memcg_check_events(ug->memcg, ug->dummy_page);
5578 local_irq_restore(flags);
5579
5580 if (!mem_cgroup_is_root(ug->memcg))
5581 css_put_many(&ug->memcg->css, nr_pages);
5582}
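/*
 * The gathering above amortizes the expensive parts of uncharging:
 * consecutive pages belonging to the same memcg are accumulated in
 * struct uncharge_gather and settled with a single batch of
 * page_counter, vmstat and css refcount operations per run.
 */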
5583
5584static void uncharge_page(struct page *page, struct uncharge_gather *ug)
5585{
5586 VM_BUG_ON_PAGE(PageLRU(page), page);
5587 VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) &&
5588 !PageHWPoison(page), page);
5589
5590 if (!page->mem_cgroup)
5591 return;
5592
5593 /*
5594 * Nobody should be changing or seriously looking at
5595 * page->mem_cgroup at this point; we have fully
5596 * exclusive access to the page.
5597 */
5598
5599 if (ug->memcg != page->mem_cgroup) {
5600 if (ug->memcg) {
5601 uncharge_batch(ug);
5602 uncharge_gather_clear(ug);
5603 }
5604 ug->memcg = page->mem_cgroup;
5605 }
5606
5607 if (!PageKmemcg(page)) {
5608 unsigned int nr_pages = 1;
5609
5610 if (PageTransHuge(page)) {
5611 nr_pages <<= compound_order(page);
5612 ug->nr_huge += nr_pages;
5613 }
5614 if (PageAnon(page))
5615 ug->nr_anon += nr_pages;
5616 else {
5617 ug->nr_file += nr_pages;
5618 if (PageSwapBacked(page))
5619 ug->nr_shmem += nr_pages;
5620 }
5621 ug->pgpgout++;
5622 } else {
5623 ug->nr_kmem += 1 << compound_order(page);
5624 __ClearPageKmemcg(page);
5625 }
5626
5627 ug->dummy_page = page;
5628 page->mem_cgroup = NULL;
5629}
5630
5631static void uncharge_list(struct list_head *page_list)
5632{
5633 struct uncharge_gather ug;
5634 struct list_head *next;
5635
5636 uncharge_gather_clear(&ug);
5637
5638 /*
5639 * Note that the list can be a single page->lru; hence the
5640 * do-while loop instead of a simple list_for_each_entry().
5641 */
5642 next = page_list->next;
5643 do {
5644 struct page *page;
5645
5646 page = list_entry(next, struct page, lru);
5647 next = page->lru.next;
5648
5649 uncharge_page(page, &ug);
5650 } while (next != page_list);
5651
5652 if (ug.memcg)
5653 uncharge_batch(&ug);
5654}
5655
5656/**
5657 * mem_cgroup_uncharge - uncharge a page
5658 * @page: page to uncharge
5659 *
5660 * Uncharge a page previously charged with mem_cgroup_try_charge() and
5661 * mem_cgroup_commit_charge().
5662 */
5663void mem_cgroup_uncharge(struct page *page)
5664{
5665 struct uncharge_gather ug;
5666
5667 if (mem_cgroup_disabled())
5668 return;
5669
5670 /* Don't touch page->lru of any random page, pre-check: */
5671 if (!page->mem_cgroup)
5672 return;
5673
5674 uncharge_gather_clear(&ug);
5675 uncharge_page(page, &ug);
5676 uncharge_batch(&ug);
5677}
5678
5679/**
5680 * mem_cgroup_uncharge_list - uncharge a list of page
5681 * @page_list: list of pages to uncharge
5682 *
5683 * Uncharge a list of pages previously charged with
5684 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
5685 */
5686void mem_cgroup_uncharge_list(struct list_head *page_list)
5687{
5688 if (mem_cgroup_disabled())
5689 return;
5690
5691 if (!list_empty(page_list))
5692 uncharge_list(page_list);
5693}
5694
5695/**
5696 * mem_cgroup_migrate - charge a page's replacement
5697 * @oldpage: currently circulating page
5698 * @newpage: replacement page
5699 *
5700 * Charge @newpage as a replacement page for @oldpage. @oldpage will
5701 * be uncharged upon free.
5702 *
5703 * Both pages must be locked, @newpage->mapping must be set up.
5704 */
5705void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
5706{
5707 struct mem_cgroup *memcg;
5708 unsigned int nr_pages;
5709 bool compound;
5710 unsigned long flags;
5711
5712 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
5713 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
5714 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
5715 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
5716 newpage);
5717
5718 if (mem_cgroup_disabled())
5719 return;
5720
5721 /* Page cache replacement: new page already charged? */
5722 if (newpage->mem_cgroup)
5723 return;
5724
5725 /* Swapcache readahead pages can get replaced before being charged */
5726 memcg = oldpage->mem_cgroup;
5727 if (!memcg)
5728 return;
5729
5730 /* Force-charge the new page. The old one will be freed soon */
5731 compound = PageTransHuge(newpage);
5732 nr_pages = compound ? hpage_nr_pages(newpage) : 1;
5733
5734 page_counter_charge(&memcg->memory, nr_pages);
5735 if (do_memsw_account())
5736 page_counter_charge(&memcg->memsw, nr_pages);
5737 css_get_many(&memcg->css, nr_pages);
5738
5739 commit_charge(newpage, memcg, false);
5740
5741 local_irq_save(flags);
5742 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
5743 memcg_check_events(memcg, newpage);
5744 local_irq_restore(flags);
5745}
5746
5747DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
5748EXPORT_SYMBOL(memcg_sockets_enabled_key);
5749
5750void mem_cgroup_sk_alloc(struct sock *sk)
5751{
5752 struct mem_cgroup *memcg;
5753
5754 if (!mem_cgroup_sockets_enabled)
5755 return;
5756
5757 /*
5758 * Socket cloning can throw us here with sk_memcg already
5759 * filled. It won't, however, necessarily happen from
5760 * process context. So the test for root memcg given
5761 * the current task's memcg won't help us in this case.
5762 *
5763 * Respecting the original socket's memcg is a better
5764 * decision in this case.
5765 */
5766 if (sk->sk_memcg) {
5767 css_get(&sk->sk_memcg->css);
5768 return;
5769 }
5770
5771 rcu_read_lock();
5772 memcg = mem_cgroup_from_task(current);
5773 if (memcg == root_mem_cgroup)
5774 goto out;
5775 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
5776 goto out;
5777 if (css_tryget_online(&memcg->css))
5778 sk->sk_memcg = memcg;
5779out:
5780 rcu_read_unlock();
5781}
5782
5783void mem_cgroup_sk_free(struct sock *sk)
5784{
5785 if (sk->sk_memcg)
5786 css_put(&sk->sk_memcg->css);
5787}
5788
5789/**
5790 * mem_cgroup_charge_skmem - charge socket memory
5791 * @memcg: memcg to charge
5792 * @nr_pages: number of pages to charge
5793 *
5794 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
5795 * @memcg's configured limit, %false if the charge had to be forced.
5796 */
5797bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5798{
5799 gfp_t gfp_mask = GFP_KERNEL;
5800
5801 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5802 struct page_counter *fail;
5803
5804 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
5805 memcg->tcpmem_pressure = 0;
5806 return true;
5807 }
5808 page_counter_charge(&memcg->tcpmem, nr_pages);
5809 memcg->tcpmem_pressure = 1;
5810 return false;
5811 }
5812
5813 /* Don't block in the packet receive path */
5814 if (in_softirq())
5815 gfp_mask = GFP_NOWAIT;
5816
5817 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
5818
5819 if (try_charge(memcg, gfp_mask, nr_pages) == 0)
5820 return true;
5821
5822 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
5823 return false;
5824}
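/*
 * Note that in both branches above a failed charge is still forced
 * through rather than rejected; the %false return and, on the legacy
 * hierarchy, the tcpmem_pressure flag merely tell the network stack
 * to enter memory pressure and back off.
 */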
5825
5826/**
5827 * mem_cgroup_uncharge_skmem - uncharge socket memory
5828 * @memcg: memcg to uncharge
5829 * @nr_pages: number of pages to uncharge
5830 */
5831void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5832{
5833 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5834 page_counter_uncharge(&memcg->tcpmem, nr_pages);
5835 return;
5836 }
5837
5838 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
5839
5840 refill_stock(memcg, nr_pages);
5841}
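
/*
 * Illustrative pairing (a sketch, not verbatim net/ code): a protocol
 * accounting nr_pages of socket buffer brackets the allocation like
 * this, assuming sk->sk_memcg was set up by mem_cgroup_sk_alloc():
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages))
 *		... back off, the memcg is over its limit ...
 *
 *	... and when the buffer is released ...
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
 *		mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
 */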

static int __init cgroup_memory(char *s)
{
	char *token;

	while ((token = strsep(&s, ",")) != NULL) {
		if (!*token)
			continue;
		if (!strcmp(token, "nosocket"))
			cgroup_memory_nosocket = true;
		if (!strcmp(token, "nokmem"))
			cgroup_memory_nokmem = true;
	}
	return 0;
}
__setup("cgroup.memory=", cgroup_memory);
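
/*
 * Usage example: both options can be combined on the kernel command
 * line, e.g.
 *
 *	cgroup.memory=nosocket,nokmem
 *
 * disables socket memory accounting and kernel memory accounting at
 * boot; strsep() above splits on the commas.
 */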

/*
 * subsys_initcall() for memory controller.
 *
 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
 * basically everything that doesn't depend on a specific mem_cgroup structure
 * should be initialized from here.
 */
static int __init mem_cgroup_init(void)
{
	int cpu, node;

#ifndef CONFIG_SLOB
	/*
	 * Kmem cache creation is mostly done with the slab_mutex held,
	 * so use a workqueue with limited concurrency to avoid stalling
	 * all worker threads in case lots of cgroups are created and
	 * destroyed simultaneously.
	 */
	memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
	BUG_ON(!memcg_kmem_cache_wq);
#endif

	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
				  memcg_hotplug_cpu_dead);

	for_each_possible_cpu(cpu)
		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
			  drain_local_stock);

	for_each_node(node) {
		struct mem_cgroup_tree_per_node *rtpn;

		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
				    node_online(node) ? node : NUMA_NO_NODE);

		rtpn->rb_root = RB_ROOT;
		rtpn->rb_rightmost = NULL;
		spin_lock_init(&rtpn->lock);
		soft_limit_tree.rb_tree_per_node[node] = rtpn;
	}

	return 0;
}
subsys_initcall(mem_cgroup_init);

#ifdef CONFIG_MEMCG_SWAP
static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
{
	while (!atomic_inc_not_zero(&memcg->id.ref)) {
		/*
		 * The root cgroup cannot be destroyed, so its refcount must
		 * always be >= 1.
		 */
		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
			VM_BUG_ON(1);
			break;
		}
		memcg = parent_mem_cgroup(memcg);
		if (!memcg)
			memcg = root_mem_cgroup;
	}
	return memcg;
}
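
/*
 * Worked example (hypothetical hierarchy): if the page's memcg A/B has
 * been offlined, atomic_inc_not_zero() on B's id.ref fails and the loop
 * above walks up to A; if A is online, A takes the reference and the
 * swap entry will be recorded against A instead of B.
 */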

/**
 * mem_cgroup_swapout - transfer a memsw charge to swap
 * @page: page whose memsw charge to transfer
 * @entry: swap entry to move the charge to
 *
 * Transfer the memsw charge of @page to @entry.
 */
void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
	struct mem_cgroup *memcg, *swap_memcg;
	unsigned int nr_entries;
	unsigned short oldid;

	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);

	if (!do_memsw_account())
		return;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return;

	/*
	 * In case the memcg owning these pages has been offlined and doesn't
	 * have an ID allocated to it anymore, charge the closest online
	 * ancestor for the swap instead and transfer the memory+swap charge.
	 */
	swap_memcg = mem_cgroup_id_get_online(memcg);
	nr_entries = hpage_nr_pages(page);
	/* Get references for the tail pages, too */
	if (nr_entries > 1)
		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
				   nr_entries);
	VM_BUG_ON_PAGE(oldid, page);
	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);

	page->mem_cgroup = NULL;

	if (!mem_cgroup_is_root(memcg))
		page_counter_uncharge(&memcg->memory, nr_entries);

	if (memcg != swap_memcg) {
		if (!mem_cgroup_is_root(swap_memcg))
			page_counter_charge(&swap_memcg->memsw, nr_entries);
		page_counter_uncharge(&memcg->memsw, nr_entries);
	}

	/*
	 * Interrupts should be disabled here because the caller holds the
	 * i_pages lock which is taken with interrupts-off. It is
	 * important here to have the interrupts disabled because it is the
	 * only synchronisation we have for updating the per-CPU variables.
	 */
	VM_BUG_ON(!irqs_disabled());
	mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page),
				     -nr_entries);
	memcg_check_events(memcg, page);

	if (!mem_cgroup_is_root(memcg))
		css_put_many(&memcg->css, nr_entries);
}
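
/*
 * Illustrative calling context (a sketch of the reclaim side, hedged):
 * this is reached from __remove_mapping() with the page locked, off the
 * LRU, and the i_pages lock held with interrupts disabled, roughly:
 *
 *	xa_lock_irqsave(&mapping->i_pages, flags);
 *	...
 *	mem_cgroup_swapout(page, swap);
 *	__delete_from_swap_cache(page);
 *	xa_unlock_irqrestore(&mapping->i_pages, flags);
 */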

/**
 * mem_cgroup_try_charge_swap - try charging swap space for a page
 * @page: page being added to swap
 * @entry: swap entry to charge
 *
 * Try to charge @page's memcg for the swap space at @entry.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
{
	unsigned int nr_pages = hpage_nr_pages(page);
	struct page_counter *counter;
	struct mem_cgroup *memcg;
	unsigned short oldid;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
		return 0;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return 0;

	memcg = mem_cgroup_id_get_online(memcg);

	if (!mem_cgroup_is_root(memcg) &&
	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
		mem_cgroup_id_put(memcg);
		return -ENOMEM;
	}

	/* Get references for the tail pages, too */
	if (nr_pages > 1)
		mem_cgroup_id_get_many(memcg, nr_pages - 1);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
	VM_BUG_ON_PAGE(oldid, page);
	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);

	return 0;
}
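
/*
 * Sketch of the intended use (simplified, assumed caller): the swap
 * allocator charges the memcg right after reserving a slot and backs
 * out if the swap limit is hit, e.g.:
 *
 *	entry = ...reserve a swap slot...;
 *	if (mem_cgroup_try_charge_swap(page, entry)) {
 *		...release the slot...;
 *		entry.val = 0;
 *	}
 */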

/**
 * mem_cgroup_uncharge_swap - uncharge swap space
 * @entry: swap entry to uncharge
 * @nr_pages: the amount of swap space to uncharge
 */
void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	if (!do_swap_account)
		return;

	id = swap_cgroup_record(entry, 0, nr_pages);
	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg) {
		if (!mem_cgroup_is_root(memcg)) {
			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
				page_counter_uncharge(&memcg->swap, nr_pages);
			else
				page_counter_uncharge(&memcg->memsw, nr_pages);
		}
		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
		mem_cgroup_id_put_many(memcg, nr_pages);
	}
	rcu_read_unlock();
}

long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	long nr_swap_pages = get_nr_swap_pages();

	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return nr_swap_pages;
	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		nr_swap_pages = min_t(long, nr_swap_pages,
				      READ_ONCE(memcg->swap.limit) -
				      page_counter_read(&memcg->swap));
	return nr_swap_pages;
}
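
/*
 * Worked example (hypothetical numbers, 4KiB pages): with 1GiB of free
 * swap globally (262144 pages), a memcg with swap.max = 512MiB (131072
 * pages) and 1000 pages of swap already in use gets
 * min(262144, 131072 - 1000) = 130072 pages; any ancestor with less
 * headroom lowers the result further.
 */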

bool mem_cgroup_swap_full(struct page *page)
{
	struct mem_cgroup *memcg;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (vm_swap_full())
		return true;
	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return false;

	memcg = page->mem_cgroup;
	if (!memcg)
		return false;

	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
			return true;

	return false;
}
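
/*
 * Example of the half-full check above (hypothetical numbers): a memcg
 * with a swap limit of 131072 pages is considered "swap full" once its
 * swap usage reaches 65536 pages, since 65536 * 2 >= 131072. The walk
 * covers every ancestor, so a half-full parent also triggers it.
 */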

/* for remembering the boot option */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata;
#endif

static int __init enable_swap_account(char *s)
{
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);
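
/*
 * Usage example: the default chosen by CONFIG_MEMCG_SWAP_ENABLED can be
 * overridden on the kernel command line, e.g.
 *
 *	swapaccount=0	(disable swap accounting)
 *	swapaccount=1	(enable swap accounting)
 */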

static u64 swap_current_read(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
}

static int swap_max_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long max = READ_ONCE(memcg->swap.limit);

	if (max == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);

	return 0;
}

static ssize_t swap_max_write(struct kernfs_open_file *of,
			      char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	mutex_lock(&memcg_limit_mutex);
	err = page_counter_limit(&memcg->swap, max);
	mutex_unlock(&memcg_limit_mutex);
	if (err)
		return err;

	return nbytes;
}
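
/*
 * Userspace usage example (cgroup v2; "mygroup" is an illustrative
 * path): the swap.* files defined below show up in every non-root
 * memcg directory, e.g.
 *
 *	$ cat /sys/fs/cgroup/mygroup/memory.swap.current
 *	$ echo 512M > /sys/fs/cgroup/mygroup/memory.swap.max
 *	$ echo max > /sys/fs/cgroup/mygroup/memory.swap.max
 *
 * page_counter_memparse() accepts suffixed sizes like "512M" and the
 * literal "max" to remove the limit.
 */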

static struct cftype swap_files[] = {
	{
		.name = "swap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = swap_current_read,
	},
	{
		.name = "swap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_max_show,
		.write = swap_max_write,
	},
	{ }	/* terminate */
};

static struct cftype memsw_cgroup_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },	/* terminate */
};

static int __init mem_cgroup_swap_init(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account) {
		do_swap_account = 1;
		WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
					       swap_files));
		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
						  memsw_cgroup_files));
	}
	return 0;
}
subsys_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_MEMCG_SWAP */
1/* memcontrol.c - Memory Controller
2 *
3 * Copyright IBM Corporation, 2007
4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5 *
6 * Copyright 2007 OpenVZ SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 *
9 * Memory thresholds
10 * Copyright (C) 2009 Nokia Corporation
11 * Author: Kirill A. Shutemov
12 *
13 * Kernel Memory Controller
14 * Copyright (C) 2012 Parallels Inc. and Google Inc.
15 * Authors: Glauber Costa and Suleiman Souhlal
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 */
27
28#include <linux/res_counter.h>
29#include <linux/memcontrol.h>
30#include <linux/cgroup.h>
31#include <linux/mm.h>
32#include <linux/hugetlb.h>
33#include <linux/pagemap.h>
34#include <linux/smp.h>
35#include <linux/page-flags.h>
36#include <linux/backing-dev.h>
37#include <linux/bit_spinlock.h>
38#include <linux/rcupdate.h>
39#include <linux/limits.h>
40#include <linux/export.h>
41#include <linux/mutex.h>
42#include <linux/rbtree.h>
43#include <linux/slab.h>
44#include <linux/swap.h>
45#include <linux/swapops.h>
46#include <linux/spinlock.h>
47#include <linux/eventfd.h>
48#include <linux/poll.h>
49#include <linux/sort.h>
50#include <linux/fs.h>
51#include <linux/seq_file.h>
52#include <linux/vmpressure.h>
53#include <linux/mm_inline.h>
54#include <linux/page_cgroup.h>
55#include <linux/cpu.h>
56#include <linux/oom.h>
57#include <linux/lockdep.h>
58#include <linux/file.h>
59#include "internal.h"
60#include <net/sock.h>
61#include <net/ip.h>
62#include <net/tcp_memcontrol.h>
63#include "slab.h"
64
65#include <asm/uaccess.h>
66
67#include <trace/events/vmscan.h>
68
69struct cgroup_subsys memory_cgrp_subsys __read_mostly;
70EXPORT_SYMBOL(memory_cgrp_subsys);
71
72#define MEM_CGROUP_RECLAIM_RETRIES 5
73static struct mem_cgroup *root_mem_cgroup __read_mostly;
74
75#ifdef CONFIG_MEMCG_SWAP
76/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
77int do_swap_account __read_mostly;
78
79/* for remember boot option*/
80#ifdef CONFIG_MEMCG_SWAP_ENABLED
81static int really_do_swap_account __initdata = 1;
82#else
83static int really_do_swap_account __initdata = 0;
84#endif
85
86#else
87#define do_swap_account 0
88#endif
89
90
91static const char * const mem_cgroup_stat_names[] = {
92 "cache",
93 "rss",
94 "rss_huge",
95 "mapped_file",
96 "writeback",
97 "swap",
98};
99
100enum mem_cgroup_events_index {
101 MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */
102 MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */
103 MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */
104 MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */
105 MEM_CGROUP_EVENTS_NSTATS,
106};
107
108static const char * const mem_cgroup_events_names[] = {
109 "pgpgin",
110 "pgpgout",
111 "pgfault",
112 "pgmajfault",
113};
114
115static const char * const mem_cgroup_lru_names[] = {
116 "inactive_anon",
117 "active_anon",
118 "inactive_file",
119 "active_file",
120 "unevictable",
121};
122
123/*
124 * Per memcg event counter is incremented at every pagein/pageout. With THP,
125 * it will be incremated by the number of pages. This counter is used for
126 * for trigger some periodic events. This is straightforward and better
127 * than using jiffies etc. to handle periodic memcg event.
128 */
129enum mem_cgroup_events_target {
130 MEM_CGROUP_TARGET_THRESH,
131 MEM_CGROUP_TARGET_SOFTLIMIT,
132 MEM_CGROUP_TARGET_NUMAINFO,
133 MEM_CGROUP_NTARGETS,
134};
135#define THRESHOLDS_EVENTS_TARGET 128
136#define SOFTLIMIT_EVENTS_TARGET 1024
137#define NUMAINFO_EVENTS_TARGET 1024
138
139struct mem_cgroup_stat_cpu {
140 long count[MEM_CGROUP_STAT_NSTATS];
141 unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
142 unsigned long nr_page_events;
143 unsigned long targets[MEM_CGROUP_NTARGETS];
144};
145
146struct mem_cgroup_reclaim_iter {
147 /*
148 * last scanned hierarchy member. Valid only if last_dead_count
149 * matches memcg->dead_count of the hierarchy root group.
150 */
151 struct mem_cgroup *last_visited;
152 int last_dead_count;
153
154 /* scan generation, increased every round-trip */
155 unsigned int generation;
156};
157
158/*
159 * per-zone information in memory controller.
160 */
161struct mem_cgroup_per_zone {
162 struct lruvec lruvec;
163 unsigned long lru_size[NR_LRU_LISTS];
164
165 struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
166
167 struct rb_node tree_node; /* RB tree node */
168 unsigned long long usage_in_excess;/* Set to the value by which */
169 /* the soft limit is exceeded*/
170 bool on_tree;
171 struct mem_cgroup *memcg; /* Back pointer, we cannot */
172 /* use container_of */
173};
174
175struct mem_cgroup_per_node {
176 struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
177};
178
179/*
180 * Cgroups above their limits are maintained in a RB-Tree, independent of
181 * their hierarchy representation
182 */
183
184struct mem_cgroup_tree_per_zone {
185 struct rb_root rb_root;
186 spinlock_t lock;
187};
188
189struct mem_cgroup_tree_per_node {
190 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
191};
192
193struct mem_cgroup_tree {
194 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
195};
196
197static struct mem_cgroup_tree soft_limit_tree __read_mostly;
198
199struct mem_cgroup_threshold {
200 struct eventfd_ctx *eventfd;
201 u64 threshold;
202};
203
204/* For threshold */
205struct mem_cgroup_threshold_ary {
206 /* An array index points to threshold just below or equal to usage. */
207 int current_threshold;
208 /* Size of entries[] */
209 unsigned int size;
210 /* Array of thresholds */
211 struct mem_cgroup_threshold entries[0];
212};
213
214struct mem_cgroup_thresholds {
215 /* Primary thresholds array */
216 struct mem_cgroup_threshold_ary *primary;
217 /*
218 * Spare threshold array.
219 * This is needed to make mem_cgroup_unregister_event() "never fail".
220 * It must be able to store at least primary->size - 1 entries.
221 */
222 struct mem_cgroup_threshold_ary *spare;
223};
224
225/* for OOM */
226struct mem_cgroup_eventfd_list {
227 struct list_head list;
228 struct eventfd_ctx *eventfd;
229};
230
231/*
232 * cgroup_event represents events which userspace want to receive.
233 */
234struct mem_cgroup_event {
235 /*
236 * memcg which the event belongs to.
237 */
238 struct mem_cgroup *memcg;
239 /*
240 * eventfd to signal userspace about the event.
241 */
242 struct eventfd_ctx *eventfd;
243 /*
244 * Each of these stored in a list by the cgroup.
245 */
246 struct list_head list;
247 /*
248 * register_event() callback will be used to add new userspace
249 * waiter for changes related to this event. Use eventfd_signal()
250 * on eventfd to send notification to userspace.
251 */
252 int (*register_event)(struct mem_cgroup *memcg,
253 struct eventfd_ctx *eventfd, const char *args);
254 /*
255 * unregister_event() callback will be called when userspace closes
256 * the eventfd or on cgroup removing. This callback must be set,
257 * if you want provide notification functionality.
258 */
259 void (*unregister_event)(struct mem_cgroup *memcg,
260 struct eventfd_ctx *eventfd);
261 /*
262 * All fields below needed to unregister event when
263 * userspace closes eventfd.
264 */
265 poll_table pt;
266 wait_queue_head_t *wqh;
267 wait_queue_t wait;
268 struct work_struct remove;
269};
270
271static void mem_cgroup_threshold(struct mem_cgroup *memcg);
272static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
273
274/*
275 * The memory controller data structure. The memory controller controls both
276 * page cache and RSS per cgroup. We would eventually like to provide
277 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
278 * to help the administrator determine what knobs to tune.
279 *
280 * TODO: Add a water mark for the memory controller. Reclaim will begin when
281 * we hit the water mark. May be even add a low water mark, such that
282 * no reclaim occurs from a cgroup at it's low water mark, this is
283 * a feature that will be implemented much later in the future.
284 */
285struct mem_cgroup {
286 struct cgroup_subsys_state css;
287 /*
288 * the counter to account for memory usage
289 */
290 struct res_counter res;
291
292 /* vmpressure notifications */
293 struct vmpressure vmpressure;
294
295 /*
296 * the counter to account for mem+swap usage.
297 */
298 struct res_counter memsw;
299
300 /*
301 * the counter to account for kernel memory usage.
302 */
303 struct res_counter kmem;
304 /*
305 * Should the accounting and control be hierarchical, per subtree?
306 */
307 bool use_hierarchy;
308 unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */
309
310 bool oom_lock;
311 atomic_t under_oom;
312 atomic_t oom_wakeups;
313
314 int swappiness;
315 /* OOM-Killer disable */
316 int oom_kill_disable;
317
318 /* set when res.limit == memsw.limit */
319 bool memsw_is_minimum;
320
321 /* protect arrays of thresholds */
322 struct mutex thresholds_lock;
323
324 /* thresholds for memory usage. RCU-protected */
325 struct mem_cgroup_thresholds thresholds;
326
327 /* thresholds for mem+swap usage. RCU-protected */
328 struct mem_cgroup_thresholds memsw_thresholds;
329
330 /* For oom notifier event fd */
331 struct list_head oom_notify;
332
333 /*
334 * Should we move charges of a task when a task is moved into this
335 * mem_cgroup ? And what type of charges should we move ?
336 */
337 unsigned long move_charge_at_immigrate;
338 /*
339 * set > 0 if pages under this cgroup are moving to other cgroup.
340 */
341 atomic_t moving_account;
342 /* taken only while moving_account > 0 */
343 spinlock_t move_lock;
344 /*
345 * percpu counter.
346 */
347 struct mem_cgroup_stat_cpu __percpu *stat;
348 /*
349 * used when a cpu is offlined or other synchronizations
350 * See mem_cgroup_read_stat().
351 */
352 struct mem_cgroup_stat_cpu nocpu_base;
353 spinlock_t pcp_counter_lock;
354
355 atomic_t dead_count;
356#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
357 struct cg_proto tcp_mem;
358#endif
359#if defined(CONFIG_MEMCG_KMEM)
360 /* analogous to slab_common's slab_caches list. per-memcg */
361 struct list_head memcg_slab_caches;
362 /* Not a spinlock, we can take a lot of time walking the list */
363 struct mutex slab_caches_mutex;
364 /* Index in the kmem_cache->memcg_params->memcg_caches array */
365 int kmemcg_id;
366#endif
367
368 int last_scanned_node;
369#if MAX_NUMNODES > 1
370 nodemask_t scan_nodes;
371 atomic_t numainfo_events;
372 atomic_t numainfo_updating;
373#endif
374
375 /* List of events which userspace want to receive */
376 struct list_head event_list;
377 spinlock_t event_list_lock;
378
379 struct mem_cgroup_per_node *nodeinfo[0];
380 /* WARNING: nodeinfo must be the last member here */
381};
382
383/* internal only representation about the status of kmem accounting. */
384enum {
385 KMEM_ACCOUNTED_ACTIVE, /* accounted by this cgroup itself */
386 KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */
387};
388
389#ifdef CONFIG_MEMCG_KMEM
390static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
391{
392 set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
393}
394
395static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
396{
397 return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
398}
399
400static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
401{
402 /*
403 * Our caller must use css_get() first, because memcg_uncharge_kmem()
404 * will call css_put() if it sees the memcg is dead.
405 */
406 smp_wmb();
407 if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
408 set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags);
409}
410
411static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg)
412{
413 return test_and_clear_bit(KMEM_ACCOUNTED_DEAD,
414 &memcg->kmem_account_flags);
415}
416#endif
417
418/* Stuffs for move charges at task migration. */
419/*
420 * Types of charges to be moved. "move_charge_at_immitgrate" and
421 * "immigrate_flags" are treated as a left-shifted bitmap of these types.
422 */
423enum move_type {
424 MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */
425 MOVE_CHARGE_TYPE_FILE, /* file page(including tmpfs) and swap of it */
426 NR_MOVE_TYPE,
427};
428
429/* "mc" and its members are protected by cgroup_mutex */
430static struct move_charge_struct {
431 spinlock_t lock; /* for from, to */
432 struct mem_cgroup *from;
433 struct mem_cgroup *to;
434 unsigned long immigrate_flags;
435 unsigned long precharge;
436 unsigned long moved_charge;
437 unsigned long moved_swap;
438 struct task_struct *moving_task; /* a task moving charges */
439 wait_queue_head_t waitq; /* a waitq for other context */
440} mc = {
441 .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
442 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
443};
444
445static bool move_anon(void)
446{
447 return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.immigrate_flags);
448}
449
450static bool move_file(void)
451{
452 return test_bit(MOVE_CHARGE_TYPE_FILE, &mc.immigrate_flags);
453}
454
455/*
456 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
457 * limit reclaim to prevent infinite loops, if they ever occur.
458 */
459#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
460#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
461
462enum charge_type {
463 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
464 MEM_CGROUP_CHARGE_TYPE_ANON,
465 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
466 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */
467 NR_CHARGE_TYPE,
468};
469
470/* for encoding cft->private value on file */
471enum res_type {
472 _MEM,
473 _MEMSWAP,
474 _OOM_TYPE,
475 _KMEM,
476};
477
478#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
479#define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
480#define MEMFILE_ATTR(val) ((val) & 0xffff)
481/* Used for OOM nofiier */
482#define OOM_CONTROL (0)
483
484/*
485 * Reclaim flags for mem_cgroup_hierarchical_reclaim
486 */
487#define MEM_CGROUP_RECLAIM_NOSWAP_BIT 0x0
488#define MEM_CGROUP_RECLAIM_NOSWAP (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
489#define MEM_CGROUP_RECLAIM_SHRINK_BIT 0x1
490#define MEM_CGROUP_RECLAIM_SHRINK (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
491
492/*
493 * The memcg_create_mutex will be held whenever a new cgroup is created.
494 * As a consequence, any change that needs to protect against new child cgroups
495 * appearing has to hold it as well.
496 */
497static DEFINE_MUTEX(memcg_create_mutex);
498
499struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
500{
501 return s ? container_of(s, struct mem_cgroup, css) : NULL;
502}
503
504/* Some nice accessors for the vmpressure. */
505struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
506{
507 if (!memcg)
508 memcg = root_mem_cgroup;
509 return &memcg->vmpressure;
510}
511
512struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
513{
514 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
515}
516
517static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
518{
519 return (memcg == root_mem_cgroup);
520}
521
522/*
523 * We restrict the id in the range of [1, 65535], so it can fit into
524 * an unsigned short.
525 */
526#define MEM_CGROUP_ID_MAX USHRT_MAX
527
528static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
529{
530 /*
531 * The ID of the root cgroup is 0, but memcg treat 0 as an
532 * invalid ID, so we return (cgroup_id + 1).
533 */
534 return memcg->css.cgroup->id + 1;
535}
536
537static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
538{
539 struct cgroup_subsys_state *css;
540
541 css = css_from_id(id - 1, &memory_cgrp_subsys);
542 return mem_cgroup_from_css(css);
543}
544
545/* Writing them here to avoid exposing memcg's inner layout */
546#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
547
548void sock_update_memcg(struct sock *sk)
549{
550 if (mem_cgroup_sockets_enabled) {
551 struct mem_cgroup *memcg;
552 struct cg_proto *cg_proto;
553
554 BUG_ON(!sk->sk_prot->proto_cgroup);
555
556 /* Socket cloning can throw us here with sk_cgrp already
557 * filled. It won't however, necessarily happen from
558 * process context. So the test for root memcg given
559 * the current task's memcg won't help us in this case.
560 *
561 * Respecting the original socket's memcg is a better
562 * decision in this case.
563 */
564 if (sk->sk_cgrp) {
565 BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
566 css_get(&sk->sk_cgrp->memcg->css);
567 return;
568 }
569
570 rcu_read_lock();
571 memcg = mem_cgroup_from_task(current);
572 cg_proto = sk->sk_prot->proto_cgroup(memcg);
573 if (!mem_cgroup_is_root(memcg) &&
574 memcg_proto_active(cg_proto) && css_tryget(&memcg->css)) {
575 sk->sk_cgrp = cg_proto;
576 }
577 rcu_read_unlock();
578 }
579}
580EXPORT_SYMBOL(sock_update_memcg);
581
582void sock_release_memcg(struct sock *sk)
583{
584 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
585 struct mem_cgroup *memcg;
586 WARN_ON(!sk->sk_cgrp->memcg);
587 memcg = sk->sk_cgrp->memcg;
588 css_put(&sk->sk_cgrp->memcg->css);
589 }
590}
591
592struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
593{
594 if (!memcg || mem_cgroup_is_root(memcg))
595 return NULL;
596
597 return &memcg->tcp_mem;
598}
599EXPORT_SYMBOL(tcp_proto_cgroup);
600
601static void disarm_sock_keys(struct mem_cgroup *memcg)
602{
603 if (!memcg_proto_activated(&memcg->tcp_mem))
604 return;
605 static_key_slow_dec(&memcg_socket_limit_enabled);
606}
607#else
608static void disarm_sock_keys(struct mem_cgroup *memcg)
609{
610}
611#endif
612
613#ifdef CONFIG_MEMCG_KMEM
614/*
615 * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
616 * The main reason for not using cgroup id for this:
617 * this works better in sparse environments, where we have a lot of memcgs,
618 * but only a few kmem-limited. Or also, if we have, for instance, 200
619 * memcgs, and none but the 200th is kmem-limited, we'd have to have a
620 * 200 entry array for that.
621 *
622 * The current size of the caches array is stored in
623 * memcg_limited_groups_array_size. It will double each time we have to
624 * increase it.
625 */
626static DEFINE_IDA(kmem_limited_groups);
627int memcg_limited_groups_array_size;
628
629/*
630 * MIN_SIZE is different than 1, because we would like to avoid going through
631 * the alloc/free process all the time. In a small machine, 4 kmem-limited
632 * cgroups is a reasonable guess. In the future, it could be a parameter or
633 * tunable, but that is strictly not necessary.
634 *
635 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
636 * this constant directly from cgroup, but it is understandable that this is
637 * better kept as an internal representation in cgroup.c. In any case, the
638 * cgrp_id space is not getting any smaller, and we don't have to necessarily
639 * increase ours as well if it increases.
640 */
641#define MEMCG_CACHES_MIN_SIZE 4
642#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
643
644/*
645 * A lot of the calls to the cache allocation functions are expected to be
646 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
647 * conditional to this static branch, we'll have to allow modules that does
648 * kmem_cache_alloc and the such to see this symbol as well
649 */
650struct static_key memcg_kmem_enabled_key;
651EXPORT_SYMBOL(memcg_kmem_enabled_key);
652
653static void disarm_kmem_keys(struct mem_cgroup *memcg)
654{
655 if (memcg_kmem_is_active(memcg)) {
656 static_key_slow_dec(&memcg_kmem_enabled_key);
657 ida_simple_remove(&kmem_limited_groups, memcg->kmemcg_id);
658 }
659 /*
660 * This check can't live in kmem destruction function,
661 * since the charges will outlive the cgroup
662 */
663 WARN_ON(res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0);
664}
665#else
666static void disarm_kmem_keys(struct mem_cgroup *memcg)
667{
668}
669#endif /* CONFIG_MEMCG_KMEM */
670
671static void disarm_static_keys(struct mem_cgroup *memcg)
672{
673 disarm_sock_keys(memcg);
674 disarm_kmem_keys(memcg);
675}
676
677static void drain_all_stock_async(struct mem_cgroup *memcg);
678
679static struct mem_cgroup_per_zone *
680mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
681{
682 VM_BUG_ON((unsigned)nid >= nr_node_ids);
683 return &memcg->nodeinfo[nid]->zoneinfo[zid];
684}
685
686struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
687{
688 return &memcg->css;
689}
690
691static struct mem_cgroup_per_zone *
692page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
693{
694 int nid = page_to_nid(page);
695 int zid = page_zonenum(page);
696
697 return mem_cgroup_zoneinfo(memcg, nid, zid);
698}
699
700static struct mem_cgroup_tree_per_zone *
701soft_limit_tree_node_zone(int nid, int zid)
702{
703 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
704}
705
706static struct mem_cgroup_tree_per_zone *
707soft_limit_tree_from_page(struct page *page)
708{
709 int nid = page_to_nid(page);
710 int zid = page_zonenum(page);
711
712 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
713}
714
715static void
716__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
717 struct mem_cgroup_per_zone *mz,
718 struct mem_cgroup_tree_per_zone *mctz,
719 unsigned long long new_usage_in_excess)
720{
721 struct rb_node **p = &mctz->rb_root.rb_node;
722 struct rb_node *parent = NULL;
723 struct mem_cgroup_per_zone *mz_node;
724
725 if (mz->on_tree)
726 return;
727
728 mz->usage_in_excess = new_usage_in_excess;
729 if (!mz->usage_in_excess)
730 return;
731 while (*p) {
732 parent = *p;
733 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
734 tree_node);
735 if (mz->usage_in_excess < mz_node->usage_in_excess)
736 p = &(*p)->rb_left;
737 /*
738 * We can't avoid mem cgroups that are over their soft
739 * limit by the same amount
740 */
741 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
742 p = &(*p)->rb_right;
743 }
744 rb_link_node(&mz->tree_node, parent, p);
745 rb_insert_color(&mz->tree_node, &mctz->rb_root);
746 mz->on_tree = true;
747}
748
749static void
750__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
751 struct mem_cgroup_per_zone *mz,
752 struct mem_cgroup_tree_per_zone *mctz)
753{
754 if (!mz->on_tree)
755 return;
756 rb_erase(&mz->tree_node, &mctz->rb_root);
757 mz->on_tree = false;
758}
759
760static void
761mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
762 struct mem_cgroup_per_zone *mz,
763 struct mem_cgroup_tree_per_zone *mctz)
764{
765 spin_lock(&mctz->lock);
766 __mem_cgroup_remove_exceeded(memcg, mz, mctz);
767 spin_unlock(&mctz->lock);
768}
769
770
771static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
772{
773 unsigned long long excess;
774 struct mem_cgroup_per_zone *mz;
775 struct mem_cgroup_tree_per_zone *mctz;
776 int nid = page_to_nid(page);
777 int zid = page_zonenum(page);
778 mctz = soft_limit_tree_from_page(page);
779
780 /*
781 * Necessary to update all ancestors when hierarchy is used.
782 * because their event counter is not touched.
783 */
784 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
785 mz = mem_cgroup_zoneinfo(memcg, nid, zid);
786 excess = res_counter_soft_limit_excess(&memcg->res);
787 /*
788 * We have to update the tree if mz is on RB-tree or
789 * mem is over its softlimit.
790 */
791 if (excess || mz->on_tree) {
792 spin_lock(&mctz->lock);
793 /* if on-tree, remove it */
794 if (mz->on_tree)
795 __mem_cgroup_remove_exceeded(memcg, mz, mctz);
796 /*
797 * Insert again. mz->usage_in_excess will be updated.
798 * If excess is 0, no tree ops.
799 */
800 __mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
801 spin_unlock(&mctz->lock);
802 }
803 }
804}
805
806static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
807{
808 int node, zone;
809 struct mem_cgroup_per_zone *mz;
810 struct mem_cgroup_tree_per_zone *mctz;
811
812 for_each_node(node) {
813 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
814 mz = mem_cgroup_zoneinfo(memcg, node, zone);
815 mctz = soft_limit_tree_node_zone(node, zone);
816 mem_cgroup_remove_exceeded(memcg, mz, mctz);
817 }
818 }
819}
820
821static struct mem_cgroup_per_zone *
822__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
823{
824 struct rb_node *rightmost = NULL;
825 struct mem_cgroup_per_zone *mz;
826
827retry:
828 mz = NULL;
829 rightmost = rb_last(&mctz->rb_root);
830 if (!rightmost)
831 goto done; /* Nothing to reclaim from */
832
833 mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
834 /*
835 * Remove the node now but someone else can add it back,
836 * we will to add it back at the end of reclaim to its correct
837 * position in the tree.
838 */
839 __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
840 if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
841 !css_tryget(&mz->memcg->css))
842 goto retry;
843done:
844 return mz;
845}
846
847static struct mem_cgroup_per_zone *
848mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
849{
850 struct mem_cgroup_per_zone *mz;
851
852 spin_lock(&mctz->lock);
853 mz = __mem_cgroup_largest_soft_limit_node(mctz);
854 spin_unlock(&mctz->lock);
855 return mz;
856}
857
858/*
859 * Implementation Note: reading percpu statistics for memcg.
860 *
861 * Both of vmstat[] and percpu_counter has threshold and do periodic
862 * synchronization to implement "quick" read. There are trade-off between
863 * reading cost and precision of value. Then, we may have a chance to implement
864 * a periodic synchronizion of counter in memcg's counter.
865 *
866 * But this _read() function is used for user interface now. The user accounts
867 * memory usage by memory cgroup and he _always_ requires exact value because
868 * he accounts memory. Even if we provide quick-and-fuzzy read, we always
869 * have to visit all online cpus and make sum. So, for now, unnecessary
870 * synchronization is not implemented. (just implemented for cpu hotplug)
871 *
872 * If there are kernel internal actions which can make use of some not-exact
873 * value, and reading all cpu value can be performance bottleneck in some
874 * common workload, threashold and synchonization as vmstat[] should be
875 * implemented.
876 */
877static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
878 enum mem_cgroup_stat_index idx)
879{
880 long val = 0;
881 int cpu;
882
883 get_online_cpus();
884 for_each_online_cpu(cpu)
885 val += per_cpu(memcg->stat->count[idx], cpu);
886#ifdef CONFIG_HOTPLUG_CPU
887 spin_lock(&memcg->pcp_counter_lock);
888 val += memcg->nocpu_base.count[idx];
889 spin_unlock(&memcg->pcp_counter_lock);
890#endif
891 put_online_cpus();
892 return val;
893}
894
895static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
896 bool charge)
897{
898 int val = (charge) ? 1 : -1;
899 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
900}
901
902static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
903 enum mem_cgroup_events_index idx)
904{
905 unsigned long val = 0;
906 int cpu;
907
908 get_online_cpus();
909 for_each_online_cpu(cpu)
910 val += per_cpu(memcg->stat->events[idx], cpu);
911#ifdef CONFIG_HOTPLUG_CPU
912 spin_lock(&memcg->pcp_counter_lock);
913 val += memcg->nocpu_base.events[idx];
914 spin_unlock(&memcg->pcp_counter_lock);
915#endif
916 put_online_cpus();
917 return val;
918}
919
920static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
921 struct page *page,
922 bool anon, int nr_pages)
923{
924 /*
925 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
926 * counted as CACHE even if it's on ANON LRU.
927 */
928 if (anon)
929 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
930 nr_pages);
931 else
932 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
933 nr_pages);
934
935 if (PageTransHuge(page))
936 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
937 nr_pages);
938
939 /* pagein of a big page is an event. So, ignore page size */
940 if (nr_pages > 0)
941 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
942 else {
943 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
944 nr_pages = -nr_pages; /* for event */
945 }
946
947 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
948}
949
950unsigned long
951mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
952{
953 struct mem_cgroup_per_zone *mz;
954
955 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
956 return mz->lru_size[lru];
957}
958
959static unsigned long
960mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
961 unsigned int lru_mask)
962{
963 struct mem_cgroup_per_zone *mz;
964 enum lru_list lru;
965 unsigned long ret = 0;
966
967 mz = mem_cgroup_zoneinfo(memcg, nid, zid);
968
969 for_each_lru(lru) {
970 if (BIT(lru) & lru_mask)
971 ret += mz->lru_size[lru];
972 }
973 return ret;
974}
975
976static unsigned long
977mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
978 int nid, unsigned int lru_mask)
979{
980 u64 total = 0;
981 int zid;
982
983 for (zid = 0; zid < MAX_NR_ZONES; zid++)
984 total += mem_cgroup_zone_nr_lru_pages(memcg,
985 nid, zid, lru_mask);
986
987 return total;
988}
989
990static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
991 unsigned int lru_mask)
992{
993 int nid;
994 u64 total = 0;
995
996 for_each_node_state(nid, N_MEMORY)
997 total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
998 return total;
999}
1000
1001static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
1002 enum mem_cgroup_events_target target)
1003{
1004 unsigned long val, next;
1005
1006 val = __this_cpu_read(memcg->stat->nr_page_events);
1007 next = __this_cpu_read(memcg->stat->targets[target]);
1008 /* from time_after() in jiffies.h */
1009 if ((long)next - (long)val < 0) {
1010 switch (target) {
1011 case MEM_CGROUP_TARGET_THRESH:
1012 next = val + THRESHOLDS_EVENTS_TARGET;
1013 break;
1014 case MEM_CGROUP_TARGET_SOFTLIMIT:
1015 next = val + SOFTLIMIT_EVENTS_TARGET;
1016 break;
1017 case MEM_CGROUP_TARGET_NUMAINFO:
1018 next = val + NUMAINFO_EVENTS_TARGET;
1019 break;
1020 default:
1021 break;
1022 }
1023 __this_cpu_write(memcg->stat->targets[target], next);
1024 return true;
1025 }
1026 return false;
1027}
1028
1029/*
1030 * Check events in order.
1031 *
1032 */
1033static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
1034{
1035 preempt_disable();
1036 /* threshold event is triggered in finer grain than soft limit */
1037 if (unlikely(mem_cgroup_event_ratelimit(memcg,
1038 MEM_CGROUP_TARGET_THRESH))) {
1039 bool do_softlimit;
1040 bool do_numainfo __maybe_unused;
1041
1042 do_softlimit = mem_cgroup_event_ratelimit(memcg,
1043 MEM_CGROUP_TARGET_SOFTLIMIT);
1044#if MAX_NUMNODES > 1
1045 do_numainfo = mem_cgroup_event_ratelimit(memcg,
1046 MEM_CGROUP_TARGET_NUMAINFO);
1047#endif
1048 preempt_enable();
1049
1050 mem_cgroup_threshold(memcg);
1051 if (unlikely(do_softlimit))
1052 mem_cgroup_update_tree(memcg, page);
1053#if MAX_NUMNODES > 1
1054 if (unlikely(do_numainfo))
1055 atomic_inc(&memcg->numainfo_events);
1056#endif
1057 } else
1058 preempt_enable();
1059}
1060
1061struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
1062{
1063 /*
1064 * mm_update_next_owner() may clear mm->owner to NULL
1065 * if it races with swapoff, page migration, etc.
1066 * So this can be called with p == NULL.
1067 */
1068 if (unlikely(!p))
1069 return NULL;
1070
1071 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
1072}
1073
1074static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1075{
1076 struct mem_cgroup *memcg = NULL;
1077
1078 rcu_read_lock();
1079 do {
1080 /*
1081 * Page cache insertions can happen withou an
1082 * actual mm context, e.g. during disk probing
1083 * on boot, loopback IO, acct() writes etc.
1084 */
1085 if (unlikely(!mm))
1086 memcg = root_mem_cgroup;
1087 else {
1088 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1089 if (unlikely(!memcg))
1090 memcg = root_mem_cgroup;
1091 }
1092 } while (!css_tryget(&memcg->css));
1093 rcu_read_unlock();
1094 return memcg;
1095}
1096
1097/*
1098 * Returns a next (in a pre-order walk) alive memcg (with elevated css
1099 * ref. count) or NULL if the whole root's subtree has been visited.
1100 *
1101 * helper function to be used by mem_cgroup_iter
1102 */
1103static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
1104 struct mem_cgroup *last_visited)
1105{
1106 struct cgroup_subsys_state *prev_css, *next_css;
1107
1108 prev_css = last_visited ? &last_visited->css : NULL;
1109skip_node:
1110 next_css = css_next_descendant_pre(prev_css, &root->css);
1111
1112 /*
1113 * Even if we found a group we have to make sure it is
1114 * alive. css && !memcg means that the groups should be
1115 * skipped and we should continue the tree walk.
1116 * last_visited css is safe to use because it is
1117 * protected by css_get and the tree walk is rcu safe.
1118 *
1119 * We do not take a reference on the root of the tree walk
1120 * because we might race with the root removal when it would
1121 * be the only node in the iterated hierarchy and mem_cgroup_iter
1122 * would end up in an endless loop because it expects that at
1123 * least one valid node will be returned. Root cannot disappear
1124 * because caller of the iterator should hold it already so
1125 * skipping css reference should be safe.
1126 */
1127 if (next_css) {
1128 if ((next_css == &root->css) ||
1129 ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
1130 return mem_cgroup_from_css(next_css);
1131
1132 prev_css = next_css;
1133 goto skip_node;
1134 }
1135
1136 return NULL;
1137}
1138
1139static void mem_cgroup_iter_invalidate(struct mem_cgroup *root)
1140{
1141 /*
1142 * When a group in the hierarchy below root is destroyed, the
1143 * hierarchy iterator can no longer be trusted since it might
1144 * have pointed to the destroyed group. Invalidate it.
1145 */
1146 atomic_inc(&root->dead_count);
1147}
1148
1149static struct mem_cgroup *
1150mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter,
1151 struct mem_cgroup *root,
1152 int *sequence)
1153{
1154 struct mem_cgroup *position = NULL;
1155 /*
1156 * A cgroup destruction happens in two stages: offlining and
1157 * release. They are separated by a RCU grace period.
1158 *
1159 * If the iterator is valid, we may still race with an
1160 * offlining. The RCU lock ensures the object won't be
1161 * released, tryget will fail if we lost the race.
1162 */
1163 *sequence = atomic_read(&root->dead_count);
1164 if (iter->last_dead_count == *sequence) {
1165 smp_rmb();
1166 position = iter->last_visited;
1167
1168 /*
1169 * We cannot take a reference to root because we might race
1170 * with root removal and returning NULL would end up in
1171 * an endless loop on the iterator user level when root
1172 * would be returned all the time.
1173 */
1174 if (position && position != root &&
1175 !css_tryget(&position->css))
1176 position = NULL;
1177 }
1178 return position;
1179}
1180
1181static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
1182 struct mem_cgroup *last_visited,
1183 struct mem_cgroup *new_position,
1184 struct mem_cgroup *root,
1185 int sequence)
1186{
1187 /* root reference counting symmetric to mem_cgroup_iter_load */
1188 if (last_visited && last_visited != root)
1189 css_put(&last_visited->css);
1190 /*
1191 * We store the sequence count from the time @last_visited was
1192 * loaded successfully instead of rereading it here so that we
1193 * don't lose destruction events in between. We could have
1194 * raced with the destruction of @new_position after all.
1195 */
1196 iter->last_visited = new_position;
1197 smp_wmb();
1198 iter->last_dead_count = sequence;
1199}
1200
1201/**
1202 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1203 * @root: hierarchy root
1204 * @prev: previously returned memcg, NULL on first invocation
1205 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1206 *
1207 * Returns references to children of the hierarchy below @root, or
1208 * @root itself, or %NULL after a full round-trip.
1209 *
1210 * Caller must pass the return value in @prev on subsequent
1211 * invocations for reference counting, or use mem_cgroup_iter_break()
1212 * to cancel a hierarchy walk before the round-trip is complete.
1213 *
1214 * Reclaimers can specify a zone and a priority level in @reclaim to
1215 * divide up the memcgs in the hierarchy among all concurrent
1216 * reclaimers operating on the same zone and priority.
1217 */
1218struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1219 struct mem_cgroup *prev,
1220 struct mem_cgroup_reclaim_cookie *reclaim)
1221{
1222 struct mem_cgroup *memcg = NULL;
1223 struct mem_cgroup *last_visited = NULL;
1224
1225 if (mem_cgroup_disabled())
1226 return NULL;
1227
1228 if (!root)
1229 root = root_mem_cgroup;
1230
1231 if (prev && !reclaim)
1232 last_visited = prev;
1233
1234 if (!root->use_hierarchy && root != root_mem_cgroup) {
1235 if (prev)
1236 goto out_css_put;
1237 return root;
1238 }
1239
1240 rcu_read_lock();
1241 while (!memcg) {
1242 struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
1243 int uninitialized_var(seq);
1244
1245 if (reclaim) {
1246 int nid = zone_to_nid(reclaim->zone);
1247 int zid = zone_idx(reclaim->zone);
1248 struct mem_cgroup_per_zone *mz;
1249
1250 mz = mem_cgroup_zoneinfo(root, nid, zid);
1251 iter = &mz->reclaim_iter[reclaim->priority];
1252 if (prev && reclaim->generation != iter->generation) {
1253 iter->last_visited = NULL;
1254 goto out_unlock;
1255 }
1256
1257 last_visited = mem_cgroup_iter_load(iter, root, &seq);
1258 }
1259
1260 memcg = __mem_cgroup_iter_next(root, last_visited);
1261
1262 if (reclaim) {
1263 mem_cgroup_iter_update(iter, last_visited, memcg, root,
1264 seq);
1265
1266 if (!memcg)
1267 iter->generation++;
1268 else if (!prev && memcg)
1269 reclaim->generation = iter->generation;
1270 }
1271
1272 if (prev && !memcg)
1273 goto out_unlock;
1274 }
1275out_unlock:
1276 rcu_read_unlock();
1277out_css_put:
1278 if (prev && prev != root)
1279 css_put(&prev->css);
1280
1281 return memcg;
1282}
1283
1284/**
1285 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1286 * @root: hierarchy root
1287 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1288 */
1289void mem_cgroup_iter_break(struct mem_cgroup *root,
1290 struct mem_cgroup *prev)
1291{
1292 if (!root)
1293 root = root_mem_cgroup;
1294 if (prev && prev != root)
1295 css_put(&prev->css);
1296}
1297
1298/*
1299 * Iteration constructs for visiting all cgroups (under a tree). If
1300 * loops are exited prematurely (break), mem_cgroup_iter_break() must
1301 * be used for reference counting.
1302 */
1303#define for_each_mem_cgroup_tree(iter, root) \
1304 for (iter = mem_cgroup_iter(root, NULL, NULL); \
1305 iter != NULL; \
1306 iter = mem_cgroup_iter(root, iter, NULL))
1307
1308#define for_each_mem_cgroup(iter) \
1309 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
1310 iter != NULL; \
1311 iter = mem_cgroup_iter(NULL, iter, NULL))
1312
1313void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
1314{
1315 struct mem_cgroup *memcg;
1316
1317 rcu_read_lock();
1318 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1319 if (unlikely(!memcg))
1320 goto out;
1321
1322 switch (idx) {
1323 case PGFAULT:
1324 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
1325 break;
1326 case PGMAJFAULT:
1327 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
1328 break;
1329 default:
1330 BUG();
1331 }
1332out:
1333 rcu_read_unlock();
1334}
1335EXPORT_SYMBOL(__mem_cgroup_count_vm_event);
1336
1337/**
1338 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
1339 * @zone: zone of the wanted lruvec
1340 * @memcg: memcg of the wanted lruvec
1341 *
1342 * Returns the lru list vector holding pages for the given @zone and
1343 * @mem. This can be the global zone lruvec, if the memory controller
1344 * is disabled.
1345 */
1346struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
1347 struct mem_cgroup *memcg)
1348{
1349 struct mem_cgroup_per_zone *mz;
1350 struct lruvec *lruvec;
1351
1352 if (mem_cgroup_disabled()) {
1353 lruvec = &zone->lruvec;
1354 goto out;
1355 }
1356
1357 mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
1358 lruvec = &mz->lruvec;
1359out:
1360 /*
1361 * Since a node can be onlined after the mem_cgroup was created,
1362 * we have to be prepared to initialize lruvec->zone here;
1363 * and if offlined then reonlined, we need to reinitialize it.
1364 */
1365 if (unlikely(lruvec->zone != zone))
1366 lruvec->zone = zone;
1367 return lruvec;
1368}
1369
1370/*
1371 * Following LRU functions are allowed to be used without PCG_LOCK.
1372 * Operations are called by routine of global LRU independently from memcg.
1373 * What we have to take care of here is validness of pc->mem_cgroup.
1374 *
1375 * Changes to pc->mem_cgroup happens when
1376 * 1. charge
1377 * 2. moving account
1378 * In typical case, "charge" is done before add-to-lru. Exception is SwapCache.
1379 * It is added to LRU before charge.
1380 * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
1381 * When moving account, the page is not on LRU. It's isolated.
1382 */
1383
1384/**
1385 * mem_cgroup_page_lruvec - return lruvec for adding an lru page
1386 * @page: the page
1387 * @zone: zone of the page
1388 */
1389struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
1390{
1391 struct mem_cgroup_per_zone *mz;
1392 struct mem_cgroup *memcg;
1393 struct page_cgroup *pc;
1394 struct lruvec *lruvec;
1395
1396 if (mem_cgroup_disabled()) {
1397 lruvec = &zone->lruvec;
1398 goto out;
1399 }
1400
1401 pc = lookup_page_cgroup(page);
1402 memcg = pc->mem_cgroup;
1403
1404 /*
1405 * Surreptitiously switch any uncharged offlist page to root:
1406 * an uncharged page off lru does nothing to secure
1407 * its former mem_cgroup from sudden removal.
1408 *
1409 * Our caller holds lru_lock, and PageCgroupUsed is updated
1410 * under page_cgroup lock: between them, they make all uses
1411 * of pc->mem_cgroup safe.
1412 */
1413 if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
1414 pc->mem_cgroup = memcg = root_mem_cgroup;
1415
1416 mz = page_cgroup_zoneinfo(memcg, page);
1417 lruvec = &mz->lruvec;
1418out:
1419 /*
1420 * Since a node can be onlined after the mem_cgroup was created,
1421 * we have to be prepared to initialize lruvec->zone here;
1422 * and if offlined then reonlined, we need to reinitialize it.
1423 */
1424 if (unlikely(lruvec->zone != zone))
1425 lruvec->zone = zone;
1426 return lruvec;
1427}
1428
1429/**
1430 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1431 * @lruvec: mem_cgroup per zone lru vector
1432 * @lru: index of lru list the page is sitting on
1433 * @nr_pages: positive when adding or negative when removing
1434 *
1435 * This function must be called when a page is added to or removed from an
1436 * lru list.
1437 */
1438void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1439 int nr_pages)
1440{
1441 struct mem_cgroup_per_zone *mz;
1442 unsigned long *lru_size;
1443
1444 if (mem_cgroup_disabled())
1445 return;
1446
1447 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1448 lru_size = mz->lru_size + lru;
1449 *lru_size += nr_pages;
1450 VM_BUG_ON((long)(*lru_size) < 0);
1451}
1452
1453/*
1454 * Checks whether given mem is same or in the root_mem_cgroup's
1455 * hierarchy subtree
1456 */
1457bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1458 struct mem_cgroup *memcg)
1459{
1460 if (root_memcg == memcg)
1461 return true;
1462 if (!root_memcg->use_hierarchy || !memcg)
1463 return false;
1464 return cgroup_is_descendant(memcg->css.cgroup, root_memcg->css.cgroup);
1465}
1466
1467static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1468 struct mem_cgroup *memcg)
1469{
1470 bool ret;
1471
1472 rcu_read_lock();
1473 ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
1474 rcu_read_unlock();
1475 return ret;
1476}
1477
1478bool task_in_mem_cgroup(struct task_struct *task,
1479 const struct mem_cgroup *memcg)
1480{
1481 struct mem_cgroup *curr = NULL;
1482 struct task_struct *p;
1483 bool ret;
1484
1485 p = find_lock_task_mm(task);
1486 if (p) {
1487 curr = get_mem_cgroup_from_mm(p->mm);
1488 task_unlock(p);
1489 } else {
1490 /*
1491 * All threads may have already detached their mm's, but the oom
1492 * killer still needs to detect if they have already been oom
1493 * killed to prevent needlessly killing additional tasks.
1494 */
1495 rcu_read_lock();
1496 curr = mem_cgroup_from_task(task);
1497 if (curr)
1498 css_get(&curr->css);
1499 rcu_read_unlock();
1500 }
1501 /*
1502 * We should check use_hierarchy of "memcg" not "curr". Because checking
1503 * use_hierarchy of "curr" here make this function true if hierarchy is
1504 * enabled in "curr" and "curr" is a child of "memcg" in *cgroup*
1505 * hierarchy(even if use_hierarchy is disabled in "memcg").
1506 */
1507 ret = mem_cgroup_same_or_subtree(memcg, curr);
1508 css_put(&curr->css);
1509 return ret;
1510}
1511
1512int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
1513{
1514 unsigned long inactive_ratio;
1515 unsigned long inactive;
1516 unsigned long active;
1517 unsigned long gb;
1518
1519 inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
1520 active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
1521
1522 gb = (inactive + active) >> (30 - PAGE_SHIFT);
1523 if (gb)
1524 inactive_ratio = int_sqrt(10 * gb);
1525 else
1526 inactive_ratio = 1;
1527
1528 return inactive * inactive_ratio < active;
1529}
1530
1531#define mem_cgroup_from_res_counter(counter, member) \
1532 container_of(counter, struct mem_cgroup, member)
1533
1534/**
1535 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1536 * @memcg: the memory cgroup
1537 *
1538 * Returns the maximum amount of memory @memcg can be charged with, in
1539 * pages.
1540 */
1541static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1542{
1543 unsigned long long margin;
1544
1545 margin = res_counter_margin(&memcg->res);
1546 if (do_swap_account)
1547 margin = min(margin, res_counter_margin(&memcg->memsw));
1548 return margin >> PAGE_SHIFT;
1549}
1550
1551int mem_cgroup_swappiness(struct mem_cgroup *memcg)
1552{
1553 /* root ? */
1554 if (!css_parent(&memcg->css))
1555 return vm_swappiness;
1556
1557 return memcg->swappiness;
1558}
1559
1560/*
1561 * memcg->moving_account is used to check the possibility that some thread
1562 * is calling move_account(). When a thread on CPU-A starts moving pages
1563 * under a memcg, other threads should check memcg->moving_account under
1564 * rcu_read_lock(), like this:
1565 *
1566 * CPU-A CPU-B
1567 * rcu_read_lock()
1568 * memcg->moving_account+1 if (memcg->moving_account)
1569 * take heavy locks.
1570 * synchronize_rcu() update something.
1571 * rcu_read_unlock()
1572 * start move here.
1573 */
1574
1575/* for quick checking without looking up memcg */
1576atomic_t memcg_moving __read_mostly;
1577
1578static void mem_cgroup_start_move(struct mem_cgroup *memcg)
1579{
1580 atomic_inc(&memcg_moving);
1581 atomic_inc(&memcg->moving_account);
1582 synchronize_rcu();
1583}
1584
1585static void mem_cgroup_end_move(struct mem_cgroup *memcg)
1586{
1587 /*
1588 * Now, mem_cgroup_clear_mc() may call this function with NULL.
1589 * We check NULL in callee rather than caller.
1590 */
1591 if (memcg) {
1592 atomic_dec(&memcg_moving);
1593 atomic_dec(&memcg->moving_account);
1594 }
1595}
1596
1597/*
1598 * Two routines for checking whether "memcg" is under move_account() or not.
1599 *
1600 * mem_cgroup_stolen() - checks whether a cgroup is mc.from. This
1601 * is used to avoid races in accounting. If true,
1602 * pc->mem_cgroup may be overwritten.
1603 *
1604 * mem_cgroup_under_move() - checks whether a cgroup is mc.from or mc.to or
1605 * under the hierarchy of moving cgroups. This is for
1606 * waiting at high memory pressure caused by "move".
1607 */
1608
1609static bool mem_cgroup_stolen(struct mem_cgroup *memcg)
1610{
1611 VM_BUG_ON(!rcu_read_lock_held());
1612 return atomic_read(&memcg->moving_account) > 0;
1613}
1614
1615static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1616{
1617 struct mem_cgroup *from;
1618 struct mem_cgroup *to;
1619 bool ret = false;
1620 /*
1621 * Unlike task_move routines, we access mc.to, mc.from not under
1622 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1623 */
1624 spin_lock(&mc.lock);
1625 from = mc.from;
1626 to = mc.to;
1627 if (!from)
1628 goto unlock;
1629
1630 ret = mem_cgroup_same_or_subtree(memcg, from)
1631 || mem_cgroup_same_or_subtree(memcg, to);
1632unlock:
1633 spin_unlock(&mc.lock);
1634 return ret;
1635}
1636
1637static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1638{
1639 if (mc.moving_task && current != mc.moving_task) {
1640 if (mem_cgroup_under_move(memcg)) {
1641 DEFINE_WAIT(wait);
1642 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1643 /* moving charge context might have finished. */
1644 if (mc.moving_task)
1645 schedule();
1646 finish_wait(&mc.waitq, &wait);
1647 return true;
1648 }
1649 }
1650 return false;
1651}
1652
1653/*
1654 * Take this lock when
1655 * - code tries to modify a page's memcg while it is USED.
1656 * - code tries to modify page state accounting in a memcg.
1657 * See mem_cgroup_stolen(), too.
1658 */
1659static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
1660 unsigned long *flags)
1661{
1662 spin_lock_irqsave(&memcg->move_lock, *flags);
1663}
1664
1665static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
1666 unsigned long *flags)
1667{
1668 spin_unlock_irqrestore(&memcg->move_lock, *flags);
1669}
1670
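/* Convert a page count into kilobytes for printing. */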
1671#define K(x) ((x) << (PAGE_SHIFT-10))
1672/**
1673 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
1674 * @memcg: The memory cgroup that went over limit
1675 * @p: Task that is going to be killed
1676 *
1677 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1678 * enabled
1679 */
1680void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1681{
1682 /* oom_info_lock ensures that parallel ooms do not interleave */
1683 static DEFINE_MUTEX(oom_info_lock);
1684 struct mem_cgroup *iter;
1685 unsigned int i;
1686
1687 if (!p)
1688 return;
1689
1690 mutex_lock(&oom_info_lock);
1691 rcu_read_lock();
1692
1693 pr_info("Task in ");
1694 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1695 pr_cont(" killed as a result of limit of ");
1696 pr_cont_cgroup_path(memcg->css.cgroup);
1697 pr_cont("\n");
1698
1699 rcu_read_unlock();
1700
1701 pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
1702 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1703 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1704 res_counter_read_u64(&memcg->res, RES_FAILCNT));
1705 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %llu\n",
1706 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1707 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1708 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1709 pr_info("kmem: usage %llukB, limit %llukB, failcnt %llu\n",
1710 res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10,
1711 res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10,
1712 res_counter_read_u64(&memcg->kmem, RES_FAILCNT));
1713
1714 for_each_mem_cgroup_tree(iter, memcg) {
1715 pr_info("Memory cgroup stats for ");
1716 pr_cont_cgroup_path(iter->css.cgroup);
1717 pr_cont(":");
1718
1719 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1720 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1721 continue;
1722 pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
1723 K(mem_cgroup_read_stat(iter, i)));
1724 }
1725
1726 for (i = 0; i < NR_LRU_LISTS; i++)
1727 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1728 K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1729
1730 pr_cont("\n");
1731 }
1732 mutex_unlock(&oom_info_lock);
1733}
1734
1735/*
1736 * This function returns the number of memcgs in the hierarchy tree. Returns
1737 * 1 (the self count) if there are no children.
1738 */
1739static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1740{
1741 int num = 0;
1742 struct mem_cgroup *iter;
1743
1744 for_each_mem_cgroup_tree(iter, memcg)
1745 num++;
1746 return num;
1747}
1748
1749/*
1750 * Return the memory (and swap, if configured) limit for a memcg.
1751 */
1752static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
1753{
1754 u64 limit;
1755
1756 limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1757
1758 /*
1759 * Do not consider swap space if we cannot swap due to swappiness
1760 */
1761 if (mem_cgroup_swappiness(memcg)) {
1762 u64 memsw;
1763
1764 limit += total_swap_pages << PAGE_SHIFT;
1765 memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1766
1767 /*
1768 * If memsw is finite and limits the amount of swap space
1769 * available to this memcg, return that limit.
1770 */
1771 limit = min(limit, memsw);
1772 }
1773
1774 return limit;
1775}
1776
1777static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1778 int order)
1779{
1780 struct mem_cgroup *iter;
1781 unsigned long chosen_points = 0;
1782 unsigned long totalpages;
1783 unsigned int points = 0;
1784 struct task_struct *chosen = NULL;
1785
1786 /*
1787 * If current has a pending SIGKILL or is exiting, then automatically
1788 * select it. The goal is to allow it to allocate so that it may
1789 * quickly exit and free its memory.
1790 */
1791 if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
1792 set_thread_flag(TIF_MEMDIE);
1793 return;
1794 }
1795
1796 check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
1797 totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
1798 for_each_mem_cgroup_tree(iter, memcg) {
1799 struct css_task_iter it;
1800 struct task_struct *task;
1801
1802 css_task_iter_start(&iter->css, &it);
1803 while ((task = css_task_iter_next(&it))) {
1804 switch (oom_scan_process_thread(task, totalpages, NULL,
1805 false)) {
1806 case OOM_SCAN_SELECT:
1807 if (chosen)
1808 put_task_struct(chosen);
1809 chosen = task;
1810 chosen_points = ULONG_MAX;
1811 get_task_struct(chosen);
1812 /* fall through */
1813 case OOM_SCAN_CONTINUE:
1814 continue;
1815 case OOM_SCAN_ABORT:
1816 css_task_iter_end(&it);
1817 mem_cgroup_iter_break(memcg, iter);
1818 if (chosen)
1819 put_task_struct(chosen);
1820 return;
1821 case OOM_SCAN_OK:
1822 break;
1823 }
1824 points = oom_badness(task, memcg, NULL, totalpages);
1825 if (!points || points < chosen_points)
1826 continue;
1827 /* Prefer thread group leaders for display purposes */
1828 if (points == chosen_points &&
1829 thread_group_leader(chosen))
1830 continue;
1831
1832 if (chosen)
1833 put_task_struct(chosen);
1834 chosen = task;
1835 chosen_points = points;
1836 get_task_struct(chosen);
1837 }
1838 css_task_iter_end(&it);
1839 }
1840
1841 if (!chosen)
1842 return;
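/* Scale the winner's badness to the 0..1000 range, as the global OOM killer does. */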
1843 points = chosen_points * 1000 / totalpages;
1844 oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
1845 NULL, "Memory cgroup out of memory");
1846}
1847
1848static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
1849 gfp_t gfp_mask,
1850 unsigned long flags)
1851{
1852 unsigned long total = 0;
1853 bool noswap = false;
1854 int loop;
1855
1856 if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
1857 noswap = true;
1858 if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
1859 noswap = true;
1860
1861 for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
1862 if (loop)
1863 drain_all_stock_async(memcg);
1864 total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
1865 /*
1866 * Allow limit shrinkers, which are triggered directly
1867 * by userspace, to catch signals and stop reclaim
1868 * after minimal progress, regardless of the margin.
1869 */
1870 if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
1871 break;
1872 if (mem_cgroup_margin(memcg))
1873 break;
1874 /*
1875 * If nothing was reclaimed after two attempts, there
1876 * may be no reclaimable pages in this hierarchy.
1877 */
1878 if (loop && !total)
1879 break;
1880 }
1881 return total;
1882}
1883
1884/**
1885 * test_mem_cgroup_node_reclaimable
1886 * @memcg: the target memcg
1887 * @nid: the node ID to be checked.
1888 * @noswap: specify true here if the caller wants file-only information.
1889 *
1890 * This function returns whether the specified memcg contains any
1891 * reclaimable pages on the given node. Returns true if there are any
1892 * reclaimable pages in the node, false otherwise.
1893 */
1894static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1895 int nid, bool noswap)
1896{
1897 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1898 return true;
1899 if (noswap || !total_swap_pages)
1900 return false;
1901 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1902 return true;
1903 return false;
1905}
1906#if MAX_NUMNODES > 1
1907
1908/*
1909 * Always updating the nodemask is not very good - even if we have an empty
1910 * list or the wrong list here, we can start from some node and traverse all
1911 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1912 *
1913 */
1914static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1915{
1916 int nid;
1917 /*
1918 * numainfo_events > 0 means there were at least NUMAINFO_EVENTS_TARGET
1919 * pagein/pageout changes since the last update.
1920 */
1921 if (!atomic_read(&memcg->numainfo_events))
1922 return;
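/* Only one updater at a time; anyone arriving later just bails out. */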
1923 if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1924 return;
1925
1926 /* make a nodemask where this memcg uses memory from */
1927 memcg->scan_nodes = node_states[N_MEMORY];
1928
1929 for_each_node_mask(nid, node_states[N_MEMORY]) {
1930
1931 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1932 node_clear(nid, memcg->scan_nodes);
1933 }
1934
1935 atomic_set(&memcg->numainfo_events, 0);
1936 atomic_set(&memcg->numainfo_updating, 0);
1937}
1938
1939/*
1940 * Select a node to start reclaim from. Because all we need is to reduce
1941 * the usage counter, starting from anywhere is OK. Reclaiming from the
1942 * current node has both pros and cons:
1943 *
1944 * Freeing memory from the current node means freeing memory from a node
1945 * which we will use or have used, so it may degrade the LRU. And if several
1946 * threads hit their limits, they will contend on one node. But freeing from
1947 * a remote node costs more for memory reclaim because of memory latency.
1948 *
1949 * For now, we use round-robin. A better algorithm is welcome.
1950 */
1951int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1952{
1953 int node;
1954
1955 mem_cgroup_may_update_nodemask(memcg);
1956 node = memcg->last_scanned_node;
1957
1958 node = next_node(node, memcg->scan_nodes);
1959 if (node == MAX_NUMNODES)
1960 node = first_node(memcg->scan_nodes);
1961 /*
1962 * We call this when we hit the limit, not when pages are added to the LRU.
1963 * No LRU may hold pages, either because all pages are UNEVICTABLE or
1964 * because the memcg is too small and none of its pages are on an LRU. In
1965 * that case, we use the current node.
1966 */
1967 if (unlikely(node == MAX_NUMNODES))
1968 node = numa_node_id();
1969
1970 memcg->last_scanned_node = node;
1971 return node;
1972}
1973
1974/*
1975 * Check all nodes for reclaimable pages.
1976 * For a quick scan, we make use of scan_nodes, which allows us to skip
1977 * unused nodes. But scan_nodes is lazily updated and may not contain
1978 * enough new information, so we need to double-check.
1979 */
1980static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1981{
1982 int nid;
1983
1984 /*
1985 * Quick check making use of scan_nodes:
1986 * we can skip unused nodes.
1987 */
1988 if (!nodes_empty(memcg->scan_nodes)) {
1989 for (nid = first_node(memcg->scan_nodes);
1990 nid < MAX_NUMNODES;
1991 nid = next_node(nid, memcg->scan_nodes)) {
1992
1993 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1994 return true;
1995 }
1996 }
1997 /*
1998 * Check the rest of the nodes.
1999 */
2000 for_each_node_state(nid, N_MEMORY) {
2001 if (node_isset(nid, memcg->scan_nodes))
2002 continue;
2003 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
2004 return true;
2005 }
2006 return false;
2007}
2008
2009#else
2010int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
2011{
2012 return 0;
2013}
2014
2015static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
2016{
2017 return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
2018}
2019#endif
2020
2021static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
2022 struct zone *zone,
2023 gfp_t gfp_mask,
2024 unsigned long *total_scanned)
2025{
2026 struct mem_cgroup *victim = NULL;
2027 int total = 0;
2028 int loop = 0;
2029 unsigned long excess;
2030 unsigned long nr_scanned;
2031 struct mem_cgroup_reclaim_cookie reclaim = {
2032 .zone = zone,
2033 .priority = 0,
2034 };
2035
2036 excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
2037
2038 while (1) {
2039 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
2040 if (!victim) {
2041 loop++;
2042 if (loop >= 2) {
2043 /*
2044 * If we have not been able to reclaim
2045 * anything, it might be because there are
2046 * no reclaimable pages under this hierarchy
2047 */
2048 if (!total)
2049 break;
2050 /*
2051 * We want to do more targeted reclaim.
2052 * excess >> 2 is not so large as to reclaim
2053 * too much, nor so small that we keep coming
2054 * back to reclaim from this cgroup
2055 */
2056 if (total >= (excess >> 2) ||
2057 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
2058 break;
2059 }
2060 continue;
2061 }
2062 if (!mem_cgroup_reclaimable(victim, false))
2063 continue;
2064 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
2065 zone, &nr_scanned);
2066 *total_scanned += nr_scanned;
2067 if (!res_counter_soft_limit_excess(&root_memcg->res))
2068 break;
2069 }
2070 mem_cgroup_iter_break(root_memcg, victim);
2071 return total;
2072}
2073
2074#ifdef CONFIG_LOCKDEP
2075static struct lockdep_map memcg_oom_lock_dep_map = {
2076 .name = "memcg_oom_lock",
2077};
2078#endif
2079
2080static DEFINE_SPINLOCK(memcg_oom_lock);
2081
2082/*
2083 * Check whether the OOM killer is already running in our hierarchy.
2084 * If someone else is running it, return false.
2085 */
2086static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
2087{
2088 struct mem_cgroup *iter, *failed = NULL;
2089
2090 spin_lock(&memcg_oom_lock);
2091
2092 for_each_mem_cgroup_tree(iter, memcg) {
2093 if (iter->oom_lock) {
2094 /*
2095 * this subtree of our hierarchy is already locked,
2096 * so we cannot take the lock.
2097 */
2098 failed = iter;
2099 mem_cgroup_iter_break(memcg, iter);
2100 break;
2101 } else
2102 iter->oom_lock = true;
2103 }
2104
2105 if (failed) {
2106 /*
2107 * OK, we failed to lock the whole subtree, so we have
2108 * to clean up what we set up, up to the failing subtree
2109 */
2110 for_each_mem_cgroup_tree(iter, memcg) {
2111 if (iter == failed) {
2112 mem_cgroup_iter_break(memcg, iter);
2113 break;
2114 }
2115 iter->oom_lock = false;
2116 }
2117 } else
2118 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
2119
2120 spin_unlock(&memcg_oom_lock);
2121
2122 return !failed;
2123}
2124
2125static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
2126{
2127 struct mem_cgroup *iter;
2128
2129 spin_lock(&memcg_oom_lock);
2130 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
2131 for_each_mem_cgroup_tree(iter, memcg)
2132 iter->oom_lock = false;
2133 spin_unlock(&memcg_oom_lock);
2134}
2135
2136static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
2137{
2138 struct mem_cgroup *iter;
2139
2140 for_each_mem_cgroup_tree(iter, memcg)
2141 atomic_inc(&iter->under_oom);
2142}
2143
2144static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
2145{
2146 struct mem_cgroup *iter;
2147
2148 /*
2149 * When a new child is created while the hierarchy is under oom,
2150 * mem_cgroup_oom_lock() may not be called. We have to use
2151 * atomic_add_unless() here.
2152 */
2153 for_each_mem_cgroup_tree(iter, memcg)
2154 atomic_add_unless(&iter->under_oom, -1, 0);
2155}
2156
2157static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
2158
2159struct oom_wait_info {
2160 struct mem_cgroup *memcg;
2161 wait_queue_t wait;
2162};
2163
2164static int memcg_oom_wake_function(wait_queue_t *wait,
2165 unsigned mode, int sync, void *arg)
2166{
2167 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
2168 struct mem_cgroup *oom_wait_memcg;
2169 struct oom_wait_info *oom_wait_info;
2170
2171 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
2172 oom_wait_memcg = oom_wait_info->memcg;
2173
2174 /*
2175 * Both oom_wait_info->memcg and wake_memcg are stable under us,
2176 * so we can use css_is_ancestor without worrying about RCU.
2177 */
2178 if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
2179 && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
2180 return 0;
2181 return autoremove_wake_function(wait, mode, sync, arg);
2182}
2183
2184static void memcg_wakeup_oom(struct mem_cgroup *memcg)
2185{
2186 atomic_inc(&memcg->oom_wakeups);
2187 /* for filtering, pass "memcg" as argument. */
2188 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
2189}
2190
2191static void memcg_oom_recover(struct mem_cgroup *memcg)
2192{
2193 if (memcg && atomic_read(&memcg->under_oom))
2194 memcg_wakeup_oom(memcg);
2195}
2196
2197static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
2198{
2199 if (!current->memcg_oom.may_oom)
2200 return;
2201 /*
2202 * We are in the middle of the charge context here, so we
2203 * don't want to block when potentially sitting on a callstack
2204 * that holds all kinds of filesystem and mm locks.
2205 *
2206 * Also, the caller may handle a failed allocation gracefully
2207 * (like optional page cache readahead) and so an OOM killer
2208 * invocation might not even be necessary.
2209 *
2210 * That's why we don't do anything here except remember the
2211 * OOM context and then deal with it at the end of the page
2212 * fault when the stack is unwound, the locks are released,
2213 * and when we know whether the fault was overall successful.
2214 */
2215 css_get(&memcg->css);
2216 current->memcg_oom.memcg = memcg;
2217 current->memcg_oom.gfp_mask = mask;
2218 current->memcg_oom.order = order;
2219}
2220
2221/**
2222 * mem_cgroup_oom_synchronize - complete memcg OOM handling
2223 * @handle: actually kill/wait or just clean up the OOM state
2224 *
2225 * This has to be called at the end of a page fault if the memcg OOM
2226 * handler was enabled.
2227 *
2228 * Memcg supports userspace OOM handling where failed allocations must
2229 * sleep on a waitqueue until the userspace task resolves the
2230 * situation. Sleeping directly in the charge context with all kinds
2231 * of locks held is not a good idea, instead we remember an OOM state
2232 * in the task and mem_cgroup_oom_synchronize() has to be called at
2233 * the end of the page fault to complete the OOM handling.
2234 *
2235 * Returns %true if an ongoing memcg OOM situation was detected and
2236 * completed, %false otherwise.
2237 */
2238bool mem_cgroup_oom_synchronize(bool handle)
2239{
2240 struct mem_cgroup *memcg = current->memcg_oom.memcg;
2241 struct oom_wait_info owait;
2242 bool locked;
2243
2244 /* OOM is global, do not handle */
2245 if (!memcg)
2246 return false;
2247
2248 if (!handle)
2249 goto cleanup;
2250
2251 owait.memcg = memcg;
2252 owait.wait.flags = 0;
2253 owait.wait.func = memcg_oom_wake_function;
2254 owait.wait.private = current;
2255 INIT_LIST_HEAD(&owait.wait.task_list);
2256
2257 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
2258 mem_cgroup_mark_under_oom(memcg);
2259
2260 locked = mem_cgroup_oom_trylock(memcg);
2261
2262 if (locked)
2263 mem_cgroup_oom_notify(memcg);
2264
2265 if (locked && !memcg->oom_kill_disable) {
2266 mem_cgroup_unmark_under_oom(memcg);
2267 finish_wait(&memcg_oom_waitq, &owait.wait);
2268 mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
2269 current->memcg_oom.order);
2270 } else {
2271 schedule();
2272 mem_cgroup_unmark_under_oom(memcg);
2273 finish_wait(&memcg_oom_waitq, &owait.wait);
2274 }
2275
2276 if (locked) {
2277 mem_cgroup_oom_unlock(memcg);
2278 /*
2279 * There is no guarantee that an OOM-lock contender
2280 * sees the wakeups triggered by the OOM kill
2281 * uncharges. Wake any sleepers explicitly.
2282 */
2283 memcg_oom_recover(memcg);
2284 }
2285cleanup:
2286 current->memcg_oom.memcg = NULL;
2287 css_put(&memcg->css);
2288 return true;
2289}
2290
2291/*
2292 * Currently used to update mapped file statistics, but the routine can be
2293 * generalized to update other statistics as well.
2294 *
2295 * Notes: Race condition
2296 *
2297 * We usually use lock_page_cgroup() for accessing page_cgroup members, but
2298 * it tends to be costly. Under some conditions, however, we don't need
2299 * to do so _always_:
2300 *
2301 * Considering "charge", lock_page_cgroup() is not required because all
2302 * file-stat operations happen after a page is attached to the radix tree.
2303 * There is no race with "charge".
2304 *
2305 * Considering "uncharge", we know that memcg intentionally doesn't clear
2306 * pc->mem_cgroup at "uncharge". So we always see a valid pc->mem_cgroup even
2307 * if there is a race with "uncharge". The statistics themselves are properly
2308 * handled via flags.
2309 *
2310 * Considering "move", this is the only case where we see a race. To keep the
2311 * race window small, we check memcg->moving_account and detect the
2312 * possibility of a race; if there is one, we take the lock.
2313 */
2314
2315void __mem_cgroup_begin_update_page_stat(struct page *page,
2316 bool *locked, unsigned long *flags)
2317{
2318 struct mem_cgroup *memcg;
2319 struct page_cgroup *pc;
2320
2321 pc = lookup_page_cgroup(page);
2322again:
2323 memcg = pc->mem_cgroup;
2324 if (unlikely(!memcg || !PageCgroupUsed(pc)))
2325 return;
2326 /*
2327 * If this memory cgroup is not under account moving, we don't
2328 * need to take move_lock_mem_cgroup(): since we already hold
2329 * rcu_read_lock(), any call to move_account() will be delayed
2330 * until rcu_read_unlock() (see mem_cgroup_start_move()).
2331 */
2332 if (!mem_cgroup_stolen(memcg))
2333 return;
2334
2335 move_lock_mem_cgroup(memcg, flags);
2336 if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
2337 move_unlock_mem_cgroup(memcg, flags);
2338 goto again;
2339 }
2340 *locked = true;
2341}
2342
2343void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
2344{
2345 struct page_cgroup *pc = lookup_page_cgroup(page);
2346
2347 /*
2348 * It's guaranteed that pc->mem_cgroup never changes while the
2349 * lock is held, because any routine that modifies pc->mem_cgroup
2350 * should take move_lock_mem_cgroup().
2351 */
2352 move_unlock_mem_cgroup(pc->mem_cgroup, flags);
2353}
2354
2355void mem_cgroup_update_page_stat(struct page *page,
2356 enum mem_cgroup_stat_index idx, int val)
2357{
2358 struct mem_cgroup *memcg;
2359 struct page_cgroup *pc = lookup_page_cgroup(page);
2360 unsigned long uninitialized_var(flags);
2361
2362 if (mem_cgroup_disabled())
2363 return;
2364
2365 VM_BUG_ON(!rcu_read_lock_held());
2366 memcg = pc->mem_cgroup;
2367 if (unlikely(!memcg || !PageCgroupUsed(pc)))
2368 return;
2369
2370 this_cpu_add(memcg->stat->count[idx], val);
2371}
2372
2373/*
2374 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
2375 * TODO: larger batches may be necessary on very large machines.
2376 */
2377#define CHARGE_BATCH 32U
2378struct memcg_stock_pcp {
2379 struct mem_cgroup *cached; /* never points to the root cgroup */
2380 unsigned int nr_pages;
2381 struct work_struct work;
2382 unsigned long flags;
2383#define FLUSHING_CACHED_CHARGE 0
2384};
2385static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2386static DEFINE_MUTEX(percpu_charge_mutex);
2387
2388/**
2389 * consume_stock: Try to consume stocked charge on this cpu.
2390 * @memcg: memcg to consume from.
2391 * @nr_pages: how many pages to charge.
2392 *
2393 * The charges will only happen if @memcg matches the current cpu's memcg
2394 * stock, and at least @nr_pages are available in that stock. Failure to
2395 * service an allocation will cause the caller to refill the stock.
2396 *
2397 * Returns true if successful, false otherwise.
2398 */
2399static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2400{
2401 struct memcg_stock_pcp *stock;
2402 bool ret = true;
2403
2404 if (nr_pages > CHARGE_BATCH)
2405 return false;
2406
2407 stock = &get_cpu_var(memcg_stock);
2408 if (memcg == stock->cached && stock->nr_pages >= nr_pages)
2409 stock->nr_pages -= nr_pages;
2410 else /* need to call res_counter_charge */
2411 ret = false;
2412 put_cpu_var(memcg_stock);
2413 return ret;
2414}
2415
2416/*
2417 * Return the percpu cached stock to the res_counter and reset the cached information.
2418 */
2419static void drain_stock(struct memcg_stock_pcp *stock)
2420{
2421 struct mem_cgroup *old = stock->cached;
2422
2423 if (stock->nr_pages) {
2424 unsigned long bytes = stock->nr_pages * PAGE_SIZE;
2425
2426 res_counter_uncharge(&old->res, bytes);
2427 if (do_swap_account)
2428 res_counter_uncharge(&old->memsw, bytes);
2429 stock->nr_pages = 0;
2430 }
2431 stock->cached = NULL;
2432}
2433
2434/*
2435 * This must be called with preemption disabled, or by a thread
2436 * which is pinned to the local cpu.
2437 */
2438static void drain_local_stock(struct work_struct *dummy)
2439{
2440 struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
2441 drain_stock(stock);
2442 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2443}
2444
2445static void __init memcg_stock_init(void)
2446{
2447 int cpu;
2448
2449 for_each_possible_cpu(cpu) {
2450 struct memcg_stock_pcp *stock =
2451 &per_cpu(memcg_stock, cpu);
2452 INIT_WORK(&stock->work, drain_local_stock);
2453 }
2454}
2455
2456/*
2457 * Cache charges taken from the res_counter in the local per-cpu area,
2458 * to be consumed later by consume_stock().
2459 */
2460static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2461{
2462 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2463
2464 if (stock->cached != memcg) { /* reset if necessary */
2465 drain_stock(stock);
2466 stock->cached = memcg;
2467 }
2468 stock->nr_pages += nr_pages;
2469 put_cpu_var(memcg_stock);
2470}
2471
2472/*
2473 * Drain all per-CPU charge caches for the given root_memcg and the
2474 * subtree of the hierarchy under it. The sync flag says whether we
2475 * should block until the work is done.
2476 */
2477static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
2478{
2479 int cpu, curcpu;
2480
2481 /* Notify other cpus that system-wide "drain" is running */
2482 get_online_cpus();
2483 curcpu = get_cpu();
2484 for_each_online_cpu(cpu) {
2485 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2486 struct mem_cgroup *memcg;
2487
2488 memcg = stock->cached;
2489 if (!memcg || !stock->nr_pages)
2490 continue;
2491 if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
2492 continue;
2493 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2494 if (cpu == curcpu)
2495 drain_local_stock(&stock->work);
2496 else
2497 schedule_work_on(cpu, &stock->work);
2498 }
2499 }
2500 put_cpu();
2501
2502 if (!sync)
2503 goto out;
2504
2505 for_each_online_cpu(cpu) {
2506 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2507 if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
2508 flush_work(&stock->work);
2509 }
2510out:
2511 put_online_cpus();
2512}
2513
2514/*
2515 * Try to drain stocked charges on other cpus. This function is asynchronous
2516 * and just schedules a work item per cpu to drain locally on each cpu. The
2517 * caller can expect some charges to return to the res_counter later, but
2518 * cannot wait for that to happen.
2519 */
2520static void drain_all_stock_async(struct mem_cgroup *root_memcg)
2521{
2522 /*
2523 * If someone is already draining, avoid adding more kworker runs.
2524 */
2525 if (!mutex_trylock(&percpu_charge_mutex))
2526 return;
2527 drain_all_stock(root_memcg, false);
2528 mutex_unlock(&percpu_charge_mutex);
2529}
2530
2531/* This is a synchronous drain interface. */
2532static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
2533{
2534 /* called when force_empty is called */
2535 mutex_lock(&percpu_charge_mutex);
2536 drain_all_stock(root_memcg, true);
2537 mutex_unlock(&percpu_charge_mutex);
2538}
2539
2540/*
2541 * This function drains the percpu counter values from a DEAD cpu and
2542 * moves them to the local cpu. Note that this function can be preempted.
2543 */
2544static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
2545{
2546 int i;
2547
2548 spin_lock(&memcg->pcp_counter_lock);
2549 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
2550 long x = per_cpu(memcg->stat->count[i], cpu);
2551
2552 per_cpu(memcg->stat->count[i], cpu) = 0;
2553 memcg->nocpu_base.count[i] += x;
2554 }
2555 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
2556 unsigned long x = per_cpu(memcg->stat->events[i], cpu);
2557
2558 per_cpu(memcg->stat->events[i], cpu) = 0;
2559 memcg->nocpu_base.events[i] += x;
2560 }
2561 spin_unlock(&memcg->pcp_counter_lock);
2562}
2563
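/*
 * On CPU_DEAD, fold the dead cpu's percpu stat counters into nocpu_base
 * and return its cached stock to the res_counter.
 */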
2564static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
2565 unsigned long action,
2566 void *hcpu)
2567{
2568 int cpu = (unsigned long)hcpu;
2569 struct memcg_stock_pcp *stock;
2570 struct mem_cgroup *iter;
2571
2572 if (action == CPU_ONLINE)
2573 return NOTIFY_OK;
2574
2575 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
2576 return NOTIFY_OK;
2577
2578 for_each_mem_cgroup(iter)
2579 mem_cgroup_drain_pcp_counter(iter, cpu);
2580
2581 stock = &per_cpu(memcg_stock, cpu);
2582 drain_stock(stock);
2583 return NOTIFY_OK;
2584}
2585
2586
2587/* See mem_cgroup_try_charge() for details */
2588enum {
2589 CHARGE_OK, /* success */
2590 CHARGE_RETRY, /* need to retry but retry is not bad */
2591 CHARGE_NOMEM, /* we can't do more. return -ENOMEM */
2592 CHARGE_WOULDBLOCK, /* __GFP_WAIT wasn't set and not enough resources */
2593};
2594
2595static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2596 unsigned int nr_pages, unsigned int min_pages,
2597 bool invoke_oom)
2598{
2599 unsigned long csize = nr_pages * PAGE_SIZE;
2600 struct mem_cgroup *mem_over_limit;
2601 struct res_counter *fail_res;
2602 unsigned long flags = 0;
2603 int ret;
2604
2605 ret = res_counter_charge(&memcg->res, csize, &fail_res);
2606
2607 if (likely(!ret)) {
2608 if (!do_swap_account)
2609 return CHARGE_OK;
2610 ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
2611 if (likely(!ret))
2612 return CHARGE_OK;
2613
2614 res_counter_uncharge(&memcg->res, csize);
2615 mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
2616 flags |= MEM_CGROUP_RECLAIM_NOSWAP;
2617 } else
2618 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
2619 /*
2620 * Never reclaim on behalf of optional batching, retry with a
2621 * single page instead.
2622 */
2623 if (nr_pages > min_pages)
2624 return CHARGE_RETRY;
2625
2626 if (!(gfp_mask & __GFP_WAIT))
2627 return CHARGE_WOULDBLOCK;
2628
2629 if (gfp_mask & __GFP_NORETRY)
2630 return CHARGE_NOMEM;
2631
2632 ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
2633 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2634 return CHARGE_RETRY;
2635 /*
2636 * Even though the limit is exceeded at this point, reclaim
2637 * may have been able to free some pages. Retry the charge
2638 * before killing the task.
2639 *
2640 * Only for regular pages, though: huge pages are rather
2641 * unlikely to succeed so close to the limit, and we fall back
2642 * to regular pages anyway in case of failure.
2643 */
2644 if (nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER) && ret)
2645 return CHARGE_RETRY;
2646
2647 /*
2648 * During a task move, charges can be doubly counted. So it is
2649 * better to wait until the end of the task move if one is in progress.
2650 */
2651 if (mem_cgroup_wait_acct_move(mem_over_limit))
2652 return CHARGE_RETRY;
2653
2654 if (invoke_oom)
2655 mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(csize));
2656
2657 return CHARGE_NOMEM;
2658}
2659
2660/**
2661 * mem_cgroup_try_charge - try charging a memcg
2662 * @memcg: memcg to charge
 * @gfp_mask: reclaim mode
2663 * @nr_pages: number of pages to charge
2664 * @oom: trigger OOM if reclaim fails
2665 *
2666 * Returns 0 if @memcg was charged successfully, -EINTR if the charge
2667 * was bypassed to root_mem_cgroup, and -ENOMEM if the charge failed.
2668 */
2669static int mem_cgroup_try_charge(struct mem_cgroup *memcg,
2670 gfp_t gfp_mask,
2671 unsigned int nr_pages,
2672 bool oom)
2673{
2674 unsigned int batch = max(CHARGE_BATCH, nr_pages);
2675 int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2676 int ret;
2677
2678 if (mem_cgroup_is_root(memcg))
2679 goto done;
2680 /*
2681 * Unlike in global OOM situations, memcg is not in a physical
2682 * memory shortage. Allow dying and OOM-killed tasks to
2683 * bypass the last charges so that they can exit quickly and
2684 * free their memory.
2685 */
2686 if (unlikely(test_thread_flag(TIF_MEMDIE) ||
2687 fatal_signal_pending(current)))
2688 goto bypass;
2689
2690 if (unlikely(task_in_memcg_oom(current)))
2691 goto nomem;
2692
2693 if (gfp_mask & __GFP_NOFAIL)
2694 oom = false;
2695again:
2696 if (consume_stock(memcg, nr_pages))
2697 goto done;
2698
2699 do {
2700 bool invoke_oom = oom && !nr_oom_retries;
2701
2702 /* If killed, bypass charge */
2703 if (fatal_signal_pending(current))
2704 goto bypass;
2705
2706 ret = mem_cgroup_do_charge(memcg, gfp_mask, batch,
2707 nr_pages, invoke_oom);
2708 switch (ret) {
2709 case CHARGE_OK:
2710 break;
2711 case CHARGE_RETRY: /* not in OOM situation but retry */
2712 batch = nr_pages;
2713 goto again;
2714 case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
2715 goto nomem;
2716 case CHARGE_NOMEM: /* OOM routine works */
2717 if (!oom || invoke_oom)
2718 goto nomem;
2719 nr_oom_retries--;
2720 break;
2721 }
2722 } while (ret != CHARGE_OK);
2723
2724 if (batch > nr_pages)
2725 refill_stock(memcg, batch - nr_pages);
2726done:
2727 return 0;
2728nomem:
2729 if (!(gfp_mask & __GFP_NOFAIL))
2730 return -ENOMEM;
2731bypass:
2732 return -EINTR;
2733}
2734
2735/**
2736 * mem_cgroup_try_charge_mm - try charging a mm
2737 * @mm: mm_struct to charge
 * @gfp_mask: reclaim mode
2738 * @nr_pages: number of pages to charge
2739 * @oom: trigger OOM if reclaim fails
2740 *
2741 * Returns the charged mem_cgroup associated with the given mm_struct, or
2742 * NULL if the charge failed.
2743 */
2744static struct mem_cgroup *mem_cgroup_try_charge_mm(struct mm_struct *mm,
2745 gfp_t gfp_mask,
2746 unsigned int nr_pages,
2747 bool oom)
2748
2749{
2750 struct mem_cgroup *memcg;
2751 int ret;
2752
2753 memcg = get_mem_cgroup_from_mm(mm);
2754 ret = mem_cgroup_try_charge(memcg, gfp_mask, nr_pages, oom);
2755 css_put(&memcg->css);
2756 if (ret == -EINTR)
2757 memcg = root_mem_cgroup;
2758 else if (ret)
2759 memcg = NULL;
2760
2761 return memcg;
2762}
2763
2764/*
2765 * Sometimes we have to undo a charge we got by try_charge().
2766 * This function is for that: it uncharges the res_counters
2767 * that were charged by try_charge().
2768 */
2769static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
2770 unsigned int nr_pages)
2771{
2772 if (!mem_cgroup_is_root(memcg)) {
2773 unsigned long bytes = nr_pages * PAGE_SIZE;
2774
2775 res_counter_uncharge(&memcg->res, bytes);
2776 if (do_swap_account)
2777 res_counter_uncharge(&memcg->memsw, bytes);
2778 }
2779}
2780
2781/*
2782 * Cancel charges in this cgroup... doesn't propagate to the parent cgroup.
2783 * This is useful when moving usage to parent cgroup.
2784 */
2785static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
2786 unsigned int nr_pages)
2787{
2788 unsigned long bytes = nr_pages * PAGE_SIZE;
2789
2790 if (mem_cgroup_is_root(memcg))
2791 return;
2792
2793 res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
2794 if (do_swap_account)
2795 res_counter_uncharge_until(&memcg->memsw,
2796 memcg->memsw.parent, bytes);
2797}
2798
2799/*
2800 * A helper function to get a mem_cgroup from an ID. Must be called under
2801 * rcu_read_lock(). The caller is responsible for calling css_tryget if
2802 * the mem_cgroup is used for charging. (Dropping a refcnt from swap can
2803 * be done against a removed memcg.)
2804 */
2805static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2806{
2807 /* ID 0 is unused ID */
2808 if (!id)
2809 return NULL;
2810 return mem_cgroup_from_id(id);
2811}
2812
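/*
 * Look up the memcg associated with @page: from pc->mem_cgroup if the
 * page is charged, or from the swap cgroup record if the page sits in
 * swap cache. On success a css reference is taken, which the caller
 * must drop with css_put(); returns NULL if no live memcg is found.
 */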
2813struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
2814{
2815 struct mem_cgroup *memcg = NULL;
2816 struct page_cgroup *pc;
2817 unsigned short id;
2818 swp_entry_t ent;
2819
2820 VM_BUG_ON_PAGE(!PageLocked(page), page);
2821
2822 pc = lookup_page_cgroup(page);
2823 lock_page_cgroup(pc);
2824 if (PageCgroupUsed(pc)) {
2825 memcg = pc->mem_cgroup;
2826 if (memcg && !css_tryget(&memcg->css))
2827 memcg = NULL;
2828 } else if (PageSwapCache(page)) {
2829 ent.val = page_private(page);
2830 id = lookup_swap_cgroup_id(ent);
2831 rcu_read_lock();
2832 memcg = mem_cgroup_lookup(id);
2833 if (memcg && !css_tryget(&memcg->css))
2834 memcg = NULL;
2835 rcu_read_unlock();
2836 }
2837 unlock_page_cgroup(pc);
2838 return memcg;
2839}
2840
2841static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
2842 struct page *page,
2843 unsigned int nr_pages,
2844 enum charge_type ctype,
2845 bool lrucare)
2846{
2847 struct page_cgroup *pc = lookup_page_cgroup(page);
2848 struct zone *uninitialized_var(zone);
2849 struct lruvec *lruvec;
2850 bool was_on_lru = false;
2851 bool anon;
2852
2853 lock_page_cgroup(pc);
2854 VM_BUG_ON_PAGE(PageCgroupUsed(pc), page);
2855 /*
2856 * we don't need lock_page_cgroup() for tail pages, because they are not
2857 * accessed by any other context at this point.
2858 */
2859
2860 /*
2861 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
2862 * may already be on some other mem_cgroup's LRU. Take care of it.
2863 */
2864 if (lrucare) {
2865 zone = page_zone(page);
2866 spin_lock_irq(&zone->lru_lock);
2867 if (PageLRU(page)) {
2868 lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
2869 ClearPageLRU(page);
2870 del_page_from_lru_list(page, lruvec, page_lru(page));
2871 was_on_lru = true;
2872 }
2873 }
2874
2875 pc->mem_cgroup = memcg;
2876 /*
2877 * We access a page_cgroup asynchronously without lock_page_cgroup().
2878 * In particular, when a page_cgroup is read from a page, pc->mem_cgroup
2879 * is accessed after testing the USED bit. To make pc->mem_cgroup visible
2880 * before the USED bit, we need a memory barrier here.
2881 * See mem_cgroup_add_lru_list(), etc.
2882 */
2883 smp_wmb();
2884 SetPageCgroupUsed(pc);
2885
2886 if (lrucare) {
2887 if (was_on_lru) {
2888 lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
2889 VM_BUG_ON_PAGE(PageLRU(page), page);
2890 SetPageLRU(page);
2891 add_page_to_lru_list(page, lruvec, page_lru(page));
2892 }
2893 spin_unlock_irq(&zone->lru_lock);
2894 }
2895
2896 if (ctype == MEM_CGROUP_CHARGE_TYPE_ANON)
2897 anon = true;
2898 else
2899 anon = false;
2900
2901 mem_cgroup_charge_statistics(memcg, page, anon, nr_pages);
2902 unlock_page_cgroup(pc);
2903
2904 /*
2905 * "charge_statistics" updated the event counter, so check it now:
2906 * insert the ancestors (and their ancestors) into the softlimit RB-tree
2907 * if they exceed their soft limit.
2908 */
2909 memcg_check_events(memcg, page);
2910}
2911
2912static DEFINE_MUTEX(set_limit_mutex);
2913
2914#ifdef CONFIG_MEMCG_KMEM
2915static DEFINE_MUTEX(activate_kmem_mutex);
2916
2917static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg)
2918{
2919 return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) &&
2920 memcg_kmem_is_active(memcg);
2921}
2922
2923/*
2924 * This is a bit cumbersome, but it is rarely used and avoids a backpointer
2925 * in the memcg_cache_params struct.
2926 */
2927static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
2928{
2929 struct kmem_cache *cachep;
2930
2931 VM_BUG_ON(p->is_root_cache);
2932 cachep = p->root_cache;
2933 return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
2934}
2935
2936#ifdef CONFIG_SLABINFO
2937static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v)
2938{
2939 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
2940 struct memcg_cache_params *params;
2941
2942 if (!memcg_can_account_kmem(memcg))
2943 return -EIO;
2944
2945 print_slabinfo_header(m);
2946
2947 mutex_lock(&memcg->slab_caches_mutex);
2948 list_for_each_entry(params, &memcg->memcg_slab_caches, list)
2949 cache_show(memcg_params_to_cache(params), m);
2950 mutex_unlock(&memcg->slab_caches_mutex);
2951
2952 return 0;
2953}
2954#endif
2955
2956static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
2957{
2958 struct res_counter *fail_res;
2959 int ret = 0;
2960
2961 ret = res_counter_charge(&memcg->kmem, size, &fail_res);
2962 if (ret)
2963 return ret;
2964
2965 ret = mem_cgroup_try_charge(memcg, gfp, size >> PAGE_SHIFT,
2966 oom_gfp_allowed(gfp));
2967 if (ret == -EINTR) {
2968 /*
2969 * mem_cgroup_try_charge() chose to bypass to root due to
2970 * OOM kill or fatal signal. Since our only options are to
2971 * either fail the allocation or charge it to this cgroup, do
2972 * it as a temporary condition. But we can't fail. From a
2973 * kmem/slab perspective, the cache has already been selected,
2974 * by mem_cgroup_kmem_get_cache(), so it is too late to change
2975 * our minds.
2976 *
2977 * This condition will only trigger if the task entered
2978 * memcg_charge_kmem in a sane state, but was OOM-killed during
2979 * mem_cgroup_try_charge() above. Tasks that were already
2980 * dying when the allocation triggers should have been already
2981 * directed to the root cgroup in memcontrol.h
2982 */
2983 res_counter_charge_nofail(&memcg->res, size, &fail_res);
2984 if (do_swap_account)
2985 res_counter_charge_nofail(&memcg->memsw, size,
2986 &fail_res);
2987 ret = 0;
2988 } else if (ret)
2989 res_counter_uncharge(&memcg->kmem, size);
2990
2991 return ret;
2992}
2993
2994static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
2995{
2996 res_counter_uncharge(&memcg->res, size);
2997 if (do_swap_account)
2998 res_counter_uncharge(&memcg->memsw, size);
2999
3000 /* Not down to 0 */
3001 if (res_counter_uncharge(&memcg->kmem, size))
3002 return;
3003
3004 /*
3005 * Releases a reference taken in kmem_cgroup_css_offline in case
3006 * this last uncharge is racing with the offlining code or it is
3007 * outliving the memcg existence.
3008 *
3009 * The memory barrier imposed by test&clear is paired with the
3010 * explicit one in memcg_kmem_mark_dead().
3011 */
3012 if (memcg_kmem_test_and_clear_dead(memcg))
3013 css_put(&memcg->css);
3014}
3015
3016/*
3017 * Helper for accessing a memcg's index. It will be used as an index in the
3018 * child cache array in kmem_cache, and also to derive its name. This function
3019 * will return -1 when this is not a kmem-limited memcg.
3020 */
3021int memcg_cache_id(struct mem_cgroup *memcg)
3022{
3023 return memcg ? memcg->kmemcg_id : -1;
3024}
3025
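/*
 * Size the memcg caches array: double the number of groups for headroom,
 * clamped to [MEMCG_CACHES_MIN_SIZE, MEMCG_CACHES_MAX_SIZE].
 */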
3026static size_t memcg_caches_array_size(int num_groups)
3027{
3028 ssize_t size;
3029 if (num_groups <= 0)
3030 return 0;
3031
3032 size = 2 * num_groups;
3033 if (size < MEMCG_CACHES_MIN_SIZE)
3034 size = MEMCG_CACHES_MIN_SIZE;
3035 else if (size > MEMCG_CACHES_MAX_SIZE)
3036 size = MEMCG_CACHES_MAX_SIZE;
3037
3038 return size;
3039}
3040
3041/*
3042 * We should update the current array size iff all cache updates succeed. This
3043 * can only be done from the slab side. The slab mutex needs to be held when
3044 * calling this.
3045 */
3046void memcg_update_array_size(int num)
3047{
3048 if (num > memcg_limited_groups_array_size)
3049 memcg_limited_groups_array_size = memcg_caches_array_size(num);
3050}
3051
3052static void kmem_cache_destroy_work_func(struct work_struct *w);
3053
3054int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
3055{
3056 struct memcg_cache_params *cur_params = s->memcg_params;
3057
3058 VM_BUG_ON(!is_root_cache(s));
3059
3060 if (num_groups > memcg_limited_groups_array_size) {
3061 int i;
3062 struct memcg_cache_params *new_params;
3063 ssize_t size = memcg_caches_array_size(num_groups);
3064
3065 size *= sizeof(void *);
3066 size += offsetof(struct memcg_cache_params, memcg_caches);
3067
3068 new_params = kzalloc(size, GFP_KERNEL);
3069 if (!new_params)
3070 return -ENOMEM;
3071
3072 new_params->is_root_cache = true;
3073
3074 /*
3075 * There is a chance it will be bigger than
3076 * memcg_limited_groups_array_size if we failed an allocation
3077 * in a cache, in which case all caches updated before it will
3078 * have a bigger array.
3079 *
3080 * But if that is the case, the data after
3081 * memcg_limited_groups_array_size is certainly unused
3082 */
3083 for (i = 0; i < memcg_limited_groups_array_size; i++) {
3084 if (!cur_params->memcg_caches[i])
3085 continue;
3086 new_params->memcg_caches[i] =
3087 cur_params->memcg_caches[i];
3088 }
3089
3090 /*
3091 * Ideally, we would wait until all caches succeed, and only
3092 * then free the old one. But this is not worth the extra
3093 * pointer per-cache we'd have to have for this.
3094 *
3095 * It is not a big deal if some caches are left with a size
3096 * bigger than the others. And all updates will reset this
3097 * anyway.
3098 */
3099 rcu_assign_pointer(s->memcg_params, new_params);
3100 if (cur_params)
3101 kfree_rcu(cur_params, rcu_head);
3102 }
3103 return 0;
3104}
3105
3106char *memcg_create_cache_name(struct mem_cgroup *memcg,
3107 struct kmem_cache *root_cache)
3108{
3109 static char *buf = NULL;
3110
3111 /*
3112 * We need a mutex here to protect the shared buffer. Since this is
3113 * expected to be called only on cache creation, we can employ the
3114 * slab_mutex for that purpose.
3115 */
3116 lockdep_assert_held(&slab_mutex);
3117
3118 if (!buf) {
3119 buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
3120 if (!buf)
3121 return NULL;
3122 }
3123
3124 cgroup_name(memcg->css.cgroup, buf, NAME_MAX + 1);
3125 return kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
3126 memcg_cache_id(memcg), buf);
3127}
3128
3129int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
3130 struct kmem_cache *root_cache)
3131{
3132 size_t size;
3133
3134 if (!memcg_kmem_enabled())
3135 return 0;
3136
3137 if (!memcg) {
3138 size = offsetof(struct memcg_cache_params, memcg_caches);
3139 size += memcg_limited_groups_array_size * sizeof(void *);
3140 } else
3141 size = sizeof(struct memcg_cache_params);
3142
3143 s->memcg_params = kzalloc(size, GFP_KERNEL);
3144 if (!s->memcg_params)
3145 return -ENOMEM;
3146
3147 if (memcg) {
3148 s->memcg_params->memcg = memcg;
3149 s->memcg_params->root_cache = root_cache;
3150 INIT_WORK(&s->memcg_params->destroy,
3151 kmem_cache_destroy_work_func);
3152 css_get(&memcg->css);
3153 } else
3154 s->memcg_params->is_root_cache = true;
3155
3156 return 0;
3157}
3158
3159void memcg_free_cache_params(struct kmem_cache *s)
3160{
3161 if (!s->memcg_params)
3162 return;
3163 if (!s->memcg_params->is_root_cache)
3164 css_put(&s->memcg_params->memcg->css);
3165 kfree(s->memcg_params);
3166}
3167
3168void memcg_register_cache(struct kmem_cache *s)
3169{
3170 struct kmem_cache *root;
3171 struct mem_cgroup *memcg;
3172 int id;
3173
3174 if (is_root_cache(s))
3175 return;
3176
3177 /*
3178 * Holding the slab_mutex assures nobody will touch the memcg_caches
3179 * array while we are modifying it.
3180 */
3181 lockdep_assert_held(&slab_mutex);
3182
3183 root = s->memcg_params->root_cache;
3184 memcg = s->memcg_params->memcg;
3185 id = memcg_cache_id(memcg);
3186
3187 /*
3188 * Since readers won't lock (see cache_from_memcg_idx()), we need a
3189 * barrier here to ensure nobody will see the kmem_cache partially
3190 * initialized.
3191 */
3192 smp_wmb();
3193
3194 /*
3195 * Initialize the pointer to this cache in its parent's memcg_params
3196 * before adding it to the memcg_slab_caches list, otherwise we can
3197 * fail to convert memcg_params_to_cache() while traversing the list.
3198 */
3199 VM_BUG_ON(root->memcg_params->memcg_caches[id]);
3200 root->memcg_params->memcg_caches[id] = s;
3201
3202 mutex_lock(&memcg->slab_caches_mutex);
3203 list_add(&s->memcg_params->list, &memcg->memcg_slab_caches);
3204 mutex_unlock(&memcg->slab_caches_mutex);
3205}
3206
3207void memcg_unregister_cache(struct kmem_cache *s)
3208{
3209 struct kmem_cache *root;
3210 struct mem_cgroup *memcg;
3211 int id;
3212
3213 if (is_root_cache(s))
3214 return;
3215
3216 /*
3217 * Holding the slab_mutex assures nobody will touch the memcg_caches
3218 * array while we are modifying it.
3219 */
3220 lockdep_assert_held(&slab_mutex);
3221
3222 root = s->memcg_params->root_cache;
3223 memcg = s->memcg_params->memcg;
3224 id = memcg_cache_id(memcg);
3225
3226 mutex_lock(&memcg->slab_caches_mutex);
3227 list_del(&s->memcg_params->list);
3228 mutex_unlock(&memcg->slab_caches_mutex);
3229
3230 /*
3231 * Clear the pointer to this cache in its parent's memcg_params only
3232 * after removing it from the memcg_slab_caches list, otherwise we can
3233 * fail to convert memcg_params_to_cache() while traversing the list.
3234 */
3235 VM_BUG_ON(root->memcg_params->memcg_caches[id] != s);
3236 root->memcg_params->memcg_caches[id] = NULL;
3237}
3238
3239/*
3240 * During the creation of a new cache, we need to disable our accounting
3241 * mechanism altogether. This is true even if we are not creating, but rather
3242 * just enqueueing new caches to be created.
3243 *
3244 * This is because that process will trigger allocations; some visible, like
3245 * explicit kmallocs to auxiliary data structures, name strings and internal
3246 * cache structures; some well concealed, like INIT_WORK() that can allocate
3247 * objects during debug.
3248 *
3249 * If any allocation happens during memcg_kmem_get_cache, we will recurse back
3250 * to it. This may not be a bounded recursion: since the first cache creation
3251 * failed to complete (waiting on the allocation), we'll just try to create the
3252 * cache again, failing at the same point.
3253 *
3254 * memcg_kmem_get_cache is prepared to abort after seeing a positive count of
3255 * memcg_kmem_skip_account. So we enclose anything that might allocate memory
3256 * inside the following two functions.
3257 */
3258static inline void memcg_stop_kmem_account(void)
3259{
3260 VM_BUG_ON(!current->mm);
3261 current->memcg_kmem_skip_account++;
3262}
3263
3264static inline void memcg_resume_kmem_account(void)
3265{
3266 VM_BUG_ON(!current->mm);
3267 current->memcg_kmem_skip_account--;
3268}
3269
3270static void kmem_cache_destroy_work_func(struct work_struct *w)
3271{
3272 struct kmem_cache *cachep;
3273 struct memcg_cache_params *p;
3274
3275 p = container_of(w, struct memcg_cache_params, destroy);
3276
3277 cachep = memcg_params_to_cache(p);
3278
3279 /*
3280 * If we get down to 0 after shrink, we could delete right away.
3281 * However, memcg_release_pages() already puts us back in the workqueue
3282 * in that case. If we proceed deleting, we'll get a dangling
3283 * reference, and removing the object from the workqueue in that case
3284 * is unnecessary complication. We are not a fast path.
3285 *
3286 * Note that this case is fundamentally different from racing with
3287 * shrink_slab(): if memcg_cgroup_destroy_cache() is called in
3288 * kmem_cache_shrink, not only would we be reinserting a dead cache
3289 * into the queue, but doing so from inside the worker racing to
3290 * destroy it.
3291 *
3292 * So if we aren't down to zero, we'll just schedule a worker and try
3293 * again
3294 */
3295 if (atomic_read(&cachep->memcg_params->nr_pages) != 0)
3296 kmem_cache_shrink(cachep);
3297 else
3298 kmem_cache_destroy(cachep);
3299}
3300
3301void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
3302{
3303 if (!cachep->memcg_params->dead)
3304 return;
3305
3306 /*
3307 * There are many ways in which we can get here.
3308 *
3309 * We can get to a memory-pressure situation while the delayed work is
3310 * still pending to run. The vmscan shrinkers can then release all
3311 * cache memory and get us to destruction. If this is the case, we'll
3312 * be executed twice, which is a bug (the second time will execute over
3313 * bogus data). In this case, cancelling the work should be fine.
3314 *
3315 * But we can also get here from the worker itself, if
3316 * kmem_cache_shrink is enough to shake all the remaining objects and
3317 * get the page count to 0. In this case, we'll deadlock if we try to
3318 * cancel the work (the worker runs with an internal lock held, which
3319 * is the same lock we would hold for cancel_work_sync().)
3320 *
3321 * Since we can't possibly know who got us here, just refrain from
3322 * running if there is already work pending
3323 */
3324 if (work_pending(&cachep->memcg_params->destroy))
3325 return;
3326 /*
3327 * We have to defer the actual destroying to a workqueue, because
3328 * we might currently be in a context that cannot sleep.
3329 */
3330 schedule_work(&cachep->memcg_params->destroy);
3331}
3332
3333int __kmem_cache_destroy_memcg_children(struct kmem_cache *s)
3334{
3335 struct kmem_cache *c;
3336 int i, failed = 0;
3337
3338 /*
3339 * If the cache is being destroyed, we trust that there is no one else
3340 * requesting objects from it. Even if there are, the sanity checks in
3341 * kmem_cache_destroy should catch this ill case.
3342 *
3343 * Still, we don't want anyone else freeing memcg_caches under our
3344 * noses, which can happen if a new memcg comes to life. As usual,
3345 * we'll take the activate_kmem_mutex to protect ourselves against
3346 * this.
3347 */
3348 mutex_lock(&activate_kmem_mutex);
3349 for_each_memcg_cache_index(i) {
3350 c = cache_from_memcg_idx(s, i);
3351 if (!c)
3352 continue;
3353
3354 /*
3355 * We will now manually delete the caches, so to avoid races
3356 * we need to cancel all pending destruction workers and
3357 * proceed with destruction ourselves.
3358 *
3359 * kmem_cache_destroy() will call kmem_cache_shrink internally,
3360 * and that could spawn the workers again: it is likely that
3361 * the cache still has active pages at this very moment.
3362 * This would lead us back to mem_cgroup_destroy_cache.
3363 *
3364 * But that will not execute at all if the "dead" flag is not
3365 * set, so flip it down to guarantee we are in control.
3366 */
3367 c->memcg_params->dead = false;
3368 cancel_work_sync(&c->memcg_params->destroy);
3369 kmem_cache_destroy(c);
3370
3371 if (cache_from_memcg_idx(s, i))
3372 failed++;
3373 }
3374 mutex_unlock(&activate_kmem_mutex);
3375 return failed;
3376}
3377
3378static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
3379{
3380 struct kmem_cache *cachep;
3381 struct memcg_cache_params *params;
3382
3383 if (!memcg_kmem_is_active(memcg))
3384 return;
3385
3386 mutex_lock(&memcg->slab_caches_mutex);
3387 list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
3388 cachep = memcg_params_to_cache(params);
3389 cachep->memcg_params->dead = true;
3390 schedule_work(&cachep->memcg_params->destroy);
3391 }
3392 mutex_unlock(&memcg->slab_caches_mutex);
3393}
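
#if 0	/* illustrative sketch; not compiled */
/*
 * A condensed view of the dead-cache teardown sequence, assuming it is
 * driven from the memcg offlining path (the exact call site lives
 * outside this excerpt); the helper name is hypothetical:
 */
static void example_offline_teardown(struct mem_cgroup *memcg)
{
	/* Flag every per-memcg cache dead and queue its destroy worker. */
	mem_cgroup_destroy_all_caches(memcg);
	/*
	 * Each worker then shrinks its cache while pages remain (it is
	 * re-queued by memcg_release_pages() in that case), or destroys
	 * it once nr_pages drops to zero; see
	 * kmem_cache_destroy_work_func() above.
	 */
}
#endif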
3394
3395struct create_work {
3396 struct mem_cgroup *memcg;
3397 struct kmem_cache *cachep;
3398 struct work_struct work;
3399};
3400
3401static void memcg_create_cache_work_func(struct work_struct *w)
3402{
3403 struct create_work *cw = container_of(w, struct create_work, work);
3404 struct mem_cgroup *memcg = cw->memcg;
3405 struct kmem_cache *cachep = cw->cachep;
3406
3407 kmem_cache_create_memcg(memcg, cachep);
3408 css_put(&memcg->css);
3409 kfree(cw);
3410}
3411
3412/*
3413 * Enqueue the creation of a per-memcg kmem_cache.
3414 */
3415static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
3416 struct kmem_cache *cachep)
3417{
3418 struct create_work *cw;
3419
3420 cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT);
3421 if (cw == NULL) {
3422 css_put(&memcg->css);
3423 return;
3424 }
3425
3426 cw->memcg = memcg;
3427 cw->cachep = cachep;
3428
3429 INIT_WORK(&cw->work, memcg_create_cache_work_func);
3430 schedule_work(&cw->work);
3431}
3432
3433static void memcg_create_cache_enqueue(struct mem_cgroup *memcg,
3434 struct kmem_cache *cachep)
3435{
3436 /*
3437 * We need to stop accounting when we kmalloc, because if the
3438 * corresponding kmalloc cache is not yet created, the first allocation
3439 * in __memcg_create_cache_enqueue will recurse.
3440 *
3441 * However, it is better to enclose the whole function. Depending on
3442 * the debugging options enabled, INIT_WORK(), for instance, can
3443 * trigger an allocation. This, too, will make us recurse. Because at
3444 * this point we can't allow ourselves back into memcg_kmem_get_cache,
3445 * the safest choice is to do it like this, wrapping the whole function.
3446 */
3447 memcg_stop_kmem_account();
3448 __memcg_create_cache_enqueue(memcg, cachep);
3449 memcg_resume_kmem_account();
3450}
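
#if 0	/* illustrative sketch; not compiled */
/*
 * The stop/resume pair used above is the generic idiom for any
 * memcg-internal allocation that must not recurse back into
 * memcg_kmem_get_cache(). The helper below is hypothetical; only the
 * idiom is real:
 */
static void *example_internal_alloc(size_t size)
{
	void *p;

	memcg_stop_kmem_account();	/* allocations now hit root caches */
	p = kmalloc(size, GFP_NOWAIT);
	memcg_resume_kmem_account();

	return p;
}
#endif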
3451/*
3452 * Return the kmem_cache we're supposed to use for a slab allocation.
3453 * We try to use the current memcg's version of the cache.
3454 *
3455 * If the cache does not exist yet and we are the first user of it,
3456 * we either create it immediately, if possible, or create it asynchronously
3457 * in a workqueue.
3458 * In the latter case, we will let the current allocation go through with
3459 * the original cache.
3460 *
3461 * Can't be called in interrupt context or from kernel threads.
3462 * The RCU read lock is taken internally, not by the caller.
3463 */
3464struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
3465 gfp_t gfp)
3466{
3467 struct mem_cgroup *memcg;
3468 struct kmem_cache *memcg_cachep;
3469
3470 VM_BUG_ON(!cachep->memcg_params);
3471 VM_BUG_ON(!cachep->memcg_params->is_root_cache);
3472
3473 if (!current->mm || current->memcg_kmem_skip_account)
3474 return cachep;
3475
3476 rcu_read_lock();
3477 memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
3478
3479 if (!memcg_can_account_kmem(memcg))
3480 goto out;
3481
3482 memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg));
3483 if (likely(memcg_cachep)) {
3484 cachep = memcg_cachep;
3485 goto out;
3486 }
3487
3488 /* The corresponding put will be done in the workqueue. */
3489 if (!css_tryget(&memcg->css))
3490 goto out;
3491 rcu_read_unlock();
3492
3493 /*
3494 * If we are in a safe context (can wait, and not in interrupt
3495 * context), we could be predictable and return right away.
3496 * This would guarantee that the allocation being performed
3497 * already belongs in the new cache.
3498 *
3499 * However, there are some clashes that can arise from locking.
3500 * For instance, because we acquire the slab_mutex while doing
3501 * kmem_cache_dup, this means no further allocation could happen
3502 * with the slab_mutex held.
3503 *
3504 * Also, because cache creation issues get_online_cpus(), this
3505 * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
3506 * that ends up reversed during cpu hotplug. (cpuset allocates
3507 * a bunch of GFP_KERNEL memory during cpu bringup.) Due to all
3508 * that, it is better to defer everything.
3509 */
3510 memcg_create_cache_enqueue(memcg, cachep);
3511 return cachep;
3512out:
3513 rcu_read_unlock();
3514 return cachep;
3515}
3516EXPORT_SYMBOL(__memcg_kmem_get_cache);
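
#if 0	/* illustrative sketch; not compiled */
/*
 * Rough shape of the allocator-side usage. The real entry point is a
 * wrapper in the memcontrol header that also consults the
 * memcg_kmem_enabled() static key; treat this as an approximation:
 */
static void *example_slab_alloc(struct kmem_cache *cachep, gfp_t gfp)
{
	/* Redirect to the current task's per-memcg cache, if any. */
	cachep = __memcg_kmem_get_cache(cachep, gfp);

	return kmem_cache_alloc(cachep, gfp);
}
#endif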
3517
3518/*
3519 * We need to verify if the allocation against current->mm->owner's memcg is
3520 * possible for the given order. But the page is not allocated yet, so we'll
3521 * need a further commit step to do the final arrangements.
3522 *
3523 * It is possible for the task to switch cgroups in the meantime, so at
3524 * commit time, we can't rely on task conversion any longer. We'll then use
3525 * the handle argument to return to the caller which cgroup we should commit
3526 * against. We could also return the memcg directly and avoid the pointer
3527 * passing, but a boolean return value gives better semantics considering
3528 * the compiled-out case as well.
3529 *
3530 * Returning true means the allocation is possible.
3531 */
3532bool
3533__memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
3534{
3535 struct mem_cgroup *memcg;
3536 int ret;
3537
3538 *_memcg = NULL;
3539
3540 /*
3541 * Disabling accounting is only relevant for some specific memcg
3542 * internal allocations. Therefore we would initially not have such a
3543 * check here, since direct calls to the page allocator that are marked
3544 * with GFP_KMEMCG only happen outside memcg core. We are mostly
3545 * concerned with cache allocations, and by having this test at
3546 * memcg_kmem_get_cache, we are already able to relay the allocation to
3547 * the root cache and bypass the memcg cache altogether.
3548 *
3549 * There is one exception, though: the SLUB allocator does not create
3550 * large order caches, but rather services large kmallocs directly from
3551 * the page allocator. Therefore, the following sequence when backed by
3552 * the SLUB allocator:
3553 *
3554 * memcg_stop_kmem_account();
3555 * kmalloc(<large_number>)
3556 * memcg_resume_kmem_account();
3557 *
3558 * would effectively ignore the fact that we should skip accounting,
3559 * since it will drive us directly to this function without passing
3560 * through the cache selector memcg_kmem_get_cache. Such large
3561 * allocations are extremely rare but can happen, for instance, for the
3562 * cache arrays. We bring this test here.
3563 */
3564 if (!current->mm || current->memcg_kmem_skip_account)
3565 return true;
3566
3567 memcg = get_mem_cgroup_from_mm(current->mm);
3568
3569 if (!memcg_can_account_kmem(memcg)) {
3570 css_put(&memcg->css);
3571 return true;
3572 }
3573
3574 ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order);
3575 if (!ret)
3576 *_memcg = memcg;
3577
3578 css_put(&memcg->css);
3579 return (ret == 0);
3580}
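
#if 0	/* illustrative sketch; not compiled */
/*
 * The charge -> allocate -> commit protocol as a hypothetical caller
 * would use it (the real call sites are the GFP_KMEMCG hooks in the
 * page allocator, reached through wrappers in the memcontrol header):
 */
static struct page *example_charged_alloc(gfp_t gfp, int order)
{
	struct mem_cgroup *memcg;
	struct page *page;

	if (!__memcg_kmem_newpage_charge(gfp, &memcg, order))
		return NULL;	/* over limit: the allocation is denied */

	page = alloc_pages(gfp, order);
	/*
	 * memcg is NULL when accounting was bypassed; otherwise commit
	 * binds the charge to the page, or reverts it if page is NULL.
	 */
	if (memcg)
		__memcg_kmem_commit_charge(page, memcg, order);

	return page;
}

static void example_charged_free(struct page *page, int order)
{
	__memcg_kmem_uncharge_pages(page, order);	/* no-op if unaccounted */
	__free_pages(page, order);
}
#endif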
3581
3582void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
3583 int order)
3584{
3585 struct page_cgroup *pc;
3586
3587 VM_BUG_ON(mem_cgroup_is_root(memcg));
3588
3589 /* The page allocation failed. Revert */
3590 if (!page) {
3591 memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
3592 return;
3593 }
3594
3595 pc = lookup_page_cgroup(page);
3596 lock_page_cgroup(pc);
3597 pc->mem_cgroup = memcg;
3598 SetPageCgroupUsed(pc);
3599 unlock_page_cgroup(pc);
3600}
3601
3602void __memcg_kmem_uncharge_pages(struct page *page, int order)
3603{
3604 struct mem_cgroup *memcg = NULL;
3605 struct page_cgroup *pc;
3606
3608 pc = lookup_page_cgroup(page);
3609 /*
3610 * Fast unlocked return. The state might theoretically have changed;
3611 * check again after locking.
3612 */
3613 if (!PageCgroupUsed(pc))
3614 return;
3615
3616 lock_page_cgroup(pc);
3617 if (PageCgroupUsed(pc)) {
3618 memcg = pc->mem_cgroup;
3619 ClearPageCgroupUsed(pc);
3620 }
3621 unlock_page_cgroup(pc);
3622
3623 /*
3624 * We trust that the allocation is valid only if there is a memcg
3625 * associated with the page.
3626 */
3627 if (!memcg)
3628 return;
3629
3630 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
3631 memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
3632}
3633#else
3634static inline void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
3635{
3636}
3637#endif /* CONFIG_MEMCG_KMEM */
3638
3639#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3640
3641#define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
3642/*
3643 * Because tail pages are not marked as "used", set that flag here. We're
3644 * under zone->lru_lock, 'splitting on pmd' and compound_lock.
3645 * charge/uncharge will never happen and move_account() is done under
3646 * compound_lock(), so we don't have to take care of races.
3647 */
3648void mem_cgroup_split_huge_fixup(struct page *head)
3649{
3650 struct page_cgroup *head_pc = lookup_page_cgroup(head);
3651 struct page_cgroup *pc;
3652 struct mem_cgroup *memcg;
3653 int i;
3654
3655 if (mem_cgroup_disabled())
3656 return;
3657
3658 memcg = head_pc->mem_cgroup;
3659 for (i = 1; i < HPAGE_PMD_NR; i++) {
3660 pc = head_pc + i;
3661 pc->mem_cgroup = memcg;
3662 smp_wmb();/* see __commit_charge() */
3663 pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
3664 }
3665 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
3666 HPAGE_PMD_NR);
3667}
3668#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
3669
3670/**
3671 * mem_cgroup_move_account - move account of the page
3672 * @page: the page
3673 * @nr_pages: number of regular pages (>1 for huge pages)
3674 * @pc: page_cgroup of the page.
3675 * @from: mem_cgroup which the page is moved from.
3676 * @to: mem_cgroup which the page is moved to. @from != @to.
3677 *
3678 * The caller must confirm the following.
3679 * - page is not on LRU (isolate_page() is useful.)
3680 * - compound_lock is held when nr_pages > 1
3681 *
3682 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
3683 * from old cgroup.
3684 */
3685static int mem_cgroup_move_account(struct page *page,
3686 unsigned int nr_pages,
3687 struct page_cgroup *pc,
3688 struct mem_cgroup *from,
3689 struct mem_cgroup *to)
3690{
3691 unsigned long flags;
3692 int ret;
3693 bool anon = PageAnon(page);
3694
3695 VM_BUG_ON(from == to);
3696 VM_BUG_ON_PAGE(PageLRU(page), page);
3697 /*
3698 * The page is isolated from the LRU. So, the collapse function
3699 * will not handle this page. But page splitting can happen.
3700 * Do this check under compound_page_lock(). The caller should
3701 * hold it.
3702 */
3703 ret = -EBUSY;
3704 if (nr_pages > 1 && !PageTransHuge(page))
3705 goto out;
3706
3707 lock_page_cgroup(pc);
3708
3709 ret = -EINVAL;
3710 if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
3711 goto unlock;
3712
3713 move_lock_mem_cgroup(from, &flags);
3714
3715 if (!anon && page_mapped(page)) {
3716 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
3717 nr_pages);
3718 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
3719 nr_pages);
3720 }
3721
3722 if (PageWriteback(page)) {
3723 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
3724 nr_pages);
3725 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
3726 nr_pages);
3727 }
3728
3729 mem_cgroup_charge_statistics(from, page, anon, -nr_pages);
3730
3731 /* caller should have done css_get */
3732 pc->mem_cgroup = to;
3733 mem_cgroup_charge_statistics(to, page, anon, nr_pages);
3734 move_unlock_mem_cgroup(from, &flags);
3735 ret = 0;
3736unlock:
3737 unlock_page_cgroup(pc);
3738 /*
3739 * check events
3740 */
3741 memcg_check_events(to, page);
3742 memcg_check_events(from, page);
3743out:
3744 return ret;
3745}
3746
3747/**
3748 * mem_cgroup_move_parent - moves page to the parent group
3749 * @page: the page to move
3750 * @pc: page_cgroup of the page
3751 * @child: page's cgroup
3752 *
3753 * Move charges to its parent or the root cgroup if the group has no
3754 * parent (aka use_hierarchy==0).
3755 * Although this might fail (get_page_unless_zero, isolate_lru_page or
3756 * mem_cgroup_move_account fails) the failure is always temporary and
3757 * it signals a race with a page removal/uncharge or migration. In the
3758 * first case the page is on the way out and it will vanish from the LRU
3759 * on the next attempt and the call should be retried later.
3760 * Isolation from the LRU fails only if the page has been isolated from
3761 * the LRU since we looked at it and that usually means either global
3762 * reclaim or migration going on. The page will either get back to the
3763 * LRU or vanish.
3764 * Finally, mem_cgroup_move_account() fails only if the page got uncharged
3765 * (!PageCgroupUsed) or moved to a different group. The page will
3766 * disappear in the next attempt.
3767 */
3768static int mem_cgroup_move_parent(struct page *page,
3769 struct page_cgroup *pc,
3770 struct mem_cgroup *child)
3771{
3772 struct mem_cgroup *parent;
3773 unsigned int nr_pages;
3774 unsigned long uninitialized_var(flags);
3775 int ret;
3776
3777 VM_BUG_ON(mem_cgroup_is_root(child));
3778
3779 ret = -EBUSY;
3780 if (!get_page_unless_zero(page))
3781 goto out;
3782 if (isolate_lru_page(page))
3783 goto put;
3784
3785 nr_pages = hpage_nr_pages(page);
3786
3787 parent = parent_mem_cgroup(child);
3788 /*
3789 * If no parent, move charges to root cgroup.
3790 */
3791 if (!parent)
3792 parent = root_mem_cgroup;
3793
3794 if (nr_pages > 1) {
3795 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
3796 flags = compound_lock_irqsave(page);
3797 }
3798
3799 ret = mem_cgroup_move_account(page, nr_pages,
3800 pc, child, parent);
3801 if (!ret)
3802 __mem_cgroup_cancel_local_charge(child, nr_pages);
3803
3804 if (nr_pages > 1)
3805 compound_unlock_irqrestore(page, flags);
3806 putback_lru_page(page);
3807put:
3808 put_page(page);
3809out:
3810 return ret;
3811}
3812
3813int mem_cgroup_charge_anon(struct page *page,
3814 struct mm_struct *mm, gfp_t gfp_mask)
3815{
3816 unsigned int nr_pages = 1;
3817 struct mem_cgroup *memcg;
3818 bool oom = true;
3819
3820 if (mem_cgroup_disabled())
3821 return 0;
3822
3823 VM_BUG_ON_PAGE(page_mapped(page), page);
3824 VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
3825 VM_BUG_ON(!mm);
3826
3827 if (PageTransHuge(page)) {
3828 nr_pages <<= compound_order(page);
3829 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
3830 /*
3831 * Never OOM-kill a process for a huge page. The
3832 * fault handler will fall back to regular pages.
3833 */
3834 oom = false;
3835 }
3836
3837 memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, nr_pages, oom);
3838 if (!memcg)
3839 return -ENOMEM;
3840 __mem_cgroup_commit_charge(memcg, page, nr_pages,
3841 MEM_CGROUP_CHARGE_TYPE_ANON, false);
3842 return 0;
3843}
3844
3845/*
3846 * During swap-in (try_charge -> commit or cancel), the page is locked.
3847 * And when try_charge() successfully returns, one refcnt to memcg without
3848 * struct page_cgroup is acquired. This refcnt will be consumed by
3849 * "commit()" or removed by "cancel()"
3850 */
3851static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm,
3852 struct page *page,
3853 gfp_t mask,
3854 struct mem_cgroup **memcgp)
3855{
3856 struct mem_cgroup *memcg = NULL;
3857 struct page_cgroup *pc;
3858 int ret;
3859
3860 pc = lookup_page_cgroup(page);
3861 /*
3862 * Every swap fault against a single page tries to charge the
3863 * page, bail as early as possible. shmem_unuse() encounters
3864 * already charged pages, too. The USED bit is protected by
3865 * the page lock, which serializes swap cache removal, which
3866 * in turn serializes uncharging.
3867 */
3868 if (PageCgroupUsed(pc))
3869 goto out;
3870 if (do_swap_account)
3871 memcg = try_get_mem_cgroup_from_page(page);
3872 if (!memcg)
3873 memcg = get_mem_cgroup_from_mm(mm);
3874 ret = mem_cgroup_try_charge(memcg, mask, 1, true);
3875 css_put(&memcg->css);
3876 if (ret == -EINTR)
3877 memcg = root_mem_cgroup;
3878 else if (ret)
3879 return ret;
3880out:
3881 *memcgp = memcg;
3882 return 0;
3883}
3884
3885int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page,
3886 gfp_t gfp_mask, struct mem_cgroup **memcgp)
3887{
3888 if (mem_cgroup_disabled()) {
3889 *memcgp = NULL;
3890 return 0;
3891 }
3892 /*
3893 * A racing thread's fault, or swapoff, may have already
3894 * updated the pte, and even removed page from swap cache: in
3895 * those cases unuse_pte()'s pte_same() test will fail; but
3896 * there's also a KSM case which does need to charge the page.
3897 */
3898 if (!PageSwapCache(page)) {
3899 struct mem_cgroup *memcg;
3900
3901 memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
3902 if (!memcg)
3903 return -ENOMEM;
3904 *memcgp = memcg;
3905 return 0;
3906 }
3907 return __mem_cgroup_try_charge_swapin(mm, page, gfp_mask, memcgp);
3908}
3909
3910void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
3911{
3912 if (mem_cgroup_disabled())
3913 return;
3914 if (!memcg)
3915 return;
3916 __mem_cgroup_cancel_charge(memcg, 1);
3917}
3918
3919static void
3920__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
3921 enum charge_type ctype)
3922{
3923 if (mem_cgroup_disabled())
3924 return;
3925 if (!memcg)
3926 return;
3927
3928 __mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
3929 /*
3930 * Now the swap content is in memory. This means the page may be
3931 * counted both as mem and swap, i.e. double counted.
3932 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
3933 * under lock_page(). But reuse_swap_page() in do_swap_page() (memory.c)
3934 * may call delete_from_swap_cache() before we reach here.
3935 */
3936 if (do_swap_account && PageSwapCache(page)) {
3937 swp_entry_t ent = {.val = page_private(page)};
3938 mem_cgroup_uncharge_swap(ent);
3939 }
3940}
3941
3942void mem_cgroup_commit_charge_swapin(struct page *page,
3943 struct mem_cgroup *memcg)
3944{
3945 __mem_cgroup_commit_charge_swapin(page, memcg,
3946 MEM_CGROUP_CHARGE_TYPE_ANON);
3947}
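
#if 0	/* illustrative sketch; not compiled */
/*
 * Shape of the swap-in fault path around the three helpers above; a
 * condensed, hypothetical rendition of what do_swap_page() does.
 * example_map_pte() stands in for the pte installation step:
 */
static bool example_map_pte(struct page *page);

static int example_swapin_fault(struct mm_struct *mm, struct page *page)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
		return -ENOMEM;

	if (!example_map_pte(page)) {
		/* Racing pte change: drop the charge we took above. */
		mem_cgroup_cancel_charge_swapin(memcg);
		return -EAGAIN;
	}

	/* Bind the charge; this also uncharges memsw to fix the double count. */
	mem_cgroup_commit_charge_swapin(page, memcg);
	return 0;
}
#endif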
3948
3949int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
3950 gfp_t gfp_mask)
3951{
3952 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
3953 struct mem_cgroup *memcg;
3954 int ret;
3955
3956 if (mem_cgroup_disabled())
3957 return 0;
3958 if (PageCompound(page))
3959 return 0;
3960
3961 if (PageSwapCache(page)) { /* shmem */
3962 ret = __mem_cgroup_try_charge_swapin(mm, page,
3963 gfp_mask, &memcg);
3964 if (ret)
3965 return ret;
3966 __mem_cgroup_commit_charge_swapin(page, memcg, type);
3967 return 0;
3968 }
3969
3970 memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
3971 if (!memcg)
3972 return -ENOMEM;
3973 __mem_cgroup_commit_charge(memcg, page, 1, type, false);
3974 return 0;
3975}
3976
3977static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
3978 unsigned int nr_pages,
3979 const enum charge_type ctype)
3980{
3981 struct memcg_batch_info *batch = NULL;
3982 bool uncharge_memsw = true;
3983
3984 /* If swapout, usage of swap doesn't decrease */
3985 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
3986 uncharge_memsw = false;
3987
3988 batch = &current->memcg_batch;
3989 /*
3990 * Usually, we do css_get() when we remember a memcg pointer.
3991 * But in this case, we keep res->usage until the end of a series of
3992 * uncharges. Then, it's ok to ignore the memcg's refcnt.
3993 */
3994 if (!batch->memcg)
3995 batch->memcg = memcg;
3996 /*
3997 * do_batch > 0 when unmapping pages or doing inode invalidate/truncate.
3998 * In those cases, all pages freed continuously can be expected to be in
3999 * the same cgroup and we have a chance to coalesce uncharges.
4000 * But we uncharge one by one if the task is OOM-killed (TIF_MEMDIE)
4001 * because we want to do the uncharge as soon as possible.
4002 */
4003
4004 if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
4005 goto direct_uncharge;
4006
4007 if (nr_pages > 1)
4008 goto direct_uncharge;
4009
4010 /*
4011 * In the typical case, batch->memcg == memcg. This means we can
4012 * merge a series of uncharges into one res_counter uncharge.
4013 * If not, we uncharge the res_counter one by one.
4014 */
4015 if (batch->memcg != memcg)
4016 goto direct_uncharge;
4017 /* remember freed charge and uncharge it later */
4018 batch->nr_pages++;
4019 if (uncharge_memsw)
4020 batch->memsw_nr_pages++;
4021 return;
4022direct_uncharge:
4023 res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
4024 if (uncharge_memsw)
4025 res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
4026 if (unlikely(batch->memcg != memcg))
4027 memcg_oom_recover(memcg);
4028}
4029
4030/*
4031 * uncharge if !page_mapped(page)
4032 */
4033static struct mem_cgroup *
4034__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
4035 bool end_migration)
4036{
4037 struct mem_cgroup *memcg = NULL;
4038 unsigned int nr_pages = 1;
4039 struct page_cgroup *pc;
4040 bool anon;
4041
4042 if (mem_cgroup_disabled())
4043 return NULL;
4044
4045 if (PageTransHuge(page)) {
4046 nr_pages <<= compound_order(page);
4047 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
4048 }
4049 /*
4050 * Check if our page_cgroup is valid
4051 */
4052 pc = lookup_page_cgroup(page);
4053 if (unlikely(!PageCgroupUsed(pc)))
4054 return NULL;
4055
4056 lock_page_cgroup(pc);
4057
4058 memcg = pc->mem_cgroup;
4059
4060 if (!PageCgroupUsed(pc))
4061 goto unlock_out;
4062
4063 anon = PageAnon(page);
4064
4065 switch (ctype) {
4066 case MEM_CGROUP_CHARGE_TYPE_ANON:
4067 /*
4068 * Generally PageAnon tells if it's the anon statistics to be
4069 * updated; but sometimes e.g. mem_cgroup_uncharge_page() is
4070 * used before the page has reached the stage of being marked PageAnon.
4071 */
4072 anon = true;
4073 /* fallthrough */
4074 case MEM_CGROUP_CHARGE_TYPE_DROP:
4075 /* See mem_cgroup_prepare_migration() */
4076 if (page_mapped(page))
4077 goto unlock_out;
4078 /*
4079 * Pages under migration may not be uncharged. But
4080 * end_migration() /must/ be the one uncharging the
4081 * unused post-migration page and so it has to call
4082 * here with the migration bit still set. See the
4083 * res_counter handling below.
4084 */
4085 if (!end_migration && PageCgroupMigration(pc))
4086 goto unlock_out;
4087 break;
4088 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
4089 if (!PageAnon(page)) { /* Shared memory */
4090 if (page->mapping && !page_is_file_cache(page))
4091 goto unlock_out;
4092 } else if (page_mapped(page)) /* Anon */
4093 goto unlock_out;
4094 break;
4095 default:
4096 break;
4097 }
4098
4099 mem_cgroup_charge_statistics(memcg, page, anon, -nr_pages);
4100
4101 ClearPageCgroupUsed(pc);
4102 /*
4103 * pc->mem_cgroup is not cleared here. It will be accessed when it's
4104 * freed from the LRU. This is safe because an uncharged page is expected
4105 * not to be reused (freed soon). The exception is SwapCache, which is
4106 * handled by special functions.
4107 */
4108
4109 unlock_page_cgroup(pc);
4110 /*
4111 * even after unlock, we have memcg->res.usage here and this memcg
4112 * will never be freed, so it's safe to call css_get().
4113 */
4114 memcg_check_events(memcg, page);
4115 if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
4116 mem_cgroup_swap_statistics(memcg, true);
4117 css_get(&memcg->css);
4118 }
4119 /*
4120 * Migration does not charge the res_counter for the
4121 * replacement page, so leave it alone when phasing out the
4122 * page that is unused after the migration.
4123 */
4124 if (!end_migration && !mem_cgroup_is_root(memcg))
4125 mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
4126
4127 return memcg;
4128
4129unlock_out:
4130 unlock_page_cgroup(pc);
4131 return NULL;
4132}
4133
4134void mem_cgroup_uncharge_page(struct page *page)
4135{
4136 /* early check. */
4137 if (page_mapped(page))
4138 return;
4139 VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
4140 /*
4141 * If the page is in swap cache, uncharge should be deferred
4142 * to the swap path, which also properly accounts swap usage
4143 * and handles memcg lifetime.
4144 *
4145 * Note that this check is not stable and reclaim may add the
4146 * page to swap cache at any time after this. However, if the
4147 * page is not in swap cache by the time page->mapcount hits
4148 * 0, there won't be any page table references to the swap
4149 * slot, and reclaim will free it and not actually write the
4150 * page to disk.
4151 */
4152 if (PageSwapCache(page))
4153 return;
4154 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
4155}
4156
4157void mem_cgroup_uncharge_cache_page(struct page *page)
4158{
4159 VM_BUG_ON_PAGE(page_mapped(page), page);
4160 VM_BUG_ON_PAGE(page->mapping, page);
4161 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
4162}
4163
4164/*
4165 * uncharge_start/end is called in unmap_page_range/invalidate/truncate.
4166 * In those cases, pages are freed continuously and we can expect them to
4167 * be in the same memcg. Each of these callers itself limits the number
4168 * of pages freed at once, and then uncharge_start/end() is called
4169 * properly. This may be called multiple (nested) times in one context.
4170 */
4171
4172void mem_cgroup_uncharge_start(void)
4173{
4174 current->memcg_batch.do_batch++;
4175 /* Nesting is allowed. */
4176 if (current->memcg_batch.do_batch == 1) {
4177 current->memcg_batch.memcg = NULL;
4178 current->memcg_batch.nr_pages = 0;
4179 current->memcg_batch.memsw_nr_pages = 0;
4180 }
4181}
4182
4183void mem_cgroup_uncharge_end(void)
4184{
4185 struct memcg_batch_info *batch = &current->memcg_batch;
4186
4187 if (!batch->do_batch)
4188 return;
4189
4190 batch->do_batch--;
4191 if (batch->do_batch) /* If stacked, do nothing. */
4192 return;
4193
4194 if (!batch->memcg)
4195 return;
4196 /*
4197 * This "batch->memcg" is valid without any css_get/put etc...
4198 * because we hide charges behind us.
4199 */
4200 if (batch->nr_pages)
4201 res_counter_uncharge(&batch->memcg->res,
4202 batch->nr_pages * PAGE_SIZE);
4203 if (batch->memsw_nr_pages)
4204 res_counter_uncharge(&batch->memcg->memsw,
4205 batch->memsw_nr_pages * PAGE_SIZE);
4206 memcg_oom_recover(batch->memcg);
4207 /* forget this pointer (for sanity check) */
4208 batch->memcg = NULL;
4209}
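
#if 0	/* illustrative sketch; not compiled */
/*
 * How a bulk-freeing path brackets its uncharges so they coalesce into
 * a single res_counter operation; the page array is hypothetical:
 */
static void example_bulk_uncharge(struct page **pages, int nr)
{
	int i;

	mem_cgroup_uncharge_start();
	for (i = 0; i < nr; i++)
		mem_cgroup_uncharge_page(pages[i]);
	mem_cgroup_uncharge_end();	/* flushes the coalesced charges */
}
#endif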
4210
4211#ifdef CONFIG_SWAP
4212/*
4213 * Called after __delete_from_swap_cache(); drops the "page" account.
4214 * The memcg information is recorded in the swap_cgroup of "ent".
4215 */
4216void
4217mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
4218{
4219 struct mem_cgroup *memcg;
4220 int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
4221
4222 if (!swapout) /* this was a swap cache but the swap is unused! */
4223 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
4224
4225 memcg = __mem_cgroup_uncharge_common(page, ctype, false);
4226
4227 /*
4228 * Record the memcg information. If swapout && memcg != NULL,
4229 * css_get() was called in uncharge().
4230 */
4231 if (do_swap_account && swapout && memcg)
4232 swap_cgroup_record(ent, mem_cgroup_id(memcg));
4233}
4234#endif
4235
4236#ifdef CONFIG_MEMCG_SWAP
4237/*
4238 * Called from swap_entry_free(). Removes the record in swap_cgroup and
4239 * uncharges the "memsw" account.
4240 */
4241void mem_cgroup_uncharge_swap(swp_entry_t ent)
4242{
4243 struct mem_cgroup *memcg;
4244 unsigned short id;
4245
4246 if (!do_swap_account)
4247 return;
4248
4249 id = swap_cgroup_record(ent, 0);
4250 rcu_read_lock();
4251 memcg = mem_cgroup_lookup(id);
4252 if (memcg) {
4253 /*
4254 * We uncharge this because swap is freed.
4255 * This memcg can be an obsolete one. We avoid calling css_tryget().
4256 */
4257 if (!mem_cgroup_is_root(memcg))
4258 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
4259 mem_cgroup_swap_statistics(memcg, false);
4260 css_put(&memcg->css);
4261 }
4262 rcu_read_unlock();
4263}
4264
4265/**
4266 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
4267 * @entry: swap entry to be moved
4268 * @from: mem_cgroup which the entry is moved from
4269 * @to: mem_cgroup which the entry is moved to
4270 *
4271 * It succeeds only when the swap_cgroup's record for this entry is the same
4272 * as the mem_cgroup's id of @from.
4273 *
4274 * Returns 0 on success, -EINVAL on failure.
4275 *
4276 * The caller must have charged to @to, IOW, called res_counter_charge() for
4277 * both res and memsw, and called css_get().
4278 */
4279static int mem_cgroup_move_swap_account(swp_entry_t entry,
4280 struct mem_cgroup *from, struct mem_cgroup *to)
4281{
4282 unsigned short old_id, new_id;
4283
4284 old_id = mem_cgroup_id(from);
4285 new_id = mem_cgroup_id(to);
4286
4287 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
4288 mem_cgroup_swap_statistics(from, false);
4289 mem_cgroup_swap_statistics(to, true);
4290 /*
4291 * This function is only called from task migration context now.
4292 * It postpones res_counter and refcount handling till the end
4293 * of task migration (mem_cgroup_clear_mc()) for performance
4294 * improvement. But we cannot postpone css_get(to) because if
4295 * the process that has been moved to @to does swap-in, the
4296 * refcount of @to might be decreased to 0.
4297 *
4298 * We are in attach() phase, so the cgroup is guaranteed to be
4299 * alive, so we can just call css_get().
4300 */
4301 css_get(&to->css);
4302 return 0;
4303 }
4304 return -EINVAL;
4305}
4306#else
4307static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
4308 struct mem_cgroup *from, struct mem_cgroup *to)
4309{
4310 return -EINVAL;
4311}
4312#endif
4313
4314/*
4315 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
4316 * page belongs to.
4317 */
4318void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
4319 struct mem_cgroup **memcgp)
4320{
4321 struct mem_cgroup *memcg = NULL;
4322 unsigned int nr_pages = 1;
4323 struct page_cgroup *pc;
4324 enum charge_type ctype;
4325
4326 *memcgp = NULL;
4327
4328 if (mem_cgroup_disabled())
4329 return;
4330
4331 if (PageTransHuge(page))
4332 nr_pages <<= compound_order(page);
4333
4334 pc = lookup_page_cgroup(page);
4335 lock_page_cgroup(pc);
4336 if (PageCgroupUsed(pc)) {
4337 memcg = pc->mem_cgroup;
4338 css_get(&memcg->css);
4339 /*
4340 * When migrating an anonymous page, its mapcount goes down
4341 * to 0 and uncharge() will be called. But, even if it's fully
4342 * unmapped, migration may fail and this page has to be
4343 * charged again. We set the MIGRATION flag here and delay uncharge
4344 * until end_migration() is called.
4345 *
4346 * Corner case thinking:
4347 * A)
4348 * The old page was mapped as Anon and it's unmapped-and-freed
4349 * while migration was ongoing.
4350 * If unmap finds the old page, uncharge() of it will be delayed
4351 * until end_migration(). If unmap finds the new page, it's
4352 * uncharged when its mapcount goes from 1 to 0. If the unmap code
4353 * finds a swap migration entry, the new page will not be mapped
4354 * and end_migration() will find it (mapcount == 0).
4355 *
4356 * B)
4357 * The old page was mapped but migration fails, so the kernel
4358 * remaps it. A charge for it is kept by the MIGRATION flag even
4359 * if mapcount goes down to 0. We can remap it successfully
4360 * without charging it again.
4361 *
4362 * C)
4363 * The "old" page is under lock_page() until the end of
4364 * migration, so the old page itself will not be swapped out.
4365 * If the new page is swapped out before end_migration, our
4366 * hook into the usual swap-out path will catch the event.
4367 */
4368 if (PageAnon(page))
4369 SetPageCgroupMigration(pc);
4370 }
4371 unlock_page_cgroup(pc);
4372 /*
4373 * If the page is not charged at this point,
4374 * we return here.
4375 */
4376 if (!memcg)
4377 return;
4378
4379 *memcgp = memcg;
4380 /*
4381 * We charge the new page before it's used/mapped. So, even if unlock_page()
4382 * is called before end_migration, we can catch all events on this new
4383 * page. In the case new page is migrated but not remapped, new page's
4384 * mapcount will be finally 0 and we call uncharge in end_migration().
4385 */
4386 if (PageAnon(page))
4387 ctype = MEM_CGROUP_CHARGE_TYPE_ANON;
4388 else
4389 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
4390 /*
4391 * The page is committed to the memcg, but it's not actually
4392 * charged to the res_counter since we plan on replacing the
4393 * old one and only one page is going to be left afterwards.
4394 */
4395 __mem_cgroup_commit_charge(memcg, newpage, nr_pages, ctype, false);
4396}
4397
4398/* remove redundant charge if migration failed */
4399void mem_cgroup_end_migration(struct mem_cgroup *memcg,
4400 struct page *oldpage, struct page *newpage, bool migration_ok)
4401{
4402 struct page *used, *unused;
4403 struct page_cgroup *pc;
4404 bool anon;
4405
4406 if (!memcg)
4407 return;
4408
4409 if (!migration_ok) {
4410 used = oldpage;
4411 unused = newpage;
4412 } else {
4413 used = newpage;
4414 unused = oldpage;
4415 }
4416 anon = PageAnon(used);
4417 __mem_cgroup_uncharge_common(unused,
4418 anon ? MEM_CGROUP_CHARGE_TYPE_ANON
4419 : MEM_CGROUP_CHARGE_TYPE_CACHE,
4420 true);
4421 css_put(&memcg->css);
4422 /*
4423 * We disallowed uncharging pages under migration because the mapcount
4424 * of the page goes down to zero, temporarily.
4425 * Clear the flag and check whether the page should be charged.
4426 */
4427 pc = lookup_page_cgroup(oldpage);
4428 lock_page_cgroup(pc);
4429 ClearPageCgroupMigration(pc);
4430 unlock_page_cgroup(pc);
4431
4432 /*
4433 * If a page is file cache, the radix-tree replacement is atomic
4434 * and we can skip this check. When it was an Anon page, its mapcount
4435 * goes down to 0. But because we added the MIGRATION flag, it's not
4436 * uncharged yet. There are several cases, but the page->mapcount check
4437 * and the USED bit check in mem_cgroup_uncharge_page() are sufficient.
4438 * (see prepare_charge() also)
4439 */
4440 if (anon)
4441 mem_cgroup_uncharge_page(used);
4442}
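
#if 0	/* illustrative sketch; not compiled */
/*
 * prepare_migration/end_migration pair around the actual page copy; a
 * condensed, hypothetical rendition of the migration core's usage.
 * example_copy_and_remap() stands in for the real migration steps:
 */
static bool example_copy_and_remap(struct page *oldpage, struct page *newpage);

static void example_migrate_page(struct page *oldpage, struct page *newpage)
{
	struct mem_cgroup *memcg;
	bool ok;

	mem_cgroup_prepare_migration(oldpage, newpage, &memcg);
	ok = example_copy_and_remap(oldpage, newpage);
	/* end_migration() tolerates memcg == NULL (uncharged oldpage). */
	mem_cgroup_end_migration(memcg, oldpage, newpage, ok);
}
#endif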
4443
4444/*
4445 * When replacing page cache, newpage is not under any memcg but it's on
4446 * LRU. So, this function doesn't touch res_counter but handles LRU
4447 * in correct way. Both pages are locked so we cannot race with uncharge.
4448 */
4449void mem_cgroup_replace_page_cache(struct page *oldpage,
4450 struct page *newpage)
4451{
4452 struct mem_cgroup *memcg = NULL;
4453 struct page_cgroup *pc;
4454 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
4455
4456 if (mem_cgroup_disabled())
4457 return;
4458
4459 pc = lookup_page_cgroup(oldpage);
4460 /* fix accounting on old pages */
4461 lock_page_cgroup(pc);
4462 if (PageCgroupUsed(pc)) {
4463 memcg = pc->mem_cgroup;
4464 mem_cgroup_charge_statistics(memcg, oldpage, false, -1);
4465 ClearPageCgroupUsed(pc);
4466 }
4467 unlock_page_cgroup(pc);
4468
4469 /*
4470 * When called from shmem_replace_page(), in some cases the
4471 * oldpage has already been charged, and in some cases not.
4472 */
4473 if (!memcg)
4474 return;
4475 /*
4476 * Even if newpage->mapping was NULL before starting replacement,
4477 * the newpage may be on the LRU (or a pagevec for the LRU) already. We lock
4478 * LRU while we overwrite pc->mem_cgroup.
4479 */
4480 __mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
4481}
4482
4483#ifdef CONFIG_DEBUG_VM
4484static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
4485{
4486 struct page_cgroup *pc;
4487
4488 pc = lookup_page_cgroup(page);
4489 /*
4490 * Can be NULL while feeding pages into the page allocator for
4491 * the first time, i.e. during boot or memory hotplug;
4492 * or when mem_cgroup_disabled().
4493 */
4494 if (likely(pc) && PageCgroupUsed(pc))
4495 return pc;
4496 return NULL;
4497}
4498
4499bool mem_cgroup_bad_page_check(struct page *page)
4500{
4501 if (mem_cgroup_disabled())
4502 return false;
4503
4504 return lookup_page_cgroup_used(page) != NULL;
4505}
4506
4507void mem_cgroup_print_bad_page(struct page *page)
4508{
4509 struct page_cgroup *pc;
4510
4511 pc = lookup_page_cgroup_used(page);
4512 if (pc) {
4513 pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
4514 pc, pc->flags, pc->mem_cgroup);
4515 }
4516}
4517#endif
4518
4519static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
4520 unsigned long long val)
4521{
4522 int retry_count;
4523 u64 memswlimit, memlimit;
4524 int ret = 0;
4525 int children = mem_cgroup_count_children(memcg);
4526 u64 curusage, oldusage;
4527 int enlarge;
4528
4529 /*
4530 * To keep hierarchical_reclaim simple, how long we should retry
4531 * depends on the caller. We set our retry-count to be a function
4532 * of the number of children we should visit in this loop.
4533 */
4534 retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
4535
4536 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
4537
4538 enlarge = 0;
4539 while (retry_count) {
4540 if (signal_pending(current)) {
4541 ret = -EINTR;
4542 break;
4543 }
4544 /*
4545 * Rather than hiding all of this in some function, do it in an
4546 * open-coded manner so it is clear what really happens.
4547 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
4548 */
4549 mutex_lock(&set_limit_mutex);
4550 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4551 if (memswlimit < val) {
4552 ret = -EINVAL;
4553 mutex_unlock(&set_limit_mutex);
4554 break;
4555 }
4556
4557 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4558 if (memlimit < val)
4559 enlarge = 1;
4560
4561 ret = res_counter_set_limit(&memcg->res, val);
4562 if (!ret) {
4563 if (memswlimit == val)
4564 memcg->memsw_is_minimum = true;
4565 else
4566 memcg->memsw_is_minimum = false;
4567 }
4568 mutex_unlock(&set_limit_mutex);
4569
4570 if (!ret)
4571 break;
4572
4573 mem_cgroup_reclaim(memcg, GFP_KERNEL,
4574 MEM_CGROUP_RECLAIM_SHRINK);
4575 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
4576 /* Was usage reduced? */
4577 if (curusage >= oldusage)
4578 retry_count--;
4579 else
4580 oldusage = curusage;
4581 }
4582 if (!ret && enlarge)
4583 memcg_oom_recover(memcg);
4584
4585 return ret;
4586}
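
#if 0	/* illustrative userspace sketch; not kernel code */
/*
 * mem_cgroup_resize_limit() runs when userspace writes to the
 * memory.limit_in_bytes control file. A minimal example, assuming a
 * hypothetical mount point and group name; memparse-style suffixes
 * (K/M/G) are accepted:
 */
#include <stdio.h>

int main(void)
{
	FILE *f;

	f = fopen("/sys/fs/cgroup/memory/mygroup/memory.limit_in_bytes", "w");
	if (!f)
		return 1;
	fprintf(f, "512M\n");

	return fclose(f) ? 1 : 0;
}
#endif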
4587
4588static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
4589 unsigned long long val)
4590{
4591 int retry_count;
4592 u64 memlimit, memswlimit, oldusage, curusage;
4593 int children = mem_cgroup_count_children(memcg);
4594 int ret = -EBUSY;
4595 int enlarge = 0;
4596
4597 /* see mem_cgroup_resize_res_limit */
4598 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
4599 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
4600 while (retry_count) {
4601 if (signal_pending(current)) {
4602 ret = -EINTR;
4603 break;
4604 }
4605 /*
4606 * Rather than hiding all of this in some function, do it in an
4607 * open-coded manner so it is clear what really happens.
4608 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
4609 */
4610 mutex_lock(&set_limit_mutex);
4611 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4612 if (memlimit > val) {
4613 ret = -EINVAL;
4614 mutex_unlock(&set_limit_mutex);
4615 break;
4616 }
4617 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4618 if (memswlimit < val)
4619 enlarge = 1;
4620 ret = res_counter_set_limit(&memcg->memsw, val);
4621 if (!ret) {
4622 if (memlimit == val)
4623 memcg->memsw_is_minimum = true;
4624 else
4625 memcg->memsw_is_minimum = false;
4626 }
4627 mutex_unlock(&set_limit_mutex);
4628
4629 if (!ret)
4630 break;
4631
4632 mem_cgroup_reclaim(memcg, GFP_KERNEL,
4633 MEM_CGROUP_RECLAIM_NOSWAP |
4634 MEM_CGROUP_RECLAIM_SHRINK);
4635 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
4636 /* Was usage reduced? */
4637 if (curusage >= oldusage)
4638 retry_count--;
4639 else
4640 oldusage = curusage;
4641 }
4642 if (!ret && enlarge)
4643 memcg_oom_recover(memcg);
4644 return ret;
4645}
4646
4647unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
4648 gfp_t gfp_mask,
4649 unsigned long *total_scanned)
4650{
4651 unsigned long nr_reclaimed = 0;
4652 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
4653 unsigned long reclaimed;
4654 int loop = 0;
4655 struct mem_cgroup_tree_per_zone *mctz;
4656 unsigned long long excess;
4657 unsigned long nr_scanned;
4658
4659 if (order > 0)
4660 return 0;
4661
4662 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
4663 /*
4664 * This loop can run for a while, especially if mem_cgroups
4665 * continuously keep exceeding their soft limit and putting the
4666 * system under pressure.
4667 */
4668 do {
4669 if (next_mz)
4670 mz = next_mz;
4671 else
4672 mz = mem_cgroup_largest_soft_limit_node(mctz);
4673 if (!mz)
4674 break;
4675
4676 nr_scanned = 0;
4677 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
4678 gfp_mask, &nr_scanned);
4679 nr_reclaimed += reclaimed;
4680 *total_scanned += nr_scanned;
4681 spin_lock(&mctz->lock);
4682
4683 /*
4684 * If we failed to reclaim anything from this memory cgroup,
4685 * it is time to move on to the next cgroup.
4686 */
4687 next_mz = NULL;
4688 if (!reclaimed) {
4689 do {
4690 /*
4691 * Loop until we find yet another one.
4692 *
4693 * By the time we get the soft_limit lock
4694 * again, someone might have added the
4695 * group back on the RB tree. Iterate to
4696 * make sure we get a different memcg.
4697 * mem_cgroup_largest_soft_limit_node returns
4698 * NULL if no other cgroup is present on
4699 * the tree
4700 */
4701 next_mz =
4702 __mem_cgroup_largest_soft_limit_node(mctz);
4703 if (next_mz == mz)
4704 css_put(&next_mz->memcg->css);
4705 else /* next_mz == NULL or other memcg */
4706 break;
4707 } while (1);
4708 }
4709 __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
4710 excess = res_counter_soft_limit_excess(&mz->memcg->res);
4711 /*
4712 * One school of thought says that we should not add
4713 * back the node to the tree if reclaim returns 0.
4714 * But our reclaim could return 0, simply because due
4715 * to priority we are exposing a smaller subset of
4716 * memory to reclaim from. Consider this as a longer
4717 * term TODO.
4718 */
4719 /* If excess == 0, no tree ops */
4720 __mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
4721 spin_unlock(&mctz->lock);
4722 css_put(&mz->memcg->css);
4723 loop++;
4724 /*
4725 * Could not reclaim anything and there are no more
4726 * mem cgroups to try or we seem to be looping without
4727 * reclaiming anything.
4728 */
4729 if (!nr_reclaimed &&
4730 (next_mz == NULL ||
4731 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
4732 break;
4733 } while (!nr_reclaimed);
4734 if (next_mz)
4735 css_put(&next_mz->memcg->css);
4736 return nr_reclaimed;
4737}
4738
4739/**
4740 * mem_cgroup_force_empty_list - clears LRU of a group
4741 * @memcg: group to clear
4742 * @node: NUMA node
4743 * @zid: zone id
4744 * @lru: lru to clear
4745 *
4746 * Traverse a specified page_cgroup list and try to drop them all. This doesn't
4747 * reclaim the pages themselves - pages are moved to the parent (or root)
4748 * group.
4749 */
4750static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
4751 int node, int zid, enum lru_list lru)
4752{
4753 struct lruvec *lruvec;
4754 unsigned long flags;
4755 struct list_head *list;
4756 struct page *busy;
4757 struct zone *zone;
4758
4759 zone = &NODE_DATA(node)->node_zones[zid];
4760 lruvec = mem_cgroup_zone_lruvec(zone, memcg);
4761 list = &lruvec->lists[lru];
4762
4763 busy = NULL;
4764 do {
4765 struct page_cgroup *pc;
4766 struct page *page;
4767
4768 spin_lock_irqsave(&zone->lru_lock, flags);
4769 if (list_empty(list)) {
4770 spin_unlock_irqrestore(&zone->lru_lock, flags);
4771 break;
4772 }
4773 page = list_entry(list->prev, struct page, lru);
4774 if (busy == page) {
4775 list_move(&page->lru, list);
4776 busy = NULL;
4777 spin_unlock_irqrestore(&zone->lru_lock, flags);
4778 continue;
4779 }
4780 spin_unlock_irqrestore(&zone->lru_lock, flags);
4781
4782 pc = lookup_page_cgroup(page);
4783
4784 if (mem_cgroup_move_parent(page, pc, memcg)) {
4785 /* found lock contention or "pc" is obsolete. */
4786 busy = page;
4787 cond_resched();
4788 } else
4789 busy = NULL;
4790 } while (!list_empty(list));
4791}
4792
4793/*
4794 * Make the mem_cgroup's charge 0 if there is no task, by moving
4795 * all the charges and pages to the parent.
4796 * This enables deleting this mem_cgroup.
4797 *
4798 * Caller is responsible for holding css reference on the memcg.
4799 */
4800static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
4801{
4802 int node, zid;
4803 u64 usage;
4804
4805 do {
4806 /* This is for making all *used* pages be on the LRU. */
4807 lru_add_drain_all();
4808 drain_all_stock_sync(memcg);
4809 mem_cgroup_start_move(memcg);
4810 for_each_node_state(node, N_MEMORY) {
4811 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
4812 enum lru_list lru;
4813 for_each_lru(lru) {
4814 mem_cgroup_force_empty_list(memcg,
4815 node, zid, lru);
4816 }
4817 }
4818 }
4819 mem_cgroup_end_move(memcg);
4820 memcg_oom_recover(memcg);
4821 cond_resched();
4822
4823 /*
4824 * Kernel memory may not necessarily be trackable to a specific
4825 * process, so such charges are not migrated, and therefore we
4826 * can't expect their value to drop to 0 here.
4827 * Having res filled up with kmem only is enough.
4828 *
4829 * This is a safety check because mem_cgroup_force_empty_list
4830 * could have raced with mem_cgroup_replace_page_cache callers
4831 * so the lru seemed empty but the page could have been added
4832 * right after the check. RES_USAGE should be safe as we always
4833 * charge before adding to the LRU.
4834 */
4835 usage = res_counter_read_u64(&memcg->res, RES_USAGE) -
4836 res_counter_read_u64(&memcg->kmem, RES_USAGE);
4837 } while (usage > 0);
4838}
4839
4840static inline bool memcg_has_children(struct mem_cgroup *memcg)
4841{
4842 lockdep_assert_held(&memcg_create_mutex);
4843 /*
4844 * The lock does not prevent addition or deletion to the list
4845 * of children, but it prevents a new child from being
4846 * initialized based on this parent in css_online(), so it's
4847 * enough to decide whether hierarchically inherited
4848 * attributes can still be changed or not.
4849 */
4850 return memcg->use_hierarchy &&
4851 !list_empty(&memcg->css.cgroup->children);
4852}
4853
4854/*
4855 * Reclaims as many pages from the given memcg as possible and moves
4856 * the rest to the parent.
4857 *
4858 * Caller is responsible for holding css reference for memcg.
4859 */
4860static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
4861{
4862 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
4863 struct cgroup *cgrp = memcg->css.cgroup;
4864
4865 /* returns EBUSY if there is a task or if we come here twice. */
4866 if (cgroup_has_tasks(cgrp) || !list_empty(&cgrp->children))
4867 return -EBUSY;
4868
4869 /* we call try-to-free pages to make this cgroup empty */
4870 lru_add_drain_all();
4871 /* try to free all pages in this cgroup */
4872 while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
4873 int progress;
4874
4875 if (signal_pending(current))
4876 return -EINTR;
4877
4878 progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
4879 false);
4880 if (!progress) {
4881 nr_retries--;
4882 /* maybe some writeback is necessary */
4883 congestion_wait(BLK_RW_ASYNC, HZ/10);
4884 }
4885
4886 }
4887 lru_add_drain();
4888 mem_cgroup_reparent_charges(memcg);
4889
4890 return 0;
4891}
4892
4893static int mem_cgroup_force_empty_write(struct cgroup_subsys_state *css,
4894 unsigned int event)
4895{
4896 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4897
4898 if (mem_cgroup_is_root(memcg))
4899 return -EINVAL;
4900 return mem_cgroup_force_empty(memcg);
4901}
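
/*
 * This handler backs the memory.force_empty control file; writing any
 * value to it (e.g. "echo 0 > memory.force_empty" in a hypothetical
 * mount of the hierarchy) triggers the reclaim/reparent sequence above.
 */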
4902
4903static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
4904 struct cftype *cft)
4905{
4906 return mem_cgroup_from_css(css)->use_hierarchy;
4907}
4908
4909static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
4910 struct cftype *cft, u64 val)
4911{
4912 int retval = 0;
4913 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4914 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(css_parent(&memcg->css));
4915
4916 mutex_lock(&memcg_create_mutex);
4917
4918 if (memcg->use_hierarchy == val)
4919 goto out;
4920
4921 /*
4922 * If parent's use_hierarchy is set, we can't make any modifications
4923 * in the child subtrees. If it is unset, then the change can
4924 * occur, provided the current cgroup has no children.
4925 *
4926 * For the root cgroup, parent_memcg is NULL; we allow the value to
4927 * be set if there are no children.
4928 */
4929 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
4930 (val == 1 || val == 0)) {
4931 if (list_empty(&memcg->css.cgroup->children))
4932 memcg->use_hierarchy = val;
4933 else
4934 retval = -EBUSY;
4935 } else
4936 retval = -EINVAL;
4937
4938out:
4939 mutex_unlock(&memcg_create_mutex);
4940
4941 return retval;
4942}
4943
4945static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
4946 enum mem_cgroup_stat_index idx)
4947{
4948 struct mem_cgroup *iter;
4949 long val = 0;
4950
4951 /* Per-cpu values can be negative, use a signed accumulator */
4952 for_each_mem_cgroup_tree(iter, memcg)
4953 val += mem_cgroup_read_stat(iter, idx);
4954
4955 if (val < 0) /* race ? */
4956 val = 0;
4957 return val;
4958}
4959
4960static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
4961{
4962 u64 val;
4963
4964 if (!mem_cgroup_is_root(memcg)) {
4965 if (!swap)
4966 return res_counter_read_u64(&memcg->res, RES_USAGE);
4967 else
4968 return res_counter_read_u64(&memcg->memsw, RES_USAGE);
4969 }
4970
4971 /*
4972 * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS
4973 * as well as in MEM_CGROUP_STAT_RSS_HUGE.
4974 */
4975 val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
4976 val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
4977
4978 if (swap)
4979 val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP);
4980
4981 return val << PAGE_SHIFT;
4982}
4983
4984static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
4985 struct cftype *cft)
4986{
4987 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4988 u64 val;
4989 int name;
4990 enum res_type type;
4991
4992 type = MEMFILE_TYPE(cft->private);
4993 name = MEMFILE_ATTR(cft->private);
4994
4995 switch (type) {
4996 case _MEM:
4997 if (name == RES_USAGE)
4998 val = mem_cgroup_usage(memcg, false);
4999 else
5000 val = res_counter_read_u64(&memcg->res, name);
5001 break;
5002 case _MEMSWAP:
5003 if (name == RES_USAGE)
5004 val = mem_cgroup_usage(memcg, true);
5005 else
5006 val = res_counter_read_u64(&memcg->memsw, name);
5007 break;
5008 case _KMEM:
5009 val = res_counter_read_u64(&memcg->kmem, name);
5010 break;
5011 default:
5012 BUG();
5013 }
5014
5015 return val;
5016}
5017
5018#ifdef CONFIG_MEMCG_KMEM
5019/* should be called with activate_kmem_mutex held */
5020static int __memcg_activate_kmem(struct mem_cgroup *memcg,
5021 unsigned long long limit)
5022{
5023 int err = 0;
5024 int memcg_id;
5025
5026 if (memcg_kmem_is_active(memcg))
5027 return 0;
5028
5029 /*
5030 * We are going to allocate memory for data shared by all memory
5031 * cgroups so let's stop accounting here.
5032 */
5033 memcg_stop_kmem_account();
5034
5035 /*
5036 * For simplicity, we won't allow this to be disabled. It also can't
5037 * be changed if the cgroup already has children, or if tasks have
5038 * already joined.
5039 *
5040 * If tasks join before we set the limit, a person looking at
5041 * kmem.usage_in_bytes will have no way to determine when it took
5042 * place, which makes the value quite meaningless.
5043 *
5044 * After it first became limited, changes in the value of the limit are
5045 * of course permitted.
5046 */
5047 mutex_lock(&memcg_create_mutex);
5048 if (cgroup_has_tasks(memcg->css.cgroup) || memcg_has_children(memcg))
5049 err = -EBUSY;
5050 mutex_unlock(&memcg_create_mutex);
5051 if (err)
5052 goto out;
5053
5054 memcg_id = ida_simple_get(&kmem_limited_groups,
5055 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
5056 if (memcg_id < 0) {
5057 err = memcg_id;
5058 goto out;
5059 }
5060
5061 /*
5062 * Make sure we have enough space for this cgroup in each root cache's
5063 * memcg_params.
5064 */
5065 err = memcg_update_all_caches(memcg_id + 1);
5066 if (err)
5067 goto out_rmid;
5068
5069 memcg->kmemcg_id = memcg_id;
5070 INIT_LIST_HEAD(&memcg->memcg_slab_caches);
5071 mutex_init(&memcg->slab_caches_mutex);
5072
5073 /*
5074 * We couldn't have accounted to this cgroup, because it hasn't got the
5075 * active bit set yet, so this should succeed.
5076 */
5077 err = res_counter_set_limit(&memcg->kmem, limit);
5078 VM_BUG_ON(err);
5079
5080 static_key_slow_inc(&memcg_kmem_enabled_key);
5081 /*
5082 * Setting the active bit after enabling static branching will
5083 * guarantee no one starts accounting before all call sites are
5084 * patched.
5085 */
5086 memcg_kmem_set_active(memcg);
5087out:
5088 memcg_resume_kmem_account();
5089 return err;
5090
5091out_rmid:
5092 ida_simple_remove(&kmem_limited_groups, memcg_id);
5093 goto out;
5094}
5095
5096static int memcg_activate_kmem(struct mem_cgroup *memcg,
5097 unsigned long long limit)
5098{
5099 int ret;
5100
5101 mutex_lock(&activate_kmem_mutex);
5102 ret = __memcg_activate_kmem(memcg, limit);
5103 mutex_unlock(&activate_kmem_mutex);
5104 return ret;
5105}
5106
5107static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
5108 unsigned long long val)
5109{
5110 int ret;
5111
5112 if (!memcg_kmem_is_active(memcg))
5113 ret = memcg_activate_kmem(memcg, val);
5114 else
5115 ret = res_counter_set_limit(&memcg->kmem, val);
5116 return ret;
5117}
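
#if 0	/* illustrative userspace sketch; not kernel code */
/*
 * memcg_update_kmem_limit() distinguishes the first write to
 * memory.kmem.limit_in_bytes (which activates kmem accounting and
 * cannot be undone) from later writes (which merely resize the limit).
 * Mount point and group name below are hypothetical:
 */
#include <stdio.h>

int main(void)
{
	FILE *f;

	f = fopen("/sys/fs/cgroup/memory/mygroup/memory.kmem.limit_in_bytes", "w");
	if (!f)
		return 1;
	fprintf(f, "64M\n");	/* first successful write flips the group kmem-active */

	return fclose(f) ? 1 : 0;
}
#endif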
5118
5119static int memcg_propagate_kmem(struct mem_cgroup *memcg)
5120{
5121 int ret = 0;
5122 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5123
5124 if (!parent)
5125 return 0;
5126
5127 mutex_lock(&activate_kmem_mutex);
5128 /*
5129 * If the parent cgroup is not kmem-active now, it cannot be activated
5130 * after this point, because it has at least one child already.
5131 */
5132 if (memcg_kmem_is_active(parent))
5133 ret = __memcg_activate_kmem(memcg, RES_COUNTER_MAX);
5134 mutex_unlock(&activate_kmem_mutex);
5135 return ret;
5136}
5137#else
5138static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
5139 unsigned long long val)
5140{
5141 return -EINVAL;
5142}
5143#endif /* CONFIG_MEMCG_KMEM */
5144
5145/*
5146 * The user of this function is...
5147 * RES_LIMIT.
5148 */
5149static int mem_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
5150 char *buffer)
5151{
5152 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5153 enum res_type type;
5154 int name;
5155 unsigned long long val;
5156 int ret;
5157
5158 type = MEMFILE_TYPE(cft->private);
5159 name = MEMFILE_ATTR(cft->private);
5160
5161 switch (name) {
5162 case RES_LIMIT:
5163 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
5164 ret = -EINVAL;
5165 break;
5166 }
5167 /* This function does all the necessary parsing; reuse it */
5168 ret = res_counter_memparse_write_strategy(buffer, &val);
5169 if (ret)
5170 break;
5171 if (type == _MEM)
5172 ret = mem_cgroup_resize_limit(memcg, val);
5173 else if (type == _MEMSWAP)
5174 ret = mem_cgroup_resize_memsw_limit(memcg, val);
5175 else if (type == _KMEM)
5176 ret = memcg_update_kmem_limit(memcg, val);
5177 else
5178 return -EINVAL;
5179 break;
5180 case RES_SOFT_LIMIT:
5181 ret = res_counter_memparse_write_strategy(buffer, &val);
5182 if (ret)
5183 break;
5184 /*
5185 * For memsw, soft limits are hard to implement in terms
5186 * of semantics; for now, we only support soft limits for
5187 * control without swap.
5188 */
5189 if (type == _MEM)
5190 ret = res_counter_set_soft_limit(&memcg->res, val);
5191 else
5192 ret = -EINVAL;
5193 break;
5194 default:
5195 ret = -EINVAL; /* should be BUG() ? */
5196 break;
5197 }
5198 return ret;
5199}
5200
5201static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
5202 unsigned long long *mem_limit, unsigned long long *memsw_limit)
5203{
5204 unsigned long long min_limit, min_memsw_limit, tmp;
5205
5206 min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
5207 min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
5208 if (!memcg->use_hierarchy)
5209 goto out;
5210
5211 while (css_parent(&memcg->css)) {
5212 memcg = mem_cgroup_from_css(css_parent(&memcg->css));
5213 if (!memcg->use_hierarchy)
5214 break;
5215 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
5216 min_limit = min(min_limit, tmp);
5217 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
5218 min_memsw_limit = min(min_memsw_limit, tmp);
5219 }
5220out:
5221 *mem_limit = min_limit;
5222 *memsw_limit = min_memsw_limit;
5223}
5224
5225static int mem_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
5226{
5227 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5228 int name;
5229 enum res_type type;
5230
5231 type = MEMFILE_TYPE(event);
5232 name = MEMFILE_ATTR(event);
5233
5234 switch (name) {
5235 case RES_MAX_USAGE:
5236 if (type == _MEM)
5237 res_counter_reset_max(&memcg->res);
5238 else if (type == _MEMSWAP)
5239 res_counter_reset_max(&memcg->memsw);
5240 else if (type == _KMEM)
5241 res_counter_reset_max(&memcg->kmem);
5242 else
5243 return -EINVAL;
5244 break;
5245 case RES_FAILCNT:
5246 if (type == _MEM)
5247 res_counter_reset_failcnt(&memcg->res);
5248 else if (type == _MEMSWAP)
5249 res_counter_reset_failcnt(&memcg->memsw);
5250 else if (type == _KMEM)
5251 res_counter_reset_failcnt(&memcg->kmem);
5252 else
5253 return -EINVAL;
5254 break;
5255 }
5256
5257 return 0;
5258}
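
/*
 * Usage note (illustrative): this trigger backs the *.max_usage_in_bytes
 * and *.failcnt files, so e.g.
 *
 *	echo 0 > memory.max_usage_in_bytes
 *
 * resets the high-watermark of the matching res_counter (the write is
 * just a trigger).
 */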
5259
5260static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
5261 struct cftype *cft)
5262{
5263 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
5264}
5265
5266#ifdef CONFIG_MMU
5267static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
5268 struct cftype *cft, u64 val)
5269{
5270 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5271
5272 if (val >= (1 << NR_MOVE_TYPE))
5273 return -EINVAL;
5274
5275	/*
5276	 * No kind of locking is needed in here, because ->can_attach() will
5277	 * check this value once at the beginning of the migration and then
5278	 * carry on with stale data. This means that changes to this value
5279	 * will only affect task migrations starting after the change.
5280	 */
5281 memcg->move_charge_at_immigrate = val;
5282 return 0;
5283}
5284#else
5285static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
5286 struct cftype *cft, u64 val)
5287{
5288 return -ENOSYS;
5289}
5290#endif
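
/*
 * Usage note (illustrative): bit 0 of the written value selects anonymous
 * pages and bit 1 file pages, so
 *
 *	echo 3 > memory.move_charge_at_immigrate
 *
 * makes both charge types follow a task whose pid is subsequently written
 * to the destination group's tasks file.
 */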
5291
5292#ifdef CONFIG_NUMA
5293static int memcg_numa_stat_show(struct seq_file *m, void *v)
5294{
5295 struct numa_stat {
5296 const char *name;
5297 unsigned int lru_mask;
5298 };
5299
5300 static const struct numa_stat stats[] = {
5301 { "total", LRU_ALL },
5302 { "file", LRU_ALL_FILE },
5303 { "anon", LRU_ALL_ANON },
5304 { "unevictable", BIT(LRU_UNEVICTABLE) },
5305 };
5306 const struct numa_stat *stat;
5307 int nid;
5308 unsigned long nr;
5309 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5310
5311 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
5312 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
5313 seq_printf(m, "%s=%lu", stat->name, nr);
5314 for_each_node_state(nid, N_MEMORY) {
5315 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
5316 stat->lru_mask);
5317 seq_printf(m, " N%d=%lu", nid, nr);
5318 }
5319 seq_putc(m, '\n');
5320 }
5321
5322 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
5323 struct mem_cgroup *iter;
5324
5325 nr = 0;
5326 for_each_mem_cgroup_tree(iter, memcg)
5327 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
5328 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
5329 for_each_node_state(nid, N_MEMORY) {
5330 nr = 0;
5331 for_each_mem_cgroup_tree(iter, memcg)
5332 nr += mem_cgroup_node_nr_lru_pages(
5333 iter, nid, stat->lru_mask);
5334 seq_printf(m, " N%d=%lu", nid, nr);
5335 }
5336 seq_putc(m, '\n');
5337 }
5338
5339 return 0;
5340}
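
/*
 * Sample memory.numa_stat output as produced by the loops above (values
 * in pages, from a hypothetical two-node machine):
 *
 *	total=2048 N0=1024 N1=1024
 *	file=1536 N0=768 N1=768
 *	anon=512 N0=256 N1=256
 *	unevictable=0 N0=0 N1=0
 *	hierarchical_total=4096 N0=2048 N1=2048
 *	...
 */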
5341#endif /* CONFIG_NUMA */
5342
5343static inline void mem_cgroup_lru_names_not_uptodate(void)
5344{
5345 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
5346}
5347
5348static int memcg_stat_show(struct seq_file *m, void *v)
5349{
5350 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5351 struct mem_cgroup *mi;
5352 unsigned int i;
5353
5354 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
5355 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
5356 continue;
5357 seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
5358 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
5359 }
5360
5361 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
5362 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
5363 mem_cgroup_read_events(memcg, i));
5364
5365 for (i = 0; i < NR_LRU_LISTS; i++)
5366 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
5367 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
5368
5369 /* Hierarchical information */
5370 {
5371 unsigned long long limit, memsw_limit;
5372 memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
5373 seq_printf(m, "hierarchical_memory_limit %llu\n", limit);
5374 if (do_swap_account)
5375 seq_printf(m, "hierarchical_memsw_limit %llu\n",
5376 memsw_limit);
5377 }
5378
5379 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
5380 long long val = 0;
5381
5382 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
5383 continue;
5384 for_each_mem_cgroup_tree(mi, memcg)
5385 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
5386 seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
5387 }
5388
5389 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
5390 unsigned long long val = 0;
5391
5392 for_each_mem_cgroup_tree(mi, memcg)
5393 val += mem_cgroup_read_events(mi, i);
5394 seq_printf(m, "total_%s %llu\n",
5395 mem_cgroup_events_names[i], val);
5396 }
5397
5398 for (i = 0; i < NR_LRU_LISTS; i++) {
5399 unsigned long long val = 0;
5400
5401 for_each_mem_cgroup_tree(mi, memcg)
5402 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
5403 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
5404 }
5405
5406#ifdef CONFIG_DEBUG_VM
5407 {
5408 int nid, zid;
5409 struct mem_cgroup_per_zone *mz;
5410 struct zone_reclaim_stat *rstat;
5411 unsigned long recent_rotated[2] = {0, 0};
5412 unsigned long recent_scanned[2] = {0, 0};
5413
5414 for_each_online_node(nid)
5415 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
5416 mz = mem_cgroup_zoneinfo(memcg, nid, zid);
5417 rstat = &mz->lruvec.reclaim_stat;
5418
5419 recent_rotated[0] += rstat->recent_rotated[0];
5420 recent_rotated[1] += rstat->recent_rotated[1];
5421 recent_scanned[0] += rstat->recent_scanned[0];
5422 recent_scanned[1] += rstat->recent_scanned[1];
5423 }
5424 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
5425 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
5426 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
5427 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
5428 }
5429#endif
5430
5431 return 0;
5432}
5433
5434static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
5435 struct cftype *cft)
5436{
5437 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5438
5439 return mem_cgroup_swappiness(memcg);
5440}
5441
5442static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
5443 struct cftype *cft, u64 val)
5444{
5445 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5446 struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));
5447
5448 if (val > 100 || !parent)
5449 return -EINVAL;
5450
5451 mutex_lock(&memcg_create_mutex);
5452
5453	/* If under hierarchy, only an empty (childless) root can set this value */
5454 if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
5455 mutex_unlock(&memcg_create_mutex);
5456 return -EINVAL;
5457 }
5458
5459 memcg->swappiness = val;
5460
5461 mutex_unlock(&memcg_create_mutex);
5462
5463 return 0;
5464}
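
/*
 * Usage note (illustrative): echo 0 > memory.swappiness biases reclaim
 * for this group away from anonymous pages; values above 100, or writes
 * to a group under a hierarchical parent or with children, fail with
 * -EINVAL, as enforced above.
 */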
5465
5466static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
5467{
5468 struct mem_cgroup_threshold_ary *t;
5469 u64 usage;
5470 int i;
5471
5472 rcu_read_lock();
5473 if (!swap)
5474 t = rcu_dereference(memcg->thresholds.primary);
5475 else
5476 t = rcu_dereference(memcg->memsw_thresholds.primary);
5477
5478 if (!t)
5479 goto unlock;
5480
5481 usage = mem_cgroup_usage(memcg, swap);
5482
5483	/*
5484	 * current_threshold points to the threshold just below or equal to
5485	 * usage. If that no longer holds, a threshold was crossed after the
5486	 * last call to __mem_cgroup_threshold().
5487	 */
5488 i = t->current_threshold;
5489
5490 /*
5491 * Iterate backward over array of thresholds starting from
5492 * current_threshold and check if a threshold is crossed.
5493	 * If none of the thresholds below usage is crossed, we read
5494 * only one element of the array here.
5495 */
5496 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
5497 eventfd_signal(t->entries[i].eventfd, 1);
5498
5499 /* i = current_threshold + 1 */
5500 i++;
5501
5502 /*
5503 * Iterate forward over array of thresholds starting from
5504 * current_threshold+1 and check if a threshold is crossed.
5505	 * If none of the thresholds above usage is crossed, we read
5506 * only one element of the array here.
5507 */
5508 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
5509 eventfd_signal(t->entries[i].eventfd, 1);
5510
5511 /* Update current_threshold */
5512 t->current_threshold = i - 1;
5513unlock:
5514 rcu_read_unlock();
5515}
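
/*
 * Worked example (illustrative): with registered thresholds {4M, 8M, 16M}
 * and current_threshold at index 1 (usage was, say, 10M), a drop to 3M
 * makes the backward scan signal the 8M and 4M eventfds and leaves
 * current_threshold at -1; a later rise to 20M makes the forward scan
 * signal all three and leaves current_threshold at index 2.
 */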
5516
5517static void mem_cgroup_threshold(struct mem_cgroup *memcg)
5518{
5519 while (memcg) {
5520 __mem_cgroup_threshold(memcg, false);
5521 if (do_swap_account)
5522 __mem_cgroup_threshold(memcg, true);
5523
5524 memcg = parent_mem_cgroup(memcg);
5525 }
5526}
5527
5528static int compare_thresholds(const void *a, const void *b)
5529{
5530 const struct mem_cgroup_threshold *_a = a;
5531 const struct mem_cgroup_threshold *_b = b;
5532
5533 if (_a->threshold > _b->threshold)
5534 return 1;
5535
5536 if (_a->threshold < _b->threshold)
5537 return -1;
5538
5539 return 0;
5540}
5541
5542static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
5543{
5544 struct mem_cgroup_eventfd_list *ev;
5545
5546 list_for_each_entry(ev, &memcg->oom_notify, list)
5547 eventfd_signal(ev->eventfd, 1);
5548 return 0;
5549}
5550
5551static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
5552{
5553 struct mem_cgroup *iter;
5554
5555 for_each_mem_cgroup_tree(iter, memcg)
5556 mem_cgroup_oom_notify_cb(iter);
5557}
5558
5559static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
5560 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
5561{
5562 struct mem_cgroup_thresholds *thresholds;
5563 struct mem_cgroup_threshold_ary *new;
5564 u64 threshold, usage;
5565 int i, size, ret;
5566
5567 ret = res_counter_memparse_write_strategy(args, &threshold);
5568 if (ret)
5569 return ret;
5570
5571 mutex_lock(&memcg->thresholds_lock);
5572
5573 if (type == _MEM)
5574 thresholds = &memcg->thresholds;
5575 else if (type == _MEMSWAP)
5576 thresholds = &memcg->memsw_thresholds;
5577 else
5578 BUG();
5579
5580 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
5581
5582	/* Check if a threshold was crossed before adding a new one */
5583 if (thresholds->primary)
5584 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
5585
5586 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
5587
5588 /* Allocate memory for new array of thresholds */
5589 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
5590 GFP_KERNEL);
5591 if (!new) {
5592 ret = -ENOMEM;
5593 goto unlock;
5594 }
5595 new->size = size;
5596
5597 /* Copy thresholds (if any) to new array */
5598 if (thresholds->primary) {
5599 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
5600 sizeof(struct mem_cgroup_threshold));
5601 }
5602
5603 /* Add new threshold */
5604 new->entries[size - 1].eventfd = eventfd;
5605 new->entries[size - 1].threshold = threshold;
5606
5607	/* Sort thresholds; registering a new threshold isn't time-critical */
5608 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
5609 compare_thresholds, NULL);
5610
5611 /* Find current threshold */
5612 new->current_threshold = -1;
5613 for (i = 0; i < size; i++) {
5614 if (new->entries[i].threshold <= usage) {
5615 /*
5616 * new->current_threshold will not be used until
5617 * rcu_assign_pointer(), so it's safe to increment
5618 * it here.
5619 */
5620 ++new->current_threshold;
5621 } else
5622 break;
5623 }
5624
5625 /* Free old spare buffer and save old primary buffer as spare */
5626 kfree(thresholds->spare);
5627 thresholds->spare = thresholds->primary;
5628
5629 rcu_assign_pointer(thresholds->primary, new);
5630
5631 /* To be sure that nobody uses thresholds */
5632 synchronize_rcu();
5633
5634unlock:
5635 mutex_unlock(&memcg->thresholds_lock);
5636
5637 return ret;
5638}
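
/*
 * Design note: primary/spare form a double buffer. Readers only ever
 * dereference primary under rcu_read_lock(); writers build the new array
 * off to the side, publish it with rcu_assign_pointer() and recycle the
 * old primary as the next spare once synchronize_rcu() guarantees no
 * readers remain.
 */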
5639
5640static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
5641 struct eventfd_ctx *eventfd, const char *args)
5642{
5643 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
5644}
5645
5646static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
5647 struct eventfd_ctx *eventfd, const char *args)
5648{
5649 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
5650}
5651
5652static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
5653 struct eventfd_ctx *eventfd, enum res_type type)
5654{
5655 struct mem_cgroup_thresholds *thresholds;
5656 struct mem_cgroup_threshold_ary *new;
5657 u64 usage;
5658 int i, j, size;
5659
5660 mutex_lock(&memcg->thresholds_lock);
5661 if (type == _MEM)
5662 thresholds = &memcg->thresholds;
5663 else if (type == _MEMSWAP)
5664 thresholds = &memcg->memsw_thresholds;
5665 else
5666 BUG();
5667
5668 if (!thresholds->primary)
5669 goto unlock;
5670
5671 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
5672
5673	/* Check if a threshold was crossed before removing */
5674 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
5675
5676	/* Calculate the new number of thresholds */
5677 size = 0;
5678 for (i = 0; i < thresholds->primary->size; i++) {
5679 if (thresholds->primary->entries[i].eventfd != eventfd)
5680 size++;
5681 }
5682
5683 new = thresholds->spare;
5684
5685 /* Set thresholds array to NULL if we don't have thresholds */
5686 if (!size) {
5687 kfree(new);
5688 new = NULL;
5689 goto swap_buffers;
5690 }
5691
5692 new->size = size;
5693
5694 /* Copy thresholds and find current threshold */
5695 new->current_threshold = -1;
5696 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
5697 if (thresholds->primary->entries[i].eventfd == eventfd)
5698 continue;
5699
5700 new->entries[j] = thresholds->primary->entries[i];
5701 if (new->entries[j].threshold <= usage) {
5702 /*
5703 * new->current_threshold will not be used
5704 * until rcu_assign_pointer(), so it's safe to increment
5705 * it here.
5706 */
5707 ++new->current_threshold;
5708 }
5709 j++;
5710 }
5711
5712swap_buffers:
5713 /* Swap primary and spare array */
5714 thresholds->spare = thresholds->primary;
5715 /* If all events are unregistered, free the spare array */
5716 if (!new) {
5717 kfree(thresholds->spare);
5718 thresholds->spare = NULL;
5719 }
5720
5721 rcu_assign_pointer(thresholds->primary, new);
5722
5723 /* To be sure that nobody uses thresholds */
5724 synchronize_rcu();
5725unlock:
5726 mutex_unlock(&memcg->thresholds_lock);
5727}
5728
5729static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
5730 struct eventfd_ctx *eventfd)
5731{
5732 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
5733}
5734
5735static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
5736 struct eventfd_ctx *eventfd)
5737{
5738 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
5739}
5740
5741static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
5742 struct eventfd_ctx *eventfd, const char *args)
5743{
5744 struct mem_cgroup_eventfd_list *event;
5745
5746 event = kmalloc(sizeof(*event), GFP_KERNEL);
5747 if (!event)
5748 return -ENOMEM;
5749
5750 spin_lock(&memcg_oom_lock);
5751
5752 event->eventfd = eventfd;
5753 list_add(&event->list, &memcg->oom_notify);
5754
5755 /* already in OOM ? */
5756 if (atomic_read(&memcg->under_oom))
5757 eventfd_signal(eventfd, 1);
5758 spin_unlock(&memcg_oom_lock);
5759
5760 return 0;
5761}
5762
5763static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
5764 struct eventfd_ctx *eventfd)
5765{
5766 struct mem_cgroup_eventfd_list *ev, *tmp;
5767
5768 spin_lock(&memcg_oom_lock);
5769
5770 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
5771 if (ev->eventfd == eventfd) {
5772 list_del(&ev->list);
5773 kfree(ev);
5774 }
5775 }
5776
5777 spin_unlock(&memcg_oom_lock);
5778}
5779
5780static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
5781{
5782 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
5783
5784 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
5785 seq_printf(sf, "under_oom %d\n", (bool)atomic_read(&memcg->under_oom));
5786 return 0;
5787}
5788
5789static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
5790 struct cftype *cft, u64 val)
5791{
5792 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5793 struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));
5794
5795	/* cannot be set on the root cgroup, and only 0 and 1 are allowed */
5796 if (!parent || !((val == 0) || (val == 1)))
5797 return -EINVAL;
5798
5799 mutex_lock(&memcg_create_mutex);
5800	/* oom-kill-disable is a flag for the whole sub-hierarchy. */
5801 if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
5802 mutex_unlock(&memcg_create_mutex);
5803 return -EINVAL;
5804 }
5805 memcg->oom_kill_disable = val;
5806 if (!val)
5807 memcg_oom_recover(memcg);
5808 mutex_unlock(&memcg_create_mutex);
5809 return 0;
5810}
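
/*
 * Usage note (illustrative): echo 1 > memory.oom_control makes tasks that
 * hit the limit wait instead of invoking the OOM killer; pair it with an
 * oom_control eventfd (see memcg_write_event_control() below) so userspace
 * can raise the limit or kill something itself.
 */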
5811
5812#ifdef CONFIG_MEMCG_KMEM
5813static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
5814{
5815 int ret;
5816
5817 memcg->kmemcg_id = -1;
5818 ret = memcg_propagate_kmem(memcg);
5819 if (ret)
5820 return ret;
5821
5822 return mem_cgroup_sockets_init(memcg, ss);
5823}
5824
5825static void memcg_destroy_kmem(struct mem_cgroup *memcg)
5826{
5827 mem_cgroup_sockets_destroy(memcg);
5828}
5829
5830static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
5831{
5832 if (!memcg_kmem_is_active(memcg))
5833 return;
5834
5835	/*
5836	 * kmem charges can outlive the cgroup. In the case of slab
5837	 * pages, for instance, a page can contain objects from various
5838	 * processes. Since we do not take a reference for every such
5839	 * allocation, we have to be careful when doing uncharge
5840	 * (see memcg_uncharge_kmem) and here during offlining.
5841	 *
5842	 * The idea is that only the _last_ uncharge which sees
5843	 * the dead memcg will drop the last reference. An additional
5844	 * reference is taken here before the group is marked dead,
5845	 * and is then paired with a css_put during uncharge (or here).
5846	 *
5847	 * Although this might sound strange, as this path is called from
5848	 * css_offline() when the reference might have dropped down to 0
5849	 * and shouldn't be incremented anymore (css_tryget would fail),
5850	 * we have no other option because of the lifetime of the kmem
5851	 * allocations.
5852	 */
5853 css_get(&memcg->css);
5854
5855 memcg_kmem_mark_dead(memcg);
5856
5857 if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0)
5858 return;
5859
5860 if (memcg_kmem_test_and_clear_dead(memcg))
5861 css_put(&memcg->css);
5862}
5863#else
5864static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
5865{
5866 return 0;
5867}
5868
5869static void memcg_destroy_kmem(struct mem_cgroup *memcg)
5870{
5871}
5872
5873static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
5874{
5875}
5876#endif
5877
5878/*
5879 * DO NOT USE IN NEW FILES.
5880 *
5881 * "cgroup.event_control" implementation.
5882 *
5883 * This is way over-engineered. It tries to support fully configurable
5884 * events for each user. Such a level of flexibility is completely
5885 * unnecessary, especially in light of the planned unified hierarchy.
5886 *
5887 * Please deprecate this and replace with something simpler if at all
5888 * possible.
5889 */
5890
5891/*
5892 * Unregister event and free resources.
5893 *
5894 * Gets called from workqueue.
5895 */
5896static void memcg_event_remove(struct work_struct *work)
5897{
5898 struct mem_cgroup_event *event =
5899 container_of(work, struct mem_cgroup_event, remove);
5900 struct mem_cgroup *memcg = event->memcg;
5901
5902 remove_wait_queue(event->wqh, &event->wait);
5903
5904 event->unregister_event(memcg, event->eventfd);
5905
5906 /* Notify userspace the event is going away. */
5907 eventfd_signal(event->eventfd, 1);
5908
5909 eventfd_ctx_put(event->eventfd);
5910 kfree(event);
5911 css_put(&memcg->css);
5912}
5913
5914/*
5915 * Gets called on POLLHUP on eventfd when user closes it.
5916 *
5917 * Called with wqh->lock held and interrupts disabled.
5918 */
5919static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
5920 int sync, void *key)
5921{
5922 struct mem_cgroup_event *event =
5923 container_of(wait, struct mem_cgroup_event, wait);
5924 struct mem_cgroup *memcg = event->memcg;
5925 unsigned long flags = (unsigned long)key;
5926
5927 if (flags & POLLHUP) {
5928 /*
5929 * If the event has been detached at cgroup removal, we
5930 * can simply return knowing the other side will cleanup
5931 * for us.
5932 *
5933 * We can't race against event freeing since the other
5934 * side will require wqh->lock via remove_wait_queue(),
5935 * which we hold.
5936 */
5937 spin_lock(&memcg->event_list_lock);
5938 if (!list_empty(&event->list)) {
5939 list_del_init(&event->list);
5940			/*
5941			 * We are in atomic context, but memcg_event_remove()
5942			 * may sleep, so we have to call it from a workqueue.
5943			 */
5944 schedule_work(&event->remove);
5945 }
5946 spin_unlock(&memcg->event_list_lock);
5947 }
5948
5949 return 0;
5950}
5951
5952static void memcg_event_ptable_queue_proc(struct file *file,
5953 wait_queue_head_t *wqh, poll_table *pt)
5954{
5955 struct mem_cgroup_event *event =
5956 container_of(pt, struct mem_cgroup_event, pt);
5957
5958 event->wqh = wqh;
5959 add_wait_queue(wqh, &event->wait);
5960}
5961
5962/*
5963 * DO NOT USE IN NEW FILES.
5964 *
5965 * Parse input and register new cgroup event handler.
5966 *
5967 * Input must be in format '<event_fd> <control_fd> <args>'.
5968 * Interpretation of args is defined by control file implementation.
5969 */
5970static int memcg_write_event_control(struct cgroup_subsys_state *css,
5971 struct cftype *cft, char *buffer)
5972{
5973 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5974 struct mem_cgroup_event *event;
5975 struct cgroup_subsys_state *cfile_css;
5976 unsigned int efd, cfd;
5977 struct fd efile;
5978 struct fd cfile;
5979 const char *name;
5980 char *endp;
5981 int ret;
5982
5983 efd = simple_strtoul(buffer, &endp, 10);
5984 if (*endp != ' ')
5985 return -EINVAL;
5986 buffer = endp + 1;
5987
5988 cfd = simple_strtoul(buffer, &endp, 10);
5989 if ((*endp != ' ') && (*endp != '\0'))
5990 return -EINVAL;
5991 buffer = endp + 1;
5992
5993 event = kzalloc(sizeof(*event), GFP_KERNEL);
5994 if (!event)
5995 return -ENOMEM;
5996
5997 event->memcg = memcg;
5998 INIT_LIST_HEAD(&event->list);
5999 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
6000 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
6001 INIT_WORK(&event->remove, memcg_event_remove);
6002
6003 efile = fdget(efd);
6004 if (!efile.file) {
6005 ret = -EBADF;
6006 goto out_kfree;
6007 }
6008
6009 event->eventfd = eventfd_ctx_fileget(efile.file);
6010 if (IS_ERR(event->eventfd)) {
6011 ret = PTR_ERR(event->eventfd);
6012 goto out_put_efile;
6013 }
6014
6015 cfile = fdget(cfd);
6016 if (!cfile.file) {
6017 ret = -EBADF;
6018 goto out_put_eventfd;
6019 }
6020
6021	/* the process needs read permission on the control file */
6022 /* AV: shouldn't we check that it's been opened for read instead? */
6023 ret = inode_permission(file_inode(cfile.file), MAY_READ);
6024 if (ret < 0)
6025 goto out_put_cfile;
6026
6027 /*
6028 * Determine the event callbacks and set them in @event. This used
6029 * to be done via struct cftype but cgroup core no longer knows
6030 * about these events. The following is crude but the whole thing
6031 * is for compatibility anyway.
6032 *
6033 * DO NOT ADD NEW FILES.
6034 */
6035 name = cfile.file->f_dentry->d_name.name;
6036
6037 if (!strcmp(name, "memory.usage_in_bytes")) {
6038 event->register_event = mem_cgroup_usage_register_event;
6039 event->unregister_event = mem_cgroup_usage_unregister_event;
6040 } else if (!strcmp(name, "memory.oom_control")) {
6041 event->register_event = mem_cgroup_oom_register_event;
6042 event->unregister_event = mem_cgroup_oom_unregister_event;
6043 } else if (!strcmp(name, "memory.pressure_level")) {
6044 event->register_event = vmpressure_register_event;
6045 event->unregister_event = vmpressure_unregister_event;
6046 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
6047 event->register_event = memsw_cgroup_usage_register_event;
6048 event->unregister_event = memsw_cgroup_usage_unregister_event;
6049 } else {
6050 ret = -EINVAL;
6051 goto out_put_cfile;
6052 }
6053
6054	/*
6055	 * Verify that @cfile belongs to @css. Also, remaining events are
6056	 * automatically removed on cgroup destruction, but the removal is
6057	 * asynchronous, so take an extra ref on @css.
6058	 */
6059 cfile_css = css_tryget_from_dir(cfile.file->f_dentry->d_parent,
6060 &memory_cgrp_subsys);
6061 ret = -EINVAL;
6062 if (IS_ERR(cfile_css))
6063 goto out_put_cfile;
6064 if (cfile_css != css) {
6065 css_put(cfile_css);
6066 goto out_put_cfile;
6067 }
6068
6069 ret = event->register_event(memcg, event->eventfd, buffer);
6070 if (ret)
6071 goto out_put_css;
6072
6073 efile.file->f_op->poll(efile.file, &event->pt);
6074
6075 spin_lock(&memcg->event_list_lock);
6076 list_add(&event->list, &memcg->event_list);
6077 spin_unlock(&memcg->event_list_lock);
6078
6079 fdput(cfile);
6080 fdput(efile);
6081
6082 return 0;
6083
6084out_put_css:
6085 css_put(css);
6086out_put_cfile:
6087 fdput(cfile);
6088out_put_eventfd:
6089 eventfd_ctx_put(event->eventfd);
6090out_put_efile:
6091 fdput(efile);
6092out_kfree:
6093 kfree(event);
6094
6095 return ret;
6096}
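
/*
 * Illustrative userspace sketch (not kernel code): registering an OOM
 * notification through the legacy interface parsed above. The cgroup
 * path is an assumption and error handling is minimal.
 */
#if 0	/* standalone userspace example */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	char buf[64];
	uint64_t cnt;
	int efd = eventfd(0, 0);
	int cfd = open("/sys/fs/cgroup/memory/test/memory.oom_control",
		       O_RDONLY);
	int ecfd = open("/sys/fs/cgroup/memory/test/cgroup.event_control",
			O_WRONLY);

	if (efd < 0 || cfd < 0 || ecfd < 0)
		return 1;

	/* "<event_fd> <control_fd>", as parsed by memcg_write_event_control() */
	snprintf(buf, sizeof(buf), "%d %d", efd, cfd);
	if (write(ecfd, buf, strlen(buf)) < 0)
		return 1;

	/* blocks until the group hits OOM or the cgroup goes away */
	if (read(efd, &cnt, sizeof(cnt)) == sizeof(cnt))
		printf("OOM events: %llu\n", (unsigned long long)cnt);
	return 0;
}
#endif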
6097
6098static struct cftype mem_cgroup_files[] = {
6099 {
6100 .name = "usage_in_bytes",
6101 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
6102 .read_u64 = mem_cgroup_read_u64,
6103 },
6104 {
6105 .name = "max_usage_in_bytes",
6106 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
6107 .trigger = mem_cgroup_reset,
6108 .read_u64 = mem_cgroup_read_u64,
6109 },
6110 {
6111 .name = "limit_in_bytes",
6112 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
6113 .write_string = mem_cgroup_write,
6114 .read_u64 = mem_cgroup_read_u64,
6115 },
6116 {
6117 .name = "soft_limit_in_bytes",
6118 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
6119 .write_string = mem_cgroup_write,
6120 .read_u64 = mem_cgroup_read_u64,
6121 },
6122 {
6123 .name = "failcnt",
6124 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
6125 .trigger = mem_cgroup_reset,
6126 .read_u64 = mem_cgroup_read_u64,
6127 },
6128 {
6129 .name = "stat",
6130 .seq_show = memcg_stat_show,
6131 },
6132 {
6133 .name = "force_empty",
6134 .trigger = mem_cgroup_force_empty_write,
6135 },
6136 {
6137 .name = "use_hierarchy",
6138 .flags = CFTYPE_INSANE,
6139 .write_u64 = mem_cgroup_hierarchy_write,
6140 .read_u64 = mem_cgroup_hierarchy_read,
6141 },
6142 {
6143 .name = "cgroup.event_control", /* XXX: for compat */
6144 .write_string = memcg_write_event_control,
6145 .flags = CFTYPE_NO_PREFIX,
6146 .mode = S_IWUGO,
6147 },
6148 {
6149 .name = "swappiness",
6150 .read_u64 = mem_cgroup_swappiness_read,
6151 .write_u64 = mem_cgroup_swappiness_write,
6152 },
6153 {
6154 .name = "move_charge_at_immigrate",
6155 .read_u64 = mem_cgroup_move_charge_read,
6156 .write_u64 = mem_cgroup_move_charge_write,
6157 },
6158 {
6159 .name = "oom_control",
6160 .seq_show = mem_cgroup_oom_control_read,
6161 .write_u64 = mem_cgroup_oom_control_write,
6162 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
6163 },
6164 {
6165 .name = "pressure_level",
6166 },
6167#ifdef CONFIG_NUMA
6168 {
6169 .name = "numa_stat",
6170 .seq_show = memcg_numa_stat_show,
6171 },
6172#endif
6173#ifdef CONFIG_MEMCG_KMEM
6174 {
6175 .name = "kmem.limit_in_bytes",
6176 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
6177 .write_string = mem_cgroup_write,
6178 .read_u64 = mem_cgroup_read_u64,
6179 },
6180 {
6181 .name = "kmem.usage_in_bytes",
6182 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
6183 .read_u64 = mem_cgroup_read_u64,
6184 },
6185 {
6186 .name = "kmem.failcnt",
6187 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
6188 .trigger = mem_cgroup_reset,
6189 .read_u64 = mem_cgroup_read_u64,
6190 },
6191 {
6192 .name = "kmem.max_usage_in_bytes",
6193 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
6194 .trigger = mem_cgroup_reset,
6195 .read_u64 = mem_cgroup_read_u64,
6196 },
6197#ifdef CONFIG_SLABINFO
6198 {
6199 .name = "kmem.slabinfo",
6200 .seq_show = mem_cgroup_slabinfo_read,
6201 },
6202#endif
6203#endif
6204 { }, /* terminate */
6205};
6206
6207#ifdef CONFIG_MEMCG_SWAP
6208static struct cftype memsw_cgroup_files[] = {
6209 {
6210 .name = "memsw.usage_in_bytes",
6211 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
6212 .read_u64 = mem_cgroup_read_u64,
6213 },
6214 {
6215 .name = "memsw.max_usage_in_bytes",
6216 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
6217 .trigger = mem_cgroup_reset,
6218 .read_u64 = mem_cgroup_read_u64,
6219 },
6220 {
6221 .name = "memsw.limit_in_bytes",
6222 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
6223 .write_string = mem_cgroup_write,
6224 .read_u64 = mem_cgroup_read_u64,
6225 },
6226 {
6227 .name = "memsw.failcnt",
6228 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
6229 .trigger = mem_cgroup_reset,
6230 .read_u64 = mem_cgroup_read_u64,
6231 },
6232 { }, /* terminate */
6233};
6234#endif
6235static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
6236{
6237 struct mem_cgroup_per_node *pn;
6238 struct mem_cgroup_per_zone *mz;
6239 int zone, tmp = node;
6240	/*
6241	 * This routine is called against all possible nodes, but it is a
6242	 * BUG to call kmalloc() against an offline node.
6243	 *
6244	 * TODO: this routine can waste a lot of memory on nodes that will
6245	 * never be onlined. It would be better to use a memory hotplug
6246	 * callback function.
6247	 */
6248 if (!node_state(node, N_NORMAL_MEMORY))
6249 tmp = -1;
6250 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
6251 if (!pn)
6252 return 1;
6253
6254 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
6255 mz = &pn->zoneinfo[zone];
6256 lruvec_init(&mz->lruvec);
6257 mz->usage_in_excess = 0;
6258 mz->on_tree = false;
6259 mz->memcg = memcg;
6260 }
6261 memcg->nodeinfo[node] = pn;
6262 return 0;
6263}
6264
6265static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
6266{
6267 kfree(memcg->nodeinfo[node]);
6268}
6269
6270static struct mem_cgroup *mem_cgroup_alloc(void)
6271{
6272 struct mem_cgroup *memcg;
6273 size_t size;
6274
6275 size = sizeof(struct mem_cgroup);
6276 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
6277
6278 memcg = kzalloc(size, GFP_KERNEL);
6279 if (!memcg)
6280 return NULL;
6281
6282 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
6283 if (!memcg->stat)
6284 goto out_free;
6285 spin_lock_init(&memcg->pcp_counter_lock);
6286 return memcg;
6287
6288out_free:
6289 kfree(memcg);
6290 return NULL;
6291}
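
/*
 * Sizing note: nodeinfo[] is a flexible trailing array, so on a
 * hypothetical two-node machine the allocation above is
 * sizeof(struct mem_cgroup) + 2 * sizeof(struct mem_cgroup_per_node *).
 */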
6292
6293/*
6294 * When a mem_cgroup is destroyed, references from swap_cgroup can remain.
6295 * (scanning all of them at force_empty is too costly...)
6296 *
6297 * Instead of clearing all references at force_empty, we remember
6298 * the number of references from swap_cgroup and free the mem_cgroup when
6299 * it goes down to 0.
6300 *
6301 * Removal of the cgroup itself succeeds regardless of refs from swap.
6302 */
6303
6304static void __mem_cgroup_free(struct mem_cgroup *memcg)
6305{
6306 int node;
6307
6308 mem_cgroup_remove_from_trees(memcg);
6309
6310 for_each_node(node)
6311 free_mem_cgroup_per_zone_info(memcg, node);
6312
6313 free_percpu(memcg->stat);
6314
6315 /*
6316 * We need to make sure that (at least for now), the jump label
6317 * destruction code runs outside of the cgroup lock. This is because
6318 * get_online_cpus(), which is called from the static_branch update,
6319 * can't be called inside the cgroup_lock. cpusets are the ones
6320 * enforcing this dependency, so if they ever change, we might as well.
6321 *
6322 * schedule_work() will guarantee this happens. Be careful if you need
6323 * to move this code around, and make sure it is outside
6324 * the cgroup_lock.
6325 */
6326 disarm_static_keys(memcg);
6327 kfree(memcg);
6328}
6329
6330/*
6331 * Returns the parent mem_cgroup in the memcg hierarchy, with hierarchy enabled.
6332 */
6333struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
6334{
6335 if (!memcg->res.parent)
6336 return NULL;
6337 return mem_cgroup_from_res_counter(memcg->res.parent, res);
6338}
6339EXPORT_SYMBOL(parent_mem_cgroup);
6340
6341static void __init mem_cgroup_soft_limit_tree_init(void)
6342{
6343 struct mem_cgroup_tree_per_node *rtpn;
6344 struct mem_cgroup_tree_per_zone *rtpz;
6345 int tmp, node, zone;
6346
6347 for_each_node(node) {
6348 tmp = node;
6349 if (!node_state(node, N_NORMAL_MEMORY))
6350 tmp = -1;
6351 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
6352 BUG_ON(!rtpn);
6353
6354 soft_limit_tree.rb_tree_per_node[node] = rtpn;
6355
6356 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
6357 rtpz = &rtpn->rb_tree_per_zone[zone];
6358 rtpz->rb_root = RB_ROOT;
6359 spin_lock_init(&rtpz->lock);
6360 }
6361 }
6362}
6363
6364static struct cgroup_subsys_state * __ref
6365mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
6366{
6367 struct mem_cgroup *memcg;
6368 long error = -ENOMEM;
6369 int node;
6370
6371 memcg = mem_cgroup_alloc();
6372 if (!memcg)
6373 return ERR_PTR(error);
6374
6375 for_each_node(node)
6376 if (alloc_mem_cgroup_per_zone_info(memcg, node))
6377 goto free_out;
6378
6379 /* root ? */
6380 if (parent_css == NULL) {
6381 root_mem_cgroup = memcg;
6382 res_counter_init(&memcg->res, NULL);
6383 res_counter_init(&memcg->memsw, NULL);
6384 res_counter_init(&memcg->kmem, NULL);
6385 }
6386
6387 memcg->last_scanned_node = MAX_NUMNODES;
6388 INIT_LIST_HEAD(&memcg->oom_notify);
6389 memcg->move_charge_at_immigrate = 0;
6390 mutex_init(&memcg->thresholds_lock);
6391 spin_lock_init(&memcg->move_lock);
6392 vmpressure_init(&memcg->vmpressure);
6393 INIT_LIST_HEAD(&memcg->event_list);
6394 spin_lock_init(&memcg->event_list_lock);
6395
6396 return &memcg->css;
6397
6398free_out:
6399 __mem_cgroup_free(memcg);
6400 return ERR_PTR(error);
6401}
6402
6403static int
6404mem_cgroup_css_online(struct cgroup_subsys_state *css)
6405{
6406 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6407 struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css));
6408
6409 if (css->cgroup->id > MEM_CGROUP_ID_MAX)
6410 return -ENOSPC;
6411
6412 if (!parent)
6413 return 0;
6414
6415 mutex_lock(&memcg_create_mutex);
6416
6417 memcg->use_hierarchy = parent->use_hierarchy;
6418 memcg->oom_kill_disable = parent->oom_kill_disable;
6419 memcg->swappiness = mem_cgroup_swappiness(parent);
6420
6421 if (parent->use_hierarchy) {
6422 res_counter_init(&memcg->res, &parent->res);
6423 res_counter_init(&memcg->memsw, &parent->memsw);
6424 res_counter_init(&memcg->kmem, &parent->kmem);
6425
6426 /*
6427 * No need to take a reference to the parent because cgroup
6428 * core guarantees its existence.
6429 */
6430 } else {
6431 res_counter_init(&memcg->res, NULL);
6432 res_counter_init(&memcg->memsw, NULL);
6433 res_counter_init(&memcg->kmem, NULL);
6434 /*
6435		 * Deeper hierarchy with use_hierarchy == false doesn't make
6436		 * much sense, so let the cgroup subsystem know about this
6437 * unfortunate state in our controller.
6438 */
6439 if (parent != root_mem_cgroup)
6440 memory_cgrp_subsys.broken_hierarchy = true;
6441 }
6442 mutex_unlock(&memcg_create_mutex);
6443
6444 return memcg_init_kmem(memcg, &memory_cgrp_subsys);
6445}
6446
6447/*
6448 * Notify all parents that a group from their hierarchy is gone.
6449 */
6450static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
6451{
6452 struct mem_cgroup *parent = memcg;
6453
6454 while ((parent = parent_mem_cgroup(parent)))
6455 mem_cgroup_iter_invalidate(parent);
6456
6457 /*
6458	 * If the root memcg is not hierarchical, we have to check it
6459	 * explicitly.
6460 */
6461 if (!root_mem_cgroup->use_hierarchy)
6462 mem_cgroup_iter_invalidate(root_mem_cgroup);
6463}
6464
6465static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
6466{
6467 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6468 struct mem_cgroup_event *event, *tmp;
6469 struct cgroup_subsys_state *iter;
6470
6471 /*
6472 * Unregister events and notify userspace.
6473	 * Notify userspace about cgroup removal only after rmdir of the cgroup
6474	 * directory, to avoid a race between userspace and kernelspace.
6475 */
6476 spin_lock(&memcg->event_list_lock);
6477 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
6478 list_del_init(&event->list);
6479 schedule_work(&event->remove);
6480 }
6481 spin_unlock(&memcg->event_list_lock);
6482
6483 kmem_cgroup_css_offline(memcg);
6484
6485 mem_cgroup_invalidate_reclaim_iterators(memcg);
6486
6487 /*
6488 * This requires that offlining is serialized. Right now that is
6489 * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
6490 */
6491 css_for_each_descendant_post(iter, css)
6492 mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
6493
6494 mem_cgroup_destroy_all_caches(memcg);
6495 vmpressure_cleanup(&memcg->vmpressure);
6496}
6497
6498static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
6499{
6500 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6501 /*
6502 * XXX: css_offline() would be where we should reparent all
6503 * memory to prepare the cgroup for destruction. However,
6504 * memcg does not do css_tryget() and res_counter charging
6505 * under the same RCU lock region, which means that charging
6506 * could race with offlining. Offlining only happens to
6507 * cgroups with no tasks in them but charges can show up
6508 * without any tasks from the swapin path when the target
6509 * memcg is looked up from the swapout record and not from the
6510 * current task as it usually is. A race like this can leak
6511 * charges and put pages with stale cgroup pointers into
6512 * circulation:
6513 *
6514 * #0 #1
6515 * lookup_swap_cgroup_id()
6516 * rcu_read_lock()
6517 * mem_cgroup_lookup()
6518 * css_tryget()
6519 * rcu_read_unlock()
6520 * disable css_tryget()
6521 * call_rcu()
6522 * offline_css()
6523 * reparent_charges()
6524 * res_counter_charge()
6525 * css_put()
6526 * css_free()
6527 * pc->mem_cgroup = dead memcg
6528 * add page to lru
6529 *
6530 * The bulk of the charges are still moved in offline_css() to
6531 * avoid pinning a lot of pages in case a long-term reference
6532 * like a swapout record is deferring the css_free() to long
6533 * after offlining. But this makes sure we catch any charges
6534 * made after offlining:
6535 */
6536 mem_cgroup_reparent_charges(memcg);
6537
6538 memcg_destroy_kmem(memcg);
6539 __mem_cgroup_free(memcg);
6540}
6541
6542#ifdef CONFIG_MMU
6543/* Handlers for move charge at task migration. */
6544#define PRECHARGE_COUNT_AT_ONCE 256
6545static int mem_cgroup_do_precharge(unsigned long count)
6546{
6547 int ret = 0;
6548 int batch_count = PRECHARGE_COUNT_AT_ONCE;
6549 struct mem_cgroup *memcg = mc.to;
6550
6551 if (mem_cgroup_is_root(memcg)) {
6552 mc.precharge += count;
6553 /* we don't need css_get for root */
6554 return ret;
6555 }
6556 /* try to charge at once */
6557 if (count > 1) {
6558 struct res_counter *dummy;
6559 /*
6560		 * "memcg" cannot be under rmdir() because we've already checked
6561		 * via cgroup_lock_live_cgroup() that it is not removed, and we
6562		 * are still under the same cgroup_mutex. So we can postpone
6563		 * css_get().
6564 */
6565 if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
6566 goto one_by_one;
6567 if (do_swap_account && res_counter_charge(&memcg->memsw,
6568 PAGE_SIZE * count, &dummy)) {
6569 res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
6570 goto one_by_one;
6571 }
6572 mc.precharge += count;
6573 return ret;
6574 }
6575one_by_one:
6576 /* fall back to one by one charge */
6577 while (count--) {
6578 if (signal_pending(current)) {
6579 ret = -EINTR;
6580 break;
6581 }
6582 if (!batch_count--) {
6583 batch_count = PRECHARGE_COUNT_AT_ONCE;
6584 cond_resched();
6585 }
6586 ret = mem_cgroup_try_charge(memcg, GFP_KERNEL, 1, false);
6587 if (ret)
6588 /* mem_cgroup_clear_mc() will do uncharge later */
6589 return ret;
6590 mc.precharge++;
6591 }
6592 return ret;
6593}
6594
6595/**
6596 * get_mctgt_type - get target type of moving charge
6597 * @vma: the vma to which the pte to be checked belongs
6598 * @addr: the address corresponding to the pte to be checked
6599 * @ptent: the pte to be checked
6600 * @target: pointer where the target page or swap entry is stored (can be NULL)
6601 *
6602 * Returns
6603 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
6604 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
6605 *     move charge. If @target is not NULL, the page is stored in target->page
6606 *     with an extra refcount taken (callers must handle it).
6607 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
6608 *     target for charge migration. If @target is not NULL, the entry is
6609 *     stored in target->ent.
6610 *
6611 * Called with pte lock held.
6612 */
6613union mc_target {
6614 struct page *page;
6615 swp_entry_t ent;
6616};
6617
6618enum mc_target_type {
6619 MC_TARGET_NONE = 0,
6620 MC_TARGET_PAGE,
6621 MC_TARGET_SWAP,
6622};
6623
6624static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
6625 unsigned long addr, pte_t ptent)
6626{
6627 struct page *page = vm_normal_page(vma, addr, ptent);
6628
6629 if (!page || !page_mapped(page))
6630 return NULL;
6631 if (PageAnon(page)) {
6632 /* we don't move shared anon */
6633 if (!move_anon())
6634 return NULL;
6635 } else if (!move_file())
6636 /* we ignore mapcount for file pages */
6637 return NULL;
6638 if (!get_page_unless_zero(page))
6639 return NULL;
6640
6641 return page;
6642}
6643
6644#ifdef CONFIG_SWAP
6645static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
6646 unsigned long addr, pte_t ptent, swp_entry_t *entry)
6647{
6648 struct page *page = NULL;
6649 swp_entry_t ent = pte_to_swp_entry(ptent);
6650
6651 if (!move_anon() || non_swap_entry(ent))
6652 return NULL;
6653 /*
6654 * Because lookup_swap_cache() updates some statistics counter,
6655 * we call find_get_page() with swapper_space directly.
6656 */
6657 page = find_get_page(swap_address_space(ent), ent.val);
6658 if (do_swap_account)
6659 entry->val = ent.val;
6660
6661 return page;
6662}
6663#else
6664static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
6665 unsigned long addr, pte_t ptent, swp_entry_t *entry)
6666{
6667 return NULL;
6668}
6669#endif
6670
6671static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
6672 unsigned long addr, pte_t ptent, swp_entry_t *entry)
6673{
6674 struct page *page = NULL;
6675 struct address_space *mapping;
6676 pgoff_t pgoff;
6677
6678 if (!vma->vm_file) /* anonymous vma */
6679 return NULL;
6680 if (!move_file())
6681 return NULL;
6682
6683 mapping = vma->vm_file->f_mapping;
6684 if (pte_none(ptent))
6685 pgoff = linear_page_index(vma, addr);
6686 else /* pte_file(ptent) is true */
6687 pgoff = pte_to_pgoff(ptent);
6688
6689	/* the page is moved even if it isn't in this task's RSS (never faulted in) */
6690#ifdef CONFIG_SWAP
6691 /* shmem/tmpfs may report page out on swap: account for that too. */
6692 if (shmem_mapping(mapping)) {
6693 page = find_get_entry(mapping, pgoff);
6694 if (radix_tree_exceptional_entry(page)) {
6695 swp_entry_t swp = radix_to_swp_entry(page);
6696 if (do_swap_account)
6697 *entry = swp;
6698 page = find_get_page(swap_address_space(swp), swp.val);
6699 }
6700 } else
6701 page = find_get_page(mapping, pgoff);
6702#else
6703 page = find_get_page(mapping, pgoff);
6704#endif
6705 return page;
6706}
6707
6708static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
6709 unsigned long addr, pte_t ptent, union mc_target *target)
6710{
6711 struct page *page = NULL;
6712 struct page_cgroup *pc;
6713 enum mc_target_type ret = MC_TARGET_NONE;
6714 swp_entry_t ent = { .val = 0 };
6715
6716 if (pte_present(ptent))
6717 page = mc_handle_present_pte(vma, addr, ptent);
6718 else if (is_swap_pte(ptent))
6719 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
6720 else if (pte_none(ptent) || pte_file(ptent))
6721 page = mc_handle_file_pte(vma, addr, ptent, &ent);
6722
6723 if (!page && !ent.val)
6724 return ret;
6725 if (page) {
6726 pc = lookup_page_cgroup(page);
6727		/*
6728		 * Do only a loose check without the page_cgroup lock;
6729		 * mem_cgroup_move_account() checks whether the pc is valid
6730		 * under the lock.
6731		 */
6732 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
6733 ret = MC_TARGET_PAGE;
6734 if (target)
6735 target->page = page;
6736 }
6737 if (!ret || !target)
6738 put_page(page);
6739 }
6740	/* There is a swap entry, and the page doesn't exist or isn't charged */
6741 if (ent.val && !ret &&
6742 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
6743 ret = MC_TARGET_SWAP;
6744 if (target)
6745 target->ent = ent;
6746 }
6747 return ret;
6748}
6749
6750#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6751/*
6752 * We don't consider swapping or file mapped pages because THP does not
6753 * support them for now.
6754 * Caller should make sure that pmd_trans_huge(pmd) is true.
6755 */
6756static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6757 unsigned long addr, pmd_t pmd, union mc_target *target)
6758{
6759 struct page *page = NULL;
6760 struct page_cgroup *pc;
6761 enum mc_target_type ret = MC_TARGET_NONE;
6762
6763 page = pmd_page(pmd);
6764 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
6765 if (!move_anon())
6766 return ret;
6767 pc = lookup_page_cgroup(page);
6768 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
6769 ret = MC_TARGET_PAGE;
6770 if (target) {
6771 get_page(page);
6772 target->page = page;
6773 }
6774 }
6775 return ret;
6776}
6777#else
6778static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6779 unsigned long addr, pmd_t pmd, union mc_target *target)
6780{
6781 return MC_TARGET_NONE;
6782}
6783#endif
6784
6785static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
6786 unsigned long addr, unsigned long end,
6787 struct mm_walk *walk)
6788{
6789 struct vm_area_struct *vma = walk->private;
6790 pte_t *pte;
6791 spinlock_t *ptl;
6792
6793 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
6794 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
6795 mc.precharge += HPAGE_PMD_NR;
6796 spin_unlock(ptl);
6797 return 0;
6798 }
6799
6800 if (pmd_trans_unstable(pmd))
6801 return 0;
6802 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6803 for (; addr != end; pte++, addr += PAGE_SIZE)
6804 if (get_mctgt_type(vma, addr, *pte, NULL))
6805 mc.precharge++; /* increment precharge temporarily */
6806 pte_unmap_unlock(pte - 1, ptl);
6807 cond_resched();
6808
6809 return 0;
6810}
6811
6812static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
6813{
6814 unsigned long precharge;
6815 struct vm_area_struct *vma;
6816
6817 down_read(&mm->mmap_sem);
6818 for (vma = mm->mmap; vma; vma = vma->vm_next) {
6819 struct mm_walk mem_cgroup_count_precharge_walk = {
6820 .pmd_entry = mem_cgroup_count_precharge_pte_range,
6821 .mm = mm,
6822 .private = vma,
6823 };
6824 if (is_vm_hugetlb_page(vma))
6825 continue;
6826 walk_page_range(vma->vm_start, vma->vm_end,
6827 &mem_cgroup_count_precharge_walk);
6828 }
6829 up_read(&mm->mmap_sem);
6830
6831 precharge = mc.precharge;
6832 mc.precharge = 0;
6833
6834 return precharge;
6835}
6836
6837static int mem_cgroup_precharge_mc(struct mm_struct *mm)
6838{
6839 unsigned long precharge = mem_cgroup_count_precharge(mm);
6840
6841 VM_BUG_ON(mc.moving_task);
6842 mc.moving_task = current;
6843 return mem_cgroup_do_precharge(precharge);
6844}
6845
6846/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6847static void __mem_cgroup_clear_mc(void)
6848{
6849 struct mem_cgroup *from = mc.from;
6850 struct mem_cgroup *to = mc.to;
6851 int i;
6852
6853 /* we must uncharge all the leftover precharges from mc.to */
6854 if (mc.precharge) {
6855 __mem_cgroup_cancel_charge(mc.to, mc.precharge);
6856 mc.precharge = 0;
6857 }
6858 /*
6859 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6860 * we must uncharge here.
6861 */
6862 if (mc.moved_charge) {
6863 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
6864 mc.moved_charge = 0;
6865 }
6866 /* we must fixup refcnts and charges */
6867 if (mc.moved_swap) {
6868 /* uncharge swap account from the old cgroup */
6869 if (!mem_cgroup_is_root(mc.from))
6870 res_counter_uncharge(&mc.from->memsw,
6871 PAGE_SIZE * mc.moved_swap);
6872
6873 for (i = 0; i < mc.moved_swap; i++)
6874 css_put(&mc.from->css);
6875
6876 if (!mem_cgroup_is_root(mc.to)) {
6877 /*
6878 * we charged both to->res and to->memsw, so we should
6879 * uncharge to->res.
6880 */
6881 res_counter_uncharge(&mc.to->res,
6882 PAGE_SIZE * mc.moved_swap);
6883 }
6884 /* we've already done css_get(mc.to) */
6885 mc.moved_swap = 0;
6886 }
6887 memcg_oom_recover(from);
6888 memcg_oom_recover(to);
6889 wake_up_all(&mc.waitq);
6890}
6891
6892static void mem_cgroup_clear_mc(void)
6893{
6894 struct mem_cgroup *from = mc.from;
6895
6896 /*
6897 * we must clear moving_task before waking up waiters at the end of
6898 * task migration.
6899 */
6900 mc.moving_task = NULL;
6901 __mem_cgroup_clear_mc();
6902 spin_lock(&mc.lock);
6903 mc.from = NULL;
6904 mc.to = NULL;
6905 spin_unlock(&mc.lock);
6906 mem_cgroup_end_move(from);
6907}
6908
6909static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
6910 struct cgroup_taskset *tset)
6911{
6912 struct task_struct *p = cgroup_taskset_first(tset);
6913 int ret = 0;
6914 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6915 unsigned long move_charge_at_immigrate;
6916
6917 /*
6918	 * We are now committed to this value, whatever it is. Changes in this
6919	 * tunable will only affect upcoming migrations, not the current one,
6920	 * so we need to save it and keep using it.
6921 */
6922 move_charge_at_immigrate = memcg->move_charge_at_immigrate;
6923 if (move_charge_at_immigrate) {
6924 struct mm_struct *mm;
6925 struct mem_cgroup *from = mem_cgroup_from_task(p);
6926
6927 VM_BUG_ON(from == memcg);
6928
6929 mm = get_task_mm(p);
6930 if (!mm)
6931 return 0;
6932		/* We move charges only when we move the owner of the mm */
6933 if (mm->owner == p) {
6934 VM_BUG_ON(mc.from);
6935 VM_BUG_ON(mc.to);
6936 VM_BUG_ON(mc.precharge);
6937 VM_BUG_ON(mc.moved_charge);
6938 VM_BUG_ON(mc.moved_swap);
6939 mem_cgroup_start_move(from);
6940 spin_lock(&mc.lock);
6941 mc.from = from;
6942 mc.to = memcg;
6943 mc.immigrate_flags = move_charge_at_immigrate;
6944 spin_unlock(&mc.lock);
6945 /* We set mc.moving_task later */
6946
6947 ret = mem_cgroup_precharge_mc(mm);
6948 if (ret)
6949 mem_cgroup_clear_mc();
6950 }
6951 mmput(mm);
6952 }
6953 return ret;
6954}
6955
6956static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
6957 struct cgroup_taskset *tset)
6958{
6959 mem_cgroup_clear_mc();
6960}
6961
6962static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6963 unsigned long addr, unsigned long end,
6964 struct mm_walk *walk)
6965{
6966 int ret = 0;
6967 struct vm_area_struct *vma = walk->private;
6968 pte_t *pte;
6969 spinlock_t *ptl;
6970 enum mc_target_type target_type;
6971 union mc_target target;
6972 struct page *page;
6973 struct page_cgroup *pc;
6974
6975 /*
6976 * We don't take compound_lock() here but no race with splitting thp
6977 * happens because:
6978 * - if pmd_trans_huge_lock() returns 1, the relevant thp is not
6979 * under splitting, which means there's no concurrent thp split,
6980 * - if another thread runs into split_huge_page() just after we
6981 * entered this if-block, the thread must wait for page table lock
6982 * to be unlocked in __split_huge_page_splitting(), where the main
6983 * part of thp split is not executed yet.
6984 */
6985 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
6986 if (mc.precharge < HPAGE_PMD_NR) {
6987 spin_unlock(ptl);
6988 return 0;
6989 }
6990 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6991 if (target_type == MC_TARGET_PAGE) {
6992 page = target.page;
6993 if (!isolate_lru_page(page)) {
6994 pc = lookup_page_cgroup(page);
6995 if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
6996 pc, mc.from, mc.to)) {
6997 mc.precharge -= HPAGE_PMD_NR;
6998 mc.moved_charge += HPAGE_PMD_NR;
6999 }
7000 putback_lru_page(page);
7001 }
7002 put_page(page);
7003 }
7004 spin_unlock(ptl);
7005 return 0;
7006 }
7007
7008 if (pmd_trans_unstable(pmd))
7009 return 0;
7010retry:
7011 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
7012 for (; addr != end; addr += PAGE_SIZE) {
7013 pte_t ptent = *(pte++);
7014 swp_entry_t ent;
7015
7016 if (!mc.precharge)
7017 break;
7018
7019 switch (get_mctgt_type(vma, addr, ptent, &target)) {
7020 case MC_TARGET_PAGE:
7021 page = target.page;
7022 if (isolate_lru_page(page))
7023 goto put;
7024 pc = lookup_page_cgroup(page);
7025 if (!mem_cgroup_move_account(page, 1, pc,
7026 mc.from, mc.to)) {
7027 mc.precharge--;
7028 /* we uncharge from mc.from later. */
7029 mc.moved_charge++;
7030 }
7031 putback_lru_page(page);
7032put: /* get_mctgt_type() gets the page */
7033 put_page(page);
7034 break;
7035 case MC_TARGET_SWAP:
7036 ent = target.ent;
7037 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
7038 mc.precharge--;
7039 /* we fixup refcnts and charges later. */
7040 mc.moved_swap++;
7041 }
7042 break;
7043 default:
7044 break;
7045 }
7046 }
7047 pte_unmap_unlock(pte - 1, ptl);
7048 cond_resched();
7049
7050 if (addr != end) {
7051 /*
7052 * We have consumed all precharges we got in can_attach().
7053 * We try charge one by one, but don't do any additional
7054 * charges to mc.to if we have failed in charge once in attach()
7055 * phase.
7056 */
7057 ret = mem_cgroup_do_precharge(1);
7058 if (!ret)
7059 goto retry;
7060 }
7061
7062 return ret;
7063}
7064
7065static void mem_cgroup_move_charge(struct mm_struct *mm)
7066{
7067 struct vm_area_struct *vma;
7068
7069 lru_add_drain_all();
7070retry:
7071 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
7072 /*
7073		 * Someone who is holding the mmap_sem might be waiting on the
7074		 * waitq. So we cancel all extra charges, wake up all waiters,
7075 * and retry. Because we cancel precharges, we might not be able
7076 * to move enough charges, but moving charge is a best-effort
7077 * feature anyway, so it wouldn't be a big problem.
7078 */
7079 __mem_cgroup_clear_mc();
7080 cond_resched();
7081 goto retry;
7082 }
7083 for (vma = mm->mmap; vma; vma = vma->vm_next) {
7084 int ret;
7085 struct mm_walk mem_cgroup_move_charge_walk = {
7086 .pmd_entry = mem_cgroup_move_charge_pte_range,
7087 .mm = mm,
7088 .private = vma,
7089 };
7090 if (is_vm_hugetlb_page(vma))
7091 continue;
7092 ret = walk_page_range(vma->vm_start, vma->vm_end,
7093 &mem_cgroup_move_charge_walk);
7094 if (ret)
7095 /*
7096			 * This means we have consumed all precharges and failed
7097			 * to make an additional charge. Just abandon here.
7098 */
7099 break;
7100 }
7101 up_read(&mm->mmap_sem);
7102}
7103
7104static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
7105 struct cgroup_taskset *tset)
7106{
7107 struct task_struct *p = cgroup_taskset_first(tset);
7108 struct mm_struct *mm = get_task_mm(p);
7109
7110 if (mm) {
7111 if (mc.to)
7112 mem_cgroup_move_charge(mm);
7113 mmput(mm);
7114 }
7115 if (mc.to)
7116 mem_cgroup_clear_mc();
7117}
7118#else /* !CONFIG_MMU */
7119static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
7120 struct cgroup_taskset *tset)
7121{
7122 return 0;
7123}
7124static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
7125 struct cgroup_taskset *tset)
7126{
7127}
7128static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
7129 struct cgroup_taskset *tset)
7130{
7131}
7132#endif
7133
7134/*
7135 * Cgroup retains root cgroups across [un]mount cycles making it necessary
7136 * to verify sane_behavior flag on each mount attempt.
7137 */
7138static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
7139{
7140 /*
7141 * use_hierarchy is forced with sane_behavior. cgroup core
7142 * guarantees that @root doesn't have any children, so turning it
7143 * on for the root memcg is enough.
7144 */
7145 if (cgroup_sane_behavior(root_css->cgroup))
7146 mem_cgroup_from_css(root_css)->use_hierarchy = true;
7147}
7148
7149struct cgroup_subsys memory_cgrp_subsys = {
7150 .css_alloc = mem_cgroup_css_alloc,
7151 .css_online = mem_cgroup_css_online,
7152 .css_offline = mem_cgroup_css_offline,
7153 .css_free = mem_cgroup_css_free,
7154 .can_attach = mem_cgroup_can_attach,
7155 .cancel_attach = mem_cgroup_cancel_attach,
7156 .attach = mem_cgroup_move_task,
7157 .bind = mem_cgroup_bind,
7158 .base_cftypes = mem_cgroup_files,
7159 .early_init = 0,
7160};
7161
7162#ifdef CONFIG_MEMCG_SWAP
7163static int __init enable_swap_account(char *s)
7164{
7165 if (!strcmp(s, "1"))
7166 really_do_swap_account = 1;
7167 else if (!strcmp(s, "0"))
7168 really_do_swap_account = 0;
7169 return 1;
7170}
7171__setup("swapaccount=", enable_swap_account);
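
/*
 * Boot-time usage (illustrative): passing "swapaccount=0" on the kernel
 * command line keeps CONFIG_MEMCG_SWAP compiled in but skips creating
 * the memsw.* files and the per-swap-entry accounting overhead.
 */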
7172
7173static void __init memsw_file_init(void)
7174{
7175 WARN_ON(cgroup_add_cftypes(&memory_cgrp_subsys, memsw_cgroup_files));
7176}
7177
7178static void __init enable_swap_cgroup(void)
7179{
7180 if (!mem_cgroup_disabled() && really_do_swap_account) {
7181 do_swap_account = 1;
7182 memsw_file_init();
7183 }
7184}
7185
7186#else
7187static void __init enable_swap_cgroup(void)
7188{
7189}
7190#endif
7191
7192/*
7193 * subsys_initcall() for memory controller.
7194 *
7195 * Some parts like hotcpu_notifier() have to be initialized from this context
7196 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
7197 * everything that doesn't depend on a specific mem_cgroup structure should
7198 * be initialized from here.
7199 */
7200static int __init mem_cgroup_init(void)
7201{
7202 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
7203 enable_swap_cgroup();
7204 mem_cgroup_soft_limit_tree_init();
7205 memcg_stock_init();
7206 return 0;
7207}
7208subsys_initcall(mem_cgroup_init);