1/* memcontrol.c - Memory Controller
2 *
3 * Copyright IBM Corporation, 2007
4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5 *
6 * Copyright 2007 OpenVZ SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 *
9 * Memory thresholds
10 * Copyright (C) 2009 Nokia Corporation
11 * Author: Kirill A. Shutemov
12 *
13 * Kernel Memory Controller
14 * Copyright (C) 2012 Parallels Inc. and Google Inc.
15 * Authors: Glauber Costa and Suleiman Souhlal
16 *
17 * Native page reclaim
18 * Charge lifetime sanitation
19 * Lockless page tracking & accounting
20 * Unified hierarchy configuration model
21 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
22 *
23 * This program is free software; you can redistribute it and/or modify
24 * it under the terms of the GNU General Public License as published by
25 * the Free Software Foundation; either version 2 of the License, or
26 * (at your option) any later version.
27 *
28 * This program is distributed in the hope that it will be useful,
29 * but WITHOUT ANY WARRANTY; without even the implied warranty of
30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
31 * GNU General Public License for more details.
32 */
33
34#include <linux/page_counter.h>
35#include <linux/memcontrol.h>
36#include <linux/cgroup.h>
37#include <linux/mm.h>
38#include <linux/hugetlb.h>
39#include <linux/pagemap.h>
40#include <linux/smp.h>
41#include <linux/page-flags.h>
42#include <linux/backing-dev.h>
43#include <linux/bit_spinlock.h>
44#include <linux/rcupdate.h>
45#include <linux/limits.h>
46#include <linux/export.h>
47#include <linux/mutex.h>
48#include <linux/rbtree.h>
49#include <linux/slab.h>
50#include <linux/swap.h>
51#include <linux/swapops.h>
52#include <linux/spinlock.h>
53#include <linux/eventfd.h>
54#include <linux/poll.h>
55#include <linux/sort.h>
56#include <linux/fs.h>
57#include <linux/seq_file.h>
58#include <linux/vmpressure.h>
59#include <linux/mm_inline.h>
60#include <linux/swap_cgroup.h>
61#include <linux/cpu.h>
62#include <linux/oom.h>
63#include <linux/lockdep.h>
64#include <linux/file.h>
65#include <linux/tracehook.h>
66#include "internal.h"
67#include <net/sock.h>
68#include <net/ip.h>
69#include "slab.h"
70
71#include <asm/uaccess.h>
72
73#include <trace/events/vmscan.h>
74
75struct cgroup_subsys memory_cgrp_subsys __read_mostly;
76EXPORT_SYMBOL(memory_cgrp_subsys);
77
78struct mem_cgroup *root_mem_cgroup __read_mostly;
79
80#define MEM_CGROUP_RECLAIM_RETRIES 5
81
82/* Socket memory accounting disabled? */
83static bool cgroup_memory_nosocket;
84
85/* Kernel memory accounting disabled? */
86static bool cgroup_memory_nokmem;
87
88/* Whether the swap controller is active */
89#ifdef CONFIG_MEMCG_SWAP
90int do_swap_account __read_mostly;
91#else
92#define do_swap_account 0
93#endif
94
95/* Whether legacy memory+swap accounting is active */
96static bool do_memsw_account(void)
97{
98 return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
99}
100
101static const char * const mem_cgroup_stat_names[] = {
102 "cache",
103 "rss",
104 "rss_huge",
105 "mapped_file",
106 "dirty",
107 "writeback",
108 "swap",
109};
110
111static const char * const mem_cgroup_events_names[] = {
112 "pgpgin",
113 "pgpgout",
114 "pgfault",
115 "pgmajfault",
116};
117
118static const char * const mem_cgroup_lru_names[] = {
119 "inactive_anon",
120 "active_anon",
121 "inactive_file",
122 "active_file",
123 "unevictable",
124};
125
126#define THRESHOLDS_EVENTS_TARGET 128
127#define SOFTLIMIT_EVENTS_TARGET 1024
128#define NUMAINFO_EVENTS_TARGET 1024
129
130/*
131 * Cgroups above their limits are maintained in an RB-tree, independent of
132 * their hierarchy representation
133 */
134
135struct mem_cgroup_tree_per_zone {
136 struct rb_root rb_root;
137 spinlock_t lock;
138};
139
140struct mem_cgroup_tree_per_node {
141 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
142};
143
144struct mem_cgroup_tree {
145 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
146};
147
148static struct mem_cgroup_tree soft_limit_tree __read_mostly;
149
150/* for OOM */
151struct mem_cgroup_eventfd_list {
152 struct list_head list;
153 struct eventfd_ctx *eventfd;
154};
155
156/*
157 * cgroup_event represents events which userspace wants to receive.
158 */
159struct mem_cgroup_event {
160 /*
161 * memcg which the event belongs to.
162 */
163 struct mem_cgroup *memcg;
164 /*
165 * eventfd to signal userspace about the event.
166 */
167 struct eventfd_ctx *eventfd;
168 /*
169 * Each of these is stored in a list by the cgroup.
170 */
171 struct list_head list;
172 /*
173 * register_event() callback will be used to add a new userspace
174 * waiter for changes related to this event. Use eventfd_signal()
175 * on eventfd to send notification to userspace.
176 */
177 int (*register_event)(struct mem_cgroup *memcg,
178 struct eventfd_ctx *eventfd, const char *args);
179 /*
180 * unregister_event() callback will be called when userspace closes
181 * the eventfd or when the cgroup is removed. This callback must be set
182 * if you want to provide notification functionality.
183 */
184 void (*unregister_event)(struct mem_cgroup *memcg,
185 struct eventfd_ctx *eventfd);
186 /*
187 * All fields below needed to unregister event when
188 * userspace closes eventfd.
189 */
190 poll_table pt;
191 wait_queue_head_t *wqh;
192 wait_queue_t wait;
193 struct work_struct remove;
194};
195
196static void mem_cgroup_threshold(struct mem_cgroup *memcg);
197static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
198
199/* Stuff for moving charges at task migration. */
200/*
201 * Types of charges to be moved.
202 */
203#define MOVE_ANON 0x1U
204#define MOVE_FILE 0x2U
205#define MOVE_MASK (MOVE_ANON | MOVE_FILE)
206
207/* "mc" and its members are protected by cgroup_mutex */
208static struct move_charge_struct {
209 spinlock_t lock; /* for from, to */
210 struct mm_struct *mm;
211 struct mem_cgroup *from;
212 struct mem_cgroup *to;
213 unsigned long flags;
214 unsigned long precharge;
215 unsigned long moved_charge;
216 unsigned long moved_swap;
217 struct task_struct *moving_task; /* a task moving charges */
218 wait_queue_head_t waitq; /* a waitq for other context */
219} mc = {
220 .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
221 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
222};
223
224/*
225 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
226 * limit reclaim to prevent infinite loops, if they ever occur.
227 */
228#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
229#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
230
231enum charge_type {
232 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
233 MEM_CGROUP_CHARGE_TYPE_ANON,
234 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
235 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */
236 NR_CHARGE_TYPE,
237};
238
239/* for encoding cft->private value on file */
240enum res_type {
241 _MEM,
242 _MEMSWAP,
243 _OOM_TYPE,
244 _KMEM,
245 _TCP,
246};
247
248#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
249#define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
250#define MEMFILE_ATTR(val) ((val) & 0xffff)
251/* Used for OOM notifier */
252#define OOM_CONTROL (0)
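
/*
 * As a quick illustration of the encoding above: MEMFILE_PRIVATE() packs
 * the resource type into the upper 16 bits and the attribute into the
 * lower 16 bits, and the two accessors recover them again, e.g.
 *
 *   MEMFILE_TYPE(MEMFILE_PRIVATE(_MEMSWAP, OOM_CONTROL)) == _MEMSWAP
 *   MEMFILE_ATTR(MEMFILE_PRIVATE(_MEMSWAP, OOM_CONTROL)) == OOM_CONTROL
 */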
253
254/* Some nice accessors for the vmpressure. */
255struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
256{
257 if (!memcg)
258 memcg = root_mem_cgroup;
259 return &memcg->vmpressure;
260}
261
262struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
263{
264 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
265}
266
267static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
268{
269 return (memcg == root_mem_cgroup);
270}
271
272#ifndef CONFIG_SLOB
273/*
274 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
275 * The main reason for not using cgroup id for this:
276 * this works better in sparse environments, where we have a lot of memcgs
277 * but only a few of them are kmem-limited. If, for instance, we had 200
278 * memcgs and only the 200th were kmem-limited, we'd still need a 200-entry
279 * array for it.
280 *
281 * The current size of the caches array is stored in memcg_nr_cache_ids. It
282 * will double each time we have to increase it.
283 */
284static DEFINE_IDA(memcg_cache_ida);
285int memcg_nr_cache_ids;
286
287/* Protects memcg_nr_cache_ids */
288static DECLARE_RWSEM(memcg_cache_ids_sem);
289
290void memcg_get_cache_ids(void)
291{
292 down_read(&memcg_cache_ids_sem);
293}
294
295void memcg_put_cache_ids(void)
296{
297 up_read(&memcg_cache_ids_sem);
298}
299
300/*
301 * MIN_SIZE is different from 1, because we would like to avoid going through
302 * the alloc/free process all the time. In a small machine, 4 kmem-limited
303 * cgroups is a reasonable guess. In the future, it could be a parameter or
304 * tunable, but that is strictly not necessary.
305 *
306 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
307 * this constant directly from cgroup, but it is understandable that this is
308 * better kept as an internal representation in cgroup.c. In any case, the
309 * cgrp_id space is not getting any smaller, and we don't have to necessarily
310 * increase ours as well if it increases.
311 */
312#define MEMCG_CACHES_MIN_SIZE 4
313#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
314
315/*
316 * A lot of the calls to the cache allocation functions are expected to be
317 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
318 * conditional on this static branch, we'll have to allow modules that do
319 * kmem_cache_alloc and the like to see this symbol as well.
320 */
321DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
322EXPORT_SYMBOL(memcg_kmem_enabled_key);
323
324#endif /* !CONFIG_SLOB */
325
326static struct mem_cgroup_per_zone *
327mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
328{
329 int nid = zone_to_nid(zone);
330 int zid = zone_idx(zone);
331
332 return &memcg->nodeinfo[nid]->zoneinfo[zid];
333}
334
335/**
336 * mem_cgroup_css_from_page - css of the memcg associated with a page
337 * @page: page of interest
338 *
339 * If memcg is bound to the default hierarchy, css of the memcg associated
340 * with @page is returned. The returned css remains associated with @page
341 * until it is released.
342 *
343 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
344 * is returned.
345 */
346struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
347{
348 struct mem_cgroup *memcg;
349
350 memcg = page->mem_cgroup;
351
352 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
353 memcg = root_mem_cgroup;
354
355 return &memcg->css;
356}
357
358/**
359 * page_cgroup_ino - return inode number of the memcg a page is charged to
360 * @page: the page
361 *
362 * Look up the closest online ancestor of the memory cgroup @page is charged to
363 * and return its inode number or 0 if @page is not charged to any cgroup. It
364 * is safe to call this function without holding a reference to @page.
365 *
366 * Note, this function is inherently racy, because there is nothing to prevent
367 * the cgroup inode from getting torn down and potentially reallocated a moment
368 * after page_cgroup_ino() returns, so it only should be used by callers that
369 * do not care (such as procfs interfaces).
370 */
371ino_t page_cgroup_ino(struct page *page)
372{
373 struct mem_cgroup *memcg;
374 unsigned long ino = 0;
375
376 rcu_read_lock();
377 memcg = READ_ONCE(page->mem_cgroup);
378 while (memcg && !(memcg->css.flags & CSS_ONLINE))
379 memcg = parent_mem_cgroup(memcg);
380 if (memcg)
381 ino = cgroup_ino(memcg->css.cgroup);
382 rcu_read_unlock();
383 return ino;
384}
385
386static struct mem_cgroup_per_zone *
387mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
388{
389 int nid = page_to_nid(page);
390 int zid = page_zonenum(page);
391
392 return &memcg->nodeinfo[nid]->zoneinfo[zid];
393}
394
395static struct mem_cgroup_tree_per_zone *
396soft_limit_tree_node_zone(int nid, int zid)
397{
398 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
399}
400
401static struct mem_cgroup_tree_per_zone *
402soft_limit_tree_from_page(struct page *page)
403{
404 int nid = page_to_nid(page);
405 int zid = page_zonenum(page);
406
407 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
408}
409
410static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz,
411 struct mem_cgroup_tree_per_zone *mctz,
412 unsigned long new_usage_in_excess)
413{
414 struct rb_node **p = &mctz->rb_root.rb_node;
415 struct rb_node *parent = NULL;
416 struct mem_cgroup_per_zone *mz_node;
417
418 if (mz->on_tree)
419 return;
420
421 mz->usage_in_excess = new_usage_in_excess;
422 if (!mz->usage_in_excess)
423 return;
424 while (*p) {
425 parent = *p;
426 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
427 tree_node);
428 if (mz->usage_in_excess < mz_node->usage_in_excess)
429 p = &(*p)->rb_left;
430 /*
431 * We can't avoid mem cgroups that are over their soft
432 * limit by the same amount
433 */
434 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
435 p = &(*p)->rb_right;
436 }
437 rb_link_node(&mz->tree_node, parent, p);
438 rb_insert_color(&mz->tree_node, &mctz->rb_root);
439 mz->on_tree = true;
440}
441
442static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
443 struct mem_cgroup_tree_per_zone *mctz)
444{
445 if (!mz->on_tree)
446 return;
447 rb_erase(&mz->tree_node, &mctz->rb_root);
448 mz->on_tree = false;
449}
450
451static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
452 struct mem_cgroup_tree_per_zone *mctz)
453{
454 unsigned long flags;
455
456 spin_lock_irqsave(&mctz->lock, flags);
457 __mem_cgroup_remove_exceeded(mz, mctz);
458 spin_unlock_irqrestore(&mctz->lock, flags);
459}
460
461static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
462{
463 unsigned long nr_pages = page_counter_read(&memcg->memory);
464 unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
465 unsigned long excess = 0;
466
467 if (nr_pages > soft_limit)
468 excess = nr_pages - soft_limit;
469
470 return excess;
471}
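
/*
 * Illustrative example: with usage at 1000 pages and a soft limit of 800,
 * soft_limit_excess() returns 200. These excess values order the per-zone
 * RB-tree above, so the worst offender ends up rightmost and is picked
 * first by mem_cgroup_largest_soft_limit_node() during soft limit reclaim.
 */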
472
473static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
474{
475 unsigned long excess;
476 struct mem_cgroup_per_zone *mz;
477 struct mem_cgroup_tree_per_zone *mctz;
478
479 mctz = soft_limit_tree_from_page(page);
480 /*
481 * Necessary to update all ancestors when hierarchy is used,
482 * because their event counters are not touched.
483 */
484 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
485 mz = mem_cgroup_page_zoneinfo(memcg, page);
486 excess = soft_limit_excess(memcg);
487 /*
488 * We have to update the tree if mz is on RB-tree or
489 * mem is over its softlimit.
490 */
491 if (excess || mz->on_tree) {
492 unsigned long flags;
493
494 spin_lock_irqsave(&mctz->lock, flags);
495 /* if on-tree, remove it */
496 if (mz->on_tree)
497 __mem_cgroup_remove_exceeded(mz, mctz);
498 /*
499 * Insert again. mz->usage_in_excess will be updated.
500 * If excess is 0, no tree ops.
501 */
502 __mem_cgroup_insert_exceeded(mz, mctz, excess);
503 spin_unlock_irqrestore(&mctz->lock, flags);
504 }
505 }
506}
507
508static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
509{
510 struct mem_cgroup_tree_per_zone *mctz;
511 struct mem_cgroup_per_zone *mz;
512 int nid, zid;
513
514 for_each_node(nid) {
515 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
516 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
517 mctz = soft_limit_tree_node_zone(nid, zid);
518 mem_cgroup_remove_exceeded(mz, mctz);
519 }
520 }
521}
522
523static struct mem_cgroup_per_zone *
524__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
525{
526 struct rb_node *rightmost = NULL;
527 struct mem_cgroup_per_zone *mz;
528
529retry:
530 mz = NULL;
531 rightmost = rb_last(&mctz->rb_root);
532 if (!rightmost)
533 goto done; /* Nothing to reclaim from */
534
535 mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
536 /*
537 * Remove the node now but someone else can add it back,
538 * we will add it back at the end of reclaim to its correct
539 * position in the tree.
540 */
541 __mem_cgroup_remove_exceeded(mz, mctz);
542 if (!soft_limit_excess(mz->memcg) ||
543 !css_tryget_online(&mz->memcg->css))
544 goto retry;
545done:
546 return mz;
547}
548
549static struct mem_cgroup_per_zone *
550mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
551{
552 struct mem_cgroup_per_zone *mz;
553
554 spin_lock_irq(&mctz->lock);
555 mz = __mem_cgroup_largest_soft_limit_node(mctz);
556 spin_unlock_irq(&mctz->lock);
557 return mz;
558}
559
560/*
561 * Return the page count for a single (non-recursive) @memcg.
562 *
563 * Implementation Note: reading percpu statistics for memcg.
564 *
565 * Both vmstat[] and percpu_counter use thresholds and periodic
566 * synchronization to implement a "quick" read. There is a trade-off between
567 * the reading cost and the precision of the value, so we could likewise
568 * implement periodic synchronization of the counters in memcg.
569 *
570 * But this _read() function is currently used for the user interface. Users
571 * account memory usage by memory cgroup and _always_ require an exact value,
572 * because they account memory. Even if we provided a quick-and-fuzzy read,
573 * we would still have to visit all online cpus and sum them up. So, for now,
574 * the extra synchronization is not implemented (it only exists for cpu hotplug).
575 *
576 * If kernel-internal users appear that can make use of an inexact value, and
577 * reading all cpu values becomes a performance bottleneck in some common
578 * workload, thresholds and synchronization like vmstat[] should be
579 * implemented.
580 */
581static unsigned long
582mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
583{
584 long val = 0;
585 int cpu;
586
587 /* Per-cpu values can be negative, use a signed accumulator */
588 for_each_possible_cpu(cpu)
589 val += per_cpu(memcg->stat->count[idx], cpu);
590 /*
591 * Summing races with updates, so val may be negative. Avoid exposing
592 * transient negative values.
593 */
594 if (val < 0)
595 val = 0;
596 return val;
597}
598
599static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
600 enum mem_cgroup_events_index idx)
601{
602 unsigned long val = 0;
603 int cpu;
604
605 for_each_possible_cpu(cpu)
606 val += per_cpu(memcg->stat->events[idx], cpu);
607 return val;
608}
609
610static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
611 struct page *page,
612 bool compound, int nr_pages)
613{
614 /*
615 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
616 * counted as CACHE even if it's on ANON LRU.
617 */
618 if (PageAnon(page))
619 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
620 nr_pages);
621 else
622 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
623 nr_pages);
624
625 if (compound) {
626 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
627 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
628 nr_pages);
629 }
630
631 /* pagein of a big page is an event. So, ignore page size */
632 if (nr_pages > 0)
633 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
634 else {
635 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
636 nr_pages = -nr_pages; /* for event */
637 }
638
639 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
640}
641
642unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
643 int nid, unsigned int lru_mask)
644{
645 unsigned long nr = 0;
646 int zid;
647
648 VM_BUG_ON((unsigned)nid >= nr_node_ids);
649
650 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
651 struct mem_cgroup_per_zone *mz;
652 enum lru_list lru;
653
654 for_each_lru(lru) {
655 if (!(BIT(lru) & lru_mask))
656 continue;
657 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
658 nr += mz->lru_size[lru];
659 }
660 }
661 return nr;
662}
663
664static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
665 unsigned int lru_mask)
666{
667 unsigned long nr = 0;
668 int nid;
669
670 for_each_node_state(nid, N_MEMORY)
671 nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
672 return nr;
673}
674
675static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
676 enum mem_cgroup_events_target target)
677{
678 unsigned long val, next;
679
680 val = __this_cpu_read(memcg->stat->nr_page_events);
681 next = __this_cpu_read(memcg->stat->targets[target]);
682 /* from time_after() in jiffies.h */
683 if ((long)next - (long)val < 0) {
684 switch (target) {
685 case MEM_CGROUP_TARGET_THRESH:
686 next = val + THRESHOLDS_EVENTS_TARGET;
687 break;
688 case MEM_CGROUP_TARGET_SOFTLIMIT:
689 next = val + SOFTLIMIT_EVENTS_TARGET;
690 break;
691 case MEM_CGROUP_TARGET_NUMAINFO:
692 next = val + NUMAINFO_EVENTS_TARGET;
693 break;
694 default:
695 break;
696 }
697 __this_cpu_write(memcg->stat->targets[target], next);
698 return true;
699 }
700 return false;
701}
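
/*
 * In effect: nr_page_events is compared against a per-target "next" value,
 * so the threshold check fires roughly once every THRESHOLDS_EVENTS_TARGET
 * (128) page events, and the softlimit/numainfo work roughly once every
 * SOFTLIMIT_EVENTS_TARGET/NUMAINFO_EVENTS_TARGET (1024) page events, as
 * wired up in memcg_check_events() below.
 */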
702
703/*
704 * Check events in order.
705 *
706 */
707static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
708{
709 /* threshold event is triggered in finer grain than soft limit */
710 if (unlikely(mem_cgroup_event_ratelimit(memcg,
711 MEM_CGROUP_TARGET_THRESH))) {
712 bool do_softlimit;
713 bool do_numainfo __maybe_unused;
714
715 do_softlimit = mem_cgroup_event_ratelimit(memcg,
716 MEM_CGROUP_TARGET_SOFTLIMIT);
717#if MAX_NUMNODES > 1
718 do_numainfo = mem_cgroup_event_ratelimit(memcg,
719 MEM_CGROUP_TARGET_NUMAINFO);
720#endif
721 mem_cgroup_threshold(memcg);
722 if (unlikely(do_softlimit))
723 mem_cgroup_update_tree(memcg, page);
724#if MAX_NUMNODES > 1
725 if (unlikely(do_numainfo))
726 atomic_inc(&memcg->numainfo_events);
727#endif
728 }
729}
730
731struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
732{
733 /*
734 * mm_update_next_owner() may clear mm->owner to NULL
735 * if it races with swapoff, page migration, etc.
736 * So this can be called with p == NULL.
737 */
738 if (unlikely(!p))
739 return NULL;
740
741 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
742}
743EXPORT_SYMBOL(mem_cgroup_from_task);
744
745static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
746{
747 struct mem_cgroup *memcg = NULL;
748
749 rcu_read_lock();
750 do {
751 /*
752 * Page cache insertions can happen without an
753 * actual mm context, e.g. during disk probing
754 * on boot, loopback IO, acct() writes etc.
755 */
756 if (unlikely(!mm))
757 memcg = root_mem_cgroup;
758 else {
759 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
760 if (unlikely(!memcg))
761 memcg = root_mem_cgroup;
762 }
763 } while (!css_tryget_online(&memcg->css));
764 rcu_read_unlock();
765 return memcg;
766}
767
768/**
769 * mem_cgroup_iter - iterate over memory cgroup hierarchy
770 * @root: hierarchy root
771 * @prev: previously returned memcg, NULL on first invocation
772 * @reclaim: cookie for shared reclaim walks, NULL for full walks
773 *
774 * Returns references to children of the hierarchy below @root, or
775 * @root itself, or %NULL after a full round-trip.
776 *
777 * Caller must pass the return value in @prev on subsequent
778 * invocations for reference counting, or use mem_cgroup_iter_break()
779 * to cancel a hierarchy walk before the round-trip is complete.
780 *
781 * Reclaimers can specify a zone and a priority level in @reclaim to
782 * divide up the memcgs in the hierarchy among all concurrent
783 * reclaimers operating on the same zone and priority.
784 */
785struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
786 struct mem_cgroup *prev,
787 struct mem_cgroup_reclaim_cookie *reclaim)
788{
789 struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
790 struct cgroup_subsys_state *css = NULL;
791 struct mem_cgroup *memcg = NULL;
792 struct mem_cgroup *pos = NULL;
793
794 if (mem_cgroup_disabled())
795 return NULL;
796
797 if (!root)
798 root = root_mem_cgroup;
799
800 if (prev && !reclaim)
801 pos = prev;
802
803 if (!root->use_hierarchy && root != root_mem_cgroup) {
804 if (prev)
805 goto out;
806 return root;
807 }
808
809 rcu_read_lock();
810
811 if (reclaim) {
812 struct mem_cgroup_per_zone *mz;
813
814 mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
815 iter = &mz->iter[reclaim->priority];
816
817 if (prev && reclaim->generation != iter->generation)
818 goto out_unlock;
819
820 while (1) {
821 pos = READ_ONCE(iter->position);
822 if (!pos || css_tryget(&pos->css))
823 break;
824 /*
825 * css reference reached zero, so iter->position will
826 * be cleared by ->css_released. However, we should not
827 * rely on this happening soon, because ->css_released
828 * is called from a work queue, and by busy-waiting we
829 * might block it. So we clear iter->position right
830 * away.
831 */
832 (void)cmpxchg(&iter->position, pos, NULL);
833 }
834 }
835
836 if (pos)
837 css = &pos->css;
838
839 for (;;) {
840 css = css_next_descendant_pre(css, &root->css);
841 if (!css) {
842 /*
843 * Reclaimers share the hierarchy walk, and a
844 * new one might jump in right at the end of
845 * the hierarchy - make sure they see at least
846 * one group and restart from the beginning.
847 */
848 if (!prev)
849 continue;
850 break;
851 }
852
853 /*
854 * Verify the css and acquire a reference. The root
855 * is provided by the caller, so we know it's alive
856 * and kicking, and don't take an extra reference.
857 */
858 memcg = mem_cgroup_from_css(css);
859
860 if (css == &root->css)
861 break;
862
863 if (css_tryget(css))
864 break;
865
866 memcg = NULL;
867 }
868
869 if (reclaim) {
870 /*
871 * The position could have already been updated by a competing
872 * thread, so check that the value hasn't changed since we read
873 * it to avoid reclaiming from the same cgroup twice.
874 */
875 (void)cmpxchg(&iter->position, pos, memcg);
876
877 if (pos)
878 css_put(&pos->css);
879
880 if (!memcg)
881 iter->generation++;
882 else if (!prev)
883 reclaim->generation = iter->generation;
884 }
885
886out_unlock:
887 rcu_read_unlock();
888out:
889 if (prev && prev != root)
890 css_put(&prev->css);
891
892 return memcg;
893}
894
895/**
896 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
897 * @root: hierarchy root
898 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
899 */
900void mem_cgroup_iter_break(struct mem_cgroup *root,
901 struct mem_cgroup *prev)
902{
903 if (!root)
904 root = root_mem_cgroup;
905 if (prev && prev != root)
906 css_put(&prev->css);
907}
908
909static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
910{
911 struct mem_cgroup *memcg = dead_memcg;
912 struct mem_cgroup_reclaim_iter *iter;
913 struct mem_cgroup_per_zone *mz;
914 int nid, zid;
915 int i;
916
917 while ((memcg = parent_mem_cgroup(memcg))) {
918 for_each_node(nid) {
919 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
920 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
921 for (i = 0; i <= DEF_PRIORITY; i++) {
922 iter = &mz->iter[i];
923 cmpxchg(&iter->position,
924 dead_memcg, NULL);
925 }
926 }
927 }
928 }
929}
930
931/*
932 * Iteration constructs for visiting all cgroups (under a tree). If
933 * loops are exited prematurely (break), mem_cgroup_iter_break() must
934 * be used for reference counting.
935 */
936#define for_each_mem_cgroup_tree(iter, root) \
937 for (iter = mem_cgroup_iter(root, NULL, NULL); \
938 iter != NULL; \
939 iter = mem_cgroup_iter(root, iter, NULL))
940
941#define for_each_mem_cgroup(iter) \
942 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
943 iter != NULL; \
944 iter = mem_cgroup_iter(NULL, iter, NULL))
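
/*
 * Typical usage sketch for the iterators above (some_condition() is a
 * placeholder for whatever per-memcg check the caller needs):
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (some_condition(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */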
945
946/**
947 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
948 * @zone: zone of the wanted lruvec
949 * @memcg: memcg of the wanted lruvec
950 *
951 * Returns the lru list vector holding pages for the given @zone and
952 * @memcg. This can be the global zone lruvec, if the memory controller
953 * is disabled.
954 */
955struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
956 struct mem_cgroup *memcg)
957{
958 struct mem_cgroup_per_zone *mz;
959 struct lruvec *lruvec;
960
961 if (mem_cgroup_disabled()) {
962 lruvec = &zone->lruvec;
963 goto out;
964 }
965
966 mz = mem_cgroup_zone_zoneinfo(memcg, zone);
967 lruvec = &mz->lruvec;
968out:
969 /*
970 * Since a node can be onlined after the mem_cgroup was created,
971 * we have to be prepared to initialize lruvec->zone here;
972 * and if offlined then reonlined, we need to reinitialize it.
973 */
974 if (unlikely(lruvec->zone != zone))
975 lruvec->zone = zone;
976 return lruvec;
977}
978
979/**
980 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
981 * @page: the page
982 * @zone: zone of the page
983 *
984 * This function is only safe when following the LRU page isolation
985 * and putback protocol: the LRU lock must be held, and the page must
986 * either be PageLRU() or the caller must have isolated/allocated it.
987 */
988struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
989{
990 struct mem_cgroup_per_zone *mz;
991 struct mem_cgroup *memcg;
992 struct lruvec *lruvec;
993
994 if (mem_cgroup_disabled()) {
995 lruvec = &zone->lruvec;
996 goto out;
997 }
998
999 memcg = page->mem_cgroup;
1000 /*
1001 * Swapcache readahead pages are added to the LRU - and
1002 * possibly migrated - before they are charged.
1003 */
1004 if (!memcg)
1005 memcg = root_mem_cgroup;
1006
1007 mz = mem_cgroup_page_zoneinfo(memcg, page);
1008 lruvec = &mz->lruvec;
1009out:
1010 /*
1011 * Since a node can be onlined after the mem_cgroup was created,
1012 * we have to be prepared to initialize lruvec->zone here;
1013 * and if offlined then reonlined, we need to reinitialize it.
1014 */
1015 if (unlikely(lruvec->zone != zone))
1016 lruvec->zone = zone;
1017 return lruvec;
1018}
1019
1020/**
1021 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1022 * @lruvec: mem_cgroup per zone lru vector
1023 * @lru: index of lru list the page is sitting on
1024 * @nr_pages: positive when adding or negative when removing
1025 *
1026 * This function must be called when a page is added to or removed from an
1027 * lru list.
1028 */
1029void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1030 int nr_pages)
1031{
1032 struct mem_cgroup_per_zone *mz;
1033 unsigned long *lru_size;
1034
1035 if (mem_cgroup_disabled())
1036 return;
1037
1038 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1039 lru_size = mz->lru_size + lru;
1040 *lru_size += nr_pages;
1041 VM_BUG_ON((long)(*lru_size) < 0);
1042}
1043
1044bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
1045{
1046 struct mem_cgroup *task_memcg;
1047 struct task_struct *p;
1048 bool ret;
1049
1050 p = find_lock_task_mm(task);
1051 if (p) {
1052 task_memcg = get_mem_cgroup_from_mm(p->mm);
1053 task_unlock(p);
1054 } else {
1055 /*
1056 * All threads may have already detached their mm's, but the oom
1057 * killer still needs to detect if they have already been oom
1058 * killed to prevent needlessly killing additional tasks.
1059 */
1060 rcu_read_lock();
1061 task_memcg = mem_cgroup_from_task(task);
1062 css_get(&task_memcg->css);
1063 rcu_read_unlock();
1064 }
1065 ret = mem_cgroup_is_descendant(task_memcg, memcg);
1066 css_put(&task_memcg->css);
1067 return ret;
1068}
1069
1070/**
1071 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1072 * @memcg: the memory cgroup
1073 *
1074 * Returns the maximum amount of memory @memcg can be charged with, in
1075 * pages.
1076 */
1077static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1078{
1079 unsigned long margin = 0;
1080 unsigned long count;
1081 unsigned long limit;
1082
1083 count = page_counter_read(&memcg->memory);
1084 limit = READ_ONCE(memcg->memory.limit);
1085 if (count < limit)
1086 margin = limit - count;
1087
1088 if (do_memsw_account()) {
1089 count = page_counter_read(&memcg->memsw);
1090 limit = READ_ONCE(memcg->memsw.limit);
1091 if (count <= limit)
1092 margin = min(margin, limit - count);
1093 }
1094
1095 return margin;
1096}
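
/*
 * Worked example for mem_cgroup_margin() above (illustrative numbers): with
 * memory usage at 900 pages and a limit of 1000, the margin is 100 pages;
 * if memsw accounting is active with usage 980 against a limit of 1000, the
 * combined margin shrinks to min(100, 20) = 20 pages.
 */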
1097
1098/*
1099 * A routine for checking whether "mem" is under move_account() or not.
1100 *
1101 * Checks whether a cgroup is mc.from, mc.to, or in the hierarchy of the
1102 * moving cgroups. This is used for waiting under the high memory pressure
1103 * caused by a "move".
1104 */
1105static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1106{
1107 struct mem_cgroup *from;
1108 struct mem_cgroup *to;
1109 bool ret = false;
1110 /*
1111 * Unlike task_move routines, we access mc.to, mc.from not under
1112 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1113 */
1114 spin_lock(&mc.lock);
1115 from = mc.from;
1116 to = mc.to;
1117 if (!from)
1118 goto unlock;
1119
1120 ret = mem_cgroup_is_descendant(from, memcg) ||
1121 mem_cgroup_is_descendant(to, memcg);
1122unlock:
1123 spin_unlock(&mc.lock);
1124 return ret;
1125}
1126
1127static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1128{
1129 if (mc.moving_task && current != mc.moving_task) {
1130 if (mem_cgroup_under_move(memcg)) {
1131 DEFINE_WAIT(wait);
1132 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1133 /* moving charge context might have finished. */
1134 if (mc.moving_task)
1135 schedule();
1136 finish_wait(&mc.waitq, &wait);
1137 return true;
1138 }
1139 }
1140 return false;
1141}
1142
1143#define K(x) ((x) << (PAGE_SHIFT-10))
1144/**
1145 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
1146 * @memcg: The memory cgroup that went over limit
1147 * @p: Task that is going to be killed
1148 *
1149 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1150 * enabled
1151 */
1152void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1153{
1154 struct mem_cgroup *iter;
1155 unsigned int i;
1156
1157 rcu_read_lock();
1158
1159 if (p) {
1160 pr_info("Task in ");
1161 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1162 pr_cont(" killed as a result of limit of ");
1163 } else {
1164 pr_info("Memory limit reached of cgroup ");
1165 }
1166
1167 pr_cont_cgroup_path(memcg->css.cgroup);
1168 pr_cont("\n");
1169
1170 rcu_read_unlock();
1171
1172 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1173 K((u64)page_counter_read(&memcg->memory)),
1174 K((u64)memcg->memory.limit), memcg->memory.failcnt);
1175 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1176 K((u64)page_counter_read(&memcg->memsw)),
1177 K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
1178 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1179 K((u64)page_counter_read(&memcg->kmem)),
1180 K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
1181
1182 for_each_mem_cgroup_tree(iter, memcg) {
1183 pr_info("Memory cgroup stats for ");
1184 pr_cont_cgroup_path(iter->css.cgroup);
1185 pr_cont(":");
1186
1187 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1188 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1189 continue;
1190 pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
1191 K(mem_cgroup_read_stat(iter, i)));
1192 }
1193
1194 for (i = 0; i < NR_LRU_LISTS; i++)
1195 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1196 K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1197
1198 pr_cont("\n");
1199 }
1200}
1201
1202/*
1203 * This function returns the number of memcgs under the hierarchy tree.
1204 * Returns 1 (the self count) if there are no children.
1205 */
1206static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1207{
1208 int num = 0;
1209 struct mem_cgroup *iter;
1210
1211 for_each_mem_cgroup_tree(iter, memcg)
1212 num++;
1213 return num;
1214}
1215
1216/*
1217 * Return the memory (and swap, if configured) limit for a memcg.
1218 */
1219static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
1220{
1221 unsigned long limit;
1222
1223 limit = memcg->memory.limit;
1224 if (mem_cgroup_swappiness(memcg)) {
1225 unsigned long memsw_limit;
1226 unsigned long swap_limit;
1227
1228 memsw_limit = memcg->memsw.limit;
1229 swap_limit = memcg->swap.limit;
1230 swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
1231 limit = min(limit + swap_limit, memsw_limit);
1232 }
1233 return limit;
1234}
1235
1236static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1237 int order)
1238{
1239 struct oom_control oc = {
1240 .zonelist = NULL,
1241 .nodemask = NULL,
1242 .gfp_mask = gfp_mask,
1243 .order = order,
1244 };
1245 struct mem_cgroup *iter;
1246 unsigned long chosen_points = 0;
1247 unsigned long totalpages;
1248 unsigned int points = 0;
1249 struct task_struct *chosen = NULL;
1250
1251 mutex_lock(&oom_lock);
1252
1253 /*
1254 * If current has a pending SIGKILL or is exiting, then automatically
1255 * select it. The goal is to allow it to allocate so that it may
1256 * quickly exit and free its memory.
1257 */
1258 if (fatal_signal_pending(current) || task_will_free_mem(current)) {
1259 mark_oom_victim(current);
1260 goto unlock;
1261 }
1262
1263 check_panic_on_oom(&oc, CONSTRAINT_MEMCG, memcg);
1264 totalpages = mem_cgroup_get_limit(memcg) ? : 1;
1265 for_each_mem_cgroup_tree(iter, memcg) {
1266 struct css_task_iter it;
1267 struct task_struct *task;
1268
1269 css_task_iter_start(&iter->css, &it);
1270 while ((task = css_task_iter_next(&it))) {
1271 switch (oom_scan_process_thread(&oc, task, totalpages)) {
1272 case OOM_SCAN_SELECT:
1273 if (chosen)
1274 put_task_struct(chosen);
1275 chosen = task;
1276 chosen_points = ULONG_MAX;
1277 get_task_struct(chosen);
1278 /* fall through */
1279 case OOM_SCAN_CONTINUE:
1280 continue;
1281 case OOM_SCAN_ABORT:
1282 css_task_iter_end(&it);
1283 mem_cgroup_iter_break(memcg, iter);
1284 if (chosen)
1285 put_task_struct(chosen);
1286 goto unlock;
1287 case OOM_SCAN_OK:
1288 break;
1289 };
1290 points = oom_badness(task, memcg, NULL, totalpages);
1291 if (!points || points < chosen_points)
1292 continue;
1293 /* Prefer thread group leaders for display purposes */
1294 if (points == chosen_points &&
1295 thread_group_leader(chosen))
1296 continue;
1297
1298 if (chosen)
1299 put_task_struct(chosen);
1300 chosen = task;
1301 chosen_points = points;
1302 get_task_struct(chosen);
1303 }
1304 css_task_iter_end(&it);
1305 }
1306
1307 if (chosen) {
1308 points = chosen_points * 1000 / totalpages;
1309 oom_kill_process(&oc, chosen, points, totalpages, memcg,
1310 "Memory cgroup out of memory");
1311 }
1312unlock:
1313 mutex_unlock(&oom_lock);
1314 return chosen;
1315}
1316
1317#if MAX_NUMNODES > 1
1318
1319/**
1320 * test_mem_cgroup_node_reclaimable
1321 * @memcg: the target memcg
1322 * @nid: the node ID to be checked.
1323 * @noswap: specify true here if the user wants file-only information.
1324 *
1325 * This function returns whether the specified memcg contains any
1326 * reclaimable pages on a node. Returns true if there are any reclaimable
1327 * pages in the node.
1328 */
1329static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1330 int nid, bool noswap)
1331{
1332 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1333 return true;
1334 if (noswap || !total_swap_pages)
1335 return false;
1336 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1337 return true;
1338 return false;
1339
1340}
1341
1342/*
1343 * Always updating the nodemask is not very good - even if we have an empty
1344 * list or the wrong list here, we can start from some node and traverse all
1345 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1346 *
1347 */
1348static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1349{
1350 int nid;
1351 /*
1352 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1353 * pagein/pageout changes since the last update.
1354 */
1355 if (!atomic_read(&memcg->numainfo_events))
1356 return;
1357 if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1358 return;
1359
1360 /* make a nodemask where this memcg uses memory from */
1361 memcg->scan_nodes = node_states[N_MEMORY];
1362
1363 for_each_node_mask(nid, node_states[N_MEMORY]) {
1364
1365 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1366 node_clear(nid, memcg->scan_nodes);
1367 }
1368
1369 atomic_set(&memcg->numainfo_events, 0);
1370 atomic_set(&memcg->numainfo_updating, 0);
1371}
1372
1373/*
1374 * Select a node to start reclaim from. Because all we need is to reduce the
1375 * usage counter, starting from anywhere is OK. Reclaiming memory from the
1376 * current node has pros and cons.
1377 *
1378 * Freeing memory from the current node means freeing memory from a node which
1379 * we'll use or have used, so it may disturb the LRU. And if several threads
1380 * hit their limits, they will contend on the same node. But freeing from a
1381 * remote node costs more for memory reclaim because of memory latency.
1382 *
1383 * For now, we use round-robin. A better algorithm is welcome.
1384 */
1385int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1386{
1387 int node;
1388
1389 mem_cgroup_may_update_nodemask(memcg);
1390 node = memcg->last_scanned_node;
1391
1392 node = next_node(node, memcg->scan_nodes);
1393 if (node == MAX_NUMNODES)
1394 node = first_node(memcg->scan_nodes);
1395 /*
1396 * We call this when we hit limit, not when pages are added to LRU.
1397 * No LRU may hold pages because all pages are UNEVICTABLE or
1398 * memcg is too small and all pages are not on LRU. In that case,
1399 * we use the current node.
1400 */
1401 if (unlikely(node == MAX_NUMNODES))
1402 node = numa_node_id();
1403
1404 memcg->last_scanned_node = node;
1405 return node;
1406}
1407#else
1408int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1409{
1410 return 0;
1411}
1412#endif
1413
1414static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1415 struct zone *zone,
1416 gfp_t gfp_mask,
1417 unsigned long *total_scanned)
1418{
1419 struct mem_cgroup *victim = NULL;
1420 int total = 0;
1421 int loop = 0;
1422 unsigned long excess;
1423 unsigned long nr_scanned;
1424 struct mem_cgroup_reclaim_cookie reclaim = {
1425 .zone = zone,
1426 .priority = 0,
1427 };
1428
1429 excess = soft_limit_excess(root_memcg);
1430
1431 while (1) {
1432 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1433 if (!victim) {
1434 loop++;
1435 if (loop >= 2) {
1436 /*
1437 * If we have not been able to reclaim
1438 * anything, it might be because there are
1439 * no reclaimable pages under this hierarchy
1440 */
1441 if (!total)
1442 break;
1443 /*
1444 * We want to do more targeted reclaim.
1445 * excess >> 2 is not too excessive, so we don't
1446 * reclaim too much, nor too little, lest we keep
1447 * coming back to reclaim from this cgroup
1448 */
1449 if (total >= (excess >> 2) ||
1450 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1451 break;
1452 }
1453 continue;
1454 }
1455 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
1456 zone, &nr_scanned);
1457 *total_scanned += nr_scanned;
1458 if (!soft_limit_excess(root_memcg))
1459 break;
1460 }
1461 mem_cgroup_iter_break(root_memcg, victim);
1462 return total;
1463}
1464
1465#ifdef CONFIG_LOCKDEP
1466static struct lockdep_map memcg_oom_lock_dep_map = {
1467 .name = "memcg_oom_lock",
1468};
1469#endif
1470
1471static DEFINE_SPINLOCK(memcg_oom_lock);
1472
1473/*
1474 * Check whether the OOM killer is already running under our hierarchy.
1475 * If someone is already running it, return false.
1476 */
1477static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1478{
1479 struct mem_cgroup *iter, *failed = NULL;
1480
1481 spin_lock(&memcg_oom_lock);
1482
1483 for_each_mem_cgroup_tree(iter, memcg) {
1484 if (iter->oom_lock) {
1485 /*
1486 * this subtree of our hierarchy is already locked
1487 * so we cannot give a lock.
1488 */
1489 failed = iter;
1490 mem_cgroup_iter_break(memcg, iter);
1491 break;
1492 } else
1493 iter->oom_lock = true;
1494 }
1495
1496 if (failed) {
1497 /*
1498 * OK, we failed to lock the whole subtree, so we have
1499 * to clean up what we already set up, up to the failing subtree
1500 */
1501 for_each_mem_cgroup_tree(iter, memcg) {
1502 if (iter == failed) {
1503 mem_cgroup_iter_break(memcg, iter);
1504 break;
1505 }
1506 iter->oom_lock = false;
1507 }
1508 } else
1509 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1510
1511 spin_unlock(&memcg_oom_lock);
1512
1513 return !failed;
1514}
1515
1516static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1517{
1518 struct mem_cgroup *iter;
1519
1520 spin_lock(&memcg_oom_lock);
1521 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
1522 for_each_mem_cgroup_tree(iter, memcg)
1523 iter->oom_lock = false;
1524 spin_unlock(&memcg_oom_lock);
1525}
1526
1527static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1528{
1529 struct mem_cgroup *iter;
1530
1531 spin_lock(&memcg_oom_lock);
1532 for_each_mem_cgroup_tree(iter, memcg)
1533 iter->under_oom++;
1534 spin_unlock(&memcg_oom_lock);
1535}
1536
1537static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1538{
1539 struct mem_cgroup *iter;
1540
1541 /*
1542 * When a new child is created while the hierarchy is under oom,
1543 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
1544 */
1545 spin_lock(&memcg_oom_lock);
1546 for_each_mem_cgroup_tree(iter, memcg)
1547 if (iter->under_oom > 0)
1548 iter->under_oom--;
1549 spin_unlock(&memcg_oom_lock);
1550}
1551
1552static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1553
1554struct oom_wait_info {
1555 struct mem_cgroup *memcg;
1556 wait_queue_t wait;
1557};
1558
1559static int memcg_oom_wake_function(wait_queue_t *wait,
1560 unsigned mode, int sync, void *arg)
1561{
1562 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1563 struct mem_cgroup *oom_wait_memcg;
1564 struct oom_wait_info *oom_wait_info;
1565
1566 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1567 oom_wait_memcg = oom_wait_info->memcg;
1568
1569 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1570 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1571 return 0;
1572 return autoremove_wake_function(wait, mode, sync, arg);
1573}
1574
1575static void memcg_oom_recover(struct mem_cgroup *memcg)
1576{
1577 /*
1578 * For the following lockless ->under_oom test, the only required
1579 * guarantee is that it must see the state asserted by an OOM when
1580 * this function is called as a result of userland actions
1581 * triggered by the notification of the OOM. This is trivially
1582 * achieved by invoking mem_cgroup_mark_under_oom() before
1583 * triggering notification.
1584 */
1585 if (memcg && memcg->under_oom)
1586 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1587}
1588
1589static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1590{
1591 if (!current->memcg_may_oom)
1592 return;
1593 /*
1594 * We are in the middle of the charge context here, so we
1595 * don't want to block when potentially sitting on a callstack
1596 * that holds all kinds of filesystem and mm locks.
1597 *
1598 * Also, the caller may handle a failed allocation gracefully
1599 * (like optional page cache readahead) and so an OOM killer
1600 * invocation might not even be necessary.
1601 *
1602 * That's why we don't do anything here except remember the
1603 * OOM context and then deal with it at the end of the page
1604 * fault when the stack is unwound, the locks are released,
1605 * and when we know whether the fault was overall successful.
1606 */
1607 css_get(&memcg->css);
1608 current->memcg_in_oom = memcg;
1609 current->memcg_oom_gfp_mask = mask;
1610 current->memcg_oom_order = order;
1611}
1612
1613/**
1614 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1615 * @handle: actually kill/wait or just clean up the OOM state
1616 *
1617 * This has to be called at the end of a page fault if the memcg OOM
1618 * handler was enabled.
1619 *
1620 * Memcg supports userspace OOM handling where failed allocations must
1621 * sleep on a waitqueue until the userspace task resolves the
1622 * situation. Sleeping directly in the charge context with all kinds
1623 * of locks held is not a good idea, instead we remember an OOM state
1624 * in the task and mem_cgroup_oom_synchronize() has to be called at
1625 * the end of the page fault to complete the OOM handling.
1626 *
1627 * Returns %true if an ongoing memcg OOM situation was detected and
1628 * completed, %false otherwise.
1629 */
1630bool mem_cgroup_oom_synchronize(bool handle)
1631{
1632 struct mem_cgroup *memcg = current->memcg_in_oom;
1633 struct oom_wait_info owait;
1634 bool locked;
1635
1636 /* OOM is global, do not handle */
1637 if (!memcg)
1638 return false;
1639
1640 if (!handle || oom_killer_disabled)
1641 goto cleanup;
1642
1643 owait.memcg = memcg;
1644 owait.wait.flags = 0;
1645 owait.wait.func = memcg_oom_wake_function;
1646 owait.wait.private = current;
1647 INIT_LIST_HEAD(&owait.wait.task_list);
1648
1649 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1650 mem_cgroup_mark_under_oom(memcg);
1651
1652 locked = mem_cgroup_oom_trylock(memcg);
1653
1654 if (locked)
1655 mem_cgroup_oom_notify(memcg);
1656
1657 if (locked && !memcg->oom_kill_disable) {
1658 mem_cgroup_unmark_under_oom(memcg);
1659 finish_wait(&memcg_oom_waitq, &owait.wait);
1660 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1661 current->memcg_oom_order);
1662 } else {
1663 schedule();
1664 mem_cgroup_unmark_under_oom(memcg);
1665 finish_wait(&memcg_oom_waitq, &owait.wait);
1666 }
1667
1668 if (locked) {
1669 mem_cgroup_oom_unlock(memcg);
1670 /*
1671 * There is no guarantee that an OOM-lock contender
1672 * sees the wakeups triggered by the OOM kill
1673 * uncharges. Wake any sleepers explicitly.
1674 */
1675 memcg_oom_recover(memcg);
1676 }
1677cleanup:
1678 current->memcg_in_oom = NULL;
1679 css_put(&memcg->css);
1680 return true;
1681}
1682
1683/**
1684 * lock_page_memcg - lock a page->mem_cgroup binding
1685 * @page: the page
1686 *
1687 * This function protects unlocked LRU pages from being moved to
1688 * another cgroup and stabilizes their page->mem_cgroup binding.
1689 */
1690void lock_page_memcg(struct page *page)
1691{
1692 struct mem_cgroup *memcg;
1693 unsigned long flags;
1694
1695 /*
1696 * The RCU lock is held throughout the transaction. The fast
1697 * path can get away without acquiring the memcg->move_lock
1698 * because page moving starts with an RCU grace period.
1699 */
1700 rcu_read_lock();
1701
1702 if (mem_cgroup_disabled())
1703 return;
1704again:
1705 memcg = page->mem_cgroup;
1706 if (unlikely(!memcg))
1707 return;
1708
1709 if (atomic_read(&memcg->moving_account) <= 0)
1710 return;
1711
1712 spin_lock_irqsave(&memcg->move_lock, flags);
1713 if (memcg != page->mem_cgroup) {
1714 spin_unlock_irqrestore(&memcg->move_lock, flags);
1715 goto again;
1716 }
1717
1718 /*
1719 * When charge migration first begins, we can have locked and
1720 * unlocked page stat updates happening concurrently. Track
1721 * the task that holds the lock for unlock_page_memcg().
1722 */
1723 memcg->move_lock_task = current;
1724 memcg->move_lock_flags = flags;
1725
1726 return;
1727}
1728EXPORT_SYMBOL(lock_page_memcg);
1729
1730/**
1731 * unlock_page_memcg - unlock a page->mem_cgroup binding
1732 * @page: the page
1733 */
1734void unlock_page_memcg(struct page *page)
1735{
1736 struct mem_cgroup *memcg = page->mem_cgroup;
1737
1738 if (memcg && memcg->move_lock_task == current) {
1739 unsigned long flags = memcg->move_lock_flags;
1740
1741 memcg->move_lock_task = NULL;
1742 memcg->move_lock_flags = 0;
1743
1744 spin_unlock_irqrestore(&memcg->move_lock, flags);
1745 }
1746
1747 rcu_read_unlock();
1748}
1749EXPORT_SYMBOL(unlock_page_memcg);
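
/*
 * Typical usage sketch: stabilize the page->mem_cgroup binding around an
 * unlocked page state update, as the statistics callers do:
 *
 *	lock_page_memcg(page);
 *	... update page state that is accounted to page->mem_cgroup ...
 *	unlock_page_memcg(page);
 */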
1750
1751/*
1752 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
1753 * TODO: it may be necessary to use bigger numbers on big iron.
1754 */
1755#define CHARGE_BATCH 32U
1756struct memcg_stock_pcp {
1757 struct mem_cgroup *cached; /* this is never the root cgroup */
1758 unsigned int nr_pages;
1759 struct work_struct work;
1760 unsigned long flags;
1761#define FLUSHING_CACHED_CHARGE 0
1762};
1763static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1764static DEFINE_MUTEX(percpu_charge_mutex);
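
/*
 * Rough flow of the per-cpu stock: try_charge() first calls consume_stock();
 * on a hit the charge is served without touching the page counters. On a
 * miss it charges a larger batch (CHARGE_BATCH) to the counters and parks
 * the surplus in the local stock via refill_stock(), so subsequent charges
 * on this cpu stay cheap. drain_all_stock() gives the cached surplus back
 * when reclaim needs it.
 */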
1765
1766/**
1767 * consume_stock: Try to consume stocked charge on this cpu.
1768 * @memcg: memcg to consume from.
1769 * @nr_pages: how many pages to charge.
1770 *
1771 * The charges will only happen if @memcg matches the current cpu's memcg
1772 * stock, and at least @nr_pages are available in that stock. Failure to
1773 * service an allocation will refill the stock.
1774 *
1775 * returns true if successful, false otherwise.
1776 */
1777static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1778{
1779 struct memcg_stock_pcp *stock;
1780 bool ret = false;
1781
1782 if (nr_pages > CHARGE_BATCH)
1783 return ret;
1784
1785 stock = &get_cpu_var(memcg_stock);
1786 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
1787 stock->nr_pages -= nr_pages;
1788 ret = true;
1789 }
1790 put_cpu_var(memcg_stock);
1791 return ret;
1792}
1793
1794/*
1795 * Uncharge the stock cached in the percpu area and reset the cached information.
1796 */
1797static void drain_stock(struct memcg_stock_pcp *stock)
1798{
1799 struct mem_cgroup *old = stock->cached;
1800
1801 if (stock->nr_pages) {
1802 page_counter_uncharge(&old->memory, stock->nr_pages);
1803 if (do_memsw_account())
1804 page_counter_uncharge(&old->memsw, stock->nr_pages);
1805 css_put_many(&old->css, stock->nr_pages);
1806 stock->nr_pages = 0;
1807 }
1808 stock->cached = NULL;
1809}
1810
1811/*
1812 * This must be called with preemption disabled, or by a thread which
1813 * is pinned to the local cpu.
1814 */
1815static void drain_local_stock(struct work_struct *dummy)
1816{
1817 struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
1818 drain_stock(stock);
1819 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1820}
1821
1822/*
1823 * Cache charges (@nr_pages) in the local per-cpu area.
1824 * They will be consumed by the consume_stock() function later.
1825 */
1826static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1827{
1828 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
1829
1830 if (stock->cached != memcg) { /* reset if necessary */
1831 drain_stock(stock);
1832 stock->cached = memcg;
1833 }
1834 stock->nr_pages += nr_pages;
1835 put_cpu_var(memcg_stock);
1836}
1837
1838/*
1839 * Drains all per-CPU charge caches for the given root_memcg, i.e. the
1840 * subtree of the hierarchy under it.
1841 */
1842static void drain_all_stock(struct mem_cgroup *root_memcg)
1843{
1844 int cpu, curcpu;
1845
1846 /* If someone's already draining, avoid adding more workers. */
1847 if (!mutex_trylock(&percpu_charge_mutex))
1848 return;
1849 /* Notify other cpus that system-wide "drain" is running */
1850 get_online_cpus();
1851 curcpu = get_cpu();
1852 for_each_online_cpu(cpu) {
1853 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1854 struct mem_cgroup *memcg;
1855
1856 memcg = stock->cached;
1857 if (!memcg || !stock->nr_pages)
1858 continue;
1859 if (!mem_cgroup_is_descendant(memcg, root_memcg))
1860 continue;
1861 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1862 if (cpu == curcpu)
1863 drain_local_stock(&stock->work);
1864 else
1865 schedule_work_on(cpu, &stock->work);
1866 }
1867 }
1868 put_cpu();
1869 put_online_cpus();
1870 mutex_unlock(&percpu_charge_mutex);
1871}
1872
1873static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
1874 unsigned long action,
1875 void *hcpu)
1876{
1877 int cpu = (unsigned long)hcpu;
1878 struct memcg_stock_pcp *stock;
1879
1880 if (action == CPU_ONLINE)
1881 return NOTIFY_OK;
1882
1883 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
1884 return NOTIFY_OK;
1885
1886 stock = &per_cpu(memcg_stock, cpu);
1887 drain_stock(stock);
1888 return NOTIFY_OK;
1889}
1890
1891static void reclaim_high(struct mem_cgroup *memcg,
1892 unsigned int nr_pages,
1893 gfp_t gfp_mask)
1894{
1895 do {
1896 if (page_counter_read(&memcg->memory) <= memcg->high)
1897 continue;
1898 mem_cgroup_events(memcg, MEMCG_HIGH, 1);
1899 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
1900 } while ((memcg = parent_mem_cgroup(memcg)));
1901}
1902
1903static void high_work_func(struct work_struct *work)
1904{
1905 struct mem_cgroup *memcg;
1906
1907 memcg = container_of(work, struct mem_cgroup, high_work);
1908 reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
1909}
1910
1911/*
1912 * Scheduled by try_charge() to be executed from the userland return path
1913 * and reclaims memory over the high limit.
1914 */
1915void mem_cgroup_handle_over_high(void)
1916{
1917 unsigned int nr_pages = current->memcg_nr_pages_over_high;
1918 struct mem_cgroup *memcg;
1919
1920 if (likely(!nr_pages))
1921 return;
1922
1923 memcg = get_mem_cgroup_from_mm(current->mm);
1924 reclaim_high(memcg, nr_pages, GFP_KERNEL);
1925 css_put(&memcg->css);
1926 current->memcg_nr_pages_over_high = 0;
1927}
1928
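/*
 * Charge @nr_pages to @memcg (and to memsw when legacy memory+swap
 * accounting is active).  The fast path consumes pre-charged pages from
 * the per-CPU stock; the slow path charges the page counters in
 * CHARGE_BATCH-sized chunks and falls back to reclaim, draining the
 * per-CPU stocks, OOM handling and finally force-charging.
 */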
1929static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
1930 unsigned int nr_pages)
1931{
1932 unsigned int batch = max(CHARGE_BATCH, nr_pages);
1933 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1934 struct mem_cgroup *mem_over_limit;
1935 struct page_counter *counter;
1936 unsigned long nr_reclaimed;
1937 bool may_swap = true;
1938 bool drained = false;
1939
1940 if (mem_cgroup_is_root(memcg))
1941 return 0;
1942retry:
1943 if (consume_stock(memcg, nr_pages))
1944 return 0;
1945
1946 if (!do_memsw_account() ||
1947 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
1948 if (page_counter_try_charge(&memcg->memory, batch, &counter))
1949 goto done_restock;
1950 if (do_memsw_account())
1951 page_counter_uncharge(&memcg->memsw, batch);
1952 mem_over_limit = mem_cgroup_from_counter(counter, memory);
1953 } else {
1954 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
1955 may_swap = false;
1956 }
1957
1958 if (batch > nr_pages) {
1959 batch = nr_pages;
1960 goto retry;
1961 }
1962
1963 /*
1964 * Unlike in global OOM situations, memcg is not in a physical
1965 * memory shortage. Allow dying and OOM-killed tasks to
1966 * bypass the last charges so that they can exit quickly and
1967 * free their memory.
1968 */
1969 if (unlikely(test_thread_flag(TIF_MEMDIE) ||
1970 fatal_signal_pending(current) ||
1971 current->flags & PF_EXITING))
1972 goto force;
1973
1974 if (unlikely(task_in_memcg_oom(current)))
1975 goto nomem;
1976
1977 if (!gfpflags_allow_blocking(gfp_mask))
1978 goto nomem;
1979
1980 mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);
1981
1982 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
1983 gfp_mask, may_swap);
1984
1985 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
1986 goto retry;
1987
1988 if (!drained) {
1989 drain_all_stock(mem_over_limit);
1990 drained = true;
1991 goto retry;
1992 }
1993
1994 if (gfp_mask & __GFP_NORETRY)
1995 goto nomem;
1996 /*
1997 * Even though the limit is exceeded at this point, reclaim
1998 * may have been able to free some pages. Retry the charge
1999 * before killing the task.
2000 *
2001 * Only for regular pages, though: huge pages are rather
2002 * unlikely to succeed so close to the limit, and we fall back
2003 * to regular pages anyway in case of failure.
2004 */
2005 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2006 goto retry;
2007 /*
2008	 * During a task move, charges can temporarily be double-counted. So it's
2009	 * better to wait until the end of the move if one is in progress.
2010 */
2011 if (mem_cgroup_wait_acct_move(mem_over_limit))
2012 goto retry;
2013
2014 if (nr_retries--)
2015 goto retry;
2016
2017 if (gfp_mask & __GFP_NOFAIL)
2018 goto force;
2019
2020 if (fatal_signal_pending(current))
2021 goto force;
2022
2023 mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);
2024
2025 mem_cgroup_oom(mem_over_limit, gfp_mask,
2026 get_order(nr_pages * PAGE_SIZE));
2027nomem:
2028 if (!(gfp_mask & __GFP_NOFAIL))
2029 return -ENOMEM;
2030force:
2031 /*
2032 * The allocation either can't fail or will lead to more memory
2033	 * being freed very soon. Allow memory usage to go over the limit
2034 * temporarily by force charging it.
2035 */
2036 page_counter_charge(&memcg->memory, nr_pages);
2037 if (do_memsw_account())
2038 page_counter_charge(&memcg->memsw, nr_pages);
2039 css_get_many(&memcg->css, nr_pages);
2040
2041 return 0;
2042
2043done_restock:
2044 css_get_many(&memcg->css, batch);
2045 if (batch > nr_pages)
2046 refill_stock(memcg, batch - nr_pages);
2047
2048 /*
2049 * If the hierarchy is above the normal consumption range, schedule
2050 * reclaim on returning to userland. We can perform reclaim here
2051	 * if __GFP_RECLAIM is set, but let's always punt for simplicity and so that
2052 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2053 * not recorded as it most likely matches current's and won't
2054 * change in the meantime. As high limit is checked again before
2055 * reclaim, the cost of mismatch is negligible.
2056 */
2057 do {
2058 if (page_counter_read(&memcg->memory) > memcg->high) {
2059 /* Don't bother a random interrupted task */
2060 if (in_interrupt()) {
2061 schedule_work(&memcg->high_work);
2062 break;
2063 }
2064 current->memcg_nr_pages_over_high += batch;
2065 set_notify_resume(current);
2066 break;
2067 }
2068 } while ((memcg = parent_mem_cgroup(memcg)));
2069
2070 return 0;
2071}
2072
2073static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2074{
2075 if (mem_cgroup_is_root(memcg))
2076 return;
2077
2078 page_counter_uncharge(&memcg->memory, nr_pages);
2079 if (do_memsw_account())
2080 page_counter_uncharge(&memcg->memsw, nr_pages);
2081
2082 css_put_many(&memcg->css, nr_pages);
2083}
2084
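/*
 * Isolate @page from its LRU list (if it is on one).  zone->lru_lock is
 * taken here and stays held until unlock_page_lru(), so that
 * commit_charge() can switch page->mem_cgroup with the page off-LRU.
 */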
2085static void lock_page_lru(struct page *page, int *isolated)
2086{
2087 struct zone *zone = page_zone(page);
2088
2089 spin_lock_irq(&zone->lru_lock);
2090 if (PageLRU(page)) {
2091 struct lruvec *lruvec;
2092
2093 lruvec = mem_cgroup_page_lruvec(page, zone);
2094 ClearPageLRU(page);
2095 del_page_from_lru_list(page, lruvec, page_lru(page));
2096 *isolated = 1;
2097 } else
2098 *isolated = 0;
2099}
2100
2101static void unlock_page_lru(struct page *page, int isolated)
2102{
2103 struct zone *zone = page_zone(page);
2104
2105 if (isolated) {
2106 struct lruvec *lruvec;
2107
2108 lruvec = mem_cgroup_page_lruvec(page, zone);
2109 VM_BUG_ON_PAGE(PageLRU(page), page);
2110 SetPageLRU(page);
2111 add_page_to_lru_list(page, lruvec, page_lru(page));
2112 }
2113 spin_unlock_irq(&zone->lru_lock);
2114}
2115
2116static void commit_charge(struct page *page, struct mem_cgroup *memcg,
2117 bool lrucare)
2118{
2119 int isolated;
2120
2121 VM_BUG_ON_PAGE(page->mem_cgroup, page);
2122
2123 /*
2124	 * In some cases, e.g. SwapCache and FUSE (splice_buf->radixtree), the page
2125	 * may already be on another mem_cgroup's LRU. Take care of it.
2126 */
2127 if (lrucare)
2128 lock_page_lru(page, &isolated);
2129
2130 /*
2131 * Nobody should be changing or seriously looking at
2132 * page->mem_cgroup at this point:
2133 *
2134 * - the page is uncharged
2135 *
2136 * - the page is off-LRU
2137 *
2138 * - an anonymous fault has exclusive page access, except for
2139 * a locked page table
2140 *
2141 * - a page cache insertion, a swapin fault, or a migration
2142 * have the page locked
2143 */
2144 page->mem_cgroup = memcg;
2145
2146 if (lrucare)
2147 unlock_page_lru(page, isolated);
2148}
2149
2150#ifndef CONFIG_SLOB
2151static int memcg_alloc_cache_id(void)
2152{
2153 int id, size;
2154 int err;
2155
2156 id = ida_simple_get(&memcg_cache_ida,
2157 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2158 if (id < 0)
2159 return id;
2160
2161 if (id < memcg_nr_cache_ids)
2162 return id;
2163
2164 /*
2165 * There's no space for the new id in memcg_caches arrays,
2166 * so we have to grow them.
2167 */
2168 down_write(&memcg_cache_ids_sem);
2169
2170 size = 2 * (id + 1);
2171 if (size < MEMCG_CACHES_MIN_SIZE)
2172 size = MEMCG_CACHES_MIN_SIZE;
2173 else if (size > MEMCG_CACHES_MAX_SIZE)
2174 size = MEMCG_CACHES_MAX_SIZE;
2175
2176 err = memcg_update_all_caches(size);
2177 if (!err)
2178 err = memcg_update_all_list_lrus(size);
2179 if (!err)
2180 memcg_nr_cache_ids = size;
2181
2182 up_write(&memcg_cache_ids_sem);
2183
2184 if (err) {
2185 ida_simple_remove(&memcg_cache_ida, id);
2186 return err;
2187 }
2188 return id;
2189}
2190
2191static void memcg_free_cache_id(int id)
2192{
2193 ida_simple_remove(&memcg_cache_ida, id);
2194}
2195
2196struct memcg_kmem_cache_create_work {
2197 struct mem_cgroup *memcg;
2198 struct kmem_cache *cachep;
2199 struct work_struct work;
2200};
2201
2202static void memcg_kmem_cache_create_func(struct work_struct *w)
2203{
2204 struct memcg_kmem_cache_create_work *cw =
2205 container_of(w, struct memcg_kmem_cache_create_work, work);
2206 struct mem_cgroup *memcg = cw->memcg;
2207 struct kmem_cache *cachep = cw->cachep;
2208
2209 memcg_create_kmem_cache(memcg, cachep);
2210
2211 css_put(&memcg->css);
2212 kfree(cw);
2213}
2214
2215/*
2216 * Enqueue the creation of a per-memcg kmem_cache.
2217 */
2218static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2219 struct kmem_cache *cachep)
2220{
2221 struct memcg_kmem_cache_create_work *cw;
2222
2223 cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
2224 if (!cw)
2225 return;
2226
2227 css_get(&memcg->css);
2228
2229 cw->memcg = memcg;
2230 cw->cachep = cachep;
2231 INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
2232
2233 schedule_work(&cw->work);
2234}
2235
2236static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2237 struct kmem_cache *cachep)
2238{
2239 /*
2240 * We need to stop accounting when we kmalloc, because if the
2241 * corresponding kmalloc cache is not yet created, the first allocation
2242 * in __memcg_schedule_kmem_cache_create will recurse.
2243 *
2244 * However, it is better to enclose the whole function. Depending on
2245 * the debugging options enabled, INIT_WORK(), for instance, can
2246	 * trigger an allocation. This, too, will make us recurse. Because at
2247 * this point we can't allow ourselves back into memcg_kmem_get_cache,
2248 * the safest choice is to do it like this, wrapping the whole function.
2249 */
2250 current->memcg_kmem_skip_account = 1;
2251 __memcg_schedule_kmem_cache_create(memcg, cachep);
2252 current->memcg_kmem_skip_account = 0;
2253}
2254
2255/*
2256 * Return the kmem_cache we're supposed to use for a slab allocation.
2257 * We try to use the current memcg's version of the cache.
2258 *
2259 * If the cache does not exist yet, i.e. we are its first user,
2260 * we either create it immediately, if possible, or create it asynchronously
2261 * in a workqueue.
2262 * In the latter case, we will let the current allocation go through with
2263 * the original cache.
2264 *
2265 * Can't be called in interrupt context or from kernel threads.
2266 * This function needs to be called with rcu_read_lock() held.
2267 */
2268struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
2269{
2270 struct mem_cgroup *memcg;
2271 struct kmem_cache *memcg_cachep;
2272 int kmemcg_id;
2273
2274 VM_BUG_ON(!is_root_cache(cachep));
2275
2276 if (cachep->flags & SLAB_ACCOUNT)
2277 gfp |= __GFP_ACCOUNT;
2278
2279 if (!(gfp & __GFP_ACCOUNT))
2280 return cachep;
2281
2282 if (current->memcg_kmem_skip_account)
2283 return cachep;
2284
2285 memcg = get_mem_cgroup_from_mm(current->mm);
2286 kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2287 if (kmemcg_id < 0)
2288 goto out;
2289
2290 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
2291 if (likely(memcg_cachep))
2292 return memcg_cachep;
2293
2294 /*
2295 * If we are in a safe context (can wait, and not in interrupt
2296	 * context), we could be predictable and return right away.
2297 * This would guarantee that the allocation being performed
2298 * already belongs in the new cache.
2299 *
2300	 * However, there are some clashes that can arise from locking.
2301 * For instance, because we acquire the slab_mutex while doing
2302 * memcg_create_kmem_cache, this means no further allocation
2303 * could happen with the slab_mutex held. So it's better to
2304 * defer everything.
2305 */
2306 memcg_schedule_kmem_cache_create(memcg, cachep);
2307out:
2308 css_put(&memcg->css);
2309 return cachep;
2310}
2311
2312void __memcg_kmem_put_cache(struct kmem_cache *cachep)
2313{
2314 if (!is_root_cache(cachep))
2315 css_put(&cachep->memcg_params.memcg->css);
2316}
2317
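/*
 * Charge 1 << @order pages of kernel memory to @memcg: first the memory
 * (and memsw) counters via try_charge(), then the dedicated kmem counter
 * on the legacy hierarchy.  On success the page is tagged with the owning
 * memcg so that __memcg_kmem_uncharge() can find it later.
 */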
2318int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2319 struct mem_cgroup *memcg)
2320{
2321 unsigned int nr_pages = 1 << order;
2322 struct page_counter *counter;
2323 int ret;
2324
2325 ret = try_charge(memcg, gfp, nr_pages);
2326 if (ret)
2327 return ret;
2328
2329 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2330 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2331 cancel_charge(memcg, nr_pages);
2332 return -ENOMEM;
2333 }
2334
2335 page->mem_cgroup = memcg;
2336
2337 return 0;
2338}
2339
2340int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
2341{
2342 struct mem_cgroup *memcg;
2343 int ret = 0;
2344
2345 memcg = get_mem_cgroup_from_mm(current->mm);
2346 if (!mem_cgroup_is_root(memcg))
2347 ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
2348 css_put(&memcg->css);
2349 return ret;
2350}
2351
2352void __memcg_kmem_uncharge(struct page *page, int order)
2353{
2354 struct mem_cgroup *memcg = page->mem_cgroup;
2355 unsigned int nr_pages = 1 << order;
2356
2357 if (!memcg)
2358 return;
2359
2360 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
2361
2362 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2363 page_counter_uncharge(&memcg->kmem, nr_pages);
2364
2365 page_counter_uncharge(&memcg->memory, nr_pages);
2366 if (do_memsw_account())
2367 page_counter_uncharge(&memcg->memsw, nr_pages);
2368
2369 page->mem_cgroup = NULL;
2370 css_put_many(&memcg->css, nr_pages);
2371}
2372#endif /* !CONFIG_SLOB */
2373
2374#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2375
2376/*
2377 * Because tail pages are not marked as "used", set that up here. We're
2378 * under zone->lru_lock and migration entries are set up in all page mappings.
2379 */
2380void mem_cgroup_split_huge_fixup(struct page *head)
2381{
2382 int i;
2383
2384 if (mem_cgroup_disabled())
2385 return;
2386
2387 for (i = 1; i < HPAGE_PMD_NR; i++)
2388 head[i].mem_cgroup = head->mem_cgroup;
2389
2390 __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
2391 HPAGE_PMD_NR);
2392}
2393#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2394
2395#ifdef CONFIG_MEMCG_SWAP
2396static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
2397 bool charge)
2398{
2399 int val = (charge) ? 1 : -1;
2400 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
2401}
2402
2403/**
2404 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2405 * @entry: swap entry to be moved
2406 * @from: mem_cgroup which the entry is moved from
2407 * @to: mem_cgroup which the entry is moved to
2408 *
2409 * It succeeds only when the swap_cgroup's record for this entry is the same
2410 * as the mem_cgroup's id of @from.
2411 *
2412 * Returns 0 on success, -EINVAL on failure.
2413 *
2414 * The caller must have charged to @to, IOW, called page_counter_charge() for
2415 * both memory and memsw, and called css_get().
2416 */
2417static int mem_cgroup_move_swap_account(swp_entry_t entry,
2418 struct mem_cgroup *from, struct mem_cgroup *to)
2419{
2420 unsigned short old_id, new_id;
2421
2422 old_id = mem_cgroup_id(from);
2423 new_id = mem_cgroup_id(to);
2424
2425 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2426 mem_cgroup_swap_statistics(from, false);
2427 mem_cgroup_swap_statistics(to, true);
2428 return 0;
2429 }
2430 return -EINVAL;
2431}
2432#else
2433static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2434 struct mem_cgroup *from, struct mem_cgroup *to)
2435{
2436 return -EINVAL;
2437}
2438#endif
2439
2440static DEFINE_MUTEX(memcg_limit_mutex);
2441
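/*
 * Change the hard limit of the memory counter.  If the new limit is below
 * the current usage, reclaim from the cgroup until the limit can be set
 * or the retry budget is exhausted.
 */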
2442static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2443 unsigned long limit)
2444{
2445 unsigned long curusage;
2446 unsigned long oldusage;
2447 bool enlarge = false;
2448 int retry_count;
2449 int ret;
2450
2451 /*
2452	 * To keep hierarchical reclaim simple, how long we should retry is
2453	 * left to the caller. We set our retry count to a function of the
2454	 * number of children which we may visit in this loop.
2455 */
2456 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2457 mem_cgroup_count_children(memcg);
2458
2459 oldusage = page_counter_read(&memcg->memory);
2460
2461 do {
2462 if (signal_pending(current)) {
2463 ret = -EINTR;
2464 break;
2465 }
2466
2467 mutex_lock(&memcg_limit_mutex);
2468 if (limit > memcg->memsw.limit) {
2469 mutex_unlock(&memcg_limit_mutex);
2470 ret = -EINVAL;
2471 break;
2472 }
2473 if (limit > memcg->memory.limit)
2474 enlarge = true;
2475 ret = page_counter_limit(&memcg->memory, limit);
2476 mutex_unlock(&memcg_limit_mutex);
2477
2478 if (!ret)
2479 break;
2480
2481 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
2482
2483 curusage = page_counter_read(&memcg->memory);
2484 /* Usage is reduced ? */
2485 if (curusage >= oldusage)
2486 retry_count--;
2487 else
2488 oldusage = curusage;
2489 } while (retry_count);
2490
2491 if (!ret && enlarge)
2492 memcg_oom_recover(memcg);
2493
2494 return ret;
2495}
2496
2497static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2498 unsigned long limit)
2499{
2500 unsigned long curusage;
2501 unsigned long oldusage;
2502 bool enlarge = false;
2503 int retry_count;
2504 int ret;
2505
2506	/* see mem_cgroup_resize_limit() */
2507 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2508 mem_cgroup_count_children(memcg);
2509
2510 oldusage = page_counter_read(&memcg->memsw);
2511
2512 do {
2513 if (signal_pending(current)) {
2514 ret = -EINTR;
2515 break;
2516 }
2517
2518 mutex_lock(&memcg_limit_mutex);
2519 if (limit < memcg->memory.limit) {
2520 mutex_unlock(&memcg_limit_mutex);
2521 ret = -EINVAL;
2522 break;
2523 }
2524 if (limit > memcg->memsw.limit)
2525 enlarge = true;
2526 ret = page_counter_limit(&memcg->memsw, limit);
2527 mutex_unlock(&memcg_limit_mutex);
2528
2529 if (!ret)
2530 break;
2531
2532 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
2533
2534 curusage = page_counter_read(&memcg->memsw);
2535 /* Usage is reduced ? */
2536 if (curusage >= oldusage)
2537 retry_count--;
2538 else
2539 oldusage = curusage;
2540 } while (retry_count);
2541
2542 if (!ret && enlarge)
2543 memcg_oom_recover(memcg);
2544
2545 return ret;
2546}
2547
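/*
 * Reclaim from the memcgs that exceed their soft limit the most: pick the
 * largest-excess memcg from the per-zone soft limit tree, reclaim from it
 * and reinsert it with its updated excess.  Returns the number of pages
 * reclaimed; *total_scanned is incremented by the number of pages scanned.
 */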
2548unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2549 gfp_t gfp_mask,
2550 unsigned long *total_scanned)
2551{
2552 unsigned long nr_reclaimed = 0;
2553 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2554 unsigned long reclaimed;
2555 int loop = 0;
2556 struct mem_cgroup_tree_per_zone *mctz;
2557 unsigned long excess;
2558 unsigned long nr_scanned;
2559
2560 if (order > 0)
2561 return 0;
2562
2563 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
2564 /*
2565	 * This loop can run for a while, especially if mem_cgroups continuously
2566	 * keep exceeding their soft limit and putting the system under
2567	 * pressure.
2568 */
2569 do {
2570 if (next_mz)
2571 mz = next_mz;
2572 else
2573 mz = mem_cgroup_largest_soft_limit_node(mctz);
2574 if (!mz)
2575 break;
2576
2577 nr_scanned = 0;
2578 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
2579 gfp_mask, &nr_scanned);
2580 nr_reclaimed += reclaimed;
2581 *total_scanned += nr_scanned;
2582 spin_lock_irq(&mctz->lock);
2583 __mem_cgroup_remove_exceeded(mz, mctz);
2584
2585 /*
2586 * If we failed to reclaim anything from this memory cgroup
2587 * it is time to move on to the next cgroup
2588 */
2589 next_mz = NULL;
2590 if (!reclaimed)
2591 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2592
2593 excess = soft_limit_excess(mz->memcg);
2594 /*
2595 * One school of thought says that we should not add
2596 * back the node to the tree if reclaim returns 0.
2597		 * But our reclaim could return 0 simply because, due
2598		 * to the scan priority, we are exposing a smaller subset of
2599		 * memory to reclaim from. Consider this as a longer
2600 * term TODO.
2601 */
2602 /* If excess == 0, no tree ops */
2603 __mem_cgroup_insert_exceeded(mz, mctz, excess);
2604 spin_unlock_irq(&mctz->lock);
2605 css_put(&mz->memcg->css);
2606 loop++;
2607 /*
2608 * Could not reclaim anything and there are no more
2609 * mem cgroups to try or we seem to be looping without
2610 * reclaiming anything.
2611 */
2612 if (!nr_reclaimed &&
2613 (next_mz == NULL ||
2614 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2615 break;
2616 } while (!nr_reclaimed);
2617 if (next_mz)
2618 css_put(&next_mz->memcg->css);
2619 return nr_reclaimed;
2620}
2621
2622/*
2623 * Test whether @memcg has children, dead or alive. Note that this
2624 * function doesn't care whether @memcg has use_hierarchy enabled and
2625 * returns %true if there are child csses according to the cgroup
2626 * hierarchy. Testing use_hierarchy is the caller's responsiblity.
2627 */
2628static inline bool memcg_has_children(struct mem_cgroup *memcg)
2629{
2630 bool ret;
2631
2632 rcu_read_lock();
2633 ret = css_next_child(NULL, &memcg->css);
2634 rcu_read_unlock();
2635 return ret;
2636}
2637
2638/*
2639 * Reclaims as many pages from the given memcg as possible and moves
2640 * the rest to the parent.
2641 *
2642 * Caller is responsible for holding css reference for memcg.
2643 */
2644static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2645{
2646 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2647
2648	/* we call try-to-free pages to make this cgroup empty */
2649 lru_add_drain_all();
2650 /* try to free all pages in this cgroup */
2651 while (nr_retries && page_counter_read(&memcg->memory)) {
2652 int progress;
2653
2654 if (signal_pending(current))
2655 return -EINTR;
2656
2657 progress = try_to_free_mem_cgroup_pages(memcg, 1,
2658 GFP_KERNEL, true);
2659 if (!progress) {
2660 nr_retries--;
2661 /* maybe some writeback is necessary */
2662 congestion_wait(BLK_RW_ASYNC, HZ/10);
2663 }
2664
2665 }
2666
2667 return 0;
2668}
2669
2670static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2671 char *buf, size_t nbytes,
2672 loff_t off)
2673{
2674 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2675
2676 if (mem_cgroup_is_root(memcg))
2677 return -EINVAL;
2678 return mem_cgroup_force_empty(memcg) ?: nbytes;
2679}
2680
2681static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2682 struct cftype *cft)
2683{
2684 return mem_cgroup_from_css(css)->use_hierarchy;
2685}
2686
2687static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2688 struct cftype *cft, u64 val)
2689{
2690 int retval = 0;
2691 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2692 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
2693
2694 if (memcg->use_hierarchy == val)
2695 return 0;
2696
2697 /*
2698 * If parent's use_hierarchy is set, we can't make any modifications
2699 * in the child subtrees. If it is unset, then the change can
2700 * occur, provided the current cgroup has no children.
2701 *
2702	 * For the root cgroup, parent_memcg is NULL; we allow the value to be
2703 * set if there are no children.
2704 */
2705 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
2706 (val == 1 || val == 0)) {
2707 if (!memcg_has_children(memcg))
2708 memcg->use_hierarchy = val;
2709 else
2710 retval = -EBUSY;
2711 } else
2712 retval = -EINVAL;
2713
2714 return retval;
2715}
2716
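/*
 * tree_stat() and tree_events() sum each statistics/event counter over
 * @memcg and all of its descendants.
 */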
2717static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
2718{
2719 struct mem_cgroup *iter;
2720 int i;
2721
2722 memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT);
2723
2724 for_each_mem_cgroup_tree(iter, memcg) {
2725 for (i = 0; i < MEMCG_NR_STAT; i++)
2726 stat[i] += mem_cgroup_read_stat(iter, i);
2727 }
2728}
2729
2730static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
2731{
2732 struct mem_cgroup *iter;
2733 int i;
2734
2735 memset(events, 0, sizeof(*events) * MEMCG_NR_EVENTS);
2736
2737 for_each_mem_cgroup_tree(iter, memcg) {
2738 for (i = 0; i < MEMCG_NR_EVENTS; i++)
2739 events[i] += mem_cgroup_read_events(iter, i);
2740 }
2741}
2742
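/*
 * Return the current usage of @memcg in pages.  The root cgroup has no
 * page counter of its own, so its usage is the sum of the cache, rss and
 * (optionally) swap statistics over the whole hierarchy.
 */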
2743static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
2744{
2745 unsigned long val = 0;
2746
2747 if (mem_cgroup_is_root(memcg)) {
2748 struct mem_cgroup *iter;
2749
2750 for_each_mem_cgroup_tree(iter, memcg) {
2751 val += mem_cgroup_read_stat(iter,
2752 MEM_CGROUP_STAT_CACHE);
2753 val += mem_cgroup_read_stat(iter,
2754 MEM_CGROUP_STAT_RSS);
2755 if (swap)
2756 val += mem_cgroup_read_stat(iter,
2757 MEM_CGROUP_STAT_SWAP);
2758 }
2759 } else {
2760 if (!swap)
2761 val = page_counter_read(&memcg->memory);
2762 else
2763 val = page_counter_read(&memcg->memsw);
2764 }
2765 return val;
2766}
2767
2768enum {
2769 RES_USAGE,
2770 RES_LIMIT,
2771 RES_MAX_USAGE,
2772 RES_FAILCNT,
2773 RES_SOFT_LIMIT,
2774};
2775
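/*
 * Common read handler for the legacy control files; the counter type and
 * attribute are unpacked from cft->private (see MEMFILE_PRIVATE()).
 * All values except failcnt are reported in bytes.
 */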
2776static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
2777 struct cftype *cft)
2778{
2779 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2780 struct page_counter *counter;
2781
2782 switch (MEMFILE_TYPE(cft->private)) {
2783 case _MEM:
2784 counter = &memcg->memory;
2785 break;
2786 case _MEMSWAP:
2787 counter = &memcg->memsw;
2788 break;
2789 case _KMEM:
2790 counter = &memcg->kmem;
2791 break;
2792 case _TCP:
2793 counter = &memcg->tcpmem;
2794 break;
2795 default:
2796 BUG();
2797 }
2798
2799 switch (MEMFILE_ATTR(cft->private)) {
2800 case RES_USAGE:
2801 if (counter == &memcg->memory)
2802 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
2803 if (counter == &memcg->memsw)
2804 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
2805 return (u64)page_counter_read(counter) * PAGE_SIZE;
2806 case RES_LIMIT:
2807 return (u64)counter->limit * PAGE_SIZE;
2808 case RES_MAX_USAGE:
2809 return (u64)counter->watermark * PAGE_SIZE;
2810 case RES_FAILCNT:
2811 return counter->failcnt;
2812 case RES_SOFT_LIMIT:
2813 return (u64)memcg->soft_limit * PAGE_SIZE;
2814 default:
2815 BUG();
2816 }
2817}
2818
2819#ifndef CONFIG_SLOB
2820static int memcg_online_kmem(struct mem_cgroup *memcg)
2821{
2822 int memcg_id;
2823
2824 if (cgroup_memory_nokmem)
2825 return 0;
2826
2827 BUG_ON(memcg->kmemcg_id >= 0);
2828 BUG_ON(memcg->kmem_state);
2829
2830 memcg_id = memcg_alloc_cache_id();
2831 if (memcg_id < 0)
2832 return memcg_id;
2833
2834 static_branch_inc(&memcg_kmem_enabled_key);
2835 /*
2836 * A memory cgroup is considered kmem-online as soon as it gets
2837 * kmemcg_id. Setting the id after enabling static branching will
2838 * guarantee no one starts accounting before all call sites are
2839 * patched.
2840 */
2841 memcg->kmemcg_id = memcg_id;
2842 memcg->kmem_state = KMEM_ONLINE;
2843
2844 return 0;
2845}
2846
2847static void memcg_offline_kmem(struct mem_cgroup *memcg)
2848{
2849 struct cgroup_subsys_state *css;
2850 struct mem_cgroup *parent, *child;
2851 int kmemcg_id;
2852
2853 if (memcg->kmem_state != KMEM_ONLINE)
2854 return;
2855 /*
2856 * Clear the online state before clearing memcg_caches array
2857 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
2858 * guarantees that no cache will be created for this cgroup
2859 * after we are done (see memcg_create_kmem_cache()).
2860 */
2861 memcg->kmem_state = KMEM_ALLOCATED;
2862
2863 memcg_deactivate_kmem_caches(memcg);
2864
2865 kmemcg_id = memcg->kmemcg_id;
2866 BUG_ON(kmemcg_id < 0);
2867
2868 parent = parent_mem_cgroup(memcg);
2869 if (!parent)
2870 parent = root_mem_cgroup;
2871
2872 /*
2873 * Change kmemcg_id of this cgroup and all its descendants to the
2874 * parent's id, and then move all entries from this cgroup's list_lrus
2875 * to ones of the parent. After we have finished, all list_lrus
2876 * corresponding to this cgroup are guaranteed to remain empty. The
2877 * ordering is imposed by list_lru_node->lock taken by
2878 * memcg_drain_all_list_lrus().
2879 */
2880 css_for_each_descendant_pre(css, &memcg->css) {
2881 child = mem_cgroup_from_css(css);
2882 BUG_ON(child->kmemcg_id != kmemcg_id);
2883 child->kmemcg_id = parent->kmemcg_id;
2884 if (!memcg->use_hierarchy)
2885 break;
2886 }
2887 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
2888
2889 memcg_free_cache_id(kmemcg_id);
2890}
2891
2892static void memcg_free_kmem(struct mem_cgroup *memcg)
2893{
2894 /* css_alloc() failed, offlining didn't happen */
2895 if (unlikely(memcg->kmem_state == KMEM_ONLINE))
2896 memcg_offline_kmem(memcg);
2897
2898 if (memcg->kmem_state == KMEM_ALLOCATED) {
2899 memcg_destroy_kmem_caches(memcg);
2900 static_branch_dec(&memcg_kmem_enabled_key);
2901 WARN_ON(page_counter_read(&memcg->kmem));
2902 }
2903}
2904#else
2905static int memcg_online_kmem(struct mem_cgroup *memcg)
2906{
2907 return 0;
2908}
2909static void memcg_offline_kmem(struct mem_cgroup *memcg)
2910{
2911}
2912static void memcg_free_kmem(struct mem_cgroup *memcg)
2913{
2914}
2915#endif /* !CONFIG_SLOB */
2916
2917static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
2918 unsigned long limit)
2919{
2920 int ret;
2921
2922 mutex_lock(&memcg_limit_mutex);
2923 ret = page_counter_limit(&memcg->kmem, limit);
2924 mutex_unlock(&memcg_limit_mutex);
2925 return ret;
2926}
2927
2928static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
2929{
2930 int ret;
2931
2932 mutex_lock(&memcg_limit_mutex);
2933
2934 ret = page_counter_limit(&memcg->tcpmem, limit);
2935 if (ret)
2936 goto out;
2937
2938 if (!memcg->tcpmem_active) {
2939 /*
2940 * The active flag needs to be written after the static_key
2941 * update. This is what guarantees that the socket activation
2942 * function is the last one to run. See sock_update_memcg() for
2943 * details, and note that we don't mark any socket as belonging
2944 * to this memcg until that flag is up.
2945 *
2946 * We need to do this, because static_keys will span multiple
2947 * sites, but we can't control their order. If we mark a socket
2948 * as accounted, but the accounting functions are not patched in
2949 * yet, we'll lose accounting.
2950 *
2951 * We never race with the readers in sock_update_memcg(),
2952 * because when this value change, the code to process it is not
2953 * patched in yet.
2954 */
2955 static_branch_inc(&memcg_sockets_enabled_key);
2956 memcg->tcpmem_active = true;
2957 }
2958out:
2959 mutex_unlock(&memcg_limit_mutex);
2960 return ret;
2961}
2962
2963/*
2964 * The user of this function is...
2965 * RES_LIMIT.
2966 */
2967static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
2968 char *buf, size_t nbytes, loff_t off)
2969{
2970 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2971 unsigned long nr_pages;
2972 int ret;
2973
2974 buf = strstrip(buf);
2975 ret = page_counter_memparse(buf, "-1", &nr_pages);
2976 if (ret)
2977 return ret;
2978
2979 switch (MEMFILE_ATTR(of_cft(of)->private)) {
2980 case RES_LIMIT:
2981 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
2982 ret = -EINVAL;
2983 break;
2984 }
2985 switch (MEMFILE_TYPE(of_cft(of)->private)) {
2986 case _MEM:
2987 ret = mem_cgroup_resize_limit(memcg, nr_pages);
2988 break;
2989 case _MEMSWAP:
2990 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
2991 break;
2992 case _KMEM:
2993 ret = memcg_update_kmem_limit(memcg, nr_pages);
2994 break;
2995 case _TCP:
2996 ret = memcg_update_tcp_limit(memcg, nr_pages);
2997 break;
2998 }
2999 break;
3000 case RES_SOFT_LIMIT:
3001 memcg->soft_limit = nr_pages;
3002 ret = 0;
3003 break;
3004 }
3005 return ret ?: nbytes;
3006}
3007
3008static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3009 size_t nbytes, loff_t off)
3010{
3011 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3012 struct page_counter *counter;
3013
3014 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3015 case _MEM:
3016 counter = &memcg->memory;
3017 break;
3018 case _MEMSWAP:
3019 counter = &memcg->memsw;
3020 break;
3021 case _KMEM:
3022 counter = &memcg->kmem;
3023 break;
3024 case _TCP:
3025 counter = &memcg->tcpmem;
3026 break;
3027 default:
3028 BUG();
3029 }
3030
3031 switch (MEMFILE_ATTR(of_cft(of)->private)) {
3032 case RES_MAX_USAGE:
3033 page_counter_reset_watermark(counter);
3034 break;
3035 case RES_FAILCNT:
3036 counter->failcnt = 0;
3037 break;
3038 default:
3039 BUG();
3040 }
3041
3042 return nbytes;
3043}
3044
3045static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3046 struct cftype *cft)
3047{
3048 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3049}
3050
3051#ifdef CONFIG_MMU
3052static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3053 struct cftype *cft, u64 val)
3054{
3055 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3056
3057 if (val & ~MOVE_MASK)
3058 return -EINVAL;
3059
3060 /*
3061 * No kind of locking is needed in here, because ->can_attach() will
3062	 * check this value once at the beginning of the process, and then carry
3063 * on with stale data. This means that changes to this value will only
3064 * affect task migrations starting after the change.
3065 */
3066 memcg->move_charge_at_immigrate = val;
3067 return 0;
3068}
3069#else
3070static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3071 struct cftype *cft, u64 val)
3072{
3073 return -ENOSYS;
3074}
3075#endif
3076
3077#ifdef CONFIG_NUMA
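/*
 * memory.numa_stat: for each LRU mask, print the total followed by the
 * per-node breakdown, then repeat with hierarchical totals, e.g.
 *	total=1234 N0=1000 N1=234
 */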
3078static int memcg_numa_stat_show(struct seq_file *m, void *v)
3079{
3080 struct numa_stat {
3081 const char *name;
3082 unsigned int lru_mask;
3083 };
3084
3085 static const struct numa_stat stats[] = {
3086 { "total", LRU_ALL },
3087 { "file", LRU_ALL_FILE },
3088 { "anon", LRU_ALL_ANON },
3089 { "unevictable", BIT(LRU_UNEVICTABLE) },
3090 };
3091 const struct numa_stat *stat;
3092 int nid;
3093 unsigned long nr;
3094 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3095
3096 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3097 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3098 seq_printf(m, "%s=%lu", stat->name, nr);
3099 for_each_node_state(nid, N_MEMORY) {
3100 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3101 stat->lru_mask);
3102 seq_printf(m, " N%d=%lu", nid, nr);
3103 }
3104 seq_putc(m, '\n');
3105 }
3106
3107 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3108 struct mem_cgroup *iter;
3109
3110 nr = 0;
3111 for_each_mem_cgroup_tree(iter, memcg)
3112 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3113 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3114 for_each_node_state(nid, N_MEMORY) {
3115 nr = 0;
3116 for_each_mem_cgroup_tree(iter, memcg)
3117 nr += mem_cgroup_node_nr_lru_pages(
3118 iter, nid, stat->lru_mask);
3119 seq_printf(m, " N%d=%lu", nid, nr);
3120 }
3121 seq_putc(m, '\n');
3122 }
3123
3124 return 0;
3125}
3126#endif /* CONFIG_NUMA */
3127
3128static int memcg_stat_show(struct seq_file *m, void *v)
3129{
3130 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3131 unsigned long memory, memsw;
3132 struct mem_cgroup *mi;
3133 unsigned int i;
3134
3135 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
3136 MEM_CGROUP_STAT_NSTATS);
3137 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
3138 MEM_CGROUP_EVENTS_NSTATS);
3139 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3140
3141 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3142 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
3143 continue;
3144 seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
3145 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
3146 }
3147
3148 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
3149 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
3150 mem_cgroup_read_events(memcg, i));
3151
3152 for (i = 0; i < NR_LRU_LISTS; i++)
3153 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3154 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
3155
3156 /* Hierarchical information */
3157 memory = memsw = PAGE_COUNTER_MAX;
3158 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3159 memory = min(memory, mi->memory.limit);
3160 memsw = min(memsw, mi->memsw.limit);
3161 }
3162 seq_printf(m, "hierarchical_memory_limit %llu\n",
3163 (u64)memory * PAGE_SIZE);
3164 if (do_memsw_account())
3165 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3166 (u64)memsw * PAGE_SIZE);
3167
3168 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3169 unsigned long long val = 0;
3170
3171 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
3172 continue;
3173 for_each_mem_cgroup_tree(mi, memcg)
3174 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
3175 seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
3176 }
3177
3178 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
3179 unsigned long long val = 0;
3180
3181 for_each_mem_cgroup_tree(mi, memcg)
3182 val += mem_cgroup_read_events(mi, i);
3183 seq_printf(m, "total_%s %llu\n",
3184 mem_cgroup_events_names[i], val);
3185 }
3186
3187 for (i = 0; i < NR_LRU_LISTS; i++) {
3188 unsigned long long val = 0;
3189
3190 for_each_mem_cgroup_tree(mi, memcg)
3191 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3192 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
3193 }
3194
3195#ifdef CONFIG_DEBUG_VM
3196 {
3197 int nid, zid;
3198 struct mem_cgroup_per_zone *mz;
3199 struct zone_reclaim_stat *rstat;
3200 unsigned long recent_rotated[2] = {0, 0};
3201 unsigned long recent_scanned[2] = {0, 0};
3202
3203 for_each_online_node(nid)
3204 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
3205 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
3206 rstat = &mz->lruvec.reclaim_stat;
3207
3208 recent_rotated[0] += rstat->recent_rotated[0];
3209 recent_rotated[1] += rstat->recent_rotated[1];
3210 recent_scanned[0] += rstat->recent_scanned[0];
3211 recent_scanned[1] += rstat->recent_scanned[1];
3212 }
3213 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3214 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3215 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3216 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
3217 }
3218#endif
3219
3220 return 0;
3221}
3222
3223static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3224 struct cftype *cft)
3225{
3226 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3227
3228 return mem_cgroup_swappiness(memcg);
3229}
3230
3231static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3232 struct cftype *cft, u64 val)
3233{
3234 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3235
3236 if (val > 100)
3237 return -EINVAL;
3238
3239 if (css->parent)
3240 memcg->swappiness = val;
3241 else
3242 vm_swappiness = val;
3243
3244 return 0;
3245}
3246
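/*
 * Signal every eventfd whose threshold lies between the usage seen at the
 * previous invocation and the usage seen now.  E.g. with thresholds
 * {4M, 8M, 16M} and usage going from 5M to 20M, the 8M and 16M eventfds
 * are signalled and current_threshold ends up at the 16M entry.
 */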
3247static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3248{
3249 struct mem_cgroup_threshold_ary *t;
3250 unsigned long usage;
3251 int i;
3252
3253 rcu_read_lock();
3254 if (!swap)
3255 t = rcu_dereference(memcg->thresholds.primary);
3256 else
3257 t = rcu_dereference(memcg->memsw_thresholds.primary);
3258
3259 if (!t)
3260 goto unlock;
3261
3262 usage = mem_cgroup_usage(memcg, swap);
3263
3264 /*
3265 * current_threshold points to threshold just below or equal to usage.
3266	 * If that is not true, a threshold was crossed after the last
3267 * call of __mem_cgroup_threshold().
3268 */
3269 i = t->current_threshold;
3270
3271 /*
3272 * Iterate backward over array of thresholds starting from
3273 * current_threshold and check if a threshold is crossed.
3274	 * If none of the thresholds below usage is crossed, we read
3275 * only one element of the array here.
3276 */
3277 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3278 eventfd_signal(t->entries[i].eventfd, 1);
3279
3280 /* i = current_threshold + 1 */
3281 i++;
3282
3283 /*
3284 * Iterate forward over array of thresholds starting from
3285 * current_threshold+1 and check if a threshold is crossed.
3286	 * If none of the thresholds above usage is crossed, we read
3287 * only one element of the array here.
3288 */
3289 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3290 eventfd_signal(t->entries[i].eventfd, 1);
3291
3292 /* Update current_threshold */
3293 t->current_threshold = i - 1;
3294unlock:
3295 rcu_read_unlock();
3296}
3297
3298static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3299{
3300 while (memcg) {
3301 __mem_cgroup_threshold(memcg, false);
3302 if (do_memsw_account())
3303 __mem_cgroup_threshold(memcg, true);
3304
3305 memcg = parent_mem_cgroup(memcg);
3306 }
3307}
3308
3309static int compare_thresholds(const void *a, const void *b)
3310{
3311 const struct mem_cgroup_threshold *_a = a;
3312 const struct mem_cgroup_threshold *_b = b;
3313
3314 if (_a->threshold > _b->threshold)
3315 return 1;
3316
3317 if (_a->threshold < _b->threshold)
3318 return -1;
3319
3320 return 0;
3321}
3322
3323static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
3324{
3325 struct mem_cgroup_eventfd_list *ev;
3326
3327 spin_lock(&memcg_oom_lock);
3328
3329 list_for_each_entry(ev, &memcg->oom_notify, list)
3330 eventfd_signal(ev->eventfd, 1);
3331
3332 spin_unlock(&memcg_oom_lock);
3333 return 0;
3334}
3335
3336static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
3337{
3338 struct mem_cgroup *iter;
3339
3340 for_each_mem_cgroup_tree(iter, memcg)
3341 mem_cgroup_oom_notify_cb(iter);
3342}
3343
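/*
 * Register @eventfd to be signalled when usage crosses @args (a byte value
 * parsed by page_counter_memparse()).  The thresholds are kept in an
 * RCU-protected, sorted array that is rebuilt on every change.
 */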
3344static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3345 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
3346{
3347 struct mem_cgroup_thresholds *thresholds;
3348 struct mem_cgroup_threshold_ary *new;
3349 unsigned long threshold;
3350 unsigned long usage;
3351 int i, size, ret;
3352
3353 ret = page_counter_memparse(args, "-1", &threshold);
3354 if (ret)
3355 return ret;
3356
3357 mutex_lock(&memcg->thresholds_lock);
3358
3359 if (type == _MEM) {
3360 thresholds = &memcg->thresholds;
3361 usage = mem_cgroup_usage(memcg, false);
3362 } else if (type == _MEMSWAP) {
3363 thresholds = &memcg->memsw_thresholds;
3364 usage = mem_cgroup_usage(memcg, true);
3365 } else
3366 BUG();
3367
3368 /* Check if a threshold crossed before adding a new one */
3369 if (thresholds->primary)
3370 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3371
3372 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3373
3374 /* Allocate memory for new array of thresholds */
3375 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3376 GFP_KERNEL);
3377 if (!new) {
3378 ret = -ENOMEM;
3379 goto unlock;
3380 }
3381 new->size = size;
3382
3383 /* Copy thresholds (if any) to new array */
3384 if (thresholds->primary) {
3385 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3386 sizeof(struct mem_cgroup_threshold));
3387 }
3388
3389 /* Add new threshold */
3390 new->entries[size - 1].eventfd = eventfd;
3391 new->entries[size - 1].threshold = threshold;
3392
3393	/* Sort thresholds. Registering a new threshold isn't time-critical */
3394 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3395 compare_thresholds, NULL);
3396
3397 /* Find current threshold */
3398 new->current_threshold = -1;
3399 for (i = 0; i < size; i++) {
3400 if (new->entries[i].threshold <= usage) {
3401 /*
3402 * new->current_threshold will not be used until
3403 * rcu_assign_pointer(), so it's safe to increment
3404 * it here.
3405 */
3406 ++new->current_threshold;
3407 } else
3408 break;
3409 }
3410
3411 /* Free old spare buffer and save old primary buffer as spare */
3412 kfree(thresholds->spare);
3413 thresholds->spare = thresholds->primary;
3414
3415 rcu_assign_pointer(thresholds->primary, new);
3416
3417 /* To be sure that nobody uses thresholds */
3418 synchronize_rcu();
3419
3420unlock:
3421 mutex_unlock(&memcg->thresholds_lock);
3422
3423 return ret;
3424}
3425
3426static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3427 struct eventfd_ctx *eventfd, const char *args)
3428{
3429 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
3430}
3431
3432static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
3433 struct eventfd_ctx *eventfd, const char *args)
3434{
3435 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
3436}
3437
3438static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3439 struct eventfd_ctx *eventfd, enum res_type type)
3440{
3441 struct mem_cgroup_thresholds *thresholds;
3442 struct mem_cgroup_threshold_ary *new;
3443 unsigned long usage;
3444 int i, j, size;
3445
3446 mutex_lock(&memcg->thresholds_lock);
3447
3448 if (type == _MEM) {
3449 thresholds = &memcg->thresholds;
3450 usage = mem_cgroup_usage(memcg, false);
3451 } else if (type == _MEMSWAP) {
3452 thresholds = &memcg->memsw_thresholds;
3453 usage = mem_cgroup_usage(memcg, true);
3454 } else
3455 BUG();
3456
3457 if (!thresholds->primary)
3458 goto unlock;
3459
3460 /* Check if a threshold crossed before removing */
3461 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3462
3463	/* Calculate the new number of thresholds */
3464 size = 0;
3465 for (i = 0; i < thresholds->primary->size; i++) {
3466 if (thresholds->primary->entries[i].eventfd != eventfd)
3467 size++;
3468 }
3469
3470 new = thresholds->spare;
3471
3472 /* Set thresholds array to NULL if we don't have thresholds */
3473 if (!size) {
3474 kfree(new);
3475 new = NULL;
3476 goto swap_buffers;
3477 }
3478
3479 new->size = size;
3480
3481 /* Copy thresholds and find current threshold */
3482 new->current_threshold = -1;
3483 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3484 if (thresholds->primary->entries[i].eventfd == eventfd)
3485 continue;
3486
3487 new->entries[j] = thresholds->primary->entries[i];
3488 if (new->entries[j].threshold <= usage) {
3489 /*
3490 * new->current_threshold will not be used
3491 * until rcu_assign_pointer(), so it's safe to increment
3492 * it here.
3493 */
3494 ++new->current_threshold;
3495 }
3496 j++;
3497 }
3498
3499swap_buffers:
3500 /* Swap primary and spare array */
3501 thresholds->spare = thresholds->primary;
3502
3503 rcu_assign_pointer(thresholds->primary, new);
3504
3505 /* To be sure that nobody uses thresholds */
3506 synchronize_rcu();
3507
3508 /* If all events are unregistered, free the spare array */
3509 if (!new) {
3510 kfree(thresholds->spare);
3511 thresholds->spare = NULL;
3512 }
3513unlock:
3514 mutex_unlock(&memcg->thresholds_lock);
3515}
3516
3517static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3518 struct eventfd_ctx *eventfd)
3519{
3520 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
3521}
3522
3523static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3524 struct eventfd_ctx *eventfd)
3525{
3526 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
3527}
3528
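/*
 * Register @eventfd to be signalled on every OOM event in @memcg.  If the
 * group is already under OOM, the eventfd is signalled right away.
 */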
3529static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
3530 struct eventfd_ctx *eventfd, const char *args)
3531{
3532 struct mem_cgroup_eventfd_list *event;
3533
3534 event = kmalloc(sizeof(*event), GFP_KERNEL);
3535 if (!event)
3536 return -ENOMEM;
3537
3538 spin_lock(&memcg_oom_lock);
3539
3540 event->eventfd = eventfd;
3541 list_add(&event->list, &memcg->oom_notify);
3542
3543 /* already in OOM ? */
3544 if (memcg->under_oom)
3545 eventfd_signal(eventfd, 1);
3546 spin_unlock(&memcg_oom_lock);
3547
3548 return 0;
3549}
3550
3551static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
3552 struct eventfd_ctx *eventfd)
3553{
3554 struct mem_cgroup_eventfd_list *ev, *tmp;
3555
3556 spin_lock(&memcg_oom_lock);
3557
3558 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
3559 if (ev->eventfd == eventfd) {
3560 list_del(&ev->list);
3561 kfree(ev);
3562 }
3563 }
3564
3565 spin_unlock(&memcg_oom_lock);
3566}
3567
3568static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3569{
3570 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3571
3572 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
3573 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
3574 return 0;
3575}
3576
3577static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3578 struct cftype *cft, u64 val)
3579{
3580 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3581
3582	/* cannot be set on the root cgroup; only 0 and 1 are allowed */
3583 if (!css->parent || !((val == 0) || (val == 1)))
3584 return -EINVAL;
3585
3586 memcg->oom_kill_disable = val;
3587 if (!val)
3588 memcg_oom_recover(memcg);
3589
3590 return 0;
3591}
3592
3593#ifdef CONFIG_CGROUP_WRITEBACK
3594
3595struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
3596{
3597 return &memcg->cgwb_list;
3598}
3599
3600static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3601{
3602 return wb_domain_init(&memcg->cgwb_domain, gfp);
3603}
3604
3605static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3606{
3607 wb_domain_exit(&memcg->cgwb_domain);
3608}
3609
3610static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3611{
3612 wb_domain_size_changed(&memcg->cgwb_domain);
3613}
3614
3615struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3616{
3617 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3618
3619 if (!memcg->css.parent)
3620 return NULL;
3621
3622 return &memcg->cgwb_domain;
3623}
3624
3625/**
3626 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3627 * @wb: bdi_writeback in question
3628 * @pfilepages: out parameter for number of file pages
3629 * @pheadroom: out parameter for number of allocatable pages according to memcg
3630 * @pdirty: out parameter for number of dirty pages
3631 * @pwriteback: out parameter for number of pages under writeback
3632 *
3633 * Determine the numbers of file, headroom, dirty, and writeback pages in
3634 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3635 * is a bit more involved.
3636 *
3637 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3638 * headroom is calculated as the lowest headroom of itself and the
3639 * ancestors. Note that this doesn't consider the actual amount of
3640 * available memory in the system. The caller should further cap
3641 * *@pheadroom accordingly.
3642 */
3643void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3644 unsigned long *pheadroom, unsigned long *pdirty,
3645 unsigned long *pwriteback)
3646{
3647 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3648 struct mem_cgroup *parent;
3649
3650 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
3651
3652 /* this should eventually include NR_UNSTABLE_NFS */
3653 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
3654 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3655 (1 << LRU_ACTIVE_FILE));
3656 *pheadroom = PAGE_COUNTER_MAX;
3657
3658 while ((parent = parent_mem_cgroup(memcg))) {
3659 unsigned long ceiling = min(memcg->memory.limit, memcg->high);
3660 unsigned long used = page_counter_read(&memcg->memory);
3661
3662 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3663 memcg = parent;
3664 }
3665}
3666
3667#else /* CONFIG_CGROUP_WRITEBACK */
3668
3669static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3670{
3671 return 0;
3672}
3673
3674static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3675{
3676}
3677
3678static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3679{
3680}
3681
3682#endif /* CONFIG_CGROUP_WRITEBACK */
3683
3684/*
3685 * DO NOT USE IN NEW FILES.
3686 *
3687 * "cgroup.event_control" implementation.
3688 *
3689 * This is way over-engineered. It tries to support fully configurable
3690 * events for each user. Such level of flexibility is completely
3691 * unnecessary especially in the light of the planned unified hierarchy.
3692 *
3693 * Please deprecate this and replace with something simpler if at all
3694 * possible.
3695 */
3696
3697/*
3698 * Unregister event and free resources.
3699 *
3700 * Gets called from workqueue.
3701 */
3702static void memcg_event_remove(struct work_struct *work)
3703{
3704 struct mem_cgroup_event *event =
3705 container_of(work, struct mem_cgroup_event, remove);
3706 struct mem_cgroup *memcg = event->memcg;
3707
3708 remove_wait_queue(event->wqh, &event->wait);
3709
3710 event->unregister_event(memcg, event->eventfd);
3711
3712 /* Notify userspace the event is going away. */
3713 eventfd_signal(event->eventfd, 1);
3714
3715 eventfd_ctx_put(event->eventfd);
3716 kfree(event);
3717 css_put(&memcg->css);
3718}
3719
3720/*
3721 * Gets called on POLLHUP on eventfd when user closes it.
3722 *
3723 * Called with wqh->lock held and interrupts disabled.
3724 */
3725static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
3726 int sync, void *key)
3727{
3728 struct mem_cgroup_event *event =
3729 container_of(wait, struct mem_cgroup_event, wait);
3730 struct mem_cgroup *memcg = event->memcg;
3731 unsigned long flags = (unsigned long)key;
3732
3733 if (flags & POLLHUP) {
3734 /*
3735 * If the event has been detached at cgroup removal, we
3736 * can simply return knowing the other side will cleanup
3737 * for us.
3738 *
3739 * We can't race against event freeing since the other
3740 * side will require wqh->lock via remove_wait_queue(),
3741 * which we hold.
3742 */
3743 spin_lock(&memcg->event_list_lock);
3744 if (!list_empty(&event->list)) {
3745 list_del_init(&event->list);
3746 /*
3747 * We are in atomic context, but cgroup_event_remove()
3748 * may sleep, so we have to call it in workqueue.
3749 */
3750 schedule_work(&event->remove);
3751 }
3752 spin_unlock(&memcg->event_list_lock);
3753 }
3754
3755 return 0;
3756}
3757
3758static void memcg_event_ptable_queue_proc(struct file *file,
3759 wait_queue_head_t *wqh, poll_table *pt)
3760{
3761 struct mem_cgroup_event *event =
3762 container_of(pt, struct mem_cgroup_event, pt);
3763
3764 event->wqh = wqh;
3765 add_wait_queue(wqh, &event->wait);
3766}
3767
3768/*
3769 * DO NOT USE IN NEW FILES.
3770 *
3771 * Parse input and register new cgroup event handler.
3772 *
3773 * Input must be in format '<event_fd> <control_fd> <args>'.
3774 * Interpretation of args is defined by control file implementation.
3775 */
3776static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
3777 char *buf, size_t nbytes, loff_t off)
3778{
3779 struct cgroup_subsys_state *css = of_css(of);
3780 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3781 struct mem_cgroup_event *event;
3782 struct cgroup_subsys_state *cfile_css;
3783 unsigned int efd, cfd;
3784 struct fd efile;
3785 struct fd cfile;
3786 const char *name;
3787 char *endp;
3788 int ret;
3789
3790 buf = strstrip(buf);
3791
3792 efd = simple_strtoul(buf, &endp, 10);
3793 if (*endp != ' ')
3794 return -EINVAL;
3795 buf = endp + 1;
3796
3797 cfd = simple_strtoul(buf, &endp, 10);
3798 if ((*endp != ' ') && (*endp != '\0'))
3799 return -EINVAL;
3800 buf = endp + 1;
3801
3802 event = kzalloc(sizeof(*event), GFP_KERNEL);
3803 if (!event)
3804 return -ENOMEM;
3805
3806 event->memcg = memcg;
3807 INIT_LIST_HEAD(&event->list);
3808 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
3809 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
3810 INIT_WORK(&event->remove, memcg_event_remove);
3811
3812 efile = fdget(efd);
3813 if (!efile.file) {
3814 ret = -EBADF;
3815 goto out_kfree;
3816 }
3817
3818 event->eventfd = eventfd_ctx_fileget(efile.file);
3819 if (IS_ERR(event->eventfd)) {
3820 ret = PTR_ERR(event->eventfd);
3821 goto out_put_efile;
3822 }
3823
3824 cfile = fdget(cfd);
3825 if (!cfile.file) {
3826 ret = -EBADF;
3827 goto out_put_eventfd;
3828 }
3829
3830	/* the process needs read permission on the control file */
3831 /* AV: shouldn't we check that it's been opened for read instead? */
3832 ret = inode_permission(file_inode(cfile.file), MAY_READ);
3833 if (ret < 0)
3834 goto out_put_cfile;
3835
3836 /*
3837 * Determine the event callbacks and set them in @event. This used
3838 * to be done via struct cftype but cgroup core no longer knows
3839 * about these events. The following is crude but the whole thing
3840 * is for compatibility anyway.
3841 *
3842 * DO NOT ADD NEW FILES.
3843 */
3844 name = cfile.file->f_path.dentry->d_name.name;
3845
3846 if (!strcmp(name, "memory.usage_in_bytes")) {
3847 event->register_event = mem_cgroup_usage_register_event;
3848 event->unregister_event = mem_cgroup_usage_unregister_event;
3849 } else if (!strcmp(name, "memory.oom_control")) {
3850 event->register_event = mem_cgroup_oom_register_event;
3851 event->unregister_event = mem_cgroup_oom_unregister_event;
3852 } else if (!strcmp(name, "memory.pressure_level")) {
3853 event->register_event = vmpressure_register_event;
3854 event->unregister_event = vmpressure_unregister_event;
3855 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
3856 event->register_event = memsw_cgroup_usage_register_event;
3857 event->unregister_event = memsw_cgroup_usage_unregister_event;
3858 } else {
3859 ret = -EINVAL;
3860 goto out_put_cfile;
3861 }
3862
3863 /*
3864 * Verify @cfile should belong to @css. Also, remaining events are
3865 * automatically removed on cgroup destruction but the removal is
3866 * asynchronous, so take an extra ref on @css.
3867 */
3868 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
3869 &memory_cgrp_subsys);
3870 ret = -EINVAL;
3871 if (IS_ERR(cfile_css))
3872 goto out_put_cfile;
3873 if (cfile_css != css) {
3874 css_put(cfile_css);
3875 goto out_put_cfile;
3876 }
3877
3878 ret = event->register_event(memcg, event->eventfd, buf);
3879 if (ret)
3880 goto out_put_css;
3881
3882 efile.file->f_op->poll(efile.file, &event->pt);
3883
3884 spin_lock(&memcg->event_list_lock);
3885 list_add(&event->list, &memcg->event_list);
3886 spin_unlock(&memcg->event_list_lock);
3887
3888 fdput(cfile);
3889 fdput(efile);
3890
3891 return nbytes;
3892
3893out_put_css:
3894 css_put(css);
3895out_put_cfile:
3896 fdput(cfile);
3897out_put_eventfd:
3898 eventfd_ctx_put(event->eventfd);
3899out_put_efile:
3900 fdput(efile);
3901out_kfree:
3902 kfree(event);
3903
3904 return ret;
3905}
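/*
 * Illustrative userspace sketch (not part of the kernel): registering a
 * usage threshold through the legacy interface parsed above.  The cgroup
 * directory "grp" and the 100M threshold are made-up values for this
 * example.
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes",
 *		       O_RDONLY);
 *	int ctl = open("/sys/fs/cgroup/memory/grp/cgroup.event_control",
 *		       O_WRONLY);
 *	uint64_t ticks;
 *
 *	// memcg_write_event_control() expects "<efd> <cfd> <args>"
 *	dprintf(ctl, "%d %d %llu", efd, cfd, 100ULL << 20);
 *	read(efd, &ticks, sizeof(ticks));	// blocks until the threshold is crossed
 */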
3906
3907static struct cftype mem_cgroup_legacy_files[] = {
3908 {
3909 .name = "usage_in_bytes",
3910 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3911 .read_u64 = mem_cgroup_read_u64,
3912 },
3913 {
3914 .name = "max_usage_in_bytes",
3915 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
3916 .write = mem_cgroup_reset,
3917 .read_u64 = mem_cgroup_read_u64,
3918 },
3919 {
3920 .name = "limit_in_bytes",
3921 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
3922 .write = mem_cgroup_write,
3923 .read_u64 = mem_cgroup_read_u64,
3924 },
3925 {
3926 .name = "soft_limit_in_bytes",
3927 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3928 .write = mem_cgroup_write,
3929 .read_u64 = mem_cgroup_read_u64,
3930 },
3931 {
3932 .name = "failcnt",
3933 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
3934 .write = mem_cgroup_reset,
3935 .read_u64 = mem_cgroup_read_u64,
3936 },
3937 {
3938 .name = "stat",
3939 .seq_show = memcg_stat_show,
3940 },
3941 {
3942 .name = "force_empty",
3943 .write = mem_cgroup_force_empty_write,
3944 },
3945 {
3946 .name = "use_hierarchy",
3947 .write_u64 = mem_cgroup_hierarchy_write,
3948 .read_u64 = mem_cgroup_hierarchy_read,
3949 },
3950 {
3951 .name = "cgroup.event_control", /* XXX: for compat */
3952 .write = memcg_write_event_control,
3953 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
3954 },
3955 {
3956 .name = "swappiness",
3957 .read_u64 = mem_cgroup_swappiness_read,
3958 .write_u64 = mem_cgroup_swappiness_write,
3959 },
3960 {
3961 .name = "move_charge_at_immigrate",
3962 .read_u64 = mem_cgroup_move_charge_read,
3963 .write_u64 = mem_cgroup_move_charge_write,
3964 },
3965 {
3966 .name = "oom_control",
3967 .seq_show = mem_cgroup_oom_control_read,
3968 .write_u64 = mem_cgroup_oom_control_write,
3969 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3970 },
3971 {
3972 .name = "pressure_level",
3973 },
3974#ifdef CONFIG_NUMA
3975 {
3976 .name = "numa_stat",
3977 .seq_show = memcg_numa_stat_show,
3978 },
3979#endif
3980 {
3981 .name = "kmem.limit_in_bytes",
3982 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
3983 .write = mem_cgroup_write,
3984 .read_u64 = mem_cgroup_read_u64,
3985 },
3986 {
3987 .name = "kmem.usage_in_bytes",
3988 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
3989 .read_u64 = mem_cgroup_read_u64,
3990 },
3991 {
3992 .name = "kmem.failcnt",
3993 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
3994 .write = mem_cgroup_reset,
3995 .read_u64 = mem_cgroup_read_u64,
3996 },
3997 {
3998 .name = "kmem.max_usage_in_bytes",
3999 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4000 .write = mem_cgroup_reset,
4001 .read_u64 = mem_cgroup_read_u64,
4002 },
4003#ifdef CONFIG_SLABINFO
4004 {
4005 .name = "kmem.slabinfo",
4006 .seq_start = slab_start,
4007 .seq_next = slab_next,
4008 .seq_stop = slab_stop,
4009 .seq_show = memcg_slab_show,
4010 },
4011#endif
4012 {
4013 .name = "kmem.tcp.limit_in_bytes",
4014 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4015 .write = mem_cgroup_write,
4016 .read_u64 = mem_cgroup_read_u64,
4017 },
4018 {
4019 .name = "kmem.tcp.usage_in_bytes",
4020 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4021 .read_u64 = mem_cgroup_read_u64,
4022 },
4023 {
4024 .name = "kmem.tcp.failcnt",
4025 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4026 .write = mem_cgroup_reset,
4027 .read_u64 = mem_cgroup_read_u64,
4028 },
4029 {
4030 .name = "kmem.tcp.max_usage_in_bytes",
4031 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4032 .write = mem_cgroup_reset,
4033 .read_u64 = mem_cgroup_read_u64,
4034 },
4035 { }, /* terminate */
4036};
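/*
 * Example of how the legacy files declared above are exercised from
 * userspace (assuming, for illustration only, that the v1 controller is
 * mounted at /sys/fs/cgroup/memory and a group "grp" has been created):
 *
 *	echo 512M > /sys/fs/cgroup/memory/grp/memory.limit_in_bytes
 *	cat /sys/fs/cgroup/memory/grp/memory.usage_in_bytes
 *	cat /sys/fs/cgroup/memory/grp/memory.stat
 */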
4037
4038static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4039{
4040 struct mem_cgroup_per_node *pn;
4041 struct mem_cgroup_per_zone *mz;
4042 int zone, tmp = node;
4043 /*
4044	 * This routine is called against all possible nodes,
4045	 * but it's a BUG to call kmalloc() against an offline node.
4046	 *
4047	 * TODO: this routine can waste a lot of memory for nodes which will
4048	 *       never be onlined. It would be better to use a memory hotplug
4049	 *       callback function.
4050 */
4051 if (!node_state(node, N_NORMAL_MEMORY))
4052 tmp = -1;
4053 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4054 if (!pn)
4055 return 1;
4056
4057 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4058 mz = &pn->zoneinfo[zone];
4059 lruvec_init(&mz->lruvec);
4060 mz->usage_in_excess = 0;
4061 mz->on_tree = false;
4062 mz->memcg = memcg;
4063 }
4064 memcg->nodeinfo[node] = pn;
4065 return 0;
4066}
4067
4068static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4069{
4070 kfree(memcg->nodeinfo[node]);
4071}
4072
4073static void mem_cgroup_free(struct mem_cgroup *memcg)
4074{
4075 int node;
4076
4077 memcg_wb_domain_exit(memcg);
4078 for_each_node(node)
4079 free_mem_cgroup_per_zone_info(memcg, node);
4080 free_percpu(memcg->stat);
4081 kfree(memcg);
4082}
4083
4084static struct mem_cgroup *mem_cgroup_alloc(void)
4085{
4086 struct mem_cgroup *memcg;
4087 size_t size;
4088 int node;
4089
4090 size = sizeof(struct mem_cgroup);
4091 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
4092
4093 memcg = kzalloc(size, GFP_KERNEL);
4094 if (!memcg)
4095 return NULL;
4096
4097 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4098 if (!memcg->stat)
4099 goto fail;
4100
4101 for_each_node(node)
4102 if (alloc_mem_cgroup_per_zone_info(memcg, node))
4103 goto fail;
4104
4105 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4106 goto fail;
4107
4108 INIT_WORK(&memcg->high_work, high_work_func);
4109 memcg->last_scanned_node = MAX_NUMNODES;
4110 INIT_LIST_HEAD(&memcg->oom_notify);
4111 mutex_init(&memcg->thresholds_lock);
4112 spin_lock_init(&memcg->move_lock);
4113 vmpressure_init(&memcg->vmpressure);
4114 INIT_LIST_HEAD(&memcg->event_list);
4115 spin_lock_init(&memcg->event_list_lock);
4116 memcg->socket_pressure = jiffies;
4117#ifndef CONFIG_SLOB
4118 memcg->kmemcg_id = -1;
4119#endif
4120#ifdef CONFIG_CGROUP_WRITEBACK
4121 INIT_LIST_HEAD(&memcg->cgwb_list);
4122#endif
4123 return memcg;
4124fail:
4125 mem_cgroup_free(memcg);
4126 return NULL;
4127}
4128
4129static struct cgroup_subsys_state * __ref
4130mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4131{
4132 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4133 struct mem_cgroup *memcg;
4134 long error = -ENOMEM;
4135
4136 memcg = mem_cgroup_alloc();
4137 if (!memcg)
4138 return ERR_PTR(error);
4139
4140 memcg->high = PAGE_COUNTER_MAX;
4141 memcg->soft_limit = PAGE_COUNTER_MAX;
4142 if (parent) {
4143 memcg->swappiness = mem_cgroup_swappiness(parent);
4144 memcg->oom_kill_disable = parent->oom_kill_disable;
4145 }
4146 if (parent && parent->use_hierarchy) {
4147 memcg->use_hierarchy = true;
4148 page_counter_init(&memcg->memory, &parent->memory);
4149 page_counter_init(&memcg->swap, &parent->swap);
4150 page_counter_init(&memcg->memsw, &parent->memsw);
4151 page_counter_init(&memcg->kmem, &parent->kmem);
4152 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
4153 } else {
4154 page_counter_init(&memcg->memory, NULL);
4155 page_counter_init(&memcg->swap, NULL);
4156 page_counter_init(&memcg->memsw, NULL);
4157 page_counter_init(&memcg->kmem, NULL);
4158 page_counter_init(&memcg->tcpmem, NULL);
4159 /*
4160		 * A deeper hierarchy with use_hierarchy == false doesn't make
4161		 * much sense, so let the cgroup subsystem know about this
4162		 * unfortunate state in our controller.
4163 */
4164 if (parent != root_mem_cgroup)
4165 memory_cgrp_subsys.broken_hierarchy = true;
4166 }
4167
4168 /* The following stuff does not apply to the root */
4169 if (!parent) {
4170 root_mem_cgroup = memcg;
4171 return &memcg->css;
4172 }
4173
4174 error = memcg_online_kmem(memcg);
4175 if (error)
4176 goto fail;
4177
4178 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4179 static_branch_inc(&memcg_sockets_enabled_key);
4180
4181 return &memcg->css;
4182fail:
4183 mem_cgroup_free(memcg);
4184 return NULL;
4185}
4186
4187static int
4188mem_cgroup_css_online(struct cgroup_subsys_state *css)
4189{
4190 if (css->id > MEM_CGROUP_ID_MAX)
4191 return -ENOSPC;
4192
4193 return 0;
4194}
4195
4196static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4197{
4198 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4199 struct mem_cgroup_event *event, *tmp;
4200
4201 /*
4202 * Unregister events and notify userspace.
4203 * Notify userspace about cgroup removing only after rmdir of cgroup
4204 * directory to avoid race between userspace and kernelspace.
4205 */
4206 spin_lock(&memcg->event_list_lock);
4207 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
4208 list_del_init(&event->list);
4209 schedule_work(&event->remove);
4210 }
4211 spin_unlock(&memcg->event_list_lock);
4212
4213 memcg_offline_kmem(memcg);
4214 wb_memcg_offline(memcg);
4215}
4216
4217static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4218{
4219 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4220
4221 invalidate_reclaim_iterators(memcg);
4222}
4223
4224static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
4225{
4226 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4227
4228 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4229 static_branch_dec(&memcg_sockets_enabled_key);
4230
4231 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
4232 static_branch_dec(&memcg_sockets_enabled_key);
4233
4234 vmpressure_cleanup(&memcg->vmpressure);
4235 cancel_work_sync(&memcg->high_work);
4236 mem_cgroup_remove_from_trees(memcg);
4237 memcg_free_kmem(memcg);
4238 mem_cgroup_free(memcg);
4239}
4240
4241/**
4242 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4243 * @css: the target css
4244 *
4245 * Reset the states of the mem_cgroup associated with @css. This is
4246 * invoked when the userland requests disabling on the default hierarchy
4247 * but the memcg is pinned through dependency. The memcg should stop
4248 * applying policies and should revert to the vanilla state as it may be
4249 * made visible again.
4250 *
4251 * The current implementation only resets the essential configurations.
4252 * This needs to be expanded to cover all the visible parts.
4253 */
4254static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4255{
4256 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4257
4258 page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX);
4259 page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX);
4260 page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX);
4261 page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX);
4262 page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX);
4263 memcg->low = 0;
4264 memcg->high = PAGE_COUNTER_MAX;
4265 memcg->soft_limit = PAGE_COUNTER_MAX;
4266 memcg_wb_domain_size_changed(memcg);
4267}
4268
4269#ifdef CONFIG_MMU
4270/* Handlers for move charge at task migration. */
4271static int mem_cgroup_do_precharge(unsigned long count)
4272{
4273 int ret;
4274
4275 /* Try a single bulk charge without reclaim first, kswapd may wake */
4276 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
4277 if (!ret) {
4278 mc.precharge += count;
4279 return ret;
4280 }
4281
4282 /* Try charges one by one with reclaim */
4283 while (count--) {
4284 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
4285 if (ret)
4286 return ret;
4287 mc.precharge++;
4288 cond_resched();
4289 }
4290 return 0;
4291}
4292
4293/**
4294 * get_mctgt_type - get target type of moving charge
4295 * @vma: the vma the pte to be checked belongs to
4296 * @addr: the address corresponding to the pte to be checked
4297 * @ptent: the pte to be checked
4298 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
4299 *
4300 * Returns
4301 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
4302 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4303 *     move charge. If @target is not NULL, the page is stored in target->page
4304 *     with an extra reference taken (callers should handle it).
4305 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4306 *     target for charge migration. If @target is not NULL, the entry is stored
4307 *     in target->ent.
4308 *
4309 * Called with pte lock held.
4310 */
4311union mc_target {
4312 struct page *page;
4313 swp_entry_t ent;
4314};
4315
4316enum mc_target_type {
4317 MC_TARGET_NONE = 0,
4318 MC_TARGET_PAGE,
4319 MC_TARGET_SWAP,
4320};
4321
4322static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4323 unsigned long addr, pte_t ptent)
4324{
4325 struct page *page = vm_normal_page(vma, addr, ptent);
4326
4327 if (!page || !page_mapped(page))
4328 return NULL;
4329 if (PageAnon(page)) {
4330 if (!(mc.flags & MOVE_ANON))
4331 return NULL;
4332 } else {
4333 if (!(mc.flags & MOVE_FILE))
4334 return NULL;
4335 }
4336 if (!get_page_unless_zero(page))
4337 return NULL;
4338
4339 return page;
4340}
4341
4342#ifdef CONFIG_SWAP
4343static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4344 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4345{
4346 struct page *page = NULL;
4347 swp_entry_t ent = pte_to_swp_entry(ptent);
4348
4349 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
4350 return NULL;
4351 /*
4352	 * Because lookup_swap_cache() updates some statistics counters,
4353 * we call find_get_page() with swapper_space directly.
4354 */
4355 page = find_get_page(swap_address_space(ent), ent.val);
4356 if (do_memsw_account())
4357 entry->val = ent.val;
4358
4359 return page;
4360}
4361#else
4362static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4363 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4364{
4365 return NULL;
4366}
4367#endif
4368
4369static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4370 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4371{
4372 struct page *page = NULL;
4373 struct address_space *mapping;
4374 pgoff_t pgoff;
4375
4376 if (!vma->vm_file) /* anonymous vma */
4377 return NULL;
4378 if (!(mc.flags & MOVE_FILE))
4379 return NULL;
4380
4381 mapping = vma->vm_file->f_mapping;
4382 pgoff = linear_page_index(vma, addr);
4383
4384	/* The page is moved even if it's not part of this task's RSS (i.e. not page-faulted in). */
4385#ifdef CONFIG_SWAP
4386 /* shmem/tmpfs may report page out on swap: account for that too. */
4387 if (shmem_mapping(mapping)) {
4388 page = find_get_entry(mapping, pgoff);
4389 if (radix_tree_exceptional_entry(page)) {
4390 swp_entry_t swp = radix_to_swp_entry(page);
4391 if (do_memsw_account())
4392 *entry = swp;
4393 page = find_get_page(swap_address_space(swp), swp.val);
4394 }
4395 } else
4396 page = find_get_page(mapping, pgoff);
4397#else
4398 page = find_get_page(mapping, pgoff);
4399#endif
4400 return page;
4401}
4402
4403/**
4404 * mem_cgroup_move_account - move account of the page
4405 * @page: the page
4406 * @compound: charge the page as compound or small page
4407 * @from: mem_cgroup which the page is moved from.
4408 * @to: mem_cgroup which the page is moved to. @from != @to.
4409 *
4410 * The caller must make sure the page is not on the LRU (isolate_lru_page() is useful.)
4411 *
4412 * This function doesn't do a "charge" to the new cgroup and doesn't do an
4413 * "uncharge" from the old cgroup.
4414 */
4415static int mem_cgroup_move_account(struct page *page,
4416 bool compound,
4417 struct mem_cgroup *from,
4418 struct mem_cgroup *to)
4419{
4420 unsigned long flags;
4421 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
4422 int ret;
4423 bool anon;
4424
4425 VM_BUG_ON(from == to);
4426 VM_BUG_ON_PAGE(PageLRU(page), page);
4427 VM_BUG_ON(compound && !PageTransHuge(page));
4428
4429 /*
4430 * Prevent mem_cgroup_migrate() from looking at
4431 * page->mem_cgroup of its source page while we change it.
4432 */
4433 ret = -EBUSY;
4434 if (!trylock_page(page))
4435 goto out;
4436
4437 ret = -EINVAL;
4438 if (page->mem_cgroup != from)
4439 goto out_unlock;
4440
4441 anon = PageAnon(page);
4442
4443 spin_lock_irqsave(&from->move_lock, flags);
4444
4445 if (!anon && page_mapped(page)) {
4446 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4447 nr_pages);
4448 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4449 nr_pages);
4450 }
4451
4452 /*
4453	 * move_lock is grabbed above and the caller has set from->moving_account,
4454	 * so mem_cgroup_update_page_stat() will serialize updates to PageDirty.
4455	 * The mapping should therefore be stable for dirty pages.
4456 */
4457 if (!anon && PageDirty(page)) {
4458 struct address_space *mapping = page_mapping(page);
4459
4460 if (mapping_cap_account_dirty(mapping)) {
4461 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
4462 nr_pages);
4463 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
4464 nr_pages);
4465 }
4466 }
4467
4468 if (PageWriteback(page)) {
4469 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4470 nr_pages);
4471 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4472 nr_pages);
4473 }
4474
4475 /*
4476 * It is safe to change page->mem_cgroup here because the page
4477 * is referenced, charged, and isolated - we can't race with
4478 * uncharging, charging, migration, or LRU putback.
4479 */
4480
4481 /* caller should have done css_get */
4482 page->mem_cgroup = to;
4483 spin_unlock_irqrestore(&from->move_lock, flags);
4484
4485 ret = 0;
4486
4487 local_irq_disable();
4488 mem_cgroup_charge_statistics(to, page, compound, nr_pages);
4489 memcg_check_events(to, page);
4490 mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
4491 memcg_check_events(from, page);
4492 local_irq_enable();
4493out_unlock:
4494 unlock_page(page);
4495out:
4496 return ret;
4497}
4498
4499static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
4500 unsigned long addr, pte_t ptent, union mc_target *target)
4501{
4502 struct page *page = NULL;
4503 enum mc_target_type ret = MC_TARGET_NONE;
4504 swp_entry_t ent = { .val = 0 };
4505
4506 if (pte_present(ptent))
4507 page = mc_handle_present_pte(vma, addr, ptent);
4508 else if (is_swap_pte(ptent))
4509 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
4510 else if (pte_none(ptent))
4511 page = mc_handle_file_pte(vma, addr, ptent, &ent);
4512
4513 if (!page && !ent.val)
4514 return ret;
4515 if (page) {
4516 /*
4517		 * Only do a loose check without serialization.
4518		 * mem_cgroup_move_account() checks whether the page is
4519		 * valid under LRU exclusion.
4520 */
4521 if (page->mem_cgroup == mc.from) {
4522 ret = MC_TARGET_PAGE;
4523 if (target)
4524 target->page = page;
4525 }
4526 if (!ret || !target)
4527 put_page(page);
4528 }
4529	/* There is a swap entry and the page doesn't exist or isn't charged */
4530 if (ent.val && !ret &&
4531 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
4532 ret = MC_TARGET_SWAP;
4533 if (target)
4534 target->ent = ent;
4535 }
4536 return ret;
4537}
4538
4539#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4540/*
4541 * We don't consider swapping or file mapped pages because THP does not
4542 * support them for now.
4543 * Caller should make sure that pmd_trans_huge(pmd) is true.
4544 */
4545static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4546 unsigned long addr, pmd_t pmd, union mc_target *target)
4547{
4548 struct page *page = NULL;
4549 enum mc_target_type ret = MC_TARGET_NONE;
4550
4551 page = pmd_page(pmd);
4552 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
4553 if (!(mc.flags & MOVE_ANON))
4554 return ret;
4555 if (page->mem_cgroup == mc.from) {
4556 ret = MC_TARGET_PAGE;
4557 if (target) {
4558 get_page(page);
4559 target->page = page;
4560 }
4561 }
4562 return ret;
4563}
4564#else
4565static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4566 unsigned long addr, pmd_t pmd, union mc_target *target)
4567{
4568 return MC_TARGET_NONE;
4569}
4570#endif
4571
4572static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4573 unsigned long addr, unsigned long end,
4574 struct mm_walk *walk)
4575{
4576 struct vm_area_struct *vma = walk->vma;
4577 pte_t *pte;
4578 spinlock_t *ptl;
4579
4580 ptl = pmd_trans_huge_lock(pmd, vma);
4581 if (ptl) {
4582 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
4583 mc.precharge += HPAGE_PMD_NR;
4584 spin_unlock(ptl);
4585 return 0;
4586 }
4587
4588 if (pmd_trans_unstable(pmd))
4589 return 0;
4590 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4591 for (; addr != end; pte++, addr += PAGE_SIZE)
4592 if (get_mctgt_type(vma, addr, *pte, NULL))
4593 mc.precharge++; /* increment precharge temporarily */
4594 pte_unmap_unlock(pte - 1, ptl);
4595 cond_resched();
4596
4597 return 0;
4598}
4599
4600static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4601{
4602 unsigned long precharge;
4603
4604 struct mm_walk mem_cgroup_count_precharge_walk = {
4605 .pmd_entry = mem_cgroup_count_precharge_pte_range,
4606 .mm = mm,
4607 };
4608 down_read(&mm->mmap_sem);
4609 walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
4610 up_read(&mm->mmap_sem);
4611
4612 precharge = mc.precharge;
4613 mc.precharge = 0;
4614
4615 return precharge;
4616}
4617
4618static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4619{
4620 unsigned long precharge = mem_cgroup_count_precharge(mm);
4621
4622 VM_BUG_ON(mc.moving_task);
4623 mc.moving_task = current;
4624 return mem_cgroup_do_precharge(precharge);
4625}
4626
4627/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
4628static void __mem_cgroup_clear_mc(void)
4629{
4630 struct mem_cgroup *from = mc.from;
4631 struct mem_cgroup *to = mc.to;
4632
4633 /* we must uncharge all the leftover precharges from mc.to */
4634 if (mc.precharge) {
4635 cancel_charge(mc.to, mc.precharge);
4636 mc.precharge = 0;
4637 }
4638 /*
4639 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4640 * we must uncharge here.
4641 */
4642 if (mc.moved_charge) {
4643 cancel_charge(mc.from, mc.moved_charge);
4644 mc.moved_charge = 0;
4645 }
4646 /* we must fixup refcnts and charges */
4647 if (mc.moved_swap) {
4648 /* uncharge swap account from the old cgroup */
4649 if (!mem_cgroup_is_root(mc.from))
4650 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
4651
4652 /*
4653 * we charged both to->memory and to->memsw, so we
4654 * should uncharge to->memory.
4655 */
4656 if (!mem_cgroup_is_root(mc.to))
4657 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
4658
4659 css_put_many(&mc.from->css, mc.moved_swap);
4660
4661 /* we've already done css_get(mc.to) */
4662 mc.moved_swap = 0;
4663 }
4664 memcg_oom_recover(from);
4665 memcg_oom_recover(to);
4666 wake_up_all(&mc.waitq);
4667}
4668
4669static void mem_cgroup_clear_mc(void)
4670{
4671 struct mm_struct *mm = mc.mm;
4672
4673 /*
4674 * we must clear moving_task before waking up waiters at the end of
4675 * task migration.
4676 */
4677 mc.moving_task = NULL;
4678 __mem_cgroup_clear_mc();
4679 spin_lock(&mc.lock);
4680 mc.from = NULL;
4681 mc.to = NULL;
4682 mc.mm = NULL;
4683 spin_unlock(&mc.lock);
4684
4685 mmput(mm);
4686}
4687
4688static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4689{
4690 struct cgroup_subsys_state *css;
4691 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
4692 struct mem_cgroup *from;
4693 struct task_struct *leader, *p;
4694 struct mm_struct *mm;
4695 unsigned long move_flags;
4696 int ret = 0;
4697
4698 /* charge immigration isn't supported on the default hierarchy */
4699 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4700 return 0;
4701
4702 /*
4703 * Multi-process migrations only happen on the default hierarchy
4704 * where charge immigration is not used. Perform charge
4705 * immigration if @tset contains a leader and whine if there are
4706 * multiple.
4707 */
4708 p = NULL;
4709 cgroup_taskset_for_each_leader(leader, css, tset) {
4710 WARN_ON_ONCE(p);
4711 p = leader;
4712 memcg = mem_cgroup_from_css(css);
4713 }
4714 if (!p)
4715 return 0;
4716
4717 /*
4718	 * We are now committed to this value, whatever it is. Changes in this
4719 * tunable will only affect upcoming migrations, not the current one.
4720 * So we need to save it, and keep it going.
4721 */
4722 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4723 if (!move_flags)
4724 return 0;
4725
4726 from = mem_cgroup_from_task(p);
4727
4728 VM_BUG_ON(from == memcg);
4729
4730 mm = get_task_mm(p);
4731 if (!mm)
4732 return 0;
4733	/* We move charges only when we move the owner of the mm */
4734 if (mm->owner == p) {
4735 VM_BUG_ON(mc.from);
4736 VM_BUG_ON(mc.to);
4737 VM_BUG_ON(mc.precharge);
4738 VM_BUG_ON(mc.moved_charge);
4739 VM_BUG_ON(mc.moved_swap);
4740
4741 spin_lock(&mc.lock);
4742 mc.mm = mm;
4743 mc.from = from;
4744 mc.to = memcg;
4745 mc.flags = move_flags;
4746 spin_unlock(&mc.lock);
4747 /* We set mc.moving_task later */
4748
4749 ret = mem_cgroup_precharge_mc(mm);
4750 if (ret)
4751 mem_cgroup_clear_mc();
4752 } else {
4753 mmput(mm);
4754 }
4755 return ret;
4756}
4757
4758static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4759{
4760 if (mc.to)
4761 mem_cgroup_clear_mc();
4762}
4763
4764static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4765 unsigned long addr, unsigned long end,
4766 struct mm_walk *walk)
4767{
4768 int ret = 0;
4769 struct vm_area_struct *vma = walk->vma;
4770 pte_t *pte;
4771 spinlock_t *ptl;
4772 enum mc_target_type target_type;
4773 union mc_target target;
4774 struct page *page;
4775
4776 ptl = pmd_trans_huge_lock(pmd, vma);
4777 if (ptl) {
4778 if (mc.precharge < HPAGE_PMD_NR) {
4779 spin_unlock(ptl);
4780 return 0;
4781 }
4782 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
4783 if (target_type == MC_TARGET_PAGE) {
4784 page = target.page;
4785 if (!isolate_lru_page(page)) {
4786 if (!mem_cgroup_move_account(page, true,
4787 mc.from, mc.to)) {
4788 mc.precharge -= HPAGE_PMD_NR;
4789 mc.moved_charge += HPAGE_PMD_NR;
4790 }
4791 putback_lru_page(page);
4792 }
4793 put_page(page);
4794 }
4795 spin_unlock(ptl);
4796 return 0;
4797 }
4798
4799 if (pmd_trans_unstable(pmd))
4800 return 0;
4801retry:
4802 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4803 for (; addr != end; addr += PAGE_SIZE) {
4804 pte_t ptent = *(pte++);
4805 swp_entry_t ent;
4806
4807 if (!mc.precharge)
4808 break;
4809
4810 switch (get_mctgt_type(vma, addr, ptent, &target)) {
4811 case MC_TARGET_PAGE:
4812 page = target.page;
4813 /*
4814 * We can have a part of the split pmd here. Moving it
4815 * can be done but it would be too convoluted so simply
4816			 * ignore such a partial THP and keep it in the original
4817 * memcg. There should be somebody mapping the head.
4818 */
4819 if (PageTransCompound(page))
4820 goto put;
4821 if (isolate_lru_page(page))
4822 goto put;
4823 if (!mem_cgroup_move_account(page, false,
4824 mc.from, mc.to)) {
4825 mc.precharge--;
4826 /* we uncharge from mc.from later. */
4827 mc.moved_charge++;
4828 }
4829 putback_lru_page(page);
4830put: /* get_mctgt_type() gets the page */
4831 put_page(page);
4832 break;
4833 case MC_TARGET_SWAP:
4834 ent = target.ent;
4835 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
4836 mc.precharge--;
4837 /* we fixup refcnts and charges later. */
4838 mc.moved_swap++;
4839 }
4840 break;
4841 default:
4842 break;
4843 }
4844 }
4845 pte_unmap_unlock(pte - 1, ptl);
4846 cond_resched();
4847
4848 if (addr != end) {
4849 /*
4850 * We have consumed all precharges we got in can_attach().
4851		 * We try to charge one by one, but don't do any additional
4852		 * charges to mc.to if we have already failed to charge once in
4853		 * the attach() phase.
4854 */
4855 ret = mem_cgroup_do_precharge(1);
4856 if (!ret)
4857 goto retry;
4858 }
4859
4860 return ret;
4861}
4862
4863static void mem_cgroup_move_charge(void)
4864{
4865 struct mm_walk mem_cgroup_move_charge_walk = {
4866 .pmd_entry = mem_cgroup_move_charge_pte_range,
4867 .mm = mc.mm,
4868 };
4869
4870 lru_add_drain_all();
4871 /*
4872 * Signal lock_page_memcg() to take the memcg's move_lock
4873 * while we're moving its pages to another memcg. Then wait
4874 * for already started RCU-only updates to finish.
4875 */
4876 atomic_inc(&mc.from->moving_account);
4877 synchronize_rcu();
4878retry:
4879 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
4880 /*
4881		 * Someone who is holding the mmap_sem might be waiting in
4882 * waitq. So we cancel all extra charges, wake up all waiters,
4883 * and retry. Because we cancel precharges, we might not be able
4884 * to move enough charges, but moving charge is a best-effort
4885 * feature anyway, so it wouldn't be a big problem.
4886 */
4887 __mem_cgroup_clear_mc();
4888 cond_resched();
4889 goto retry;
4890 }
4891 /*
4892	 * When we have consumed all precharges and failed to do an
4893	 * additional charge, the page walk just aborts.
4894 */
4895 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
4896 up_read(&mc.mm->mmap_sem);
4897 atomic_dec(&mc.from->moving_account);
4898}
4899
4900static void mem_cgroup_move_task(void)
4901{
4902 if (mc.to) {
4903 mem_cgroup_move_charge();
4904 mem_cgroup_clear_mc();
4905 }
4906}
4907#else /* !CONFIG_MMU */
4908static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4909{
4910 return 0;
4911}
4912static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4913{
4914}
4915static void mem_cgroup_move_task(void)
4916{
4917}
4918#endif
4919
4920/*
4921 * Cgroup retains root cgroups across [un]mount cycles making it necessary
4922 * to verify whether we're attached to the default hierarchy on each mount
4923 * attempt.
4924 */
4925static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
4926{
4927 /*
4928 * use_hierarchy is forced on the default hierarchy. cgroup core
4929 * guarantees that @root doesn't have any children, so turning it
4930 * on for the root memcg is enough.
4931 */
4932 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4933 root_mem_cgroup->use_hierarchy = true;
4934 else
4935 root_mem_cgroup->use_hierarchy = false;
4936}
4937
4938static u64 memory_current_read(struct cgroup_subsys_state *css,
4939 struct cftype *cft)
4940{
4941 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4942
4943 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
4944}
4945
4946static int memory_low_show(struct seq_file *m, void *v)
4947{
4948 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
4949 unsigned long low = READ_ONCE(memcg->low);
4950
4951 if (low == PAGE_COUNTER_MAX)
4952 seq_puts(m, "max\n");
4953 else
4954 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
4955
4956 return 0;
4957}
4958
4959static ssize_t memory_low_write(struct kernfs_open_file *of,
4960 char *buf, size_t nbytes, loff_t off)
4961{
4962 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4963 unsigned long low;
4964 int err;
4965
4966 buf = strstrip(buf);
4967 err = page_counter_memparse(buf, "max", &low);
4968 if (err)
4969 return err;
4970
4971 memcg->low = low;
4972
4973 return nbytes;
4974}
4975
4976static int memory_high_show(struct seq_file *m, void *v)
4977{
4978 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
4979 unsigned long high = READ_ONCE(memcg->high);
4980
4981 if (high == PAGE_COUNTER_MAX)
4982 seq_puts(m, "max\n");
4983 else
4984 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
4985
4986 return 0;
4987}
4988
4989static ssize_t memory_high_write(struct kernfs_open_file *of,
4990 char *buf, size_t nbytes, loff_t off)
4991{
4992 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4993 unsigned long nr_pages;
4994 unsigned long high;
4995 int err;
4996
4997 buf = strstrip(buf);
4998 err = page_counter_memparse(buf, "max", &high);
4999 if (err)
5000 return err;
5001
5002 memcg->high = high;
5003
5004 nr_pages = page_counter_read(&memcg->memory);
5005 if (nr_pages > high)
5006 try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
5007 GFP_KERNEL, true);
5008
5009 memcg_wb_domain_size_changed(memcg);
5010 return nbytes;
5011}
5012
5013static int memory_max_show(struct seq_file *m, void *v)
5014{
5015 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5016 unsigned long max = READ_ONCE(memcg->memory.limit);
5017
5018 if (max == PAGE_COUNTER_MAX)
5019 seq_puts(m, "max\n");
5020 else
5021 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5022
5023 return 0;
5024}
5025
5026static ssize_t memory_max_write(struct kernfs_open_file *of,
5027 char *buf, size_t nbytes, loff_t off)
5028{
5029 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5030 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
5031 bool drained = false;
5032 unsigned long max;
5033 int err;
5034
5035 buf = strstrip(buf);
5036 err = page_counter_memparse(buf, "max", &max);
5037 if (err)
5038 return err;
5039
5040 xchg(&memcg->memory.limit, max);
5041
5042 for (;;) {
5043 unsigned long nr_pages = page_counter_read(&memcg->memory);
5044
5045 if (nr_pages <= max)
5046 break;
5047
5048 if (signal_pending(current)) {
5049 err = -EINTR;
5050 break;
5051 }
5052
5053 if (!drained) {
5054 drain_all_stock(memcg);
5055 drained = true;
5056 continue;
5057 }
5058
5059 if (nr_reclaims) {
5060 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
5061 GFP_KERNEL, true))
5062 nr_reclaims--;
5063 continue;
5064 }
5065
5066 mem_cgroup_events(memcg, MEMCG_OOM, 1);
5067 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
5068 break;
5069 }
5070
5071 memcg_wb_domain_size_changed(memcg);
5072 return nbytes;
5073}
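/*
 * Sketch of the cgroup2 interface implemented above (the mount point
 * /sys/fs/cgroup and the group name "grp" are assumptions for the
 * example): writing a byte value caps the group, writing "max" removes
 * the cap again.
 *
 *	int fd = open("/sys/fs/cgroup/grp/memory.max", O_WRONLY);
 *
 *	dprintf(fd, "%llu", 256ULL << 20);	// reclaim, then OOM, to stay under 256M
 *	dprintf(fd, "max");			// back to no limit
 */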
5074
5075static int memory_events_show(struct seq_file *m, void *v)
5076{
5077 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5078
5079 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
5080 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
5081 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
5082 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
5083
5084 return 0;
5085}
5086
5087static int memory_stat_show(struct seq_file *m, void *v)
5088{
5089 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5090 unsigned long stat[MEMCG_NR_STAT];
5091 unsigned long events[MEMCG_NR_EVENTS];
5092 int i;
5093
5094 /*
5095 * Provide statistics on the state of the memory subsystem as
5096 * well as cumulative event counters that show past behavior.
5097 *
5098 * This list is ordered following a combination of these gradients:
5099 * 1) generic big picture -> specifics and details
5100 * 2) reflecting userspace activity -> reflecting kernel heuristics
5101 *
5102 * Current memory state:
5103 */
5104
5105 tree_stat(memcg, stat);
5106 tree_events(memcg, events);
5107
5108 seq_printf(m, "anon %llu\n",
5109 (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
5110 seq_printf(m, "file %llu\n",
5111 (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
5112 seq_printf(m, "kernel_stack %llu\n",
5113 (u64)stat[MEMCG_KERNEL_STACK] * PAGE_SIZE);
5114 seq_printf(m, "slab %llu\n",
5115 (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
5116 stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
5117 seq_printf(m, "sock %llu\n",
5118 (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
5119
5120 seq_printf(m, "file_mapped %llu\n",
5121 (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE);
5122 seq_printf(m, "file_dirty %llu\n",
5123 (u64)stat[MEM_CGROUP_STAT_DIRTY] * PAGE_SIZE);
5124 seq_printf(m, "file_writeback %llu\n",
5125 (u64)stat[MEM_CGROUP_STAT_WRITEBACK] * PAGE_SIZE);
5126
5127 for (i = 0; i < NR_LRU_LISTS; i++) {
5128 struct mem_cgroup *mi;
5129 unsigned long val = 0;
5130
5131 for_each_mem_cgroup_tree(mi, memcg)
5132 val += mem_cgroup_nr_lru_pages(mi, BIT(i));
5133 seq_printf(m, "%s %llu\n",
5134 mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
5135 }
5136
5137 seq_printf(m, "slab_reclaimable %llu\n",
5138 (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE);
5139 seq_printf(m, "slab_unreclaimable %llu\n",
5140 (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
5141
5142 /* Accumulated memory events */
5143
5144 seq_printf(m, "pgfault %lu\n",
5145 events[MEM_CGROUP_EVENTS_PGFAULT]);
5146 seq_printf(m, "pgmajfault %lu\n",
5147 events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
5148
5149 return 0;
5150}
5151
5152static struct cftype memory_files[] = {
5153 {
5154 .name = "current",
5155 .flags = CFTYPE_NOT_ON_ROOT,
5156 .read_u64 = memory_current_read,
5157 },
5158 {
5159 .name = "low",
5160 .flags = CFTYPE_NOT_ON_ROOT,
5161 .seq_show = memory_low_show,
5162 .write = memory_low_write,
5163 },
5164 {
5165 .name = "high",
5166 .flags = CFTYPE_NOT_ON_ROOT,
5167 .seq_show = memory_high_show,
5168 .write = memory_high_write,
5169 },
5170 {
5171 .name = "max",
5172 .flags = CFTYPE_NOT_ON_ROOT,
5173 .seq_show = memory_max_show,
5174 .write = memory_max_write,
5175 },
5176 {
5177 .name = "events",
5178 .flags = CFTYPE_NOT_ON_ROOT,
5179 .file_offset = offsetof(struct mem_cgroup, events_file),
5180 .seq_show = memory_events_show,
5181 },
5182 {
5183 .name = "stat",
5184 .flags = CFTYPE_NOT_ON_ROOT,
5185 .seq_show = memory_stat_show,
5186 },
5187 { } /* terminate */
5188};
5189
5190struct cgroup_subsys memory_cgrp_subsys = {
5191 .css_alloc = mem_cgroup_css_alloc,
5192 .css_online = mem_cgroup_css_online,
5193 .css_offline = mem_cgroup_css_offline,
5194 .css_released = mem_cgroup_css_released,
5195 .css_free = mem_cgroup_css_free,
5196 .css_reset = mem_cgroup_css_reset,
5197 .can_attach = mem_cgroup_can_attach,
5198 .cancel_attach = mem_cgroup_cancel_attach,
5199 .post_attach = mem_cgroup_move_task,
5200 .bind = mem_cgroup_bind,
5201 .dfl_cftypes = memory_files,
5202 .legacy_cftypes = mem_cgroup_legacy_files,
5203 .early_init = 0,
5204};
5205
5206/**
5207 * mem_cgroup_low - check if memory consumption is below the normal range
5208 * @root: the highest ancestor to consider
5209 * @memcg: the memory cgroup to check
5210 *
5211 * Returns %true if memory consumption of @memcg, and that of all
5212 * configurable ancestors up to @root, is below the normal range.
5213 */
5214bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
5215{
5216 if (mem_cgroup_disabled())
5217 return false;
5218
5219 /*
5220 * The toplevel group doesn't have a configurable range, so
5221 * it's never low when looked at directly, and it is not
5222 * considered an ancestor when assessing the hierarchy.
5223 */
5224
5225 if (memcg == root_mem_cgroup)
5226 return false;
5227
5228 if (page_counter_read(&memcg->memory) >= memcg->low)
5229 return false;
5230
5231 while (memcg != root) {
5232 memcg = parent_mem_cgroup(memcg);
5233
5234 if (memcg == root_mem_cgroup)
5235 break;
5236
5237 if (page_counter_read(&memcg->memory) >= memcg->low)
5238 return false;
5239 }
5240 return true;
5241}
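/*
 * Worked example of the check above: with a hierarchy root -> A -> A/B,
 * A.low = 1G and B.low = 512M (values chosen purely for illustration),
 * mem_cgroup_low(root_mem_cgroup, B) returns true only if B's usage is
 * below 512M AND A's usage is below 1G. If any configurable ancestor up
 * to @root exceeds its own low boundary, B is not treated as protected.
 */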
5242
5243/**
5244 * mem_cgroup_try_charge - try charging a page
5245 * @page: page to charge
5246 * @mm: mm context of the victim
5247 * @gfp_mask: reclaim mode
5248 * @memcgp: charged memcg return
5249 *
5250 * Try to charge @page to the memcg that @mm belongs to, reclaiming
5251 * pages according to @gfp_mask if necessary.
5252 *
5253 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5254 * Otherwise, an error code is returned.
5255 *
5256 * After page->mapping has been set up, the caller must finalize the
5257 * charge with mem_cgroup_commit_charge(). Or abort the transaction
5258 * with mem_cgroup_cancel_charge() in case page instantiation fails.
5259 */
5260int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
5261 gfp_t gfp_mask, struct mem_cgroup **memcgp,
5262 bool compound)
5263{
5264 struct mem_cgroup *memcg = NULL;
5265 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5266 int ret = 0;
5267
5268 if (mem_cgroup_disabled())
5269 goto out;
5270
5271 if (PageSwapCache(page)) {
5272 /*
5273 * Every swap fault against a single page tries to charge the
5274 * page, bail as early as possible. shmem_unuse() encounters
5275 * already charged pages, too. The USED bit is protected by
5276 * the page lock, which serializes swap cache removal, which
5277 * in turn serializes uncharging.
5278 */
5279 VM_BUG_ON_PAGE(!PageLocked(page), page);
5280 if (page->mem_cgroup)
5281 goto out;
5282
5283 if (do_swap_account) {
5284 swp_entry_t ent = { .val = page_private(page), };
5285 unsigned short id = lookup_swap_cgroup_id(ent);
5286
5287 rcu_read_lock();
5288 memcg = mem_cgroup_from_id(id);
5289 if (memcg && !css_tryget_online(&memcg->css))
5290 memcg = NULL;
5291 rcu_read_unlock();
5292 }
5293 }
5294
5295 if (!memcg)
5296 memcg = get_mem_cgroup_from_mm(mm);
5297
5298 ret = try_charge(memcg, gfp_mask, nr_pages);
5299
5300 css_put(&memcg->css);
5301out:
5302 *memcgp = memcg;
5303 return ret;
5304}
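/*
 * Minimal sketch of the try/commit/cancel protocol described above, from
 * a caller's perspective (modelled on an anonymous fault path;
 * install_pte() stands in for the real page-table setup and is not an
 * actual kernel function):
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
 *		goto oom;
 *	if (install_pte(vma, addr, page)) {
 *		mem_cgroup_cancel_charge(page, memcg, false);
 *		goto release;
 *	}
 *	page_add_new_anon_rmap(page, vma, addr, false);
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 *	lru_cache_add_active_or_unevictable(page, vma);
 */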
5305
5306/**
5307 * mem_cgroup_commit_charge - commit a page charge
5308 * @page: page to charge
5309 * @memcg: memcg to charge the page to
5310 * @lrucare: page might be on LRU already
5311 *
5312 * Finalize a charge transaction started by mem_cgroup_try_charge(),
5313 * after page->mapping has been set up. This must happen atomically
5314 * as part of the page instantiation, i.e. under the page table lock
5315 * for anonymous pages, under the page lock for page and swap cache.
5316 *
5317 * In addition, the page must not be on the LRU during the commit, to
5318 * prevent racing with task migration. If it might be, use @lrucare.
5319 *
5320 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
5321 */
5322void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
5323 bool lrucare, bool compound)
5324{
5325 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5326
5327 VM_BUG_ON_PAGE(!page->mapping, page);
5328 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5329
5330 if (mem_cgroup_disabled())
5331 return;
5332 /*
5333 * Swap faults will attempt to charge the same page multiple
5334 * times. But reuse_swap_page() might have removed the page
5335 * from swapcache already, so we can't check PageSwapCache().
5336 */
5337 if (!memcg)
5338 return;
5339
5340 commit_charge(page, memcg, lrucare);
5341
5342 local_irq_disable();
5343 mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
5344 memcg_check_events(memcg, page);
5345 local_irq_enable();
5346
5347 if (do_memsw_account() && PageSwapCache(page)) {
5348 swp_entry_t entry = { .val = page_private(page) };
5349 /*
5350 * The swap entry might not get freed for a long time,
5351 * let's not wait for it. The page already received a
5352 * memory+swap charge, drop the swap entry duplicate.
5353 */
5354 mem_cgroup_uncharge_swap(entry);
5355 }
5356}
5357
5358/**
5359 * mem_cgroup_cancel_charge - cancel a page charge
5360 * @page: page to charge
5361 * @page: page whose pending charge is being cancelled
5362 * @memcg: memcg the charge was made against
5363 * Cancel a charge transaction started by mem_cgroup_try_charge().
5364 */
5365void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
5366 bool compound)
5367{
5368 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5369
5370 if (mem_cgroup_disabled())
5371 return;
5372 /*
5373 * Swap faults will attempt to charge the same page multiple
5374 * times. But reuse_swap_page() might have removed the page
5375 * from swapcache already, so we can't check PageSwapCache().
5376 */
5377 if (!memcg)
5378 return;
5379
5380 cancel_charge(memcg, nr_pages);
5381}
5382
5383static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
5384 unsigned long nr_anon, unsigned long nr_file,
5385 unsigned long nr_huge, struct page *dummy_page)
5386{
5387 unsigned long nr_pages = nr_anon + nr_file;
5388 unsigned long flags;
5389
5390 if (!mem_cgroup_is_root(memcg)) {
5391 page_counter_uncharge(&memcg->memory, nr_pages);
5392 if (do_memsw_account())
5393 page_counter_uncharge(&memcg->memsw, nr_pages);
5394 memcg_oom_recover(memcg);
5395 }
5396
5397 local_irq_save(flags);
5398 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
5399 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
5400 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
5401 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
5402 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
5403 memcg_check_events(memcg, dummy_page);
5404 local_irq_restore(flags);
5405
5406 if (!mem_cgroup_is_root(memcg))
5407 css_put_many(&memcg->css, nr_pages);
5408}
5409
5410static void uncharge_list(struct list_head *page_list)
5411{
5412 struct mem_cgroup *memcg = NULL;
5413 unsigned long nr_anon = 0;
5414 unsigned long nr_file = 0;
5415 unsigned long nr_huge = 0;
5416 unsigned long pgpgout = 0;
5417 struct list_head *next;
5418 struct page *page;
5419
5420 /*
5421 * Note that the list can be a single page->lru; hence the
5422 * do-while loop instead of a simple list_for_each_entry().
5423 */
5424 next = page_list->next;
5425 do {
5426 unsigned int nr_pages = 1;
5427
5428 page = list_entry(next, struct page, lru);
5429 next = page->lru.next;
5430
5431 VM_BUG_ON_PAGE(PageLRU(page), page);
5432 VM_BUG_ON_PAGE(page_count(page), page);
5433
5434 if (!page->mem_cgroup)
5435 continue;
5436
5437 /*
5438 * Nobody should be changing or seriously looking at
5439		 * page->mem_cgroup at this point; we have fully
5440 * exclusive access to the page.
5441 */
5442
5443 if (memcg != page->mem_cgroup) {
5444 if (memcg) {
5445 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5446 nr_huge, page);
5447 pgpgout = nr_anon = nr_file = nr_huge = 0;
5448 }
5449 memcg = page->mem_cgroup;
5450 }
5451
5452 if (PageTransHuge(page)) {
5453 nr_pages <<= compound_order(page);
5454 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5455 nr_huge += nr_pages;
5456 }
5457
5458 if (PageAnon(page))
5459 nr_anon += nr_pages;
5460 else
5461 nr_file += nr_pages;
5462
5463 page->mem_cgroup = NULL;
5464
5465 pgpgout++;
5466 } while (next != page_list);
5467
5468 if (memcg)
5469 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5470 nr_huge, page);
5471}
5472
5473/**
5474 * mem_cgroup_uncharge - uncharge a page
5475 * @page: page to uncharge
5476 *
5477 * Uncharge a page previously charged with mem_cgroup_try_charge() and
5478 * mem_cgroup_commit_charge().
5479 */
5480void mem_cgroup_uncharge(struct page *page)
5481{
5482 if (mem_cgroup_disabled())
5483 return;
5484
5485 /* Don't touch page->lru of any random page, pre-check: */
5486 if (!page->mem_cgroup)
5487 return;
5488
5489 INIT_LIST_HEAD(&page->lru);
5490 uncharge_list(&page->lru);
5491}
5492
5493/**
5494 * mem_cgroup_uncharge_list - uncharge a list of pages
5495 * @page_list: list of pages to uncharge
5496 *
5497 * Uncharge a list of pages previously charged with
5498 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
5499 */
5500void mem_cgroup_uncharge_list(struct list_head *page_list)
5501{
5502 if (mem_cgroup_disabled())
5503 return;
5504
5505 if (!list_empty(page_list))
5506 uncharge_list(page_list);
5507}
5508
5509/**
5510 * mem_cgroup_migrate - charge a page's replacement
5511 * @oldpage: currently circulating page
5512 * @newpage: replacement page
5513 *
5514 * Charge @newpage as a replacement page for @oldpage. @oldpage will
5515 * be uncharged upon free.
5516 *
5517 * Both pages must be locked, @newpage->mapping must be set up.
5518 */
5519void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
5520{
5521 struct mem_cgroup *memcg;
5522 unsigned int nr_pages;
5523 bool compound;
5524
5525 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
5526 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
5527 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
5528 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
5529 newpage);
5530
5531 if (mem_cgroup_disabled())
5532 return;
5533
5534 /* Page cache replacement: new page already charged? */
5535 if (newpage->mem_cgroup)
5536 return;
5537
5538 /* Swapcache readahead pages can get replaced before being charged */
5539 memcg = oldpage->mem_cgroup;
5540 if (!memcg)
5541 return;
5542
5543 /* Force-charge the new page. The old one will be freed soon */
5544 compound = PageTransHuge(newpage);
5545 nr_pages = compound ? hpage_nr_pages(newpage) : 1;
5546
5547 page_counter_charge(&memcg->memory, nr_pages);
5548 if (do_memsw_account())
5549 page_counter_charge(&memcg->memsw, nr_pages);
5550 css_get_many(&memcg->css, nr_pages);
5551
5552 commit_charge(newpage, memcg, false);
5553
5554 local_irq_disable();
5555 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
5556 memcg_check_events(memcg, newpage);
5557 local_irq_enable();
5558}
5559
5560DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
5561EXPORT_SYMBOL(memcg_sockets_enabled_key);
5562
5563void sock_update_memcg(struct sock *sk)
5564{
5565 struct mem_cgroup *memcg;
5566
5567	/* Socket cloning can throw us here with sk_memcg already
5568	 * filled. It won't, however, necessarily happen from
5569 * process context. So the test for root memcg given
5570 * the current task's memcg won't help us in this case.
5571 *
5572 * Respecting the original socket's memcg is a better
5573 * decision in this case.
5574 */
5575 if (sk->sk_memcg) {
5576 BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
5577 css_get(&sk->sk_memcg->css);
5578 return;
5579 }
5580
5581 rcu_read_lock();
5582 memcg = mem_cgroup_from_task(current);
5583 if (memcg == root_mem_cgroup)
5584 goto out;
5585 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
5586 goto out;
5587 if (css_tryget_online(&memcg->css))
5588 sk->sk_memcg = memcg;
5589out:
5590 rcu_read_unlock();
5591}
5592EXPORT_SYMBOL(sock_update_memcg);
5593
5594void sock_release_memcg(struct sock *sk)
5595{
5596 WARN_ON(!sk->sk_memcg);
5597 css_put(&sk->sk_memcg->css);
5598}
5599
5600/**
5601 * mem_cgroup_charge_skmem - charge socket memory
5602 * @memcg: memcg to charge
5603 * @nr_pages: number of pages to charge
5604 *
5605 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
5606 * @memcg's configured limit, %false if the charge had to be forced.
5607 */
5608bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5609{
5610 gfp_t gfp_mask = GFP_KERNEL;
5611
5612 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5613 struct page_counter *fail;
5614
5615 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
5616 memcg->tcpmem_pressure = 0;
5617 return true;
5618 }
5619 page_counter_charge(&memcg->tcpmem, nr_pages);
5620 memcg->tcpmem_pressure = 1;
5621 return false;
5622 }
5623
5624 /* Don't block in the packet receive path */
5625 if (in_softirq())
5626 gfp_mask = GFP_NOWAIT;
5627
5628 this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages);
5629
5630 if (try_charge(memcg, gfp_mask, nr_pages) == 0)
5631 return true;
5632
5633 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
5634 return false;
5635}
5636
5637/**
5638 * mem_cgroup_uncharge_skmem - uncharge socket memory
5639 * @memcg: memcg to uncharge
5640 * @nr_pages: number of pages to uncharge
5641 */
5642void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5643{
5644 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5645 page_counter_uncharge(&memcg->tcpmem, nr_pages);
5646 return;
5647 }
5648
5649 this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages);
5650
5651 page_counter_uncharge(&memcg->memory, nr_pages);
5652 css_put_many(&memcg->css, nr_pages);
5653}
5654
5655static int __init cgroup_memory(char *s)
5656{
5657 char *token;
5658
5659 while ((token = strsep(&s, ",")) != NULL) {
5660 if (!*token)
5661 continue;
5662 if (!strcmp(token, "nosocket"))
5663 cgroup_memory_nosocket = true;
5664 if (!strcmp(token, "nokmem"))
5665 cgroup_memory_nokmem = true;
5666 }
5667 return 0;
5668}
5669__setup("cgroup.memory=", cgroup_memory);
5670
5671/*
5672 * subsys_initcall() for memory controller.
5673 *
5674 * Some parts like hotcpu_notifier() have to be initialized from this context
5675 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
5676 * everything that doesn't depend on a specific mem_cgroup structure should
5677 * be initialized from here.
5678 */
5679static int __init mem_cgroup_init(void)
5680{
5681 int cpu, node;
5682
5683 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
5684
5685 for_each_possible_cpu(cpu)
5686 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5687 drain_local_stock);
5688
5689 for_each_node(node) {
5690 struct mem_cgroup_tree_per_node *rtpn;
5691 int zone;
5692
5693 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
5694 node_online(node) ? node : NUMA_NO_NODE);
5695
5696 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
5697 struct mem_cgroup_tree_per_zone *rtpz;
5698
5699 rtpz = &rtpn->rb_tree_per_zone[zone];
5700 rtpz->rb_root = RB_ROOT;
5701 spin_lock_init(&rtpz->lock);
5702 }
5703 soft_limit_tree.rb_tree_per_node[node] = rtpn;
5704 }
5705
5706 return 0;
5707}
5708subsys_initcall(mem_cgroup_init);
5709
5710#ifdef CONFIG_MEMCG_SWAP
5711/**
5712 * mem_cgroup_swapout - transfer a memsw charge to swap
5713 * @page: page whose memsw charge to transfer
5714 * @entry: swap entry to move the charge to
5715 *
5716 * Transfer the memsw charge of @page to @entry.
5717 */
5718void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5719{
5720 struct mem_cgroup *memcg;
5721 unsigned short oldid;
5722
5723 VM_BUG_ON_PAGE(PageLRU(page), page);
5724 VM_BUG_ON_PAGE(page_count(page), page);
5725
5726 if (!do_memsw_account())
5727 return;
5728
5729 memcg = page->mem_cgroup;
5730
5731 /* Readahead page, never charged */
5732 if (!memcg)
5733 return;
5734
5735 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5736 VM_BUG_ON_PAGE(oldid, page);
5737 mem_cgroup_swap_statistics(memcg, true);
5738
5739 page->mem_cgroup = NULL;
5740
5741 if (!mem_cgroup_is_root(memcg))
5742 page_counter_uncharge(&memcg->memory, 1);
5743
5744 /*
5745 * Interrupts should be disabled here because the caller holds the
5746	 * mapping->tree_lock, which is taken with interrupts off. It is
5747	 * important here to have the interrupts disabled because it is the
5748	 * only synchronisation we have for updating the per-CPU variables.
5749 */
5750 VM_BUG_ON(!irqs_disabled());
5751 mem_cgroup_charge_statistics(memcg, page, false, -1);
5752 memcg_check_events(memcg, page);
5753}
5754
5755/**
5756 * mem_cgroup_try_charge_swap - try charging a swap entry
5757 * @page: page being added to swap
5758 * @entry: swap entry to charge
5759 *
5760 * Try to charge @entry to the memcg that @page belongs to.
5761 *
5762 * Returns 0 on success, -ENOMEM on failure.
5763 */
5764int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
5765{
5766 struct mem_cgroup *memcg;
5767 struct page_counter *counter;
5768 unsigned short oldid;
5769
5770 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
5771 return 0;
5772
5773 memcg = page->mem_cgroup;
5774
5775 /* Readahead page, never charged */
5776 if (!memcg)
5777 return 0;
5778
5779 if (!mem_cgroup_is_root(memcg) &&
5780 !page_counter_try_charge(&memcg->swap, 1, &counter))
5781 return -ENOMEM;
5782
5783 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5784 VM_BUG_ON_PAGE(oldid, page);
5785 mem_cgroup_swap_statistics(memcg, true);
5786
5787 css_get(&memcg->css);
5788 return 0;
5789}
5790
5791/**
5792 * mem_cgroup_uncharge_swap - uncharge a swap entry
5793 * @entry: swap entry to uncharge
5794 *
5795 * Drop the swap charge associated with @entry.
5796 */
5797void mem_cgroup_uncharge_swap(swp_entry_t entry)
5798{
5799 struct mem_cgroup *memcg;
5800 unsigned short id;
5801
5802 if (!do_swap_account)
5803 return;
5804
5805 id = swap_cgroup_record(entry, 0);
5806 rcu_read_lock();
5807 memcg = mem_cgroup_from_id(id);
5808 if (memcg) {
5809 if (!mem_cgroup_is_root(memcg)) {
5810 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5811 page_counter_uncharge(&memcg->swap, 1);
5812 else
5813 page_counter_uncharge(&memcg->memsw, 1);
5814 }
5815 mem_cgroup_swap_statistics(memcg, false);
5816 css_put(&memcg->css);
5817 }
5818 rcu_read_unlock();
5819}
5820
5821long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5822{
5823 long nr_swap_pages = get_nr_swap_pages();
5824
5825 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5826 return nr_swap_pages;
5827 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
5828 nr_swap_pages = min_t(long, nr_swap_pages,
5829 READ_ONCE(memcg->swap.limit) -
5830 page_counter_read(&memcg->swap));
5831 return nr_swap_pages;
5832}
5833
5834bool mem_cgroup_swap_full(struct page *page)
5835{
5836 struct mem_cgroup *memcg;
5837
5838 VM_BUG_ON_PAGE(!PageLocked(page), page);
5839
5840 if (vm_swap_full())
5841 return true;
5842 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5843 return false;
5844
5845 memcg = page->mem_cgroup;
5846 if (!memcg)
5847 return false;
5848
5849 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
5850 if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
5851 return true;
5852
5853 return false;
5854}
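/*
 * Worked example of the check above: with memcg->swap.limit == 200 pages,
 * a memcg whose swap usage reaches 100 pages (usage * 2 >= limit) is
 * reported as "swap full", so reclaim starts freeing swap cache more
 * aggressively for that hierarchy, just as vm_swap_full() does globally.
 */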
5855
5856/* remember the boot option */
5857#ifdef CONFIG_MEMCG_SWAP_ENABLED
5858static int really_do_swap_account __initdata = 1;
5859#else
5860static int really_do_swap_account __initdata;
5861#endif
5862
5863static int __init enable_swap_account(char *s)
5864{
5865 if (!strcmp(s, "1"))
5866 really_do_swap_account = 1;
5867 else if (!strcmp(s, "0"))
5868 really_do_swap_account = 0;
5869 return 1;
5870}
5871__setup("swapaccount=", enable_swap_account);
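/*
 * Example (kernel command line): the default from CONFIG_MEMCG_SWAP_ENABLED
 * can be overridden at boot, e.g.
 *
 *	swapaccount=0	disable memory+swap accounting
 *	swapaccount=1	enable it
 */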
5872
5873static u64 swap_current_read(struct cgroup_subsys_state *css,
5874 struct cftype *cft)
5875{
5876 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5877
5878 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
5879}
5880
5881static int swap_max_show(struct seq_file *m, void *v)
5882{
5883 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5884 unsigned long max = READ_ONCE(memcg->swap.limit);
5885
5886 if (max == PAGE_COUNTER_MAX)
5887 seq_puts(m, "max\n");
5888 else
5889 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5890
5891 return 0;
5892}
5893
5894static ssize_t swap_max_write(struct kernfs_open_file *of,
5895 char *buf, size_t nbytes, loff_t off)
5896{
5897 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5898 unsigned long max;
5899 int err;
5900
5901 buf = strstrip(buf);
5902 err = page_counter_memparse(buf, "max", &max);
5903 if (err)
5904 return err;
5905
5906 mutex_lock(&memcg_limit_mutex);
5907 err = page_counter_limit(&memcg->swap, max);
5908 mutex_unlock(&memcg_limit_mutex);
5909 if (err)
5910 return err;
5911
5912 return nbytes;
5913}
5914
5915static struct cftype swap_files[] = {
5916 {
5917 .name = "swap.current",
5918 .flags = CFTYPE_NOT_ON_ROOT,
5919 .read_u64 = swap_current_read,
5920 },
5921 {
5922 .name = "swap.max",
5923 .flags = CFTYPE_NOT_ON_ROOT,
5924 .seq_show = swap_max_show,
5925 .write = swap_max_write,
5926 },
5927 { } /* terminate */
5928};
5929
5930static struct cftype memsw_cgroup_files[] = {
5931 {
5932 .name = "memsw.usage_in_bytes",
5933 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
5934 .read_u64 = mem_cgroup_read_u64,
5935 },
5936 {
5937 .name = "memsw.max_usage_in_bytes",
5938 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
5939 .write = mem_cgroup_reset,
5940 .read_u64 = mem_cgroup_read_u64,
5941 },
5942 {
5943 .name = "memsw.limit_in_bytes",
5944 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
5945 .write = mem_cgroup_write,
5946 .read_u64 = mem_cgroup_read_u64,
5947 },
5948 {
5949 .name = "memsw.failcnt",
5950 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
5951 .write = mem_cgroup_reset,
5952 .read_u64 = mem_cgroup_read_u64,
5953 },
5954 { }, /* terminate */
5955};
5956
5957static int __init mem_cgroup_swap_init(void)
5958{
5959 if (!mem_cgroup_disabled() && really_do_swap_account) {
5960 do_swap_account = 1;
5961 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
5962 swap_files));
5963 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
5964 memsw_cgroup_files));
5965 }
5966 return 0;
5967}
5968subsys_initcall(mem_cgroup_swap_init);
5969
5970#endif /* CONFIG_MEMCG_SWAP */
1// SPDX-License-Identifier: GPL-2.0-or-later
2/* memcontrol.c - Memory Controller
3 *
4 * Copyright IBM Corporation, 2007
5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6 *
7 * Copyright 2007 OpenVZ SWsoft Inc
8 * Author: Pavel Emelianov <xemul@openvz.org>
9 *
10 * Memory thresholds
11 * Copyright (C) 2009 Nokia Corporation
12 * Author: Kirill A. Shutemov
13 *
14 * Kernel Memory Controller
15 * Copyright (C) 2012 Parallels Inc. and Google Inc.
16 * Authors: Glauber Costa and Suleiman Souhlal
17 *
18 * Native page reclaim
19 * Charge lifetime sanitation
20 * Lockless page tracking & accounting
21 * Unified hierarchy configuration model
22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23 *
24 * Per memcg lru locking
25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
26 */
27
28#include <linux/page_counter.h>
29#include <linux/memcontrol.h>
30#include <linux/cgroup.h>
31#include <linux/pagewalk.h>
32#include <linux/sched/mm.h>
33#include <linux/shmem_fs.h>
34#include <linux/hugetlb.h>
35#include <linux/pagemap.h>
36#include <linux/pagevec.h>
37#include <linux/vm_event_item.h>
38#include <linux/smp.h>
39#include <linux/page-flags.h>
40#include <linux/backing-dev.h>
41#include <linux/bit_spinlock.h>
42#include <linux/rcupdate.h>
43#include <linux/limits.h>
44#include <linux/export.h>
45#include <linux/mutex.h>
46#include <linux/rbtree.h>
47#include <linux/slab.h>
48#include <linux/swap.h>
49#include <linux/swapops.h>
50#include <linux/spinlock.h>
51#include <linux/eventfd.h>
52#include <linux/poll.h>
53#include <linux/sort.h>
54#include <linux/fs.h>
55#include <linux/seq_file.h>
56#include <linux/vmpressure.h>
57#include <linux/memremap.h>
58#include <linux/mm_inline.h>
59#include <linux/swap_cgroup.h>
60#include <linux/cpu.h>
61#include <linux/oom.h>
62#include <linux/lockdep.h>
63#include <linux/file.h>
64#include <linux/resume_user_mode.h>
65#include <linux/psi.h>
66#include <linux/seq_buf.h>
67#include <linux/sched/isolation.h>
68#include <linux/kmemleak.h>
69#include "internal.h"
70#include <net/sock.h>
71#include <net/ip.h>
72#include "slab.h"
73#include "swap.h"
74
75#include <linux/uaccess.h>
76
77#include <trace/events/vmscan.h>
78
79struct cgroup_subsys memory_cgrp_subsys __read_mostly;
80EXPORT_SYMBOL(memory_cgrp_subsys);
81
82struct mem_cgroup *root_mem_cgroup __read_mostly;
83
84/* Active memory cgroup to use from an interrupt context */
85DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
86EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
87
88/* Socket memory accounting disabled? */
89static bool cgroup_memory_nosocket __ro_after_init;
90
91/* Kernel memory accounting disabled? */
92static bool cgroup_memory_nokmem __ro_after_init;
93
94/* BPF memory accounting disabled? */
95static bool cgroup_memory_nobpf __ro_after_init;
96
97#ifdef CONFIG_CGROUP_WRITEBACK
98static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
99#endif
100
101/* Whether legacy memory+swap accounting is active */
102static bool do_memsw_account(void)
103{
104 return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
105}
106
107#define THRESHOLDS_EVENTS_TARGET 128
108#define SOFTLIMIT_EVENTS_TARGET 1024
109
110/*
111 * Cgroups above their limits are maintained in a RB-Tree, independent of
112 * their hierarchy representation
113 */
114
115struct mem_cgroup_tree_per_node {
116 struct rb_root rb_root;
117 struct rb_node *rb_rightmost;
118 spinlock_t lock;
119};
120
121struct mem_cgroup_tree {
122 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
123};
124
125static struct mem_cgroup_tree soft_limit_tree __read_mostly;
126
127/* for OOM */
128struct mem_cgroup_eventfd_list {
129 struct list_head list;
130 struct eventfd_ctx *eventfd;
131};
132
133/*
134 * cgroup_event represents events which userspace wants to receive.
135 */
136struct mem_cgroup_event {
137 /*
138 * memcg which the event belongs to.
139 */
140 struct mem_cgroup *memcg;
141 /*
142 * eventfd to signal userspace about the event.
143 */
144 struct eventfd_ctx *eventfd;
145 /*
146 * Each of these stored in a list by the cgroup.
147 */
148 struct list_head list;
149 /*
150 * register_event() callback will be used to add new userspace
151 * waiter for changes related to this event. Use eventfd_signal()
152 * on eventfd to send notification to userspace.
153 */
154 int (*register_event)(struct mem_cgroup *memcg,
155 struct eventfd_ctx *eventfd, const char *args);
156 /*
157	 * unregister_event() callback will be called when userspace closes
158	 * the eventfd or when the cgroup is removed. This callback must be
159	 * set if you want to provide notification functionality.
160 */
161 void (*unregister_event)(struct mem_cgroup *memcg,
162 struct eventfd_ctx *eventfd);
163 /*
164	 * All fields below are needed to unregister the event when
165	 * userspace closes the eventfd.
166 */
167 poll_table pt;
168 wait_queue_head_t *wqh;
169 wait_queue_entry_t wait;
170 struct work_struct remove;
171};
172
173static void mem_cgroup_threshold(struct mem_cgroup *memcg);
174static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
175
176/* Stuff for moving charges at task migration. */
177/*
178 * Types of charges to be moved.
179 */
180#define MOVE_ANON 0x1U
181#define MOVE_FILE 0x2U
182#define MOVE_MASK (MOVE_ANON | MOVE_FILE)
183
184/* "mc" and its members are protected by cgroup_mutex */
185static struct move_charge_struct {
186 spinlock_t lock; /* for from, to */
187 struct mm_struct *mm;
188 struct mem_cgroup *from;
189 struct mem_cgroup *to;
190 unsigned long flags;
191 unsigned long precharge;
192 unsigned long moved_charge;
193 unsigned long moved_swap;
194 struct task_struct *moving_task; /* a task moving charges */
195 wait_queue_head_t waitq; /* a waitq for other context */
196} mc = {
197 .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
198 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
199};
200
201/*
202 * Maximum loops in mem_cgroup_soft_reclaim(), used for soft
203 * limit reclaim to prevent infinite loops, if they ever occur.
204 */
205#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
206#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
207
208/* for encoding cft->private value on file */
209enum res_type {
210 _MEM,
211 _MEMSWAP,
212 _KMEM,
213 _TCP,
214};
215
216#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
217#define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
218#define MEMFILE_ATTR(val) ((val) & 0xffff)
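/*
 * Worked example of the encoding above: MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT)
 * packs the resource type into the high 16 bits and the attribute into the
 * low 16 bits, so a cftype handler decoding cft->private gets _MEMSWAP back
 * from MEMFILE_TYPE() and RES_LIMIT back from MEMFILE_ATTR().
 */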
219
220/*
221 * Iteration constructs for visiting all cgroups (under a tree). If
222 * loops are exited prematurely (break), mem_cgroup_iter_break() must
223 * be used for reference counting.
224 */
225#define for_each_mem_cgroup_tree(iter, root) \
226 for (iter = mem_cgroup_iter(root, NULL, NULL); \
227 iter != NULL; \
228 iter = mem_cgroup_iter(root, iter, NULL))
229
230#define for_each_mem_cgroup(iter) \
231 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
232 iter != NULL; \
233 iter = mem_cgroup_iter(NULL, iter, NULL))
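/*
 * Illustrative use of the iterators above (sketch only): a walk that bails
 * out early must pair the break with mem_cgroup_iter_break() to drop the
 * css reference held on the last returned memcg:
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (some_condition(iter)) {	// hypothetical predicate
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */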
234
235static inline bool task_is_dying(void)
236{
237 return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
238 (current->flags & PF_EXITING);
239}
240
241/* Some nice accessors for the vmpressure. */
242struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
243{
244 if (!memcg)
245 memcg = root_mem_cgroup;
246 return &memcg->vmpressure;
247}
248
249struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
250{
251 return container_of(vmpr, struct mem_cgroup, vmpressure);
252}
253
254#define CURRENT_OBJCG_UPDATE_BIT 0
255#define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)
256
257#ifdef CONFIG_MEMCG_KMEM
258static DEFINE_SPINLOCK(objcg_lock);
259
260bool mem_cgroup_kmem_disabled(void)
261{
262 return cgroup_memory_nokmem;
263}
264
265static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
266 unsigned int nr_pages);
267
268static void obj_cgroup_release(struct percpu_ref *ref)
269{
270 struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
271 unsigned int nr_bytes;
272 unsigned int nr_pages;
273 unsigned long flags;
274
275 /*
276 * At this point all allocated objects are freed, and
277 * objcg->nr_charged_bytes can't have an arbitrary byte value.
278 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
279 *
280 * The following sequence can lead to it:
281	 * 1) CPU0: objcg == stock->cached_objcg
282	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
283	 *          PAGE_SIZE bytes are charged
284	 * 3) CPU1: a process from another memcg is allocating something,
285	 *          the stock is flushed,
286	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
287	 * 4) CPU0: we release this object,
288	 *          92 bytes are added to stock->nr_bytes
289	 * 5) CPU0: the stock is flushed,
290	 *          92 bytes are added to objcg->nr_charged_bytes
291 *
292 * In the result, nr_charged_bytes == PAGE_SIZE.
293 * This page will be uncharged in obj_cgroup_release().
294 */
295 nr_bytes = atomic_read(&objcg->nr_charged_bytes);
296 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
297 nr_pages = nr_bytes >> PAGE_SHIFT;
298
299 if (nr_pages)
300 obj_cgroup_uncharge_pages(objcg, nr_pages);
301
302 spin_lock_irqsave(&objcg_lock, flags);
303 list_del(&objcg->list);
304 spin_unlock_irqrestore(&objcg_lock, flags);
305
306 percpu_ref_exit(ref);
307 kfree_rcu(objcg, rcu);
308}
309
310static struct obj_cgroup *obj_cgroup_alloc(void)
311{
312 struct obj_cgroup *objcg;
313 int ret;
314
315 objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
316 if (!objcg)
317 return NULL;
318
319 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
320 GFP_KERNEL);
321 if (ret) {
322 kfree(objcg);
323 return NULL;
324 }
325 INIT_LIST_HEAD(&objcg->list);
326 return objcg;
327}
328
329static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
330 struct mem_cgroup *parent)
331{
332 struct obj_cgroup *objcg, *iter;
333
334 objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
335
336 spin_lock_irq(&objcg_lock);
337
338 /* 1) Ready to reparent active objcg. */
339 list_add(&objcg->list, &memcg->objcg_list);
340 /* 2) Reparent active objcg and already reparented objcgs to parent. */
341 list_for_each_entry(iter, &memcg->objcg_list, list)
342 WRITE_ONCE(iter->memcg, parent);
343 /* 3) Move already reparented objcgs to the parent's list */
344 list_splice(&memcg->objcg_list, &parent->objcg_list);
345
346 spin_unlock_irq(&objcg_lock);
347
348 percpu_ref_kill(&objcg->refcnt);
349}
350
351/*
352 * A lot of the calls to the cache allocation functions are expected to be
353 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
354 * conditional on this static branch, we'll have to allow modules that do
355 * kmem_cache_alloc() and the like to see this symbol as well.
356 */
357DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
358EXPORT_SYMBOL(memcg_kmem_online_key);
359
360DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
361EXPORT_SYMBOL(memcg_bpf_enabled_key);
362#endif
363
364/**
365 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
366 * @folio: folio of interest
367 *
368 * If memcg is bound to the default hierarchy, css of the memcg associated
369 * with @folio is returned. The returned css remains associated with @folio
370 * until it is released.
371 *
372 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
373 * is returned.
374 */
375struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
376{
377 struct mem_cgroup *memcg = folio_memcg(folio);
378
379 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
380 memcg = root_mem_cgroup;
381
382 return &memcg->css;
383}
384
385/**
386 * page_cgroup_ino - return inode number of the memcg a page is charged to
387 * @page: the page
388 *
389 * Look up the closest online ancestor of the memory cgroup @page is charged to
390 * and return its inode number or 0 if @page is not charged to any cgroup. It
391 * is safe to call this function without holding a reference to @page.
392 *
393 * Note, this function is inherently racy, because there is nothing to prevent
394 * the cgroup inode from getting torn down and potentially reallocated a moment
395 * after page_cgroup_ino() returns, so it should only be used by callers that
396 * do not care (such as procfs interfaces).
397 */
398ino_t page_cgroup_ino(struct page *page)
399{
400 struct mem_cgroup *memcg;
401 unsigned long ino = 0;
402
403 rcu_read_lock();
404 /* page_folio() is racy here, but the entire function is racy anyway */
405 memcg = folio_memcg_check(page_folio(page));
406
407 while (memcg && !(memcg->css.flags & CSS_ONLINE))
408 memcg = parent_mem_cgroup(memcg);
409 if (memcg)
410 ino = cgroup_ino(memcg->css.cgroup);
411 rcu_read_unlock();
412 return ino;
413}
414
415static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
416 struct mem_cgroup_tree_per_node *mctz,
417 unsigned long new_usage_in_excess)
418{
419 struct rb_node **p = &mctz->rb_root.rb_node;
420 struct rb_node *parent = NULL;
421 struct mem_cgroup_per_node *mz_node;
422 bool rightmost = true;
423
424 if (mz->on_tree)
425 return;
426
427 mz->usage_in_excess = new_usage_in_excess;
428 if (!mz->usage_in_excess)
429 return;
430 while (*p) {
431 parent = *p;
432 mz_node = rb_entry(parent, struct mem_cgroup_per_node,
433 tree_node);
434 if (mz->usage_in_excess < mz_node->usage_in_excess) {
435 p = &(*p)->rb_left;
436 rightmost = false;
437 } else {
438 p = &(*p)->rb_right;
439 }
440 }
441
442 if (rightmost)
443 mctz->rb_rightmost = &mz->tree_node;
444
445 rb_link_node(&mz->tree_node, parent, p);
446 rb_insert_color(&mz->tree_node, &mctz->rb_root);
447 mz->on_tree = true;
448}
449
450static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
451 struct mem_cgroup_tree_per_node *mctz)
452{
453 if (!mz->on_tree)
454 return;
455
456 if (&mz->tree_node == mctz->rb_rightmost)
457 mctz->rb_rightmost = rb_prev(&mz->tree_node);
458
459 rb_erase(&mz->tree_node, &mctz->rb_root);
460 mz->on_tree = false;
461}
462
463static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
464 struct mem_cgroup_tree_per_node *mctz)
465{
466 unsigned long flags;
467
468 spin_lock_irqsave(&mctz->lock, flags);
469 __mem_cgroup_remove_exceeded(mz, mctz);
470 spin_unlock_irqrestore(&mctz->lock, flags);
471}
472
473static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
474{
475 unsigned long nr_pages = page_counter_read(&memcg->memory);
476 unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
477 unsigned long excess = 0;
478
479 if (nr_pages > soft_limit)
480 excess = nr_pages - soft_limit;
481
482 return excess;
483}
484
485static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
486{
487 unsigned long excess;
488 struct mem_cgroup_per_node *mz;
489 struct mem_cgroup_tree_per_node *mctz;
490
491 if (lru_gen_enabled()) {
492 if (soft_limit_excess(memcg))
493 lru_gen_soft_reclaim(memcg, nid);
494 return;
495 }
496
497 mctz = soft_limit_tree.rb_tree_per_node[nid];
498 if (!mctz)
499 return;
500 /*
501	 * Necessary to update all ancestors when the hierarchy is used,
502	 * because their event counters are not touched.
503 */
504 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
505 mz = memcg->nodeinfo[nid];
506 excess = soft_limit_excess(memcg);
507 /*
508 * We have to update the tree if mz is on RB-tree or
509 * mem is over its softlimit.
510 */
511 if (excess || mz->on_tree) {
512 unsigned long flags;
513
514 spin_lock_irqsave(&mctz->lock, flags);
515 /* if on-tree, remove it */
516 if (mz->on_tree)
517 __mem_cgroup_remove_exceeded(mz, mctz);
518 /*
519 * Insert again. mz->usage_in_excess will be updated.
520 * If excess is 0, no tree ops.
521 */
522 __mem_cgroup_insert_exceeded(mz, mctz, excess);
523 spin_unlock_irqrestore(&mctz->lock, flags);
524 }
525 }
526}
527
528static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
529{
530 struct mem_cgroup_tree_per_node *mctz;
531 struct mem_cgroup_per_node *mz;
532 int nid;
533
534 for_each_node(nid) {
535 mz = memcg->nodeinfo[nid];
536 mctz = soft_limit_tree.rb_tree_per_node[nid];
537 if (mctz)
538 mem_cgroup_remove_exceeded(mz, mctz);
539 }
540}
541
542static struct mem_cgroup_per_node *
543__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
544{
545 struct mem_cgroup_per_node *mz;
546
547retry:
548 mz = NULL;
549 if (!mctz->rb_rightmost)
550 goto done; /* Nothing to reclaim from */
551
552 mz = rb_entry(mctz->rb_rightmost,
553 struct mem_cgroup_per_node, tree_node);
554 /*
555 * Remove the node now but someone else can add it back,
556	 * we will add it back at the end of reclaim to its correct
557 * position in the tree.
558 */
559 __mem_cgroup_remove_exceeded(mz, mctz);
560 if (!soft_limit_excess(mz->memcg) ||
561 !css_tryget(&mz->memcg->css))
562 goto retry;
563done:
564 return mz;
565}
566
567static struct mem_cgroup_per_node *
568mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
569{
570 struct mem_cgroup_per_node *mz;
571
572 spin_lock_irq(&mctz->lock);
573 mz = __mem_cgroup_largest_soft_limit_node(mctz);
574 spin_unlock_irq(&mctz->lock);
575 return mz;
576}
577
578/* Subset of vm_event_item to report for memcg event stats */
579static const unsigned int memcg_vm_event_stat[] = {
580 PGPGIN,
581 PGPGOUT,
582 PGSCAN_KSWAPD,
583 PGSCAN_DIRECT,
584 PGSCAN_KHUGEPAGED,
585 PGSTEAL_KSWAPD,
586 PGSTEAL_DIRECT,
587 PGSTEAL_KHUGEPAGED,
588 PGFAULT,
589 PGMAJFAULT,
590 PGREFILL,
591 PGACTIVATE,
592 PGDEACTIVATE,
593 PGLAZYFREE,
594 PGLAZYFREED,
595#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
596 ZSWPIN,
597 ZSWPOUT,
598 ZSWPWB,
599#endif
600#ifdef CONFIG_TRANSPARENT_HUGEPAGE
601 THP_FAULT_ALLOC,
602 THP_COLLAPSE_ALLOC,
603 THP_SWPOUT,
604 THP_SWPOUT_FALLBACK,
605#endif
606};
607
608#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
609static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
610
611static void init_memcg_events(void)
612{
613 int i;
614
615 for (i = 0; i < NR_MEMCG_EVENTS; ++i)
616 mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
617}
618
619static inline int memcg_events_index(enum vm_event_item idx)
620{
621 return mem_cgroup_events_index[idx] - 1;
622}
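/*
 * Example (derived from the tables above): init_memcg_events() stores
 * index + 1, so for a tracked item such as PGFAULT, memcg_events_index()
 * returns its position in memcg_vm_event_stat[], while an event that is
 * not in the table maps to 0 - 1 == -1 and is ignored by the callers.
 */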
623
624struct memcg_vmstats_percpu {
625 /* Stats updates since the last flush */
626 unsigned int stats_updates;
627
628 /* Cached pointers for fast iteration in memcg_rstat_updated() */
629 struct memcg_vmstats_percpu *parent;
630 struct memcg_vmstats *vmstats;
631
632 /* The above should fit a single cacheline for memcg_rstat_updated() */
633
634 /* Local (CPU and cgroup) page state & events */
635 long state[MEMCG_NR_STAT];
636 unsigned long events[NR_MEMCG_EVENTS];
637
638 /* Delta calculation for lockless upward propagation */
639 long state_prev[MEMCG_NR_STAT];
640 unsigned long events_prev[NR_MEMCG_EVENTS];
641
642 /* Cgroup1: threshold notifications & softlimit tree updates */
643 unsigned long nr_page_events;
644 unsigned long targets[MEM_CGROUP_NTARGETS];
645} ____cacheline_aligned;
646
647struct memcg_vmstats {
648 /* Aggregated (CPU and subtree) page state & events */
649 long state[MEMCG_NR_STAT];
650 unsigned long events[NR_MEMCG_EVENTS];
651
652 /* Non-hierarchical (CPU aggregated) page state & events */
653 long state_local[MEMCG_NR_STAT];
654 unsigned long events_local[NR_MEMCG_EVENTS];
655
656 /* Pending child counts during tree propagation */
657 long state_pending[MEMCG_NR_STAT];
658 unsigned long events_pending[NR_MEMCG_EVENTS];
659
660 /* Stats updates since the last flush */
661 atomic64_t stats_updates;
662};
663
664/*
665 * memcg and lruvec stats flushing
666 *
667 * Many codepaths leading to stats update or read are performance sensitive and
668 * adding stats flushing in such codepaths is not desirable. So, to optimize the
669 * flushing, the kernel does:
670 *
671 * 1) Periodically and asynchronously flush the stats every 2 seconds so the
672 *    rstat update tree does not grow unbounded.
673 *
674 * 2) Flush the stats synchronously on the reader side only when there are more
675 *    than (MEMCG_CHARGE_BATCH * nr_cpus) update events. With this optimization
676 *    the stats can be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus), but
677 *    only for 2 seconds due to (1).
678 */
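/*
 * Worked example (assuming MEMCG_CHARGE_BATCH is 64): on a machine with
 * 8 online CPUs the reader-side threshold in (2) is 64 * 8 = 512 pending
 * page-sized updates; below that, readers rely on the periodic 2-second
 * flush from (1) and tolerate slightly stale numbers.
 */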
679static void flush_memcg_stats_dwork(struct work_struct *w);
680static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
681static u64 flush_last_time;
682
683#define FLUSH_TIME (2UL*HZ)
684
685/*
686 * Accessors to ensure that preemption is disabled on PREEMPT_RT, because we
687 * cannot rely on it being disabled as part of an acquired spinlock_t lock.
688 * These functions are never used in hardirq context on PREEMPT_RT, so
689 * disabling preemption is sufficient.
690 */
691static void memcg_stats_lock(void)
692{
693 preempt_disable_nested();
694 VM_WARN_ON_IRQS_ENABLED();
695}
696
697static void __memcg_stats_lock(void)
698{
699 preempt_disable_nested();
700}
701
702static void memcg_stats_unlock(void)
703{
704 preempt_enable_nested();
705}
706
707
708static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
709{
710 return atomic64_read(&vmstats->stats_updates) >
711 MEMCG_CHARGE_BATCH * num_online_cpus();
712}
713
714static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
715{
716 struct memcg_vmstats_percpu *statc;
717 int cpu = smp_processor_id();
718
719 if (!val)
720 return;
721
722 cgroup_rstat_updated(memcg->css.cgroup, cpu);
723 statc = this_cpu_ptr(memcg->vmstats_percpu);
724 for (; statc; statc = statc->parent) {
725 statc->stats_updates += abs(val);
726 if (statc->stats_updates < MEMCG_CHARGE_BATCH)
727 continue;
728
729 /*
730 * If @memcg is already flush-able, increasing stats_updates is
731 * redundant. Avoid the overhead of the atomic update.
732 */
733 if (!memcg_vmstats_needs_flush(statc->vmstats))
734 atomic64_add(statc->stats_updates,
735 &statc->vmstats->stats_updates);
736 statc->stats_updates = 0;
737 }
738}
739
740static void do_flush_stats(struct mem_cgroup *memcg)
741{
742 if (mem_cgroup_is_root(memcg))
743 WRITE_ONCE(flush_last_time, jiffies_64);
744
745 cgroup_rstat_flush(memcg->css.cgroup);
746}
747
748/*
749 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
750 * @memcg: root of the subtree to flush
751 *
752 * Flushing is serialized by the underlying global rstat lock. There is also a
753 * minimum amount of work to be done even if there are no stat updates to flush.
754 * Hence, we only flush the stats if the updates delta exceeds a threshold. This
755 * avoids unnecessary work and contention on the underlying lock.
756 */
757void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
758{
759 if (mem_cgroup_disabled())
760 return;
761
762 if (!memcg)
763 memcg = root_mem_cgroup;
764
765 if (memcg_vmstats_needs_flush(memcg->vmstats))
766 do_flush_stats(memcg);
767}
768
769void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
770{
771 /* Only flush if the periodic flusher is one full cycle late */
772 if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
773 mem_cgroup_flush_stats(memcg);
774}
775
776static void flush_memcg_stats_dwork(struct work_struct *w)
777{
778 /*
779 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
780 * in latency-sensitive paths is as cheap as possible.
781 */
782 do_flush_stats(root_mem_cgroup);
783 queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
784}
785
786unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
787{
788 long x = READ_ONCE(memcg->vmstats->state[idx]);
789#ifdef CONFIG_SMP
790 if (x < 0)
791 x = 0;
792#endif
793 return x;
794}
795
796static int memcg_page_state_unit(int item);
797
798/*
799 * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
800 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
801 */
802static int memcg_state_val_in_pages(int idx, int val)
803{
804 int unit = memcg_page_state_unit(idx);
805
806 if (!val || unit == PAGE_SIZE)
807 return val;
808 else
809 return max(val * unit / PAGE_SIZE, 1UL);
810}
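/*
 * Worked example: for a byte-counted item such as NR_SLAB_RECLAIMABLE_B
 * (unit == 1), an update of +512 bytes is 512 / PAGE_SIZE == 0 pages, so
 * the max() above rounds the non-zero update up to 1 page; page-counted
 * items (unit == PAGE_SIZE) pass through unchanged.
 */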
811
812/**
813 * __mod_memcg_state - update cgroup memory statistics
814 * @memcg: the memory cgroup
815 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
816 * @val: delta to add to the counter, can be negative
817 */
818void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
819{
820 if (mem_cgroup_disabled())
821 return;
822
823 __this_cpu_add(memcg->vmstats_percpu->state[idx], val);
824 memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
825}
826
827/* idx can be of type enum memcg_stat_item or node_stat_item. */
828static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
829{
830 long x = READ_ONCE(memcg->vmstats->state_local[idx]);
831
832#ifdef CONFIG_SMP
833 if (x < 0)
834 x = 0;
835#endif
836 return x;
837}
838
839void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
840 int val)
841{
842 struct mem_cgroup_per_node *pn;
843 struct mem_cgroup *memcg;
844
845 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
846 memcg = pn->memcg;
847
848 /*
849	 * Callers from rmap rely on disabled preemption because they never
850	 * update their counters from in-interrupt context. For those
851	 * counters we check that the update is never performed from an
852	 * interrupt context, while other callers need to have interrupts disabled.
853 */
854 __memcg_stats_lock();
855 if (IS_ENABLED(CONFIG_DEBUG_VM)) {
856 switch (idx) {
857 case NR_ANON_MAPPED:
858 case NR_FILE_MAPPED:
859 case NR_ANON_THPS:
860 case NR_SHMEM_PMDMAPPED:
861 case NR_FILE_PMDMAPPED:
862 WARN_ON_ONCE(!in_task());
863 break;
864 default:
865 VM_WARN_ON_IRQS_ENABLED();
866 }
867 }
868
869 /* Update memcg */
870 __this_cpu_add(memcg->vmstats_percpu->state[idx], val);
871
872 /* Update lruvec */
873 __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
874
875 memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
876 memcg_stats_unlock();
877}
878
879/**
880 * __mod_lruvec_state - update lruvec memory statistics
881 * @lruvec: the lruvec
882 * @idx: the stat item
883 * @val: delta to add to the counter, can be negative
884 *
885 * The lruvec is the intersection of the NUMA node and a cgroup. This
886 * function updates all three counters that are affected by a
887 * change of state at this level: per-node, per-cgroup, per-lruvec.
888 */
889void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
890 int val)
891{
892 /* Update node */
893 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
894
895 /* Update memcg and lruvec */
896 if (!mem_cgroup_disabled())
897 __mod_memcg_lruvec_state(lruvec, idx, val);
898}
899
900void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
901 int val)
902{
903 struct mem_cgroup *memcg;
904 pg_data_t *pgdat = folio_pgdat(folio);
905 struct lruvec *lruvec;
906
907 rcu_read_lock();
908 memcg = folio_memcg(folio);
909 /* Untracked pages have no memcg, no lruvec. Update only the node */
910 if (!memcg) {
911 rcu_read_unlock();
912 __mod_node_page_state(pgdat, idx, val);
913 return;
914 }
915
916 lruvec = mem_cgroup_lruvec(memcg, pgdat);
917 __mod_lruvec_state(lruvec, idx, val);
918 rcu_read_unlock();
919}
920EXPORT_SYMBOL(__lruvec_stat_mod_folio);
921
922void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
923{
924 pg_data_t *pgdat = page_pgdat(virt_to_page(p));
925 struct mem_cgroup *memcg;
926 struct lruvec *lruvec;
927
928 rcu_read_lock();
929 memcg = mem_cgroup_from_slab_obj(p);
930
931 /*
932	 * Untracked pages have no memcg, no lruvec. Update only the
933	 * node. If the slab objects have been reparented to the root memcg,
934	 * we still need to update the per-memcg vmstats when we free a
935	 * slab object, to keep them correct for the root memcg.
936 */
937 if (!memcg) {
938 __mod_node_page_state(pgdat, idx, val);
939 } else {
940 lruvec = mem_cgroup_lruvec(memcg, pgdat);
941 __mod_lruvec_state(lruvec, idx, val);
942 }
943 rcu_read_unlock();
944}
945
946/**
947 * __count_memcg_events - account VM events in a cgroup
948 * @memcg: the memory cgroup
949 * @idx: the event item
950 * @count: the number of events that occurred
951 */
952void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
953 unsigned long count)
954{
955 int index = memcg_events_index(idx);
956
957 if (mem_cgroup_disabled() || index < 0)
958 return;
959
960 memcg_stats_lock();
961 __this_cpu_add(memcg->vmstats_percpu->events[index], count);
962 memcg_rstat_updated(memcg, count);
963 memcg_stats_unlock();
964}
965
966static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
967{
968 int index = memcg_events_index(event);
969
970 if (index < 0)
971 return 0;
972 return READ_ONCE(memcg->vmstats->events[index]);
973}
974
975static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
976{
977 int index = memcg_events_index(event);
978
979 if (index < 0)
980 return 0;
981
982 return READ_ONCE(memcg->vmstats->events_local[index]);
983}
984
985static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
986 int nr_pages)
987{
988	/* pagein of a big page is one event, so ignore the page size */
989 if (nr_pages > 0)
990 __count_memcg_events(memcg, PGPGIN, 1);
991 else {
992 __count_memcg_events(memcg, PGPGOUT, 1);
993 nr_pages = -nr_pages; /* for event */
994 }
995
996 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
997}
998
999static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
1000 enum mem_cgroup_events_target target)
1001{
1002 unsigned long val, next;
1003
1004 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
1005 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
1006 /* from time_after() in jiffies.h */
1007 if ((long)(next - val) < 0) {
1008 switch (target) {
1009 case MEM_CGROUP_TARGET_THRESH:
1010 next = val + THRESHOLDS_EVENTS_TARGET;
1011 break;
1012 case MEM_CGROUP_TARGET_SOFTLIMIT:
1013 next = val + SOFTLIMIT_EVENTS_TARGET;
1014 break;
1015 default:
1016 break;
1017 }
1018 __this_cpu_write(memcg->vmstats_percpu->targets[target], next);
1019 return true;
1020 }
1021 return false;
1022}
1023
1024/*
1025 * Check events in order.
1026 */
1028static void memcg_check_events(struct mem_cgroup *memcg, int nid)
1029{
1030 if (IS_ENABLED(CONFIG_PREEMPT_RT))
1031 return;
1032
1033 /* threshold event is triggered in finer grain than soft limit */
1034 if (unlikely(mem_cgroup_event_ratelimit(memcg,
1035 MEM_CGROUP_TARGET_THRESH))) {
1036 bool do_softlimit;
1037
1038 do_softlimit = mem_cgroup_event_ratelimit(memcg,
1039 MEM_CGROUP_TARGET_SOFTLIMIT);
1040 mem_cgroup_threshold(memcg);
1041 if (unlikely(do_softlimit))
1042 mem_cgroup_update_tree(memcg, nid);
1043 }
1044}
1045
1046struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
1047{
1048 /*
1049 * mm_update_next_owner() may clear mm->owner to NULL
1050 * if it races with swapoff, page migration, etc.
1051 * So this can be called with p == NULL.
1052 */
1053 if (unlikely(!p))
1054 return NULL;
1055
1056 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
1057}
1058EXPORT_SYMBOL(mem_cgroup_from_task);
1059
1060static __always_inline struct mem_cgroup *active_memcg(void)
1061{
1062 if (!in_task())
1063 return this_cpu_read(int_active_memcg);
1064 else
1065 return current->active_memcg;
1066}
1067
1068/**
1069 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
1070 * @mm: mm from which memcg should be extracted. It can be NULL.
1071 *
1072 * Obtain a reference on @mm's memcg and return it if successful. If @mm
1073 * is NULL, then the memcg is chosen as follows:
1074 * 1) The active memcg, if set.
1075 * 2) current->mm->memcg, if available
1076 * 3) root memcg
1077 * If mem_cgroup is disabled, NULL is returned.
1078 */
1079struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1080{
1081 struct mem_cgroup *memcg;
1082
1083 if (mem_cgroup_disabled())
1084 return NULL;
1085
1086 /*
1087 * Page cache insertions can happen without an
1088 * actual mm context, e.g. during disk probing
1089 * on boot, loopback IO, acct() writes etc.
1090 *
1091 * No need to css_get on root memcg as the reference
1092 * counting is disabled on the root level in the
1093 * cgroup core. See CSS_NO_REF.
1094 */
1095 if (unlikely(!mm)) {
1096 memcg = active_memcg();
1097 if (unlikely(memcg)) {
1098 /* remote memcg must hold a ref */
1099 css_get(&memcg->css);
1100 return memcg;
1101 }
1102 mm = current->mm;
1103 if (unlikely(!mm))
1104 return root_mem_cgroup;
1105 }
1106
1107 rcu_read_lock();
1108 do {
1109 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1110 if (unlikely(!memcg))
1111 memcg = root_mem_cgroup;
1112 } while (!css_tryget(&memcg->css));
1113 rcu_read_unlock();
1114 return memcg;
1115}
1116EXPORT_SYMBOL(get_mem_cgroup_from_mm);
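/*
 * Usage sketch (illustrative only): callers own the returned reference and
 * must drop it when done, e.g.
 *
 *	memcg = get_mem_cgroup_from_mm(current->mm);
 *	if (memcg) {
 *		... charge against or inspect the memcg ...
 *		css_put(&memcg->css);
 *	}
 *
 * NULL is only returned when the memory controller is disabled.
 */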
1117
1118/**
1119 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
1120 */
1121struct mem_cgroup *get_mem_cgroup_from_current(void)
1122{
1123 struct mem_cgroup *memcg;
1124
1125 if (mem_cgroup_disabled())
1126 return NULL;
1127
1128again:
1129 rcu_read_lock();
1130 memcg = mem_cgroup_from_task(current);
1131 if (!css_tryget(&memcg->css)) {
1132 rcu_read_unlock();
1133 goto again;
1134 }
1135 rcu_read_unlock();
1136 return memcg;
1137}
1138
1139/**
1140 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1141 * @root: hierarchy root
1142 * @prev: previously returned memcg, NULL on first invocation
1143 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1144 *
1145 * Returns references to children of the hierarchy below @root, or
1146 * @root itself, or %NULL after a full round-trip.
1147 *
1148 * Caller must pass the return value in @prev on subsequent
1149 * invocations for reference counting, or use mem_cgroup_iter_break()
1150 * to cancel a hierarchy walk before the round-trip is complete.
1151 *
1152 * Reclaimers can specify a node in @reclaim to divide up the memcgs
1153 * in the hierarchy among all concurrent reclaimers operating on the
1154 * same node.
1155 */
1156struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1157 struct mem_cgroup *prev,
1158 struct mem_cgroup_reclaim_cookie *reclaim)
1159{
1160 struct mem_cgroup_reclaim_iter *iter;
1161 struct cgroup_subsys_state *css = NULL;
1162 struct mem_cgroup *memcg = NULL;
1163 struct mem_cgroup *pos = NULL;
1164
1165 if (mem_cgroup_disabled())
1166 return NULL;
1167
1168 if (!root)
1169 root = root_mem_cgroup;
1170
1171 rcu_read_lock();
1172
1173 if (reclaim) {
1174 struct mem_cgroup_per_node *mz;
1175
1176 mz = root->nodeinfo[reclaim->pgdat->node_id];
1177 iter = &mz->iter;
1178
1179 /*
1180 * On start, join the current reclaim iteration cycle.
1181 * Exit when a concurrent walker completes it.
1182 */
1183 if (!prev)
1184 reclaim->generation = iter->generation;
1185 else if (reclaim->generation != iter->generation)
1186 goto out_unlock;
1187
1188 while (1) {
1189 pos = READ_ONCE(iter->position);
1190 if (!pos || css_tryget(&pos->css))
1191 break;
1192 /*
1193 * css reference reached zero, so iter->position will
1194 * be cleared by ->css_released. However, we should not
1195 * rely on this happening soon, because ->css_released
1196 * is called from a work queue, and by busy-waiting we
1197 * might block it. So we clear iter->position right
1198 * away.
1199 */
1200 (void)cmpxchg(&iter->position, pos, NULL);
1201 }
1202 } else if (prev) {
1203 pos = prev;
1204 }
1205
1206 if (pos)
1207 css = &pos->css;
1208
1209 for (;;) {
1210 css = css_next_descendant_pre(css, &root->css);
1211 if (!css) {
1212 /*
1213 * Reclaimers share the hierarchy walk, and a
1214 * new one might jump in right at the end of
1215 * the hierarchy - make sure they see at least
1216 * one group and restart from the beginning.
1217 */
1218 if (!prev)
1219 continue;
1220 break;
1221 }
1222
1223 /*
1224 * Verify the css and acquire a reference. The root
1225 * is provided by the caller, so we know it's alive
1226 * and kicking, and don't take an extra reference.
1227 */
1228 if (css == &root->css || css_tryget(css)) {
1229 memcg = mem_cgroup_from_css(css);
1230 break;
1231 }
1232 }
1233
1234 if (reclaim) {
1235 /*
1236 * The position could have already been updated by a competing
1237 * thread, so check that the value hasn't changed since we read
1238 * it to avoid reclaiming from the same cgroup twice.
1239 */
1240 (void)cmpxchg(&iter->position, pos, memcg);
1241
1242 if (pos)
1243 css_put(&pos->css);
1244
1245 if (!memcg)
1246 iter->generation++;
1247 }
1248
1249out_unlock:
1250 rcu_read_unlock();
1251 if (prev && prev != root)
1252 css_put(&prev->css);
1253
1254 return memcg;
1255}
1256
1257/**
1258 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1259 * @root: hierarchy root
1260 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1261 */
1262void mem_cgroup_iter_break(struct mem_cgroup *root,
1263 struct mem_cgroup *prev)
1264{
1265 if (!root)
1266 root = root_mem_cgroup;
1267 if (prev && prev != root)
1268 css_put(&prev->css);
1269}
1270
1271static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1272 struct mem_cgroup *dead_memcg)
1273{
1274 struct mem_cgroup_reclaim_iter *iter;
1275 struct mem_cgroup_per_node *mz;
1276 int nid;
1277
1278 for_each_node(nid) {
1279 mz = from->nodeinfo[nid];
1280 iter = &mz->iter;
1281 cmpxchg(&iter->position, dead_memcg, NULL);
1282 }
1283}
1284
1285static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1286{
1287 struct mem_cgroup *memcg = dead_memcg;
1288 struct mem_cgroup *last;
1289
1290 do {
1291 __invalidate_reclaim_iterators(memcg, dead_memcg);
1292 last = memcg;
1293 } while ((memcg = parent_mem_cgroup(memcg)));
1294
1295 /*
1296 * When cgroup1 non-hierarchy mode is used,
1297 * parent_mem_cgroup() does not walk all the way up to the
1298 * cgroup root (root_mem_cgroup). So we have to handle
1299 * dead_memcg from cgroup root separately.
1300 */
1301 if (!mem_cgroup_is_root(last))
1302 __invalidate_reclaim_iterators(root_mem_cgroup,
1303 dead_memcg);
1304}
1305
1306/**
1307 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1308 * @memcg: hierarchy root
1309 * @fn: function to call for each task
1310 * @arg: argument passed to @fn
1311 *
1312 * This function iterates over tasks attached to @memcg or to any of its
1313 * descendants and calls @fn for each task. If @fn returns a non-zero
1314 * value, the function breaks the iteration loop. Otherwise, it will iterate
1315 * over all tasks and return 0.
1316 *
1317 * This function must not be called for the root memory cgroup.
1318 */
1319void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1320 int (*fn)(struct task_struct *, void *), void *arg)
1321{
1322 struct mem_cgroup *iter;
1323 int ret = 0;
1324
1325 BUG_ON(mem_cgroup_is_root(memcg));
1326
1327 for_each_mem_cgroup_tree(iter, memcg) {
1328 struct css_task_iter it;
1329 struct task_struct *task;
1330
1331 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1332 while (!ret && (task = css_task_iter_next(&it)))
1333 ret = fn(task, arg);
1334 css_task_iter_end(&it);
1335 if (ret) {
1336 mem_cgroup_iter_break(memcg, iter);
1337 break;
1338 }
1339 }
1340}
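/*
 * Callback sketch (hypothetical, for illustration): @fn receives each task
 * and @arg, and a non-zero return value stops the walk, e.g.
 *
 *	static int count_task(struct task_struct *task, void *arg)
 *	{
 *		(*(unsigned long *)arg)++;
 *		return 0;	// keep iterating
 *	}
 *
 *	mem_cgroup_scan_tasks(memcg, count_task, &nr_tasks);
 */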
1341
1342#ifdef CONFIG_DEBUG_VM
1343void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1344{
1345 struct mem_cgroup *memcg;
1346
1347 if (mem_cgroup_disabled())
1348 return;
1349
1350 memcg = folio_memcg(folio);
1351
1352 if (!memcg)
1353 VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1354 else
1355 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1356}
1357#endif
1358
1359/**
1360 * folio_lruvec_lock - Lock the lruvec for a folio.
1361 * @folio: Pointer to the folio.
1362 *
1363 * These functions are safe to use under any of the following conditions:
1364 * - folio locked
1365 * - folio_test_lru false
1366 * - folio_memcg_lock()
1367 * - folio frozen (refcount of 0)
1368 *
1369 * Return: The lruvec this folio is on with its lock held.
1370 */
1371struct lruvec *folio_lruvec_lock(struct folio *folio)
1372{
1373 struct lruvec *lruvec = folio_lruvec(folio);
1374
1375 spin_lock(&lruvec->lru_lock);
1376 lruvec_memcg_debug(lruvec, folio);
1377
1378 return lruvec;
1379}
1380
1381/**
1382 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1383 * @folio: Pointer to the folio.
1384 *
1385 * These functions are safe to use under any of the following conditions:
1386 * - folio locked
1387 * - folio_test_lru false
1388 * - folio_memcg_lock()
1389 * - folio frozen (refcount of 0)
1390 *
1391 * Return: The lruvec this folio is on with its lock held and interrupts
1392 * disabled.
1393 */
1394struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1395{
1396 struct lruvec *lruvec = folio_lruvec(folio);
1397
1398 spin_lock_irq(&lruvec->lru_lock);
1399 lruvec_memcg_debug(lruvec, folio);
1400
1401 return lruvec;
1402}
1403
1404/**
1405 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1406 * @folio: Pointer to the folio.
1407 * @flags: Pointer to irqsave flags.
1408 *
1409 * These functions are safe to use under any of the following conditions:
1410 * - folio locked
1411 * - folio_test_lru false
1412 * - folio_memcg_lock()
1413 * - folio frozen (refcount of 0)
1414 *
1415 * Return: The lruvec this folio is on with its lock held and interrupts
1416 * disabled.
1417 */
1418struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1419 unsigned long *flags)
1420{
1421 struct lruvec *lruvec = folio_lruvec(folio);
1422
1423 spin_lock_irqsave(&lruvec->lru_lock, *flags);
1424 lruvec_memcg_debug(lruvec, folio);
1425
1426 return lruvec;
1427}
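/*
 * Pairing sketch (illustrative; a dedicated unlock helper may exist): the
 * irqsave variant is used when the caller may be entered with interrupts
 * enabled, e.g.
 *
 *	lruvec = folio_lruvec_lock_irqsave(folio, &flags);
 *	... add the folio to or remove it from an LRU list ...
 *	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
 */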
1428
1429/**
1430 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1431 * @lruvec: mem_cgroup per zone lru vector
1432 * @lru: index of lru list the page is sitting on
1433 * @zid: zone id of the accounted pages
1434 * @nr_pages: positive when adding or negative when removing
1435 *
1436 * This function must be called under lru_lock, just before a page is added
1437 * to or just after a page is removed from an lru list.
1438 */
1439void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1440 int zid, int nr_pages)
1441{
1442 struct mem_cgroup_per_node *mz;
1443 unsigned long *lru_size;
1444 long size;
1445
1446 if (mem_cgroup_disabled())
1447 return;
1448
1449 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1450 lru_size = &mz->lru_zone_size[zid][lru];
1451
1452 if (nr_pages < 0)
1453 *lru_size += nr_pages;
1454
1455 size = *lru_size;
1456 if (WARN_ONCE(size < 0,
1457 "%s(%p, %d, %d): lru_size %ld\n",
1458 __func__, lruvec, lru, nr_pages, size)) {
1459 VM_BUG_ON(1);
1460 *lru_size = 0;
1461 }
1462
1463 if (nr_pages > 0)
1464 *lru_size += nr_pages;
1465}
1466
1467/**
1468 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1469 * @memcg: the memory cgroup
1470 *
1471 * Returns the maximum amount of memory @memcg can be charged with, in
1472 * pages.
1473 */
1474static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1475{
1476 unsigned long margin = 0;
1477 unsigned long count;
1478 unsigned long limit;
1479
1480 count = page_counter_read(&memcg->memory);
1481 limit = READ_ONCE(memcg->memory.max);
1482 if (count < limit)
1483 margin = limit - count;
1484
1485 if (do_memsw_account()) {
1486 count = page_counter_read(&memcg->memsw);
1487 limit = READ_ONCE(memcg->memsw.max);
1488 if (count < limit)
1489 margin = min(margin, limit - count);
1490 else
1491 margin = 0;
1492 }
1493
1494 return margin;
1495}
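/*
 * Worked example: with memory.max == 1000 pages and a usage of 700 pages,
 * the margin is 300 pages; if memsw accounting is active with
 * memsw.max == 900 and a memsw usage of 800, the margin shrinks to
 * min(300, 100) == 100 pages.
 */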
1496
1497/*
1498 * A routine for checking whether "memcg" is under move_account() or not.
1499 *
1500 * Checks whether a cgroup is mc.from, mc.to, or in the hierarchy of the
1501 * moving cgroups. This is used for waiting at high memory pressure
1502 * caused by a charge move.
1503 */
1504static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1505{
1506 struct mem_cgroup *from;
1507 struct mem_cgroup *to;
1508 bool ret = false;
1509 /*
1510	 * Unlike the task-move routines, we access mc.to and mc.from without
1511	 * cgroup_mutex held for mutual exclusion; take the spinlock instead.
1512 */
1513 spin_lock(&mc.lock);
1514 from = mc.from;
1515 to = mc.to;
1516 if (!from)
1517 goto unlock;
1518
1519 ret = mem_cgroup_is_descendant(from, memcg) ||
1520 mem_cgroup_is_descendant(to, memcg);
1521unlock:
1522 spin_unlock(&mc.lock);
1523 return ret;
1524}
1525
1526static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1527{
1528 if (mc.moving_task && current != mc.moving_task) {
1529 if (mem_cgroup_under_move(memcg)) {
1530 DEFINE_WAIT(wait);
1531 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1532 /* moving charge context might have finished. */
1533 if (mc.moving_task)
1534 schedule();
1535 finish_wait(&mc.waitq, &wait);
1536 return true;
1537 }
1538 }
1539 return false;
1540}
1541
1542struct memory_stat {
1543 const char *name;
1544 unsigned int idx;
1545};
1546
1547static const struct memory_stat memory_stats[] = {
1548 { "anon", NR_ANON_MAPPED },
1549 { "file", NR_FILE_PAGES },
1550 { "kernel", MEMCG_KMEM },
1551 { "kernel_stack", NR_KERNEL_STACK_KB },
1552 { "pagetables", NR_PAGETABLE },
1553 { "sec_pagetables", NR_SECONDARY_PAGETABLE },
1554 { "percpu", MEMCG_PERCPU_B },
1555 { "sock", MEMCG_SOCK },
1556 { "vmalloc", MEMCG_VMALLOC },
1557 { "shmem", NR_SHMEM },
1558#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
1559 { "zswap", MEMCG_ZSWAP_B },
1560 { "zswapped", MEMCG_ZSWAPPED },
1561#endif
1562 { "file_mapped", NR_FILE_MAPPED },
1563 { "file_dirty", NR_FILE_DIRTY },
1564 { "file_writeback", NR_WRITEBACK },
1565#ifdef CONFIG_SWAP
1566 { "swapcached", NR_SWAPCACHE },
1567#endif
1568#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1569 { "anon_thp", NR_ANON_THPS },
1570 { "file_thp", NR_FILE_THPS },
1571 { "shmem_thp", NR_SHMEM_THPS },
1572#endif
1573 { "inactive_anon", NR_INACTIVE_ANON },
1574 { "active_anon", NR_ACTIVE_ANON },
1575 { "inactive_file", NR_INACTIVE_FILE },
1576 { "active_file", NR_ACTIVE_FILE },
1577 { "unevictable", NR_UNEVICTABLE },
1578 { "slab_reclaimable", NR_SLAB_RECLAIMABLE_B },
1579 { "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B },
1580
1581 /* The memory events */
1582 { "workingset_refault_anon", WORKINGSET_REFAULT_ANON },
1583 { "workingset_refault_file", WORKINGSET_REFAULT_FILE },
1584 { "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON },
1585 { "workingset_activate_file", WORKINGSET_ACTIVATE_FILE },
1586 { "workingset_restore_anon", WORKINGSET_RESTORE_ANON },
1587 { "workingset_restore_file", WORKINGSET_RESTORE_FILE },
1588 { "workingset_nodereclaim", WORKINGSET_NODERECLAIM },
1589};
1590
1591/* The actual unit of the state item, not the same as the output unit */
1592static int memcg_page_state_unit(int item)
1593{
1594 switch (item) {
1595 case MEMCG_PERCPU_B:
1596 case MEMCG_ZSWAP_B:
1597 case NR_SLAB_RECLAIMABLE_B:
1598 case NR_SLAB_UNRECLAIMABLE_B:
1599 return 1;
1600 case NR_KERNEL_STACK_KB:
1601 return SZ_1K;
1602 default:
1603 return PAGE_SIZE;
1604 }
1605}
1606
1607/* Translate stat items to the correct unit for memory.stat output */
1608static int memcg_page_state_output_unit(int item)
1609{
1610 /*
1611 * Workingset state is actually in pages, but we export it to userspace
1612 * as a scalar count of events, so special case it here.
1613 */
1614 switch (item) {
1615 case WORKINGSET_REFAULT_ANON:
1616 case WORKINGSET_REFAULT_FILE:
1617 case WORKINGSET_ACTIVATE_ANON:
1618 case WORKINGSET_ACTIVATE_FILE:
1619 case WORKINGSET_RESTORE_ANON:
1620 case WORKINGSET_RESTORE_FILE:
1621 case WORKINGSET_NODERECLAIM:
1622 return 1;
1623 default:
1624 return memcg_page_state_unit(item);
1625 }
1626}
1627
1628static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1629 int item)
1630{
1631 return memcg_page_state(memcg, item) *
1632 memcg_page_state_output_unit(item);
1633}
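/*
 * Conversion example: NR_KERNEL_STACK_KB is tracked in KiB, so an internal
 * value of 16 is reported as 16 * SZ_1K == 16384 bytes in memory.stat,
 * while the workingset_* items are exported as raw event counts (unit 1).
 */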
1634
1635static inline unsigned long memcg_page_state_local_output(
1636 struct mem_cgroup *memcg, int item)
1637{
1638 return memcg_page_state_local(memcg, item) *
1639 memcg_page_state_output_unit(item);
1640}
1641
1642static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1643{
1644 int i;
1645
1646 /*
1647 * Provide statistics on the state of the memory subsystem as
1648 * well as cumulative event counters that show past behavior.
1649 *
1650 * This list is ordered following a combination of these gradients:
1651 * 1) generic big picture -> specifics and details
1652 * 2) reflecting userspace activity -> reflecting kernel heuristics
1653 *
1654 * Current memory state:
1655 */
1656 mem_cgroup_flush_stats(memcg);
1657
1658 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1659 u64 size;
1660
1661 size = memcg_page_state_output(memcg, memory_stats[i].idx);
1662 seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
1663
1664 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1665 size += memcg_page_state_output(memcg,
1666 NR_SLAB_RECLAIMABLE_B);
1667 seq_buf_printf(s, "slab %llu\n", size);
1668 }
1669 }
1670
1671 /* Accumulated memory events */
1672 seq_buf_printf(s, "pgscan %lu\n",
1673 memcg_events(memcg, PGSCAN_KSWAPD) +
1674 memcg_events(memcg, PGSCAN_DIRECT) +
1675 memcg_events(memcg, PGSCAN_KHUGEPAGED));
1676 seq_buf_printf(s, "pgsteal %lu\n",
1677 memcg_events(memcg, PGSTEAL_KSWAPD) +
1678 memcg_events(memcg, PGSTEAL_DIRECT) +
1679 memcg_events(memcg, PGSTEAL_KHUGEPAGED));
1680
1681 for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1682 if (memcg_vm_event_stat[i] == PGPGIN ||
1683 memcg_vm_event_stat[i] == PGPGOUT)
1684 continue;
1685
1686 seq_buf_printf(s, "%s %lu\n",
1687 vm_event_name(memcg_vm_event_stat[i]),
1688 memcg_events(memcg, memcg_vm_event_stat[i]));
1689 }
1690
1691 /* The above should easily fit into one page */
1692 WARN_ON_ONCE(seq_buf_has_overflowed(s));
1693}
1694
1695static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);
1696
1697static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1698{
1699 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1700 memcg_stat_format(memcg, s);
1701 else
1702 memcg1_stat_format(memcg, s);
1703 WARN_ON_ONCE(seq_buf_has_overflowed(s));
1704}
1705
1706/**
1707 * mem_cgroup_print_oom_context: Print OOM information relevant to
1708 * memory controller.
1709 * @memcg: The memory cgroup that went over limit
1710 * @p: Task that is going to be killed
1711 *
1712 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1713 * enabled
1714 */
1715void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1716{
1717 rcu_read_lock();
1718
1719 if (memcg) {
1720 pr_cont(",oom_memcg=");
1721 pr_cont_cgroup_path(memcg->css.cgroup);
1722 } else
1723 pr_cont(",global_oom");
1724 if (p) {
1725 pr_cont(",task_memcg=");
1726 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1727 }
1728 rcu_read_unlock();
1729}
1730
1731/**
1732 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1733 * memory controller.
1734 * @memcg: The memory cgroup that went over limit
1735 */
1736void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1737{
1738	/* Use a static buffer, since the caller is holding oom_lock. */
1739 static char buf[PAGE_SIZE];
1740 struct seq_buf s;
1741
1742 lockdep_assert_held(&oom_lock);
1743
1744 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1745 K((u64)page_counter_read(&memcg->memory)),
1746 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1747 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1748 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1749 K((u64)page_counter_read(&memcg->swap)),
1750 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1751 else {
1752 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1753 K((u64)page_counter_read(&memcg->memsw)),
1754 K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1755 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1756 K((u64)page_counter_read(&memcg->kmem)),
1757 K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1758 }
1759
1760 pr_info("Memory cgroup stats for ");
1761 pr_cont_cgroup_path(memcg->css.cgroup);
1762 pr_cont(":");
1763 seq_buf_init(&s, buf, sizeof(buf));
1764 memory_stat_format(memcg, &s);
1765 seq_buf_do_printk(&s, KERN_INFO);
1766}
1767
1768/*
1769 * Return the memory (and swap, if configured) limit for a memcg.
1770 */
1771unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1772{
1773 unsigned long max = READ_ONCE(memcg->memory.max);
1774
1775 if (do_memsw_account()) {
1776 if (mem_cgroup_swappiness(memcg)) {
1777 /* Calculate swap excess capacity from memsw limit */
1778 unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1779
1780 max += min(swap, (unsigned long)total_swap_pages);
1781 }
1782 } else {
1783 if (mem_cgroup_swappiness(memcg))
1784 max += min(READ_ONCE(memcg->swap.max),
1785 (unsigned long)total_swap_pages);
1786 }
1787 return max;
1788}
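/*
 * Worked example: with memory.max == 1000 pages, memsw.max == 1500 pages
 * and swappiness > 0, the memsw limit allows up to 500 pages of swap, so
 * the reported maximum is 1000 + min(500, total_swap_pages).
 */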
1789
1790unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1791{
1792 return page_counter_read(&memcg->memory);
1793}
1794
1795static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1796 int order)
1797{
1798 struct oom_control oc = {
1799 .zonelist = NULL,
1800 .nodemask = NULL,
1801 .memcg = memcg,
1802 .gfp_mask = gfp_mask,
1803 .order = order,
1804 };
1805 bool ret = true;
1806
1807 if (mutex_lock_killable(&oom_lock))
1808 return true;
1809
1810 if (mem_cgroup_margin(memcg) >= (1 << order))
1811 goto unlock;
1812
1813 /*
1814 * A few threads which were not waiting at mutex_lock_killable() can
1815 * fail to bail out. Therefore, check again after holding oom_lock.
1816 */
1817 ret = task_is_dying() || out_of_memory(&oc);
1818
1819unlock:
1820 mutex_unlock(&oom_lock);
1821 return ret;
1822}
1823
1824static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1825 pg_data_t *pgdat,
1826 gfp_t gfp_mask,
1827 unsigned long *total_scanned)
1828{
1829 struct mem_cgroup *victim = NULL;
1830 int total = 0;
1831 int loop = 0;
1832 unsigned long excess;
1833 unsigned long nr_scanned;
1834 struct mem_cgroup_reclaim_cookie reclaim = {
1835 .pgdat = pgdat,
1836 };
1837
1838 excess = soft_limit_excess(root_memcg);
1839
1840 while (1) {
1841 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1842 if (!victim) {
1843 loop++;
1844 if (loop >= 2) {
1845 /*
1846 * If we have not been able to reclaim
1847 * anything, it might be because there are
1848 * no reclaimable pages under this hierarchy
1849 */
1850 if (!total)
1851 break;
1852 /*
1853 * We want to do more targeted reclaim.
1854 * excess >> 2 is not so excessive that we
1855 * reclaim too much, nor so small that we keep
1856 * coming back to reclaim from this cgroup
1857 */
1858 if (total >= (excess >> 2) ||
1859 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1860 break;
1861 }
1862 continue;
1863 }
1864 total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1865 pgdat, &nr_scanned);
1866 *total_scanned += nr_scanned;
1867 if (!soft_limit_excess(root_memcg))
1868 break;
1869 }
1870 mem_cgroup_iter_break(root_memcg, victim);
1871 return total;
1872}
1873
1874#ifdef CONFIG_LOCKDEP
1875static struct lockdep_map memcg_oom_lock_dep_map = {
1876 .name = "memcg_oom_lock",
1877};
1878#endif
1879
1880static DEFINE_SPINLOCK(memcg_oom_lock);
1881
1882/*
1883 * Check whether the OOM killer is already running under our hierarchy.
1884 * If someone else is running it, return false.
1885 */
1886static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1887{
1888 struct mem_cgroup *iter, *failed = NULL;
1889
1890 spin_lock(&memcg_oom_lock);
1891
1892 for_each_mem_cgroup_tree(iter, memcg) {
1893 if (iter->oom_lock) {
1894 /*
1895 * This subtree of our hierarchy is already locked,
1896 * so we cannot take the lock.
1897 */
1898 failed = iter;
1899 mem_cgroup_iter_break(memcg, iter);
1900 break;
1901 } else
1902 iter->oom_lock = true;
1903 }
1904
1905 if (failed) {
1906 /*
1907 * OK, we failed to lock the whole subtree, so we have
1908 * to undo the locks we already set, up to the failing memcg
1909 */
1910 for_each_mem_cgroup_tree(iter, memcg) {
1911 if (iter == failed) {
1912 mem_cgroup_iter_break(memcg, iter);
1913 break;
1914 }
1915 iter->oom_lock = false;
1916 }
1917 } else
1918 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1919
1920 spin_unlock(&memcg_oom_lock);
1921
1922 return !failed;
1923}
1924
1925static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1926{
1927 struct mem_cgroup *iter;
1928
1929 spin_lock(&memcg_oom_lock);
1930 mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1931 for_each_mem_cgroup_tree(iter, memcg)
1932 iter->oom_lock = false;
1933 spin_unlock(&memcg_oom_lock);
1934}
1935
1936static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1937{
1938 struct mem_cgroup *iter;
1939
1940 spin_lock(&memcg_oom_lock);
1941 for_each_mem_cgroup_tree(iter, memcg)
1942 iter->under_oom++;
1943 spin_unlock(&memcg_oom_lock);
1944}
1945
1946static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1947{
1948 struct mem_cgroup *iter;
1949
1950 /*
1951 * Be careful about under_oom underflows because a child memcg
1952 * could have been added after mem_cgroup_mark_under_oom.
1953 */
1954 spin_lock(&memcg_oom_lock);
1955 for_each_mem_cgroup_tree(iter, memcg)
1956 if (iter->under_oom > 0)
1957 iter->under_oom--;
1958 spin_unlock(&memcg_oom_lock);
1959}
1960
1961static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1962
1963struct oom_wait_info {
1964 struct mem_cgroup *memcg;
1965 wait_queue_entry_t wait;
1966};
1967
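/*
 * Wake an OOM waiter only if the memcg that triggered the wakeup and the
 * memcg the waiter is blocked on belong to the same hierarchy branch,
 * i.e. one is a descendant of the other.
 */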
1968static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1969 unsigned mode, int sync, void *arg)
1970{
1971 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1972 struct mem_cgroup *oom_wait_memcg;
1973 struct oom_wait_info *oom_wait_info;
1974
1975 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1976 oom_wait_memcg = oom_wait_info->memcg;
1977
1978 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1979 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1980 return 0;
1981 return autoremove_wake_function(wait, mode, sync, arg);
1982}
1983
1984static void memcg_oom_recover(struct mem_cgroup *memcg)
1985{
1986 /*
1987 * For the following lockless ->under_oom test, the only required
1988 * guarantee is that it must see the state asserted by an OOM when
1989 * this function is called as a result of userland actions
1990 * triggered by the notification of the OOM. This is trivially
1991 * achieved by invoking mem_cgroup_mark_under_oom() before
1992 * triggering notification.
1993 */
1994 if (memcg && memcg->under_oom)
1995 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1996}
1997
1998/*
1999 * Returns true if successfully killed one or more processes. Though in some
2000 * corner cases it can return true even without killing any process.
2001 */
2002static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
2003{
2004 bool locked, ret;
2005
2006 if (order > PAGE_ALLOC_COSTLY_ORDER)
2007 return false;
2008
2009 memcg_memory_event(memcg, MEMCG_OOM);
2010
2011 /*
2012 * We are in the middle of the charge context here, so we
2013 * don't want to block when potentially sitting on a callstack
2014 * that holds all kinds of filesystem and mm locks.
2015 *
2016 * cgroup1 allows disabling the OOM killer and waiting for outside
2017 * handling until the charge can succeed; remember the context and put
2018 * the task to sleep at the end of the page fault when all locks are
2019 * released.
2020 *
2021 * On the other hand, in-kernel OOM killer allows for an async victim
2022 * memory reclaim (oom_reaper) and that means that we are not solely
2023 * relying on the oom victim to make a forward progress and we can
2024 * invoke the oom killer here.
2025 *
2026 * Please note that mem_cgroup_out_of_memory might fail to find a
2027 * victim and then we have to bail out from the charge path.
2028 */
2029 if (READ_ONCE(memcg->oom_kill_disable)) {
2030 if (current->in_user_fault) {
2031 css_get(&memcg->css);
2032 current->memcg_in_oom = memcg;
2033 current->memcg_oom_gfp_mask = mask;
2034 current->memcg_oom_order = order;
2035 }
2036 return false;
2037 }
2038
2039 mem_cgroup_mark_under_oom(memcg);
2040
2041 locked = mem_cgroup_oom_trylock(memcg);
2042
2043 if (locked)
2044 mem_cgroup_oom_notify(memcg);
2045
2046 mem_cgroup_unmark_under_oom(memcg);
2047 ret = mem_cgroup_out_of_memory(memcg, mask, order);
2048
2049 if (locked)
2050 mem_cgroup_oom_unlock(memcg);
2051
2052 return ret;
2053}
2054
2055/**
2056 * mem_cgroup_oom_synchronize - complete memcg OOM handling
2057 * @handle: actually kill/wait or just clean up the OOM state
2058 *
2059 * This has to be called at the end of a page fault if the memcg OOM
2060 * handler was enabled.
2061 *
2062 * Memcg supports userspace OOM handling where failed allocations must
2063 * sleep on a waitqueue until the userspace task resolves the
2064 * situation. Sleeping directly in the charge context with all kinds
2065 * of locks held is not a good idea, instead we remember an OOM state
2066 * in the task and mem_cgroup_oom_synchronize() has to be called at
2067 * the end of the page fault to complete the OOM handling.
2068 *
2069 * Returns %true if an ongoing memcg OOM situation was detected and
2070 * completed, %false otherwise.
2071 */
2072bool mem_cgroup_oom_synchronize(bool handle)
2073{
2074 struct mem_cgroup *memcg = current->memcg_in_oom;
2075 struct oom_wait_info owait;
2076 bool locked;
2077
2078 /* OOM is global, do not handle */
2079 if (!memcg)
2080 return false;
2081
2082 if (!handle)
2083 goto cleanup;
2084
2085 owait.memcg = memcg;
2086 owait.wait.flags = 0;
2087 owait.wait.func = memcg_oom_wake_function;
2088 owait.wait.private = current;
2089 INIT_LIST_HEAD(&owait.wait.entry);
2090
2091 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
2092 mem_cgroup_mark_under_oom(memcg);
2093
2094 locked = mem_cgroup_oom_trylock(memcg);
2095
2096 if (locked)
2097 mem_cgroup_oom_notify(memcg);
2098
2099 schedule();
2100 mem_cgroup_unmark_under_oom(memcg);
2101 finish_wait(&memcg_oom_waitq, &owait.wait);
2102
2103 if (locked)
2104 mem_cgroup_oom_unlock(memcg);
2105cleanup:
2106 current->memcg_in_oom = NULL;
2107 css_put(&memcg->css);
2108 return true;
2109}
2110
2111/**
2112 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2113 * @victim: task to be killed by the OOM killer
2114 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2115 *
2116 * Returns a pointer to a memory cgroup, which has to be cleaned up
2117 * by killing all belonging OOM-killable tasks.
2118 *
2119 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2120 */
2121struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2122 struct mem_cgroup *oom_domain)
2123{
2124 struct mem_cgroup *oom_group = NULL;
2125 struct mem_cgroup *memcg;
2126
2127 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2128 return NULL;
2129
2130 if (!oom_domain)
2131 oom_domain = root_mem_cgroup;
2132
2133 rcu_read_lock();
2134
2135 memcg = mem_cgroup_from_task(victim);
2136 if (mem_cgroup_is_root(memcg))
2137 goto out;
2138
2139 /*
2140 * If the victim task has been asynchronously moved to a different
2141 * memory cgroup, we might end up killing tasks outside oom_domain.
2142 * In this case it's better to ignore memory.group.oom.
2143 */
2144 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2145 goto out;
2146
2147 /*
2148 * Traverse the memory cgroup hierarchy from the victim task's
2149 * cgroup up to the OOMing cgroup (or root) to find the
2150 * highest-level memory cgroup with oom.group set.
2151 */
2152 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2153 if (READ_ONCE(memcg->oom_group))
2154 oom_group = memcg;
2155
2156 if (memcg == oom_domain)
2157 break;
2158 }
2159
2160 if (oom_group)
2161 css_get(&oom_group->css);
2162out:
2163 rcu_read_unlock();
2164
2165 return oom_group;
2166}
2167
2168void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2169{
2170 pr_info("Tasks in ");
2171 pr_cont_cgroup_path(memcg->css.cgroup);
2172 pr_cont(" are going to be killed due to memory.oom.group set\n");
2173}
2174
2175/**
2176 * folio_memcg_lock - Bind a folio to its memcg.
2177 * @folio: The folio.
2178 *
2179 * This function prevents unlocked LRU folios from being moved to
2180 * another cgroup.
2181 *
2182 * It ensures lifetime of the bound memcg. The caller is responsible
2183 * for the lifetime of the folio.
2184 */
2185void folio_memcg_lock(struct folio *folio)
2186{
2187 struct mem_cgroup *memcg;
2188 unsigned long flags;
2189
2190 /*
2191 * The RCU lock is held throughout the transaction. The fast
2192 * path can get away without acquiring the memcg->move_lock
2193 * because page moving starts with an RCU grace period.
2194 */
2195 rcu_read_lock();
2196
2197 if (mem_cgroup_disabled())
2198 return;
2199again:
2200 memcg = folio_memcg(folio);
2201 if (unlikely(!memcg))
2202 return;
2203
2204#ifdef CONFIG_PROVE_LOCKING
2205 local_irq_save(flags);
2206 might_lock(&memcg->move_lock);
2207 local_irq_restore(flags);
2208#endif
2209
2210 if (atomic_read(&memcg->moving_account) <= 0)
2211 return;
2212
2213 spin_lock_irqsave(&memcg->move_lock, flags);
2214 if (memcg != folio_memcg(folio)) {
2215 spin_unlock_irqrestore(&memcg->move_lock, flags);
2216 goto again;
2217 }
2218
2219 /*
2220 * When charge migration first begins, we can have multiple
2221 * critical sections holding the fast-path RCU lock and one
2222 * holding the slowpath move_lock. Track the task who has the
2223 * move_lock for folio_memcg_unlock().
2224 */
2225 memcg->move_lock_task = current;
2226 memcg->move_lock_flags = flags;
2227}
2228
2229static void __folio_memcg_unlock(struct mem_cgroup *memcg)
2230{
2231 if (memcg && memcg->move_lock_task == current) {
2232 unsigned long flags = memcg->move_lock_flags;
2233
2234 memcg->move_lock_task = NULL;
2235 memcg->move_lock_flags = 0;
2236
2237 spin_unlock_irqrestore(&memcg->move_lock, flags);
2238 }
2239
2240 rcu_read_unlock();
2241}
2242
2243/**
2244 * folio_memcg_unlock - Release the binding between a folio and its memcg.
2245 * @folio: The folio.
2246 *
2247 * This releases the binding created by folio_memcg_lock(). This does
2248 * not change the accounting of this folio to its memcg, but it does
2249 * permit others to change it.
2250 */
2251void folio_memcg_unlock(struct folio *folio)
2252{
2253 __folio_memcg_unlock(folio_memcg(folio));
2254}
2255
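/*
 * Per-CPU cache ("stock") of pre-charged pages and, with kmem accounting,
 * pre-charged object bytes. It lets the charge fast path avoid the shared
 * page_counter atomics for most allocations: charges are consumed from and
 * refilled into the stock in up to MEMCG_CHARGE_BATCH-sized chunks, and the
 * stock is drained when the cached memcg changes or a drain is requested.
 */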
2256struct memcg_stock_pcp {
2257 local_lock_t stock_lock;
2258 struct mem_cgroup *cached; /* this is never the root cgroup */
2259 unsigned int nr_pages;
2260
2261#ifdef CONFIG_MEMCG_KMEM
2262 struct obj_cgroup *cached_objcg;
2263 struct pglist_data *cached_pgdat;
2264 unsigned int nr_bytes;
2265 int nr_slab_reclaimable_b;
2266 int nr_slab_unreclaimable_b;
2267#endif
2268
2269 struct work_struct work;
2270 unsigned long flags;
2271#define FLUSHING_CACHED_CHARGE 0
2272};
2273static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
2274 .stock_lock = INIT_LOCAL_LOCK(stock_lock),
2275};
2276static DEFINE_MUTEX(percpu_charge_mutex);
2277
2278#ifdef CONFIG_MEMCG_KMEM
2279static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
2280static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2281 struct mem_cgroup *root_memcg);
2282static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages);
2283
2284#else
2285static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2286{
2287 return NULL;
2288}
2289static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2290 struct mem_cgroup *root_memcg)
2291{
2292 return false;
2293}
2294static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2295{
2296}
2297#endif
2298
2299/**
2300 * consume_stock: Try to consume stocked charge on this cpu.
2301 * @memcg: memcg to consume from.
2302 * @nr_pages: how many pages to charge.
2303 *
2304 * The charges will only happen if @memcg matches the current cpu's memcg
2305 * stock, and at least @nr_pages are available in that stock. Failure to
2306 * service an allocation will refill the stock.
2307 *
2308 * returns true if successful, false otherwise.
2309 */
2310static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2311{
2312 struct memcg_stock_pcp *stock;
2313 unsigned long flags;
2314 bool ret = false;
2315
2316 if (nr_pages > MEMCG_CHARGE_BATCH)
2317 return ret;
2318
2319 local_lock_irqsave(&memcg_stock.stock_lock, flags);
2320
2321 stock = this_cpu_ptr(&memcg_stock);
2322 if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) {
2323 stock->nr_pages -= nr_pages;
2324 ret = true;
2325 }
2326
2327 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2328
2329 return ret;
2330}
2331
2332/*
2333 * Drain the percpu stock: return its cached charges to the page counters and reset the cached information.
2334 */
2335static void drain_stock(struct memcg_stock_pcp *stock)
2336{
2337 struct mem_cgroup *old = READ_ONCE(stock->cached);
2338
2339 if (!old)
2340 return;
2341
2342 if (stock->nr_pages) {
2343 page_counter_uncharge(&old->memory, stock->nr_pages);
2344 if (do_memsw_account())
2345 page_counter_uncharge(&old->memsw, stock->nr_pages);
2346 stock->nr_pages = 0;
2347 }
2348
2349 css_put(&old->css);
2350 WRITE_ONCE(stock->cached, NULL);
2351}
2352
2353static void drain_local_stock(struct work_struct *dummy)
2354{
2355 struct memcg_stock_pcp *stock;
2356 struct obj_cgroup *old = NULL;
2357 unsigned long flags;
2358
2359 /*
2360 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2361 * drain_stock races is that we always operate on local CPU stock
2362 * here with IRQ disabled.
2363 */
2364 local_lock_irqsave(&memcg_stock.stock_lock, flags);
2365
2366 stock = this_cpu_ptr(&memcg_stock);
2367 old = drain_obj_stock(stock);
2368 drain_stock(stock);
2369 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2370
2371 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2372 if (old)
2373 obj_cgroup_put(old);
2374}
2375
2376/*
2377 * Cache charges (nr_pages) in the local per-cpu area.
2378 * They will be consumed later by consume_stock().
2379 */
2380static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2381{
2382 struct memcg_stock_pcp *stock;
2383
2384 stock = this_cpu_ptr(&memcg_stock);
2385 if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
2386 drain_stock(stock);
2387 css_get(&memcg->css);
2388 WRITE_ONCE(stock->cached, memcg);
2389 }
2390 stock->nr_pages += nr_pages;
2391
2392 if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2393 drain_stock(stock);
2394}
2395
2396static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2397{
2398 unsigned long flags;
2399
2400 local_lock_irqsave(&memcg_stock.stock_lock, flags);
2401 __refill_stock(memcg, nr_pages);
2402 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2403}
2404
2405/*
2406 * Drains all per-CPU charge caches for given root_memcg resp. subtree
2407 * of the hierarchy under it.
2408 */
2409static void drain_all_stock(struct mem_cgroup *root_memcg)
2410{
2411 int cpu, curcpu;
2412
2413 /* If someone's already draining, avoid running more workers. */
2414 if (!mutex_trylock(&percpu_charge_mutex))
2415 return;
2416 /*
2417 * Notify other cpus that a system-wide "drain" is running.
2418 * We do not care about races with cpu hotplug because cpu-down
2419 * as well as workers from this path always operate on the local
2420 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2421 */
2422 migrate_disable();
2423 curcpu = smp_processor_id();
2424 for_each_online_cpu(cpu) {
2425 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2426 struct mem_cgroup *memcg;
2427 bool flush = false;
2428
2429 rcu_read_lock();
2430 memcg = READ_ONCE(stock->cached);
2431 if (memcg && stock->nr_pages &&
2432 mem_cgroup_is_descendant(memcg, root_memcg))
2433 flush = true;
2434 else if (obj_stock_flush_required(stock, root_memcg))
2435 flush = true;
2436 rcu_read_unlock();
2437
2438 if (flush &&
2439 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2440 if (cpu == curcpu)
2441 drain_local_stock(&stock->work);
2442 else if (!cpu_is_isolated(cpu))
2443 schedule_work_on(cpu, &stock->work);
2444 }
2445 }
2446 migrate_enable();
2447 mutex_unlock(&percpu_charge_mutex);
2448}
2449
2450static int memcg_hotplug_cpu_dead(unsigned int cpu)
2451{
2452 struct memcg_stock_pcp *stock;
2453
2454 stock = &per_cpu(memcg_stock, cpu);
2455 drain_stock(stock);
2456
2457 return 0;
2458}
2459
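/*
 * Reclaim memory from @memcg and every ancestor whose usage is above its
 * memory.high, walking up the hierarchy. Returns the total number of pages
 * reclaimed.
 */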
2460static unsigned long reclaim_high(struct mem_cgroup *memcg,
2461 unsigned int nr_pages,
2462 gfp_t gfp_mask)
2463{
2464 unsigned long nr_reclaimed = 0;
2465
2466 do {
2467 unsigned long pflags;
2468
2469 if (page_counter_read(&memcg->memory) <=
2470 READ_ONCE(memcg->memory.high))
2471 continue;
2472
2473 memcg_memory_event(memcg, MEMCG_HIGH);
2474
2475 psi_memstall_enter(&pflags);
2476 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2477 gfp_mask,
2478 MEMCG_RECLAIM_MAY_SWAP);
2479 psi_memstall_leave(&pflags);
2480 } while ((memcg = parent_mem_cgroup(memcg)) &&
2481 !mem_cgroup_is_root(memcg));
2482
2483 return nr_reclaimed;
2484}
2485
2486static void high_work_func(struct work_struct *work)
2487{
2488 struct mem_cgroup *memcg;
2489
2490 memcg = container_of(work, struct mem_cgroup, high_work);
2491 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2492}
2493
2494/*
2495 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2496 * enough to still cause a significant slowdown in most cases, while still
2497 * allowing diagnostics and tracing to proceed without becoming stuck.
2498 */
2499#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2500
2501/*
2502 * When calculating the delay, we use these on either side of the exponentiation to
2503 * maintain precision and scale to a reasonable number of jiffies (see the table
2504 * below).
2505 *
2506 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2507 * overage ratio to a delay.
2508 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2509 * proposed penalty in order to reduce to a reasonable number of jiffies, and
2510 * to produce a reasonable delay curve.
2511 *
2512 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2513 * reasonable delay curve compared to precision-adjusted overage, not
2514 * penalising heavily at first, but still making sure that growth beyond the
2515 * limit penalises misbehaving cgroups by slowing them down exponentially. For
2516 * example, with a high of 100 megabytes:
2517 *
2518 * +-------+------------------------+
2519 * | usage | time to allocate in ms |
2520 * +-------+------------------------+
2521 * | 100M | 0 |
2522 * | 101M | 6 |
2523 * | 102M | 25 |
2524 * | 103M | 57 |
2525 * | 104M | 102 |
2526 * | 105M | 159 |
2527 * | 106M | 230 |
2528 * | 107M | 313 |
2529 * | 108M | 409 |
2530 * | 109M | 518 |
2531 * | 110M | 639 |
2532 * | 111M | 774 |
2533 * | 112M | 921 |
2534 * | 113M | 1081 |
2535 * | 114M | 1254 |
2536 * | 115M | 1439 |
2537 * | 116M | 1638 |
2538 * | 117M | 1849 |
2539 * | 118M | 2000 |
2540 * | 119M | 2000 |
2541 * | 120M | 2000 |
2542 * +-------+------------------------+
2543 */
2544 #define MEMCG_DELAY_PRECISION_SHIFT 20
2545 #define MEMCG_DELAY_SCALING_SHIFT 14
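
/*
 * Worked example (assuming HZ == 1000): with memory.high at 100M
 * (25600 pages) and usage at 104M (26624 pages),
 *
 *	overage = ((26624 - 25600) << MEMCG_DELAY_PRECISION_SHIFT) / 25600
 *	        ~= 41943
 *	penalty = (41943 * 41943 * HZ) >> (20 + 14) ~= 102 jiffies
 *
 * which matches the ~102ms row in the table above, before the per-task
 * scaling by nr_pages / MEMCG_CHARGE_BATCH is applied in
 * calculate_high_delay().
 */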
2546
2547static u64 calculate_overage(unsigned long usage, unsigned long high)
2548{
2549 u64 overage;
2550
2551 if (usage <= high)
2552 return 0;
2553
2554 /*
2555 * Prevent division by 0 in overage calculation by acting as if
2556 * it was a threshold of 1 page
2557 */
2558 high = max(high, 1UL);
2559
2560 overage = usage - high;
2561 overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2562 return div64_u64(overage, high);
2563}
2564
2565static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2566{
2567 u64 overage, max_overage = 0;
2568
2569 do {
2570 overage = calculate_overage(page_counter_read(&memcg->memory),
2571 READ_ONCE(memcg->memory.high));
2572 max_overage = max(overage, max_overage);
2573 } while ((memcg = parent_mem_cgroup(memcg)) &&
2574 !mem_cgroup_is_root(memcg));
2575
2576 return max_overage;
2577}
2578
2579static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2580{
2581 u64 overage, max_overage = 0;
2582
2583 do {
2584 overage = calculate_overage(page_counter_read(&memcg->swap),
2585 READ_ONCE(memcg->swap.high));
2586 if (overage)
2587 memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2588 max_overage = max(overage, max_overage);
2589 } while ((memcg = parent_mem_cgroup(memcg)) &&
2590 !mem_cgroup_is_root(memcg));
2591
2592 return max_overage;
2593}
2594
2595/*
2596 * Get the number of jiffies that we should penalise a mischievous cgroup which
2597 * is exceeding its memory.high by checking both it and its ancestors.
2598 */
2599static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2600 unsigned int nr_pages,
2601 u64 max_overage)
2602{
2603 unsigned long penalty_jiffies;
2604
2605 if (!max_overage)
2606 return 0;
2607
2608 /*
2609 * We use overage compared to memory.high to calculate the number of
2610 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2611 * fairly lenient on small overages, and increasingly harsh when the
2612 * memcg in question makes it clear that it has no intention of stopping
2613 * its crazy behaviour, so we exponentially increase the delay based on
2614 * overage amount.
2615 */
2616 penalty_jiffies = max_overage * max_overage * HZ;
2617 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2618 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2619
2620 /*
2621 * Factor in the task's own contribution to the overage, such that four
2622 * N-sized allocations are throttled approximately the same as one
2623 * 4N-sized allocation.
2624 *
2625 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2626 * larger the current charge batch is than that.
2627 */
2628 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2629}
2630
2631/*
2632 * Reclaims memory over the high limit. Called directly from
2633 * try_charge() (context permitting), as well as from the userland
2634 * return path where reclaim is always able to block.
2635 */
2636void mem_cgroup_handle_over_high(gfp_t gfp_mask)
2637{
2638 unsigned long penalty_jiffies;
2639 unsigned long pflags;
2640 unsigned long nr_reclaimed;
2641 unsigned int nr_pages = current->memcg_nr_pages_over_high;
2642 int nr_retries = MAX_RECLAIM_RETRIES;
2643 struct mem_cgroup *memcg;
2644 bool in_retry = false;
2645
2646 if (likely(!nr_pages))
2647 return;
2648
2649 memcg = get_mem_cgroup_from_mm(current->mm);
2650 current->memcg_nr_pages_over_high = 0;
2651
2652retry_reclaim:
2653 /*
2654 * Bail if the task is already exiting. Unlike memory.max,
2655 * memory.high enforcement isn't as strict, and there is no
2656 * OOM killer involved, which means the excess could already
2657 * be much bigger (and still growing) than it could for
2658 * memory.max; the dying task could get stuck in fruitless
2659 * reclaim for a long time, which isn't desirable.
2660 */
2661 if (task_is_dying())
2662 goto out;
2663
2664 /*
2665 * The allocating task should reclaim at least the batch size, but for
2666 * subsequent retries we only want to do what's necessary to prevent oom
2667 * or breaching resource isolation.
2668 *
2669 * This is distinct from memory.max or page allocator behaviour because
2670 * memory.high is currently batched, whereas memory.max and the page
2671 * allocator run every time an allocation is made.
2672 */
2673 nr_reclaimed = reclaim_high(memcg,
2674 in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2675 gfp_mask);
2676
2677 /*
2678 * memory.high is breached and reclaim is unable to keep up. Throttle
2679 * allocators proactively to slow down excessive growth.
2680 */
2681 penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2682 mem_find_max_overage(memcg));
2683
2684 penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2685 swap_find_max_overage(memcg));
2686
2687 /*
2688 * Clamp the max delay per usermode return so as to still keep the
2689 * application moving forwards and also permit diagnostics, albeit
2690 * extremely slowly.
2691 */
2692 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2693
2694 /*
2695 * Don't sleep if the amount of jiffies this memcg owes us is so low
2696 * that it's not even worth doing, in an attempt to be nice to those who
2697 * go only a small amount over their memory.high value and maybe haven't
2698 * been aggressively reclaimed enough yet.
2699 */
2700 if (penalty_jiffies <= HZ / 100)
2701 goto out;
2702
2703 /*
2704 * If reclaim is making forward progress but we're still over
2705 * memory.high, we want to encourage that rather than doing allocator
2706 * throttling.
2707 */
2708 if (nr_reclaimed || nr_retries--) {
2709 in_retry = true;
2710 goto retry_reclaim;
2711 }
2712
2713 /*
2714 * Reclaim didn't manage to push usage below the limit, slow
2715 * this allocating task down.
2716 *
2717 * If we exit early, we're guaranteed to die (since
2718 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2719 * need to account for any ill-begotten jiffies to pay them off later.
2720 */
2721 psi_memstall_enter(&pflags);
2722 schedule_timeout_killable(penalty_jiffies);
2723 psi_memstall_leave(&pflags);
2724
2725out:
2726 css_put(&memcg->css);
2727}
2728
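/*
 * Charge @nr_pages to @memcg. The fast path consumes the per-cpu stock;
 * otherwise the page counters are charged directly, falling back to direct
 * reclaim, stock draining and finally the memcg OOM killer. Returns 0 on
 * success or when the charge is forced, -ENOMEM if the charge must fail.
 */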
2729static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2730 unsigned int nr_pages)
2731{
2732 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2733 int nr_retries = MAX_RECLAIM_RETRIES;
2734 struct mem_cgroup *mem_over_limit;
2735 struct page_counter *counter;
2736 unsigned long nr_reclaimed;
2737 bool passed_oom = false;
2738 unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2739 bool drained = false;
2740 bool raised_max_event = false;
2741 unsigned long pflags;
2742
2743retry:
2744 if (consume_stock(memcg, nr_pages))
2745 return 0;
2746
2747 if (!do_memsw_account() ||
2748 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2749 if (page_counter_try_charge(&memcg->memory, batch, &counter))
2750 goto done_restock;
2751 if (do_memsw_account())
2752 page_counter_uncharge(&memcg->memsw, batch);
2753 mem_over_limit = mem_cgroup_from_counter(counter, memory);
2754 } else {
2755 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2756 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2757 }
2758
2759 if (batch > nr_pages) {
2760 batch = nr_pages;
2761 goto retry;
2762 }
2763
2764 /*
2765 * Prevent unbounded recursion when reclaim operations need to
2766 * allocate memory. This might exceed the limits temporarily,
2767 * but we prefer facilitating memory reclaim and getting back
2768 * under the limit over triggering OOM kills in these cases.
2769 */
2770 if (unlikely(current->flags & PF_MEMALLOC))
2771 goto force;
2772
2773 if (unlikely(task_in_memcg_oom(current)))
2774 goto nomem;
2775
2776 if (!gfpflags_allow_blocking(gfp_mask))
2777 goto nomem;
2778
2779 memcg_memory_event(mem_over_limit, MEMCG_MAX);
2780 raised_max_event = true;
2781
2782 psi_memstall_enter(&pflags);
2783 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2784 gfp_mask, reclaim_options);
2785 psi_memstall_leave(&pflags);
2786
2787 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2788 goto retry;
2789
2790 if (!drained) {
2791 drain_all_stock(mem_over_limit);
2792 drained = true;
2793 goto retry;
2794 }
2795
2796 if (gfp_mask & __GFP_NORETRY)
2797 goto nomem;
2798 /*
2799 * Even though the limit is exceeded at this point, reclaim
2800 * may have been able to free some pages. Retry the charge
2801 * before killing the task.
2802 *
2803 * Only for regular pages, though: huge pages are rather
2804 * unlikely to succeed so close to the limit, and we fall back
2805 * to regular pages anyway in case of failure.
2806 */
2807 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2808 goto retry;
2809 /*
2810 * During a task move, charges can be doubly counted. So it's
2811 * better to wait until the end of the task move if one is in progress.
2812 */
2813 if (mem_cgroup_wait_acct_move(mem_over_limit))
2814 goto retry;
2815
2816 if (nr_retries--)
2817 goto retry;
2818
2819 if (gfp_mask & __GFP_RETRY_MAYFAIL)
2820 goto nomem;
2821
2822 /* Avoid endless loop for tasks bypassed by the oom killer */
2823 if (passed_oom && task_is_dying())
2824 goto nomem;
2825
2826 /*
2827 * Keep retrying as long as the memcg OOM killer is able to make
2828 * forward progress, or bypass the charge if the OOM killer
2829 * couldn't make any progress.
2830 */
2831 if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2832 get_order(nr_pages * PAGE_SIZE))) {
2833 passed_oom = true;
2834 nr_retries = MAX_RECLAIM_RETRIES;
2835 goto retry;
2836 }
2837nomem:
2838 /*
2839 * Memcg doesn't have a dedicated reserve for atomic
2840 * allocations. But like the global atomic pool, we need to
2841 * put the burden of reclaim on regular allocation requests
2842 * and let these go through as privileged allocations.
2843 */
2844 if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2845 return -ENOMEM;
2846force:
2847 /*
2848 * If the allocation has to be enforced, don't forget to raise
2849 * a MEMCG_MAX event.
2850 */
2851 if (!raised_max_event)
2852 memcg_memory_event(mem_over_limit, MEMCG_MAX);
2853
2854 /*
2855 * The allocation either can't fail or will lead to more memory
2856 * being freed very soon. Allow memory usage to go over the limit
2857 * temporarily by force charging it.
2858 */
2859 page_counter_charge(&memcg->memory, nr_pages);
2860 if (do_memsw_account())
2861 page_counter_charge(&memcg->memsw, nr_pages);
2862
2863 return 0;
2864
2865done_restock:
2866 if (batch > nr_pages)
2867 refill_stock(memcg, batch - nr_pages);
2868
2869 /*
2870 * If the hierarchy is above the normal consumption range, schedule
2871 * reclaim on returning to userland. We can perform reclaim here
2872 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2873 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2874 * not recorded as it most likely matches current's and won't
2875 * change in the meantime. As high limit is checked again before
2876 * reclaim, the cost of mismatch is negligible.
2877 */
2878 do {
2879 bool mem_high, swap_high;
2880
2881 mem_high = page_counter_read(&memcg->memory) >
2882 READ_ONCE(memcg->memory.high);
2883 swap_high = page_counter_read(&memcg->swap) >
2884 READ_ONCE(memcg->swap.high);
2885
2886 /* Don't bother a random interrupted task */
2887 if (!in_task()) {
2888 if (mem_high) {
2889 schedule_work(&memcg->high_work);
2890 break;
2891 }
2892 continue;
2893 }
2894
2895 if (mem_high || swap_high) {
2896 /*
2897 * The allocating tasks in this cgroup will need to do
2898 * reclaim or be throttled to prevent further growth
2899 * of the memory or swap footprints.
2900 *
2901 * Target some best-effort fairness between the tasks,
2902 * and distribute reclaim work and delay penalties
2903 * based on how much each task is actually allocating.
2904 */
2905 current->memcg_nr_pages_over_high += batch;
2906 set_notify_resume(current);
2907 break;
2908 }
2909 } while ((memcg = parent_mem_cgroup(memcg)));
2910
2911 /*
2912 * Reclaim is set up above to be called from the userland
2913 * return path. But also attempt synchronous reclaim to avoid
2914 * excessive overrun while the task is still inside the
2915 * kernel. If this is successful, the return path will see it
2916 * when it rechecks the overage and simply bail out.
2917 */
2918 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2919 !(current->flags & PF_MEMALLOC) &&
2920 gfpflags_allow_blocking(gfp_mask))
2921 mem_cgroup_handle_over_high(gfp_mask);
2922 return 0;
2923}
2924
2925static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2926 unsigned int nr_pages)
2927{
2928 if (mem_cgroup_is_root(memcg))
2929 return 0;
2930
2931 return try_charge_memcg(memcg, gfp_mask, nr_pages);
2932}
2933
2934/**
2935 * mem_cgroup_cancel_charge() - cancel an uncommitted try_charge() call.
2936 * @memcg: memcg previously charged.
2937 * @nr_pages: number of pages previously charged.
2938 */
2939void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2940{
2941 if (mem_cgroup_is_root(memcg))
2942 return;
2943
2944 page_counter_uncharge(&memcg->memory, nr_pages);
2945 if (do_memsw_account())
2946 page_counter_uncharge(&memcg->memsw, nr_pages);
2947}
2948
2949static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2950{
2951 VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
2952 /*
2953 * Any of the following ensures page's memcg stability:
2954 *
2955 * - the page lock
2956 * - LRU isolation
2957 * - folio_memcg_lock()
2958 * - exclusive reference
2959 * - mem_cgroup_trylock_pages()
2960 */
2961 folio->memcg_data = (unsigned long)memcg;
2962}
2963
2964/**
2965 * mem_cgroup_commit_charge - commit a previously successful try_charge().
2966 * @folio: folio to commit the charge to.
2967 * @memcg: memcg previously charged.
2968 */
2969void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2970{
2971 css_get(&memcg->css);
2972 commit_charge(folio, memcg);
2973
2974 local_irq_disable();
2975 mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
2976 memcg_check_events(memcg, folio_nid(folio));
2977 local_irq_enable();
2978}
2979
2980#ifdef CONFIG_MEMCG_KMEM
2981/*
2982 * The allocated objcg pointers array is not accounted directly.
2983 * Moreover, it should not come from a DMA buffer and is not readily
2984 * reclaimable. So those GFP bits should be masked off.
2985 */
2986#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \
2987 __GFP_ACCOUNT | __GFP_NOFAIL)
2988
2989/*
2990 * mod_objcg_mlstate() may be called with irq enabled, so
2991 * mod_memcg_lruvec_state() should be used.
2992 */
2993static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2994 struct pglist_data *pgdat,
2995 enum node_stat_item idx, int nr)
2996{
2997 struct mem_cgroup *memcg;
2998 struct lruvec *lruvec;
2999
3000 rcu_read_lock();
3001 memcg = obj_cgroup_memcg(objcg);
3002 lruvec = mem_cgroup_lruvec(memcg, pgdat);
3003 mod_memcg_lruvec_state(lruvec, idx, nr);
3004 rcu_read_unlock();
3005}
3006
3007int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
3008 gfp_t gfp, bool new_slab)
3009{
3010 unsigned int objects = objs_per_slab(s, slab);
3011 unsigned long memcg_data;
3012 void *vec;
3013
3014 gfp &= ~OBJCGS_CLEAR_MASK;
3015 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
3016 slab_nid(slab));
3017 if (!vec)
3018 return -ENOMEM;
3019
3020 memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
3021 if (new_slab) {
3022 /*
3023 * If the slab is brand new and nobody can yet access its
3024 * memcg_data, no synchronization is required and memcg_data can
3025 * be simply assigned.
3026 */
3027 slab->memcg_data = memcg_data;
3028 } else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) {
3029 /*
3030 * If the slab is already in use, somebody can allocate and
3031 * assign obj_cgroups in parallel. In this case the existing
3032 * objcg vector should be reused.
3033 */
3034 kfree(vec);
3035 return 0;
3036 }
3037
3038 kmemleak_not_leak(vec);
3039 return 0;
3040}
3041
3042static __always_inline
3043struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
3044{
3045 /*
3046 * Slab objects are accounted individually, not per-page.
3047 * Memcg membership data for each individual object is saved in
3048 * slab->memcg_data.
3049 */
3050 if (folio_test_slab(folio)) {
3051 struct obj_cgroup **objcgs;
3052 struct slab *slab;
3053 unsigned int off;
3054
3055 slab = folio_slab(folio);
3056 objcgs = slab_objcgs(slab);
3057 if (!objcgs)
3058 return NULL;
3059
3060 off = obj_to_index(slab->slab_cache, slab, p);
3061 if (objcgs[off])
3062 return obj_cgroup_memcg(objcgs[off]);
3063
3064 return NULL;
3065 }
3066
3067 /*
3068 * folio_memcg_check() is used here, because in theory we can encounter
3069 * a folio where the slab flag has been cleared already, but
3070 * slab->memcg_data has not been freed yet.
3071 * folio_memcg_check() will guarantee that a proper memory
3072 * cgroup pointer or NULL will be returned.
3073 */
3074 return folio_memcg_check(folio);
3075}
3076
3077/*
3078 * Returns a pointer to the memory cgroup to which the kernel object is charged.
3079 *
3080 * A passed kernel object can be a slab object, vmalloc object or a generic
3081 * kernel page, so different mechanisms for getting the memory cgroup pointer
3082 * should be used.
3083 *
3084 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
3085 * can not know for sure how the kernel object is implemented.
3086 * mem_cgroup_from_obj() can be safely used in such cases.
3087 *
3088 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
3089 * cgroup_mutex, etc.
3090 */
3091struct mem_cgroup *mem_cgroup_from_obj(void *p)
3092{
3093 struct folio *folio;
3094
3095 if (mem_cgroup_disabled())
3096 return NULL;
3097
3098 if (unlikely(is_vmalloc_addr(p)))
3099 folio = page_folio(vmalloc_to_page(p));
3100 else
3101 folio = virt_to_folio(p);
3102
3103 return mem_cgroup_from_obj_folio(folio, p);
3104}
3105
3106/*
3107 * Returns a pointer to the memory cgroup to which the kernel object is charged.
3108 * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects
3109 * allocated using vmalloc().
3110 *
3111 * A passed kernel object must be a slab object or a generic kernel page.
3112 *
3113 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
3114 * cgroup_mutex, etc.
3115 */
3116struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
3117{
3118 if (mem_cgroup_disabled())
3119 return NULL;
3120
3121 return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
3122}
3123
3124static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
3125{
3126 struct obj_cgroup *objcg = NULL;
3127
3128 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
3129 objcg = rcu_dereference(memcg->objcg);
3130 if (likely(objcg && obj_cgroup_tryget(objcg)))
3131 break;
3132 objcg = NULL;
3133 }
3134 return objcg;
3135}
3136
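/*
 * Re-resolve current->objcg after CURRENT_OBJCG_UPDATE_FLAG was set, i.e.
 * after the task was moved to another memcg: drop the stale reference, look
 * up the objcg of the task's current memcg and install it atomically,
 * retrying if the update flag gets set again concurrently.
 */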
3137static struct obj_cgroup *current_objcg_update(void)
3138{
3139 struct mem_cgroup *memcg;
3140 struct obj_cgroup *old, *objcg = NULL;
3141
3142 do {
3143 /* Atomically drop the update bit. */
3144 old = xchg(&current->objcg, NULL);
3145 if (old) {
3146 old = (struct obj_cgroup *)
3147 ((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
3148 if (old)
3149 obj_cgroup_put(old);
3150
3151 old = NULL;
3152 }
3153
3154 /* If new objcg is NULL, no reason for the second atomic update. */
3155 if (!current->mm || (current->flags & PF_KTHREAD))
3156 return NULL;
3157
3158 /*
3159 * Release the objcg pointer from the previous iteration,
3160 * if try_cmpxchg() below fails.
3161 */
3162 if (unlikely(objcg)) {
3163 obj_cgroup_put(objcg);
3164 objcg = NULL;
3165 }
3166
3167 /*
3168 * Obtain the new objcg pointer. The current task can be
3169 * asynchronously moved to another memcg and the previous
3170 * memcg can be offlined. So let's get the memcg pointer
3171 * and try get a reference to objcg under a rcu read lock.
3172 */
3173
3174 rcu_read_lock();
3175 memcg = mem_cgroup_from_task(current);
3176 objcg = __get_obj_cgroup_from_memcg(memcg);
3177 rcu_read_unlock();
3178
3179 /*
3180 * Try set up a new objcg pointer atomically. If it
3181 * fails, it means the update flag was set concurrently, so
3182 * the whole procedure should be repeated.
3183 */
3184 } while (!try_cmpxchg(&current->objcg, &old, objcg));
3185
3186 return objcg;
3187}
3188
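/*
 * Return the objcg that kernel allocations in the current context should be
 * charged to: an active_memcg override if one is set (current->active_memcg
 * in task context, int_active_memcg otherwise), else the current task's
 * cached objcg. No reference is taken; the pointer is only valid within the
 * task's lifetime or the set_active_memcg() scope.
 */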
3189__always_inline struct obj_cgroup *current_obj_cgroup(void)
3190{
3191 struct mem_cgroup *memcg;
3192 struct obj_cgroup *objcg;
3193
3194 if (in_task()) {
3195 memcg = current->active_memcg;
3196 if (unlikely(memcg))
3197 goto from_memcg;
3198
3199 objcg = READ_ONCE(current->objcg);
3200 if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
3201 objcg = current_objcg_update();
3202 /*
3203 * Objcg reference is kept by the task, so it's safe
3204 * to use the objcg by the current task.
3205 */
3206 return objcg;
3207 }
3208
3209 memcg = this_cpu_read(int_active_memcg);
3210 if (unlikely(memcg))
3211 goto from_memcg;
3212
3213 return NULL;
3214
3215from_memcg:
3216 objcg = NULL;
3217 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
3218 /*
3219 * Memcg pointer is protected by scope (see set_active_memcg())
3220 * and is pinning the corresponding objcg, so objcg can't go
3221 * away and can be used within the scope without any additional
3222 * protection.
3223 */
3224 objcg = rcu_dereference_check(memcg->objcg, 1);
3225 if (likely(objcg))
3226 break;
3227 }
3228
3229 return objcg;
3230}
3231
3232struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
3233{
3234 struct obj_cgroup *objcg;
3235
3236 if (!memcg_kmem_online())
3237 return NULL;
3238
3239 if (folio_memcg_kmem(folio)) {
3240 objcg = __folio_objcg(folio);
3241 obj_cgroup_get(objcg);
3242 } else {
3243 struct mem_cgroup *memcg;
3244
3245 rcu_read_lock();
3246 memcg = __folio_memcg(folio);
3247 if (memcg)
3248 objcg = __get_obj_cgroup_from_memcg(memcg);
3249 else
3250 objcg = NULL;
3251 rcu_read_unlock();
3252 }
3253 return objcg;
3254}
3255
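/*
 * Update the MEMCG_KMEM statistic and, on cgroup1, the dedicated kmem page
 * counter. @nr_pages may be negative for uncharges.
 */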
3256static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
3257{
3258 mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
3259 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
3260 if (nr_pages > 0)
3261 page_counter_charge(&memcg->kmem, nr_pages);
3262 else
3263 page_counter_uncharge(&memcg->kmem, -nr_pages);
3264 }
3265}
3266
3267
3268/*
3269 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from a objcg
3270 * @objcg: object cgroup to uncharge
3271 * @nr_pages: number of pages to uncharge
3272 */
3273static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
3274 unsigned int nr_pages)
3275{
3276 struct mem_cgroup *memcg;
3277
3278 memcg = get_mem_cgroup_from_objcg(objcg);
3279
3280 memcg_account_kmem(memcg, -nr_pages);
3281 refill_stock(memcg, nr_pages);
3282
3283 css_put(&memcg->css);
3284}
3285
3286/*
3287 * obj_cgroup_charge_pages: charge a number of kernel pages to a objcg
3288 * @objcg: object cgroup to charge
3289 * @gfp: reclaim mode
3290 * @nr_pages: number of pages to charge
3291 *
3292 * Returns 0 on success, an error code on failure.
3293 */
3294static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3295 unsigned int nr_pages)
3296{
3297 struct mem_cgroup *memcg;
3298 int ret;
3299
3300 memcg = get_mem_cgroup_from_objcg(objcg);
3301
3302 ret = try_charge_memcg(memcg, gfp, nr_pages);
3303 if (ret)
3304 goto out;
3305
3306 memcg_account_kmem(memcg, nr_pages);
3307out:
3308 css_put(&memcg->css);
3309
3310 return ret;
3311}
3312
3313/**
3314 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3315 * @page: page to charge
3316 * @gfp: reclaim mode
3317 * @order: allocation order
3318 *
3319 * Returns 0 on success, an error code on failure.
3320 */
3321int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3322{
3323 struct obj_cgroup *objcg;
3324 int ret = 0;
3325
3326 objcg = current_obj_cgroup();
3327 if (objcg) {
3328 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
3329 if (!ret) {
3330 obj_cgroup_get(objcg);
3331 page->memcg_data = (unsigned long)objcg |
3332 MEMCG_DATA_KMEM;
3333 return 0;
3334 }
3335 }
3336 return ret;
3337}
3338
3339/**
3340 * __memcg_kmem_uncharge_page: uncharge a kmem page
3341 * @page: page to uncharge
3342 * @order: allocation order
3343 */
3344void __memcg_kmem_uncharge_page(struct page *page, int order)
3345{
3346 struct folio *folio = page_folio(page);
3347 struct obj_cgroup *objcg;
3348 unsigned int nr_pages = 1 << order;
3349
3350 if (!folio_memcg_kmem(folio))
3351 return;
3352
3353 objcg = __folio_objcg(folio);
3354 obj_cgroup_uncharge_pages(objcg, nr_pages);
3355 folio->memcg_data = 0;
3356 obj_cgroup_put(objcg);
3357}
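
/*
 * Note: __memcg_kmem_charge_page() and __memcg_kmem_uncharge_page() are
 * typically reached from the page allocator for __GFP_ACCOUNT allocations;
 * the objcg reference taken on a successful charge is dropped again when
 * the page is uncharged.
 */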
3358
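/*
 * Account a slab vmstat delta for @objcg against @pgdat. Small deltas are
 * batched in the per-cpu stock and only folded into the vmstat counters
 * once they exceed a page worth of bytes, or when the cached objcg or
 * pgdat changes.
 */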
3359void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3360 enum node_stat_item idx, int nr)
3361{
3362 struct memcg_stock_pcp *stock;
3363 struct obj_cgroup *old = NULL;
3364 unsigned long flags;
3365 int *bytes;
3366
3367 local_lock_irqsave(&memcg_stock.stock_lock, flags);
3368 stock = this_cpu_ptr(&memcg_stock);
3369
3370 /*
3371 * Save vmstat data in stock and skip vmstat array update unless
3372 * accumulating over a page of vmstat data or when pgdat or idx
3373 * changes.
3374 */
3375 if (READ_ONCE(stock->cached_objcg) != objcg) {
3376 old = drain_obj_stock(stock);
3377 obj_cgroup_get(objcg);
3378 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3379 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3380 WRITE_ONCE(stock->cached_objcg, objcg);
3381 stock->cached_pgdat = pgdat;
3382 } else if (stock->cached_pgdat != pgdat) {
3383 /* Flush the existing cached vmstat data */
3384 struct pglist_data *oldpg = stock->cached_pgdat;
3385
3386 if (stock->nr_slab_reclaimable_b) {
3387 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3388 stock->nr_slab_reclaimable_b);
3389 stock->nr_slab_reclaimable_b = 0;
3390 }
3391 if (stock->nr_slab_unreclaimable_b) {
3392 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3393 stock->nr_slab_unreclaimable_b);
3394 stock->nr_slab_unreclaimable_b = 0;
3395 }
3396 stock->cached_pgdat = pgdat;
3397 }
3398
3399 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3400 : &stock->nr_slab_unreclaimable_b;
3401 /*
3402 * Even for large objects >= PAGE_SIZE, the vmstat data will still be
3403 * cached locally at least once before pushing it out.
3404 */
3405 if (!*bytes) {
3406 *bytes = nr;
3407 nr = 0;
3408 } else {
3409 *bytes += nr;
3410 if (abs(*bytes) > PAGE_SIZE) {
3411 nr = *bytes;
3412 *bytes = 0;
3413 } else {
3414 nr = 0;
3415 }
3416 }
3417 if (nr)
3418 mod_objcg_mlstate(objcg, pgdat, idx, nr);
3419
3420 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3421 if (old)
3422 obj_cgroup_put(old);
3423}
3424
3425static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3426{
3427 struct memcg_stock_pcp *stock;
3428 unsigned long flags;
3429 bool ret = false;
3430
3431 local_lock_irqsave(&memcg_stock.stock_lock, flags);
3432
3433 stock = this_cpu_ptr(&memcg_stock);
3434 if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
3435 stock->nr_bytes -= nr_bytes;
3436 ret = true;
3437 }
3438
3439 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3440
3441 return ret;
3442}
3443
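/*
 * Flush the objcg state cached in @stock: return whole pre-charged pages to
 * the memcg stock, park the byte remainder in objcg->nr_charged_bytes and
 * fold any batched slab vmstat deltas. Returns the old objcg, which the
 * caller must put outside of stock_lock.
 */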
3444static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
3445{
3446 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
3447
3448 if (!old)
3449 return NULL;
3450
3451 if (stock->nr_bytes) {
3452 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3453 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3454
3455 if (nr_pages) {
3456 struct mem_cgroup *memcg;
3457
3458 memcg = get_mem_cgroup_from_objcg(old);
3459
3460 memcg_account_kmem(memcg, -nr_pages);
3461 __refill_stock(memcg, nr_pages);
3462
3463 css_put(&memcg->css);
3464 }
3465
3466 /*
3467 * The leftover is flushed to the centralized per-memcg value.
3468 * On the next attempt to refill obj stock it will be moved
3469 * to a per-cpu stock (probably on another CPU), see
3470 * refill_obj_stock().
3471 *
3472 * How often it's flushed is a trade-off between the memory
3473 * limit enforcement accuracy and potential CPU contention,
3474 * so it might be changed in the future.
3475 */
3476 atomic_add(nr_bytes, &old->nr_charged_bytes);
3477 stock->nr_bytes = 0;
3478 }
3479
3480 /*
3481 * Flush the vmstat data in current stock
3482 */
3483 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3484 if (stock->nr_slab_reclaimable_b) {
3485 mod_objcg_mlstate(old, stock->cached_pgdat,
3486 NR_SLAB_RECLAIMABLE_B,
3487 stock->nr_slab_reclaimable_b);
3488 stock->nr_slab_reclaimable_b = 0;
3489 }
3490 if (stock->nr_slab_unreclaimable_b) {
3491 mod_objcg_mlstate(old, stock->cached_pgdat,
3492 NR_SLAB_UNRECLAIMABLE_B,
3493 stock->nr_slab_unreclaimable_b);
3494 stock->nr_slab_unreclaimable_b = 0;
3495 }
3496 stock->cached_pgdat = NULL;
3497 }
3498
3499 WRITE_ONCE(stock->cached_objcg, NULL);
3500 /*
3501 * The `old' objcg needs to be released by the caller via
3502 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
3503 */
3504 return old;
3505}
3506
3507static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3508 struct mem_cgroup *root_memcg)
3509{
3510 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
3511 struct mem_cgroup *memcg;
3512
3513 if (objcg) {
3514 memcg = obj_cgroup_memcg(objcg);
3515 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3516 return true;
3517 }
3518
3519 return false;
3520}
3521
3522static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3523 bool allow_uncharge)
3524{
3525 struct memcg_stock_pcp *stock;
3526 struct obj_cgroup *old = NULL;
3527 unsigned long flags;
3528 unsigned int nr_pages = 0;
3529
3530 local_lock_irqsave(&memcg_stock.stock_lock, flags);
3531
3532 stock = this_cpu_ptr(&memcg_stock);
3533 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
3534 old = drain_obj_stock(stock);
3535 obj_cgroup_get(objcg);
3536 WRITE_ONCE(stock->cached_objcg, objcg);
3537 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3538 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3539 allow_uncharge = true; /* Allow uncharge when objcg changes */
3540 }
3541 stock->nr_bytes += nr_bytes;
3542
3543 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3544 nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3545 stock->nr_bytes &= (PAGE_SIZE - 1);
3546 }
3547
3548 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3549 if (old)
3550 obj_cgroup_put(old);
3551
3552 if (nr_pages)
3553 obj_cgroup_uncharge_pages(objcg, nr_pages);
3554}
3555
3556int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3557{
3558 unsigned int nr_pages, nr_bytes;
3559 int ret;
3560
3561 if (consume_obj_stock(objcg, size))
3562 return 0;
3563
3564 /*
3565 * In theory, objcg->nr_charged_bytes can have enough
3566 * pre-charged bytes to satisfy the allocation. However,
3567 * flushing objcg->nr_charged_bytes requires two atomic
3568 * operations, and objcg->nr_charged_bytes can't be big.
3569 * The shared objcg->nr_charged_bytes can also become a
3570 * performance bottleneck if all tasks of the same memcg are
3571 * trying to update it. So it's better to ignore it and try
3572 * grab some new pages. The stock's nr_bytes will be flushed to
3573 * objcg->nr_charged_bytes later on when objcg changes.
3574 *
3575 * The stock's nr_bytes may contain enough pre-charged bytes
3576 * to allow one less page from being charged, but we can't rely
3577 * on the pre-charged bytes not being changed outside of
3578 * consume_obj_stock() or refill_obj_stock(). So ignore those
3579 * pre-charged bytes as well when charging pages. To avoid a
3580 * page uncharge right after a page charge, we set the
3581 * allow_uncharge flag to false when calling refill_obj_stock()
3582 * to temporarily allow the pre-charged bytes to exceed the page
3583 * size limit. The maximum reachable value of the pre-charged
3584 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3585 * race.
3586 */
3587 nr_pages = size >> PAGE_SHIFT;
3588 nr_bytes = size & (PAGE_SIZE - 1);
3589
3590 if (nr_bytes)
3591 nr_pages += 1;
3592
3593 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3594 if (!ret && nr_bytes)
3595 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3596
3597 return ret;
3598}
3599
3600void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3601{
3602 refill_obj_stock(objcg, size, true);
3603}
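
/*
 * Rough usage sketch (illustrative only, not a caller in this file): an
 * accounted kernel allocation of @size bytes would pair the charge and
 * uncharge roughly as
 *
 *	objcg = current_obj_cgroup();
 *	if (objcg && obj_cgroup_charge(objcg, gfp, size))
 *		goto fail;			(over the limit, back off)
 *	...
 *	obj_cgroup_uncharge(objcg, size);	(later, when freeing)
 *
 * Real callers such as the slab allocator additionally take and manage
 * objcg references and do per-object bookkeeping.
 */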
3604
3605#endif /* CONFIG_MEMCG_KMEM */
3606
3607/*
3608 * Because page_memcg(head) is not set on tails, set it now.
3609 */
3610void split_page_memcg(struct page *head, int old_order, int new_order)
3611{
3612 struct folio *folio = page_folio(head);
3613 struct mem_cgroup *memcg = folio_memcg(folio);
3614 int i;
3615 unsigned int old_nr = 1 << old_order;
3616 unsigned int new_nr = 1 << new_order;
3617
3618 if (mem_cgroup_disabled() || !memcg)
3619 return;
3620
3621 for (i = new_nr; i < old_nr; i += new_nr)
3622 folio_page(folio, i)->memcg_data = folio->memcg_data;
3623
3624 if (folio_memcg_kmem(folio))
3625 obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
3626 else
3627 css_get_many(&memcg->css, old_nr / new_nr - 1);
3628}
3629
3630#ifdef CONFIG_SWAP
3631/**
3632 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3633 * @entry: swap entry to be moved
3634 * @from: mem_cgroup which the entry is moved from
3635 * @to: mem_cgroup which the entry is moved to
3636 *
3637 * It succeeds only when the swap_cgroup's record for this entry is the same
3638 * as the mem_cgroup's id of @from.
3639 *
3640 * Returns 0 on success, -EINVAL on failure.
3641 *
3642 * The caller must have charged to @to, IOW, called page_counter_charge() for
3643 * both res and memsw, and called css_get().
3644 */
3645static int mem_cgroup_move_swap_account(swp_entry_t entry,
3646 struct mem_cgroup *from, struct mem_cgroup *to)
3647{
3648 unsigned short old_id, new_id;
3649
3650 old_id = mem_cgroup_id(from);
3651 new_id = mem_cgroup_id(to);
3652
3653 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3654 mod_memcg_state(from, MEMCG_SWAP, -1);
3655 mod_memcg_state(to, MEMCG_SWAP, 1);
3656 return 0;
3657 }
3658 return -EINVAL;
3659}
3660#else
3661static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3662 struct mem_cgroup *from, struct mem_cgroup *to)
3663{
3664 return -EINVAL;
3665}
3666#endif
3667
3668static DEFINE_MUTEX(memcg_max_mutex);
3669
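/*
 * Set a new hard limit (memory.max, or memsw.max when @memsw is true),
 * reclaiming from @memcg until usage fits under the new value. Returns
 * -EINVAL if the memory.max <= memsw.max invariant would be broken, -EBUSY
 * if reclaim cannot bring usage below the new limit, and -EINTR on a
 * pending signal.
 */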
3670static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3671 unsigned long max, bool memsw)
3672{
3673 bool enlarge = false;
3674 bool drained = false;
3675 int ret;
3676 bool limits_invariant;
3677 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3678
3679 do {
3680 if (signal_pending(current)) {
3681 ret = -EINTR;
3682 break;
3683 }
3684
3685 mutex_lock(&memcg_max_mutex);
3686 /*
3687 * Make sure that the new limit (memsw or memory limit) doesn't
3688 * break our basic invariant rule memory.max <= memsw.max.
3689 */
3690 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3691 max <= memcg->memsw.max;
3692 if (!limits_invariant) {
3693 mutex_unlock(&memcg_max_mutex);
3694 ret = -EINVAL;
3695 break;
3696 }
3697 if (max > counter->max)
3698 enlarge = true;
3699 ret = page_counter_set_max(counter, max);
3700 mutex_unlock(&memcg_max_mutex);
3701
3702 if (!ret)
3703 break;
3704
3705 if (!drained) {
3706 drain_all_stock(memcg);
3707 drained = true;
3708 continue;
3709 }
3710
3711 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3712 memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP)) {
3713 ret = -EBUSY;
3714 break;
3715 }
3716 } while (true);
3717
3718 if (!ret && enlarge)
3719 memcg_oom_recover(memcg);
3720
3721 return ret;
3722}
3723
3724unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3725 gfp_t gfp_mask,
3726 unsigned long *total_scanned)
3727{
3728 unsigned long nr_reclaimed = 0;
3729 struct mem_cgroup_per_node *mz, *next_mz = NULL;
3730 unsigned long reclaimed;
3731 int loop = 0;
3732 struct mem_cgroup_tree_per_node *mctz;
3733 unsigned long excess;
3734
3735 if (lru_gen_enabled())
3736 return 0;
3737
3738 if (order > 0)
3739 return 0;
3740
3741 mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];
3742
3743 /*
3744 * Do not even bother to check the largest node if the root
3745 * is empty. Do it lockless to prevent lock bouncing. Races
3746 * are acceptable as soft limit is best effort anyway.
3747 */
3748 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3749 return 0;
3750
3751 /*
3752	 * This loop can run for a while, especially if mem_cgroups continuously
3753	 * keep exceeding their soft limit and putting the system under
3754	 * pressure.
3755 */
3756 do {
3757 if (next_mz)
3758 mz = next_mz;
3759 else
3760 mz = mem_cgroup_largest_soft_limit_node(mctz);
3761 if (!mz)
3762 break;
3763
3764 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3765 gfp_mask, total_scanned);
3766 nr_reclaimed += reclaimed;
3767 spin_lock_irq(&mctz->lock);
3768
3769 /*
3770		 * If we failed to reclaim anything from this memory cgroup,
3771		 * it is time to move on to the next cgroup.
3772 */
3773 next_mz = NULL;
3774 if (!reclaimed)
3775 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3776
3777 excess = soft_limit_excess(mz->memcg);
3778 /*
3779 * One school of thought says that we should not add
3780 * back the node to the tree if reclaim returns 0.
3781		 * But our reclaim could return 0 simply because, due
3782		 * to priority, we are exposing a smaller subset of
3783		 * memory to reclaim from. Consider this a longer-term
3784		 * TODO.
3785 */
3786 /* If excess == 0, no tree ops */
3787 __mem_cgroup_insert_exceeded(mz, mctz, excess);
3788 spin_unlock_irq(&mctz->lock);
3789 css_put(&mz->memcg->css);
3790 loop++;
3791 /*
3792 * Could not reclaim anything and there are no more
3793 * mem cgroups to try or we seem to be looping without
3794 * reclaiming anything.
3795 */
3796 if (!nr_reclaimed &&
3797 (next_mz == NULL ||
3798 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3799 break;
3800 } while (!nr_reclaimed);
3801 if (next_mz)
3802 css_put(&next_mz->memcg->css);
3803 return nr_reclaimed;
3804}
3805
3806/*
3807 * Reclaims as many pages from the given memcg as possible.
3808 *
3809 * Caller is responsible for holding css reference for memcg.
3810 */
3811static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3812{
3813 int nr_retries = MAX_RECLAIM_RETRIES;
3814
3815	/* we call try-to-free pages to make this cgroup empty */
3816 lru_add_drain_all();
3817
3818 drain_all_stock(memcg);
3819
3820 /* try to free all pages in this cgroup */
3821 while (nr_retries && page_counter_read(&memcg->memory)) {
3822 if (signal_pending(current))
3823 return -EINTR;
3824
3825 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3826 MEMCG_RECLAIM_MAY_SWAP))
3827 nr_retries--;
3828 }
3829
3830 return 0;
3831}
3832
3833static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3834 char *buf, size_t nbytes,
3835 loff_t off)
3836{
3837 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3838
3839 if (mem_cgroup_is_root(memcg))
3840 return -EINVAL;
3841 return mem_cgroup_force_empty(memcg) ?: nbytes;
3842}
3843
3844static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3845 struct cftype *cft)
3846{
3847 return 1;
3848}
3849
3850static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3851 struct cftype *cft, u64 val)
3852{
3853 if (val == 1)
3854 return 0;
3855
3856 pr_warn_once("Non-hierarchical mode is deprecated. "
3857 "Please report your usecase to linux-mm@kvack.org if you "
3858 "depend on this functionality.\n");
3859
3860 return -EINVAL;
3861}
3862
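/*
 * Return the current usage of @memcg in pages; with @swap set, return
 * the combined memory+swap usage.  For the root cgroup the usage is
 * approximated from global VM state.
 */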
3863static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3864{
3865 unsigned long val;
3866
3867 if (mem_cgroup_is_root(memcg)) {
3868 /*
3869 * Approximate root's usage from global state. This isn't
3870 * perfect, but the root usage was always an approximation.
3871 */
3872 val = global_node_page_state(NR_FILE_PAGES) +
3873 global_node_page_state(NR_ANON_MAPPED);
3874 if (swap)
3875 val += total_swap_pages - get_nr_swap_pages();
3876 } else {
3877 if (!swap)
3878 val = page_counter_read(&memcg->memory);
3879 else
3880 val = page_counter_read(&memcg->memsw);
3881 }
3882 return val;
3883}
3884
3885enum {
3886 RES_USAGE,
3887 RES_LIMIT,
3888 RES_MAX_USAGE,
3889 RES_FAILCNT,
3890 RES_SOFT_LIMIT,
3891};
3892
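/*
 * Read handler for the cgroup1 usage, limit, watermark, failcnt and
 * soft limit files of the memory, memsw, kmem and tcp counters.
 */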
3893static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3894 struct cftype *cft)
3895{
3896 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3897 struct page_counter *counter;
3898
3899 switch (MEMFILE_TYPE(cft->private)) {
3900 case _MEM:
3901 counter = &memcg->memory;
3902 break;
3903 case _MEMSWAP:
3904 counter = &memcg->memsw;
3905 break;
3906 case _KMEM:
3907 counter = &memcg->kmem;
3908 break;
3909 case _TCP:
3910 counter = &memcg->tcpmem;
3911 break;
3912 default:
3913 BUG();
3914 }
3915
3916 switch (MEMFILE_ATTR(cft->private)) {
3917 case RES_USAGE:
3918 if (counter == &memcg->memory)
3919 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3920 if (counter == &memcg->memsw)
3921 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3922 return (u64)page_counter_read(counter) * PAGE_SIZE;
3923 case RES_LIMIT:
3924 return (u64)counter->max * PAGE_SIZE;
3925 case RES_MAX_USAGE:
3926 return (u64)counter->watermark * PAGE_SIZE;
3927 case RES_FAILCNT:
3928 return counter->failcnt;
3929 case RES_SOFT_LIMIT:
3930 return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE;
3931 default:
3932 BUG();
3933 }
3934}
3935
3936/*
3937 * This function doesn't do anything useful. Its only job is to provide a read
3938 * handler for a file so that cgroup_file_mode() will add read permissions.
3939 */
3940static int mem_cgroup_dummy_seq_show(__always_unused struct seq_file *m,
3941 __always_unused void *v)
3942{
3943 return -EINVAL;
3944}
3945
3946#ifdef CONFIG_MEMCG_KMEM
3947static int memcg_online_kmem(struct mem_cgroup *memcg)
3948{
3949 struct obj_cgroup *objcg;
3950
3951 if (mem_cgroup_kmem_disabled())
3952 return 0;
3953
3954 if (unlikely(mem_cgroup_is_root(memcg)))
3955 return 0;
3956
3957 objcg = obj_cgroup_alloc();
3958 if (!objcg)
3959 return -ENOMEM;
3960
3961 objcg->memcg = memcg;
3962 rcu_assign_pointer(memcg->objcg, objcg);
3963 obj_cgroup_get(objcg);
3964 memcg->orig_objcg = objcg;
3965
3966 static_branch_enable(&memcg_kmem_online_key);
3967
3968 memcg->kmemcg_id = memcg->id.id;
3969
3970 return 0;
3971}
3972
3973static void memcg_offline_kmem(struct mem_cgroup *memcg)
3974{
3975 struct mem_cgroup *parent;
3976
3977 if (mem_cgroup_kmem_disabled())
3978 return;
3979
3980 if (unlikely(mem_cgroup_is_root(memcg)))
3981 return;
3982
3983 parent = parent_mem_cgroup(memcg);
3984 if (!parent)
3985 parent = root_mem_cgroup;
3986
3987 memcg_reparent_objcgs(memcg, parent);
3988
3989 /*
3990 * After we have finished memcg_reparent_objcgs(), all list_lrus
3991 * corresponding to this cgroup are guaranteed to remain empty.
3992 * The ordering is imposed by list_lru_node->lock taken by
3993 * memcg_reparent_list_lrus().
3994 */
3995 memcg_reparent_list_lrus(memcg, parent);
3996}
3997#else
3998static int memcg_online_kmem(struct mem_cgroup *memcg)
3999{
4000 return 0;
4001}
4002static void memcg_offline_kmem(struct mem_cgroup *memcg)
4003{
4004}
4005#endif /* CONFIG_MEMCG_KMEM */
4006
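/*
 * Update the limit of the tcpmem counter.  The first successful write
 * also enables socket memory accounting for this cgroup by flipping
 * the memcg_sockets_enabled static key and setting tcpmem_active.
 */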
4007static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
4008{
4009 int ret;
4010
4011 mutex_lock(&memcg_max_mutex);
4012
4013 ret = page_counter_set_max(&memcg->tcpmem, max);
4014 if (ret)
4015 goto out;
4016
4017 if (!memcg->tcpmem_active) {
4018 /*
4019 * The active flag needs to be written after the static_key
4020 * update. This is what guarantees that the socket activation
4021 * function is the last one to run. See mem_cgroup_sk_alloc()
4022 * for details, and note that we don't mark any socket as
4023 * belonging to this memcg until that flag is up.
4024 *
4025 * We need to do this, because static_keys will span multiple
4026 * sites, but we can't control their order. If we mark a socket
4027 * as accounted, but the accounting functions are not patched in
4028 * yet, we'll lose accounting.
4029 *
4030 * We never race with the readers in mem_cgroup_sk_alloc(),
4031		 * because when this value changes, the code to process it is not
4032 * patched in yet.
4033 */
4034 static_branch_inc(&memcg_sockets_enabled_key);
4035 memcg->tcpmem_active = true;
4036 }
4037out:
4038 mutex_unlock(&memcg_max_mutex);
4039 return ret;
4040}
4041
4042/*
4043 * The only users of this function are the RES_LIMIT and RES_SOFT_LIMIT
4044 * files.
4045 */
4046static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
4047 char *buf, size_t nbytes, loff_t off)
4048{
4049 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4050 unsigned long nr_pages;
4051 int ret;
4052
4053 buf = strstrip(buf);
4054 ret = page_counter_memparse(buf, "-1", &nr_pages);
4055 if (ret)
4056 return ret;
4057
4058 switch (MEMFILE_ATTR(of_cft(of)->private)) {
4059 case RES_LIMIT:
4060 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
4061 ret = -EINVAL;
4062 break;
4063 }
4064 switch (MEMFILE_TYPE(of_cft(of)->private)) {
4065 case _MEM:
4066 ret = mem_cgroup_resize_max(memcg, nr_pages, false);
4067 break;
4068 case _MEMSWAP:
4069 ret = mem_cgroup_resize_max(memcg, nr_pages, true);
4070 break;
4071 case _KMEM:
4072 pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
4073 "Writing any value to this file has no effect. "
4074 "Please report your usecase to linux-mm@kvack.org if you "
4075 "depend on this functionality.\n");
4076 ret = 0;
4077 break;
4078 case _TCP:
4079 ret = memcg_update_tcp_max(memcg, nr_pages);
4080 break;
4081 }
4082 break;
4083 case RES_SOFT_LIMIT:
4084 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
4085 ret = -EOPNOTSUPP;
4086 } else {
4087 WRITE_ONCE(memcg->soft_limit, nr_pages);
4088 ret = 0;
4089 }
4090 break;
4091 }
4092 return ret ?: nbytes;
4093}
4094
4095static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
4096 size_t nbytes, loff_t off)
4097{
4098 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4099 struct page_counter *counter;
4100
4101 switch (MEMFILE_TYPE(of_cft(of)->private)) {
4102 case _MEM:
4103 counter = &memcg->memory;
4104 break;
4105 case _MEMSWAP:
4106 counter = &memcg->memsw;
4107 break;
4108 case _KMEM:
4109 counter = &memcg->kmem;
4110 break;
4111 case _TCP:
4112 counter = &memcg->tcpmem;
4113 break;
4114 default:
4115 BUG();
4116 }
4117
4118 switch (MEMFILE_ATTR(of_cft(of)->private)) {
4119 case RES_MAX_USAGE:
4120 page_counter_reset_watermark(counter);
4121 break;
4122 case RES_FAILCNT:
4123 counter->failcnt = 0;
4124 break;
4125 default:
4126 BUG();
4127 }
4128
4129 return nbytes;
4130}
4131
4132static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
4133 struct cftype *cft)
4134{
4135 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
4136}
4137
4138#ifdef CONFIG_MMU
4139static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
4140 struct cftype *cft, u64 val)
4141{
4142 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4143
4144 pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
4145 "Please report your usecase to linux-mm@kvack.org if you "
4146 "depend on this functionality.\n");
4147
4148 if (val & ~MOVE_MASK)
4149 return -EINVAL;
4150
4151 /*
4152 * No kind of locking is needed in here, because ->can_attach() will
4153	 * check this value once at the beginning of the process, and then carry
4154 * on with stale data. This means that changes to this value will only
4155 * affect task migrations starting after the change.
4156 */
4157 memcg->move_charge_at_immigrate = val;
4158 return 0;
4159}
4160#else
4161static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
4162 struct cftype *cft, u64 val)
4163{
4164 return -ENOSYS;
4165}
4166#endif
4167
4168#ifdef CONFIG_NUMA
4169
4170#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
4171#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
4172#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
4173
4174static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
4175 int nid, unsigned int lru_mask, bool tree)
4176{
4177 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4178 unsigned long nr = 0;
4179 enum lru_list lru;
4180
4181 VM_BUG_ON((unsigned)nid >= nr_node_ids);
4182
4183 for_each_lru(lru) {
4184 if (!(BIT(lru) & lru_mask))
4185 continue;
4186 if (tree)
4187 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
4188 else
4189 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
4190 }
4191 return nr;
4192}
4193
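/*
 * Same as mem_cgroup_node_nr_lru_pages(), but summed over all nodes.
 */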
4194static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
4195 unsigned int lru_mask,
4196 bool tree)
4197{
4198 unsigned long nr = 0;
4199 enum lru_list lru;
4200
4201 for_each_lru(lru) {
4202 if (!(BIT(lru) & lru_mask))
4203 continue;
4204 if (tree)
4205 nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
4206 else
4207 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
4208 }
4209 return nr;
4210}
4211
4212static int memcg_numa_stat_show(struct seq_file *m, void *v)
4213{
4214 struct numa_stat {
4215 const char *name;
4216 unsigned int lru_mask;
4217 };
4218
4219 static const struct numa_stat stats[] = {
4220 { "total", LRU_ALL },
4221 { "file", LRU_ALL_FILE },
4222 { "anon", LRU_ALL_ANON },
4223 { "unevictable", BIT(LRU_UNEVICTABLE) },
4224 };
4225 const struct numa_stat *stat;
4226 int nid;
4227 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4228
4229 mem_cgroup_flush_stats(memcg);
4230
4231 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4232 seq_printf(m, "%s=%lu", stat->name,
4233 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4234 false));
4235 for_each_node_state(nid, N_MEMORY)
4236 seq_printf(m, " N%d=%lu", nid,
4237 mem_cgroup_node_nr_lru_pages(memcg, nid,
4238 stat->lru_mask, false));
4239 seq_putc(m, '\n');
4240 }
4241
4242 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4243
4244 seq_printf(m, "hierarchical_%s=%lu", stat->name,
4245 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4246 true));
4247 for_each_node_state(nid, N_MEMORY)
4248 seq_printf(m, " N%d=%lu", nid,
4249 mem_cgroup_node_nr_lru_pages(memcg, nid,
4250 stat->lru_mask, true));
4251 seq_putc(m, '\n');
4252 }
4253
4254 return 0;
4255}
4256#endif /* CONFIG_NUMA */
4257
4258static const unsigned int memcg1_stats[] = {
4259 NR_FILE_PAGES,
4260 NR_ANON_MAPPED,
4261#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4262 NR_ANON_THPS,
4263#endif
4264 NR_SHMEM,
4265 NR_FILE_MAPPED,
4266 NR_FILE_DIRTY,
4267 NR_WRITEBACK,
4268 WORKINGSET_REFAULT_ANON,
4269 WORKINGSET_REFAULT_FILE,
4270#ifdef CONFIG_SWAP
4271 MEMCG_SWAP,
4272 NR_SWAPCACHE,
4273#endif
4274};
4275
4276static const char *const memcg1_stat_names[] = {
4277 "cache",
4278 "rss",
4279#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4280 "rss_huge",
4281#endif
4282 "shmem",
4283 "mapped_file",
4284 "dirty",
4285 "writeback",
4286 "workingset_refault_anon",
4287 "workingset_refault_file",
4288#ifdef CONFIG_SWAP
4289 "swap",
4290 "swapcached",
4291#endif
4292};
4293
4294/* Universal VM events cgroup1 shows, original sort order */
4295static const unsigned int memcg1_events[] = {
4296 PGPGIN,
4297 PGPGOUT,
4298 PGFAULT,
4299 PGMAJFAULT,
4300};
4301
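/*
 * Format the cgroup1 memory.stat output into @s: local counters and
 * events first, then the hierarchical limits and the subtree-wide
 * ("total_") counters, events and LRU sizes.
 */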
4302static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
4303{
4304 unsigned long memory, memsw;
4305 struct mem_cgroup *mi;
4306 unsigned int i;
4307
4308 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4309
4310 mem_cgroup_flush_stats(memcg);
4311
4312 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4313 unsigned long nr;
4314
4315 nr = memcg_page_state_local_output(memcg, memcg1_stats[i]);
4316 seq_buf_printf(s, "%s %lu\n", memcg1_stat_names[i], nr);
4317 }
4318
4319 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4320 seq_buf_printf(s, "%s %lu\n", vm_event_name(memcg1_events[i]),
4321 memcg_events_local(memcg, memcg1_events[i]));
4322
4323 for (i = 0; i < NR_LRU_LISTS; i++)
4324 seq_buf_printf(s, "%s %lu\n", lru_list_name(i),
4325 memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4326 PAGE_SIZE);
4327
4328 /* Hierarchical information */
4329 memory = memsw = PAGE_COUNTER_MAX;
4330 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4331 memory = min(memory, READ_ONCE(mi->memory.max));
4332 memsw = min(memsw, READ_ONCE(mi->memsw.max));
4333 }
4334 seq_buf_printf(s, "hierarchical_memory_limit %llu\n",
4335 (u64)memory * PAGE_SIZE);
4336 seq_buf_printf(s, "hierarchical_memsw_limit %llu\n",
4337 (u64)memsw * PAGE_SIZE);
4338
4339 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4340 unsigned long nr;
4341
4342 nr = memcg_page_state_output(memcg, memcg1_stats[i]);
4343 seq_buf_printf(s, "total_%s %llu\n", memcg1_stat_names[i],
4344 (u64)nr);
4345 }
4346
4347 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4348 seq_buf_printf(s, "total_%s %llu\n",
4349 vm_event_name(memcg1_events[i]),
4350 (u64)memcg_events(memcg, memcg1_events[i]));
4351
4352 for (i = 0; i < NR_LRU_LISTS; i++)
4353 seq_buf_printf(s, "total_%s %llu\n", lru_list_name(i),
4354 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4355 PAGE_SIZE);
4356
4357#ifdef CONFIG_DEBUG_VM
4358 {
4359 pg_data_t *pgdat;
4360 struct mem_cgroup_per_node *mz;
4361 unsigned long anon_cost = 0;
4362 unsigned long file_cost = 0;
4363
4364 for_each_online_pgdat(pgdat) {
4365 mz = memcg->nodeinfo[pgdat->node_id];
4366
4367 anon_cost += mz->lruvec.anon_cost;
4368 file_cost += mz->lruvec.file_cost;
4369 }
4370 seq_buf_printf(s, "anon_cost %lu\n", anon_cost);
4371 seq_buf_printf(s, "file_cost %lu\n", file_cost);
4372 }
4373#endif
4374}
4375
4376static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4377 struct cftype *cft)
4378{
4379 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4380
4381 return mem_cgroup_swappiness(memcg);
4382}
4383
4384static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4385 struct cftype *cft, u64 val)
4386{
4387 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4388
4389 if (val > 200)
4390 return -EINVAL;
4391
4392 if (!mem_cgroup_is_root(memcg))
4393 WRITE_ONCE(memcg->swappiness, val);
4394 else
4395 WRITE_ONCE(vm_swappiness, val);
4396
4397 return 0;
4398}
4399
4400static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4401{
4402 struct mem_cgroup_threshold_ary *t;
4403 unsigned long usage;
4404 int i;
4405
4406 rcu_read_lock();
4407 if (!swap)
4408 t = rcu_dereference(memcg->thresholds.primary);
4409 else
4410 t = rcu_dereference(memcg->memsw_thresholds.primary);
4411
4412 if (!t)
4413 goto unlock;
4414
4415 usage = mem_cgroup_usage(memcg, swap);
4416
4417 /*
4418	 * current_threshold points to the threshold just below or equal to usage.
4419	 * If that is no longer true, a threshold was crossed after the last
4420	 * call of __mem_cgroup_threshold().
4421 */
4422 i = t->current_threshold;
4423
4424 /*
4425 * Iterate backward over array of thresholds starting from
4426 * current_threshold and check if a threshold is crossed.
4427	 * If none of the thresholds below usage is crossed, we read
4428 * only one element of the array here.
4429 */
4430 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4431 eventfd_signal(t->entries[i].eventfd);
4432
4433 /* i = current_threshold + 1 */
4434 i++;
4435
4436 /*
4437 * Iterate forward over array of thresholds starting from
4438 * current_threshold+1 and check if a threshold is crossed.
4439	 * If none of the thresholds above usage is crossed, we read
4440 * only one element of the array here.
4441 */
4442 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4443 eventfd_signal(t->entries[i].eventfd);
4444
4445 /* Update current_threshold */
4446 t->current_threshold = i - 1;
4447unlock:
4448 rcu_read_unlock();
4449}
4450
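/*
 * Check the memory (and, with legacy memsw accounting, memory+swap)
 * thresholds of @memcg and all of its ancestors, signalling the
 * eventfds of any thresholds that usage has crossed.
 */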
4451static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4452{
4453 while (memcg) {
4454 __mem_cgroup_threshold(memcg, false);
4455 if (do_memsw_account())
4456 __mem_cgroup_threshold(memcg, true);
4457
4458 memcg = parent_mem_cgroup(memcg);
4459 }
4460}
4461
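/* sort() comparator: order thresholds by ascending threshold value */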
4462static int compare_thresholds(const void *a, const void *b)
4463{
4464 const struct mem_cgroup_threshold *_a = a;
4465 const struct mem_cgroup_threshold *_b = b;
4466
4467 if (_a->threshold > _b->threshold)
4468 return 1;
4469
4470 if (_a->threshold < _b->threshold)
4471 return -1;
4472
4473 return 0;
4474}
4475
4476static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4477{
4478 struct mem_cgroup_eventfd_list *ev;
4479
4480 spin_lock(&memcg_oom_lock);
4481
4482 list_for_each_entry(ev, &memcg->oom_notify, list)
4483 eventfd_signal(ev->eventfd);
4484
4485 spin_unlock(&memcg_oom_lock);
4486 return 0;
4487}
4488
4489static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4490{
4491 struct mem_cgroup *iter;
4492
4493 for_each_mem_cgroup_tree(iter, memcg)
4494 mem_cgroup_oom_notify_cb(iter);
4495}
4496
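/*
 * Register @eventfd to be signalled whenever the memory (or memory+swap)
 * usage of @memcg crosses the threshold given in @args.  A new, sorted
 * copy of the thresholds array is built under thresholds_lock and then
 * published with rcu_assign_pointer().
 */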
4497static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4498 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4499{
4500 struct mem_cgroup_thresholds *thresholds;
4501 struct mem_cgroup_threshold_ary *new;
4502 unsigned long threshold;
4503 unsigned long usage;
4504 int i, size, ret;
4505
4506 ret = page_counter_memparse(args, "-1", &threshold);
4507 if (ret)
4508 return ret;
4509
4510 mutex_lock(&memcg->thresholds_lock);
4511
4512 if (type == _MEM) {
4513 thresholds = &memcg->thresholds;
4514 usage = mem_cgroup_usage(memcg, false);
4515 } else if (type == _MEMSWAP) {
4516 thresholds = &memcg->memsw_thresholds;
4517 usage = mem_cgroup_usage(memcg, true);
4518 } else
4519 BUG();
4520
4521 /* Check if a threshold crossed before adding a new one */
4522 if (thresholds->primary)
4523 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4524
4525 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4526
4527 /* Allocate memory for new array of thresholds */
4528 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4529 if (!new) {
4530 ret = -ENOMEM;
4531 goto unlock;
4532 }
4533 new->size = size;
4534
4535 /* Copy thresholds (if any) to new array */
4536 if (thresholds->primary)
4537 memcpy(new->entries, thresholds->primary->entries,
4538 flex_array_size(new, entries, size - 1));
4539
4540 /* Add new threshold */
4541 new->entries[size - 1].eventfd = eventfd;
4542 new->entries[size - 1].threshold = threshold;
4543
4544 /* Sort thresholds. Registering of new threshold isn't time-critical */
4545 sort(new->entries, size, sizeof(*new->entries),
4546 compare_thresholds, NULL);
4547
4548 /* Find current threshold */
4549 new->current_threshold = -1;
4550 for (i = 0; i < size; i++) {
4551 if (new->entries[i].threshold <= usage) {
4552 /*
4553 * new->current_threshold will not be used until
4554 * rcu_assign_pointer(), so it's safe to increment
4555 * it here.
4556 */
4557 ++new->current_threshold;
4558 } else
4559 break;
4560 }
4561
4562 /* Free old spare buffer and save old primary buffer as spare */
4563 kfree(thresholds->spare);
4564 thresholds->spare = thresholds->primary;
4565
4566 rcu_assign_pointer(thresholds->primary, new);
4567
4568 /* To be sure that nobody uses thresholds */
4569 synchronize_rcu();
4570
4571unlock:
4572 mutex_unlock(&memcg->thresholds_lock);
4573
4574 return ret;
4575}
4576
4577static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4578 struct eventfd_ctx *eventfd, const char *args)
4579{
4580 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4581}
4582
4583static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4584 struct eventfd_ctx *eventfd, const char *args)
4585{
4586 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4587}
4588
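/*
 * Remove all thresholds registered for @eventfd.  The remaining entries
 * are copied into the spare array, which is then published with RCU;
 * the old primary array becomes the new spare.
 */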
4589static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4590 struct eventfd_ctx *eventfd, enum res_type type)
4591{
4592 struct mem_cgroup_thresholds *thresholds;
4593 struct mem_cgroup_threshold_ary *new;
4594 unsigned long usage;
4595 int i, j, size, entries;
4596
4597 mutex_lock(&memcg->thresholds_lock);
4598
4599 if (type == _MEM) {
4600 thresholds = &memcg->thresholds;
4601 usage = mem_cgroup_usage(memcg, false);
4602 } else if (type == _MEMSWAP) {
4603 thresholds = &memcg->memsw_thresholds;
4604 usage = mem_cgroup_usage(memcg, true);
4605 } else
4606 BUG();
4607
4608 if (!thresholds->primary)
4609 goto unlock;
4610
4611 /* Check if a threshold crossed before removing */
4612 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4613
4614	/* Calculate the new number of thresholds */
4615 size = entries = 0;
4616 for (i = 0; i < thresholds->primary->size; i++) {
4617 if (thresholds->primary->entries[i].eventfd != eventfd)
4618 size++;
4619 else
4620 entries++;
4621 }
4622
4623 new = thresholds->spare;
4624
4625 /* If no items related to eventfd have been cleared, nothing to do */
4626 if (!entries)
4627 goto unlock;
4628
4629 /* Set thresholds array to NULL if we don't have thresholds */
4630 if (!size) {
4631 kfree(new);
4632 new = NULL;
4633 goto swap_buffers;
4634 }
4635
4636 new->size = size;
4637
4638 /* Copy thresholds and find current threshold */
4639 new->current_threshold = -1;
4640 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4641 if (thresholds->primary->entries[i].eventfd == eventfd)
4642 continue;
4643
4644 new->entries[j] = thresholds->primary->entries[i];
4645 if (new->entries[j].threshold <= usage) {
4646 /*
4647 * new->current_threshold will not be used
4648 * until rcu_assign_pointer(), so it's safe to increment
4649 * it here.
4650 */
4651 ++new->current_threshold;
4652 }
4653 j++;
4654 }
4655
4656swap_buffers:
4657 /* Swap primary and spare array */
4658 thresholds->spare = thresholds->primary;
4659
4660 rcu_assign_pointer(thresholds->primary, new);
4661
4662 /* To be sure that nobody uses thresholds */
4663 synchronize_rcu();
4664
4665 /* If all events are unregistered, free the spare array */
4666 if (!new) {
4667 kfree(thresholds->spare);
4668 thresholds->spare = NULL;
4669 }
4670unlock:
4671 mutex_unlock(&memcg->thresholds_lock);
4672}
4673
4674static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4675 struct eventfd_ctx *eventfd)
4676{
4677 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4678}
4679
4680static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4681 struct eventfd_ctx *eventfd)
4682{
4683 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4684}
4685
4686static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4687 struct eventfd_ctx *eventfd, const char *args)
4688{
4689 struct mem_cgroup_eventfd_list *event;
4690
4691 event = kmalloc(sizeof(*event), GFP_KERNEL);
4692 if (!event)
4693 return -ENOMEM;
4694
4695 spin_lock(&memcg_oom_lock);
4696
4697 event->eventfd = eventfd;
4698 list_add(&event->list, &memcg->oom_notify);
4699
4700 /* already in OOM ? */
4701 if (memcg->under_oom)
4702 eventfd_signal(eventfd);
4703 spin_unlock(&memcg_oom_lock);
4704
4705 return 0;
4706}
4707
4708static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4709 struct eventfd_ctx *eventfd)
4710{
4711 struct mem_cgroup_eventfd_list *ev, *tmp;
4712
4713 spin_lock(&memcg_oom_lock);
4714
4715 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4716 if (ev->eventfd == eventfd) {
4717 list_del(&ev->list);
4718 kfree(ev);
4719 }
4720 }
4721
4722 spin_unlock(&memcg_oom_lock);
4723}
4724
4725static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4726{
4727 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4728
4729 seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable));
4730 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4731 seq_printf(sf, "oom_kill %lu\n",
4732 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4733 return 0;
4734}
4735
4736static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4737 struct cftype *cft, u64 val)
4738{
4739 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4740
4741	/* cannot be set on the root cgroup; only 0 and 1 are allowed */
4742 if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
4743 return -EINVAL;
4744
4745 WRITE_ONCE(memcg->oom_kill_disable, val);
4746 if (!val)
4747 memcg_oom_recover(memcg);
4748
4749 return 0;
4750}
4751
4752#ifdef CONFIG_CGROUP_WRITEBACK
4753
4754#include <trace/events/writeback.h>
4755
4756static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4757{
4758 return wb_domain_init(&memcg->cgwb_domain, gfp);
4759}
4760
4761static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4762{
4763 wb_domain_exit(&memcg->cgwb_domain);
4764}
4765
4766static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4767{
4768 wb_domain_size_changed(&memcg->cgwb_domain);
4769}
4770
4771struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4772{
4773 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4774
4775 if (!memcg->css.parent)
4776 return NULL;
4777
4778 return &memcg->cgwb_domain;
4779}
4780
4781/**
4782 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4783 * @wb: bdi_writeback in question
4784 * @pfilepages: out parameter for number of file pages
4785 * @pheadroom: out parameter for number of allocatable pages according to memcg
4786 * @pdirty: out parameter for number of dirty pages
4787 * @pwriteback: out parameter for number of pages under writeback
4788 *
4789 * Determine the numbers of file, headroom, dirty, and writeback pages in
4790 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
4791 * is a bit more involved.
4792 *
4793 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
4794 * headroom is calculated as the lowest headroom of itself and the
4795 * ancestors. Note that this doesn't consider the actual amount of
4796 * available memory in the system. The caller should further cap
4797 * *@pheadroom accordingly.
4798 */
4799void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4800 unsigned long *pheadroom, unsigned long *pdirty,
4801 unsigned long *pwriteback)
4802{
4803 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4804 struct mem_cgroup *parent;
4805
4806 mem_cgroup_flush_stats_ratelimited(memcg);
4807
4808 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4809 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4810 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4811 memcg_page_state(memcg, NR_ACTIVE_FILE);
4812
4813 *pheadroom = PAGE_COUNTER_MAX;
4814 while ((parent = parent_mem_cgroup(memcg))) {
4815 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4816 READ_ONCE(memcg->memory.high));
4817 unsigned long used = page_counter_read(&memcg->memory);
4818
4819 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4820 memcg = parent;
4821 }
4822}
4823
4824/*
4825 * Foreign dirty flushing
4826 *
4827 * There's an inherent mismatch between memcg and writeback. The former
4828 * tracks ownership per-page while the latter per-inode. This was a
4829 * deliberate design decision because honoring per-page ownership in the
4830 * writeback path is complicated, may lead to higher CPU and IO overheads
4831 * and deemed unnecessary given that write-sharing an inode across
4832 * different cgroups isn't a common use-case.
4833 *
4834 * Combined with inode majority-writer ownership switching, this works well
4835 * enough in most cases but there are some pathological cases. For
4836 * example, let's say there are two cgroups A and B which keep writing to
4837 * different but confined parts of the same inode. B owns the inode and
4838 * A's memory is limited far below B's. A's dirty ratio can rise enough to
4839 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4840 * triggering background writeback. A will be slowed down without a way to
4841 * make writeback of the dirty pages happen.
4842 *
4843 * Conditions like the above can lead to a cgroup getting repeatedly and
4844 * severely throttled after making some progress after each
4845 * dirty_expire_interval while the underlying IO device is almost
4846 * completely idle.
4847 *
4848 * Solving this problem completely requires matching the ownership tracking
4849 * granularities between memcg and writeback in either direction. However,
4850 * the more egregious behaviors can be avoided by simply remembering the
4851 * most recent foreign dirtying events and initiating remote flushes on
4852 * them when local writeback isn't enough to keep the memory clean enough.
4853 *
4854 * The following two functions implement such mechanism. When a foreign
4855 * page - a page whose memcg and writeback ownerships don't match - is
4856 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4857 * bdi_writeback on the page owning memcg. When balance_dirty_pages()
4858 * decides that the memcg needs to sleep due to high dirty ratio, it calls
4859 * mem_cgroup_flush_foreign() which queues writeback on the recorded
4860 * foreign bdi_writebacks which haven't expired. Both the numbers of
4861 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4862 * limited to MEMCG_CGWB_FRN_CNT.
4863 *
4864 * The mechanism only remembers IDs and doesn't hold any object references.
4865 * As being wrong occasionally doesn't matter, updates and accesses to the
4866 * records are lockless and racy.
4867 */
4868void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
4869 struct bdi_writeback *wb)
4870{
4871 struct mem_cgroup *memcg = folio_memcg(folio);
4872 struct memcg_cgwb_frn *frn;
4873 u64 now = get_jiffies_64();
4874 u64 oldest_at = now;
4875 int oldest = -1;
4876 int i;
4877
4878 trace_track_foreign_dirty(folio, wb);
4879
4880 /*
4881 * Pick the slot to use. If there is already a slot for @wb, keep
4882	 * using it. If not, replace the oldest one which isn't being
4883 * written out.
4884 */
4885 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4886 frn = &memcg->cgwb_frn[i];
4887 if (frn->bdi_id == wb->bdi->id &&
4888 frn->memcg_id == wb->memcg_css->id)
4889 break;
4890 if (time_before64(frn->at, oldest_at) &&
4891 atomic_read(&frn->done.cnt) == 1) {
4892 oldest = i;
4893 oldest_at = frn->at;
4894 }
4895 }
4896
4897 if (i < MEMCG_CGWB_FRN_CNT) {
4898 /*
4899 * Re-using an existing one. Update timestamp lazily to
4900 * avoid making the cacheline hot. We want them to be
4901 * reasonably up-to-date and significantly shorter than
4902 * dirty_expire_interval as that's what expires the record.
4903 * Use the shorter of 1s and dirty_expire_interval / 8.
4904 */
4905 unsigned long update_intv =
4906 min_t(unsigned long, HZ,
4907 msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4908
4909 if (time_before64(frn->at, now - update_intv))
4910 frn->at = now;
4911 } else if (oldest >= 0) {
4912 /* replace the oldest free one */
4913 frn = &memcg->cgwb_frn[oldest];
4914 frn->bdi_id = wb->bdi->id;
4915 frn->memcg_id = wb->memcg_css->id;
4916 frn->at = now;
4917 }
4918}
4919
4920/* issue foreign writeback flushes for recorded foreign dirtying events */
4921void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4922{
4923 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4924 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4925 u64 now = jiffies_64;
4926 int i;
4927
4928 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4929 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4930
4931 /*
4932 * If the record is older than dirty_expire_interval,
4933 * writeback on it has already started. No need to kick it
4934 * off again. Also, don't start a new one if there's
4935 * already one in flight.
4936 */
4937 if (time_after64(frn->at, now - intv) &&
4938 atomic_read(&frn->done.cnt) == 1) {
4939 frn->at = 0;
4940 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4941 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
4942 WB_REASON_FOREIGN_FLUSH,
4943 &frn->done);
4944 }
4945 }
4946}
4947
4948#else /* CONFIG_CGROUP_WRITEBACK */
4949
4950static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4951{
4952 return 0;
4953}
4954
4955static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4956{
4957}
4958
4959static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4960{
4961}
4962
4963#endif /* CONFIG_CGROUP_WRITEBACK */
4964
4965/*
4966 * DO NOT USE IN NEW FILES.
4967 *
4968 * "cgroup.event_control" implementation.
4969 *
4970 * This is way over-engineered. It tries to support fully configurable
4971 * events for each user. Such a level of flexibility is completely
4972 * unnecessary, especially in light of the planned unified hierarchy.
4973 *
4974 * Please deprecate this and replace with something simpler if at all
4975 * possible.
4976 */
4977
4978/*
4979 * Unregister event and free resources.
4980 *
4981 * Gets called from workqueue.
4982 */
4983static void memcg_event_remove(struct work_struct *work)
4984{
4985 struct mem_cgroup_event *event =
4986 container_of(work, struct mem_cgroup_event, remove);
4987 struct mem_cgroup *memcg = event->memcg;
4988
4989 remove_wait_queue(event->wqh, &event->wait);
4990
4991 event->unregister_event(memcg, event->eventfd);
4992
4993 /* Notify userspace the event is going away. */
4994 eventfd_signal(event->eventfd);
4995
4996 eventfd_ctx_put(event->eventfd);
4997 kfree(event);
4998 css_put(&memcg->css);
4999}
5000
5001/*
5002 * Gets called on EPOLLHUP on eventfd when user closes it.
5003 *
5004 * Called with wqh->lock held and interrupts disabled.
5005 */
5006static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
5007 int sync, void *key)
5008{
5009 struct mem_cgroup_event *event =
5010 container_of(wait, struct mem_cgroup_event, wait);
5011 struct mem_cgroup *memcg = event->memcg;
5012 __poll_t flags = key_to_poll(key);
5013
5014 if (flags & EPOLLHUP) {
5015 /*
5016 * If the event has been detached at cgroup removal, we
5017 * can simply return knowing the other side will cleanup
5018 * for us.
5019 *
5020 * We can't race against event freeing since the other
5021 * side will require wqh->lock via remove_wait_queue(),
5022 * which we hold.
5023 */
5024 spin_lock(&memcg->event_list_lock);
5025 if (!list_empty(&event->list)) {
5026 list_del_init(&event->list);
5027 /*
5028			 * We are in atomic context, but memcg_event_remove()
5029			 * may sleep, so we have to call it from a workqueue.
5030 */
5031 schedule_work(&event->remove);
5032 }
5033 spin_unlock(&memcg->event_list_lock);
5034 }
5035
5036 return 0;
5037}
5038
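/*
 * poll_table callback: remember the waitqueue head and add the event's
 * wait entry so that memcg_event_wake() runs on EPOLLHUP.
 */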
5039static void memcg_event_ptable_queue_proc(struct file *file,
5040 wait_queue_head_t *wqh, poll_table *pt)
5041{
5042 struct mem_cgroup_event *event =
5043 container_of(pt, struct mem_cgroup_event, pt);
5044
5045 event->wqh = wqh;
5046 add_wait_queue(wqh, &event->wait);
5047}
5048
5049/*
5050 * DO NOT USE IN NEW FILES.
5051 *
5052 * Parse input and register new cgroup event handler.
5053 *
5054 * Input must be in format '<event_fd> <control_fd> <args>'.
5055 * Interpretation of args is defined by control file implementation.
5056 */
5057static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
5058 char *buf, size_t nbytes, loff_t off)
5059{
5060 struct cgroup_subsys_state *css = of_css(of);
5061 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5062 struct mem_cgroup_event *event;
5063 struct cgroup_subsys_state *cfile_css;
5064 unsigned int efd, cfd;
5065 struct fd efile;
5066 struct fd cfile;
5067 struct dentry *cdentry;
5068 const char *name;
5069 char *endp;
5070 int ret;
5071
5072 if (IS_ENABLED(CONFIG_PREEMPT_RT))
5073 return -EOPNOTSUPP;
5074
5075 buf = strstrip(buf);
5076
5077 efd = simple_strtoul(buf, &endp, 10);
5078 if (*endp != ' ')
5079 return -EINVAL;
5080 buf = endp + 1;
5081
5082 cfd = simple_strtoul(buf, &endp, 10);
5083 if ((*endp != ' ') && (*endp != '\0'))
5084 return -EINVAL;
5085 buf = endp + 1;
5086
5087 event = kzalloc(sizeof(*event), GFP_KERNEL);
5088 if (!event)
5089 return -ENOMEM;
5090
5091 event->memcg = memcg;
5092 INIT_LIST_HEAD(&event->list);
5093 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
5094 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
5095 INIT_WORK(&event->remove, memcg_event_remove);
5096
5097 efile = fdget(efd);
5098 if (!efile.file) {
5099 ret = -EBADF;
5100 goto out_kfree;
5101 }
5102
5103 event->eventfd = eventfd_ctx_fileget(efile.file);
5104 if (IS_ERR(event->eventfd)) {
5105 ret = PTR_ERR(event->eventfd);
5106 goto out_put_efile;
5107 }
5108
5109 cfile = fdget(cfd);
5110 if (!cfile.file) {
5111 ret = -EBADF;
5112 goto out_put_eventfd;
5113 }
5114
5115	/* the process needs read permission on the control file */
5116 /* AV: shouldn't we check that it's been opened for read instead? */
5117 ret = file_permission(cfile.file, MAY_READ);
5118 if (ret < 0)
5119 goto out_put_cfile;
5120
5121 /*
5122 * The control file must be a regular cgroup1 file. As a regular cgroup
5123 * file can't be renamed, it's safe to access its name afterwards.
5124 */
5125 cdentry = cfile.file->f_path.dentry;
5126 if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
5127 ret = -EINVAL;
5128 goto out_put_cfile;
5129 }
5130
5131 /*
5132 * Determine the event callbacks and set them in @event. This used
5133 * to be done via struct cftype but cgroup core no longer knows
5134 * about these events. The following is crude but the whole thing
5135 * is for compatibility anyway.
5136 *
5137 * DO NOT ADD NEW FILES.
5138 */
5139 name = cdentry->d_name.name;
5140
5141 if (!strcmp(name, "memory.usage_in_bytes")) {
5142 event->register_event = mem_cgroup_usage_register_event;
5143 event->unregister_event = mem_cgroup_usage_unregister_event;
5144 } else if (!strcmp(name, "memory.oom_control")) {
5145 event->register_event = mem_cgroup_oom_register_event;
5146 event->unregister_event = mem_cgroup_oom_unregister_event;
5147 } else if (!strcmp(name, "memory.pressure_level")) {
5148 event->register_event = vmpressure_register_event;
5149 event->unregister_event = vmpressure_unregister_event;
5150 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
5151 event->register_event = memsw_cgroup_usage_register_event;
5152 event->unregister_event = memsw_cgroup_usage_unregister_event;
5153 } else {
5154 ret = -EINVAL;
5155 goto out_put_cfile;
5156 }
5157
5158	 * Verify that @cfile belongs to @css. Also, remaining events are
5159 * Verify @cfile should belong to @css. Also, remaining events are
5160 * automatically removed on cgroup destruction but the removal is
5161 * asynchronous, so take an extra ref on @css.
5162 */
5163 cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
5164 &memory_cgrp_subsys);
5165 ret = -EINVAL;
5166 if (IS_ERR(cfile_css))
5167 goto out_put_cfile;
5168 if (cfile_css != css) {
5169 css_put(cfile_css);
5170 goto out_put_cfile;
5171 }
5172
5173 ret = event->register_event(memcg, event->eventfd, buf);
5174 if (ret)
5175 goto out_put_css;
5176
5177 vfs_poll(efile.file, &event->pt);
5178
5179 spin_lock_irq(&memcg->event_list_lock);
5180 list_add(&event->list, &memcg->event_list);
5181 spin_unlock_irq(&memcg->event_list_lock);
5182
5183 fdput(cfile);
5184 fdput(efile);
5185
5186 return nbytes;
5187
5188out_put_css:
5189 css_put(css);
5190out_put_cfile:
5191 fdput(cfile);
5192out_put_eventfd:
5193 eventfd_ctx_put(event->eventfd);
5194out_put_efile:
5195 fdput(efile);
5196out_kfree:
5197 kfree(event);
5198
5199 return ret;
5200}
5201
5202#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
5203static int mem_cgroup_slab_show(struct seq_file *m, void *p)
5204{
5205 /*
5206 * Deprecated.
5207 * Please, take a look at tools/cgroup/memcg_slabinfo.py .
5208 */
5209 return 0;
5210}
5211#endif
5212
5213static int memory_stat_show(struct seq_file *m, void *v);
5214
5215static struct cftype mem_cgroup_legacy_files[] = {
5216 {
5217 .name = "usage_in_bytes",
5218 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
5219 .read_u64 = mem_cgroup_read_u64,
5220 },
5221 {
5222 .name = "max_usage_in_bytes",
5223 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
5224 .write = mem_cgroup_reset,
5225 .read_u64 = mem_cgroup_read_u64,
5226 },
5227 {
5228 .name = "limit_in_bytes",
5229 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
5230 .write = mem_cgroup_write,
5231 .read_u64 = mem_cgroup_read_u64,
5232 },
5233 {
5234 .name = "soft_limit_in_bytes",
5235 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
5236 .write = mem_cgroup_write,
5237 .read_u64 = mem_cgroup_read_u64,
5238 },
5239 {
5240 .name = "failcnt",
5241 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
5242 .write = mem_cgroup_reset,
5243 .read_u64 = mem_cgroup_read_u64,
5244 },
5245 {
5246 .name = "stat",
5247 .seq_show = memory_stat_show,
5248 },
5249 {
5250 .name = "force_empty",
5251 .write = mem_cgroup_force_empty_write,
5252 },
5253 {
5254 .name = "use_hierarchy",
5255 .write_u64 = mem_cgroup_hierarchy_write,
5256 .read_u64 = mem_cgroup_hierarchy_read,
5257 },
5258 {
5259 .name = "cgroup.event_control", /* XXX: for compat */
5260 .write = memcg_write_event_control,
5261 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
5262 },
5263 {
5264 .name = "swappiness",
5265 .read_u64 = mem_cgroup_swappiness_read,
5266 .write_u64 = mem_cgroup_swappiness_write,
5267 },
5268 {
5269 .name = "move_charge_at_immigrate",
5270 .read_u64 = mem_cgroup_move_charge_read,
5271 .write_u64 = mem_cgroup_move_charge_write,
5272 },
5273 {
5274 .name = "oom_control",
5275 .seq_show = mem_cgroup_oom_control_read,
5276 .write_u64 = mem_cgroup_oom_control_write,
5277 },
5278 {
5279 .name = "pressure_level",
5280 .seq_show = mem_cgroup_dummy_seq_show,
5281 },
5282#ifdef CONFIG_NUMA
5283 {
5284 .name = "numa_stat",
5285 .seq_show = memcg_numa_stat_show,
5286 },
5287#endif
5288 {
5289 .name = "kmem.limit_in_bytes",
5290 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5291 .write = mem_cgroup_write,
5292 .read_u64 = mem_cgroup_read_u64,
5293 },
5294 {
5295 .name = "kmem.usage_in_bytes",
5296 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5297 .read_u64 = mem_cgroup_read_u64,
5298 },
5299 {
5300 .name = "kmem.failcnt",
5301 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5302 .write = mem_cgroup_reset,
5303 .read_u64 = mem_cgroup_read_u64,
5304 },
5305 {
5306 .name = "kmem.max_usage_in_bytes",
5307 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5308 .write = mem_cgroup_reset,
5309 .read_u64 = mem_cgroup_read_u64,
5310 },
5311#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
5312 {
5313 .name = "kmem.slabinfo",
5314 .seq_show = mem_cgroup_slab_show,
5315 },
5316#endif
5317 {
5318 .name = "kmem.tcp.limit_in_bytes",
5319 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5320 .write = mem_cgroup_write,
5321 .read_u64 = mem_cgroup_read_u64,
5322 },
5323 {
5324 .name = "kmem.tcp.usage_in_bytes",
5325 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5326 .read_u64 = mem_cgroup_read_u64,
5327 },
5328 {
5329 .name = "kmem.tcp.failcnt",
5330 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5331 .write = mem_cgroup_reset,
5332 .read_u64 = mem_cgroup_read_u64,
5333 },
5334 {
5335 .name = "kmem.tcp.max_usage_in_bytes",
5336 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5337 .write = mem_cgroup_reset,
5338 .read_u64 = mem_cgroup_read_u64,
5339 },
5340 { }, /* terminate */
5341};
5342
5343/*
5344 * Private memory cgroup IDR
5345 *
5346 * Swap-out records and page cache shadow entries need to store memcg
5347 * references in constrained space, so we maintain an ID space that is
5348 * limited to 16 bits (MEM_CGROUP_ID_MAX), limiting the total number of
5349 * memory-controlled cgroups to 64k.
5350 *
5351 * However, there usually are many references to the offline CSS after
5352 * the cgroup has been destroyed, such as page cache or reclaimable
5353 * slab objects, that don't need to hang on to the ID. We want to keep
5354 * those dead CSS from occupying IDs, or we might quickly exhaust the
5355 * relatively small ID space and prevent the creation of new cgroups
5356 * even when there are much fewer than 64k cgroups - possibly none.
5357 *
5358 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5359 * be freed and recycled when it's no longer needed, which is usually
5360 * when the CSS is offlined.
5361 *
5362 * The only exception to that are records of swapped out tmpfs/shmem
5363 * pages that need to be attributed to live ancestors on swapin. But
5364 * those references are manageable from userspace.
5365 */
5366
5367#define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
5368static DEFINE_IDR(mem_cgroup_idr);
5369
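/* Release the IDR slot of @memcg, if it still holds one. */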
5370static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5371{
5372 if (memcg->id.id > 0) {
5373 idr_remove(&mem_cgroup_idr, memcg->id.id);
5374 memcg->id.id = 0;
5375 }
5376}
5377
5378static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5379 unsigned int n)
5380{
5381 refcount_add(n, &memcg->id.ref);
5382}
5383
5384static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5385{
5386 if (refcount_sub_and_test(n, &memcg->id.ref)) {
5387 mem_cgroup_id_remove(memcg);
5388
5389 /* Memcg ID pins CSS */
5390 css_put(&memcg->css);
5391 }
5392}
5393
5394static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5395{
5396 mem_cgroup_id_put_many(memcg, 1);
5397}
5398
5399/**
5400 * mem_cgroup_from_id - look up a memcg from a memcg id
5401 * @id: the memcg id to look up
5402 *
5403 * Caller must hold rcu_read_lock().
5404 */
5405struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5406{
5407 WARN_ON_ONCE(!rcu_read_lock_held());
5408 return idr_find(&mem_cgroup_idr, id);
5409}
5410
5411#ifdef CONFIG_SHRINKER_DEBUG
5412struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
5413{
5414 struct cgroup *cgrp;
5415 struct cgroup_subsys_state *css;
5416 struct mem_cgroup *memcg;
5417
5418 cgrp = cgroup_get_from_id(ino);
5419 if (IS_ERR(cgrp))
5420 return ERR_CAST(cgrp);
5421
5422 css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
5423 if (css)
5424 memcg = container_of(css, struct mem_cgroup, css);
5425 else
5426 memcg = ERR_PTR(-ENOENT);
5427
5428 cgroup_put(cgrp);
5429
5430 return memcg;
5431}
5432#endif
5433
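/*
 * Allocate and initialize the per-node state of @memcg on @node: the
 * mem_cgroup_per_node structure, its lruvec and the per-cpu lruvec
 * statistics.  Returns 0 on success and 1 on allocation failure.
 */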
5434static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5435{
5436 struct mem_cgroup_per_node *pn;
5437
5438 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
5439 if (!pn)
5440 return 1;
5441
5442 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5443 GFP_KERNEL_ACCOUNT);
5444 if (!pn->lruvec_stats_percpu) {
5445 kfree(pn);
5446 return 1;
5447 }
5448
5449 lruvec_init(&pn->lruvec);
5450 pn->memcg = memcg;
5451
5452 memcg->nodeinfo[node] = pn;
5453 return 0;
5454}
5455
5456static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5457{
5458 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5459
5460 if (!pn)
5461 return;
5462
5463 free_percpu(pn->lruvec_stats_percpu);
5464 kfree(pn);
5465}
5466
5467static void __mem_cgroup_free(struct mem_cgroup *memcg)
5468{
5469 int node;
5470
5471 if (memcg->orig_objcg)
5472 obj_cgroup_put(memcg->orig_objcg);
5473
5474 for_each_node(node)
5475 free_mem_cgroup_per_node_info(memcg, node);
5476 kfree(memcg->vmstats);
5477 free_percpu(memcg->vmstats_percpu);
5478 kfree(memcg);
5479}
5480
5481static void mem_cgroup_free(struct mem_cgroup *memcg)
5482{
5483 lru_gen_exit_memcg(memcg);
5484 memcg_wb_domain_exit(memcg);
5485 __mem_cgroup_free(memcg);
5486}
5487
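/*
 * Allocate a new mem_cgroup with its per-cpu and per-node statistics,
 * reserve an ID in mem_cgroup_idr (published only when the css goes
 * online) and initialize its locks, lists and work items.
 */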
5488static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
5489{
5490 struct memcg_vmstats_percpu *statc, *pstatc;
5491 struct mem_cgroup *memcg;
5492 int node, cpu;
5493 int __maybe_unused i;
5494 long error = -ENOMEM;
5495
5496 memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
5497 if (!memcg)
5498 return ERR_PTR(error);
5499
5500 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5501 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
5502 if (memcg->id.id < 0) {
5503 error = memcg->id.id;
5504 goto fail;
5505 }
5506
5507 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), GFP_KERNEL);
5508 if (!memcg->vmstats)
5509 goto fail;
5510
5511 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5512 GFP_KERNEL_ACCOUNT);
5513 if (!memcg->vmstats_percpu)
5514 goto fail;
5515
5516 for_each_possible_cpu(cpu) {
5517 if (parent)
5518 pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
5519 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5520 statc->parent = parent ? pstatc : NULL;
5521 statc->vmstats = memcg->vmstats;
5522 }
5523
5524 for_each_node(node)
5525 if (alloc_mem_cgroup_per_node_info(memcg, node))
5526 goto fail;
5527
5528 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5529 goto fail;
5530
5531 INIT_WORK(&memcg->high_work, high_work_func);
5532 INIT_LIST_HEAD(&memcg->oom_notify);
5533 mutex_init(&memcg->thresholds_lock);
5534 spin_lock_init(&memcg->move_lock);
5535 vmpressure_init(&memcg->vmpressure);
5536 INIT_LIST_HEAD(&memcg->event_list);
5537 spin_lock_init(&memcg->event_list_lock);
5538 memcg->socket_pressure = jiffies;
5539#ifdef CONFIG_MEMCG_KMEM
5540 memcg->kmemcg_id = -1;
5541 INIT_LIST_HEAD(&memcg->objcg_list);
5542#endif
5543#ifdef CONFIG_CGROUP_WRITEBACK
5544 INIT_LIST_HEAD(&memcg->cgwb_list);
5545 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5546 memcg->cgwb_frn[i].done =
5547 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5548#endif
5549#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5550 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5551 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5552 memcg->deferred_split_queue.split_queue_len = 0;
5553#endif
5554 lru_gen_init_memcg(memcg);
5555 return memcg;
5556fail:
5557 mem_cgroup_id_remove(memcg);
5558 __mem_cgroup_free(memcg);
5559 return ERR_PTR(error);
5560}
5561
5562static struct cgroup_subsys_state * __ref
5563mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5564{
5565 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5566 struct mem_cgroup *memcg, *old_memcg;
5567
5568 old_memcg = set_active_memcg(parent);
5569 memcg = mem_cgroup_alloc(parent);
5570 set_active_memcg(old_memcg);
5571 if (IS_ERR(memcg))
5572 return ERR_CAST(memcg);
5573
5574 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5575 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
5576#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
5577 memcg->zswap_max = PAGE_COUNTER_MAX;
5578 WRITE_ONCE(memcg->zswap_writeback,
5579 !parent || READ_ONCE(parent->zswap_writeback));
5580#endif
5581 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5582 if (parent) {
5583 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
5584 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
5585
5586 page_counter_init(&memcg->memory, &parent->memory);
5587 page_counter_init(&memcg->swap, &parent->swap);
5588 page_counter_init(&memcg->kmem, &parent->kmem);
5589 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5590 } else {
5591 init_memcg_events();
5592 page_counter_init(&memcg->memory, NULL);
5593 page_counter_init(&memcg->swap, NULL);
5594 page_counter_init(&memcg->kmem, NULL);
5595 page_counter_init(&memcg->tcpmem, NULL);
5596
5597 root_mem_cgroup = memcg;
5598 return &memcg->css;
5599 }
5600
5601 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5602 static_branch_inc(&memcg_sockets_enabled_key);
5603
5604#if defined(CONFIG_MEMCG_KMEM)
5605 if (!cgroup_memory_nobpf)
5606 static_branch_inc(&memcg_bpf_enabled_key);
5607#endif
5608
5609 return &memcg->css;
5610}
5611
5612static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5613{
5614 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5615
5616 if (memcg_online_kmem(memcg))
5617 goto remove_id;
5618
5619 /*
5620 * A memcg must be visible for expand_shrinker_info()
5621 * by the time the maps are allocated. So, we allocate maps
5622 * here, when for_each_mem_cgroup() can't skip it.
5623 */
5624 if (alloc_shrinker_info(memcg))
5625 goto offline_kmem;
5626
5627 if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
5628 queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
5629 FLUSH_TIME);
5630 lru_gen_online_memcg(memcg);
5631
5632 /* Online state pins memcg ID, memcg ID pins CSS */
5633 refcount_set(&memcg->id.ref, 1);
5634 css_get(css);
5635
5636 /*
5637 * Ensure mem_cgroup_from_id() works once we're fully online.
5638 *
5639 * We could do this earlier and require callers to filter with
5640 * css_tryget_online(). But right now there are no users that
5641 * need earlier access, and the workingset code relies on the
5642 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
5643 * publish it here at the end of onlining. This matches the
5644 * regular ID destruction during offlining.
5645 */
5646 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5647
5648 return 0;
5649offline_kmem:
5650 memcg_offline_kmem(memcg);
5651remove_id:
5652 mem_cgroup_id_remove(memcg);
5653 return -ENOMEM;
5654}
5655
5656static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5657{
5658 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5659 struct mem_cgroup_event *event, *tmp;
5660
5661 /*
5662 * Unregister events and notify userspace.
5663 * Notify userspace about cgroup removing only after rmdir of cgroup
5664 * directory to avoid race between userspace and kernelspace.
5665 */
5666 spin_lock_irq(&memcg->event_list_lock);
5667 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5668 list_del_init(&event->list);
5669 schedule_work(&event->remove);
5670 }
5671 spin_unlock_irq(&memcg->event_list_lock);
5672
5673 page_counter_set_min(&memcg->memory, 0);
5674 page_counter_set_low(&memcg->memory, 0);
5675
5676 zswap_memcg_offline_cleanup(memcg);
5677
5678 memcg_offline_kmem(memcg);
5679 reparent_shrinker_deferred(memcg);
5680 wb_memcg_offline(memcg);
5681 lru_gen_offline_memcg(memcg);
5682
5683 drain_all_stock(memcg);
5684
5685 mem_cgroup_id_put(memcg);
5686}
5687
5688static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5689{
5690 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5691
5692 invalidate_reclaim_iterators(memcg);
5693 lru_gen_release_memcg(memcg);
5694}
5695
5696static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5697{
5698 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5699 int __maybe_unused i;
5700
5701#ifdef CONFIG_CGROUP_WRITEBACK
5702 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5703 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5704#endif
5705 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5706 static_branch_dec(&memcg_sockets_enabled_key);
5707
5708 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5709 static_branch_dec(&memcg_sockets_enabled_key);
5710
5711#if defined(CONFIG_MEMCG_KMEM)
5712 if (!cgroup_memory_nobpf)
5713 static_branch_dec(&memcg_bpf_enabled_key);
5714#endif
5715
5716 vmpressure_cleanup(&memcg->vmpressure);
5717 cancel_work_sync(&memcg->high_work);
5718 mem_cgroup_remove_from_trees(memcg);
5719 free_shrinker_info(memcg);
5720 mem_cgroup_free(memcg);
5721}
5722
5723/**
5724 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5725 * @css: the target css
5726 *
5727 * Reset the states of the mem_cgroup associated with @css. This is
5728 * invoked when the userland requests disabling on the default hierarchy
5729 * but the memcg is pinned through dependency. The memcg should stop
5730 * applying policies and should revert to the vanilla state as it may be
5731 * made visible again.
5732 *
5733 * The current implementation only resets the essential configurations.
5734 * This needs to be expanded to cover all the visible parts.
5735 */
5736static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5737{
5738 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5739
5740 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5741 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5742 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5743 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5744 page_counter_set_min(&memcg->memory, 0);
5745 page_counter_set_low(&memcg->memory, 0);
5746 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5747 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
5748 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5749 memcg_wb_domain_size_changed(memcg);
5750}
5751
5752static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5753{
5754 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5755 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5756 struct memcg_vmstats_percpu *statc;
5757 long delta, delta_cpu, v;
5758 int i, nid;
5759
5760 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5761
5762 for (i = 0; i < MEMCG_NR_STAT; i++) {
5763 /*
5764 * Collect the aggregated propagation counts of groups
5765 * below us. We're in a per-cpu loop here and this is
5766 * a global counter, so the first cycle will get them.
5767 */
5768 delta = memcg->vmstats->state_pending[i];
5769 if (delta)
5770 memcg->vmstats->state_pending[i] = 0;
5771
5772 /* Add CPU changes on this level since the last flush */
5773 delta_cpu = 0;
5774 v = READ_ONCE(statc->state[i]);
5775 if (v != statc->state_prev[i]) {
5776 delta_cpu = v - statc->state_prev[i];
5777 delta += delta_cpu;
5778 statc->state_prev[i] = v;
5779 }
5780
5781 /* Aggregate counts on this level and propagate upwards */
5782 if (delta_cpu)
5783 memcg->vmstats->state_local[i] += delta_cpu;
5784
5785 if (delta) {
5786 memcg->vmstats->state[i] += delta;
5787 if (parent)
5788 parent->vmstats->state_pending[i] += delta;
5789 }
5790 }
5791
5792 for (i = 0; i < NR_MEMCG_EVENTS; i++) {
5793 delta = memcg->vmstats->events_pending[i];
5794 if (delta)
5795 memcg->vmstats->events_pending[i] = 0;
5796
5797 delta_cpu = 0;
5798 v = READ_ONCE(statc->events[i]);
5799 if (v != statc->events_prev[i]) {
5800 delta_cpu = v - statc->events_prev[i];
5801 delta += delta_cpu;
5802 statc->events_prev[i] = v;
5803 }
5804
5805 if (delta_cpu)
5806 memcg->vmstats->events_local[i] += delta_cpu;
5807
5808 if (delta) {
5809 memcg->vmstats->events[i] += delta;
5810 if (parent)
5811 parent->vmstats->events_pending[i] += delta;
5812 }
5813 }
5814
5815 for_each_node_state(nid, N_MEMORY) {
5816 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
5817 struct mem_cgroup_per_node *ppn = NULL;
5818 struct lruvec_stats_percpu *lstatc;
5819
5820 if (parent)
5821 ppn = parent->nodeinfo[nid];
5822
5823 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
5824
5825 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
5826 delta = pn->lruvec_stats.state_pending[i];
5827 if (delta)
5828 pn->lruvec_stats.state_pending[i] = 0;
5829
5830 delta_cpu = 0;
5831 v = READ_ONCE(lstatc->state[i]);
5832 if (v != lstatc->state_prev[i]) {
5833 delta_cpu = v - lstatc->state_prev[i];
5834 delta += delta_cpu;
5835 lstatc->state_prev[i] = v;
5836 }
5837
5838 if (delta_cpu)
5839 pn->lruvec_stats.state_local[i] += delta_cpu;
5840
5841 if (delta) {
5842 pn->lruvec_stats.state[i] += delta;
5843 if (ppn)
5844 ppn->lruvec_stats.state_pending[i] += delta;
5845 }
5846 }
5847 }
5848 statc->stats_updates = 0;
5849 /* We are in a per-cpu loop here, only do the atomic write once */
5850 if (atomic64_read(&memcg->vmstats->stats_updates))
5851 atomic64_set(&memcg->vmstats->stats_updates, 0);
5852}
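
/*
 * Illustrative example (made-up numbers, not part of the interface):
 * suppose that, since the last flush, this memcg's per-cpu counter for
 * one item grew by +7 on the CPU being flushed, and its children had
 * already propagated +3 into state_pending[].  The flush above then adds
 * +7 to state_local[] (the memcg's own, non-hierarchical view), +10 to
 * state[] (the hierarchical view), and queues the same +10 in the
 * parent's state_pending[] to be folded in during the parent's flush.
 */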
5853
5854#ifdef CONFIG_MMU
5855/* Handlers for move charge at task migration. */
5856static int mem_cgroup_do_precharge(unsigned long count)
5857{
5858 int ret;
5859
5860 /* Try a single bulk charge without reclaim first, kswapd may wake */
5861 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5862 if (!ret) {
5863 mc.precharge += count;
5864 return ret;
5865 }
5866
5867 /* Try charges one by one with reclaim, but do not retry */
5868 while (count--) {
5869 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5870 if (ret)
5871 return ret;
5872 mc.precharge++;
5873 cond_resched();
5874 }
5875 return 0;
5876}
5877
5878union mc_target {
5879 struct folio *folio;
5880 swp_entry_t ent;
5881};
5882
5883enum mc_target_type {
5884 MC_TARGET_NONE = 0,
5885 MC_TARGET_PAGE,
5886 MC_TARGET_SWAP,
5887 MC_TARGET_DEVICE,
5888};
5889
5890static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5891 unsigned long addr, pte_t ptent)
5892{
5893 struct page *page = vm_normal_page(vma, addr, ptent);
5894
5895 if (!page)
5896 return NULL;
5897 if (PageAnon(page)) {
5898 if (!(mc.flags & MOVE_ANON))
5899 return NULL;
5900 } else {
5901 if (!(mc.flags & MOVE_FILE))
5902 return NULL;
5903 }
5904 get_page(page);
5905
5906 return page;
5907}
5908
5909#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5910static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5911 pte_t ptent, swp_entry_t *entry)
5912{
5913 struct page *page = NULL;
5914 swp_entry_t ent = pte_to_swp_entry(ptent);
5915
5916 if (!(mc.flags & MOVE_ANON))
5917 return NULL;
5918
5919 /*
5920 * Handle device private pages that are not accessible by the CPU, but
5921 * stored as special swap entries in the page table.
5922 */
5923 if (is_device_private_entry(ent)) {
5924 page = pfn_swap_entry_to_page(ent);
5925 if (!get_page_unless_zero(page))
5926 return NULL;
5927 return page;
5928 }
5929
5930 if (non_swap_entry(ent))
5931 return NULL;
5932
5933 /*
 5934	 * Because swap_cache_get_folio() updates some statistics counters,
 5935	 * we call find_get_page() on the swap address space directly.
5936 */
5937 page = find_get_page(swap_address_space(ent), swp_offset(ent));
5938 entry->val = ent.val;
5939
5940 return page;
5941}
5942#else
5943static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5944 pte_t ptent, swp_entry_t *entry)
5945{
5946 return NULL;
5947}
5948#endif
5949
5950static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5951 unsigned long addr, pte_t ptent)
5952{
5953 unsigned long index;
5954 struct folio *folio;
5955
5956 if (!vma->vm_file) /* anonymous vma */
5957 return NULL;
5958 if (!(mc.flags & MOVE_FILE))
5959 return NULL;
5960
 5961	/* The folio is moved even if it's not RSS of this task (page-faulted). */
5962 /* shmem/tmpfs may report page out on swap: account for that too. */
5963 index = linear_page_index(vma, addr);
5964 folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index);
5965 if (IS_ERR(folio))
5966 return NULL;
5967 return folio_file_page(folio, index);
5968}
5969
5970/**
5971 * mem_cgroup_move_account - move account of the folio
5972 * @folio: The folio.
5973 * @compound: charge the page as compound or small page
5974 * @from: mem_cgroup which the folio is moved from.
5975 * @to: mem_cgroup which the folio is moved to. @from != @to.
5976 *
5977 * The folio must be locked and not on the LRU.
5978 *
 5979	 * This function doesn't "charge" the new cgroup and doesn't "uncharge"
 5980	 * the old cgroup.
5981 */
5982static int mem_cgroup_move_account(struct folio *folio,
5983 bool compound,
5984 struct mem_cgroup *from,
5985 struct mem_cgroup *to)
5986{
5987 struct lruvec *from_vec, *to_vec;
5988 struct pglist_data *pgdat;
5989 unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
5990 int nid, ret;
5991
5992 VM_BUG_ON(from == to);
5993 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5994 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
5995 VM_BUG_ON(compound && !folio_test_large(folio));
5996
5997 ret = -EINVAL;
5998 if (folio_memcg(folio) != from)
5999 goto out;
6000
6001 pgdat = folio_pgdat(folio);
6002 from_vec = mem_cgroup_lruvec(from, pgdat);
6003 to_vec = mem_cgroup_lruvec(to, pgdat);
6004
6005 folio_memcg_lock(folio);
6006
6007 if (folio_test_anon(folio)) {
6008 if (folio_mapped(folio)) {
6009 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
6010 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
6011 if (folio_test_pmd_mappable(folio)) {
6012 __mod_lruvec_state(from_vec, NR_ANON_THPS,
6013 -nr_pages);
6014 __mod_lruvec_state(to_vec, NR_ANON_THPS,
6015 nr_pages);
6016 }
6017 }
6018 } else {
6019 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
6020 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
6021
6022 if (folio_test_swapbacked(folio)) {
6023 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
6024 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
6025 }
6026
6027 if (folio_mapped(folio)) {
6028 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
6029 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
6030 }
6031
6032 if (folio_test_dirty(folio)) {
6033 struct address_space *mapping = folio_mapping(folio);
6034
6035 if (mapping_can_writeback(mapping)) {
6036 __mod_lruvec_state(from_vec, NR_FILE_DIRTY,
6037 -nr_pages);
6038 __mod_lruvec_state(to_vec, NR_FILE_DIRTY,
6039 nr_pages);
6040 }
6041 }
6042 }
6043
6044#ifdef CONFIG_SWAP
6045 if (folio_test_swapcache(folio)) {
6046 __mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages);
6047 __mod_lruvec_state(to_vec, NR_SWAPCACHE, nr_pages);
6048 }
6049#endif
6050 if (folio_test_writeback(folio)) {
6051 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
6052 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
6053 }
6054
6055 /*
6056 * All state has been migrated, let's switch to the new memcg.
6057 *
6058 * It is safe to change page's memcg here because the page
6059 * is referenced, charged, isolated, and locked: we can't race
6060 * with (un)charging, migration, LRU putback, or anything else
6061 * that would rely on a stable page's memory cgroup.
6062 *
6063 * Note that folio_memcg_lock is a memcg lock, not a page lock,
6064 * to save space. As soon as we switch page's memory cgroup to a
6065 * new memcg that isn't locked, the above state can change
6066 * concurrently again. Make sure we're truly done with it.
6067 */
6068 smp_mb();
6069
6070 css_get(&to->css);
6071 css_put(&from->css);
6072
6073 folio->memcg_data = (unsigned long)to;
6074
6075 __folio_memcg_unlock(from);
6076
6077 ret = 0;
6078 nid = folio_nid(folio);
6079
6080 local_irq_disable();
6081 mem_cgroup_charge_statistics(to, nr_pages);
6082 memcg_check_events(to, nid);
6083 mem_cgroup_charge_statistics(from, -nr_pages);
6084 memcg_check_events(from, nid);
6085 local_irq_enable();
6086out:
6087 return ret;
6088}
6089
6090/**
6091 * get_mctgt_type - get target type of moving charge
6092 * @vma: the vma the pte to be checked belongs
6093 * @addr: the address corresponding to the pte to be checked
6094 * @ptent: the pte to be checked
 6095	 * @target: the pointer where the target folio or swap entry is stored (can be NULL)
6096 *
6097 * Context: Called with pte lock held.
6098 * Return:
6099 * * MC_TARGET_NONE - If the pte is not a target for move charge.
6100 * * MC_TARGET_PAGE - If the page corresponding to this pte is a target for
6101 * move charge. If @target is not NULL, the folio is stored in target->folio
 6102	 *   with an extra refcount taken (the caller should release it).
6103 * * MC_TARGET_SWAP - If the swap entry corresponding to this pte is a
6104 * target for charge migration. If @target is not NULL, the entry is
6105 * stored in target->ent.
6106 * * MC_TARGET_DEVICE - Like MC_TARGET_PAGE but page is device memory and
 6107	 *   thus not on the LRU. For now such a page is charged like a regular page
 6108	 *   would be, as it is just special memory taking the place of a regular page.
 6109	 *   See Documentation/mm/hmm.rst and include/linux/hmm.h
6110 */
6111static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
6112 unsigned long addr, pte_t ptent, union mc_target *target)
6113{
6114 struct page *page = NULL;
6115 struct folio *folio;
6116 enum mc_target_type ret = MC_TARGET_NONE;
6117 swp_entry_t ent = { .val = 0 };
6118
6119 if (pte_present(ptent))
6120 page = mc_handle_present_pte(vma, addr, ptent);
6121 else if (pte_none_mostly(ptent))
6122 /*
6123 * PTE markers should be treated as a none pte here, separated
6124 * from other swap handling below.
6125 */
6126 page = mc_handle_file_pte(vma, addr, ptent);
6127 else if (is_swap_pte(ptent))
6128 page = mc_handle_swap_pte(vma, ptent, &ent);
6129
6130 if (page)
6131 folio = page_folio(page);
6132 if (target && page) {
6133 if (!folio_trylock(folio)) {
6134 folio_put(folio);
6135 return ret;
6136 }
6137 /*
6138 * page_mapped() must be stable during the move. This
6139 * pte is locked, so if it's present, the page cannot
6140 * become unmapped. If it isn't, we have only partial
6141 * control over the mapped state: the page lock will
6142 * prevent new faults against pagecache and swapcache,
6143 * so an unmapped page cannot become mapped. However,
6144 * if the page is already mapped elsewhere, it can
6145 * unmap, and there is nothing we can do about it.
6146 * Alas, skip moving the page in this case.
6147 */
6148 if (!pte_present(ptent) && page_mapped(page)) {
6149 folio_unlock(folio);
6150 folio_put(folio);
6151 return ret;
6152 }
6153 }
6154
6155 if (!page && !ent.val)
6156 return ret;
6157 if (page) {
6158 /*
 6159		 * Do only a loose check without serialization.
 6160		 * mem_cgroup_move_account() checks whether the page is
 6161		 * valid under LRU exclusion.
6162 */
6163 if (folio_memcg(folio) == mc.from) {
6164 ret = MC_TARGET_PAGE;
6165 if (folio_is_device_private(folio) ||
6166 folio_is_device_coherent(folio))
6167 ret = MC_TARGET_DEVICE;
6168 if (target)
6169 target->folio = folio;
6170 }
6171 if (!ret || !target) {
6172 if (target)
6173 folio_unlock(folio);
6174 folio_put(folio);
6175 }
6176 }
6177 /*
6178 * There is a swap entry and a page doesn't exist or isn't charged.
6179 * But we cannot move a tail-page in a THP.
6180 */
6181 if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
6182 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
6183 ret = MC_TARGET_SWAP;
6184 if (target)
6185 target->ent = ent;
6186 }
6187 return ret;
6188}
6189
6190#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6191/*
6192 * We don't consider PMD mapped swapping or file mapped pages because THP does
6193 * not support them for now.
6194 * Caller should make sure that pmd_trans_huge(pmd) is true.
6195 */
6196static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6197 unsigned long addr, pmd_t pmd, union mc_target *target)
6198{
6199 struct page *page = NULL;
6200 struct folio *folio;
6201 enum mc_target_type ret = MC_TARGET_NONE;
6202
6203 if (unlikely(is_swap_pmd(pmd))) {
6204 VM_BUG_ON(thp_migration_supported() &&
6205 !is_pmd_migration_entry(pmd));
6206 return ret;
6207 }
6208 page = pmd_page(pmd);
6209 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
6210 folio = page_folio(page);
6211 if (!(mc.flags & MOVE_ANON))
6212 return ret;
6213 if (folio_memcg(folio) == mc.from) {
6214 ret = MC_TARGET_PAGE;
6215 if (target) {
6216 folio_get(folio);
6217 if (!folio_trylock(folio)) {
6218 folio_put(folio);
6219 return MC_TARGET_NONE;
6220 }
6221 target->folio = folio;
6222 }
6223 }
6224 return ret;
6225}
6226#else
6227static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6228 unsigned long addr, pmd_t pmd, union mc_target *target)
6229{
6230 return MC_TARGET_NONE;
6231}
6232#endif
6233
6234static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
6235 unsigned long addr, unsigned long end,
6236 struct mm_walk *walk)
6237{
6238 struct vm_area_struct *vma = walk->vma;
6239 pte_t *pte;
6240 spinlock_t *ptl;
6241
6242 ptl = pmd_trans_huge_lock(pmd, vma);
6243 if (ptl) {
6244 /*
 6245		 * Note there cannot be MC_TARGET_DEVICE for now, as we do not
 6246		 * support transparent huge pages with MEMORY_DEVICE_PRIVATE,
 6247		 * but this might change.
6248 */
6249 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
6250 mc.precharge += HPAGE_PMD_NR;
6251 spin_unlock(ptl);
6252 return 0;
6253 }
6254
6255 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6256 if (!pte)
6257 return 0;
6258 for (; addr != end; pte++, addr += PAGE_SIZE)
6259 if (get_mctgt_type(vma, addr, ptep_get(pte), NULL))
6260 mc.precharge++; /* increment precharge temporarily */
6261 pte_unmap_unlock(pte - 1, ptl);
6262 cond_resched();
6263
6264 return 0;
6265}
6266
6267static const struct mm_walk_ops precharge_walk_ops = {
6268 .pmd_entry = mem_cgroup_count_precharge_pte_range,
6269 .walk_lock = PGWALK_RDLOCK,
6270};
6271
6272static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
6273{
6274 unsigned long precharge;
6275
6276 mmap_read_lock(mm);
6277 walk_page_range(mm, 0, ULONG_MAX, &precharge_walk_ops, NULL);
6278 mmap_read_unlock(mm);
6279
6280 precharge = mc.precharge;
6281 mc.precharge = 0;
6282
6283 return precharge;
6284}
6285
6286static int mem_cgroup_precharge_mc(struct mm_struct *mm)
6287{
6288 unsigned long precharge = mem_cgroup_count_precharge(mm);
6289
6290 VM_BUG_ON(mc.moving_task);
6291 mc.moving_task = current;
6292 return mem_cgroup_do_precharge(precharge);
6293}
6294
6295/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6296static void __mem_cgroup_clear_mc(void)
6297{
6298 struct mem_cgroup *from = mc.from;
6299 struct mem_cgroup *to = mc.to;
6300
6301 /* we must uncharge all the leftover precharges from mc.to */
6302 if (mc.precharge) {
6303 mem_cgroup_cancel_charge(mc.to, mc.precharge);
6304 mc.precharge = 0;
6305 }
6306 /*
6307 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6308 * we must uncharge here.
6309 */
6310 if (mc.moved_charge) {
6311 mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
6312 mc.moved_charge = 0;
6313 }
6314 /* we must fixup refcnts and charges */
6315 if (mc.moved_swap) {
6316 /* uncharge swap account from the old cgroup */
6317 if (!mem_cgroup_is_root(mc.from))
6318 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
6319
6320 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
6321
6322 /*
6323 * we charged both to->memory and to->memsw, so we
6324 * should uncharge to->memory.
6325 */
6326 if (!mem_cgroup_is_root(mc.to))
6327 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
6328
6329 mc.moved_swap = 0;
6330 }
6331 memcg_oom_recover(from);
6332 memcg_oom_recover(to);
6333 wake_up_all(&mc.waitq);
6334}
6335
6336static void mem_cgroup_clear_mc(void)
6337{
6338 struct mm_struct *mm = mc.mm;
6339
6340 /*
6341 * we must clear moving_task before waking up waiters at the end of
6342 * task migration.
6343 */
6344 mc.moving_task = NULL;
6345 __mem_cgroup_clear_mc();
6346 spin_lock(&mc.lock);
6347 mc.from = NULL;
6348 mc.to = NULL;
6349 mc.mm = NULL;
6350 spin_unlock(&mc.lock);
6351
6352 mmput(mm);
6353}
6354
6355static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6356{
6357 struct cgroup_subsys_state *css;
6358 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
6359 struct mem_cgroup *from;
6360 struct task_struct *leader, *p;
6361 struct mm_struct *mm;
6362 unsigned long move_flags;
6363 int ret = 0;
6364
6365 /* charge immigration isn't supported on the default hierarchy */
6366 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6367 return 0;
6368
6369 /*
6370 * Multi-process migrations only happen on the default hierarchy
6371 * where charge immigration is not used. Perform charge
6372 * immigration if @tset contains a leader and whine if there are
6373 * multiple.
6374 */
6375 p = NULL;
6376 cgroup_taskset_for_each_leader(leader, css, tset) {
6377 WARN_ON_ONCE(p);
6378 p = leader;
6379 memcg = mem_cgroup_from_css(css);
6380 }
6381 if (!p)
6382 return 0;
6383
6384 /*
6385 * We are now committed to this value whatever it is. Changes in this
6386 * tunable will only affect upcoming migrations, not the current one.
 6387	 * So save it here and stick with it for the rest of this migration.
6388 */
6389 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
6390 if (!move_flags)
6391 return 0;
6392
6393 from = mem_cgroup_from_task(p);
6394
6395 VM_BUG_ON(from == memcg);
6396
6397 mm = get_task_mm(p);
6398 if (!mm)
6399 return 0;
 6400	/* We move charges only when we move the owner of the mm */
6401 if (mm->owner == p) {
6402 VM_BUG_ON(mc.from);
6403 VM_BUG_ON(mc.to);
6404 VM_BUG_ON(mc.precharge);
6405 VM_BUG_ON(mc.moved_charge);
6406 VM_BUG_ON(mc.moved_swap);
6407
6408 spin_lock(&mc.lock);
6409 mc.mm = mm;
6410 mc.from = from;
6411 mc.to = memcg;
6412 mc.flags = move_flags;
6413 spin_unlock(&mc.lock);
6414 /* We set mc.moving_task later */
6415
6416 ret = mem_cgroup_precharge_mc(mm);
6417 if (ret)
6418 mem_cgroup_clear_mc();
6419 } else {
6420 mmput(mm);
6421 }
6422 return ret;
6423}
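
/*
 * Usage sketch (legacy cgroup v1 only, deprecated; assuming the v1 memory
 * controller is mounted at /sys/fs/cgroup/memory):
 *
 *   # enable moving of anon (bit 0) and file (bit 1) charges into B
 *   echo 3 > /sys/fs/cgroup/memory/B/memory.move_charge_at_immigrate
 *   # migrating the mm owner now pulls its charges from its old memcg
 *   echo $PID > /sys/fs/cgroup/memory/B/cgroup.procs
 *
 * The precharge taken in can_attach() is only a best-effort reservation;
 * any leftover is cancelled in mem_cgroup_clear_mc().
 */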
6424
6425static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6426{
6427 if (mc.to)
6428 mem_cgroup_clear_mc();
6429}
6430
6431static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6432 unsigned long addr, unsigned long end,
6433 struct mm_walk *walk)
6434{
6435 int ret = 0;
6436 struct vm_area_struct *vma = walk->vma;
6437 pte_t *pte;
6438 spinlock_t *ptl;
6439 enum mc_target_type target_type;
6440 union mc_target target;
6441 struct folio *folio;
6442
6443 ptl = pmd_trans_huge_lock(pmd, vma);
6444 if (ptl) {
6445 if (mc.precharge < HPAGE_PMD_NR) {
6446 spin_unlock(ptl);
6447 return 0;
6448 }
6449 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6450 if (target_type == MC_TARGET_PAGE) {
6451 folio = target.folio;
6452 if (folio_isolate_lru(folio)) {
6453 if (!mem_cgroup_move_account(folio, true,
6454 mc.from, mc.to)) {
6455 mc.precharge -= HPAGE_PMD_NR;
6456 mc.moved_charge += HPAGE_PMD_NR;
6457 }
6458 folio_putback_lru(folio);
6459 }
6460 folio_unlock(folio);
6461 folio_put(folio);
6462 } else if (target_type == MC_TARGET_DEVICE) {
6463 folio = target.folio;
6464 if (!mem_cgroup_move_account(folio, true,
6465 mc.from, mc.to)) {
6466 mc.precharge -= HPAGE_PMD_NR;
6467 mc.moved_charge += HPAGE_PMD_NR;
6468 }
6469 folio_unlock(folio);
6470 folio_put(folio);
6471 }
6472 spin_unlock(ptl);
6473 return 0;
6474 }
6475
6476retry:
6477 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6478 if (!pte)
6479 return 0;
6480 for (; addr != end; addr += PAGE_SIZE) {
6481 pte_t ptent = ptep_get(pte++);
6482 bool device = false;
6483 swp_entry_t ent;
6484
6485 if (!mc.precharge)
6486 break;
6487
6488 switch (get_mctgt_type(vma, addr, ptent, &target)) {
6489 case MC_TARGET_DEVICE:
6490 device = true;
6491 fallthrough;
6492 case MC_TARGET_PAGE:
6493 folio = target.folio;
6494 /*
 6495			 * We can have a part of a split pmd here. Moving it
 6496			 * could be done, but it would be too convoluted, so
 6497			 * simply ignore such a partial THP and keep it in the
 6498			 * original memcg. There should be somebody mapping the head.
6499 */
6500 if (folio_test_large(folio))
6501 goto put;
6502 if (!device && !folio_isolate_lru(folio))
6503 goto put;
6504 if (!mem_cgroup_move_account(folio, false,
6505 mc.from, mc.to)) {
6506 mc.precharge--;
6507 /* we uncharge from mc.from later. */
6508 mc.moved_charge++;
6509 }
6510 if (!device)
6511 folio_putback_lru(folio);
6512put: /* get_mctgt_type() gets & locks the page */
6513 folio_unlock(folio);
6514 folio_put(folio);
6515 break;
6516 case MC_TARGET_SWAP:
6517 ent = target.ent;
6518 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6519 mc.precharge--;
6520 mem_cgroup_id_get_many(mc.to, 1);
6521 /* we fixup other refcnts and charges later. */
6522 mc.moved_swap++;
6523 }
6524 break;
6525 default:
6526 break;
6527 }
6528 }
6529 pte_unmap_unlock(pte - 1, ptl);
6530 cond_resched();
6531
6532 if (addr != end) {
6533 /*
6534 * We have consumed all precharges we got in can_attach().
 6535		 * We try to charge one page at a time, but don't do any
 6536		 * additional charges to mc.to once a charge has failed during
 6537		 * this attach phase.
6538 */
6539 ret = mem_cgroup_do_precharge(1);
6540 if (!ret)
6541 goto retry;
6542 }
6543
6544 return ret;
6545}
6546
6547static const struct mm_walk_ops charge_walk_ops = {
6548 .pmd_entry = mem_cgroup_move_charge_pte_range,
6549 .walk_lock = PGWALK_RDLOCK,
6550};
6551
6552static void mem_cgroup_move_charge(void)
6553{
6554 lru_add_drain_all();
6555 /*
6556 * Signal folio_memcg_lock() to take the memcg's move_lock
6557 * while we're moving its pages to another memcg. Then wait
6558 * for already started RCU-only updates to finish.
6559 */
6560 atomic_inc(&mc.from->moving_account);
6561 synchronize_rcu();
6562retry:
6563 if (unlikely(!mmap_read_trylock(mc.mm))) {
6564 /*
 6565		 * Someone holding the mmap_lock might be waiting on the
 6566		 * waitq. So we cancel all extra charges, wake up all waiters,
6567 * and retry. Because we cancel precharges, we might not be able
6568 * to move enough charges, but moving charge is a best-effort
6569 * feature anyway, so it wouldn't be a big problem.
6570 */
6571 __mem_cgroup_clear_mc();
6572 cond_resched();
6573 goto retry;
6574 }
6575 /*
 6576	 * When we have consumed all precharges and failed to do an
 6577	 * additional charge, the page walk just aborts.
6578 */
6579 walk_page_range(mc.mm, 0, ULONG_MAX, &charge_walk_ops, NULL);
6580 mmap_read_unlock(mc.mm);
6581 atomic_dec(&mc.from->moving_account);
6582}
6583
6584static void mem_cgroup_move_task(void)
6585{
6586 if (mc.to) {
6587 mem_cgroup_move_charge();
6588 mem_cgroup_clear_mc();
6589 }
6590}
6591
6592#else /* !CONFIG_MMU */
6593static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6594{
6595 return 0;
6596}
6597static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6598{
6599}
6600static void mem_cgroup_move_task(void)
6601{
6602}
6603#endif
6604
6605#ifdef CONFIG_MEMCG_KMEM
6606static void mem_cgroup_fork(struct task_struct *task)
6607{
6608 /*
6609 * Set the update flag to cause task->objcg to be initialized lazily
6610 * on the first allocation. It can be done without any synchronization
 6611	 * because it's always performed on the current task, as is
 6612	 * current_objcg_update().
6613 */
6614 task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
6615}
6616
6617static void mem_cgroup_exit(struct task_struct *task)
6618{
6619 struct obj_cgroup *objcg = task->objcg;
6620
6621 objcg = (struct obj_cgroup *)
6622 ((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
6623 if (objcg)
6624 obj_cgroup_put(objcg);
6625
6626 /*
6627 * Some kernel allocations can happen after this point,
6628 * but let's ignore them. It can be done without any synchronization
 6629	 * because it's always performed on the current task, as is
 6630	 * current_objcg_update().
6631 */
6632 task->objcg = NULL;
6633}
6634#endif
6635
6636#ifdef CONFIG_LRU_GEN
6637static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
6638{
6639 struct task_struct *task;
6640 struct cgroup_subsys_state *css;
6641
6642 /* find the first leader if there is any */
6643 cgroup_taskset_for_each_leader(task, css, tset)
6644 break;
6645
6646 if (!task)
6647 return;
6648
6649 task_lock(task);
6650 if (task->mm && READ_ONCE(task->mm->owner) == task)
6651 lru_gen_migrate_mm(task->mm);
6652 task_unlock(task);
6653}
6654#else
6655static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
6656#endif /* CONFIG_LRU_GEN */
6657
6658#ifdef CONFIG_MEMCG_KMEM
6659static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
6660{
6661 struct task_struct *task;
6662 struct cgroup_subsys_state *css;
6663
6664 cgroup_taskset_for_each(task, css, tset) {
6665 /* atomically set the update bit */
6666 set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
6667 }
6668}
6669#else
6670static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset) {}
6671#endif /* CONFIG_MEMCG_KMEM */
6672
6673#if defined(CONFIG_LRU_GEN) || defined(CONFIG_MEMCG_KMEM)
6674static void mem_cgroup_attach(struct cgroup_taskset *tset)
6675{
6676 mem_cgroup_lru_gen_attach(tset);
6677 mem_cgroup_kmem_attach(tset);
6678}
6679#endif
6680
6681static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6682{
6683 if (value == PAGE_COUNTER_MAX)
6684 seq_puts(m, "max\n");
6685 else
6686 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6687
6688 return 0;
6689}
6690
6691static u64 memory_current_read(struct cgroup_subsys_state *css,
6692 struct cftype *cft)
6693{
6694 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6695
6696 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6697}
6698
6699static u64 memory_peak_read(struct cgroup_subsys_state *css,
6700 struct cftype *cft)
6701{
6702 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6703
6704 return (u64)memcg->memory.watermark * PAGE_SIZE;
6705}
6706
6707static int memory_min_show(struct seq_file *m, void *v)
6708{
6709 return seq_puts_memcg_tunable(m,
6710 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6711}
6712
6713static ssize_t memory_min_write(struct kernfs_open_file *of,
6714 char *buf, size_t nbytes, loff_t off)
6715{
6716 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6717 unsigned long min;
6718 int err;
6719
6720 buf = strstrip(buf);
6721 err = page_counter_memparse(buf, "max", &min);
6722 if (err)
6723 return err;
6724
6725 page_counter_set_min(&memcg->memory, min);
6726
6727 return nbytes;
6728}
6729
6730static int memory_low_show(struct seq_file *m, void *v)
6731{
6732 return seq_puts_memcg_tunable(m,
6733 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6734}
6735
6736static ssize_t memory_low_write(struct kernfs_open_file *of,
6737 char *buf, size_t nbytes, loff_t off)
6738{
6739 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6740 unsigned long low;
6741 int err;
6742
6743 buf = strstrip(buf);
6744 err = page_counter_memparse(buf, "max", &low);
6745 if (err)
6746 return err;
6747
6748 page_counter_set_low(&memcg->memory, low);
6749
6750 return nbytes;
6751}
6752
6753static int memory_high_show(struct seq_file *m, void *v)
6754{
6755 return seq_puts_memcg_tunable(m,
6756 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6757}
6758
6759static ssize_t memory_high_write(struct kernfs_open_file *of,
6760 char *buf, size_t nbytes, loff_t off)
6761{
6762 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6763 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6764 bool drained = false;
6765 unsigned long high;
6766 int err;
6767
6768 buf = strstrip(buf);
6769 err = page_counter_memparse(buf, "max", &high);
6770 if (err)
6771 return err;
6772
6773 page_counter_set_high(&memcg->memory, high);
6774
6775 for (;;) {
6776 unsigned long nr_pages = page_counter_read(&memcg->memory);
6777 unsigned long reclaimed;
6778
6779 if (nr_pages <= high)
6780 break;
6781
6782 if (signal_pending(current))
6783 break;
6784
6785 if (!drained) {
6786 drain_all_stock(memcg);
6787 drained = true;
6788 continue;
6789 }
6790
6791 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6792 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP);
6793
6794 if (!reclaimed && !nr_retries--)
6795 break;
6796 }
6797
6798 memcg_wb_domain_size_changed(memcg);
6799 return nbytes;
6800}
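
/*
 * Usage sketch (cgroup v2, illustrative value): lowering the throttle
 * limit reclaims synchronously toward the new value:
 *
 *   echo 512M > memory.high
 *
 * If usage currently exceeds 512M, the write above drains the per-cpu
 * stocks and retries reclaim up to MAX_RECLAIM_RETRIES times before
 * returning. Usage left above memory.high is not OOM-killed; future
 * allocations are throttled against the limit instead.
 */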
6801
6802static int memory_max_show(struct seq_file *m, void *v)
6803{
6804 return seq_puts_memcg_tunable(m,
6805 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6806}
6807
6808static ssize_t memory_max_write(struct kernfs_open_file *of,
6809 char *buf, size_t nbytes, loff_t off)
6810{
6811 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6812 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6813 bool drained = false;
6814 unsigned long max;
6815 int err;
6816
6817 buf = strstrip(buf);
6818 err = page_counter_memparse(buf, "max", &max);
6819 if (err)
6820 return err;
6821
6822 xchg(&memcg->memory.max, max);
6823
6824 for (;;) {
6825 unsigned long nr_pages = page_counter_read(&memcg->memory);
6826
6827 if (nr_pages <= max)
6828 break;
6829
6830 if (signal_pending(current))
6831 break;
6832
6833 if (!drained) {
6834 drain_all_stock(memcg);
6835 drained = true;
6836 continue;
6837 }
6838
6839 if (nr_reclaims) {
6840 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6841 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP))
6842 nr_reclaims--;
6843 continue;
6844 }
6845
6846 memcg_memory_event(memcg, MEMCG_OOM);
6847 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6848 break;
6849 }
6850
6851 memcg_wb_domain_size_changed(memcg);
6852 return nbytes;
6853}
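
/*
 * Usage sketch (cgroup v2, illustrative values): memory.max is the hard
 * limit. Writing a value below current usage reclaims synchronously, and
 * if reclaim cannot get below the new limit the OOM killer is invoked
 * within the cgroup:
 *
 *   echo 1G > memory.max	# hard-limit the group to 1 GiB
 *   echo max > memory.max	# remove the limit again
 */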
6854
6855/*
6856 * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
6857 * if any new events become available.
6858 */
6859static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6860{
6861 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6862 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6863 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6864 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6865 seq_printf(m, "oom_kill %lu\n",
6866 atomic_long_read(&events[MEMCG_OOM_KILL]));
6867 seq_printf(m, "oom_group_kill %lu\n",
6868 atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
6869}
6870
6871static int memory_events_show(struct seq_file *m, void *v)
6872{
6873 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6874
6875 __memory_events_show(m, memcg->memory_events);
6876 return 0;
6877}
6878
6879static int memory_events_local_show(struct seq_file *m, void *v)
6880{
6881 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6882
6883 __memory_events_show(m, memcg->memory_events_local);
6884 return 0;
6885}
6886
6887static int memory_stat_show(struct seq_file *m, void *v)
6888{
6889 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6890 char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
6891 struct seq_buf s;
6892
6893 if (!buf)
6894 return -ENOMEM;
6895 seq_buf_init(&s, buf, PAGE_SIZE);
6896 memory_stat_format(memcg, &s);
6897 seq_puts(m, buf);
6898 kfree(buf);
6899 return 0;
6900}
6901
6902#ifdef CONFIG_NUMA
6903static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6904 int item)
6905{
6906 return lruvec_page_state(lruvec, item) *
6907 memcg_page_state_output_unit(item);
6908}
6909
6910static int memory_numa_stat_show(struct seq_file *m, void *v)
6911{
6912 int i;
6913 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6914
6915 mem_cgroup_flush_stats(memcg);
6916
6917 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6918 int nid;
6919
6920 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6921 continue;
6922
6923 seq_printf(m, "%s", memory_stats[i].name);
6924 for_each_node_state(nid, N_MEMORY) {
6925 u64 size;
6926 struct lruvec *lruvec;
6927
6928 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6929 size = lruvec_page_state_output(lruvec,
6930 memory_stats[i].idx);
6931 seq_printf(m, " N%d=%llu", nid, size);
6932 }
6933 seq_putc(m, '\n');
6934 }
6935
6936 return 0;
6937}
6938#endif
6939
6940static int memory_oom_group_show(struct seq_file *m, void *v)
6941{
6942 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6943
6944 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
6945
6946 return 0;
6947}
6948
6949static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6950 char *buf, size_t nbytes, loff_t off)
6951{
6952 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6953 int ret, oom_group;
6954
6955 buf = strstrip(buf);
6956 if (!buf)
6957 return -EINVAL;
6958
6959 ret = kstrtoint(buf, 0, &oom_group);
6960 if (ret)
6961 return ret;
6962
6963 if (oom_group != 0 && oom_group != 1)
6964 return -EINVAL;
6965
6966 WRITE_ONCE(memcg->oom_group, oom_group);
6967
6968 return nbytes;
6969}
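
/*
 * Usage sketch (cgroup v2):
 *
 *   echo 1 > memory.oom.group
 *
 * With oom.group set, an OOM kill attributed to this memcg (or to an
 * ancestor that has it set) kills the cgroup as an indivisible workload
 * instead of picking a single victim task.
 */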
6970
6971static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
6972 size_t nbytes, loff_t off)
6973{
6974 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6975 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6976 unsigned long nr_to_reclaim, nr_reclaimed = 0;
6977 unsigned int reclaim_options;
6978 int err;
6979
6980 buf = strstrip(buf);
6981 err = page_counter_memparse(buf, "", &nr_to_reclaim);
6982 if (err)
6983 return err;
6984
6985 reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
6986 while (nr_reclaimed < nr_to_reclaim) {
6987 /* Will converge on zero, but reclaim enforces a minimum */
6988 unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4;
6989 unsigned long reclaimed;
6990
6991 if (signal_pending(current))
6992 return -EINTR;
6993
6994 /*
6995 * This is the final attempt, drain percpu lru caches in the
6996 * hope of introducing more evictable pages for
6997 * try_to_free_mem_cgroup_pages().
6998 */
6999 if (!nr_retries)
7000 lru_add_drain_all();
7001
7002 reclaimed = try_to_free_mem_cgroup_pages(memcg,
7003 batch_size, GFP_KERNEL, reclaim_options);
7004
7005 if (!reclaimed && !nr_retries--)
7006 return -EAGAIN;
7007
7008 nr_reclaimed += reclaimed;
7009 }
7010
7011 return nbytes;
7012}
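
/*
 * Usage sketch (cgroup v2, illustrative value): proactive reclaim can be
 * requested without changing any limits:
 *
 *   echo 256M > memory.reclaim
 *
 * The write returns once at least the requested amount has been
 * reclaimed, or -EAGAIN if a pass reclaims nothing and the retries are
 * exhausted; because of batching it may reclaim somewhat more than asked.
 */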
7013
7014static struct cftype memory_files[] = {
7015 {
7016 .name = "current",
7017 .flags = CFTYPE_NOT_ON_ROOT,
7018 .read_u64 = memory_current_read,
7019 },
7020 {
7021 .name = "peak",
7022 .flags = CFTYPE_NOT_ON_ROOT,
7023 .read_u64 = memory_peak_read,
7024 },
7025 {
7026 .name = "min",
7027 .flags = CFTYPE_NOT_ON_ROOT,
7028 .seq_show = memory_min_show,
7029 .write = memory_min_write,
7030 },
7031 {
7032 .name = "low",
7033 .flags = CFTYPE_NOT_ON_ROOT,
7034 .seq_show = memory_low_show,
7035 .write = memory_low_write,
7036 },
7037 {
7038 .name = "high",
7039 .flags = CFTYPE_NOT_ON_ROOT,
7040 .seq_show = memory_high_show,
7041 .write = memory_high_write,
7042 },
7043 {
7044 .name = "max",
7045 .flags = CFTYPE_NOT_ON_ROOT,
7046 .seq_show = memory_max_show,
7047 .write = memory_max_write,
7048 },
7049 {
7050 .name = "events",
7051 .flags = CFTYPE_NOT_ON_ROOT,
7052 .file_offset = offsetof(struct mem_cgroup, events_file),
7053 .seq_show = memory_events_show,
7054 },
7055 {
7056 .name = "events.local",
7057 .flags = CFTYPE_NOT_ON_ROOT,
7058 .file_offset = offsetof(struct mem_cgroup, events_local_file),
7059 .seq_show = memory_events_local_show,
7060 },
7061 {
7062 .name = "stat",
7063 .seq_show = memory_stat_show,
7064 },
7065#ifdef CONFIG_NUMA
7066 {
7067 .name = "numa_stat",
7068 .seq_show = memory_numa_stat_show,
7069 },
7070#endif
7071 {
7072 .name = "oom.group",
7073 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
7074 .seq_show = memory_oom_group_show,
7075 .write = memory_oom_group_write,
7076 },
7077 {
7078 .name = "reclaim",
7079 .flags = CFTYPE_NS_DELEGATABLE,
7080 .write = memory_reclaim,
7081 },
7082 { } /* terminate */
7083};
7084
7085struct cgroup_subsys memory_cgrp_subsys = {
7086 .css_alloc = mem_cgroup_css_alloc,
7087 .css_online = mem_cgroup_css_online,
7088 .css_offline = mem_cgroup_css_offline,
7089 .css_released = mem_cgroup_css_released,
7090 .css_free = mem_cgroup_css_free,
7091 .css_reset = mem_cgroup_css_reset,
7092 .css_rstat_flush = mem_cgroup_css_rstat_flush,
7093 .can_attach = mem_cgroup_can_attach,
7094#if defined(CONFIG_LRU_GEN) || defined(CONFIG_MEMCG_KMEM)
7095 .attach = mem_cgroup_attach,
7096#endif
7097 .cancel_attach = mem_cgroup_cancel_attach,
7098 .post_attach = mem_cgroup_move_task,
7099#ifdef CONFIG_MEMCG_KMEM
7100 .fork = mem_cgroup_fork,
7101 .exit = mem_cgroup_exit,
7102#endif
7103 .dfl_cftypes = memory_files,
7104 .legacy_cftypes = mem_cgroup_legacy_files,
7105 .early_init = 0,
7106};
7107
7108/*
7109 * This function calculates an individual cgroup's effective
7110 * protection which is derived from its own memory.min/low, its
7111 * parent's and siblings' settings, as well as the actual memory
7112 * distribution in the tree.
7113 *
7114 * The following rules apply to the effective protection values:
7115 *
7116 * 1. At the first level of reclaim, effective protection is equal to
7117 * the declared protection in memory.min and memory.low.
7118 *
7119 * 2. To enable safe delegation of the protection configuration, at
7120 * subsequent levels the effective protection is capped to the
7121 * parent's effective protection.
7122 *
7123 * 3. To make complex and dynamic subtrees easier to configure, the
7124 * user is allowed to overcommit the declared protection at a given
7125 * level. If that is the case, the parent's effective protection is
7126 * distributed to the children in proportion to how much protection
7127 * they have declared and how much of it they are utilizing.
7128 *
7129 * This makes distribution proportional, but also work-conserving:
 7130 *    if one cgroup claims much more protection than the memory it uses,
7131 * the unused remainder is available to its siblings.
7132 *
7133 * 4. Conversely, when the declared protection is undercommitted at a
7134 * given level, the distribution of the larger parental protection
7135 * budget is NOT proportional. A cgroup's protection from a sibling
7136 * is capped to its own memory.min/low setting.
7137 *
7138 * 5. However, to allow protecting recursive subtrees from each other
7139 * without having to declare each individual cgroup's fixed share
7140 * of the ancestor's claim to protection, any unutilized -
7141 * "floating" - protection from up the tree is distributed in
7142 * proportion to each cgroup's *usage*. This makes the protection
7143 * neutral wrt sibling cgroups and lets them compete freely over
7144 * the shared parental protection budget, but it protects the
7145 * subtree as a whole from neighboring subtrees.
7146 *
7147 * Note that 4. and 5. are not in conflict: 4. is about protecting
7148 * against immediate siblings whereas 5. is about protecting against
7149 * neighboring subtrees.
7150 */
7151static unsigned long effective_protection(unsigned long usage,
7152 unsigned long parent_usage,
7153 unsigned long setting,
7154 unsigned long parent_effective,
7155 unsigned long siblings_protected)
7156{
7157 unsigned long protected;
7158 unsigned long ep;
7159
7160 protected = min(usage, setting);
7161 /*
7162 * If all cgroups at this level combined claim and use more
7163 * protection than what the parent affords them, distribute
7164 * shares in proportion to utilization.
7165 *
7166 * We are using actual utilization rather than the statically
7167 * claimed protection in order to be work-conserving: claimed
7168 * but unused protection is available to siblings that would
7169 * otherwise get a smaller chunk than what they claimed.
7170 */
7171 if (siblings_protected > parent_effective)
7172 return protected * parent_effective / siblings_protected;
7173
7174 /*
7175 * Ok, utilized protection of all children is within what the
7176 * parent affords them, so we know whatever this child claims
7177 * and utilizes is effectively protected.
7178 *
7179 * If there is unprotected usage beyond this value, reclaim
7180 * will apply pressure in proportion to that amount.
7181 *
7182 * If there is unutilized protection, the cgroup will be fully
7183 * shielded from reclaim, but we do return a smaller value for
7184 * protection than what the group could enjoy in theory. This
7185 * is okay. With the overcommit distribution above, effective
7186 * protection is always dependent on how memory is actually
7187 * consumed among the siblings anyway.
7188 */
7189 ep = protected;
7190
7191 /*
7192 * If the children aren't claiming (all of) the protection
7193 * afforded to them by the parent, distribute the remainder in
7194 * proportion to the (unprotected) memory of each cgroup. That
7195 * way, cgroups that aren't explicitly prioritized wrt each
7196 * other compete freely over the allowance, but they are
7197 * collectively protected from neighboring trees.
7198 *
7199 * We're using unprotected memory for the weight so that if
7200 * some cgroups DO claim explicit protection, we don't protect
7201 * the same bytes twice.
7202 *
7203 * Check both usage and parent_usage against the respective
7204 * protected values. One should imply the other, but they
7205 * aren't read atomically - make sure the division is sane.
7206 */
7207 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
7208 return ep;
7209 if (parent_effective > siblings_protected &&
7210 parent_usage > siblings_protected &&
7211 usage > protected) {
7212 unsigned long unclaimed;
7213
7214 unclaimed = parent_effective - siblings_protected;
7215 unclaimed *= usage - protected;
7216 unclaimed /= parent_usage - siblings_protected;
7217
7218 ep += unclaimed;
7219 }
7220
7221 return ep;
7222}
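
/*
 * Worked example (made-up numbers): with parent_effective = 10G and two
 * children that each declare and fully use memory.min of 8G and 6G,
 * siblings_protected = 14G exceeds the parent's budget, so the 8G child
 * ends up with 8G * 10G / 14G ~= 5.7G of effective protection (rule 3).
 *
 * With the memory_recursiveprot mount option, take parent_effective =
 * 10G, siblings_protected = 4G and parent_usage = 12G: a child using 6G
 * with only 2G declared gets 2G + (10G - 4G) * (6G - 2G) / (12G - 4G)
 * = 5G (rule 5).
 */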
7223
7224/**
7225 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
7226 * @root: the top ancestor of the sub-tree being checked
7227 * @memcg: the memory cgroup to check
7228 *
7229 * WARNING: This function is not stateless! It can only be used as part
7230 * of a top-down tree iteration, not for isolated queries.
7231 */
7232void mem_cgroup_calculate_protection(struct mem_cgroup *root,
7233 struct mem_cgroup *memcg)
7234{
7235 unsigned long usage, parent_usage;
7236 struct mem_cgroup *parent;
7237
7238 if (mem_cgroup_disabled())
7239 return;
7240
7241 if (!root)
7242 root = root_mem_cgroup;
7243
7244 /*
7245 * Effective values of the reclaim targets are ignored so they
7246 * can be stale. Have a look at mem_cgroup_protection for more
7247 * details.
7248 * TODO: calculation should be more robust so that we do not need
7249 * that special casing.
7250 */
7251 if (memcg == root)
7252 return;
7253
7254 usage = page_counter_read(&memcg->memory);
7255 if (!usage)
7256 return;
7257
7258 parent = parent_mem_cgroup(memcg);
7259
7260 if (parent == root) {
7261 memcg->memory.emin = READ_ONCE(memcg->memory.min);
7262 memcg->memory.elow = READ_ONCE(memcg->memory.low);
7263 return;
7264 }
7265
7266 parent_usage = page_counter_read(&parent->memory);
7267
7268 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
7269 READ_ONCE(memcg->memory.min),
7270 READ_ONCE(parent->memory.emin),
7271 atomic_long_read(&parent->memory.children_min_usage)));
7272
7273 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
7274 READ_ONCE(memcg->memory.low),
7275 READ_ONCE(parent->memory.elow),
7276 atomic_long_read(&parent->memory.children_low_usage)));
7277}
7278
7279static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
7280 gfp_t gfp)
7281{
7282 int ret;
7283
7284 ret = try_charge(memcg, gfp, folio_nr_pages(folio));
7285 if (ret)
7286 goto out;
7287
7288 mem_cgroup_commit_charge(folio, memcg);
7289out:
7290 return ret;
7291}
7292
7293int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
7294{
7295 struct mem_cgroup *memcg;
7296 int ret;
7297
7298 memcg = get_mem_cgroup_from_mm(mm);
7299 ret = charge_memcg(folio, memcg, gfp);
7300 css_put(&memcg->css);
7301
7302 return ret;
7303}
7304
7305/**
7306 * mem_cgroup_hugetlb_try_charge - try to charge the memcg for a hugetlb folio
7307 * @memcg: memcg to charge.
7308 * @gfp: reclaim mode.
7309 * @nr_pages: number of pages to charge.
7310 *
7311 * This function is called when allocating a huge page folio to determine if
7312 * the memcg has the capacity for it. It does not commit the charge yet,
7313 * as the hugetlb folio itself has not been obtained from the hugetlb pool.
7314 *
7315 * Once we have obtained the hugetlb folio, we can call
7316 * mem_cgroup_commit_charge() to commit the charge. If we fail to obtain the
7317 * folio, we should instead call mem_cgroup_cancel_charge() to undo the effect
7318 * of try_charge().
7319 *
7320 * Returns 0 on success. Otherwise, an error code is returned.
7321 */
7322int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
7323 long nr_pages)
7324{
7325 /*
7326 * If hugetlb memcg charging is not enabled, do not fail hugetlb allocation,
 7327	 * but do not attempt to commit the charge later (or cancel it on error) either.
7328 */
7329 if (mem_cgroup_disabled() || !memcg ||
7330 !cgroup_subsys_on_dfl(memory_cgrp_subsys) ||
7331 !(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
7332 return -EOPNOTSUPP;
7333
7334 if (try_charge(memcg, gfp, nr_pages))
7335 return -ENOMEM;
7336
7337 return 0;
7338}
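
/*
 * Hedged sketch of the expected calling sequence in a hugetlb allocation
 * path (the pool-allocation step is a placeholder):
 *
 *	if (!mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages)) {
 *		folio = <dequeue a folio from the hugetlb pool>;
 *		if (folio)
 *			mem_cgroup_commit_charge(folio, memcg);
 *		else
 *			mem_cgroup_cancel_charge(memcg, nr_pages);
 *	}
 *
 * An -EOPNOTSUPP return means hugetlb accounting is disabled, in which
 * case neither the commit nor the cancel should be attempted.
 */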
7339
7340/**
7341 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
7342 * @folio: folio to charge.
7343 * @mm: mm context of the victim
7344 * @gfp: reclaim mode
7345 * @entry: swap entry for which the folio is allocated
7346 *
7347 * This function charges a folio allocated for swapin. Please call this before
7348 * adding the folio to the swapcache.
7349 *
7350 * Returns 0 on success. Otherwise, an error code is returned.
7351 */
7352int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
7353 gfp_t gfp, swp_entry_t entry)
7354{
7355 struct mem_cgroup *memcg;
7356 unsigned short id;
7357 int ret;
7358
7359 if (mem_cgroup_disabled())
7360 return 0;
7361
7362 id = lookup_swap_cgroup_id(entry);
7363 rcu_read_lock();
7364 memcg = mem_cgroup_from_id(id);
7365 if (!memcg || !css_tryget_online(&memcg->css))
7366 memcg = get_mem_cgroup_from_mm(mm);
7367 rcu_read_unlock();
7368
7369 ret = charge_memcg(folio, memcg, gfp);
7370
7371 css_put(&memcg->css);
7372 return ret;
7373}
7374
7375/*
7376 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
7377 * @entry: swap entry for which the page is charged
7378 *
7379 * Call this function after successfully adding the charged page to swapcache.
7380 *
 7381 * Note: This function assumes the page whose swap slot is being uncharged
 7382 * is an order-0 page.
7383 */
7384void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
7385{
7386 /*
7387 * Cgroup1's unified memory+swap counter has been charged with the
7388 * new swapcache page, finish the transfer by uncharging the swap
7389 * slot. The swap slot would also get uncharged when it dies, but
7390 * it can stick around indefinitely and we'd count the page twice
7391 * the entire time.
7392 *
7393 * Cgroup2 has separate resource counters for memory and swap,
7394 * so this is a non-issue here. Memory and swap charge lifetimes
7395 * correspond 1:1 to page and swap slot lifetimes: we charge the
7396 * page to memory here, and uncharge swap when the slot is freed.
7397 */
7398 if (!mem_cgroup_disabled() && do_memsw_account()) {
7399 /*
7400 * The swap entry might not get freed for a long time,
7401 * let's not wait for it. The page already received a
7402 * memory+swap charge, drop the swap entry duplicate.
7403 */
7404 mem_cgroup_uncharge_swap(entry, 1);
7405 }
7406}
7407
7408struct uncharge_gather {
7409 struct mem_cgroup *memcg;
7410 unsigned long nr_memory;
7411 unsigned long pgpgout;
7412 unsigned long nr_kmem;
7413 int nid;
7414};
7415
7416static inline void uncharge_gather_clear(struct uncharge_gather *ug)
7417{
7418 memset(ug, 0, sizeof(*ug));
7419}
7420
7421static void uncharge_batch(const struct uncharge_gather *ug)
7422{
7423 unsigned long flags;
7424
7425 if (ug->nr_memory) {
7426 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
7427 if (do_memsw_account())
7428 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
7429 if (ug->nr_kmem)
7430 memcg_account_kmem(ug->memcg, -ug->nr_kmem);
7431 memcg_oom_recover(ug->memcg);
7432 }
7433
7434 local_irq_save(flags);
7435 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
7436 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
7437 memcg_check_events(ug->memcg, ug->nid);
7438 local_irq_restore(flags);
7439
7440 /* drop reference from uncharge_folio */
7441 css_put(&ug->memcg->css);
7442}
7443
7444static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
7445{
7446 long nr_pages;
7447 struct mem_cgroup *memcg;
7448 struct obj_cgroup *objcg;
7449
7450 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7451
7452 /*
 7453	 * Nobody should be changing or seriously looking at
 7454	 * the folio's memcg or objcg at this point; we have fully
 7455	 * exclusive access to the folio.
7456 */
7457 if (folio_memcg_kmem(folio)) {
7458 objcg = __folio_objcg(folio);
7459 /*
7460 * This get matches the put at the end of the function and
7461 * kmem pages do not hold memcg references anymore.
7462 */
7463 memcg = get_mem_cgroup_from_objcg(objcg);
7464 } else {
7465 memcg = __folio_memcg(folio);
7466 }
7467
7468 if (!memcg)
7469 return;
7470
7471 if (ug->memcg != memcg) {
7472 if (ug->memcg) {
7473 uncharge_batch(ug);
7474 uncharge_gather_clear(ug);
7475 }
7476 ug->memcg = memcg;
7477 ug->nid = folio_nid(folio);
7478
7479 /* pairs with css_put in uncharge_batch */
7480 css_get(&memcg->css);
7481 }
7482
7483 nr_pages = folio_nr_pages(folio);
7484
7485 if (folio_memcg_kmem(folio)) {
7486 ug->nr_memory += nr_pages;
7487 ug->nr_kmem += nr_pages;
7488
7489 folio->memcg_data = 0;
7490 obj_cgroup_put(objcg);
7491 } else {
7492 /* LRU pages aren't accounted at the root level */
7493 if (!mem_cgroup_is_root(memcg))
7494 ug->nr_memory += nr_pages;
7495 ug->pgpgout++;
7496
7497 folio->memcg_data = 0;
7498 }
7499
7500 css_put(&memcg->css);
7501}
7502
7503void __mem_cgroup_uncharge(struct folio *folio)
7504{
7505 struct uncharge_gather ug;
7506
7507 /* Don't touch folio->lru of any random page, pre-check: */
7508 if (!folio_memcg(folio))
7509 return;
7510
7511 uncharge_gather_clear(&ug);
7512 uncharge_folio(folio, &ug);
7513 uncharge_batch(&ug);
7514}
7515
7516void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
7517{
7518 struct uncharge_gather ug;
7519 unsigned int i;
7520
7521 uncharge_gather_clear(&ug);
7522 for (i = 0; i < folios->nr; i++)
7523 uncharge_folio(folios->folios[i], &ug);
7524 if (ug.memcg)
7525 uncharge_batch(&ug);
7526}
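
/*
 * Illustrative note: when a released batch contains, say, 12 folios all
 * charged to the same memcg, the gather above folds them into a single
 * page_counter_uncharge() and one event flush instead of 12, starting a
 * new batch only when the owning memcg changes.
 */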
7527
7528/**
7529 * mem_cgroup_replace_folio - Charge a folio's replacement.
7530 * @old: Currently circulating folio.
7531 * @new: Replacement folio.
7532 *
7533 * Charge @new as a replacement folio for @old. @old will
7534 * be uncharged upon free. This is only used by the page cache
7535 * (in replace_page_cache_folio()).
7536 *
7537 * Both folios must be locked, @new->mapping must be set up.
7538 */
7539void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
7540{
7541 struct mem_cgroup *memcg;
7542 long nr_pages = folio_nr_pages(new);
7543 unsigned long flags;
7544
7545 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7546 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7547 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7548 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
7549
7550 if (mem_cgroup_disabled())
7551 return;
7552
7553 /* Page cache replacement: new folio already charged? */
7554 if (folio_memcg(new))
7555 return;
7556
7557 memcg = folio_memcg(old);
7558 VM_WARN_ON_ONCE_FOLIO(!memcg, old);
7559 if (!memcg)
7560 return;
7561
7562 /* Force-charge the new page. The old one will be freed soon */
7563 if (!mem_cgroup_is_root(memcg)) {
7564 page_counter_charge(&memcg->memory, nr_pages);
7565 if (do_memsw_account())
7566 page_counter_charge(&memcg->memsw, nr_pages);
7567 }
7568
7569 css_get(&memcg->css);
7570 commit_charge(new, memcg);
7571
7572 local_irq_save(flags);
7573 mem_cgroup_charge_statistics(memcg, nr_pages);
7574 memcg_check_events(memcg, folio_nid(new));
7575 local_irq_restore(flags);
7576}
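/*
 * Illustrative calling pattern, not part of the original file: this mirrors
 * roughly what replace_page_cache_folio() is expected to do before swapping
 * the folios in the page cache; the surrounding steps are assumptions:
 *
 *	folio_lock(old);
 *	folio_lock(new);
 *	new->mapping = old->mapping;
 *	new->index = old->index;
 *	mem_cgroup_replace_folio(old, new);
 *	... replace old with new in the mapping, then unlock both ...
 */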
7577
7578/**
7579 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
7580 * @old: Currently circulating folio.
7581 * @new: Replacement folio.
7582 *
7583 * Transfer the memcg data from the old folio to the new folio for migration.
7584 * The old folio's memcg data will be cleared. Note that the memory counters
7585 * will remain unchanged throughout the process.
7586 *
7587 * Both folios must be locked, @new->mapping must be set up.
7588 */
7589void mem_cgroup_migrate(struct folio *old, struct folio *new)
7590{
7591 struct mem_cgroup *memcg;
7592
7593 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7594 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7595 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7596 VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
7597
7598 if (mem_cgroup_disabled())
7599 return;
7600
7601 memcg = folio_memcg(old);
7602 /*
7603 * Note that it is normal to see !memcg for a hugetlb folio.
7604 * For example, it could have been allocated when memory_hugetlb_accounting
7605 * was not selected.
7606 */
7607 VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
7608 if (!memcg)
7609 return;
7610
7611 /* Transfer the charge and the css ref */
7612 commit_charge(new, memcg);
7613 /*
7614 * If the old folio is a large folio and is in the split queue, it needs
7615 * to be removed from the split queue now, to avoid looking up an incorrect
7616 * split queue in destroy_large_folio() after the memcg of the old folio
7617 * is cleared.
7618 *
7619 * In addition, the old folio is about to be freed after migration, so
7620 * removing it from the split queue a bit earlier seems reasonable.
7621 */
7622 if (folio_test_large(old) && folio_test_large_rmappable(old))
7623 folio_undo_large_rmappable(old);
7624 old->memcg_data = 0;
7625}
7626
7627DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
7628EXPORT_SYMBOL(memcg_sockets_enabled_key);
7629
7630void mem_cgroup_sk_alloc(struct sock *sk)
7631{
7632 struct mem_cgroup *memcg;
7633
7634 if (!mem_cgroup_sockets_enabled)
7635 return;
7636
7637 /* Do not associate the sock with unrelated interrupted task's memcg. */
7638 if (!in_task())
7639 return;
7640
7641 rcu_read_lock();
7642 memcg = mem_cgroup_from_task(current);
7643 if (mem_cgroup_is_root(memcg))
7644 goto out;
7645 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
7646 goto out;
7647 if (css_tryget(&memcg->css))
7648 sk->sk_memcg = memcg;
7649out:
7650 rcu_read_unlock();
7651}
7652
7653void mem_cgroup_sk_free(struct sock *sk)
7654{
7655 if (sk->sk_memcg)
7656 css_put(&sk->sk_memcg->css);
7657}
7658
7659/**
7660 * mem_cgroup_charge_skmem - charge socket memory
7661 * @memcg: memcg to charge
7662 * @nr_pages: number of pages to charge
7663 * @gfp_mask: reclaim mode
7664 *
7665 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7666 * @memcg's configured limit, %false if it doesn't.
7667 */
7668bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
7669 gfp_t gfp_mask)
7670{
7671 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7672 struct page_counter *fail;
7673
7674 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7675 memcg->tcpmem_pressure = 0;
7676 return true;
7677 }
7678 memcg->tcpmem_pressure = 1;
7679 if (gfp_mask & __GFP_NOFAIL) {
7680 page_counter_charge(&memcg->tcpmem, nr_pages);
7681 return true;
7682 }
7683 return false;
7684 }
7685
7686 if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
7687 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7688 return true;
7689 }
7690
7691 return false;
7692}
7693
7694/**
7695 * mem_cgroup_uncharge_skmem - uncharge socket memory
7696 * @memcg: memcg to uncharge
7697 * @nr_pages: number of pages to uncharge
7698 */
7699void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7700{
7701 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7702 page_counter_uncharge(&memcg->tcpmem, nr_pages);
7703 return;
7704 }
7705
7706 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7707
7708 refill_stock(memcg, nr_pages);
7709}
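/*
 * Illustrative sketch, not part of the original file: the network stack is
 * expected to pair these helpers around growth and shrinkage of a socket's
 * buffer memory; nr_pages and gfp stand in for whatever the caller computes:
 *
 *	if (sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages, gfp))
 *		... back off, the cgroup limit is hit ...
 *	...
 *	if (sk->sk_memcg)
 *		mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
 */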
7710
7711static int __init cgroup_memory(char *s)
7712{
7713 char *token;
7714
7715 while ((token = strsep(&s, ",")) != NULL) {
7716 if (!*token)
7717 continue;
7718 if (!strcmp(token, "nosocket"))
7719 cgroup_memory_nosocket = true;
7720 if (!strcmp(token, "nokmem"))
7721 cgroup_memory_nokmem = true;
7722 if (!strcmp(token, "nobpf"))
7723 cgroup_memory_nobpf = true;
7724 }
7725 return 1;
7726}
7727__setup("cgroup.memory=", cgroup_memory);
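/*
 * Example, not part of the original file: the options parsed above are
 * passed on the kernel command line, e.g.
 *
 *	cgroup.memory=nosocket,nokmem
 *
 * to disable socket memory and kernel memory accounting at boot.
 */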
7728
7729/*
7730 * subsys_initcall() for memory controller.
7731 *
7732 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7733 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7734 * basically everything that doesn't depend on a specific mem_cgroup structure
7735 * should be initialized from here.
7736 */
7737static int __init mem_cgroup_init(void)
7738{
7739 int cpu, node;
7740
7741 /*
7742 * Currently an s32 (see struct batched_lruvec_stat) is used for
7743 * per-memcg-per-cpu caching of per-node statistics. For this to work
7744 * correctly, the overfill threshold must not exceed
7745 * S32_MAX / PAGE_SIZE.
7746 */
7747 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7748
7749 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7750 memcg_hotplug_cpu_dead);
7751
7752 for_each_possible_cpu(cpu)
7753 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7754 drain_local_stock);
7755
7756 for_each_node(node) {
7757 struct mem_cgroup_tree_per_node *rtpn;
7758
7759 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, node);
7760
7761 rtpn->rb_root = RB_ROOT;
7762 rtpn->rb_rightmost = NULL;
7763 spin_lock_init(&rtpn->lock);
7764 soft_limit_tree.rb_tree_per_node[node] = rtpn;
7765 }
7766
7767 return 0;
7768}
7769subsys_initcall(mem_cgroup_init);
7770
7771#ifdef CONFIG_SWAP
7772static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7773{
7774 while (!refcount_inc_not_zero(&memcg->id.ref)) {
7775 /*
7776 * The root cgroup cannot be destroyed, so its refcount must
7777 * always be >= 1.
7778 */
7779 if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
7780 VM_BUG_ON(1);
7781 break;
7782 }
7783 memcg = parent_mem_cgroup(memcg);
7784 if (!memcg)
7785 memcg = root_mem_cgroup;
7786 }
7787 return memcg;
7788}
7789
7790/**
7791 * mem_cgroup_swapout - transfer a memsw charge to swap
7792 * @folio: folio whose memsw charge to transfer
7793 * @entry: swap entry to move the charge to
7794 *
7795 * Transfer the memsw charge of @folio to @entry.
7796 */
7797void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
7798{
7799 struct mem_cgroup *memcg, *swap_memcg;
7800 unsigned int nr_entries;
7801 unsigned short oldid;
7802
7803 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7804 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
7805
7806 if (mem_cgroup_disabled())
7807 return;
7808
7809 if (!do_memsw_account())
7810 return;
7811
7812 memcg = folio_memcg(folio);
7813
7814 VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7815 if (!memcg)
7816 return;
7817
7818 /*
7819 * In case the memcg owning these pages has been offlined and doesn't
7820 * have an ID allocated to it anymore, charge the closest online
7821 * ancestor for the swap instead and transfer the memory+swap charge.
7822 */
7823 swap_memcg = mem_cgroup_id_get_online(memcg);
7824 nr_entries = folio_nr_pages(folio);
7825 /* Get references for the tail pages, too */
7826 if (nr_entries > 1)
7827 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7828 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7829 nr_entries);
7830 VM_BUG_ON_FOLIO(oldid, folio);
7831 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7832
7833 folio->memcg_data = 0;
7834
7835 if (!mem_cgroup_is_root(memcg))
7836 page_counter_uncharge(&memcg->memory, nr_entries);
7837
7838 if (memcg != swap_memcg) {
7839 if (!mem_cgroup_is_root(swap_memcg))
7840 page_counter_charge(&swap_memcg->memsw, nr_entries);
7841 page_counter_uncharge(&memcg->memsw, nr_entries);
7842 }
7843
7844 /*
7845 * Interrupts should be disabled here because the caller holds the
7846 * i_pages lock which is taken with interrupts-off. It is
7847 * important here to have the interrupts disabled because it is the
7848 * only synchronisation we have for updating the per-CPU variables.
7849 */
7850 memcg_stats_lock();
7851 mem_cgroup_charge_statistics(memcg, -nr_entries);
7852 memcg_stats_unlock();
7853 memcg_check_events(memcg, folio_nid(folio));
7854
7855 css_put(&memcg->css);
7856}
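/*
 * Illustrative ordering sketch, not part of the original file: the caller
 * is expected to invoke this under the swap cache's i_pages lock, with the
 * folio unmapped and its reference count frozen, roughly:
 *
 *	xa_lock_irq(&mapping->i_pages);
 *	mem_cgroup_swapout(folio, entry);
 *	... delete the folio from the swap cache ...
 *	xa_unlock_irq(&mapping->i_pages);
 *
 * which is why the statistics update above can rely on interrupts being
 * disabled instead of taking another lock.
 */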
7857
7858/**
7859 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
7860 * @folio: folio being added to swap
7861 * @entry: swap entry to charge
7862 *
7863 * Try to charge @folio's memcg for the swap space at @entry.
7864 *
7865 * Returns 0 on success, -ENOMEM on failure.
7866 */
7867int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
7868{
7869 unsigned int nr_pages = folio_nr_pages(folio);
7870 struct page_counter *counter;
7871 struct mem_cgroup *memcg;
7872 unsigned short oldid;
7873
7874 if (do_memsw_account())
7875 return 0;
7876
7877 memcg = folio_memcg(folio);
7878
7879 VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7880 if (!memcg)
7881 return 0;
7882
7883 if (!entry.val) {
7884 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7885 return 0;
7886 }
7887
7888 memcg = mem_cgroup_id_get_online(memcg);
7889
7890 if (!mem_cgroup_is_root(memcg) &&
7891 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7892 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7893 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7894 mem_cgroup_id_put(memcg);
7895 return -ENOMEM;
7896 }
7897
7898 /* Get references for the tail pages, too */
7899 if (nr_pages > 1)
7900 mem_cgroup_id_get_many(memcg, nr_pages - 1);
7901 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7902 VM_BUG_ON_FOLIO(oldid, folio);
7903 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7904
7905 return 0;
7906}
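/*
 * Illustrative sketch, not part of the original file: swap-out is expected
 * to check the swap limit through the mem_cgroup_try_charge_swap() wrapper
 * before committing a folio to a freshly allocated swap slot:
 *
 *	entry = ... allocate a swap slot large enough for the folio ...;
 *	if (mem_cgroup_try_charge_swap(folio, entry)) {
 *		... over memory.swap.max: release the slot, keep the folio ...
 *	}
 */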
7907
7908/**
7909 * __mem_cgroup_uncharge_swap - uncharge swap space
7910 * @entry: swap entry to uncharge
7911 * @nr_pages: the amount of swap space to uncharge
7912 */
7913void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7914{
7915 struct mem_cgroup *memcg;
7916 unsigned short id;
7917
7918 id = swap_cgroup_record(entry, 0, nr_pages);
7919 rcu_read_lock();
7920 memcg = mem_cgroup_from_id(id);
7921 if (memcg) {
7922 if (!mem_cgroup_is_root(memcg)) {
7923 if (do_memsw_account())
7924 page_counter_uncharge(&memcg->memsw, nr_pages);
7925 else
7926 page_counter_uncharge(&memcg->swap, nr_pages);
7927 }
7928 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7929 mem_cgroup_id_put_many(memcg, nr_pages);
7930 }
7931 rcu_read_unlock();
7932}
7933
7934long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7935{
7936 long nr_swap_pages = get_nr_swap_pages();
7937
7938 if (mem_cgroup_disabled() || do_memsw_account())
7939 return nr_swap_pages;
7940 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
7941 nr_swap_pages = min_t(long, nr_swap_pages,
7942 READ_ONCE(memcg->swap.max) -
7943 page_counter_read(&memcg->swap));
7944 return nr_swap_pages;
7945}
7946
7947bool mem_cgroup_swap_full(struct folio *folio)
7948{
7949 struct mem_cgroup *memcg;
7950
7951 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
7952
7953 if (vm_swap_full())
7954 return true;
7955 if (do_memsw_account())
7956 return false;
7957
7958 memcg = folio_memcg(folio);
7959 if (!memcg)
7960 return false;
7961
7962 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
7963 unsigned long usage = page_counter_read(&memcg->swap);
7964
7965 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7966 usage * 2 >= READ_ONCE(memcg->swap.max))
7967 return true;
7968 }
7969
7970 return false;
7971}
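/*
 * Note, not part of the original file: the loop above treats a cgroup's
 * swap as "full" once usage reaches half of swap.high or swap.max anywhere
 * in the hierarchy. For example, with memory.swap.max = 512M the check
 * fires once roughly 256M of swap is in use by that subtree.
 */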
7972
7973static int __init setup_swap_account(char *s)
7974{
7975 bool res;
7976
7977 if (!kstrtobool(s, &res) && !res)
7978 pr_warn_once("The swapaccount=0 commandline option is deprecated "
7979 "in favor of configuring swap control via cgroupfs. "
7980 "Please report your usecase to linux-mm@kvack.org if you "
7981 "depend on this functionality.\n");
7982 return 1;
7983}
7984__setup("swapaccount=", setup_swap_account);
7985
7986static u64 swap_current_read(struct cgroup_subsys_state *css,
7987 struct cftype *cft)
7988{
7989 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7990
7991 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7992}
7993
7994static u64 swap_peak_read(struct cgroup_subsys_state *css,
7995 struct cftype *cft)
7996{
7997 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7998
7999 return (u64)memcg->swap.watermark * PAGE_SIZE;
8000}
8001
8002static int swap_high_show(struct seq_file *m, void *v)
8003{
8004 return seq_puts_memcg_tunable(m,
8005 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
8006}
8007
8008static ssize_t swap_high_write(struct kernfs_open_file *of,
8009 char *buf, size_t nbytes, loff_t off)
8010{
8011 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8012 unsigned long high;
8013 int err;
8014
8015 buf = strstrip(buf);
8016 err = page_counter_memparse(buf, "max", &high);
8017 if (err)
8018 return err;
8019
8020 page_counter_set_high(&memcg->swap, high);
8021
8022 return nbytes;
8023}
8024
8025static int swap_max_show(struct seq_file *m, void *v)
8026{
8027 return seq_puts_memcg_tunable(m,
8028 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
8029}
8030
8031static ssize_t swap_max_write(struct kernfs_open_file *of,
8032 char *buf, size_t nbytes, loff_t off)
8033{
8034 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8035 unsigned long max;
8036 int err;
8037
8038 buf = strstrip(buf);
8039 err = page_counter_memparse(buf, "max", &max);
8040 if (err)
8041 return err;
8042
8043 xchg(&memcg->swap.max, max);
8044
8045 return nbytes;
8046}
8047
8048static int swap_events_show(struct seq_file *m, void *v)
8049{
8050 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
8051
8052 seq_printf(m, "high %lu\n",
8053 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
8054 seq_printf(m, "max %lu\n",
8055 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
8056 seq_printf(m, "fail %lu\n",
8057 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
8058
8059 return 0;
8060}
8061
8062static struct cftype swap_files[] = {
8063 {
8064 .name = "swap.current",
8065 .flags = CFTYPE_NOT_ON_ROOT,
8066 .read_u64 = swap_current_read,
8067 },
8068 {
8069 .name = "swap.high",
8070 .flags = CFTYPE_NOT_ON_ROOT,
8071 .seq_show = swap_high_show,
8072 .write = swap_high_write,
8073 },
8074 {
8075 .name = "swap.max",
8076 .flags = CFTYPE_NOT_ON_ROOT,
8077 .seq_show = swap_max_show,
8078 .write = swap_max_write,
8079 },
8080 {
8081 .name = "swap.peak",
8082 .flags = CFTYPE_NOT_ON_ROOT,
8083 .read_u64 = swap_peak_read,
8084 },
8085 {
8086 .name = "swap.events",
8087 .flags = CFTYPE_NOT_ON_ROOT,
8088 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
8089 .seq_show = swap_events_show,
8090 },
8091 { } /* terminate */
8092};
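/*
 * Example, not part of the original file: on the default hierarchy the
 * files above appear with a "memory." prefix, so from userspace:
 *
 *	echo 1G  > /sys/fs/cgroup/<group>/memory.swap.max
 *	echo max > /sys/fs/cgroup/<group>/memory.swap.high
 *	cat /sys/fs/cgroup/<group>/memory.swap.current
 *
 * page_counter_memparse() accepts "max" as well as k/m/g suffixed sizes.
 */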
8093
8094static struct cftype memsw_files[] = {
8095 {
8096 .name = "memsw.usage_in_bytes",
8097 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
8098 .read_u64 = mem_cgroup_read_u64,
8099 },
8100 {
8101 .name = "memsw.max_usage_in_bytes",
8102 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
8103 .write = mem_cgroup_reset,
8104 .read_u64 = mem_cgroup_read_u64,
8105 },
8106 {
8107 .name = "memsw.limit_in_bytes",
8108 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
8109 .write = mem_cgroup_write,
8110 .read_u64 = mem_cgroup_read_u64,
8111 },
8112 {
8113 .name = "memsw.failcnt",
8114 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
8115 .write = mem_cgroup_reset,
8116 .read_u64 = mem_cgroup_read_u64,
8117 },
8118 { }, /* terminate */
8119};
8120
8121#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
8122/**
8123 * obj_cgroup_may_zswap - check if this cgroup can zswap
8124 * @objcg: the object cgroup
8125 *
8126 * Check if the hierarchical zswap limit has been reached.
8127 *
8128 * This doesn't check for specific headroom, and it is not atomic
8129 * either. But with zswap, the size of the allocation is only known
8130 * once compression has occurred, and this optimistic pre-check avoids
8131 * spending cycles on compression when there is already no room left
8132 * or zswap is disabled altogether somewhere in the hierarchy.
8133 */
8134bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
8135{
8136 struct mem_cgroup *memcg, *original_memcg;
8137 bool ret = true;
8138
8139 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8140 return true;
8141
8142 original_memcg = get_mem_cgroup_from_objcg(objcg);
8143 for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
8144 memcg = parent_mem_cgroup(memcg)) {
8145 unsigned long max = READ_ONCE(memcg->zswap_max);
8146 unsigned long pages;
8147
8148 if (max == PAGE_COUNTER_MAX)
8149 continue;
8150 if (max == 0) {
8151 ret = false;
8152 break;
8153 }
8154
8155 /*
8156 * mem_cgroup_flush_stats() ignores small changes. Use
8157 * do_flush_stats() directly to get accurate stats for charging.
8158 */
8159 do_flush_stats(memcg);
8160 pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
8161 if (pages < max)
8162 continue;
8163 ret = false;
8164 break;
8165 }
8166 mem_cgroup_put(original_memcg);
8167 return ret;
8168}
8169
8170/**
8171 * obj_cgroup_charge_zswap - charge compression backend memory
8172 * @objcg: the object cgroup
8173 * @size: size of compressed object
8174 *
8175 * This forces the charge after obj_cgroup_may_zswap() allowed
8176 * compression and storage in zswap for this cgroup to go ahead.
8177 */
8178void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
8179{
8180 struct mem_cgroup *memcg;
8181
8182 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8183 return;
8184
8185 VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
8186
8187 /* PF_MEMALLOC context, charging must succeed */
8188 if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
8189 VM_WARN_ON_ONCE(1);
8190
8191 rcu_read_lock();
8192 memcg = obj_cgroup_memcg(objcg);
8193 mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
8194 mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
8195 rcu_read_unlock();
8196}
8197
8198/**
8199 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
8200 * @objcg: the object cgroup
8201 * @size: size of compressed object
8202 *
8203 * Uncharges zswap memory on page in.
8204 */
8205void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
8206{
8207 struct mem_cgroup *memcg;
8208
8209 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8210 return;
8211
8212 obj_cgroup_uncharge(objcg, size);
8213
8214 rcu_read_lock();
8215 memcg = obj_cgroup_memcg(objcg);
8216 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
8217 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
8218 rcu_read_unlock();
8219}
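/*
 * Illustrative sketch, not part of the original file: a zswap store path is
 * expected to pair the three hooks above roughly as follows; the objcg
 * lookup and the surrounding logic are assumptions, only the
 * obj_cgroup_*_zswap() calls come from this file:
 *
 *	objcg = ... objcg of the folio being stored ...;
 *	if (objcg && !obj_cgroup_may_zswap(objcg))
 *		goto reject;			(hierarchy limit reached)
 *	... compress the page, learn compressed_size ...
 *	obj_cgroup_charge_zswap(objcg, compressed_size);
 *	...
 *	obj_cgroup_uncharge_zswap(objcg, compressed_size);	(on load/free)
 */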
8220
8221bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
8222{
8223 /* if zswap is disabled, do not block pages going to the swapping device */
8224 return !is_zswap_enabled() || !memcg || READ_ONCE(memcg->zswap_writeback);
8225}
8226
8227static u64 zswap_current_read(struct cgroup_subsys_state *css,
8228 struct cftype *cft)
8229{
8230 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
8231
8232 mem_cgroup_flush_stats(memcg);
8233 return memcg_page_state(memcg, MEMCG_ZSWAP_B);
8234}
8235
8236static int zswap_max_show(struct seq_file *m, void *v)
8237{
8238 return seq_puts_memcg_tunable(m,
8239 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
8240}
8241
8242static ssize_t zswap_max_write(struct kernfs_open_file *of,
8243 char *buf, size_t nbytes, loff_t off)
8244{
8245 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8246 unsigned long max;
8247 int err;
8248
8249 buf = strstrip(buf);
8250 err = page_counter_memparse(buf, "max", &max);
8251 if (err)
8252 return err;
8253
8254 xchg(&memcg->zswap_max, max);
8255
8256 return nbytes;
8257}
8258
8259static int zswap_writeback_show(struct seq_file *m, void *v)
8260{
8261 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
8262
8263 seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
8264 return 0;
8265}
8266
8267static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
8268 char *buf, size_t nbytes, loff_t off)
8269{
8270 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8271 int zswap_writeback;
8272 ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);
8273
8274 if (parse_ret)
8275 return parse_ret;
8276
8277 if (zswap_writeback != 0 && zswap_writeback != 1)
8278 return -EINVAL;
8279
8280 WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
8281 return nbytes;
8282}
8283
8284static struct cftype zswap_files[] = {
8285 {
8286 .name = "zswap.current",
8287 .flags = CFTYPE_NOT_ON_ROOT,
8288 .read_u64 = zswap_current_read,
8289 },
8290 {
8291 .name = "zswap.max",
8292 .flags = CFTYPE_NOT_ON_ROOT,
8293 .seq_show = zswap_max_show,
8294 .write = zswap_max_write,
8295 },
8296 {
8297 .name = "zswap.writeback",
8298 .seq_show = zswap_writeback_show,
8299 .write = zswap_writeback_write,
8300 },
8301 { } /* terminate */
8302};
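/*
 * Example, not part of the original file: the zswap knobs above are likewise
 * exposed with a "memory." prefix on the default hierarchy:
 *
 *	echo 100M > /sys/fs/cgroup/<group>/memory.zswap.max
 *	echo 0    > /sys/fs/cgroup/<group>/memory.zswap.writeback
 *	cat /sys/fs/cgroup/<group>/memory.zswap.current
 *
 * Writing 0 to memory.zswap.writeback prevents this cgroup's pages from
 * being written back from zswap to the backing swap device.
 */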
8303#endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
8304
8305static int __init mem_cgroup_swap_init(void)
8306{
8307 if (mem_cgroup_disabled())
8308 return 0;
8309
8310 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
8311 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
8312#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
8313 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
8314#endif
8315 return 0;
8316}
8317subsys_initcall(mem_cgroup_swap_init);
8318
8319#endif /* CONFIG_SWAP */