1/* memcontrol.c - Memory Controller
2 *
3 * Copyright IBM Corporation, 2007
4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5 *
6 * Copyright 2007 OpenVZ SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 *
9 * Memory thresholds
10 * Copyright (C) 2009 Nokia Corporation
11 * Author: Kirill A. Shutemov
12 *
13 * Kernel Memory Controller
14 * Copyright (C) 2012 Parallels Inc. and Google Inc.
15 * Authors: Glauber Costa and Suleiman Souhlal
16 *
17 * Native page reclaim
18 * Charge lifetime sanitation
19 * Lockless page tracking & accounting
20 * Unified hierarchy configuration model
21 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
22 *
23 * This program is free software; you can redistribute it and/or modify
24 * it under the terms of the GNU General Public License as published by
25 * the Free Software Foundation; either version 2 of the License, or
26 * (at your option) any later version.
27 *
28 * This program is distributed in the hope that it will be useful,
29 * but WITHOUT ANY WARRANTY; without even the implied warranty of
30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
31 * GNU General Public License for more details.
32 */
33
34#include <linux/page_counter.h>
35#include <linux/memcontrol.h>
36#include <linux/cgroup.h>
37#include <linux/mm.h>
38#include <linux/sched/mm.h>
39#include <linux/shmem_fs.h>
40#include <linux/hugetlb.h>
41#include <linux/pagemap.h>
42#include <linux/smp.h>
43#include <linux/page-flags.h>
44#include <linux/backing-dev.h>
45#include <linux/bit_spinlock.h>
46#include <linux/rcupdate.h>
47#include <linux/limits.h>
48#include <linux/export.h>
49#include <linux/mutex.h>
50#include <linux/rbtree.h>
51#include <linux/slab.h>
52#include <linux/swap.h>
53#include <linux/swapops.h>
54#include <linux/spinlock.h>
55#include <linux/eventfd.h>
56#include <linux/poll.h>
57#include <linux/sort.h>
58#include <linux/fs.h>
59#include <linux/seq_file.h>
60#include <linux/vmpressure.h>
61#include <linux/mm_inline.h>
62#include <linux/swap_cgroup.h>
63#include <linux/cpu.h>
64#include <linux/oom.h>
65#include <linux/lockdep.h>
66#include <linux/file.h>
67#include <linux/tracehook.h>
68#include "internal.h"
69#include <net/sock.h>
70#include <net/ip.h>
71#include "slab.h"
72
73#include <linux/uaccess.h>
74
75#include <trace/events/vmscan.h>
76
77struct cgroup_subsys memory_cgrp_subsys __read_mostly;
78EXPORT_SYMBOL(memory_cgrp_subsys);
79
80struct mem_cgroup *root_mem_cgroup __read_mostly;
81
82#define MEM_CGROUP_RECLAIM_RETRIES 5
83
84/* Socket memory accounting disabled? */
85static bool cgroup_memory_nosocket;
86
87/* Kernel memory accounting disabled? */
88static bool cgroup_memory_nokmem;
89
90/* Whether the swap controller is active */
91#ifdef CONFIG_MEMCG_SWAP
92int do_swap_account __read_mostly;
93#else
94#define do_swap_account 0
95#endif
96
97/* Whether legacy memory+swap accounting is active */
98static bool do_memsw_account(void)
99{
100 return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
101}
102
103static const char *const mem_cgroup_lru_names[] = {
104 "inactive_anon",
105 "active_anon",
106 "inactive_file",
107 "active_file",
108 "unevictable",
109};
110
111#define THRESHOLDS_EVENTS_TARGET 128
112#define SOFTLIMIT_EVENTS_TARGET 1024
113#define NUMAINFO_EVENTS_TARGET 1024
114
115/*
116 * Cgroups above their limits are maintained in an RB-tree, independent of
117 * their hierarchy representation
118 */
119
120struct mem_cgroup_tree_per_node {
121 struct rb_root rb_root;
122 struct rb_node *rb_rightmost;
123 spinlock_t lock;
124};
125
126struct mem_cgroup_tree {
127 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
128};
129
130static struct mem_cgroup_tree soft_limit_tree __read_mostly;
131
132/* for OOM */
133struct mem_cgroup_eventfd_list {
134 struct list_head list;
135 struct eventfd_ctx *eventfd;
136};
137
138/*
139 * cgroup_event represents events which userspace wants to receive.
140 */
141struct mem_cgroup_event {
142 /*
143 * memcg which the event belongs to.
144 */
145 struct mem_cgroup *memcg;
146 /*
147 * eventfd to signal userspace about the event.
148 */
149 struct eventfd_ctx *eventfd;
150 /*
151 * Each of these is stored in a list by the cgroup.
152 */
153 struct list_head list;
154 /*
155 * register_event() callback will be used to add a new userspace
156 * waiter for changes related to this event. Use eventfd_signal()
157 * on eventfd to send a notification to userspace.
158 */
159 int (*register_event)(struct mem_cgroup *memcg,
160 struct eventfd_ctx *eventfd, const char *args);
161 /*
162 * unregister_event() callback will be called when userspace closes
163 * the eventfd or when the cgroup is removed. This callback must be
164 * set if you want to provide notification functionality.
165 */
166 void (*unregister_event)(struct mem_cgroup *memcg,
167 struct eventfd_ctx *eventfd);
168 /*
169 * All fields below are needed to unregister the event when
170 * userspace closes the eventfd.
171 */
172 poll_table pt;
173 wait_queue_head_t *wqh;
174 wait_queue_entry_t wait;
175 struct work_struct remove;
176};
177
178static void mem_cgroup_threshold(struct mem_cgroup *memcg);
179static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
180
181/* Stuff for moving charges at task migration. */
182/*
183 * Types of charges to be moved.
184 */
185#define MOVE_ANON 0x1U
186#define MOVE_FILE 0x2U
187#define MOVE_MASK (MOVE_ANON | MOVE_FILE)
188
189/* "mc" and its members are protected by cgroup_mutex */
190static struct move_charge_struct {
191 spinlock_t lock; /* for from, to */
192 struct mm_struct *mm;
193 struct mem_cgroup *from;
194 struct mem_cgroup *to;
195 unsigned long flags;
196 unsigned long precharge;
197 unsigned long moved_charge;
198 unsigned long moved_swap;
199 struct task_struct *moving_task; /* a task moving charges */
200 wait_queue_head_t waitq; /* a waitq for other context */
201} mc = {
202 .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
203 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
204};
205
206/*
207 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
208 * limit reclaim to prevent infinite loops, if they ever occur.
209 */
210#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
211#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
212
213enum charge_type {
214 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
215 MEM_CGROUP_CHARGE_TYPE_ANON,
216 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
217 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */
218 NR_CHARGE_TYPE,
219};
220
221/* for encoding cft->private value on file */
222enum res_type {
223 _MEM,
224 _MEMSWAP,
225 _OOM_TYPE,
226 _KMEM,
227 _TCP,
228};
229
230#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
231#define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
232#define MEMFILE_ATTR(val) ((val) & 0xffff)
233/* Used for OOM notifier */
234#define OOM_CONTROL (0)
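/*
 * Illustration (added for clarity, not from the original source): the
 * cft->private value packs the resource type in the upper 16 bits and
 * the attribute in the lower 16 bits. For example,
 * MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL) evaluates to (_OOM_TYPE << 16) | 0,
 * and MEMFILE_TYPE()/MEMFILE_ATTR() recover _OOM_TYPE and OOM_CONTROL
 * from it.
 */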
235
236/* Some nice accessors for the vmpressure. */
237struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
238{
239 if (!memcg)
240 memcg = root_mem_cgroup;
241 return &memcg->vmpressure;
242}
243
244struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
245{
246 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
247}
248
249static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
250{
251 return (memcg == root_mem_cgroup);
252}
253
254#ifndef CONFIG_SLOB
255/*
256 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
257 * The main reason for not using the cgroup id for this is that it
258 * works better in sparse environments, where we have a lot of memcgs
259 * but only a few of them kmem-limited. For instance, if we have 200
260 * memcgs and none but the 200th is kmem-limited, we'd otherwise need
261 * a 200-entry array for it.
262 *
263 * The current size of the caches array is stored in memcg_nr_cache_ids. It
264 * will double each time we have to increase it.
265 */
266static DEFINE_IDA(memcg_cache_ida);
267int memcg_nr_cache_ids;
268
269/* Protects memcg_nr_cache_ids */
270static DECLARE_RWSEM(memcg_cache_ids_sem);
271
272void memcg_get_cache_ids(void)
273{
274 down_read(&memcg_cache_ids_sem);
275}
276
277void memcg_put_cache_ids(void)
278{
279 up_read(&memcg_cache_ids_sem);
280}
281
282/*
283 * MIN_SIZE is different from 1 because we would like to avoid going through
284 * the alloc/free process all the time. In a small machine, 4 kmem-limited
285 * cgroups is a reasonable guess. In the future, it could be a parameter or
286 * tunable, but that is strictly not necessary.
287 *
288 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
289 * this constant directly from cgroup, but it is understandable that this is
290 * better kept as an internal representation in cgroup.c. In any case, the
291 * cgrp_id space is not getting any smaller, and we don't have to necessarily
292 * increase ours as well if it increases.
293 */
294#define MEMCG_CACHES_MIN_SIZE 4
295#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
296
297/*
298 * A lot of the calls to the cache allocation functions are expected to be
299 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
300 * conditional on this static branch, we have to allow modules that do
301 * kmem_cache_alloc and the like to see this symbol as well.
302 */
303DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
304EXPORT_SYMBOL(memcg_kmem_enabled_key);
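/*
 * Sketch of how this static branch is typically consumed (the inline
 * helper is expected to live in memcontrol.h, not in this file):
 *
 *	if (static_branch_unlikely(&memcg_kmem_enabled_key))
 *		... account the kmem allocation to the memcg ...
 */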
305
306struct workqueue_struct *memcg_kmem_cache_wq;
307
308#endif /* !CONFIG_SLOB */
309
310/**
311 * mem_cgroup_css_from_page - css of the memcg associated with a page
312 * @page: page of interest
313 *
314 * If memcg is bound to the default hierarchy, css of the memcg associated
315 * with @page is returned. The returned css remains associated with @page
316 * until it is released.
317 *
318 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
319 * is returned.
320 */
321struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
322{
323 struct mem_cgroup *memcg;
324
325 memcg = page->mem_cgroup;
326
327 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
328 memcg = root_mem_cgroup;
329
330 return &memcg->css;
331}
332
333/**
334 * page_cgroup_ino - return inode number of the memcg a page is charged to
335 * @page: the page
336 *
337 * Look up the closest online ancestor of the memory cgroup @page is charged to
338 * and return its inode number or 0 if @page is not charged to any cgroup. It
339 * is safe to call this function without holding a reference to @page.
340 *
341 * Note, this function is inherently racy, because there is nothing to prevent
342 * the cgroup inode from getting torn down and potentially reallocated a moment
343 * after page_cgroup_ino() returns, so it only should be used by callers that
344 * do not care (such as procfs interfaces).
345 */
346ino_t page_cgroup_ino(struct page *page)
347{
348 struct mem_cgroup *memcg;
349 unsigned long ino = 0;
350
351 rcu_read_lock();
352 memcg = READ_ONCE(page->mem_cgroup);
353 while (memcg && !(memcg->css.flags & CSS_ONLINE))
354 memcg = parent_mem_cgroup(memcg);
355 if (memcg)
356 ino = cgroup_ino(memcg->css.cgroup);
357 rcu_read_unlock();
358 return ino;
359}
360
361static struct mem_cgroup_per_node *
362mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
363{
364 int nid = page_to_nid(page);
365
366 return memcg->nodeinfo[nid];
367}
368
369static struct mem_cgroup_tree_per_node *
370soft_limit_tree_node(int nid)
371{
372 return soft_limit_tree.rb_tree_per_node[nid];
373}
374
375static struct mem_cgroup_tree_per_node *
376soft_limit_tree_from_page(struct page *page)
377{
378 int nid = page_to_nid(page);
379
380 return soft_limit_tree.rb_tree_per_node[nid];
381}
382
383static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
384 struct mem_cgroup_tree_per_node *mctz,
385 unsigned long new_usage_in_excess)
386{
387 struct rb_node **p = &mctz->rb_root.rb_node;
388 struct rb_node *parent = NULL;
389 struct mem_cgroup_per_node *mz_node;
390 bool rightmost = true;
391
392 if (mz->on_tree)
393 return;
394
395 mz->usage_in_excess = new_usage_in_excess;
396 if (!mz->usage_in_excess)
397 return;
398 while (*p) {
399 parent = *p;
400 mz_node = rb_entry(parent, struct mem_cgroup_per_node,
401 tree_node);
402 if (mz->usage_in_excess < mz_node->usage_in_excess) {
403 p = &(*p)->rb_left;
404 rightmost = false;
405 }
406
407 /*
408 * We can't avoid mem cgroups that are over their soft
409 * limit by the same amount
410 */
411 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
412 p = &(*p)->rb_right;
413 }
414
415 if (rightmost)
416 mctz->rb_rightmost = &mz->tree_node;
417
418 rb_link_node(&mz->tree_node, parent, p);
419 rb_insert_color(&mz->tree_node, &mctz->rb_root);
420 mz->on_tree = true;
421}
422
423static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
424 struct mem_cgroup_tree_per_node *mctz)
425{
426 if (!mz->on_tree)
427 return;
428
429 if (&mz->tree_node == mctz->rb_rightmost)
430 mctz->rb_rightmost = rb_prev(&mz->tree_node);
431
432 rb_erase(&mz->tree_node, &mctz->rb_root);
433 mz->on_tree = false;
434}
435
436static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
437 struct mem_cgroup_tree_per_node *mctz)
438{
439 unsigned long flags;
440
441 spin_lock_irqsave(&mctz->lock, flags);
442 __mem_cgroup_remove_exceeded(mz, mctz);
443 spin_unlock_irqrestore(&mctz->lock, flags);
444}
445
446static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
447{
448 unsigned long nr_pages = page_counter_read(&memcg->memory);
449 unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
450 unsigned long excess = 0;
451
452 if (nr_pages > soft_limit)
453 excess = nr_pages - soft_limit;
454
455 return excess;
456}
457
458static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
459{
460 unsigned long excess;
461 struct mem_cgroup_per_node *mz;
462 struct mem_cgroup_tree_per_node *mctz;
463
464 mctz = soft_limit_tree_from_page(page);
465 if (!mctz)
466 return;
467 /*
468 * Necessary to update all ancestors when hierarchy is used,
469 * because their event counter is not touched.
470 */
471 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
472 mz = mem_cgroup_page_nodeinfo(memcg, page);
473 excess = soft_limit_excess(memcg);
474 /*
475 * We have to update the tree if mz is on RB-tree or
476 * mem is over its softlimit.
477 */
478 if (excess || mz->on_tree) {
479 unsigned long flags;
480
481 spin_lock_irqsave(&mctz->lock, flags);
482 /* if on-tree, remove it */
483 if (mz->on_tree)
484 __mem_cgroup_remove_exceeded(mz, mctz);
485 /*
486 * Insert again. mz->usage_in_excess will be updated.
487 * If excess is 0, no tree ops.
488 */
489 __mem_cgroup_insert_exceeded(mz, mctz, excess);
490 spin_unlock_irqrestore(&mctz->lock, flags);
491 }
492 }
493}
494
495static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
496{
497 struct mem_cgroup_tree_per_node *mctz;
498 struct mem_cgroup_per_node *mz;
499 int nid;
500
501 for_each_node(nid) {
502 mz = mem_cgroup_nodeinfo(memcg, nid);
503 mctz = soft_limit_tree_node(nid);
504 if (mctz)
505 mem_cgroup_remove_exceeded(mz, mctz);
506 }
507}
508
509static struct mem_cgroup_per_node *
510__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
511{
512 struct mem_cgroup_per_node *mz;
513
514retry:
515 mz = NULL;
516 if (!mctz->rb_rightmost)
517 goto done; /* Nothing to reclaim from */
518
519 mz = rb_entry(mctz->rb_rightmost,
520 struct mem_cgroup_per_node, tree_node);
521 /*
522 * Remove the node now but someone else can add it back;
523 * we will add it back at the end of reclaim to its correct
524 * position in the tree.
525 */
526 __mem_cgroup_remove_exceeded(mz, mctz);
527 if (!soft_limit_excess(mz->memcg) ||
528 !css_tryget_online(&mz->memcg->css))
529 goto retry;
530done:
531 return mz;
532}
533
534static struct mem_cgroup_per_node *
535mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
536{
537 struct mem_cgroup_per_node *mz;
538
539 spin_lock_irq(&mctz->lock);
540 mz = __mem_cgroup_largest_soft_limit_node(mctz);
541 spin_unlock_irq(&mctz->lock);
542 return mz;
543}
544
545static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
546 int event)
547{
548 return atomic_long_read(&memcg->events[event]);
549}
550
551static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
552 struct page *page,
553 bool compound, int nr_pages)
554{
555 /*
556 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
557 * counted as CACHE even if it's on ANON LRU.
558 */
559 if (PageAnon(page))
560 __mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
561 else {
562 __mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
563 if (PageSwapBacked(page))
564 __mod_memcg_state(memcg, NR_SHMEM, nr_pages);
565 }
566
567 if (compound) {
568 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
569 __mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
570 }
571
572 /* pagein of a big page is an event. So, ignore page size */
573 if (nr_pages > 0)
574 __count_memcg_events(memcg, PGPGIN, 1);
575 else {
576 __count_memcg_events(memcg, PGPGOUT, 1);
577 nr_pages = -nr_pages; /* for event */
578 }
579
580 __this_cpu_add(memcg->stat_cpu->nr_page_events, nr_pages);
581}
582
583unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
584 int nid, unsigned int lru_mask)
585{
586 struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
587 unsigned long nr = 0;
588 enum lru_list lru;
589
590 VM_BUG_ON((unsigned)nid >= nr_node_ids);
591
592 for_each_lru(lru) {
593 if (!(BIT(lru) & lru_mask))
594 continue;
595 nr += mem_cgroup_get_lru_size(lruvec, lru);
596 }
597 return nr;
598}
599
600static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
601 unsigned int lru_mask)
602{
603 unsigned long nr = 0;
604 int nid;
605
606 for_each_node_state(nid, N_MEMORY)
607 nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
608 return nr;
609}
610
611static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
612 enum mem_cgroup_events_target target)
613{
614 unsigned long val, next;
615
616 val = __this_cpu_read(memcg->stat_cpu->nr_page_events);
617 next = __this_cpu_read(memcg->stat_cpu->targets[target]);
618 /* from time_after() in jiffies.h */
619 if ((long)(next - val) < 0) {
620 switch (target) {
621 case MEM_CGROUP_TARGET_THRESH:
622 next = val + THRESHOLDS_EVENTS_TARGET;
623 break;
624 case MEM_CGROUP_TARGET_SOFTLIMIT:
625 next = val + SOFTLIMIT_EVENTS_TARGET;
626 break;
627 case MEM_CGROUP_TARGET_NUMAINFO:
628 next = val + NUMAINFO_EVENTS_TARGET;
629 break;
630 default:
631 break;
632 }
633 __this_cpu_write(memcg->stat_cpu->targets[target], next);
634 return true;
635 }
636 return false;
637}
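/*
 * Worked example (illustrative): with THRESHOLDS_EVENTS_TARGET == 128,
 * once the per-cpu nr_page_events counter passes the stored target this
 * returns true and advances the target by another 128 events, so the
 * threshold notifiers are evaluated at most once per 128 page-in/page-out
 * events on that cpu. The SOFTLIMIT and NUMAINFO targets work the same
 * way with 1024-event steps.
 */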
638
639/*
640 * Check events in order.
641 *
642 */
643static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
644{
645 /* threshold event is triggered in finer grain than soft limit */
646 if (unlikely(mem_cgroup_event_ratelimit(memcg,
647 MEM_CGROUP_TARGET_THRESH))) {
648 bool do_softlimit;
649 bool do_numainfo __maybe_unused;
650
651 do_softlimit = mem_cgroup_event_ratelimit(memcg,
652 MEM_CGROUP_TARGET_SOFTLIMIT);
653#if MAX_NUMNODES > 1
654 do_numainfo = mem_cgroup_event_ratelimit(memcg,
655 MEM_CGROUP_TARGET_NUMAINFO);
656#endif
657 mem_cgroup_threshold(memcg);
658 if (unlikely(do_softlimit))
659 mem_cgroup_update_tree(memcg, page);
660#if MAX_NUMNODES > 1
661 if (unlikely(do_numainfo))
662 atomic_inc(&memcg->numainfo_events);
663#endif
664 }
665}
666
667struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
668{
669 /*
670 * mm_update_next_owner() may clear mm->owner to NULL
671 * if it races with swapoff, page migration, etc.
672 * So this can be called with p == NULL.
673 */
674 if (unlikely(!p))
675 return NULL;
676
677 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
678}
679EXPORT_SYMBOL(mem_cgroup_from_task);
680
681static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
682{
683 struct mem_cgroup *memcg = NULL;
684
685 rcu_read_lock();
686 do {
687 /*
688 * Page cache insertions can happen without an
689 * actual mm context, e.g. during disk probing
690 * on boot, loopback IO, acct() writes etc.
691 */
692 if (unlikely(!mm))
693 memcg = root_mem_cgroup;
694 else {
695 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
696 if (unlikely(!memcg))
697 memcg = root_mem_cgroup;
698 }
699 } while (!css_tryget_online(&memcg->css));
700 rcu_read_unlock();
701 return memcg;
702}
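/*
 * Usage sketch (illustrative): the memcg returned above carries a css
 * reference taken via css_tryget_online(), so callers drop it when done:
 *
 *	memcg = get_mem_cgroup_from_mm(current->mm);
 *	... charge or reclaim against memcg ...
 *	css_put(&memcg->css);
 */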
703
704/**
705 * mem_cgroup_iter - iterate over memory cgroup hierarchy
706 * @root: hierarchy root
707 * @prev: previously returned memcg, NULL on first invocation
708 * @reclaim: cookie for shared reclaim walks, NULL for full walks
709 *
710 * Returns references to children of the hierarchy below @root, or
711 * @root itself, or %NULL after a full round-trip.
712 *
713 * Caller must pass the return value in @prev on subsequent
714 * invocations for reference counting, or use mem_cgroup_iter_break()
715 * to cancel a hierarchy walk before the round-trip is complete.
716 *
717 * Reclaimers can specify a node and a priority level in @reclaim to
718 * divide up the memcgs in the hierarchy among all concurrent
719 * reclaimers operating on the same node and priority.
720 */
721struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
722 struct mem_cgroup *prev,
723 struct mem_cgroup_reclaim_cookie *reclaim)
724{
725 struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
726 struct cgroup_subsys_state *css = NULL;
727 struct mem_cgroup *memcg = NULL;
728 struct mem_cgroup *pos = NULL;
729
730 if (mem_cgroup_disabled())
731 return NULL;
732
733 if (!root)
734 root = root_mem_cgroup;
735
736 if (prev && !reclaim)
737 pos = prev;
738
739 if (!root->use_hierarchy && root != root_mem_cgroup) {
740 if (prev)
741 goto out;
742 return root;
743 }
744
745 rcu_read_lock();
746
747 if (reclaim) {
748 struct mem_cgroup_per_node *mz;
749
750 mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
751 iter = &mz->iter[reclaim->priority];
752
753 if (prev && reclaim->generation != iter->generation)
754 goto out_unlock;
755
756 while (1) {
757 pos = READ_ONCE(iter->position);
758 if (!pos || css_tryget(&pos->css))
759 break;
760 /*
761 * css reference reached zero, so iter->position will
762 * be cleared by ->css_released. However, we should not
763 * rely on this happening soon, because ->css_released
764 * is called from a work queue, and by busy-waiting we
765 * might block it. So we clear iter->position right
766 * away.
767 */
768 (void)cmpxchg(&iter->position, pos, NULL);
769 }
770 }
771
772 if (pos)
773 css = &pos->css;
774
775 for (;;) {
776 css = css_next_descendant_pre(css, &root->css);
777 if (!css) {
778 /*
779 * Reclaimers share the hierarchy walk, and a
780 * new one might jump in right at the end of
781 * the hierarchy - make sure they see at least
782 * one group and restart from the beginning.
783 */
784 if (!prev)
785 continue;
786 break;
787 }
788
789 /*
790 * Verify the css and acquire a reference. The root
791 * is provided by the caller, so we know it's alive
792 * and kicking, and don't take an extra reference.
793 */
794 memcg = mem_cgroup_from_css(css);
795
796 if (css == &root->css)
797 break;
798
799 if (css_tryget(css))
800 break;
801
802 memcg = NULL;
803 }
804
805 if (reclaim) {
806 /*
807 * The position could have already been updated by a competing
808 * thread, so check that the value hasn't changed since we read
809 * it to avoid reclaiming from the same cgroup twice.
810 */
811 (void)cmpxchg(&iter->position, pos, memcg);
812
813 if (pos)
814 css_put(&pos->css);
815
816 if (!memcg)
817 iter->generation++;
818 else if (!prev)
819 reclaim->generation = iter->generation;
820 }
821
822out_unlock:
823 rcu_read_unlock();
824out:
825 if (prev && prev != root)
826 css_put(&prev->css);
827
828 return memcg;
829}
830
831/**
832 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
833 * @root: hierarchy root
834 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
835 */
836void mem_cgroup_iter_break(struct mem_cgroup *root,
837 struct mem_cgroup *prev)
838{
839 if (!root)
840 root = root_mem_cgroup;
841 if (prev && prev != root)
842 css_put(&prev->css);
843}
844
845static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
846{
847 struct mem_cgroup *memcg = dead_memcg;
848 struct mem_cgroup_reclaim_iter *iter;
849 struct mem_cgroup_per_node *mz;
850 int nid;
851 int i;
852
853 while ((memcg = parent_mem_cgroup(memcg))) {
854 for_each_node(nid) {
855 mz = mem_cgroup_nodeinfo(memcg, nid);
856 for (i = 0; i <= DEF_PRIORITY; i++) {
857 iter = &mz->iter[i];
858 cmpxchg(&iter->position,
859 dead_memcg, NULL);
860 }
861 }
862 }
863}
864
865/*
866 * Iteration constructs for visiting all cgroups (under a tree). If
867 * loops are exited prematurely (break), mem_cgroup_iter_break() must
868 * be used for reference counting.
869 */
870#define for_each_mem_cgroup_tree(iter, root) \
871 for (iter = mem_cgroup_iter(root, NULL, NULL); \
872 iter != NULL; \
873 iter = mem_cgroup_iter(root, iter, NULL))
874
875#define for_each_mem_cgroup(iter) \
876 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
877 iter != NULL; \
878 iter = mem_cgroup_iter(NULL, iter, NULL))
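/*
 * Example of the break protocol (sketch; should_stop() is a hypothetical
 * predicate used only for illustration):
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */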
879
880/**
881 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
882 * @memcg: hierarchy root
883 * @fn: function to call for each task
884 * @arg: argument passed to @fn
885 *
886 * This function iterates over tasks attached to @memcg or to any of its
887 * descendants and calls @fn for each task. If @fn returns a non-zero
888 * value, the function breaks the iteration loop and returns the value.
889 * Otherwise, it will iterate over all tasks and return 0.
890 *
891 * This function must not be called for the root memory cgroup.
892 */
893int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
894 int (*fn)(struct task_struct *, void *), void *arg)
895{
896 struct mem_cgroup *iter;
897 int ret = 0;
898
899 BUG_ON(memcg == root_mem_cgroup);
900
901 for_each_mem_cgroup_tree(iter, memcg) {
902 struct css_task_iter it;
903 struct task_struct *task;
904
905 css_task_iter_start(&iter->css, 0, &it);
906 while (!ret && (task = css_task_iter_next(&it)))
907 ret = fn(task, arg);
908 css_task_iter_end(&it);
909 if (ret) {
910 mem_cgroup_iter_break(memcg, iter);
911 break;
912 }
913 }
914 return ret;
915}
916
917/**
918 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
919 * @page: the page
920 * @pgdat: pgdat of the page
921 *
922 * This function is only safe when following the LRU page isolation
923 * and putback protocol: the LRU lock must be held, and the page must
924 * either be PageLRU() or the caller must have isolated/allocated it.
925 */
926struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
927{
928 struct mem_cgroup_per_node *mz;
929 struct mem_cgroup *memcg;
930 struct lruvec *lruvec;
931
932 if (mem_cgroup_disabled()) {
933 lruvec = &pgdat->lruvec;
934 goto out;
935 }
936
937 memcg = page->mem_cgroup;
938 /*
939 * Swapcache readahead pages are added to the LRU - and
940 * possibly migrated - before they are charged.
941 */
942 if (!memcg)
943 memcg = root_mem_cgroup;
944
945 mz = mem_cgroup_page_nodeinfo(memcg, page);
946 lruvec = &mz->lruvec;
947out:
948 /*
949 * Since a node can be onlined after the mem_cgroup was created,
950 * we have to be prepared to initialize lruvec->pgdat here;
951 * and if offlined then reonlined, we need to reinitialize it.
952 */
953 if (unlikely(lruvec->pgdat != pgdat))
954 lruvec->pgdat = pgdat;
955 return lruvec;
956}
957
958/**
959 * mem_cgroup_update_lru_size - account for adding or removing an lru page
960 * @lruvec: mem_cgroup per zone lru vector
961 * @lru: index of lru list the page is sitting on
962 * @zid: zone id of the accounted pages
963 * @nr_pages: positive when adding or negative when removing
964 *
965 * This function must be called under lru_lock, just before a page is added
966 * to or just after a page is removed from an lru list (that ordering being
967 * so as to allow it to check that lru_size 0 is consistent with list_empty).
968 */
969void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
970 int zid, int nr_pages)
971{
972 struct mem_cgroup_per_node *mz;
973 unsigned long *lru_size;
974 long size;
975
976 if (mem_cgroup_disabled())
977 return;
978
979 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
980 lru_size = &mz->lru_zone_size[zid][lru];
981
982 if (nr_pages < 0)
983 *lru_size += nr_pages;
984
985 size = *lru_size;
986 if (WARN_ONCE(size < 0,
987 "%s(%p, %d, %d): lru_size %ld\n",
988 __func__, lruvec, lru, nr_pages, size)) {
989 VM_BUG_ON(1);
990 *lru_size = 0;
991 }
992
993 if (nr_pages > 0)
994 *lru_size += nr_pages;
995}
996
997bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
998{
999 struct mem_cgroup *task_memcg;
1000 struct task_struct *p;
1001 bool ret;
1002
1003 p = find_lock_task_mm(task);
1004 if (p) {
1005 task_memcg = get_mem_cgroup_from_mm(p->mm);
1006 task_unlock(p);
1007 } else {
1008 /*
1009 * All threads may have already detached their mm's, but the oom
1010 * killer still needs to detect if they have already been oom
1011 * killed to prevent needlessly killing additional tasks.
1012 */
1013 rcu_read_lock();
1014 task_memcg = mem_cgroup_from_task(task);
1015 css_get(&task_memcg->css);
1016 rcu_read_unlock();
1017 }
1018 ret = mem_cgroup_is_descendant(task_memcg, memcg);
1019 css_put(&task_memcg->css);
1020 return ret;
1021}
1022
1023/**
1024 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1025 * @memcg: the memory cgroup
1026 *
1027 * Returns the maximum amount of memory @memcg can be charged with, in
1028 * pages.
1029 */
1030static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1031{
1032 unsigned long margin = 0;
1033 unsigned long count;
1034 unsigned long limit;
1035
1036 count = page_counter_read(&memcg->memory);
1037 limit = READ_ONCE(memcg->memory.limit);
1038 if (count < limit)
1039 margin = limit - count;
1040
1041 if (do_memsw_account()) {
1042 count = page_counter_read(&memcg->memsw);
1043 limit = READ_ONCE(memcg->memsw.limit);
1044 if (count <= limit)
1045 margin = min(margin, limit - count);
1046 else
1047 margin = 0;
1048 }
1049
1050 return margin;
1051}
1052
1053/*
1054 * A routine for checking whether "memcg" is under move_account() or not.
1055 *
1056 * Checks whether a cgroup is mc.from, mc.to, or in the hierarchy of the
1057 * moving cgroups. This is used for waiting at high memory pressure
1058 * caused by "move".
1059 */
1060static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1061{
1062 struct mem_cgroup *from;
1063 struct mem_cgroup *to;
1064 bool ret = false;
1065 /*
1066 * Unlike task_move routines, we access mc.to, mc.from not under
1067 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1068 */
1069 spin_lock(&mc.lock);
1070 from = mc.from;
1071 to = mc.to;
1072 if (!from)
1073 goto unlock;
1074
1075 ret = mem_cgroup_is_descendant(from, memcg) ||
1076 mem_cgroup_is_descendant(to, memcg);
1077unlock:
1078 spin_unlock(&mc.lock);
1079 return ret;
1080}
1081
1082static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1083{
1084 if (mc.moving_task && current != mc.moving_task) {
1085 if (mem_cgroup_under_move(memcg)) {
1086 DEFINE_WAIT(wait);
1087 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1088 /* moving charge context might have finished. */
1089 if (mc.moving_task)
1090 schedule();
1091 finish_wait(&mc.waitq, &wait);
1092 return true;
1093 }
1094 }
1095 return false;
1096}
1097
1098static const unsigned int memcg1_stats[] = {
1099 MEMCG_CACHE,
1100 MEMCG_RSS,
1101 MEMCG_RSS_HUGE,
1102 NR_SHMEM,
1103 NR_FILE_MAPPED,
1104 NR_FILE_DIRTY,
1105 NR_WRITEBACK,
1106 MEMCG_SWAP,
1107};
1108
1109static const char *const memcg1_stat_names[] = {
1110 "cache",
1111 "rss",
1112 "rss_huge",
1113 "shmem",
1114 "mapped_file",
1115 "dirty",
1116 "writeback",
1117 "swap",
1118};
1119
1120#define K(x) ((x) << (PAGE_SHIFT-10))
1121/**
1122 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
1123 * @memcg: The memory cgroup that went over limit
1124 * @p: Task that is going to be killed
1125 *
1126 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1127 * enabled
1128 */
1129void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1130{
1131 struct mem_cgroup *iter;
1132 unsigned int i;
1133
1134 rcu_read_lock();
1135
1136 if (p) {
1137 pr_info("Task in ");
1138 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1139 pr_cont(" killed as a result of limit of ");
1140 } else {
1141 pr_info("Memory limit reached of cgroup ");
1142 }
1143
1144 pr_cont_cgroup_path(memcg->css.cgroup);
1145 pr_cont("\n");
1146
1147 rcu_read_unlock();
1148
1149 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1150 K((u64)page_counter_read(&memcg->memory)),
1151 K((u64)memcg->memory.limit), memcg->memory.failcnt);
1152 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1153 K((u64)page_counter_read(&memcg->memsw)),
1154 K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
1155 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1156 K((u64)page_counter_read(&memcg->kmem)),
1157 K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
1158
1159 for_each_mem_cgroup_tree(iter, memcg) {
1160 pr_info("Memory cgroup stats for ");
1161 pr_cont_cgroup_path(iter->css.cgroup);
1162 pr_cont(":");
1163
1164 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
1165 if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account)
1166 continue;
1167 pr_cont(" %s:%luKB", memcg1_stat_names[i],
1168 K(memcg_page_state(iter, memcg1_stats[i])));
1169 }
1170
1171 for (i = 0; i < NR_LRU_LISTS; i++)
1172 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1173 K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1174
1175 pr_cont("\n");
1176 }
1177}
1178
1179/*
1180 * Return the memory (and swap, if configured) limit for a memcg.
1181 */
1182unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
1183{
1184 unsigned long limit;
1185
1186 limit = memcg->memory.limit;
1187 if (mem_cgroup_swappiness(memcg)) {
1188 unsigned long memsw_limit;
1189 unsigned long swap_limit;
1190
1191 memsw_limit = memcg->memsw.limit;
1192 swap_limit = memcg->swap.limit;
1193 swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
1194 limit = min(limit + swap_limit, memsw_limit);
1195 }
1196 return limit;
1197}
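/*
 * Worked example (illustrative numbers only): memory.limit = 100000 pages,
 * swap.limit unlimited, total_swap_pages = 50000, memsw.limit = 120000.
 * Then swap_limit = min(unlimited, 50000) = 50000 and the result is
 * min(100000 + 50000, 120000) = 120000 pages.
 */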
1198
1199static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1200 int order)
1201{
1202 struct oom_control oc = {
1203 .zonelist = NULL,
1204 .nodemask = NULL,
1205 .memcg = memcg,
1206 .gfp_mask = gfp_mask,
1207 .order = order,
1208 };
1209 bool ret;
1210
1211 mutex_lock(&oom_lock);
1212 ret = out_of_memory(&oc);
1213 mutex_unlock(&oom_lock);
1214 return ret;
1215}
1216
1217#if MAX_NUMNODES > 1
1218
1219/**
1220 * test_mem_cgroup_node_reclaimable
1221 * @memcg: the target memcg
1222 * @nid: the node ID to be checked.
1223 * @noswap: specify true here if the user wants file only information.
1224 *
1225 * This function returns whether the specified memcg contains any
1226 * reclaimable pages on a node. Returns true if there are any reclaimable
1227 * pages in the node.
1228 */
1229static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1230 int nid, bool noswap)
1231{
1232 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1233 return true;
1234 if (noswap || !total_swap_pages)
1235 return false;
1236 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1237 return true;
1238 return false;
1239
1240}
1241
1242/*
1243 * Always updating the nodemask is not very good - even if we have an empty
1244 * list or the wrong list here, we can start from some node and traverse all
1245 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1246 *
1247 */
1248static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1249{
1250 int nid;
1251 /*
1252 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1253 * pagein/pageout changes since the last update.
1254 */
1255 if (!atomic_read(&memcg->numainfo_events))
1256 return;
1257 if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1258 return;
1259
1260 /* make a nodemask where this memcg uses memory from */
1261 memcg->scan_nodes = node_states[N_MEMORY];
1262
1263 for_each_node_mask(nid, node_states[N_MEMORY]) {
1264
1265 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1266 node_clear(nid, memcg->scan_nodes);
1267 }
1268
1269 atomic_set(&memcg->numainfo_events, 0);
1270 atomic_set(&memcg->numainfo_updating, 0);
1271}
1272
1273/*
1274 * Select a node to start reclaim from. Because all we need is to reduce
1275 * the usage counter, starting from anywhere is OK. Reclaiming from the
1276 * current node has both pros and cons.
1277 *
1278 * Freeing memory from the current node means freeing memory from a node
1279 * which we'll use or have used, so it may hurt the LRU. And if several
1280 * threads hit their limits, they will contend on that node. But freeing
1281 * from a remote node means higher memory reclaim costs due to memory latency.
1282 *
1283 * For now we use round-robin; a better algorithm is welcome.
1284 */
1285int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1286{
1287 int node;
1288
1289 mem_cgroup_may_update_nodemask(memcg);
1290 node = memcg->last_scanned_node;
1291
1292 node = next_node_in(node, memcg->scan_nodes);
1293 /*
1294 * mem_cgroup_may_update_nodemask might have seen no reclaimable pages
1295 * last time it really checked all the LRUs due to rate limiting.
1296 * Fallback to the current node in that case for simplicity.
1297 */
1298 if (unlikely(node == MAX_NUMNODES))
1299 node = numa_node_id();
1300
1301 memcg->last_scanned_node = node;
1302 return node;
1303}
1304#else
1305int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1306{
1307 return 0;
1308}
1309#endif
1310
1311static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1312 pg_data_t *pgdat,
1313 gfp_t gfp_mask,
1314 unsigned long *total_scanned)
1315{
1316 struct mem_cgroup *victim = NULL;
1317 int total = 0;
1318 int loop = 0;
1319 unsigned long excess;
1320 unsigned long nr_scanned;
1321 struct mem_cgroup_reclaim_cookie reclaim = {
1322 .pgdat = pgdat,
1323 .priority = 0,
1324 };
1325
1326 excess = soft_limit_excess(root_memcg);
1327
1328 while (1) {
1329 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1330 if (!victim) {
1331 loop++;
1332 if (loop >= 2) {
1333 /*
1334 * If we have not been able to reclaim
1335 * anything, it might be because there are
1336 * no reclaimable pages under this hierarchy.
1337 */
1338 if (!total)
1339 break;
1340 /*
1341 * We want to do more targeted reclaim.
1342 * excess >> 2 is not too excessive, so we do not
1343 * reclaim too much, nor too little such that we keep
1344 * coming back to reclaim from this cgroup
1345 */
1346 if (total >= (excess >> 2) ||
1347 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1348 break;
1349 }
1350 continue;
1351 }
1352 total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1353 pgdat, &nr_scanned);
1354 *total_scanned += nr_scanned;
1355 if (!soft_limit_excess(root_memcg))
1356 break;
1357 }
1358 mem_cgroup_iter_break(root_memcg, victim);
1359 return total;
1360}
1361
1362#ifdef CONFIG_LOCKDEP
1363static struct lockdep_map memcg_oom_lock_dep_map = {
1364 .name = "memcg_oom_lock",
1365};
1366#endif
1367
1368static DEFINE_SPINLOCK(memcg_oom_lock);
1369
1370/*
1371 * Check whether the OOM killer is already running under our hierarchy.
1372 * If someone else is running it, return false.
1373 */
1374static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1375{
1376 struct mem_cgroup *iter, *failed = NULL;
1377
1378 spin_lock(&memcg_oom_lock);
1379
1380 for_each_mem_cgroup_tree(iter, memcg) {
1381 if (iter->oom_lock) {
1382 /*
1383 * this subtree of our hierarchy is already locked,
1384 * so we cannot grab the lock.
1385 */
1386 failed = iter;
1387 mem_cgroup_iter_break(memcg, iter);
1388 break;
1389 } else
1390 iter->oom_lock = true;
1391 }
1392
1393 if (failed) {
1394 /*
1395 * OK, we failed to lock the whole subtree so we have
1396 * to clean up what we set up, up to the failing subtree.
1397 */
1398 for_each_mem_cgroup_tree(iter, memcg) {
1399 if (iter == failed) {
1400 mem_cgroup_iter_break(memcg, iter);
1401 break;
1402 }
1403 iter->oom_lock = false;
1404 }
1405 } else
1406 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1407
1408 spin_unlock(&memcg_oom_lock);
1409
1410 return !failed;
1411}
1412
1413static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1414{
1415 struct mem_cgroup *iter;
1416
1417 spin_lock(&memcg_oom_lock);
1418 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
1419 for_each_mem_cgroup_tree(iter, memcg)
1420 iter->oom_lock = false;
1421 spin_unlock(&memcg_oom_lock);
1422}
1423
1424static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1425{
1426 struct mem_cgroup *iter;
1427
1428 spin_lock(&memcg_oom_lock);
1429 for_each_mem_cgroup_tree(iter, memcg)
1430 iter->under_oom++;
1431 spin_unlock(&memcg_oom_lock);
1432}
1433
1434static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1435{
1436 struct mem_cgroup *iter;
1437
1438 /*
1439 * When a new child is created while the hierarchy is under oom,
1440 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
1441 */
1442 spin_lock(&memcg_oom_lock);
1443 for_each_mem_cgroup_tree(iter, memcg)
1444 if (iter->under_oom > 0)
1445 iter->under_oom--;
1446 spin_unlock(&memcg_oom_lock);
1447}
1448
1449static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1450
1451struct oom_wait_info {
1452 struct mem_cgroup *memcg;
1453 wait_queue_entry_t wait;
1454};
1455
1456static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1457 unsigned mode, int sync, void *arg)
1458{
1459 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1460 struct mem_cgroup *oom_wait_memcg;
1461 struct oom_wait_info *oom_wait_info;
1462
1463 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1464 oom_wait_memcg = oom_wait_info->memcg;
1465
1466 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1467 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1468 return 0;
1469 return autoremove_wake_function(wait, mode, sync, arg);
1470}
1471
1472static void memcg_oom_recover(struct mem_cgroup *memcg)
1473{
1474 /*
1475 * For the following lockless ->under_oom test, the only required
1476 * guarantee is that it must see the state asserted by an OOM when
1477 * this function is called as a result of userland actions
1478 * triggered by the notification of the OOM. This is trivially
1479 * achieved by invoking mem_cgroup_mark_under_oom() before
1480 * triggering notification.
1481 */
1482 if (memcg && memcg->under_oom)
1483 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1484}
1485
1486static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1487{
1488 if (!current->memcg_may_oom || order > PAGE_ALLOC_COSTLY_ORDER)
1489 return;
1490 /*
1491 * We are in the middle of the charge context here, so we
1492 * don't want to block when potentially sitting on a callstack
1493 * that holds all kinds of filesystem and mm locks.
1494 *
1495 * Also, the caller may handle a failed allocation gracefully
1496 * (like optional page cache readahead) and so an OOM killer
1497 * invocation might not even be necessary.
1498 *
1499 * That's why we don't do anything here except remember the
1500 * OOM context and then deal with it at the end of the page
1501 * fault when the stack is unwound, the locks are released,
1502 * and when we know whether the fault was overall successful.
1503 */
1504 css_get(&memcg->css);
1505 current->memcg_in_oom = memcg;
1506 current->memcg_oom_gfp_mask = mask;
1507 current->memcg_oom_order = order;
1508}
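/*
 * Sketch of the overall flow (illustrative; the second half runs from
 * the page fault exit path, outside this file):
 *
 *	try_charge() fails under OOM
 *	  -> mem_cgroup_oom(memcg, mask, order)	remember the OOM context
 *	... the fault unwinds, locks are released ...
 *	  -> mem_cgroup_oom_synchronize(true)	kill/wait, then clean up
 */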
1509
1510/**
1511 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1512 * @handle: actually kill/wait or just clean up the OOM state
1513 *
1514 * This has to be called at the end of a page fault if the memcg OOM
1515 * handler was enabled.
1516 *
1517 * Memcg supports userspace OOM handling where failed allocations must
1518 * sleep on a waitqueue until the userspace task resolves the
1519 * situation. Sleeping directly in the charge context with all kinds
1520 * of locks held is not a good idea, instead we remember an OOM state
1521 * in the task and mem_cgroup_oom_synchronize() has to be called at
1522 * the end of the page fault to complete the OOM handling.
1523 *
1524 * Returns %true if an ongoing memcg OOM situation was detected and
1525 * completed, %false otherwise.
1526 */
1527bool mem_cgroup_oom_synchronize(bool handle)
1528{
1529 struct mem_cgroup *memcg = current->memcg_in_oom;
1530 struct oom_wait_info owait;
1531 bool locked;
1532
1533 /* OOM is global, do not handle */
1534 if (!memcg)
1535 return false;
1536
1537 if (!handle)
1538 goto cleanup;
1539
1540 owait.memcg = memcg;
1541 owait.wait.flags = 0;
1542 owait.wait.func = memcg_oom_wake_function;
1543 owait.wait.private = current;
1544 INIT_LIST_HEAD(&owait.wait.entry);
1545
1546 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1547 mem_cgroup_mark_under_oom(memcg);
1548
1549 locked = mem_cgroup_oom_trylock(memcg);
1550
1551 if (locked)
1552 mem_cgroup_oom_notify(memcg);
1553
1554 if (locked && !memcg->oom_kill_disable) {
1555 mem_cgroup_unmark_under_oom(memcg);
1556 finish_wait(&memcg_oom_waitq, &owait.wait);
1557 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1558 current->memcg_oom_order);
1559 } else {
1560 schedule();
1561 mem_cgroup_unmark_under_oom(memcg);
1562 finish_wait(&memcg_oom_waitq, &owait.wait);
1563 }
1564
1565 if (locked) {
1566 mem_cgroup_oom_unlock(memcg);
1567 /*
1568 * There is no guarantee that an OOM-lock contender
1569 * sees the wakeups triggered by the OOM kill
1570 * uncharges. Wake any sleepers explicitly.
1571 */
1572 memcg_oom_recover(memcg);
1573 }
1574cleanup:
1575 current->memcg_in_oom = NULL;
1576 css_put(&memcg->css);
1577 return true;
1578}
1579
1580/**
1581 * lock_page_memcg - lock a page->mem_cgroup binding
1582 * @page: the page
1583 *
1584 * This function protects unlocked LRU pages from being moved to
1585 * another cgroup.
1586 *
1587 * It ensures lifetime of the returned memcg. Caller is responsible
1588 * for the lifetime of the page; __unlock_page_memcg() is available
1589 * when @page might get freed inside the locked section.
1590 */
1591struct mem_cgroup *lock_page_memcg(struct page *page)
1592{
1593 struct mem_cgroup *memcg;
1594 unsigned long flags;
1595
1596 /*
1597 * The RCU lock is held throughout the transaction. The fast
1598 * path can get away without acquiring the memcg->move_lock
1599 * because page moving starts with an RCU grace period.
1600 *
1601 * The RCU lock also protects the memcg from being freed when
1602 * the page state that is going to change is the only thing
1603 * preventing the page itself from being freed. E.g. writeback
1604 * doesn't hold a page reference and relies on PG_writeback to
1605 * keep off truncation, migration and so forth.
1606 */
1607 rcu_read_lock();
1608
1609 if (mem_cgroup_disabled())
1610 return NULL;
1611again:
1612 memcg = page->mem_cgroup;
1613 if (unlikely(!memcg))
1614 return NULL;
1615
1616 if (atomic_read(&memcg->moving_account) <= 0)
1617 return memcg;
1618
1619 spin_lock_irqsave(&memcg->move_lock, flags);
1620 if (memcg != page->mem_cgroup) {
1621 spin_unlock_irqrestore(&memcg->move_lock, flags);
1622 goto again;
1623 }
1624
1625 /*
1626 * When charge migration first begins, we can have locked and
1627 * unlocked page stat updates happening concurrently. Track
1628 * the task who has the lock for unlock_page_memcg().
1629 */
1630 memcg->move_lock_task = current;
1631 memcg->move_lock_flags = flags;
1632
1633 return memcg;
1634}
1635EXPORT_SYMBOL(lock_page_memcg);
1636
1637/**
1638 * __unlock_page_memcg - unlock and unpin a memcg
1639 * @memcg: the memcg
1640 *
1641 * Unlock and unpin a memcg returned by lock_page_memcg().
1642 */
1643void __unlock_page_memcg(struct mem_cgroup *memcg)
1644{
1645 if (memcg && memcg->move_lock_task == current) {
1646 unsigned long flags = memcg->move_lock_flags;
1647
1648 memcg->move_lock_task = NULL;
1649 memcg->move_lock_flags = 0;
1650
1651 spin_unlock_irqrestore(&memcg->move_lock, flags);
1652 }
1653
1654 rcu_read_unlock();
1655}
1656
1657/**
1658 * unlock_page_memcg - unlock a page->mem_cgroup binding
1659 * @page: the page
1660 */
1661void unlock_page_memcg(struct page *page)
1662{
1663 __unlock_page_memcg(page->mem_cgroup);
1664}
1665EXPORT_SYMBOL(unlock_page_memcg);
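/*
 * Usage sketch (illustrative):
 *
 *	memcg = lock_page_memcg(page);
 *	... update page state and memcg statistics ...
 *	unlock_page_memcg(page);
 *
 * If the locked section may free @page, keep the returned memcg and call
 * __unlock_page_memcg(memcg) instead of unlock_page_memcg(page).
 */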
1666
1667struct memcg_stock_pcp {
1668 struct mem_cgroup *cached; /* this is never the root cgroup */
1669 unsigned int nr_pages;
1670 struct work_struct work;
1671 unsigned long flags;
1672#define FLUSHING_CACHED_CHARGE 0
1673};
1674static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1675static DEFINE_MUTEX(percpu_charge_mutex);
1676
1677/**
1678 * consume_stock: Try to consume stocked charge on this cpu.
1679 * @memcg: memcg to consume from.
1680 * @nr_pages: how many pages to charge.
1681 *
1682 * The charges will only happen if @memcg matches the current cpu's memcg
1683 * stock, and at least @nr_pages are available in that stock. Failure to
1684 * service an allocation will refill the stock.
1685 *
1686 * returns true if successful, false otherwise.
1687 */
1688static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1689{
1690 struct memcg_stock_pcp *stock;
1691 unsigned long flags;
1692 bool ret = false;
1693
1694 if (nr_pages > MEMCG_CHARGE_BATCH)
1695 return ret;
1696
1697 local_irq_save(flags);
1698
1699 stock = this_cpu_ptr(&memcg_stock);
1700 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
1701 stock->nr_pages -= nr_pages;
1702 ret = true;
1703 }
1704
1705 local_irq_restore(flags);
1706
1707 return ret;
1708}
1709
1710/*
1711 * Return the stocked charges to the page counters and reset the cached information.
1712 */
1713static void drain_stock(struct memcg_stock_pcp *stock)
1714{
1715 struct mem_cgroup *old = stock->cached;
1716
1717 if (stock->nr_pages) {
1718 page_counter_uncharge(&old->memory, stock->nr_pages);
1719 if (do_memsw_account())
1720 page_counter_uncharge(&old->memsw, stock->nr_pages);
1721 css_put_many(&old->css, stock->nr_pages);
1722 stock->nr_pages = 0;
1723 }
1724 stock->cached = NULL;
1725}
1726
1727static void drain_local_stock(struct work_struct *dummy)
1728{
1729 struct memcg_stock_pcp *stock;
1730 unsigned long flags;
1731
1732 /*
1733 * The only protection from memory hotplug vs. drain_stock races is
1734 * that we always operate on local CPU stock here with IRQ disabled
1735 */
1736 local_irq_save(flags);
1737
1738 stock = this_cpu_ptr(&memcg_stock);
1739 drain_stock(stock);
1740 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1741
1742 local_irq_restore(flags);
1743}
1744
1745/*
1746 * Cache charges (nr_pages) in the local per-cpu area.
1747 * They will be consumed by consume_stock() later.
1748 */
1749static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1750{
1751 struct memcg_stock_pcp *stock;
1752 unsigned long flags;
1753
1754 local_irq_save(flags);
1755
1756 stock = this_cpu_ptr(&memcg_stock);
1757 if (stock->cached != memcg) { /* reset if necessary */
1758 drain_stock(stock);
1759 stock->cached = memcg;
1760 }
1761 stock->nr_pages += nr_pages;
1762
1763 if (stock->nr_pages > MEMCG_CHARGE_BATCH)
1764 drain_stock(stock);
1765
1766 local_irq_restore(flags);
1767}
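/*
 * How the stock is used by the charge path (sketch): try_charge() first
 * calls consume_stock(); only when the local stock cannot satisfy the
 * request does it charge the page_counter, and then it charges a full
 * MEMCG_CHARGE_BATCH and parks the surplus here via refill_stock(), so
 * subsequent small charges on this cpu can skip the shared counters.
 */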
1768
1769/*
1770 * Drains all per-CPU charge caches for the given root_memcg and the
1771 * subtree of the hierarchy under it.
1772 */
1773static void drain_all_stock(struct mem_cgroup *root_memcg)
1774{
1775 int cpu, curcpu;
1776
1777 /* If someone's already draining, avoid running more workers. */
1778 if (!mutex_trylock(&percpu_charge_mutex))
1779 return;
1780 /*
1781 * Notify other cpus that a system-wide "drain" is running.
1782 * We do not care about races with the cpu hotplug because cpu down
1783 * as well as workers from this path always operate on the local
1784 * per-cpu data. CPU up doesn't touch memcg_stock at all.
1785 */
1786 curcpu = get_cpu();
1787 for_each_online_cpu(cpu) {
1788 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1789 struct mem_cgroup *memcg;
1790
1791 memcg = stock->cached;
1792 if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css))
1793 continue;
1794 if (!mem_cgroup_is_descendant(memcg, root_memcg)) {
1795 css_put(&memcg->css);
1796 continue;
1797 }
1798 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1799 if (cpu == curcpu)
1800 drain_local_stock(&stock->work);
1801 else
1802 schedule_work_on(cpu, &stock->work);
1803 }
1804 css_put(&memcg->css);
1805 }
1806 put_cpu();
1807 mutex_unlock(&percpu_charge_mutex);
1808}
1809
1810static int memcg_hotplug_cpu_dead(unsigned int cpu)
1811{
1812 struct memcg_stock_pcp *stock;
1813 struct mem_cgroup *memcg;
1814
1815 stock = &per_cpu(memcg_stock, cpu);
1816 drain_stock(stock);
1817
1818 for_each_mem_cgroup(memcg) {
1819 int i;
1820
1821 for (i = 0; i < MEMCG_NR_STAT; i++) {
1822 int nid;
1823 long x;
1824
1825 x = this_cpu_xchg(memcg->stat_cpu->count[i], 0);
1826 if (x)
1827 atomic_long_add(x, &memcg->stat[i]);
1828
1829 if (i >= NR_VM_NODE_STAT_ITEMS)
1830 continue;
1831
1832 for_each_node(nid) {
1833 struct mem_cgroup_per_node *pn;
1834
1835 pn = mem_cgroup_nodeinfo(memcg, nid);
1836 x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
1837 if (x)
1838 atomic_long_add(x, &pn->lruvec_stat[i]);
1839 }
1840 }
1841
1842 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
1843 long x;
1844
1845 x = this_cpu_xchg(memcg->stat_cpu->events[i], 0);
1846 if (x)
1847 atomic_long_add(x, &memcg->events[i]);
1848 }
1849 }
1850
1851 return 0;
1852}
1853
1854static void reclaim_high(struct mem_cgroup *memcg,
1855 unsigned int nr_pages,
1856 gfp_t gfp_mask)
1857{
1858 do {
1859 if (page_counter_read(&memcg->memory) <= memcg->high)
1860 continue;
1861 memcg_memory_event(memcg, MEMCG_HIGH);
1862 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
1863 } while ((memcg = parent_mem_cgroup(memcg)));
1864}
1865
1866static void high_work_func(struct work_struct *work)
1867{
1868 struct mem_cgroup *memcg;
1869
1870 memcg = container_of(work, struct mem_cgroup, high_work);
1871 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
1872}
1873
1874/*
1875 * Scheduled by try_charge() to be executed from the userland return path
1876 * and reclaims memory over the high limit.
1877 */
1878void mem_cgroup_handle_over_high(void)
1879{
1880 unsigned int nr_pages = current->memcg_nr_pages_over_high;
1881 struct mem_cgroup *memcg;
1882
1883 if (likely(!nr_pages))
1884 return;
1885
1886 memcg = get_mem_cgroup_from_mm(current->mm);
1887 reclaim_high(memcg, nr_pages, GFP_KERNEL);
1888 css_put(&memcg->css);
1889 current->memcg_nr_pages_over_high = 0;
1890}
1891
1892static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
1893 unsigned int nr_pages)
1894{
1895 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
1896 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1897 struct mem_cgroup *mem_over_limit;
1898 struct page_counter *counter;
1899 unsigned long nr_reclaimed;
1900 bool may_swap = true;
1901 bool drained = false;
1902
1903 if (mem_cgroup_is_root(memcg))
1904 return 0;
1905retry:
1906 if (consume_stock(memcg, nr_pages))
1907 return 0;
1908
1909 if (!do_memsw_account() ||
1910 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
1911 if (page_counter_try_charge(&memcg->memory, batch, &counter))
1912 goto done_restock;
1913 if (do_memsw_account())
1914 page_counter_uncharge(&memcg->memsw, batch);
1915 mem_over_limit = mem_cgroup_from_counter(counter, memory);
1916 } else {
1917 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
1918 may_swap = false;
1919 }
1920
1921 if (batch > nr_pages) {
1922 batch = nr_pages;
1923 goto retry;
1924 }
1925
1926 /*
1927 * Unlike in global OOM situations, memcg is not in a physical
1928 * memory shortage. Allow dying and OOM-killed tasks to
1929 * bypass the last charges so that they can exit quickly and
1930 * free their memory.
1931 */
1932 if (unlikely(tsk_is_oom_victim(current) ||
1933 fatal_signal_pending(current) ||
1934 current->flags & PF_EXITING))
1935 goto force;
1936
1937 /*
1938 * Prevent unbounded recursion when reclaim operations need to
1939 * allocate memory. This might exceed the limits temporarily,
1940 * but we prefer facilitating memory reclaim and getting back
1941 * under the limit over triggering OOM kills in these cases.
1942 */
1943 if (unlikely(current->flags & PF_MEMALLOC))
1944 goto force;
1945
1946 if (unlikely(task_in_memcg_oom(current)))
1947 goto nomem;
1948
1949 if (!gfpflags_allow_blocking(gfp_mask))
1950 goto nomem;
1951
1952 memcg_memory_event(mem_over_limit, MEMCG_MAX);
1953
1954 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
1955 gfp_mask, may_swap);
1956
1957 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
1958 goto retry;
1959
1960 if (!drained) {
1961 drain_all_stock(mem_over_limit);
1962 drained = true;
1963 goto retry;
1964 }
1965
1966 if (gfp_mask & __GFP_NORETRY)
1967 goto nomem;
1968 /*
1969 * Even though the limit is exceeded at this point, reclaim
1970 * may have been able to free some pages. Retry the charge
1971 * before killing the task.
1972 *
1973 * Only for regular pages, though: huge pages are rather
1974 * unlikely to succeed so close to the limit, and we fall back
1975 * to regular pages anyway in case of failure.
1976 */
1977 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
1978 goto retry;
1979 /*
1980 * At task move, charge accounts can be doubly counted. So, it's
1981 * better to wait until the end of task_move if something is going on.
1982 */
1983 if (mem_cgroup_wait_acct_move(mem_over_limit))
1984 goto retry;
1985
1986 if (nr_retries--)
1987 goto retry;
1988
1989 if (gfp_mask & __GFP_NOFAIL)
1990 goto force;
1991
1992 if (fatal_signal_pending(current))
1993 goto force;
1994
1995 memcg_memory_event(mem_over_limit, MEMCG_OOM);
1996
1997 mem_cgroup_oom(mem_over_limit, gfp_mask,
1998 get_order(nr_pages * PAGE_SIZE));
1999nomem:
2000 if (!(gfp_mask & __GFP_NOFAIL))
2001 return -ENOMEM;
2002force:
2003 /*
2004 * The allocation either can't fail or will lead to more memory
2005 * being freed very soon. Allow memory usage to go over the limit
2006 * temporarily by force charging it.
2007 */
2008 page_counter_charge(&memcg->memory, nr_pages);
2009 if (do_memsw_account())
2010 page_counter_charge(&memcg->memsw, nr_pages);
2011 css_get_many(&memcg->css, nr_pages);
2012
2013 return 0;
2014
2015done_restock:
2016 css_get_many(&memcg->css, batch);
2017 if (batch > nr_pages)
2018 refill_stock(memcg, batch - nr_pages);
2019
2020 /*
2021 * If the hierarchy is above the normal consumption range, schedule
2022 * reclaim on returning to userland. We can perform reclaim here
2023 * if __GFP_RECLAIM is set, but let's always punt for simplicity and so that
2024 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2025 * not recorded as it most likely matches current's and won't
2026 * change in the meantime. As high limit is checked again before
2027 * reclaim, the cost of mismatch is negligible.
2028 */
2029 do {
2030 if (page_counter_read(&memcg->memory) > memcg->high) {
2031 /* Don't bother a random interrupted task */
2032 if (in_interrupt()) {
2033 schedule_work(&memcg->high_work);
2034 break;
2035 }
2036 current->memcg_nr_pages_over_high += batch;
2037 set_notify_resume(current);
2038 break;
2039 }
2040 } while ((memcg = parent_mem_cgroup(memcg)));
2041
2042 return 0;
2043}
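
/*
 * Worked example of the batching in try_charge() (assuming
 * MEMCG_CHARGE_BATCH is 32 pages; the actual value is defined in the
 * header):
 *
 *	try_charge(memcg, GFP_KERNEL, 1)
 *	    batch = max(32, 1) = 32
 *	    page_counter_try_charge(&memcg->memory, 32, ...) succeeds
 *	    done_restock: refill_stock(memcg, 32 - 1 = 31)
 *
 * If the 32-page batch bumps into the limit, the charge is retried
 * with batch = nr_pages = 1 before any reclaim is attempted.
 */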
2044
2045static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2046{
2047 if (mem_cgroup_is_root(memcg))
2048 return;
2049
2050 page_counter_uncharge(&memcg->memory, nr_pages);
2051 if (do_memsw_account())
2052 page_counter_uncharge(&memcg->memsw, nr_pages);
2053
2054 css_put_many(&memcg->css, nr_pages);
2055}
2056
2057static void lock_page_lru(struct page *page, int *isolated)
2058{
2059 struct zone *zone = page_zone(page);
2060
2061 spin_lock_irq(zone_lru_lock(zone));
2062 if (PageLRU(page)) {
2063 struct lruvec *lruvec;
2064
2065 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
2066 ClearPageLRU(page);
2067 del_page_from_lru_list(page, lruvec, page_lru(page));
2068 *isolated = 1;
2069 } else
2070 *isolated = 0;
2071}
2072
2073static void unlock_page_lru(struct page *page, int isolated)
2074{
2075 struct zone *zone = page_zone(page);
2076
2077 if (isolated) {
2078 struct lruvec *lruvec;
2079
2080 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
2081 VM_BUG_ON_PAGE(PageLRU(page), page);
2082 SetPageLRU(page);
2083 add_page_to_lru_list(page, lruvec, page_lru(page));
2084 }
2085 spin_unlock_irq(zone_lru_lock(zone));
2086}
2087
2088static void commit_charge(struct page *page, struct mem_cgroup *memcg,
2089 bool lrucare)
2090{
2091 int isolated;
2092
2093 VM_BUG_ON_PAGE(page->mem_cgroup, page);
2094
2095 /*
2096 * In some cases, e.g. SwapCache and FUSE (splice_buf->radixtree), the page
2097 * may already be on some other mem_cgroup's LRU. Take care of it.
2098 */
2099 if (lrucare)
2100 lock_page_lru(page, &isolated);
2101
2102 /*
2103 * Nobody should be changing or seriously looking at
2104 * page->mem_cgroup at this point:
2105 *
2106 * - the page is uncharged
2107 *
2108 * - the page is off-LRU
2109 *
2110 * - an anonymous fault has exclusive page access, except for
2111 * a locked page table
2112 *
2113 * - a page cache insertion, a swapin fault, or a migration
2114 * have the page locked
2115 */
2116 page->mem_cgroup = memcg;
2117
2118 if (lrucare)
2119 unlock_page_lru(page, isolated);
2120}
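
/*
 * Sketch of how a charger is expected to pair the helpers above (not a
 * verbatim copy of any particular caller):
 *
 *	if (try_charge(memcg, gfp, nr_pages))
 *		return -ENOMEM;
 *	... set the page up ...
 *	commit_charge(page, memcg, lrucare);
 *
 * and, if setup fails after a successful charge:
 *
 *	cancel_charge(memcg, nr_pages);
 */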
2121
2122#ifndef CONFIG_SLOB
2123static int memcg_alloc_cache_id(void)
2124{
2125 int id, size;
2126 int err;
2127
2128 id = ida_simple_get(&memcg_cache_ida,
2129 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2130 if (id < 0)
2131 return id;
2132
2133 if (id < memcg_nr_cache_ids)
2134 return id;
2135
2136 /*
2137 * There's no space for the new id in memcg_caches arrays,
2138 * so we have to grow them.
2139 */
2140 down_write(&memcg_cache_ids_sem);
2141
2142 size = 2 * (id + 1);
2143 if (size < MEMCG_CACHES_MIN_SIZE)
2144 size = MEMCG_CACHES_MIN_SIZE;
2145 else if (size > MEMCG_CACHES_MAX_SIZE)
2146 size = MEMCG_CACHES_MAX_SIZE;
2147
2148 err = memcg_update_all_caches(size);
2149 if (!err)
2150 err = memcg_update_all_list_lrus(size);
2151 if (!err)
2152 memcg_nr_cache_ids = size;
2153
2154 up_write(&memcg_cache_ids_sem);
2155
2156 if (err) {
2157 ida_simple_remove(&memcg_cache_ida, id);
2158 return err;
2159 }
2160 return id;
2161}
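
/*
 * Example of the doubling above: if memcg_nr_cache_ids is currently 64
 * and the IDA hands out id 64, the arrays grow to
 * size = 2 * (64 + 1) = 130, clamped to the
 * [MEMCG_CACHES_MIN_SIZE, MEMCG_CACHES_MAX_SIZE] range.
 */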
2162
2163static void memcg_free_cache_id(int id)
2164{
2165 ida_simple_remove(&memcg_cache_ida, id);
2166}
2167
2168struct memcg_kmem_cache_create_work {
2169 struct mem_cgroup *memcg;
2170 struct kmem_cache *cachep;
2171 struct work_struct work;
2172};
2173
2174static void memcg_kmem_cache_create_func(struct work_struct *w)
2175{
2176 struct memcg_kmem_cache_create_work *cw =
2177 container_of(w, struct memcg_kmem_cache_create_work, work);
2178 struct mem_cgroup *memcg = cw->memcg;
2179 struct kmem_cache *cachep = cw->cachep;
2180
2181 memcg_create_kmem_cache(memcg, cachep);
2182
2183 css_put(&memcg->css);
2184 kfree(cw);
2185}
2186
2187/*
2188 * Enqueue the creation of a per-memcg kmem_cache.
2189 */
2190static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2191 struct kmem_cache *cachep)
2192{
2193 struct memcg_kmem_cache_create_work *cw;
2194
2195 cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
2196 if (!cw)
2197 return;
2198
2199 css_get(&memcg->css);
2200
2201 cw->memcg = memcg;
2202 cw->cachep = cachep;
2203 INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
2204
2205 queue_work(memcg_kmem_cache_wq, &cw->work);
2206}
2207
2208static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2209 struct kmem_cache *cachep)
2210{
2211 /*
2212 * We need to stop accounting when we kmalloc, because if the
2213 * corresponding kmalloc cache is not yet created, the first allocation
2214 * in __memcg_schedule_kmem_cache_create will recurse.
2215 *
2216 * However, it is better to enclose the whole function. Depending on
2217 * the debugging options enabled, INIT_WORK(), for instance, can
2218 * trigger an allocation. This too, will make us recurse. Because at
2219 * this point we can't allow ourselves back into memcg_kmem_get_cache,
2220 * the safest choice is to do it like this, wrapping the whole function.
2221 */
2222 current->memcg_kmem_skip_account = 1;
2223 __memcg_schedule_kmem_cache_create(memcg, cachep);
2224 current->memcg_kmem_skip_account = 0;
2225}
2226
2227static inline bool memcg_kmem_bypass(void)
2228{
2229 if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
2230 return true;
2231 return false;
2232}
2233
2234/**
2235 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
2236 * @cachep: the original global kmem cache
2237 *
2238 * Return the kmem_cache we're supposed to use for a slab allocation.
2239 * We try to use the current memcg's version of the cache.
2240 *
2241 * If the cache does not exist yet and we are the first user of it, we
2242 * create it asynchronously in a workqueue and let the current allocation
2243 * go through with the original cache.
2244 *
2245 * This function takes a reference to the cache it returns to assure it
2246 * won't get destroyed while we are working with it. Once the caller is
2247 * done with it, memcg_kmem_put_cache() must be called to release the
2248 * reference.
2249 */
2250struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
2251{
2252 struct mem_cgroup *memcg;
2253 struct kmem_cache *memcg_cachep;
2254 int kmemcg_id;
2255
2256 VM_BUG_ON(!is_root_cache(cachep));
2257
2258 if (memcg_kmem_bypass())
2259 return cachep;
2260
2261 if (current->memcg_kmem_skip_account)
2262 return cachep;
2263
2264 memcg = get_mem_cgroup_from_mm(current->mm);
2265 kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2266 if (kmemcg_id < 0)
2267 goto out;
2268
2269 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
2270 if (likely(memcg_cachep))
2271 return memcg_cachep;
2272
2273 /*
2274 * If we are in a safe context (can wait, and not in interrupt
2275 * context), we could be predictable and return right away.
2276 * This would guarantee that the allocation being performed
2277 * already belongs in the new cache.
2278 *
2279 * However, there are some clashes that can arise from locking.
2280 * For instance, because we acquire the slab_mutex while doing
2281 * memcg_create_kmem_cache, this means no further allocation
2282 * could happen with the slab_mutex held. So it's better to
2283 * defer everything.
2284 */
2285 memcg_schedule_kmem_cache_create(memcg, cachep);
2286out:
2287 css_put(&memcg->css);
2288 return cachep;
2289}
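
/*
 * Sketch of the expected use by a slab allocation path (the real hooks
 * live in the slab allocators, not here):
 *
 *	s = memcg_kmem_get_cache(cachep);
 *	p = kmem_cache_alloc(s, gfp);
 *	memcg_kmem_put_cache(s);
 */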
2290
2291/**
2292 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
2293 * @cachep: the cache returned by memcg_kmem_get_cache
2294 */
2295void memcg_kmem_put_cache(struct kmem_cache *cachep)
2296{
2297 if (!is_root_cache(cachep))
2298 css_put(&cachep->memcg_params.memcg->css);
2299}
2300
2301/**
2302 * memcg_kmem_charge_memcg: charge a kmem page
2303 * @page: page to charge
2304 * @gfp: reclaim mode
2305 * @order: allocation order
2306 * @memcg: memory cgroup to charge
2307 *
2308 * Returns 0 on success, an error code on failure.
2309 */
2310int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2311 struct mem_cgroup *memcg)
2312{
2313 unsigned int nr_pages = 1 << order;
2314 struct page_counter *counter;
2315 int ret;
2316
2317 ret = try_charge(memcg, gfp, nr_pages);
2318 if (ret)
2319 return ret;
2320
2321 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2322 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2323 cancel_charge(memcg, nr_pages);
2324 return -ENOMEM;
2325 }
2326
2327 page->mem_cgroup = memcg;
2328
2329 return 0;
2330}
2331
2332/**
2333 * memcg_kmem_charge: charge a kmem page to the current memory cgroup
2334 * @page: page to charge
2335 * @gfp: reclaim mode
2336 * @order: allocation order
2337 *
2338 * Returns 0 on success, an error code on failure.
2339 */
2340int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
2341{
2342 struct mem_cgroup *memcg;
2343 int ret = 0;
2344
2345 if (memcg_kmem_bypass())
2346 return 0;
2347
2348 memcg = get_mem_cgroup_from_mm(current->mm);
2349 if (!mem_cgroup_is_root(memcg)) {
2350 ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
2351 if (!ret)
2352 __SetPageKmemcg(page);
2353 }
2354 css_put(&memcg->css);
2355 return ret;
2356}
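
/*
 * Sketch of how a __GFP_ACCOUNT page allocation would use the charge
 * and uncharge helpers (the real call sites are in the page allocator
 * and may differ in detail):
 *
 *	page = alloc_pages(gfp, order);
 *	if (page && memcg_kmem_charge(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *	...
 *	memcg_kmem_uncharge(page, order);	on the free side
 */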
2357/**
2358 * memcg_kmem_uncharge: uncharge a kmem page
2359 * @page: page to uncharge
2360 * @order: allocation order
2361 */
2362void memcg_kmem_uncharge(struct page *page, int order)
2363{
2364 struct mem_cgroup *memcg = page->mem_cgroup;
2365 unsigned int nr_pages = 1 << order;
2366
2367 if (!memcg)
2368 return;
2369
2370 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
2371
2372 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2373 page_counter_uncharge(&memcg->kmem, nr_pages);
2374
2375 page_counter_uncharge(&memcg->memory, nr_pages);
2376 if (do_memsw_account())
2377 page_counter_uncharge(&memcg->memsw, nr_pages);
2378
2379 page->mem_cgroup = NULL;
2380
2381 /* slab pages do not have PageKmemcg flag set */
2382 if (PageKmemcg(page))
2383 __ClearPageKmemcg(page);
2384
2385 css_put_many(&memcg->css, nr_pages);
2386}
2387#endif /* !CONFIG_SLOB */
2388
2389#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2390
2391/*
2392 * Because tail pages are not marked as "used", mark them now. We're under
2393 * zone_lru_lock and migration entries are set up in all page mappings.
2394 */
2395void mem_cgroup_split_huge_fixup(struct page *head)
2396{
2397 int i;
2398
2399 if (mem_cgroup_disabled())
2400 return;
2401
2402 for (i = 1; i < HPAGE_PMD_NR; i++)
2403 head[i].mem_cgroup = head->mem_cgroup;
2404
2405 __mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR);
2406}
2407#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2408
2409#ifdef CONFIG_MEMCG_SWAP
2410/**
2411 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2412 * @entry: swap entry to be moved
2413 * @from: mem_cgroup which the entry is moved from
2414 * @to: mem_cgroup which the entry is moved to
2415 *
2416 * It succeeds only when the swap_cgroup's record for this entry is the same
2417 * as the mem_cgroup's id of @from.
2418 *
2419 * Returns 0 on success, -EINVAL on failure.
2420 *
2421 * The caller must have charged to @to, IOW, called page_counter_charge() on
2422 * both res and memsw, and called css_get().
2423 */
2424static int mem_cgroup_move_swap_account(swp_entry_t entry,
2425 struct mem_cgroup *from, struct mem_cgroup *to)
2426{
2427 unsigned short old_id, new_id;
2428
2429 old_id = mem_cgroup_id(from);
2430 new_id = mem_cgroup_id(to);
2431
2432 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2433 mod_memcg_state(from, MEMCG_SWAP, -1);
2434 mod_memcg_state(to, MEMCG_SWAP, 1);
2435 return 0;
2436 }
2437 return -EINVAL;
2438}
2439#else
2440static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2441 struct mem_cgroup *from, struct mem_cgroup *to)
2442{
2443 return -EINVAL;
2444}
2445#endif
2446
2447static DEFINE_MUTEX(memcg_limit_mutex);
2448
2449static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2450 unsigned long limit, bool memsw)
2451{
2452 bool enlarge = false;
2453 int ret;
2454 bool limits_invariant;
2455 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
2456
2457 do {
2458 if (signal_pending(current)) {
2459 ret = -EINTR;
2460 break;
2461 }
2462
2463 mutex_lock(&memcg_limit_mutex);
2464 /*
2465 * Make sure that the new limit (memsw or memory limit) doesn't
2466 * break our basic invariant rule memory.limit <= memsw.limit.
2467 */
2468 limits_invariant = memsw ? limit >= memcg->memory.limit :
2469 limit <= memcg->memsw.limit;
2470 if (!limits_invariant) {
2471 mutex_unlock(&memcg_limit_mutex);
2472 ret = -EINVAL;
2473 break;
2474 }
2475 if (limit > counter->limit)
2476 enlarge = true;
2477 ret = page_counter_limit(counter, limit);
2478 mutex_unlock(&memcg_limit_mutex);
2479
2480 if (!ret)
2481 break;
2482
2483 if (!try_to_free_mem_cgroup_pages(memcg, 1,
2484 GFP_KERNEL, !memsw)) {
2485 ret = -EBUSY;
2486 break;
2487 }
2488 } while (true);
2489
2490 if (!ret && enlarge)
2491 memcg_oom_recover(memcg);
2492
2493 return ret;
2494}
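
/*
 * The invariant enforced above dictates the order in which userspace
 * must adjust the two limits. For example, to grow both from 1G to 2G
 * (file paths illustrative):
 *
 *	echo 2G > memory.memsw.limit_in_bytes	memsw first
 *	echo 2G > memory.limit_in_bytes
 *
 * Raising memory.limit first would violate memory.limit <= memsw.limit
 * and return -EINVAL; shrinking works in the opposite order.
 */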
2495
2496unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
2497 gfp_t gfp_mask,
2498 unsigned long *total_scanned)
2499{
2500 unsigned long nr_reclaimed = 0;
2501 struct mem_cgroup_per_node *mz, *next_mz = NULL;
2502 unsigned long reclaimed;
2503 int loop = 0;
2504 struct mem_cgroup_tree_per_node *mctz;
2505 unsigned long excess;
2506 unsigned long nr_scanned;
2507
2508 if (order > 0)
2509 return 0;
2510
2511 mctz = soft_limit_tree_node(pgdat->node_id);
2512
2513 /*
2514 * Do not even bother to check the largest node if the root
2515 * is empty. Do it lockless to prevent lock bouncing. Races
2516 * are acceptable as soft limit is best effort anyway.
2517 */
2518 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
2519 return 0;
2520
2521 /*
2522 * This loop can run a while, especially if mem_cgroups continuously
2523 * keep exceeding their soft limit and putting the system under
2524 * pressure
2525 */
2526 do {
2527 if (next_mz)
2528 mz = next_mz;
2529 else
2530 mz = mem_cgroup_largest_soft_limit_node(mctz);
2531 if (!mz)
2532 break;
2533
2534 nr_scanned = 0;
2535 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
2536 gfp_mask, &nr_scanned);
2537 nr_reclaimed += reclaimed;
2538 *total_scanned += nr_scanned;
2539 spin_lock_irq(&mctz->lock);
2540 __mem_cgroup_remove_exceeded(mz, mctz);
2541
2542 /*
2543 * If we failed to reclaim anything from this memory cgroup
2544 * it is time to move on to the next cgroup
2545 */
2546 next_mz = NULL;
2547 if (!reclaimed)
2548 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2549
2550 excess = soft_limit_excess(mz->memcg);
2551 /*
2552 * One school of thought says that we should not add
2553 * back the node to the tree if reclaim returns 0.
2554 * But our reclaim could return 0 simply because, due
2555 * to priority, we are exposing a smaller subset of
2556 * memory to reclaim from. Consider this a longer-term
2557 * TODO.
2558 */
2559 /* If excess == 0, no tree ops */
2560 __mem_cgroup_insert_exceeded(mz, mctz, excess);
2561 spin_unlock_irq(&mctz->lock);
2562 css_put(&mz->memcg->css);
2563 loop++;
2564 /*
2565 * Could not reclaim anything and there are no more
2566 * mem cgroups to try or we seem to be looping without
2567 * reclaiming anything.
2568 */
2569 if (!nr_reclaimed &&
2570 (next_mz == NULL ||
2571 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2572 break;
2573 } while (!nr_reclaimed);
2574 if (next_mz)
2575 css_put(&next_mz->memcg->css);
2576 return nr_reclaimed;
2577}
2578
2579/*
2580 * Test whether @memcg has children, dead or alive. Note that this
2581 * function doesn't care whether @memcg has use_hierarchy enabled and
2582 * returns %true if there are child csses according to the cgroup
2583 * hierarchy. Testing use_hierarchy is the caller's responsibility.
2584 */
2585static inline bool memcg_has_children(struct mem_cgroup *memcg)
2586{
2587 bool ret;
2588
2589 rcu_read_lock();
2590 ret = css_next_child(NULL, &memcg->css);
2591 rcu_read_unlock();
2592 return ret;
2593}
2594
2595/*
2596 * Reclaims as many pages from the given memcg as possible.
2597 *
2598 * Caller is responsible for holding css reference for memcg.
2599 */
2600static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2601{
2602 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2603
2604 /* we call try-to-free pages to make this cgroup empty */
2605 lru_add_drain_all();
2606 /* try to free all pages in this cgroup */
2607 while (nr_retries && page_counter_read(&memcg->memory)) {
2608 int progress;
2609
2610 if (signal_pending(current))
2611 return -EINTR;
2612
2613 progress = try_to_free_mem_cgroup_pages(memcg, 1,
2614 GFP_KERNEL, true);
2615 if (!progress) {
2616 nr_retries--;
2617 /* maybe some writeback is necessary */
2618 congestion_wait(BLK_RW_ASYNC, HZ/10);
2619 }
2620
2621 }
2622
2623 return 0;
2624}
2625
2626static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2627 char *buf, size_t nbytes,
2628 loff_t off)
2629{
2630 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2631
2632 if (mem_cgroup_is_root(memcg))
2633 return -EINVAL;
2634 return mem_cgroup_force_empty(memcg) ?: nbytes;
2635}
2636
2637static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2638 struct cftype *cft)
2639{
2640 return mem_cgroup_from_css(css)->use_hierarchy;
2641}
2642
2643static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2644 struct cftype *cft, u64 val)
2645{
2646 int retval = 0;
2647 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2648 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
2649
2650 if (memcg->use_hierarchy == val)
2651 return 0;
2652
2653 /*
2654 * If parent's use_hierarchy is set, we can't make any modifications
2655 * in the child subtrees. If it is unset, then the change can
2656 * occur, provided the current cgroup has no children.
2657 *
2658 * For the root cgroup, parent_memcg is NULL; we allow the value to be
2659 * set if there are no children.
2660 */
2661 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
2662 (val == 1 || val == 0)) {
2663 if (!memcg_has_children(memcg))
2664 memcg->use_hierarchy = val;
2665 else
2666 retval = -EBUSY;
2667 } else
2668 retval = -EINVAL;
2669
2670 return retval;
2671}
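
/*
 * Behaviour of the checks above as seen from userspace (paths are
 * illustrative, assuming the root's use_hierarchy is clear):
 *
 *	mkdir A
 *	echo 1 > A/memory.use_hierarchy		ok, A has no children
 *	mkdir A/B
 *	echo 0 > A/memory.use_hierarchy		-EBUSY, A now has children
 *	echo 0 > A/B/memory.use_hierarchy	-EINVAL, A has it set
 */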
2672
2673static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
2674{
2675 struct mem_cgroup *iter;
2676 int i;
2677
2678 memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT);
2679
2680 for_each_mem_cgroup_tree(iter, memcg) {
2681 for (i = 0; i < MEMCG_NR_STAT; i++)
2682 stat[i] += memcg_page_state(iter, i);
2683 }
2684}
2685
2686static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
2687{
2688 struct mem_cgroup *iter;
2689 int i;
2690
2691 memset(events, 0, sizeof(*events) * NR_VM_EVENT_ITEMS);
2692
2693 for_each_mem_cgroup_tree(iter, memcg) {
2694 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
2695 events[i] += memcg_sum_events(iter, i);
2696 }
2697}
2698
2699static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
2700{
2701 unsigned long val = 0;
2702
2703 if (mem_cgroup_is_root(memcg)) {
2704 struct mem_cgroup *iter;
2705
2706 for_each_mem_cgroup_tree(iter, memcg) {
2707 val += memcg_page_state(iter, MEMCG_CACHE);
2708 val += memcg_page_state(iter, MEMCG_RSS);
2709 if (swap)
2710 val += memcg_page_state(iter, MEMCG_SWAP);
2711 }
2712 } else {
2713 if (!swap)
2714 val = page_counter_read(&memcg->memory);
2715 else
2716 val = page_counter_read(&memcg->memsw);
2717 }
2718 return val;
2719}
2720
2721enum {
2722 RES_USAGE,
2723 RES_LIMIT,
2724 RES_MAX_USAGE,
2725 RES_FAILCNT,
2726 RES_SOFT_LIMIT,
2727};
2728
2729static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
2730 struct cftype *cft)
2731{
2732 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2733 struct page_counter *counter;
2734
2735 switch (MEMFILE_TYPE(cft->private)) {
2736 case _MEM:
2737 counter = &memcg->memory;
2738 break;
2739 case _MEMSWAP:
2740 counter = &memcg->memsw;
2741 break;
2742 case _KMEM:
2743 counter = &memcg->kmem;
2744 break;
2745 case _TCP:
2746 counter = &memcg->tcpmem;
2747 break;
2748 default:
2749 BUG();
2750 }
2751
2752 switch (MEMFILE_ATTR(cft->private)) {
2753 case RES_USAGE:
2754 if (counter == &memcg->memory)
2755 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
2756 if (counter == &memcg->memsw)
2757 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
2758 return (u64)page_counter_read(counter) * PAGE_SIZE;
2759 case RES_LIMIT:
2760 return (u64)counter->limit * PAGE_SIZE;
2761 case RES_MAX_USAGE:
2762 return (u64)counter->watermark * PAGE_SIZE;
2763 case RES_FAILCNT:
2764 return counter->failcnt;
2765 case RES_SOFT_LIMIT:
2766 return (u64)memcg->soft_limit * PAGE_SIZE;
2767 default:
2768 BUG();
2769 }
2770}
2771
2772#ifndef CONFIG_SLOB
2773static int memcg_online_kmem(struct mem_cgroup *memcg)
2774{
2775 int memcg_id;
2776
2777 if (cgroup_memory_nokmem)
2778 return 0;
2779
2780 BUG_ON(memcg->kmemcg_id >= 0);
2781 BUG_ON(memcg->kmem_state);
2782
2783 memcg_id = memcg_alloc_cache_id();
2784 if (memcg_id < 0)
2785 return memcg_id;
2786
2787 static_branch_inc(&memcg_kmem_enabled_key);
2788 /*
2789 * A memory cgroup is considered kmem-online as soon as it gets
2790 * kmemcg_id. Setting the id after enabling static branching will
2791 * guarantee no one starts accounting before all call sites are
2792 * patched.
2793 */
2794 memcg->kmemcg_id = memcg_id;
2795 memcg->kmem_state = KMEM_ONLINE;
2796 INIT_LIST_HEAD(&memcg->kmem_caches);
2797
2798 return 0;
2799}
2800
2801static void memcg_offline_kmem(struct mem_cgroup *memcg)
2802{
2803 struct cgroup_subsys_state *css;
2804 struct mem_cgroup *parent, *child;
2805 int kmemcg_id;
2806
2807 if (memcg->kmem_state != KMEM_ONLINE)
2808 return;
2809 /*
2810 * Clear the online state before clearing memcg_caches array
2811 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
2812 * guarantees that no cache will be created for this cgroup
2813 * after we are done (see memcg_create_kmem_cache()).
2814 */
2815 memcg->kmem_state = KMEM_ALLOCATED;
2816
2817 memcg_deactivate_kmem_caches(memcg);
2818
2819 kmemcg_id = memcg->kmemcg_id;
2820 BUG_ON(kmemcg_id < 0);
2821
2822 parent = parent_mem_cgroup(memcg);
2823 if (!parent)
2824 parent = root_mem_cgroup;
2825
2826 /*
2827 * Change kmemcg_id of this cgroup and all its descendants to the
2828 * parent's id, and then move all entries from this cgroup's list_lrus
2829 * to ones of the parent. After we have finished, all list_lrus
2830 * corresponding to this cgroup are guaranteed to remain empty. The
2831 * ordering is imposed by list_lru_node->lock taken by
2832 * memcg_drain_all_list_lrus().
2833 */
2834 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
2835 css_for_each_descendant_pre(css, &memcg->css) {
2836 child = mem_cgroup_from_css(css);
2837 BUG_ON(child->kmemcg_id != kmemcg_id);
2838 child->kmemcg_id = parent->kmemcg_id;
2839 if (!memcg->use_hierarchy)
2840 break;
2841 }
2842 rcu_read_unlock();
2843
2844 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
2845
2846 memcg_free_cache_id(kmemcg_id);
2847}
2848
2849static void memcg_free_kmem(struct mem_cgroup *memcg)
2850{
2851 /* css_alloc() failed, offlining didn't happen */
2852 if (unlikely(memcg->kmem_state == KMEM_ONLINE))
2853 memcg_offline_kmem(memcg);
2854
2855 if (memcg->kmem_state == KMEM_ALLOCATED) {
2856 memcg_destroy_kmem_caches(memcg);
2857 static_branch_dec(&memcg_kmem_enabled_key);
2858 WARN_ON(page_counter_read(&memcg->kmem));
2859 }
2860}
2861#else
2862static int memcg_online_kmem(struct mem_cgroup *memcg)
2863{
2864 return 0;
2865}
2866static void memcg_offline_kmem(struct mem_cgroup *memcg)
2867{
2868}
2869static void memcg_free_kmem(struct mem_cgroup *memcg)
2870{
2871}
2872#endif /* !CONFIG_SLOB */
2873
2874static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
2875 unsigned long limit)
2876{
2877 int ret;
2878
2879 mutex_lock(&memcg_limit_mutex);
2880 ret = page_counter_limit(&memcg->kmem, limit);
2881 mutex_unlock(&memcg_limit_mutex);
2882 return ret;
2883}
2884
2885static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
2886{
2887 int ret;
2888
2889 mutex_lock(&memcg_limit_mutex);
2890
2891 ret = page_counter_limit(&memcg->tcpmem, limit);
2892 if (ret)
2893 goto out;
2894
2895 if (!memcg->tcpmem_active) {
2896 /*
2897 * The active flag needs to be written after the static_key
2898 * update. This is what guarantees that the socket activation
2899 * function is the last one to run. See mem_cgroup_sk_alloc()
2900 * for details, and note that we don't mark any socket as
2901 * belonging to this memcg until that flag is up.
2902 *
2903 * We need to do this, because static_keys will span multiple
2904 * sites, but we can't control their order. If we mark a socket
2905 * as accounted, but the accounting functions are not patched in
2906 * yet, we'll lose accounting.
2907 *
2908 * We never race with the readers in mem_cgroup_sk_alloc(),
2909 * because when this value changes, the code to process it is not
2910 * patched in yet.
2911 */
2912 static_branch_inc(&memcg_sockets_enabled_key);
2913 memcg->tcpmem_active = true;
2914 }
2915out:
2916 mutex_unlock(&memcg_limit_mutex);
2917 return ret;
2918}
2919
2920/*
2921 * The user of this function is...
2922 * RES_LIMIT.
2923 */
2924static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
2925 char *buf, size_t nbytes, loff_t off)
2926{
2927 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2928 unsigned long nr_pages;
2929 int ret;
2930
2931 buf = strstrip(buf);
2932 ret = page_counter_memparse(buf, "-1", &nr_pages);
2933 if (ret)
2934 return ret;
2935
2936 switch (MEMFILE_ATTR(of_cft(of)->private)) {
2937 case RES_LIMIT:
2938 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
2939 ret = -EINVAL;
2940 break;
2941 }
2942 switch (MEMFILE_TYPE(of_cft(of)->private)) {
2943 case _MEM:
2944 ret = mem_cgroup_resize_limit(memcg, nr_pages, false);
2945 break;
2946 case _MEMSWAP:
2947 ret = mem_cgroup_resize_limit(memcg, nr_pages, true);
2948 break;
2949 case _KMEM:
2950 ret = memcg_update_kmem_limit(memcg, nr_pages);
2951 break;
2952 case _TCP:
2953 ret = memcg_update_tcp_limit(memcg, nr_pages);
2954 break;
2955 }
2956 break;
2957 case RES_SOFT_LIMIT:
2958 memcg->soft_limit = nr_pages;
2959 ret = 0;
2960 break;
2961 }
2962 return ret ?: nbytes;
2963}
2964
2965static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
2966 size_t nbytes, loff_t off)
2967{
2968 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2969 struct page_counter *counter;
2970
2971 switch (MEMFILE_TYPE(of_cft(of)->private)) {
2972 case _MEM:
2973 counter = &memcg->memory;
2974 break;
2975 case _MEMSWAP:
2976 counter = &memcg->memsw;
2977 break;
2978 case _KMEM:
2979 counter = &memcg->kmem;
2980 break;
2981 case _TCP:
2982 counter = &memcg->tcpmem;
2983 break;
2984 default:
2985 BUG();
2986 }
2987
2988 switch (MEMFILE_ATTR(of_cft(of)->private)) {
2989 case RES_MAX_USAGE:
2990 page_counter_reset_watermark(counter);
2991 break;
2992 case RES_FAILCNT:
2993 counter->failcnt = 0;
2994 break;
2995 default:
2996 BUG();
2997 }
2998
2999 return nbytes;
3000}
3001
3002static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3003 struct cftype *cft)
3004{
3005 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3006}
3007
3008#ifdef CONFIG_MMU
3009static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3010 struct cftype *cft, u64 val)
3011{
3012 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3013
3014 if (val & ~MOVE_MASK)
3015 return -EINVAL;
3016
3017 /*
3018 * No kind of locking is needed in here, because ->can_attach() will
3019 * check this value once in the beginning of the process, and then carry
3020 * on with stale data. This means that changes to this value will only
3021 * affect task migrations starting after the change.
3022 */
3023 memcg->move_charge_at_immigrate = val;
3024 return 0;
3025}
3026#else
3027static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3028 struct cftype *cft, u64 val)
3029{
3030 return -ENOSYS;
3031}
3032#endif
3033
3034#ifdef CONFIG_NUMA
3035static int memcg_numa_stat_show(struct seq_file *m, void *v)
3036{
3037 struct numa_stat {
3038 const char *name;
3039 unsigned int lru_mask;
3040 };
3041
3042 static const struct numa_stat stats[] = {
3043 { "total", LRU_ALL },
3044 { "file", LRU_ALL_FILE },
3045 { "anon", LRU_ALL_ANON },
3046 { "unevictable", BIT(LRU_UNEVICTABLE) },
3047 };
3048 const struct numa_stat *stat;
3049 int nid;
3050 unsigned long nr;
3051 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3052
3053 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3054 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3055 seq_printf(m, "%s=%lu", stat->name, nr);
3056 for_each_node_state(nid, N_MEMORY) {
3057 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3058 stat->lru_mask);
3059 seq_printf(m, " N%d=%lu", nid, nr);
3060 }
3061 seq_putc(m, '\n');
3062 }
3063
3064 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3065 struct mem_cgroup *iter;
3066
3067 nr = 0;
3068 for_each_mem_cgroup_tree(iter, memcg)
3069 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3070 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3071 for_each_node_state(nid, N_MEMORY) {
3072 nr = 0;
3073 for_each_mem_cgroup_tree(iter, memcg)
3074 nr += mem_cgroup_node_nr_lru_pages(
3075 iter, nid, stat->lru_mask);
3076 seq_printf(m, " N%d=%lu", nid, nr);
3077 }
3078 seq_putc(m, '\n');
3079 }
3080
3081 return 0;
3082}
3083#endif /* CONFIG_NUMA */
3084
3085/* Universal VM events cgroup1 shows, original sort order */
3086unsigned int memcg1_events[] = {
3087 PGPGIN,
3088 PGPGOUT,
3089 PGFAULT,
3090 PGMAJFAULT,
3091};
3092
3093static const char *const memcg1_event_names[] = {
3094 "pgpgin",
3095 "pgpgout",
3096 "pgfault",
3097 "pgmajfault",
3098};
3099
3100static int memcg_stat_show(struct seq_file *m, void *v)
3101{
3102 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3103 unsigned long memory, memsw;
3104 struct mem_cgroup *mi;
3105 unsigned int i;
3106
3107 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
3108 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3109
3110 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3111 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3112 continue;
3113 seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
3114 memcg_page_state(memcg, memcg1_stats[i]) *
3115 PAGE_SIZE);
3116 }
3117
3118 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3119 seq_printf(m, "%s %lu\n", memcg1_event_names[i],
3120 memcg_sum_events(memcg, memcg1_events[i]));
3121
3122 for (i = 0; i < NR_LRU_LISTS; i++)
3123 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3124 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
3125
3126 /* Hierarchical information */
3127 memory = memsw = PAGE_COUNTER_MAX;
3128 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3129 memory = min(memory, mi->memory.limit);
3130 memsw = min(memsw, mi->memsw.limit);
3131 }
3132 seq_printf(m, "hierarchical_memory_limit %llu\n",
3133 (u64)memory * PAGE_SIZE);
3134 if (do_memsw_account())
3135 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3136 (u64)memsw * PAGE_SIZE);
3137
3138 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3139 unsigned long long val = 0;
3140
3141 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3142 continue;
3143 for_each_mem_cgroup_tree(mi, memcg)
3144 val += memcg_page_state(mi, memcg1_stats[i]) *
3145 PAGE_SIZE;
3146 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], val);
3147 }
3148
3149 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) {
3150 unsigned long long val = 0;
3151
3152 for_each_mem_cgroup_tree(mi, memcg)
3153 val += memcg_sum_events(mi, memcg1_events[i]);
3154 seq_printf(m, "total_%s %llu\n", memcg1_event_names[i], val);
3155 }
3156
3157 for (i = 0; i < NR_LRU_LISTS; i++) {
3158 unsigned long long val = 0;
3159
3160 for_each_mem_cgroup_tree(mi, memcg)
3161 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3162 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
3163 }
3164
3165#ifdef CONFIG_DEBUG_VM
3166 {
3167 pg_data_t *pgdat;
3168 struct mem_cgroup_per_node *mz;
3169 struct zone_reclaim_stat *rstat;
3170 unsigned long recent_rotated[2] = {0, 0};
3171 unsigned long recent_scanned[2] = {0, 0};
3172
3173 for_each_online_pgdat(pgdat) {
3174 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
3175 rstat = &mz->lruvec.reclaim_stat;
3176
3177 recent_rotated[0] += rstat->recent_rotated[0];
3178 recent_rotated[1] += rstat->recent_rotated[1];
3179 recent_scanned[0] += rstat->recent_scanned[0];
3180 recent_scanned[1] += rstat->recent_scanned[1];
3181 }
3182 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3183 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3184 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3185 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
3186 }
3187#endif
3188
3189 return 0;
3190}
3191
3192static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3193 struct cftype *cft)
3194{
3195 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3196
3197 return mem_cgroup_swappiness(memcg);
3198}
3199
3200static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3201 struct cftype *cft, u64 val)
3202{
3203 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3204
3205 if (val > 100)
3206 return -EINVAL;
3207
3208 if (css->parent)
3209 memcg->swappiness = val;
3210 else
3211 vm_swappiness = val;
3212
3213 return 0;
3214}
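
/*
 * From userspace (path illustrative): the per-memcg value follows the
 * global vm.swappiness semantics; values above 100 are rejected and a
 * write to the root cgroup's file updates vm_swappiness itself:
 *
 *	echo 60 > memory.swappiness		per-memcg value
 *	echo 200 > memory.swappiness		-EINVAL
 */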
3215
3216static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3217{
3218 struct mem_cgroup_threshold_ary *t;
3219 unsigned long usage;
3220 int i;
3221
3222 rcu_read_lock();
3223 if (!swap)
3224 t = rcu_dereference(memcg->thresholds.primary);
3225 else
3226 t = rcu_dereference(memcg->memsw_thresholds.primary);
3227
3228 if (!t)
3229 goto unlock;
3230
3231 usage = mem_cgroup_usage(memcg, swap);
3232
3233 /*
3234 * current_threshold points to threshold just below or equal to usage.
3235 * If it's not true, a threshold was crossed after last
3236 * call of __mem_cgroup_threshold().
3237 */
3238 i = t->current_threshold;
3239
3240 /*
3241 * Iterate backward over array of thresholds starting from
3242 * current_threshold and check if a threshold is crossed.
3243 * If none of the thresholds below usage is crossed, we read
3244 * only one element of the array here.
3245 */
3246 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3247 eventfd_signal(t->entries[i].eventfd, 1);
3248
3249 /* i = current_threshold + 1 */
3250 i++;
3251
3252 /*
3253 * Iterate forward over array of thresholds starting from
3254 * current_threshold+1 and check if a threshold is crossed.
3255 * If none of the thresholds above usage is crossed, we read
3256 * only one element of the array here.
3257 */
3258 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3259 eventfd_signal(t->entries[i].eventfd, 1);
3260
3261 /* Update current_threshold */
3262 t->current_threshold = i - 1;
3263unlock:
3264 rcu_read_unlock();
3265}
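
/*
 * Worked example: thresholds registered at 4M, 8M and 16M (kept
 * sorted), usage previously settled at 5M, so current_threshold = 0
 * (the 4M entry). Usage now reads 9M:
 *
 *	backward scan from i = 0: entries[0] = 4M <= 9M, nothing to signal
 *	forward scan from i = 1:  entries[1] = 8M <= 9M -> signal eventfd
 *	                          entries[2] = 16M > 9M -> stop
 *	current_threshold = 1
 *
 * A later drop back to 5M signals the 8M eventfd from the backward
 * scan and moves current_threshold back to 0.
 */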
3266
3267static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3268{
3269 while (memcg) {
3270 __mem_cgroup_threshold(memcg, false);
3271 if (do_memsw_account())
3272 __mem_cgroup_threshold(memcg, true);
3273
3274 memcg = parent_mem_cgroup(memcg);
3275 }
3276}
3277
3278static int compare_thresholds(const void *a, const void *b)
3279{
3280 const struct mem_cgroup_threshold *_a = a;
3281 const struct mem_cgroup_threshold *_b = b;
3282
3283 if (_a->threshold > _b->threshold)
3284 return 1;
3285
3286 if (_a->threshold < _b->threshold)
3287 return -1;
3288
3289 return 0;
3290}
3291
3292static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
3293{
3294 struct mem_cgroup_eventfd_list *ev;
3295
3296 spin_lock(&memcg_oom_lock);
3297
3298 list_for_each_entry(ev, &memcg->oom_notify, list)
3299 eventfd_signal(ev->eventfd, 1);
3300
3301 spin_unlock(&memcg_oom_lock);
3302 return 0;
3303}
3304
3305static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
3306{
3307 struct mem_cgroup *iter;
3308
3309 for_each_mem_cgroup_tree(iter, memcg)
3310 mem_cgroup_oom_notify_cb(iter);
3311}
3312
3313static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3314 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
3315{
3316 struct mem_cgroup_thresholds *thresholds;
3317 struct mem_cgroup_threshold_ary *new;
3318 unsigned long threshold;
3319 unsigned long usage;
3320 int i, size, ret;
3321
3322 ret = page_counter_memparse(args, "-1", &threshold);
3323 if (ret)
3324 return ret;
3325
3326 mutex_lock(&memcg->thresholds_lock);
3327
3328 if (type == _MEM) {
3329 thresholds = &memcg->thresholds;
3330 usage = mem_cgroup_usage(memcg, false);
3331 } else if (type == _MEMSWAP) {
3332 thresholds = &memcg->memsw_thresholds;
3333 usage = mem_cgroup_usage(memcg, true);
3334 } else
3335 BUG();
3336
3337 /* Check if a threshold crossed before adding a new one */
3338 if (thresholds->primary)
3339 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3340
3341 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3342
3343 /* Allocate memory for new array of thresholds */
3344 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3345 GFP_KERNEL);
3346 if (!new) {
3347 ret = -ENOMEM;
3348 goto unlock;
3349 }
3350 new->size = size;
3351
3352 /* Copy thresholds (if any) to new array */
3353 if (thresholds->primary) {
3354 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3355 sizeof(struct mem_cgroup_threshold));
3356 }
3357
3358 /* Add new threshold */
3359 new->entries[size - 1].eventfd = eventfd;
3360 new->entries[size - 1].threshold = threshold;
3361
3362 /* Sort thresholds. Registering of new threshold isn't time-critical */
3363 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3364 compare_thresholds, NULL);
3365
3366 /* Find current threshold */
3367 new->current_threshold = -1;
3368 for (i = 0; i < size; i++) {
3369 if (new->entries[i].threshold <= usage) {
3370 /*
3371 * new->current_threshold will not be used until
3372 * rcu_assign_pointer(), so it's safe to increment
3373 * it here.
3374 */
3375 ++new->current_threshold;
3376 } else
3377 break;
3378 }
3379
3380 /* Free old spare buffer and save old primary buffer as spare */
3381 kfree(thresholds->spare);
3382 thresholds->spare = thresholds->primary;
3383
3384 rcu_assign_pointer(thresholds->primary, new);
3385
3386 /* To be sure that nobody uses thresholds */
3387 synchronize_rcu();
3388
3389unlock:
3390 mutex_unlock(&memcg->thresholds_lock);
3391
3392 return ret;
3393}
3394
3395static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3396 struct eventfd_ctx *eventfd, const char *args)
3397{
3398 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
3399}
3400
3401static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
3402 struct eventfd_ctx *eventfd, const char *args)
3403{
3404 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
3405}
3406
3407static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3408 struct eventfd_ctx *eventfd, enum res_type type)
3409{
3410 struct mem_cgroup_thresholds *thresholds;
3411 struct mem_cgroup_threshold_ary *new;
3412 unsigned long usage;
3413 int i, j, size;
3414
3415 mutex_lock(&memcg->thresholds_lock);
3416
3417 if (type == _MEM) {
3418 thresholds = &memcg->thresholds;
3419 usage = mem_cgroup_usage(memcg, false);
3420 } else if (type == _MEMSWAP) {
3421 thresholds = &memcg->memsw_thresholds;
3422 usage = mem_cgroup_usage(memcg, true);
3423 } else
3424 BUG();
3425
3426 if (!thresholds->primary)
3427 goto unlock;
3428
3429 /* Check if a threshold crossed before removing */
3430 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3431
3432 /* Calculate the new number of thresholds */
3433 size = 0;
3434 for (i = 0; i < thresholds->primary->size; i++) {
3435 if (thresholds->primary->entries[i].eventfd != eventfd)
3436 size++;
3437 }
3438
3439 new = thresholds->spare;
3440
3441 /* Set thresholds array to NULL if we don't have thresholds */
3442 if (!size) {
3443 kfree(new);
3444 new = NULL;
3445 goto swap_buffers;
3446 }
3447
3448 new->size = size;
3449
3450 /* Copy thresholds and find current threshold */
3451 new->current_threshold = -1;
3452 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3453 if (thresholds->primary->entries[i].eventfd == eventfd)
3454 continue;
3455
3456 new->entries[j] = thresholds->primary->entries[i];
3457 if (new->entries[j].threshold <= usage) {
3458 /*
3459 * new->current_threshold will not be used
3460 * until rcu_assign_pointer(), so it's safe to increment
3461 * it here.
3462 */
3463 ++new->current_threshold;
3464 }
3465 j++;
3466 }
3467
3468swap_buffers:
3469 /* Swap primary and spare array */
3470 thresholds->spare = thresholds->primary;
3471
3472 rcu_assign_pointer(thresholds->primary, new);
3473
3474 /* To be sure that nobody uses thresholds */
3475 synchronize_rcu();
3476
3477 /* If all events are unregistered, free the spare array */
3478 if (!new) {
3479 kfree(thresholds->spare);
3480 thresholds->spare = NULL;
3481 }
3482unlock:
3483 mutex_unlock(&memcg->thresholds_lock);
3484}
3485
3486static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3487 struct eventfd_ctx *eventfd)
3488{
3489 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
3490}
3491
3492static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3493 struct eventfd_ctx *eventfd)
3494{
3495 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
3496}
3497
3498static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
3499 struct eventfd_ctx *eventfd, const char *args)
3500{
3501 struct mem_cgroup_eventfd_list *event;
3502
3503 event = kmalloc(sizeof(*event), GFP_KERNEL);
3504 if (!event)
3505 return -ENOMEM;
3506
3507 spin_lock(&memcg_oom_lock);
3508
3509 event->eventfd = eventfd;
3510 list_add(&event->list, &memcg->oom_notify);
3511
3512 /* already in OOM ? */
3513 if (memcg->under_oom)
3514 eventfd_signal(eventfd, 1);
3515 spin_unlock(&memcg_oom_lock);
3516
3517 return 0;
3518}
3519
3520static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
3521 struct eventfd_ctx *eventfd)
3522{
3523 struct mem_cgroup_eventfd_list *ev, *tmp;
3524
3525 spin_lock(&memcg_oom_lock);
3526
3527 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
3528 if (ev->eventfd == eventfd) {
3529 list_del(&ev->list);
3530 kfree(ev);
3531 }
3532 }
3533
3534 spin_unlock(&memcg_oom_lock);
3535}
3536
3537static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3538{
3539 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3540
3541 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
3542 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
3543 seq_printf(sf, "oom_kill %lu\n", memcg_sum_events(memcg, OOM_KILL));
3544 return 0;
3545}
3546
3547static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3548 struct cftype *cft, u64 val)
3549{
3550 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3551
3552 /* cannot be set on the root cgroup, and only 0 and 1 are allowed */
3553 if (!css->parent || !((val == 0) || (val == 1)))
3554 return -EINVAL;
3555
3556 memcg->oom_kill_disable = val;
3557 if (!val)
3558 memcg_oom_recover(memcg);
3559
3560 return 0;
3561}
3562
3563#ifdef CONFIG_CGROUP_WRITEBACK
3564
3565struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
3566{
3567 return &memcg->cgwb_list;
3568}
3569
3570static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3571{
3572 return wb_domain_init(&memcg->cgwb_domain, gfp);
3573}
3574
3575static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3576{
3577 wb_domain_exit(&memcg->cgwb_domain);
3578}
3579
3580static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3581{
3582 wb_domain_size_changed(&memcg->cgwb_domain);
3583}
3584
3585struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3586{
3587 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3588
3589 if (!memcg->css.parent)
3590 return NULL;
3591
3592 return &memcg->cgwb_domain;
3593}
3594
3595/**
3596 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3597 * @wb: bdi_writeback in question
3598 * @pfilepages: out parameter for number of file pages
3599 * @pheadroom: out parameter for number of allocatable pages according to memcg
3600 * @pdirty: out parameter for number of dirty pages
3601 * @pwriteback: out parameter for number of pages under writeback
3602 *
3603 * Determine the numbers of file, headroom, dirty, and writeback pages in
3604 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3605 * is a bit more involved.
3606 *
3607 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3608 * headroom is calculated as the lowest headroom of itself and the
3609 * ancestors. Note that this doesn't consider the actual amount of
3610 * available memory in the system. The caller should further cap
3611 * *@pheadroom accordingly.
3612 */
3613void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3614 unsigned long *pheadroom, unsigned long *pdirty,
3615 unsigned long *pwriteback)
3616{
3617 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3618 struct mem_cgroup *parent;
3619
3620 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
3621
3622 /* this should eventually include NR_UNSTABLE_NFS */
3623 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
3624 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3625 (1 << LRU_ACTIVE_FILE));
3626 *pheadroom = PAGE_COUNTER_MAX;
3627
3628 while ((parent = parent_mem_cgroup(memcg))) {
3629 unsigned long ceiling = min(memcg->memory.limit, memcg->high);
3630 unsigned long used = page_counter_read(&memcg->memory);
3631
3632 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3633 memcg = parent;
3634 }
3635}
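
/*
 * Headroom example for the loop above: wb's memcg has a 1G limit (high
 * unset) with 600M used -> 400M headroom; its parent has a 2G limit
 * with 1.8G used -> 200M; the root itself is never visited.
 * *pheadroom ends up as min(400M, 200M) = 200M.
 */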
3636
3637#else /* CONFIG_CGROUP_WRITEBACK */
3638
3639static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3640{
3641 return 0;
3642}
3643
3644static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3645{
3646}
3647
3648static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3649{
3650}
3651
3652#endif /* CONFIG_CGROUP_WRITEBACK */
3653
3654/*
3655 * DO NOT USE IN NEW FILES.
3656 *
3657 * "cgroup.event_control" implementation.
3658 *
3659 * This is way over-engineered. It tries to support fully configurable
3660 * events for each user. Such a level of flexibility is completely
3661 * unnecessary, especially in light of the planned unified hierarchy.
3662 *
3663 * Please deprecate this and replace with something simpler if at all
3664 * possible.
3665 */
3666
3667/*
3668 * Unregister event and free resources.
3669 *
3670 * Gets called from workqueue.
3671 */
3672static void memcg_event_remove(struct work_struct *work)
3673{
3674 struct mem_cgroup_event *event =
3675 container_of(work, struct mem_cgroup_event, remove);
3676 struct mem_cgroup *memcg = event->memcg;
3677
3678 remove_wait_queue(event->wqh, &event->wait);
3679
3680 event->unregister_event(memcg, event->eventfd);
3681
3682 /* Notify userspace the event is going away. */
3683 eventfd_signal(event->eventfd, 1);
3684
3685 eventfd_ctx_put(event->eventfd);
3686 kfree(event);
3687 css_put(&memcg->css);
3688}
3689
3690/*
3691 * Gets called on EPOLLHUP on eventfd when user closes it.
3692 *
3693 * Called with wqh->lock held and interrupts disabled.
3694 */
3695static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
3696 int sync, void *key)
3697{
3698 struct mem_cgroup_event *event =
3699 container_of(wait, struct mem_cgroup_event, wait);
3700 struct mem_cgroup *memcg = event->memcg;
3701 __poll_t flags = key_to_poll(key);
3702
3703 if (flags & EPOLLHUP) {
3704 /*
3705 * If the event has been detached at cgroup removal, we
3706 * can simply return knowing the other side will cleanup
3707 * for us.
3708 *
3709 * We can't race against event freeing since the other
3710 * side will require wqh->lock via remove_wait_queue(),
3711 * which we hold.
3712 */
3713 spin_lock(&memcg->event_list_lock);
3714 if (!list_empty(&event->list)) {
3715 list_del_init(&event->list);
3716 /*
3717 * We are in atomic context, but memcg_event_remove()
3718 * may sleep, so we have to call it in workqueue.
3719 */
3720 schedule_work(&event->remove);
3721 }
3722 spin_unlock(&memcg->event_list_lock);
3723 }
3724
3725 return 0;
3726}
3727
3728static void memcg_event_ptable_queue_proc(struct file *file,
3729 wait_queue_head_t *wqh, poll_table *pt)
3730{
3731 struct mem_cgroup_event *event =
3732 container_of(pt, struct mem_cgroup_event, pt);
3733
3734 event->wqh = wqh;
3735 add_wait_queue(wqh, &event->wait);
3736}
3737
3738/*
3739 * DO NOT USE IN NEW FILES.
3740 *
3741 * Parse input and register new cgroup event handler.
3742 *
3743 * Input must be in format '<event_fd> <control_fd> <args>'.
3744 * Interpretation of args is defined by control file implementation.
3745 */
3746static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
3747 char *buf, size_t nbytes, loff_t off)
3748{
3749 struct cgroup_subsys_state *css = of_css(of);
3750 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3751 struct mem_cgroup_event *event;
3752 struct cgroup_subsys_state *cfile_css;
3753 unsigned int efd, cfd;
3754 struct fd efile;
3755 struct fd cfile;
3756 const char *name;
3757 char *endp;
3758 int ret;
3759
3760 buf = strstrip(buf);
3761
3762 efd = simple_strtoul(buf, &endp, 10);
3763 if (*endp != ' ')
3764 return -EINVAL;
3765 buf = endp + 1;
3766
3767 cfd = simple_strtoul(buf, &endp, 10);
3768 if ((*endp != ' ') && (*endp != '\0'))
3769 return -EINVAL;
3770 buf = endp + 1;
3771
3772 event = kzalloc(sizeof(*event), GFP_KERNEL);
3773 if (!event)
3774 return -ENOMEM;
3775
3776 event->memcg = memcg;
3777 INIT_LIST_HEAD(&event->list);
3778 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
3779 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
3780 INIT_WORK(&event->remove, memcg_event_remove);
3781
3782 efile = fdget(efd);
3783 if (!efile.file) {
3784 ret = -EBADF;
3785 goto out_kfree;
3786 }
3787
3788 event->eventfd = eventfd_ctx_fileget(efile.file);
3789 if (IS_ERR(event->eventfd)) {
3790 ret = PTR_ERR(event->eventfd);
3791 goto out_put_efile;
3792 }
3793
3794 cfile = fdget(cfd);
3795 if (!cfile.file) {
3796 ret = -EBADF;
3797 goto out_put_eventfd;
3798 }
3799
3800 /* the process need read permission on control file */
3801 /* AV: shouldn't we check that it's been opened for read instead? */
3802 ret = inode_permission(file_inode(cfile.file), MAY_READ);
3803 if (ret < 0)
3804 goto out_put_cfile;
3805
3806 /*
3807 * Determine the event callbacks and set them in @event. This used
3808 * to be done via struct cftype but cgroup core no longer knows
3809 * about these events. The following is crude but the whole thing
3810 * is for compatibility anyway.
3811 *
3812 * DO NOT ADD NEW FILES.
3813 */
3814 name = cfile.file->f_path.dentry->d_name.name;
3815
3816 if (!strcmp(name, "memory.usage_in_bytes")) {
3817 event->register_event = mem_cgroup_usage_register_event;
3818 event->unregister_event = mem_cgroup_usage_unregister_event;
3819 } else if (!strcmp(name, "memory.oom_control")) {
3820 event->register_event = mem_cgroup_oom_register_event;
3821 event->unregister_event = mem_cgroup_oom_unregister_event;
3822 } else if (!strcmp(name, "memory.pressure_level")) {
3823 event->register_event = vmpressure_register_event;
3824 event->unregister_event = vmpressure_unregister_event;
3825 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
3826 event->register_event = memsw_cgroup_usage_register_event;
3827 event->unregister_event = memsw_cgroup_usage_unregister_event;
3828 } else {
3829 ret = -EINVAL;
3830 goto out_put_cfile;
3831 }
3832
3833 /*
3834 * Verify @cfile should belong to @css. Also, remaining events are
3835 * automatically removed on cgroup destruction but the removal is
3836 * asynchronous, so take an extra ref on @css.
3837 */
3838 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
3839 &memory_cgrp_subsys);
3840 ret = -EINVAL;
3841 if (IS_ERR(cfile_css))
3842 goto out_put_cfile;
3843 if (cfile_css != css) {
3844 css_put(cfile_css);
3845 goto out_put_cfile;
3846 }
3847
3848 ret = event->register_event(memcg, event->eventfd, buf);
3849 if (ret)
3850 goto out_put_css;
3851
3852 efile.file->f_op->poll(efile.file, &event->pt);
3853
3854 spin_lock(&memcg->event_list_lock);
3855 list_add(&event->list, &memcg->event_list);
3856 spin_unlock(&memcg->event_list_lock);
3857
3858 fdput(cfile);
3859 fdput(efile);
3860
3861 return nbytes;
3862
3863out_put_css:
3864 css_put(css);
3865out_put_cfile:
3866 fdput(cfile);
3867out_put_eventfd:
3868 eventfd_ctx_put(event->eventfd);
3869out_put_efile:
3870 fdput(efile);
3871out_kfree:
3872 kfree(event);
3873
3874 return ret;
3875}
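
/*
 * Userspace side of the interface parsed above, sketched (the fds and
 * the 50M argument are illustrative):
 *
 *	efd = eventfd(0, 0);
 *	cfd = open("memory.usage_in_bytes", O_RDONLY);
 *	ecd = open("cgroup.event_control", O_WRONLY);
 *	dprintf(ecd, "%d %d 50M", efd, cfd);
 *	read(efd, &cnt, sizeof(cnt));	blocks until usage crosses 50M
 */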
3876
3877static struct cftype mem_cgroup_legacy_files[] = {
3878 {
3879 .name = "usage_in_bytes",
3880 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3881 .read_u64 = mem_cgroup_read_u64,
3882 },
3883 {
3884 .name = "max_usage_in_bytes",
3885 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
3886 .write = mem_cgroup_reset,
3887 .read_u64 = mem_cgroup_read_u64,
3888 },
3889 {
3890 .name = "limit_in_bytes",
3891 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
3892 .write = mem_cgroup_write,
3893 .read_u64 = mem_cgroup_read_u64,
3894 },
3895 {
3896 .name = "soft_limit_in_bytes",
3897 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3898 .write = mem_cgroup_write,
3899 .read_u64 = mem_cgroup_read_u64,
3900 },
3901 {
3902 .name = "failcnt",
3903 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
3904 .write = mem_cgroup_reset,
3905 .read_u64 = mem_cgroup_read_u64,
3906 },
3907 {
3908 .name = "stat",
3909 .seq_show = memcg_stat_show,
3910 },
3911 {
3912 .name = "force_empty",
3913 .write = mem_cgroup_force_empty_write,
3914 },
3915 {
3916 .name = "use_hierarchy",
3917 .write_u64 = mem_cgroup_hierarchy_write,
3918 .read_u64 = mem_cgroup_hierarchy_read,
3919 },
3920 {
3921 .name = "cgroup.event_control", /* XXX: for compat */
3922 .write = memcg_write_event_control,
3923 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
3924 },
3925 {
3926 .name = "swappiness",
3927 .read_u64 = mem_cgroup_swappiness_read,
3928 .write_u64 = mem_cgroup_swappiness_write,
3929 },
3930 {
3931 .name = "move_charge_at_immigrate",
3932 .read_u64 = mem_cgroup_move_charge_read,
3933 .write_u64 = mem_cgroup_move_charge_write,
3934 },
3935 {
3936 .name = "oom_control",
3937 .seq_show = mem_cgroup_oom_control_read,
3938 .write_u64 = mem_cgroup_oom_control_write,
3939 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3940 },
3941 {
3942 .name = "pressure_level",
3943 },
3944#ifdef CONFIG_NUMA
3945 {
3946 .name = "numa_stat",
3947 .seq_show = memcg_numa_stat_show,
3948 },
3949#endif
3950 {
3951 .name = "kmem.limit_in_bytes",
3952 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
3953 .write = mem_cgroup_write,
3954 .read_u64 = mem_cgroup_read_u64,
3955 },
3956 {
3957 .name = "kmem.usage_in_bytes",
3958 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
3959 .read_u64 = mem_cgroup_read_u64,
3960 },
3961 {
3962 .name = "kmem.failcnt",
3963 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
3964 .write = mem_cgroup_reset,
3965 .read_u64 = mem_cgroup_read_u64,
3966 },
3967 {
3968 .name = "kmem.max_usage_in_bytes",
3969 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
3970 .write = mem_cgroup_reset,
3971 .read_u64 = mem_cgroup_read_u64,
3972 },
3973#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
3974 {
3975 .name = "kmem.slabinfo",
3976 .seq_start = memcg_slab_start,
3977 .seq_next = memcg_slab_next,
3978 .seq_stop = memcg_slab_stop,
3979 .seq_show = memcg_slab_show,
3980 },
3981#endif
3982 {
3983 .name = "kmem.tcp.limit_in_bytes",
3984 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
3985 .write = mem_cgroup_write,
3986 .read_u64 = mem_cgroup_read_u64,
3987 },
3988 {
3989 .name = "kmem.tcp.usage_in_bytes",
3990 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
3991 .read_u64 = mem_cgroup_read_u64,
3992 },
3993 {
3994 .name = "kmem.tcp.failcnt",
3995 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
3996 .write = mem_cgroup_reset,
3997 .read_u64 = mem_cgroup_read_u64,
3998 },
3999 {
4000 .name = "kmem.tcp.max_usage_in_bytes",
4001 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4002 .write = mem_cgroup_reset,
4003 .read_u64 = mem_cgroup_read_u64,
4004 },
4005 { }, /* terminate */
4006};
4007
4008/*
4009 * Private memory cgroup IDR
4010 *
4011 * Swap-out records and page cache shadow entries need to store memcg
4012 * references in constrained space, so we maintain an ID space that is
4013 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
4014 * memory-controlled cgroups to 64k.
4015 *
4016 * However, there usually are many references to the offline CSS after
4017 * the cgroup has been destroyed, such as page cache or reclaimable
4018 * slab objects, that don't need to hang on to the ID. We want to keep
4019 * those dead CSS from occupying IDs, or we might quickly exhaust the
4020 * relatively small ID space and prevent the creation of new cgroups
4021 * even when there are far fewer than 64k cgroups - possibly none.
4022 *
4023 * Maintain a private 16-bit ID space for memcg, and allow the ID to
4024 * be freed and recycled when it's no longer needed, which is usually
4025 * when the CSS is offlined.
4026 *
4027 * The only exceptions to that are records of swapped out tmpfs/shmem
4028 * pages that need to be attributed to live ancestors on swapin. But
4029 * those references are manageable from userspace.
4030 */
4031
4032static DEFINE_IDR(mem_cgroup_idr);
4033
4034static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
4035{
4036 VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0);
4037 atomic_add(n, &memcg->id.ref);
4038}
4039
4040static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
4041{
4042 VM_BUG_ON(atomic_read(&memcg->id.ref) < n);
4043 if (atomic_sub_and_test(n, &memcg->id.ref)) {
4044 idr_remove(&mem_cgroup_idr, memcg->id.id);
4045 memcg->id.id = 0;
4046
4047 /* Memcg ID pins CSS */
4048 css_put(&memcg->css);
4049 }
4050}
4051
4052static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
4053{
4054 mem_cgroup_id_get_many(memcg, 1);
4055}
4056
4057static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
4058{
4059 mem_cgroup_id_put_many(memcg, 1);
4060}
4061
4062/**
4063 * mem_cgroup_from_id - look up a memcg from a memcg id
4064 * @id: the memcg id to look up
4065 *
4066 * Caller must hold rcu_read_lock().
4067 */
4068struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
4069{
4070 WARN_ON_ONCE(!rcu_read_lock_held());
4071 return idr_find(&mem_cgroup_idr, id);
4072}
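
/*
 * Illustrative lookup pattern (a sketch mirroring the swapin charge path
 * later in this file): the ID recorded at swap-out time is resolved back
 * to a memcg under RCU, and the css must be pinned before use because the
 * memcg may be going offline concurrently:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */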
4073
4074static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4075{
4076 struct mem_cgroup_per_node *pn;
4077 int tmp = node;
4078 /*
4079 * This routine is called against all possible nodes.
4080 * But it's a BUG to call kmalloc() against an offline node.
4081 *
4082 * TODO: this routine can waste much memory for nodes which will
4083 * never be onlined. It's better to use a memory hotplug callback
4084 * function.
4085 */
4086 if (!node_state(node, N_NORMAL_MEMORY))
4087 tmp = -1;
4088 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4089 if (!pn)
4090 return 1;
4091
4092 pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
4093 if (!pn->lruvec_stat_cpu) {
4094 kfree(pn);
4095 return 1;
4096 }
4097
4098 lruvec_init(&pn->lruvec);
4099 pn->usage_in_excess = 0;
4100 pn->on_tree = false;
4101 pn->memcg = memcg;
4102
4103 memcg->nodeinfo[node] = pn;
4104 return 0;
4105}
4106
4107static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4108{
4109 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
4110
4111 if (!pn)
4112 return;
4113
4114 free_percpu(pn->lruvec_stat_cpu);
4115 kfree(pn);
4116}
4117
4118static void __mem_cgroup_free(struct mem_cgroup *memcg)
4119{
4120 int node;
4121
4122 for_each_node(node)
4123 free_mem_cgroup_per_node_info(memcg, node);
4124 free_percpu(memcg->stat_cpu);
4125 kfree(memcg);
4126}
4127
4128static void mem_cgroup_free(struct mem_cgroup *memcg)
4129{
4130 memcg_wb_domain_exit(memcg);
4131 __mem_cgroup_free(memcg);
4132}
4133
4134static struct mem_cgroup *mem_cgroup_alloc(void)
4135{
4136 struct mem_cgroup *memcg;
4137 size_t size;
4138 int node;
4139
4140 size = sizeof(struct mem_cgroup);
4141 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
4142
4143 memcg = kzalloc(size, GFP_KERNEL);
4144 if (!memcg)
4145 return NULL;
4146
4147 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
4148 1, MEM_CGROUP_ID_MAX,
4149 GFP_KERNEL);
4150 if (memcg->id.id < 0)
4151 goto fail;
4152
4153 memcg->stat_cpu = alloc_percpu(struct mem_cgroup_stat_cpu);
4154 if (!memcg->stat_cpu)
4155 goto fail;
4156
4157 for_each_node(node)
4158 if (alloc_mem_cgroup_per_node_info(memcg, node))
4159 goto fail;
4160
4161 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4162 goto fail;
4163
4164 INIT_WORK(&memcg->high_work, high_work_func);
4165 memcg->last_scanned_node = MAX_NUMNODES;
4166 INIT_LIST_HEAD(&memcg->oom_notify);
4167 mutex_init(&memcg->thresholds_lock);
4168 spin_lock_init(&memcg->move_lock);
4169 vmpressure_init(&memcg->vmpressure);
4170 INIT_LIST_HEAD(&memcg->event_list);
4171 spin_lock_init(&memcg->event_list_lock);
4172 memcg->socket_pressure = jiffies;
4173#ifndef CONFIG_SLOB
4174 memcg->kmemcg_id = -1;
4175#endif
4176#ifdef CONFIG_CGROUP_WRITEBACK
4177 INIT_LIST_HEAD(&memcg->cgwb_list);
4178#endif
4179 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
4180 return memcg;
4181fail:
4182 if (memcg->id.id > 0)
4183 idr_remove(&mem_cgroup_idr, memcg->id.id);
4184 __mem_cgroup_free(memcg);
4185 return NULL;
4186}
4187
4188static struct cgroup_subsys_state * __ref
4189mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4190{
4191 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4192 struct mem_cgroup *memcg;
4193 long error = -ENOMEM;
4194
4195 memcg = mem_cgroup_alloc();
4196 if (!memcg)
4197 return ERR_PTR(error);
4198
4199 memcg->high = PAGE_COUNTER_MAX;
4200 memcg->soft_limit = PAGE_COUNTER_MAX;
4201 if (parent) {
4202 memcg->swappiness = mem_cgroup_swappiness(parent);
4203 memcg->oom_kill_disable = parent->oom_kill_disable;
4204 }
4205 if (parent && parent->use_hierarchy) {
4206 memcg->use_hierarchy = true;
4207 page_counter_init(&memcg->memory, &parent->memory);
4208 page_counter_init(&memcg->swap, &parent->swap);
4209 page_counter_init(&memcg->memsw, &parent->memsw);
4210 page_counter_init(&memcg->kmem, &parent->kmem);
4211 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
4212 } else {
4213 page_counter_init(&memcg->memory, NULL);
4214 page_counter_init(&memcg->swap, NULL);
4215 page_counter_init(&memcg->memsw, NULL);
4216 page_counter_init(&memcg->kmem, NULL);
4217 page_counter_init(&memcg->tcpmem, NULL);
4218 /*
4219 * A deeper hierarchy with use_hierarchy == false doesn't make
4220 * much sense, so let the cgroup subsystem know about this
4221 * unfortunate state in our controller.
4222 */
4223 if (parent != root_mem_cgroup)
4224 memory_cgrp_subsys.broken_hierarchy = true;
4225 }
4226
4227 /* The following stuff does not apply to the root */
4228 if (!parent) {
4229 root_mem_cgroup = memcg;
4230 return &memcg->css;
4231 }
4232
4233 error = memcg_online_kmem(memcg);
4234 if (error)
4235 goto fail;
4236
4237 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4238 static_branch_inc(&memcg_sockets_enabled_key);
4239
4240 return &memcg->css;
4241fail:
4242 mem_cgroup_free(memcg);
4243 return ERR_PTR(-ENOMEM);
4244}
4245
4246static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
4247{
4248 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4249
4250 /* Online state pins memcg ID, memcg ID pins CSS */
4251 atomic_set(&memcg->id.ref, 1);
4252 css_get(css);
4253 return 0;
4254}
4255
4256static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4257{
4258 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4259 struct mem_cgroup_event *event, *tmp;
4260
4261 /*
4262 * Unregister events and notify userspace.
4263 * Notify userspace about cgroup removing only after rmdir of cgroup
4264 * Notify userspace about cgroup removal only after rmdir of the cgroup
4265 * directory to avoid a race between userspace and kernelspace.
4266 spin_lock(&memcg->event_list_lock);
4267 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
4268 list_del_init(&event->list);
4269 schedule_work(&event->remove);
4270 }
4271 spin_unlock(&memcg->event_list_lock);
4272
4273 memcg->low = 0;
4274
4275 memcg_offline_kmem(memcg);
4276 wb_memcg_offline(memcg);
4277
4278 mem_cgroup_id_put(memcg);
4279}
4280
4281static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4282{
4283 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4284
4285 invalidate_reclaim_iterators(memcg);
4286}
4287
4288static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
4289{
4290 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4291
4292 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4293 static_branch_dec(&memcg_sockets_enabled_key);
4294
4295 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
4296 static_branch_dec(&memcg_sockets_enabled_key);
4297
4298 vmpressure_cleanup(&memcg->vmpressure);
4299 cancel_work_sync(&memcg->high_work);
4300 mem_cgroup_remove_from_trees(memcg);
4301 memcg_free_kmem(memcg);
4302 mem_cgroup_free(memcg);
4303}
4304
4305/**
4306 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4307 * @css: the target css
4308 *
4309 * Reset the states of the mem_cgroup associated with @css. This is
4310 * invoked when the userland requests disabling on the default hierarchy
4311 * but the memcg is pinned through dependency. The memcg should stop
4312 * applying policies and should revert to the vanilla state as it may be
4313 * made visible again.
4314 *
4315 * The current implementation only resets the essential configurations.
4316 * This needs to be expanded to cover all the visible parts.
4317 */
4318static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4319{
4320 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4321
4322 page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX);
4323 page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX);
4324 page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX);
4325 page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX);
4326 page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX);
4327 memcg->low = 0;
4328 memcg->high = PAGE_COUNTER_MAX;
4329 memcg->soft_limit = PAGE_COUNTER_MAX;
4330 memcg_wb_domain_size_changed(memcg);
4331}
4332
4333#ifdef CONFIG_MMU
4334/* Handlers for move charge at task migration. */
4335static int mem_cgroup_do_precharge(unsigned long count)
4336{
4337 int ret;
4338
4339 /* Try a single bulk charge without reclaim first, kswapd may wake */
4340 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
4341 if (!ret) {
4342 mc.precharge += count;
4343 return ret;
4344 }
4345
4346 /* Try charges one by one with reclaim, but do not retry */
4347 while (count--) {
4348 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
4349 if (ret)
4350 return ret;
4351 mc.precharge++;
4352 cond_resched();
4353 }
4354 return 0;
4355}
4356
4357union mc_target {
4358 struct page *page;
4359 swp_entry_t ent;
4360};
4361
4362enum mc_target_type {
4363 MC_TARGET_NONE = 0,
4364 MC_TARGET_PAGE,
4365 MC_TARGET_SWAP,
4366 MC_TARGET_DEVICE,
4367};
4368
4369static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4370 unsigned long addr, pte_t ptent)
4371{
4372 struct page *page = _vm_normal_page(vma, addr, ptent, true);
4373
4374 if (!page || !page_mapped(page))
4375 return NULL;
4376 if (PageAnon(page)) {
4377 if (!(mc.flags & MOVE_ANON))
4378 return NULL;
4379 } else {
4380 if (!(mc.flags & MOVE_FILE))
4381 return NULL;
4382 }
4383 if (!get_page_unless_zero(page))
4384 return NULL;
4385
4386 return page;
4387}
4388
4389#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
4390static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4391 pte_t ptent, swp_entry_t *entry)
4392{
4393 struct page *page = NULL;
4394 swp_entry_t ent = pte_to_swp_entry(ptent);
4395
4396 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
4397 return NULL;
4398
4399 /*
4400 * Handle MEMORY_DEVICE_PRIVATE, i.e. ZONE_DEVICE pages belonging to
4401 * a device; because they are not accessible by the CPU they are stored
4402 * as special swap entries in the CPU page table.
4403 */
4404 if (is_device_private_entry(ent)) {
4405 page = device_private_entry_to_page(ent);
4406 /*
4407 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has
4408 * a refcount of 1 when free (unlike a normal page).
4409 */
4410 if (!page_ref_add_unless(page, 1, 1))
4411 return NULL;
4412 return page;
4413 }
4414
4415 /*
4416 * Because lookup_swap_cache() updates some statistics counters,
4417 * we call find_get_page() with swapper_space directly.
4418 */
4419 page = find_get_page(swap_address_space(ent), swp_offset(ent));
4420 if (do_memsw_account())
4421 entry->val = ent.val;
4422
4423 return page;
4424}
4425#else
4426static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4427 pte_t ptent, swp_entry_t *entry)
4428{
4429 return NULL;
4430}
4431#endif
4432
4433static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4434 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4435{
4436 struct page *page = NULL;
4437 struct address_space *mapping;
4438 pgoff_t pgoff;
4439
4440 if (!vma->vm_file) /* anonymous vma */
4441 return NULL;
4442 if (!(mc.flags & MOVE_FILE))
4443 return NULL;
4444
4445 mapping = vma->vm_file->f_mapping;
4446 pgoff = linear_page_index(vma, addr);
4447
4448 /* the page is moved even if it's not RSS of this task (page-faulted). */
4449#ifdef CONFIG_SWAP
4450 /* shmem/tmpfs may report page out on swap: account for that too. */
4451 if (shmem_mapping(mapping)) {
4452 page = find_get_entry(mapping, pgoff);
4453 if (radix_tree_exceptional_entry(page)) {
4454 swp_entry_t swp = radix_to_swp_entry(page);
4455 if (do_memsw_account())
4456 *entry = swp;
4457 page = find_get_page(swap_address_space(swp),
4458 swp_offset(swp));
4459 }
4460 } else
4461 page = find_get_page(mapping, pgoff);
4462#else
4463 page = find_get_page(mapping, pgoff);
4464#endif
4465 return page;
4466}
4467
4468/**
4469 * mem_cgroup_move_account - move account of the page
4470 * @page: the page
4471 * @compound: charge the page as compound or small page
4472 * @from: mem_cgroup which the page is moved from.
4473 * @to: mem_cgroup which the page is moved to. @from != @to.
4474 *
4475 * The caller must make sure the page is not on LRU (isolate_page() is useful.)
4476 *
4477 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
4478 * from old cgroup.
4479 */
4480static int mem_cgroup_move_account(struct page *page,
4481 bool compound,
4482 struct mem_cgroup *from,
4483 struct mem_cgroup *to)
4484{
4485 unsigned long flags;
4486 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
4487 int ret;
4488 bool anon;
4489
4490 VM_BUG_ON(from == to);
4491 VM_BUG_ON_PAGE(PageLRU(page), page);
4492 VM_BUG_ON(compound && !PageTransHuge(page));
4493
4494 /*
4495 * Prevent mem_cgroup_migrate() from looking at
4496 * page->mem_cgroup of its source page while we change it.
4497 */
4498 ret = -EBUSY;
4499 if (!trylock_page(page))
4500 goto out;
4501
4502 ret = -EINVAL;
4503 if (page->mem_cgroup != from)
4504 goto out_unlock;
4505
4506 anon = PageAnon(page);
4507
4508 spin_lock_irqsave(&from->move_lock, flags);
4509
4510 if (!anon && page_mapped(page)) {
4511 __mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages);
4512 __mod_memcg_state(to, NR_FILE_MAPPED, nr_pages);
4513 }
4514
4515 /*
4516 * move_lock is grabbed above and the caller set from->moving_account, so
4517 * mod_memcg_page_state() will serialize updates to PageDirty.
4518 * The mapping should therefore be stable for dirty pages.
4519 */
4520 if (!anon && PageDirty(page)) {
4521 struct address_space *mapping = page_mapping(page);
4522
4523 if (mapping_cap_account_dirty(mapping)) {
4524 __mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages);
4525 __mod_memcg_state(to, NR_FILE_DIRTY, nr_pages);
4526 }
4527 }
4528
4529 if (PageWriteback(page)) {
4530 __mod_memcg_state(from, NR_WRITEBACK, -nr_pages);
4531 __mod_memcg_state(to, NR_WRITEBACK, nr_pages);
4532 }
4533
4534 /*
4535 * It is safe to change page->mem_cgroup here because the page
4536 * is referenced, charged, and isolated - we can't race with
4537 * uncharging, charging, migration, or LRU putback.
4538 */
4539
4540 /* caller should have done css_get */
4541 page->mem_cgroup = to;
4542 spin_unlock_irqrestore(&from->move_lock, flags);
4543
4544 ret = 0;
4545
4546 local_irq_disable();
4547 mem_cgroup_charge_statistics(to, page, compound, nr_pages);
4548 memcg_check_events(to, page);
4549 mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
4550 memcg_check_events(from, page);
4551 local_irq_enable();
4552out_unlock:
4553 unlock_page(page);
4554out:
4555 return ret;
4556}
4557
4558/**
4559 * get_mctgt_type - get target type of moving charge
4560 * @vma: the vma to which the pte to be checked belongs
4561 * @addr: the address corresponding to the pte to be checked
4562 * @ptent: the pte to be checked
4563 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
4564 *
4565 * Returns
4566 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
4567 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4568 * move charge. If @target is not NULL, the page is stored in target->page
4569 * with an extra refcount taken (callers should handle it).
4570 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4571 * target for charge migration. If @target is not NULL, the entry is stored
4572 * in target->ent.
4573 * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is MEMORY_DEVICE_PUBLIC
4574 * or MEMORY_DEVICE_PRIVATE (i.e. a ZONE_DEVICE page, thus not on the lru).
4575 * For now such a page is charged like a regular page would be, as for all
4576 * intents and purposes it is just special memory taking the place of a
4577 * regular page.
4578 *
4579 * See Documentation/vm/hmm.txt and include/linux/hmm.h
4580 *
4581 * Called with pte lock held.
4582 */
4583
4584static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
4585 unsigned long addr, pte_t ptent, union mc_target *target)
4586{
4587 struct page *page = NULL;
4588 enum mc_target_type ret = MC_TARGET_NONE;
4589 swp_entry_t ent = { .val = 0 };
4590
4591 if (pte_present(ptent))
4592 page = mc_handle_present_pte(vma, addr, ptent);
4593 else if (is_swap_pte(ptent))
4594 page = mc_handle_swap_pte(vma, ptent, &ent);
4595 else if (pte_none(ptent))
4596 page = mc_handle_file_pte(vma, addr, ptent, &ent);
4597
4598 if (!page && !ent.val)
4599 return ret;
4600 if (page) {
4601 /*
4602 * Do only a loose check w/o serialization.
4603 * mem_cgroup_move_account() checks whether the page is valid or
4604 * not under LRU exclusion.
4605 */
4606 if (page->mem_cgroup == mc.from) {
4607 ret = MC_TARGET_PAGE;
4608 if (is_device_private_page(page) ||
4609 is_device_public_page(page))
4610 ret = MC_TARGET_DEVICE;
4611 if (target)
4612 target->page = page;
4613 }
4614 if (!ret || !target)
4615 put_page(page);
4616 }
4617 /*
4618 * There is a swap entry and a page doesn't exist or isn't charged.
4619 * But we cannot move a tail-page in a THP.
4620 */
4621 if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
4622 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
4623 ret = MC_TARGET_SWAP;
4624 if (target)
4625 target->ent = ent;
4626 }
4627 return ret;
4628}
4629
4630#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4631/*
4632 * We don't consider PMD mapped swapping or file mapped pages because THP does
4633 * not support them for now.
4634 * Caller should make sure that pmd_trans_huge(pmd) is true.
4635 */
4636static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4637 unsigned long addr, pmd_t pmd, union mc_target *target)
4638{
4639 struct page *page = NULL;
4640 enum mc_target_type ret = MC_TARGET_NONE;
4641
4642 if (unlikely(is_swap_pmd(pmd))) {
4643 VM_BUG_ON(thp_migration_supported() &&
4644 !is_pmd_migration_entry(pmd));
4645 return ret;
4646 }
4647 page = pmd_page(pmd);
4648 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
4649 if (!(mc.flags & MOVE_ANON))
4650 return ret;
4651 if (page->mem_cgroup == mc.from) {
4652 ret = MC_TARGET_PAGE;
4653 if (target) {
4654 get_page(page);
4655 target->page = page;
4656 }
4657 }
4658 return ret;
4659}
4660#else
4661static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4662 unsigned long addr, pmd_t pmd, union mc_target *target)
4663{
4664 return MC_TARGET_NONE;
4665}
4666#endif
4667
4668static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4669 unsigned long addr, unsigned long end,
4670 struct mm_walk *walk)
4671{
4672 struct vm_area_struct *vma = walk->vma;
4673 pte_t *pte;
4674 spinlock_t *ptl;
4675
4676 ptl = pmd_trans_huge_lock(pmd, vma);
4677 if (ptl) {
4678 /*
4679 * Note there can not be MC_TARGET_DEVICE for now as we do not
4680 * support transparent huge pages with MEMORY_DEVICE_PUBLIC or
4681 * MEMORY_DEVICE_PRIVATE, but this might change.
4682 */
4683 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
4684 mc.precharge += HPAGE_PMD_NR;
4685 spin_unlock(ptl);
4686 return 0;
4687 }
4688
4689 if (pmd_trans_unstable(pmd))
4690 return 0;
4691 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4692 for (; addr != end; pte++, addr += PAGE_SIZE)
4693 if (get_mctgt_type(vma, addr, *pte, NULL))
4694 mc.precharge++; /* increment precharge temporarily */
4695 pte_unmap_unlock(pte - 1, ptl);
4696 cond_resched();
4697
4698 return 0;
4699}
4700
4701static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4702{
4703 unsigned long precharge;
4704
4705 struct mm_walk mem_cgroup_count_precharge_walk = {
4706 .pmd_entry = mem_cgroup_count_precharge_pte_range,
4707 .mm = mm,
4708 };
4709 down_read(&mm->mmap_sem);
4710 walk_page_range(0, mm->highest_vm_end,
4711 &mem_cgroup_count_precharge_walk);
4712 up_read(&mm->mmap_sem);
4713
4714 precharge = mc.precharge;
4715 mc.precharge = 0;
4716
4717 return precharge;
4718}
4719
4720static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4721{
4722 unsigned long precharge = mem_cgroup_count_precharge(mm);
4723
4724 VM_BUG_ON(mc.moving_task);
4725 mc.moving_task = current;
4726 return mem_cgroup_do_precharge(precharge);
4727}
4728
4729/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
4730static void __mem_cgroup_clear_mc(void)
4731{
4732 struct mem_cgroup *from = mc.from;
4733 struct mem_cgroup *to = mc.to;
4734
4735 /* we must uncharge all the leftover precharges from mc.to */
4736 if (mc.precharge) {
4737 cancel_charge(mc.to, mc.precharge);
4738 mc.precharge = 0;
4739 }
4740 /*
4741 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4742 * we must uncharge here.
4743 */
4744 if (mc.moved_charge) {
4745 cancel_charge(mc.from, mc.moved_charge);
4746 mc.moved_charge = 0;
4747 }
4748 /* we must fixup refcnts and charges */
4749 if (mc.moved_swap) {
4750 /* uncharge swap account from the old cgroup */
4751 if (!mem_cgroup_is_root(mc.from))
4752 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
4753
4754 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
4755
4756 /*
4757 * we charged both to->memory and to->memsw, so we
4758 * should uncharge to->memory.
4759 */
4760 if (!mem_cgroup_is_root(mc.to))
4761 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
4762
4763 mem_cgroup_id_get_many(mc.to, mc.moved_swap);
4764 css_put_many(&mc.to->css, mc.moved_swap);
4765
4766 mc.moved_swap = 0;
4767 }
4768 memcg_oom_recover(from);
4769 memcg_oom_recover(to);
4770 wake_up_all(&mc.waitq);
4771}
4772
4773static void mem_cgroup_clear_mc(void)
4774{
4775 struct mm_struct *mm = mc.mm;
4776
4777 /*
4778 * we must clear moving_task before waking up waiters at the end of
4779 * task migration.
4780 */
4781 mc.moving_task = NULL;
4782 __mem_cgroup_clear_mc();
4783 spin_lock(&mc.lock);
4784 mc.from = NULL;
4785 mc.to = NULL;
4786 mc.mm = NULL;
4787 spin_unlock(&mc.lock);
4788
4789 mmput(mm);
4790}
4791
4792static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4793{
4794 struct cgroup_subsys_state *css;
4795 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
4796 struct mem_cgroup *from;
4797 struct task_struct *leader, *p;
4798 struct mm_struct *mm;
4799 unsigned long move_flags;
4800 int ret = 0;
4801
4802 /* charge immigration isn't supported on the default hierarchy */
4803 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4804 return 0;
4805
4806 /*
4807 * Multi-process migrations only happen on the default hierarchy
4808 * where charge immigration is not used. Perform charge
4809 * immigration if @tset contains a leader and whine if there are
4810 * multiple.
4811 */
4812 p = NULL;
4813 cgroup_taskset_for_each_leader(leader, css, tset) {
4814 WARN_ON_ONCE(p);
4815 p = leader;
4816 memcg = mem_cgroup_from_css(css);
4817 }
4818 if (!p)
4819 return 0;
4820
4821 /*
4822 * We are now committed to this value whatever it is. Changes in this
4823 * tunable will only affect upcoming migrations, not the current one.
4824 * So we need to save it, and keep it going.
4825 */
4826 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4827 if (!move_flags)
4828 return 0;
4829
4830 from = mem_cgroup_from_task(p);
4831
4832 VM_BUG_ON(from == memcg);
4833
4834 mm = get_task_mm(p);
4835 if (!mm)
4836 return 0;
4837 /* We move charges only when we move an owner of the mm */
4838 if (mm->owner == p) {
4839 VM_BUG_ON(mc.from);
4840 VM_BUG_ON(mc.to);
4841 VM_BUG_ON(mc.precharge);
4842 VM_BUG_ON(mc.moved_charge);
4843 VM_BUG_ON(mc.moved_swap);
4844
4845 spin_lock(&mc.lock);
4846 mc.mm = mm;
4847 mc.from = from;
4848 mc.to = memcg;
4849 mc.flags = move_flags;
4850 spin_unlock(&mc.lock);
4851 /* We set mc.moving_task later */
4852
4853 ret = mem_cgroup_precharge_mc(mm);
4854 if (ret)
4855 mem_cgroup_clear_mc();
4856 } else {
4857 mmput(mm);
4858 }
4859 return ret;
4860}
4861
4862static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4863{
4864 if (mc.to)
4865 mem_cgroup_clear_mc();
4866}
4867
4868static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4869 unsigned long addr, unsigned long end,
4870 struct mm_walk *walk)
4871{
4872 int ret = 0;
4873 struct vm_area_struct *vma = walk->vma;
4874 pte_t *pte;
4875 spinlock_t *ptl;
4876 enum mc_target_type target_type;
4877 union mc_target target;
4878 struct page *page;
4879
4880 ptl = pmd_trans_huge_lock(pmd, vma);
4881 if (ptl) {
4882 if (mc.precharge < HPAGE_PMD_NR) {
4883 spin_unlock(ptl);
4884 return 0;
4885 }
4886 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
4887 if (target_type == MC_TARGET_PAGE) {
4888 page = target.page;
4889 if (!isolate_lru_page(page)) {
4890 if (!mem_cgroup_move_account(page, true,
4891 mc.from, mc.to)) {
4892 mc.precharge -= HPAGE_PMD_NR;
4893 mc.moved_charge += HPAGE_PMD_NR;
4894 }
4895 putback_lru_page(page);
4896 }
4897 put_page(page);
4898 } else if (target_type == MC_TARGET_DEVICE) {
4899 page = target.page;
4900 if (!mem_cgroup_move_account(page, true,
4901 mc.from, mc.to)) {
4902 mc.precharge -= HPAGE_PMD_NR;
4903 mc.moved_charge += HPAGE_PMD_NR;
4904 }
4905 put_page(page);
4906 }
4907 spin_unlock(ptl);
4908 return 0;
4909 }
4910
4911 if (pmd_trans_unstable(pmd))
4912 return 0;
4913retry:
4914 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4915 for (; addr != end; addr += PAGE_SIZE) {
4916 pte_t ptent = *(pte++);
4917 bool device = false;
4918 swp_entry_t ent;
4919
4920 if (!mc.precharge)
4921 break;
4922
4923 switch (get_mctgt_type(vma, addr, ptent, &target)) {
4924 case MC_TARGET_DEVICE:
4925 device = true;
4926 /* fall through */
4927 case MC_TARGET_PAGE:
4928 page = target.page;
4929 /*
4930 * We can have a part of the split pmd here. Moving it
4931 * can be done but it would be too convoluted so simply
4932 * ignore such a partial THP and keep it in original
4933 * memcg. There should be somebody mapping the head.
4934 */
4935 if (PageTransCompound(page))
4936 goto put;
4937 if (!device && isolate_lru_page(page))
4938 goto put;
4939 if (!mem_cgroup_move_account(page, false,
4940 mc.from, mc.to)) {
4941 mc.precharge--;
4942 /* we uncharge from mc.from later. */
4943 mc.moved_charge++;
4944 }
4945 if (!device)
4946 putback_lru_page(page);
4947put: /* get_mctgt_type() gets the page */
4948 put_page(page);
4949 break;
4950 case MC_TARGET_SWAP:
4951 ent = target.ent;
4952 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
4953 mc.precharge--;
4954 /* we fixup refcnts and charges later. */
4955 mc.moved_swap++;
4956 }
4957 break;
4958 default:
4959 break;
4960 }
4961 }
4962 pte_unmap_unlock(pte - 1, ptl);
4963 cond_resched();
4964
4965 if (addr != end) {
4966 /*
4967 * We have consumed all precharges we got in can_attach().
4968 * We try to charge one by one, but don't do any additional
4969 * charges to mc.to if we have already failed to charge once in the
4970 * attach() phase.
4971 */
4972 ret = mem_cgroup_do_precharge(1);
4973 if (!ret)
4974 goto retry;
4975 }
4976
4977 return ret;
4978}
4979
4980static void mem_cgroup_move_charge(void)
4981{
4982 struct mm_walk mem_cgroup_move_charge_walk = {
4983 .pmd_entry = mem_cgroup_move_charge_pte_range,
4984 .mm = mc.mm,
4985 };
4986
4987 lru_add_drain_all();
4988 /*
4989 * Signal lock_page_memcg() to take the memcg's move_lock
4990 * while we're moving its pages to another memcg. Then wait
4991 * for already started RCU-only updates to finish.
4992 */
4993 atomic_inc(&mc.from->moving_account);
4994 synchronize_rcu();
4995retry:
4996 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
4997 /*
4998 * Someone who is holding the mmap_sem might be waiting in
4999 * waitq. So we cancel all extra charges, wake up all waiters,
5000 * and retry. Because we cancel precharges, we might not be able
5001 * to move enough charges, but moving charge is a best-effort
5002 * feature anyway, so it wouldn't be a big problem.
5003 */
5004 __mem_cgroup_clear_mc();
5005 cond_resched();
5006 goto retry;
5007 }
5008 /*
5009 * When we have consumed all precharges and failed in doing
5010 * additional charge, the page walk just aborts.
5011 */
5012 walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk);
5013
5014 up_read(&mc.mm->mmap_sem);
5015 atomic_dec(&mc.from->moving_account);
5016}
5017
5018static void mem_cgroup_move_task(void)
5019{
5020 if (mc.to) {
5021 mem_cgroup_move_charge();
5022 mem_cgroup_clear_mc();
5023 }
5024}
5025#else /* !CONFIG_MMU */
5026static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5027{
5028 return 0;
5029}
5030static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5031{
5032}
5033static void mem_cgroup_move_task(void)
5034{
5035}
5036#endif
5037
5038/*
5039 * Cgroup retains root cgroups across [un]mount cycles making it necessary
5040 * to verify whether we're attached to the default hierarchy on each mount
5041 * attempt.
5042 */
5043static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
5044{
5045 /*
5046 * use_hierarchy is forced on the default hierarchy. cgroup core
5047 * guarantees that @root doesn't have any children, so turning it
5048 * on for the root memcg is enough.
5049 */
5050 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5051 root_mem_cgroup->use_hierarchy = true;
5052 else
5053 root_mem_cgroup->use_hierarchy = false;
5054}
5055
5056static u64 memory_current_read(struct cgroup_subsys_state *css,
5057 struct cftype *cft)
5058{
5059 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5060
5061 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
5062}
5063
5064static int memory_low_show(struct seq_file *m, void *v)
5065{
5066 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5067 unsigned long low = READ_ONCE(memcg->low);
5068
5069 if (low == PAGE_COUNTER_MAX)
5070 seq_puts(m, "max\n");
5071 else
5072 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
5073
5074 return 0;
5075}
5076
5077static ssize_t memory_low_write(struct kernfs_open_file *of,
5078 char *buf, size_t nbytes, loff_t off)
5079{
5080 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5081 unsigned long low;
5082 int err;
5083
5084 buf = strstrip(buf);
5085 err = page_counter_memparse(buf, "max", &low);
5086 if (err)
5087 return err;
5088
5089 memcg->low = low;
5090
5091 return nbytes;
5092}
5093
5094static int memory_high_show(struct seq_file *m, void *v)
5095{
5096 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5097 unsigned long high = READ_ONCE(memcg->high);
5098
5099 if (high == PAGE_COUNTER_MAX)
5100 seq_puts(m, "max\n");
5101 else
5102 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
5103
5104 return 0;
5105}
5106
5107static ssize_t memory_high_write(struct kernfs_open_file *of,
5108 char *buf, size_t nbytes, loff_t off)
5109{
5110 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5111 unsigned long nr_pages;
5112 unsigned long high;
5113 int err;
5114
5115 buf = strstrip(buf);
5116 err = page_counter_memparse(buf, "max", &high);
5117 if (err)
5118 return err;
5119
5120 memcg->high = high;
5121
5122 nr_pages = page_counter_read(&memcg->memory);
5123 if (nr_pages > high)
5124 try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
5125 GFP_KERNEL, true);
5126
5127 memcg_wb_domain_size_changed(memcg);
5128 return nbytes;
5129}
5130
5131static int memory_max_show(struct seq_file *m, void *v)
5132{
5133 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5134 unsigned long max = READ_ONCE(memcg->memory.limit);
5135
5136 if (max == PAGE_COUNTER_MAX)
5137 seq_puts(m, "max\n");
5138 else
5139 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5140
5141 return 0;
5142}
5143
5144static ssize_t memory_max_write(struct kernfs_open_file *of,
5145 char *buf, size_t nbytes, loff_t off)
5146{
5147 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5148 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
5149 bool drained = false;
5150 unsigned long max;
5151 int err;
5152
5153 buf = strstrip(buf);
5154 err = page_counter_memparse(buf, "max", &max);
5155 if (err)
5156 return err;
5157
5158 xchg(&memcg->memory.limit, max);
5159
5160 for (;;) {
5161 unsigned long nr_pages = page_counter_read(&memcg->memory);
5162
5163 if (nr_pages <= max)
5164 break;
5165
5166 if (signal_pending(current)) {
5167 err = -EINTR;
5168 break;
5169 }
5170
5171 if (!drained) {
5172 drain_all_stock(memcg);
5173 drained = true;
5174 continue;
5175 }
5176
5177 if (nr_reclaims) {
5178 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
5179 GFP_KERNEL, true))
5180 nr_reclaims--;
5181 continue;
5182 }
5183
5184 memcg_memory_event(memcg, MEMCG_OOM);
5185 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
5186 break;
5187 }
5188
5189 memcg_wb_domain_size_changed(memcg);
5190 return nbytes;
5191}
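
/*
 * Userspace sketch for the handler above (cgroup v2): memory.max accepts a
 * byte count (page_counter_memparse() understands the usual K/M/G suffixes)
 * or the literal string "max" to remove the limit again:
 *
 *	echo 100M > memory.max
 *	echo max > memory.max
 */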
5192
5193static int memory_events_show(struct seq_file *m, void *v)
5194{
5195 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5196
5197 seq_printf(m, "low %lu\n",
5198 atomic_long_read(&memcg->memory_events[MEMCG_LOW]));
5199 seq_printf(m, "high %lu\n",
5200 atomic_long_read(&memcg->memory_events[MEMCG_HIGH]));
5201 seq_printf(m, "max %lu\n",
5202 atomic_long_read(&memcg->memory_events[MEMCG_MAX]));
5203 seq_printf(m, "oom %lu\n",
5204 atomic_long_read(&memcg->memory_events[MEMCG_OOM]));
5205 seq_printf(m, "oom_kill %lu\n", memcg_sum_events(memcg, OOM_KILL));
5206
5207 return 0;
5208}
5209
5210static int memory_stat_show(struct seq_file *m, void *v)
5211{
5212 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5213 unsigned long stat[MEMCG_NR_STAT];
5214 unsigned long events[NR_VM_EVENT_ITEMS];
5215 int i;
5216
5217 /*
5218 * Provide statistics on the state of the memory subsystem as
5219 * well as cumulative event counters that show past behavior.
5220 *
5221 * This list is ordered following a combination of these gradients:
5222 * 1) generic big picture -> specifics and details
5223 * 2) reflecting userspace activity -> reflecting kernel heuristics
5224 *
5225 * Current memory state:
5226 */
5227
5228 tree_stat(memcg, stat);
5229 tree_events(memcg, events);
5230
5231 seq_printf(m, "anon %llu\n",
5232 (u64)stat[MEMCG_RSS] * PAGE_SIZE);
5233 seq_printf(m, "file %llu\n",
5234 (u64)stat[MEMCG_CACHE] * PAGE_SIZE);
5235 seq_printf(m, "kernel_stack %llu\n",
5236 (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
5237 seq_printf(m, "slab %llu\n",
5238 (u64)(stat[NR_SLAB_RECLAIMABLE] +
5239 stat[NR_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
5240 seq_printf(m, "sock %llu\n",
5241 (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
5242
5243 seq_printf(m, "shmem %llu\n",
5244 (u64)stat[NR_SHMEM] * PAGE_SIZE);
5245 seq_printf(m, "file_mapped %llu\n",
5246 (u64)stat[NR_FILE_MAPPED] * PAGE_SIZE);
5247 seq_printf(m, "file_dirty %llu\n",
5248 (u64)stat[NR_FILE_DIRTY] * PAGE_SIZE);
5249 seq_printf(m, "file_writeback %llu\n",
5250 (u64)stat[NR_WRITEBACK] * PAGE_SIZE);
5251
5252 for (i = 0; i < NR_LRU_LISTS; i++) {
5253 struct mem_cgroup *mi;
5254 unsigned long val = 0;
5255
5256 for_each_mem_cgroup_tree(mi, memcg)
5257 val += mem_cgroup_nr_lru_pages(mi, BIT(i));
5258 seq_printf(m, "%s %llu\n",
5259 mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
5260 }
5261
5262 seq_printf(m, "slab_reclaimable %llu\n",
5263 (u64)stat[NR_SLAB_RECLAIMABLE] * PAGE_SIZE);
5264 seq_printf(m, "slab_unreclaimable %llu\n",
5265 (u64)stat[NR_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
5266
5267 /* Accumulated memory events */
5268
5269 seq_printf(m, "pgfault %lu\n", events[PGFAULT]);
5270 seq_printf(m, "pgmajfault %lu\n", events[PGMAJFAULT]);
5271
5272 seq_printf(m, "pgrefill %lu\n", events[PGREFILL]);
5273 seq_printf(m, "pgscan %lu\n", events[PGSCAN_KSWAPD] +
5274 events[PGSCAN_DIRECT]);
5275 seq_printf(m, "pgsteal %lu\n", events[PGSTEAL_KSWAPD] +
5276 events[PGSTEAL_DIRECT]);
5277 seq_printf(m, "pgactivate %lu\n", events[PGACTIVATE]);
5278 seq_printf(m, "pgdeactivate %lu\n", events[PGDEACTIVATE]);
5279 seq_printf(m, "pglazyfree %lu\n", events[PGLAZYFREE]);
5280 seq_printf(m, "pglazyfreed %lu\n", events[PGLAZYFREED]);
5281
5282 seq_printf(m, "workingset_refault %lu\n",
5283 stat[WORKINGSET_REFAULT]);
5284 seq_printf(m, "workingset_activate %lu\n",
5285 stat[WORKINGSET_ACTIVATE]);
5286 seq_printf(m, "workingset_nodereclaim %lu\n",
5287 stat[WORKINGSET_NODERECLAIM]);
5288
5289 return 0;
5290}
5291
5292static struct cftype memory_files[] = {
5293 {
5294 .name = "current",
5295 .flags = CFTYPE_NOT_ON_ROOT,
5296 .read_u64 = memory_current_read,
5297 },
5298 {
5299 .name = "low",
5300 .flags = CFTYPE_NOT_ON_ROOT,
5301 .seq_show = memory_low_show,
5302 .write = memory_low_write,
5303 },
5304 {
5305 .name = "high",
5306 .flags = CFTYPE_NOT_ON_ROOT,
5307 .seq_show = memory_high_show,
5308 .write = memory_high_write,
5309 },
5310 {
5311 .name = "max",
5312 .flags = CFTYPE_NOT_ON_ROOT,
5313 .seq_show = memory_max_show,
5314 .write = memory_max_write,
5315 },
5316 {
5317 .name = "events",
5318 .flags = CFTYPE_NOT_ON_ROOT,
5319 .file_offset = offsetof(struct mem_cgroup, events_file),
5320 .seq_show = memory_events_show,
5321 },
5322 {
5323 .name = "stat",
5324 .flags = CFTYPE_NOT_ON_ROOT,
5325 .seq_show = memory_stat_show,
5326 },
5327 { } /* terminate */
5328};
5329
5330struct cgroup_subsys memory_cgrp_subsys = {
5331 .css_alloc = mem_cgroup_css_alloc,
5332 .css_online = mem_cgroup_css_online,
5333 .css_offline = mem_cgroup_css_offline,
5334 .css_released = mem_cgroup_css_released,
5335 .css_free = mem_cgroup_css_free,
5336 .css_reset = mem_cgroup_css_reset,
5337 .can_attach = mem_cgroup_can_attach,
5338 .cancel_attach = mem_cgroup_cancel_attach,
5339 .post_attach = mem_cgroup_move_task,
5340 .bind = mem_cgroup_bind,
5341 .dfl_cftypes = memory_files,
5342 .legacy_cftypes = mem_cgroup_legacy_files,
5343 .early_init = 0,
5344};
5345
5346/**
5347 * mem_cgroup_low - check if memory consumption is below the normal range
5348 * @root: the top ancestor of the sub-tree being checked
5349 * @memcg: the memory cgroup to check
5350 *
5351 * Returns %true if memory consumption of @memcg, and that of all
5352 * ancestors up to (but not including) @root, is below the normal range.
5353 *
5354 * @root is exclusive; it is never low when looked at directly and isn't
5355 * checked when traversing the hierarchy.
5356 *
5357 * Excluding @root enables using memory.low to prioritize memory usage
5358 * between cgroups within a subtree of the hierarchy that is limited by
5359 * memory.high or memory.max.
5360 *
5361 * For example, given cgroup A with children B and C:
5362 *
5363 * A
5364 * / \
5365 * B C
5366 *
5367 * and
5368 *
5369 * 1. A/memory.current > A/memory.high
5370 * 2. A/B/memory.current < A/B/memory.low
5371 * 3. A/C/memory.current >= A/C/memory.low
5372 *
5373 * As 'A' is high, i.e. triggers reclaim from 'A', and 'B' is low, we
5374 * should reclaim from 'C' until 'A' is no longer high or until we can
5375 * no longer reclaim from 'C'. If 'A', i.e. @root, isn't excluded by
5376 * mem_cgroup_low when reclaiming from 'A', then 'B' won't be considered
5377 * low and we will reclaim indiscriminately from both 'B' and 'C'.
5378 */
5379bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
5380{
5381 if (mem_cgroup_disabled())
5382 return false;
5383
5384 if (!root)
5385 root = root_mem_cgroup;
5386 if (memcg == root)
5387 return false;
5388
5389 for (; memcg != root; memcg = parent_mem_cgroup(memcg)) {
5390 if (page_counter_read(&memcg->memory) >= memcg->low)
5391 return false;
5392 }
5393
5394 return true;
5395}
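
/*
 * Reclaim-side sketch of how this check is meant to be used (simplified;
 * the real logic lives in vmscan.c and also handles passes where protected
 * groups must be scanned anyway):
 *
 *	if (mem_cgroup_low(root, memcg)) {
 *		...skip this memcg, it is still within its protected range...
 *	} else {
 *		...reclaim pages from this memcg's LRU lists...
 *	}
 */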
5396
5397/**
5398 * mem_cgroup_try_charge - try charging a page
5399 * @page: page to charge
5400 * @mm: mm context of the victim
5401 * @gfp_mask: reclaim mode
5402 * @memcgp: charged memcg return
5403 * @compound: charge the page as compound or small page
5404 *
5405 * Try to charge @page to the memcg that @mm belongs to, reclaiming
5406 * pages according to @gfp_mask if necessary.
5407 *
5408 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5409 * Otherwise, an error code is returned.
5410 *
5411 * After page->mapping has been set up, the caller must finalize the
5412 * charge with mem_cgroup_commit_charge(). Or abort the transaction
5413 * with mem_cgroup_cancel_charge() in case page instantiation fails.
5414 */
5415int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
5416 gfp_t gfp_mask, struct mem_cgroup **memcgp,
5417 bool compound)
5418{
5419 struct mem_cgroup *memcg = NULL;
5420 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5421 int ret = 0;
5422
5423 if (mem_cgroup_disabled())
5424 goto out;
5425
5426 if (PageSwapCache(page)) {
5427 /*
5428 * Every swap fault against a single page tries to charge the
5429 * page, bail as early as possible. shmem_unuse() encounters
5430 * already charged pages, too. The USED bit is protected by
5431 * the page lock, which serializes swap cache removal, which
5432 * in turn serializes uncharging.
5433 */
5434 VM_BUG_ON_PAGE(!PageLocked(page), page);
5435 if (compound_head(page)->mem_cgroup)
5436 goto out;
5437
5438 if (do_swap_account) {
5439 swp_entry_t ent = { .val = page_private(page), };
5440 unsigned short id = lookup_swap_cgroup_id(ent);
5441
5442 rcu_read_lock();
5443 memcg = mem_cgroup_from_id(id);
5444 if (memcg && !css_tryget_online(&memcg->css))
5445 memcg = NULL;
5446 rcu_read_unlock();
5447 }
5448 }
5449
5450 if (!memcg)
5451 memcg = get_mem_cgroup_from_mm(mm);
5452
5453 ret = try_charge(memcg, gfp_mask, nr_pages);
5454
5455 css_put(&memcg->css);
5456out:
5457 *memcgp = memcg;
5458 return ret;
5459}
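
/*
 * Typical charge protocol for a caller instantiating a new page, as
 * described in the kernel-doc above (a sketch, error paths abbreviated):
 *
 *	ret = mem_cgroup_try_charge(page, mm, gfp_mask, &memcg, false);
 *	if (ret)
 *		goto fail;
 *	...set up page->mapping and insert the page...
 *	if (...instantiation failed...) {
 *		mem_cgroup_cancel_charge(page, memcg, false);
 *		goto fail;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 */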
5460
5461/**
5462 * mem_cgroup_commit_charge - commit a page charge
5463 * @page: page to charge
5464 * @memcg: memcg to charge the page to
5465 * @lrucare: page might be on LRU already
5466 * @compound: charge the page as compound or small page
5467 *
5468 * Finalize a charge transaction started by mem_cgroup_try_charge(),
5469 * after page->mapping has been set up. This must happen atomically
5470 * as part of the page instantiation, i.e. under the page table lock
5471 * for anonymous pages, under the page lock for page and swap cache.
5472 *
5473 * In addition, the page must not be on the LRU during the commit, to
5474 * prevent racing with task migration. If it might be, use @lrucare.
5475 *
5476 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
5477 */
5478void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
5479 bool lrucare, bool compound)
5480{
5481 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5482
5483 VM_BUG_ON_PAGE(!page->mapping, page);
5484 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5485
5486 if (mem_cgroup_disabled())
5487 return;
5488 /*
5489 * Swap faults will attempt to charge the same page multiple
5490 * times. But reuse_swap_page() might have removed the page
5491 * from swapcache already, so we can't check PageSwapCache().
5492 */
5493 if (!memcg)
5494 return;
5495
5496 commit_charge(page, memcg, lrucare);
5497
5498 local_irq_disable();
5499 mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
5500 memcg_check_events(memcg, page);
5501 local_irq_enable();
5502
5503 if (do_memsw_account() && PageSwapCache(page)) {
5504 swp_entry_t entry = { .val = page_private(page) };
5505 /*
5506 * The swap entry might not get freed for a long time,
5507 * let's not wait for it. The page already received a
5508 * memory+swap charge, drop the swap entry duplicate.
5509 */
5510 mem_cgroup_uncharge_swap(entry, nr_pages);
5511 }
5512}
5513
5514/**
5515 * mem_cgroup_cancel_charge - cancel a page charge
5516 * @page: page to charge
5517 * @memcg: memcg to charge the page to
5518 * @compound: charge the page as compound or small page
5519 *
5520 * Cancel a charge transaction started by mem_cgroup_try_charge().
5521 */
5522void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
5523 bool compound)
5524{
5525 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5526
5527 if (mem_cgroup_disabled())
5528 return;
5529 /*
5530 * Swap faults will attempt to charge the same page multiple
5531 * times. But reuse_swap_page() might have removed the page
5532 * from swapcache already, so we can't check PageSwapCache().
5533 */
5534 if (!memcg)
5535 return;
5536
5537 cancel_charge(memcg, nr_pages);
5538}
5539
5540struct uncharge_gather {
5541 struct mem_cgroup *memcg;
5542 unsigned long pgpgout;
5543 unsigned long nr_anon;
5544 unsigned long nr_file;
5545 unsigned long nr_kmem;
5546 unsigned long nr_huge;
5547 unsigned long nr_shmem;
5548 struct page *dummy_page;
5549};
5550
5551static inline void uncharge_gather_clear(struct uncharge_gather *ug)
5552{
5553 memset(ug, 0, sizeof(*ug));
5554}
5555
5556static void uncharge_batch(const struct uncharge_gather *ug)
5557{
5558 unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem;
5559 unsigned long flags;
5560
5561 if (!mem_cgroup_is_root(ug->memcg)) {
5562 page_counter_uncharge(&ug->memcg->memory, nr_pages);
5563 if (do_memsw_account())
5564 page_counter_uncharge(&ug->memcg->memsw, nr_pages);
5565 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
5566 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
5567 memcg_oom_recover(ug->memcg);
5568 }
5569
5570 local_irq_save(flags);
5571 __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
5572 __mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
5573 __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
5574 __mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
5575 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
5576 __this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
5577 memcg_check_events(ug->memcg, ug->dummy_page);
5578 local_irq_restore(flags);
5579
5580 if (!mem_cgroup_is_root(ug->memcg))
5581 css_put_many(&ug->memcg->css, nr_pages);
5582}
5583
5584static void uncharge_page(struct page *page, struct uncharge_gather *ug)
5585{
5586 VM_BUG_ON_PAGE(PageLRU(page), page);
5587 VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) &&
5588 !PageHWPoison(page) , page);
5589
5590 if (!page->mem_cgroup)
5591 return;
5592
5593 /*
5594 * Nobody should be changing or seriously looking at
5595 * page->mem_cgroup at this point, we have fully
5596 * exclusive access to the page.
5597 */
5598
5599 if (ug->memcg != page->mem_cgroup) {
5600 if (ug->memcg) {
5601 uncharge_batch(ug);
5602 uncharge_gather_clear(ug);
5603 }
5604 ug->memcg = page->mem_cgroup;
5605 }
5606
5607 if (!PageKmemcg(page)) {
5608 unsigned int nr_pages = 1;
5609
5610 if (PageTransHuge(page)) {
5611 nr_pages <<= compound_order(page);
5612 ug->nr_huge += nr_pages;
5613 }
5614 if (PageAnon(page))
5615 ug->nr_anon += nr_pages;
5616 else {
5617 ug->nr_file += nr_pages;
5618 if (PageSwapBacked(page))
5619 ug->nr_shmem += nr_pages;
5620 }
5621 ug->pgpgout++;
5622 } else {
5623 ug->nr_kmem += 1 << compound_order(page);
5624 __ClearPageKmemcg(page);
5625 }
5626
5627 ug->dummy_page = page;
5628 page->mem_cgroup = NULL;
5629}
5630
5631static void uncharge_list(struct list_head *page_list)
5632{
5633 struct uncharge_gather ug;
5634 struct list_head *next;
5635
5636 uncharge_gather_clear(&ug);
5637
5638 /*
5639 * Note that the list can be a single page->lru; hence the
5640 * do-while loop instead of a simple list_for_each_entry().
5641 */
5642 next = page_list->next;
5643 do {
5644 struct page *page;
5645
5646 page = list_entry(next, struct page, lru);
5647 next = page->lru.next;
5648
5649 uncharge_page(page, &ug);
5650 } while (next != page_list);
5651
5652 if (ug.memcg)
5653 uncharge_batch(&ug);
5654}
5655
5656/**
5657 * mem_cgroup_uncharge - uncharge a page
5658 * @page: page to uncharge
5659 *
5660 * Uncharge a page previously charged with mem_cgroup_try_charge() and
5661 * mem_cgroup_commit_charge().
5662 */
5663void mem_cgroup_uncharge(struct page *page)
5664{
5665 struct uncharge_gather ug;
5666
5667 if (mem_cgroup_disabled())
5668 return;
5669
5670 /* Don't touch page->lru of any random page, pre-check: */
5671 if (!page->mem_cgroup)
5672 return;
5673
5674 uncharge_gather_clear(&ug);
5675 uncharge_page(page, &ug);
5676 uncharge_batch(&ug);
5677}
5678
5679/**
5680 * mem_cgroup_uncharge_list - uncharge a list of page
5681 * @page_list: list of pages to uncharge
5682 *
5683 * Uncharge a list of pages previously charged with
5684 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
5685 */
5686void mem_cgroup_uncharge_list(struct list_head *page_list)
5687{
5688 if (mem_cgroup_disabled())
5689 return;
5690
5691 if (!list_empty(page_list))
5692 uncharge_list(page_list);
5693}
5694
5695/**
5696 * mem_cgroup_migrate - charge a page's replacement
5697 * @oldpage: currently circulating page
5698 * @newpage: replacement page
5699 *
5700 * Charge @newpage as a replacement page for @oldpage. @oldpage will
5701 * be uncharged upon free.
5702 *
5703 * Both pages must be locked, @newpage->mapping must be set up.
5704 */
5705void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
5706{
5707 struct mem_cgroup *memcg;
5708 unsigned int nr_pages;
5709 bool compound;
5710 unsigned long flags;
5711
5712 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
5713 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
5714 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
5715 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
5716 newpage);
5717
5718 if (mem_cgroup_disabled())
5719 return;
5720
5721 /* Page cache replacement: new page already charged? */
5722 if (newpage->mem_cgroup)
5723 return;
5724
5725 /* Swapcache readahead pages can get replaced before being charged */
5726 memcg = oldpage->mem_cgroup;
5727 if (!memcg)
5728 return;
5729
5730 /* Force-charge the new page. The old one will be freed soon */
5731 compound = PageTransHuge(newpage);
5732 nr_pages = compound ? hpage_nr_pages(newpage) : 1;
5733
5734 page_counter_charge(&memcg->memory, nr_pages);
5735 if (do_memsw_account())
5736 page_counter_charge(&memcg->memsw, nr_pages);
5737 css_get_many(&memcg->css, nr_pages);
5738
5739 commit_charge(newpage, memcg, false);
5740
5741 local_irq_save(flags);
5742 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
5743 memcg_check_events(memcg, newpage);
5744 local_irq_restore(flags);
5745}
5746
5747DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
5748EXPORT_SYMBOL(memcg_sockets_enabled_key);
5749
5750void mem_cgroup_sk_alloc(struct sock *sk)
5751{
5752 struct mem_cgroup *memcg;
5753
5754 if (!mem_cgroup_sockets_enabled)
5755 return;
5756
5757 /*
5758 * Socket cloning can throw us here with sk_memcg already
5759 * filled. It won't, however, necessarily happen from
5760 * process context. So the test for root memcg given
5761 * the current task's memcg won't help us in this case.
5762 *
5763 * Respecting the original socket's memcg is a better
5764 * decision in this case.
5765 */
5766 if (sk->sk_memcg) {
5767 css_get(&sk->sk_memcg->css);
5768 return;
5769 }
5770
5771 rcu_read_lock();
5772 memcg = mem_cgroup_from_task(current);
5773 if (memcg == root_mem_cgroup)
5774 goto out;
5775 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
5776 goto out;
5777 if (css_tryget_online(&memcg->css))
5778 sk->sk_memcg = memcg;
5779out:
5780 rcu_read_unlock();
5781}
5782
5783void mem_cgroup_sk_free(struct sock *sk)
5784{
5785 if (sk->sk_memcg)
5786 css_put(&sk->sk_memcg->css);
5787}
5788
5789/**
5790 * mem_cgroup_charge_skmem - charge socket memory
5791 * @memcg: memcg to charge
5792 * @nr_pages: number of pages to charge
5793 *
5794 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
5795 * @memcg's configured limit, %false if the charge had to be forced.
5796 */
5797bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5798{
5799 gfp_t gfp_mask = GFP_KERNEL;
5800
5801 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5802 struct page_counter *fail;
5803
5804 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
5805 memcg->tcpmem_pressure = 0;
5806 return true;
5807 }
5808 page_counter_charge(&memcg->tcpmem, nr_pages);
5809 memcg->tcpmem_pressure = 1;
5810 return false;
5811 }
5812
5813 /* Don't block in the packet receive path */
5814 if (in_softirq())
5815 gfp_mask = GFP_NOWAIT;
5816
5817 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
5818
5819 if (try_charge(memcg, gfp_mask, nr_pages) == 0)
5820 return true;
5821
5822 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
5823 return false;
5824}
5825
5826/**
5827 * mem_cgroup_uncharge_skmem - uncharge socket memory
5828 * @memcg: memcg to uncharge
5829 * @nr_pages: number of pages to uncharge
5830 */
5831void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5832{
5833 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5834 page_counter_uncharge(&memcg->tcpmem, nr_pages);
5835 return;
5836 }
5837
5838 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
5839
5840 refill_stock(memcg, nr_pages);
5841}
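/*
 * Illustrative sketch of how a caller might pair the two helpers above.
 * The helper names here (proto_account_rmem, proto_unaccount_rmem) are
 * hypothetical; the real callers live in the networking core, charging
 * when a socket's accounted memory grows and uncharging when it shrinks.
 *
 *	static bool proto_account_rmem(struct sock *sk, unsigned int nr_pages)
 *	{
 *		if (!mem_cgroup_sockets_enabled || !sk->sk_memcg)
 *			return true;
 *		return mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages);
 *	}
 *
 *	static void proto_unaccount_rmem(struct sock *sk, unsigned int nr_pages)
 *	{
 *		if (mem_cgroup_sockets_enabled && sk->sk_memcg)
 *			mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
 *	}
 */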
5842
5843static int __init cgroup_memory(char *s)
5844{
5845 char *token;
5846
5847 while ((token = strsep(&s, ",")) != NULL) {
5848 if (!*token)
5849 continue;
5850 if (!strcmp(token, "nosocket"))
5851 cgroup_memory_nosocket = true;
5852 if (!strcmp(token, "nokmem"))
5853 cgroup_memory_nokmem = true;
5854 }
5855 return 0;
5856}
5857__setup("cgroup.memory=", cgroup_memory);
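/*
 * Example (illustrative): booting with
 *
 *	cgroup.memory=nosocket,nokmem
 *
 * disables both socket memory accounting and kernel memory accounting;
 * either token can also be passed on its own.
 */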
5858
5859/*
5860 * subsys_initcall() for memory controller.
5861 *
5862 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
5863 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
5864 * basically everything that doesn't depend on a specific mem_cgroup structure
5865 * should be initialized from here.
5866 */
5867static int __init mem_cgroup_init(void)
5868{
5869 int cpu, node;
5870
5871#ifndef CONFIG_SLOB
5872 /*
5873 * Kmem cache creation is mostly done with the slab_mutex held,
5874 * so use a workqueue with limited concurrency to avoid stalling
5875 * all worker threads in case lots of cgroups are created and
5876 * destroyed simultaneously.
5877 */
5878 memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
5879 BUG_ON(!memcg_kmem_cache_wq);
5880#endif
5881
5882 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
5883 memcg_hotplug_cpu_dead);
5884
5885 for_each_possible_cpu(cpu)
5886 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5887 drain_local_stock);
5888
5889 for_each_node(node) {
5890 struct mem_cgroup_tree_per_node *rtpn;
5891
5892 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
5893 node_online(node) ? node : NUMA_NO_NODE);
5894
5895 rtpn->rb_root = RB_ROOT;
5896 rtpn->rb_rightmost = NULL;
5897 spin_lock_init(&rtpn->lock);
5898 soft_limit_tree.rb_tree_per_node[node] = rtpn;
5899 }
5900
5901 return 0;
5902}
5903subsys_initcall(mem_cgroup_init);
5904
5905#ifdef CONFIG_MEMCG_SWAP
5906static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
5907{
5908 while (!atomic_inc_not_zero(&memcg->id.ref)) {
5909 /*
5910	 * The root cgroup cannot be destroyed, so its refcount must
5911 * always be >= 1.
5912 */
5913 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
5914 VM_BUG_ON(1);
5915 break;
5916 }
5917 memcg = parent_mem_cgroup(memcg);
5918 if (!memcg)
5919 memcg = root_mem_cgroup;
5920 }
5921 return memcg;
5922}
5923
5924/**
5925 * mem_cgroup_swapout - transfer a memsw charge to swap
5926 * @page: page whose memsw charge to transfer
5927 * @entry: swap entry to move the charge to
5928 *
5929 * Transfer the memsw charge of @page to @entry.
5930 */
5931void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5932{
5933 struct mem_cgroup *memcg, *swap_memcg;
5934 unsigned int nr_entries;
5935 unsigned short oldid;
5936
5937 VM_BUG_ON_PAGE(PageLRU(page), page);
5938 VM_BUG_ON_PAGE(page_count(page), page);
5939
5940 if (!do_memsw_account())
5941 return;
5942
5943 memcg = page->mem_cgroup;
5944
5945 /* Readahead page, never charged */
5946 if (!memcg)
5947 return;
5948
5949 /*
5950 * In case the memcg owning these pages has been offlined and doesn't
5951 * have an ID allocated to it anymore, charge the closest online
5952 * ancestor for the swap instead and transfer the memory+swap charge.
5953 */
5954 swap_memcg = mem_cgroup_id_get_online(memcg);
5955 nr_entries = hpage_nr_pages(page);
5956 /* Get references for the tail pages, too */
5957 if (nr_entries > 1)
5958 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
5959 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
5960 nr_entries);
5961 VM_BUG_ON_PAGE(oldid, page);
5962 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
5963
5964 page->mem_cgroup = NULL;
5965
5966 if (!mem_cgroup_is_root(memcg))
5967 page_counter_uncharge(&memcg->memory, nr_entries);
5968
5969 if (memcg != swap_memcg) {
5970 if (!mem_cgroup_is_root(swap_memcg))
5971 page_counter_charge(&swap_memcg->memsw, nr_entries);
5972 page_counter_uncharge(&memcg->memsw, nr_entries);
5973 }
5974
5975 /*
5976 * Interrupts should be disabled here because the caller holds the
5977 * i_pages lock which is taken with interrupts-off. It is
5978 * important here to have the interrupts disabled because it is the
5979 * only synchronisation we have for updating the per-CPU variables.
5980 */
5981 VM_BUG_ON(!irqs_disabled());
5982 mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page),
5983 -nr_entries);
5984 memcg_check_events(memcg, page);
5985
5986 if (!mem_cgroup_is_root(memcg))
5987 css_put_many(&memcg->css, nr_entries);
5988}
5989
5990/**
5991 * mem_cgroup_try_charge_swap - try charging swap space for a page
5992 * @page: page being added to swap
5993 * @entry: swap entry to charge
5994 *
5995 * Try to charge @page's memcg for the swap space at @entry.
5996 *
5997 * Returns 0 on success, -ENOMEM on failure.
5998 */
5999int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
6000{
6001 unsigned int nr_pages = hpage_nr_pages(page);
6002 struct page_counter *counter;
6003 struct mem_cgroup *memcg;
6004 unsigned short oldid;
6005
6006 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
6007 return 0;
6008
6009 memcg = page->mem_cgroup;
6010
6011 /* Readahead page, never charged */
6012 if (!memcg)
6013 return 0;
6014
6015 memcg = mem_cgroup_id_get_online(memcg);
6016
6017 if (!mem_cgroup_is_root(memcg) &&
6018 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
6019 mem_cgroup_id_put(memcg);
6020 return -ENOMEM;
6021 }
6022
6023 /* Get references for the tail pages, too */
6024 if (nr_pages > 1)
6025 mem_cgroup_id_get_many(memcg, nr_pages - 1);
6026 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
6027 VM_BUG_ON_PAGE(oldid, page);
6028 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
6029
6030 return 0;
6031}
6032
6033/**
6034 * mem_cgroup_uncharge_swap - uncharge swap space
6035 * @entry: swap entry to uncharge
6036 * @nr_pages: the amount of swap space to uncharge
6037 */
6038void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
6039{
6040 struct mem_cgroup *memcg;
6041 unsigned short id;
6042
6043 if (!do_swap_account)
6044 return;
6045
6046 id = swap_cgroup_record(entry, 0, nr_pages);
6047 rcu_read_lock();
6048 memcg = mem_cgroup_from_id(id);
6049 if (memcg) {
6050 if (!mem_cgroup_is_root(memcg)) {
6051 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6052 page_counter_uncharge(&memcg->swap, nr_pages);
6053 else
6054 page_counter_uncharge(&memcg->memsw, nr_pages);
6055 }
6056 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
6057 mem_cgroup_id_put_many(memcg, nr_pages);
6058 }
6059 rcu_read_unlock();
6060}
6061
6062long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
6063{
6064 long nr_swap_pages = get_nr_swap_pages();
6065
6066 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
6067 return nr_swap_pages;
6068 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
6069 nr_swap_pages = min_t(long, nr_swap_pages,
6070 READ_ONCE(memcg->swap.limit) -
6071 page_counter_read(&memcg->swap));
6072 return nr_swap_pages;
6073}
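/*
 * Worked example with hypothetical numbers: if the system has 1000 free
 * swap pages while the memcg's swap limit is 300 pages with 250 already
 * charged, the loop above clamps the result to min(1000, 300 - 250) ==
 * 50 pages; an ancestor with a tighter margin would lower it further.
 */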
6074
6075bool mem_cgroup_swap_full(struct page *page)
6076{
6077 struct mem_cgroup *memcg;
6078
6079 VM_BUG_ON_PAGE(!PageLocked(page), page);
6080
6081 if (vm_swap_full())
6082 return true;
6083 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
6084 return false;
6085
6086 memcg = page->mem_cgroup;
6087 if (!memcg)
6088 return false;
6089
6090 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
6091 if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
6092 return true;
6093
6094 return false;
6095}
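/*
 * Worked example with hypothetical numbers: with a swap limit of 200
 * pages, the loop above reports "swap full" once the memcg's (or any
 * ancestor's) swap usage reaches 100 pages, since 100 * 2 >= 200.
 */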
6096
6097	/* For remembering the boot option */
6098#ifdef CONFIG_MEMCG_SWAP_ENABLED
6099static int really_do_swap_account __initdata = 1;
6100#else
6101static int really_do_swap_account __initdata;
6102#endif
6103
6104static int __init enable_swap_account(char *s)
6105{
6106 if (!strcmp(s, "1"))
6107 really_do_swap_account = 1;
6108 else if (!strcmp(s, "0"))
6109 really_do_swap_account = 0;
6110 return 1;
6111}
6112__setup("swapaccount=", enable_swap_account);
6113
6114static u64 swap_current_read(struct cgroup_subsys_state *css,
6115 struct cftype *cft)
6116{
6117 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6118
6119 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
6120}
6121
6122static int swap_max_show(struct seq_file *m, void *v)
6123{
6124 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
6125 unsigned long max = READ_ONCE(memcg->swap.limit);
6126
6127 if (max == PAGE_COUNTER_MAX)
6128 seq_puts(m, "max\n");
6129 else
6130 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
6131
6132 return 0;
6133}
6134
6135static ssize_t swap_max_write(struct kernfs_open_file *of,
6136 char *buf, size_t nbytes, loff_t off)
6137{
6138 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6139 unsigned long max;
6140 int err;
6141
6142 buf = strstrip(buf);
6143 err = page_counter_memparse(buf, "max", &max);
6144 if (err)
6145 return err;
6146
6147 mutex_lock(&memcg_limit_mutex);
6148 err = page_counter_limit(&memcg->swap, max);
6149 mutex_unlock(&memcg_limit_mutex);
6150 if (err)
6151 return err;
6152
6153 return nbytes;
6154}
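/*
 * Example (illustrative) of how this interface is typically driven from
 * userspace on the unified hierarchy, assuming the usual cgroup2 mount
 * point:
 *
 *	echo 1G > /sys/fs/cgroup/<group>/memory.swap.max
 *	echo max > /sys/fs/cgroup/<group>/memory.swap.max
 *
 * page_counter_memparse() accepts either a byte value (with an optional
 * size suffix) or the literal string "max".
 */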
6155
6156static struct cftype swap_files[] = {
6157 {
6158 .name = "swap.current",
6159 .flags = CFTYPE_NOT_ON_ROOT,
6160 .read_u64 = swap_current_read,
6161 },
6162 {
6163 .name = "swap.max",
6164 .flags = CFTYPE_NOT_ON_ROOT,
6165 .seq_show = swap_max_show,
6166 .write = swap_max_write,
6167 },
6168 { } /* terminate */
6169};
6170
6171static struct cftype memsw_cgroup_files[] = {
6172 {
6173 .name = "memsw.usage_in_bytes",
6174 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
6175 .read_u64 = mem_cgroup_read_u64,
6176 },
6177 {
6178 .name = "memsw.max_usage_in_bytes",
6179 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
6180 .write = mem_cgroup_reset,
6181 .read_u64 = mem_cgroup_read_u64,
6182 },
6183 {
6184 .name = "memsw.limit_in_bytes",
6185 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
6186 .write = mem_cgroup_write,
6187 .read_u64 = mem_cgroup_read_u64,
6188 },
6189 {
6190 .name = "memsw.failcnt",
6191 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
6192 .write = mem_cgroup_reset,
6193 .read_u64 = mem_cgroup_read_u64,
6194 },
6195 { }, /* terminate */
6196};
6197
6198static int __init mem_cgroup_swap_init(void)
6199{
6200 if (!mem_cgroup_disabled() && really_do_swap_account) {
6201 do_swap_account = 1;
6202 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
6203 swap_files));
6204 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
6205 memsw_cgroup_files));
6206 }
6207 return 0;
6208}
6209subsys_initcall(mem_cgroup_swap_init);
6210
6211#endif /* CONFIG_MEMCG_SWAP */
1// SPDX-License-Identifier: GPL-2.0-or-later
2/* memcontrol.c - Memory Controller
3 *
4 * Copyright IBM Corporation, 2007
5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6 *
7 * Copyright 2007 OpenVZ SWsoft Inc
8 * Author: Pavel Emelianov <xemul@openvz.org>
9 *
10 * Memory thresholds
11 * Copyright (C) 2009 Nokia Corporation
12 * Author: Kirill A. Shutemov
13 *
14 * Kernel Memory Controller
15 * Copyright (C) 2012 Parallels Inc. and Google Inc.
16 * Authors: Glauber Costa and Suleiman Souhlal
17 *
18 * Native page reclaim
19 * Charge lifetime sanitation
20 * Lockless page tracking & accounting
21 * Unified hierarchy configuration model
22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23 *
24 * Per memcg lru locking
25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
26 */
27
28#include <linux/page_counter.h>
29#include <linux/memcontrol.h>
30#include <linux/cgroup.h>
31#include <linux/pagewalk.h>
32#include <linux/sched/mm.h>
33#include <linux/shmem_fs.h>
34#include <linux/hugetlb.h>
35#include <linux/pagemap.h>
36#include <linux/pagevec.h>
37#include <linux/vm_event_item.h>
38#include <linux/smp.h>
39#include <linux/page-flags.h>
40#include <linux/backing-dev.h>
41#include <linux/bit_spinlock.h>
42#include <linux/rcupdate.h>
43#include <linux/limits.h>
44#include <linux/export.h>
45#include <linux/mutex.h>
46#include <linux/rbtree.h>
47#include <linux/slab.h>
48#include <linux/swap.h>
49#include <linux/swapops.h>
50#include <linux/spinlock.h>
51#include <linux/eventfd.h>
52#include <linux/poll.h>
53#include <linux/sort.h>
54#include <linux/fs.h>
55#include <linux/seq_file.h>
56#include <linux/vmpressure.h>
57#include <linux/memremap.h>
58#include <linux/mm_inline.h>
59#include <linux/swap_cgroup.h>
60#include <linux/cpu.h>
61#include <linux/oom.h>
62#include <linux/lockdep.h>
63#include <linux/file.h>
64#include <linux/resume_user_mode.h>
65#include <linux/psi.h>
66#include <linux/seq_buf.h>
67#include <linux/sched/isolation.h>
68#include <linux/kmemleak.h>
69#include "internal.h"
70#include <net/sock.h>
71#include <net/ip.h>
72#include "slab.h"
73#include "swap.h"
74
75#include <linux/uaccess.h>
76
77#include <trace/events/vmscan.h>
78
79struct cgroup_subsys memory_cgrp_subsys __read_mostly;
80EXPORT_SYMBOL(memory_cgrp_subsys);
81
82struct mem_cgroup *root_mem_cgroup __read_mostly;
83
84/* Active memory cgroup to use from an interrupt context */
85DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
86EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
87
88/* Socket memory accounting disabled? */
89static bool cgroup_memory_nosocket __ro_after_init;
90
91/* Kernel memory accounting disabled? */
92static bool cgroup_memory_nokmem __ro_after_init;
93
94/* BPF memory accounting disabled? */
95static bool cgroup_memory_nobpf __ro_after_init;
96
97#ifdef CONFIG_CGROUP_WRITEBACK
98static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
99#endif
100
101/* Whether legacy memory+swap accounting is active */
102static bool do_memsw_account(void)
103{
104 return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
105}
106
107#define THRESHOLDS_EVENTS_TARGET 128
108#define SOFTLIMIT_EVENTS_TARGET 1024
109
110/*
111 * Cgroups above their limits are maintained in a RB-Tree, independent of
112 * their hierarchy representation
113 */
114
115struct mem_cgroup_tree_per_node {
116 struct rb_root rb_root;
117 struct rb_node *rb_rightmost;
118 spinlock_t lock;
119};
120
121struct mem_cgroup_tree {
122 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
123};
124
125static struct mem_cgroup_tree soft_limit_tree __read_mostly;
126
127/* for OOM */
128struct mem_cgroup_eventfd_list {
129 struct list_head list;
130 struct eventfd_ctx *eventfd;
131};
132
133/*
134 * cgroup_event represents events which userspace want to receive.
135 */
136struct mem_cgroup_event {
137 /*
138 * memcg which the event belongs to.
139 */
140 struct mem_cgroup *memcg;
141 /*
142 * eventfd to signal userspace about the event.
143 */
144 struct eventfd_ctx *eventfd;
145 /*
146 * Each of these stored in a list by the cgroup.
147 */
148 struct list_head list;
149 /*
150	 * register_event() callback will be used to add a new userspace
151	 * waiter for changes related to this event. Use eventfd_signal()
152	 * on the eventfd to send a notification to userspace.
153 */
154 int (*register_event)(struct mem_cgroup *memcg,
155 struct eventfd_ctx *eventfd, const char *args);
156 /*
157	 * unregister_event() callback will be called when userspace closes
158	 * the eventfd or on cgroup removal. This callback must be set
159	 * if you want to provide notification functionality.
160 */
161 void (*unregister_event)(struct mem_cgroup *memcg,
162 struct eventfd_ctx *eventfd);
163 /*
164	 * All fields below are needed to unregister the event when
165	 * userspace closes the eventfd.
166 */
167 poll_table pt;
168 wait_queue_head_t *wqh;
169 wait_queue_entry_t wait;
170 struct work_struct remove;
171};
172
173static void mem_cgroup_threshold(struct mem_cgroup *memcg);
174static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
175
176	/* Stuff for moving charges at task migration. */
177/*
178 * Types of charges to be moved.
179 */
180#define MOVE_ANON 0x1U
181#define MOVE_FILE 0x2U
182#define MOVE_MASK (MOVE_ANON | MOVE_FILE)
183
184/* "mc" and its members are protected by cgroup_mutex */
185static struct move_charge_struct {
186 spinlock_t lock; /* for from, to */
187 struct mm_struct *mm;
188 struct mem_cgroup *from;
189 struct mem_cgroup *to;
190 unsigned long flags;
191 unsigned long precharge;
192 unsigned long moved_charge;
193 unsigned long moved_swap;
194 struct task_struct *moving_task; /* a task moving charges */
195 wait_queue_head_t waitq; /* a waitq for other context */
196} mc = {
197 .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
198 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
199};
200
201/*
202 * Maximum loops in mem_cgroup_soft_reclaim(), used for soft
203	 * limit reclaim to prevent infinite loops, should they ever occur.
204 */
205#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
206#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
207
208/* for encoding cft->private value on file */
209enum res_type {
210 _MEM,
211 _MEMSWAP,
212 _KMEM,
213 _TCP,
214};
215
216#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
217#define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
218#define MEMFILE_ATTR(val) ((val) & 0xffff)
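/*
 * Worked example: MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE) packs the resource
 * type into the upper 16 bits and the attribute into the lower 16 bits,
 * so with _MEMSWAP == 1 and RES_USAGE == 0 (values assumed here purely
 * for illustration, following the enum ordering) the encoded value is
 * (1 << 16) | 0 == 0x10000; MEMFILE_TYPE() and MEMFILE_ATTR() recover
 * the two halves.
 */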
219
220/*
221 * Iteration constructs for visiting all cgroups (under a tree). If
222 * loops are exited prematurely (break), mem_cgroup_iter_break() must
223 * be used for reference counting.
224 */
225#define for_each_mem_cgroup_tree(iter, root) \
226 for (iter = mem_cgroup_iter(root, NULL, NULL); \
227 iter != NULL; \
228 iter = mem_cgroup_iter(root, iter, NULL))
229
230#define for_each_mem_cgroup(iter) \
231 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
232 iter != NULL; \
233 iter = mem_cgroup_iter(NULL, iter, NULL))
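/*
 * Illustrative sketch of the iteration contract above (the helper name
 * walk_subtree_and_stop_early and the predicate some_condition() are
 * hypothetical):
 *
 *	static void walk_subtree_and_stop_early(struct mem_cgroup *root)
 *	{
 *		struct mem_cgroup *iter;
 *
 *		for_each_mem_cgroup_tree(iter, root) {
 *			if (some_condition(iter)) {
 *				mem_cgroup_iter_break(root, iter);
 *				break;
 *			}
 *		}
 *	}
 */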
234
235static inline bool task_is_dying(void)
236{
237 return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
238 (current->flags & PF_EXITING);
239}
240
241/* Some nice accessors for the vmpressure. */
242struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
243{
244 if (!memcg)
245 memcg = root_mem_cgroup;
246 return &memcg->vmpressure;
247}
248
249struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
250{
251 return container_of(vmpr, struct mem_cgroup, vmpressure);
252}
253
254#define CURRENT_OBJCG_UPDATE_BIT 0
255#define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)
256
257#ifdef CONFIG_MEMCG_KMEM
258static DEFINE_SPINLOCK(objcg_lock);
259
260bool mem_cgroup_kmem_disabled(void)
261{
262 return cgroup_memory_nokmem;
263}
264
265static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
266 unsigned int nr_pages);
267
268static void obj_cgroup_release(struct percpu_ref *ref)
269{
270 struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
271 unsigned int nr_bytes;
272 unsigned int nr_pages;
273 unsigned long flags;
274
275 /*
276 * At this point all allocated objects are freed, and
277 * objcg->nr_charged_bytes can't have an arbitrary byte value.
278 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
279 *
280 * The following sequence can lead to it:
281	 * 1) CPU0: objcg == stock->cached_objcg
282	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
283	 *    PAGE_SIZE bytes are charged
284	 * 3) CPU1: a process from another memcg is allocating something,
285	 *    the stock is flushed,
286	 *    objcg->nr_charged_bytes = PAGE_SIZE - 92
287	 * 4) CPU0: we release this object,
288	 *    92 bytes are added to stock->nr_bytes
289	 * 5) CPU0: stock is flushed,
290	 *    92 bytes are added to objcg->nr_charged_bytes
291	 *
292	 * As a result, nr_charged_bytes == PAGE_SIZE.
293	 * This page will be uncharged here, in obj_cgroup_release().
294 */
295 nr_bytes = atomic_read(&objcg->nr_charged_bytes);
296 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
297 nr_pages = nr_bytes >> PAGE_SHIFT;
298
299 if (nr_pages)
300 obj_cgroup_uncharge_pages(objcg, nr_pages);
301
302 spin_lock_irqsave(&objcg_lock, flags);
303 list_del(&objcg->list);
304 spin_unlock_irqrestore(&objcg_lock, flags);
305
306 percpu_ref_exit(ref);
307 kfree_rcu(objcg, rcu);
308}
309
310static struct obj_cgroup *obj_cgroup_alloc(void)
311{
312 struct obj_cgroup *objcg;
313 int ret;
314
315 objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
316 if (!objcg)
317 return NULL;
318
319 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
320 GFP_KERNEL);
321 if (ret) {
322 kfree(objcg);
323 return NULL;
324 }
325 INIT_LIST_HEAD(&objcg->list);
326 return objcg;
327}
328
329static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
330 struct mem_cgroup *parent)
331{
332 struct obj_cgroup *objcg, *iter;
333
334 objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
335
336 spin_lock_irq(&objcg_lock);
337
338 /* 1) Ready to reparent active objcg. */
339 list_add(&objcg->list, &memcg->objcg_list);
340 /* 2) Reparent active objcg and already reparented objcgs to parent. */
341 list_for_each_entry(iter, &memcg->objcg_list, list)
342 WRITE_ONCE(iter->memcg, parent);
343 /* 3) Move already reparented objcgs to the parent's list */
344 list_splice(&memcg->objcg_list, &parent->objcg_list);
345
346 spin_unlock_irq(&objcg_lock);
347
348 percpu_ref_kill(&objcg->refcnt);
349}
350
351/*
352 * A lot of the calls to the cache allocation functions are expected to be
353 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
354	 * conditional on this static branch, we'll have to allow modules that do
355	 * kmem_cache_alloc and the like to see this symbol as well.
356 */
357DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
358EXPORT_SYMBOL(memcg_kmem_online_key);
359
360DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
361EXPORT_SYMBOL(memcg_bpf_enabled_key);
362#endif
363
364/**
365 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
366 * @folio: folio of interest
367 *
368 * If memcg is bound to the default hierarchy, css of the memcg associated
369 * with @folio is returned. The returned css remains associated with @folio
370 * until it is released.
371 *
372 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
373 * is returned.
374 */
375struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
376{
377 struct mem_cgroup *memcg = folio_memcg(folio);
378
379 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
380 memcg = root_mem_cgroup;
381
382 return &memcg->css;
383}
384
385/**
386 * page_cgroup_ino - return inode number of the memcg a page is charged to
387 * @page: the page
388 *
389 * Look up the closest online ancestor of the memory cgroup @page is charged to
390 * and return its inode number or 0 if @page is not charged to any cgroup. It
391 * is safe to call this function without holding a reference to @page.
392 *
393 * Note, this function is inherently racy, because there is nothing to prevent
394 * the cgroup inode from getting torn down and potentially reallocated a moment
395	 * after page_cgroup_ino() returns, so it should only be used by callers that
396 * do not care (such as procfs interfaces).
397 */
398ino_t page_cgroup_ino(struct page *page)
399{
400 struct mem_cgroup *memcg;
401 unsigned long ino = 0;
402
403 rcu_read_lock();
404 /* page_folio() is racy here, but the entire function is racy anyway */
405 memcg = folio_memcg_check(page_folio(page));
406
407 while (memcg && !(memcg->css.flags & CSS_ONLINE))
408 memcg = parent_mem_cgroup(memcg);
409 if (memcg)
410 ino = cgroup_ino(memcg->css.cgroup);
411 rcu_read_unlock();
412 return ino;
413}
414
415static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
416 struct mem_cgroup_tree_per_node *mctz,
417 unsigned long new_usage_in_excess)
418{
419 struct rb_node **p = &mctz->rb_root.rb_node;
420 struct rb_node *parent = NULL;
421 struct mem_cgroup_per_node *mz_node;
422 bool rightmost = true;
423
424 if (mz->on_tree)
425 return;
426
427 mz->usage_in_excess = new_usage_in_excess;
428 if (!mz->usage_in_excess)
429 return;
430 while (*p) {
431 parent = *p;
432 mz_node = rb_entry(parent, struct mem_cgroup_per_node,
433 tree_node);
434 if (mz->usage_in_excess < mz_node->usage_in_excess) {
435 p = &(*p)->rb_left;
436 rightmost = false;
437 } else {
438 p = &(*p)->rb_right;
439 }
440 }
441
442 if (rightmost)
443 mctz->rb_rightmost = &mz->tree_node;
444
445 rb_link_node(&mz->tree_node, parent, p);
446 rb_insert_color(&mz->tree_node, &mctz->rb_root);
447 mz->on_tree = true;
448}
449
450static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
451 struct mem_cgroup_tree_per_node *mctz)
452{
453 if (!mz->on_tree)
454 return;
455
456 if (&mz->tree_node == mctz->rb_rightmost)
457 mctz->rb_rightmost = rb_prev(&mz->tree_node);
458
459 rb_erase(&mz->tree_node, &mctz->rb_root);
460 mz->on_tree = false;
461}
462
463static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
464 struct mem_cgroup_tree_per_node *mctz)
465{
466 unsigned long flags;
467
468 spin_lock_irqsave(&mctz->lock, flags);
469 __mem_cgroup_remove_exceeded(mz, mctz);
470 spin_unlock_irqrestore(&mctz->lock, flags);
471}
472
473static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
474{
475 unsigned long nr_pages = page_counter_read(&memcg->memory);
476 unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
477 unsigned long excess = 0;
478
479 if (nr_pages > soft_limit)
480 excess = nr_pages - soft_limit;
481
482 return excess;
483}
484
485static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
486{
487 unsigned long excess;
488 struct mem_cgroup_per_node *mz;
489 struct mem_cgroup_tree_per_node *mctz;
490
491 if (lru_gen_enabled()) {
492 if (soft_limit_excess(memcg))
493 lru_gen_soft_reclaim(memcg, nid);
494 return;
495 }
496
497 mctz = soft_limit_tree.rb_tree_per_node[nid];
498 if (!mctz)
499 return;
500 /*
501	 * Necessary to update all ancestors when hierarchy is used,
502	 * because their event counter is not touched.
503 */
504 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
505 mz = memcg->nodeinfo[nid];
506 excess = soft_limit_excess(memcg);
507 /*
508	 * We have to update the tree if mz is on the RB-tree or
509	 * the memcg is over its soft limit.
510 */
511 if (excess || mz->on_tree) {
512 unsigned long flags;
513
514 spin_lock_irqsave(&mctz->lock, flags);
515 /* if on-tree, remove it */
516 if (mz->on_tree)
517 __mem_cgroup_remove_exceeded(mz, mctz);
518 /*
519 * Insert again. mz->usage_in_excess will be updated.
520 * If excess is 0, no tree ops.
521 */
522 __mem_cgroup_insert_exceeded(mz, mctz, excess);
523 spin_unlock_irqrestore(&mctz->lock, flags);
524 }
525 }
526}
527
528static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
529{
530 struct mem_cgroup_tree_per_node *mctz;
531 struct mem_cgroup_per_node *mz;
532 int nid;
533
534 for_each_node(nid) {
535 mz = memcg->nodeinfo[nid];
536 mctz = soft_limit_tree.rb_tree_per_node[nid];
537 if (mctz)
538 mem_cgroup_remove_exceeded(mz, mctz);
539 }
540}
541
542static struct mem_cgroup_per_node *
543__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
544{
545 struct mem_cgroup_per_node *mz;
546
547retry:
548 mz = NULL;
549 if (!mctz->rb_rightmost)
550 goto done; /* Nothing to reclaim from */
551
552 mz = rb_entry(mctz->rb_rightmost,
553 struct mem_cgroup_per_node, tree_node);
554 /*
555	 * Remove the node now but someone else can add it back;
556	 * we will add it back at the end of reclaim to its correct
557 * position in the tree.
558 */
559 __mem_cgroup_remove_exceeded(mz, mctz);
560 if (!soft_limit_excess(mz->memcg) ||
561 !css_tryget(&mz->memcg->css))
562 goto retry;
563done:
564 return mz;
565}
566
567static struct mem_cgroup_per_node *
568mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
569{
570 struct mem_cgroup_per_node *mz;
571
572 spin_lock_irq(&mctz->lock);
573 mz = __mem_cgroup_largest_soft_limit_node(mctz);
574 spin_unlock_irq(&mctz->lock);
575 return mz;
576}
577
578/* Subset of vm_event_item to report for memcg event stats */
579static const unsigned int memcg_vm_event_stat[] = {
580 PGPGIN,
581 PGPGOUT,
582 PGSCAN_KSWAPD,
583 PGSCAN_DIRECT,
584 PGSCAN_KHUGEPAGED,
585 PGSTEAL_KSWAPD,
586 PGSTEAL_DIRECT,
587 PGSTEAL_KHUGEPAGED,
588 PGFAULT,
589 PGMAJFAULT,
590 PGREFILL,
591 PGACTIVATE,
592 PGDEACTIVATE,
593 PGLAZYFREE,
594 PGLAZYFREED,
595#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
596 ZSWPIN,
597 ZSWPOUT,
598 ZSWPWB,
599#endif
600#ifdef CONFIG_TRANSPARENT_HUGEPAGE
601 THP_FAULT_ALLOC,
602 THP_COLLAPSE_ALLOC,
603 THP_SWPOUT,
604 THP_SWPOUT_FALLBACK,
605#endif
606};
607
608#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
609static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
610
611static void init_memcg_events(void)
612{
613 int i;
614
615 for (i = 0; i < NR_MEMCG_EVENTS; ++i)
616 mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
617}
618
619static inline int memcg_events_index(enum vm_event_item idx)
620{
621 return mem_cgroup_events_index[idx] - 1;
622}
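/*
 * Example of the +1/-1 indexing scheme above: after init_memcg_events(),
 * mem_cgroup_events_index[PGFAULT] holds PGFAULT's position in
 * memcg_vm_event_stat plus one, so memcg_events_index(PGFAULT) returns
 * that position, while an event that is not in the table maps to
 * 0 - 1 == -1, which callers treat as "not tracked".
 */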
623
624struct memcg_vmstats_percpu {
625 /* Stats updates since the last flush */
626 unsigned int stats_updates;
627
628 /* Cached pointers for fast iteration in memcg_rstat_updated() */
629 struct memcg_vmstats_percpu *parent;
630 struct memcg_vmstats *vmstats;
631
632 /* The above should fit a single cacheline for memcg_rstat_updated() */
633
634 /* Local (CPU and cgroup) page state & events */
635 long state[MEMCG_NR_STAT];
636 unsigned long events[NR_MEMCG_EVENTS];
637
638 /* Delta calculation for lockless upward propagation */
639 long state_prev[MEMCG_NR_STAT];
640 unsigned long events_prev[NR_MEMCG_EVENTS];
641
642 /* Cgroup1: threshold notifications & softlimit tree updates */
643 unsigned long nr_page_events;
644 unsigned long targets[MEM_CGROUP_NTARGETS];
645} ____cacheline_aligned;
646
647struct memcg_vmstats {
648 /* Aggregated (CPU and subtree) page state & events */
649 long state[MEMCG_NR_STAT];
650 unsigned long events[NR_MEMCG_EVENTS];
651
652 /* Non-hierarchical (CPU aggregated) page state & events */
653 long state_local[MEMCG_NR_STAT];
654 unsigned long events_local[NR_MEMCG_EVENTS];
655
656 /* Pending child counts during tree propagation */
657 long state_pending[MEMCG_NR_STAT];
658 unsigned long events_pending[NR_MEMCG_EVENTS];
659
660 /* Stats updates since the last flush */
661 atomic64_t stats_updates;
662};
663
664/*
665 * memcg and lruvec stats flushing
666 *
667 * Many codepaths leading to stats update or read are performance sensitive and
668	 * adding stats flushing in such codepaths is not desirable. So, to optimize
669	 * the flushing, the kernel does:
670 *
671 * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
672 * rstat update tree grow unbounded.
673 *
674 * 2) Flush the stats synchronously on reader side only when there are more than
675	 *    (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization can let
676	 *    the stats be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus), but
677	 *    only for 2 seconds due to (1).
678 */
679static void flush_memcg_stats_dwork(struct work_struct *w);
680static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
681static u64 flush_last_time;
682
683#define FLUSH_TIME (2UL*HZ)
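/*
 * Worked example for the reader-side threshold in (2) above, assuming
 * MEMCG_CHARGE_BATCH == 64 and 8 online CPUs: a reader only triggers a
 * synchronous flush once more than 512 pages' worth of stat updates have
 * accumulated since the last flush; smaller deltas wait for the periodic
 * flush every FLUSH_TIME.
 */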
684
685/*
686	 * Accessors to ensure that preemption is disabled on PREEMPT_RT, because we
687	 * cannot rely on it being disabled as part of an acquired spinlock_t lock.
688	 * These functions are never used in hardirq context on PREEMPT_RT, and
689	 * therefore disabling preemption is sufficient.
690 */
691static void memcg_stats_lock(void)
692{
693 preempt_disable_nested();
694 VM_WARN_ON_IRQS_ENABLED();
695}
696
697static void __memcg_stats_lock(void)
698{
699 preempt_disable_nested();
700}
701
702static void memcg_stats_unlock(void)
703{
704 preempt_enable_nested();
705}
706
707
708static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
709{
710 return atomic64_read(&vmstats->stats_updates) >
711 MEMCG_CHARGE_BATCH * num_online_cpus();
712}
713
714static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
715{
716 struct memcg_vmstats_percpu *statc;
717 int cpu = smp_processor_id();
718
719 if (!val)
720 return;
721
722 cgroup_rstat_updated(memcg->css.cgroup, cpu);
723 statc = this_cpu_ptr(memcg->vmstats_percpu);
724 for (; statc; statc = statc->parent) {
725 statc->stats_updates += abs(val);
726 if (statc->stats_updates < MEMCG_CHARGE_BATCH)
727 continue;
728
729 /*
730 * If @memcg is already flush-able, increasing stats_updates is
731 * redundant. Avoid the overhead of the atomic update.
732 */
733 if (!memcg_vmstats_needs_flush(statc->vmstats))
734 atomic64_add(statc->stats_updates,
735 &statc->vmstats->stats_updates);
736 statc->stats_updates = 0;
737 }
738}
739
740static void do_flush_stats(struct mem_cgroup *memcg)
741{
742 if (mem_cgroup_is_root(memcg))
743 WRITE_ONCE(flush_last_time, jiffies_64);
744
745 cgroup_rstat_flush(memcg->css.cgroup);
746}
747
748/*
749 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
750 * @memcg: root of the subtree to flush
751 *
752 * Flushing is serialized by the underlying global rstat lock. There is also a
753 * minimum amount of work to be done even if there are no stat updates to flush.
754 * Hence, we only flush the stats if the updates delta exceeds a threshold. This
755 * avoids unnecessary work and contention on the underlying lock.
756 */
757void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
758{
759 if (mem_cgroup_disabled())
760 return;
761
762 if (!memcg)
763 memcg = root_mem_cgroup;
764
765 if (memcg_vmstats_needs_flush(memcg->vmstats))
766 do_flush_stats(memcg);
767}
768
769void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
770{
771 /* Only flush if the periodic flusher is one full cycle late */
772 if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
773 mem_cgroup_flush_stats(memcg);
774}
775
776static void flush_memcg_stats_dwork(struct work_struct *w)
777{
778 /*
779 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
780 * in latency-sensitive paths is as cheap as possible.
781 */
782 do_flush_stats(root_mem_cgroup);
783 queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
784}
785
786unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
787{
788 long x = READ_ONCE(memcg->vmstats->state[idx]);
789#ifdef CONFIG_SMP
790 if (x < 0)
791 x = 0;
792#endif
793 return x;
794}
795
796static int memcg_page_state_unit(int item);
797
798/*
799 * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
800 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
801 */
802static int memcg_state_val_in_pages(int idx, int val)
803{
804 int unit = memcg_page_state_unit(idx);
805
806 if (!val || unit == PAGE_SIZE)
807 return val;
808 else
809 return max(val * unit / PAGE_SIZE, 1UL);
810}
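/*
 * Worked example, assuming 4 KiB pages: NR_KERNEL_STACK_KB is tracked in
 * KiB, so an update of val == 16 is normalized to 16 * 1024 / 4096 == 4
 * pages, while a 512-byte update of a byte-based item (unit == 1) rounds
 * up to a single page so it is not lost by the batching in
 * memcg_rstat_updated().
 */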
811
812/**
813 * __mod_memcg_state - update cgroup memory statistics
814 * @memcg: the memory cgroup
815 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
816 * @val: delta to add to the counter, can be negative
817 */
818void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
819{
820 if (mem_cgroup_disabled())
821 return;
822
823 __this_cpu_add(memcg->vmstats_percpu->state[idx], val);
824 memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
825}
826
827/* idx can be of type enum memcg_stat_item or node_stat_item. */
828static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
829{
830 long x = READ_ONCE(memcg->vmstats->state_local[idx]);
831
832#ifdef CONFIG_SMP
833 if (x < 0)
834 x = 0;
835#endif
836 return x;
837}
838
839void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
840 int val)
841{
842 struct mem_cgroup_per_node *pn;
843 struct mem_cgroup *memcg;
844
845 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
846 memcg = pn->memcg;
847
848 /*
849	 * Callers from rmap rely on disabled preemption because they never
850	 * update their counters from in-interrupt context. For those
851	 * counters we check that the update is never performed from an
852	 * interrupt context, while other callers need to have interrupts disabled.
853 */
854 __memcg_stats_lock();
855 if (IS_ENABLED(CONFIG_DEBUG_VM)) {
856 switch (idx) {
857 case NR_ANON_MAPPED:
858 case NR_FILE_MAPPED:
859 case NR_ANON_THPS:
860 case NR_SHMEM_PMDMAPPED:
861 case NR_FILE_PMDMAPPED:
862 WARN_ON_ONCE(!in_task());
863 break;
864 default:
865 VM_WARN_ON_IRQS_ENABLED();
866 }
867 }
868
869 /* Update memcg */
870 __this_cpu_add(memcg->vmstats_percpu->state[idx], val);
871
872 /* Update lruvec */
873 __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
874
875 memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
876 memcg_stats_unlock();
877}
878
879/**
880 * __mod_lruvec_state - update lruvec memory statistics
881 * @lruvec: the lruvec
882 * @idx: the stat item
883 * @val: delta to add to the counter, can be negative
884 *
885 * The lruvec is the intersection of the NUMA node and a cgroup. This
886	 * function updates all three counters that are affected by a
887 * change of state at this level: per-node, per-cgroup, per-lruvec.
888 */
889void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
890 int val)
891{
892 /* Update node */
893 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
894
895 /* Update memcg and lruvec */
896 if (!mem_cgroup_disabled())
897 __mod_memcg_lruvec_state(lruvec, idx, val);
898}
899
900void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
901 int val)
902{
903 struct mem_cgroup *memcg;
904 pg_data_t *pgdat = folio_pgdat(folio);
905 struct lruvec *lruvec;
906
907 rcu_read_lock();
908 memcg = folio_memcg(folio);
909 /* Untracked pages have no memcg, no lruvec. Update only the node */
910 if (!memcg) {
911 rcu_read_unlock();
912 __mod_node_page_state(pgdat, idx, val);
913 return;
914 }
915
916 lruvec = mem_cgroup_lruvec(memcg, pgdat);
917 __mod_lruvec_state(lruvec, idx, val);
918 rcu_read_unlock();
919}
920EXPORT_SYMBOL(__lruvec_stat_mod_folio);
921
922void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
923{
924 pg_data_t *pgdat = page_pgdat(virt_to_page(p));
925 struct mem_cgroup *memcg;
926 struct lruvec *lruvec;
927
928 rcu_read_lock();
929 memcg = mem_cgroup_from_slab_obj(p);
930
931 /*
932 * Untracked pages have no memcg, no lruvec. Update only the
933 * node. If we reparent the slab objects to the root memcg,
934 * when we free the slab object, we need to update the per-memcg
935 * vmstats to keep it correct for the root memcg.
936 */
937 if (!memcg) {
938 __mod_node_page_state(pgdat, idx, val);
939 } else {
940 lruvec = mem_cgroup_lruvec(memcg, pgdat);
941 __mod_lruvec_state(lruvec, idx, val);
942 }
943 rcu_read_unlock();
944}
945
946/**
947 * __count_memcg_events - account VM events in a cgroup
948 * @memcg: the memory cgroup
949 * @idx: the event item
950 * @count: the number of events that occurred
951 */
952void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
953 unsigned long count)
954{
955 int index = memcg_events_index(idx);
956
957 if (mem_cgroup_disabled() || index < 0)
958 return;
959
960 memcg_stats_lock();
961 __this_cpu_add(memcg->vmstats_percpu->events[index], count);
962 memcg_rstat_updated(memcg, count);
963 memcg_stats_unlock();
964}
965
966static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
967{
968 int index = memcg_events_index(event);
969
970 if (index < 0)
971 return 0;
972 return READ_ONCE(memcg->vmstats->events[index]);
973}
974
975static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
976{
977 int index = memcg_events_index(event);
978
979 if (index < 0)
980 return 0;
981
982 return READ_ONCE(memcg->vmstats->events_local[index]);
983}
984
985static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
986 int nr_pages)
987{
988 /* pagein of a big page is an event. So, ignore page size */
989 if (nr_pages > 0)
990 __count_memcg_events(memcg, PGPGIN, 1);
991 else {
992 __count_memcg_events(memcg, PGPGOUT, 1);
993 nr_pages = -nr_pages; /* for event */
994 }
995
996 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
997}
998
999static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
1000 enum mem_cgroup_events_target target)
1001{
1002 unsigned long val, next;
1003
1004 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
1005 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
1006 /* from time_after() in jiffies.h */
1007 if ((long)(next - val) < 0) {
1008 switch (target) {
1009 case MEM_CGROUP_TARGET_THRESH:
1010 next = val + THRESHOLDS_EVENTS_TARGET;
1011 break;
1012 case MEM_CGROUP_TARGET_SOFTLIMIT:
1013 next = val + SOFTLIMIT_EVENTS_TARGET;
1014 break;
1015 default:
1016 break;
1017 }
1018 __this_cpu_write(memcg->vmstats_percpu->targets[target], next);
1019 return true;
1020 }
1021 return false;
1022}
1023
1024/*
1025 * Check events in order.
1026 *
1027 */
1028static void memcg_check_events(struct mem_cgroup *memcg, int nid)
1029{
1030 if (IS_ENABLED(CONFIG_PREEMPT_RT))
1031 return;
1032
1033 /* threshold event is triggered in finer grain than soft limit */
1034 if (unlikely(mem_cgroup_event_ratelimit(memcg,
1035 MEM_CGROUP_TARGET_THRESH))) {
1036 bool do_softlimit;
1037
1038 do_softlimit = mem_cgroup_event_ratelimit(memcg,
1039 MEM_CGROUP_TARGET_SOFTLIMIT);
1040 mem_cgroup_threshold(memcg);
1041 if (unlikely(do_softlimit))
1042 mem_cgroup_update_tree(memcg, nid);
1043 }
1044}
1045
1046struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
1047{
1048 /*
1049 * mm_update_next_owner() may clear mm->owner to NULL
1050 * if it races with swapoff, page migration, etc.
1051 * So this can be called with p == NULL.
1052 */
1053 if (unlikely(!p))
1054 return NULL;
1055
1056 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
1057}
1058EXPORT_SYMBOL(mem_cgroup_from_task);
1059
1060static __always_inline struct mem_cgroup *active_memcg(void)
1061{
1062 if (!in_task())
1063 return this_cpu_read(int_active_memcg);
1064 else
1065 return current->active_memcg;
1066}
1067
1068/**
1069 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
1070 * @mm: mm from which memcg should be extracted. It can be NULL.
1071 *
1072 * Obtain a reference on mm->memcg and returns it if successful. If mm
1073 * is NULL, then the memcg is chosen as follows:
1074 * 1) The active memcg, if set.
1075 * 2) current->mm->memcg, if available
1076 * 3) root memcg
1077 * If mem_cgroup is disabled, NULL is returned.
1078 */
1079struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1080{
1081 struct mem_cgroup *memcg;
1082
1083 if (mem_cgroup_disabled())
1084 return NULL;
1085
1086 /*
1087 * Page cache insertions can happen without an
1088 * actual mm context, e.g. during disk probing
1089 * on boot, loopback IO, acct() writes etc.
1090 *
1091 * No need to css_get on root memcg as the reference
1092 * counting is disabled on the root level in the
1093 * cgroup core. See CSS_NO_REF.
1094 */
1095 if (unlikely(!mm)) {
1096 memcg = active_memcg();
1097 if (unlikely(memcg)) {
1098 /* remote memcg must hold a ref */
1099 css_get(&memcg->css);
1100 return memcg;
1101 }
1102 mm = current->mm;
1103 if (unlikely(!mm))
1104 return root_mem_cgroup;
1105 }
1106
1107 rcu_read_lock();
1108 do {
1109 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1110 if (unlikely(!memcg))
1111 memcg = root_mem_cgroup;
1112 } while (!css_tryget(&memcg->css));
1113 rcu_read_unlock();
1114 return memcg;
1115}
1116EXPORT_SYMBOL(get_mem_cgroup_from_mm);
1117
1118/**
1119 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
1120 */
1121struct mem_cgroup *get_mem_cgroup_from_current(void)
1122{
1123 struct mem_cgroup *memcg;
1124
1125 if (mem_cgroup_disabled())
1126 return NULL;
1127
1128again:
1129 rcu_read_lock();
1130 memcg = mem_cgroup_from_task(current);
1131 if (!css_tryget(&memcg->css)) {
1132 rcu_read_unlock();
1133 goto again;
1134 }
1135 rcu_read_unlock();
1136 return memcg;
1137}
1138
1139/**
1140 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1141 * @root: hierarchy root
1142 * @prev: previously returned memcg, NULL on first invocation
1143 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1144 *
1145 * Returns references to children of the hierarchy below @root, or
1146 * @root itself, or %NULL after a full round-trip.
1147 *
1148 * Caller must pass the return value in @prev on subsequent
1149 * invocations for reference counting, or use mem_cgroup_iter_break()
1150 * to cancel a hierarchy walk before the round-trip is complete.
1151 *
1152 * Reclaimers can specify a node in @reclaim to divide up the memcgs
1153 * in the hierarchy among all concurrent reclaimers operating on the
1154 * same node.
1155 */
1156struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1157 struct mem_cgroup *prev,
1158 struct mem_cgroup_reclaim_cookie *reclaim)
1159{
1160 struct mem_cgroup_reclaim_iter *iter;
1161 struct cgroup_subsys_state *css = NULL;
1162 struct mem_cgroup *memcg = NULL;
1163 struct mem_cgroup *pos = NULL;
1164
1165 if (mem_cgroup_disabled())
1166 return NULL;
1167
1168 if (!root)
1169 root = root_mem_cgroup;
1170
1171 rcu_read_lock();
1172
1173 if (reclaim) {
1174 struct mem_cgroup_per_node *mz;
1175
1176 mz = root->nodeinfo[reclaim->pgdat->node_id];
1177 iter = &mz->iter;
1178
1179 /*
1180 * On start, join the current reclaim iteration cycle.
1181 * Exit when a concurrent walker completes it.
1182 */
1183 if (!prev)
1184 reclaim->generation = iter->generation;
1185 else if (reclaim->generation != iter->generation)
1186 goto out_unlock;
1187
1188 while (1) {
1189 pos = READ_ONCE(iter->position);
1190 if (!pos || css_tryget(&pos->css))
1191 break;
1192 /*
1193 * css reference reached zero, so iter->position will
1194 * be cleared by ->css_released. However, we should not
1195 * rely on this happening soon, because ->css_released
1196 * is called from a work queue, and by busy-waiting we
1197 * might block it. So we clear iter->position right
1198 * away.
1199 */
1200 (void)cmpxchg(&iter->position, pos, NULL);
1201 }
1202 } else if (prev) {
1203 pos = prev;
1204 }
1205
1206 if (pos)
1207 css = &pos->css;
1208
1209 for (;;) {
1210 css = css_next_descendant_pre(css, &root->css);
1211 if (!css) {
1212 /*
1213 * Reclaimers share the hierarchy walk, and a
1214 * new one might jump in right at the end of
1215 * the hierarchy - make sure they see at least
1216 * one group and restart from the beginning.
1217 */
1218 if (!prev)
1219 continue;
1220 break;
1221 }
1222
1223 /*
1224 * Verify the css and acquire a reference. The root
1225 * is provided by the caller, so we know it's alive
1226 * and kicking, and don't take an extra reference.
1227 */
1228 if (css == &root->css || css_tryget(css)) {
1229 memcg = mem_cgroup_from_css(css);
1230 break;
1231 }
1232 }
1233
1234 if (reclaim) {
1235 /*
1236 * The position could have already been updated by a competing
1237 * thread, so check that the value hasn't changed since we read
1238 * it to avoid reclaiming from the same cgroup twice.
1239 */
1240 (void)cmpxchg(&iter->position, pos, memcg);
1241
1242 if (pos)
1243 css_put(&pos->css);
1244
1245 if (!memcg)
1246 iter->generation++;
1247 }
1248
1249out_unlock:
1250 rcu_read_unlock();
1251 if (prev && prev != root)
1252 css_put(&prev->css);
1253
1254 return memcg;
1255}
1256
1257/**
1258 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1259 * @root: hierarchy root
1260 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1261 */
1262void mem_cgroup_iter_break(struct mem_cgroup *root,
1263 struct mem_cgroup *prev)
1264{
1265 if (!root)
1266 root = root_mem_cgroup;
1267 if (prev && prev != root)
1268 css_put(&prev->css);
1269}
1270
1271static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1272 struct mem_cgroup *dead_memcg)
1273{
1274 struct mem_cgroup_reclaim_iter *iter;
1275 struct mem_cgroup_per_node *mz;
1276 int nid;
1277
1278 for_each_node(nid) {
1279 mz = from->nodeinfo[nid];
1280 iter = &mz->iter;
1281 cmpxchg(&iter->position, dead_memcg, NULL);
1282 }
1283}
1284
1285static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1286{
1287 struct mem_cgroup *memcg = dead_memcg;
1288 struct mem_cgroup *last;
1289
1290 do {
1291 __invalidate_reclaim_iterators(memcg, dead_memcg);
1292 last = memcg;
1293 } while ((memcg = parent_mem_cgroup(memcg)));
1294
1295 /*
1296 * When cgroup1 non-hierarchy mode is used,
1297 * parent_mem_cgroup() does not walk all the way up to the
1298 * cgroup root (root_mem_cgroup). So we have to handle
1299 * dead_memcg from cgroup root separately.
1300 */
1301 if (!mem_cgroup_is_root(last))
1302 __invalidate_reclaim_iterators(root_mem_cgroup,
1303 dead_memcg);
1304}
1305
1306/**
1307 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1308 * @memcg: hierarchy root
1309 * @fn: function to call for each task
1310 * @arg: argument passed to @fn
1311 *
1312 * This function iterates over tasks attached to @memcg or to any of its
1313 * descendants and calls @fn for each task. If @fn returns a non-zero
1314 * value, the function breaks the iteration loop. Otherwise, it will iterate
1315 * over all tasks and return 0.
1316 *
1317 * This function must not be called for the root memory cgroup.
1318 */
1319void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1320 int (*fn)(struct task_struct *, void *), void *arg)
1321{
1322 struct mem_cgroup *iter;
1323 int ret = 0;
1324
1325 BUG_ON(mem_cgroup_is_root(memcg));
1326
1327 for_each_mem_cgroup_tree(iter, memcg) {
1328 struct css_task_iter it;
1329 struct task_struct *task;
1330
1331 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1332 while (!ret && (task = css_task_iter_next(&it)))
1333 ret = fn(task, arg);
1334 css_task_iter_end(&it);
1335 if (ret) {
1336 mem_cgroup_iter_break(memcg, iter);
1337 break;
1338 }
1339 }
1340}
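/*
 * Illustrative sketch of a caller of mem_cgroup_scan_tasks(); the
 * callback name count_task and the counter are hypothetical. The OOM
 * killer uses the same pattern to evaluate every task in an OOM-ing
 * subtree.
 *
 *	static int count_task(struct task_struct *task, void *arg)
 *	{
 *		(*(unsigned long *)arg)++;
 *		return 0;
 *	}
 *
 *	unsigned long nr = 0;
 *	mem_cgroup_scan_tasks(memcg, count_task, &nr);
 *
 * Returning a non-zero value from the callback stops the walk early.
 */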
1341
1342#ifdef CONFIG_DEBUG_VM
1343void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1344{
1345 struct mem_cgroup *memcg;
1346
1347 if (mem_cgroup_disabled())
1348 return;
1349
1350 memcg = folio_memcg(folio);
1351
1352 if (!memcg)
1353 VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1354 else
1355 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1356}
1357#endif
1358
1359/**
1360 * folio_lruvec_lock - Lock the lruvec for a folio.
1361 * @folio: Pointer to the folio.
1362 *
1363 * These functions are safe to use under any of the following conditions:
1364 * - folio locked
1365 * - folio_test_lru false
1366 * - folio_memcg_lock()
1367 * - folio frozen (refcount of 0)
1368 *
1369 * Return: The lruvec this folio is on with its lock held.
1370 */
1371struct lruvec *folio_lruvec_lock(struct folio *folio)
1372{
1373 struct lruvec *lruvec = folio_lruvec(folio);
1374
1375 spin_lock(&lruvec->lru_lock);
1376 lruvec_memcg_debug(lruvec, folio);
1377
1378 return lruvec;
1379}
1380
1381/**
1382 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1383 * @folio: Pointer to the folio.
1384 *
1385 * These functions are safe to use under any of the following conditions:
1386 * - folio locked
1387 * - folio_test_lru false
1388 * - folio_memcg_lock()
1389 * - folio frozen (refcount of 0)
1390 *
1391 * Return: The lruvec this folio is on with its lock held and interrupts
1392 * disabled.
1393 */
1394struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1395{
1396 struct lruvec *lruvec = folio_lruvec(folio);
1397
1398 spin_lock_irq(&lruvec->lru_lock);
1399 lruvec_memcg_debug(lruvec, folio);
1400
1401 return lruvec;
1402}
1403
1404/**
1405 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1406 * @folio: Pointer to the folio.
1407 * @flags: Pointer to irqsave flags.
1408 *
1409 * These functions are safe to use under any of the following conditions:
1410 * - folio locked
1411 * - folio_test_lru false
1412 * - folio_memcg_lock()
1413 * - folio frozen (refcount of 0)
1414 *
1415 * Return: The lruvec this folio is on with its lock held and interrupts
1416 * disabled.
1417 */
1418struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1419 unsigned long *flags)
1420{
1421 struct lruvec *lruvec = folio_lruvec(folio);
1422
1423 spin_lock_irqsave(&lruvec->lru_lock, *flags);
1424 lruvec_memcg_debug(lruvec, folio);
1425
1426 return lruvec;
1427}
1428
1429/**
1430 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1431 * @lruvec: mem_cgroup per zone lru vector
1432 * @lru: index of lru list the page is sitting on
1433 * @zid: zone id of the accounted pages
1434 * @nr_pages: positive when adding or negative when removing
1435 *
1436 * This function must be called under lru_lock, just before a page is added
1437 * to or just after a page is removed from an lru list.
1438 */
1439void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1440 int zid, int nr_pages)
1441{
1442 struct mem_cgroup_per_node *mz;
1443 unsigned long *lru_size;
1444 long size;
1445
1446 if (mem_cgroup_disabled())
1447 return;
1448
1449 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1450 lru_size = &mz->lru_zone_size[zid][lru];
1451
1452 if (nr_pages < 0)
1453 *lru_size += nr_pages;
1454
1455 size = *lru_size;
1456 if (WARN_ONCE(size < 0,
1457 "%s(%p, %d, %d): lru_size %ld\n",
1458 __func__, lruvec, lru, nr_pages, size)) {
1459 VM_BUG_ON(1);
1460 *lru_size = 0;
1461 }
1462
1463 if (nr_pages > 0)
1464 *lru_size += nr_pages;
1465}
1466
1467/**
1468 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1469 * @memcg: the memory cgroup
1470 *
1471	 * Returns the maximum amount of memory @memcg can be charged with, in
1472 * pages.
1473 */
1474static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1475{
1476 unsigned long margin = 0;
1477 unsigned long count;
1478 unsigned long limit;
1479
1480 count = page_counter_read(&memcg->memory);
1481 limit = READ_ONCE(memcg->memory.max);
1482 if (count < limit)
1483 margin = limit - count;
1484
1485 if (do_memsw_account()) {
1486 count = page_counter_read(&memcg->memsw);
1487 limit = READ_ONCE(memcg->memsw.max);
1488 if (count < limit)
1489 margin = min(margin, limit - count);
1490 else
1491 margin = 0;
1492 }
1493
1494 return margin;
1495}
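/*
 * Worked example with hypothetical numbers: with memory.max == 1000
 * pages and 900 pages charged, the margin is 100 pages; if legacy
 * memory+swap accounting is active and memsw has only 40 pages of
 * headroom, the reported margin is min(100, 40) == 40 pages.
 */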
1496
1497/*
1498	 * A routine for checking whether "memcg" is under move_account() or not.
1499	 *
1500	 * Checks whether a cgroup is mc.from, mc.to, or under the hierarchy of
1501	 * the moving cgroups. This is for waiting at high memory pressure
1502	 * caused by a "move".
1503 */
1504static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1505{
1506 struct mem_cgroup *from;
1507 struct mem_cgroup *to;
1508 bool ret = false;
1509 /*
1510	 * Unlike the task_move routines, we access mc.to and mc.from without
1511	 * mutual exclusion by cgroup_mutex. Here, we take the spinlock instead.
1512 */
1513 spin_lock(&mc.lock);
1514 from = mc.from;
1515 to = mc.to;
1516 if (!from)
1517 goto unlock;
1518
1519 ret = mem_cgroup_is_descendant(from, memcg) ||
1520 mem_cgroup_is_descendant(to, memcg);
1521unlock:
1522 spin_unlock(&mc.lock);
1523 return ret;
1524}
1525
1526static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1527{
1528 if (mc.moving_task && current != mc.moving_task) {
1529 if (mem_cgroup_under_move(memcg)) {
1530 DEFINE_WAIT(wait);
1531 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1532 /* moving charge context might have finished. */
1533 if (mc.moving_task)
1534 schedule();
1535 finish_wait(&mc.waitq, &wait);
1536 return true;
1537 }
1538 }
1539 return false;
1540}
1541
1542struct memory_stat {
1543 const char *name;
1544 unsigned int idx;
1545};
1546
1547static const struct memory_stat memory_stats[] = {
1548 { "anon", NR_ANON_MAPPED },
1549 { "file", NR_FILE_PAGES },
1550 { "kernel", MEMCG_KMEM },
1551 { "kernel_stack", NR_KERNEL_STACK_KB },
1552 { "pagetables", NR_PAGETABLE },
1553 { "sec_pagetables", NR_SECONDARY_PAGETABLE },
1554 { "percpu", MEMCG_PERCPU_B },
1555 { "sock", MEMCG_SOCK },
1556 { "vmalloc", MEMCG_VMALLOC },
1557 { "shmem", NR_SHMEM },
1558#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
1559 { "zswap", MEMCG_ZSWAP_B },
1560 { "zswapped", MEMCG_ZSWAPPED },
1561#endif
1562 { "file_mapped", NR_FILE_MAPPED },
1563 { "file_dirty", NR_FILE_DIRTY },
1564 { "file_writeback", NR_WRITEBACK },
1565#ifdef CONFIG_SWAP
1566 { "swapcached", NR_SWAPCACHE },
1567#endif
1568#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1569 { "anon_thp", NR_ANON_THPS },
1570 { "file_thp", NR_FILE_THPS },
1571 { "shmem_thp", NR_SHMEM_THPS },
1572#endif
1573 { "inactive_anon", NR_INACTIVE_ANON },
1574 { "active_anon", NR_ACTIVE_ANON },
1575 { "inactive_file", NR_INACTIVE_FILE },
1576 { "active_file", NR_ACTIVE_FILE },
1577 { "unevictable", NR_UNEVICTABLE },
1578 { "slab_reclaimable", NR_SLAB_RECLAIMABLE_B },
1579 { "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B },
1580
1581 /* The memory events */
1582 { "workingset_refault_anon", WORKINGSET_REFAULT_ANON },
1583 { "workingset_refault_file", WORKINGSET_REFAULT_FILE },
1584 { "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON },
1585 { "workingset_activate_file", WORKINGSET_ACTIVATE_FILE },
1586 { "workingset_restore_anon", WORKINGSET_RESTORE_ANON },
1587 { "workingset_restore_file", WORKINGSET_RESTORE_FILE },
1588 { "workingset_nodereclaim", WORKINGSET_NODERECLAIM },
1589};
1590
1591/* The actual unit of the state item, not the same as the output unit */
1592static int memcg_page_state_unit(int item)
1593{
1594 switch (item) {
1595 case MEMCG_PERCPU_B:
1596 case MEMCG_ZSWAP_B:
1597 case NR_SLAB_RECLAIMABLE_B:
1598 case NR_SLAB_UNRECLAIMABLE_B:
1599 return 1;
1600 case NR_KERNEL_STACK_KB:
1601 return SZ_1K;
1602 default:
1603 return PAGE_SIZE;
1604 }
1605}
1606
1607/* Translate stat items to the correct unit for memory.stat output */
1608static int memcg_page_state_output_unit(int item)
1609{
1610 /*
1611 * Workingset state is actually in pages, but we export it to userspace
1612 * as a scalar count of events, so special case it here.
1613 */
1614 switch (item) {
1615 case WORKINGSET_REFAULT_ANON:
1616 case WORKINGSET_REFAULT_FILE:
1617 case WORKINGSET_ACTIVATE_ANON:
1618 case WORKINGSET_ACTIVATE_FILE:
1619 case WORKINGSET_RESTORE_ANON:
1620 case WORKINGSET_RESTORE_FILE:
1621 case WORKINGSET_NODERECLAIM:
1622 return 1;
1623 default:
1624 return memcg_page_state_unit(item);
1625 }
1626}
1627
1628static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1629 int item)
1630{
1631 return memcg_page_state(memcg, item) *
1632 memcg_page_state_output_unit(item);
1633}
1634
1635static inline unsigned long memcg_page_state_local_output(
1636 struct mem_cgroup *memcg, int item)
1637{
1638 return memcg_page_state_local(memcg, item) *
1639 memcg_page_state_output_unit(item);
1640}
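/*
 * Illustrative summary of the unit translation above (values hypothetical):
 * NR_ANON_MAPPED is tracked in pages, so a count of 100 is reported as
 * 100 * PAGE_SIZE bytes; NR_SLAB_RECLAIMABLE_B is already byte-based and is
 * reported unchanged; NR_KERNEL_STACK_KB is scaled by SZ_1K; the
 * WORKINGSET_* items are event counts and are reported as plain scalars.
 */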
1641
1642static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1643{
1644 int i;
1645
1646 /*
1647 * Provide statistics on the state of the memory subsystem as
1648 * well as cumulative event counters that show past behavior.
1649 *
1650 * This list is ordered following a combination of these gradients:
1651 * 1) generic big picture -> specifics and details
1652 * 2) reflecting userspace activity -> reflecting kernel heuristics
1653 *
1654 * Current memory state:
1655 */
1656 mem_cgroup_flush_stats(memcg);
1657
1658 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1659 u64 size;
1660
1661 size = memcg_page_state_output(memcg, memory_stats[i].idx);
1662 seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
1663
1664 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1665 size += memcg_page_state_output(memcg,
1666 NR_SLAB_RECLAIMABLE_B);
1667 seq_buf_printf(s, "slab %llu\n", size);
1668 }
1669 }
1670
1671 /* Accumulated memory events */
1672 seq_buf_printf(s, "pgscan %lu\n",
1673 memcg_events(memcg, PGSCAN_KSWAPD) +
1674 memcg_events(memcg, PGSCAN_DIRECT) +
1675 memcg_events(memcg, PGSCAN_KHUGEPAGED));
1676 seq_buf_printf(s, "pgsteal %lu\n",
1677 memcg_events(memcg, PGSTEAL_KSWAPD) +
1678 memcg_events(memcg, PGSTEAL_DIRECT) +
1679 memcg_events(memcg, PGSTEAL_KHUGEPAGED));
1680
1681 for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1682 if (memcg_vm_event_stat[i] == PGPGIN ||
1683 memcg_vm_event_stat[i] == PGPGOUT)
1684 continue;
1685
1686 seq_buf_printf(s, "%s %lu\n",
1687 vm_event_name(memcg_vm_event_stat[i]),
1688 memcg_events(memcg, memcg_vm_event_stat[i]));
1689 }
1690
1691 /* The above should easily fit into one page */
1692 WARN_ON_ONCE(seq_buf_has_overflowed(s));
1693}
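/*
 * The buffer produced above backs memory.stat on cgroup v2; a sketch of the
 * resulting output (illustrative values only) looks like:
 *
 *	anon 10412032
 *	file 23949312
 *	kernel_stack 65536
 *	...
 *	slab 1232896
 *	pgscan 1024
 *	pgsteal 512
 *	pgfault 30000
 */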
1694
1695static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);
1696
1697static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1698{
1699 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1700 memcg_stat_format(memcg, s);
1701 else
1702 memcg1_stat_format(memcg, s);
1703 WARN_ON_ONCE(seq_buf_has_overflowed(s));
1704}
1705
1706/**
1707 * mem_cgroup_print_oom_context: Print OOM information relevant to
1708 * memory controller.
1709 * @memcg: The memory cgroup that went over limit
1710 * @p: Task that is going to be killed
1711 *
1712 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1713 * enabled
1714 */
1715void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1716{
1717 rcu_read_lock();
1718
1719 if (memcg) {
1720 pr_cont(",oom_memcg=");
1721 pr_cont_cgroup_path(memcg->css.cgroup);
1722 } else
1723 pr_cont(",global_oom");
1724 if (p) {
1725 pr_cont(",task_memcg=");
1726 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1727 }
1728 rcu_read_unlock();
1729}
1730
1731/**
1732 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1733 * memory controller.
1734 * @memcg: The memory cgroup that went over limit
1735 */
1736void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1737{
1738	/* Use a static buffer, since the caller is holding oom_lock. */
1739 static char buf[PAGE_SIZE];
1740 struct seq_buf s;
1741
1742 lockdep_assert_held(&oom_lock);
1743
1744 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1745 K((u64)page_counter_read(&memcg->memory)),
1746 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1747 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1748 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1749 K((u64)page_counter_read(&memcg->swap)),
1750 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1751 else {
1752 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1753 K((u64)page_counter_read(&memcg->memsw)),
1754 K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1755 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1756 K((u64)page_counter_read(&memcg->kmem)),
1757 K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1758 }
1759
1760 pr_info("Memory cgroup stats for ");
1761 pr_cont_cgroup_path(memcg->css.cgroup);
1762 pr_cont(":");
1763 seq_buf_init(&s, buf, sizeof(buf));
1764 memory_stat_format(memcg, &s);
1765 seq_buf_do_printk(&s, KERN_INFO);
1766}
1767
1768/*
1769 * Return the memory (and swap, if configured) limit for a memcg.
1770 */
1771unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1772{
1773 unsigned long max = READ_ONCE(memcg->memory.max);
1774
1775 if (do_memsw_account()) {
1776 if (mem_cgroup_swappiness(memcg)) {
1777 /* Calculate swap excess capacity from memsw limit */
1778 unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1779
1780 max += min(swap, (unsigned long)total_swap_pages);
1781 }
1782 } else {
1783 if (mem_cgroup_swappiness(memcg))
1784 max += min(READ_ONCE(memcg->swap.max),
1785 (unsigned long)total_swap_pages);
1786 }
1787 return max;
1788}
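/*
 * Illustrative example (hypothetical limits): with memory.max = 1G and,
 * under legacy memsw accounting, memsw.max = 1.5G, a memcg with non-zero
 * swappiness reports a maximum of 1G plus min(0.5G, total_swap_pages) worth
 * of pages; with swappiness 0, swap is ignored and the value is just
 * memory.max.
 */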
1789
1790unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1791{
1792 return page_counter_read(&memcg->memory);
1793}
1794
1795static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1796 int order)
1797{
1798 struct oom_control oc = {
1799 .zonelist = NULL,
1800 .nodemask = NULL,
1801 .memcg = memcg,
1802 .gfp_mask = gfp_mask,
1803 .order = order,
1804 };
1805 bool ret = true;
1806
1807 if (mutex_lock_killable(&oom_lock))
1808 return true;
1809
1810 if (mem_cgroup_margin(memcg) >= (1 << order))
1811 goto unlock;
1812
1813 /*
1814 * A few threads which were not waiting at mutex_lock_killable() can
1815 * fail to bail out. Therefore, check again after holding oom_lock.
1816 */
1817 ret = task_is_dying() || out_of_memory(&oc);
1818
1819unlock:
1820 mutex_unlock(&oom_lock);
1821 return ret;
1822}
1823
1824static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1825 pg_data_t *pgdat,
1826 gfp_t gfp_mask,
1827 unsigned long *total_scanned)
1828{
1829 struct mem_cgroup *victim = NULL;
1830 int total = 0;
1831 int loop = 0;
1832 unsigned long excess;
1833 unsigned long nr_scanned;
1834 struct mem_cgroup_reclaim_cookie reclaim = {
1835 .pgdat = pgdat,
1836 };
1837
1838 excess = soft_limit_excess(root_memcg);
1839
1840 while (1) {
1841 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1842 if (!victim) {
1843 loop++;
1844 if (loop >= 2) {
1845 /*
1846				 * If we have not been able to reclaim
1847				 * anything, it might be because there are
1848				 * no reclaimable pages under this hierarchy.
1849 */
1850 if (!total)
1851 break;
1852 /*
1853 * We want to do more targeted reclaim.
1854				 * excess >> 2 is not too excessive, so we don't
1855				 * reclaim too much, nor too little, so we don't
1856				 * keep coming back to reclaim from this cgroup.
1857 */
1858 if (total >= (excess >> 2) ||
1859 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1860 break;
1861 }
1862 continue;
1863 }
1864 total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1865 pgdat, &nr_scanned);
1866 *total_scanned += nr_scanned;
1867 if (!soft_limit_excess(root_memcg))
1868 break;
1869 }
1870 mem_cgroup_iter_break(root_memcg, victim);
1871 return total;
1872}
1873
1874#ifdef CONFIG_LOCKDEP
1875static struct lockdep_map memcg_oom_lock_dep_map = {
1876 .name = "memcg_oom_lock",
1877};
1878#endif
1879
1880static DEFINE_SPINLOCK(memcg_oom_lock);
1881
1882/*
1883 * Check whether the OOM killer is already running under our hierarchy.
1884 * If someone else is already running it, return false.
1885 */
1886static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1887{
1888 struct mem_cgroup *iter, *failed = NULL;
1889
1890 spin_lock(&memcg_oom_lock);
1891
1892 for_each_mem_cgroup_tree(iter, memcg) {
1893 if (iter->oom_lock) {
1894 /*
1895			 * This subtree of our hierarchy is already locked,
1896			 * so we cannot take the lock.
1897 */
1898 failed = iter;
1899 mem_cgroup_iter_break(memcg, iter);
1900 break;
1901 } else
1902 iter->oom_lock = true;
1903 }
1904
1905 if (failed) {
1906 /*
1907		 * OK, we failed to lock the whole subtree, so we have
1908		 * to undo what we set up, up to the failing subtree.
1909 */
1910 for_each_mem_cgroup_tree(iter, memcg) {
1911 if (iter == failed) {
1912 mem_cgroup_iter_break(memcg, iter);
1913 break;
1914 }
1915 iter->oom_lock = false;
1916 }
1917 } else
1918 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1919
1920 spin_unlock(&memcg_oom_lock);
1921
1922 return !failed;
1923}
1924
1925static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1926{
1927 struct mem_cgroup *iter;
1928
1929 spin_lock(&memcg_oom_lock);
1930 mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1931 for_each_mem_cgroup_tree(iter, memcg)
1932 iter->oom_lock = false;
1933 spin_unlock(&memcg_oom_lock);
1934}
1935
1936static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1937{
1938 struct mem_cgroup *iter;
1939
1940 spin_lock(&memcg_oom_lock);
1941 for_each_mem_cgroup_tree(iter, memcg)
1942 iter->under_oom++;
1943 spin_unlock(&memcg_oom_lock);
1944}
1945
1946static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1947{
1948 struct mem_cgroup *iter;
1949
1950 /*
1951 * Be careful about under_oom underflows because a child memcg
1952 * could have been added after mem_cgroup_mark_under_oom.
1953 */
1954 spin_lock(&memcg_oom_lock);
1955 for_each_mem_cgroup_tree(iter, memcg)
1956 if (iter->under_oom > 0)
1957 iter->under_oom--;
1958 spin_unlock(&memcg_oom_lock);
1959}
1960
1961static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1962
1963struct oom_wait_info {
1964 struct mem_cgroup *memcg;
1965 wait_queue_entry_t wait;
1966};
1967
1968static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1969 unsigned mode, int sync, void *arg)
1970{
1971 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1972 struct mem_cgroup *oom_wait_memcg;
1973 struct oom_wait_info *oom_wait_info;
1974
1975 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1976 oom_wait_memcg = oom_wait_info->memcg;
1977
1978 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1979 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1980 return 0;
1981 return autoremove_wake_function(wait, mode, sync, arg);
1982}
1983
1984static void memcg_oom_recover(struct mem_cgroup *memcg)
1985{
1986 /*
1987 * For the following lockless ->under_oom test, the only required
1988 * guarantee is that it must see the state asserted by an OOM when
1989 * this function is called as a result of userland actions
1990 * triggered by the notification of the OOM. This is trivially
1991 * achieved by invoking mem_cgroup_mark_under_oom() before
1992 * triggering notification.
1993 */
1994 if (memcg && memcg->under_oom)
1995 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1996}
1997
1998/*
1999 * Returns true if successfully killed one or more processes. Though in some
2000 * corner cases it can return true even without killing any process.
2001 */
2002static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
2003{
2004 bool locked, ret;
2005
2006 if (order > PAGE_ALLOC_COSTLY_ORDER)
2007 return false;
2008
2009 memcg_memory_event(memcg, MEMCG_OOM);
2010
2011 /*
2012 * We are in the middle of the charge context here, so we
2013 * don't want to block when potentially sitting on a callstack
2014 * that holds all kinds of filesystem and mm locks.
2015 *
2016 * cgroup1 allows disabling the OOM killer and waiting for outside
2017 * handling until the charge can succeed; remember the context and put
2018 * the task to sleep at the end of the page fault when all locks are
2019 * released.
2020 *
2021 * On the other hand, in-kernel OOM killer allows for an async victim
2022 * memory reclaim (oom_reaper) and that means that we are not solely
2023 * relying on the oom victim to make a forward progress and we can
2024 * invoke the oom killer here.
2025 *
2026 * Please note that mem_cgroup_out_of_memory might fail to find a
2027 * victim and then we have to bail out from the charge path.
2028 */
2029 if (READ_ONCE(memcg->oom_kill_disable)) {
2030 if (current->in_user_fault) {
2031 css_get(&memcg->css);
2032 current->memcg_in_oom = memcg;
2033 current->memcg_oom_gfp_mask = mask;
2034 current->memcg_oom_order = order;
2035 }
2036 return false;
2037 }
2038
2039 mem_cgroup_mark_under_oom(memcg);
2040
2041 locked = mem_cgroup_oom_trylock(memcg);
2042
2043 if (locked)
2044 mem_cgroup_oom_notify(memcg);
2045
2046 mem_cgroup_unmark_under_oom(memcg);
2047 ret = mem_cgroup_out_of_memory(memcg, mask, order);
2048
2049 if (locked)
2050 mem_cgroup_oom_unlock(memcg);
2051
2052 return ret;
2053}
2054
2055/**
2056 * mem_cgroup_oom_synchronize - complete memcg OOM handling
2057 * @handle: actually kill/wait or just clean up the OOM state
2058 *
2059 * This has to be called at the end of a page fault if the memcg OOM
2060 * handler was enabled.
2061 *
2062 * Memcg supports userspace OOM handling where failed allocations must
2063 * sleep on a waitqueue until the userspace task resolves the
2064 * situation. Sleeping directly in the charge context with all kinds
2065 * of locks held is not a good idea, instead we remember an OOM state
2066 * in the task and mem_cgroup_oom_synchronize() has to be called at
2067 * the end of the page fault to complete the OOM handling.
2068 *
2069 * Returns %true if an ongoing memcg OOM situation was detected and
2070 * completed, %false otherwise.
2071 */
2072bool mem_cgroup_oom_synchronize(bool handle)
2073{
2074 struct mem_cgroup *memcg = current->memcg_in_oom;
2075 struct oom_wait_info owait;
2076 bool locked;
2077
2078 /* OOM is global, do not handle */
2079 if (!memcg)
2080 return false;
2081
2082 if (!handle)
2083 goto cleanup;
2084
2085 owait.memcg = memcg;
2086 owait.wait.flags = 0;
2087 owait.wait.func = memcg_oom_wake_function;
2088 owait.wait.private = current;
2089 INIT_LIST_HEAD(&owait.wait.entry);
2090
2091 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
2092 mem_cgroup_mark_under_oom(memcg);
2093
2094 locked = mem_cgroup_oom_trylock(memcg);
2095
2096 if (locked)
2097 mem_cgroup_oom_notify(memcg);
2098
2099 schedule();
2100 mem_cgroup_unmark_under_oom(memcg);
2101 finish_wait(&memcg_oom_waitq, &owait.wait);
2102
2103 if (locked)
2104 mem_cgroup_oom_unlock(memcg);
2105cleanup:
2106 current->memcg_in_oom = NULL;
2107 css_put(&memcg->css);
2108 return true;
2109}
2110
2111/**
2112 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2113 * @victim: task to be killed by the OOM killer
2114 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2115 *
2116 * Returns a pointer to a memory cgroup, which has to be cleaned up
2117 * by killing all belonging OOM-killable tasks.
2118 *
2119 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2120 */
2121struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2122 struct mem_cgroup *oom_domain)
2123{
2124 struct mem_cgroup *oom_group = NULL;
2125 struct mem_cgroup *memcg;
2126
2127 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2128 return NULL;
2129
2130 if (!oom_domain)
2131 oom_domain = root_mem_cgroup;
2132
2133 rcu_read_lock();
2134
2135 memcg = mem_cgroup_from_task(victim);
2136 if (mem_cgroup_is_root(memcg))
2137 goto out;
2138
2139 /*
2140 * If the victim task has been asynchronously moved to a different
2141 * memory cgroup, we might end up killing tasks outside oom_domain.
2142 * In this case it's better to ignore memory.group.oom.
2143 */
2144 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2145 goto out;
2146
2147 /*
2148 * Traverse the memory cgroup hierarchy from the victim task's
2149 * cgroup up to the OOMing cgroup (or root) to find the
2150 * highest-level memory cgroup with oom.group set.
2151 */
2152 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2153 if (READ_ONCE(memcg->oom_group))
2154 oom_group = memcg;
2155
2156 if (memcg == oom_domain)
2157 break;
2158 }
2159
2160 if (oom_group)
2161 css_get(&oom_group->css);
2162out:
2163 rcu_read_unlock();
2164
2165 return oom_group;
2166}
2167
2168void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2169{
2170 pr_info("Tasks in ");
2171 pr_cont_cgroup_path(memcg->css.cgroup);
2172 pr_cont(" are going to be killed due to memory.oom.group set\n");
2173}
2174
2175/**
2176 * folio_memcg_lock - Bind a folio to its memcg.
2177 * @folio: The folio.
2178 *
2179 * This function prevents unlocked LRU folios from being moved to
2180 * another cgroup.
2181 *
2182 * It ensures lifetime of the bound memcg. The caller is responsible
2183 * for the lifetime of the folio.
2184 */
2185void folio_memcg_lock(struct folio *folio)
2186{
2187 struct mem_cgroup *memcg;
2188 unsigned long flags;
2189
2190 /*
2191 * The RCU lock is held throughout the transaction. The fast
2192 * path can get away without acquiring the memcg->move_lock
2193 * because page moving starts with an RCU grace period.
2194 */
2195 rcu_read_lock();
2196
2197 if (mem_cgroup_disabled())
2198 return;
2199again:
2200 memcg = folio_memcg(folio);
2201 if (unlikely(!memcg))
2202 return;
2203
2204#ifdef CONFIG_PROVE_LOCKING
2205 local_irq_save(flags);
2206 might_lock(&memcg->move_lock);
2207 local_irq_restore(flags);
2208#endif
2209
2210 if (atomic_read(&memcg->moving_account) <= 0)
2211 return;
2212
2213 spin_lock_irqsave(&memcg->move_lock, flags);
2214 if (memcg != folio_memcg(folio)) {
2215 spin_unlock_irqrestore(&memcg->move_lock, flags);
2216 goto again;
2217 }
2218
2219 /*
2220 * When charge migration first begins, we can have multiple
2221 * critical sections holding the fast-path RCU lock and one
2222 * holding the slowpath move_lock. Track the task who has the
2223 * move_lock for folio_memcg_unlock().
2224 */
2225 memcg->move_lock_task = current;
2226 memcg->move_lock_flags = flags;
2227}
2228
2229static void __folio_memcg_unlock(struct mem_cgroup *memcg)
2230{
2231 if (memcg && memcg->move_lock_task == current) {
2232 unsigned long flags = memcg->move_lock_flags;
2233
2234 memcg->move_lock_task = NULL;
2235 memcg->move_lock_flags = 0;
2236
2237 spin_unlock_irqrestore(&memcg->move_lock, flags);
2238 }
2239
2240 rcu_read_unlock();
2241}
2242
2243/**
2244 * folio_memcg_unlock - Release the binding between a folio and its memcg.
2245 * @folio: The folio.
2246 *
2247 * This releases the binding created by folio_memcg_lock(). This does
2248 * not change the accounting of this folio to its memcg, but it does
2249 * permit others to change it.
2250 */
2251void folio_memcg_unlock(struct folio *folio)
2252{
2253 __folio_memcg_unlock(folio_memcg(folio));
2254}
2255
2256struct memcg_stock_pcp {
2257 local_lock_t stock_lock;
2258	struct mem_cgroup *cached; /* this is never the root cgroup */
2259 unsigned int nr_pages;
2260
2261#ifdef CONFIG_MEMCG_KMEM
2262 struct obj_cgroup *cached_objcg;
2263 struct pglist_data *cached_pgdat;
2264 unsigned int nr_bytes;
2265 int nr_slab_reclaimable_b;
2266 int nr_slab_unreclaimable_b;
2267#endif
2268
2269 struct work_struct work;
2270 unsigned long flags;
2271#define FLUSHING_CACHED_CHARGE 0
2272};
2273static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
2274 .stock_lock = INIT_LOCAL_LOCK(stock_lock),
2275};
2276static DEFINE_MUTEX(percpu_charge_mutex);
2277
2278#ifdef CONFIG_MEMCG_KMEM
2279static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
2280static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2281 struct mem_cgroup *root_memcg);
2282static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages);
2283
2284#else
2285static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2286{
2287 return NULL;
2288}
2289static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2290 struct mem_cgroup *root_memcg)
2291{
2292 return false;
2293}
2294static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2295{
2296}
2297#endif
2298
2299/**
2300 * consume_stock: Try to consume stocked charge on this cpu.
2301 * @memcg: memcg to consume from.
2302 * @nr_pages: how many pages to charge.
2303 *
2304 * The charges will only happen if @memcg matches the current cpu's memcg
2305 * stock, and at least @nr_pages are available in that stock. Failure to
2306 * service an allocation will refill the stock.
2307 *
2308 * returns true if successful, false otherwise.
2309 */
2310static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2311{
2312 struct memcg_stock_pcp *stock;
2313 unsigned long flags;
2314 bool ret = false;
2315
2316 if (nr_pages > MEMCG_CHARGE_BATCH)
2317 return ret;
2318
2319 local_lock_irqsave(&memcg_stock.stock_lock, flags);
2320
2321 stock = this_cpu_ptr(&memcg_stock);
2322 if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) {
2323 stock->nr_pages -= nr_pages;
2324 ret = true;
2325 }
2326
2327 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2328
2329 return ret;
2330}
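/*
 * Illustrative behaviour (hypothetical numbers): if this CPU's stock holds
 * 32 pre-charged pages for memcg A, a 4-page charge for A is served from
 * the stock without touching the page counters; a charge for a different
 * memcg, or one exceeding the cached amount, returns false and the caller
 * falls back to page_counter_try_charge().
 */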
2331
2332/*
2333 * Drain the charges cached in the percpu stock and reset the cached information.
2334 */
2335static void drain_stock(struct memcg_stock_pcp *stock)
2336{
2337 struct mem_cgroup *old = READ_ONCE(stock->cached);
2338
2339 if (!old)
2340 return;
2341
2342 if (stock->nr_pages) {
2343 page_counter_uncharge(&old->memory, stock->nr_pages);
2344 if (do_memsw_account())
2345 page_counter_uncharge(&old->memsw, stock->nr_pages);
2346 stock->nr_pages = 0;
2347 }
2348
2349 css_put(&old->css);
2350 WRITE_ONCE(stock->cached, NULL);
2351}
2352
2353static void drain_local_stock(struct work_struct *dummy)
2354{
2355 struct memcg_stock_pcp *stock;
2356 struct obj_cgroup *old = NULL;
2357 unsigned long flags;
2358
2359 /*
2360 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2361 * drain_stock races is that we always operate on local CPU stock
2362 * here with IRQ disabled
2363 */
2364 local_lock_irqsave(&memcg_stock.stock_lock, flags);
2365
2366 stock = this_cpu_ptr(&memcg_stock);
2367 old = drain_obj_stock(stock);
2368 drain_stock(stock);
2369 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2370
2371 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2372 if (old)
2373 obj_cgroup_put(old);
2374}
2375
2376/*
2377 * Cache charges (nr_pages) in the local per-cpu area,
2378 * to be consumed by consume_stock() later.
2379 */
2380static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2381{
2382 struct memcg_stock_pcp *stock;
2383
2384 stock = this_cpu_ptr(&memcg_stock);
2385 if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
2386 drain_stock(stock);
2387 css_get(&memcg->css);
2388 WRITE_ONCE(stock->cached, memcg);
2389 }
2390 stock->nr_pages += nr_pages;
2391
2392 if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2393 drain_stock(stock);
2394}
2395
2396static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2397{
2398 unsigned long flags;
2399
2400 local_lock_irqsave(&memcg_stock.stock_lock, flags);
2401 __refill_stock(memcg, nr_pages);
2402 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2403}
2404
2405/*
2406 * Drains all per-CPU charge caches for given root_memcg resp. subtree
2407 * of the hierarchy under it.
2408 */
2409static void drain_all_stock(struct mem_cgroup *root_memcg)
2410{
2411 int cpu, curcpu;
2412
2413	/* If someone's already draining, avoid running more workers. */
2414 if (!mutex_trylock(&percpu_charge_mutex))
2415 return;
2416 /*
2417	 * Notify other cpus that a system-wide "drain" is running.
2418 * We do not care about races with the cpu hotplug because cpu down
2419 * as well as workers from this path always operate on the local
2420 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2421 */
2422 migrate_disable();
2423 curcpu = smp_processor_id();
2424 for_each_online_cpu(cpu) {
2425 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2426 struct mem_cgroup *memcg;
2427 bool flush = false;
2428
2429 rcu_read_lock();
2430 memcg = READ_ONCE(stock->cached);
2431 if (memcg && stock->nr_pages &&
2432 mem_cgroup_is_descendant(memcg, root_memcg))
2433 flush = true;
2434 else if (obj_stock_flush_required(stock, root_memcg))
2435 flush = true;
2436 rcu_read_unlock();
2437
2438 if (flush &&
2439 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2440 if (cpu == curcpu)
2441 drain_local_stock(&stock->work);
2442 else if (!cpu_is_isolated(cpu))
2443 schedule_work_on(cpu, &stock->work);
2444 }
2445 }
2446 migrate_enable();
2447 mutex_unlock(&percpu_charge_mutex);
2448}
2449
2450static int memcg_hotplug_cpu_dead(unsigned int cpu)
2451{
2452 struct memcg_stock_pcp *stock;
2453
2454 stock = &per_cpu(memcg_stock, cpu);
2455 drain_stock(stock);
2456
2457 return 0;
2458}
2459
2460static unsigned long reclaim_high(struct mem_cgroup *memcg,
2461 unsigned int nr_pages,
2462 gfp_t gfp_mask)
2463{
2464 unsigned long nr_reclaimed = 0;
2465
2466 do {
2467 unsigned long pflags;
2468
2469 if (page_counter_read(&memcg->memory) <=
2470 READ_ONCE(memcg->memory.high))
2471 continue;
2472
2473 memcg_memory_event(memcg, MEMCG_HIGH);
2474
2475 psi_memstall_enter(&pflags);
2476 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2477 gfp_mask,
2478 MEMCG_RECLAIM_MAY_SWAP);
2479 psi_memstall_leave(&pflags);
2480 } while ((memcg = parent_mem_cgroup(memcg)) &&
2481 !mem_cgroup_is_root(memcg));
2482
2483 return nr_reclaimed;
2484}
2485
2486static void high_work_func(struct work_struct *work)
2487{
2488 struct mem_cgroup *memcg;
2489
2490 memcg = container_of(work, struct mem_cgroup, high_work);
2491 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2492}
2493
2494/*
2495 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2496 * enough to still cause a significant slowdown in most cases, while still
2497 * allowing diagnostics and tracing to proceed without becoming stuck.
2498 */
2499#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2500
2501/*
2502 * When calculating the delay, we use these on either side of the exponentiation to
2503 * maintain precision and scale to a reasonable number of jiffies (see the table
2504 * below).
2505 *
2506 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2507 * overage ratio to a delay.
2508 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2509 * proposed penalty in order to reduce to a reasonable number of jiffies, and
2510 * to produce a reasonable delay curve.
2511 *
2512 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2513 * reasonable delay curve compared to precision-adjusted overage, not
2514 * penalising heavily at first, but still making sure that growth beyond the
2515 * limit penalises misbehaving cgroups by slowing them down exponentially. For
2516 * example, with a high of 100 megabytes:
2517 *
2518 * +-------+------------------------+
2519 * | usage | time to allocate in ms |
2520 * +-------+------------------------+
2521 * | 100M | 0 |
2522 * | 101M | 6 |
2523 * | 102M | 25 |
2524 * | 103M | 57 |
2525 * | 104M | 102 |
2526 * | 105M | 159 |
2527 * | 106M | 230 |
2528 * | 107M | 313 |
2529 * | 108M | 409 |
2530 * | 109M | 518 |
2531 * | 110M | 639 |
2532 * | 111M | 774 |
2533 * | 112M | 921 |
2534 * | 113M | 1081 |
2535 * | 114M | 1254 |
2536 * | 115M | 1439 |
2537 * | 116M | 1638 |
2538 * | 117M | 1849 |
2539 * | 118M | 2000 |
2540 * | 119M | 2000 |
2541 * | 120M | 2000 |
2542 * +-------+------------------------+
2543 */
2544 #define MEMCG_DELAY_PRECISION_SHIFT 20
2545 #define MEMCG_DELAY_SCALING_SHIFT 14
2546
2547static u64 calculate_overage(unsigned long usage, unsigned long high)
2548{
2549 u64 overage;
2550
2551 if (usage <= high)
2552 return 0;
2553
2554 /*
2555 * Prevent division by 0 in overage calculation by acting as if
2556 * it was a threshold of 1 page
2557 */
2558 high = max(high, 1UL);
2559
2560 overage = usage - high;
2561 overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2562 return div64_u64(overage, high);
2563}
2564
2565static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2566{
2567 u64 overage, max_overage = 0;
2568
2569 do {
2570 overage = calculate_overage(page_counter_read(&memcg->memory),
2571 READ_ONCE(memcg->memory.high));
2572 max_overage = max(overage, max_overage);
2573 } while ((memcg = parent_mem_cgroup(memcg)) &&
2574 !mem_cgroup_is_root(memcg));
2575
2576 return max_overage;
2577}
2578
2579static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2580{
2581 u64 overage, max_overage = 0;
2582
2583 do {
2584 overage = calculate_overage(page_counter_read(&memcg->swap),
2585 READ_ONCE(memcg->swap.high));
2586 if (overage)
2587 memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2588 max_overage = max(overage, max_overage);
2589 } while ((memcg = parent_mem_cgroup(memcg)) &&
2590 !mem_cgroup_is_root(memcg));
2591
2592 return max_overage;
2593}
2594
2595/*
2596 * Get the number of jiffies that we should penalise a mischievous cgroup which
2597 * is exceeding its memory.high by checking both it and its ancestors.
2598 */
2599static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2600 unsigned int nr_pages,
2601 u64 max_overage)
2602{
2603 unsigned long penalty_jiffies;
2604
2605 if (!max_overage)
2606 return 0;
2607
2608 /*
2609 * We use overage compared to memory.high to calculate the number of
2610 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2611 * fairly lenient on small overages, and increasingly harsh when the
2612 * memcg in question makes it clear that it has no intention of stopping
2613 * its crazy behaviour, so we exponentially increase the delay based on
2614 * overage amount.
2615 */
2616 penalty_jiffies = max_overage * max_overage * HZ;
2617 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2618 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2619
2620 /*
2621 * Factor in the task's own contribution to the overage, such that four
2622 * N-sized allocations are throttled approximately the same as one
2623 * 4N-sized allocation.
2624 *
2625 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2626	 * larger the current charge batch is than that.
2627 */
2628 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2629}
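/*
 * Worked example matching the table above (hypothetical usage): with
 * memory.high = 100M and usage = 104M, calculate_overage() returns
 * (4M / 100M) << MEMCG_DELAY_PRECISION_SHIFT ~= 41943. Squaring and scaling
 * gives 41943 * 41943 * HZ >> 20 >> 14 ~= 0.1 * HZ, i.e. roughly 100ms of
 * delay for a MEMCG_CHARGE_BATCH-sized charge, before the final clamp to
 * MEMCG_MAX_HIGH_DELAY_JIFFIES.
 */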
2630
2631/*
2632 * Reclaims memory over the high limit. Called directly from
2633 * try_charge() (context permitting), as well as from the userland
2634 * return path where reclaim is always able to block.
2635 */
2636void mem_cgroup_handle_over_high(gfp_t gfp_mask)
2637{
2638 unsigned long penalty_jiffies;
2639 unsigned long pflags;
2640 unsigned long nr_reclaimed;
2641 unsigned int nr_pages = current->memcg_nr_pages_over_high;
2642 int nr_retries = MAX_RECLAIM_RETRIES;
2643 struct mem_cgroup *memcg;
2644 bool in_retry = false;
2645
2646 if (likely(!nr_pages))
2647 return;
2648
2649 memcg = get_mem_cgroup_from_mm(current->mm);
2650 current->memcg_nr_pages_over_high = 0;
2651
2652retry_reclaim:
2653 /*
2654 * Bail if the task is already exiting. Unlike memory.max,
2655 * memory.high enforcement isn't as strict, and there is no
2656 * OOM killer involved, which means the excess could already
2657 * be much bigger (and still growing) than it could for
2658 * memory.max; the dying task could get stuck in fruitless
2659 * reclaim for a long time, which isn't desirable.
2660 */
2661 if (task_is_dying())
2662 goto out;
2663
2664 /*
2665 * The allocating task should reclaim at least the batch size, but for
2666 * subsequent retries we only want to do what's necessary to prevent oom
2667 * or breaching resource isolation.
2668 *
2669 * This is distinct from memory.max or page allocator behaviour because
2670 * memory.high is currently batched, whereas memory.max and the page
2671 * allocator run every time an allocation is made.
2672 */
2673 nr_reclaimed = reclaim_high(memcg,
2674 in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2675 gfp_mask);
2676
2677 /*
2678 * memory.high is breached and reclaim is unable to keep up. Throttle
2679 * allocators proactively to slow down excessive growth.
2680 */
2681 penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2682 mem_find_max_overage(memcg));
2683
2684 penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2685 swap_find_max_overage(memcg));
2686
2687 /*
2688 * Clamp the max delay per usermode return so as to still keep the
2689 * application moving forwards and also permit diagnostics, albeit
2690 * extremely slowly.
2691 */
2692 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2693
2694 /*
2695 * Don't sleep if the amount of jiffies this memcg owes us is so low
2696 * that it's not even worth doing, in an attempt to be nice to those who
2697 * go only a small amount over their memory.high value and maybe haven't
2698 * been aggressively reclaimed enough yet.
2699 */
2700 if (penalty_jiffies <= HZ / 100)
2701 goto out;
2702
2703 /*
2704 * If reclaim is making forward progress but we're still over
2705 * memory.high, we want to encourage that rather than doing allocator
2706 * throttling.
2707 */
2708 if (nr_reclaimed || nr_retries--) {
2709 in_retry = true;
2710 goto retry_reclaim;
2711 }
2712
2713 /*
2714 * Reclaim didn't manage to push usage below the limit, slow
2715 * this allocating task down.
2716 *
2717 * If we exit early, we're guaranteed to die (since
2718 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2719 * need to account for any ill-begotten jiffies to pay them off later.
2720 */
2721 psi_memstall_enter(&pflags);
2722 schedule_timeout_killable(penalty_jiffies);
2723 psi_memstall_leave(&pflags);
2724
2725out:
2726 css_put(&memcg->css);
2727}
2728
2729static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2730 unsigned int nr_pages)
2731{
2732 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2733 int nr_retries = MAX_RECLAIM_RETRIES;
2734 struct mem_cgroup *mem_over_limit;
2735 struct page_counter *counter;
2736 unsigned long nr_reclaimed;
2737 bool passed_oom = false;
2738 unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2739 bool drained = false;
2740 bool raised_max_event = false;
2741 unsigned long pflags;
2742
2743retry:
2744 if (consume_stock(memcg, nr_pages))
2745 return 0;
2746
2747 if (!do_memsw_account() ||
2748 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2749 if (page_counter_try_charge(&memcg->memory, batch, &counter))
2750 goto done_restock;
2751 if (do_memsw_account())
2752 page_counter_uncharge(&memcg->memsw, batch);
2753 mem_over_limit = mem_cgroup_from_counter(counter, memory);
2754 } else {
2755 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2756 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2757 }
2758
2759 if (batch > nr_pages) {
2760 batch = nr_pages;
2761 goto retry;
2762 }
2763
2764 /*
2765 * Prevent unbounded recursion when reclaim operations need to
2766 * allocate memory. This might exceed the limits temporarily,
2767 * but we prefer facilitating memory reclaim and getting back
2768 * under the limit over triggering OOM kills in these cases.
2769 */
2770 if (unlikely(current->flags & PF_MEMALLOC))
2771 goto force;
2772
2773 if (unlikely(task_in_memcg_oom(current)))
2774 goto nomem;
2775
2776 if (!gfpflags_allow_blocking(gfp_mask))
2777 goto nomem;
2778
2779 memcg_memory_event(mem_over_limit, MEMCG_MAX);
2780 raised_max_event = true;
2781
2782 psi_memstall_enter(&pflags);
2783 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2784 gfp_mask, reclaim_options);
2785 psi_memstall_leave(&pflags);
2786
2787 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2788 goto retry;
2789
2790 if (!drained) {
2791 drain_all_stock(mem_over_limit);
2792 drained = true;
2793 goto retry;
2794 }
2795
2796 if (gfp_mask & __GFP_NORETRY)
2797 goto nomem;
2798 /*
2799 * Even though the limit is exceeded at this point, reclaim
2800 * may have been able to free some pages. Retry the charge
2801 * before killing the task.
2802 *
2803 * Only for regular pages, though: huge pages are rather
2804 * unlikely to succeed so close to the limit, and we fall back
2805 * to regular pages anyway in case of failure.
2806 */
2807 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2808 goto retry;
2809 /*
2810	 * During a task move, charge accounts can be doubly counted, so it's
2811	 * better to wait until the end of the task move if one is in progress.
2812 */
2813 if (mem_cgroup_wait_acct_move(mem_over_limit))
2814 goto retry;
2815
2816 if (nr_retries--)
2817 goto retry;
2818
2819 if (gfp_mask & __GFP_RETRY_MAYFAIL)
2820 goto nomem;
2821
2822 /* Avoid endless loop for tasks bypassed by the oom killer */
2823 if (passed_oom && task_is_dying())
2824 goto nomem;
2825
2826 /*
2827	 * Keep retrying as long as the memcg oom killer is able to make
2828	 * forward progress, or bypass the charge if the oom killer
2829	 * couldn't make any progress.
2830 */
2831 if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2832 get_order(nr_pages * PAGE_SIZE))) {
2833 passed_oom = true;
2834 nr_retries = MAX_RECLAIM_RETRIES;
2835 goto retry;
2836 }
2837nomem:
2838 /*
2839 * Memcg doesn't have a dedicated reserve for atomic
2840 * allocations. But like the global atomic pool, we need to
2841 * put the burden of reclaim on regular allocation requests
2842 * and let these go through as privileged allocations.
2843 */
2844 if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2845 return -ENOMEM;
2846force:
2847 /*
2848 * If the allocation has to be enforced, don't forget to raise
2849 * a MEMCG_MAX event.
2850 */
2851 if (!raised_max_event)
2852 memcg_memory_event(mem_over_limit, MEMCG_MAX);
2853
2854 /*
2855 * The allocation either can't fail or will lead to more memory
2856	 * being freed very soon. Allow memory usage to go over the limit
2857 * temporarily by force charging it.
2858 */
2859 page_counter_charge(&memcg->memory, nr_pages);
2860 if (do_memsw_account())
2861 page_counter_charge(&memcg->memsw, nr_pages);
2862
2863 return 0;
2864
2865done_restock:
2866 if (batch > nr_pages)
2867 refill_stock(memcg, batch - nr_pages);
2868
2869 /*
2870 * If the hierarchy is above the normal consumption range, schedule
2871 * reclaim on returning to userland. We can perform reclaim here
2872 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2873 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2874 * not recorded as it most likely matches current's and won't
2875 * change in the meantime. As high limit is checked again before
2876 * reclaim, the cost of mismatch is negligible.
2877 */
2878 do {
2879 bool mem_high, swap_high;
2880
2881 mem_high = page_counter_read(&memcg->memory) >
2882 READ_ONCE(memcg->memory.high);
2883 swap_high = page_counter_read(&memcg->swap) >
2884 READ_ONCE(memcg->swap.high);
2885
2886 /* Don't bother a random interrupted task */
2887 if (!in_task()) {
2888 if (mem_high) {
2889 schedule_work(&memcg->high_work);
2890 break;
2891 }
2892 continue;
2893 }
2894
2895 if (mem_high || swap_high) {
2896 /*
2897 * The allocating tasks in this cgroup will need to do
2898 * reclaim or be throttled to prevent further growth
2899 * of the memory or swap footprints.
2900 *
2901 * Target some best-effort fairness between the tasks,
2902 * and distribute reclaim work and delay penalties
2903 * based on how much each task is actually allocating.
2904 */
2905 current->memcg_nr_pages_over_high += batch;
2906 set_notify_resume(current);
2907 break;
2908 }
2909 } while ((memcg = parent_mem_cgroup(memcg)));
2910
2911 /*
2912 * Reclaim is set up above to be called from the userland
2913 * return path. But also attempt synchronous reclaim to avoid
2914 * excessive overrun while the task is still inside the
2915 * kernel. If this is successful, the return path will see it
2916 * when it rechecks the overage and simply bail out.
2917 */
2918 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2919 !(current->flags & PF_MEMALLOC) &&
2920 gfpflags_allow_blocking(gfp_mask))
2921 mem_cgroup_handle_over_high(gfp_mask);
2922 return 0;
2923}
2924
2925static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2926 unsigned int nr_pages)
2927{
2928 if (mem_cgroup_is_root(memcg))
2929 return 0;
2930
2931 return try_charge_memcg(memcg, gfp_mask, nr_pages);
2932}
2933
2934/**
2935 * mem_cgroup_cancel_charge() - cancel an uncommitted try_charge() call.
2936 * @memcg: memcg previously charged.
2937 * @nr_pages: number of pages previously charged.
2938 */
2939void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2940{
2941 if (mem_cgroup_is_root(memcg))
2942 return;
2943
2944 page_counter_uncharge(&memcg->memory, nr_pages);
2945 if (do_memsw_account())
2946 page_counter_uncharge(&memcg->memsw, nr_pages);
2947}
2948
2949static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2950{
2951 VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
2952 /*
2953 * Any of the following ensures page's memcg stability:
2954 *
2955 * - the page lock
2956 * - LRU isolation
2957 * - folio_memcg_lock()
2958 * - exclusive reference
2959 * - mem_cgroup_trylock_pages()
2960 */
2961 folio->memcg_data = (unsigned long)memcg;
2962}
2963
2964/**
2965 * mem_cgroup_commit_charge - commit a previously successful try_charge().
2966 * @folio: folio to commit the charge to.
2967 * @memcg: memcg previously charged.
2968 */
2969void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2970{
2971 css_get(&memcg->css);
2972 commit_charge(folio, memcg);
2973
2974 local_irq_disable();
2975 mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
2976 memcg_check_events(memcg, folio_nid(folio));
2977 local_irq_enable();
2978}
2979
2980#ifdef CONFIG_MEMCG_KMEM
2981/*
2982 * The allocated objcg pointers array is not accounted directly.
2983 * Moreover, it should not come from a DMA buffer and is not readily
2984 * reclaimable. So those GFP bits should be masked off.
2985 */
2986#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \
2987 __GFP_ACCOUNT | __GFP_NOFAIL)
2988
2989/*
2990 * mod_objcg_mlstate() may be called with irq enabled, so
2991 * mod_memcg_lruvec_state() should be used.
2992 */
2993static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2994 struct pglist_data *pgdat,
2995 enum node_stat_item idx, int nr)
2996{
2997 struct mem_cgroup *memcg;
2998 struct lruvec *lruvec;
2999
3000 rcu_read_lock();
3001 memcg = obj_cgroup_memcg(objcg);
3002 lruvec = mem_cgroup_lruvec(memcg, pgdat);
3003 mod_memcg_lruvec_state(lruvec, idx, nr);
3004 rcu_read_unlock();
3005}
3006
3007int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
3008 gfp_t gfp, bool new_slab)
3009{
3010 unsigned int objects = objs_per_slab(s, slab);
3011 unsigned long memcg_data;
3012 void *vec;
3013
3014 gfp &= ~OBJCGS_CLEAR_MASK;
3015 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
3016 slab_nid(slab));
3017 if (!vec)
3018 return -ENOMEM;
3019
3020 memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
3021 if (new_slab) {
3022 /*
3023 * If the slab is brand new and nobody can yet access its
3024 * memcg_data, no synchronization is required and memcg_data can
3025 * be simply assigned.
3026 */
3027 slab->memcg_data = memcg_data;
3028 } else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) {
3029 /*
3030 * If the slab is already in use, somebody can allocate and
3031 * assign obj_cgroups in parallel. In this case the existing
3032 * objcg vector should be reused.
3033 */
3034 kfree(vec);
3035 return 0;
3036 }
3037
3038 kmemleak_not_leak(vec);
3039 return 0;
3040}
3041
3042static __always_inline
3043struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
3044{
3045 /*
3046 * Slab objects are accounted individually, not per-page.
3047 * Memcg membership data for each individual object is saved in
3048 * slab->memcg_data.
3049 */
3050 if (folio_test_slab(folio)) {
3051 struct obj_cgroup **objcgs;
3052 struct slab *slab;
3053 unsigned int off;
3054
3055 slab = folio_slab(folio);
3056 objcgs = slab_objcgs(slab);
3057 if (!objcgs)
3058 return NULL;
3059
3060 off = obj_to_index(slab->slab_cache, slab, p);
3061 if (objcgs[off])
3062 return obj_cgroup_memcg(objcgs[off]);
3063
3064 return NULL;
3065 }
3066
3067 /*
3068 * folio_memcg_check() is used here, because in theory we can encounter
3069 * a folio where the slab flag has been cleared already, but
3070	 * slab->memcg_data has not been freed yet.
3071 * folio_memcg_check() will guarantee that a proper memory
3072 * cgroup pointer or NULL will be returned.
3073 */
3074 return folio_memcg_check(folio);
3075}
3076
3077/*
3078 * Returns a pointer to the memory cgroup to which the kernel object is charged.
3079 *
3080 * A passed kernel object can be a slab object, vmalloc object or a generic
3081 * kernel page, so different mechanisms for getting the memory cgroup pointer
3082 * should be used.
3083 *
3084 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
3085 * cannot know for sure how the kernel object is implemented.
3086 * mem_cgroup_from_obj() can be safely used in such cases.
3087 *
3088 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
3089 * cgroup_mutex, etc.
3090 */
3091struct mem_cgroup *mem_cgroup_from_obj(void *p)
3092{
3093 struct folio *folio;
3094
3095 if (mem_cgroup_disabled())
3096 return NULL;
3097
3098 if (unlikely(is_vmalloc_addr(p)))
3099 folio = page_folio(vmalloc_to_page(p));
3100 else
3101 folio = virt_to_folio(p);
3102
3103 return mem_cgroup_from_obj_folio(folio, p);
3104}
3105
3106/*
3107 * Returns a pointer to the memory cgroup to which the kernel object is charged.
3108 * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects
3109 * allocated using vmalloc().
3110 *
3111 * A passed kernel object must be a slab object or a generic kernel page.
3112 *
3113 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
3114 * cgroup_mutex, etc.
3115 */
3116struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
3117{
3118 if (mem_cgroup_disabled())
3119 return NULL;
3120
3121 return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
3122}
3123
3124static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
3125{
3126 struct obj_cgroup *objcg = NULL;
3127
3128 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
3129 objcg = rcu_dereference(memcg->objcg);
3130 if (likely(objcg && obj_cgroup_tryget(objcg)))
3131 break;
3132 objcg = NULL;
3133 }
3134 return objcg;
3135}
3136
3137static struct obj_cgroup *current_objcg_update(void)
3138{
3139 struct mem_cgroup *memcg;
3140 struct obj_cgroup *old, *objcg = NULL;
3141
3142 do {
3143 /* Atomically drop the update bit. */
3144		old = xchg(&current->objcg, NULL);
3145 if (old) {
3146 old = (struct obj_cgroup *)
3147 ((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
3148 if (old)
3149 obj_cgroup_put(old);
3150
3151 old = NULL;
3152 }
3153
3154 /* If new objcg is NULL, no reason for the second atomic update. */
3155 if (!current->mm || (current->flags & PF_KTHREAD))
3156 return NULL;
3157
3158 /*
3159 * Release the objcg pointer from the previous iteration,
3160 * if try_cmpxcg() below fails.
3161 */
3162 if (unlikely(objcg)) {
3163 obj_cgroup_put(objcg);
3164 objcg = NULL;
3165 }
3166
3167 /*
3168 * Obtain the new objcg pointer. The current task can be
3169 * asynchronously moved to another memcg and the previous
3170 * memcg can be offlined. So let's get the memcg pointer
3171 * and try get a reference to objcg under a rcu read lock.
3172 */
3173
3174 rcu_read_lock();
3175 memcg = mem_cgroup_from_task(current);
3176 objcg = __get_obj_cgroup_from_memcg(memcg);
3177 rcu_read_unlock();
3178
3179 /*
3180		 * Try to set up a new objcg pointer atomically. If it
3181 * fails, it means the update flag was set concurrently, so
3182 * the whole procedure should be repeated.
3183 */
3184	} while (!try_cmpxchg(&current->objcg, &old, objcg));
3185
3186 return objcg;
3187}
3188
3189__always_inline struct obj_cgroup *current_obj_cgroup(void)
3190{
3191 struct mem_cgroup *memcg;
3192 struct obj_cgroup *objcg;
3193
3194 if (in_task()) {
3195 memcg = current->active_memcg;
3196 if (unlikely(memcg))
3197 goto from_memcg;
3198
3199 objcg = READ_ONCE(current->objcg);
3200 if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
3201 objcg = current_objcg_update();
3202 /*
3203		 * The objcg reference is kept by the task, so it's safe
3204		 * for the current task to use the objcg.
3205 */
3206 return objcg;
3207 }
3208
3209 memcg = this_cpu_read(int_active_memcg);
3210 if (unlikely(memcg))
3211 goto from_memcg;
3212
3213 return NULL;
3214
3215from_memcg:
3216 objcg = NULL;
3217 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
3218 /*
3219 * Memcg pointer is protected by scope (see set_active_memcg())
3220 * and is pinning the corresponding objcg, so objcg can't go
3221 * away and can be used within the scope without any additional
3222 * protection.
3223 */
3224 objcg = rcu_dereference_check(memcg->objcg, 1);
3225 if (likely(objcg))
3226 break;
3227 }
3228
3229 return objcg;
3230}
3231
3232struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
3233{
3234 struct obj_cgroup *objcg;
3235
3236 if (!memcg_kmem_online())
3237 return NULL;
3238
3239 if (folio_memcg_kmem(folio)) {
3240 objcg = __folio_objcg(folio);
3241 obj_cgroup_get(objcg);
3242 } else {
3243 struct mem_cgroup *memcg;
3244
3245 rcu_read_lock();
3246 memcg = __folio_memcg(folio);
3247 if (memcg)
3248 objcg = __get_obj_cgroup_from_memcg(memcg);
3249 else
3250 objcg = NULL;
3251 rcu_read_unlock();
3252 }
3253 return objcg;
3254}
3255
3256static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
3257{
3258 mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
3259 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
3260 if (nr_pages > 0)
3261 page_counter_charge(&memcg->kmem, nr_pages);
3262 else
3263 page_counter_uncharge(&memcg->kmem, -nr_pages);
3264 }
3265}
3266
3267
3268/*
3269 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from a objcg
3270 * @objcg: object cgroup to uncharge
3271 * @nr_pages: number of pages to uncharge
3272 */
3273static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
3274 unsigned int nr_pages)
3275{
3276 struct mem_cgroup *memcg;
3277
3278 memcg = get_mem_cgroup_from_objcg(objcg);
3279
3280 memcg_account_kmem(memcg, -nr_pages);
3281 refill_stock(memcg, nr_pages);
3282
3283 css_put(&memcg->css);
3284}
3285
3286/*
3287 * obj_cgroup_charge_pages: charge a number of kernel pages to a objcg
3288 * @objcg: object cgroup to charge
3289 * @gfp: reclaim mode
3290 * @nr_pages: number of pages to charge
3291 *
3292 * Returns 0 on success, an error code on failure.
3293 */
3294static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3295 unsigned int nr_pages)
3296{
3297 struct mem_cgroup *memcg;
3298 int ret;
3299
3300 memcg = get_mem_cgroup_from_objcg(objcg);
3301
3302 ret = try_charge_memcg(memcg, gfp, nr_pages);
3303 if (ret)
3304 goto out;
3305
3306 memcg_account_kmem(memcg, nr_pages);
3307out:
3308 css_put(&memcg->css);
3309
3310 return ret;
3311}
3312
3313/**
3314 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3315 * @page: page to charge
3316 * @gfp: reclaim mode
3317 * @order: allocation order
3318 *
3319 * Returns 0 on success, an error code on failure.
3320 */
3321int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3322{
3323 struct obj_cgroup *objcg;
3324 int ret = 0;
3325
3326 objcg = current_obj_cgroup();
3327 if (objcg) {
3328 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
3329 if (!ret) {
3330 obj_cgroup_get(objcg);
3331 page->memcg_data = (unsigned long)objcg |
3332 MEMCG_DATA_KMEM;
3333 return 0;
3334 }
3335 }
3336 return ret;
3337}
3338
3339/**
3340 * __memcg_kmem_uncharge_page: uncharge a kmem page
3341 * @page: page to uncharge
3342 * @order: allocation order
3343 */
3344void __memcg_kmem_uncharge_page(struct page *page, int order)
3345{
3346 struct folio *folio = page_folio(page);
3347 struct obj_cgroup *objcg;
3348 unsigned int nr_pages = 1 << order;
3349
3350 if (!folio_memcg_kmem(folio))
3351 return;
3352
3353 objcg = __folio_objcg(folio);
3354 obj_cgroup_uncharge_pages(objcg, nr_pages);
3355 folio->memcg_data = 0;
3356 obj_cgroup_put(objcg);
3357}
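/*
 * Sketch of the usual pairing (assuming a __GFP_ACCOUNT page allocation):
 * the allocation charges the current objcg via __memcg_kmem_charge_page()
 * and tags the page with MEMCG_DATA_KMEM; when the page is freed,
 * __memcg_kmem_uncharge_page() looks the objcg back up through
 * __folio_objcg() and returns the charge via obj_cgroup_uncharge_pages().
 */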
3358
3359void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3360 enum node_stat_item idx, int nr)
3361{
3362 struct memcg_stock_pcp *stock;
3363 struct obj_cgroup *old = NULL;
3364 unsigned long flags;
3365 int *bytes;
3366
3367 local_lock_irqsave(&memcg_stock.stock_lock, flags);
3368 stock = this_cpu_ptr(&memcg_stock);
3369
3370 /*
3371 * Save vmstat data in stock and skip vmstat array update unless
3372 * accumulating over a page of vmstat data or when pgdat or idx
3373 * changes.
3374 */
3375 if (READ_ONCE(stock->cached_objcg) != objcg) {
3376 old = drain_obj_stock(stock);
3377 obj_cgroup_get(objcg);
3378 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3379 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3380 WRITE_ONCE(stock->cached_objcg, objcg);
3381 stock->cached_pgdat = pgdat;
3382 } else if (stock->cached_pgdat != pgdat) {
3383 /* Flush the existing cached vmstat data */
3384 struct pglist_data *oldpg = stock->cached_pgdat;
3385
3386 if (stock->nr_slab_reclaimable_b) {
3387 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3388 stock->nr_slab_reclaimable_b);
3389 stock->nr_slab_reclaimable_b = 0;
3390 }
3391 if (stock->nr_slab_unreclaimable_b) {
3392 mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3393 stock->nr_slab_unreclaimable_b);
3394 stock->nr_slab_unreclaimable_b = 0;
3395 }
3396 stock->cached_pgdat = pgdat;
3397 }
3398
3399 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3400 : &stock->nr_slab_unreclaimable_b;
3401 /*
3402	 * Even for large objects >= PAGE_SIZE, the vmstat data will still be
3403 * cached locally at least once before pushing it out.
3404 */
3405 if (!*bytes) {
3406 *bytes = nr;
3407 nr = 0;
3408 } else {
3409 *bytes += nr;
3410 if (abs(*bytes) > PAGE_SIZE) {
3411 nr = *bytes;
3412 *bytes = 0;
3413 } else {
3414 nr = 0;
3415 }
3416 }
3417 if (nr)
3418 mod_objcg_mlstate(objcg, pgdat, idx, nr);
3419
3420 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3421 if (old)
3422 obj_cgroup_put(old);
3423}
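/*
 * Example of the batching above (hypothetical numbers): three updates of
 * +300 bytes each for the same objcg and pgdat accumulate to 900 bytes in
 * nr_slab_*_b; the sum is only pushed to the vmstat counters via
 * mod_objcg_mlstate() once its absolute value exceeds PAGE_SIZE, or when
 * the cached objcg or pgdat changes, or when the stock is drained.
 */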
3424
3425static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3426{
3427 struct memcg_stock_pcp *stock;
3428 unsigned long flags;
3429 bool ret = false;
3430
3431 local_lock_irqsave(&memcg_stock.stock_lock, flags);
3432
3433 stock = this_cpu_ptr(&memcg_stock);
3434 if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
3435 stock->nr_bytes -= nr_bytes;
3436 ret = true;
3437 }
3438
3439 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3440
3441 return ret;
3442}
3443
3444static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
3445{
3446 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
3447
3448 if (!old)
3449 return NULL;
3450
3451 if (stock->nr_bytes) {
3452 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3453 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3454
3455 if (nr_pages) {
3456 struct mem_cgroup *memcg;
3457
3458 memcg = get_mem_cgroup_from_objcg(old);
3459
3460 memcg_account_kmem(memcg, -nr_pages);
3461 __refill_stock(memcg, nr_pages);
3462
3463 css_put(&memcg->css);
3464 }
3465
3466 /*
3467 * The leftover is flushed to the centralized per-memcg value.
3468 * On the next attempt to refill obj stock it will be moved
 3469	 * to a per-cpu stock (probably, on another CPU), see
3470 * refill_obj_stock().
3471 *
3472 * How often it's flushed is a trade-off between the memory
3473 * limit enforcement accuracy and potential CPU contention,
3474 * so it might be changed in the future.
3475 */
3476 atomic_add(nr_bytes, &old->nr_charged_bytes);
3477 stock->nr_bytes = 0;
3478 }
3479
3480 /*
3481 * Flush the vmstat data in current stock
3482 */
3483 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3484 if (stock->nr_slab_reclaimable_b) {
3485 mod_objcg_mlstate(old, stock->cached_pgdat,
3486 NR_SLAB_RECLAIMABLE_B,
3487 stock->nr_slab_reclaimable_b);
3488 stock->nr_slab_reclaimable_b = 0;
3489 }
3490 if (stock->nr_slab_unreclaimable_b) {
3491 mod_objcg_mlstate(old, stock->cached_pgdat,
3492 NR_SLAB_UNRECLAIMABLE_B,
3493 stock->nr_slab_unreclaimable_b);
3494 stock->nr_slab_unreclaimable_b = 0;
3495 }
3496 stock->cached_pgdat = NULL;
3497 }
3498
3499 WRITE_ONCE(stock->cached_objcg, NULL);
3500 /*
 3501	 * The `old' objcg needs to be released by the caller via
3502 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
3503 */
3504 return old;
3505}
3506
3507static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3508 struct mem_cgroup *root_memcg)
3509{
3510 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
3511 struct mem_cgroup *memcg;
3512
3513 if (objcg) {
3514 memcg = obj_cgroup_memcg(objcg);
3515 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3516 return true;
3517 }
3518
3519 return false;
3520}
3521
3522static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3523 bool allow_uncharge)
3524{
3525 struct memcg_stock_pcp *stock;
3526 struct obj_cgroup *old = NULL;
3527 unsigned long flags;
3528 unsigned int nr_pages = 0;
3529
3530 local_lock_irqsave(&memcg_stock.stock_lock, flags);
3531
3532 stock = this_cpu_ptr(&memcg_stock);
3533 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
3534 old = drain_obj_stock(stock);
3535 obj_cgroup_get(objcg);
3536 WRITE_ONCE(stock->cached_objcg, objcg);
3537 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3538 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3539 allow_uncharge = true; /* Allow uncharge when objcg changes */
3540 }
3541 stock->nr_bytes += nr_bytes;
3542
3543 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3544 nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3545 stock->nr_bytes &= (PAGE_SIZE - 1);
3546 }
3547
3548 local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3549 if (old)
3550 obj_cgroup_put(old);
3551
3552 if (nr_pages)
3553 obj_cgroup_uncharge_pages(objcg, nr_pages);
3554}
3555
3556int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3557{
3558 unsigned int nr_pages, nr_bytes;
3559 int ret;
3560
3561 if (consume_obj_stock(objcg, size))
3562 return 0;
3563
3564 /*
3565 * In theory, objcg->nr_charged_bytes can have enough
3566 * pre-charged bytes to satisfy the allocation. However,
3567 * flushing objcg->nr_charged_bytes requires two atomic
3568 * operations, and objcg->nr_charged_bytes can't be big.
3569 * The shared objcg->nr_charged_bytes can also become a
3570 * performance bottleneck if all tasks of the same memcg are
3571 * trying to update it. So it's better to ignore it and try
3572 * grab some new pages. The stock's nr_bytes will be flushed to
3573 * objcg->nr_charged_bytes later on when objcg changes.
3574 *
 3575	 * The stock's nr_bytes may contain enough pre-charged bytes
 3576	 * to allow charging one less page, but we can't rely
3577 * on the pre-charged bytes not being changed outside of
3578 * consume_obj_stock() or refill_obj_stock(). So ignore those
3579 * pre-charged bytes as well when charging pages. To avoid a
3580 * page uncharge right after a page charge, we set the
3581 * allow_uncharge flag to false when calling refill_obj_stock()
3582 * to temporarily allow the pre-charged bytes to exceed the page
3583 * size limit. The maximum reachable value of the pre-charged
3584 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3585 * race.
3586 */
3587 nr_pages = size >> PAGE_SHIFT;
3588 nr_bytes = size & (PAGE_SIZE - 1);
3589
3590 if (nr_bytes)
3591 nr_pages += 1;
3592
3593 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3594 if (!ret && nr_bytes)
3595 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3596
3597 return ret;
3598}
3599
3600void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3601{
3602 refill_obj_stock(objcg, size, true);
3603}
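
/*
 * Usage sketch (added for illustration; assumes a helper that returns a
 * referenced objcg for the running task, such as the
 * get_obj_cgroup_from_current() helper found in many kernel versions):
 *
 *	objcg = get_obj_cgroup_from_current();
 *	if (!objcg)
 *		return;
 *	if (!obj_cgroup_charge(objcg, GFP_KERNEL, size)) {
 *		...use the accounted object, then...
 *		obj_cgroup_uncharge(objcg, size);
 *	}
 *	obj_cgroup_put(objcg);
 *
 * Sub-page remainders are parked in the per-cpu stock by refill_obj_stock(),
 * so repeated small charges from the same CPU rarely touch the page counters.
 */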
3604
3605#endif /* CONFIG_MEMCG_KMEM */
3606
3607/*
3608 * Because page_memcg(head) is not set on tails, set it now.
3609 */
3610void split_page_memcg(struct page *head, int old_order, int new_order)
3611{
3612 struct folio *folio = page_folio(head);
3613 struct mem_cgroup *memcg = folio_memcg(folio);
3614 int i;
3615 unsigned int old_nr = 1 << old_order;
3616 unsigned int new_nr = 1 << new_order;
3617
3618 if (mem_cgroup_disabled() || !memcg)
3619 return;
3620
3621 for (i = new_nr; i < old_nr; i += new_nr)
3622 folio_page(folio, i)->memcg_data = folio->memcg_data;
3623
3624 if (folio_memcg_kmem(folio))
3625 obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
3626 else
3627 css_get_many(&memcg->css, old_nr / new_nr - 1);
3628}
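
/*
 * Example (illustrative): splitting an order-9 THP into order-0 pages calls
 * split_page_memcg(head, 9, 0).  The loop above copies head's memcg_data to
 * the 511 tail pages, and 511 additional references are taken on the objcg
 * (kmem) or the memcg css so each resulting page can be uncharged on its own.
 */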
3629
3630#ifdef CONFIG_SWAP
3631/**
3632 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3633 * @entry: swap entry to be moved
3634 * @from: mem_cgroup which the entry is moved from
3635 * @to: mem_cgroup which the entry is moved to
3636 *
3637 * It succeeds only when the swap_cgroup's record for this entry is the same
3638 * as the mem_cgroup's id of @from.
3639 *
3640 * Returns 0 on success, -EINVAL on failure.
3641 *
 3642 * The caller must have charged to @to, IOW, called page_counter_charge() for
3643 * both res and memsw, and called css_get().
3644 */
3645static int mem_cgroup_move_swap_account(swp_entry_t entry,
3646 struct mem_cgroup *from, struct mem_cgroup *to)
3647{
3648 unsigned short old_id, new_id;
3649
3650 old_id = mem_cgroup_id(from);
3651 new_id = mem_cgroup_id(to);
3652
3653 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3654 mod_memcg_state(from, MEMCG_SWAP, -1);
3655 mod_memcg_state(to, MEMCG_SWAP, 1);
3656 return 0;
3657 }
3658 return -EINVAL;
3659}
3660#else
3661static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3662 struct mem_cgroup *from, struct mem_cgroup *to)
3663{
3664 return -EINVAL;
3665}
3666#endif
3667
3668static DEFINE_MUTEX(memcg_max_mutex);
3669
3670static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3671 unsigned long max, bool memsw)
3672{
3673 bool enlarge = false;
3674 bool drained = false;
3675 int ret;
3676 bool limits_invariant;
3677 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3678
3679 do {
3680 if (signal_pending(current)) {
3681 ret = -EINTR;
3682 break;
3683 }
3684
3685 mutex_lock(&memcg_max_mutex);
3686 /*
3687 * Make sure that the new limit (memsw or memory limit) doesn't
3688 * break our basic invariant rule memory.max <= memsw.max.
3689 */
3690 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3691 max <= memcg->memsw.max;
3692 if (!limits_invariant) {
3693 mutex_unlock(&memcg_max_mutex);
3694 ret = -EINVAL;
3695 break;
3696 }
3697 if (max > counter->max)
3698 enlarge = true;
3699 ret = page_counter_set_max(counter, max);
3700 mutex_unlock(&memcg_max_mutex);
3701
3702 if (!ret)
3703 break;
3704
3705 if (!drained) {
3706 drain_all_stock(memcg);
3707 drained = true;
3708 continue;
3709 }
3710
3711 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3712 memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP)) {
3713 ret = -EBUSY;
3714 break;
3715 }
3716 } while (true);
3717
3718 if (!ret && enlarge)
3719 memcg_oom_recover(memcg);
3720
3721 return ret;
3722}
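
/*
 * Worked example (added for clarity): with memory.max at 512M and memsw.max
 * at 1G, lowering memsw.max below 512M breaks memory.max <= memsw.max and
 * fails with -EINVAL.  Lowering memory.max to 256M is allowed; if usage is
 * above the new limit, the loop above drains the per-cpu stocks once and
 * then reclaims until the usage fits, giving up with -EBUSY only when
 * reclaim stops making progress.
 */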
3723
3724unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3725 gfp_t gfp_mask,
3726 unsigned long *total_scanned)
3727{
3728 unsigned long nr_reclaimed = 0;
3729 struct mem_cgroup_per_node *mz, *next_mz = NULL;
3730 unsigned long reclaimed;
3731 int loop = 0;
3732 struct mem_cgroup_tree_per_node *mctz;
3733 unsigned long excess;
3734
3735 if (lru_gen_enabled())
3736 return 0;
3737
3738 if (order > 0)
3739 return 0;
3740
3741 mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];
3742
3743 /*
3744 * Do not even bother to check the largest node if the root
3745 * is empty. Do it lockless to prevent lock bouncing. Races
3746 * are acceptable as soft limit is best effort anyway.
3747 */
3748 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3749 return 0;
3750
3751 /*
 3752	 * This loop can run for a while, especially if mem_cgroups continuously
 3753	 * exceed their soft limit and keep putting the system under
 3754	 * pressure.
3755 */
3756 do {
3757 if (next_mz)
3758 mz = next_mz;
3759 else
3760 mz = mem_cgroup_largest_soft_limit_node(mctz);
3761 if (!mz)
3762 break;
3763
3764 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3765 gfp_mask, total_scanned);
3766 nr_reclaimed += reclaimed;
3767 spin_lock_irq(&mctz->lock);
3768
3769 /*
3770 * If we failed to reclaim anything from this memory cgroup
3771 * it is time to move on to the next cgroup
3772 */
3773 next_mz = NULL;
3774 if (!reclaimed)
3775 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3776
3777 excess = soft_limit_excess(mz->memcg);
3778 /*
3779 * One school of thought says that we should not add
3780 * back the node to the tree if reclaim returns 0.
 3781		 * But our reclaim could return 0 simply because, due
 3782		 * to priority, we are exposing a smaller subset of
 3783		 * memory to reclaim from. Consider this a longer-term
 3784		 * TODO.
3785 */
3786 /* If excess == 0, no tree ops */
3787 __mem_cgroup_insert_exceeded(mz, mctz, excess);
3788 spin_unlock_irq(&mctz->lock);
3789 css_put(&mz->memcg->css);
3790 loop++;
3791 /*
 3792		 * We could not reclaim anything and there are no more
 3793		 * mem cgroups to try, or we seem to be looping without
 3794		 * reclaiming anything.
3795 */
3796 if (!nr_reclaimed &&
3797 (next_mz == NULL ||
3798 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3799 break;
3800 } while (!nr_reclaimed);
3801 if (next_mz)
3802 css_put(&next_mz->memcg->css);
3803 return nr_reclaimed;
3804}
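
/*
 * Illustrative note: a memcg with 300M of usage and a 200M soft limit has an
 * excess of 100M worth of pages.  soft_limit_excess() (defined earlier in
 * this file) is what orders the per-node RB-tree, so the worst offender is
 * reclaimed from first and is only re-inserted above while some excess
 * remains.
 */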
3805
3806/*
3807 * Reclaims as many pages from the given memcg as possible.
3808 *
3809 * Caller is responsible for holding css reference for memcg.
3810 */
3811static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3812{
3813 int nr_retries = MAX_RECLAIM_RETRIES;
3814
 3815	/* we call try-to-free pages to make this cgroup empty */
3816 lru_add_drain_all();
3817
3818 drain_all_stock(memcg);
3819
3820 /* try to free all pages in this cgroup */
3821 while (nr_retries && page_counter_read(&memcg->memory)) {
3822 if (signal_pending(current))
3823 return -EINTR;
3824
3825 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3826 MEMCG_RECLAIM_MAY_SWAP))
3827 nr_retries--;
3828 }
3829
3830 return 0;
3831}
3832
3833static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3834 char *buf, size_t nbytes,
3835 loff_t off)
3836{
3837 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3838
3839 if (mem_cgroup_is_root(memcg))
3840 return -EINVAL;
3841 return mem_cgroup_force_empty(memcg) ?: nbytes;
3842}
3843
3844static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3845 struct cftype *cft)
3846{
3847 return 1;
3848}
3849
3850static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3851 struct cftype *cft, u64 val)
3852{
3853 if (val == 1)
3854 return 0;
3855
3856 pr_warn_once("Non-hierarchical mode is deprecated. "
3857 "Please report your usecase to linux-mm@kvack.org if you "
3858 "depend on this functionality.\n");
3859
3860 return -EINVAL;
3861}
3862
3863static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3864{
3865 unsigned long val;
3866
3867 if (mem_cgroup_is_root(memcg)) {
3868 /*
3869 * Approximate root's usage from global state. This isn't
3870 * perfect, but the root usage was always an approximation.
3871 */
3872 val = global_node_page_state(NR_FILE_PAGES) +
3873 global_node_page_state(NR_ANON_MAPPED);
3874 if (swap)
3875 val += total_swap_pages - get_nr_swap_pages();
3876 } else {
3877 if (!swap)
3878 val = page_counter_read(&memcg->memory);
3879 else
3880 val = page_counter_read(&memcg->memsw);
3881 }
3882 return val;
3883}
3884
3885enum {
3886 RES_USAGE,
3887 RES_LIMIT,
3888 RES_MAX_USAGE,
3889 RES_FAILCNT,
3890 RES_SOFT_LIMIT,
3891};
3892
3893static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3894 struct cftype *cft)
3895{
3896 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3897 struct page_counter *counter;
3898
3899 switch (MEMFILE_TYPE(cft->private)) {
3900 case _MEM:
3901 counter = &memcg->memory;
3902 break;
3903 case _MEMSWAP:
3904 counter = &memcg->memsw;
3905 break;
3906 case _KMEM:
3907 counter = &memcg->kmem;
3908 break;
3909 case _TCP:
3910 counter = &memcg->tcpmem;
3911 break;
3912 default:
3913 BUG();
3914 }
3915
3916 switch (MEMFILE_ATTR(cft->private)) {
3917 case RES_USAGE:
3918 if (counter == &memcg->memory)
3919 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3920 if (counter == &memcg->memsw)
3921 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3922 return (u64)page_counter_read(counter) * PAGE_SIZE;
3923 case RES_LIMIT:
3924 return (u64)counter->max * PAGE_SIZE;
3925 case RES_MAX_USAGE:
3926 return (u64)counter->watermark * PAGE_SIZE;
3927 case RES_FAILCNT:
3928 return counter->failcnt;
3929 case RES_SOFT_LIMIT:
3930 return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE;
3931 default:
3932 BUG();
3933 }
3934}
3935
3936/*
3937 * This function doesn't do anything useful. Its only job is to provide a read
3938 * handler for a file so that cgroup_file_mode() will add read permissions.
3939 */
3940static int mem_cgroup_dummy_seq_show(__always_unused struct seq_file *m,
3941 __always_unused void *v)
3942{
3943 return -EINVAL;
3944}
3945
3946#ifdef CONFIG_MEMCG_KMEM
3947static int memcg_online_kmem(struct mem_cgroup *memcg)
3948{
3949 struct obj_cgroup *objcg;
3950
3951 if (mem_cgroup_kmem_disabled())
3952 return 0;
3953
3954 if (unlikely(mem_cgroup_is_root(memcg)))
3955 return 0;
3956
3957 objcg = obj_cgroup_alloc();
3958 if (!objcg)
3959 return -ENOMEM;
3960
3961 objcg->memcg = memcg;
3962 rcu_assign_pointer(memcg->objcg, objcg);
3963 obj_cgroup_get(objcg);
3964 memcg->orig_objcg = objcg;
3965
3966 static_branch_enable(&memcg_kmem_online_key);
3967
3968 memcg->kmemcg_id = memcg->id.id;
3969
3970 return 0;
3971}
3972
3973static void memcg_offline_kmem(struct mem_cgroup *memcg)
3974{
3975 struct mem_cgroup *parent;
3976
3977 if (mem_cgroup_kmem_disabled())
3978 return;
3979
3980 if (unlikely(mem_cgroup_is_root(memcg)))
3981 return;
3982
3983 parent = parent_mem_cgroup(memcg);
3984 if (!parent)
3985 parent = root_mem_cgroup;
3986
3987 memcg_reparent_objcgs(memcg, parent);
3988
3989 /*
3990 * After we have finished memcg_reparent_objcgs(), all list_lrus
3991 * corresponding to this cgroup are guaranteed to remain empty.
3992 * The ordering is imposed by list_lru_node->lock taken by
3993 * memcg_reparent_list_lrus().
3994 */
3995 memcg_reparent_list_lrus(memcg, parent);
3996}
3997#else
3998static int memcg_online_kmem(struct mem_cgroup *memcg)
3999{
4000 return 0;
4001}
4002static void memcg_offline_kmem(struct mem_cgroup *memcg)
4003{
4004}
4005#endif /* CONFIG_MEMCG_KMEM */
4006
4007static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
4008{
4009 int ret;
4010
4011 mutex_lock(&memcg_max_mutex);
4012
4013 ret = page_counter_set_max(&memcg->tcpmem, max);
4014 if (ret)
4015 goto out;
4016
4017 if (!memcg->tcpmem_active) {
4018 /*
4019 * The active flag needs to be written after the static_key
4020 * update. This is what guarantees that the socket activation
4021 * function is the last one to run. See mem_cgroup_sk_alloc()
4022 * for details, and note that we don't mark any socket as
4023 * belonging to this memcg until that flag is up.
4024 *
4025 * We need to do this, because static_keys will span multiple
4026 * sites, but we can't control their order. If we mark a socket
4027 * as accounted, but the accounting functions are not patched in
4028 * yet, we'll lose accounting.
4029 *
4030 * We never race with the readers in mem_cgroup_sk_alloc(),
4031 * because when this value change, the code to process it is not
4032 * patched in yet.
4033 */
4034 static_branch_inc(&memcg_sockets_enabled_key);
4035 memcg->tcpmem_active = true;
4036 }
4037out:
4038 mutex_unlock(&memcg_max_mutex);
4039 return ret;
4040}
4041
4042/*
4043 * The user of this function is...
4044 * RES_LIMIT.
4045 */
4046static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
4047 char *buf, size_t nbytes, loff_t off)
4048{
4049 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4050 unsigned long nr_pages;
4051 int ret;
4052
4053 buf = strstrip(buf);
4054 ret = page_counter_memparse(buf, "-1", &nr_pages);
4055 if (ret)
4056 return ret;
4057
4058 switch (MEMFILE_ATTR(of_cft(of)->private)) {
4059 case RES_LIMIT:
4060 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
4061 ret = -EINVAL;
4062 break;
4063 }
4064 switch (MEMFILE_TYPE(of_cft(of)->private)) {
4065 case _MEM:
4066 ret = mem_cgroup_resize_max(memcg, nr_pages, false);
4067 break;
4068 case _MEMSWAP:
4069 ret = mem_cgroup_resize_max(memcg, nr_pages, true);
4070 break;
4071 case _KMEM:
4072 pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
4073 "Writing any value to this file has no effect. "
4074 "Please report your usecase to linux-mm@kvack.org if you "
4075 "depend on this functionality.\n");
4076 ret = 0;
4077 break;
4078 case _TCP:
4079 ret = memcg_update_tcp_max(memcg, nr_pages);
4080 break;
4081 }
4082 break;
4083 case RES_SOFT_LIMIT:
4084 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
4085 ret = -EOPNOTSUPP;
4086 } else {
4087 WRITE_ONCE(memcg->soft_limit, nr_pages);
4088 ret = 0;
4089 }
4090 break;
4091 }
4092 return ret ?: nbytes;
4093}
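
/*
 * Input format note (illustration, not from the original file): the buffer
 * is parsed by page_counter_memparse(), so values may carry K/M/G suffixes
 * and the string "-1" means "no limit".  For example, writing "512M" to
 * memory.limit_in_bytes resolves to 131072 pages on a 4K-page system.
 */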
4094
4095static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
4096 size_t nbytes, loff_t off)
4097{
4098 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4099 struct page_counter *counter;
4100
4101 switch (MEMFILE_TYPE(of_cft(of)->private)) {
4102 case _MEM:
4103 counter = &memcg->memory;
4104 break;
4105 case _MEMSWAP:
4106 counter = &memcg->memsw;
4107 break;
4108 case _KMEM:
4109 counter = &memcg->kmem;
4110 break;
4111 case _TCP:
4112 counter = &memcg->tcpmem;
4113 break;
4114 default:
4115 BUG();
4116 }
4117
4118 switch (MEMFILE_ATTR(of_cft(of)->private)) {
4119 case RES_MAX_USAGE:
4120 page_counter_reset_watermark(counter);
4121 break;
4122 case RES_FAILCNT:
4123 counter->failcnt = 0;
4124 break;
4125 default:
4126 BUG();
4127 }
4128
4129 return nbytes;
4130}
4131
4132static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
4133 struct cftype *cft)
4134{
4135 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
4136}
4137
4138#ifdef CONFIG_MMU
4139static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
4140 struct cftype *cft, u64 val)
4141{
4142 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4143
4144 pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
4145 "Please report your usecase to linux-mm@kvack.org if you "
4146 "depend on this functionality.\n");
4147
4148 if (val & ~MOVE_MASK)
4149 return -EINVAL;
4150
4151 /*
4152 * No kind of locking is needed in here, because ->can_attach() will
4153 * check this value once in the beginning of the process, and then carry
4154 * on with stale data. This means that changes to this value will only
4155 * affect task migrations starting after the change.
4156 */
4157 memcg->move_charge_at_immigrate = val;
4158 return 0;
4159}
4160#else
4161static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
4162 struct cftype *cft, u64 val)
4163{
4164 return -ENOSYS;
4165}
4166#endif
4167
4168#ifdef CONFIG_NUMA
4169
4170#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
4171#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
4172#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
4173
4174static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
4175 int nid, unsigned int lru_mask, bool tree)
4176{
4177 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4178 unsigned long nr = 0;
4179 enum lru_list lru;
4180
4181 VM_BUG_ON((unsigned)nid >= nr_node_ids);
4182
4183 for_each_lru(lru) {
4184 if (!(BIT(lru) & lru_mask))
4185 continue;
4186 if (tree)
4187 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
4188 else
4189 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
4190 }
4191 return nr;
4192}
4193
4194static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
4195 unsigned int lru_mask,
4196 bool tree)
4197{
4198 unsigned long nr = 0;
4199 enum lru_list lru;
4200
4201 for_each_lru(lru) {
4202 if (!(BIT(lru) & lru_mask))
4203 continue;
4204 if (tree)
4205 nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
4206 else
4207 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
4208 }
4209 return nr;
4210}
4211
4212static int memcg_numa_stat_show(struct seq_file *m, void *v)
4213{
4214 struct numa_stat {
4215 const char *name;
4216 unsigned int lru_mask;
4217 };
4218
4219 static const struct numa_stat stats[] = {
4220 { "total", LRU_ALL },
4221 { "file", LRU_ALL_FILE },
4222 { "anon", LRU_ALL_ANON },
4223 { "unevictable", BIT(LRU_UNEVICTABLE) },
4224 };
4225 const struct numa_stat *stat;
4226 int nid;
4227 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4228
4229 mem_cgroup_flush_stats(memcg);
4230
4231 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4232 seq_printf(m, "%s=%lu", stat->name,
4233 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4234 false));
4235 for_each_node_state(nid, N_MEMORY)
4236 seq_printf(m, " N%d=%lu", nid,
4237 mem_cgroup_node_nr_lru_pages(memcg, nid,
4238 stat->lru_mask, false));
4239 seq_putc(m, '\n');
4240 }
4241
4242 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4243
4244 seq_printf(m, "hierarchical_%s=%lu", stat->name,
4245 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4246 true));
4247 for_each_node_state(nid, N_MEMORY)
4248 seq_printf(m, " N%d=%lu", nid,
4249 mem_cgroup_node_nr_lru_pages(memcg, nid,
4250 stat->lru_mask, true));
4251 seq_putc(m, '\n');
4252 }
4253
4254 return 0;
4255}
4256#endif /* CONFIG_NUMA */
4257
4258static const unsigned int memcg1_stats[] = {
4259 NR_FILE_PAGES,
4260 NR_ANON_MAPPED,
4261#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4262 NR_ANON_THPS,
4263#endif
4264 NR_SHMEM,
4265 NR_FILE_MAPPED,
4266 NR_FILE_DIRTY,
4267 NR_WRITEBACK,
4268 WORKINGSET_REFAULT_ANON,
4269 WORKINGSET_REFAULT_FILE,
4270#ifdef CONFIG_SWAP
4271 MEMCG_SWAP,
4272 NR_SWAPCACHE,
4273#endif
4274};
4275
4276static const char *const memcg1_stat_names[] = {
4277 "cache",
4278 "rss",
4279#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4280 "rss_huge",
4281#endif
4282 "shmem",
4283 "mapped_file",
4284 "dirty",
4285 "writeback",
4286 "workingset_refault_anon",
4287 "workingset_refault_file",
4288#ifdef CONFIG_SWAP
4289 "swap",
4290 "swapcached",
4291#endif
4292};
4293
4294/* Universal VM events cgroup1 shows, original sort order */
4295static const unsigned int memcg1_events[] = {
4296 PGPGIN,
4297 PGPGOUT,
4298 PGFAULT,
4299 PGMAJFAULT,
4300};
4301
4302static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
4303{
4304 unsigned long memory, memsw;
4305 struct mem_cgroup *mi;
4306 unsigned int i;
4307
4308 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4309
4310 mem_cgroup_flush_stats(memcg);
4311
4312 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4313 unsigned long nr;
4314
4315 nr = memcg_page_state_local_output(memcg, memcg1_stats[i]);
4316 seq_buf_printf(s, "%s %lu\n", memcg1_stat_names[i], nr);
4317 }
4318
4319 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4320 seq_buf_printf(s, "%s %lu\n", vm_event_name(memcg1_events[i]),
4321 memcg_events_local(memcg, memcg1_events[i]));
4322
4323 for (i = 0; i < NR_LRU_LISTS; i++)
4324 seq_buf_printf(s, "%s %lu\n", lru_list_name(i),
4325 memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4326 PAGE_SIZE);
4327
4328 /* Hierarchical information */
4329 memory = memsw = PAGE_COUNTER_MAX;
4330 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4331 memory = min(memory, READ_ONCE(mi->memory.max));
4332 memsw = min(memsw, READ_ONCE(mi->memsw.max));
4333 }
4334 seq_buf_printf(s, "hierarchical_memory_limit %llu\n",
4335 (u64)memory * PAGE_SIZE);
4336 seq_buf_printf(s, "hierarchical_memsw_limit %llu\n",
4337 (u64)memsw * PAGE_SIZE);
4338
4339 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4340 unsigned long nr;
4341
4342 nr = memcg_page_state_output(memcg, memcg1_stats[i]);
4343 seq_buf_printf(s, "total_%s %llu\n", memcg1_stat_names[i],
4344 (u64)nr);
4345 }
4346
4347 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4348 seq_buf_printf(s, "total_%s %llu\n",
4349 vm_event_name(memcg1_events[i]),
4350 (u64)memcg_events(memcg, memcg1_events[i]));
4351
4352 for (i = 0; i < NR_LRU_LISTS; i++)
4353 seq_buf_printf(s, "total_%s %llu\n", lru_list_name(i),
4354 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4355 PAGE_SIZE);
4356
4357#ifdef CONFIG_DEBUG_VM
4358 {
4359 pg_data_t *pgdat;
4360 struct mem_cgroup_per_node *mz;
4361 unsigned long anon_cost = 0;
4362 unsigned long file_cost = 0;
4363
4364 for_each_online_pgdat(pgdat) {
4365 mz = memcg->nodeinfo[pgdat->node_id];
4366
4367 anon_cost += mz->lruvec.anon_cost;
4368 file_cost += mz->lruvec.file_cost;
4369 }
4370 seq_buf_printf(s, "anon_cost %lu\n", anon_cost);
4371 seq_buf_printf(s, "file_cost %lu\n", file_cost);
4372 }
4373#endif
4374}
4375
4376static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4377 struct cftype *cft)
4378{
4379 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4380
4381 return mem_cgroup_swappiness(memcg);
4382}
4383
4384static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4385 struct cftype *cft, u64 val)
4386{
4387 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4388
4389 if (val > 200)
4390 return -EINVAL;
4391
4392 if (!mem_cgroup_is_root(memcg))
4393 WRITE_ONCE(memcg->swappiness, val);
4394 else
4395 WRITE_ONCE(vm_swappiness, val);
4396
4397 return 0;
4398}
4399
4400static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4401{
4402 struct mem_cgroup_threshold_ary *t;
4403 unsigned long usage;
4404 int i;
4405
4406 rcu_read_lock();
4407 if (!swap)
4408 t = rcu_dereference(memcg->thresholds.primary);
4409 else
4410 t = rcu_dereference(memcg->memsw_thresholds.primary);
4411
4412 if (!t)
4413 goto unlock;
4414
4415 usage = mem_cgroup_usage(memcg, swap);
4416
4417 /*
 4418	 * current_threshold points to the threshold just below or equal to usage.
 4419	 * If that is no longer true, a threshold was crossed after the last
 4420	 * call to __mem_cgroup_threshold().
4421 */
4422 i = t->current_threshold;
4423
4424 /*
4425 * Iterate backward over array of thresholds starting from
4426 * current_threshold and check if a threshold is crossed.
 4427	 * If none of the thresholds below usage are crossed, we read
4428 * only one element of the array here.
4429 */
4430 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4431 eventfd_signal(t->entries[i].eventfd);
4432
4433 /* i = current_threshold + 1 */
4434 i++;
4435
4436 /*
4437 * Iterate forward over array of thresholds starting from
4438 * current_threshold+1 and check if a threshold is crossed.
 4439	 * If none of the thresholds above usage are crossed, we read
4440 * only one element of the array here.
4441 */
4442 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4443 eventfd_signal(t->entries[i].eventfd);
4444
4445 /* Update current_threshold */
4446 t->current_threshold = i - 1;
4447unlock:
4448 rcu_read_unlock();
4449}
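
/*
 * Worked example (illustrative): with registered thresholds {4M, 8M, 16M}
 * and a last-seen usage of 10M, current_threshold indexes the 8M entry.  If
 * usage grows to 20M, the forward scan above signals the 16M eventfd and
 * current_threshold moves to it; if usage instead drops to 3M, the backward
 * scan signals the 8M and 4M eventfds and current_threshold ends up at -1,
 * i.e. below the smallest threshold.
 */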
4450
4451static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4452{
4453 while (memcg) {
4454 __mem_cgroup_threshold(memcg, false);
4455 if (do_memsw_account())
4456 __mem_cgroup_threshold(memcg, true);
4457
4458 memcg = parent_mem_cgroup(memcg);
4459 }
4460}
4461
4462static int compare_thresholds(const void *a, const void *b)
4463{
4464 const struct mem_cgroup_threshold *_a = a;
4465 const struct mem_cgroup_threshold *_b = b;
4466
4467 if (_a->threshold > _b->threshold)
4468 return 1;
4469
4470 if (_a->threshold < _b->threshold)
4471 return -1;
4472
4473 return 0;
4474}
4475
4476static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4477{
4478 struct mem_cgroup_eventfd_list *ev;
4479
4480 spin_lock(&memcg_oom_lock);
4481
4482 list_for_each_entry(ev, &memcg->oom_notify, list)
4483 eventfd_signal(ev->eventfd);
4484
4485 spin_unlock(&memcg_oom_lock);
4486 return 0;
4487}
4488
4489static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4490{
4491 struct mem_cgroup *iter;
4492
4493 for_each_mem_cgroup_tree(iter, memcg)
4494 mem_cgroup_oom_notify_cb(iter);
4495}
4496
4497static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4498 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4499{
4500 struct mem_cgroup_thresholds *thresholds;
4501 struct mem_cgroup_threshold_ary *new;
4502 unsigned long threshold;
4503 unsigned long usage;
4504 int i, size, ret;
4505
4506 ret = page_counter_memparse(args, "-1", &threshold);
4507 if (ret)
4508 return ret;
4509
4510 mutex_lock(&memcg->thresholds_lock);
4511
4512 if (type == _MEM) {
4513 thresholds = &memcg->thresholds;
4514 usage = mem_cgroup_usage(memcg, false);
4515 } else if (type == _MEMSWAP) {
4516 thresholds = &memcg->memsw_thresholds;
4517 usage = mem_cgroup_usage(memcg, true);
4518 } else
4519 BUG();
4520
 4521	/* Check if a threshold was crossed before adding a new one */
4522 if (thresholds->primary)
4523 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4524
4525 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4526
4527 /* Allocate memory for new array of thresholds */
4528 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4529 if (!new) {
4530 ret = -ENOMEM;
4531 goto unlock;
4532 }
4533 new->size = size;
4534
4535 /* Copy thresholds (if any) to new array */
4536 if (thresholds->primary)
4537 memcpy(new->entries, thresholds->primary->entries,
4538 flex_array_size(new, entries, size - 1));
4539
4540 /* Add new threshold */
4541 new->entries[size - 1].eventfd = eventfd;
4542 new->entries[size - 1].threshold = threshold;
4543
 4544	/* Sort thresholds. Registering a new threshold isn't time-critical */
4545 sort(new->entries, size, sizeof(*new->entries),
4546 compare_thresholds, NULL);
4547
4548 /* Find current threshold */
4549 new->current_threshold = -1;
4550 for (i = 0; i < size; i++) {
4551 if (new->entries[i].threshold <= usage) {
4552 /*
4553 * new->current_threshold will not be used until
4554 * rcu_assign_pointer(), so it's safe to increment
4555 * it here.
4556 */
4557 ++new->current_threshold;
4558 } else
4559 break;
4560 }
4561
4562 /* Free old spare buffer and save old primary buffer as spare */
4563 kfree(thresholds->spare);
4564 thresholds->spare = thresholds->primary;
4565
4566 rcu_assign_pointer(thresholds->primary, new);
4567
4568 /* To be sure that nobody uses thresholds */
4569 synchronize_rcu();
4570
4571unlock:
4572 mutex_unlock(&memcg->thresholds_lock);
4573
4574 return ret;
4575}
4576
4577static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4578 struct eventfd_ctx *eventfd, const char *args)
4579{
4580 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4581}
4582
4583static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4584 struct eventfd_ctx *eventfd, const char *args)
4585{
4586 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4587}
4588
4589static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4590 struct eventfd_ctx *eventfd, enum res_type type)
4591{
4592 struct mem_cgroup_thresholds *thresholds;
4593 struct mem_cgroup_threshold_ary *new;
4594 unsigned long usage;
4595 int i, j, size, entries;
4596
4597 mutex_lock(&memcg->thresholds_lock);
4598
4599 if (type == _MEM) {
4600 thresholds = &memcg->thresholds;
4601 usage = mem_cgroup_usage(memcg, false);
4602 } else if (type == _MEMSWAP) {
4603 thresholds = &memcg->memsw_thresholds;
4604 usage = mem_cgroup_usage(memcg, true);
4605 } else
4606 BUG();
4607
4608 if (!thresholds->primary)
4609 goto unlock;
4610
 4611	/* Check if a threshold was crossed before removing */
4612 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4613
 4614	/* Calculate the new number of thresholds */
4615 size = entries = 0;
4616 for (i = 0; i < thresholds->primary->size; i++) {
4617 if (thresholds->primary->entries[i].eventfd != eventfd)
4618 size++;
4619 else
4620 entries++;
4621 }
4622
4623 new = thresholds->spare;
4624
4625 /* If no items related to eventfd have been cleared, nothing to do */
4626 if (!entries)
4627 goto unlock;
4628
4629 /* Set thresholds array to NULL if we don't have thresholds */
4630 if (!size) {
4631 kfree(new);
4632 new = NULL;
4633 goto swap_buffers;
4634 }
4635
4636 new->size = size;
4637
4638 /* Copy thresholds and find current threshold */
4639 new->current_threshold = -1;
4640 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4641 if (thresholds->primary->entries[i].eventfd == eventfd)
4642 continue;
4643
4644 new->entries[j] = thresholds->primary->entries[i];
4645 if (new->entries[j].threshold <= usage) {
4646 /*
4647 * new->current_threshold will not be used
4648 * until rcu_assign_pointer(), so it's safe to increment
4649 * it here.
4650 */
4651 ++new->current_threshold;
4652 }
4653 j++;
4654 }
4655
4656swap_buffers:
4657 /* Swap primary and spare array */
4658 thresholds->spare = thresholds->primary;
4659
4660 rcu_assign_pointer(thresholds->primary, new);
4661
4662 /* To be sure that nobody uses thresholds */
4663 synchronize_rcu();
4664
4665 /* If all events are unregistered, free the spare array */
4666 if (!new) {
4667 kfree(thresholds->spare);
4668 thresholds->spare = NULL;
4669 }
4670unlock:
4671 mutex_unlock(&memcg->thresholds_lock);
4672}
4673
4674static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4675 struct eventfd_ctx *eventfd)
4676{
4677 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4678}
4679
4680static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4681 struct eventfd_ctx *eventfd)
4682{
4683 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4684}
4685
4686static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4687 struct eventfd_ctx *eventfd, const char *args)
4688{
4689 struct mem_cgroup_eventfd_list *event;
4690
4691 event = kmalloc(sizeof(*event), GFP_KERNEL);
4692 if (!event)
4693 return -ENOMEM;
4694
4695 spin_lock(&memcg_oom_lock);
4696
4697 event->eventfd = eventfd;
4698 list_add(&event->list, &memcg->oom_notify);
4699
4700 /* already in OOM ? */
4701 if (memcg->under_oom)
4702 eventfd_signal(eventfd);
4703 spin_unlock(&memcg_oom_lock);
4704
4705 return 0;
4706}
4707
4708static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4709 struct eventfd_ctx *eventfd)
4710{
4711 struct mem_cgroup_eventfd_list *ev, *tmp;
4712
4713 spin_lock(&memcg_oom_lock);
4714
4715 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4716 if (ev->eventfd == eventfd) {
4717 list_del(&ev->list);
4718 kfree(ev);
4719 }
4720 }
4721
4722 spin_unlock(&memcg_oom_lock);
4723}
4724
4725static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4726{
4727 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4728
4729 seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable));
4730 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4731 seq_printf(sf, "oom_kill %lu\n",
4732 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4733 return 0;
4734}
4735
4736static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4737 struct cftype *cft, u64 val)
4738{
4739 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4740
 4741	/* cannot be set on the root cgroup, and only 0 and 1 are allowed */
4742 if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
4743 return -EINVAL;
4744
4745 WRITE_ONCE(memcg->oom_kill_disable, val);
4746 if (!val)
4747 memcg_oom_recover(memcg);
4748
4749 return 0;
4750}
4751
4752#ifdef CONFIG_CGROUP_WRITEBACK
4753
4754#include <trace/events/writeback.h>
4755
4756static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4757{
4758 return wb_domain_init(&memcg->cgwb_domain, gfp);
4759}
4760
4761static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4762{
4763 wb_domain_exit(&memcg->cgwb_domain);
4764}
4765
4766static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4767{
4768 wb_domain_size_changed(&memcg->cgwb_domain);
4769}
4770
4771struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4772{
4773 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4774
4775 if (!memcg->css.parent)
4776 return NULL;
4777
4778 return &memcg->cgwb_domain;
4779}
4780
4781/**
4782 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4783 * @wb: bdi_writeback in question
4784 * @pfilepages: out parameter for number of file pages
4785 * @pheadroom: out parameter for number of allocatable pages according to memcg
4786 * @pdirty: out parameter for number of dirty pages
4787 * @pwriteback: out parameter for number of pages under writeback
4788 *
4789 * Determine the numbers of file, headroom, dirty, and writeback pages in
4790 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
4791 * is a bit more involved.
4792 *
4793 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
4794 * headroom is calculated as the lowest headroom of itself and the
4795 * ancestors. Note that this doesn't consider the actual amount of
4796 * available memory in the system. The caller should further cap
4797 * *@pheadroom accordingly.
4798 */
4799void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4800 unsigned long *pheadroom, unsigned long *pdirty,
4801 unsigned long *pwriteback)
4802{
4803 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4804 struct mem_cgroup *parent;
4805
4806 mem_cgroup_flush_stats_ratelimited(memcg);
4807
4808 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4809 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4810 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4811 memcg_page_state(memcg, NR_ACTIVE_FILE);
4812
4813 *pheadroom = PAGE_COUNTER_MAX;
4814 while ((parent = parent_mem_cgroup(memcg))) {
4815 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4816 READ_ONCE(memcg->memory.high));
4817 unsigned long used = page_counter_read(&memcg->memory);
4818
4819 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4820 memcg = parent;
4821 }
4822}
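
/*
 * Headroom example (illustrative): a memcg with memory.max = 1G,
 * memory.high = 512M and 400M in use contributes min(1G, 512M) - 400M =
 * 112M.  If its parent has only 50M of headroom left, *pheadroom ends up at
 * 50M, because the loop above takes the minimum over @wb's memcg and every
 * ancestor below the root.
 */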
4823
4824/*
4825 * Foreign dirty flushing
4826 *
4827 * There's an inherent mismatch between memcg and writeback. The former
4828 * tracks ownership per-page while the latter per-inode. This was a
4829 * deliberate design decision because honoring per-page ownership in the
4830 * writeback path is complicated, may lead to higher CPU and IO overheads
4831 * and deemed unnecessary given that write-sharing an inode across
4832 * different cgroups isn't a common use-case.
4833 *
4834 * Combined with inode majority-writer ownership switching, this works well
4835 * enough in most cases but there are some pathological cases. For
4836 * example, let's say there are two cgroups A and B which keep writing to
4837 * different but confined parts of the same inode. B owns the inode and
4838 * A's memory is limited far below B's. A's dirty ratio can rise enough to
4839 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4840 * triggering background writeback. A will be slowed down without a way to
4841 * make writeback of the dirty pages happen.
4842 *
4843 * Conditions like the above can lead to a cgroup getting repeatedly and
4844 * severely throttled after making some progress after each
4845 * dirty_expire_interval while the underlying IO device is almost
4846 * completely idle.
4847 *
4848 * Solving this problem completely requires matching the ownership tracking
4849 * granularities between memcg and writeback in either direction. However,
4850 * the more egregious behaviors can be avoided by simply remembering the
4851 * most recent foreign dirtying events and initiating remote flushes on
4852 * them when local writeback isn't enough to keep the memory clean enough.
4853 *
 4854 * The following two functions implement such a mechanism. When a foreign
 4855 * page - a page whose memcg and writeback ownerships don't match - is
 4856 * dirtied, mem_cgroup_track_foreign_dirty() records the inode's owning
 4857 * bdi_writeback on the page's owning memcg. When balance_dirty_pages()
4858 * decides that the memcg needs to sleep due to high dirty ratio, it calls
4859 * mem_cgroup_flush_foreign() which queues writeback on the recorded
4860 * foreign bdi_writebacks which haven't expired. Both the numbers of
4861 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4862 * limited to MEMCG_CGWB_FRN_CNT.
4863 *
4864 * The mechanism only remembers IDs and doesn't hold any object references.
4865 * As being wrong occasionally doesn't matter, updates and accesses to the
4866 * records are lockless and racy.
4867 */
4868void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
4869 struct bdi_writeback *wb)
4870{
4871 struct mem_cgroup *memcg = folio_memcg(folio);
4872 struct memcg_cgwb_frn *frn;
4873 u64 now = get_jiffies_64();
4874 u64 oldest_at = now;
4875 int oldest = -1;
4876 int i;
4877
4878 trace_track_foreign_dirty(folio, wb);
4879
4880 /*
4881 * Pick the slot to use. If there is already a slot for @wb, keep
 4882	 * using it. If not, replace the oldest one that isn't being
 4883	 * written out.
4884 */
4885 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4886 frn = &memcg->cgwb_frn[i];
4887 if (frn->bdi_id == wb->bdi->id &&
4888 frn->memcg_id == wb->memcg_css->id)
4889 break;
4890 if (time_before64(frn->at, oldest_at) &&
4891 atomic_read(&frn->done.cnt) == 1) {
4892 oldest = i;
4893 oldest_at = frn->at;
4894 }
4895 }
4896
4897 if (i < MEMCG_CGWB_FRN_CNT) {
4898 /*
4899 * Re-using an existing one. Update timestamp lazily to
4900 * avoid making the cacheline hot. We want them to be
4901 * reasonably up-to-date and significantly shorter than
4902 * dirty_expire_interval as that's what expires the record.
4903 * Use the shorter of 1s and dirty_expire_interval / 8.
4904 */
4905 unsigned long update_intv =
4906 min_t(unsigned long, HZ,
4907 msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4908
4909 if (time_before64(frn->at, now - update_intv))
4910 frn->at = now;
4911 } else if (oldest >= 0) {
4912 /* replace the oldest free one */
4913 frn = &memcg->cgwb_frn[oldest];
4914 frn->bdi_id = wb->bdi->id;
4915 frn->memcg_id = wb->memcg_css->id;
4916 frn->at = now;
4917 }
4918}
4919
4920/* issue foreign writeback flushes for recorded foreign dirtying events */
4921void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4922{
4923 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4924 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4925 u64 now = jiffies_64;
4926 int i;
4927
4928 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4929 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4930
4931 /*
4932 * If the record is older than dirty_expire_interval,
4933 * writeback on it has already started. No need to kick it
4934 * off again. Also, don't start a new one if there's
4935 * already one in flight.
4936 */
4937 if (time_after64(frn->at, now - intv) &&
4938 atomic_read(&frn->done.cnt) == 1) {
4939 frn->at = 0;
4940 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4941 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
4942 WB_REASON_FOREIGN_FLUSH,
4943 &frn->done);
4944 }
4945 }
4946}
4947
4948#else /* CONFIG_CGROUP_WRITEBACK */
4949
4950static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4951{
4952 return 0;
4953}
4954
4955static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4956{
4957}
4958
4959static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4960{
4961}
4962
4963#endif /* CONFIG_CGROUP_WRITEBACK */
4964
4965/*
4966 * DO NOT USE IN NEW FILES.
4967 *
4968 * "cgroup.event_control" implementation.
4969 *
4970 * This is way over-engineered. It tries to support fully configurable
 4971 * events for each user. Such a level of flexibility is completely
 4972 * unnecessary, especially in light of the planned unified hierarchy.
4973 *
4974 * Please deprecate this and replace with something simpler if at all
4975 * possible.
4976 */
4977
4978/*
4979 * Unregister event and free resources.
4980 *
4981 * Gets called from workqueue.
4982 */
4983static void memcg_event_remove(struct work_struct *work)
4984{
4985 struct mem_cgroup_event *event =
4986 container_of(work, struct mem_cgroup_event, remove);
4987 struct mem_cgroup *memcg = event->memcg;
4988
4989 remove_wait_queue(event->wqh, &event->wait);
4990
4991 event->unregister_event(memcg, event->eventfd);
4992
4993 /* Notify userspace the event is going away. */
4994 eventfd_signal(event->eventfd);
4995
4996 eventfd_ctx_put(event->eventfd);
4997 kfree(event);
4998 css_put(&memcg->css);
4999}
5000
5001/*
5002 * Gets called on EPOLLHUP on eventfd when user closes it.
5003 *
5004 * Called with wqh->lock held and interrupts disabled.
5005 */
5006static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
5007 int sync, void *key)
5008{
5009 struct mem_cgroup_event *event =
5010 container_of(wait, struct mem_cgroup_event, wait);
5011 struct mem_cgroup *memcg = event->memcg;
5012 __poll_t flags = key_to_poll(key);
5013
5014 if (flags & EPOLLHUP) {
5015 /*
5016 * If the event has been detached at cgroup removal, we
5017 * can simply return knowing the other side will cleanup
5018 * for us.
5019 *
5020 * We can't race against event freeing since the other
5021 * side will require wqh->lock via remove_wait_queue(),
5022 * which we hold.
5023 */
5024 spin_lock(&memcg->event_list_lock);
5025 if (!list_empty(&event->list)) {
5026 list_del_init(&event->list);
5027 /*
5028 * We are in atomic context, but cgroup_event_remove()
5029 * may sleep, so we have to call it in workqueue.
5030 */
5031 schedule_work(&event->remove);
5032 }
5033 spin_unlock(&memcg->event_list_lock);
5034 }
5035
5036 return 0;
5037}
5038
5039static void memcg_event_ptable_queue_proc(struct file *file,
5040 wait_queue_head_t *wqh, poll_table *pt)
5041{
5042 struct mem_cgroup_event *event =
5043 container_of(pt, struct mem_cgroup_event, pt);
5044
5045 event->wqh = wqh;
5046 add_wait_queue(wqh, &event->wait);
5047}
5048
5049/*
5050 * DO NOT USE IN NEW FILES.
5051 *
5052 * Parse input and register new cgroup event handler.
5053 *
5054 * Input must be in format '<event_fd> <control_fd> <args>'.
5055 * Interpretation of args is defined by control file implementation.
5056 */
5057static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
5058 char *buf, size_t nbytes, loff_t off)
5059{
5060 struct cgroup_subsys_state *css = of_css(of);
5061 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5062 struct mem_cgroup_event *event;
5063 struct cgroup_subsys_state *cfile_css;
5064 unsigned int efd, cfd;
5065 struct fd efile;
5066 struct fd cfile;
5067 struct dentry *cdentry;
5068 const char *name;
5069 char *endp;
5070 int ret;
5071
5072 if (IS_ENABLED(CONFIG_PREEMPT_RT))
5073 return -EOPNOTSUPP;
5074
5075 buf = strstrip(buf);
5076
5077 efd = simple_strtoul(buf, &endp, 10);
5078 if (*endp != ' ')
5079 return -EINVAL;
5080 buf = endp + 1;
5081
5082 cfd = simple_strtoul(buf, &endp, 10);
5083 if ((*endp != ' ') && (*endp != '\0'))
5084 return -EINVAL;
5085 buf = endp + 1;
5086
5087 event = kzalloc(sizeof(*event), GFP_KERNEL);
5088 if (!event)
5089 return -ENOMEM;
5090
5091 event->memcg = memcg;
5092 INIT_LIST_HEAD(&event->list);
5093 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
5094 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
5095 INIT_WORK(&event->remove, memcg_event_remove);
5096
5097 efile = fdget(efd);
5098 if (!efile.file) {
5099 ret = -EBADF;
5100 goto out_kfree;
5101 }
5102
5103 event->eventfd = eventfd_ctx_fileget(efile.file);
5104 if (IS_ERR(event->eventfd)) {
5105 ret = PTR_ERR(event->eventfd);
5106 goto out_put_efile;
5107 }
5108
5109 cfile = fdget(cfd);
5110 if (!cfile.file) {
5111 ret = -EBADF;
5112 goto out_put_eventfd;
5113 }
5114
 5115	/* the process needs read permission on the control file */
5116 /* AV: shouldn't we check that it's been opened for read instead? */
5117 ret = file_permission(cfile.file, MAY_READ);
5118 if (ret < 0)
5119 goto out_put_cfile;
5120
5121 /*
5122 * The control file must be a regular cgroup1 file. As a regular cgroup
5123 * file can't be renamed, it's safe to access its name afterwards.
5124 */
5125 cdentry = cfile.file->f_path.dentry;
5126 if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
5127 ret = -EINVAL;
5128 goto out_put_cfile;
5129 }
5130
5131 /*
5132 * Determine the event callbacks and set them in @event. This used
5133 * to be done via struct cftype but cgroup core no longer knows
5134 * about these events. The following is crude but the whole thing
5135 * is for compatibility anyway.
5136 *
5137 * DO NOT ADD NEW FILES.
5138 */
5139 name = cdentry->d_name.name;
5140
5141 if (!strcmp(name, "memory.usage_in_bytes")) {
5142 event->register_event = mem_cgroup_usage_register_event;
5143 event->unregister_event = mem_cgroup_usage_unregister_event;
5144 } else if (!strcmp(name, "memory.oom_control")) {
5145 event->register_event = mem_cgroup_oom_register_event;
5146 event->unregister_event = mem_cgroup_oom_unregister_event;
5147 } else if (!strcmp(name, "memory.pressure_level")) {
5148 event->register_event = vmpressure_register_event;
5149 event->unregister_event = vmpressure_unregister_event;
5150 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
5151 event->register_event = memsw_cgroup_usage_register_event;
5152 event->unregister_event = memsw_cgroup_usage_unregister_event;
5153 } else {
5154 ret = -EINVAL;
5155 goto out_put_cfile;
5156 }
5157
5158 /*
 5159	 * Verify that @cfile belongs to @css. Also, remaining events are
5160 * automatically removed on cgroup destruction but the removal is
5161 * asynchronous, so take an extra ref on @css.
5162 */
5163 cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
5164 &memory_cgrp_subsys);
5165 ret = -EINVAL;
5166 if (IS_ERR(cfile_css))
5167 goto out_put_cfile;
5168 if (cfile_css != css) {
5169 css_put(cfile_css);
5170 goto out_put_cfile;
5171 }
5172
5173 ret = event->register_event(memcg, event->eventfd, buf);
5174 if (ret)
5175 goto out_put_css;
5176
5177 vfs_poll(efile.file, &event->pt);
5178
5179 spin_lock_irq(&memcg->event_list_lock);
5180 list_add(&event->list, &memcg->event_list);
5181 spin_unlock_irq(&memcg->event_list_lock);
5182
5183 fdput(cfile);
5184 fdput(efile);
5185
5186 return nbytes;
5187
5188out_put_css:
5189 css_put(css);
5190out_put_cfile:
5191 fdput(cfile);
5192out_put_eventfd:
5193 eventfd_ctx_put(event->eventfd);
5194out_put_efile:
5195 fdput(efile);
5196out_kfree:
5197 kfree(event);
5198
5199 return ret;
5200}
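
/*
 * Userspace sketch (illustrative only; the cgroup mount point, group name
 * and threshold value are examples): registering a 50M usage threshold on a
 * v1 memcg boils down to
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes",
 *		       O_RDONLY);
 *	int ecfd = open("/sys/fs/cgroup/memory/grp/cgroup.event_control",
 *			O_WRONLY);
 *	dprintf(ecfd, "%d %d 52428800", efd, cfd);
 *
 * after which a read() on efd blocks until the threshold is crossed.
 */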
5201
5202#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
5203static int mem_cgroup_slab_show(struct seq_file *m, void *p)
5204{
5205 /*
5206 * Deprecated.
5207 * Please, take a look at tools/cgroup/memcg_slabinfo.py .
5208 */
5209 return 0;
5210}
5211#endif
5212
5213static int memory_stat_show(struct seq_file *m, void *v);
5214
5215static struct cftype mem_cgroup_legacy_files[] = {
5216 {
5217 .name = "usage_in_bytes",
5218 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
5219 .read_u64 = mem_cgroup_read_u64,
5220 },
5221 {
5222 .name = "max_usage_in_bytes",
5223 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
5224 .write = mem_cgroup_reset,
5225 .read_u64 = mem_cgroup_read_u64,
5226 },
5227 {
5228 .name = "limit_in_bytes",
5229 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
5230 .write = mem_cgroup_write,
5231 .read_u64 = mem_cgroup_read_u64,
5232 },
5233 {
5234 .name = "soft_limit_in_bytes",
5235 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
5236 .write = mem_cgroup_write,
5237 .read_u64 = mem_cgroup_read_u64,
5238 },
5239 {
5240 .name = "failcnt",
5241 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
5242 .write = mem_cgroup_reset,
5243 .read_u64 = mem_cgroup_read_u64,
5244 },
5245 {
5246 .name = "stat",
5247 .seq_show = memory_stat_show,
5248 },
5249 {
5250 .name = "force_empty",
5251 .write = mem_cgroup_force_empty_write,
5252 },
5253 {
5254 .name = "use_hierarchy",
5255 .write_u64 = mem_cgroup_hierarchy_write,
5256 .read_u64 = mem_cgroup_hierarchy_read,
5257 },
5258 {
5259 .name = "cgroup.event_control", /* XXX: for compat */
5260 .write = memcg_write_event_control,
5261 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
5262 },
5263 {
5264 .name = "swappiness",
5265 .read_u64 = mem_cgroup_swappiness_read,
5266 .write_u64 = mem_cgroup_swappiness_write,
5267 },
5268 {
5269 .name = "move_charge_at_immigrate",
5270 .read_u64 = mem_cgroup_move_charge_read,
5271 .write_u64 = mem_cgroup_move_charge_write,
5272 },
5273 {
5274 .name = "oom_control",
5275 .seq_show = mem_cgroup_oom_control_read,
5276 .write_u64 = mem_cgroup_oom_control_write,
5277 },
5278 {
5279 .name = "pressure_level",
5280 .seq_show = mem_cgroup_dummy_seq_show,
5281 },
5282#ifdef CONFIG_NUMA
5283 {
5284 .name = "numa_stat",
5285 .seq_show = memcg_numa_stat_show,
5286 },
5287#endif
5288 {
5289 .name = "kmem.limit_in_bytes",
5290 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5291 .write = mem_cgroup_write,
5292 .read_u64 = mem_cgroup_read_u64,
5293 },
5294 {
5295 .name = "kmem.usage_in_bytes",
5296 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5297 .read_u64 = mem_cgroup_read_u64,
5298 },
5299 {
5300 .name = "kmem.failcnt",
5301 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5302 .write = mem_cgroup_reset,
5303 .read_u64 = mem_cgroup_read_u64,
5304 },
5305 {
5306 .name = "kmem.max_usage_in_bytes",
5307 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5308 .write = mem_cgroup_reset,
5309 .read_u64 = mem_cgroup_read_u64,
5310 },
5311#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
5312 {
5313 .name = "kmem.slabinfo",
5314 .seq_show = mem_cgroup_slab_show,
5315 },
5316#endif
5317 {
5318 .name = "kmem.tcp.limit_in_bytes",
5319 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5320 .write = mem_cgroup_write,
5321 .read_u64 = mem_cgroup_read_u64,
5322 },
5323 {
5324 .name = "kmem.tcp.usage_in_bytes",
5325 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5326 .read_u64 = mem_cgroup_read_u64,
5327 },
5328 {
5329 .name = "kmem.tcp.failcnt",
5330 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5331 .write = mem_cgroup_reset,
5332 .read_u64 = mem_cgroup_read_u64,
5333 },
5334 {
5335 .name = "kmem.tcp.max_usage_in_bytes",
5336 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5337 .write = mem_cgroup_reset,
5338 .read_u64 = mem_cgroup_read_u64,
5339 },
5340 { }, /* terminate */
5341};
5342
5343/*
5344 * Private memory cgroup IDR
5345 *
5346 * Swap-out records and page cache shadow entries need to store memcg
5347 * references in constrained space, so we maintain an ID space that is
 5348 * limited to 16 bits (MEM_CGROUP_ID_MAX), limiting the total number of
5349 * memory-controlled cgroups to 64k.
5350 *
5351 * However, there usually are many references to the offline CSS after
5352 * the cgroup has been destroyed, such as page cache or reclaimable
5353 * slab objects, that don't need to hang on to the ID. We want to keep
 5354 * those dead CSSes from occupying IDs, or we might quickly exhaust the
5355 * relatively small ID space and prevent the creation of new cgroups
5356 * even when there are much fewer than 64k cgroups - possibly none.
5357 *
5358 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5359 * be freed and recycled when it's no longer needed, which is usually
5360 * when the CSS is offlined.
5361 *
5362 * The only exception to that are records of swapped out tmpfs/shmem
5363 * pages that need to be attributed to live ancestors on swapin. But
5364 * those references are manageable from userspace.
5365 */
5366
5367#define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
5368static DEFINE_IDR(mem_cgroup_idr);
5369
5370static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5371{
5372 if (memcg->id.id > 0) {
5373 idr_remove(&mem_cgroup_idr, memcg->id.id);
5374 memcg->id.id = 0;
5375 }
5376}
5377
5378static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5379 unsigned int n)
5380{
5381 refcount_add(n, &memcg->id.ref);
5382}
5383
5384static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5385{
5386 if (refcount_sub_and_test(n, &memcg->id.ref)) {
5387 mem_cgroup_id_remove(memcg);
5388
5389 /* Memcg ID pins CSS */
5390 css_put(&memcg->css);
5391 }
5392}
5393
5394static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5395{
5396 mem_cgroup_id_put_many(memcg, 1);
5397}
5398
5399/**
5400 * mem_cgroup_from_id - look up a memcg from a memcg id
5401 * @id: the memcg id to look up
5402 *
5403 * Caller must hold rcu_read_lock().
5404 */
5405struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5406{
5407 WARN_ON_ONCE(!rcu_read_lock_held());
5408 return idr_find(&mem_cgroup_idr, id);
5409}
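
/*
 * Illustrative sketch (not part of this file): a typical lookup of a memcg
 * from a recorded ID, e.g. one read back from a swap record. The ID only
 * pins the memcg while the RCU read lock is held, so a caller that needs
 * the css beyond that must take its own reference, for instance with
 * css_tryget_online():
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 *	if (memcg) {
 *		... use memcg ...
 *		css_put(&memcg->css);
 *	}
 */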
5410
5411#ifdef CONFIG_SHRINKER_DEBUG
5412struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
5413{
5414 struct cgroup *cgrp;
5415 struct cgroup_subsys_state *css;
5416 struct mem_cgroup *memcg;
5417
5418 cgrp = cgroup_get_from_id(ino);
5419 if (IS_ERR(cgrp))
5420 return ERR_CAST(cgrp);
5421
5422 css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
5423 if (css)
5424 memcg = container_of(css, struct mem_cgroup, css);
5425 else
5426 memcg = ERR_PTR(-ENOENT);
5427
5428 cgroup_put(cgrp);
5429
5430 return memcg;
5431}
5432#endif
5433
5434static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5435{
5436 struct mem_cgroup_per_node *pn;
5437
5438 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
5439 if (!pn)
5440 return 1;
5441
5442 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5443 GFP_KERNEL_ACCOUNT);
5444 if (!pn->lruvec_stats_percpu) {
5445 kfree(pn);
5446 return 1;
5447 }
5448
5449 lruvec_init(&pn->lruvec);
5450 pn->memcg = memcg;
5451
5452 memcg->nodeinfo[node] = pn;
5453 return 0;
5454}
5455
5456static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5457{
5458 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5459
5460 if (!pn)
5461 return;
5462
5463 free_percpu(pn->lruvec_stats_percpu);
5464 kfree(pn);
5465}
5466
5467static void __mem_cgroup_free(struct mem_cgroup *memcg)
5468{
5469 int node;
5470
5471 if (memcg->orig_objcg)
5472 obj_cgroup_put(memcg->orig_objcg);
5473
5474 for_each_node(node)
5475 free_mem_cgroup_per_node_info(memcg, node);
5476 kfree(memcg->vmstats);
5477 free_percpu(memcg->vmstats_percpu);
5478 kfree(memcg);
5479}
5480
5481static void mem_cgroup_free(struct mem_cgroup *memcg)
5482{
5483 lru_gen_exit_memcg(memcg);
5484 memcg_wb_domain_exit(memcg);
5485 __mem_cgroup_free(memcg);
5486}
5487
5488static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
5489{
5490 struct memcg_vmstats_percpu *statc, *pstatc;
5491 struct mem_cgroup *memcg;
5492 int node, cpu;
5493 int __maybe_unused i;
5494 long error = -ENOMEM;
5495
5496 memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
5497 if (!memcg)
5498 return ERR_PTR(error);
5499
5500 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5501 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
5502 if (memcg->id.id < 0) {
5503 error = memcg->id.id;
5504 goto fail;
5505 }
5506
5507 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), GFP_KERNEL);
5508 if (!memcg->vmstats)
5509 goto fail;
5510
5511 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5512 GFP_KERNEL_ACCOUNT);
5513 if (!memcg->vmstats_percpu)
5514 goto fail;
5515
5516 for_each_possible_cpu(cpu) {
5517 if (parent)
5518 pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
5519 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5520 statc->parent = parent ? pstatc : NULL;
5521 statc->vmstats = memcg->vmstats;
5522 }
5523
5524 for_each_node(node)
5525 if (alloc_mem_cgroup_per_node_info(memcg, node))
5526 goto fail;
5527
5528 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5529 goto fail;
5530
5531 INIT_WORK(&memcg->high_work, high_work_func);
5532 INIT_LIST_HEAD(&memcg->oom_notify);
5533 mutex_init(&memcg->thresholds_lock);
5534 spin_lock_init(&memcg->move_lock);
5535 vmpressure_init(&memcg->vmpressure);
5536 INIT_LIST_HEAD(&memcg->event_list);
5537 spin_lock_init(&memcg->event_list_lock);
5538 memcg->socket_pressure = jiffies;
5539#ifdef CONFIG_MEMCG_KMEM
5540 memcg->kmemcg_id = -1;
5541 INIT_LIST_HEAD(&memcg->objcg_list);
5542#endif
5543#ifdef CONFIG_CGROUP_WRITEBACK
5544 INIT_LIST_HEAD(&memcg->cgwb_list);
5545 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5546 memcg->cgwb_frn[i].done =
5547 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5548#endif
5549#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5550 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5551 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5552 memcg->deferred_split_queue.split_queue_len = 0;
5553#endif
5554 lru_gen_init_memcg(memcg);
5555 return memcg;
5556fail:
5557 mem_cgroup_id_remove(memcg);
5558 __mem_cgroup_free(memcg);
5559 return ERR_PTR(error);
5560}
5561
5562static struct cgroup_subsys_state * __ref
5563mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5564{
5565 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5566 struct mem_cgroup *memcg, *old_memcg;
5567
5568 old_memcg = set_active_memcg(parent);
5569 memcg = mem_cgroup_alloc(parent);
5570 set_active_memcg(old_memcg);
5571 if (IS_ERR(memcg))
5572 return ERR_CAST(memcg);
5573
5574 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5575 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
5576#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
5577 memcg->zswap_max = PAGE_COUNTER_MAX;
5578 WRITE_ONCE(memcg->zswap_writeback,
5579 !parent || READ_ONCE(parent->zswap_writeback));
5580#endif
5581 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5582 if (parent) {
5583 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
5584 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
5585
5586 page_counter_init(&memcg->memory, &parent->memory);
5587 page_counter_init(&memcg->swap, &parent->swap);
5588 page_counter_init(&memcg->kmem, &parent->kmem);
5589 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5590 } else {
5591 init_memcg_events();
5592 page_counter_init(&memcg->memory, NULL);
5593 page_counter_init(&memcg->swap, NULL);
5594 page_counter_init(&memcg->kmem, NULL);
5595 page_counter_init(&memcg->tcpmem, NULL);
5596
5597 root_mem_cgroup = memcg;
5598 return &memcg->css;
5599 }
5600
5601 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5602 static_branch_inc(&memcg_sockets_enabled_key);
5603
5604#if defined(CONFIG_MEMCG_KMEM)
5605 if (!cgroup_memory_nobpf)
5606 static_branch_inc(&memcg_bpf_enabled_key);
5607#endif
5608
5609 return &memcg->css;
5610}
5611
5612static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5613{
5614 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5615
5616 if (memcg_online_kmem(memcg))
5617 goto remove_id;
5618
5619 /*
5620	 * A memcg must be visible to expand_shrinker_info() by the time its
5621	 * shrinker maps are allocated. So allocate the maps here, where
5622	 * for_each_mem_cgroup() can no longer skip it.
5623 */
5624 if (alloc_shrinker_info(memcg))
5625 goto offline_kmem;
5626
5627 if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
5628 queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
5629 FLUSH_TIME);
5630 lru_gen_online_memcg(memcg);
5631
5632 /* Online state pins memcg ID, memcg ID pins CSS */
5633 refcount_set(&memcg->id.ref, 1);
5634 css_get(css);
5635
5636 /*
5637 * Ensure mem_cgroup_from_id() works once we're fully online.
5638 *
5639 * We could do this earlier and require callers to filter with
5640 * css_tryget_online(). But right now there are no users that
5641 * need earlier access, and the workingset code relies on the
5642 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
5643 * publish it here at the end of onlining. This matches the
5644 * regular ID destruction during offlining.
5645 */
5646 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5647
5648 return 0;
5649offline_kmem:
5650 memcg_offline_kmem(memcg);
5651remove_id:
5652 mem_cgroup_id_remove(memcg);
5653 return -ENOMEM;
5654}
5655
5656static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5657{
5658 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5659 struct mem_cgroup_event *event, *tmp;
5660
5661 /*
5662 * Unregister events and notify userspace.
5663 * Notify userspace about cgroup removing only after rmdir of cgroup
5664 * directory to avoid race between userspace and kernelspace.
5665 */
5666 spin_lock_irq(&memcg->event_list_lock);
5667 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5668 list_del_init(&event->list);
5669 schedule_work(&event->remove);
5670 }
5671 spin_unlock_irq(&memcg->event_list_lock);
5672
5673 page_counter_set_min(&memcg->memory, 0);
5674 page_counter_set_low(&memcg->memory, 0);
5675
5676 zswap_memcg_offline_cleanup(memcg);
5677
5678 memcg_offline_kmem(memcg);
5679 reparent_shrinker_deferred(memcg);
5680 wb_memcg_offline(memcg);
5681 lru_gen_offline_memcg(memcg);
5682
5683 drain_all_stock(memcg);
5684
5685 mem_cgroup_id_put(memcg);
5686}
5687
5688static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5689{
5690 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5691
5692 invalidate_reclaim_iterators(memcg);
5693 lru_gen_release_memcg(memcg);
5694}
5695
5696static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5697{
5698 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5699 int __maybe_unused i;
5700
5701#ifdef CONFIG_CGROUP_WRITEBACK
5702 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5703 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5704#endif
5705 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5706 static_branch_dec(&memcg_sockets_enabled_key);
5707
5708 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5709 static_branch_dec(&memcg_sockets_enabled_key);
5710
5711#if defined(CONFIG_MEMCG_KMEM)
5712 if (!cgroup_memory_nobpf)
5713 static_branch_dec(&memcg_bpf_enabled_key);
5714#endif
5715
5716 vmpressure_cleanup(&memcg->vmpressure);
5717 cancel_work_sync(&memcg->high_work);
5718 mem_cgroup_remove_from_trees(memcg);
5719 free_shrinker_info(memcg);
5720 mem_cgroup_free(memcg);
5721}
5722
5723/**
5724 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5725 * @css: the target css
5726 *
5727 * Reset the states of the mem_cgroup associated with @css. This is
5728 * invoked when the userland requests disabling on the default hierarchy
5729 * but the memcg is pinned through dependency. The memcg should stop
5730 * applying policies and should revert to the vanilla state as it may be
5731 * made visible again.
5732 *
5733 * The current implementation only resets the essential configurations.
5734 * This needs to be expanded to cover all the visible parts.
5735 */
5736static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5737{
5738 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5739
5740 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5741 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5742 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5743 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5744 page_counter_set_min(&memcg->memory, 0);
5745 page_counter_set_low(&memcg->memory, 0);
5746 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5747 WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
5748 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5749 memcg_wb_domain_size_changed(memcg);
5750}
5751
5752static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5753{
5754 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5755 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5756 struct memcg_vmstats_percpu *statc;
5757 long delta, delta_cpu, v;
5758 int i, nid;
5759
5760 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5761
5762 for (i = 0; i < MEMCG_NR_STAT; i++) {
5763 /*
5764 * Collect the aggregated propagation counts of groups
5765 * below us. We're in a per-cpu loop here and this is
5766 * a global counter, so the first cycle will get them.
5767 */
5768 delta = memcg->vmstats->state_pending[i];
5769 if (delta)
5770 memcg->vmstats->state_pending[i] = 0;
5771
5772 /* Add CPU changes on this level since the last flush */
5773 delta_cpu = 0;
5774 v = READ_ONCE(statc->state[i]);
5775 if (v != statc->state_prev[i]) {
5776 delta_cpu = v - statc->state_prev[i];
5777 delta += delta_cpu;
5778 statc->state_prev[i] = v;
5779 }
5780
5781 /* Aggregate counts on this level and propagate upwards */
5782 if (delta_cpu)
5783 memcg->vmstats->state_local[i] += delta_cpu;
5784
5785 if (delta) {
5786 memcg->vmstats->state[i] += delta;
5787 if (parent)
5788 parent->vmstats->state_pending[i] += delta;
5789 }
5790 }
5791
5792 for (i = 0; i < NR_MEMCG_EVENTS; i++) {
5793 delta = memcg->vmstats->events_pending[i];
5794 if (delta)
5795 memcg->vmstats->events_pending[i] = 0;
5796
5797 delta_cpu = 0;
5798 v = READ_ONCE(statc->events[i]);
5799 if (v != statc->events_prev[i]) {
5800 delta_cpu = v - statc->events_prev[i];
5801 delta += delta_cpu;
5802 statc->events_prev[i] = v;
5803 }
5804
5805 if (delta_cpu)
5806 memcg->vmstats->events_local[i] += delta_cpu;
5807
5808 if (delta) {
5809 memcg->vmstats->events[i] += delta;
5810 if (parent)
5811 parent->vmstats->events_pending[i] += delta;
5812 }
5813 }
5814
5815 for_each_node_state(nid, N_MEMORY) {
5816 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
5817 struct mem_cgroup_per_node *ppn = NULL;
5818 struct lruvec_stats_percpu *lstatc;
5819
5820 if (parent)
5821 ppn = parent->nodeinfo[nid];
5822
5823 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
5824
5825 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
5826 delta = pn->lruvec_stats.state_pending[i];
5827 if (delta)
5828 pn->lruvec_stats.state_pending[i] = 0;
5829
5830 delta_cpu = 0;
5831 v = READ_ONCE(lstatc->state[i]);
5832 if (v != lstatc->state_prev[i]) {
5833 delta_cpu = v - lstatc->state_prev[i];
5834 delta += delta_cpu;
5835 lstatc->state_prev[i] = v;
5836 }
5837
5838 if (delta_cpu)
5839 pn->lruvec_stats.state_local[i] += delta_cpu;
5840
5841 if (delta) {
5842 pn->lruvec_stats.state[i] += delta;
5843 if (ppn)
5844 ppn->lruvec_stats.state_pending[i] += delta;
5845 }
5846 }
5847 }
5848 statc->stats_updates = 0;
5849 /* We are in a per-cpu loop here, only do the atomic write once */
5850 if (atomic64_read(&memcg->vmstats->stats_updates))
5851 atomic64_set(&memcg->vmstats->stats_updates, 0);
5852}
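
/*
 * Worked example (illustrative only): suppose that since the last flush,
 * CPU @cpu incremented NR_FILE_PAGES in this memcg by 3. Then
 * v - state_prev[i] = 3, so delta_cpu = 3 is folded into this memcg's
 * state_local[] and state[], and 3 is also added to the parent's
 * state_pending[], from where the parent's own flush picks it up and
 * propagates it further towards the root.
 */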
5853
5854#ifdef CONFIG_MMU
5855/* Handlers for move charge at task migration. */
5856static int mem_cgroup_do_precharge(unsigned long count)
5857{
5858 int ret;
5859
5860 /* Try a single bulk charge without reclaim first, kswapd may wake */
5861 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5862 if (!ret) {
5863 mc.precharge += count;
5864 return ret;
5865 }
5866
5867 /* Try charges one by one with reclaim, but do not retry */
5868 while (count--) {
5869 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5870 if (ret)
5871 return ret;
5872 mc.precharge++;
5873 cond_resched();
5874 }
5875 return 0;
5876}
5877
5878union mc_target {
5879 struct folio *folio;
5880 swp_entry_t ent;
5881};
5882
5883enum mc_target_type {
5884 MC_TARGET_NONE = 0,
5885 MC_TARGET_PAGE,
5886 MC_TARGET_SWAP,
5887 MC_TARGET_DEVICE,
5888};
5889
5890static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5891 unsigned long addr, pte_t ptent)
5892{
5893 struct page *page = vm_normal_page(vma, addr, ptent);
5894
5895 if (!page)
5896 return NULL;
5897 if (PageAnon(page)) {
5898 if (!(mc.flags & MOVE_ANON))
5899 return NULL;
5900 } else {
5901 if (!(mc.flags & MOVE_FILE))
5902 return NULL;
5903 }
5904 get_page(page);
5905
5906 return page;
5907}
5908
5909#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5910static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5911 pte_t ptent, swp_entry_t *entry)
5912{
5913 struct page *page = NULL;
5914 swp_entry_t ent = pte_to_swp_entry(ptent);
5915
5916 if (!(mc.flags & MOVE_ANON))
5917 return NULL;
5918
5919 /*
5920 * Handle device private pages that are not accessible by the CPU, but
5921 * stored as special swap entries in the page table.
5922 */
5923 if (is_device_private_entry(ent)) {
5924 page = pfn_swap_entry_to_page(ent);
5925 if (!get_page_unless_zero(page))
5926 return NULL;
5927 return page;
5928 }
5929
5930 if (non_swap_entry(ent))
5931 return NULL;
5932
5933 /*
5934	 * Because swap_cache_get_folio() updates statistics counters,
5935 * we call find_get_page() with swapper_space directly.
5936 */
5937 page = find_get_page(swap_address_space(ent), swp_offset(ent));
5938 entry->val = ent.val;
5939
5940 return page;
5941}
5942#else
5943static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5944 pte_t ptent, swp_entry_t *entry)
5945{
5946 return NULL;
5947}
5948#endif
5949
5950static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5951 unsigned long addr, pte_t ptent)
5952{
5953 unsigned long index;
5954 struct folio *folio;
5955
5956 if (!vma->vm_file) /* anonymous vma */
5957 return NULL;
5958 if (!(mc.flags & MOVE_FILE))
5959 return NULL;
5960
5961	/* The folio is moved even if it's not in this task's RSS (i.e. not faulted in by it). */
5962	/* shmem/tmpfs may report the page as out on swap: account for that too. */
5963 index = linear_page_index(vma, addr);
5964 folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index);
5965 if (IS_ERR(folio))
5966 return NULL;
5967 return folio_file_page(folio, index);
5968}
5969
5970/**
5971 * mem_cgroup_move_account - move account of the folio
5972 * @folio: The folio.
5973 * @compound: charge the page as compound or small page
5974 * @from: mem_cgroup which the folio is moved from.
5975 * @to: mem_cgroup which the folio is moved to. @from != @to.
5976 *
5977 * The folio must be locked and not on the LRU.
5978 *
5979 * This function doesn't "charge" the folio to the new cgroup and doesn't
5980 * "uncharge" it from the old cgroup.
5981 */
5982static int mem_cgroup_move_account(struct folio *folio,
5983 bool compound,
5984 struct mem_cgroup *from,
5985 struct mem_cgroup *to)
5986{
5987 struct lruvec *from_vec, *to_vec;
5988 struct pglist_data *pgdat;
5989 unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
5990 int nid, ret;
5991
5992 VM_BUG_ON(from == to);
5993 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5994 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
5995 VM_BUG_ON(compound && !folio_test_large(folio));
5996
5997 ret = -EINVAL;
5998 if (folio_memcg(folio) != from)
5999 goto out;
6000
6001 pgdat = folio_pgdat(folio);
6002 from_vec = mem_cgroup_lruvec(from, pgdat);
6003 to_vec = mem_cgroup_lruvec(to, pgdat);
6004
6005 folio_memcg_lock(folio);
6006
6007 if (folio_test_anon(folio)) {
6008 if (folio_mapped(folio)) {
6009 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
6010 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
6011 if (folio_test_pmd_mappable(folio)) {
6012 __mod_lruvec_state(from_vec, NR_ANON_THPS,
6013 -nr_pages);
6014 __mod_lruvec_state(to_vec, NR_ANON_THPS,
6015 nr_pages);
6016 }
6017 }
6018 } else {
6019 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
6020 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
6021
6022 if (folio_test_swapbacked(folio)) {
6023 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
6024 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
6025 }
6026
6027 if (folio_mapped(folio)) {
6028 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
6029 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
6030 }
6031
6032 if (folio_test_dirty(folio)) {
6033 struct address_space *mapping = folio_mapping(folio);
6034
6035 if (mapping_can_writeback(mapping)) {
6036 __mod_lruvec_state(from_vec, NR_FILE_DIRTY,
6037 -nr_pages);
6038 __mod_lruvec_state(to_vec, NR_FILE_DIRTY,
6039 nr_pages);
6040 }
6041 }
6042 }
6043
6044#ifdef CONFIG_SWAP
6045 if (folio_test_swapcache(folio)) {
6046 __mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages);
6047 __mod_lruvec_state(to_vec, NR_SWAPCACHE, nr_pages);
6048 }
6049#endif
6050 if (folio_test_writeback(folio)) {
6051 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
6052 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
6053 }
6054
6055 /*
6056 * All state has been migrated, let's switch to the new memcg.
6057 *
6058 * It is safe to change page's memcg here because the page
6059 * is referenced, charged, isolated, and locked: we can't race
6060 * with (un)charging, migration, LRU putback, or anything else
6061 * that would rely on a stable page's memory cgroup.
6062 *
6063 * Note that folio_memcg_lock is a memcg lock, not a page lock,
6064 * to save space. As soon as we switch page's memory cgroup to a
6065 * new memcg that isn't locked, the above state can change
6066 * concurrently again. Make sure we're truly done with it.
6067 */
6068 smp_mb();
6069
6070 css_get(&to->css);
6071 css_put(&from->css);
6072
6073 folio->memcg_data = (unsigned long)to;
6074
6075 __folio_memcg_unlock(from);
6076
6077 ret = 0;
6078 nid = folio_nid(folio);
6079
6080 local_irq_disable();
6081 mem_cgroup_charge_statistics(to, nr_pages);
6082 memcg_check_events(to, nid);
6083 mem_cgroup_charge_statistics(from, -nr_pages);
6084 memcg_check_events(from, nid);
6085 local_irq_enable();
6086out:
6087 return ret;
6088}
6089
6090/**
6091 * get_mctgt_type - get target type of moving charge
6092 * @vma: the vma the pte to be checked belongs to
6093 * @addr: the address corresponding to the pte to be checked
6094 * @ptent: the pte to be checked
6095 * @target: the pointer where the target folio or swap entry will be stored (can be NULL)
6096 *
6097 * Context: Called with pte lock held.
6098 * Return:
6099 * * MC_TARGET_NONE - If the pte is not a target for move charge.
6100 * * MC_TARGET_PAGE - If the page corresponding to this pte is a target for
6101 * move charge. If @target is not NULL, the folio is stored in target->folio
6102 *   with an extra refcount taken (the caller should release it).
6103 * * MC_TARGET_SWAP - If the swap entry corresponding to this pte is a
6104 * target for charge migration. If @target is not NULL, the entry is
6105 * stored in target->ent.
6106 * * MC_TARGET_DEVICE - Like MC_TARGET_PAGE but page is device memory and
6107 *   thus not on the LRU. For now such a page is charged like a regular page
6108 *   would be, as it is just special memory taking the place of a regular page.
6109 *   See Documentation/mm/hmm.rst and include/linux/hmm.h
6110 */
6111static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
6112 unsigned long addr, pte_t ptent, union mc_target *target)
6113{
6114 struct page *page = NULL;
6115 struct folio *folio;
6116 enum mc_target_type ret = MC_TARGET_NONE;
6117 swp_entry_t ent = { .val = 0 };
6118
6119 if (pte_present(ptent))
6120 page = mc_handle_present_pte(vma, addr, ptent);
6121 else if (pte_none_mostly(ptent))
6122 /*
6123 * PTE markers should be treated as a none pte here, separated
6124 * from other swap handling below.
6125 */
6126 page = mc_handle_file_pte(vma, addr, ptent);
6127 else if (is_swap_pte(ptent))
6128 page = mc_handle_swap_pte(vma, ptent, &ent);
6129
6130 if (page)
6131 folio = page_folio(page);
6132 if (target && page) {
6133 if (!folio_trylock(folio)) {
6134 folio_put(folio);
6135 return ret;
6136 }
6137 /*
6138 * page_mapped() must be stable during the move. This
6139 * pte is locked, so if it's present, the page cannot
6140 * become unmapped. If it isn't, we have only partial
6141 * control over the mapped state: the page lock will
6142 * prevent new faults against pagecache and swapcache,
6143 * so an unmapped page cannot become mapped. However,
6144		 * if the page is already mapped elsewhere, it can still
6145		 * be unmapped, and there is nothing we can do about it.
6146 * Alas, skip moving the page in this case.
6147 */
6148 if (!pte_present(ptent) && page_mapped(page)) {
6149 folio_unlock(folio);
6150 folio_put(folio);
6151 return ret;
6152 }
6153 }
6154
6155 if (!page && !ent.val)
6156 return ret;
6157 if (page) {
6158 /*
6159 * Do only loose check w/o serialization.
6160 * mem_cgroup_move_account() checks the page is valid or
6161 * not under LRU exclusion.
6162 */
6163 if (folio_memcg(folio) == mc.from) {
6164 ret = MC_TARGET_PAGE;
6165 if (folio_is_device_private(folio) ||
6166 folio_is_device_coherent(folio))
6167 ret = MC_TARGET_DEVICE;
6168 if (target)
6169 target->folio = folio;
6170 }
6171 if (!ret || !target) {
6172 if (target)
6173 folio_unlock(folio);
6174 folio_put(folio);
6175 }
6176 }
6177 /*
6178	 * There is a swap entry and the page either doesn't exist or isn't charged.
6179	 * Note that we cannot move a tail page of a THP.
6180 */
6181 if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
6182 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
6183 ret = MC_TARGET_SWAP;
6184 if (target)
6185 target->ent = ent;
6186 }
6187 return ret;
6188}
6189
6190#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6191/*
6192 * We don't consider PMD mapped swapping or file mapped pages because THP does
6193 * not support them for now.
6194 * Caller should make sure that pmd_trans_huge(pmd) is true.
6195 */
6196static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6197 unsigned long addr, pmd_t pmd, union mc_target *target)
6198{
6199 struct page *page = NULL;
6200 struct folio *folio;
6201 enum mc_target_type ret = MC_TARGET_NONE;
6202
6203 if (unlikely(is_swap_pmd(pmd))) {
6204 VM_BUG_ON(thp_migration_supported() &&
6205 !is_pmd_migration_entry(pmd));
6206 return ret;
6207 }
6208 page = pmd_page(pmd);
6209 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
6210 folio = page_folio(page);
6211 if (!(mc.flags & MOVE_ANON))
6212 return ret;
6213 if (folio_memcg(folio) == mc.from) {
6214 ret = MC_TARGET_PAGE;
6215 if (target) {
6216 folio_get(folio);
6217 if (!folio_trylock(folio)) {
6218 folio_put(folio);
6219 return MC_TARGET_NONE;
6220 }
6221 target->folio = folio;
6222 }
6223 }
6224 return ret;
6225}
6226#else
6227static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6228 unsigned long addr, pmd_t pmd, union mc_target *target)
6229{
6230 return MC_TARGET_NONE;
6231}
6232#endif
6233
6234static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
6235 unsigned long addr, unsigned long end,
6236 struct mm_walk *walk)
6237{
6238 struct vm_area_struct *vma = walk->vma;
6239 pte_t *pte;
6240 spinlock_t *ptl;
6241
6242 ptl = pmd_trans_huge_lock(pmd, vma);
6243 if (ptl) {
6244 /*
6245		 * Note there cannot be MC_TARGET_DEVICE for now as we do not
6246		 * support transparent huge pages with MEMORY_DEVICE_PRIVATE, but
6247		 * this might change.
6248 */
6249 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
6250 mc.precharge += HPAGE_PMD_NR;
6251 spin_unlock(ptl);
6252 return 0;
6253 }
6254
6255 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6256 if (!pte)
6257 return 0;
6258 for (; addr != end; pte++, addr += PAGE_SIZE)
6259 if (get_mctgt_type(vma, addr, ptep_get(pte), NULL))
6260 mc.precharge++; /* increment precharge temporarily */
6261 pte_unmap_unlock(pte - 1, ptl);
6262 cond_resched();
6263
6264 return 0;
6265}
6266
6267static const struct mm_walk_ops precharge_walk_ops = {
6268 .pmd_entry = mem_cgroup_count_precharge_pte_range,
6269 .walk_lock = PGWALK_RDLOCK,
6270};
6271
6272static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
6273{
6274 unsigned long precharge;
6275
6276 mmap_read_lock(mm);
6277 walk_page_range(mm, 0, ULONG_MAX, &precharge_walk_ops, NULL);
6278 mmap_read_unlock(mm);
6279
6280 precharge = mc.precharge;
6281 mc.precharge = 0;
6282
6283 return precharge;
6284}
6285
6286static int mem_cgroup_precharge_mc(struct mm_struct *mm)
6287{
6288 unsigned long precharge = mem_cgroup_count_precharge(mm);
6289
6290 VM_BUG_ON(mc.moving_task);
6291 mc.moving_task = current;
6292 return mem_cgroup_do_precharge(precharge);
6293}
6294
6295/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6296static void __mem_cgroup_clear_mc(void)
6297{
6298 struct mem_cgroup *from = mc.from;
6299 struct mem_cgroup *to = mc.to;
6300
6301 /* we must uncharge all the leftover precharges from mc.to */
6302 if (mc.precharge) {
6303 mem_cgroup_cancel_charge(mc.to, mc.precharge);
6304 mc.precharge = 0;
6305 }
6306 /*
6307 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6308 * we must uncharge here.
6309 */
6310 if (mc.moved_charge) {
6311 mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
6312 mc.moved_charge = 0;
6313 }
6314 /* we must fixup refcnts and charges */
6315 if (mc.moved_swap) {
6316 /* uncharge swap account from the old cgroup */
6317 if (!mem_cgroup_is_root(mc.from))
6318 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
6319
6320 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
6321
6322 /*
6323 * we charged both to->memory and to->memsw, so we
6324 * should uncharge to->memory.
6325 */
6326 if (!mem_cgroup_is_root(mc.to))
6327 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
6328
6329 mc.moved_swap = 0;
6330 }
6331 memcg_oom_recover(from);
6332 memcg_oom_recover(to);
6333 wake_up_all(&mc.waitq);
6334}
6335
6336static void mem_cgroup_clear_mc(void)
6337{
6338 struct mm_struct *mm = mc.mm;
6339
6340 /*
6341 * we must clear moving_task before waking up waiters at the end of
6342 * task migration.
6343 */
6344 mc.moving_task = NULL;
6345 __mem_cgroup_clear_mc();
6346 spin_lock(&mc.lock);
6347 mc.from = NULL;
6348 mc.to = NULL;
6349 mc.mm = NULL;
6350 spin_unlock(&mc.lock);
6351
6352 mmput(mm);
6353}
6354
6355static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6356{
6357 struct cgroup_subsys_state *css;
6358 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
6359 struct mem_cgroup *from;
6360 struct task_struct *leader, *p;
6361 struct mm_struct *mm;
6362 unsigned long move_flags;
6363 int ret = 0;
6364
6365 /* charge immigration isn't supported on the default hierarchy */
6366 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6367 return 0;
6368
6369 /*
6370 * Multi-process migrations only happen on the default hierarchy
6371 * where charge immigration is not used. Perform charge
6372 * immigration if @tset contains a leader and whine if there are
6373 * multiple.
6374 */
6375 p = NULL;
6376 cgroup_taskset_for_each_leader(leader, css, tset) {
6377 WARN_ON_ONCE(p);
6378 p = leader;
6379 memcg = mem_cgroup_from_css(css);
6380 }
6381 if (!p)
6382 return 0;
6383
6384 /*
6385 * We are now committed to this value whatever it is. Changes in this
6386 * tunable will only affect upcoming migrations, not the current one.
6387	 * So we need to save it, and use the saved value for the rest of this migration.
6388 */
6389 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
6390 if (!move_flags)
6391 return 0;
6392
6393 from = mem_cgroup_from_task(p);
6394
6395 VM_BUG_ON(from == memcg);
6396
6397 mm = get_task_mm(p);
6398 if (!mm)
6399 return 0;
6400	/* We move charges only when we move the owner of the mm */
6401 if (mm->owner == p) {
6402 VM_BUG_ON(mc.from);
6403 VM_BUG_ON(mc.to);
6404 VM_BUG_ON(mc.precharge);
6405 VM_BUG_ON(mc.moved_charge);
6406 VM_BUG_ON(mc.moved_swap);
6407
6408 spin_lock(&mc.lock);
6409 mc.mm = mm;
6410 mc.from = from;
6411 mc.to = memcg;
6412 mc.flags = move_flags;
6413 spin_unlock(&mc.lock);
6414 /* We set mc.moving_task later */
6415
6416 ret = mem_cgroup_precharge_mc(mm);
6417 if (ret)
6418 mem_cgroup_clear_mc();
6419 } else {
6420 mmput(mm);
6421 }
6422 return ret;
6423}
6424
6425static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6426{
6427 if (mc.to)
6428 mem_cgroup_clear_mc();
6429}
6430
6431static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6432 unsigned long addr, unsigned long end,
6433 struct mm_walk *walk)
6434{
6435 int ret = 0;
6436 struct vm_area_struct *vma = walk->vma;
6437 pte_t *pte;
6438 spinlock_t *ptl;
6439 enum mc_target_type target_type;
6440 union mc_target target;
6441 struct folio *folio;
6442
6443 ptl = pmd_trans_huge_lock(pmd, vma);
6444 if (ptl) {
6445 if (mc.precharge < HPAGE_PMD_NR) {
6446 spin_unlock(ptl);
6447 return 0;
6448 }
6449 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6450 if (target_type == MC_TARGET_PAGE) {
6451 folio = target.folio;
6452 if (folio_isolate_lru(folio)) {
6453 if (!mem_cgroup_move_account(folio, true,
6454 mc.from, mc.to)) {
6455 mc.precharge -= HPAGE_PMD_NR;
6456 mc.moved_charge += HPAGE_PMD_NR;
6457 }
6458 folio_putback_lru(folio);
6459 }
6460 folio_unlock(folio);
6461 folio_put(folio);
6462 } else if (target_type == MC_TARGET_DEVICE) {
6463 folio = target.folio;
6464 if (!mem_cgroup_move_account(folio, true,
6465 mc.from, mc.to)) {
6466 mc.precharge -= HPAGE_PMD_NR;
6467 mc.moved_charge += HPAGE_PMD_NR;
6468 }
6469 folio_unlock(folio);
6470 folio_put(folio);
6471 }
6472 spin_unlock(ptl);
6473 return 0;
6474 }
6475
6476retry:
6477 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6478 if (!pte)
6479 return 0;
6480 for (; addr != end; addr += PAGE_SIZE) {
6481 pte_t ptent = ptep_get(pte++);
6482 bool device = false;
6483 swp_entry_t ent;
6484
6485 if (!mc.precharge)
6486 break;
6487
6488 switch (get_mctgt_type(vma, addr, ptent, &target)) {
6489 case MC_TARGET_DEVICE:
6490 device = true;
6491 fallthrough;
6492 case MC_TARGET_PAGE:
6493 folio = target.folio;
6494 /*
6495			 * We can have part of a split pmd here. Moving it could
6496			 * be done, but it would be too convoluted, so simply
6497			 * ignore such a partial THP and keep it in the original
6498			 * memcg. There should be somebody mapping the head page.
6499 */
6500 if (folio_test_large(folio))
6501 goto put;
6502 if (!device && !folio_isolate_lru(folio))
6503 goto put;
6504 if (!mem_cgroup_move_account(folio, false,
6505 mc.from, mc.to)) {
6506 mc.precharge--;
6507 /* we uncharge from mc.from later. */
6508 mc.moved_charge++;
6509 }
6510 if (!device)
6511 folio_putback_lru(folio);
6512put: /* get_mctgt_type() gets & locks the page */
6513 folio_unlock(folio);
6514 folio_put(folio);
6515 break;
6516 case MC_TARGET_SWAP:
6517 ent = target.ent;
6518 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6519 mc.precharge--;
6520 mem_cgroup_id_get_many(mc.to, 1);
6521 /* we fixup other refcnts and charges later. */
6522 mc.moved_swap++;
6523 }
6524 break;
6525 default:
6526 break;
6527 }
6528 }
6529 pte_unmap_unlock(pte - 1, ptl);
6530 cond_resched();
6531
6532 if (addr != end) {
6533 /*
6534 * We have consumed all precharges we got in can_attach().
6535 * We try charge one by one, but don't do any additional
6536 * charges to mc.to if we have failed in charge once in attach()
6537 * phase.
6538 */
6539 ret = mem_cgroup_do_precharge(1);
6540 if (!ret)
6541 goto retry;
6542 }
6543
6544 return ret;
6545}
6546
6547static const struct mm_walk_ops charge_walk_ops = {
6548 .pmd_entry = mem_cgroup_move_charge_pte_range,
6549 .walk_lock = PGWALK_RDLOCK,
6550};
6551
6552static void mem_cgroup_move_charge(void)
6553{
6554 lru_add_drain_all();
6555 /*
6556 * Signal folio_memcg_lock() to take the memcg's move_lock
6557 * while we're moving its pages to another memcg. Then wait
6558 * for already started RCU-only updates to finish.
6559 */
6560 atomic_inc(&mc.from->moving_account);
6561 synchronize_rcu();
6562retry:
6563 if (unlikely(!mmap_read_trylock(mc.mm))) {
6564 /*
6565		 * Someone holding the mmap_lock might be waiting on our
6566 * waitq. So we cancel all extra charges, wake up all waiters,
6567 * and retry. Because we cancel precharges, we might not be able
6568 * to move enough charges, but moving charge is a best-effort
6569 * feature anyway, so it wouldn't be a big problem.
6570 */
6571 __mem_cgroup_clear_mc();
6572 cond_resched();
6573 goto retry;
6574 }
6575 /*
6576 * When we have consumed all precharges and failed in doing
6577 * additional charge, the page walk just aborts.
6578 */
6579 walk_page_range(mc.mm, 0, ULONG_MAX, &charge_walk_ops, NULL);
6580 mmap_read_unlock(mc.mm);
6581 atomic_dec(&mc.from->moving_account);
6582}
6583
6584static void mem_cgroup_move_task(void)
6585{
6586 if (mc.to) {
6587 mem_cgroup_move_charge();
6588 mem_cgroup_clear_mc();
6589 }
6590}
6591
6592#else /* !CONFIG_MMU */
6593static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6594{
6595 return 0;
6596}
6597static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6598{
6599}
6600static void mem_cgroup_move_task(void)
6601{
6602}
6603#endif
6604
6605#ifdef CONFIG_MEMCG_KMEM
6606static void mem_cgroup_fork(struct task_struct *task)
6607{
6608 /*
6609 * Set the update flag to cause task->objcg to be initialized lazily
6610 * on the first allocation. It can be done without any synchronization
6611 * because it's always performed on the current task, so does
6612	 * because it's always performed on the current task, as is
6613 */
6614 task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
6615}
6616
6617static void mem_cgroup_exit(struct task_struct *task)
6618{
6619 struct obj_cgroup *objcg = task->objcg;
6620
6621 objcg = (struct obj_cgroup *)
6622 ((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
6623 if (objcg)
6624 obj_cgroup_put(objcg);
6625
6626 /*
6627 * Some kernel allocations can happen after this point,
6628 * but let's ignore them. It can be done without any synchronization
6629	 * because it's always performed on the current task, as is
6630 * current_objcg_update().
6631 */
6632 task->objcg = NULL;
6633}
6634#endif
6635
6636#ifdef CONFIG_LRU_GEN
6637static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
6638{
6639 struct task_struct *task;
6640 struct cgroup_subsys_state *css;
6641
6642 /* find the first leader if there is any */
6643 cgroup_taskset_for_each_leader(task, css, tset)
6644 break;
6645
6646 if (!task)
6647 return;
6648
6649 task_lock(task);
6650 if (task->mm && READ_ONCE(task->mm->owner) == task)
6651 lru_gen_migrate_mm(task->mm);
6652 task_unlock(task);
6653}
6654#else
6655static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
6656#endif /* CONFIG_LRU_GEN */
6657
6658#ifdef CONFIG_MEMCG_KMEM
6659static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
6660{
6661 struct task_struct *task;
6662 struct cgroup_subsys_state *css;
6663
6664 cgroup_taskset_for_each(task, css, tset) {
6665 /* atomically set the update bit */
6666 set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
6667 }
6668}
6669#else
6670static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset) {}
6671#endif /* CONFIG_MEMCG_KMEM */
6672
6673#if defined(CONFIG_LRU_GEN) || defined(CONFIG_MEMCG_KMEM)
6674static void mem_cgroup_attach(struct cgroup_taskset *tset)
6675{
6676 mem_cgroup_lru_gen_attach(tset);
6677 mem_cgroup_kmem_attach(tset);
6678}
6679#endif
6680
6681static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6682{
6683 if (value == PAGE_COUNTER_MAX)
6684 seq_puts(m, "max\n");
6685 else
6686 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6687
6688 return 0;
6689}
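
/*
 * Example output (illustrative): a setting of PAGE_COUNTER_MAX is shown as
 * the literal string "max"; any other value is shown in bytes, so with 4KiB
 * pages a value of 262144 pages is printed as "1073741824".
 */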
6690
6691static u64 memory_current_read(struct cgroup_subsys_state *css,
6692 struct cftype *cft)
6693{
6694 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6695
6696 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6697}
6698
6699static u64 memory_peak_read(struct cgroup_subsys_state *css,
6700 struct cftype *cft)
6701{
6702 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6703
6704 return (u64)memcg->memory.watermark * PAGE_SIZE;
6705}
6706
6707static int memory_min_show(struct seq_file *m, void *v)
6708{
6709 return seq_puts_memcg_tunable(m,
6710 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6711}
6712
6713static ssize_t memory_min_write(struct kernfs_open_file *of,
6714 char *buf, size_t nbytes, loff_t off)
6715{
6716 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6717 unsigned long min;
6718 int err;
6719
6720 buf = strstrip(buf);
6721 err = page_counter_memparse(buf, "max", &min);
6722 if (err)
6723 return err;
6724
6725 page_counter_set_min(&memcg->memory, min);
6726
6727 return nbytes;
6728}
6729
6730static int memory_low_show(struct seq_file *m, void *v)
6731{
6732 return seq_puts_memcg_tunable(m,
6733 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6734}
6735
6736static ssize_t memory_low_write(struct kernfs_open_file *of,
6737 char *buf, size_t nbytes, loff_t off)
6738{
6739 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6740 unsigned long low;
6741 int err;
6742
6743 buf = strstrip(buf);
6744 err = page_counter_memparse(buf, "max", &low);
6745 if (err)
6746 return err;
6747
6748 page_counter_set_low(&memcg->memory, low);
6749
6750 return nbytes;
6751}
6752
6753static int memory_high_show(struct seq_file *m, void *v)
6754{
6755 return seq_puts_memcg_tunable(m,
6756 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6757}
6758
6759static ssize_t memory_high_write(struct kernfs_open_file *of,
6760 char *buf, size_t nbytes, loff_t off)
6761{
6762 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6763 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6764 bool drained = false;
6765 unsigned long high;
6766 int err;
6767
6768 buf = strstrip(buf);
6769 err = page_counter_memparse(buf, "max", &high);
6770 if (err)
6771 return err;
6772
6773 page_counter_set_high(&memcg->memory, high);
6774
6775 for (;;) {
6776 unsigned long nr_pages = page_counter_read(&memcg->memory);
6777 unsigned long reclaimed;
6778
6779 if (nr_pages <= high)
6780 break;
6781
6782 if (signal_pending(current))
6783 break;
6784
6785 if (!drained) {
6786 drain_all_stock(memcg);
6787 drained = true;
6788 continue;
6789 }
6790
6791 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6792 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP);
6793
6794 if (!reclaimed && !nr_retries--)
6795 break;
6796 }
6797
6798 memcg_wb_domain_size_changed(memcg);
6799 return nbytes;
6800}
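
/*
 * Usage sketch (illustrative): from userspace memory.high takes a byte count
 * or "max", e.g.:
 *
 *	# echo 512M > /sys/fs/cgroup/<group>/memory.high
 *
 * The write returns once usage has been reclaimed below the new high
 * boundary, the retries above are exhausted, or a signal is pending. Unlike
 * memory.max, exceeding the high boundary never invokes the OOM killer.
 */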
6801
6802static int memory_max_show(struct seq_file *m, void *v)
6803{
6804 return seq_puts_memcg_tunable(m,
6805 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6806}
6807
6808static ssize_t memory_max_write(struct kernfs_open_file *of,
6809 char *buf, size_t nbytes, loff_t off)
6810{
6811 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6812 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6813 bool drained = false;
6814 unsigned long max;
6815 int err;
6816
6817 buf = strstrip(buf);
6818 err = page_counter_memparse(buf, "max", &max);
6819 if (err)
6820 return err;
6821
6822 xchg(&memcg->memory.max, max);
6823
6824 for (;;) {
6825 unsigned long nr_pages = page_counter_read(&memcg->memory);
6826
6827 if (nr_pages <= max)
6828 break;
6829
6830 if (signal_pending(current))
6831 break;
6832
6833 if (!drained) {
6834 drain_all_stock(memcg);
6835 drained = true;
6836 continue;
6837 }
6838
6839 if (nr_reclaims) {
6840 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6841 GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP))
6842 nr_reclaims--;
6843 continue;
6844 }
6845
6846 memcg_memory_event(memcg, MEMCG_OOM);
6847 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6848 break;
6849 }
6850
6851 memcg_wb_domain_size_changed(memcg);
6852 return nbytes;
6853}
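
/*
 * Usage sketch (illustrative): memory.max is the hard limit, e.g.:
 *
 *	# echo 1G > /sys/fs/cgroup/<group>/memory.max
 *	# echo max > /sys/fs/cgroup/<group>/memory.max    (remove the limit)
 *
 * If usage cannot be reclaimed below the new limit within the retry budget
 * above, the group's OOM killer is invoked until usage fits or no eligible
 * task is left to kill.
 */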
6854
6855/*
6856 * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
6857 * if any new events become available.
6858 */
6859static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6860{
6861 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6862 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6863 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6864 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6865 seq_printf(m, "oom_kill %lu\n",
6866 atomic_long_read(&events[MEMCG_OOM_KILL]));
6867 seq_printf(m, "oom_group_kill %lu\n",
6868 atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
6869}
6870
6871static int memory_events_show(struct seq_file *m, void *v)
6872{
6873 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6874
6875 __memory_events_show(m, memcg->memory_events);
6876 return 0;
6877}
6878
6879static int memory_events_local_show(struct seq_file *m, void *v)
6880{
6881 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6882
6883 __memory_events_show(m, memcg->memory_events_local);
6884 return 0;
6885}
6886
6887static int memory_stat_show(struct seq_file *m, void *v)
6888{
6889 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6890 char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
6891 struct seq_buf s;
6892
6893 if (!buf)
6894 return -ENOMEM;
6895 seq_buf_init(&s, buf, PAGE_SIZE);
6896 memory_stat_format(memcg, &s);
6897 seq_puts(m, buf);
6898 kfree(buf);
6899 return 0;
6900}
6901
6902#ifdef CONFIG_NUMA
6903static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6904 int item)
6905{
6906 return lruvec_page_state(lruvec, item) *
6907 memcg_page_state_output_unit(item);
6908}
6909
6910static int memory_numa_stat_show(struct seq_file *m, void *v)
6911{
6912 int i;
6913 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6914
6915 mem_cgroup_flush_stats(memcg);
6916
6917 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6918 int nid;
6919
6920 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6921 continue;
6922
6923 seq_printf(m, "%s", memory_stats[i].name);
6924 for_each_node_state(nid, N_MEMORY) {
6925 u64 size;
6926 struct lruvec *lruvec;
6927
6928 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6929 size = lruvec_page_state_output(lruvec,
6930 memory_stats[i].idx);
6931 seq_printf(m, " N%d=%llu", nid, size);
6932 }
6933 seq_putc(m, '\n');
6934 }
6935
6936 return 0;
6937}
6938#endif
6939
6940static int memory_oom_group_show(struct seq_file *m, void *v)
6941{
6942 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6943
6944 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
6945
6946 return 0;
6947}
6948
6949static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6950 char *buf, size_t nbytes, loff_t off)
6951{
6952 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6953 int ret, oom_group;
6954
6955 buf = strstrip(buf);
6956 if (!buf)
6957 return -EINVAL;
6958
6959 ret = kstrtoint(buf, 0, &oom_group);
6960 if (ret)
6961 return ret;
6962
6963 if (oom_group != 0 && oom_group != 1)
6964 return -EINVAL;
6965
6966 WRITE_ONCE(memcg->oom_group, oom_group);
6967
6968 return nbytes;
6969}
6970
6971static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
6972 size_t nbytes, loff_t off)
6973{
6974 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6975 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6976 unsigned long nr_to_reclaim, nr_reclaimed = 0;
6977 unsigned int reclaim_options;
6978 int err;
6979
6980 buf = strstrip(buf);
6981 err = page_counter_memparse(buf, "", &nr_to_reclaim);
6982 if (err)
6983 return err;
6984
6985 reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
6986 while (nr_reclaimed < nr_to_reclaim) {
6987 /* Will converge on zero, but reclaim enforces a minimum */
6988 unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4;
6989 unsigned long reclaimed;
6990
6991 if (signal_pending(current))
6992 return -EINTR;
6993
6994 /*
6995		 * On the final attempt, drain percpu lru caches in the
6996 * hope of introducing more evictable pages for
6997 * try_to_free_mem_cgroup_pages().
6998 */
6999 if (!nr_retries)
7000 lru_add_drain_all();
7001
7002 reclaimed = try_to_free_mem_cgroup_pages(memcg,
7003 batch_size, GFP_KERNEL, reclaim_options);
7004
7005 if (!reclaimed && !nr_retries--)
7006 return -EAGAIN;
7007
7008 nr_reclaimed += reclaimed;
7009 }
7010
7011 return nbytes;
7012}
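
/*
 * Usage sketch (illustrative): memory.reclaim triggers proactive reclaim of
 * a requested amount without changing any limits, e.g.:
 *
 *	# echo 100M > /sys/fs/cgroup/<group>/memory.reclaim
 *
 * The write succeeds once at least the requested amount has been reclaimed,
 * returns -EAGAIN if reclaim stalls after the retries above, or -EINTR if a
 * signal is pending.
 */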
7013
7014static struct cftype memory_files[] = {
7015 {
7016 .name = "current",
7017 .flags = CFTYPE_NOT_ON_ROOT,
7018 .read_u64 = memory_current_read,
7019 },
7020 {
7021 .name = "peak",
7022 .flags = CFTYPE_NOT_ON_ROOT,
7023 .read_u64 = memory_peak_read,
7024 },
7025 {
7026 .name = "min",
7027 .flags = CFTYPE_NOT_ON_ROOT,
7028 .seq_show = memory_min_show,
7029 .write = memory_min_write,
7030 },
7031 {
7032 .name = "low",
7033 .flags = CFTYPE_NOT_ON_ROOT,
7034 .seq_show = memory_low_show,
7035 .write = memory_low_write,
7036 },
7037 {
7038 .name = "high",
7039 .flags = CFTYPE_NOT_ON_ROOT,
7040 .seq_show = memory_high_show,
7041 .write = memory_high_write,
7042 },
7043 {
7044 .name = "max",
7045 .flags = CFTYPE_NOT_ON_ROOT,
7046 .seq_show = memory_max_show,
7047 .write = memory_max_write,
7048 },
7049 {
7050 .name = "events",
7051 .flags = CFTYPE_NOT_ON_ROOT,
7052 .file_offset = offsetof(struct mem_cgroup, events_file),
7053 .seq_show = memory_events_show,
7054 },
7055 {
7056 .name = "events.local",
7057 .flags = CFTYPE_NOT_ON_ROOT,
7058 .file_offset = offsetof(struct mem_cgroup, events_local_file),
7059 .seq_show = memory_events_local_show,
7060 },
7061 {
7062 .name = "stat",
7063 .seq_show = memory_stat_show,
7064 },
7065#ifdef CONFIG_NUMA
7066 {
7067 .name = "numa_stat",
7068 .seq_show = memory_numa_stat_show,
7069 },
7070#endif
7071 {
7072 .name = "oom.group",
7073 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
7074 .seq_show = memory_oom_group_show,
7075 .write = memory_oom_group_write,
7076 },
7077 {
7078 .name = "reclaim",
7079 .flags = CFTYPE_NS_DELEGATABLE,
7080 .write = memory_reclaim,
7081 },
7082 { } /* terminate */
7083};
7084
7085struct cgroup_subsys memory_cgrp_subsys = {
7086 .css_alloc = mem_cgroup_css_alloc,
7087 .css_online = mem_cgroup_css_online,
7088 .css_offline = mem_cgroup_css_offline,
7089 .css_released = mem_cgroup_css_released,
7090 .css_free = mem_cgroup_css_free,
7091 .css_reset = mem_cgroup_css_reset,
7092 .css_rstat_flush = mem_cgroup_css_rstat_flush,
7093 .can_attach = mem_cgroup_can_attach,
7094#if defined(CONFIG_LRU_GEN) || defined(CONFIG_MEMCG_KMEM)
7095 .attach = mem_cgroup_attach,
7096#endif
7097 .cancel_attach = mem_cgroup_cancel_attach,
7098 .post_attach = mem_cgroup_move_task,
7099#ifdef CONFIG_MEMCG_KMEM
7100 .fork = mem_cgroup_fork,
7101 .exit = mem_cgroup_exit,
7102#endif
7103 .dfl_cftypes = memory_files,
7104 .legacy_cftypes = mem_cgroup_legacy_files,
7105 .early_init = 0,
7106};
7107
7108/*
7109 * This function calculates an individual cgroup's effective
7110 * protection which is derived from its own memory.min/low, its
7111 * parent's and siblings' settings, as well as the actual memory
7112 * distribution in the tree.
7113 *
7114 * The following rules apply to the effective protection values:
7115 *
7116 * 1. At the first level of reclaim, effective protection is equal to
7117 * the declared protection in memory.min and memory.low.
7118 *
7119 * 2. To enable safe delegation of the protection configuration, at
7120 * subsequent levels the effective protection is capped to the
7121 * parent's effective protection.
7122 *
7123 * 3. To make complex and dynamic subtrees easier to configure, the
7124 * user is allowed to overcommit the declared protection at a given
7125 * level. If that is the case, the parent's effective protection is
7126 * distributed to the children in proportion to how much protection
7127 * they have declared and how much of it they are utilizing.
7128 *
7129 * This makes distribution proportional, but also work-conserving:
7130 * if one cgroup claims much more protection than it uses memory,
7131 * the unused remainder is available to its siblings.
7132 *
7133 * 4. Conversely, when the declared protection is undercommitted at a
7134 * given level, the distribution of the larger parental protection
7135 * budget is NOT proportional. A cgroup's protection from a sibling
7136 * is capped to its own memory.min/low setting.
7137 *
7138 * 5. However, to allow protecting recursive subtrees from each other
7139 * without having to declare each individual cgroup's fixed share
7140 * of the ancestor's claim to protection, any unutilized -
7141 * "floating" - protection from up the tree is distributed in
7142 * proportion to each cgroup's *usage*. This makes the protection
7143 * neutral wrt sibling cgroups and lets them compete freely over
7144 * the shared parental protection budget, but it protects the
7145 * subtree as a whole from neighboring subtrees.
7146 *
7147 * Note that 4. and 5. are not in conflict: 4. is about protecting
7148 * against immediate siblings whereas 5. is about protecting against
7149 * neighboring subtrees.
7150 */
7151static unsigned long effective_protection(unsigned long usage,
7152 unsigned long parent_usage,
7153 unsigned long setting,
7154 unsigned long parent_effective,
7155 unsigned long siblings_protected)
7156{
7157 unsigned long protected;
7158 unsigned long ep;
7159
7160 protected = min(usage, setting);
7161 /*
7162 * If all cgroups at this level combined claim and use more
7163 * protection than what the parent affords them, distribute
7164 * shares in proportion to utilization.
7165 *
7166 * We are using actual utilization rather than the statically
7167 * claimed protection in order to be work-conserving: claimed
7168 * but unused protection is available to siblings that would
7169 * otherwise get a smaller chunk than what they claimed.
7170 */
7171 if (siblings_protected > parent_effective)
7172 return protected * parent_effective / siblings_protected;
7173
7174 /*
7175 * Ok, utilized protection of all children is within what the
7176 * parent affords them, so we know whatever this child claims
7177 * and utilizes is effectively protected.
7178 *
7179 * If there is unprotected usage beyond this value, reclaim
7180 * will apply pressure in proportion to that amount.
7181 *
7182 * If there is unutilized protection, the cgroup will be fully
7183 * shielded from reclaim, but we do return a smaller value for
7184 * protection than what the group could enjoy in theory. This
7185 * is okay. With the overcommit distribution above, effective
7186 * protection is always dependent on how memory is actually
7187 * consumed among the siblings anyway.
7188 */
7189 ep = protected;
7190
7191 /*
7192 * If the children aren't claiming (all of) the protection
7193 * afforded to them by the parent, distribute the remainder in
7194 * proportion to the (unprotected) memory of each cgroup. That
7195 * way, cgroups that aren't explicitly prioritized wrt each
7196 * other compete freely over the allowance, but they are
7197 * collectively protected from neighboring trees.
7198 *
7199 * We're using unprotected memory for the weight so that if
7200 * some cgroups DO claim explicit protection, we don't protect
7201 * the same bytes twice.
7202 *
7203 * Check both usage and parent_usage against the respective
7204 * protected values. One should imply the other, but they
7205 * aren't read atomically - make sure the division is sane.
7206 */
7207 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
7208 return ep;
7209 if (parent_effective > siblings_protected &&
7210 parent_usage > siblings_protected &&
7211 usage > protected) {
7212 unsigned long unclaimed;
7213
7214 unclaimed = parent_effective - siblings_protected;
7215 unclaimed *= usage - protected;
7216 unclaimed /= parent_usage - siblings_protected;
7217
7218 ep += unclaimed;
7219 }
7220
7221 return ep;
7222}
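
/*
 * Worked example (illustrative): say a parent has an effective protection of
 * 100M and two children, each with memory.low=80M and each using at least
 * that much. Then siblings_protected = 160M exceeds parent_effective = 100M,
 * and each child's effective low is scaled to 80M * 100M / 160M = 50M. If
 * the children instead claimed only 30M each, the unclaimed 40M of the
 * parent's budget would be handed out in proportion to each child's
 * unprotected usage, provided the memory_recursiveprot mount option
 * (CGRP_ROOT_MEMORY_RECURSIVE_PROT) is enabled.
 */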
7223
7224/**
7225 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
7226 * @root: the top ancestor of the sub-tree being checked
7227 * @memcg: the memory cgroup to check
7228 *
7229 * WARNING: This function is not stateless! It can only be used as part
7230 * of a top-down tree iteration, not for isolated queries.
7231 */
7232void mem_cgroup_calculate_protection(struct mem_cgroup *root,
7233 struct mem_cgroup *memcg)
7234{
7235 unsigned long usage, parent_usage;
7236 struct mem_cgroup *parent;
7237
7238 if (mem_cgroup_disabled())
7239 return;
7240
7241 if (!root)
7242 root = root_mem_cgroup;
7243
7244 /*
7245 * Effective values of the reclaim targets are ignored so they
7246 * can be stale. Have a look at mem_cgroup_protection for more
7247 * details.
7248 * TODO: calculation should be more robust so that we do not need
7249 * that special casing.
7250 */
7251 if (memcg == root)
7252 return;
7253
7254 usage = page_counter_read(&memcg->memory);
7255 if (!usage)
7256 return;
7257
7258 parent = parent_mem_cgroup(memcg);
7259
7260 if (parent == root) {
7261 memcg->memory.emin = READ_ONCE(memcg->memory.min);
7262 memcg->memory.elow = READ_ONCE(memcg->memory.low);
7263 return;
7264 }
7265
7266 parent_usage = page_counter_read(&parent->memory);
7267
7268 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
7269 READ_ONCE(memcg->memory.min),
7270 READ_ONCE(parent->memory.emin),
7271 atomic_long_read(&parent->memory.children_min_usage)));
7272
7273 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
7274 READ_ONCE(memcg->memory.low),
7275 READ_ONCE(parent->memory.elow),
7276 atomic_long_read(&parent->memory.children_low_usage)));
7277}
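
/*
 * Minimal usage sketch (illustrative, not part of the original source):
 * per the WARNING above, callers walk the subtree top-down, e.g. with
 * mem_cgroup_iter(), refreshing each cgroup's effective protection before
 * consulting it, roughly:
 *
 *	memcg = mem_cgroup_iter(target, NULL, NULL);
 *	do {
 *		mem_cgroup_calculate_protection(target, memcg);
 *		... reclaim based on memcg->memory.emin / elow ...
 *	} while ((memcg = mem_cgroup_iter(target, memcg, NULL)));
 */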
7278
7279static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
7280 gfp_t gfp)
7281{
7282 int ret;
7283
7284 ret = try_charge(memcg, gfp, folio_nr_pages(folio));
7285 if (ret)
7286 goto out;
7287
7288 mem_cgroup_commit_charge(folio, memcg);
7289out:
7290 return ret;
7291}
7292
7293int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
7294{
7295 struct mem_cgroup *memcg;
7296 int ret;
7297
7298 memcg = get_mem_cgroup_from_mm(mm);
7299 ret = charge_memcg(folio, memcg, gfp);
7300 css_put(&memcg->css);
7301
7302 return ret;
7303}
7304
7305/**
7306 * mem_cgroup_hugetlb_try_charge - try to charge the memcg for a hugetlb folio
7307 * @memcg: memcg to charge.
7308 * @gfp: reclaim mode.
7309 * @nr_pages: number of pages to charge.
7310 *
7311 * This function is called when allocating a huge page folio to determine if
7312 * the memcg has the capacity for it. It does not commit the charge yet,
7313 * as the hugetlb folio itself has not been obtained from the hugetlb pool.
7314 *
7315 * Once we have obtained the hugetlb folio, we can call
7316 * mem_cgroup_commit_charge() to commit the charge. If we fail to obtain the
7317 * folio, we should instead call mem_cgroup_cancel_charge() to undo the effect
7318 * of try_charge().
7319 *
7320 * Returns 0 on success. Otherwise, an error code is returned.
7321 */
7322int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
7323 long nr_pages)
7324{
7325 /*
7326	 * If hugetlb memcg charging is not enabled, do not fail the hugetlb allocation,
7327	 * but do not attempt to commit the charge later (or cancel it on error) either.
7328 */
7329 if (mem_cgroup_disabled() || !memcg ||
7330 !cgroup_subsys_on_dfl(memory_cgrp_subsys) ||
7331 !(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
7332 return -EOPNOTSUPP;
7333
7334 if (try_charge(memcg, gfp, nr_pages))
7335 return -ENOMEM;
7336
7337 return 0;
7338}
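
/*
 * Illustrative caller sketch (not part of the original source), following
 * the try/commit/cancel protocol described above. hugetlb_folio_alloc()
 * is a hypothetical stand-in for obtaining the folio from the hugetlb
 * pool, and the mem_cgroup_cancel_charge() arguments are assumed:
 *
 *	ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages);
 *	if (ret == -ENOMEM)
 *		return NULL;
 *	folio = hugetlb_folio_alloc();
 *	if (!folio) {
 *		if (!ret)
 *			mem_cgroup_cancel_charge(memcg, nr_pages);
 *		return NULL;
 *	}
 *	if (!ret)
 *		mem_cgroup_commit_charge(folio, memcg);
 */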
7339
7340/**
7341 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
7342 * @folio: folio to charge.
7343 * @mm: mm context of the victim
7344 * @gfp: reclaim mode
7345 * @entry: swap entry for which the folio is allocated
7346 *
7347 * This function charges a folio allocated for swapin. Please call this before
7348 * adding the folio to the swapcache.
7349 *
7350 * Returns 0 on success. Otherwise, an error code is returned.
7351 */
7352int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
7353 gfp_t gfp, swp_entry_t entry)
7354{
7355 struct mem_cgroup *memcg;
7356 unsigned short id;
7357 int ret;
7358
7359 if (mem_cgroup_disabled())
7360 return 0;
7361
7362 id = lookup_swap_cgroup_id(entry);
7363 rcu_read_lock();
7364 memcg = mem_cgroup_from_id(id);
7365 if (!memcg || !css_tryget_online(&memcg->css))
7366 memcg = get_mem_cgroup_from_mm(mm);
7367 rcu_read_unlock();
7368
7369 ret = charge_memcg(folio, memcg, gfp);
7370
7371 css_put(&memcg->css);
7372 return ret;
7373}
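
/*
 * Illustrative swapin ordering (not part of the original source): charge
 * the freshly allocated folio before inserting it into the swap cache,
 * then drop the duplicate swap charge once the insertion succeeded (see
 * mem_cgroup_swapin_uncharge_swap() below). Roughly:
 *
 *	if (mem_cgroup_swapin_charge_folio(folio, mm, gfp, entry))
 *		goto fail;
 *	if (add_to_swap_cache(folio, entry, gfp, NULL))
 *		goto fail;
 *	mem_cgroup_swapin_uncharge_swap(entry);
 */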
7374
7375/*
7376 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
7377 * @entry: swap entry for which the page is charged
7378 *
7379 * Call this function after successfully adding the charged page to swapcache.
7380 *
7381 * Note: This function assumes the page for which the swap slot is being
7382 * uncharged is an order-0 page.
7383 */
7384void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
7385{
7386 /*
7387 * Cgroup1's unified memory+swap counter has been charged with the
7388 * new swapcache page, finish the transfer by uncharging the swap
7389 * slot. The swap slot would also get uncharged when it dies, but
7390 * it can stick around indefinitely and we'd count the page twice
7391 * the entire time.
7392 *
7393 * Cgroup2 has separate resource counters for memory and swap,
7394 * so this is a non-issue here. Memory and swap charge lifetimes
7395 * correspond 1:1 to page and swap slot lifetimes: we charge the
7396 * page to memory here, and uncharge swap when the slot is freed.
7397 */
7398 if (!mem_cgroup_disabled() && do_memsw_account()) {
7399 /*
7400 * The swap entry might not get freed for a long time,
7401 * let's not wait for it. The page already received a
7402 * memory+swap charge, drop the swap entry duplicate.
7403 */
7404 mem_cgroup_uncharge_swap(entry, 1);
7405 }
7406}
7407
7408struct uncharge_gather {
7409 struct mem_cgroup *memcg;
7410 unsigned long nr_memory;
7411 unsigned long pgpgout;
7412 unsigned long nr_kmem;
7413 int nid;
7414};
7415
7416static inline void uncharge_gather_clear(struct uncharge_gather *ug)
7417{
7418 memset(ug, 0, sizeof(*ug));
7419}
7420
7421static void uncharge_batch(const struct uncharge_gather *ug)
7422{
7423 unsigned long flags;
7424
7425 if (ug->nr_memory) {
7426 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
7427 if (do_memsw_account())
7428 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
7429 if (ug->nr_kmem)
7430 memcg_account_kmem(ug->memcg, -ug->nr_kmem);
7431 memcg_oom_recover(ug->memcg);
7432 }
7433
7434 local_irq_save(flags);
7435 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
7436 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
7437 memcg_check_events(ug->memcg, ug->nid);
7438 local_irq_restore(flags);
7439
7440 /* drop reference from uncharge_folio */
7441 css_put(&ug->memcg->css);
7442}
7443
7444static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
7445{
7446 long nr_pages;
7447 struct mem_cgroup *memcg;
7448 struct obj_cgroup *objcg;
7449
7450 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7451
7452 /*
7453 * Nobody should be changing or seriously looking at
7454	 * folio memcg or objcg at this point; we have fully
7455 * exclusive access to the folio.
7456 */
7457 if (folio_memcg_kmem(folio)) {
7458 objcg = __folio_objcg(folio);
7459 /*
7460 * This get matches the put at the end of the function and
7461 * kmem pages do not hold memcg references anymore.
7462 */
7463 memcg = get_mem_cgroup_from_objcg(objcg);
7464 } else {
7465 memcg = __folio_memcg(folio);
7466 }
7467
7468 if (!memcg)
7469 return;
7470
7471 if (ug->memcg != memcg) {
7472 if (ug->memcg) {
7473 uncharge_batch(ug);
7474 uncharge_gather_clear(ug);
7475 }
7476 ug->memcg = memcg;
7477 ug->nid = folio_nid(folio);
7478
7479 /* pairs with css_put in uncharge_batch */
7480 css_get(&memcg->css);
7481 }
7482
7483 nr_pages = folio_nr_pages(folio);
7484
7485 if (folio_memcg_kmem(folio)) {
7486 ug->nr_memory += nr_pages;
7487 ug->nr_kmem += nr_pages;
7488
7489 folio->memcg_data = 0;
7490 obj_cgroup_put(objcg);
7491 } else {
7492 /* LRU pages aren't accounted at the root level */
7493 if (!mem_cgroup_is_root(memcg))
7494 ug->nr_memory += nr_pages;
7495 ug->pgpgout++;
7496
7497 folio->memcg_data = 0;
7498 }
7499
7500 css_put(&memcg->css);
7501}
7502
7503void __mem_cgroup_uncharge(struct folio *folio)
7504{
7505 struct uncharge_gather ug;
7506
7507 /* Don't touch folio->lru of any random page, pre-check: */
7508 if (!folio_memcg(folio))
7509 return;
7510
7511 uncharge_gather_clear(&ug);
7512 uncharge_folio(folio, &ug);
7513 uncharge_batch(&ug);
7514}
7515
7516void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
7517{
7518 struct uncharge_gather ug;
7519 unsigned int i;
7520
7521 uncharge_gather_clear(&ug);
7522 for (i = 0; i < folios->nr; i++)
7523 uncharge_folio(folios->folios[i], &ug);
7524 if (ug.memcg)
7525 uncharge_batch(&ug);
7526}
7527
7528/**
7529 * mem_cgroup_replace_folio - Charge a folio's replacement.
7530 * @old: Currently circulating folio.
7531 * @new: Replacement folio.
7532 *
7533 * Charge @new as a replacement folio for @old. @old will
7534 * be uncharged upon free. This is only used by the page cache
7535 * (in replace_page_cache_folio()).
7536 *
7537 * Both folios must be locked, @new->mapping must be set up.
7538 */
7539void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
7540{
7541 struct mem_cgroup *memcg;
7542 long nr_pages = folio_nr_pages(new);
7543 unsigned long flags;
7544
7545 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7546 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7547 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7548 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
7549
7550 if (mem_cgroup_disabled())
7551 return;
7552
7553 /* Page cache replacement: new folio already charged? */
7554 if (folio_memcg(new))
7555 return;
7556
7557 memcg = folio_memcg(old);
7558 VM_WARN_ON_ONCE_FOLIO(!memcg, old);
7559 if (!memcg)
7560 return;
7561
7562 /* Force-charge the new page. The old one will be freed soon */
7563 if (!mem_cgroup_is_root(memcg)) {
7564 page_counter_charge(&memcg->memory, nr_pages);
7565 if (do_memsw_account())
7566 page_counter_charge(&memcg->memsw, nr_pages);
7567 }
7568
7569 css_get(&memcg->css);
7570 commit_charge(new, memcg);
7571
7572 local_irq_save(flags);
7573 mem_cgroup_charge_statistics(memcg, nr_pages);
7574 memcg_check_events(memcg, folio_nid(new));
7575 local_irq_restore(flags);
7576}
7577
7578/**
7579 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
7580 * @old: Currently circulating folio.
7581 * @new: Replacement folio.
7582 *
7583 * Transfer the memcg data from the old folio to the new folio for migration.
7584 * The old folio's data info will be cleared. Note that the memory counters
7585 * will remain unchanged throughout the process.
7586 *
7587 * Both folios must be locked, @new->mapping must be set up.
7588 */
7589void mem_cgroup_migrate(struct folio *old, struct folio *new)
7590{
7591 struct mem_cgroup *memcg;
7592
7593 VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7594 VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7595 VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7596 VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
7597
7598 if (mem_cgroup_disabled())
7599 return;
7600
7601 memcg = folio_memcg(old);
7602 /*
7603 * Note that it is normal to see !memcg for a hugetlb folio.
7604	 * For example, it could have been allocated when memory_hugetlb_accounting
7605 * was not selected.
7606 */
7607 VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
7608 if (!memcg)
7609 return;
7610
7611 /* Transfer the charge and the css ref */
7612 commit_charge(new, memcg);
7613 /*
7614 * If the old folio is a large folio and is in the split queue, it needs
7615	 * to be removed from the split queue now, to avoid looking up an incorrect
7616 * split queue in destroy_large_folio() after the memcg of the old folio
7617 * is cleared.
7618 *
7619 * In addition, the old folio is about to be freed after migration, so
7620 * removing from the split queue a bit earlier seems reasonable.
7621 */
7622 if (folio_test_large(old) && folio_test_large_rmappable(old))
7623 folio_undo_large_rmappable(old);
7624 old->memcg_data = 0;
7625}
7626
7627DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
7628EXPORT_SYMBOL(memcg_sockets_enabled_key);
7629
7630void mem_cgroup_sk_alloc(struct sock *sk)
7631{
7632 struct mem_cgroup *memcg;
7633
7634 if (!mem_cgroup_sockets_enabled)
7635 return;
7636
7637	/* Do not associate the sock with an unrelated interrupted task's memcg. */
7638 if (!in_task())
7639 return;
7640
7641 rcu_read_lock();
7642 memcg = mem_cgroup_from_task(current);
7643 if (mem_cgroup_is_root(memcg))
7644 goto out;
7645 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
7646 goto out;
7647 if (css_tryget(&memcg->css))
7648 sk->sk_memcg = memcg;
7649out:
7650 rcu_read_unlock();
7651}
7652
7653void mem_cgroup_sk_free(struct sock *sk)
7654{
7655 if (sk->sk_memcg)
7656 css_put(&sk->sk_memcg->css);
7657}
7658
7659/**
7660 * mem_cgroup_charge_skmem - charge socket memory
7661 * @memcg: memcg to charge
7662 * @nr_pages: number of pages to charge
7663 * @gfp_mask: reclaim mode
7664 *
7665 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7666 * @memcg's configured limit, %false if it doesn't.
7667 */
7668bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
7669 gfp_t gfp_mask)
7670{
7671 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7672 struct page_counter *fail;
7673
7674 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7675 memcg->tcpmem_pressure = 0;
7676 return true;
7677 }
7678 memcg->tcpmem_pressure = 1;
7679 if (gfp_mask & __GFP_NOFAIL) {
7680 page_counter_charge(&memcg->tcpmem, nr_pages);
7681 return true;
7682 }
7683 return false;
7684 }
7685
7686 if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
7687 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7688 return true;
7689 }
7690
7691 return false;
7692}
7693
7694/**
7695 * mem_cgroup_uncharge_skmem - uncharge socket memory
7696 * @memcg: memcg to uncharge
7697 * @nr_pages: number of pages to uncharge
7698 */
7699void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7700{
7701 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7702 page_counter_uncharge(&memcg->tcpmem, nr_pages);
7703 return;
7704 }
7705
7706 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7707
7708 refill_stock(memcg, nr_pages);
7709}
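
/*
 * Illustrative pairing (not part of the original source): the networking
 * core is expected to charge a socket's memcg when socket buffer memory
 * grows and to uncharge the same amount on release, roughly:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages, gfp))
 *		... back off, the cgroup is over its limit ...
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
 *		mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
 */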
7710
7711static int __init cgroup_memory(char *s)
7712{
7713 char *token;
7714
7715 while ((token = strsep(&s, ",")) != NULL) {
7716 if (!*token)
7717 continue;
7718 if (!strcmp(token, "nosocket"))
7719 cgroup_memory_nosocket = true;
7720 if (!strcmp(token, "nokmem"))
7721 cgroup_memory_nokmem = true;
7722 if (!strcmp(token, "nobpf"))
7723 cgroup_memory_nobpf = true;
7724 }
7725 return 1;
7726}
7727__setup("cgroup.memory=", cgroup_memory);
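
/*
 * Example: booting with "cgroup.memory=nosocket,nokmem" disables socket
 * and kernel memory accounting; the "nobpf" token likewise disables BPF
 * memory accounting.
 */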
7728
7729/*
7730 * subsys_initcall() for memory controller.
7731 *
7732 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7733 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7734 * basically everything that doesn't depend on a specific mem_cgroup structure
7735 * should be initialized from here.
7736 */
7737static int __init mem_cgroup_init(void)
7738{
7739 int cpu, node;
7740
7741 /*
7742 * Currently s32 type (can refer to struct batched_lruvec_stat) is
7743 * used for per-memcg-per-cpu caching of per-node statistics. In order
7744 * to work fine, we should make sure that the overfill threshold can't
7745 * exceed S32_MAX / PAGE_SIZE.
7746 */
7747 BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7748
7749 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7750 memcg_hotplug_cpu_dead);
7751
7752 for_each_possible_cpu(cpu)
7753 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7754 drain_local_stock);
7755
7756 for_each_node(node) {
7757 struct mem_cgroup_tree_per_node *rtpn;
7758
7759 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, node);
7760
7761 rtpn->rb_root = RB_ROOT;
7762 rtpn->rb_rightmost = NULL;
7763 spin_lock_init(&rtpn->lock);
7764 soft_limit_tree.rb_tree_per_node[node] = rtpn;
7765 }
7766
7767 return 0;
7768}
7769subsys_initcall(mem_cgroup_init);
7770
7771#ifdef CONFIG_SWAP
7772static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7773{
7774 while (!refcount_inc_not_zero(&memcg->id.ref)) {
7775 /*
7776		 * The root cgroup cannot be destroyed, so its refcount must
7777 * always be >= 1.
7778 */
7779 if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
7780 VM_BUG_ON(1);
7781 break;
7782 }
7783 memcg = parent_mem_cgroup(memcg);
7784 if (!memcg)
7785 memcg = root_mem_cgroup;
7786 }
7787 return memcg;
7788}
7789
7790/**
7791 * mem_cgroup_swapout - transfer a memsw charge to swap
7792 * @folio: folio whose memsw charge to transfer
7793 * @entry: swap entry to move the charge to
7794 *
7795 * Transfer the memsw charge of @folio to @entry.
7796 */
7797void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
7798{
7799 struct mem_cgroup *memcg, *swap_memcg;
7800 unsigned int nr_entries;
7801 unsigned short oldid;
7802
7803 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7804 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
7805
7806 if (mem_cgroup_disabled())
7807 return;
7808
7809 if (!do_memsw_account())
7810 return;
7811
7812 memcg = folio_memcg(folio);
7813
7814 VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7815 if (!memcg)
7816 return;
7817
7818 /*
7819 * In case the memcg owning these pages has been offlined and doesn't
7820 * have an ID allocated to it anymore, charge the closest online
7821 * ancestor for the swap instead and transfer the memory+swap charge.
7822 */
7823 swap_memcg = mem_cgroup_id_get_online(memcg);
7824 nr_entries = folio_nr_pages(folio);
7825 /* Get references for the tail pages, too */
7826 if (nr_entries > 1)
7827 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7828 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7829 nr_entries);
7830 VM_BUG_ON_FOLIO(oldid, folio);
7831 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7832
7833 folio->memcg_data = 0;
7834
7835 if (!mem_cgroup_is_root(memcg))
7836 page_counter_uncharge(&memcg->memory, nr_entries);
7837
7838 if (memcg != swap_memcg) {
7839 if (!mem_cgroup_is_root(swap_memcg))
7840 page_counter_charge(&swap_memcg->memsw, nr_entries);
7841 page_counter_uncharge(&memcg->memsw, nr_entries);
7842 }
7843
7844 /*
7845 * Interrupts should be disabled here because the caller holds the
7846 * i_pages lock which is taken with interrupts-off. It is
7847 * important here to have the interrupts disabled because it is the
7848 * only synchronisation we have for updating the per-CPU variables.
7849 */
7850 memcg_stats_lock();
7851 mem_cgroup_charge_statistics(memcg, -nr_entries);
7852 memcg_stats_unlock();
7853 memcg_check_events(memcg, folio_nid(folio));
7854
7855 css_put(&memcg->css);
7856}
7857
7858/**
7859 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
7860 * @folio: folio being added to swap
7861 * @entry: swap entry to charge
7862 *
7863 * Try to charge @folio's memcg for the swap space at @entry.
7864 *
7865 * Returns 0 on success, -ENOMEM on failure.
7866 */
7867int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
7868{
7869 unsigned int nr_pages = folio_nr_pages(folio);
7870 struct page_counter *counter;
7871 struct mem_cgroup *memcg;
7872 unsigned short oldid;
7873
7874 if (do_memsw_account())
7875 return 0;
7876
7877 memcg = folio_memcg(folio);
7878
7879 VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7880 if (!memcg)
7881 return 0;
7882
7883 if (!entry.val) {
7884 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7885 return 0;
7886 }
7887
7888 memcg = mem_cgroup_id_get_online(memcg);
7889
7890 if (!mem_cgroup_is_root(memcg) &&
7891 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7892 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7893 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7894 mem_cgroup_id_put(memcg);
7895 return -ENOMEM;
7896 }
7897
7898 /* Get references for the tail pages, too */
7899 if (nr_pages > 1)
7900 mem_cgroup_id_get_many(memcg, nr_pages - 1);
7901 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7902 VM_BUG_ON_FOLIO(oldid, folio);
7903 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7904
7905 return 0;
7906}
7907
7908/**
7909 * __mem_cgroup_uncharge_swap - uncharge swap space
7910 * @entry: swap entry to uncharge
7911 * @nr_pages: the amount of swap space to uncharge
7912 */
7913void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7914{
7915 struct mem_cgroup *memcg;
7916 unsigned short id;
7917
7918 id = swap_cgroup_record(entry, 0, nr_pages);
7919 rcu_read_lock();
7920 memcg = mem_cgroup_from_id(id);
7921 if (memcg) {
7922 if (!mem_cgroup_is_root(memcg)) {
7923 if (do_memsw_account())
7924 page_counter_uncharge(&memcg->memsw, nr_pages);
7925 else
7926 page_counter_uncharge(&memcg->swap, nr_pages);
7927 }
7928 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7929 mem_cgroup_id_put_many(memcg, nr_pages);
7930 }
7931 rcu_read_unlock();
7932}
7933
7934long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7935{
7936 long nr_swap_pages = get_nr_swap_pages();
7937
7938 if (mem_cgroup_disabled() || do_memsw_account())
7939 return nr_swap_pages;
7940 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
7941 nr_swap_pages = min_t(long, nr_swap_pages,
7942 READ_ONCE(memcg->swap.max) -
7943 page_counter_read(&memcg->swap));
7944 return nr_swap_pages;
7945}
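
/*
 * Worked example (illustrative): with 1000 free swap pages system-wide, a
 * memcg whose swap.max is 100 pages with 60 pages charged, under a parent
 * with only 30 pages of swap headroom left, this returns
 * min(1000, 100 - 60, 30) = 30 pages.
 */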
7946
7947bool mem_cgroup_swap_full(struct folio *folio)
7948{
7949 struct mem_cgroup *memcg;
7950
7951 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
7952
7953 if (vm_swap_full())
7954 return true;
7955 if (do_memsw_account())
7956 return false;
7957
7958 memcg = folio_memcg(folio);
7959 if (!memcg)
7960 return false;
7961
7962 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
7963 unsigned long usage = page_counter_read(&memcg->swap);
7964
7965 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7966 usage * 2 >= READ_ONCE(memcg->swap.max))
7967 return true;
7968 }
7969
7970 return false;
7971}
7972
7973static int __init setup_swap_account(char *s)
7974{
7975 bool res;
7976
7977 if (!kstrtobool(s, &res) && !res)
7978 pr_warn_once("The swapaccount=0 commandline option is deprecated "
7979 "in favor of configuring swap control via cgroupfs. "
7980 "Please report your usecase to linux-mm@kvack.org if you "
7981 "depend on this functionality.\n");
7982 return 1;
7983}
7984__setup("swapaccount=", setup_swap_account);
7985
7986static u64 swap_current_read(struct cgroup_subsys_state *css,
7987 struct cftype *cft)
7988{
7989 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7990
7991 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7992}
7993
7994static u64 swap_peak_read(struct cgroup_subsys_state *css,
7995 struct cftype *cft)
7996{
7997 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7998
7999 return (u64)memcg->swap.watermark * PAGE_SIZE;
8000}
8001
8002static int swap_high_show(struct seq_file *m, void *v)
8003{
8004 return seq_puts_memcg_tunable(m,
8005 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
8006}
8007
8008static ssize_t swap_high_write(struct kernfs_open_file *of,
8009 char *buf, size_t nbytes, loff_t off)
8010{
8011 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8012 unsigned long high;
8013 int err;
8014
8015 buf = strstrip(buf);
8016 err = page_counter_memparse(buf, "max", &high);
8017 if (err)
8018 return err;
8019
8020 page_counter_set_high(&memcg->swap, high);
8021
8022 return nbytes;
8023}
8024
8025static int swap_max_show(struct seq_file *m, void *v)
8026{
8027 return seq_puts_memcg_tunable(m,
8028 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
8029}
8030
8031static ssize_t swap_max_write(struct kernfs_open_file *of,
8032 char *buf, size_t nbytes, loff_t off)
8033{
8034 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8035 unsigned long max;
8036 int err;
8037
8038 buf = strstrip(buf);
8039 err = page_counter_memparse(buf, "max", &max);
8040 if (err)
8041 return err;
8042
8043 xchg(&memcg->swap.max, max);
8044
8045 return nbytes;
8046}
8047
8048static int swap_events_show(struct seq_file *m, void *v)
8049{
8050 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
8051
8052 seq_printf(m, "high %lu\n",
8053 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
8054 seq_printf(m, "max %lu\n",
8055 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
8056 seq_printf(m, "fail %lu\n",
8057 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
8058
8059 return 0;
8060}
8061
8062static struct cftype swap_files[] = {
8063 {
8064 .name = "swap.current",
8065 .flags = CFTYPE_NOT_ON_ROOT,
8066 .read_u64 = swap_current_read,
8067 },
8068 {
8069 .name = "swap.high",
8070 .flags = CFTYPE_NOT_ON_ROOT,
8071 .seq_show = swap_high_show,
8072 .write = swap_high_write,
8073 },
8074 {
8075 .name = "swap.max",
8076 .flags = CFTYPE_NOT_ON_ROOT,
8077 .seq_show = swap_max_show,
8078 .write = swap_max_write,
8079 },
8080 {
8081 .name = "swap.peak",
8082 .flags = CFTYPE_NOT_ON_ROOT,
8083 .read_u64 = swap_peak_read,
8084 },
8085 {
8086 .name = "swap.events",
8087 .flags = CFTYPE_NOT_ON_ROOT,
8088 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
8089 .seq_show = swap_events_show,
8090 },
8091 { } /* terminate */
8092};
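
/*
 * On the default hierarchy these entries show up as memory.swap.current,
 * memory.swap.high, memory.swap.max, memory.swap.peak and
 * memory.swap.events in every non-root cgroup directory; e.g. writing
 * "max" or "1G" to memory.swap.max goes through swap_max_write() above.
 */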
8093
8094static struct cftype memsw_files[] = {
8095 {
8096 .name = "memsw.usage_in_bytes",
8097 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
8098 .read_u64 = mem_cgroup_read_u64,
8099 },
8100 {
8101 .name = "memsw.max_usage_in_bytes",
8102 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
8103 .write = mem_cgroup_reset,
8104 .read_u64 = mem_cgroup_read_u64,
8105 },
8106 {
8107 .name = "memsw.limit_in_bytes",
8108 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
8109 .write = mem_cgroup_write,
8110 .read_u64 = mem_cgroup_read_u64,
8111 },
8112 {
8113 .name = "memsw.failcnt",
8114 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
8115 .write = mem_cgroup_reset,
8116 .read_u64 = mem_cgroup_read_u64,
8117 },
8118 { }, /* terminate */
8119};
8120
8121#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
8122/**
8123 * obj_cgroup_may_zswap - check if this cgroup can zswap
8124 * @objcg: the object cgroup
8125 *
8126 * Check if the hierarchical zswap limit has been reached.
8127 *
8128 * This doesn't check for specific headroom, and it is not atomic
8129 * either. But with zswap, the size of the allocation is only known
8130 * once compression has occurred, and this optimistic pre-check avoids
8131 * spending cycles on compression when there is already no room left
8132 * or zswap is disabled altogether somewhere in the hierarchy.
8133 */
8134bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
8135{
8136 struct mem_cgroup *memcg, *original_memcg;
8137 bool ret = true;
8138
8139 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8140 return true;
8141
8142 original_memcg = get_mem_cgroup_from_objcg(objcg);
8143 for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
8144 memcg = parent_mem_cgroup(memcg)) {
8145 unsigned long max = READ_ONCE(memcg->zswap_max);
8146 unsigned long pages;
8147
8148 if (max == PAGE_COUNTER_MAX)
8149 continue;
8150 if (max == 0) {
8151 ret = false;
8152 break;
8153 }
8154
8155 /*
8156 * mem_cgroup_flush_stats() ignores small changes. Use
8157 * do_flush_stats() directly to get accurate stats for charging.
8158 */
8159 do_flush_stats(memcg);
8160 pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
8161 if (pages < max)
8162 continue;
8163 ret = false;
8164 break;
8165 }
8166 mem_cgroup_put(original_memcg);
8167 return ret;
8168}
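
/*
 * Illustrative store-path sketch (not part of the original source): zswap
 * is expected to run the optimistic pre-check before compressing and to
 * charge the actual compressed size afterwards, roughly:
 *
 *	if (!obj_cgroup_may_zswap(objcg))
 *		goto reject;
 *	... compress the page into compressed_len bytes ...
 *	obj_cgroup_charge_zswap(objcg, compressed_len);
 *
 * with the matching obj_cgroup_uncharge_zswap() call made when the entry
 * is freed on swapin or invalidation.
 */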
8169
8170/**
8171 * obj_cgroup_charge_zswap - charge compression backend memory
8172 * @objcg: the object cgroup
8173 * @size: size of compressed object
8174 *
8175 * This forces the charge after obj_cgroup_may_zswap() allowed
8176 * compression and storage in zswap for this cgroup to go ahead.
8177 */
8178void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
8179{
8180 struct mem_cgroup *memcg;
8181
8182 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8183 return;
8184
8185 VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
8186
8187 /* PF_MEMALLOC context, charging must succeed */
8188 if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
8189 VM_WARN_ON_ONCE(1);
8190
8191 rcu_read_lock();
8192 memcg = obj_cgroup_memcg(objcg);
8193 mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
8194 mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
8195 rcu_read_unlock();
8196}
8197
8198/**
8199 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
8200 * @objcg: the object cgroup
8201 * @size: size of compressed object
8202 *
8203 * Uncharges zswap memory on page in.
8204 */
8205void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
8206{
8207 struct mem_cgroup *memcg;
8208
8209 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8210 return;
8211
8212 obj_cgroup_uncharge(objcg, size);
8213
8214 rcu_read_lock();
8215 memcg = obj_cgroup_memcg(objcg);
8216 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
8217 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
8218 rcu_read_unlock();
8219}
8220
8221bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
8222{
8223 /* if zswap is disabled, do not block pages going to the swapping device */
8224 return !is_zswap_enabled() || !memcg || READ_ONCE(memcg->zswap_writeback);
8225}
8226
8227static u64 zswap_current_read(struct cgroup_subsys_state *css,
8228 struct cftype *cft)
8229{
8230 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
8231
8232 mem_cgroup_flush_stats(memcg);
8233 return memcg_page_state(memcg, MEMCG_ZSWAP_B);
8234}
8235
8236static int zswap_max_show(struct seq_file *m, void *v)
8237{
8238 return seq_puts_memcg_tunable(m,
8239 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
8240}
8241
8242static ssize_t zswap_max_write(struct kernfs_open_file *of,
8243 char *buf, size_t nbytes, loff_t off)
8244{
8245 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8246 unsigned long max;
8247 int err;
8248
8249 buf = strstrip(buf);
8250 err = page_counter_memparse(buf, "max", &max);
8251 if (err)
8252 return err;
8253
8254 xchg(&memcg->zswap_max, max);
8255
8256 return nbytes;
8257}
8258
8259static int zswap_writeback_show(struct seq_file *m, void *v)
8260{
8261 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
8262
8263 seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
8264 return 0;
8265}
8266
8267static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
8268 char *buf, size_t nbytes, loff_t off)
8269{
8270 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8271 int zswap_writeback;
8272 ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);
8273
8274 if (parse_ret)
8275 return parse_ret;
8276
8277 if (zswap_writeback != 0 && zswap_writeback != 1)
8278 return -EINVAL;
8279
8280 WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
8281 return nbytes;
8282}
8283
8284static struct cftype zswap_files[] = {
8285 {
8286 .name = "zswap.current",
8287 .flags = CFTYPE_NOT_ON_ROOT,
8288 .read_u64 = zswap_current_read,
8289 },
8290 {
8291 .name = "zswap.max",
8292 .flags = CFTYPE_NOT_ON_ROOT,
8293 .seq_show = zswap_max_show,
8294 .write = zswap_max_write,
8295 },
8296 {
8297 .name = "zswap.writeback",
8298 .seq_show = zswap_writeback_show,
8299 .write = zswap_writeback_write,
8300 },
8301 { } /* terminate */
8302};
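
/*
 * These show up as memory.zswap.current, memory.zswap.max and
 * memory.zswap.writeback on the default hierarchy; e.g. writing "0" to
 * memory.zswap.writeback keeps this cgroup's zswap-backed pages from being
 * written back to the underlying swap device (see
 * mem_cgroup_zswap_writeback_enabled() above), and zswap_writeback_write()
 * accepts only "0" and "1".
 */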
8303#endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
8304
8305static int __init mem_cgroup_swap_init(void)
8306{
8307 if (mem_cgroup_disabled())
8308 return 0;
8309
8310 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
8311 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
8312#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
8313 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
8314#endif
8315 return 0;
8316}
8317subsys_initcall(mem_cgroup_swap_init);
8318
8319#endif /* CONFIG_SWAP */