   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* memcontrol.c - Memory Controller
   3 *
   4 * Copyright IBM Corporation, 2007
   5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
   6 *
   7 * Copyright 2007 OpenVZ SWsoft Inc
   8 * Author: Pavel Emelianov <xemul@openvz.org>
   9 *
  10 * Memory thresholds
  11 * Copyright (C) 2009 Nokia Corporation
  12 * Author: Kirill A. Shutemov
  13 *
  14 * Kernel Memory Controller
  15 * Copyright (C) 2012 Parallels Inc. and Google Inc.
  16 * Authors: Glauber Costa and Suleiman Souhlal
  17 *
  18 * Native page reclaim
  19 * Charge lifetime sanitation
  20 * Lockless page tracking & accounting
  21 * Unified hierarchy configuration model
  22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
  23 *
  24 * Per memcg lru locking
  25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
  26 */
  27
  28#include <linux/page_counter.h>
  29#include <linux/memcontrol.h>
  30#include <linux/cgroup.h>
  31#include <linux/pagewalk.h>
  32#include <linux/sched/mm.h>
  33#include <linux/shmem_fs.h>
  34#include <linux/hugetlb.h>
  35#include <linux/pagemap.h>
  36#include <linux/vm_event_item.h>
  37#include <linux/smp.h>
  38#include <linux/page-flags.h>
  39#include <linux/backing-dev.h>
  40#include <linux/bit_spinlock.h>
  41#include <linux/rcupdate.h>
  42#include <linux/limits.h>
  43#include <linux/export.h>
  44#include <linux/mutex.h>
  45#include <linux/rbtree.h>
  46#include <linux/slab.h>
  47#include <linux/swap.h>
  48#include <linux/swapops.h>
  49#include <linux/spinlock.h>
  50#include <linux/eventfd.h>
  51#include <linux/poll.h>
  52#include <linux/sort.h>
  53#include <linux/fs.h>
  54#include <linux/seq_file.h>
  55#include <linux/vmpressure.h>
  56#include <linux/mm_inline.h>
  57#include <linux/swap_cgroup.h>
  58#include <linux/cpu.h>
  59#include <linux/oom.h>
  60#include <linux/lockdep.h>
  61#include <linux/file.h>
  62#include <linux/tracehook.h>
  63#include <linux/psi.h>
  64#include <linux/seq_buf.h>
  65#include "internal.h"
  66#include <net/sock.h>
  67#include <net/ip.h>
  68#include "slab.h"
  69
  70#include <linux/uaccess.h>
  71
  72#include <trace/events/vmscan.h>
  73
  74struct cgroup_subsys memory_cgrp_subsys __read_mostly;
  75EXPORT_SYMBOL(memory_cgrp_subsys);
  76
  77struct mem_cgroup *root_mem_cgroup __read_mostly;
  78
  79/* Active memory cgroup to use from an interrupt context */
  80DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
  81EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
  82
  83/* Socket memory accounting disabled? */
  84static bool cgroup_memory_nosocket __ro_after_init;
  85
  86/* Kernel memory accounting disabled? */
  87bool cgroup_memory_nokmem __ro_after_init;
  88
  89/* Whether the swap controller is active */
  90#ifdef CONFIG_MEMCG_SWAP
  91bool cgroup_memory_noswap __ro_after_init;
  92#else
  93#define cgroup_memory_noswap		1
  94#endif
  95
  96#ifdef CONFIG_CGROUP_WRITEBACK
  97static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
  98#endif
  99
 100/* Whether legacy memory+swap accounting is active */
 101static bool do_memsw_account(void)
 102{
 103	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
 104}
 105
 106#define THRESHOLDS_EVENTS_TARGET 128
 107#define SOFTLIMIT_EVENTS_TARGET 1024
 108
 109/*
 110 * Cgroups above their limits are maintained in an RB-tree, independent of
 111 * their hierarchy representation
 112 */
 113
 114struct mem_cgroup_tree_per_node {
 115	struct rb_root rb_root;
 116	struct rb_node *rb_rightmost;
 117	spinlock_t lock;
 118};
 119
 120struct mem_cgroup_tree {
 121	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
 122};
 123
 124static struct mem_cgroup_tree soft_limit_tree __read_mostly;
 125
 126/* for OOM */
 127struct mem_cgroup_eventfd_list {
 128	struct list_head list;
 129	struct eventfd_ctx *eventfd;
 130};
 131
 132/*
 133 * cgroup_event represents events which userspace wants to receive.
 134 */
 135struct mem_cgroup_event {
 136	/*
 137	 * memcg which the event belongs to.
 138	 */
 139	struct mem_cgroup *memcg;
 140	/*
 141	 * eventfd to signal userspace about the event.
 142	 */
 143	struct eventfd_ctx *eventfd;
 144	/*
 145	 * Each of these is stored in a list by the cgroup.
 146	 */
 147	struct list_head list;
 148	/*
 149	 * register_event() callback will be used to add a new userspace
 150	 * waiter for changes related to this event.  Use eventfd_signal()
 151	 * on eventfd to send notification to userspace.
 152	 */
 153	int (*register_event)(struct mem_cgroup *memcg,
 154			      struct eventfd_ctx *eventfd, const char *args);
 155	/*
 156	 * unregister_event() callback will be called when userspace closes
 157	 * the eventfd or when the cgroup is removed.  This callback must
 158	 * be set if you want to provide notification functionality.
 159	 */
 160	void (*unregister_event)(struct mem_cgroup *memcg,
 161				 struct eventfd_ctx *eventfd);
 162	/*
 163	 * All fields below are needed to unregister the event when
 164	 * userspace closes the eventfd.
 165	 */
 166	poll_table pt;
 167	wait_queue_head_t *wqh;
 168	wait_queue_entry_t wait;
 169	struct work_struct remove;
 170};
 171
 172static void mem_cgroup_threshold(struct mem_cgroup *memcg);
 173static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
 174
 175/* Stuff for moving charges at task migration. */
 176/*
 177 * Types of charges to be moved.
 178 */
 179#define MOVE_ANON	0x1U
 180#define MOVE_FILE	0x2U
 181#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
 182
 183/* "mc" and its members are protected by cgroup_mutex */
 184static struct move_charge_struct {
 185	spinlock_t	  lock; /* for from, to */
 186	struct mm_struct  *mm;
 187	struct mem_cgroup *from;
 188	struct mem_cgroup *to;
 189	unsigned long flags;
 190	unsigned long precharge;
 191	unsigned long moved_charge;
 192	unsigned long moved_swap;
 193	struct task_struct *moving_task;	/* a task moving charges */
 194	wait_queue_head_t waitq;		/* a waitq for other context */
 195} mc = {
 196	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
 197	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
 198};
 199
 200/*
 201 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 202 * limit reclaim to prevent infinite loops, if they ever occur.
 203 */
 204#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
 205#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
 206
 207/* for encoding cft->private value on file */
 208enum res_type {
 209	_MEM,
 210	_MEMSWAP,
 211	_OOM_TYPE,
 212	_KMEM,
 213	_TCP,
 214};
 215
 216#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
 217#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
 218#define MEMFILE_ATTR(val)	((val) & 0xffff)
 219/* Used for OOM notifier */
 220#define OOM_CONTROL		(0)
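    /*
     * Illustrative sketch of the encoding above (not a declaration from
     * this excerpt): a memsw limit file would carry
     *
     *	.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
     *
     * and its handler can recover both halves, since the type sits in
     * the high 16 bits and the attribute in the low 16:
     *
     *	enum res_type type = MEMFILE_TYPE(cft->private);
     *	unsigned long attr = MEMFILE_ATTR(cft->private);
     */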
 221
 222/*
 223 * Iteration constructs for visiting all cgroups (under a tree).  If
 224 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 225 * be used for reference counting.
 226 */
 227#define for_each_mem_cgroup_tree(iter, root)		\
 228	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
 229	     iter != NULL;				\
 230	     iter = mem_cgroup_iter(root, iter, NULL))
 231
 232#define for_each_mem_cgroup(iter)			\
 233	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
 234	     iter != NULL;				\
 235	     iter = mem_cgroup_iter(NULL, iter, NULL))
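    /*
     * Sketch of the break rule above, with a hypothetical early-exit
     * predicate stop_here(): leaving the loop early still holds a css
     * reference on @iter, which mem_cgroup_iter_break() drops:
     *
     *	struct mem_cgroup *iter;
     *
     *	for_each_mem_cgroup_tree(iter, root) {
     *		if (stop_here(iter)) {
     *			mem_cgroup_iter_break(root, iter);
     *			break;
     *		}
     *	}
     */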
 236
 237static inline bool should_force_charge(void)
 238{
 239	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
 240		(current->flags & PF_EXITING);
 241}
 242
 243/* Some nice accessors for the vmpressure. */
 244struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
 245{
 246	if (!memcg)
 247		memcg = root_mem_cgroup;
 248	return &memcg->vmpressure;
 249}
 250
 251struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
 252{
 253	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
 254}
 255
 256#ifdef CONFIG_MEMCG_KMEM
 257extern spinlock_t css_set_lock;
 258
 259bool mem_cgroup_kmem_disabled(void)
 260{
 261	return cgroup_memory_nokmem;
 262}
 263
 264static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
 265				      unsigned int nr_pages);
 266
 267static void obj_cgroup_release(struct percpu_ref *ref)
 268{
 269	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
 270	unsigned int nr_bytes;
 271	unsigned int nr_pages;
 272	unsigned long flags;
 273
 274	/*
 275	 * At this point all allocated objects are freed, and
 276	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
 277	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
 278	 *
 279	 * The following sequence can lead to it:
 280	 * 1) CPU0: objcg == stock->cached_objcg
 281	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
 282	 *          PAGE_SIZE bytes are charged
 283	 * 3) CPU1: a process from another memcg is allocating something,
 284	 *          the stock is flushed,
 285	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
 286	 * 4) CPU0: we release this object,
 287	 *          92 bytes are added to stock->nr_bytes
 288	 * 5) CPU0: stock is flushed,
 289	 *          92 bytes are added to objcg->nr_charged_bytes
 290	 *
 291	 * As a result, nr_charged_bytes == PAGE_SIZE.
 292	 * This page will be uncharged in obj_cgroup_release().
 293	 */
 294	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
 295	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
 296	nr_pages = nr_bytes >> PAGE_SHIFT;
 297
 298	if (nr_pages)
 299		obj_cgroup_uncharge_pages(objcg, nr_pages);
 300
 301	spin_lock_irqsave(&css_set_lock, flags);
 302	list_del(&objcg->list);
 303	spin_unlock_irqrestore(&css_set_lock, flags);
 304
 305	percpu_ref_exit(ref);
 306	kfree_rcu(objcg, rcu);
 307}
 308
 309static struct obj_cgroup *obj_cgroup_alloc(void)
 310{
 311	struct obj_cgroup *objcg;
 312	int ret;
 313
 314	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
 315	if (!objcg)
 316		return NULL;
 317
 318	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
 319			      GFP_KERNEL);
 320	if (ret) {
 321		kfree(objcg);
 322		return NULL;
 323	}
 324	INIT_LIST_HEAD(&objcg->list);
 325	return objcg;
 326}
 327
 328static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
 329				  struct mem_cgroup *parent)
 330{
 331	struct obj_cgroup *objcg, *iter;
 332
 333	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
 334
 335	spin_lock_irq(&css_set_lock);
 336
 337	/* 1) Ready to reparent active objcg. */
 338	list_add(&objcg->list, &memcg->objcg_list);
 339	/* 2) Reparent active objcg and already reparented objcgs to parent. */
 340	list_for_each_entry(iter, &memcg->objcg_list, list)
 341		WRITE_ONCE(iter->memcg, parent);
 342	/* 3) Move already reparented objcgs to the parent's list */
 343	list_splice(&memcg->objcg_list, &parent->objcg_list);
 344
 345	spin_unlock_irq(&css_set_lock);
 346
 347	percpu_ref_kill(&objcg->refcnt);
 348}
 349
 350/*
 351 * This will be used as a shrinker list's index.
 352 * The main reason for not using cgroup id for this:
 353 *  this works better in sparse environments, where we have a lot of memcgs
 354 *  but only a few of them are kmem-limited. If, for instance, we have 200
 355 *  memcgs and none but the 200th is kmem-limited, indexing by cgroup id
 356 *  would require a 200-entry array.
 357 *
 358 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 359 * will double each time we have to increase it.
 360 */
 361static DEFINE_IDA(memcg_cache_ida);
 362int memcg_nr_cache_ids;
 363
 364/* Protects memcg_nr_cache_ids */
 365static DECLARE_RWSEM(memcg_cache_ids_sem);
 366
 367void memcg_get_cache_ids(void)
 368{
 369	down_read(&memcg_cache_ids_sem);
 370}
 371
 372void memcg_put_cache_ids(void)
 373{
 374	up_read(&memcg_cache_ids_sem);
 375}
 376
 377/*
 378 * MIN_SIZE is different from 1, because we would like to avoid going through
 379 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 380 * cgroups is a reasonable guess. In the future, it could be a parameter or
 381 * tunable, but that is strictly not necessary.
 382 *
 383 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 384 * this constant directly from cgroup, but it is understandable that this is
 385 * better kept as an internal representation in cgroup.c. In any case, the
 386 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 387 * increase ours as well if it increases.
 388 */
 389#define MEMCG_CACHES_MIN_SIZE 4
 390#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
 391
 392/*
 393 * A lot of the calls to the cache allocation functions are expected to be
 394 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
 395 * conditional on this static branch, we have to allow modules that do
 396 * kmem_cache_alloc and the like to see this symbol as well.
 397 */
 398DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
 399EXPORT_SYMBOL(memcg_kmem_enabled_key);
 400#endif
 401
 402/**
 403 * mem_cgroup_css_from_page - css of the memcg associated with a page
 404 * @page: page of interest
 405 *
 406 * If memcg is bound to the default hierarchy, css of the memcg associated
 407 * with @page is returned.  The returned css remains associated with @page
 408 * until it is released.
 409 *
 410 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 411 * is returned.
 412 */
 413struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
 414{
 415	struct mem_cgroup *memcg;
 416
 417	memcg = page_memcg(page);
 418
 419	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
 420		memcg = root_mem_cgroup;
 421
 422	return &memcg->css;
 423}
 424
 425/**
 426 * page_cgroup_ino - return inode number of the memcg a page is charged to
 427 * @page: the page
 428 *
 429 * Look up the closest online ancestor of the memory cgroup @page is charged to
 430 * and return its inode number or 0 if @page is not charged to any cgroup. It
 431 * is safe to call this function without holding a reference to @page.
 432 *
 433 * Note, this function is inherently racy, because there is nothing to prevent
 434 * the cgroup inode from getting torn down and potentially reallocated a moment
 435 * after page_cgroup_ino() returns, so it should only be used by callers that
 436 * do not care (such as procfs interfaces).
 437 */
 438ino_t page_cgroup_ino(struct page *page)
 439{
 440	struct mem_cgroup *memcg;
 441	unsigned long ino = 0;
 442
 443	rcu_read_lock();
 444	memcg = page_memcg_check(page);
 445
 446	while (memcg && !(memcg->css.flags & CSS_ONLINE))
 447		memcg = parent_mem_cgroup(memcg);
 448	if (memcg)
 449		ino = cgroup_ino(memcg->css.cgroup);
 450	rcu_read_unlock();
 451	return ino;
 452}
 453
 454static struct mem_cgroup_per_node *
 455mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
 456{
 457	int nid = page_to_nid(page);
 458
 459	return memcg->nodeinfo[nid];
 460}
 461
 462static struct mem_cgroup_tree_per_node *
 463soft_limit_tree_node(int nid)
 464{
 465	return soft_limit_tree.rb_tree_per_node[nid];
 466}
 467
 468static struct mem_cgroup_tree_per_node *
 469soft_limit_tree_from_page(struct page *page)
 470{
 471	int nid = page_to_nid(page);
 472
 473	return soft_limit_tree.rb_tree_per_node[nid];
 474}
 475
 476static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
 477					 struct mem_cgroup_tree_per_node *mctz,
 478					 unsigned long new_usage_in_excess)
 479{
 480	struct rb_node **p = &mctz->rb_root.rb_node;
 481	struct rb_node *parent = NULL;
 482	struct mem_cgroup_per_node *mz_node;
 483	bool rightmost = true;
 484
 485	if (mz->on_tree)
 486		return;
 487
 488	mz->usage_in_excess = new_usage_in_excess;
 489	if (!mz->usage_in_excess)
 490		return;
 491	while (*p) {
 492		parent = *p;
 493		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
 494					tree_node);
 495		if (mz->usage_in_excess < mz_node->usage_in_excess) {
 496			p = &(*p)->rb_left;
 497			rightmost = false;
 498		} else {
 499			p = &(*p)->rb_right;
 500		}
 501	}
 502
 503	if (rightmost)
 504		mctz->rb_rightmost = &mz->tree_node;
 505
 506	rb_link_node(&mz->tree_node, parent, p);
 507	rb_insert_color(&mz->tree_node, &mctz->rb_root);
 508	mz->on_tree = true;
 509}
 510
 511static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
 512					 struct mem_cgroup_tree_per_node *mctz)
 513{
 514	if (!mz->on_tree)
 515		return;
 516
 517	if (&mz->tree_node == mctz->rb_rightmost)
 518		mctz->rb_rightmost = rb_prev(&mz->tree_node);
 519
 520	rb_erase(&mz->tree_node, &mctz->rb_root);
 521	mz->on_tree = false;
 522}
 523
 524static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
 525				       struct mem_cgroup_tree_per_node *mctz)
 526{
 527	unsigned long flags;
 528
 529	spin_lock_irqsave(&mctz->lock, flags);
 530	__mem_cgroup_remove_exceeded(mz, mctz);
 531	spin_unlock_irqrestore(&mctz->lock, flags);
 532}
 533
 534static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
 535{
 536	unsigned long nr_pages = page_counter_read(&memcg->memory);
 537	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
 538	unsigned long excess = 0;
 539
 540	if (nr_pages > soft_limit)
 541		excess = nr_pages - soft_limit;
 542
 543	return excess;
 544}
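    /*
     * Example: with usage at 300 pages and a soft limit of 200 pages,
     * soft_limit_excess() returns 100; usage at or below the soft limit
     * returns 0, which keeps the group off the soft-limit tree
     * (__mem_cgroup_insert_exceeded() bails out on a zero excess).
     */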
 545
 546static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
 547{
 548	unsigned long excess;
 549	struct mem_cgroup_per_node *mz;
 550	struct mem_cgroup_tree_per_node *mctz;
 551
 552	mctz = soft_limit_tree_from_page(page);
 553	if (!mctz)
 554		return;
 555	/*
 556	 * Necessary to update all ancestors when the hierarchy is used,
 557	 * because their event counters are not touched.
 558	 */
 559	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
 560		mz = mem_cgroup_page_nodeinfo(memcg, page);
 561		excess = soft_limit_excess(memcg);
 562		/*
 563		 * We have to update the tree if mz is on RB-tree or
 564		 * mem is over its softlimit.
 565		 */
 566		if (excess || mz->on_tree) {
 567			unsigned long flags;
 568
 569			spin_lock_irqsave(&mctz->lock, flags);
 570			/* if on-tree, remove it */
 571			if (mz->on_tree)
 572				__mem_cgroup_remove_exceeded(mz, mctz);
 573			/*
 574			 * Insert again. mz->usage_in_excess will be updated.
 575			 * If excess is 0, no tree ops.
 576			 */
 577			__mem_cgroup_insert_exceeded(mz, mctz, excess);
 578			spin_unlock_irqrestore(&mctz->lock, flags);
 579		}
 580	}
 581}
 582
 583static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
 584{
 585	struct mem_cgroup_tree_per_node *mctz;
 586	struct mem_cgroup_per_node *mz;
 587	int nid;
 588
 589	for_each_node(nid) {
 590		mz = memcg->nodeinfo[nid];
 591		mctz = soft_limit_tree_node(nid);
 592		if (mctz)
 593			mem_cgroup_remove_exceeded(mz, mctz);
 594	}
 595}
 596
 597static struct mem_cgroup_per_node *
 598__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
 599{
 600	struct mem_cgroup_per_node *mz;
 601
 602retry:
 603	mz = NULL;
 604	if (!mctz->rb_rightmost)
 605		goto done;		/* Nothing to reclaim from */
 606
 607	mz = rb_entry(mctz->rb_rightmost,
 608		      struct mem_cgroup_per_node, tree_node);
 609	/*
 610	 * Remove the node now but someone else can add it back,
 611	 * we will add it back at the end of reclaim to its correct
 612	 * position in the tree.
 613	 */
 614	__mem_cgroup_remove_exceeded(mz, mctz);
 615	if (!soft_limit_excess(mz->memcg) ||
 616	    !css_tryget(&mz->memcg->css))
 617		goto retry;
 618done:
 619	return mz;
 620}
 621
 622static struct mem_cgroup_per_node *
 623mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
 624{
 625	struct mem_cgroup_per_node *mz;
 626
 627	spin_lock_irq(&mctz->lock);
 628	mz = __mem_cgroup_largest_soft_limit_node(mctz);
 629	spin_unlock_irq(&mctz->lock);
 630	return mz;
 631}
 632
 633/**
 634 * __mod_memcg_state - update cgroup memory statistics
 635 * @memcg: the memory cgroup
 636 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 637 * @val: delta to add to the counter, can be negative
 638 */
 639void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
 640{
 641	if (mem_cgroup_disabled())
 642		return;
 643
 644	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
 645	cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
 646}
 647
 648/* idx can be of type enum memcg_stat_item or node_stat_item. */
 649static unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
 650{
 651	long x = READ_ONCE(memcg->vmstats.state[idx]);
 652#ifdef CONFIG_SMP
 653	if (x < 0)
 654		x = 0;
 655#endif
 656	return x;
 657}
 658
 659/* idx can be of type enum memcg_stat_item or node_stat_item. */
 660static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
 661{
 662	long x = 0;
 663	int cpu;
 664
 665	for_each_possible_cpu(cpu)
 666		x += per_cpu(memcg->vmstats_percpu->state[idx], cpu);
 667#ifdef CONFIG_SMP
 668	if (x < 0)
 669		x = 0;
 670#endif
 671	return x;
 672}
 673
 674static struct mem_cgroup_per_node *
 675parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
 676{
 677	struct mem_cgroup *parent;
 678
 679	parent = parent_mem_cgroup(pn->memcg);
 680	if (!parent)
 681		return NULL;
 682	return parent->nodeinfo[nid];
 683}
 684
 685void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 686			      int val)
 687{
 688	struct mem_cgroup_per_node *pn;
 689	struct mem_cgroup *memcg;
 690	long x, threshold = MEMCG_CHARGE_BATCH;
 691
 692	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 693	memcg = pn->memcg;
 694
 695	/* Update memcg */
 696	__mod_memcg_state(memcg, idx, val);
 697
 698	/* Update lruvec */
 699	__this_cpu_add(pn->lruvec_stat_local->count[idx], val);
 700
 701	if (vmstat_item_in_bytes(idx))
 702		threshold <<= PAGE_SHIFT;
 703
 704	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
 705	if (unlikely(abs(x) > threshold)) {
 706		pg_data_t *pgdat = lruvec_pgdat(lruvec);
 707		struct mem_cgroup_per_node *pi;
 708
 709		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
 710			atomic_long_add(x, &pi->lruvec_stat[idx]);
 711		x = 0;
 712	}
 713	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
 714}
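    /*
     * Worked example of the batching above: MEMCG_CHARGE_BATCH is 32,
     * so for a page-counted item the per-cpu delta is folded into the
     * atomic per-node counters only once |x| exceeds 32; for a
     * byte-counted item (vmstat_item_in_bytes()) the threshold scales
     * to 32 << PAGE_SHIFT, i.e. 128KB with 4KB pages.
     */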
 715
 716/**
 717 * __mod_lruvec_state - update lruvec memory statistics
 718 * @lruvec: the lruvec
 719 * @idx: the stat item
 720 * @val: delta to add to the counter, can be negative
 721 *
 722 * The lruvec is the intersection of the NUMA node and a cgroup. This
 723 * function updates all three counters that are affected by a
 724 * change of state at this level: per-node, per-cgroup, per-lruvec.
 725 */
 726void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 727			int val)
 728{
 729	/* Update node */
 730	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
 731
 732	/* Update memcg and lruvec */
 733	if (!mem_cgroup_disabled())
 734		__mod_memcg_lruvec_state(lruvec, idx, val);
 735}
 736
 737void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
 738			     int val)
 739{
 740	struct page *head = compound_head(page); /* rmap on tail pages */
 741	struct mem_cgroup *memcg;
 742	pg_data_t *pgdat = page_pgdat(page);
 743	struct lruvec *lruvec;
 744
 745	rcu_read_lock();
 746	memcg = page_memcg(head);
 747	/* Untracked pages have no memcg, no lruvec. Update only the node */
 748	if (!memcg) {
 749		rcu_read_unlock();
 750		__mod_node_page_state(pgdat, idx, val);
 751		return;
 752	}
 753
 754	lruvec = mem_cgroup_lruvec(memcg, pgdat);
 755	__mod_lruvec_state(lruvec, idx, val);
 756	rcu_read_unlock();
 757}
 758EXPORT_SYMBOL(__mod_lruvec_page_state);
 759
 760void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
 761{
 762	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
 763	struct mem_cgroup *memcg;
 764	struct lruvec *lruvec;
 765
 766	rcu_read_lock();
 767	memcg = mem_cgroup_from_obj(p);
 768
 769	/*
 770	 * Untracked pages have no memcg, no lruvec. Update only the
 771	 * node. If we reparent the slab objects to the root memcg,
 772	 * when we free the slab object, we need to update the per-memcg
 773	 * vmstats to keep it correct for the root memcg.
 774	 */
 775	if (!memcg) {
 776		__mod_node_page_state(pgdat, idx, val);
 777	} else {
 778		lruvec = mem_cgroup_lruvec(memcg, pgdat);
 779		__mod_lruvec_state(lruvec, idx, val);
 780	}
 781	rcu_read_unlock();
 782}
 783
 784/*
 785 * mod_objcg_mlstate() may be called with irqs enabled, so
 786 * mod_memcg_lruvec_state() should be used.
 787 */
 788static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
 789				     struct pglist_data *pgdat,
 790				     enum node_stat_item idx, int nr)
 791{
 792	struct mem_cgroup *memcg;
 793	struct lruvec *lruvec;
 794
 795	rcu_read_lock();
 796	memcg = obj_cgroup_memcg(objcg);
 797	lruvec = mem_cgroup_lruvec(memcg, pgdat);
 798	mod_memcg_lruvec_state(lruvec, idx, nr);
 799	rcu_read_unlock();
 800}
 801
 802/**
 803 * __count_memcg_events - account VM events in a cgroup
 804 * @memcg: the memory cgroup
 805 * @idx: the event item
 806 * @count: the number of events that occurred
 807 */
 808void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
 809			  unsigned long count)
 810{
 811	if (mem_cgroup_disabled())
 812		return;
 813
 814	__this_cpu_add(memcg->vmstats_percpu->events[idx], count);
 815	cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
 816}
 817
 818static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
 819{
 820	return READ_ONCE(memcg->vmstats.events[event]);
 821}
 822
 823static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
 824{
 825	long x = 0;
 826	int cpu;
 827
 828	for_each_possible_cpu(cpu)
 829		x += per_cpu(memcg->vmstats_percpu->events[event], cpu);
 830	return x;
 831}
 832
 833static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 834					 struct page *page,
 835					 int nr_pages)
 836{
 837	/* pagein of a big page is one event, so ignore the page size */
 838	if (nr_pages > 0)
 839		__count_memcg_events(memcg, PGPGIN, 1);
 840	else {
 841		__count_memcg_events(memcg, PGPGOUT, 1);
 842		nr_pages = -nr_pages; /* for event */
 843	}
 844
 845	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
 846}
 847
 848static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
 849				       enum mem_cgroup_events_target target)
 850{
 851	unsigned long val, next;
 852
 853	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
 854	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
 855	/* from time_after() in jiffies.h */
 856	if ((long)(next - val) < 0) {
 857		switch (target) {
 858		case MEM_CGROUP_TARGET_THRESH:
 859			next = val + THRESHOLDS_EVENTS_TARGET;
 860			break;
 861		case MEM_CGROUP_TARGET_SOFTLIMIT:
 862			next = val + SOFTLIMIT_EVENTS_TARGET;
 863			break;
 864		default:
 865			break;
 866		}
 867		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
 868		return true;
 869	}
 870	return false;
 871}
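    /*
     * The (long)(next - val) < 0 test above is the wraparound-safe
     * comparison familiar from time_after(): e.g. if val sits near
     * ULONG_MAX and next = val + 128 has wrapped past zero, next - val
     * is still 128 in unsigned arithmetic, so the target is correctly
     * treated as not yet reached.
     */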
 872
 873/*
 874 * Check events in order.
 875 *
 876 */
 877static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 878{
 879	/* threshold event is triggered at a finer grain than the soft limit */
 880	if (unlikely(mem_cgroup_event_ratelimit(memcg,
 881						MEM_CGROUP_TARGET_THRESH))) {
 882		bool do_softlimit;
 883
 884		do_softlimit = mem_cgroup_event_ratelimit(memcg,
 885						MEM_CGROUP_TARGET_SOFTLIMIT);
 886		mem_cgroup_threshold(memcg);
 887		if (unlikely(do_softlimit))
 888			mem_cgroup_update_tree(memcg, page);
 889	}
 890}
 891
 892struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 893{
 894	/*
 895	 * mm_update_next_owner() may clear mm->owner to NULL
 896	 * if it races with swapoff, page migration, etc.
 897	 * So this can be called with p == NULL.
 898	 */
 899	if (unlikely(!p))
 900		return NULL;
 901
 902	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
 903}
 904EXPORT_SYMBOL(mem_cgroup_from_task);
 905
 906static __always_inline struct mem_cgroup *active_memcg(void)
 907{
 908	if (in_interrupt())
 909		return this_cpu_read(int_active_memcg);
 910	else
 911		return current->active_memcg;
 912}
 913
 914/**
 915 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 916 * @mm: mm from which memcg should be extracted. It can be NULL.
 917 *
 918 * Obtain a reference on mm->memcg and return it if successful. If mm
 919 * is NULL, then the memcg is chosen as follows:
 920 * 1) The active memcg, if set.
 921 * 2) current->mm->memcg, if available
 922 * 3) root memcg
 923 * If mem_cgroup is disabled, NULL is returned.
 924 */
 925struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
 926{
 927	struct mem_cgroup *memcg;
 928
 929	if (mem_cgroup_disabled())
 930		return NULL;
 931
 932	/*
 933	 * Page cache insertions can happen without an
 934	 * actual mm context, e.g. during disk probing
 935	 * on boot, loopback IO, acct() writes etc.
 936	 *
 937	 * No need to css_get on root memcg as the reference
 938	 * counting is disabled on the root level in the
 939	 * cgroup core. See CSS_NO_REF.
 940	 */
 941	if (unlikely(!mm)) {
 942		memcg = active_memcg();
 943		if (unlikely(memcg)) {
 944			/* remote memcg must hold a ref */
 945			css_get(&memcg->css);
 946			return memcg;
 947		}
 948		mm = current->mm;
 949		if (unlikely(!mm))
 950			return root_mem_cgroup;
 951	}
 952
 953	rcu_read_lock();
 954	do {
 955		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
 956		if (unlikely(!memcg))
 957			memcg = root_mem_cgroup;
 958	} while (!css_tryget(&memcg->css));
 959	rcu_read_unlock();
 960	return memcg;
 961}
 962EXPORT_SYMBOL(get_mem_cgroup_from_mm);
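    /*
     * Usage sketch (hypothetical caller): the reference obtained here
     * must be dropped with css_put() once the memcg is no longer
     * needed; this is safe even for root_mem_cgroup thanks to
     * CSS_NO_REF:
     *
     *	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(current->mm);
     *
     *	if (memcg) {
     *		...
     *		css_put(&memcg->css);
     *	}
     */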
 963
 964static __always_inline bool memcg_kmem_bypass(void)
 965{
 966	/* Allow remote memcg charging from any context. */
 967	if (unlikely(active_memcg()))
 968		return false;
 969
 970	/* Memcg to charge can't be determined. */
 971	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
 972		return true;
 973
 974	return false;
 975}
 976
 977/**
 978 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 979 * @root: hierarchy root
 980 * @prev: previously returned memcg, NULL on first invocation
 981 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 982 *
 983 * Returns references to children of the hierarchy below @root, or
 984 * @root itself, or %NULL after a full round-trip.
 985 *
 986 * Caller must pass the return value in @prev on subsequent
 987 * invocations for reference counting, or use mem_cgroup_iter_break()
 988 * to cancel a hierarchy walk before the round-trip is complete.
 989 *
 990 * Reclaimers can specify a node in @reclaim to divide up the memcgs
 991 * in the hierarchy among all concurrent reclaimers operating on the
 992 * same node.
 993 */
 994struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 995				   struct mem_cgroup *prev,
 996				   struct mem_cgroup_reclaim_cookie *reclaim)
 997{
 998	struct mem_cgroup_reclaim_iter *iter;
 999	struct cgroup_subsys_state *css = NULL;
1000	struct mem_cgroup *memcg = NULL;
1001	struct mem_cgroup *pos = NULL;
1002
1003	if (mem_cgroup_disabled())
1004		return NULL;
1005
1006	if (!root)
1007		root = root_mem_cgroup;
1008
1009	if (prev && !reclaim)
1010		pos = prev;
1011
1012	rcu_read_lock();
1013
1014	if (reclaim) {
1015		struct mem_cgroup_per_node *mz;
1016
1017		mz = root->nodeinfo[reclaim->pgdat->node_id];
1018		iter = &mz->iter;
1019
1020		if (prev && reclaim->generation != iter->generation)
1021			goto out_unlock;
1022
1023		while (1) {
1024			pos = READ_ONCE(iter->position);
1025			if (!pos || css_tryget(&pos->css))
1026				break;
1027			/*
1028			 * css reference reached zero, so iter->position will
1029			 * be cleared by ->css_released. However, we should not
1030			 * rely on this happening soon, because ->css_released
1031			 * is called from a work queue, and by busy-waiting we
1032			 * might block it. So we clear iter->position right
1033			 * away.
1034			 */
1035			(void)cmpxchg(&iter->position, pos, NULL);
1036		}
1037	}
1038
1039	if (pos)
1040		css = &pos->css;
1041
1042	for (;;) {
1043		css = css_next_descendant_pre(css, &root->css);
1044		if (!css) {
1045			/*
1046			 * Reclaimers share the hierarchy walk, and a
1047			 * new one might jump in right at the end of
1048			 * the hierarchy - make sure they see at least
1049			 * one group and restart from the beginning.
1050			 */
1051			if (!prev)
1052				continue;
1053			break;
1054		}
1055
1056		/*
1057		 * Verify the css and acquire a reference.  The root
1058		 * is provided by the caller, so we know it's alive
1059		 * and kicking, and don't take an extra reference.
1060		 */
1061		memcg = mem_cgroup_from_css(css);
1062
1063		if (css == &root->css)
1064			break;
1065
1066		if (css_tryget(css))
1067			break;
1068
1069		memcg = NULL;
1070	}
1071
1072	if (reclaim) {
1073		/*
1074		 * The position could have already been updated by a competing
1075		 * thread, so check that the value hasn't changed since we read
1076		 * it to avoid reclaiming from the same cgroup twice.
1077		 */
1078		(void)cmpxchg(&iter->position, pos, memcg);
1079
1080		if (pos)
1081			css_put(&pos->css);
1082
1083		if (!memcg)
1084			iter->generation++;
1085		else if (!prev)
1086			reclaim->generation = iter->generation;
1087	}
1088
1089out_unlock:
1090	rcu_read_unlock();
1091	if (prev && prev != root)
1092		css_put(&prev->css);
1093
1094	return memcg;
1095}
1096
1097/**
1098 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1099 * @root: hierarchy root
1100 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1101 */
1102void mem_cgroup_iter_break(struct mem_cgroup *root,
1103			   struct mem_cgroup *prev)
1104{
1105	if (!root)
1106		root = root_mem_cgroup;
1107	if (prev && prev != root)
1108		css_put(&prev->css);
1109}
1110
1111static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1112					struct mem_cgroup *dead_memcg)
1113{
1114	struct mem_cgroup_reclaim_iter *iter;
1115	struct mem_cgroup_per_node *mz;
1116	int nid;
1117
1118	for_each_node(nid) {
1119		mz = from->nodeinfo[nid];
1120		iter = &mz->iter;
1121		cmpxchg(&iter->position, dead_memcg, NULL);
1122	}
1123}
1124
1125static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1126{
1127	struct mem_cgroup *memcg = dead_memcg;
1128	struct mem_cgroup *last;
1129
1130	do {
1131		__invalidate_reclaim_iterators(memcg, dead_memcg);
1132		last = memcg;
1133	} while ((memcg = parent_mem_cgroup(memcg)));
1134
1135	/*
1136	 * When cgroup1 non-hierarchy mode is used,
1137	 * parent_mem_cgroup() does not walk all the way up to the
1138	 * cgroup root (root_mem_cgroup). So we have to handle
1139	 * dead_memcg from cgroup root separately.
1140	 */
1141	if (last != root_mem_cgroup)
1142		__invalidate_reclaim_iterators(root_mem_cgroup,
1143						dead_memcg);
1144}
1145
1146/**
1147 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1148 * @memcg: hierarchy root
1149 * @fn: function to call for each task
1150 * @arg: argument passed to @fn
1151 *
1152 * This function iterates over tasks attached to @memcg or to any of its
1153 * descendants and calls @fn for each task. If @fn returns a non-zero
1154 * value, the function breaks the iteration loop and returns the value.
1155 * Otherwise, it will iterate over all tasks and return 0.
1156 *
1157 * This function must not be called for the root memory cgroup.
1158 */
1159int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1160			  int (*fn)(struct task_struct *, void *), void *arg)
1161{
1162	struct mem_cgroup *iter;
1163	int ret = 0;
1164
1165	BUG_ON(memcg == root_mem_cgroup);
1166
1167	for_each_mem_cgroup_tree(iter, memcg) {
1168		struct css_task_iter it;
1169		struct task_struct *task;
1170
1171		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1172		while (!ret && (task = css_task_iter_next(&it)))
1173			ret = fn(task, arg);
1174		css_task_iter_end(&it);
1175		if (ret) {
1176			mem_cgroup_iter_break(memcg, iter);
1177			break;
1178		}
1179	}
1180	return ret;
1181}
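    /*
     * Usage sketch with a hypothetical callback: @fn returning 0
     * continues the walk, any non-zero value stops it and is passed
     * back to the caller:
     *
     *	static int count_one(struct task_struct *task, void *arg)
     *	{
     *		(*(int *)arg)++;
     *		return 0;
     *	}
     *
     *	int nr = 0;
     *	mem_cgroup_scan_tasks(memcg, count_one, &nr);
     */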
1182
1183#ifdef CONFIG_DEBUG_VM
1184void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
1185{
1186	struct mem_cgroup *memcg;
1187
1188	if (mem_cgroup_disabled())
1189		return;
1190
1191	memcg = page_memcg(page);
1192
1193	if (!memcg)
1194		VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != root_mem_cgroup, page);
1195	else
1196		VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != memcg, page);
1197}
1198#endif
1199
1200/**
1201 * lock_page_lruvec - lock and return lruvec for a given page.
1202 * @page: the page
1203 *
1204 * These functions are safe to use under any of the following conditions:
1205 * - page locked
1206 * - PageLRU cleared
1207 * - lock_page_memcg()
1208 * - page->_refcount is zero
1209 */
1210struct lruvec *lock_page_lruvec(struct page *page)
1211{
1212	struct lruvec *lruvec;
1213
1214	lruvec = mem_cgroup_page_lruvec(page);
1215	spin_lock(&lruvec->lru_lock);
1216
1217	lruvec_memcg_debug(lruvec, page);
1218
1219	return lruvec;
1220}
1221
1222struct lruvec *lock_page_lruvec_irq(struct page *page)
1223{
1224	struct lruvec *lruvec;
1225
1226	lruvec = mem_cgroup_page_lruvec(page);
1227	spin_lock_irq(&lruvec->lru_lock);
1228
1229	lruvec_memcg_debug(lruvec, page);
1230
1231	return lruvec;
1232}
1233
1234struct lruvec *lock_page_lruvec_irqsave(struct page *page, unsigned long *flags)
1235{
1236	struct lruvec *lruvec;
1237
1238	lruvec = mem_cgroup_page_lruvec(page);
1239	spin_lock_irqsave(&lruvec->lru_lock, *flags);
1240
1241	lruvec_memcg_debug(lruvec, page);
1242
1243	return lruvec;
1244}
1245
1246/**
1247 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1248 * @lruvec: mem_cgroup per zone lru vector
1249 * @lru: index of lru list the page is sitting on
1250 * @zid: zone id of the accounted pages
1251 * @nr_pages: positive when adding or negative when removing
1252 *
1253 * This function must be called under lru_lock, just before a page is added
1254 * to or just after a page is removed from an lru list (that ordering being
1255 * so as to allow it to check that lru_size 0 is consistent with list_empty).
1256 */
1257void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1258				int zid, int nr_pages)
1259{
1260	struct mem_cgroup_per_node *mz;
1261	unsigned long *lru_size;
1262	long size;
1263
1264	if (mem_cgroup_disabled())
1265		return;
1266
1267	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1268	lru_size = &mz->lru_zone_size[zid][lru];
1269
1270	if (nr_pages < 0)
1271		*lru_size += nr_pages;
1272
1273	size = *lru_size;
1274	if (WARN_ONCE(size < 0,
1275		"%s(%p, %d, %d): lru_size %ld\n",
1276		__func__, lruvec, lru, nr_pages, size)) {
1277		VM_BUG_ON(1);
1278		*lru_size = 0;
1279	}
1280
1281	if (nr_pages > 0)
1282		*lru_size += nr_pages;
1283}
1284
1285/**
1286 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1287 * @memcg: the memory cgroup
1288 *
1289 * Returns the maximum amount of memory @memcg can be charged with, in
1290 * pages.
1291 */
1292static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1293{
1294	unsigned long margin = 0;
1295	unsigned long count;
1296	unsigned long limit;
1297
1298	count = page_counter_read(&memcg->memory);
1299	limit = READ_ONCE(memcg->memory.max);
1300	if (count < limit)
1301		margin = limit - count;
1302
1303	if (do_memsw_account()) {
1304		count = page_counter_read(&memcg->memsw);
1305		limit = READ_ONCE(memcg->memsw.max);
1306		if (count < limit)
1307			margin = min(margin, limit - count);
1308		else
1309			margin = 0;
1310	}
1311
1312	return margin;
1313}
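    /*
     * Example: with memory.max = 1000 pages and usage = 900, the margin
     * is 100 pages. If legacy memsw accounting is active with
     * memsw.max = 950 and memsw usage = 920, the margin shrinks to
     * min(100, 30) = 30 pages.
     */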
1314
1315/*
1316 * A routine for checking whether "mem" is under move_account().
1317 *
1318 * Checks whether a cgroup is mc.from or mc.to, or under the hierarchy of
1319 * moving cgroups. This is for waiting at high memory pressure
1320 * caused by "move".
1321 */
1322static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1323{
1324	struct mem_cgroup *from;
1325	struct mem_cgroup *to;
1326	bool ret = false;
1327	/*
1328	 * Unlike task_move routines, we access mc.to and mc.from without
1329	 * mutual exclusion by cgroup_mutex. Here, we take the spinlock instead.
1330	 */
1331	spin_lock(&mc.lock);
1332	from = mc.from;
1333	to = mc.to;
1334	if (!from)
1335		goto unlock;
1336
1337	ret = mem_cgroup_is_descendant(from, memcg) ||
1338		mem_cgroup_is_descendant(to, memcg);
1339unlock:
1340	spin_unlock(&mc.lock);
1341	return ret;
1342}
1343
1344static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1345{
1346	if (mc.moving_task && current != mc.moving_task) {
1347		if (mem_cgroup_under_move(memcg)) {
1348			DEFINE_WAIT(wait);
1349			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1350			/* moving charge context might have finished. */
1351			if (mc.moving_task)
1352				schedule();
1353			finish_wait(&mc.waitq, &wait);
1354			return true;
1355		}
1356	}
1357	return false;
1358}
1359
1360struct memory_stat {
1361	const char *name;
1362	unsigned int idx;
1363};
1364
1365static const struct memory_stat memory_stats[] = {
1366	{ "anon",			NR_ANON_MAPPED			},
1367	{ "file",			NR_FILE_PAGES			},
1368	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
1369	{ "pagetables",			NR_PAGETABLE			},
1370	{ "percpu",			MEMCG_PERCPU_B			},
1371	{ "sock",			MEMCG_SOCK			},
1372	{ "shmem",			NR_SHMEM			},
1373	{ "file_mapped",		NR_FILE_MAPPED			},
1374	{ "file_dirty",			NR_FILE_DIRTY			},
1375	{ "file_writeback",		NR_WRITEBACK			},
1376#ifdef CONFIG_SWAP
1377	{ "swapcached",			NR_SWAPCACHE			},
1378#endif
1379#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1380	{ "anon_thp",			NR_ANON_THPS			},
1381	{ "file_thp",			NR_FILE_THPS			},
1382	{ "shmem_thp",			NR_SHMEM_THPS			},
1383#endif
1384	{ "inactive_anon",		NR_INACTIVE_ANON		},
1385	{ "active_anon",		NR_ACTIVE_ANON			},
1386	{ "inactive_file",		NR_INACTIVE_FILE		},
1387	{ "active_file",		NR_ACTIVE_FILE			},
1388	{ "unevictable",		NR_UNEVICTABLE			},
1389	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B		},
1390	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B		},
1391
1392	/* The memory events */
1393	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON		},
1394	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE		},
1395	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON	},
1396	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE	},
1397	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON		},
1398	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE		},
1399	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM		},
1400};
1401
1402/* Translate stat items to the correct unit for memory.stat output */
1403static int memcg_page_state_unit(int item)
1404{
1405	switch (item) {
1406	case MEMCG_PERCPU_B:
1407	case NR_SLAB_RECLAIMABLE_B:
1408	case NR_SLAB_UNRECLAIMABLE_B:
1409	case WORKINGSET_REFAULT_ANON:
1410	case WORKINGSET_REFAULT_FILE:
1411	case WORKINGSET_ACTIVATE_ANON:
1412	case WORKINGSET_ACTIVATE_FILE:
1413	case WORKINGSET_RESTORE_ANON:
1414	case WORKINGSET_RESTORE_FILE:
1415	case WORKINGSET_NODERECLAIM:
1416		return 1;
1417	case NR_KERNEL_STACK_KB:
1418		return SZ_1K;
1419	default:
1420		return PAGE_SIZE;
1421	}
1422}
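    /*
     * Example: kernel stacks are accounted in kilobytes, so a raw
     * counter value of 16 is reported as 16 * SZ_1K = 16384 in
     * memory.stat; slab and percpu counters are already in bytes and
     * the workingset entries are plain event counts (unit 1), while
     * everything else is converted from pages via PAGE_SIZE.
     */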
1423
1424static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1425						    int item)
1426{
1427	return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
1428}
1429
1430static char *memory_stat_format(struct mem_cgroup *memcg)
1431{
1432	struct seq_buf s;
1433	int i;
1434
1435	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
1436	if (!s.buffer)
1437		return NULL;
1438
1439	/*
1440	 * Provide statistics on the state of the memory subsystem as
1441	 * well as cumulative event counters that show past behavior.
1442	 *
1443	 * This list is ordered following a combination of these gradients:
1444	 * 1) generic big picture -> specifics and details
1445	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1446	 *
1447	 * Current memory state:
1448	 */
1449	cgroup_rstat_flush(memcg->css.cgroup);
1450
1451	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1452		u64 size;
1453
1454		size = memcg_page_state_output(memcg, memory_stats[i].idx);
1455		seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);
1456
1457		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1458			size += memcg_page_state_output(memcg,
1459							NR_SLAB_RECLAIMABLE_B);
1460			seq_buf_printf(&s, "slab %llu\n", size);
1461		}
1462	}
1463
1464	/* Accumulated memory events */
1465
1466	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
1467		       memcg_events(memcg, PGFAULT));
1468	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
1469		       memcg_events(memcg, PGMAJFAULT));
1470	seq_buf_printf(&s, "%s %lu\n",  vm_event_name(PGREFILL),
1471		       memcg_events(memcg, PGREFILL));
1472	seq_buf_printf(&s, "pgscan %lu\n",
1473		       memcg_events(memcg, PGSCAN_KSWAPD) +
1474		       memcg_events(memcg, PGSCAN_DIRECT));
1475	seq_buf_printf(&s, "pgsteal %lu\n",
1476		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1477		       memcg_events(memcg, PGSTEAL_DIRECT));
1478	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
1479		       memcg_events(memcg, PGACTIVATE));
1480	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
1481		       memcg_events(memcg, PGDEACTIVATE));
1482	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
1483		       memcg_events(memcg, PGLAZYFREE));
1484	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
1485		       memcg_events(memcg, PGLAZYFREED));
1486
1487#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1488	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
1489		       memcg_events(memcg, THP_FAULT_ALLOC));
1490	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
1491		       memcg_events(memcg, THP_COLLAPSE_ALLOC));
1492#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1493
1494	/* The above should easily fit into one page */
1495	WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1496
1497	return s.buffer;
1498}
1499
1500#define K(x) ((x) << (PAGE_SHIFT-10))
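    /*
     * e.g. with 4KB pages PAGE_SHIFT is 12, so K(x) is x << 2:
     * K(25600) converts 25600 pages into 102400kB.
     */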
1501/**
1502 * mem_cgroup_print_oom_context: Print OOM information relevant to
1503 * memory controller.
1504 * @memcg: The memory cgroup that went over limit
1505 * @p: Task that is going to be killed
1506 *
1507 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1508 * enabled
1509 */
1510void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1511{
1512	rcu_read_lock();
1513
1514	if (memcg) {
1515		pr_cont(",oom_memcg=");
1516		pr_cont_cgroup_path(memcg->css.cgroup);
1517	} else
1518		pr_cont(",global_oom");
1519	if (p) {
1520		pr_cont(",task_memcg=");
1521		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1522	}
1523	rcu_read_unlock();
1524}
1525
1526/**
1527 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1528 * memory controller.
1529 * @memcg: The memory cgroup that went over limit
1530 */
1531void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1532{
1533	char *buf;
1534
1535	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1536		K((u64)page_counter_read(&memcg->memory)),
1537		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1538	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1539		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1540			K((u64)page_counter_read(&memcg->swap)),
1541			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1542	else {
1543		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1544			K((u64)page_counter_read(&memcg->memsw)),
1545			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1546		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1547			K((u64)page_counter_read(&memcg->kmem)),
1548			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1549	}
1550
1551	pr_info("Memory cgroup stats for ");
1552	pr_cont_cgroup_path(memcg->css.cgroup);
1553	pr_cont(":");
1554	buf = memory_stat_format(memcg);
1555	if (!buf)
1556		return;
1557	pr_info("%s", buf);
1558	kfree(buf);
1559}
1560
1561/*
1562 * Return the memory (and swap, if configured) limit for a memcg.
1563 */
1564unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1565{
1566	unsigned long max = READ_ONCE(memcg->memory.max);
1567
1568	if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
1569		if (mem_cgroup_swappiness(memcg))
1570			max += min(READ_ONCE(memcg->swap.max),
1571				   (unsigned long)total_swap_pages);
1572	} else { /* v1 */
1573		if (mem_cgroup_swappiness(memcg)) {
1574			/* Calculate swap excess capacity from memsw limit */
1575			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1576
1577			max += min(swap, (unsigned long)total_swap_pages);
1578		}
1579	}
1580	return max;
1581}
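    /*
     * Example: on cgroup2 with memory.max = 1000 pages, swap.max = 500
     * and ample physical swap, the ceiling is 1500 pages. On cgroup1
     * with memsw.max = 1200, the swap headroom is 1200 - 1000 = 200,
     * giving a ceiling of 1200 pages (again capped by
     * total_swap_pages).
     */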
1582
1583unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1584{
1585	return page_counter_read(&memcg->memory);
1586}
1587
1588static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1589				     int order)
1590{
1591	struct oom_control oc = {
1592		.zonelist = NULL,
1593		.nodemask = NULL,
1594		.memcg = memcg,
1595		.gfp_mask = gfp_mask,
1596		.order = order,
1597	};
1598	bool ret = true;
1599
1600	if (mutex_lock_killable(&oom_lock))
1601		return true;
1602
1603	if (mem_cgroup_margin(memcg) >= (1 << order))
1604		goto unlock;
1605
1606	/*
1607	 * A few threads which were not waiting at mutex_lock_killable() can
1608	 * fail to bail out. Therefore, check again after holding oom_lock.
1609	 */
1610	ret = should_force_charge() || out_of_memory(&oc);
1611
1612unlock:
1613	mutex_unlock(&oom_lock);
1614	return ret;
1615}
1616
1617static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1618				   pg_data_t *pgdat,
1619				   gfp_t gfp_mask,
1620				   unsigned long *total_scanned)
1621{
1622	struct mem_cgroup *victim = NULL;
1623	int total = 0;
1624	int loop = 0;
1625	unsigned long excess;
1626	unsigned long nr_scanned;
1627	struct mem_cgroup_reclaim_cookie reclaim = {
1628		.pgdat = pgdat,
1629	};
1630
1631	excess = soft_limit_excess(root_memcg);
1632
1633	while (1) {
1634		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1635		if (!victim) {
1636			loop++;
1637			if (loop >= 2) {
1638				/*
1639				 * If we have not been able to reclaim
1640				 * anything, it might be because there are
1641				 * no reclaimable pages under this hierarchy
1642				 */
1643				if (!total)
1644					break;
1645				/*
1646				 * We want to do more targeted reclaim.
1647				 * excess >> 2 is not so excessive that we
1648				 * reclaim too much, nor so small that we keep
1649				 * coming back to reclaim from this cgroup
1650				 */
1651				if (total >= (excess >> 2) ||
1652					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1653					break;
1654			}
1655			continue;
1656		}
1657		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1658					pgdat, &nr_scanned);
1659		*total_scanned += nr_scanned;
1660		if (!soft_limit_excess(root_memcg))
1661			break;
1662	}
1663	mem_cgroup_iter_break(root_memcg, victim);
1664	return total;
1665}
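    /*
     * Example of the termination rule above: with the root 400 pages
     * over its soft limit, a walk that has already reclaimed
     * excess >> 2 = 100 pages is allowed to stop after two full loops
     * rather than chasing the remaining excess indefinitely.
     */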
1666
1667#ifdef CONFIG_LOCKDEP
1668static struct lockdep_map memcg_oom_lock_dep_map = {
1669	.name = "memcg_oom_lock",
1670};
1671#endif
1672
1673static DEFINE_SPINLOCK(memcg_oom_lock);
1674
1675/*
1676 * Check whether the OOM killer is already running under our hierarchy.
1677 * If someone is running, return false.
1678 */
1679static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1680{
1681	struct mem_cgroup *iter, *failed = NULL;
1682
1683	spin_lock(&memcg_oom_lock);
1684
1685	for_each_mem_cgroup_tree(iter, memcg) {
1686		if (iter->oom_lock) {
1687			/*
1688			 * this subtree of our hierarchy is already locked
1689			 * so we cannot give a lock.
1690			 */
1691			failed = iter;
1692			mem_cgroup_iter_break(memcg, iter);
1693			break;
1694		} else
1695			iter->oom_lock = true;
1696	}
1697
1698	if (failed) {
1699		/*
1700		 * OK, we failed to lock the whole subtree, so we have
1701		 * to clean up what we set up before the failing subtree
1702		 */
1703		for_each_mem_cgroup_tree(iter, memcg) {
1704			if (iter == failed) {
1705				mem_cgroup_iter_break(memcg, iter);
1706				break;
1707			}
1708			iter->oom_lock = false;
1709		}
1710	} else
1711		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1712
1713	spin_unlock(&memcg_oom_lock);
1714
1715	return !failed;
1716}
1717
1718static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1719{
1720	struct mem_cgroup *iter;
1721
1722	spin_lock(&memcg_oom_lock);
1723	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1724	for_each_mem_cgroup_tree(iter, memcg)
1725		iter->oom_lock = false;
1726	spin_unlock(&memcg_oom_lock);
1727}
1728
1729static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1730{
1731	struct mem_cgroup *iter;
1732
1733	spin_lock(&memcg_oom_lock);
1734	for_each_mem_cgroup_tree(iter, memcg)
1735		iter->under_oom++;
1736	spin_unlock(&memcg_oom_lock);
1737}
1738
1739static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1740{
1741	struct mem_cgroup *iter;
1742
1743	/*
1744	 * Be careful about under_oom underflows because a child memcg
1745	 * could have been added after mem_cgroup_mark_under_oom.
1746	 */
1747	spin_lock(&memcg_oom_lock);
1748	for_each_mem_cgroup_tree(iter, memcg)
1749		if (iter->under_oom > 0)
1750			iter->under_oom--;
1751	spin_unlock(&memcg_oom_lock);
1752}
1753
1754static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1755
1756struct oom_wait_info {
1757	struct mem_cgroup *memcg;
1758	wait_queue_entry_t	wait;
1759};
1760
1761static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1762	unsigned mode, int sync, void *arg)
1763{
1764	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1765	struct mem_cgroup *oom_wait_memcg;
1766	struct oom_wait_info *oom_wait_info;
1767
1768	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1769	oom_wait_memcg = oom_wait_info->memcg;
1770
1771	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1772	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1773		return 0;
1774	return autoremove_wake_function(wait, mode, sync, arg);
1775}
1776
1777static void memcg_oom_recover(struct mem_cgroup *memcg)
1778{
1779	/*
1780	 * For the following lockless ->under_oom test, the only required
1781	 * guarantee is that it must see the state asserted by an OOM when
1782	 * this function is called as a result of userland actions
1783	 * triggered by the notification of the OOM.  This is trivially
1784	 * achieved by invoking mem_cgroup_mark_under_oom() before
1785	 * triggering notification.
1786	 */
1787	if (memcg && memcg->under_oom)
1788		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1789}
1790
1791enum oom_status {
1792	OOM_SUCCESS,
1793	OOM_FAILED,
1794	OOM_ASYNC,
1795	OOM_SKIPPED
1796};
1797
1798static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1799{
1800	enum oom_status ret;
1801	bool locked;
1802
1803	if (order > PAGE_ALLOC_COSTLY_ORDER)
1804		return OOM_SKIPPED;
1805
1806	memcg_memory_event(memcg, MEMCG_OOM);
1807
1808	/*
1809	 * We are in the middle of the charge context here, so we
1810	 * don't want to block when potentially sitting on a callstack
1811	 * that holds all kinds of filesystem and mm locks.
1812	 *
1813	 * cgroup1 allows disabling the OOM killer and waiting for outside
1814	 * handling until the charge can succeed; remember the context and put
1815	 * the task to sleep at the end of the page fault when all locks are
1816	 * released.
1817	 *
1818	 * On the other hand, in-kernel OOM killer allows for an async victim
1819	 * memory reclaim (oom_reaper) and that means that we are not solely
1820	 * relying on the oom victim to make a forward progress and we can
1821	 * invoke the oom killer here.
1822	 *
1823	 * Please note that mem_cgroup_out_of_memory might fail to find a
1824	 * victim and then we have to bail out from the charge path.
1825	 */
1826	if (memcg->oom_kill_disable) {
1827		if (!current->in_user_fault)
1828			return OOM_SKIPPED;
1829		css_get(&memcg->css);
1830		current->memcg_in_oom = memcg;
1831		current->memcg_oom_gfp_mask = mask;
1832		current->memcg_oom_order = order;
1833
1834		return OOM_ASYNC;
1835	}
1836
1837	mem_cgroup_mark_under_oom(memcg);
1838
1839	locked = mem_cgroup_oom_trylock(memcg);
1840
1841	if (locked)
1842		mem_cgroup_oom_notify(memcg);
1843
1844	mem_cgroup_unmark_under_oom(memcg);
1845	if (mem_cgroup_out_of_memory(memcg, mask, order))
1846		ret = OOM_SUCCESS;
1847	else
1848		ret = OOM_FAILED;
1849
1850	if (locked)
1851		mem_cgroup_oom_unlock(memcg);
1852
1853	return ret;
1854}
1855
1856/**
1857 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1858 * @handle: actually kill/wait or just clean up the OOM state
1859 *
1860 * This has to be called at the end of a page fault if the memcg OOM
1861 * handler was enabled.
1862 *
1863 * Memcg supports userspace OOM handling where failed allocations must
1864 * sleep on a waitqueue until the userspace task resolves the
1865 * situation.  Sleeping directly in the charge context with all kinds
1866 * of locks held is not a good idea; instead we remember an OOM state
1867 * in the task and mem_cgroup_oom_synchronize() has to be called at
1868 * the end of the page fault to complete the OOM handling.
1869 *
1870 * Returns %true if an ongoing memcg OOM situation was detected and
1871 * completed, %false otherwise.
1872 */
1873bool mem_cgroup_oom_synchronize(bool handle)
1874{
1875	struct mem_cgroup *memcg = current->memcg_in_oom;
1876	struct oom_wait_info owait;
1877	bool locked;
1878
1879	/* OOM is global, do not handle */
1880	if (!memcg)
1881		return false;
1882
1883	if (!handle)
1884		goto cleanup;
1885
1886	owait.memcg = memcg;
1887	owait.wait.flags = 0;
1888	owait.wait.func = memcg_oom_wake_function;
1889	owait.wait.private = current;
1890	INIT_LIST_HEAD(&owait.wait.entry);
1891
1892	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1893	mem_cgroup_mark_under_oom(memcg);
1894
1895	locked = mem_cgroup_oom_trylock(memcg);
1896
1897	if (locked)
1898		mem_cgroup_oom_notify(memcg);
1899
1900	if (locked && !memcg->oom_kill_disable) {
1901		mem_cgroup_unmark_under_oom(memcg);
1902		finish_wait(&memcg_oom_waitq, &owait.wait);
1903		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1904					 current->memcg_oom_order);
1905	} else {
1906		schedule();
1907		mem_cgroup_unmark_under_oom(memcg);
1908		finish_wait(&memcg_oom_waitq, &owait.wait);
1909	}
1910
1911	if (locked) {
1912		mem_cgroup_oom_unlock(memcg);
1913		/*
1914		 * There is no guarantee that an OOM-lock contender
1915		 * sees the wakeups triggered by the OOM kill
1916		 * uncharges.  Wake any sleepers explicitly.
1917		 */
1918		memcg_oom_recover(memcg);
1919	}
1920cleanup:
1921	current->memcg_in_oom = NULL;
1922	css_put(&memcg->css);
1923	return true;
1924}
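/*
 * Editorial sketch (not part of the original file): the OOM_ASYNC state
 * set up by mem_cgroup_oom() is resolved by calling
 * mem_cgroup_oom_synchronize() once the fault path has released its
 * locks; in the kernel proper that call is made from
 * pagefault_out_of_memory().  The hypothetical helper below only
 * illustrates the contract.
 */
static inline void __maybe_unused example_fault_exit(void)
{
	/* Kill/wait if a memcg OOM is in progress; no-op otherwise. */
	if (mem_cgroup_oom_synchronize(true))
		return;	/* the fault can be retried after the OOM settles */
}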
1925
1926/**
1927 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1928 * @victim: task to be killed by the OOM killer
1929 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1930 *
1931 * Returns a pointer to a memory cgroup, which has to be cleaned up
1932 * by killing all OOM-killable tasks belonging to it.
1933 *
1934 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1935 */
1936struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1937					    struct mem_cgroup *oom_domain)
1938{
1939	struct mem_cgroup *oom_group = NULL;
1940	struct mem_cgroup *memcg;
1941
1942	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1943		return NULL;
1944
1945	if (!oom_domain)
1946		oom_domain = root_mem_cgroup;
1947
1948	rcu_read_lock();
1949
1950	memcg = mem_cgroup_from_task(victim);
1951	if (memcg == root_mem_cgroup)
1952		goto out;
1953
1954	/*
1955	 * If the victim task has been asynchronously moved to a different
1956	 * memory cgroup, we might end up killing tasks outside oom_domain.
1957	 * In this case it's better to ignore memory.group.oom.
1958	 */
1959	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
1960		goto out;
1961
1962	/*
1963	 * Traverse the memory cgroup hierarchy from the victim task's
1964	 * cgroup up to the OOMing cgroup (or root) to find the
1965	 * highest-level memory cgroup with oom.group set.
1966	 */
1967	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1968		if (memcg->oom_group)
1969			oom_group = memcg;
1970
1971		if (memcg == oom_domain)
1972			break;
1973	}
1974
1975	if (oom_group)
1976		css_get(&oom_group->css);
1977out:
1978	rcu_read_unlock();
1979
1980	return oom_group;
1981}
1982
1983void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1984{
1985	pr_info("Tasks in ");
1986	pr_cont_cgroup_path(memcg->css.cgroup);
1987	pr_cont(" are going to be killed due to memory.oom.group set\n");
1988}
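/*
 * Editorial sketch (not part of the original file): a hypothetical OOM
 * killer caller pairs mem_cgroup_get_oom_group() with mem_cgroup_put()
 * once the whole group has been dealt with, roughly as follows.
 */
static void __maybe_unused example_kill_oom_group(struct task_struct *victim,
						  struct mem_cgroup *domain)
{
	struct mem_cgroup *group = mem_cgroup_get_oom_group(victim, domain);

	if (group) {
		mem_cgroup_print_oom_group(group);
		/* ... kill every OOM-killable task belonging to @group ... */
		mem_cgroup_put(group);
	}
}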
1989
1990/**
1991 * lock_page_memcg - lock a page and memcg binding
1992 * @page: the page
1993 *
1994 * This function protects unlocked LRU pages from being moved to
1995 * another cgroup.
1996 *
1997 * It ensures the lifetime of the locked memcg. The caller is responsible
1998 * for the lifetime of the page.
1999 */
2000void lock_page_memcg(struct page *page)
2001{
2002	struct page *head = compound_head(page); /* rmap on tail pages */
2003	struct mem_cgroup *memcg;
2004	unsigned long flags;
2005
2006	/*
2007	 * The RCU lock is held throughout the transaction.  The fast
2008	 * path can get away without acquiring the memcg->move_lock
2009	 * because page moving starts with an RCU grace period.
2010	 */
2011	rcu_read_lock();
2012
2013	if (mem_cgroup_disabled())
2014		return;
2015again:
2016	memcg = page_memcg(head);
2017	if (unlikely(!memcg))
2018		return;
2019
2020#ifdef CONFIG_PROVE_LOCKING
2021	local_irq_save(flags);
2022	might_lock(&memcg->move_lock);
2023	local_irq_restore(flags);
2024#endif
2025
2026	if (atomic_read(&memcg->moving_account) <= 0)
2027		return;
2028
2029	spin_lock_irqsave(&memcg->move_lock, flags);
2030	if (memcg != page_memcg(head)) {
2031		spin_unlock_irqrestore(&memcg->move_lock, flags);
2032		goto again;
2033	}
2034
2035	/*
2036	 * When charge migration first begins, we can have multiple
2037	 * critical sections holding the fast-path RCU lock and one
2038	 * holding the slowpath move_lock. Track the task that holds the
2039	 * move_lock for unlock_page_memcg().
2040	 */
2041	memcg->move_lock_task = current;
2042	memcg->move_lock_flags = flags;
2043}
2044EXPORT_SYMBOL(lock_page_memcg);
2045
2046static void __unlock_page_memcg(struct mem_cgroup *memcg)
2047{
2048	if (memcg && memcg->move_lock_task == current) {
2049		unsigned long flags = memcg->move_lock_flags;
2050
2051		memcg->move_lock_task = NULL;
2052		memcg->move_lock_flags = 0;
2053
2054		spin_unlock_irqrestore(&memcg->move_lock, flags);
2055	}
2056
2057	rcu_read_unlock();
2058}
2059
2060/**
2061 * unlock_page_memcg - unlock a page and memcg binding
2062 * @page: the page
2063 */
2064void unlock_page_memcg(struct page *page)
2065{
2066	struct page *head = compound_head(page);
2067
2068	__unlock_page_memcg(page_memcg(head));
2069}
2070EXPORT_SYMBOL(unlock_page_memcg);
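/*
 * Editorial sketch (not part of the original file): a stat update on an
 * unlocked LRU page brackets its work with the pair above so that the
 * page->memcg binding cannot change underneath it.
 */
static inline void __maybe_unused example_stable_stat_update(struct page *page)
{
	lock_page_memcg(page);
	/* ... page_memcg(page) is stable within this section ... */
	unlock_page_memcg(page);
}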
2071
2072struct obj_stock {
2073#ifdef CONFIG_MEMCG_KMEM
2074	struct obj_cgroup *cached_objcg;
2075	struct pglist_data *cached_pgdat;
2076	unsigned int nr_bytes;
2077	int nr_slab_reclaimable_b;
2078	int nr_slab_unreclaimable_b;
2079#else
2080	int dummy[0];
2081#endif
2082};
2083
2084struct memcg_stock_pcp {
2085	struct mem_cgroup *cached; /* this is never the root cgroup */
2086	unsigned int nr_pages;
2087	struct obj_stock task_obj;
2088	struct obj_stock irq_obj;
2089
2090	struct work_struct work;
2091	unsigned long flags;
2092#define FLUSHING_CACHED_CHARGE	0
2093};
2094static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2095static DEFINE_MUTEX(percpu_charge_mutex);
2096
2097#ifdef CONFIG_MEMCG_KMEM
2098static void drain_obj_stock(struct obj_stock *stock);
2099static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2100				     struct mem_cgroup *root_memcg);
2101
2102#else
2103static inline void drain_obj_stock(struct obj_stock *stock)
2104{
2105}
2106static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2107				     struct mem_cgroup *root_memcg)
2108{
2109	return false;
2110}
2111#endif
2112
2113/*
2114 * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
2115 * sequence used in that case to access the object stock is slow. To optimize
2116 * for user context access, there are now two object stocks, one for task
2117 * context and one for interrupt context access.
2118 *
2119 * The task context object stock can be accessed merely by disabling
2120 * preemption, which is cheap in a non-preempt kernel. The interrupt context
2121 * object stock can only be accessed after disabling interrupts. User context
2122 * code can access the interrupt object stock, but not vice versa.
2123 */
2124static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
2125{
2126	struct memcg_stock_pcp *stock;
2127
2128	if (likely(in_task())) {
2129		*pflags = 0UL;
2130		preempt_disable();
2131		stock = this_cpu_ptr(&memcg_stock);
2132		return &stock->task_obj;
2133	}
2134
2135	local_irq_save(*pflags);
2136	stock = this_cpu_ptr(&memcg_stock);
2137	return &stock->irq_obj;
2138}
2139
2140static inline void put_obj_stock(unsigned long flags)
2141{
2142	if (likely(in_task()))
2143		preempt_enable();
2144	else
2145		local_irq_restore(flags);
2146}
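/*
 * Editorial sketch (not part of the original file): every obj_stock
 * access is bracketed by the pair above, which picks the task or irq
 * stock together with the cheapest sufficient form of exclusion.
 */
static inline void __maybe_unused example_obj_stock_access(void)
{
	unsigned long flags;
	struct obj_stock *stock __maybe_unused = get_obj_stock(&flags);

	/* ... operate on *stock; it cannot be switched out under us ... */
	put_obj_stock(flags);
}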
2147
2148/**
2149 * consume_stock: Try to consume stocked charge on this cpu.
2150 * @memcg: memcg to consume from.
2151 * @nr_pages: how many pages to charge.
2152 *
2153 * The charges will only happen if @memcg matches the current cpu's memcg
2154 * stock, and at least @nr_pages are available in that stock.  Failure to
2155 * service an allocation will refill the stock.
2156 *
2157 * Returns %true if successful, %false otherwise.
2158 */
2159static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2160{
2161	struct memcg_stock_pcp *stock;
2162	unsigned long flags;
2163	bool ret = false;
2164
2165	if (nr_pages > MEMCG_CHARGE_BATCH)
2166		return ret;
2167
2168	local_irq_save(flags);
2169
2170	stock = this_cpu_ptr(&memcg_stock);
2171	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2172		stock->nr_pages -= nr_pages;
2173		ret = true;
2174	}
2175
2176	local_irq_restore(flags);
2177
2178	return ret;
2179}
2180
2181/*
2182 * Return stocked charges to the page counters and reset the cached information.
2183 */
2184static void drain_stock(struct memcg_stock_pcp *stock)
2185{
2186	struct mem_cgroup *old = stock->cached;
2187
2188	if (!old)
2189		return;
2190
2191	if (stock->nr_pages) {
2192		page_counter_uncharge(&old->memory, stock->nr_pages);
2193		if (do_memsw_account())
2194			page_counter_uncharge(&old->memsw, stock->nr_pages);
2195		stock->nr_pages = 0;
2196	}
2197
2198	css_put(&old->css);
2199	stock->cached = NULL;
2200}
2201
2202static void drain_local_stock(struct work_struct *dummy)
2203{
2204	struct memcg_stock_pcp *stock;
2205	unsigned long flags;
2206
2207	/*
2208	 * The only protection from memory hotplug vs. drain_stock races is
2209	 * that we always operate on the local CPU stock here with IRQs disabled.
2210	 */
2211	local_irq_save(flags);
2212
2213	stock = this_cpu_ptr(&memcg_stock);
2214	drain_obj_stock(&stock->irq_obj);
2215	if (in_task())
2216		drain_obj_stock(&stock->task_obj);
2217	drain_stock(stock);
2218	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2219
2220	local_irq_restore(flags);
2221}
2222
2223/*
2224 * Cache charges (@nr_pages) in the local per-cpu area, to be consumed
2225 * by consume_stock() later.
2226 */
2227static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2228{
2229	struct memcg_stock_pcp *stock;
2230	unsigned long flags;
2231
2232	local_irq_save(flags);
2233
2234	stock = this_cpu_ptr(&memcg_stock);
2235	if (stock->cached != memcg) { /* reset if necessary */
2236		drain_stock(stock);
2237		css_get(&memcg->css);
2238		stock->cached = memcg;
2239	}
2240	stock->nr_pages += nr_pages;
2241
2242	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2243		drain_stock(stock);
2244
2245	local_irq_restore(flags);
2246}
2247
2248/*
2249 * Drain all per-CPU charge caches for the given root_memcg and the
2250 * subtree of the hierarchy under it.
2251 */
2252static void drain_all_stock(struct mem_cgroup *root_memcg)
2253{
2254	int cpu, curcpu;
2255
2256	/* If someone's already draining, avoid running more workers. */
2257	if (!mutex_trylock(&percpu_charge_mutex))
2258		return;
2259	/*
2260	 * Notify other cpus that a system-wide "drain" is running.
2261	 * We do not care about races with the cpu hotplug because cpu down
2262	 * as well as workers from this path always operate on the local
2263	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2264	 */
2265	curcpu = get_cpu();
2266	for_each_online_cpu(cpu) {
2267		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2268		struct mem_cgroup *memcg;
2269		bool flush = false;
2270
2271		rcu_read_lock();
2272		memcg = stock->cached;
2273		if (memcg && stock->nr_pages &&
2274		    mem_cgroup_is_descendant(memcg, root_memcg))
2275			flush = true;
2276		if (obj_stock_flush_required(stock, root_memcg))
2277			flush = true;
2278		rcu_read_unlock();
2279
2280		if (flush &&
2281		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2282			if (cpu == curcpu)
2283				drain_local_stock(&stock->work);
2284			else
2285				schedule_work_on(cpu, &stock->work);
2286		}
2287	}
2288	put_cpu();
2289	mutex_unlock(&percpu_charge_mutex);
2290}
2291
2292static void memcg_flush_lruvec_page_state(struct mem_cgroup *memcg, int cpu)
2293{
2294	int nid;
2295
2296	for_each_node(nid) {
2297		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
2298		unsigned long stat[NR_VM_NODE_STAT_ITEMS];
2299		struct batched_lruvec_stat *lstatc;
2300		int i;
2301
2302		lstatc = per_cpu_ptr(pn->lruvec_stat_cpu, cpu);
2303		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
2304			stat[i] = lstatc->count[i];
2305			lstatc->count[i] = 0;
2306		}
2307
2308		do {
2309			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
2310				atomic_long_add(stat[i], &pn->lruvec_stat[i]);
2311		} while ((pn = parent_nodeinfo(pn, nid)));
2312	}
2313}
2314
2315static int memcg_hotplug_cpu_dead(unsigned int cpu)
2316{
2317	struct memcg_stock_pcp *stock;
2318	struct mem_cgroup *memcg;
2319
2320	stock = &per_cpu(memcg_stock, cpu);
2321	drain_stock(stock);
2322
2323	for_each_mem_cgroup(memcg)
2324		memcg_flush_lruvec_page_state(memcg, cpu);
2325
2326	return 0;
2327}
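/*
 * Editorial note (not part of the original file): this callback is
 * registered during subsystem init, later in this file, roughly as
 *
 *	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead",
 *				  NULL, memcg_hotplug_cpu_dead);
 *
 * so that a dying CPU's cached stock is returned to the page counters.
 */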
2328
2329static unsigned long reclaim_high(struct mem_cgroup *memcg,
2330				  unsigned int nr_pages,
2331				  gfp_t gfp_mask)
2332{
2333	unsigned long nr_reclaimed = 0;
2334
2335	do {
2336		unsigned long pflags;
2337
2338		if (page_counter_read(&memcg->memory) <=
2339		    READ_ONCE(memcg->memory.high))
2340			continue;
2341
2342		memcg_memory_event(memcg, MEMCG_HIGH);
2343
2344		psi_memstall_enter(&pflags);
2345		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2346							     gfp_mask, true);
2347		psi_memstall_leave(&pflags);
2348	} while ((memcg = parent_mem_cgroup(memcg)) &&
2349		 !mem_cgroup_is_root(memcg));
2350
2351	return nr_reclaimed;
2352}
2353
2354static void high_work_func(struct work_struct *work)
2355{
2356	struct mem_cgroup *memcg;
2357
2358	memcg = container_of(work, struct mem_cgroup, high_work);
2359	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2360}
2361
2362/*
2363 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2364 * enough to cause a significant slowdown in most cases, while still
2365 * allowing diagnostics and tracing to proceed without becoming stuck.
2366 */
2367#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2368
2369/*
2370 * When calculating the delay, we use these on either side of the exponentiation
2371 * to maintain precision and scale to a reasonable number of jiffies (see the
2372 * table below).
2373 *
2374 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2375 *   overage ratio to a delay.
2376 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2377 *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2378 *   to produce a reasonable delay curve.
2379 *
2380 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2381 * reasonable delay curve compared to precision-adjusted overage, not
2382 * penalising heavily at first, but still making sure that growth beyond the
2383 * limit penalises misbehaving cgroups by slowing them down exponentially. For
2384 * example, with a high of 100 megabytes:
2385 *
2386 *  +-------+------------------------+
2387 *  | usage | time to allocate in ms |
2388 *  +-------+------------------------+
2389 *  | 100M  |                      0 |
2390 *  | 101M  |                      6 |
2391 *  | 102M  |                     25 |
2392 *  | 103M  |                     57 |
2393 *  | 104M  |                    102 |
2394 *  | 105M  |                    159 |
2395 *  | 106M  |                    230 |
2396 *  | 107M  |                    313 |
2397 *  | 108M  |                    409 |
2398 *  | 109M  |                    518 |
2399 *  | 110M  |                    639 |
2400 *  | 111M  |                    774 |
2401 *  | 112M  |                    921 |
2402 *  | 113M  |                   1081 |
2403 *  | 114M  |                   1254 |
2404 *  | 115M  |                   1439 |
2405 *  | 116M  |                   1638 |
2406 *  | 117M  |                   1849 |
2407 *  | 118M  |                   2000 |
2408 *  | 119M  |                   2000 |
2409 *  | 120M  |                   2000 |
2410 *  +-------+------------------------+
2411 */
2412#define MEMCG_DELAY_PRECISION_SHIFT 20
2413#define MEMCG_DELAY_SCALING_SHIFT 14
2414
2415static u64 calculate_overage(unsigned long usage, unsigned long high)
2416{
2417	u64 overage;
2418
2419	if (usage <= high)
2420		return 0;
2421
2422	/*
2423	 * Prevent division by 0 in overage calculation by acting as if
2424	 * it was a threshold of 1 page
2425	 */
2426	high = max(high, 1UL);
2427
2428	overage = usage - high;
2429	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2430	return div64_u64(overage, high);
2431}
2432
2433static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2434{
2435	u64 overage, max_overage = 0;
2436
2437	do {
2438		overage = calculate_overage(page_counter_read(&memcg->memory),
2439					    READ_ONCE(memcg->memory.high));
2440		max_overage = max(overage, max_overage);
2441	} while ((memcg = parent_mem_cgroup(memcg)) &&
2442		 !mem_cgroup_is_root(memcg));
2443
2444	return max_overage;
2445}
2446
2447static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2448{
2449	u64 overage, max_overage = 0;
2450
2451	do {
2452		overage = calculate_overage(page_counter_read(&memcg->swap),
2453					    READ_ONCE(memcg->swap.high));
2454		if (overage)
2455			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2456		max_overage = max(overage, max_overage);
2457	} while ((memcg = parent_mem_cgroup(memcg)) &&
2458		 !mem_cgroup_is_root(memcg));
2459
2460	return max_overage;
2461}
2462
2463/*
2464 * Get the number of jiffies that we should penalise a mischievous cgroup which
2465 * is exceeding its memory.high by checking both it and its ancestors.
2466 */
2467static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2468					  unsigned int nr_pages,
2469					  u64 max_overage)
2470{
2471	unsigned long penalty_jiffies;
2472
2473	if (!max_overage)
2474		return 0;
2475
2476	/*
2477	 * We use overage compared to memory.high to calculate the number of
2478	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2479	 * fairly lenient on small overages, and increasingly harsh when the
2480	 * memcg in question makes it clear that it has no intention of stopping
2481	 * its crazy behaviour, so we exponentially increase the delay based on
2482	 * overage amount.
2483	 */
2484	penalty_jiffies = max_overage * max_overage * HZ;
2485	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2486	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2487
2488	/*
2489	 * Factor in the task's own contribution to the overage, such that four
2490	 * N-sized allocations are throttled approximately the same as one
2491	 * 4N-sized allocation.
2492	 *
2493	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2494	 * larger the current charge batch is than that.
2495	 */
2496	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2497}
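/*
 * Editorial worked example (not part of the original file, assuming
 * HZ=1000 and 4K pages): the hypothetical helper below reproduces the
 * 101M row of the delay table further up using calculate_overage()
 * and the shifts defined above.
 */
static inline unsigned long __maybe_unused example_delay_101M(void)
{
	/* 101M of usage against a 100M memory.high, in 4K pages. */
	u64 overage = calculate_overage(101 * 256, 100 * 256);
	u64 penalty = overage * overage * HZ;	/* overage == 10485 */

	penalty >>= MEMCG_DELAY_PRECISION_SHIFT;
	penalty >>= MEMCG_DELAY_SCALING_SHIFT;
	return (unsigned long)penalty;	/* ~6 jiffies, i.e. ~6ms at HZ=1000 */
}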
2498
2499/*
2500 * Scheduled by try_charge() to be executed from the userland return
2501 * path, where it reclaims memory over the high limit.
2502 */
2503void mem_cgroup_handle_over_high(void)
2504{
2505	unsigned long penalty_jiffies;
2506	unsigned long pflags;
2507	unsigned long nr_reclaimed;
2508	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2509	int nr_retries = MAX_RECLAIM_RETRIES;
2510	struct mem_cgroup *memcg;
2511	bool in_retry = false;
2512
2513	if (likely(!nr_pages))
2514		return;
2515
2516	memcg = get_mem_cgroup_from_mm(current->mm);
2517	current->memcg_nr_pages_over_high = 0;
2518
2519retry_reclaim:
2520	/*
2521	 * The allocating task should reclaim at least the batch size, but for
2522	 * subsequent retries we only want to do what's necessary to prevent oom
2523	 * or breaching resource isolation.
2524	 *
2525	 * This is distinct from memory.max or page allocator behaviour because
2526	 * memory.high is currently batched, whereas memory.max and the page
2527	 * allocator run every time an allocation is made.
2528	 */
2529	nr_reclaimed = reclaim_high(memcg,
2530				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2531				    GFP_KERNEL);
2532
2533	/*
2534	 * memory.high is breached and reclaim is unable to keep up. Throttle
2535	 * allocators proactively to slow down excessive growth.
2536	 */
2537	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2538					       mem_find_max_overage(memcg));
2539
2540	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2541						swap_find_max_overage(memcg));
2542
2543	/*
2544	 * Clamp the max delay per usermode return so as to still keep the
2545	 * application moving forwards and also permit diagnostics, albeit
2546	 * extremely slowly.
2547	 */
2548	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2549
2550	/*
2551	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2552	 * that it's not even worth doing, in an attempt to be nice to those who
2553	 * go only a small amount over their memory.high value and maybe haven't
2554	 * been aggressively reclaimed enough yet.
2555	 */
2556	if (penalty_jiffies <= HZ / 100)
2557		goto out;
2558
2559	/*
2560	 * If reclaim is making forward progress but we're still over
2561	 * memory.high, we want to encourage that rather than doing allocator
2562	 * throttling.
2563	 */
2564	if (nr_reclaimed || nr_retries--) {
2565		in_retry = true;
2566		goto retry_reclaim;
2567	}
2568
2569	/*
2570	 * If we exit early, we're guaranteed to die (since
2571	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2572	 * need to account for any ill-begotten jiffies to pay them off later.
2573	 */
2574	psi_memstall_enter(&pflags);
2575	schedule_timeout_killable(penalty_jiffies);
2576	psi_memstall_leave(&pflags);
2577
2578out:
2579	css_put(&memcg->css);
2580}
2581
2582static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2583			unsigned int nr_pages)
2584{
2585	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2586	int nr_retries = MAX_RECLAIM_RETRIES;
2587	struct mem_cgroup *mem_over_limit;
2588	struct page_counter *counter;
2589	enum oom_status oom_status;
2590	unsigned long nr_reclaimed;
2591	bool may_swap = true;
2592	bool drained = false;
2593	unsigned long pflags;
2594
2595retry:
2596	if (consume_stock(memcg, nr_pages))
2597		return 0;
2598
2599	if (!do_memsw_account() ||
2600	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2601		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2602			goto done_restock;
2603		if (do_memsw_account())
2604			page_counter_uncharge(&memcg->memsw, batch);
2605		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2606	} else {
2607		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2608		may_swap = false;
2609	}
2610
2611	if (batch > nr_pages) {
2612		batch = nr_pages;
2613		goto retry;
2614	}
2615
2616	/*
2617	 * Memcg doesn't have a dedicated reserve for atomic
2618	 * allocations. But like the global atomic pool, we need to
2619	 * put the burden of reclaim on regular allocation requests
2620	 * and let these go through as privileged allocations.
2621	 */
2622	if (gfp_mask & __GFP_ATOMIC)
2623		goto force;
2624
2625	/*
2626	 * Unlike in global OOM situations, memcg is not in a physical
2627	 * memory shortage.  Allow dying and OOM-killed tasks to
2628	 * bypass the last charges so that they can exit quickly and
2629	 * free their memory.
2630	 */
2631	if (unlikely(should_force_charge()))
2632		goto force;
2633
2634	/*
2635	 * Prevent unbounded recursion when reclaim operations need to
2636	 * allocate memory. This might exceed the limits temporarily,
2637	 * but we prefer facilitating memory reclaim and getting back
2638	 * under the limit over triggering OOM kills in these cases.
2639	 */
2640	if (unlikely(current->flags & PF_MEMALLOC))
2641		goto force;
2642
2643	if (unlikely(task_in_memcg_oom(current)))
2644		goto nomem;
2645
2646	if (!gfpflags_allow_blocking(gfp_mask))
2647		goto nomem;
2648
2649	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2650
2651	psi_memstall_enter(&pflags);
2652	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2653						    gfp_mask, may_swap);
2654	psi_memstall_leave(&pflags);
2655
2656	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2657		goto retry;
2658
2659	if (!drained) {
2660		drain_all_stock(mem_over_limit);
2661		drained = true;
2662		goto retry;
2663	}
2664
2665	if (gfp_mask & __GFP_NORETRY)
2666		goto nomem;
2667	/*
2668	 * Even though the limit is exceeded at this point, reclaim
2669	 * may have been able to free some pages.  Retry the charge
2670	 * before killing the task.
2671	 *
2672	 * Only for regular pages, though: huge pages are rather
2673	 * unlikely to succeed so close to the limit, and we fall back
2674	 * to regular pages anyway in case of failure.
2675	 */
2676	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2677		goto retry;
2678	/*
2679	 * During task move, charge accounts can be double counted, so it's
2680	 * better to wait until the end of task_move if one is in progress.
2681	 */
2682	if (mem_cgroup_wait_acct_move(mem_over_limit))
2683		goto retry;
2684
2685	if (nr_retries--)
2686		goto retry;
2687
2688	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2689		goto nomem;
2690
2691	if (fatal_signal_pending(current))
2692		goto force;
2693
2694	/*
2695	 * Keep retrying as long as the memcg OOM killer is able to make
2696	 * forward progress, or bypass the charge if the OOM killer
2697	 * couldn't make any progress.
2698	 */
2699	oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
2700		       get_order(nr_pages * PAGE_SIZE));
2701	switch (oom_status) {
2702	case OOM_SUCCESS:
2703		nr_retries = MAX_RECLAIM_RETRIES;
2704		goto retry;
2705	case OOM_FAILED:
2706		goto force;
2707	default:
2708		goto nomem;
2709	}
2710nomem:
2711	if (!(gfp_mask & __GFP_NOFAIL))
2712		return -ENOMEM;
2713force:
2714	/*
2715	 * The allocation either can't fail or will lead to more memory
2716	 * being freed very soon.  Allow memory usage to go over the limit
2717	 * temporarily by force charging it.
2718	 */
2719	page_counter_charge(&memcg->memory, nr_pages);
2720	if (do_memsw_account())
2721		page_counter_charge(&memcg->memsw, nr_pages);
2722
2723	return 0;
2724
2725done_restock:
2726	if (batch > nr_pages)
2727		refill_stock(memcg, batch - nr_pages);
2728
2729	/*
2730	 * If the hierarchy is above the normal consumption range, schedule
2731	 * reclaim on returning to userland.  We can perform reclaim here
2732	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2733	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2734	 * not recorded as it most likely matches current's and won't
2735	 * change in the meantime.  As high limit is checked again before
2736	 * reclaim, the cost of mismatch is negligible.
2737	 */
2738	do {
2739		bool mem_high, swap_high;
2740
2741		mem_high = page_counter_read(&memcg->memory) >
2742			READ_ONCE(memcg->memory.high);
2743		swap_high = page_counter_read(&memcg->swap) >
2744			READ_ONCE(memcg->swap.high);
2745
2746		/* Don't bother a random interrupted task */
2747		if (in_interrupt()) {
2748			if (mem_high) {
2749				schedule_work(&memcg->high_work);
2750				break;
2751			}
2752			continue;
2753		}
2754
2755		if (mem_high || swap_high) {
2756			/*
2757			 * The allocating tasks in this cgroup will need to do
2758			 * reclaim or be throttled to prevent further growth
2759			 * of the memory or swap footprints.
2760			 *
2761			 * Target some best-effort fairness between the tasks,
2762			 * and distribute reclaim work and delay penalties
2763			 * based on how much each task is actually allocating.
2764			 */
2765			current->memcg_nr_pages_over_high += batch;
2766			set_notify_resume(current);
2767			break;
2768		}
2769	} while ((memcg = parent_mem_cgroup(memcg)));
2770
2771	return 0;
2772}
2773
2774static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2775			     unsigned int nr_pages)
2776{
2777	if (mem_cgroup_is_root(memcg))
2778		return 0;
2779
2780	return try_charge_memcg(memcg, gfp_mask, nr_pages);
2781}
2782
2783#if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
2784static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2785{
2786	if (mem_cgroup_is_root(memcg))
2787		return;
2788
2789	page_counter_uncharge(&memcg->memory, nr_pages);
2790	if (do_memsw_account())
2791		page_counter_uncharge(&memcg->memsw, nr_pages);
2792}
2793#endif
2794
2795static void commit_charge(struct page *page, struct mem_cgroup *memcg)
2796{
2797	VM_BUG_ON_PAGE(page_memcg(page), page);
2798	/*
2799	 * Any of the following ensures page's memcg stability:
2800	 *
2801	 * - the page lock
2802	 * - LRU isolation
2803	 * - lock_page_memcg()
2804	 * - exclusive reference
2805	 */
2806	page->memcg_data = (unsigned long)memcg;
2807}
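/*
 * Editorial sketch (not part of the original file): a hypothetical
 * charge entry point pairs try_charge() with commit_charge(), taking a
 * css reference for the page's new binding:
 */
static int __maybe_unused example_charge_page(struct page *page,
					      struct mem_cgroup *memcg,
					      gfp_t gfp, unsigned int nr_pages)
{
	int ret = try_charge(memcg, gfp, nr_pages);

	if (ret)
		return ret;

	css_get(&memcg->css);
	commit_charge(page, memcg);
	return 0;
}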
2808
2809static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
2810{
2811	struct mem_cgroup *memcg;
2812
2813	rcu_read_lock();
2814retry:
2815	memcg = obj_cgroup_memcg(objcg);
2816	if (unlikely(!css_tryget(&memcg->css)))
2817		goto retry;
2818	rcu_read_unlock();
2819
2820	return memcg;
2821}
2822
2823#ifdef CONFIG_MEMCG_KMEM
2824/*
2825 * The allocated objcg pointers array is not accounted directly.
2826 * Moreover, it should not come from a DMA buffer and is not readily
2827 * reclaimable. So those GFP bits should be masked off.
2828 */
2829#define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
2830
2831int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
2832				 gfp_t gfp, bool new_page)
2833{
2834	unsigned int objects = objs_per_slab_page(s, page);
2835	unsigned long memcg_data;
2836	void *vec;
2837
2838	gfp &= ~OBJCGS_CLEAR_MASK;
2839	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
2840			   page_to_nid(page));
2841	if (!vec)
2842		return -ENOMEM;
2843
2844	memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
2845	if (new_page) {
2846		/*
2847		 * If the slab page is brand new and nobody can yet access
2848		 * its memcg_data, no synchronization is required and
2849		 * memcg_data can be simply assigned.
2850		 */
2851		page->memcg_data = memcg_data;
2852	} else if (cmpxchg(&page->memcg_data, 0, memcg_data)) {
2853		/*
2854		 * If the slab page is already in use, somebody can allocate
2855		 * and assign obj_cgroups in parallel. In this case the existing
2856		 * objcg vector should be reused.
2857		 */
2858		kfree(vec);
2859		return 0;
2860	}
2861
2862	kmemleak_not_leak(vec);
2863	return 0;
2864}
2865
2866/*
2867 * Returns a pointer to the memory cgroup to which the kernel object is charged.
2868 *
2869 * A passed kernel object can be a slab object or a generic kernel page, so
2870 * different mechanisms for getting the memory cgroup pointer should be used.
2871 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
2872 * cannot know for sure how the kernel object is implemented.
2873 * mem_cgroup_from_obj() can be safely used in such cases.
2874 *
2875 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2876 * cgroup_mutex, etc.
2877 */
2878struct mem_cgroup *mem_cgroup_from_obj(void *p)
2879{
2880	struct page *page;
2881
2882	if (mem_cgroup_disabled())
2883		return NULL;
2884
2885	page = virt_to_head_page(p);
2886
2887	/*
2888	 * Slab objects are accounted individually, not per-page.
2889	 * Memcg membership data for each individual object is saved in
2890	 * page->obj_cgroups.
2891	 */
2892	if (page_objcgs_check(page)) {
2893		struct obj_cgroup *objcg;
2894		unsigned int off;
2895
2896		off = obj_to_index(page->slab_cache, page, p);
2897		objcg = page_objcgs(page)[off];
2898		if (objcg)
2899			return obj_cgroup_memcg(objcg);
2900
2901		return NULL;
2902	}
2903
2904	/*
2905	 * page_memcg_check() is used here, because the page_objcgs_check()
2906	 * above could fail because the object cgroups vector wasn't set
2907	 * at that moment, but it can be set concurrently.
2908	 * page_memcg_check(page) will guarantee that a proper memory
2909	 * cgroup pointer or NULL will be returned.
2910	 */
2911	return page_memcg_check(page);
2912}
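/*
 * Editorial sketch (not part of the original file): per the comment
 * above, the caller protects the returned memcg's lifetime, e.g. with
 * RCU in this hypothetical helper:
 */
static inline void __maybe_unused example_print_obj_memcg(void *p)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_obj(p);
	if (memcg)
		pr_cont_cgroup_path(memcg->css.cgroup);
	rcu_read_unlock();
}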
2913
2914__always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
2915{
2916	struct obj_cgroup *objcg = NULL;
2917	struct mem_cgroup *memcg;
2918
2919	if (memcg_kmem_bypass())
2920		return NULL;
2921
2922	rcu_read_lock();
2923	if (unlikely(active_memcg()))
2924		memcg = active_memcg();
2925	else
2926		memcg = mem_cgroup_from_task(current);
2927
2928	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
2929		objcg = rcu_dereference(memcg->objcg);
2930		if (objcg && obj_cgroup_tryget(objcg))
2931			break;
2932		objcg = NULL;
2933	}
2934	rcu_read_unlock();
2935
2936	return objcg;
2937}
2938
2939static int memcg_alloc_cache_id(void)
2940{
2941	int id, size;
2942	int err;
2943
2944	id = ida_simple_get(&memcg_cache_ida,
2945			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2946	if (id < 0)
2947		return id;
2948
2949	if (id < memcg_nr_cache_ids)
2950		return id;
2951
2952	/*
2953	 * There's no space for the new id in memcg_caches arrays,
2954	 * so we have to grow them.
2955	 */
2956	down_write(&memcg_cache_ids_sem);
2957
2958	size = 2 * (id + 1);
2959	if (size < MEMCG_CACHES_MIN_SIZE)
2960		size = MEMCG_CACHES_MIN_SIZE;
2961	else if (size > MEMCG_CACHES_MAX_SIZE)
2962		size = MEMCG_CACHES_MAX_SIZE;
2963
2964	err = memcg_update_all_list_lrus(size);
2965	if (!err)
2966		memcg_nr_cache_ids = size;
2967
2968	up_write(&memcg_cache_ids_sem);
2969
2970	if (err) {
2971		ida_simple_remove(&memcg_cache_ida, id);
2972		return err;
2973	}
2974	return id;
2975}
2976
2977static void memcg_free_cache_id(int id)
2978{
2979	ida_simple_remove(&memcg_cache_ida, id);
2980}
2981
2982/*
2983 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
2984 * @objcg: object cgroup to uncharge
2985 * @nr_pages: number of pages to uncharge
2986 */
2987static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
2988				      unsigned int nr_pages)
2989{
2990	struct mem_cgroup *memcg;
2991
2992	memcg = get_mem_cgroup_from_objcg(objcg);
2993
2994	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2995		page_counter_uncharge(&memcg->kmem, nr_pages);
2996	refill_stock(memcg, nr_pages);
2997
2998	css_put(&memcg->css);
2999}
3000
3001/*
3002 * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
3003 * @objcg: object cgroup to charge
3004 * @gfp: reclaim mode
3005 * @nr_pages: number of pages to charge
3006 *
3007 * Returns 0 on success, an error code on failure.
3008 */
3009static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3010				   unsigned int nr_pages)
3011{
3012	struct page_counter *counter;
3013	struct mem_cgroup *memcg;
3014	int ret;
3015
3016	memcg = get_mem_cgroup_from_objcg(objcg);
3017
3018	ret = try_charge_memcg(memcg, gfp, nr_pages);
3019	if (ret)
3020		goto out;
3021
3022	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
3023	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
3024
3025		/*
3026		 * Enforce __GFP_NOFAIL allocation because callers are not
3027		 * prepared to see failures and likely do not have any failure
3028		 * handling code.
3029		 */
3030		if (gfp & __GFP_NOFAIL) {
3031			page_counter_charge(&memcg->kmem, nr_pages);
3032			goto out;
3033		}
3034		cancel_charge(memcg, nr_pages);
3035		ret = -ENOMEM;
3036	}
3037out:
3038	css_put(&memcg->css);
3039
3040	return ret;
3041}
3042
3043/**
3044 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3045 * @page: page to charge
3046 * @gfp: reclaim mode
3047 * @order: allocation order
3048 *
3049 * Returns 0 on success, an error code on failure.
3050 */
3051int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3052{
3053	struct obj_cgroup *objcg;
3054	int ret = 0;
3055
3056	objcg = get_obj_cgroup_from_current();
3057	if (objcg) {
3058		ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
3059		if (!ret) {
3060			page->memcg_data = (unsigned long)objcg |
3061				MEMCG_DATA_KMEM;
3062			return 0;
3063		}
3064		obj_cgroup_put(objcg);
3065	}
3066	return ret;
3067}
3068
3069/**
3070 * __memcg_kmem_uncharge_page: uncharge a kmem page
3071 * @page: page to uncharge
3072 * @order: allocation order
3073 */
3074void __memcg_kmem_uncharge_page(struct page *page, int order)
3075{
3076	struct obj_cgroup *objcg;
3077	unsigned int nr_pages = 1 << order;
3078
3079	if (!PageMemcgKmem(page))
3080		return;
3081
3082	objcg = __page_objcg(page);
3083	obj_cgroup_uncharge_pages(objcg, nr_pages);
3084	page->memcg_data = 0;
3085	obj_cgroup_put(objcg);
3086}
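/*
 * Editorial sketch (not part of the original file): the charge and
 * uncharge halves above bracket a kmem page's lifetime, much as the
 * page allocator does for __GFP_ACCOUNT allocations; a hypothetical
 * wrapper might look like this.
 */
static __maybe_unused struct page *example_kmem_alloc(gfp_t gfp, int order)
{
	struct page *page = alloc_pages(gfp, order);

	if (page && __memcg_kmem_charge_page(page, gfp, order)) {
		__free_pages(page, order);
		return NULL;
	}
	return page;	/* __memcg_kmem_uncharge_page() runs on free */
}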
3087
3088void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3089		     enum node_stat_item idx, int nr)
3090{
3091	unsigned long flags;
3092	struct obj_stock *stock = get_obj_stock(&flags);
3093	int *bytes;
3094
3095	/*
3096	 * Save vmstat data in stock and skip vmstat array update unless
3097	 * accumulating over a page of vmstat data or when pgdat or idx
3098	 * changes.
3099	 */
3100	if (stock->cached_objcg != objcg) {
3101		drain_obj_stock(stock);
3102		obj_cgroup_get(objcg);
3103		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3104				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3105		stock->cached_objcg = objcg;
3106		stock->cached_pgdat = pgdat;
3107	} else if (stock->cached_pgdat != pgdat) {
3108		/* Flush the existing cached vmstat data */
3109		struct pglist_data *oldpg = stock->cached_pgdat;
3110
3111		if (stock->nr_slab_reclaimable_b) {
3112			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3113					  stock->nr_slab_reclaimable_b);
3114			stock->nr_slab_reclaimable_b = 0;
3115		}
3116		if (stock->nr_slab_unreclaimable_b) {
3117			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3118					  stock->nr_slab_unreclaimable_b);
3119			stock->nr_slab_unreclaimable_b = 0;
3120		}
3121		stock->cached_pgdat = pgdat;
3122	}
3123
3124	bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3125					       : &stock->nr_slab_unreclaimable_b;
3126	/*
3127	 * Even for a large object >= PAGE_SIZE, the vmstat data will still be
3128	 * cached locally at least once before being pushed out.
3129	 */
3130	if (!*bytes) {
3131		*bytes = nr;
3132		nr = 0;
3133	} else {
3134		*bytes += nr;
3135		if (abs(*bytes) > PAGE_SIZE) {
3136			nr = *bytes;
3137			*bytes = 0;
3138		} else {
3139			nr = 0;
3140		}
3141	}
3142	if (nr)
3143		mod_objcg_mlstate(objcg, pgdat, idx, nr);
3144
3145	put_obj_stock(flags);
3146}
3147
3148static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3149{
3150	unsigned long flags;
3151	struct obj_stock *stock = get_obj_stock(&flags);
3152	bool ret = false;
3153
3154	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
3155		stock->nr_bytes -= nr_bytes;
3156		ret = true;
3157	}
3158
3159	put_obj_stock(flags);
3160
3161	return ret;
3162}
3163
3164static void drain_obj_stock(struct obj_stock *stock)
3165{
3166	struct obj_cgroup *old = stock->cached_objcg;
3167
3168	if (!old)
3169		return;
3170
3171	if (stock->nr_bytes) {
3172		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3173		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3174
3175		if (nr_pages)
3176			obj_cgroup_uncharge_pages(old, nr_pages);
3177
3178		/*
3179		 * The leftover is flushed to the centralized per-memcg value.
3180		 * On the next attempt to refill obj stock it will be moved
3181		 * to a per-cpu stock (probably on another CPU), see
3182		 * refill_obj_stock().
3183		 *
3184		 * How often it's flushed is a trade-off between the memory
3185		 * limit enforcement accuracy and potential CPU contention,
3186		 * so it might be changed in the future.
3187		 */
3188		atomic_add(nr_bytes, &old->nr_charged_bytes);
3189		stock->nr_bytes = 0;
3190	}
3191
3192	/*
3193	 * Flush the vmstat data in current stock
3194	 */
3195	if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3196		if (stock->nr_slab_reclaimable_b) {
3197			mod_objcg_mlstate(old, stock->cached_pgdat,
3198					  NR_SLAB_RECLAIMABLE_B,
3199					  stock->nr_slab_reclaimable_b);
3200			stock->nr_slab_reclaimable_b = 0;
3201		}
3202		if (stock->nr_slab_unreclaimable_b) {
3203			mod_objcg_mlstate(old, stock->cached_pgdat,
3204					  NR_SLAB_UNRECLAIMABLE_B,
3205					  stock->nr_slab_unreclaimable_b);
3206			stock->nr_slab_unreclaimable_b = 0;
3207		}
3208		stock->cached_pgdat = NULL;
3209	}
3210
3211	obj_cgroup_put(old);
3212	stock->cached_objcg = NULL;
3213}
3214
3215static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3216				     struct mem_cgroup *root_memcg)
3217{
3218	struct mem_cgroup *memcg;
3219
3220	if (in_task() && stock->task_obj.cached_objcg) {
3221		memcg = obj_cgroup_memcg(stock->task_obj.cached_objcg);
3222		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3223			return true;
3224	}
3225	if (stock->irq_obj.cached_objcg) {
3226		memcg = obj_cgroup_memcg(stock->irq_obj.cached_objcg);
3227		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3228			return true;
3229	}
3230
3231	return false;
3232}
3233
3234static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3235			     bool allow_uncharge)
3236{
3237	unsigned long flags;
3238	struct obj_stock *stock = get_obj_stock(&flags);
3239	unsigned int nr_pages = 0;
3240
3241	if (stock->cached_objcg != objcg) { /* reset if necessary */
3242		drain_obj_stock(stock);
3243		obj_cgroup_get(objcg);
3244		stock->cached_objcg = objcg;
3245		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3246				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3247		allow_uncharge = true;	/* Allow uncharge when objcg changes */
3248	}
3249	stock->nr_bytes += nr_bytes;
3250
3251	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3252		nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3253		stock->nr_bytes &= (PAGE_SIZE - 1);
3254	}
3255
3256	put_obj_stock(flags);
3257
3258	if (nr_pages)
3259		obj_cgroup_uncharge_pages(objcg, nr_pages);
3260}
3261
3262int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3263{
3264	unsigned int nr_pages, nr_bytes;
3265	int ret;
3266
3267	if (consume_obj_stock(objcg, size))
3268		return 0;
3269
3270	/*
3271	 * In theory, objcg->nr_charged_bytes can have enough
3272	 * pre-charged bytes to satisfy the allocation. However,
3273	 * flushing objcg->nr_charged_bytes requires two atomic
3274	 * operations, and objcg->nr_charged_bytes can't be big.
3275	 * The shared objcg->nr_charged_bytes can also become a
3276	 * performance bottleneck if all tasks of the same memcg are
3277	 * trying to update it. So it's better to ignore it and try to
3278	 * grab some new pages. The stock's nr_bytes will be flushed to
3279	 * objcg->nr_charged_bytes later on when objcg changes.
3280	 *
3281	 * The stock's nr_bytes may contain enough pre-charged bytes
3282	 * to allow one less page to be charged, but we can't rely
3283	 * on the pre-charged bytes not being changed outside of
3284	 * consume_obj_stock() or refill_obj_stock(). So ignore those
3285	 * pre-charged bytes as well when charging pages. To avoid a
3286	 * page uncharge right after a page charge, we set the
3287	 * allow_uncharge flag to false when calling refill_obj_stock()
3288	 * to temporarily allow the pre-charged bytes to exceed the page
3289	 * size limit. The maximum reachable value of the pre-charged
3290	 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3291	 * race.
3292	 */
3293	nr_pages = size >> PAGE_SHIFT;
3294	nr_bytes = size & (PAGE_SIZE - 1);
3295
3296	if (nr_bytes)
3297		nr_pages += 1;
3298
3299	ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3300	if (!ret && nr_bytes)
3301		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3302
3303	return ret;
3304}
3305
3306void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3307{
3308	refill_obj_stock(objcg, size, true);
3309}
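/*
 * Editorial worked example (not part of the original file): charging a
 * 700-byte object with the pair above.  If the per-cpu stock can't
 * serve it, one full page is charged and the PAGE_SIZE - 700 leftover
 * bytes are put back into the stock with uncharging disallowed, so the
 * page charge isn't immediately undone.
 */
static inline int __maybe_unused example_small_charge(struct obj_cgroup *objcg)
{
	int ret = obj_cgroup_charge(objcg, GFP_KERNEL, 700);

	if (!ret)
		obj_cgroup_uncharge(objcg, 700);	/* on object free */
	return ret;
}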
3310
3311#endif /* CONFIG_MEMCG_KMEM */
3312
3313/*
3314 * Because page_memcg(head) is not set on tails, set it now.
3315 */
3316void split_page_memcg(struct page *head, unsigned int nr)
3317{
3318	struct mem_cgroup *memcg = page_memcg(head);
3319	int i;
3320
3321	if (mem_cgroup_disabled() || !memcg)
3322		return;
3323
3324	for (i = 1; i < nr; i++)
3325		head[i].memcg_data = head->memcg_data;
3326
3327	if (PageMemcgKmem(head))
3328		obj_cgroup_get_many(__page_objcg(head), nr - 1);
3329	else
3330		css_get_many(&memcg->css, nr - 1);
3331}
3332
3333#ifdef CONFIG_MEMCG_SWAP
3334/**
3335 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3336 * @entry: swap entry to be moved
3337 * @from:  mem_cgroup which the entry is moved from
3338 * @to:  mem_cgroup which the entry is moved to
3339 *
3340 * It succeeds only when the swap_cgroup's record for this entry is the same
3341 * as the mem_cgroup's id of @from.
3342 *
3343 * Returns 0 on success, -EINVAL on failure.
3344 *
3345 * The caller must have charged to @to, IOW, called page_counter_charge() on
3346 * both res and memsw, and called css_get().
3347 */
3348static int mem_cgroup_move_swap_account(swp_entry_t entry,
3349				struct mem_cgroup *from, struct mem_cgroup *to)
3350{
3351	unsigned short old_id, new_id;
3352
3353	old_id = mem_cgroup_id(from);
3354	new_id = mem_cgroup_id(to);
3355
3356	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3357		mod_memcg_state(from, MEMCG_SWAP, -1);
3358		mod_memcg_state(to, MEMCG_SWAP, 1);
3359		return 0;
3360	}
3361	return -EINVAL;
3362}
3363#else
3364static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3365				struct mem_cgroup *from, struct mem_cgroup *to)
3366{
3367	return -EINVAL;
3368}
3369#endif
3370
3371static DEFINE_MUTEX(memcg_max_mutex);
3372
3373static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3374				 unsigned long max, bool memsw)
3375{
3376	bool enlarge = false;
3377	bool drained = false;
3378	int ret;
3379	bool limits_invariant;
3380	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3381
3382	do {
3383		if (signal_pending(current)) {
3384			ret = -EINTR;
3385			break;
3386		}
3387
3388		mutex_lock(&memcg_max_mutex);
3389		/*
3390		 * Make sure that the new limit (memsw or memory limit) doesn't
3391		 * break our basic invariant rule memory.max <= memsw.max.
3392		 */
3393		limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3394					   max <= memcg->memsw.max;
3395		if (!limits_invariant) {
3396			mutex_unlock(&memcg_max_mutex);
3397			ret = -EINVAL;
3398			break;
3399		}
3400		if (max > counter->max)
3401			enlarge = true;
3402		ret = page_counter_set_max(counter, max);
3403		mutex_unlock(&memcg_max_mutex);
3404
3405		if (!ret)
3406			break;
3407
3408		if (!drained) {
3409			drain_all_stock(memcg);
3410			drained = true;
3411			continue;
3412		}
3413
3414		if (!try_to_free_mem_cgroup_pages(memcg, 1,
3415					GFP_KERNEL, !memsw)) {
3416			ret = -EBUSY;
3417			break;
3418		}
3419	} while (true);
3420
3421	if (!ret && enlarge)
3422		memcg_oom_recover(memcg);
3423
3424	return ret;
3425}
3426
3427unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3428					    gfp_t gfp_mask,
3429					    unsigned long *total_scanned)
3430{
3431	unsigned long nr_reclaimed = 0;
3432	struct mem_cgroup_per_node *mz, *next_mz = NULL;
3433	unsigned long reclaimed;
3434	int loop = 0;
3435	struct mem_cgroup_tree_per_node *mctz;
3436	unsigned long excess;
3437	unsigned long nr_scanned;
3438
3439	if (order > 0)
3440		return 0;
3441
3442	mctz = soft_limit_tree_node(pgdat->node_id);
3443
3444	/*
3445	 * Do not even bother to check the largest node if the root
3446	 * is empty. Do it lockless to prevent lock bouncing. Races
3447	 * are acceptable as soft limit is best effort anyway.
3448	 */
3449	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3450		return 0;
3451
3452	/*
3453	 * This loop can run for a while, especially if mem_cgroups continuously
3454	 * keep exceeding their soft limit, putting the system under
3455	 * pressure.
3456	 */
3457	do {
3458		if (next_mz)
3459			mz = next_mz;
3460		else
3461			mz = mem_cgroup_largest_soft_limit_node(mctz);
3462		if (!mz)
3463			break;
3464
3465		nr_scanned = 0;
3466		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3467						    gfp_mask, &nr_scanned);
3468		nr_reclaimed += reclaimed;
3469		*total_scanned += nr_scanned;
3470		spin_lock_irq(&mctz->lock);
3471		__mem_cgroup_remove_exceeded(mz, mctz);
3472
3473		/*
3474		 * If we failed to reclaim anything from this memory cgroup
3475		 * it is time to move on to the next cgroup
3476		 */
3477		next_mz = NULL;
3478		if (!reclaimed)
3479			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3480
3481		excess = soft_limit_excess(mz->memcg);
3482		/*
3483		 * One school of thought says that we should not add
3484		 * back the node to the tree if reclaim returns 0.
3485		 * But our reclaim could return 0 simply because, due
3486		 * to priority, we are exposing a smaller subset of
3487		 * memory to reclaim from. Consider this as a longer
3488		 * term TODO.
3489		 */
3490		/* If excess == 0, no tree ops */
3491		__mem_cgroup_insert_exceeded(mz, mctz, excess);
3492		spin_unlock_irq(&mctz->lock);
3493		css_put(&mz->memcg->css);
3494		loop++;
3495		/*
3496		 * Could not reclaim anything and there are no more
3497		 * mem cgroups to try or we seem to be looping without
3498		 * reclaiming anything.
3499		 */
3500		if (!nr_reclaimed &&
3501			(next_mz == NULL ||
3502			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3503			break;
3504	} while (!nr_reclaimed);
3505	if (next_mz)
3506		css_put(&next_mz->memcg->css);
3507	return nr_reclaimed;
3508}
3509
3510/*
3511 * Reclaims as many pages from the given memcg as possible.
3512 *
3513 * Caller is responsible for holding css reference for memcg.
3514 */
3515static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3516{
3517	int nr_retries = MAX_RECLAIM_RETRIES;
3518
3519	/* we call try-to-free pages to make this cgroup empty */
3520	lru_add_drain_all();
3521
3522	drain_all_stock(memcg);
3523
3524	/* try to free all pages in this cgroup */
3525	while (nr_retries && page_counter_read(&memcg->memory)) {
3526		int progress;
3527
3528		if (signal_pending(current))
3529			return -EINTR;
3530
3531		progress = try_to_free_mem_cgroup_pages(memcg, 1,
3532							GFP_KERNEL, true);
3533		if (!progress) {
3534			nr_retries--;
3535			/* maybe some writeback is necessary */
3536			congestion_wait(BLK_RW_ASYNC, HZ/10);
3537		}
3538
3539	}
3540
3541	return 0;
3542}
3543
3544static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3545					    char *buf, size_t nbytes,
3546					    loff_t off)
3547{
3548	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3549
3550	if (mem_cgroup_is_root(memcg))
3551		return -EINVAL;
3552	return mem_cgroup_force_empty(memcg) ?: nbytes;
3553}
3554
3555static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3556				     struct cftype *cft)
3557{
3558	return 1;
3559}
3560
3561static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3562				      struct cftype *cft, u64 val)
3563{
3564	if (val == 1)
3565		return 0;
3566
3567	pr_warn_once("Non-hierarchical mode is deprecated. "
3568		     "Please report your usecase to linux-mm@kvack.org if you "
3569		     "depend on this functionality.\n");
3570
3571	return -EINVAL;
3572}
3573
3574static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3575{
3576	unsigned long val;
3577
3578	if (mem_cgroup_is_root(memcg)) {
3579		/* mem_cgroup_threshold() calls here from irqsafe context */
3580		cgroup_rstat_flush_irqsafe(memcg->css.cgroup);
3581		val = memcg_page_state(memcg, NR_FILE_PAGES) +
3582			memcg_page_state(memcg, NR_ANON_MAPPED);
3583		if (swap)
3584			val += memcg_page_state(memcg, MEMCG_SWAP);
3585	} else {
3586		if (!swap)
3587			val = page_counter_read(&memcg->memory);
3588		else
3589			val = page_counter_read(&memcg->memsw);
3590	}
3591	return val;
3592}
3593
3594enum {
3595	RES_USAGE,
3596	RES_LIMIT,
3597	RES_MAX_USAGE,
3598	RES_FAILCNT,
3599	RES_SOFT_LIMIT,
3600};
3601
3602static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3603			       struct cftype *cft)
3604{
3605	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3606	struct page_counter *counter;
3607
3608	switch (MEMFILE_TYPE(cft->private)) {
3609	case _MEM:
3610		counter = &memcg->memory;
3611		break;
3612	case _MEMSWAP:
3613		counter = &memcg->memsw;
3614		break;
3615	case _KMEM:
3616		counter = &memcg->kmem;
3617		break;
3618	case _TCP:
3619		counter = &memcg->tcpmem;
3620		break;
3621	default:
3622		BUG();
3623	}
3624
3625	switch (MEMFILE_ATTR(cft->private)) {
3626	case RES_USAGE:
3627		if (counter == &memcg->memory)
3628			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3629		if (counter == &memcg->memsw)
3630			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3631		return (u64)page_counter_read(counter) * PAGE_SIZE;
3632	case RES_LIMIT:
3633		return (u64)counter->max * PAGE_SIZE;
3634	case RES_MAX_USAGE:
3635		return (u64)counter->watermark * PAGE_SIZE;
3636	case RES_FAILCNT:
3637		return counter->failcnt;
3638	case RES_SOFT_LIMIT:
3639		return (u64)memcg->soft_limit * PAGE_SIZE;
3640	default:
3641		BUG();
3642	}
3643}
3644
3645#ifdef CONFIG_MEMCG_KMEM
3646static int memcg_online_kmem(struct mem_cgroup *memcg)
3647{
3648	struct obj_cgroup *objcg;
3649	int memcg_id;
3650
3651	if (cgroup_memory_nokmem)
3652		return 0;
3653
3654	BUG_ON(memcg->kmemcg_id >= 0);
3655	BUG_ON(memcg->kmem_state);
3656
3657	memcg_id = memcg_alloc_cache_id();
3658	if (memcg_id < 0)
3659		return memcg_id;
3660
3661	objcg = obj_cgroup_alloc();
3662	if (!objcg) {
3663		memcg_free_cache_id(memcg_id);
3664		return -ENOMEM;
3665	}
3666	objcg->memcg = memcg;
3667	rcu_assign_pointer(memcg->objcg, objcg);
3668
3669	static_branch_enable(&memcg_kmem_enabled_key);
3670
3671	memcg->kmemcg_id = memcg_id;
3672	memcg->kmem_state = KMEM_ONLINE;
3673
3674	return 0;
3675}
3676
3677static void memcg_offline_kmem(struct mem_cgroup *memcg)
3678{
3679	struct cgroup_subsys_state *css;
3680	struct mem_cgroup *parent, *child;
3681	int kmemcg_id;
3682
3683	if (memcg->kmem_state != KMEM_ONLINE)
3684		return;
3685
3686	memcg->kmem_state = KMEM_ALLOCATED;
3687
3688	parent = parent_mem_cgroup(memcg);
3689	if (!parent)
3690		parent = root_mem_cgroup;
3691
3692	memcg_reparent_objcgs(memcg, parent);
3693
3694	kmemcg_id = memcg->kmemcg_id;
3695	BUG_ON(kmemcg_id < 0);
3696
3697	/*
3698	 * Change kmemcg_id of this cgroup and all its descendants to the
3699	 * parent's id, and then move all entries from this cgroup's list_lrus
3700	 * to ones of the parent. After we have finished, all list_lrus
3701	 * corresponding to this cgroup are guaranteed to remain empty. The
3702	 * ordering is imposed by list_lru_node->lock taken by
3703	 * memcg_drain_all_list_lrus().
3704	 */
3705	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
3706	css_for_each_descendant_pre(css, &memcg->css) {
3707		child = mem_cgroup_from_css(css);
3708		BUG_ON(child->kmemcg_id != kmemcg_id);
3709		child->kmemcg_id = parent->kmemcg_id;
3710	}
3711	rcu_read_unlock();
3712
3713	memcg_drain_all_list_lrus(kmemcg_id, parent);
3714
3715	memcg_free_cache_id(kmemcg_id);
3716}
3717
3718static void memcg_free_kmem(struct mem_cgroup *memcg)
3719{
3720	/* css_alloc() failed, offlining didn't happen */
3721	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
3722		memcg_offline_kmem(memcg);
3723}
3724#else
3725static int memcg_online_kmem(struct mem_cgroup *memcg)
3726{
3727	return 0;
3728}
3729static void memcg_offline_kmem(struct mem_cgroup *memcg)
3730{
3731}
3732static void memcg_free_kmem(struct mem_cgroup *memcg)
3733{
3734}
3735#endif /* CONFIG_MEMCG_KMEM */
3736
3737static int memcg_update_kmem_max(struct mem_cgroup *memcg,
3738				 unsigned long max)
3739{
3740	int ret;
3741
3742	mutex_lock(&memcg_max_mutex);
3743	ret = page_counter_set_max(&memcg->kmem, max);
3744	mutex_unlock(&memcg_max_mutex);
3745	return ret;
3746}
3747
3748static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3749{
3750	int ret;
3751
3752	mutex_lock(&memcg_max_mutex);
3753
3754	ret = page_counter_set_max(&memcg->tcpmem, max);
3755	if (ret)
3756		goto out;
3757
3758	if (!memcg->tcpmem_active) {
3759		/*
3760		 * The active flag needs to be written after the static_key
3761		 * update. This is what guarantees that the socket activation
3762		 * function is the last one to run. See mem_cgroup_sk_alloc()
3763		 * for details, and note that we don't mark any socket as
3764		 * belonging to this memcg until that flag is up.
3765		 *
3766		 * We need to do this, because static_keys will span multiple
3767		 * sites, but we can't control their order. If we mark a socket
3768		 * as accounted, but the accounting functions are not patched in
3769		 * yet, we'll lose accounting.
3770		 *
3771		 * We never race with the readers in mem_cgroup_sk_alloc(),
3772		 * because when this value changes, the code to process it is not
3773		 * patched in yet.
3774		 */
3775		static_branch_inc(&memcg_sockets_enabled_key);
3776		memcg->tcpmem_active = true;
3777	}
3778out:
3779	mutex_unlock(&memcg_max_mutex);
3780	return ret;
3781}
3782
3783/*
3784 * The users of this function are the RES_LIMIT and
3785 * RES_SOFT_LIMIT control files.
3786 */
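/*
 * Values are parsed with page_counter_memparse(): plain bytes or K/M/G
 * suffixes are accepted, and "-1" means unlimited.  Illustrative usage
 * from userspace:
 *
 *	echo 512M > memory.limit_in_bytes
 *	echo -1   > memory.limit_in_bytes
 */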
3787static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3788				char *buf, size_t nbytes, loff_t off)
3789{
3790	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3791	unsigned long nr_pages;
3792	int ret;
3793
3794	buf = strstrip(buf);
3795	ret = page_counter_memparse(buf, "-1", &nr_pages);
3796	if (ret)
3797		return ret;
3798
3799	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3800	case RES_LIMIT:
3801		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3802			ret = -EINVAL;
3803			break;
3804		}
3805		switch (MEMFILE_TYPE(of_cft(of)->private)) {
3806		case _MEM:
3807			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3808			break;
3809		case _MEMSWAP:
3810			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3811			break;
3812		case _KMEM:
3813			pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
3814				     "Please report your usecase to linux-mm@kvack.org if you "
3815				     "depend on this functionality.\n");
3816			ret = memcg_update_kmem_max(memcg, nr_pages);
3817			break;
3818		case _TCP:
3819			ret = memcg_update_tcp_max(memcg, nr_pages);
3820			break;
3821		}
3822		break;
3823	case RES_SOFT_LIMIT:
3824		memcg->soft_limit = nr_pages;
3825		ret = 0;
3826		break;
3827	}
3828	return ret ?: nbytes;
3829}
3830
3831static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3832				size_t nbytes, loff_t off)
3833{
3834	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3835	struct page_counter *counter;
3836
3837	switch (MEMFILE_TYPE(of_cft(of)->private)) {
3838	case _MEM:
3839		counter = &memcg->memory;
3840		break;
3841	case _MEMSWAP:
3842		counter = &memcg->memsw;
3843		break;
3844	case _KMEM:
3845		counter = &memcg->kmem;
3846		break;
3847	case _TCP:
3848		counter = &memcg->tcpmem;
3849		break;
3850	default:
3851		BUG();
3852	}
3853
3854	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3855	case RES_MAX_USAGE:
3856		page_counter_reset_watermark(counter);
3857		break;
3858	case RES_FAILCNT:
3859		counter->failcnt = 0;
3860		break;
3861	default:
3862		BUG();
3863	}
3864
3865	return nbytes;
3866}
3867
3868static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3869					struct cftype *cft)
3870{
3871	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3872}
3873
3874#ifdef CONFIG_MMU
3875static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3876					struct cftype *cft, u64 val)
3877{
3878	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3879
3880	if (val & ~MOVE_MASK)
3881		return -EINVAL;
3882
3883	/*
3884	 * No kind of locking is needed in here, because ->can_attach() will
3885	 * check this value once in the beginning of the process, and then carry
3886	 * on with stale data. This means that changes to this value will only
3887	 * affect task migrations starting after the change.
3888	 */
3889	memcg->move_charge_at_immigrate = val;
3890	return 0;
3891}
3892#else
3893static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3894					struct cftype *cft, u64 val)
3895{
3896	return -ENOSYS;
3897}
3898#endif
3899
3900#ifdef CONFIG_NUMA
3901
3902#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3903#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3904#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
3905
3906static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3907				int nid, unsigned int lru_mask, bool tree)
3908{
3909	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
3910	unsigned long nr = 0;
3911	enum lru_list lru;
3912
3913	VM_BUG_ON((unsigned)nid >= nr_node_ids);
3914
3915	for_each_lru(lru) {
3916		if (!(BIT(lru) & lru_mask))
3917			continue;
3918		if (tree)
3919			nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
3920		else
3921			nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
3922	}
3923	return nr;
3924}
3925
3926static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3927					     unsigned int lru_mask,
3928					     bool tree)
3929{
3930	unsigned long nr = 0;
3931	enum lru_list lru;
3932
3933	for_each_lru(lru) {
3934		if (!(BIT(lru) & lru_mask))
3935			continue;
3936		if (tree)
3937			nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
3938		else
3939			nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
3940	}
3941	return nr;
3942}
3943
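/*
 * memory.numa_stat emits one line per category: an aggregate page count
 * followed by per-node page counts, and then the same breakdown again
 * with hierarchical totals.  Illustrative output on a two-node machine:
 *
 *	total=4096 N0=2048 N1=2048
 *	file=3072 N0=1536 N1=1536
 *	...
 *	hierarchical_total=8192 N0=4096 N1=4096
 */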
3944static int memcg_numa_stat_show(struct seq_file *m, void *v)
3945{
3946	struct numa_stat {
3947		const char *name;
3948		unsigned int lru_mask;
3949	};
3950
3951	static const struct numa_stat stats[] = {
3952		{ "total", LRU_ALL },
3953		{ "file", LRU_ALL_FILE },
3954		{ "anon", LRU_ALL_ANON },
3955		{ "unevictable", BIT(LRU_UNEVICTABLE) },
3956	};
3957	const struct numa_stat *stat;
3958	int nid;
3959	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3960
3961	cgroup_rstat_flush(memcg->css.cgroup);
3962
3963	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3964		seq_printf(m, "%s=%lu", stat->name,
3965			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3966						   false));
3967		for_each_node_state(nid, N_MEMORY)
3968			seq_printf(m, " N%d=%lu", nid,
3969				   mem_cgroup_node_nr_lru_pages(memcg, nid,
3970							stat->lru_mask, false));
3971		seq_putc(m, '\n');
3972	}
3973
3974	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3975
3976		seq_printf(m, "hierarchical_%s=%lu", stat->name,
3977			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3978						   true));
3979		for_each_node_state(nid, N_MEMORY)
3980			seq_printf(m, " N%d=%lu", nid,
3981				   mem_cgroup_node_nr_lru_pages(memcg, nid,
3982							stat->lru_mask, true));
3983		seq_putc(m, '\n');
3984	}
3985
3986	return 0;
3987}
3988#endif /* CONFIG_NUMA */
3989
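/*
 * memcg1_stats and memcg1_stat_names are parallel arrays: entry i of one
 * labels entry i of the other, including the CONFIG_TRANSPARENT_HUGEPAGE
 * conditionals, and memcg_stat_show() BUILD_BUG_ON()s if their sizes
 * ever diverge.
 */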
3990static const unsigned int memcg1_stats[] = {
3991	NR_FILE_PAGES,
3992	NR_ANON_MAPPED,
3993#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3994	NR_ANON_THPS,
3995#endif
3996	NR_SHMEM,
3997	NR_FILE_MAPPED,
3998	NR_FILE_DIRTY,
3999	NR_WRITEBACK,
4000	MEMCG_SWAP,
4001};
4002
4003static const char *const memcg1_stat_names[] = {
4004	"cache",
4005	"rss",
4006#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4007	"rss_huge",
4008#endif
4009	"shmem",
4010	"mapped_file",
4011	"dirty",
4012	"writeback",
4013	"swap",
4014};
4015
4016/* Universal VM events cgroup1 shows, original sort order */
4017static const unsigned int memcg1_events[] = {
4018	PGPGIN,
4019	PGPGOUT,
4020	PGFAULT,
4021	PGMAJFAULT,
4022};
4023
4024static int memcg_stat_show(struct seq_file *m, void *v)
4025{
4026	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4027	unsigned long memory, memsw;
4028	struct mem_cgroup *mi;
4029	unsigned int i;
4030
4031	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4032
4033	cgroup_rstat_flush(memcg->css.cgroup);
4034
4035	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4036		unsigned long nr;
4037
4038		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4039			continue;
4040		nr = memcg_page_state_local(memcg, memcg1_stats[i]);
4041		seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE);
4042	}
4043
4044	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4045		seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
4046			   memcg_events_local(memcg, memcg1_events[i]));
4047
4048	for (i = 0; i < NR_LRU_LISTS; i++)
4049		seq_printf(m, "%s %lu\n", lru_list_name(i),
4050			   memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4051			   PAGE_SIZE);
4052
4053	/* Hierarchical information */
4054	memory = memsw = PAGE_COUNTER_MAX;
4055	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4056		memory = min(memory, READ_ONCE(mi->memory.max));
4057		memsw = min(memsw, READ_ONCE(mi->memsw.max));
4058	}
4059	seq_printf(m, "hierarchical_memory_limit %llu\n",
4060		   (u64)memory * PAGE_SIZE);
4061	if (do_memsw_account())
4062		seq_printf(m, "hierarchical_memsw_limit %llu\n",
4063			   (u64)memsw * PAGE_SIZE);
4064
4065	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4066		unsigned long nr;
4067
4068		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4069			continue;
4070		nr = memcg_page_state(memcg, memcg1_stats[i]);
4071		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
4072						(u64)nr * PAGE_SIZE);
4073	}
4074
4075	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4076		seq_printf(m, "total_%s %llu\n",
4077			   vm_event_name(memcg1_events[i]),
4078			   (u64)memcg_events(memcg, memcg1_events[i]));
4079
4080	for (i = 0; i < NR_LRU_LISTS; i++)
4081		seq_printf(m, "total_%s %llu\n", lru_list_name(i),
4082			   (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4083			   PAGE_SIZE);
4084
4085#ifdef CONFIG_DEBUG_VM
4086	{
4087		pg_data_t *pgdat;
4088		struct mem_cgroup_per_node *mz;
4089		unsigned long anon_cost = 0;
4090		unsigned long file_cost = 0;
4091
4092		for_each_online_pgdat(pgdat) {
4093			mz = memcg->nodeinfo[pgdat->node_id];
4094
4095			anon_cost += mz->lruvec.anon_cost;
4096			file_cost += mz->lruvec.file_cost;
4097		}
4098		seq_printf(m, "anon_cost %lu\n", anon_cost);
4099		seq_printf(m, "file_cost %lu\n", file_cost);
4100	}
4101#endif
4102
4103	return 0;
4104}
4105
4106static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4107				      struct cftype *cft)
4108{
4109	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4110
4111	return mem_cgroup_swappiness(memcg);
4112}
4113
4114static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4115				       struct cftype *cft, u64 val)
4116{
4117	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4118
4119	if (val > 100)
4120		return -EINVAL;
4121
4122	if (!mem_cgroup_is_root(memcg))
4123		memcg->swappiness = val;
4124	else
4125		vm_swappiness = val;
4126
4127	return 0;
4128}
4129
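/*
 * Illustrative walk: with thresholds sorted ascending, say {4M, 8M, 16M},
 * current_threshold indexes the largest threshold <= usage.  If usage
 * rises from 6M to 20M, the forward loop below signals the 8M and 16M
 * eventfds; if it then falls to 3M, the backward loop signals all three
 * and current_threshold ends up at -1.
 */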
4130static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4131{
4132	struct mem_cgroup_threshold_ary *t;
4133	unsigned long usage;
4134	int i;
4135
4136	rcu_read_lock();
4137	if (!swap)
4138		t = rcu_dereference(memcg->thresholds.primary);
4139	else
4140		t = rcu_dereference(memcg->memsw_thresholds.primary);
4141
4142	if (!t)
4143		goto unlock;
4144
4145	usage = mem_cgroup_usage(memcg, swap);
4146
4147	/*
4148	 * current_threshold points to the threshold just below or equal to
4149	 * usage. If that no longer holds, a threshold was crossed after the
4150	 * last call of __mem_cgroup_threshold().
4151	 */
4152	i = t->current_threshold;
4153
4154	/*
4155	 * Iterate backward over array of thresholds starting from
4156	 * current_threshold and check if a threshold is crossed.
4157	 * If none of the thresholds below usage is crossed, we read
4158	 * only one element of the array here.
4159	 */
4160	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4161		eventfd_signal(t->entries[i].eventfd, 1);
4162
4163	/* i = current_threshold + 1 */
4164	i++;
4165
4166	/*
4167	 * Iterate forward over array of thresholds starting from
4168	 * current_threshold+1 and check if a threshold is crossed.
4169	 * If none of the thresholds above usage is crossed, we read
4170	 * only one element of the array here.
4171	 */
4172	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4173		eventfd_signal(t->entries[i].eventfd, 1);
4174
4175	/* Update current_threshold */
4176	t->current_threshold = i - 1;
4177unlock:
4178	rcu_read_unlock();
4179}
4180
4181static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4182{
4183	while (memcg) {
4184		__mem_cgroup_threshold(memcg, false);
4185		if (do_memsw_account())
4186			__mem_cgroup_threshold(memcg, true);
4187
4188		memcg = parent_mem_cgroup(memcg);
4189	}
4190}
4191
4192static int compare_thresholds(const void *a, const void *b)
4193{
4194	const struct mem_cgroup_threshold *_a = a;
4195	const struct mem_cgroup_threshold *_b = b;
4196
4197	if (_a->threshold > _b->threshold)
4198		return 1;
4199
4200	if (_a->threshold < _b->threshold)
4201		return -1;
4202
4203	return 0;
4204}
4205
4206static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4207{
4208	struct mem_cgroup_eventfd_list *ev;
4209
4210	spin_lock(&memcg_oom_lock);
4211
4212	list_for_each_entry(ev, &memcg->oom_notify, list)
4213		eventfd_signal(ev->eventfd, 1);
4214
4215	spin_unlock(&memcg_oom_lock);
4216	return 0;
4217}
4218
4219static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4220{
4221	struct mem_cgroup *iter;
4222
4223	for_each_mem_cgroup_tree(iter, memcg)
4224		mem_cgroup_oom_notify_cb(iter);
4225}
4226
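/*
 * Threshold arrays are updated copy-on-write: a new array one entry
 * larger is allocated, the old entries plus the new one are copied in
 * and sorted, and the result is published with rcu_assign_pointer().
 * The old primary array is parked as ->spare for later reuse, but only
 * after synchronize_rcu() guarantees no reader still sees it.
 */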
4227static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4228	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4229{
4230	struct mem_cgroup_thresholds *thresholds;
4231	struct mem_cgroup_threshold_ary *new;
4232	unsigned long threshold;
4233	unsigned long usage;
4234	int i, size, ret;
4235
4236	ret = page_counter_memparse(args, "-1", &threshold);
4237	if (ret)
4238		return ret;
4239
4240	mutex_lock(&memcg->thresholds_lock);
4241
4242	if (type == _MEM) {
4243		thresholds = &memcg->thresholds;
4244		usage = mem_cgroup_usage(memcg, false);
4245	} else if (type == _MEMSWAP) {
4246		thresholds = &memcg->memsw_thresholds;
4247		usage = mem_cgroup_usage(memcg, true);
4248	} else
4249		BUG();
4250
4251	/* Check if a threshold was crossed before adding a new one */
4252	if (thresholds->primary)
4253		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4254
4255	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4256
4257	/* Allocate memory for new array of thresholds */
4258	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4259	if (!new) {
4260		ret = -ENOMEM;
4261		goto unlock;
4262	}
4263	new->size = size;
4264
4265	/* Copy thresholds (if any) to new array */
4266	if (thresholds->primary)
4267		memcpy(new->entries, thresholds->primary->entries,
4268		       flex_array_size(new, entries, size - 1));
4269
4270	/* Add new threshold */
4271	new->entries[size - 1].eventfd = eventfd;
4272	new->entries[size - 1].threshold = threshold;
4273
4274	/* Sort thresholds. Registering a new threshold isn't time-critical */
4275	sort(new->entries, size, sizeof(*new->entries),
4276			compare_thresholds, NULL);
4277
4278	/* Find current threshold */
4279	new->current_threshold = -1;
4280	for (i = 0; i < size; i++) {
4281		if (new->entries[i].threshold <= usage) {
4282			/*
4283			 * new->current_threshold will not be used until
4284			 * rcu_assign_pointer(), so it's safe to increment
4285			 * it here.
4286			 */
4287			++new->current_threshold;
4288		} else
4289			break;
4290	}
4291
4292	/* Free old spare buffer and save old primary buffer as spare */
4293	kfree(thresholds->spare);
4294	thresholds->spare = thresholds->primary;
4295
4296	rcu_assign_pointer(thresholds->primary, new);
4297
4298	/* To be sure that nobody uses thresholds */
4299	synchronize_rcu();
4300
4301unlock:
4302	mutex_unlock(&memcg->thresholds_lock);
4303
4304	return ret;
4305}
4306
4307static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4308	struct eventfd_ctx *eventfd, const char *args)
4309{
4310	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4311}
4312
4313static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4314	struct eventfd_ctx *eventfd, const char *args)
4315{
4316	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4317}
4318
4319static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4320	struct eventfd_ctx *eventfd, enum res_type type)
4321{
4322	struct mem_cgroup_thresholds *thresholds;
4323	struct mem_cgroup_threshold_ary *new;
4324	unsigned long usage;
4325	int i, j, size, entries;
4326
4327	mutex_lock(&memcg->thresholds_lock);
4328
4329	if (type == _MEM) {
4330		thresholds = &memcg->thresholds;
4331		usage = mem_cgroup_usage(memcg, false);
4332	} else if (type == _MEMSWAP) {
4333		thresholds = &memcg->memsw_thresholds;
4334		usage = mem_cgroup_usage(memcg, true);
4335	} else
4336		BUG();
4337
4338	if (!thresholds->primary)
4339		goto unlock;
4340
4341	/* Check if a threshold was crossed before removing */
4342	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4343
4344	/* Calculate the new number of thresholds */
4345	size = entries = 0;
4346	for (i = 0; i < thresholds->primary->size; i++) {
4347		if (thresholds->primary->entries[i].eventfd != eventfd)
4348			size++;
4349		else
4350			entries++;
4351	}
4352
4353	new = thresholds->spare;
4354
4355	/* If no items related to eventfd have been cleared, nothing to do */
4356	if (!entries)
4357		goto unlock;
4358
4359	/* Set thresholds array to NULL if we don't have thresholds */
4360	if (!size) {
4361		kfree(new);
4362		new = NULL;
4363		goto swap_buffers;
4364	}
4365
4366	new->size = size;
4367
4368	/* Copy thresholds and find current threshold */
4369	new->current_threshold = -1;
4370	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4371		if (thresholds->primary->entries[i].eventfd == eventfd)
4372			continue;
4373
4374		new->entries[j] = thresholds->primary->entries[i];
4375		if (new->entries[j].threshold <= usage) {
4376			/*
4377			 * new->current_threshold will not be used
4378			 * until rcu_assign_pointer(), so it's safe to increment
4379			 * it here.
4380			 */
4381			++new->current_threshold;
4382		}
4383		j++;
4384	}
4385
4386swap_buffers:
4387	/* Swap primary and spare array */
4388	thresholds->spare = thresholds->primary;
4389
4390	rcu_assign_pointer(thresholds->primary, new);
4391
4392	/* To be sure that nobody uses thresholds */
4393	synchronize_rcu();
4394
4395	/* If all events are unregistered, free the spare array */
4396	if (!new) {
4397		kfree(thresholds->spare);
4398		thresholds->spare = NULL;
4399	}
4400unlock:
4401	mutex_unlock(&memcg->thresholds_lock);
4402}
4403
4404static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4405	struct eventfd_ctx *eventfd)
4406{
4407	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4408}
4409
4410static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4411	struct eventfd_ctx *eventfd)
4412{
4413	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4414}
4415
4416static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4417	struct eventfd_ctx *eventfd, const char *args)
4418{
4419	struct mem_cgroup_eventfd_list *event;
4420
4421	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4422	if (!event)
4423		return -ENOMEM;
4424
4425	spin_lock(&memcg_oom_lock);
4426
4427	event->eventfd = eventfd;
4428	list_add(&event->list, &memcg->oom_notify);
4429
4430	/* already in OOM? */
4431	if (memcg->under_oom)
4432		eventfd_signal(eventfd, 1);
4433	spin_unlock(&memcg_oom_lock);
4434
4435	return 0;
4436}
4437
4438static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4439	struct eventfd_ctx *eventfd)
4440{
4441	struct mem_cgroup_eventfd_list *ev, *tmp;
4442
4443	spin_lock(&memcg_oom_lock);
4444
4445	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4446		if (ev->eventfd == eventfd) {
4447			list_del(&ev->list);
4448			kfree(ev);
4449		}
4450	}
4451
4452	spin_unlock(&memcg_oom_lock);
4453}
4454
4455static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4456{
4457	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4458
4459	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4460	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4461	seq_printf(sf, "oom_kill %lu\n",
4462		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4463	return 0;
4464}
4465
4466static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4467	struct cftype *cft, u64 val)
4468{
4469	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4470
4471	/* cannot be set on the root cgroup, and only 0 and 1 are allowed */
4472	if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
4473		return -EINVAL;
4474
4475	memcg->oom_kill_disable = val;
4476	if (!val)
4477		memcg_oom_recover(memcg);
4478
4479	return 0;
4480}
4481
4482#ifdef CONFIG_CGROUP_WRITEBACK
4483
4484#include <trace/events/writeback.h>
4485
4486static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4487{
4488	return wb_domain_init(&memcg->cgwb_domain, gfp);
4489}
4490
4491static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4492{
4493	wb_domain_exit(&memcg->cgwb_domain);
4494}
4495
4496static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4497{
4498	wb_domain_size_changed(&memcg->cgwb_domain);
4499}
4500
4501struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4502{
4503	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4504
4505	if (!memcg->css.parent)
4506		return NULL;
4507
4508	return &memcg->cgwb_domain;
4509}
4510
4511/**
4512 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4513 * @wb: bdi_writeback in question
4514 * @pfilepages: out parameter for number of file pages
4515 * @pheadroom: out parameter for number of allocatable pages according to memcg
4516 * @pdirty: out parameter for number of dirty pages
4517 * @pwriteback: out parameter for number of pages under writeback
4518 *
4519 * Determine the numbers of file, headroom, dirty, and writeback pages in
4520 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4521 * is a bit more involved.
4522 *
4523 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4524 * headroom is calculated as the lowest headroom of itself and the
4525 * ancestors.  Note that this doesn't consider the actual amount of
4526 * available memory in the system.  The caller should further cap
4527 * *@pheadroom accordingly.
4528 */
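/*
 * Illustrative example: if @wb's memcg has min(max, high) == 100M with
 * 60M used (headroom 40M) while an ancestor has an 80M ceiling with 75M
 * used (headroom 5M), *@pheadroom comes out as 5M; the tightest level
 * in the hierarchy wins.
 */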
4529void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4530			 unsigned long *pheadroom, unsigned long *pdirty,
4531			 unsigned long *pwriteback)
4532{
4533	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4534	struct mem_cgroup *parent;
4535
4536	cgroup_rstat_flush_irqsafe(memcg->css.cgroup);
4537
4538	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4539	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4540	*pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4541			memcg_page_state(memcg, NR_ACTIVE_FILE);
4542
4543	*pheadroom = PAGE_COUNTER_MAX;
4544	while ((parent = parent_mem_cgroup(memcg))) {
4545		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4546					    READ_ONCE(memcg->memory.high));
4547		unsigned long used = page_counter_read(&memcg->memory);
4548
4549		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4550		memcg = parent;
4551	}
4552}
4553
4554/*
4555 * Foreign dirty flushing
4556 *
4557 * There's an inherent mismatch between memcg and writeback.  The former
4558 * tracks ownership per-page while the latter per-inode.  This was a
4559 * deliberate design decision because honoring per-page ownership in the
4560 * writeback path is complicated, may lead to higher CPU and IO overheads
4561 * and deemed unnecessary given that write-sharing an inode across
4562 * different cgroups isn't a common use-case.
4563 *
4564 * Combined with inode majority-writer ownership switching, this works well
4565 * enough in most cases but there are some pathological cases.  For
4566 * example, let's say there are two cgroups A and B which keep writing to
4567 * different but confined parts of the same inode.  B owns the inode and
4568 * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4569 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4570 * triggering background writeback.  A will be slowed down without a way to
4571 * make writeback of the dirty pages happen.
4572 *
4573 * Conditions like the above can lead to a cgroup getting repeatedly and
4574 * severely throttled after making some progress after each
4575 * dirty_expire_interval while the underlying IO device is almost
4576 * completely idle.
4577 *
4578 * Solving this problem completely requires matching the ownership tracking
4579 * granularities between memcg and writeback in either direction.  However,
4580 * the more egregious behaviors can be avoided by simply remembering the
4581 * most recent foreign dirtying events and initiating remote flushes on
4582 * them when local writeback isn't enough to keep the memory clean enough.
4583 *
4584 * The following two functions implement such mechanism.  When a foreign
4585 * page - a page whose memcg and writeback ownerships don't match - is
4586 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4587 * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4588 * decides that the memcg needs to sleep due to high dirty ratio, it calls
4589 * mem_cgroup_flush_foreign() which queues writeback on the recorded
4590 * foreign bdi_writebacks which haven't expired.  Both the numbers of
4591 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4592 * limited to MEMCG_CGWB_FRN_CNT.
4593 *
4594 * The mechanism only remembers IDs and doesn't hold any object references.
4595 * As being wrong occasionally doesn't matter, updates and accesses to the
4596 * records are lockless and racy.
4597 */
4598void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
4599					     struct bdi_writeback *wb)
4600{
4601	struct mem_cgroup *memcg = page_memcg(page);
4602	struct memcg_cgwb_frn *frn;
4603	u64 now = get_jiffies_64();
4604	u64 oldest_at = now;
4605	int oldest = -1;
4606	int i;
4607
4608	trace_track_foreign_dirty(page, wb);
4609
4610	/*
4611	 * Pick the slot to use.  If there is already a slot for @wb, keep
4612	 * using it.  If not, replace the oldest one which isn't being
4613	 * written out.
4614	 */
4615	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4616		frn = &memcg->cgwb_frn[i];
4617		if (frn->bdi_id == wb->bdi->id &&
4618		    frn->memcg_id == wb->memcg_css->id)
4619			break;
4620		if (time_before64(frn->at, oldest_at) &&
4621		    atomic_read(&frn->done.cnt) == 1) {
4622			oldest = i;
4623			oldest_at = frn->at;
4624		}
4625	}
4626
4627	if (i < MEMCG_CGWB_FRN_CNT) {
4628		/*
4629		 * Re-using an existing one.  Update timestamp lazily to
4630		 * avoid making the cacheline hot.  We want them to be
4631		 * reasonably up-to-date and significantly shorter than
4632		 * dirty_expire_interval as that's what expires the record.
4633		 * Use the shorter of 1s and dirty_expire_interval / 8.
4634		 */
4635		unsigned long update_intv =
4636			min_t(unsigned long, HZ,
4637			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4638
4639		if (time_before64(frn->at, now - update_intv))
4640			frn->at = now;
4641	} else if (oldest >= 0) {
4642		/* replace the oldest free one */
4643		frn = &memcg->cgwb_frn[oldest];
4644		frn->bdi_id = wb->bdi->id;
4645		frn->memcg_id = wb->memcg_css->id;
4646		frn->at = now;
4647	}
4648}
4649
4650/* issue foreign writeback flushes for recorded foreign dirtying events */
4651void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4652{
4653	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4654	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4655	u64 now = jiffies_64;
4656	int i;
4657
4658	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4659		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4660
4661		/*
4662		 * If the record is older than dirty_expire_interval,
4663		 * writeback on it has already started.  No need to kick it
4664		 * off again.  Also, don't start a new one if there's
4665		 * already one in flight.
4666		 */
4667		if (time_after64(frn->at, now - intv) &&
4668		    atomic_read(&frn->done.cnt) == 1) {
4669			frn->at = 0;
4670			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4671			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0,
4672					       WB_REASON_FOREIGN_FLUSH,
4673					       &frn->done);
4674		}
4675	}
4676}
4677
4678#else	/* CONFIG_CGROUP_WRITEBACK */
4679
4680static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4681{
4682	return 0;
4683}
4684
4685static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4686{
4687}
4688
4689static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4690{
4691}
4692
4693#endif	/* CONFIG_CGROUP_WRITEBACK */
4694
4695/*
4696 * DO NOT USE IN NEW FILES.
4697 *
4698 * "cgroup.event_control" implementation.
4699 *
4700 * This is way over-engineered.  It tries to support fully configurable
4701 * events for each user.  Such level of flexibility is completely
4702 * unnecessary especially in the light of the planned unified hierarchy.
4703 *
4704 * Please deprecate this and replace with something simpler if at all
4705 * possible.
4706 */
4707
4708/*
4709 * Unregister event and free resources.
4710 *
4711 * Gets called from workqueue.
4712 */
4713static void memcg_event_remove(struct work_struct *work)
4714{
4715	struct mem_cgroup_event *event =
4716		container_of(work, struct mem_cgroup_event, remove);
4717	struct mem_cgroup *memcg = event->memcg;
4718
4719	remove_wait_queue(event->wqh, &event->wait);
4720
4721	event->unregister_event(memcg, event->eventfd);
4722
4723	/* Notify userspace the event is going away. */
4724	eventfd_signal(event->eventfd, 1);
4725
4726	eventfd_ctx_put(event->eventfd);
4727	kfree(event);
4728	css_put(&memcg->css);
4729}
4730
4731/*
4732 * Gets called on EPOLLHUP on eventfd when user closes it.
4733 *
4734 * Called with wqh->lock held and interrupts disabled.
4735 */
4736static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4737			    int sync, void *key)
4738{
4739	struct mem_cgroup_event *event =
4740		container_of(wait, struct mem_cgroup_event, wait);
4741	struct mem_cgroup *memcg = event->memcg;
4742	__poll_t flags = key_to_poll(key);
4743
4744	if (flags & EPOLLHUP) {
4745		/*
4746		 * If the event has been detached at cgroup removal, we
4747		 * can simply return knowing the other side will cleanup
4748		 * for us.
4749		 *
4750		 * We can't race against event freeing since the other
4751		 * side will require wqh->lock via remove_wait_queue(),
4752		 * which we hold.
4753		 */
4754		spin_lock(&memcg->event_list_lock);
4755		if (!list_empty(&event->list)) {
4756			list_del_init(&event->list);
4757			/*
4758			 * We are in atomic context, but memcg_event_remove()
4759			 * may sleep, so we have to call it from a workqueue.
4760			 */
4761			schedule_work(&event->remove);
4762		}
4763		spin_unlock(&memcg->event_list_lock);
4764	}
4765
4766	return 0;
4767}
4768
4769static void memcg_event_ptable_queue_proc(struct file *file,
4770		wait_queue_head_t *wqh, poll_table *pt)
4771{
4772	struct mem_cgroup_event *event =
4773		container_of(pt, struct mem_cgroup_event, pt);
4774
4775	event->wqh = wqh;
4776	add_wait_queue(wqh, &event->wait);
4777}
4778
4779/*
4780 * DO NOT USE IN NEW FILES.
4781 *
4782 * Parse input and register new cgroup event handler.
4783 *
4784 * Input must be in format '<event_fd> <control_fd> <args>'.
4785 * Interpretation of args is defined by control file implementation.
4786 */
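/*
 * Illustrative userspace sequence (cgroup v1): to be notified when
 * usage crosses 50M, create an eventfd, open memory.usage_in_bytes,
 * and write "<efd> <cfd> 52428800" to this file; the eventfd then
 * becomes readable each time the threshold is crossed.
 */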
4787static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4788					 char *buf, size_t nbytes, loff_t off)
4789{
4790	struct cgroup_subsys_state *css = of_css(of);
4791	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4792	struct mem_cgroup_event *event;
4793	struct cgroup_subsys_state *cfile_css;
4794	unsigned int efd, cfd;
4795	struct fd efile;
4796	struct fd cfile;
4797	const char *name;
4798	char *endp;
4799	int ret;
4800
4801	buf = strstrip(buf);
4802
4803	efd = simple_strtoul(buf, &endp, 10);
4804	if (*endp != ' ')
4805		return -EINVAL;
4806	buf = endp + 1;
4807
4808	cfd = simple_strtoul(buf, &endp, 10);
4809	if ((*endp != ' ') && (*endp != '\0'))
4810		return -EINVAL;
4811	buf = endp + 1;
4812
4813	event = kzalloc(sizeof(*event), GFP_KERNEL);
4814	if (!event)
4815		return -ENOMEM;
4816
4817	event->memcg = memcg;
4818	INIT_LIST_HEAD(&event->list);
4819	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4820	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4821	INIT_WORK(&event->remove, memcg_event_remove);
4822
4823	efile = fdget(efd);
4824	if (!efile.file) {
4825		ret = -EBADF;
4826		goto out_kfree;
4827	}
4828
4829	event->eventfd = eventfd_ctx_fileget(efile.file);
4830	if (IS_ERR(event->eventfd)) {
4831		ret = PTR_ERR(event->eventfd);
4832		goto out_put_efile;
4833	}
4834
4835	cfile = fdget(cfd);
4836	if (!cfile.file) {
4837		ret = -EBADF;
4838		goto out_put_eventfd;
4839	}
4840
4841	/* the process needs read permission on the control file */
4842	/* AV: shouldn't we check that it's been opened for read instead? */
4843	ret = file_permission(cfile.file, MAY_READ);
4844	if (ret < 0)
4845		goto out_put_cfile;
4846
4847	/*
4848	 * Determine the event callbacks and set them in @event.  This used
4849	 * to be done via struct cftype but cgroup core no longer knows
4850	 * about these events.  The following is crude but the whole thing
4851	 * is for compatibility anyway.
4852	 *
4853	 * DO NOT ADD NEW FILES.
4854	 */
4855	name = cfile.file->f_path.dentry->d_name.name;
4856
4857	if (!strcmp(name, "memory.usage_in_bytes")) {
4858		event->register_event = mem_cgroup_usage_register_event;
4859		event->unregister_event = mem_cgroup_usage_unregister_event;
4860	} else if (!strcmp(name, "memory.oom_control")) {
4861		event->register_event = mem_cgroup_oom_register_event;
4862		event->unregister_event = mem_cgroup_oom_unregister_event;
4863	} else if (!strcmp(name, "memory.pressure_level")) {
4864		event->register_event = vmpressure_register_event;
4865		event->unregister_event = vmpressure_unregister_event;
4866	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4867		event->register_event = memsw_cgroup_usage_register_event;
4868		event->unregister_event = memsw_cgroup_usage_unregister_event;
4869	} else {
4870		ret = -EINVAL;
4871		goto out_put_cfile;
4872	}
4873
4874	/*
4875	 * Verify @cfile should belong to @css.  Also, remaining events are
4876	 * automatically removed on cgroup destruction but the removal is
4877	 * asynchronous, so take an extra ref on @css.
4878	 */
4879	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
4880					       &memory_cgrp_subsys);
4881	ret = -EINVAL;
4882	if (IS_ERR(cfile_css))
4883		goto out_put_cfile;
4884	if (cfile_css != css) {
4885		css_put(cfile_css);
4886		goto out_put_cfile;
4887	}
4888
4889	ret = event->register_event(memcg, event->eventfd, buf);
4890	if (ret)
4891		goto out_put_css;
4892
4893	vfs_poll(efile.file, &event->pt);
4894
4895	spin_lock(&memcg->event_list_lock);
4896	list_add(&event->list, &memcg->event_list);
4897	spin_unlock(&memcg->event_list_lock);
4898
4899	fdput(cfile);
4900	fdput(efile);
4901
4902	return nbytes;
4903
4904out_put_css:
4905	css_put(css);
4906out_put_cfile:
4907	fdput(cfile);
4908out_put_eventfd:
4909	eventfd_ctx_put(event->eventfd);
4910out_put_efile:
4911	fdput(efile);
4912out_kfree:
4913	kfree(event);
4914
4915	return ret;
4916}
4917
4918static struct cftype mem_cgroup_legacy_files[] = {
4919	{
4920		.name = "usage_in_bytes",
4921		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4922		.read_u64 = mem_cgroup_read_u64,
4923	},
4924	{
4925		.name = "max_usage_in_bytes",
4926		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4927		.write = mem_cgroup_reset,
4928		.read_u64 = mem_cgroup_read_u64,
4929	},
4930	{
4931		.name = "limit_in_bytes",
4932		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4933		.write = mem_cgroup_write,
4934		.read_u64 = mem_cgroup_read_u64,
4935	},
4936	{
4937		.name = "soft_limit_in_bytes",
4938		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4939		.write = mem_cgroup_write,
4940		.read_u64 = mem_cgroup_read_u64,
4941	},
4942	{
4943		.name = "failcnt",
4944		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4945		.write = mem_cgroup_reset,
4946		.read_u64 = mem_cgroup_read_u64,
4947	},
4948	{
4949		.name = "stat",
4950		.seq_show = memcg_stat_show,
4951	},
4952	{
4953		.name = "force_empty",
4954		.write = mem_cgroup_force_empty_write,
4955	},
4956	{
4957		.name = "use_hierarchy",
4958		.write_u64 = mem_cgroup_hierarchy_write,
4959		.read_u64 = mem_cgroup_hierarchy_read,
4960	},
4961	{
4962		.name = "cgroup.event_control",		/* XXX: for compat */
4963		.write = memcg_write_event_control,
4964		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
4965	},
4966	{
4967		.name = "swappiness",
4968		.read_u64 = mem_cgroup_swappiness_read,
4969		.write_u64 = mem_cgroup_swappiness_write,
4970	},
4971	{
4972		.name = "move_charge_at_immigrate",
4973		.read_u64 = mem_cgroup_move_charge_read,
4974		.write_u64 = mem_cgroup_move_charge_write,
4975	},
4976	{
4977		.name = "oom_control",
4978		.seq_show = mem_cgroup_oom_control_read,
4979		.write_u64 = mem_cgroup_oom_control_write,
4980		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4981	},
4982	{
4983		.name = "pressure_level",
4984	},
4985#ifdef CONFIG_NUMA
4986	{
4987		.name = "numa_stat",
4988		.seq_show = memcg_numa_stat_show,
4989	},
4990#endif
4991	{
4992		.name = "kmem.limit_in_bytes",
4993		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
4994		.write = mem_cgroup_write,
4995		.read_u64 = mem_cgroup_read_u64,
4996	},
4997	{
4998		.name = "kmem.usage_in_bytes",
4999		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5000		.read_u64 = mem_cgroup_read_u64,
5001	},
5002	{
5003		.name = "kmem.failcnt",
5004		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5005		.write = mem_cgroup_reset,
5006		.read_u64 = mem_cgroup_read_u64,
5007	},
5008	{
5009		.name = "kmem.max_usage_in_bytes",
5010		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5011		.write = mem_cgroup_reset,
5012		.read_u64 = mem_cgroup_read_u64,
5013	},
5014#if defined(CONFIG_MEMCG_KMEM) && \
5015	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
5016	{
5017		.name = "kmem.slabinfo",
5018		.seq_show = memcg_slab_show,
5019	},
5020#endif
5021	{
5022		.name = "kmem.tcp.limit_in_bytes",
5023		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5024		.write = mem_cgroup_write,
5025		.read_u64 = mem_cgroup_read_u64,
5026	},
5027	{
5028		.name = "kmem.tcp.usage_in_bytes",
5029		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5030		.read_u64 = mem_cgroup_read_u64,
5031	},
5032	{
5033		.name = "kmem.tcp.failcnt",
5034		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5035		.write = mem_cgroup_reset,
5036		.read_u64 = mem_cgroup_read_u64,
5037	},
5038	{
5039		.name = "kmem.tcp.max_usage_in_bytes",
5040		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5041		.write = mem_cgroup_reset,
5042		.read_u64 = mem_cgroup_read_u64,
5043	},
5044	{ },	/* terminate */
5045};
5046
5047/*
5048 * Private memory cgroup IDR
5049 *
5050 * Swap-out records and page cache shadow entries need to store memcg
5051 * references in constrained space, so we maintain an ID space that is
5052 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
5053 * memory-controlled cgroups to 64k.
5054 *
5055 * However, there usually are many references to the offline CSS after
5056 * the cgroup has been destroyed, such as page cache or reclaimable
5057 * slab objects, that don't need to hang on to the ID. We want to keep
5058 * those dead CSS from occupying IDs, or we might quickly exhaust the
5059 * relatively small ID space and prevent the creation of new cgroups
5060 * even when there are much fewer than 64k cgroups - possibly none.
5061 *
5062 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5063 * be freed and recycled when it's no longer needed, which is usually
5064 * when the CSS is offlined.
5065 *
5066 * The only exception to that are records of swapped out tmpfs/shmem
5067 * pages that need to be attributed to live ancestors on swapin. But
5068 * those references are manageable from userspace.
5069 */
5070
5071static DEFINE_IDR(mem_cgroup_idr);
5072
5073static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5074{
5075	if (memcg->id.id > 0) {
5076		idr_remove(&mem_cgroup_idr, memcg->id.id);
5077		memcg->id.id = 0;
5078	}
5079}
5080
5081static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5082						  unsigned int n)
5083{
5084	refcount_add(n, &memcg->id.ref);
5085}
5086
5087static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5088{
5089	if (refcount_sub_and_test(n, &memcg->id.ref)) {
5090		mem_cgroup_id_remove(memcg);
5091
5092		/* Memcg ID pins CSS */
5093		css_put(&memcg->css);
5094	}
5095}
5096
5097static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5098{
5099	mem_cgroup_id_put_many(memcg, 1);
5100}
5101
5102/**
5103 * mem_cgroup_from_id - look up a memcg from a memcg id
5104 * @id: the memcg id to look up
5105 *
5106 * Caller must hold rcu_read_lock().
5107 */
5108struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5109{
5110	WARN_ON_ONCE(!rcu_read_lock_held());
5111	return idr_find(&mem_cgroup_idr, id);
5112}
5113
5114static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5115{
5116	struct mem_cgroup_per_node *pn;
5117	int tmp = node;
5118	/*
5119	 * This routine is called for each possible node.
5120	 * But it is a BUG to call kmalloc() on an offline node.
5121	 *
5122	 * TODO: this routine can waste memory for nodes which will
5123	 *       never be onlined. It would be better to use a memory
5124	 *       hotplug callback function.
5125	 */
5126	if (!node_state(node, N_NORMAL_MEMORY))
5127		tmp = -1;
5128	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
5129	if (!pn)
5130		return 1;
5131
5132	pn->lruvec_stat_local = alloc_percpu_gfp(struct lruvec_stat,
5133						 GFP_KERNEL_ACCOUNT);
5134	if (!pn->lruvec_stat_local) {
5135		kfree(pn);
5136		return 1;
5137	}
5138
5139	pn->lruvec_stat_cpu = alloc_percpu_gfp(struct batched_lruvec_stat,
5140					       GFP_KERNEL_ACCOUNT);
5141	if (!pn->lruvec_stat_cpu) {
5142		free_percpu(pn->lruvec_stat_local);
5143		kfree(pn);
5144		return 1;
5145	}
5146
5147	lruvec_init(&pn->lruvec);
5148	pn->usage_in_excess = 0;
5149	pn->on_tree = false;
5150	pn->memcg = memcg;
5151
5152	memcg->nodeinfo[node] = pn;
5153	return 0;
5154}
5155
5156static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5157{
5158	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5159
5160	if (!pn)
5161		return;
5162
5163	free_percpu(pn->lruvec_stat_cpu);
5164	free_percpu(pn->lruvec_stat_local);
5165	kfree(pn);
5166}
5167
5168static void __mem_cgroup_free(struct mem_cgroup *memcg)
5169{
5170	int node;
5171
5172	for_each_node(node)
5173		free_mem_cgroup_per_node_info(memcg, node);
5174	free_percpu(memcg->vmstats_percpu);
5175	kfree(memcg);
5176}
5177
5178static void mem_cgroup_free(struct mem_cgroup *memcg)
5179{
5180	int cpu;
5181
5182	memcg_wb_domain_exit(memcg);
5183	/*
5184	 * Flush percpu lruvec stats to guarantee correct values
5185	 * on the parent's and all ancestor levels.
5186	 */
5187	for_each_online_cpu(cpu)
5188		memcg_flush_lruvec_page_state(memcg, cpu);
5189	__mem_cgroup_free(memcg);
5190}
5191
5192static struct mem_cgroup *mem_cgroup_alloc(void)
5193{
5194	struct mem_cgroup *memcg;
5195	unsigned int size;
5196	int node;
5197	int __maybe_unused i;
5198	long error = -ENOMEM;
5199
5200	size = sizeof(struct mem_cgroup);
5201	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
5202
5203	memcg = kzalloc(size, GFP_KERNEL);
5204	if (!memcg)
5205		return ERR_PTR(error);
5206
5207	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5208				 1, MEM_CGROUP_ID_MAX,
5209				 GFP_KERNEL);
5210	if (memcg->id.id < 0) {
5211		error = memcg->id.id;
5212		goto fail;
5213	}
5214
5215	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5216						 GFP_KERNEL_ACCOUNT);
5217	if (!memcg->vmstats_percpu)
5218		goto fail;
5219
5220	for_each_node(node)
5221		if (alloc_mem_cgroup_per_node_info(memcg, node))
5222			goto fail;
5223
5224	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5225		goto fail;
5226
5227	INIT_WORK(&memcg->high_work, high_work_func);
5228	INIT_LIST_HEAD(&memcg->oom_notify);
5229	mutex_init(&memcg->thresholds_lock);
5230	spin_lock_init(&memcg->move_lock);
5231	vmpressure_init(&memcg->vmpressure);
5232	INIT_LIST_HEAD(&memcg->event_list);
5233	spin_lock_init(&memcg->event_list_lock);
5234	memcg->socket_pressure = jiffies;
5235#ifdef CONFIG_MEMCG_KMEM
5236	memcg->kmemcg_id = -1;
5237	INIT_LIST_HEAD(&memcg->objcg_list);
5238#endif
5239#ifdef CONFIG_CGROUP_WRITEBACK
5240	INIT_LIST_HEAD(&memcg->cgwb_list);
5241	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5242		memcg->cgwb_frn[i].done =
5243			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5244#endif
5245#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5246	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5247	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5248	memcg->deferred_split_queue.split_queue_len = 0;
5249#endif
5250	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5251	return memcg;
5252fail:
5253	mem_cgroup_id_remove(memcg);
5254	__mem_cgroup_free(memcg);
5255	return ERR_PTR(error);
5256}
5257
5258static struct cgroup_subsys_state * __ref
5259mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5260{
5261	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5262	struct mem_cgroup *memcg, *old_memcg;
5263	long error = -ENOMEM;
5264
5265	old_memcg = set_active_memcg(parent);
5266	memcg = mem_cgroup_alloc();
5267	set_active_memcg(old_memcg);
5268	if (IS_ERR(memcg))
5269		return ERR_CAST(memcg);
5270
5271	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5272	memcg->soft_limit = PAGE_COUNTER_MAX;
5273	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5274	if (parent) {
5275		memcg->swappiness = mem_cgroup_swappiness(parent);
5276		memcg->oom_kill_disable = parent->oom_kill_disable;
5277
5278		page_counter_init(&memcg->memory, &parent->memory);
5279		page_counter_init(&memcg->swap, &parent->swap);
5280		page_counter_init(&memcg->kmem, &parent->kmem);
5281		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5282	} else {
5283		page_counter_init(&memcg->memory, NULL);
5284		page_counter_init(&memcg->swap, NULL);
5285		page_counter_init(&memcg->kmem, NULL);
5286		page_counter_init(&memcg->tcpmem, NULL);
5287
5288		root_mem_cgroup = memcg;
5289		return &memcg->css;
5290	}
5291
5292	/* The following stuff does not apply to the root */
5293	error = memcg_online_kmem(memcg);
5294	if (error)
5295		goto fail;
5296
5297	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5298		static_branch_inc(&memcg_sockets_enabled_key);
5299
5300	return &memcg->css;
5301fail:
5302	mem_cgroup_id_remove(memcg);
5303	mem_cgroup_free(memcg);
5304	return ERR_PTR(error);
5305}
5306
5307static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5308{
5309	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5310
5311	/*
5312	 * A memcg must be visible for expand_shrinker_info()
5313	 * by the time the maps are allocated. So, we allocate maps
5314	 * here, when for_each_mem_cgroup() can't skip it.
5315	 */
5316	if (alloc_shrinker_info(memcg)) {
5317		mem_cgroup_id_remove(memcg);
5318		return -ENOMEM;
5319	}
5320
5321	/* Online state pins memcg ID, memcg ID pins CSS */
5322	refcount_set(&memcg->id.ref, 1);
5323	css_get(css);
5324	return 0;
5325}
5326
5327static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5328{
5329	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5330	struct mem_cgroup_event *event, *tmp;
5331
5332	/*
5333	 * Unregister events and notify userspace.
5334	 * Notify userspace about cgroup removal only after rmdir of the
5335	 * cgroup directory, to avoid a race between userspace and kernelspace.
5336	 */
5337	spin_lock(&memcg->event_list_lock);
5338	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5339		list_del_init(&event->list);
5340		schedule_work(&event->remove);
5341	}
5342	spin_unlock(&memcg->event_list_lock);
5343
5344	page_counter_set_min(&memcg->memory, 0);
5345	page_counter_set_low(&memcg->memory, 0);
5346
5347	memcg_offline_kmem(memcg);
5348	reparent_shrinker_deferred(memcg);
5349	wb_memcg_offline(memcg);
5350
5351	drain_all_stock(memcg);
5352
5353	mem_cgroup_id_put(memcg);
5354}
5355
5356static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5357{
5358	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5359
5360	invalidate_reclaim_iterators(memcg);
5361}
5362
5363static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5364{
5365	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5366	int __maybe_unused i;
5367
5368#ifdef CONFIG_CGROUP_WRITEBACK
5369	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5370		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5371#endif
5372	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5373		static_branch_dec(&memcg_sockets_enabled_key);
5374
5375	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5376		static_branch_dec(&memcg_sockets_enabled_key);
5377
5378	vmpressure_cleanup(&memcg->vmpressure);
5379	cancel_work_sync(&memcg->high_work);
5380	mem_cgroup_remove_from_trees(memcg);
5381	free_shrinker_info(memcg);
5382	memcg_free_kmem(memcg);
5383	mem_cgroup_free(memcg);
5384}
5385
5386/**
5387 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5388 * @css: the target css
5389 *
5390 * Reset the states of the mem_cgroup associated with @css.  This is
5391 * invoked when the userland requests disabling on the default hierarchy
5392 * but the memcg is pinned through dependency.  The memcg should stop
5393 * applying policies and should revert to the vanilla state as it may be
5394 * made visible again.
5395 *
5396 * The current implementation only resets the essential configurations.
5397 * This needs to be expanded to cover all the visible parts.
5398 */
5399static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5400{
5401	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5402
5403	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5404	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5405	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5406	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5407	page_counter_set_min(&memcg->memory, 0);
5408	page_counter_set_low(&memcg->memory, 0);
5409	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5410	memcg->soft_limit = PAGE_COUNTER_MAX;
5411	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5412	memcg_wb_domain_size_changed(memcg);
5413}
5414
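/*
 * rstat flush callback: for every stat and event item, the delta folded
 * into this level is (a) whatever already-flushed children propagated
 * up via the *_pending arrays plus (b) this CPU's local changes since
 * the previous flush; the combined delta is then queued on the parent's
 * pending array.
 */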
5415static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5416{
5417	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5418	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5419	struct memcg_vmstats_percpu *statc;
5420	long delta, v;
5421	int i;
5422
5423	statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5424
5425	for (i = 0; i < MEMCG_NR_STAT; i++) {
5426		/*
5427		 * Collect the aggregated propagation counts of groups
5428		 * below us. We're in a per-cpu loop here and this is
5429		 * a global counter, so the first cycle will get them.
5430		 */
5431		delta = memcg->vmstats.state_pending[i];
5432		if (delta)
5433			memcg->vmstats.state_pending[i] = 0;
5434
5435		/* Add CPU changes on this level since the last flush */
5436		v = READ_ONCE(statc->state[i]);
5437		if (v != statc->state_prev[i]) {
5438			delta += v - statc->state_prev[i];
5439			statc->state_prev[i] = v;
5440		}
5441
5442		if (!delta)
5443			continue;
5444
5445		/* Aggregate counts on this level and propagate upwards */
5446		memcg->vmstats.state[i] += delta;
5447		if (parent)
5448			parent->vmstats.state_pending[i] += delta;
5449	}
5450
5451	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
5452		delta = memcg->vmstats.events_pending[i];
5453		if (delta)
5454			memcg->vmstats.events_pending[i] = 0;
5455
5456		v = READ_ONCE(statc->events[i]);
5457		if (v != statc->events_prev[i]) {
5458			delta += v - statc->events_prev[i];
5459			statc->events_prev[i] = v;
5460		}
5461
5462		if (!delta)
5463			continue;
5464
5465		memcg->vmstats.events[i] += delta;
5466		if (parent)
5467			parent->vmstats.events_pending[i] += delta;
5468	}
5469}
5470
5471#ifdef CONFIG_MMU
5472/* Handlers for move charge at task migration. */
5473static int mem_cgroup_do_precharge(unsigned long count)
5474{
5475	int ret;
5476
5477	/* Try a single bulk charge without reclaim first, kswapd may wake */
5478	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5479	if (!ret) {
5480		mc.precharge += count;
5481		return ret;
5482	}
5483
5484	/* Try charges one by one with reclaim, but do not retry */
5485	while (count--) {
5486		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5487		if (ret)
5488			return ret;
5489		mc.precharge++;
5490		cond_resched();
5491	}
5492	return 0;
5493}
5494
5495union mc_target {
5496	struct page	*page;
5497	swp_entry_t	ent;
5498};
5499
5500enum mc_target_type {
5501	MC_TARGET_NONE = 0,
5502	MC_TARGET_PAGE,
5503	MC_TARGET_SWAP,
5504	MC_TARGET_DEVICE,
5505};
5506
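/*
 * The mc_handle_*() helpers below classify each scanned pte into one of
 * the target types above: MC_TARGET_PAGE for a present or cached page,
 * MC_TARGET_SWAP for a swap entry, and MC_TARGET_DEVICE for
 * device-private (ZONE_DEVICE) pages, which need special refcounting.
 */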
5507static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5508						unsigned long addr, pte_t ptent)
5509{
5510	struct page *page = vm_normal_page(vma, addr, ptent);
5511
5512	if (!page || !page_mapped(page))
5513		return NULL;
5514	if (PageAnon(page)) {
5515		if (!(mc.flags & MOVE_ANON))
5516			return NULL;
5517	} else {
5518		if (!(mc.flags & MOVE_FILE))
5519			return NULL;
5520	}
5521	if (!get_page_unless_zero(page))
5522		return NULL;
5523
5524	return page;
5525}
5526
5527#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5528static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5529			pte_t ptent, swp_entry_t *entry)
5530{
5531	struct page *page = NULL;
5532	swp_entry_t ent = pte_to_swp_entry(ptent);
5533
5534	if (!(mc.flags & MOVE_ANON))
5535		return NULL;
5536
5537	/*
5538	 * Handle MEMORY_DEVICE_PRIVATE entries: these are ZONE_DEVICE pages
5539	 * belonging to a device; because they are not accessible by the CPU,
5540	 * they are stored as special swap entries in the CPU page table.
5541	 */
5542	if (is_device_private_entry(ent)) {
5543		page = pfn_swap_entry_to_page(ent);
5544		/*
5545		 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has
5546		 * a refcount of 1 when free (unlike a normal page)
5547		 */
5548		if (!page_ref_add_unless(page, 1, 1))
5549			return NULL;
5550		return page;
5551	}
5552
5553	if (non_swap_entry(ent))
5554		return NULL;
5555
5556	/*
5557	 * Because lookup_swap_cache() updates some statistics counters,
5558	 * we call find_get_page() with swapper_space directly.
5559	 */
5560	page = find_get_page(swap_address_space(ent), swp_offset(ent));
5561	entry->val = ent.val;
5562
5563	return page;
5564}
5565#else
5566static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5567			pte_t ptent, swp_entry_t *entry)
5568{
5569	return NULL;
5570}
5571#endif
5572
5573static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5574			unsigned long addr, pte_t ptent, swp_entry_t *entry)
5575{
5576	if (!vma->vm_file) /* anonymous vma */
5577		return NULL;
5578	if (!(mc.flags & MOVE_FILE))
5579		return NULL;
5580
5581	/* The page is moved even if it's not RSS of this task (page-faulted). */
5582	/* shmem/tmpfs may report page out on swap: account for that too. */
5583	return find_get_incore_page(vma->vm_file->f_mapping,
5584			linear_page_index(vma, addr));
5585}
5586
5587/**
5588 * mem_cgroup_move_account - move account of the page
5589 * @page: the page
5590 * @compound: charge the page as compound or small page
5591 * @from: mem_cgroup which the page is moved from.
5592 * @to:	mem_cgroup which the page is moved to. @from != @to.
5593 *
5594 * The caller must make sure the page is not on LRU (isolate_lru_page() is useful.)
5595 *
5596 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
5597 * from old cgroup.
5598 */
5599static int mem_cgroup_move_account(struct page *page,
5600				   bool compound,
5601				   struct mem_cgroup *from,
5602				   struct mem_cgroup *to)
5603{
5604	struct lruvec *from_vec, *to_vec;
5605	struct pglist_data *pgdat;
5606	unsigned int nr_pages = compound ? thp_nr_pages(page) : 1;
5607	int ret;
5608
5609	VM_BUG_ON(from == to);
5610	VM_BUG_ON_PAGE(PageLRU(page), page);
5611	VM_BUG_ON(compound && !PageTransHuge(page));
5612
5613	/*
5614	 * Prevent mem_cgroup_migrate() from looking at
5615	 * page's memory cgroup of its source page while we change it.
5616	 */
5617	ret = -EBUSY;
5618	if (!trylock_page(page))
5619		goto out;
5620
5621	ret = -EINVAL;
5622	if (page_memcg(page) != from)
5623		goto out_unlock;
5624
5625	pgdat = page_pgdat(page);
5626	from_vec = mem_cgroup_lruvec(from, pgdat);
5627	to_vec = mem_cgroup_lruvec(to, pgdat);
5628
5629	lock_page_memcg(page);
5630
5631	if (PageAnon(page)) {
5632		if (page_mapped(page)) {
5633			__mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5634			__mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5635			if (PageTransHuge(page)) {
5636				__mod_lruvec_state(from_vec, NR_ANON_THPS,
5637						   -nr_pages);
5638				__mod_lruvec_state(to_vec, NR_ANON_THPS,
5639						   nr_pages);
5640			}
5641		}
5642	} else {
5643		__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5644		__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5645
5646		if (PageSwapBacked(page)) {
5647			__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5648			__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5649		}
5650
5651		if (page_mapped(page)) {
5652			__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5653			__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5654		}
5655
5656		if (PageDirty(page)) {
5657			struct address_space *mapping = page_mapping(page);
5658
5659			if (mapping_can_writeback(mapping)) {
5660				__mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5661						   -nr_pages);
5662				__mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5663						   nr_pages);
5664			}
5665		}
5666	}
5667
5668	if (PageWriteback(page)) {
5669		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5670		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5671	}
5672
5673	/*
5674	 * All state has been migrated, let's switch to the new memcg.
5675	 *
5676	 * It is safe to change page's memcg here because the page
5677	 * is referenced, charged, isolated, and locked: we can't race
5678	 * with (un)charging, migration, LRU putback, or anything else
5679	 * that would rely on a stable page's memory cgroup.
5680	 *
5681	 * Note that lock_page_memcg is a memcg lock, not a page lock,
5682	 * to save space. As soon as we switch page's memory cgroup to a
5683	 * new memcg that isn't locked, the above state can change
5684	 * concurrently again. Make sure we're truly done with it.
5685	 */
5686	smp_mb();
5687
5688	css_get(&to->css);
5689	css_put(&from->css);
5690
5691	page->memcg_data = (unsigned long)to;
5692
5693	__unlock_page_memcg(from);
5694
5695	ret = 0;
5696
5697	local_irq_disable();
5698	mem_cgroup_charge_statistics(to, page, nr_pages);
5699	memcg_check_events(to, page);
5700	mem_cgroup_charge_statistics(from, page, -nr_pages);
5701	memcg_check_events(from, page);
5702	local_irq_enable();
5703out_unlock:
5704	unlock_page(page);
5705out:
5706	return ret;
5707}
5708
5709/**
5710 * get_mctgt_type - get target type of moving charge
5711 * @vma: the vma to which the pte to be checked belongs
5712 * @addr: the address corresponding to the pte to be checked
5713 * @ptent: the pte to be checked
5714 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
5715 *
5716 * Returns
5717 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
5718 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5719 *     move charge. If @target is not NULL, the page is stored in target->page
5720 *     with an extra refcount taken (callers should handle it).
5721 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5722 *     target for charge migration. If @target is not NULL, the entry is stored
5723 *     in target->ent.
5724 *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is MEMORY_DEVICE_PRIVATE
5725 *     (so a ZONE_DEVICE page and thus not on the LRU).
5726 *     For now such a page is charged like a regular page would be, as for
5727 *     all intents and purposes it is just special memory taking the place
5728 *     of a regular page.
5729 *
5730 *     See Documentation/vm/hmm.rst and include/linux/hmm.h
5731 *
5732 * Called with pte lock held.
5733 */
5734
5735static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5736		unsigned long addr, pte_t ptent, union mc_target *target)
5737{
5738	struct page *page = NULL;
5739	enum mc_target_type ret = MC_TARGET_NONE;
5740	swp_entry_t ent = { .val = 0 };
5741
5742	if (pte_present(ptent))
5743		page = mc_handle_present_pte(vma, addr, ptent);
5744	else if (is_swap_pte(ptent))
5745		page = mc_handle_swap_pte(vma, ptent, &ent);
5746	else if (pte_none(ptent))
5747		page = mc_handle_file_pte(vma, addr, ptent, &ent);
5748
5749	if (!page && !ent.val)
5750		return ret;
5751	if (page) {
5752		/*
5753		 * Do only a loose check without serialization.
5754		 * mem_cgroup_move_account() checks whether the page is valid
5755		 * under LRU exclusion.
5756		 */
5757		if (page_memcg(page) == mc.from) {
5758			ret = MC_TARGET_PAGE;
5759			if (is_device_private_page(page))
5760				ret = MC_TARGET_DEVICE;
5761			if (target)
5762				target->page = page;
5763		}
5764		if (!ret || !target)
5765			put_page(page);
5766	}
5767	/*
5768	 * There is a swap entry and the page doesn't exist or isn't charged.
5769	 * But we cannot move a tail page of a THP.
5770	 */
5771	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5772	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5773		ret = MC_TARGET_SWAP;
5774		if (target)
5775			target->ent = ent;
5776	}
5777	return ret;
5778}
5779
5780#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5781/*
5782 * We don't consider PMD mapped swapping or file mapped pages because THP does
5783 * not support them for now.
5784 * Caller should make sure that pmd_trans_huge(pmd) is true.
5785 */
5786static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5787		unsigned long addr, pmd_t pmd, union mc_target *target)
5788{
5789	struct page *page = NULL;
5790	enum mc_target_type ret = MC_TARGET_NONE;
5791
5792	if (unlikely(is_swap_pmd(pmd))) {
5793		VM_BUG_ON(thp_migration_supported() &&
5794				  !is_pmd_migration_entry(pmd));
5795		return ret;
5796	}
5797	page = pmd_page(pmd);
5798	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5799	if (!(mc.flags & MOVE_ANON))
5800		return ret;
5801	if (page_memcg(page) == mc.from) {
5802		ret = MC_TARGET_PAGE;
5803		if (target) {
5804			get_page(page);
5805			target->page = page;
5806		}
5807	}
5808	return ret;
5809}
5810#else
5811static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5812		unsigned long addr, pmd_t pmd, union mc_target *target)
5813{
5814	return MC_TARGET_NONE;
5815}
5816#endif
5817
5818static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5819					unsigned long addr, unsigned long end,
5820					struct mm_walk *walk)
5821{
5822	struct vm_area_struct *vma = walk->vma;
5823	pte_t *pte;
5824	spinlock_t *ptl;
5825
5826	ptl = pmd_trans_huge_lock(pmd, vma);
5827	if (ptl) {
5828		/*
5829		 * Note there can not be MC_TARGET_DEVICE for now, as we do not
5830		 * support transparent huge pages with MEMORY_DEVICE_PRIVATE, but
5831		 * this might change.
5832		 */
5833		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5834			mc.precharge += HPAGE_PMD_NR;
5835		spin_unlock(ptl);
5836		return 0;
5837	}
5838
5839	if (pmd_trans_unstable(pmd))
5840		return 0;
5841	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5842	for (; addr != end; pte++, addr += PAGE_SIZE)
5843		if (get_mctgt_type(vma, addr, *pte, NULL))
5844			mc.precharge++;	/* increment precharge temporarily */
5845	pte_unmap_unlock(pte - 1, ptl);
5846	cond_resched();
5847
5848	return 0;
5849}
5850
5851static const struct mm_walk_ops precharge_walk_ops = {
5852	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
5853};
5854
5855static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5856{
5857	unsigned long precharge;
5858
5859	mmap_read_lock(mm);
5860	walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
5861	mmap_read_unlock(mm);
5862
5863	precharge = mc.precharge;
5864	mc.precharge = 0;
5865
5866	return precharge;
5867}
5868
5869static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5870{
5871	unsigned long precharge = mem_cgroup_count_precharge(mm);
5872
5873	VM_BUG_ON(mc.moving_task);
5874	mc.moving_task = current;
5875	return mem_cgroup_do_precharge(precharge);
5876}
5877
5878/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5879static void __mem_cgroup_clear_mc(void)
5880{
5881	struct mem_cgroup *from = mc.from;
5882	struct mem_cgroup *to = mc.to;
5883
5884	/* we must uncharge all the leftover precharges from mc.to */
5885	if (mc.precharge) {
5886		cancel_charge(mc.to, mc.precharge);
5887		mc.precharge = 0;
5888	}
5889	/*
5890	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5891	 * we must uncharge here.
5892	 */
5893	if (mc.moved_charge) {
5894		cancel_charge(mc.from, mc.moved_charge);
5895		mc.moved_charge = 0;
5896	}
5897	/* we must fixup refcnts and charges */
5898	if (mc.moved_swap) {
5899		/* uncharge swap account from the old cgroup */
5900		if (!mem_cgroup_is_root(mc.from))
5901			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5902
5903		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5904
5905		/*
5906		 * we charged both to->memory and to->memsw, so we
5907		 * should uncharge to->memory.
5908		 */
5909		if (!mem_cgroup_is_root(mc.to))
5910			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
5911
5912		mc.moved_swap = 0;
5913	}
5914	memcg_oom_recover(from);
5915	memcg_oom_recover(to);
5916	wake_up_all(&mc.waitq);
5917}
5918
5919static void mem_cgroup_clear_mc(void)
5920{
5921	struct mm_struct *mm = mc.mm;
5922
5923	/*
5924	 * we must clear moving_task before waking up waiters at the end of
5925	 * task migration.
5926	 */
5927	mc.moving_task = NULL;
5928	__mem_cgroup_clear_mc();
5929	spin_lock(&mc.lock);
5930	mc.from = NULL;
5931	mc.to = NULL;
5932	mc.mm = NULL;
5933	spin_unlock(&mc.lock);
5934
5935	mmput(mm);
5936}
5937
5938static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5939{
5940	struct cgroup_subsys_state *css;
5941	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
5942	struct mem_cgroup *from;
5943	struct task_struct *leader, *p;
5944	struct mm_struct *mm;
5945	unsigned long move_flags;
5946	int ret = 0;
5947
5948	/* charge immigration isn't supported on the default hierarchy */
5949	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5950		return 0;
5951
5952	/*
5953	 * Multi-process migrations only happen on the default hierarchy
5954	 * where charge immigration is not used.  Perform charge
5955	 * immigration if @tset contains a leader and whine if there are
5956	 * multiple.
5957	 */
5958	p = NULL;
5959	cgroup_taskset_for_each_leader(leader, css, tset) {
5960		WARN_ON_ONCE(p);
5961		p = leader;
5962		memcg = mem_cgroup_from_css(css);
5963	}
5964	if (!p)
5965		return 0;
5966
5967	/*
5968	 * We are now committed to this value whatever it is. Changes in this
5969	 * tunable will only affect upcoming migrations, not the current one.
5970	 * So we need to save it, and keep it going.
5971	 */
5972	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
5973	if (!move_flags)
5974		return 0;
5975
5976	from = mem_cgroup_from_task(p);
5977
5978	VM_BUG_ON(from == memcg);
5979
5980	mm = get_task_mm(p);
5981	if (!mm)
5982		return 0;
5983	/* We move charges only when we move an owner of the mm */
5984	if (mm->owner == p) {
5985		VM_BUG_ON(mc.from);
5986		VM_BUG_ON(mc.to);
5987		VM_BUG_ON(mc.precharge);
5988		VM_BUG_ON(mc.moved_charge);
5989		VM_BUG_ON(mc.moved_swap);
5990
5991		spin_lock(&mc.lock);
5992		mc.mm = mm;
5993		mc.from = from;
5994		mc.to = memcg;
5995		mc.flags = move_flags;
5996		spin_unlock(&mc.lock);
5997		/* We set mc.moving_task later */
5998
5999		ret = mem_cgroup_precharge_mc(mm);
6000		if (ret)
6001			mem_cgroup_clear_mc();
6002	} else {
6003		mmput(mm);
6004	}
6005	return ret;
6006}
6007
6008static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6009{
6010	if (mc.to)
6011		mem_cgroup_clear_mc();
6012}
6013
6014static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6015				unsigned long addr, unsigned long end,
6016				struct mm_walk *walk)
6017{
6018	int ret = 0;
6019	struct vm_area_struct *vma = walk->vma;
6020	pte_t *pte;
6021	spinlock_t *ptl;
6022	enum mc_target_type target_type;
6023	union mc_target target;
6024	struct page *page;
6025
6026	ptl = pmd_trans_huge_lock(pmd, vma);
6027	if (ptl) {
6028		if (mc.precharge < HPAGE_PMD_NR) {
6029			spin_unlock(ptl);
6030			return 0;
6031		}
6032		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6033		if (target_type == MC_TARGET_PAGE) {
6034			page = target.page;
6035			if (!isolate_lru_page(page)) {
6036				if (!mem_cgroup_move_account(page, true,
6037							     mc.from, mc.to)) {
6038					mc.precharge -= HPAGE_PMD_NR;
6039					mc.moved_charge += HPAGE_PMD_NR;
6040				}
6041				putback_lru_page(page);
6042			}
6043			put_page(page);
6044		} else if (target_type == MC_TARGET_DEVICE) {
6045			page = target.page;
6046			if (!mem_cgroup_move_account(page, true,
6047						     mc.from, mc.to)) {
6048				mc.precharge -= HPAGE_PMD_NR;
6049				mc.moved_charge += HPAGE_PMD_NR;
6050			}
6051			put_page(page);
6052		}
6053		spin_unlock(ptl);
6054		return 0;
6055	}
6056
6057	if (pmd_trans_unstable(pmd))
6058		return 0;
6059retry:
6060	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6061	for (; addr != end; addr += PAGE_SIZE) {
6062		pte_t ptent = *(pte++);
6063		bool device = false;
6064		swp_entry_t ent;
6065
6066		if (!mc.precharge)
6067			break;
6068
6069		switch (get_mctgt_type(vma, addr, ptent, &target)) {
6070		case MC_TARGET_DEVICE:
6071			device = true;
6072			fallthrough;
6073		case MC_TARGET_PAGE:
6074			page = target.page;
6075			/*
6076			 * We can have a part of the split pmd here. Moving it
6077			 * can be done but it would be too convoluted so simply
6078			 * ignore such a partial THP and keep it in the original
6079			 * memcg. There should be somebody mapping the head.
6080			 */
6081			if (PageTransCompound(page))
6082				goto put;
6083			if (!device && isolate_lru_page(page))
6084				goto put;
6085			if (!mem_cgroup_move_account(page, false,
6086						mc.from, mc.to)) {
6087				mc.precharge--;
6088				/* we uncharge from mc.from later. */
6089				mc.moved_charge++;
6090			}
6091			if (!device)
6092				putback_lru_page(page);
6093put:			/* get_mctgt_type() gets the page */
6094			put_page(page);
6095			break;
6096		case MC_TARGET_SWAP:
6097			ent = target.ent;
6098			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6099				mc.precharge--;
6100				mem_cgroup_id_get_many(mc.to, 1);
6101				/* we fixup other refcnts and charges later. */
6102				mc.moved_swap++;
6103			}
6104			break;
6105		default:
6106			break;
6107		}
6108	}
6109	pte_unmap_unlock(pte - 1, ptl);
6110	cond_resched();
6111
6112	if (addr != end) {
6113		/*
6114		 * We have consumed all precharges we got in can_attach().
6115		 * We try charge one by one, but don't do any additional
6116		 * charges to mc.to if we have failed in charge once in attach()
6117		 * phase.
6118		 */
6119		ret = mem_cgroup_do_precharge(1);
6120		if (!ret)
6121			goto retry;
6122	}
6123
6124	return ret;
6125}
6126
6127static const struct mm_walk_ops charge_walk_ops = {
6128	.pmd_entry	= mem_cgroup_move_charge_pte_range,
6129};
6130
6131static void mem_cgroup_move_charge(void)
6132{
6133	lru_add_drain_all();
6134	/*
6135	 * Signal lock_page_memcg() to take the memcg's move_lock
6136	 * while we're moving its pages to another memcg. Then wait
6137	 * for already started RCU-only updates to finish.
6138	 */
6139	atomic_inc(&mc.from->moving_account);
6140	synchronize_rcu();
6141retry:
6142	if (unlikely(!mmap_read_trylock(mc.mm))) {
6143		/*
6144		 * Someone who is holding the mmap_lock might be waiting on
6145		 * the waitq. So we cancel all extra charges, wake up all waiters,
6146		 * and retry. Because we cancel precharges, we might not be able
6147		 * to move enough charges, but moving charge is a best-effort
6148		 * feature anyway, so it wouldn't be a big problem.
6149		 */
6150		__mem_cgroup_clear_mc();
6151		cond_resched();
6152		goto retry;
6153	}
6154	/*
6155	 * When we have consumed all precharges and failed in doing
6156	 * additional charge, the page walk just aborts.
6157	 */
6158	walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
6159			NULL);
6160
6161	mmap_read_unlock(mc.mm);
6162	atomic_dec(&mc.from->moving_account);
6163}
6164
6165static void mem_cgroup_move_task(void)
6166{
6167	if (mc.to) {
6168		mem_cgroup_move_charge();
6169		mem_cgroup_clear_mc();
6170	}
6171}
6172#else	/* !CONFIG_MMU */
6173static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6174{
6175	return 0;
6176}
6177static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6178{
6179}
6180static void mem_cgroup_move_task(void)
6181{
6182}
6183#endif
6184
6185static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6186{
6187	if (value == PAGE_COUNTER_MAX)
6188		seq_puts(m, "max\n");
6189	else
6190		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6191
6192	return 0;
6193}
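/*
 * For example, assuming 4 KiB pages: a tunable set to 32768 pages is
 * printed as "134217728" (32768 * 4096 bytes), while PAGE_COUNTER_MAX
 * is printed as the literal string "max".
 */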
6194
6195static u64 memory_current_read(struct cgroup_subsys_state *css,
6196			       struct cftype *cft)
6197{
6198	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6199
6200	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6201}
6202
6203static int memory_min_show(struct seq_file *m, void *v)
6204{
6205	return seq_puts_memcg_tunable(m,
6206		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6207}
6208
6209static ssize_t memory_min_write(struct kernfs_open_file *of,
6210				char *buf, size_t nbytes, loff_t off)
6211{
6212	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6213	unsigned long min;
6214	int err;
6215
6216	buf = strstrip(buf);
6217	err = page_counter_memparse(buf, "max", &min);
6218	if (err)
6219		return err;
6220
6221	page_counter_set_min(&memcg->memory, min);
6222
6223	return nbytes;
6224}
6225
6226static int memory_low_show(struct seq_file *m, void *v)
6227{
6228	return seq_puts_memcg_tunable(m,
6229		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6230}
6231
6232static ssize_t memory_low_write(struct kernfs_open_file *of,
6233				char *buf, size_t nbytes, loff_t off)
6234{
6235	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6236	unsigned long low;
6237	int err;
6238
6239	buf = strstrip(buf);
6240	err = page_counter_memparse(buf, "max", &low);
6241	if (err)
6242		return err;
6243
6244	page_counter_set_low(&memcg->memory, low);
6245
6246	return nbytes;
6247}
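/*
 * Illustrative usage of the two protection knobs from userspace (the
 * cgroup path is an example only):
 *
 *	# echo 256M > /sys/fs/cgroup/workload/memory.min
 *	# echo 512M > /sys/fs/cgroup/workload/memory.low
 *
 * Both files are parsed by page_counter_memparse(), so "max" and
 * memparse() suffixes such as K, M and G are accepted.
 */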
6248
6249static int memory_high_show(struct seq_file *m, void *v)
6250{
6251	return seq_puts_memcg_tunable(m,
6252		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6253}
6254
6255static ssize_t memory_high_write(struct kernfs_open_file *of,
6256				 char *buf, size_t nbytes, loff_t off)
6257{
6258	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6259	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6260	bool drained = false;
6261	unsigned long high;
6262	int err;
6263
6264	buf = strstrip(buf);
6265	err = page_counter_memparse(buf, "max", &high);
6266	if (err)
6267		return err;
6268
6269	page_counter_set_high(&memcg->memory, high);
6270
6271	for (;;) {
6272		unsigned long nr_pages = page_counter_read(&memcg->memory);
6273		unsigned long reclaimed;
6274
6275		if (nr_pages <= high)
6276			break;
6277
6278		if (signal_pending(current))
6279			break;
6280
6281		if (!drained) {
6282			drain_all_stock(memcg);
6283			drained = true;
6284			continue;
6285		}
6286
6287		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6288							 GFP_KERNEL, true);
6289
6290		if (!reclaimed && !nr_retries--)
6291			break;
6292	}
6293
6294	memcg_wb_domain_size_changed(memcg);
6295	return nbytes;
6296}
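/*
 * Unlike memory.min and memory.low, writing memory.high takes effect
 * immediately: the loop above keeps reclaiming the cgroup down to the
 * new high until it succeeds, runs out of retries, or a signal is
 * pending. Illustrative usage (example path):
 *
 *	# echo 512M > /sys/fs/cgroup/workload/memory.high
 */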
6297
6298static int memory_max_show(struct seq_file *m, void *v)
6299{
6300	return seq_puts_memcg_tunable(m,
6301		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6302}
6303
6304static ssize_t memory_max_write(struct kernfs_open_file *of,
6305				char *buf, size_t nbytes, loff_t off)
6306{
6307	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6308	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6309	bool drained = false;
6310	unsigned long max;
6311	int err;
6312
6313	buf = strstrip(buf);
6314	err = page_counter_memparse(buf, "max", &max);
6315	if (err)
6316		return err;
6317
6318	xchg(&memcg->memory.max, max);
6319
6320	for (;;) {
6321		unsigned long nr_pages = page_counter_read(&memcg->memory);
6322
6323		if (nr_pages <= max)
6324			break;
6325
6326		if (signal_pending(current))
6327			break;
6328
6329		if (!drained) {
6330			drain_all_stock(memcg);
6331			drained = true;
6332			continue;
6333		}
6334
6335		if (nr_reclaims) {
6336			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6337							  GFP_KERNEL, true))
6338				nr_reclaims--;
6339			continue;
6340		}
6341
6342		memcg_memory_event(memcg, MEMCG_OOM);
6343		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6344			break;
6345	}
6346
6347	memcg_wb_domain_size_changed(memcg);
6348	return nbytes;
6349}
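/*
 * memory.max is a hard limit: when the reclaim retries above are
 * exhausted, the overage is resolved by OOM-killing within the cgroup
 * rather than by leaving usage above the limit. Illustrative usage
 * (example path):
 *
 *	# echo 1G > /sys/fs/cgroup/workload/memory.max
 *	# echo max > /sys/fs/cgroup/workload/memory.max
 */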
6350
6351static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6352{
6353	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6354	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6355	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6356	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6357	seq_printf(m, "oom_kill %lu\n",
6358		   atomic_long_read(&events[MEMCG_OOM_KILL]));
6359}
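/*
 * The resulting memory.events (and memory.events.local) file is a
 * flat keyed file, for example (illustrative values):
 *
 *	low 0
 *	high 12
 *	max 4
 *	oom 1
 *	oom_kill 1
 */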
6360
6361static int memory_events_show(struct seq_file *m, void *v)
6362{
6363	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6364
6365	__memory_events_show(m, memcg->memory_events);
6366	return 0;
6367}
6368
6369static int memory_events_local_show(struct seq_file *m, void *v)
6370{
6371	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6372
6373	__memory_events_show(m, memcg->memory_events_local);
6374	return 0;
6375}
6376
6377static int memory_stat_show(struct seq_file *m, void *v)
6378{
6379	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6380	char *buf;
6381
6382	buf = memory_stat_format(memcg);
6383	if (!buf)
6384		return -ENOMEM;
6385	seq_puts(m, buf);
6386	kfree(buf);
6387	return 0;
6388}
6389
6390#ifdef CONFIG_NUMA
6391static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6392						     int item)
6393{
6394	return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item);
6395}
6396
6397static int memory_numa_stat_show(struct seq_file *m, void *v)
6398{
6399	int i;
6400	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6401
6402	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6403		int nid;
6404
6405		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6406			continue;
6407
6408		seq_printf(m, "%s", memory_stats[i].name);
6409		for_each_node_state(nid, N_MEMORY) {
6410			u64 size;
6411			struct lruvec *lruvec;
6412
6413			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6414			size = lruvec_page_state_output(lruvec,
6415							memory_stats[i].idx);
6416			seq_printf(m, " N%d=%llu", nid, size);
6417		}
6418		seq_putc(m, '\n');
6419	}
6420
6421	return 0;
6422}
6423#endif
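/*
 * memory.numa_stat prints one line per statistic with a per-node
 * breakdown, for example (illustrative values on a two-node machine):
 *
 *	anon N0=4194304 N1=4194304
 *	file N0=16777216 N1=0
 */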
6424
6425static int memory_oom_group_show(struct seq_file *m, void *v)
6426{
6427	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6428
6429	seq_printf(m, "%d\n", memcg->oom_group);
6430
6431	return 0;
6432}
6433
6434static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6435				      char *buf, size_t nbytes, loff_t off)
6436{
6437	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6438	int ret, oom_group;
6439
6440	buf = strstrip(buf);
6441	if (!buf)
6442		return -EINVAL;
6443
6444	ret = kstrtoint(buf, 0, &oom_group);
6445	if (ret)
6446		return ret;
6447
6448	if (oom_group != 0 && oom_group != 1)
6449		return -EINVAL;
6450
6451	memcg->oom_group = oom_group;
6452
6453	return nbytes;
6454}
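/*
 * When memory.oom.group is 1, an OOM kill attributed to this cgroup
 * takes down the cgroup as an indivisible workload instead of picking
 * individual victim tasks. Illustrative usage (example path):
 *
 *	# echo 1 > /sys/fs/cgroup/workload/memory.oom.group
 */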
6455
6456static struct cftype memory_files[] = {
6457	{
6458		.name = "current",
6459		.flags = CFTYPE_NOT_ON_ROOT,
6460		.read_u64 = memory_current_read,
6461	},
6462	{
6463		.name = "min",
6464		.flags = CFTYPE_NOT_ON_ROOT,
6465		.seq_show = memory_min_show,
6466		.write = memory_min_write,
6467	},
6468	{
6469		.name = "low",
6470		.flags = CFTYPE_NOT_ON_ROOT,
6471		.seq_show = memory_low_show,
6472		.write = memory_low_write,
6473	},
6474	{
6475		.name = "high",
6476		.flags = CFTYPE_NOT_ON_ROOT,
6477		.seq_show = memory_high_show,
6478		.write = memory_high_write,
6479	},
6480	{
6481		.name = "max",
6482		.flags = CFTYPE_NOT_ON_ROOT,
6483		.seq_show = memory_max_show,
6484		.write = memory_max_write,
6485	},
6486	{
6487		.name = "events",
6488		.flags = CFTYPE_NOT_ON_ROOT,
6489		.file_offset = offsetof(struct mem_cgroup, events_file),
6490		.seq_show = memory_events_show,
6491	},
6492	{
6493		.name = "events.local",
6494		.flags = CFTYPE_NOT_ON_ROOT,
6495		.file_offset = offsetof(struct mem_cgroup, events_local_file),
6496		.seq_show = memory_events_local_show,
6497	},
6498	{
6499		.name = "stat",
6500		.seq_show = memory_stat_show,
6501	},
6502#ifdef CONFIG_NUMA
6503	{
6504		.name = "numa_stat",
6505		.seq_show = memory_numa_stat_show,
6506	},
6507#endif
6508	{
6509		.name = "oom.group",
6510		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6511		.seq_show = memory_oom_group_show,
6512		.write = memory_oom_group_write,
6513	},
6514	{ }	/* terminate */
6515};
6516
6517struct cgroup_subsys memory_cgrp_subsys = {
6518	.css_alloc = mem_cgroup_css_alloc,
6519	.css_online = mem_cgroup_css_online,
6520	.css_offline = mem_cgroup_css_offline,
6521	.css_released = mem_cgroup_css_released,
6522	.css_free = mem_cgroup_css_free,
6523	.css_reset = mem_cgroup_css_reset,
6524	.css_rstat_flush = mem_cgroup_css_rstat_flush,
6525	.can_attach = mem_cgroup_can_attach,
6526	.cancel_attach = mem_cgroup_cancel_attach,
6527	.post_attach = mem_cgroup_move_task,
6528	.dfl_cftypes = memory_files,
6529	.legacy_cftypes = mem_cgroup_legacy_files,
6530	.early_init = 0,
6531};
6532
6533/*
6534 * This function calculates an individual cgroup's effective
6535 * protection which is derived from its own memory.min/low, its
6536 * parent's and siblings' settings, as well as the actual memory
6537 * distribution in the tree.
6538 *
6539 * The following rules apply to the effective protection values:
6540 *
6541 * 1. At the first level of reclaim, effective protection is equal to
6542 *    the declared protection in memory.min and memory.low.
6543 *
6544 * 2. To enable safe delegation of the protection configuration, at
6545 *    subsequent levels the effective protection is capped to the
6546 *    parent's effective protection.
6547 *
6548 * 3. To make complex and dynamic subtrees easier to configure, the
6549 *    user is allowed to overcommit the declared protection at a given
6550 *    level. If that is the case, the parent's effective protection is
6551 *    distributed to the children in proportion to how much protection
6552 *    they have declared and how much of it they are utilizing.
6553 *
6554 *    This makes distribution proportional, but also work-conserving:
6555 *    if one cgroup claims much more protection than it uses memory,
6556 *    the unused remainder is available to its siblings.
6557 *
6558 * 4. Conversely, when the declared protection is undercommitted at a
6559 *    given level, the distribution of the larger parental protection
6560 *    budget is NOT proportional. A cgroup's protection from a sibling
6561 *    is capped to its own memory.min/low setting.
6562 *
6563 * 5. However, to allow protecting recursive subtrees from each other
6564 *    without having to declare each individual cgroup's fixed share
6565 *    of the ancestor's claim to protection, any unutilized -
6566 *    "floating" - protection from up the tree is distributed in
6567 *    proportion to each cgroup's *usage*. This makes the protection
6568 *    neutral wrt sibling cgroups and lets them compete freely over
6569 *    the shared parental protection budget, but it protects the
6570 *    subtree as a whole from neighboring subtrees.
6571 *
6572 * Note that 4. and 5. are not in conflict: 4. is about protecting
6573 * against immediate siblings whereas 5. is about protecting against
6574 * neighboring subtrees.
6575 */
6576static unsigned long effective_protection(unsigned long usage,
6577					  unsigned long parent_usage,
6578					  unsigned long setting,
6579					  unsigned long parent_effective,
6580					  unsigned long siblings_protected)
6581{
6582	unsigned long protected;
6583	unsigned long ep;
6584
6585	protected = min(usage, setting);
6586	/*
6587	 * If all cgroups at this level combined claim and use more
6588	 * protection then what the parent affords them, distribute
6589	 * shares in proportion to utilization.
6590	 *
6591	 * We are using actual utilization rather than the statically
6592	 * claimed protection in order to be work-conserving: claimed
6593	 * but unused protection is available to siblings that would
6594	 * otherwise get a smaller chunk than what they claimed.
6595	 */
6596	if (siblings_protected > parent_effective)
6597		return protected * parent_effective / siblings_protected;
6598
6599	/*
6600	 * Ok, utilized protection of all children is within what the
6601	 * parent affords them, so we know whatever this child claims
6602	 * and utilizes is effectively protected.
6603	 *
6604	 * If there is unprotected usage beyond this value, reclaim
6605	 * will apply pressure in proportion to that amount.
6606	 *
6607	 * If there is unutilized protection, the cgroup will be fully
6608	 * shielded from reclaim, but we do return a smaller value for
6609	 * protection than what the group could enjoy in theory. This
6610	 * is okay. With the overcommit distribution above, effective
6611	 * protection is always dependent on how memory is actually
6612	 * consumed among the siblings anyway.
6613	 */
6614	ep = protected;
6615
6616	/*
6617	 * If the children aren't claiming (all of) the protection
6618	 * afforded to them by the parent, distribute the remainder in
6619	 * proportion to the (unprotected) memory of each cgroup. That
6620	 * way, cgroups that aren't explicitly prioritized wrt each
6621	 * other compete freely over the allowance, but they are
6622	 * collectively protected from neighboring trees.
6623	 *
6624	 * We're using unprotected memory for the weight so that if
6625	 * some cgroups DO claim explicit protection, we don't protect
6626	 * the same bytes twice.
6627	 *
6628	 * Check both usage and parent_usage against the respective
6629	 * protected values. One should imply the other, but they
6630	 * aren't read atomically - make sure the division is sane.
6631	 */
6632	if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6633		return ep;
6634	if (parent_effective > siblings_protected &&
6635	    parent_usage > siblings_protected &&
6636	    usage > protected) {
6637		unsigned long unclaimed;
6638
6639		unclaimed = parent_effective - siblings_protected;
6640		unclaimed *= usage - protected;
6641		unclaimed /= parent_usage - siblings_protected;
6642
6643		ep += unclaimed;
6644	}
6645
6646	return ep;
6647}
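/*
 * Worked example for the overcommitted case above (illustrative
 * numbers, in pages): say parent_effective is 100 and two siblings
 * each set memory.low = 100 while using 80. Then siblings_protected
 * is 160 > parent_effective, and each sibling ends up with
 *
 *	ep = min(80, 100) * 100 / 160 = 50
 *
 * pages of effective protection: the parent's budget is divided in
 * proportion to actual utilization, not to the declared settings.
 */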
6648
6649/**
6650 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
6651 * @root: the top ancestor of the sub-tree being checked
6652 * @memcg: the memory cgroup to check
6653 *
6654 * WARNING: This function is not stateless! It can only be used as part
6655 *          of a top-down tree iteration, not for isolated queries.
6656 */
6657void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6658				     struct mem_cgroup *memcg)
6659{
6660	unsigned long usage, parent_usage;
6661	struct mem_cgroup *parent;
6662
6663	if (mem_cgroup_disabled())
6664		return;
6665
6666	if (!root)
6667		root = root_mem_cgroup;
6668
6669	/*
6670	 * Effective values of the reclaim targets are ignored so they
6671	 * can be stale. Have a look at mem_cgroup_protection for more
6672	 * details.
6673	 * TODO: calculation should be more robust so that we do not need
6674	 * that special casing.
6675	 */
6676	if (memcg == root)
6677		return;
6678
6679	usage = page_counter_read(&memcg->memory);
6680	if (!usage)
6681		return;
6682
6683	parent = parent_mem_cgroup(memcg);
6684	/* No parent means a non-hierarchical mode on v1 memcg */
6685	if (!parent)
6686		return;
6687
6688	if (parent == root) {
6689		memcg->memory.emin = READ_ONCE(memcg->memory.min);
6690		memcg->memory.elow = READ_ONCE(memcg->memory.low);
6691		return;
6692	}
6693
6694	parent_usage = page_counter_read(&parent->memory);
6695
6696	WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6697			READ_ONCE(memcg->memory.min),
6698			READ_ONCE(parent->memory.emin),
6699			atomic_long_read(&parent->memory.children_min_usage)));
6700
6701	WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6702			READ_ONCE(memcg->memory.low),
6703			READ_ONCE(parent->memory.elow),
6704			atomic_long_read(&parent->memory.children_low_usage)));
6705}
6706
6707static int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg,
6708			       gfp_t gfp)
6709{
6710	unsigned int nr_pages = thp_nr_pages(page);
6711	int ret;
6712
6713	ret = try_charge(memcg, gfp, nr_pages);
6714	if (ret)
6715		goto out;
6716
6717	css_get(&memcg->css);
6718	commit_charge(page, memcg);
6719
6720	local_irq_disable();
6721	mem_cgroup_charge_statistics(memcg, page, nr_pages);
6722	memcg_check_events(memcg, page);
6723	local_irq_enable();
6724out:
6725	return ret;
6726}
6727
6728/**
6729 * mem_cgroup_charge - charge a newly allocated page to a cgroup
6730 * @page: page to charge
6731 * @mm: mm context of the victim
6732 * @gfp_mask: reclaim mode
6733 *
6734 * Try to charge @page to the memcg that @mm belongs to, reclaiming
6735 * pages according to @gfp_mask if necessary. if @mm is NULL, try to
6736 * charge to the active memcg.
6737 *
6738 * Do not use this for pages allocated for swapin.
6739 *
6740 * Returns 0 on success. Otherwise, an error code is returned.
6741 */
6742int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
6743{
6744	struct mem_cgroup *memcg;
6745	int ret;
6746
6747	if (mem_cgroup_disabled())
6748		return 0;
6749
6750	memcg = get_mem_cgroup_from_mm(mm);
6751	ret = __mem_cgroup_charge(page, memcg, gfp_mask);
6752	css_put(&memcg->css);
6753
6754	return ret;
6755}
6756
6757/**
6758 * mem_cgroup_swapin_charge_page - charge a newly allocated page for swapin
6759 * @page: page to charge
6760 * @mm: mm context of the victim
6761 * @gfp: reclaim mode
6762 * @entry: swap entry for which the page is allocated
6763 *
6764 * This function charges a page allocated for swapin. Please call this before
6765 * adding the page to the swapcache.
6766 *
6767 * Returns 0 on success. Otherwise, an error code is returned.
6768 */
6769int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
6770				  gfp_t gfp, swp_entry_t entry)
6771{
6772	struct mem_cgroup *memcg;
6773	unsigned short id;
6774	int ret;
6775
6776	if (mem_cgroup_disabled())
6777		return 0;
6778
6779	id = lookup_swap_cgroup_id(entry);
6780	rcu_read_lock();
6781	memcg = mem_cgroup_from_id(id);
6782	if (!memcg || !css_tryget_online(&memcg->css))
6783		memcg = get_mem_cgroup_from_mm(mm);
6784	rcu_read_unlock();
6785
6786	ret = __mem_cgroup_charge(page, memcg, gfp);
6787
6788	css_put(&memcg->css);
6789	return ret;
6790}
6791
6792/*
6793 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
6794 * @entry: swap entry for which the page is charged
6795 *
6796 * Call this function after successfully adding the charged page to swapcache.
6797 *
6798 * Note: This function assumes the page for which the swap slot is being
6799 * uncharged is an order-0 page.
6800 */
6801void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
6802{
6803	/*
6804	 * Cgroup1's unified memory+swap counter has been charged with the
6805	 * new swapcache page, finish the transfer by uncharging the swap
6806	 * slot. The swap slot would also get uncharged when it dies, but
6807	 * it can stick around indefinitely and we'd count the page twice
6808	 * the entire time.
6809	 *
6810	 * Cgroup2 has separate resource counters for memory and swap,
6811	 * so this is a non-issue here. Memory and swap charge lifetimes
6812	 * correspond 1:1 to page and swap slot lifetimes: we charge the
6813	 * page to memory here, and uncharge swap when the slot is freed.
6814	 */
6815	if (!mem_cgroup_disabled() && do_memsw_account()) {
6816		/*
6817		 * The swap entry might not get freed for a long time,
6818		 * let's not wait for it.  The page already received a
6819		 * memory+swap charge, drop the swap entry duplicate.
6820		 */
6821		mem_cgroup_uncharge_swap(entry, 1);
6822	}
6823}
6824
6825struct uncharge_gather {
6826	struct mem_cgroup *memcg;
6827	unsigned long nr_memory;
6828	unsigned long pgpgout;
6829	unsigned long nr_kmem;
6830	struct page *dummy_page;
6831};
6832
6833static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6834{
6835	memset(ug, 0, sizeof(*ug));
6836}
6837
6838static void uncharge_batch(const struct uncharge_gather *ug)
6839{
6840	unsigned long flags;
6841
6842	if (ug->nr_memory) {
6843		page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
6844		if (do_memsw_account())
6845			page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
6846		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
6847			page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
6848		memcg_oom_recover(ug->memcg);
6849	}
6850
6851	local_irq_save(flags);
6852	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6853	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
6854	memcg_check_events(ug->memcg, ug->dummy_page);
6855	local_irq_restore(flags);
6856
6857	/* drop reference from uncharge_page */
6858	css_put(&ug->memcg->css);
6859}
6860
6861static void uncharge_page(struct page *page, struct uncharge_gather *ug)
6862{
6863	unsigned long nr_pages;
6864	struct mem_cgroup *memcg;
6865	struct obj_cgroup *objcg;
6866	bool use_objcg = PageMemcgKmem(page);
6867
6868	VM_BUG_ON_PAGE(PageLRU(page), page);
6869
6870	/*
6871	 * Nobody should be changing or seriously looking at
6872	 * page memcg or objcg at this point, we have fully
6873	 * exclusive access to the page.
6874	 */
6875	if (use_objcg) {
6876		objcg = __page_objcg(page);
6877		/*
6878		 * This get matches the put at the end of the function and
6879		 * kmem pages do not hold memcg references anymore.
6880		 */
6881		memcg = get_mem_cgroup_from_objcg(objcg);
6882	} else {
6883		memcg = __page_memcg(page);
6884	}
6885
6886	if (!memcg)
6887		return;
6888
6889	if (ug->memcg != memcg) {
6890		if (ug->memcg) {
6891			uncharge_batch(ug);
6892			uncharge_gather_clear(ug);
6893		}
6894		ug->memcg = memcg;
6895		ug->dummy_page = page;
6896
6897		/* pairs with css_put in uncharge_batch */
6898		css_get(&memcg->css);
6899	}
6900
6901	nr_pages = compound_nr(page);
6902
6903	if (use_objcg) {
6904		ug->nr_memory += nr_pages;
6905		ug->nr_kmem += nr_pages;
6906
6907		page->memcg_data = 0;
6908		obj_cgroup_put(objcg);
6909	} else {
6910		/* LRU pages aren't accounted at the root level */
6911		if (!mem_cgroup_is_root(memcg))
6912			ug->nr_memory += nr_pages;
6913		ug->pgpgout++;
6914
6915		page->memcg_data = 0;
6916	}
6917
6918	css_put(&memcg->css);
6919}
6920
6921/**
6922 * mem_cgroup_uncharge - uncharge a page
6923 * @page: page to uncharge
6924 *
6925 * Uncharge a page previously charged with mem_cgroup_charge().
6926 */
6927void mem_cgroup_uncharge(struct page *page)
6928{
6929	struct uncharge_gather ug;
6930
6931	if (mem_cgroup_disabled())
6932		return;
6933
6934	/* Don't touch page->lru of any random page, pre-check: */
6935	if (!page_memcg(page))
6936		return;
6937
6938	uncharge_gather_clear(&ug);
6939	uncharge_page(page, &ug);
6940	uncharge_batch(&ug);
6941}
6942
6943/**
6944 * mem_cgroup_uncharge_list - uncharge a list of page
6945 * @page_list: list of pages to uncharge
6946 *
6947 * Uncharge a list of pages previously charged with
6948 * mem_cgroup_charge().
6949 */
6950void mem_cgroup_uncharge_list(struct list_head *page_list)
6951{
6952	struct uncharge_gather ug;
6953	struct page *page;
6954
6955	if (mem_cgroup_disabled())
6956		return;
6957
6958	uncharge_gather_clear(&ug);
6959	list_for_each_entry(page, page_list, lru)
6960		uncharge_page(page, &ug);
6961	if (ug.memcg)
6962		uncharge_batch(&ug);
6963}
6964
6965/**
6966 * mem_cgroup_migrate - charge a page's replacement
6967 * @oldpage: currently circulating page
6968 * @newpage: replacement page
6969 *
6970 * Charge @newpage as a replacement page for @oldpage. @oldpage will
6971 * be uncharged upon free.
6972 *
6973 * Both pages must be locked, @newpage->mapping must be set up.
6974 */
6975void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
6976{
6977	struct mem_cgroup *memcg;
6978	unsigned int nr_pages;
6979	unsigned long flags;
6980
6981	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
6982	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
6983	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
6984	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
6985		       newpage);
6986
6987	if (mem_cgroup_disabled())
6988		return;
6989
6990	/* Page cache replacement: new page already charged? */
6991	if (page_memcg(newpage))
6992		return;
6993
6994	memcg = page_memcg(oldpage);
6995	VM_WARN_ON_ONCE_PAGE(!memcg, oldpage);
6996	if (!memcg)
6997		return;
6998
6999	/* Force-charge the new page. The old one will be freed soon */
7000	nr_pages = thp_nr_pages(newpage);
7001
7002	if (!mem_cgroup_is_root(memcg)) {
7003		page_counter_charge(&memcg->memory, nr_pages);
7004		if (do_memsw_account())
7005			page_counter_charge(&memcg->memsw, nr_pages);
7006	}
7007
7008	css_get(&memcg->css);
7009	commit_charge(newpage, memcg);
7010
7011	local_irq_save(flags);
7012	mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
7013	memcg_check_events(memcg, newpage);
7014	local_irq_restore(flags);
7015}
7016
7017DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
7018EXPORT_SYMBOL(memcg_sockets_enabled_key);
7019
7020void mem_cgroup_sk_alloc(struct sock *sk)
7021{
7022	struct mem_cgroup *memcg;
7023
7024	if (!mem_cgroup_sockets_enabled)
7025		return;
7026
7027	/* Do not associate the sock with an unrelated interrupted task's memcg. */
7028	if (in_interrupt())
7029		return;
7030
7031	rcu_read_lock();
7032	memcg = mem_cgroup_from_task(current);
7033	if (memcg == root_mem_cgroup)
7034		goto out;
7035	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
7036		goto out;
7037	if (css_tryget(&memcg->css))
7038		sk->sk_memcg = memcg;
7039out:
7040	rcu_read_unlock();
7041}
7042
7043void mem_cgroup_sk_free(struct sock *sk)
7044{
7045	if (sk->sk_memcg)
7046		css_put(&sk->sk_memcg->css);
7047}
7048
7049/**
7050 * mem_cgroup_charge_skmem - charge socket memory
7051 * @memcg: memcg to charge
7052 * @nr_pages: number of pages to charge
7053 *
7054 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7055 * @memcg's configured limit, %false if the charge had to be forced.
7056 */
7057bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7058{
7059	gfp_t gfp_mask = GFP_KERNEL;
7060
7061	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7062		struct page_counter *fail;
7063
7064		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7065			memcg->tcpmem_pressure = 0;
7066			return true;
7067		}
7068		page_counter_charge(&memcg->tcpmem, nr_pages);
7069		memcg->tcpmem_pressure = 1;
7070		return false;
7071	}
7072
7073	/* Don't block in the packet receive path */
7074	if (in_softirq())
7075		gfp_mask = GFP_NOWAIT;
7076
7077	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7078
7079	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
7080		return true;
7081
7082	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
7083	return false;
7084}
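/*
 * Note the contract above: the memory is charged even when %false is
 * returned (the charge is forced past the limit); the return value
 * only tells the caller to back off. A hypothetical caller sketch:
 *
 *	if (!mem_cgroup_charge_skmem(memcg, nr_pages))
 *		enter_memory_pressure();	// hypothetical helper
 */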
7085
7086/**
7087 * mem_cgroup_uncharge_skmem - uncharge socket memory
7088 * @memcg: memcg to uncharge
7089 * @nr_pages: number of pages to uncharge
7090 */
7091void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7092{
7093	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7094		page_counter_uncharge(&memcg->tcpmem, nr_pages);
7095		return;
7096	}
7097
7098	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7099
7100	refill_stock(memcg, nr_pages);
7101}
7102
7103static int __init cgroup_memory(char *s)
7104{
7105	char *token;
7106
7107	while ((token = strsep(&s, ",")) != NULL) {
7108		if (!*token)
7109			continue;
7110		if (!strcmp(token, "nosocket"))
7111			cgroup_memory_nosocket = true;
7112		if (!strcmp(token, "nokmem"))
7113			cgroup_memory_nokmem = true;
7114	}
7115	return 0;
7116}
7117__setup("cgroup.memory=", cgroup_memory);
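/*
 * For example, booting with "cgroup.memory=nosocket,nokmem" on the
 * kernel command line disables both socket memory accounting and
 * kernel memory accounting, per the token parsing above.
 */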
7118
7119/*
7120 * subsys_initcall() for memory controller.
7121 *
7122 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7123 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7124 * basically everything that doesn't depend on a specific mem_cgroup structure
7125 * should be initialized from here.
7126 */
7127static int __init mem_cgroup_init(void)
7128{
7129	int cpu, node;
7130
7131	/*
7132	 * Currently s32 type (can refer to struct batched_lruvec_stat) is
7133	 * used for per-memcg-per-cpu caching of per-node statistics. In order
7134	 * to work fine, we should make sure that the overfill threshold can't
7135	 * exceed S32_MAX / PAGE_SIZE.
7136	 */
7137	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7138
7139	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7140				  memcg_hotplug_cpu_dead);
7141
7142	for_each_possible_cpu(cpu)
7143		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7144			  drain_local_stock);
7145
7146	for_each_node(node) {
7147		struct mem_cgroup_tree_per_node *rtpn;
7148
7149		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
7150				    node_online(node) ? node : NUMA_NO_NODE);
7151
7152		rtpn->rb_root = RB_ROOT;
7153		rtpn->rb_rightmost = NULL;
7154		spin_lock_init(&rtpn->lock);
7155		soft_limit_tree.rb_tree_per_node[node] = rtpn;
7156	}
7157
7158	return 0;
7159}
7160subsys_initcall(mem_cgroup_init);
7161
7162#ifdef CONFIG_MEMCG_SWAP
7163static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7164{
7165	while (!refcount_inc_not_zero(&memcg->id.ref)) {
7166		/*
7167		 * The root cgroup cannot be destroyed, so its refcount must
7168		 * always be >= 1.
7169		 */
7170		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
7171			VM_BUG_ON(1);
7172			break;
7173		}
7174		memcg = parent_mem_cgroup(memcg);
7175		if (!memcg)
7176			memcg = root_mem_cgroup;
7177	}
7178	return memcg;
7179}
7180
7181/**
7182 * mem_cgroup_swapout - transfer a memsw charge to swap
7183 * @page: page whose memsw charge to transfer
7184 * @entry: swap entry to move the charge to
7185 *
7186 * Transfer the memsw charge of @page to @entry.
7187 */
7188void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
7189{
7190	struct mem_cgroup *memcg, *swap_memcg;
7191	unsigned int nr_entries;
7192	unsigned short oldid;
7193
7194	VM_BUG_ON_PAGE(PageLRU(page), page);
7195	VM_BUG_ON_PAGE(page_count(page), page);
7196
7197	if (mem_cgroup_disabled())
7198		return;
7199
7200	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7201		return;
7202
7203	memcg = page_memcg(page);
7204
7205	VM_WARN_ON_ONCE_PAGE(!memcg, page);
7206	if (!memcg)
7207		return;
7208
7209	/*
7210	 * In case the memcg owning these pages has been offlined and doesn't
7211	 * have an ID allocated to it anymore, charge the closest online
7212	 * ancestor for the swap instead and transfer the memory+swap charge.
7213	 */
7214	swap_memcg = mem_cgroup_id_get_online(memcg);
7215	nr_entries = thp_nr_pages(page);
7216	/* Get references for the tail pages, too */
7217	if (nr_entries > 1)
7218		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7219	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7220				   nr_entries);
7221	VM_BUG_ON_PAGE(oldid, page);
7222	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7223
7224	page->memcg_data = 0;
7225
7226	if (!mem_cgroup_is_root(memcg))
7227		page_counter_uncharge(&memcg->memory, nr_entries);
7228
7229	if (!cgroup_memory_noswap && memcg != swap_memcg) {
7230		if (!mem_cgroup_is_root(swap_memcg))
7231			page_counter_charge(&swap_memcg->memsw, nr_entries);
7232		page_counter_uncharge(&memcg->memsw, nr_entries);
7233	}
7234
7235	/*
7236	 * Interrupts should be disabled here because the caller holds the
7237	 * i_pages lock which is taken with interrupts-off. It is
7238	 * important here to have the interrupts disabled because it is the
7239	 * only synchronisation we have for updating the per-CPU variables.
7240	 */
7241	VM_BUG_ON(!irqs_disabled());
7242	mem_cgroup_charge_statistics(memcg, page, -nr_entries);
7243	memcg_check_events(memcg, page);
7244
7245	css_put(&memcg->css);
7246}
7247
7248/**
7249 * mem_cgroup_try_charge_swap - try charging swap space for a page
7250 * @page: page being added to swap
7251 * @entry: swap entry to charge
7252 *
7253 * Try to charge @page's memcg for the swap space at @entry.
7254 *
7255 * Returns 0 on success, -ENOMEM on failure.
7256 */
7257int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
7258{
7259	unsigned int nr_pages = thp_nr_pages(page);
7260	struct page_counter *counter;
7261	struct mem_cgroup *memcg;
7262	unsigned short oldid;
7263
7264	if (mem_cgroup_disabled())
7265		return 0;
7266
7267	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7268		return 0;
7269
7270	memcg = page_memcg(page);
7271
7272	VM_WARN_ON_ONCE_PAGE(!memcg, page);
7273	if (!memcg)
7274		return 0;
7275
7276	if (!entry.val) {
7277		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7278		return 0;
7279	}
7280
7281	memcg = mem_cgroup_id_get_online(memcg);
7282
7283	if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) &&
7284	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7285		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7286		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7287		mem_cgroup_id_put(memcg);
7288		return -ENOMEM;
7289	}
7290
7291	/* Get references for the tail pages, too */
7292	if (nr_pages > 1)
7293		mem_cgroup_id_get_many(memcg, nr_pages - 1);
7294	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7295	VM_BUG_ON_PAGE(oldid, page);
7296	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7297
7298	return 0;
7299}
7300
7301/**
7302 * mem_cgroup_uncharge_swap - uncharge swap space
7303 * @entry: swap entry to uncharge
7304 * @nr_pages: the amount of swap space to uncharge
7305 */
7306void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7307{
7308	struct mem_cgroup *memcg;
7309	unsigned short id;
7310
7311	id = swap_cgroup_record(entry, 0, nr_pages);
7312	rcu_read_lock();
7313	memcg = mem_cgroup_from_id(id);
7314	if (memcg) {
7315		if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) {
7316			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7317				page_counter_uncharge(&memcg->swap, nr_pages);
7318			else
7319				page_counter_uncharge(&memcg->memsw, nr_pages);
7320		}
7321		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7322		mem_cgroup_id_put_many(memcg, nr_pages);
7323	}
7324	rcu_read_unlock();
7325}
7326
7327long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7328{
7329	long nr_swap_pages = get_nr_swap_pages();
7330
7331	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7332		return nr_swap_pages;
7333	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7334		nr_swap_pages = min_t(long, nr_swap_pages,
7335				      READ_ONCE(memcg->swap.max) -
7336				      page_counter_read(&memcg->swap));
7337	return nr_swap_pages;
7338}
7339
7340bool mem_cgroup_swap_full(struct page *page)
7341{
7342	struct mem_cgroup *memcg;
7343
7344	VM_BUG_ON_PAGE(!PageLocked(page), page);
7345
7346	if (vm_swap_full())
7347		return true;
7348	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7349		return false;
7350
7351	memcg = page_memcg(page);
7352	if (!memcg)
7353		return false;
7354
7355	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
7356		unsigned long usage = page_counter_read(&memcg->swap);
7357
7358		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7359		    usage * 2 >= READ_ONCE(memcg->swap.max))
7360			return true;
7361	}
7362
7363	return false;
7364}
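/*
 * Example for the check above (illustrative numbers): with
 * memory.swap.max set to 1024 pages and swap.high unset, the cgroup's
 * swap is reported full once usage reaches 512 pages, because
 * 512 * 2 >= 1024.
 */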
7365
7366static int __init setup_swap_account(char *s)
7367{
7368	if (!strcmp(s, "1"))
7369		cgroup_memory_noswap = false;
7370	else if (!strcmp(s, "0"))
7371		cgroup_memory_noswap = true;
7372	return 1;
7373}
7374__setup("swapaccount=", setup_swap_account);
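/*
 * For example, booting with "swapaccount=0" disables swap accounting,
 * and "swapaccount=1" forces it on, per the parsing above.
 */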
7375
7376static u64 swap_current_read(struct cgroup_subsys_state *css,
7377			     struct cftype *cft)
7378{
7379	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7380
7381	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7382}
7383
7384static int swap_high_show(struct seq_file *m, void *v)
7385{
7386	return seq_puts_memcg_tunable(m,
7387		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7388}
7389
7390static ssize_t swap_high_write(struct kernfs_open_file *of,
7391			       char *buf, size_t nbytes, loff_t off)
7392{
7393	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7394	unsigned long high;
7395	int err;
7396
7397	buf = strstrip(buf);
7398	err = page_counter_memparse(buf, "max", &high);
7399	if (err)
7400		return err;
7401
7402	page_counter_set_high(&memcg->swap, high);
7403
7404	return nbytes;
7405}
7406
7407static int swap_max_show(struct seq_file *m, void *v)
7408{
7409	return seq_puts_memcg_tunable(m,
7410		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7411}
7412
7413static ssize_t swap_max_write(struct kernfs_open_file *of,
7414			      char *buf, size_t nbytes, loff_t off)
7415{
7416	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7417	unsigned long max;
7418	int err;
7419
7420	buf = strstrip(buf);
7421	err = page_counter_memparse(buf, "max", &max);
7422	if (err)
7423		return err;
7424
7425	xchg(&memcg->swap.max, max);
7426
7427	return nbytes;
7428}
7429
7430static int swap_events_show(struct seq_file *m, void *v)
7431{
7432	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7433
7434	seq_printf(m, "high %lu\n",
7435		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
7436	seq_printf(m, "max %lu\n",
7437		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7438	seq_printf(m, "fail %lu\n",
7439		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7440
7441	return 0;
7442}
7443
7444static struct cftype swap_files[] = {
7445	{
7446		.name = "swap.current",
7447		.flags = CFTYPE_NOT_ON_ROOT,
7448		.read_u64 = swap_current_read,
7449	},
7450	{
7451		.name = "swap.high",
7452		.flags = CFTYPE_NOT_ON_ROOT,
7453		.seq_show = swap_high_show,
7454		.write = swap_high_write,
7455	},
7456	{
7457		.name = "swap.max",
7458		.flags = CFTYPE_NOT_ON_ROOT,
7459		.seq_show = swap_max_show,
7460		.write = swap_max_write,
7461	},
7462	{
7463		.name = "swap.events",
7464		.flags = CFTYPE_NOT_ON_ROOT,
7465		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
7466		.seq_show = swap_events_show,
7467	},
7468	{ }	/* terminate */
7469};
7470
7471static struct cftype memsw_files[] = {
7472	{
7473		.name = "memsw.usage_in_bytes",
7474		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7475		.read_u64 = mem_cgroup_read_u64,
7476	},
7477	{
7478		.name = "memsw.max_usage_in_bytes",
7479		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7480		.write = mem_cgroup_reset,
7481		.read_u64 = mem_cgroup_read_u64,
7482	},
7483	{
7484		.name = "memsw.limit_in_bytes",
7485		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7486		.write = mem_cgroup_write,
7487		.read_u64 = mem_cgroup_read_u64,
7488	},
7489	{
7490		.name = "memsw.failcnt",
7491		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7492		.write = mem_cgroup_reset,
7493		.read_u64 = mem_cgroup_read_u64,
7494	},
7495	{ },	/* terminate */
7496};
7497
7498/*
7499 * If mem_cgroup_swap_init() is implemented as a subsys_initcall()
7500 * instead of a core_initcall(), cgroup_memory_noswap could remain set to
7501 * false even when memcg is disabled via the "cgroup_disable=memory" boot
7502 * parameter. This may result in a premature OOPS inside the
7503 * mem_cgroup_get_nr_swap_pages() function in corner cases.
7504 */
7505static int __init mem_cgroup_swap_init(void)
7506{
7507	/* No memory control -> no swap control */
7508	if (mem_cgroup_disabled())
7509		cgroup_memory_noswap = true;
7510
7511	if (cgroup_memory_noswap)
7512		return 0;
7513
7514	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
7515	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
7516
7517	return 0;
7518}
7519core_initcall(mem_cgroup_swap_init);
7520
7521#endif /* CONFIG_MEMCG_SWAP */
v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* memcontrol.c - Memory Controller
   3 *
   4 * Copyright IBM Corporation, 2007
   5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
   6 *
   7 * Copyright 2007 OpenVZ SWsoft Inc
   8 * Author: Pavel Emelianov <xemul@openvz.org>
   9 *
  10 * Memory thresholds
  11 * Copyright (C) 2009 Nokia Corporation
  12 * Author: Kirill A. Shutemov
  13 *
  14 * Kernel Memory Controller
  15 * Copyright (C) 2012 Parallels Inc. and Google Inc.
  16 * Authors: Glauber Costa and Suleiman Souhlal
  17 *
  18 * Native page reclaim
  19 * Charge lifetime sanitation
  20 * Lockless page tracking & accounting
  21 * Unified hierarchy configuration model
  22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
  23 *
  24 * Per memcg lru locking
  25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
  26 */
  27
  28#include <linux/page_counter.h>
  29#include <linux/memcontrol.h>
  30#include <linux/cgroup.h>
  31#include <linux/pagewalk.h>
  32#include <linux/sched/mm.h>
  33#include <linux/shmem_fs.h>
  34#include <linux/hugetlb.h>
  35#include <linux/pagemap.h>
  36#include <linux/vm_event_item.h>
  37#include <linux/smp.h>
  38#include <linux/page-flags.h>
  39#include <linux/backing-dev.h>
  40#include <linux/bit_spinlock.h>
  41#include <linux/rcupdate.h>
  42#include <linux/limits.h>
  43#include <linux/export.h>
  44#include <linux/mutex.h>
  45#include <linux/rbtree.h>
  46#include <linux/slab.h>
  47#include <linux/swap.h>
  48#include <linux/swapops.h>
  49#include <linux/spinlock.h>
  50#include <linux/eventfd.h>
  51#include <linux/poll.h>
  52#include <linux/sort.h>
  53#include <linux/fs.h>
  54#include <linux/seq_file.h>
  55#include <linux/vmpressure.h>
  56#include <linux/memremap.h>
  57#include <linux/mm_inline.h>
  58#include <linux/swap_cgroup.h>
  59#include <linux/cpu.h>
  60#include <linux/oom.h>
  61#include <linux/lockdep.h>
  62#include <linux/file.h>
  63#include <linux/resume_user_mode.h>
  64#include <linux/psi.h>
  65#include <linux/seq_buf.h>
  66#include <linux/sched/isolation.h>
  67#include <linux/kmemleak.h>
  68#include "internal.h"
  69#include <net/sock.h>
  70#include <net/ip.h>
  71#include "slab.h"
  72#include "swap.h"
  73
  74#include <linux/uaccess.h>
  75
  76#include <trace/events/vmscan.h>
  77
  78struct cgroup_subsys memory_cgrp_subsys __read_mostly;
  79EXPORT_SYMBOL(memory_cgrp_subsys);
  80
  81struct mem_cgroup *root_mem_cgroup __read_mostly;
  82
  83/* Active memory cgroup to use from an interrupt context */
  84DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
  85EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
  86
  87/* Socket memory accounting disabled? */
  88static bool cgroup_memory_nosocket __ro_after_init;
  89
  90/* Kernel memory accounting disabled? */
  91static bool cgroup_memory_nokmem __ro_after_init;
  92
  93/* BPF memory accounting disabled? */
  94static bool cgroup_memory_nobpf __ro_after_init;
  95
  96#ifdef CONFIG_CGROUP_WRITEBACK
  97static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
  98#endif
  99
 100/* Whether legacy memory+swap accounting is active */
 101static bool do_memsw_account(void)
 102{
 103	return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
 104}
 105
 106#define THRESHOLDS_EVENTS_TARGET 128
 107#define SOFTLIMIT_EVENTS_TARGET 1024
 108
 109/*
 110 * Cgroups above their limits are maintained in a RB-Tree, independent of
 111 * their hierarchy representation
 112 */
 113
 114struct mem_cgroup_tree_per_node {
 115	struct rb_root rb_root;
 116	struct rb_node *rb_rightmost;
 117	spinlock_t lock;
 118};
 119
 120struct mem_cgroup_tree {
 121	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
 122};
 123
 124static struct mem_cgroup_tree soft_limit_tree __read_mostly;
 125
 126/* for OOM */
 127struct mem_cgroup_eventfd_list {
 128	struct list_head list;
 129	struct eventfd_ctx *eventfd;
 130};
 131
 132/*
 133 * cgroup_event represents events which userspace wants to receive.
 134 */
 135struct mem_cgroup_event {
 136	/*
 137	 * memcg which the event belongs to.
 138	 */
 139	struct mem_cgroup *memcg;
 140	/*
 141	 * eventfd to signal userspace about the event.
 142	 */
 143	struct eventfd_ctx *eventfd;
 144	/*
 145	 * Each of these is stored in a list by the cgroup.
 146	 */
 147	struct list_head list;
 148	/*
 149	 * register_event() callback will be used to add a new userspace
 150	 * waiter for changes related to this event.  Use eventfd_signal()
 151	 * on eventfd to send notification to userspace.
 152	 */
 153	int (*register_event)(struct mem_cgroup *memcg,
 154			      struct eventfd_ctx *eventfd, const char *args);
 155	/*
 156	 * unregister_event() callback will be called when userspace closes
 157	 * the eventfd or when the cgroup is removed.  This callback must be
 158	 * set if you want to provide notification functionality.
 159	 */
 160	void (*unregister_event)(struct mem_cgroup *memcg,
 161				 struct eventfd_ctx *eventfd);
 162	/*
 163	 * All fields below are needed to unregister the event when
 164	 * userspace closes the eventfd.
 165	 */
 166	poll_table pt;
 167	wait_queue_head_t *wqh;
 168	wait_queue_entry_t wait;
 169	struct work_struct remove;
 170};
 171
 172static void mem_cgroup_threshold(struct mem_cgroup *memcg);
 173static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
 174
 175/* Stuff for moving charges at task migration. */
 176/*
 177 * Types of charges to be moved.
 178 */
 179#define MOVE_ANON	0x1U
 180#define MOVE_FILE	0x2U
 181#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
 182
 183/* "mc" and its members are protected by cgroup_mutex */
 184static struct move_charge_struct {
 185	spinlock_t	  lock; /* for from, to */
 186	struct mm_struct  *mm;
 187	struct mem_cgroup *from;
 188	struct mem_cgroup *to;
 189	unsigned long flags;
 190	unsigned long precharge;
 191	unsigned long moved_charge;
 192	unsigned long moved_swap;
 193	struct task_struct *moving_task;	/* a task moving charges */
 194	wait_queue_head_t waitq;		/* a waitq for other context */
 195} mc = {
 196	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
 197	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
 198};
 199
 200/*
 201 * Maximum loops in mem_cgroup_soft_reclaim(), used for soft
 202 * limit reclaim to prevent infinite loops, if they ever occur.
 203 */
 204#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
 205#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
 206
 207/* for encoding cft->private value on file */
 208enum res_type {
 209	_MEM,
 210	_MEMSWAP,
 211	_KMEM,
 212	_TCP,
 213};
 214
 215#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
 216#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
 217#define MEMFILE_ATTR(val)	((val) & 0xffff)
 218
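/*
 * A minimal sketch (illustrative, not part of the original file) of how
 * the MEMFILE_* macros round-trip: the resource type occupies the upper
 * 16 bits of cft->private and the attribute the lower 16.
 */
#if 0	/* illustrative only, not compiled */
static void example_memfile_roundtrip(void)
{
	int priv = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE);

	WARN_ON(MEMFILE_TYPE(priv) != _MEMSWAP);	/* upper 16 bits */
	WARN_ON(MEMFILE_ATTR(priv) != RES_USAGE);	/* lower 16 bits */
}
#endif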
 219/*
 220 * Iteration constructs for visiting all cgroups (under a tree).  If
 221 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 222 * be used for reference counting.
 223 */
 224#define for_each_mem_cgroup_tree(iter, root)		\
 225	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
 226	     iter != NULL;				\
 227	     iter = mem_cgroup_iter(root, iter, NULL))
 228
 229#define for_each_mem_cgroup(iter)			\
 230	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
 231	     iter != NULL;				\
 232	     iter = mem_cgroup_iter(NULL, iter, NULL))
 233
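/*
 * A minimal usage sketch (illustrative, not part of the original file):
 * walking a subtree with the iterator macros above. Exiting the loop
 * early requires mem_cgroup_iter_break() to drop the reference held on
 * @iter; some_condition() is a hypothetical predicate.
 */
#if 0	/* illustrative only, not compiled */
static void example_walk(struct mem_cgroup *root)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, root) {
		if (some_condition(iter)) {
			mem_cgroup_iter_break(root, iter);
			break;
		}
	}
}
#endif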
 234static inline bool task_is_dying(void)
 235{
 236	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
 237		(current->flags & PF_EXITING);
 238}
 239
 240/* Some nice accessors for the vmpressure. */
 241struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
 242{
 243	if (!memcg)
 244		memcg = root_mem_cgroup;
 245	return &memcg->vmpressure;
 246}
 247
 248struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
 249{
 250	return container_of(vmpr, struct mem_cgroup, vmpressure);
 251}
 252
 253#define CURRENT_OBJCG_UPDATE_BIT 0
 254#define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)
 255
 256#ifdef CONFIG_MEMCG_KMEM
 257static DEFINE_SPINLOCK(objcg_lock);
 258
 259bool mem_cgroup_kmem_disabled(void)
 260{
 261	return cgroup_memory_nokmem;
 262}
 263
 264static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
 265				      unsigned int nr_pages);
 266
 267static void obj_cgroup_release(struct percpu_ref *ref)
 268{
 269	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
 270	unsigned int nr_bytes;
 271	unsigned int nr_pages;
 272	unsigned long flags;
 273
 274	/*
 275	 * At this point all allocated objects are freed, and
 276	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
 277	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
 278	 *
 279	 * The following sequence can lead to it:
 280	 * 1) CPU0: objcg == stock->cached_objcg
 281	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
 282	 *          PAGE_SIZE bytes are charged
 283	 * 3) CPU1: a process from another memcg is allocating something,
 284	 *          the stock is flushed,
 285	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
 286	 * 4) CPU0: we release this object,
 287	 *          92 bytes are added to stock->nr_bytes
 288	 * 5) CPU0: stock is flushed,
 289	 *          92 bytes are added to objcg->nr_charged_bytes
 290	 *
 291	 * As a result, nr_charged_bytes == PAGE_SIZE.
 292	 * This page will be uncharged in obj_cgroup_release().
 293	 */
 294	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
 295	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
 296	nr_pages = nr_bytes >> PAGE_SHIFT;
 297
 298	if (nr_pages)
 299		obj_cgroup_uncharge_pages(objcg, nr_pages);
 300
 301	spin_lock_irqsave(&objcg_lock, flags);
 302	list_del(&objcg->list);
 303	spin_unlock_irqrestore(&objcg_lock, flags);
 304
 305	percpu_ref_exit(ref);
 306	kfree_rcu(objcg, rcu);
 307}
 308
 309static struct obj_cgroup *obj_cgroup_alloc(void)
 310{
 311	struct obj_cgroup *objcg;
 312	int ret;
 313
 314	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
 315	if (!objcg)
 316		return NULL;
 317
 318	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
 319			      GFP_KERNEL);
 320	if (ret) {
 321		kfree(objcg);
 322		return NULL;
 323	}
 324	INIT_LIST_HEAD(&objcg->list);
 325	return objcg;
 326}
 327
 328static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
 329				  struct mem_cgroup *parent)
 330{
 331	struct obj_cgroup *objcg, *iter;
 332
 333	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
 334
 335	spin_lock_irq(&objcg_lock);
 336
 337	/* 1) Ready to reparent active objcg. */
 338	list_add(&objcg->list, &memcg->objcg_list);
 339	/* 2) Reparent active objcg and already reparented objcgs to parent. */
 340	list_for_each_entry(iter, &memcg->objcg_list, list)
 341		WRITE_ONCE(iter->memcg, parent);
 342	/* 3) Move already reparented objcgs to the parent's list */
 343	list_splice(&memcg->objcg_list, &parent->objcg_list);
 344
 345	spin_unlock_irq(&objcg_lock);
 346
 347	percpu_ref_kill(&objcg->refcnt);
 348}
 349
 350/*
 351 * A lot of the calls to the cache allocation functions are expected to be
 352 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
 353 * conditional on this static branch, we have to allow modules that do
 354 * kmem_cache_alloc and the like to see this symbol as well.
 355 */
 356DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
 357EXPORT_SYMBOL(memcg_kmem_online_key);
 358
 359DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
 360EXPORT_SYMBOL(memcg_bpf_enabled_key);
 361#endif
 362
 363/**
 364 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
 365 * @folio: folio of interest
 366 *
 367 * If memcg is bound to the default hierarchy, css of the memcg associated
 368 * with @folio is returned.  The returned css remains associated with @folio
 369 * until it is released.
 370 *
 371 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 372 * is returned.
 373 */
 374struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
 375{
 376	struct mem_cgroup *memcg = folio_memcg(folio);
 377
 378	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
 379		memcg = root_mem_cgroup;
 380
 381	return &memcg->css;
 382}
 383
 384/**
 385 * page_cgroup_ino - return inode number of the memcg a page is charged to
 386 * @page: the page
 387 *
 388 * Look up the closest online ancestor of the memory cgroup @page is charged to
 389 * and return its inode number or 0 if @page is not charged to any cgroup. It
 390 * is safe to call this function without holding a reference to @page.
 391 *
 392 * Note, this function is inherently racy, because there is nothing to prevent
 393 * the cgroup inode from getting torn down and potentially reallocated a moment
 394 * after page_cgroup_ino() returns, so it should only be used by callers that
 395 * do not care (such as procfs interfaces).
 396 */
 397ino_t page_cgroup_ino(struct page *page)
 398{
 399	struct mem_cgroup *memcg;
 400	unsigned long ino = 0;
 401
 402	rcu_read_lock();
 403	/* page_folio() is racy here, but the entire function is racy anyway */
 404	memcg = folio_memcg_check(page_folio(page));
 405
 406	while (memcg && !(memcg->css.flags & CSS_ONLINE))
 407		memcg = parent_mem_cgroup(memcg);
 408	if (memcg)
 409		ino = cgroup_ino(memcg->css.cgroup);
 410	rcu_read_unlock();
 411	return ino;
 412}
 413
 414static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
 415					 struct mem_cgroup_tree_per_node *mctz,
 416					 unsigned long new_usage_in_excess)
 417{
 418	struct rb_node **p = &mctz->rb_root.rb_node;
 419	struct rb_node *parent = NULL;
 420	struct mem_cgroup_per_node *mz_node;
 421	bool rightmost = true;
 422
 423	if (mz->on_tree)
 424		return;
 425
 426	mz->usage_in_excess = new_usage_in_excess;
 427	if (!mz->usage_in_excess)
 428		return;
 429	while (*p) {
 430		parent = *p;
 431		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
 432					tree_node);
 433		if (mz->usage_in_excess < mz_node->usage_in_excess) {
 434			p = &(*p)->rb_left;
 435			rightmost = false;
 436		} else {
 437			p = &(*p)->rb_right;
 438		}
 439	}
 440
 441	if (rightmost)
 442		mctz->rb_rightmost = &mz->tree_node;
 443
 444	rb_link_node(&mz->tree_node, parent, p);
 445	rb_insert_color(&mz->tree_node, &mctz->rb_root);
 446	mz->on_tree = true;
 447}
 448
 449static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
 450					 struct mem_cgroup_tree_per_node *mctz)
 451{
 452	if (!mz->on_tree)
 453		return;
 454
 455	if (&mz->tree_node == mctz->rb_rightmost)
 456		mctz->rb_rightmost = rb_prev(&mz->tree_node);
 457
 458	rb_erase(&mz->tree_node, &mctz->rb_root);
 459	mz->on_tree = false;
 460}
 461
 462static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
 463				       struct mem_cgroup_tree_per_node *mctz)
 464{
 465	unsigned long flags;
 466
 467	spin_lock_irqsave(&mctz->lock, flags);
 468	__mem_cgroup_remove_exceeded(mz, mctz);
 469	spin_unlock_irqrestore(&mctz->lock, flags);
 470}
 471
 472static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
 473{
 474	unsigned long nr_pages = page_counter_read(&memcg->memory);
 475	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
 476	unsigned long excess = 0;
 477
 478	if (nr_pages > soft_limit)
 479		excess = nr_pages - soft_limit;
 480
 481	return excess;
 482}
 483
 484static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
 485{
 486	unsigned long excess;
 487	struct mem_cgroup_per_node *mz;
 488	struct mem_cgroup_tree_per_node *mctz;
 489
 490	if (lru_gen_enabled()) {
 491		if (soft_limit_excess(memcg))
 492			lru_gen_soft_reclaim(memcg, nid);
 493		return;
 494	}
 495
 496	mctz = soft_limit_tree.rb_tree_per_node[nid];
 497	if (!mctz)
 498		return;
 499	/*
 500	 * Necessary to update all ancestors when hierarchy is used,
 501	 * because their event counter is not touched.
 502	 */
 503	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
 504		mz = memcg->nodeinfo[nid];
 505		excess = soft_limit_excess(memcg);
 506		/*
 507		 * We have to update the tree if mz is on RB-tree or
 508		 * mem is over its softlimit.
 509		 */
 510		if (excess || mz->on_tree) {
 511			unsigned long flags;
 512
 513			spin_lock_irqsave(&mctz->lock, flags);
 514			/* if on-tree, remove it */
 515			if (mz->on_tree)
 516				__mem_cgroup_remove_exceeded(mz, mctz);
 517			/*
 518			 * Insert again. mz->usage_in_excess will be updated.
 519			 * If excess is 0, no tree ops.
 520			 */
 521			__mem_cgroup_insert_exceeded(mz, mctz, excess);
 522			spin_unlock_irqrestore(&mctz->lock, flags);
 523		}
 524	}
 525}
 526
 527static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
 528{
 529	struct mem_cgroup_tree_per_node *mctz;
 530	struct mem_cgroup_per_node *mz;
 531	int nid;
 532
 533	for_each_node(nid) {
 534		mz = memcg->nodeinfo[nid];
 535		mctz = soft_limit_tree.rb_tree_per_node[nid];
 536		if (mctz)
 537			mem_cgroup_remove_exceeded(mz, mctz);
 538	}
 539}
 540
 541static struct mem_cgroup_per_node *
 542__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
 543{
 544	struct mem_cgroup_per_node *mz;
 545
 546retry:
 547	mz = NULL;
 548	if (!mctz->rb_rightmost)
 549		goto done;		/* Nothing to reclaim from */
 550
 551	mz = rb_entry(mctz->rb_rightmost,
 552		      struct mem_cgroup_per_node, tree_node);
 553	/*
 554	 * Remove the node now, but someone else can add it back;
 555	 * we will add it back at the end of reclaim to its correct
 556	 * position in the tree.
 557	 */
 558	__mem_cgroup_remove_exceeded(mz, mctz);
 559	if (!soft_limit_excess(mz->memcg) ||
 560	    !css_tryget(&mz->memcg->css))
 561		goto retry;
 562done:
 563	return mz;
 564}
 565
 566static struct mem_cgroup_per_node *
 567mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
 568{
 569	struct mem_cgroup_per_node *mz;
 570
 571	spin_lock_irq(&mctz->lock);
 572	mz = __mem_cgroup_largest_soft_limit_node(mctz);
 573	spin_unlock_irq(&mctz->lock);
 574	return mz;
 575}
 576
 577/* Subset of vm_event_item to report for memcg event stats */
 578static const unsigned int memcg_vm_event_stat[] = {
 579	PGPGIN,
 580	PGPGOUT,
 581	PGSCAN_KSWAPD,
 582	PGSCAN_DIRECT,
 583	PGSCAN_KHUGEPAGED,
 584	PGSTEAL_KSWAPD,
 585	PGSTEAL_DIRECT,
 586	PGSTEAL_KHUGEPAGED,
 587	PGFAULT,
 588	PGMAJFAULT,
 589	PGREFILL,
 590	PGACTIVATE,
 591	PGDEACTIVATE,
 592	PGLAZYFREE,
 593	PGLAZYFREED,
 594#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
 595	ZSWPIN,
 596	ZSWPOUT,
 597	ZSWPWB,
 598#endif
 599#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 600	THP_FAULT_ALLOC,
 601	THP_COLLAPSE_ALLOC,
 602	THP_SWPOUT,
 603	THP_SWPOUT_FALLBACK,
 604#endif
 605};
 606
 607#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
 608static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
 609
 610static void init_memcg_events(void)
 611{
 612	int i;
 613
 614	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
 615		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
 616}
 617
 618static inline int memcg_events_index(enum vm_event_item idx)
 619{
 620	return mem_cgroup_events_index[idx] - 1;
 621}
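/*
 * Illustrative note: init_memcg_events() stores index + 1 so that the
 * zero-initialized slots of mem_cgroup_events_index[] mean "event not
 * tracked"; memcg_events_index() undoes the bias and returns -1 for
 * any vm_event_item missing from memcg_vm_event_stat[].
 */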
 622
 623struct memcg_vmstats_percpu {
 624	/* Stats updates since the last flush */
 625	unsigned int			stats_updates;
 626
 627	/* Cached pointers for fast iteration in memcg_rstat_updated() */
 628	struct memcg_vmstats_percpu	*parent;
 629	struct memcg_vmstats		*vmstats;
 630
 631	/* The above should fit a single cacheline for memcg_rstat_updated() */
 632
 633	/* Local (CPU and cgroup) page state & events */
 634	long			state[MEMCG_NR_STAT];
 635	unsigned long		events[NR_MEMCG_EVENTS];
 636
 637	/* Delta calculation for lockless upward propagation */
 638	long			state_prev[MEMCG_NR_STAT];
 639	unsigned long		events_prev[NR_MEMCG_EVENTS];
 640
 641	/* Cgroup1: threshold notifications & softlimit tree updates */
 642	unsigned long		nr_page_events;
 643	unsigned long		targets[MEM_CGROUP_NTARGETS];
 644} ____cacheline_aligned;
 645
 646struct memcg_vmstats {
 647	/* Aggregated (CPU and subtree) page state & events */
 648	long			state[MEMCG_NR_STAT];
 649	unsigned long		events[NR_MEMCG_EVENTS];
 650
 651	/* Non-hierarchical (CPU aggregated) page state & events */
 652	long			state_local[MEMCG_NR_STAT];
 653	unsigned long		events_local[NR_MEMCG_EVENTS];
 654
 655	/* Pending child counts during tree propagation */
 656	long			state_pending[MEMCG_NR_STAT];
 657	unsigned long		events_pending[NR_MEMCG_EVENTS];
 658
 659	/* Stats updates since the last flush */
 660	atomic64_t		stats_updates;
 661};
 662
 663/*
 664 * memcg and lruvec stats flushing
 665 *
 666 * Many codepaths leading to stats update or read are performance sensitive and
 667 * adding stats flushing in such codepaths is not desirable. So, to optimize
 668 * flushing, the kernel does the following:
 669 *
 670 * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
 671 *    rstat update tree grow unbounded.
 672 *
 673 * 2) Flush the stats synchronously on reader side only when there are more than
 674 *    (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization lets
 675 *    the stats be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus)
 676 *    updates, but only for 2 seconds due to (1).
 677 */
 678static void flush_memcg_stats_dwork(struct work_struct *w);
 679static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
 680static u64 flush_last_time;
 681
 682#define FLUSH_TIME (2UL*HZ)
 683
 684/*
 685 * Accessors to ensure that preemption is disabled on PREEMPT_RT, where an
 686 * acquired spinlock_t lock does not imply disabled preemption. These functions
 687 * are never used in hardirq context on PREEMPT_RT, so disabling preemption
 688 * is sufficient.
 689 */
 690static void memcg_stats_lock(void)
 691{
 692	preempt_disable_nested();
 693	VM_WARN_ON_IRQS_ENABLED();
 694}
 695
 696static void __memcg_stats_lock(void)
 697{
 698	preempt_disable_nested();
 699}
 700
 701static void memcg_stats_unlock(void)
 702{
 703	preempt_enable_nested();
 704}
 705
 706
 707static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
 708{
 709	return atomic64_read(&vmstats->stats_updates) >
 710		MEMCG_CHARGE_BATCH * num_online_cpus();
 711}
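/*
 * Worked example (illustrative, assuming MEMCG_CHARGE_BATCH == 64): on
 * a machine with 8 online CPUs, the check above fires once more than
 * 64 * 8 == 512 stat updates are pending, so reader-side flushes see
 * stats at most that many updates stale, while the periodic worker
 * bounds the staleness to roughly FLUSH_TIME in any case.
 */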
 712
 713static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
 714{
 715	struct memcg_vmstats_percpu *statc;
 716	int cpu = smp_processor_id();
 717
 718	if (!val)
 719		return;
 720
 721	cgroup_rstat_updated(memcg->css.cgroup, cpu);
 722	statc = this_cpu_ptr(memcg->vmstats_percpu);
 723	for (; statc; statc = statc->parent) {
 724		statc->stats_updates += abs(val);
 725		if (statc->stats_updates < MEMCG_CHARGE_BATCH)
 726			continue;
 727
 728		/*
 729		 * If @memcg is already flush-able, increasing stats_updates is
 730		 * redundant. Avoid the overhead of the atomic update.
 731		 */
 732		if (!memcg_vmstats_needs_flush(statc->vmstats))
 733			atomic64_add(statc->stats_updates,
 734				     &statc->vmstats->stats_updates);
 735		statc->stats_updates = 0;
 736	}
 737}
 738
 739static void do_flush_stats(struct mem_cgroup *memcg)
 740{
 741	if (mem_cgroup_is_root(memcg))
 742		WRITE_ONCE(flush_last_time, jiffies_64);
 743
 744	cgroup_rstat_flush(memcg->css.cgroup);
 745}
 746
 747/*
 748 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
 749 * @memcg: root of the subtree to flush
 750 *
 751 * Flushing is serialized by the underlying global rstat lock. There is also a
 752 * minimum amount of work to be done even if there are no stat updates to flush.
 753 * Hence, we only flush the stats if the updates delta exceeds a threshold. This
 754 * avoids unnecessary work and contention on the underlying lock.
 755 */
 756void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
 757{
 758	if (mem_cgroup_disabled())
 759		return;
 760
 761	if (!memcg)
 762		memcg = root_mem_cgroup;
 763
 764	if (memcg_vmstats_needs_flush(memcg->vmstats))
 765		do_flush_stats(memcg);
 766}
 767
 768void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
 769{
 770	/* Only flush if the periodic flusher is one full cycle late */
 771	if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
 772		mem_cgroup_flush_stats(memcg);
 773}
 774
 775static void flush_memcg_stats_dwork(struct work_struct *w)
 776{
 777	/*
 778	 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
 779	 * in latency-sensitive paths is as cheap as possible.
 780	 */
 781	do_flush_stats(root_mem_cgroup);
 782	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
 783}
 784
 785unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
 786{
 787	long x = READ_ONCE(memcg->vmstats->state[idx]);
 788#ifdef CONFIG_SMP
 789	if (x < 0)
 790		x = 0;
 791#endif
 792	return x;
 793}
 794
 795static int memcg_page_state_unit(int item);
 796
 797/*
 798 * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
 799 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
 800 */
 801static int memcg_state_val_in_pages(int idx, int val)
 802{
 803	int unit = memcg_page_state_unit(idx);
 804
 805	if (!val || unit == PAGE_SIZE)
 806		return val;
 807	else
 808		return max(val * unit / PAGE_SIZE, 1UL);
 809}
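/*
 * Worked example (illustrative): for a byte-based item such as
 * NR_SLAB_RECLAIMABLE_B the unit is 1, so with 4K pages an update of
 * val = 100 bytes becomes max(100 / PAGE_SIZE, 1UL) == 1; small
 * nonzero byte deltas still count as one page towards the flush
 * threshold.
 */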
 810
 811/**
 812 * __mod_memcg_state - update cgroup memory statistics
 813 * @memcg: the memory cgroup
 814 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 815 * @val: delta to add to the counter, can be negative
 816 */
 817void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
 818{
 819	if (mem_cgroup_disabled())
 820		return;
 821
 822	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
 823	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
 824}
 825
 826/* idx can be of type enum memcg_stat_item or node_stat_item. */
 827static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
 828{
 829	long x = READ_ONCE(memcg->vmstats->state_local[idx]);
 830
 831#ifdef CONFIG_SMP
 832	if (x < 0)
 833		x = 0;
 834#endif
 835	return x;
 836}
 837
 838void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 839			      int val)
 840{
 841	struct mem_cgroup_per_node *pn;
 842	struct mem_cgroup *memcg;
 843
 844	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 845	memcg = pn->memcg;
 846
 847	/*
 848	 * The callers from rmap rely on disabled preemption because they never
 849	 * update their counter from in-interrupt context. For those
 850	 * counters we check that the update is never performed from an
 851	 * interrupt context, while other callers need to have interrupts disabled.
 852	 */
 853	__memcg_stats_lock();
 854	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
 855		switch (idx) {
 856		case NR_ANON_MAPPED:
 857		case NR_FILE_MAPPED:
 858		case NR_ANON_THPS:
 859		case NR_SHMEM_PMDMAPPED:
 860		case NR_FILE_PMDMAPPED:
 861			WARN_ON_ONCE(!in_task());
 862			break;
 863		default:
 864			VM_WARN_ON_IRQS_ENABLED();
 865		}
 866	}
 867
 868	/* Update memcg */
 869	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
 870
 871	/* Update lruvec */
 872	__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
 873
 874	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
 875	memcg_stats_unlock();
 876}
 877
 878/**
 879 * __mod_lruvec_state - update lruvec memory statistics
 880 * @lruvec: the lruvec
 881 * @idx: the stat item
 882 * @val: delta to add to the counter, can be negative
 883 *
 884 * The lruvec is the intersection of the NUMA node and a cgroup. This
 885 * function updates the all three counters that are affected by a
 886 * change of state at this level: per-node, per-cgroup, per-lruvec.
 887 */
 888void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 889			int val)
 890{
 891	/* Update node */
 892	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
 893
 894	/* Update memcg and lruvec */
 895	if (!mem_cgroup_disabled())
 896		__mod_memcg_lruvec_state(lruvec, idx, val);
 897}
 898
 899void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
 900			     int val)
 901{
 902	struct mem_cgroup *memcg;
 903	pg_data_t *pgdat = folio_pgdat(folio);
 904	struct lruvec *lruvec;
 905
 906	rcu_read_lock();
 907	memcg = folio_memcg(folio);
 908	/* Untracked pages have no memcg, no lruvec. Update only the node */
 909	if (!memcg) {
 910		rcu_read_unlock();
 911		__mod_node_page_state(pgdat, idx, val);
 912		return;
 913	}
 914
 915	lruvec = mem_cgroup_lruvec(memcg, pgdat);
 916	__mod_lruvec_state(lruvec, idx, val);
 917	rcu_read_unlock();
 918}
 919EXPORT_SYMBOL(__lruvec_stat_mod_folio);
 920
 921void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
 922{
 923	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
 924	struct mem_cgroup *memcg;
 925	struct lruvec *lruvec;
 926
 927	rcu_read_lock();
 928	memcg = mem_cgroup_from_slab_obj(p);
 929
 930	/*
 931	 * Untracked pages have no memcg, no lruvec. Update only the
 932	 * node. If we reparent the slab objects to the root memcg,
 933	 * when we free the slab object, we need to update the per-memcg
 934	 * vmstats to keep them correct for the root memcg.
 935	 */
 936	if (!memcg) {
 937		__mod_node_page_state(pgdat, idx, val);
 938	} else {
 939		lruvec = mem_cgroup_lruvec(memcg, pgdat);
 940		__mod_lruvec_state(lruvec, idx, val);
 941	}
 942	rcu_read_unlock();
 943}
 944
 945/**
 946 * __count_memcg_events - account VM events in a cgroup
 947 * @memcg: the memory cgroup
 948 * @idx: the event item
 949 * @count: the number of events that occurred
 950 */
 951void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
 952			  unsigned long count)
 953{
 954	int index = memcg_events_index(idx);
 955
 956	if (mem_cgroup_disabled() || index < 0)
 957		return;
 958
 959	memcg_stats_lock();
 960	__this_cpu_add(memcg->vmstats_percpu->events[index], count);
 961	memcg_rstat_updated(memcg, count);
 962	memcg_stats_unlock();
 963}
 964
 965static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
 966{
 967	int index = memcg_events_index(event);
 968
 969	if (index < 0)
 970		return 0;
 971	return READ_ONCE(memcg->vmstats->events[index]);
 972}
 973
 974static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
 975{
 976	int index = memcg_events_index(event);
 977
 978	if (index < 0)
 979		return 0;
 980
 981	return READ_ONCE(memcg->vmstats->events_local[index]);
 982}
 983
 984static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 985					 int nr_pages)
 986{
 987	/* pagein of a big page is an event. So, ignore page size */
 988	if (nr_pages > 0)
 989		__count_memcg_events(memcg, PGPGIN, 1);
 990	else {
 991		__count_memcg_events(memcg, PGPGOUT, 1);
 992		nr_pages = -nr_pages; /* for event */
 993	}
 994
 995	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
 996}
 997
 998static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
 999				       enum mem_cgroup_events_target target)
1000{
1001	unsigned long val, next;
1002
1003	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
1004	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
1005	/* from time_after() in jiffies.h */
1006	if ((long)(next - val) < 0) {
1007		switch (target) {
1008		case MEM_CGROUP_TARGET_THRESH:
1009			next = val + THRESHOLDS_EVENTS_TARGET;
1010			break;
1011		case MEM_CGROUP_TARGET_SOFTLIMIT:
1012			next = val + SOFTLIMIT_EVENTS_TARGET;
1013			break;
1014		default:
1015			break;
1016		}
1017		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
1018		return true;
1019	}
1020	return false;
1021}
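/*
 * Illustrative note: the (long)(next - val) < 0 test above is the same
 * wraparound-safe comparison used by time_after(). For example, with
 * THRESHOLDS_EVENTS_TARGET == 128 the threshold target re-arms 128
 * page events past the current count each time it fires, and the
 * comparison stays correct even if the counters eventually wrap.
 */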
1022
1023/*
1024 * Check events in order.
1026 */
1027static void memcg_check_events(struct mem_cgroup *memcg, int nid)
1028{
1029	if (IS_ENABLED(CONFIG_PREEMPT_RT))
1030		return;
1031
1032	/* threshold event is triggered in finer grain than soft limit */
1033	if (unlikely(mem_cgroup_event_ratelimit(memcg,
1034						MEM_CGROUP_TARGET_THRESH))) {
1035		bool do_softlimit;
1036
1037		do_softlimit = mem_cgroup_event_ratelimit(memcg,
1038						MEM_CGROUP_TARGET_SOFTLIMIT);
1039		mem_cgroup_threshold(memcg);
1040		if (unlikely(do_softlimit))
1041			mem_cgroup_update_tree(memcg, nid);
1042	}
1043}
1044
1045struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
1046{
1047	/*
1048	 * mm_update_next_owner() may clear mm->owner to NULL
1049	 * if it races with swapoff, page migration, etc.
1050	 * So this can be called with p == NULL.
1051	 */
1052	if (unlikely(!p))
1053		return NULL;
1054
1055	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
1056}
1057EXPORT_SYMBOL(mem_cgroup_from_task);
1058
1059static __always_inline struct mem_cgroup *active_memcg(void)
1060{
1061	if (!in_task())
1062		return this_cpu_read(int_active_memcg);
1063	else
1064		return current->active_memcg;
1065}
1066
1067/**
1068 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
1069 * @mm: mm from which memcg should be extracted. It can be NULL.
1070 *
 1071 * Obtain a reference on mm->memcg and return it if successful. If mm
1072 * is NULL, then the memcg is chosen as follows:
1073 * 1) The active memcg, if set.
1074 * 2) current->mm->memcg, if available
1075 * 3) root memcg
1076 * If mem_cgroup is disabled, NULL is returned.
1077 */
1078struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1079{
1080	struct mem_cgroup *memcg;
1081
1082	if (mem_cgroup_disabled())
1083		return NULL;
1084
1085	/*
1086	 * Page cache insertions can happen without an
1087	 * actual mm context, e.g. during disk probing
1088	 * on boot, loopback IO, acct() writes etc.
1089	 *
1090	 * No need to css_get on root memcg as the reference
1091	 * counting is disabled on the root level in the
1092	 * cgroup core. See CSS_NO_REF.
1093	 */
1094	if (unlikely(!mm)) {
1095		memcg = active_memcg();
1096		if (unlikely(memcg)) {
1097			/* remote memcg must hold a ref */
1098			css_get(&memcg->css);
1099			return memcg;
1100		}
1101		mm = current->mm;
1102		if (unlikely(!mm))
1103			return root_mem_cgroup;
1104	}
1105
1106	rcu_read_lock();
1107	do {
1108		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1109		if (unlikely(!memcg))
1110			memcg = root_mem_cgroup;
1111	} while (!css_tryget(&memcg->css));
1112	rcu_read_unlock();
1113	return memcg;
1114}
1115EXPORT_SYMBOL(get_mem_cgroup_from_mm);
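/*
 * Usage sketch (illustrative, not part of the original file): callers
 * own the reference returned here and must drop it when done.
 */
#if 0	/* illustrative only, not compiled */
static void example_mm_lookup(void)
{
	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(current->mm);

	if (memcg) {
		/* ... inspect or charge against memcg ... */
		css_put(&memcg->css);	/* no-op for the root memcg */
	}
}
#endif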
1116
1117/**
1118 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
1119 */
1120struct mem_cgroup *get_mem_cgroup_from_current(void)
1121{
1122	struct mem_cgroup *memcg;
1123
1124	if (mem_cgroup_disabled())
1125		return NULL;
1126
1127again:
1128	rcu_read_lock();
1129	memcg = mem_cgroup_from_task(current);
1130	if (!css_tryget(&memcg->css)) {
1131		rcu_read_unlock();
1132		goto again;
1133	}
1134	rcu_read_unlock();
1135	return memcg;
1136}
1137
1138/**
1139 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1140 * @root: hierarchy root
1141 * @prev: previously returned memcg, NULL on first invocation
1142 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1143 *
1144 * Returns references to children of the hierarchy below @root, or
1145 * @root itself, or %NULL after a full round-trip.
1146 *
1147 * Caller must pass the return value in @prev on subsequent
1148 * invocations for reference counting, or use mem_cgroup_iter_break()
1149 * to cancel a hierarchy walk before the round-trip is complete.
1150 *
1151 * Reclaimers can specify a node in @reclaim to divide up the memcgs
1152 * in the hierarchy among all concurrent reclaimers operating on the
1153 * same node.
1154 */
1155struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1156				   struct mem_cgroup *prev,
1157				   struct mem_cgroup_reclaim_cookie *reclaim)
1158{
1159	struct mem_cgroup_reclaim_iter *iter;
1160	struct cgroup_subsys_state *css = NULL;
1161	struct mem_cgroup *memcg = NULL;
1162	struct mem_cgroup *pos = NULL;
1163
1164	if (mem_cgroup_disabled())
1165		return NULL;
1166
1167	if (!root)
1168		root = root_mem_cgroup;
1169
1170	rcu_read_lock();
1171
1172	if (reclaim) {
1173		struct mem_cgroup_per_node *mz;
1174
1175		mz = root->nodeinfo[reclaim->pgdat->node_id];
1176		iter = &mz->iter;
1177
1178		/*
1179		 * On start, join the current reclaim iteration cycle.
1180		 * Exit when a concurrent walker completes it.
1181		 */
1182		if (!prev)
1183			reclaim->generation = iter->generation;
1184		else if (reclaim->generation != iter->generation)
1185			goto out_unlock;
1186
1187		while (1) {
1188			pos = READ_ONCE(iter->position);
1189			if (!pos || css_tryget(&pos->css))
1190				break;
1191			/*
1192			 * css reference reached zero, so iter->position will
1193			 * be cleared by ->css_released. However, we should not
1194			 * rely on this happening soon, because ->css_released
1195			 * is called from a work queue, and by busy-waiting we
1196			 * might block it. So we clear iter->position right
1197			 * away.
1198			 */
1199			(void)cmpxchg(&iter->position, pos, NULL);
1200		}
1201	} else if (prev) {
1202		pos = prev;
1203	}
1204
1205	if (pos)
1206		css = &pos->css;
1207
1208	for (;;) {
1209		css = css_next_descendant_pre(css, &root->css);
1210		if (!css) {
1211			/*
1212			 * Reclaimers share the hierarchy walk, and a
1213			 * new one might jump in right at the end of
1214			 * the hierarchy - make sure they see at least
1215			 * one group and restart from the beginning.
1216			 */
1217			if (!prev)
1218				continue;
1219			break;
1220		}
1221
1222		/*
1223		 * Verify the css and acquire a reference.  The root
1224		 * is provided by the caller, so we know it's alive
1225		 * and kicking, and don't take an extra reference.
1226		 */
1227		if (css == &root->css || css_tryget(css)) {
1228			memcg = mem_cgroup_from_css(css);
1229			break;
1230		}
1231	}
1232
1233	if (reclaim) {
1234		/*
1235		 * The position could have already been updated by a competing
1236		 * thread, so check that the value hasn't changed since we read
1237		 * it to avoid reclaiming from the same cgroup twice.
1238		 */
1239		(void)cmpxchg(&iter->position, pos, memcg);
1240
1241		if (pos)
1242			css_put(&pos->css);
1243
1244		if (!memcg)
1245			iter->generation++;
1246	}
1247
1248out_unlock:
1249	rcu_read_unlock();
1250	if (prev && prev != root)
1251		css_put(&prev->css);
1252
1253	return memcg;
1254}
1255
1256/**
1257 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1258 * @root: hierarchy root
1259 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1260 */
1261void mem_cgroup_iter_break(struct mem_cgroup *root,
1262			   struct mem_cgroup *prev)
1263{
1264	if (!root)
1265		root = root_mem_cgroup;
1266	if (prev && prev != root)
1267		css_put(&prev->css);
1268}
1269
1270static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1271					struct mem_cgroup *dead_memcg)
1272{
1273	struct mem_cgroup_reclaim_iter *iter;
1274	struct mem_cgroup_per_node *mz;
1275	int nid;
1276
1277	for_each_node(nid) {
1278		mz = from->nodeinfo[nid];
1279		iter = &mz->iter;
1280		cmpxchg(&iter->position, dead_memcg, NULL);
1281	}
1282}
1283
1284static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1285{
1286	struct mem_cgroup *memcg = dead_memcg;
1287	struct mem_cgroup *last;
1288
1289	do {
1290		__invalidate_reclaim_iterators(memcg, dead_memcg);
1291		last = memcg;
1292	} while ((memcg = parent_mem_cgroup(memcg)));
1293
1294	/*
1295	 * When cgroup1 non-hierarchy mode is used,
1296	 * parent_mem_cgroup() does not walk all the way up to the
1297	 * cgroup root (root_mem_cgroup). So we have to handle
1298	 * dead_memcg from cgroup root separately.
1299	 */
1300	if (!mem_cgroup_is_root(last))
1301		__invalidate_reclaim_iterators(root_mem_cgroup,
1302						dead_memcg);
1303}
1304
1305/**
1306 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1307 * @memcg: hierarchy root
1308 * @fn: function to call for each task
1309 * @arg: argument passed to @fn
1310 *
1311 * This function iterates over tasks attached to @memcg or to any of its
1312 * descendants and calls @fn for each task. If @fn returns a non-zero
1313 * value, the function breaks the iteration loop. Otherwise, it will iterate
1314 * over all tasks and return 0.
1315 *
1316 * This function must not be called for the root memory cgroup.
1317 */
1318void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1319			   int (*fn)(struct task_struct *, void *), void *arg)
1320{
1321	struct mem_cgroup *iter;
1322	int ret = 0;
1323
1324	BUG_ON(mem_cgroup_is_root(memcg));
1325
1326	for_each_mem_cgroup_tree(iter, memcg) {
1327		struct css_task_iter it;
1328		struct task_struct *task;
1329
1330		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1331		while (!ret && (task = css_task_iter_next(&it)))
1332			ret = fn(task, arg);
1333		css_task_iter_end(&it);
1334		if (ret) {
1335			mem_cgroup_iter_break(memcg, iter);
1336			break;
1337		}
1338	}
1339}
1340
1341#ifdef CONFIG_DEBUG_VM
1342void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1343{
1344	struct mem_cgroup *memcg;
1345
1346	if (mem_cgroup_disabled())
1347		return;
1348
1349	memcg = folio_memcg(folio);
1350
1351	if (!memcg)
1352		VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1353	else
1354		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1355}
1356#endif
1357
1358/**
1359 * folio_lruvec_lock - Lock the lruvec for a folio.
1360 * @folio: Pointer to the folio.
1361 *
1362 * These functions are safe to use under any of the following conditions:
1363 * - folio locked
1364 * - folio_test_lru false
1365 * - folio_memcg_lock()
1366 * - folio frozen (refcount of 0)
1367 *
1368 * Return: The lruvec this folio is on with its lock held.
1369 */
1370struct lruvec *folio_lruvec_lock(struct folio *folio)
1371{
1372	struct lruvec *lruvec = folio_lruvec(folio);
1373
1374	spin_lock(&lruvec->lru_lock);
1375	lruvec_memcg_debug(lruvec, folio);
1376
1377	return lruvec;
1378}
1379
1380/**
1381 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1382 * @folio: Pointer to the folio.
1383 *
1384 * These functions are safe to use under any of the following conditions:
1385 * - folio locked
1386 * - folio_test_lru false
1387 * - folio_memcg_lock()
1388 * - folio frozen (refcount of 0)
1389 *
1390 * Return: The lruvec this folio is on with its lock held and interrupts
1391 * disabled.
1392 */
1393struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1394{
1395	struct lruvec *lruvec = folio_lruvec(folio);
1396
1397	spin_lock_irq(&lruvec->lru_lock);
1398	lruvec_memcg_debug(lruvec, folio);
1399
1400	return lruvec;
1401}
1402
1403/**
1404 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1405 * @folio: Pointer to the folio.
1406 * @flags: Pointer to irqsave flags.
1407 *
1408 * These functions are safe to use under any of the following conditions:
1409 * - folio locked
1410 * - folio_test_lru false
1411 * - folio_memcg_lock()
1412 * - folio frozen (refcount of 0)
1413 *
1414 * Return: The lruvec this folio is on with its lock held and interrupts
1415 * disabled.
1416 */
1417struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1418		unsigned long *flags)
1419{
1420	struct lruvec *lruvec = folio_lruvec(folio);
1421
1422	spin_lock_irqsave(&lruvec->lru_lock, *flags);
1423	lruvec_memcg_debug(lruvec, folio);
1424
1425	return lruvec;
1426}
1427
1428/**
1429 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1430 * @lruvec: mem_cgroup per zone lru vector
1431 * @lru: index of lru list the page is sitting on
1432 * @zid: zone id of the accounted pages
1433 * @nr_pages: positive when adding or negative when removing
1434 *
1435 * This function must be called under lru_lock, just before a page is added
1436 * to or just after a page is removed from an lru list.
1437 */
1438void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1439				int zid, int nr_pages)
1440{
1441	struct mem_cgroup_per_node *mz;
1442	unsigned long *lru_size;
1443	long size;
1444
1445	if (mem_cgroup_disabled())
1446		return;
1447
1448	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1449	lru_size = &mz->lru_zone_size[zid][lru];
1450
1451	if (nr_pages < 0)
1452		*lru_size += nr_pages;
1453
1454	size = *lru_size;
1455	if (WARN_ONCE(size < 0,
1456		"%s(%p, %d, %d): lru_size %ld\n",
1457		__func__, lruvec, lru, nr_pages, size)) {
1458		VM_BUG_ON(1);
1459		*lru_size = 0;
1460	}
1461
1462	if (nr_pages > 0)
1463		*lru_size += nr_pages;
1464}
1465
1466/**
1467 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1468 * @memcg: the memory cgroup
1469 *
1470 * Returns the maximum amount of memory @mem can be charged with, in
1471 * pages.
1472 */
1473static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1474{
1475	unsigned long margin = 0;
1476	unsigned long count;
1477	unsigned long limit;
1478
1479	count = page_counter_read(&memcg->memory);
1480	limit = READ_ONCE(memcg->memory.max);
1481	if (count < limit)
1482		margin = limit - count;
1483
1484	if (do_memsw_account()) {
1485		count = page_counter_read(&memcg->memsw);
1486		limit = READ_ONCE(memcg->memsw.max);
1487		if (count < limit)
1488			margin = min(margin, limit - count);
1489		else
1490			margin = 0;
1491	}
1492
1493	return margin;
1494}
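/*
 * Worked example (illustrative): with memory.max = 1000 pages and 800
 * pages charged, the margin is 200 pages. Under legacy memsw
 * accounting, a tighter memory+swap limit caps it further: memsw usage
 * of 950 against memsw.max = 1100 leaves min(200, 150) == 150 pages.
 */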
1495
1496/*
1497 * A routine for checking "mem" is under move_account() or not.
1498 *
1499 * Checking a cgroup is mc.from or mc.to or under hierarchy of
1500 * moving cgroups. This is for waiting at high-memory pressure
1501 * caused by "move".
1502 */
1503static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1504{
1505	struct mem_cgroup *from;
1506	struct mem_cgroup *to;
1507	bool ret = false;
1508	/*
 1509	 * Unlike the task_move routines, we access mc.to and mc.from not under
 1510	 * mutual exclusion by cgroup_mutex. Here, we take the spinlock instead.
1511	 */
1512	spin_lock(&mc.lock);
1513	from = mc.from;
1514	to = mc.to;
1515	if (!from)
1516		goto unlock;
1517
1518	ret = mem_cgroup_is_descendant(from, memcg) ||
1519		mem_cgroup_is_descendant(to, memcg);
1520unlock:
1521	spin_unlock(&mc.lock);
1522	return ret;
1523}
1524
1525static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1526{
1527	if (mc.moving_task && current != mc.moving_task) {
1528		if (mem_cgroup_under_move(memcg)) {
1529			DEFINE_WAIT(wait);
1530			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1531			/* moving charge context might have finished. */
1532			if (mc.moving_task)
1533				schedule();
1534			finish_wait(&mc.waitq, &wait);
1535			return true;
1536		}
1537	}
1538	return false;
1539}
1540
1541struct memory_stat {
1542	const char *name;
1543	unsigned int idx;
1544};
1545
1546static const struct memory_stat memory_stats[] = {
1547	{ "anon",			NR_ANON_MAPPED			},
1548	{ "file",			NR_FILE_PAGES			},
1549	{ "kernel",			MEMCG_KMEM			},
1550	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
1551	{ "pagetables",			NR_PAGETABLE			},
1552	{ "sec_pagetables",		NR_SECONDARY_PAGETABLE		},
1553	{ "percpu",			MEMCG_PERCPU_B			},
1554	{ "sock",			MEMCG_SOCK			},
1555	{ "vmalloc",			MEMCG_VMALLOC			},
1556	{ "shmem",			NR_SHMEM			},
1557#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
1558	{ "zswap",			MEMCG_ZSWAP_B			},
1559	{ "zswapped",			MEMCG_ZSWAPPED			},
1560#endif
1561	{ "file_mapped",		NR_FILE_MAPPED			},
1562	{ "file_dirty",			NR_FILE_DIRTY			},
1563	{ "file_writeback",		NR_WRITEBACK			},
1564#ifdef CONFIG_SWAP
1565	{ "swapcached",			NR_SWAPCACHE			},
1566#endif
1567#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1568	{ "anon_thp",			NR_ANON_THPS			},
1569	{ "file_thp",			NR_FILE_THPS			},
1570	{ "shmem_thp",			NR_SHMEM_THPS			},
1571#endif
1572	{ "inactive_anon",		NR_INACTIVE_ANON		},
1573	{ "active_anon",		NR_ACTIVE_ANON			},
1574	{ "inactive_file",		NR_INACTIVE_FILE		},
1575	{ "active_file",		NR_ACTIVE_FILE			},
1576	{ "unevictable",		NR_UNEVICTABLE			},
1577	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B		},
1578	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B		},
1579
1580	/* The memory events */
1581	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON		},
1582	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE		},
1583	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON	},
1584	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE	},
1585	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON		},
1586	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE		},
1587	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM		},
1588};
1589
1590/* The actual unit of the state item, not the same as the output unit */
1591static int memcg_page_state_unit(int item)
1592{
1593	switch (item) {
1594	case MEMCG_PERCPU_B:
1595	case MEMCG_ZSWAP_B:
1596	case NR_SLAB_RECLAIMABLE_B:
1597	case NR_SLAB_UNRECLAIMABLE_B:
1598		return 1;
1599	case NR_KERNEL_STACK_KB:
1600		return SZ_1K;
1601	default:
1602		return PAGE_SIZE;
1603	}
1604}
1605
1606/* Translate stat items to the correct unit for memory.stat output */
1607static int memcg_page_state_output_unit(int item)
1608{
1609	/*
1610	 * Workingset state is actually in pages, but we export it to userspace
1611	 * as a scalar count of events, so special case it here.
1612	 */
1613	switch (item) {
1614	case WORKINGSET_REFAULT_ANON:
1615	case WORKINGSET_REFAULT_FILE:
1616	case WORKINGSET_ACTIVATE_ANON:
1617	case WORKINGSET_ACTIVATE_FILE:
1618	case WORKINGSET_RESTORE_ANON:
1619	case WORKINGSET_RESTORE_FILE:
1620	case WORKINGSET_NODERECLAIM:
1621		return 1;
1622	default:
1623		return memcg_page_state_unit(item);
1624	}
1625}
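/*
 * Illustrative note: combined with memcg_page_state_output() below,
 * this yields bytes for most of memory.stat (page counts scaled by
 * PAGE_SIZE, byte-based items scaled by 1), while the workingset items
 * above are reported as raw event counts.
 */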
1626
1627static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1628						    int item)
1629{
1630	return memcg_page_state(memcg, item) *
1631		memcg_page_state_output_unit(item);
1632}
1633
1634static inline unsigned long memcg_page_state_local_output(
1635		struct mem_cgroup *memcg, int item)
1636{
1637	return memcg_page_state_local(memcg, item) *
1638		memcg_page_state_output_unit(item);
1639}
1640
1641static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1642{
1643	int i;
1644
1645	/*
1646	 * Provide statistics on the state of the memory subsystem as
1647	 * well as cumulative event counters that show past behavior.
1648	 *
1649	 * This list is ordered following a combination of these gradients:
1650	 * 1) generic big picture -> specifics and details
1651	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1652	 *
1653	 * Current memory state:
1654	 */
1655	mem_cgroup_flush_stats(memcg);
1656
1657	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1658		u64 size;
1659
1660		size = memcg_page_state_output(memcg, memory_stats[i].idx);
1661		seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
1662
1663		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1664			size += memcg_page_state_output(memcg,
1665							NR_SLAB_RECLAIMABLE_B);
1666			seq_buf_printf(s, "slab %llu\n", size);
1667		}
1668	}
1669
1670	/* Accumulated memory events */
1671	seq_buf_printf(s, "pgscan %lu\n",
1672		       memcg_events(memcg, PGSCAN_KSWAPD) +
1673		       memcg_events(memcg, PGSCAN_DIRECT) +
1674		       memcg_events(memcg, PGSCAN_KHUGEPAGED));
1675	seq_buf_printf(s, "pgsteal %lu\n",
1676		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1677		       memcg_events(memcg, PGSTEAL_DIRECT) +
1678		       memcg_events(memcg, PGSTEAL_KHUGEPAGED));
1679
1680	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1681		if (memcg_vm_event_stat[i] == PGPGIN ||
1682		    memcg_vm_event_stat[i] == PGPGOUT)
1683			continue;
1684
1685		seq_buf_printf(s, "%s %lu\n",
1686			       vm_event_name(memcg_vm_event_stat[i]),
1687			       memcg_events(memcg, memcg_vm_event_stat[i]));
1688	}
1689
1690	/* The above should easily fit into one page */
1691	WARN_ON_ONCE(seq_buf_has_overflowed(s));
1692}
1693
1694static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);
1695
1696static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1697{
1698	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1699		memcg_stat_format(memcg, s);
1700	else
1701		memcg1_stat_format(memcg, s);
1702	WARN_ON_ONCE(seq_buf_has_overflowed(s));
1703}
1704
1705/**
1706 * mem_cgroup_print_oom_context: Print OOM information relevant to
1707 * memory controller.
1708 * @memcg: The memory cgroup that went over limit
1709 * @p: Task that is going to be killed
1710 *
1711 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1712 * enabled
1713 */
1714void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1715{
1716	rcu_read_lock();
1717
1718	if (memcg) {
1719		pr_cont(",oom_memcg=");
1720		pr_cont_cgroup_path(memcg->css.cgroup);
1721	} else
1722		pr_cont(",global_oom");
1723	if (p) {
1724		pr_cont(",task_memcg=");
1725		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1726	}
1727	rcu_read_unlock();
1728}
1729
1730/**
1731 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1732 * memory controller.
1733 * @memcg: The memory cgroup that went over limit
1734 */
1735void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1736{
1737	/* Use static buffer, for the caller is holding oom_lock. */
1738	static char buf[PAGE_SIZE];
1739	struct seq_buf s;
1740
1741	lockdep_assert_held(&oom_lock);
1742
1743	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1744		K((u64)page_counter_read(&memcg->memory)),
1745		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1746	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1747		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1748			K((u64)page_counter_read(&memcg->swap)),
1749			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1750	else {
1751		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1752			K((u64)page_counter_read(&memcg->memsw)),
1753			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1754		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1755			K((u64)page_counter_read(&memcg->kmem)),
1756			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1757	}
1758
1759	pr_info("Memory cgroup stats for ");
1760	pr_cont_cgroup_path(memcg->css.cgroup);
1761	pr_cont(":");
1762	seq_buf_init(&s, buf, sizeof(buf));
1763	memory_stat_format(memcg, &s);
1764	seq_buf_do_printk(&s, KERN_INFO);
1765}
1766
1767/*
1768 * Return the memory (and swap, if configured) limit for a memcg.
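 *
 * Worked example, under legacy memsw accounting: with memory.max = 1G,
 * memsw.max = 1.5G, non-zero swappiness and ample swap, this returns
 * 1G + min(0.5G, total swap) = 1.5G worth of pages.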
1769 */
1770unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1771{
1772	unsigned long max = READ_ONCE(memcg->memory.max);
1773
1774	if (do_memsw_account()) {
1775		if (mem_cgroup_swappiness(memcg)) {
1776			/* Calculate swap excess capacity from memsw limit */
1777			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1778
1779			max += min(swap, (unsigned long)total_swap_pages);
1780		}
1781	} else {
1782		if (mem_cgroup_swappiness(memcg))
1783			max += min(READ_ONCE(memcg->swap.max),
1784				   (unsigned long)total_swap_pages);
1785	}
1786	return max;
1787}
1788
1789unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1790{
1791	return page_counter_read(&memcg->memory);
1792}
1793
1794static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1795				     int order)
1796{
1797	struct oom_control oc = {
1798		.zonelist = NULL,
1799		.nodemask = NULL,
1800		.memcg = memcg,
1801		.gfp_mask = gfp_mask,
1802		.order = order,
1803	};
1804	bool ret = true;
1805
1806	if (mutex_lock_killable(&oom_lock))
1807		return true;
1808
1809	if (mem_cgroup_margin(memcg) >= (1 << order))
1810		goto unlock;
1811
1812	/*
1813	 * A few threads which were not waiting at mutex_lock_killable() can
1814	 * fail to bail out. Therefore, check again after holding oom_lock.
1815	 */
1816	ret = task_is_dying() || out_of_memory(&oc);
1817
1818unlock:
1819	mutex_unlock(&oom_lock);
1820	return ret;
1821}
1822
1823static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1824				   pg_data_t *pgdat,
1825				   gfp_t gfp_mask,
1826				   unsigned long *total_scanned)
1827{
1828	struct mem_cgroup *victim = NULL;
1829	int total = 0;
1830	int loop = 0;
1831	unsigned long excess;
1832	unsigned long nr_scanned;
1833	struct mem_cgroup_reclaim_cookie reclaim = {
1834		.pgdat = pgdat,
1835	};
1836
1837	excess = soft_limit_excess(root_memcg);
1838
1839	while (1) {
1840		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1841		if (!victim) {
1842			loop++;
1843			if (loop >= 2) {
1844				/*
1845				 * If we have not been able to reclaim
1846				 * anything, it might be because there are
1847				 * no reclaimable pages under this hierarchy
1848				 */
1849				if (!total)
1850					break;
1851				/*
1852				 * We want to do more targeted reclaim.
1853				 * excess >> 2 is small enough not to
1854				 * reclaim too much, yet large enough that we
1855				 * don't keep coming back to reclaim from this cgroup.
1856				 */
1857				if (total >= (excess >> 2) ||
1858					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1859					break;
1860			}
1861			continue;
1862		}
1863		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1864					pgdat, &nr_scanned);
1865		*total_scanned += nr_scanned;
1866		if (!soft_limit_excess(root_memcg))
1867			break;
1868	}
1869	mem_cgroup_iter_break(root_memcg, victim);
1870	return total;
1871}
1872
1873#ifdef CONFIG_LOCKDEP
1874static struct lockdep_map memcg_oom_lock_dep_map = {
1875	.name = "memcg_oom_lock",
1876};
1877#endif
1878
1879static DEFINE_SPINLOCK(memcg_oom_lock);
1880
1881/*
1882 * Check whether the OOM killer is already running under our hierarchy.
1883 * If someone else is already running it, return false.
1884 */
1885static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1886{
1887	struct mem_cgroup *iter, *failed = NULL;
1888
1889	spin_lock(&memcg_oom_lock);
1890
1891	for_each_mem_cgroup_tree(iter, memcg) {
1892		if (iter->oom_lock) {
1893			/*
1894			 * This subtree of our hierarchy is already locked,
1895			 * so we cannot grant the lock.
1896			 */
1897			failed = iter;
1898			mem_cgroup_iter_break(memcg, iter);
1899			break;
1900		} else
1901			iter->oom_lock = true;
1902	}
1903
1904	if (failed) {
1905		/*
1906		 * OK, we failed to lock the whole subtree, so we have
1907		 * to clean up what we already set up, up to the failing subtree.
1908		 */
1909		for_each_mem_cgroup_tree(iter, memcg) {
1910			if (iter == failed) {
1911				mem_cgroup_iter_break(memcg, iter);
1912				break;
1913			}
1914			iter->oom_lock = false;
1915		}
1916	} else
1917		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1918
1919	spin_unlock(&memcg_oom_lock);
1920
1921	return !failed;
1922}
1923
1924static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1925{
1926	struct mem_cgroup *iter;
1927
1928	spin_lock(&memcg_oom_lock);
1929	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1930	for_each_mem_cgroup_tree(iter, memcg)
1931		iter->oom_lock = false;
1932	spin_unlock(&memcg_oom_lock);
1933}
1934
1935static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1936{
1937	struct mem_cgroup *iter;
1938
1939	spin_lock(&memcg_oom_lock);
1940	for_each_mem_cgroup_tree(iter, memcg)
1941		iter->under_oom++;
1942	spin_unlock(&memcg_oom_lock);
1943}
1944
1945static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1946{
1947	struct mem_cgroup *iter;
1948
1949	/*
1950	 * Be careful about under_oom underflows because a child memcg
1951	 * could have been added after mem_cgroup_mark_under_oom.
1952	 */
1953	spin_lock(&memcg_oom_lock);
1954	for_each_mem_cgroup_tree(iter, memcg)
1955		if (iter->under_oom > 0)
1956			iter->under_oom--;
1957	spin_unlock(&memcg_oom_lock);
1958}
1959
1960static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1961
1962struct oom_wait_info {
1963	struct mem_cgroup *memcg;
1964	wait_queue_entry_t	wait;
1965};
1966
1967static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1968	unsigned mode, int sync, void *arg)
1969{
1970	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1971	struct mem_cgroup *oom_wait_memcg;
1972	struct oom_wait_info *oom_wait_info;
1973
1974	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1975	oom_wait_memcg = oom_wait_info->memcg;
1976
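	/*
	 * Wake this waiter only if the OOMing memcg and the waiting
	 * memcg belong to the same branch of the hierarchy, i.e. one
	 * is an ancestor of the other (or they are the same).
	 */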
1977	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1978	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1979		return 0;
1980	return autoremove_wake_function(wait, mode, sync, arg);
1981}
1982
1983static void memcg_oom_recover(struct mem_cgroup *memcg)
1984{
1985	/*
1986	 * For the following lockless ->under_oom test, the only required
1987	 * guarantee is that it must see the state asserted by an OOM when
1988	 * this function is called as a result of userland actions
1989	 * triggered by the notification of the OOM.  This is trivially
1990	 * achieved by invoking mem_cgroup_mark_under_oom() before
1991	 * triggering notification.
1992	 */
1993	if (memcg && memcg->under_oom)
1994		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1995}
1996
1997/*
1998 * Returns true if successfully killed one or more processes. Though in some
1999 * corner cases it can return true even without killing any process.
2000 */
2001static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
2002{
2003	bool locked, ret;
2004
2005	if (order > PAGE_ALLOC_COSTLY_ORDER)
2006		return false;
2007
2008	memcg_memory_event(memcg, MEMCG_OOM);
2009
2010	/*
2011	 * We are in the middle of the charge context here, so we
2012	 * don't want to block when potentially sitting on a callstack
2013	 * that holds all kinds of filesystem and mm locks.
2014	 *
2015	 * cgroup1 allows disabling the OOM killer and waiting for outside
2016	 * handling until the charge can succeed; remember the context and put
2017	 * the task to sleep at the end of the page fault when all locks are
2018	 * released.
2019	 *
2020	 * On the other hand, in-kernel OOM killer allows for an async victim
2021	 * memory reclaim (oom_reaper) and that means that we are not solely
2022	 * relying on the oom victim to make a forward progress and we can
2023	 * invoke the oom killer here.
2024	 *
2025	 * Please note that mem_cgroup_out_of_memory might fail to find a
2026	 * victim and then we have to bail out from the charge path.
2027	 */
2028	if (READ_ONCE(memcg->oom_kill_disable)) {
2029		if (current->in_user_fault) {
2030			css_get(&memcg->css);
2031			current->memcg_in_oom = memcg;
2032			current->memcg_oom_gfp_mask = mask;
2033			current->memcg_oom_order = order;
2034		}
2035		return false;
2036	}
2037
2038	mem_cgroup_mark_under_oom(memcg);
2039
2040	locked = mem_cgroup_oom_trylock(memcg);
2041
2042	if (locked)
2043		mem_cgroup_oom_notify(memcg);
2044
2045	mem_cgroup_unmark_under_oom(memcg);
2046	ret = mem_cgroup_out_of_memory(memcg, mask, order);
2047
2048	if (locked)
2049		mem_cgroup_oom_unlock(memcg);
2050
2051	return ret;
2052}
2053
2054/**
2055 * mem_cgroup_oom_synchronize - complete memcg OOM handling
2056 * @handle: actually kill/wait or just clean up the OOM state
2057 *
2058 * This has to be called at the end of a page fault if the memcg OOM
2059 * handler was enabled.
2060 *
2061 * Memcg supports userspace OOM handling where failed allocations must
2062 * sleep on a waitqueue until the userspace task resolves the
2063 * situation.  Sleeping directly in the charge context with all kinds
2064 * of locks held is not a good idea, instead we remember an OOM state
2065 * in the task and mem_cgroup_oom_synchronize() has to be called at
2066 * the end of the page fault to complete the OOM handling.
2067 *
2068 * Returns %true if an ongoing memcg OOM situation was detected and
2069 * completed, %false otherwise.
2070 */
2071bool mem_cgroup_oom_synchronize(bool handle)
2072{
2073	struct mem_cgroup *memcg = current->memcg_in_oom;
2074	struct oom_wait_info owait;
2075	bool locked;
2076
2077	/* OOM is global, do not handle */
2078	if (!memcg)
2079		return false;
2080
2081	if (!handle)
2082		goto cleanup;
2083
2084	owait.memcg = memcg;
2085	owait.wait.flags = 0;
2086	owait.wait.func = memcg_oom_wake_function;
2087	owait.wait.private = current;
2088	INIT_LIST_HEAD(&owait.wait.entry);
2089
2090	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
2091	mem_cgroup_mark_under_oom(memcg);
2092
2093	locked = mem_cgroup_oom_trylock(memcg);
2094
2095	if (locked)
2096		mem_cgroup_oom_notify(memcg);
2097
2098	schedule();
2099	mem_cgroup_unmark_under_oom(memcg);
2100	finish_wait(&memcg_oom_waitq, &owait.wait);
2101
2102	if (locked)
2103		mem_cgroup_oom_unlock(memcg);
2104cleanup:
2105	current->memcg_in_oom = NULL;
2106	css_put(&memcg->css);
2107	return true;
2108}
2109
2110/**
2111 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2112 * @victim: task to be killed by the OOM killer
2113 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2114 *
2115 * Returns a pointer to a memory cgroup, which has to be cleaned up
2116 * by killing all OOM-killable tasks belonging to it.
2117 *
2118 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2119 */
2120struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2121					    struct mem_cgroup *oom_domain)
2122{
2123	struct mem_cgroup *oom_group = NULL;
2124	struct mem_cgroup *memcg;
2125
2126	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2127		return NULL;
2128
2129	if (!oom_domain)
2130		oom_domain = root_mem_cgroup;
2131
2132	rcu_read_lock();
2133
2134	memcg = mem_cgroup_from_task(victim);
2135	if (mem_cgroup_is_root(memcg))
2136		goto out;
2137
2138	/*
2139	 * If the victim task has been asynchronously moved to a different
2140	 * memory cgroup, we might end up killing tasks outside oom_domain.
2141	 * In this case it's better to ignore memory.oom.group.
2142	 */
2143	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2144		goto out;
2145
2146	/*
2147	 * Traverse the memory cgroup hierarchy from the victim task's
2148	 * cgroup up to the OOMing cgroup (or root) to find the
2149	 * highest-level memory cgroup with oom.group set.
2150	 */
2151	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2152		if (READ_ONCE(memcg->oom_group))
2153			oom_group = memcg;
2154
2155		if (memcg == oom_domain)
2156			break;
2157	}
2158
2159	if (oom_group)
2160		css_get(&oom_group->css);
2161out:
2162	rcu_read_unlock();
2163
2164	return oom_group;
2165}
2166
2167void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2168{
2169	pr_info("Tasks in ");
2170	pr_cont_cgroup_path(memcg->css.cgroup);
2171	pr_cont(" are going to be killed due to memory.oom.group being set\n");
2172}
2173
2174/**
2175 * folio_memcg_lock - Bind a folio to its memcg.
2176 * @folio: The folio.
2177 *
2178 * This function prevents unlocked LRU folios from being moved to
2179 * another cgroup.
2180 *
2181 * It ensures the lifetime of the bound memcg.  The caller is responsible
2182 * for the lifetime of the folio.
2183 */
2184void folio_memcg_lock(struct folio *folio)
2185{
2186	struct mem_cgroup *memcg;
2187	unsigned long flags;
2188
2189	/*
2190	 * The RCU lock is held throughout the transaction.  The fast
2191	 * path can get away without acquiring the memcg->move_lock
2192	 * because page moving starts with an RCU grace period.
2193	 */
2194	rcu_read_lock();
2195
2196	if (mem_cgroup_disabled())
2197		return;
2198again:
2199	memcg = folio_memcg(folio);
2200	if (unlikely(!memcg))
2201		return;
2202
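	/*
	 * Let lockdep know about the move_lock dependency even when
	 * the fast path below returns without taking it.
	 */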
2203#ifdef CONFIG_PROVE_LOCKING
2204	local_irq_save(flags);
2205	might_lock(&memcg->move_lock);
2206	local_irq_restore(flags);
2207#endif
2208
2209	if (atomic_read(&memcg->moving_account) <= 0)
2210		return;
2211
2212	spin_lock_irqsave(&memcg->move_lock, flags);
2213	if (memcg != folio_memcg(folio)) {
2214		spin_unlock_irqrestore(&memcg->move_lock, flags);
2215		goto again;
2216	}
2217
2218	/*
2219	 * When charge migration first begins, we can have multiple
2220	 * critical sections holding the fast-path RCU lock and one
2221	 * holding the slowpath move_lock. Track the task that holds the
2222	 * move_lock for folio_memcg_unlock().
2223	 */
2224	memcg->move_lock_task = current;
2225	memcg->move_lock_flags = flags;
2226}
2227
2228static void __folio_memcg_unlock(struct mem_cgroup *memcg)
2229{
2230	if (memcg && memcg->move_lock_task == current) {
2231		unsigned long flags = memcg->move_lock_flags;
2232
2233		memcg->move_lock_task = NULL;
2234		memcg->move_lock_flags = 0;
2235
2236		spin_unlock_irqrestore(&memcg->move_lock, flags);
2237	}
2238
2239	rcu_read_unlock();
2240}
2241
2242/**
2243 * folio_memcg_unlock - Release the binding between a folio and its memcg.
2244 * @folio: The folio.
2245 *
2246 * This releases the binding created by folio_memcg_lock().  This does
2247 * not change the accounting of this folio to its memcg, but it does
2248 * permit others to change it.
2249 */
2250void folio_memcg_unlock(struct folio *folio)
2251{
2252	__folio_memcg_unlock(folio_memcg(folio));
2253}
2254
2255struct memcg_stock_pcp {
2256	local_lock_t stock_lock;
2257	struct mem_cgroup *cached; /* this can never be the root cgroup */
2258	unsigned int nr_pages;
2259
2260#ifdef CONFIG_MEMCG_KMEM
2261	struct obj_cgroup *cached_objcg;
2262	struct pglist_data *cached_pgdat;
2263	unsigned int nr_bytes;
2264	int nr_slab_reclaimable_b;
2265	int nr_slab_unreclaimable_b;
2266#endif
2267
2268	struct work_struct work;
2269	unsigned long flags;
2270#define FLUSHING_CACHED_CHARGE	0
2271};
2272static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
2273	.stock_lock = INIT_LOCAL_LOCK(stock_lock),
2274};
2275static DEFINE_MUTEX(percpu_charge_mutex);
2276
2277#ifdef CONFIG_MEMCG_KMEM
2278static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
2279static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2280				     struct mem_cgroup *root_memcg);
2281static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages);
2282
2283#else
2284static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2285{
2286	return NULL;
2287}
2288static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2289				     struct mem_cgroup *root_memcg)
2290{
2291	return false;
2292}
2293static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2294{
2295}
2296#endif
2297
2298/**
2299 * consume_stock: Try to consume stocked charge on this cpu.
2300 * @memcg: memcg to consume from.
2301 * @nr_pages: how many pages to charge.
2302 *
2303 * The charges will only happen if @memcg matches the current cpu's memcg
2304 * stock, and at least @nr_pages are available in that stock.  Failure to
2305 * service an allocation will refill the stock.
2306 *
2307 * returns true if successful, false otherwise.
2308 */
2309static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2310{
2311	struct memcg_stock_pcp *stock;
2312	unsigned long flags;
2313	bool ret = false;
2314
2315	if (nr_pages > MEMCG_CHARGE_BATCH)
2316		return ret;
2317
2318	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2319
2320	stock = this_cpu_ptr(&memcg_stock);
2321	if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) {
2322		stock->nr_pages -= nr_pages;
2323		ret = true;
2324	}
2325
2326	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2327
2328	return ret;
2329}
2330
2331/*
2332 * Return cached stock to the page counters and reset the cached information.
2333 */
2334static void drain_stock(struct memcg_stock_pcp *stock)
2335{
2336	struct mem_cgroup *old = READ_ONCE(stock->cached);
2337
2338	if (!old)
2339		return;
2340
2341	if (stock->nr_pages) {
2342		page_counter_uncharge(&old->memory, stock->nr_pages);
2343		if (do_memsw_account())
2344			page_counter_uncharge(&old->memsw, stock->nr_pages);
2345		stock->nr_pages = 0;
2346	}
2347
2348	css_put(&old->css);
2349	WRITE_ONCE(stock->cached, NULL);
2350}
2351
2352static void drain_local_stock(struct work_struct *dummy)
2353{
2354	struct memcg_stock_pcp *stock;
2355	struct obj_cgroup *old = NULL;
2356	unsigned long flags;
2357
2358	/*
2359	 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2360	 * drain_stock races is that we always operate on local CPU stock
2361	 * here with IRQs disabled.
2362	 */
2363	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2364
2365	stock = this_cpu_ptr(&memcg_stock);
2366	old = drain_obj_stock(stock);
2367	drain_stock(stock);
2368	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2369
2370	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2371	if (old)
2372		obj_cgroup_put(old);
2373}
2374
2375/*
2376 * Cache charges (@nr_pages) in the local per-CPU area.
2377 * They will be consumed by consume_stock() later.
2378 */
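/* The caller must hold memcg_stock.stock_lock with IRQs disabled. */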
2379static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2380{
2381	struct memcg_stock_pcp *stock;
2382
2383	stock = this_cpu_ptr(&memcg_stock);
2384	if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
2385		drain_stock(stock);
2386		css_get(&memcg->css);
2387		WRITE_ONCE(stock->cached, memcg);
2388	}
2389	stock->nr_pages += nr_pages;
2390
2391	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2392		drain_stock(stock);
2393}
2394
2395static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2396{
2397	unsigned long flags;
2398
2399	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2400	__refill_stock(memcg, nr_pages);
2401	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2402}
2403
2404/*
2405 * Drains all per-CPU charge caches for given root_memcg resp. subtree
2406 * of the hierarchy under it.
2407 */
2408static void drain_all_stock(struct mem_cgroup *root_memcg)
2409{
2410	int cpu, curcpu;
2411
2412	/* If someone's already draining, avoid running more workers. */
2413	if (!mutex_trylock(&percpu_charge_mutex))
2414		return;
2415	/*
2416	 * Notify other cpus that a system-wide "drain" is running.
2417	 * We do not care about races with the cpu hotplug because cpu down
2418	 * as well as workers from this path always operate on the local
2419	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2420	 */
2421	migrate_disable();
2422	curcpu = smp_processor_id();
2423	for_each_online_cpu(cpu) {
2424		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2425		struct mem_cgroup *memcg;
2426		bool flush = false;
2427
2428		rcu_read_lock();
2429		memcg = READ_ONCE(stock->cached);
2430		if (memcg && stock->nr_pages &&
2431		    mem_cgroup_is_descendant(memcg, root_memcg))
2432			flush = true;
2433		else if (obj_stock_flush_required(stock, root_memcg))
2434			flush = true;
2435		rcu_read_unlock();
2436
2437		if (flush &&
2438		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2439			if (cpu == curcpu)
2440				drain_local_stock(&stock->work);
2441			else if (!cpu_is_isolated(cpu))
2442				schedule_work_on(cpu, &stock->work);
2443		}
2444	}
2445	migrate_enable();
2446	mutex_unlock(&percpu_charge_mutex);
2447}
2448
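/*
 * CPU hotplug callback: return the dead CPU's cached stock to the page
 * counters so that no charges are left stranded on it.
 */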
2449static int memcg_hotplug_cpu_dead(unsigned int cpu)
2450{
2451	struct memcg_stock_pcp *stock;
2452
2453	stock = &per_cpu(memcg_stock, cpu);
2454	drain_stock(stock);
2455
2456	return 0;
2457}
2458
2459static unsigned long reclaim_high(struct mem_cgroup *memcg,
2460				  unsigned int nr_pages,
2461				  gfp_t gfp_mask)
2462{
2463	unsigned long nr_reclaimed = 0;
2464
2465	do {
2466		unsigned long pflags;
2467
2468		if (page_counter_read(&memcg->memory) <=
2469		    READ_ONCE(memcg->memory.high))
2470			continue;
2471
2472		memcg_memory_event(memcg, MEMCG_HIGH);
2473
2474		psi_memstall_enter(&pflags);
2475		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2476							gfp_mask,
2477							MEMCG_RECLAIM_MAY_SWAP);
2478		psi_memstall_leave(&pflags);
2479	} while ((memcg = parent_mem_cgroup(memcg)) &&
2480		 !mem_cgroup_is_root(memcg));
2481
2482	return nr_reclaimed;
2483}
2484
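/*
 * Worker for memcg->high_work: performs memory.high reclaim on behalf of
 * charge paths that could not reclaim directly, e.g. charges raised from
 * interrupt context (see the done_restock path in try_charge_memcg()).
 */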
2485static void high_work_func(struct work_struct *work)
2486{
2487	struct mem_cgroup *memcg;
2488
2489	memcg = container_of(work, struct mem_cgroup, high_work);
2490	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2491}
2492
2493/*
2494 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2495 * enough to still cause a significant slowdown in most cases, while still
2496 * allowing diagnostics and tracing to proceed without becoming stuck.
2497 */
2498#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2499
2500/*
2501 * When calculating the delay, we use these either side of the exponentiation to
2502 * maintain precision and scale to a reasonable number of jiffies (see the table
2503 * below.
2504 *
2505 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2506 *   overage ratio to a delay.
2507 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2508 *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2509 *   to produce a reasonable delay curve.
2510 *
2511 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2512 * reasonable delay curve compared to precision-adjusted overage, not
2513 * penalising heavily at first, but still making sure that growth beyond the
2514 * limit penalises misbehaviour cgroups by slowing them down exponentially. For
2515 * example, with a high of 100 megabytes:
2516 *
2517 *  +-------+------------------------+
2518 *  | usage | time to allocate in ms |
2519 *  +-------+------------------------+
2520 *  | 100M  |                      0 |
2521 *  | 101M  |                      6 |
2522 *  | 102M  |                     25 |
2523 *  | 103M  |                     57 |
2524 *  | 104M  |                    102 |
2525 *  | 105M  |                    159 |
2526 *  | 106M  |                    230 |
2527 *  | 107M  |                    313 |
2528 *  | 108M  |                    409 |
2529 *  | 109M  |                    518 |
2530 *  | 110M  |                    639 |
2531 *  | 111M  |                    774 |
2532 *  | 112M  |                    921 |
2533 *  | 113M  |                   1081 |
2534 *  | 114M  |                   1254 |
2535 *  | 115M  |                   1439 |
2536 *  | 116M  |                   1638 |
2537 *  | 117M  |                   1849 |
2538 *  | 118M  |                   2000 |
2539 *  | 119M  |                   2000 |
2540 *  | 120M  |                   2000 |
2541 *  +-------+------------------------+
2542 */
2543#define MEMCG_DELAY_PRECISION_SHIFT 20
2544#define MEMCG_DELAY_SCALING_SHIFT 14
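
/*
 * Worked example (HZ == 1000): at usage = 101M and high = 100M,
 * calculate_overage() below yields ((1M << 20) / 100M) ~= 10485, and
 * calculate_high_delay() maps that to
 * (10485 * 10485 * HZ) >> (20 + 14) ~= 6 jiffies ~= 6ms for a nominal
 * MEMCG_CHARGE_BATCH-sized charge, matching the table above.
 */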
2545
2546static u64 calculate_overage(unsigned long usage, unsigned long high)
2547{
2548	u64 overage;
2549
2550	if (usage <= high)
2551		return 0;
2552
2553	/*
2554	 * Prevent division by 0 in overage calculation by acting as if
2555	 * it was a threshold of 1 page
2556	 */
2557	high = max(high, 1UL);
2558
2559	overage = usage - high;
2560	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2561	return div64_u64(overage, high);
2562}
2563
2564static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2565{
2566	u64 overage, max_overage = 0;
2567
2568	do {
2569		overage = calculate_overage(page_counter_read(&memcg->memory),
2570					    READ_ONCE(memcg->memory.high));
2571		max_overage = max(overage, max_overage);
2572	} while ((memcg = parent_mem_cgroup(memcg)) &&
2573		 !mem_cgroup_is_root(memcg));
2574
2575	return max_overage;
2576}
2577
2578static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2579{
2580	u64 overage, max_overage = 0;
2581
2582	do {
2583		overage = calculate_overage(page_counter_read(&memcg->swap),
2584					    READ_ONCE(memcg->swap.high));
2585		if (overage)
2586			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2587		max_overage = max(overage, max_overage);
2588	} while ((memcg = parent_mem_cgroup(memcg)) &&
2589		 !mem_cgroup_is_root(memcg));
2590
2591	return max_overage;
2592}
2593
2594/*
2595 * Get the number of jiffies that we should penalise a mischievous cgroup which
2596 * is exceeding its memory.high by checking both it and its ancestors.
2597 */
2598static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2599					  unsigned int nr_pages,
2600					  u64 max_overage)
2601{
2602	unsigned long penalty_jiffies;
2603
2604	if (!max_overage)
2605		return 0;
2606
2607	/*
2608	 * We use overage compared to memory.high to calculate the number of
2609	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2610	 * fairly lenient on small overages, and increasingly harsh when the
2611	 * memcg in question makes it clear that it has no intention of stopping
2612	 * its crazy behaviour, so we exponentially increase the delay based on
2613	 * overage amount.
2614	 */
2615	penalty_jiffies = max_overage * max_overage * HZ;
2616	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2617	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2618
2619	/*
2620	 * Factor in the task's own contribution to the overage, such that four
2621	 * N-sized allocations are throttled approximately the same as one
2622	 * 4N-sized allocation.
2623	 *
2624	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2625	 * larger the current charge batch is than that.
2626	 */
2627	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2628}
2629
2630/*
2631 * Reclaims memory over the high limit. Called directly from
2632 * try_charge() (context permitting), as well as from the userland
2633 * return path where reclaim is always able to block.
2634 */
2635void mem_cgroup_handle_over_high(gfp_t gfp_mask)
2636{
2637	unsigned long penalty_jiffies;
2638	unsigned long pflags;
2639	unsigned long nr_reclaimed;
2640	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2641	int nr_retries = MAX_RECLAIM_RETRIES;
2642	struct mem_cgroup *memcg;
2643	bool in_retry = false;
2644
2645	if (likely(!nr_pages))
2646		return;
2647
2648	memcg = get_mem_cgroup_from_mm(current->mm);
2649	current->memcg_nr_pages_over_high = 0;
2650
2651retry_reclaim:
2652	/*
2653	 * Bail if the task is already exiting. Unlike memory.max,
2654	 * memory.high enforcement isn't as strict, and there is no
2655	 * OOM killer involved, which means the excess could already
2656	 * be much bigger (and still growing) than it could for
2657	 * memory.max; the dying task could get stuck in fruitless
2658	 * reclaim for a long time, which isn't desirable.
2659	 */
2660	if (task_is_dying())
2661		goto out;
2662
2663	/*
2664	 * The allocating task should reclaim at least the batch size, but for
2665	 * subsequent retries we only want to do what's necessary to prevent oom
2666	 * or breaching resource isolation.
2667	 *
2668	 * This is distinct from memory.max or page allocator behaviour because
2669	 * memory.high is currently batched, whereas memory.max and the page
2670	 * allocator run every time an allocation is made.
2671	 */
2672	nr_reclaimed = reclaim_high(memcg,
2673				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2674				    gfp_mask);
2675
2676	/*
2677	 * memory.high is breached and reclaim is unable to keep up. Throttle
2678	 * allocators proactively to slow down excessive growth.
2679	 */
2680	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2681					       mem_find_max_overage(memcg));
2682
2683	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2684						swap_find_max_overage(memcg));
2685
2686	/*
2687	 * Clamp the max delay per usermode return so as to still keep the
2688	 * application moving forwards and also permit diagnostics, albeit
2689	 * extremely slowly.
2690	 */
2691	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2692
2693	/*
2694	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2695	 * that it's not even worth doing, in an attempt to be nice to those who
2696	 * go only a small amount over their memory.high value and maybe haven't
2697	 * been aggressively reclaimed enough yet.
2698	 */
2699	if (penalty_jiffies <= HZ / 100)
2700		goto out;
2701
2702	/*
2703	 * If reclaim is making forward progress but we're still over
2704	 * memory.high, we want to encourage that rather than doing allocator
2705	 * throttling.
2706	 */
2707	if (nr_reclaimed || nr_retries--) {
2708		in_retry = true;
2709		goto retry_reclaim;
2710	}
2711
2712	/*
2713	 * Reclaim didn't manage to push usage below the limit, slow
2714	 * this allocating task down.
2715	 *
2716	 * If we exit early, we're guaranteed to die (since
2717	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2718	 * need to account for any ill-begotten jiffies to pay them off later.
2719	 */
2720	psi_memstall_enter(&pflags);
2721	schedule_timeout_killable(penalty_jiffies);
2722	psi_memstall_leave(&pflags);
2723
2724out:
2725	css_put(&memcg->css);
2726}
2727
2728static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2729			unsigned int nr_pages)
2730{
2731	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2732	int nr_retries = MAX_RECLAIM_RETRIES;
2733	struct mem_cgroup *mem_over_limit;
2734	struct page_counter *counter;
2735	unsigned long nr_reclaimed;
2736	bool passed_oom = false;
2737	unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2738	bool drained = false;
2739	bool raised_max_event = false;
2740	unsigned long pflags;
2741
2742retry:
2743	if (consume_stock(memcg, nr_pages))
2744		return 0;
2745
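	/*
	 * With legacy memsw accounting, charge the memory+swap counter
	 * first. If memsw is what fails, swapping cannot lower the
	 * memory+swap footprint, so MEMCG_RECLAIM_MAY_SWAP is cleared
	 * below; if the plain memory charge fails, the memsw charge is
	 * rolled back before reclaiming.
	 */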
2746	if (!do_memsw_account() ||
2747	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2748		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2749			goto done_restock;
2750		if (do_memsw_account())
2751			page_counter_uncharge(&memcg->memsw, batch);
2752		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2753	} else {
2754		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2755		reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2756	}
2757
2758	if (batch > nr_pages) {
2759		batch = nr_pages;
2760		goto retry;
2761	}
2762
2763	/*
2764	 * Prevent unbounded recursion when reclaim operations need to
2765	 * allocate memory. This might exceed the limits temporarily,
2766	 * but we prefer facilitating memory reclaim and getting back
2767	 * under the limit over triggering OOM kills in these cases.
2768	 */
2769	if (unlikely(current->flags & PF_MEMALLOC))
2770		goto force;
2771
2772	if (unlikely(task_in_memcg_oom(current)))
2773		goto nomem;
2774
2775	if (!gfpflags_allow_blocking(gfp_mask))
2776		goto nomem;
2777
2778	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2779	raised_max_event = true;
2780
2781	psi_memstall_enter(&pflags);
2782	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2783						    gfp_mask, reclaim_options);
2784	psi_memstall_leave(&pflags);
2785
2786	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2787		goto retry;
2788
2789	if (!drained) {
2790		drain_all_stock(mem_over_limit);
2791		drained = true;
2792		goto retry;
2793	}
2794
2795	if (gfp_mask & __GFP_NORETRY)
2796		goto nomem;
2797	/*
2798	 * Even though the limit is exceeded at this point, reclaim
2799	 * may have been able to free some pages.  Retry the charge
2800	 * before killing the task.
2801	 *
2802	 * Only for regular pages, though: huge pages are rather
2803	 * unlikely to succeed so close to the limit, and we fall back
2804	 * to regular pages anyway in case of failure.
2805	 */
2806	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2807		goto retry;
2808	/*
2809	 * During task move, charges can be double-counted. So it's
2810	 * better to wait until the end of the task move if one is in progress.
2811	 */
2812	if (mem_cgroup_wait_acct_move(mem_over_limit))
2813		goto retry;
2814
2815	if (nr_retries--)
2816		goto retry;
2817
2818	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2819		goto nomem;
2820
2821	/* Avoid endless loop for tasks bypassed by the oom killer */
2822	if (passed_oom && task_is_dying())
2823		goto nomem;
2824
2825	/*
2826	 * Keep retrying as long as the memcg OOM killer is able to make
2827	 * forward progress, or bypass the charge if the OOM killer
2828	 * couldn't make any progress.
2829	 */
2830	if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2831			   get_order(nr_pages * PAGE_SIZE))) {
2832		passed_oom = true;
2833		nr_retries = MAX_RECLAIM_RETRIES;
2834		goto retry;
2835	}
2836nomem:
2837	/*
2838	 * Memcg doesn't have a dedicated reserve for atomic
2839	 * allocations. But like the global atomic pool, we need to
2840	 * put the burden of reclaim on regular allocation requests
2841	 * and let these go through as privileged allocations.
2842	 */
2843	if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2844		return -ENOMEM;
2845force:
2846	/*
2847	 * If the allocation has to be enforced, don't forget to raise
2848	 * a MEMCG_MAX event.
2849	 */
2850	if (!raised_max_event)
2851		memcg_memory_event(mem_over_limit, MEMCG_MAX);
2852
2853	/*
2854	 * The allocation either can't fail or will lead to more memory
2855	 * being freed very soon.  Allow memory usage to go over the limit
2856	 * temporarily by force charging it.
2857	 */
2858	page_counter_charge(&memcg->memory, nr_pages);
2859	if (do_memsw_account())
2860		page_counter_charge(&memcg->memsw, nr_pages);
2861
2862	return 0;
2863
2864done_restock:
2865	if (batch > nr_pages)
2866		refill_stock(memcg, batch - nr_pages);
2867
2868	/*
2869	 * If the hierarchy is above the normal consumption range, schedule
2870	 * reclaim on returning to userland.  We can perform reclaim here
2871	 * if __GFP_RECLAIM is set, but let's always punt for simplicity and so that
2872	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2873	 * not recorded as it most likely matches current's and won't
2874	 * change in the meantime.  As high limit is checked again before
2875	 * reclaim, the cost of mismatch is negligible.
2876	 */
2877	do {
2878		bool mem_high, swap_high;
2879
2880		mem_high = page_counter_read(&memcg->memory) >
2881			READ_ONCE(memcg->memory.high);
2882		swap_high = page_counter_read(&memcg->swap) >
2883			READ_ONCE(memcg->swap.high);
2884
2885		/* Don't bother a random interrupted task */
2886		if (!in_task()) {
2887			if (mem_high) {
2888				schedule_work(&memcg->high_work);
2889				break;
2890			}
2891			continue;
2892		}
2893
2894		if (mem_high || swap_high) {
2895			/*
2896			 * The allocating tasks in this cgroup will need to do
2897			 * reclaim or be throttled to prevent further growth
2898			 * of the memory or swap footprints.
2899			 *
2900			 * Target some best-effort fairness between the tasks,
2901			 * and distribute reclaim work and delay penalties
2902			 * based on how much each task is actually allocating.
2903			 */
2904			current->memcg_nr_pages_over_high += batch;
2905			set_notify_resume(current);
2906			break;
2907		}
2908	} while ((memcg = parent_mem_cgroup(memcg)));
2909
2910	/*
2911	 * Reclaim is set up above to be called from the userland
2912	 * return path. But also attempt synchronous reclaim to avoid
2913	 * excessive overrun while the task is still inside the
2914	 * kernel. If this is successful, the return path will see it
2915	 * when it rechecks the overage and simply bail out.
2916	 */
2917	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2918	    !(current->flags & PF_MEMALLOC) &&
2919	    gfpflags_allow_blocking(gfp_mask))
2920		mem_cgroup_handle_over_high(gfp_mask);
2921	return 0;
2922}
2923
2924static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2925			     unsigned int nr_pages)
2926{
2927	if (mem_cgroup_is_root(memcg))
2928		return 0;
2929
2930	return try_charge_memcg(memcg, gfp_mask, nr_pages);
2931}
2932
2933/**
2934 * mem_cgroup_cancel_charge() - cancel an uncommitted try_charge() call.
2935 * @memcg: memcg previously charged.
2936 * @nr_pages: number of pages previously charged.
2937 */
2938void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2939{
2940	if (mem_cgroup_is_root(memcg))
2941		return;
2942
2943	page_counter_uncharge(&memcg->memory, nr_pages);
2944	if (do_memsw_account())
2945		page_counter_uncharge(&memcg->memsw, nr_pages);
2946}
2947
2948static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2949{
2950	VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
2951	/*
2952	 * Any of the following ensures page's memcg stability:
2953	 *
2954	 * - the page lock
2955	 * - LRU isolation
2956	 * - folio_memcg_lock()
2957	 * - exclusive reference
2958	 * - mem_cgroup_trylock_pages()
2959	 */
2960	folio->memcg_data = (unsigned long)memcg;
2961}
2962
2963/**
2964 * mem_cgroup_commit_charge - commit a previously successful try_charge().
2965 * @folio: folio to commit the charge to.
2966 * @memcg: memcg previously charged.
2967 */
2968void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2969{
2970	css_get(&memcg->css);
2971	commit_charge(folio, memcg);
2972
2973	local_irq_disable();
2974	mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
2975	memcg_check_events(memcg, folio_nid(folio));
2976	local_irq_enable();
2977}
2978
2979#ifdef CONFIG_MEMCG_KMEM
2980/*
2981 * The allocated objcg pointers array is not accounted directly.
2982 * Moreover, it should not come from a DMA buffer and is not readily
2983 * reclaimable. So those GFP bits should be masked off.
2984 */
2985#define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
2986				 __GFP_ACCOUNT | __GFP_NOFAIL)
2987
2988/*
2989 * mod_objcg_mlstate() may be called with irq enabled, so
2990 * mod_memcg_lruvec_state() should be used.
2991 */
2992static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2993				     struct pglist_data *pgdat,
2994				     enum node_stat_item idx, int nr)
2995{
2996	struct mem_cgroup *memcg;
2997	struct lruvec *lruvec;
2998
2999	rcu_read_lock();
3000	memcg = obj_cgroup_memcg(objcg);
3001	lruvec = mem_cgroup_lruvec(memcg, pgdat);
3002	mod_memcg_lruvec_state(lruvec, idx, nr);
3003	rcu_read_unlock();
3004}
3005
3006int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
3007				 gfp_t gfp, bool new_slab)
3008{
3009	unsigned int objects = objs_per_slab(s, slab);
3010	unsigned long memcg_data;
3011	void *vec;
3012
3013	gfp &= ~OBJCGS_CLEAR_MASK;
3014	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
3015			   slab_nid(slab));
3016	if (!vec)
3017		return -ENOMEM;
3018
3019	memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
3020	if (new_slab) {
3021		/*
3022		 * If the slab is brand new and nobody can yet access its
3023		 * memcg_data, no synchronization is required and memcg_data can
3024		 * be simply assigned.
3025		 */
3026		slab->memcg_data = memcg_data;
3027	} else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) {
3028		/*
3029		 * If the slab is already in use, somebody can allocate and
3030		 * assign obj_cgroups in parallel. In this case the existing
3031		 * objcg vector should be reused.
3032		 */
3033		kfree(vec);
3034		return 0;
3035	}
3036
3037	kmemleak_not_leak(vec);
3038	return 0;
3039}
3040
3041static __always_inline
3042struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
3043{
3044	/*
3045	 * Slab objects are accounted individually, not per-page.
3046	 * Memcg membership data for each individual object is saved in
3047	 * slab->memcg_data.
3048	 */
3049	if (folio_test_slab(folio)) {
3050		struct obj_cgroup **objcgs;
3051		struct slab *slab;
3052		unsigned int off;
3053
3054		slab = folio_slab(folio);
3055		objcgs = slab_objcgs(slab);
3056		if (!objcgs)
3057			return NULL;
3058
3059		off = obj_to_index(slab->slab_cache, slab, p);
3060		if (objcgs[off])
3061			return obj_cgroup_memcg(objcgs[off]);
3062
3063		return NULL;
3064	}
3065
3066	/*
3067	 * folio_memcg_check() is used here, because in theory we can encounter
3068	 * a folio where the slab flag has been cleared already, but
3069 * slab->memcg_data has not been freed yet.
3070	 * folio_memcg_check() will guarantee that a proper memory
3071	 * cgroup pointer or NULL will be returned.
3072	 */
3073	return folio_memcg_check(folio);
3074}
3075
3076/*
3077 * Returns a pointer to the memory cgroup to which the kernel object is charged.
3078 *
3079 * A passed kernel object can be a slab object, vmalloc object or a generic
3080 * kernel page, so different mechanisms for getting the memory cgroup pointer
3081 * should be used.
3082 *
3083 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
3084 * cannot know for sure how the kernel object is implemented.
3085 * mem_cgroup_from_obj() can be safely used in such cases.
3086 *
3087 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
3088 * cgroup_mutex, etc.
3089 */
3090struct mem_cgroup *mem_cgroup_from_obj(void *p)
3091{
3092	struct folio *folio;
3093
3094	if (mem_cgroup_disabled())
3095		return NULL;
3096
3097	if (unlikely(is_vmalloc_addr(p)))
3098		folio = page_folio(vmalloc_to_page(p));
3099	else
3100		folio = virt_to_folio(p);
3101
3102	return mem_cgroup_from_obj_folio(folio, p);
3103}
3104
3105/*
3106 * Returns a pointer to the memory cgroup to which the kernel object is charged.
3107 * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects,
3108 * allocated using vmalloc().
3109 *
3110 * A passed kernel object must be a slab object or a generic kernel page.
3111 *
3112 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
3113 * cgroup_mutex, etc.
3114 */
3115struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
3116{
3117	if (mem_cgroup_disabled())
3118		return NULL;
3119
3120	return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
3121}
3122
3123static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
3124{
3125	struct obj_cgroup *objcg = NULL;
3126
3127	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
3128		objcg = rcu_dereference(memcg->objcg);
3129		if (likely(objcg && obj_cgroup_tryget(objcg)))
3130			break;
3131		objcg = NULL;
3132	}
3133	return objcg;
3134}
3135
3136static struct obj_cgroup *current_objcg_update(void)
3137{
3138	struct mem_cgroup *memcg;
3139	struct obj_cgroup *old, *objcg = NULL;
3140
3141	do {
3142		/* Atomically drop the update bit. */
3143		old = xchg(&current->objcg, NULL);
3144		if (old) {
3145			old = (struct obj_cgroup *)
3146				((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
3147			if (old)
3148				obj_cgroup_put(old);
3149
3150			old = NULL;
3151		}
3152
3153		/* If new objcg is NULL, no reason for the second atomic update. */
3154		if (!current->mm || (current->flags & PF_KTHREAD))
3155			return NULL;
3156
3157		/*
3158		 * Release the objcg pointer from the previous iteration,
3159		 * if try_cmpxchg() below fails.
3160		 */
3161		if (unlikely(objcg)) {
3162			obj_cgroup_put(objcg);
3163			objcg = NULL;
3164		}
3165
3166		/*
3167		 * Obtain the new objcg pointer. The current task can be
3168		 * asynchronously moved to another memcg and the previous
3169		 * memcg can be offlined. So let's get the memcg pointer
3170		 * and try get a reference to objcg under a rcu read lock.
3171		 */
3172
3173		rcu_read_lock();
3174		memcg = mem_cgroup_from_task(current);
3175		objcg = __get_obj_cgroup_from_memcg(memcg);
3176		rcu_read_unlock();
3177
3178		/*
3179		 * Try to set up a new objcg pointer atomically. If it
3180		 * fails, it means the update flag was set concurrently, so
3181		 * the whole procedure should be repeated.
3182		 */
3183	} while (!try_cmpxchg(&current->objcg, &old, objcg));
3184
3185	return objcg;
3186}
3187
3188__always_inline struct obj_cgroup *current_obj_cgroup(void)
3189{
3190	struct mem_cgroup *memcg;
3191	struct obj_cgroup *objcg;
3192
3193	if (in_task()) {
3194		memcg = current->active_memcg;
3195		if (unlikely(memcg))
3196			goto from_memcg;
3197
3198		objcg = READ_ONCE(current->objcg);
3199		if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
3200			objcg = current_objcg_update();
3201		/*
3202		 * The objcg reference is kept by the task, so it's safe
3203		 * for the current task to use the objcg.
3204		 */
3205		return objcg;
3206	}
3207
3208	memcg = this_cpu_read(int_active_memcg);
3209	if (unlikely(memcg))
3210		goto from_memcg;
3211
3212	return NULL;
3213
3214from_memcg:
3215	objcg = NULL;
3216	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
3217		/*
3218		 * Memcg pointer is protected by scope (see set_active_memcg())
3219		 * and is pinning the corresponding objcg, so objcg can't go
3220		 * away and can be used within the scope without any additional
3221		 * protection.
3222		 */
3223		objcg = rcu_dereference_check(memcg->objcg, 1);
3224		if (likely(objcg))
3225			break;
3226	}
3227
3228	return objcg;
3229}
3230
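/*
 * Return the objcg that @folio is charged to, with a reference taken,
 * or NULL. Kmem folios carry their objcg directly; for other folios,
 * walk from the folio's memcg towards the root until a live objcg can
 * be acquired.
 */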
3231struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
3232{
3233	struct obj_cgroup *objcg;
3234
3235	if (!memcg_kmem_online())
3236		return NULL;
3237
3238	if (folio_memcg_kmem(folio)) {
3239		objcg = __folio_objcg(folio);
3240		obj_cgroup_get(objcg);
3241	} else {
3242		struct mem_cgroup *memcg;
3243
3244		rcu_read_lock();
3245		memcg = __folio_memcg(folio);
3246		if (memcg)
3247			objcg = __get_obj_cgroup_from_memcg(memcg);
3248		else
3249			objcg = NULL;
3250		rcu_read_unlock();
3251	}
3252	return objcg;
3253}
3254
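/*
 * Adjust the MEMCG_KMEM vmstat counter by @nr_pages; negative values
 * uncharge. On the legacy hierarchy the dedicated kmem page counter is
 * charged or uncharged as well.
 */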
3255static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
3256{
3257	mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
3258	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
3259		if (nr_pages > 0)
3260			page_counter_charge(&memcg->kmem, nr_pages);
3261		else
3262			page_counter_uncharge(&memcg->kmem, -nr_pages);
3263	}
3264}
3265
3266
3267/*
3268 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
3269 * @objcg: object cgroup to uncharge
3270 * @nr_pages: number of pages to uncharge
3271 */
3272static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
3273				      unsigned int nr_pages)
3274{
3275	struct mem_cgroup *memcg;
3276
3277	memcg = get_mem_cgroup_from_objcg(objcg);
3278
3279	memcg_account_kmem(memcg, -nr_pages);
3280	refill_stock(memcg, nr_pages);
3281
3282	css_put(&memcg->css);
3283}
3284
3285/*
3286 * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
3287 * @objcg: object cgroup to charge
3288 * @gfp: reclaim mode
3289 * @nr_pages: number of pages to charge
3290 *
3291 * Returns 0 on success, an error code on failure.
3292 */
3293static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3294				   unsigned int nr_pages)
3295{
3296	struct mem_cgroup *memcg;
3297	int ret;
3298
3299	memcg = get_mem_cgroup_from_objcg(objcg);
3300
3301	ret = try_charge_memcg(memcg, gfp, nr_pages);
3302	if (ret)
3303		goto out;
3304
3305	memcg_account_kmem(memcg, nr_pages);
3306out:
3307	css_put(&memcg->css);
3308
3309	return ret;
3310}
3311
3312/**
3313 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3314 * @page: page to charge
3315 * @gfp: reclaim mode
3316 * @order: allocation order
3317 *
3318 * Returns 0 on success, an error code on failure.
3319 */
3320int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3321{
3322	struct obj_cgroup *objcg;
3323	int ret = 0;
3324
3325	objcg = current_obj_cgroup();
3326	if (objcg) {
3327		ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
3328		if (!ret) {
3329			obj_cgroup_get(objcg);
3330			page->memcg_data = (unsigned long)objcg |
3331				MEMCG_DATA_KMEM;
3332			return 0;
3333		}
3334	}
3335	return ret;
3336}
3337
3338/**
3339 * __memcg_kmem_uncharge_page: uncharge a kmem page
3340 * @page: page to uncharge
3341 * @order: allocation order
3342 */
3343void __memcg_kmem_uncharge_page(struct page *page, int order)
3344{
3345	struct folio *folio = page_folio(page);
3346	struct obj_cgroup *objcg;
3347	unsigned int nr_pages = 1 << order;
3348
3349	if (!folio_memcg_kmem(folio))
3350		return;
3351
3352	objcg = __folio_objcg(folio);
3353	obj_cgroup_uncharge_pages(objcg, nr_pages);
3354	folio->memcg_data = 0;
3355	obj_cgroup_put(objcg);
3356}
3357
3358void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3359		     enum node_stat_item idx, int nr)
3360{
3361	struct memcg_stock_pcp *stock;
3362	struct obj_cgroup *old = NULL;
3363	unsigned long flags;
3364	int *bytes;
3365
3366	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3367	stock = this_cpu_ptr(&memcg_stock);
3368
3369	/*
3370	 * Save vmstat data in stock and skip vmstat array update unless
3371	 * accumulating over a page of vmstat data or when pgdat or idx
3372	 * changes.
3373	 */
3374	if (READ_ONCE(stock->cached_objcg) != objcg) {
3375		old = drain_obj_stock(stock);
3376		obj_cgroup_get(objcg);
3377		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3378				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3379		WRITE_ONCE(stock->cached_objcg, objcg);
3380		stock->cached_pgdat = pgdat;
3381	} else if (stock->cached_pgdat != pgdat) {
3382		/* Flush the existing cached vmstat data */
3383		struct pglist_data *oldpg = stock->cached_pgdat;
3384
3385		if (stock->nr_slab_reclaimable_b) {
3386			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3387					  stock->nr_slab_reclaimable_b);
3388			stock->nr_slab_reclaimable_b = 0;
3389		}
3390		if (stock->nr_slab_unreclaimable_b) {
3391			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3392					  stock->nr_slab_unreclaimable_b);
3393			stock->nr_slab_unreclaimable_b = 0;
3394		}
3395		stock->cached_pgdat = pgdat;
3396	}
3397
3398	bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3399					       : &stock->nr_slab_unreclaimable_b;
3400	/*
3401	 * Even for large objects >= PAGE_SIZE, the vmstat data will still be
3402	 * cached locally at least once before pushing it out.
3403	 */
3404	if (!*bytes) {
3405		*bytes = nr;
3406		nr = 0;
3407	} else {
3408		*bytes += nr;
3409		if (abs(*bytes) > PAGE_SIZE) {
3410			nr = *bytes;
3411			*bytes = 0;
3412		} else {
3413			nr = 0;
3414		}
3415	}
3416	if (nr)
3417		mod_objcg_mlstate(objcg, pgdat, idx, nr);
3418
3419	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3420	if (old)
3421		obj_cgroup_put(old);
3422}
3423
3424static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3425{
3426	struct memcg_stock_pcp *stock;
3427	unsigned long flags;
3428	bool ret = false;
3429
3430	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3431
3432	stock = this_cpu_ptr(&memcg_stock);
3433	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
3434		stock->nr_bytes -= nr_bytes;
3435		ret = true;
3436	}
3437
3438	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3439
3440	return ret;
3441}
3442
3443static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
3444{
3445	struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
3446
3447	if (!old)
3448		return NULL;
3449
3450	if (stock->nr_bytes) {
3451		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3452		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3453
3454		if (nr_pages) {
3455			struct mem_cgroup *memcg;
3456
3457			memcg = get_mem_cgroup_from_objcg(old);
3458
3459			memcg_account_kmem(memcg, -nr_pages);
3460			__refill_stock(memcg, nr_pages);
3461
3462			css_put(&memcg->css);
3463		}
3464
3465		/*
3466		 * The leftover is flushed to the centralized per-memcg value.
3467		 * On the next attempt to refill obj stock it will be moved
3468		 * to a per-cpu stock (probably on another CPU), see
3469		 * refill_obj_stock().
3470		 *
3471		 * How often it's flushed is a trade-off between the memory
3472		 * limit enforcement accuracy and potential CPU contention,
3473		 * so it might be changed in the future.
3474		 */
3475		atomic_add(nr_bytes, &old->nr_charged_bytes);
3476		stock->nr_bytes = 0;
3477	}
3478
3479	/*
3480	 * Flush the vmstat data in current stock
3481	 */
3482	if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3483		if (stock->nr_slab_reclaimable_b) {
3484			mod_objcg_mlstate(old, stock->cached_pgdat,
3485					  NR_SLAB_RECLAIMABLE_B,
3486					  stock->nr_slab_reclaimable_b);
3487			stock->nr_slab_reclaimable_b = 0;
3488		}
3489		if (stock->nr_slab_unreclaimable_b) {
3490			mod_objcg_mlstate(old, stock->cached_pgdat,
3491					  NR_SLAB_UNRECLAIMABLE_B,
3492					  stock->nr_slab_unreclaimable_b);
3493			stock->nr_slab_unreclaimable_b = 0;
3494		}
3495		stock->cached_pgdat = NULL;
3496	}
3497
3498	WRITE_ONCE(stock->cached_objcg, NULL);
3499	/*
3500	 * The `old' objcg needs to be released by the caller via
3501	 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
3502	 */
3503	return old;
3504}
3505
3506static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3507				     struct mem_cgroup *root_memcg)
3508{
3509	struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
3510	struct mem_cgroup *memcg;
3511
3512	if (objcg) {
3513		memcg = obj_cgroup_memcg(objcg);
3514		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3515			return true;
3516	}
3517
3518	return false;
3519}
3520
3521static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3522			     bool allow_uncharge)
3523{
3524	struct memcg_stock_pcp *stock;
3525	struct obj_cgroup *old = NULL;
3526	unsigned long flags;
3527	unsigned int nr_pages = 0;
3528
3529	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3530
3531	stock = this_cpu_ptr(&memcg_stock);
3532	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
3533		old = drain_obj_stock(stock);
3534		obj_cgroup_get(objcg);
3535		WRITE_ONCE(stock->cached_objcg, objcg);
3536		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3537				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3538		allow_uncharge = true;	/* Allow uncharge when objcg changes */
3539	}
3540	stock->nr_bytes += nr_bytes;
3541
3542	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3543		nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3544		stock->nr_bytes &= (PAGE_SIZE - 1);
3545	}
3546
3547	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3548	if (old)
3549		obj_cgroup_put(old);
3550
3551	if (nr_pages)
3552		obj_cgroup_uncharge_pages(objcg, nr_pages);
3553}
3554
3555int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3556{
3557	unsigned int nr_pages, nr_bytes;
3558	int ret;
3559
3560	if (consume_obj_stock(objcg, size))
3561		return 0;
3562
3563	/*
3564	 * In theory, objcg->nr_charged_bytes can have enough
3565	 * pre-charged bytes to satisfy the allocation. However,
3566	 * flushing objcg->nr_charged_bytes requires two atomic
3567	 * operations, and objcg->nr_charged_bytes can't be big.
3568	 * The shared objcg->nr_charged_bytes can also become a
3569	 * performance bottleneck if all tasks of the same memcg are
3570	 * trying to update it. So it's better to ignore it and try
3571	 * grab some new pages. The stock's nr_bytes will be flushed to
3572	 * objcg->nr_charged_bytes later on when objcg changes.
3573	 *
3574	 * The stock's nr_bytes may contain enough pre-charged bytes
3575	 * to allow one less page to be charged, but we can't rely
3576	 * on the pre-charged bytes not being changed outside of
3577	 * consume_obj_stock() or refill_obj_stock(). So ignore those
3578	 * pre-charged bytes as well when charging pages. To avoid a
3579	 * page uncharge right after a page charge, we set the
3580	 * allow_uncharge flag to false when calling refill_obj_stock()
3581	 * to temporarily allow the pre-charged bytes to exceed the page
3582	 * size limit. The maximum reachable value of the pre-charged
3583	 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3584	 * race.
3585	 */
3586	nr_pages = size >> PAGE_SHIFT;
3587	nr_bytes = size & (PAGE_SIZE - 1);
3588
3589	if (nr_bytes)
3590		nr_pages += 1;
3591
3592	ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3593	if (!ret && nr_bytes)
3594		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3595
3596	return ret;
3597}
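
/*
 * Worked example (illustrative sketch, not part of the source): with
 * PAGE_SIZE == 4096 and an empty stock, charging a 700-byte object
 * twice behaves as follows:
 *
 *	obj_cgroup_charge(objcg, GFP_KERNEL, 700);
 *		// consume_obj_stock() fails (stock empty)
 *		// nr_pages = 700 >> PAGE_SHIFT = 0, nr_bytes = 700
 *		// -> obj_cgroup_charge_pages() charges one full page,
 *		//    then refill_obj_stock(objcg, 4096 - 700 = 3396, false)
 *	obj_cgroup_charge(objcg, GFP_KERNEL, 700);
 *		// consume_obj_stock() succeeds: stock 3396 -> 2696
 *
 * Sub-page charges are thus served from the per-cpu stock most of the
 * time, amortizing page_counter updates across many allocations.
 */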
3598
3599void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3600{
3601	refill_obj_stock(objcg, size, true);
3602}
3603
3604#endif /* CONFIG_MEMCG_KMEM */
3605
3606/*
3607 * Because page_memcg(head) is not set on tails, set it now.
3608 */
3609void split_page_memcg(struct page *head, unsigned int nr)
3610{
3611	struct folio *folio = page_folio(head);
3612	struct mem_cgroup *memcg = folio_memcg(folio);
3613	int i;
3614
3615	if (mem_cgroup_disabled() || !memcg)
3616		return;
3617
3618	for (i = 1; i < nr; i++)
3619		folio_page(folio, i)->memcg_data = folio->memcg_data;
3620
3621	if (folio_memcg_kmem(folio))
3622		obj_cgroup_get_many(__folio_objcg(folio), nr - 1);
3623	else
3624		css_get_many(&memcg->css, nr - 1);
3625}
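
/*
 * Illustrative call (not in the source): when splitting an order-9
 * THP, e.g.
 *
 *	split_page_memcg(head, 512);
 *
 * tail pages 1..511 get head's memcg_data, and 511 extra references
 * are taken (objcg refs for kmem folios, css refs otherwise) so each
 * base page can later be uncharged independently.
 */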
3626
3627#ifdef CONFIG_SWAP
3628/**
3629 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3630 * @entry: swap entry to be moved
3631 * @from:  mem_cgroup which the entry is moved from
3632 * @to:  mem_cgroup which the entry is moved to
3633 *
3634 * It succeeds only when the swap_cgroup's record for this entry is the same
3635 * as the mem_cgroup's id of @from.
3636 *
3637 * Returns 0 on success, -EINVAL on failure.
3638 *
3639 * The caller must have charged to @to, IOW, called page_counter_charge() for
3640 * both res and memsw, and called css_get().
3641 */
3642static int mem_cgroup_move_swap_account(swp_entry_t entry,
3643				struct mem_cgroup *from, struct mem_cgroup *to)
3644{
3645	unsigned short old_id, new_id;
3646
3647	old_id = mem_cgroup_id(from);
3648	new_id = mem_cgroup_id(to);
3649
3650	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3651		mod_memcg_state(from, MEMCG_SWAP, -1);
3652		mod_memcg_state(to, MEMCG_SWAP, 1);
3653		return 0;
3654	}
3655	return -EINVAL;
3656}
3657#else
3658static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3659				struct mem_cgroup *from, struct mem_cgroup *to)
3660{
3661	return -EINVAL;
3662}
3663#endif
3664
3665static DEFINE_MUTEX(memcg_max_mutex);
3666
3667static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3668				 unsigned long max, bool memsw)
3669{
3670	bool enlarge = false;
3671	bool drained = false;
3672	int ret;
3673	bool limits_invariant;
3674	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3675
3676	do {
3677		if (signal_pending(current)) {
3678			ret = -EINTR;
3679			break;
3680		}
3681
3682		mutex_lock(&memcg_max_mutex);
3683		/*
3684		 * Make sure that the new limit (memsw or memory limit) doesn't
3685		 * break our basic invariant rule memory.max <= memsw.max.
3686		 */
3687		limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3688					   max <= memcg->memsw.max;
3689		if (!limits_invariant) {
3690			mutex_unlock(&memcg_max_mutex);
3691			ret = -EINVAL;
3692			break;
3693		}
3694		if (max > counter->max)
3695			enlarge = true;
3696		ret = page_counter_set_max(counter, max);
3697		mutex_unlock(&memcg_max_mutex);
3698
3699		if (!ret)
3700			break;
3701
3702		if (!drained) {
3703			drain_all_stock(memcg);
3704			drained = true;
3705			continue;
3706		}
3707
3708		if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3709					memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP)) {
3710			ret = -EBUSY;
3711			break;
3712		}
3713	} while (true);
3714
3715	if (!ret && enlarge)
3716		memcg_oom_recover(memcg);
3717
3718	return ret;
3719}
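
/*
 * Example of the invariant above (illustrative, cgroup1 interface):
 * memory.max <= memsw.max must hold at all times, so with
 *
 *	# echo 512M > memory.limit_in_bytes
 *	# echo 256M > memory.memsw.limit_in_bytes
 *
 * the second write fails with -EINVAL (limits_invariant is false).
 * Shrinking a limit below current usage first drains per-cpu stocks,
 * then loops in try_to_free_mem_cgroup_pages() until usage fits or
 * reclaim makes no progress (-EBUSY).
 */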
3720
3721unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3722					    gfp_t gfp_mask,
3723					    unsigned long *total_scanned)
3724{
3725	unsigned long nr_reclaimed = 0;
3726	struct mem_cgroup_per_node *mz, *next_mz = NULL;
3727	unsigned long reclaimed;
3728	int loop = 0;
3729	struct mem_cgroup_tree_per_node *mctz;
3730	unsigned long excess;
3731
3732	if (lru_gen_enabled())
3733		return 0;
3734
3735	if (order > 0)
3736		return 0;
3737
3738	mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];
3739
3740	/*
3741	 * Do not even bother to check the largest node if the root
3742	 * is empty. Do it lockless to prevent lock bouncing. Races
3743	 * are acceptable as soft limit is best effort anyway.
3744	 */
3745	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3746		return 0;
3747
3748	/*
3749	 * This loop can run for a while, especially if mem_cgroups
3750	 * continuously keep exceeding their soft limit and putting the
3751	 * system under pressure.
3752	 */
3753	do {
3754		if (next_mz)
3755			mz = next_mz;
3756		else
3757			mz = mem_cgroup_largest_soft_limit_node(mctz);
3758		if (!mz)
3759			break;
3760
3761		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3762						    gfp_mask, total_scanned);
3763		nr_reclaimed += reclaimed;
3764		spin_lock_irq(&mctz->lock);
3765
3766		/*
3767		 * If we failed to reclaim anything from this memory cgroup,
3768		 * it is time to move on to the next cgroup.
3769		 */
3770		next_mz = NULL;
3771		if (!reclaimed)
3772			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3773
3774		excess = soft_limit_excess(mz->memcg);
3775		/*
3776		 * One school of thought says that we should not add
3777		 * back the node to the tree if reclaim returns 0.
3778		 * But our reclaim could return 0 simply because, due
3779		 * to priority, we are exposing a smaller subset of
3780		 * memory to reclaim from. Consider this a longer-term
3781		 * TODO.
3782		 */
3783		/* If excess == 0, no tree ops */
3784		__mem_cgroup_insert_exceeded(mz, mctz, excess);
3785		spin_unlock_irq(&mctz->lock);
3786		css_put(&mz->memcg->css);
3787		loop++;
3788		/*
3789		 * Could not reclaim anything and there are no more
3790		 * mem cgroups to try or we seem to be looping without
3791		 * reclaiming anything.
3792		 */
3793		if (!nr_reclaimed &&
3794			(next_mz == NULL ||
3795			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3796			break;
3797	} while (!nr_reclaimed);
3798	if (next_mz)
3799		css_put(&next_mz->memcg->css);
3800	return nr_reclaimed;
3801}
3802
3803/*
3804 * Reclaims as many pages from the given memcg as possible.
3805 *
3806 * Caller is responsible for holding a css reference on memcg.
3807 */
3808static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3809{
3810	int nr_retries = MAX_RECLAIM_RETRIES;
3811
3812	/* we call try-to-free pages to make this cgroup empty */
3813	lru_add_drain_all();
3814
3815	drain_all_stock(memcg);
3816
3817	/* try to free all pages in this cgroup */
3818	while (nr_retries && page_counter_read(&memcg->memory)) {
3819		if (signal_pending(current))
3820			return -EINTR;
3821
3822		if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3823						  MEMCG_RECLAIM_MAY_SWAP))
3824			nr_retries--;
3825	}
3826
3827	return 0;
3828}
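
/*
 * Usage sketch (cgroup1): writing any value to memory.force_empty,
 * e.g.
 *
 *	# echo 0 > memory.force_empty
 *
 * ends up in mem_cgroup_force_empty() above and reclaims until the
 * charge drops to whatever cannot be reclaimed, a signal arrives
 * (-EINTR), or the retry budget is exhausted.
 */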
3829
3830static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3831					    char *buf, size_t nbytes,
3832					    loff_t off)
3833{
3834	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3835
3836	if (mem_cgroup_is_root(memcg))
3837		return -EINVAL;
3838	return mem_cgroup_force_empty(memcg) ?: nbytes;
3839}
3840
3841static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3842				     struct cftype *cft)
3843{
3844	return 1;
3845}
3846
3847static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3848				      struct cftype *cft, u64 val)
3849{
3850	if (val == 1)
3851		return 0;
3852
3853	pr_warn_once("Non-hierarchical mode is deprecated. "
3854		     "Please report your usecase to linux-mm@kvack.org if you "
3855		     "depend on this functionality.\n");
3856
3857	return -EINVAL;
3858}
3859
3860static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3861{
3862	unsigned long val;
3863
3864	if (mem_cgroup_is_root(memcg)) {
3865		/*
3866		 * Approximate root's usage from global state. This isn't
3867		 * perfect, but the root usage was always an approximation.
3868		 */
3869		val = global_node_page_state(NR_FILE_PAGES) +
3870			global_node_page_state(NR_ANON_MAPPED);
3871		if (swap)
3872			val += total_swap_pages - get_nr_swap_pages();
3873	} else {
3874		if (!swap)
3875			val = page_counter_read(&memcg->memory);
3876		else
3877			val = page_counter_read(&memcg->memsw);
3878	}
3879	return val;
3880}
3881
3882enum {
3883	RES_USAGE,
3884	RES_LIMIT,
3885	RES_MAX_USAGE,
3886	RES_FAILCNT,
3887	RES_SOFT_LIMIT,
3888};
3889
3890static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3891			       struct cftype *cft)
3892{
3893	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3894	struct page_counter *counter;
3895
3896	switch (MEMFILE_TYPE(cft->private)) {
3897	case _MEM:
3898		counter = &memcg->memory;
3899		break;
3900	case _MEMSWAP:
3901		counter = &memcg->memsw;
3902		break;
3903	case _KMEM:
3904		counter = &memcg->kmem;
3905		break;
3906	case _TCP:
3907		counter = &memcg->tcpmem;
3908		break;
3909	default:
3910		BUG();
3911	}
3912
3913	switch (MEMFILE_ATTR(cft->private)) {
3914	case RES_USAGE:
3915		if (counter == &memcg->memory)
3916			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3917		if (counter == &memcg->memsw)
3918			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3919		return (u64)page_counter_read(counter) * PAGE_SIZE;
3920	case RES_LIMIT:
3921		return (u64)counter->max * PAGE_SIZE;
3922	case RES_MAX_USAGE:
3923		return (u64)counter->watermark * PAGE_SIZE;
3924	case RES_FAILCNT:
3925		return counter->failcnt;
3926	case RES_SOFT_LIMIT:
3927		return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE;
3928	default:
3929		BUG();
3930	}
3931}
3932
3933/*
3934 * This function doesn't do anything useful. Its only job is to provide a read
3935 * handler for a file so that cgroup_file_mode() will add read permissions.
3936 */
3937static int mem_cgroup_dummy_seq_show(__always_unused struct seq_file *m,
3938				     __always_unused void *v)
3939{
3940	return -EINVAL;
3941}
3942
3943#ifdef CONFIG_MEMCG_KMEM
3944static int memcg_online_kmem(struct mem_cgroup *memcg)
3945{
3946	struct obj_cgroup *objcg;
3947
3948	if (mem_cgroup_kmem_disabled())
3949		return 0;
3950
3951	if (unlikely(mem_cgroup_is_root(memcg)))
3952		return 0;
3953
3954	objcg = obj_cgroup_alloc();
3955	if (!objcg)
3956		return -ENOMEM;
3957
3958	objcg->memcg = memcg;
3959	rcu_assign_pointer(memcg->objcg, objcg);
3960	obj_cgroup_get(objcg);
3961	memcg->orig_objcg = objcg;
3962
3963	static_branch_enable(&memcg_kmem_online_key);
3964
3965	memcg->kmemcg_id = memcg->id.id;
3966
3967	return 0;
3968}
3969
3970static void memcg_offline_kmem(struct mem_cgroup *memcg)
3971{
3972	struct mem_cgroup *parent;
3973
3974	if (mem_cgroup_kmem_disabled())
3975		return;
3976
3977	if (unlikely(mem_cgroup_is_root(memcg)))
3978		return;
3979
3980	parent = parent_mem_cgroup(memcg);
3981	if (!parent)
3982		parent = root_mem_cgroup;
3983
3984	memcg_reparent_objcgs(memcg, parent);
3985
3986	/*
3987	 * After we have finished memcg_reparent_objcgs(), all list_lrus
3988	 * corresponding to this cgroup are guaranteed to remain empty.
3989	 * The ordering is imposed by list_lru_node->lock taken by
3990	 * memcg_reparent_list_lrus().
3991	 */
3992	memcg_reparent_list_lrus(memcg, parent);
3993}
3994#else
3995static int memcg_online_kmem(struct mem_cgroup *memcg)
3996{
3997	return 0;
3998}
3999static void memcg_offline_kmem(struct mem_cgroup *memcg)
4000{
4001}
4002#endif /* CONFIG_MEMCG_KMEM */
4003
4004static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
4005{
4006	int ret;
4007
4008	mutex_lock(&memcg_max_mutex);
4009
4010	ret = page_counter_set_max(&memcg->tcpmem, max);
4011	if (ret)
4012		goto out;
4013
4014	if (!memcg->tcpmem_active) {
4015		/*
4016		 * The active flag needs to be written after the static_key
4017		 * update. This is what guarantees that the socket activation
4018		 * function is the last one to run. See mem_cgroup_sk_alloc()
4019		 * for details, and note that we don't mark any socket as
4020		 * belonging to this memcg until that flag is up.
4021		 *
4022		 * We need to do this, because static_keys will span multiple
4023		 * sites, but we can't control their order. If we mark a socket
4024		 * as accounted, but the accounting functions are not patched in
4025		 * yet, we'll lose accounting.
4026		 *
4027		 * We never race with the readers in mem_cgroup_sk_alloc(),
4028		 * because when this value change, the code to process it is not
4029		 * patched in yet.
4030		 */
4031		static_branch_inc(&memcg_sockets_enabled_key);
4032		memcg->tcpmem_active = true;
4033	}
4034out:
4035	mutex_unlock(&memcg_max_mutex);
4036	return ret;
4037}
4038
4039/*
4040 * The user of this function is...
4041 * RES_LIMIT.
4042 */
4043static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
4044				char *buf, size_t nbytes, loff_t off)
4045{
4046	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4047	unsigned long nr_pages;
4048	int ret;
4049
4050	buf = strstrip(buf);
4051	ret = page_counter_memparse(buf, "-1", &nr_pages);
4052	if (ret)
4053		return ret;
4054
4055	switch (MEMFILE_ATTR(of_cft(of)->private)) {
4056	case RES_LIMIT:
4057		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
4058			ret = -EINVAL;
4059			break;
4060		}
4061		switch (MEMFILE_TYPE(of_cft(of)->private)) {
4062		case _MEM:
4063			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
4064			break;
4065		case _MEMSWAP:
4066			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
4067			break;
4068		case _KMEM:
4069			pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
4070				     "Writing any value to this file has no effect. "
4071				     "Please report your usecase to linux-mm@kvack.org if you "
4072				     "depend on this functionality.\n");
4073			ret = 0;
4074			break;
4075		case _TCP:
4076			ret = memcg_update_tcp_max(memcg, nr_pages);
4077			break;
4078		}
4079		break;
4080	case RES_SOFT_LIMIT:
4081		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
4082			ret = -EOPNOTSUPP;
4083		} else {
4084			WRITE_ONCE(memcg->soft_limit, nr_pages);
4085			ret = 0;
4086		}
4087		break;
4088	}
4089	return ret ?: nbytes;
4090}
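
/*
 * Illustrative note: cft->private packs a counter type and an
 * attribute via the MEMFILE_* macros defined earlier in this file,
 * roughly
 *
 *	MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) == (_MEMSWAP << 16) | RES_LIMIT
 *
 * so a write to memory.memsw.limit_in_bytes is decoded above as
 * MEMFILE_ATTR() == RES_LIMIT, MEMFILE_TYPE() == _MEMSWAP and routed
 * to mem_cgroup_resize_max(memcg, nr_pages, true).
 */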
4091
4092static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
4093				size_t nbytes, loff_t off)
4094{
4095	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4096	struct page_counter *counter;
4097
4098	switch (MEMFILE_TYPE(of_cft(of)->private)) {
4099	case _MEM:
4100		counter = &memcg->memory;
4101		break;
4102	case _MEMSWAP:
4103		counter = &memcg->memsw;
4104		break;
4105	case _KMEM:
4106		counter = &memcg->kmem;
4107		break;
4108	case _TCP:
4109		counter = &memcg->tcpmem;
4110		break;
4111	default:
4112		BUG();
4113	}
4114
4115	switch (MEMFILE_ATTR(of_cft(of)->private)) {
4116	case RES_MAX_USAGE:
4117		page_counter_reset_watermark(counter);
4118		break;
4119	case RES_FAILCNT:
4120		counter->failcnt = 0;
4121		break;
4122	default:
4123		BUG();
4124	}
4125
4126	return nbytes;
4127}
4128
4129static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
4130					struct cftype *cft)
4131{
4132	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
4133}
4134
4135#ifdef CONFIG_MMU
4136static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
4137					struct cftype *cft, u64 val)
4138{
4139	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4140
4141	pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
4142		     "Please report your usecase to linux-mm@kvack.org if you "
4143		     "depend on this functionality.\n");
4144
4145	if (val & ~MOVE_MASK)
4146		return -EINVAL;
4147
4148	/*
4149	 * No kind of locking is needed in here, because ->can_attach() will
4150	 * check this value once in the beginning of the process, and then carry
4151	 * on with stale data. This means that changes to this value will only
4152	 * affect task migrations starting after the change.
4153	 */
4154	memcg->move_charge_at_immigrate = val;
4155	return 0;
4156}
4157#else
4158static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
4159					struct cftype *cft, u64 val)
4160{
4161	return -ENOSYS;
4162}
4163#endif
4164
4165#ifdef CONFIG_NUMA
4166
4167#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
4168#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
4169#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
4170
4171static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
4172				int nid, unsigned int lru_mask, bool tree)
4173{
4174	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4175	unsigned long nr = 0;
4176	enum lru_list lru;
4177
4178	VM_BUG_ON((unsigned)nid >= nr_node_ids);
4179
4180	for_each_lru(lru) {
4181		if (!(BIT(lru) & lru_mask))
4182			continue;
4183		if (tree)
4184			nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
4185		else
4186			nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
4187	}
4188	return nr;
4189}
4190
4191static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
4192					     unsigned int lru_mask,
4193					     bool tree)
4194{
4195	unsigned long nr = 0;
4196	enum lru_list lru;
4197
4198	for_each_lru(lru) {
4199		if (!(BIT(lru) & lru_mask))
4200			continue;
4201		if (tree)
4202			nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
4203		else
4204			nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
4205	}
4206	return nr;
4207}
4208
4209static int memcg_numa_stat_show(struct seq_file *m, void *v)
4210{
4211	struct numa_stat {
4212		const char *name;
4213		unsigned int lru_mask;
4214	};
4215
4216	static const struct numa_stat stats[] = {
4217		{ "total", LRU_ALL },
4218		{ "file", LRU_ALL_FILE },
4219		{ "anon", LRU_ALL_ANON },
4220		{ "unevictable", BIT(LRU_UNEVICTABLE) },
4221	};
4222	const struct numa_stat *stat;
4223	int nid;
4224	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4225
4226	mem_cgroup_flush_stats(memcg);
4227
4228	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4229		seq_printf(m, "%s=%lu", stat->name,
4230			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4231						   false));
4232		for_each_node_state(nid, N_MEMORY)
4233			seq_printf(m, " N%d=%lu", nid,
4234				   mem_cgroup_node_nr_lru_pages(memcg, nid,
4235							stat->lru_mask, false));
4236		seq_putc(m, '\n');
4237	}
4238
4239	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4240
4241		seq_printf(m, "hierarchical_%s=%lu", stat->name,
4242			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4243						   true));
4244		for_each_node_state(nid, N_MEMORY)
4245			seq_printf(m, " N%d=%lu", nid,
4246				   mem_cgroup_node_nr_lru_pages(memcg, nid,
4247							stat->lru_mask, true));
4248		seq_putc(m, '\n');
4249	}
4250
4251	return 0;
4252}
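
/*
 * Example memory.numa_stat output produced above (illustrative page
 * counts on a two-node machine):
 *
 *	total=279 N0=100 N1=179
 *	file=150 N0=60 N1=90
 *	anon=120 N0=35 N1=85
 *	unevictable=9 N0=5 N1=4
 *	hierarchical_total=412 N0=203 N1=209
 *	...
 *
 * The plain rows are local (tree == false) counts, the
 * "hierarchical_" rows include all descendants (tree == true).
 */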
4253#endif /* CONFIG_NUMA */
4254
4255static const unsigned int memcg1_stats[] = {
4256	NR_FILE_PAGES,
4257	NR_ANON_MAPPED,
4258#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4259	NR_ANON_THPS,
4260#endif
4261	NR_SHMEM,
4262	NR_FILE_MAPPED,
4263	NR_FILE_DIRTY,
4264	NR_WRITEBACK,
4265	WORKINGSET_REFAULT_ANON,
4266	WORKINGSET_REFAULT_FILE,
4267#ifdef CONFIG_SWAP
4268	MEMCG_SWAP,
4269	NR_SWAPCACHE,
4270#endif
4271};
4272
4273static const char *const memcg1_stat_names[] = {
4274	"cache",
4275	"rss",
4276#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4277	"rss_huge",
4278#endif
4279	"shmem",
4280	"mapped_file",
4281	"dirty",
4282	"writeback",
4283	"workingset_refault_anon",
4284	"workingset_refault_file",
4285#ifdef CONFIG_SWAP
4286	"swap",
4287	"swapcached",
4288#endif
4289};
4290
4291/* Universal VM events cgroup1 shows, original sort order */
4292static const unsigned int memcg1_events[] = {
4293	PGPGIN,
4294	PGPGOUT,
4295	PGFAULT,
4296	PGMAJFAULT,
4297};
4298
4299static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
4300{
4301	unsigned long memory, memsw;
4302	struct mem_cgroup *mi;
4303	unsigned int i;
4304
4305	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4306
4307	mem_cgroup_flush_stats(memcg);
4308
4309	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4310		unsigned long nr;
4311
4312		nr = memcg_page_state_local_output(memcg, memcg1_stats[i]);
4313		seq_buf_printf(s, "%s %lu\n", memcg1_stat_names[i], nr);
4314	}
4315
4316	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4317		seq_buf_printf(s, "%s %lu\n", vm_event_name(memcg1_events[i]),
4318			       memcg_events_local(memcg, memcg1_events[i]));
4319
4320	for (i = 0; i < NR_LRU_LISTS; i++)
4321		seq_buf_printf(s, "%s %lu\n", lru_list_name(i),
4322			       memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4323			       PAGE_SIZE);
4324
4325	/* Hierarchical information */
4326	memory = memsw = PAGE_COUNTER_MAX;
4327	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4328		memory = min(memory, READ_ONCE(mi->memory.max));
4329		memsw = min(memsw, READ_ONCE(mi->memsw.max));
4330	}
4331	seq_buf_printf(s, "hierarchical_memory_limit %llu\n",
4332		       (u64)memory * PAGE_SIZE);
4333	seq_buf_printf(s, "hierarchical_memsw_limit %llu\n",
4334		       (u64)memsw * PAGE_SIZE);
4335
4336	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4337		unsigned long nr;
4338
4339		nr = memcg_page_state_output(memcg, memcg1_stats[i]);
4340		seq_buf_printf(s, "total_%s %llu\n", memcg1_stat_names[i],
4341			       (u64)nr);
4342	}
4343
4344	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4345		seq_buf_printf(s, "total_%s %llu\n",
4346			       vm_event_name(memcg1_events[i]),
4347			       (u64)memcg_events(memcg, memcg1_events[i]));
4348
4349	for (i = 0; i < NR_LRU_LISTS; i++)
4350		seq_buf_printf(s, "total_%s %llu\n", lru_list_name(i),
4351			       (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4352			       PAGE_SIZE);
4353
4354#ifdef CONFIG_DEBUG_VM
4355	{
4356		pg_data_t *pgdat;
4357		struct mem_cgroup_per_node *mz;
4358		unsigned long anon_cost = 0;
4359		unsigned long file_cost = 0;
4360
4361		for_each_online_pgdat(pgdat) {
4362			mz = memcg->nodeinfo[pgdat->node_id];
4363
4364			anon_cost += mz->lruvec.anon_cost;
4365			file_cost += mz->lruvec.file_cost;
4366		}
4367		seq_buf_printf(s, "anon_cost %lu\n", anon_cost);
4368		seq_buf_printf(s, "file_cost %lu\n", file_cost);
4369	}
4370#endif
4371}
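
/*
 * Example excerpt of the cgroup1 memory.stat output assembled above
 * (illustrative values, in bytes for state counters):
 *
 *	cache 212992
 *	rss 1044480
 *	...
 *	pgpgin 1324
 *	pgfault 2845
 *	...
 *	hierarchical_memory_limit 536870912
 *	total_cache 212992
 *	total_rss 1044480
 *	...
 *
 * Plain names are local counters; "total_" names sum the entire
 * subtree; hierarchical_*_limit is the tightest limit on the path
 * to the root.
 */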
4372
4373static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4374				      struct cftype *cft)
4375{
4376	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4377
4378	return mem_cgroup_swappiness(memcg);
4379}
4380
4381static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4382				       struct cftype *cft, u64 val)
4383{
4384	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4385
4386	if (val > 200)
4387		return -EINVAL;
4388
4389	if (!mem_cgroup_is_root(memcg))
4390		WRITE_ONCE(memcg->swappiness, val);
4391	else
4392		WRITE_ONCE(vm_swappiness, val);
4393
4394	return 0;
4395}
4396
4397static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4398{
4399	struct mem_cgroup_threshold_ary *t;
4400	unsigned long usage;
4401	int i;
4402
4403	rcu_read_lock();
4404	if (!swap)
4405		t = rcu_dereference(memcg->thresholds.primary);
4406	else
4407		t = rcu_dereference(memcg->memsw_thresholds.primary);
4408
4409	if (!t)
4410		goto unlock;
4411
4412	usage = mem_cgroup_usage(memcg, swap);
4413
4414	/*
4415	 * current_threshold points to the threshold just below or equal to
4416	 * usage. If that's not true, a threshold was crossed after the last
4417	 * call of __mem_cgroup_threshold().
4418	 */
4419	i = t->current_threshold;
4420
4421	/*
4422	 * Iterate backward over array of thresholds starting from
4423	 * current_threshold and check if a threshold is crossed.
4424	 * If none of the thresholds below usage is crossed, we read
4425	 * only one element of the array here.
4426	 */
4427	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4428		eventfd_signal(t->entries[i].eventfd);
4429
4430	/* i = current_threshold + 1 */
4431	i++;
4432
4433	/*
4434	 * Iterate forward over array of thresholds starting from
4435	 * current_threshold+1 and check if a threshold is crossed.
4436	 * If none of the thresholds above usage is crossed, we read
4437	 * only one element of the array here.
4438	 */
4439	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4440		eventfd_signal(t->entries[i].eventfd);
4441
4442	/* Update current_threshold */
4443	t->current_threshold = i - 1;
4444unlock:
4445	rcu_read_unlock();
4446}
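
/*
 * Illustrative walk (not in the source): with sorted thresholds
 * {4M, 8M, 16M} and current_threshold == 1 (8M):
 *
 *	usage rises to 20M:  backward loop is a no-op, forward loop
 *			     signals the 16M eventfd, current_threshold
 *			     becomes 2;
 *	usage drops to 6M:   backward loop signals the 16M and 8M
 *			     eventfds (crossed downwards),
 *			     current_threshold becomes 0 (4M).
 *
 * Eventfds thus fire on crossings in either direction.
 */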
4447
4448static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4449{
4450	while (memcg) {
4451		__mem_cgroup_threshold(memcg, false);
4452		if (do_memsw_account())
4453			__mem_cgroup_threshold(memcg, true);
4454
4455		memcg = parent_mem_cgroup(memcg);
4456	}
4457}
4458
4459static int compare_thresholds(const void *a, const void *b)
4460{
4461	const struct mem_cgroup_threshold *_a = a;
4462	const struct mem_cgroup_threshold *_b = b;
4463
4464	if (_a->threshold > _b->threshold)
4465		return 1;
4466
4467	if (_a->threshold < _b->threshold)
4468		return -1;
4469
4470	return 0;
4471}
4472
4473static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4474{
4475	struct mem_cgroup_eventfd_list *ev;
4476
4477	spin_lock(&memcg_oom_lock);
4478
4479	list_for_each_entry(ev, &memcg->oom_notify, list)
4480		eventfd_signal(ev->eventfd);
4481
4482	spin_unlock(&memcg_oom_lock);
4483	return 0;
4484}
4485
4486static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4487{
4488	struct mem_cgroup *iter;
4489
4490	for_each_mem_cgroup_tree(iter, memcg)
4491		mem_cgroup_oom_notify_cb(iter);
4492}
4493
4494static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4495	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4496{
4497	struct mem_cgroup_thresholds *thresholds;
4498	struct mem_cgroup_threshold_ary *new;
4499	unsigned long threshold;
4500	unsigned long usage;
4501	int i, size, ret;
4502
4503	ret = page_counter_memparse(args, "-1", &threshold);
4504	if (ret)
4505		return ret;
4506
4507	mutex_lock(&memcg->thresholds_lock);
4508
4509	if (type == _MEM) {
4510		thresholds = &memcg->thresholds;
4511		usage = mem_cgroup_usage(memcg, false);
4512	} else if (type == _MEMSWAP) {
4513		thresholds = &memcg->memsw_thresholds;
4514		usage = mem_cgroup_usage(memcg, true);
4515	} else
4516		BUG();
4517
4518	/* Check if a threshold crossed before adding a new one */
4519	if (thresholds->primary)
4520		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4521
4522	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4523
4524	/* Allocate memory for new array of thresholds */
4525	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4526	if (!new) {
4527		ret = -ENOMEM;
4528		goto unlock;
4529	}
4530	new->size = size;
4531
4532	/* Copy thresholds (if any) to new array */
4533	if (thresholds->primary)
4534		memcpy(new->entries, thresholds->primary->entries,
4535		       flex_array_size(new, entries, size - 1));
4536
4537	/* Add new threshold */
4538	new->entries[size - 1].eventfd = eventfd;
4539	new->entries[size - 1].threshold = threshold;
4540
4541	/* Sort thresholds. Registering a new threshold isn't time-critical */
4542	sort(new->entries, size, sizeof(*new->entries),
4543			compare_thresholds, NULL);
4544
4545	/* Find current threshold */
4546	new->current_threshold = -1;
4547	for (i = 0; i < size; i++) {
4548		if (new->entries[i].threshold <= usage) {
4549			/*
4550			 * new->current_threshold will not be used until
4551			 * rcu_assign_pointer(), so it's safe to increment
4552			 * it here.
4553			 */
4554			++new->current_threshold;
4555		} else
4556			break;
4557	}
4558
4559	/* Free old spare buffer and save old primary buffer as spare */
4560	kfree(thresholds->spare);
4561	thresholds->spare = thresholds->primary;
4562
4563	rcu_assign_pointer(thresholds->primary, new);
4564
4565	/* To be sure that nobody uses thresholds */
4566	synchronize_rcu();
4567
4568unlock:
4569	mutex_unlock(&memcg->thresholds_lock);
4570
4571	return ret;
4572}
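
/*
 * Registration sketch (cgroup1 ABI, handled by
 * memcg_write_event_control() below): a 10M usage threshold is armed
 * by writing an eventfd, the fd of memory.usage_in_bytes and the
 * threshold to cgroup.event_control:
 *
 *	# echo "$event_fd $usage_fd 10M" > cgroup.event_control
 *
 * which reaches __mem_cgroup_usage_register_event() above with
 * args == "10M" (page_counter_memparse() accepts K/M/G suffixes).
 */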
4573
4574static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4575	struct eventfd_ctx *eventfd, const char *args)
4576{
4577	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4578}
4579
4580static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4581	struct eventfd_ctx *eventfd, const char *args)
4582{
4583	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4584}
4585
4586static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4587	struct eventfd_ctx *eventfd, enum res_type type)
4588{
4589	struct mem_cgroup_thresholds *thresholds;
4590	struct mem_cgroup_threshold_ary *new;
4591	unsigned long usage;
4592	int i, j, size, entries;
4593
4594	mutex_lock(&memcg->thresholds_lock);
4595
4596	if (type == _MEM) {
4597		thresholds = &memcg->thresholds;
4598		usage = mem_cgroup_usage(memcg, false);
4599	} else if (type == _MEMSWAP) {
4600		thresholds = &memcg->memsw_thresholds;
4601		usage = mem_cgroup_usage(memcg, true);
4602	} else
4603		BUG();
4604
4605	if (!thresholds->primary)
4606		goto unlock;
4607
4608	/* Check if a threshold crossed before removing */
4609	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4610
4611	/* Calculate the new number of thresholds */
4612	size = entries = 0;
4613	for (i = 0; i < thresholds->primary->size; i++) {
4614		if (thresholds->primary->entries[i].eventfd != eventfd)
4615			size++;
4616		else
4617			entries++;
4618	}
4619
4620	new = thresholds->spare;
4621
4622	/* If no items related to eventfd have been cleared, nothing to do */
4623	if (!entries)
4624		goto unlock;
4625
4626	/* Set thresholds array to NULL if we don't have thresholds */
4627	if (!size) {
4628		kfree(new);
4629		new = NULL;
4630		goto swap_buffers;
4631	}
4632
4633	new->size = size;
4634
4635	/* Copy thresholds and find current threshold */
4636	new->current_threshold = -1;
4637	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4638		if (thresholds->primary->entries[i].eventfd == eventfd)
4639			continue;
4640
4641		new->entries[j] = thresholds->primary->entries[i];
4642		if (new->entries[j].threshold <= usage) {
4643			/*
4644			 * new->current_threshold will not be used
4645			 * until rcu_assign_pointer(), so it's safe to increment
4646			 * it here.
4647			 */
4648			++new->current_threshold;
4649		}
4650		j++;
4651	}
4652
4653swap_buffers:
4654	/* Swap primary and spare array */
4655	thresholds->spare = thresholds->primary;
4656
4657	rcu_assign_pointer(thresholds->primary, new);
4658
4659	/* To be sure that nobody uses thresholds */
4660	synchronize_rcu();
4661
4662	/* If all events are unregistered, free the spare array */
4663	if (!new) {
4664		kfree(thresholds->spare);
4665		thresholds->spare = NULL;
4666	}
4667unlock:
4668	mutex_unlock(&memcg->thresholds_lock);
4669}
4670
4671static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4672	struct eventfd_ctx *eventfd)
4673{
4674	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4675}
4676
4677static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4678	struct eventfd_ctx *eventfd)
4679{
4680	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4681}
4682
4683static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4684	struct eventfd_ctx *eventfd, const char *args)
4685{
4686	struct mem_cgroup_eventfd_list *event;
4687
4688	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4689	if (!event)
4690		return -ENOMEM;
4691
4692	spin_lock(&memcg_oom_lock);
4693
4694	event->eventfd = eventfd;
4695	list_add(&event->list, &memcg->oom_notify);
4696
4697	/* already in OOM ? */
4698	if (memcg->under_oom)
4699		eventfd_signal(eventfd);
4700	spin_unlock(&memcg_oom_lock);
4701
4702	return 0;
4703}
4704
4705static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4706	struct eventfd_ctx *eventfd)
4707{
4708	struct mem_cgroup_eventfd_list *ev, *tmp;
4709
4710	spin_lock(&memcg_oom_lock);
4711
4712	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4713		if (ev->eventfd == eventfd) {
4714			list_del(&ev->list);
4715			kfree(ev);
4716		}
4717	}
4718
4719	spin_unlock(&memcg_oom_lock);
4720}
4721
4722static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4723{
4724	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4725
4726	seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable));
4727	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4728	seq_printf(sf, "oom_kill %lu\n",
4729		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4730	return 0;
4731}
4732
4733static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4734	struct cftype *cft, u64 val)
4735{
4736	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4737
4738	/* cannot be set on the root cgroup, and only 0 and 1 are allowed */
4739	if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
4740		return -EINVAL;
4741
4742	WRITE_ONCE(memcg->oom_kill_disable, val);
4743	if (!val)
4744		memcg_oom_recover(memcg);
4745
4746	return 0;
4747}
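
/*
 * Usage sketch (cgroup1): "echo 1 > memory.oom_control" arrives here
 * with val == 1 and disables the OOM killer for this group; tasks
 * hitting the limit then wait until memory is freed or the limit is
 * raised. "echo 0" re-enables the killer and wakes any waiters via
 * memcg_oom_recover().
 */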
4748
4749#ifdef CONFIG_CGROUP_WRITEBACK
4750
4751#include <trace/events/writeback.h>
4752
4753static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4754{
4755	return wb_domain_init(&memcg->cgwb_domain, gfp);
4756}
4757
4758static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4759{
4760	wb_domain_exit(&memcg->cgwb_domain);
4761}
4762
4763static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4764{
4765	wb_domain_size_changed(&memcg->cgwb_domain);
4766}
4767
4768struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4769{
4770	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4771
4772	if (!memcg->css.parent)
4773		return NULL;
4774
4775	return &memcg->cgwb_domain;
4776}
4777
4778/**
4779 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4780 * @wb: bdi_writeback in question
4781 * @pfilepages: out parameter for number of file pages
4782 * @pheadroom: out parameter for number of allocatable pages according to memcg
4783 * @pdirty: out parameter for number of dirty pages
4784 * @pwriteback: out parameter for number of pages under writeback
4785 *
4786 * Determine the numbers of file, headroom, dirty, and writeback pages in
4787 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4788 * is a bit more involved.
4789 *
4790 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4791 * headroom is calculated as the lowest headroom of itself and the
4792 * ancestors.  Note that this doesn't consider the actual amount of
4793 * available memory in the system.  The caller should further cap
4794 * *@pheadroom accordingly.
4795 */
4796void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4797			 unsigned long *pheadroom, unsigned long *pdirty,
4798			 unsigned long *pwriteback)
4799{
4800	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4801	struct mem_cgroup *parent;
4802
4803	mem_cgroup_flush_stats(memcg);
4804
4805	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4806	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4807	*pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4808			memcg_page_state(memcg, NR_ACTIVE_FILE);
4809
4810	*pheadroom = PAGE_COUNTER_MAX;
4811	while ((parent = parent_mem_cgroup(memcg))) {
4812		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4813					    READ_ONCE(memcg->memory.high));
4814		unsigned long used = page_counter_read(&memcg->memory);
4815
4816		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4817		memcg = parent;
4818	}
4819}
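
/*
 * Worked example for the loop above (illustrative, in pages): for a
 * wb in memcg C of the hierarchy root/A/B/C with
 *
 *	C: no limits,  used 100M  -> headroom ~PAGE_COUNTER_MAX
 *	B: high 512M,  used 300M  -> headroom 212M
 *	A: max 1G,     used 900M  -> headroom 124M
 *
 * *pheadroom ends up at 124M worth of pages: the smallest headroom
 * along the ancestry wins, and "ceiling - min(ceiling, used)" clamps
 * overcommitted levels to zero rather than going negative.
 */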
4820
4821/*
4822 * Foreign dirty flushing
4823 *
4824 * There's an inherent mismatch between memcg and writeback.  The former
4825 * tracks ownership per-page while the latter per-inode.  This was a
4826 * deliberate design decision because honoring per-page ownership in the
4827 * writeback path is complicated, may lead to higher CPU and IO overheads
4828 * and was deemed unnecessary given that write-sharing an inode across
4829 * different cgroups isn't a common use-case.
4830 *
4831 * Combined with inode majority-writer ownership switching, this works well
4832 * enough in most cases but there are some pathological cases.  For
4833 * example, let's say there are two cgroups A and B which keep writing to
4834 * different but confined parts of the same inode.  B owns the inode and
4835 * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4836 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4837 * triggering background writeback.  A will be slowed down without a way to
4838 * make writeback of the dirty pages happen.
4839 *
4840 * Conditions like the above can lead to a cgroup getting repeatedly and
4841 * severely throttled after making some progress after each
4842 * dirty_expire_interval while the underlying IO device is almost
4843 * completely idle.
4844 *
4845 * Solving this problem completely requires matching the ownership tracking
4846 * granularities between memcg and writeback in either direction.  However,
4847 * the more egregious behaviors can be avoided by simply remembering the
4848 * most recent foreign dirtying events and initiating remote flushes on
4849 * them when local writeback isn't enough to keep the memory clean enough.
4850 *
4851 * The following two functions implement such mechanism.  When a foreign
4852 * page - a page whose memcg and writeback ownerships don't match - is
4853 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4854 * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4855 * decides that the memcg needs to sleep due to high dirty ratio, it calls
4856 * mem_cgroup_flush_foreign() which queues writeback on the recorded
4857 * foreign bdi_writebacks which haven't expired.  Both the numbers of
4858 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4859 * limited to MEMCG_CGWB_FRN_CNT.
4860 *
4861 * The mechanism only remembers IDs and doesn't hold any object references.
4862 * As being wrong occasionally doesn't matter, updates and accesses to the
4863 * records are lockless and racy.
4864 */
4865void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
4866					     struct bdi_writeback *wb)
4867{
4868	struct mem_cgroup *memcg = folio_memcg(folio);
4869	struct memcg_cgwb_frn *frn;
4870	u64 now = get_jiffies_64();
4871	u64 oldest_at = now;
4872	int oldest = -1;
4873	int i;
4874
4875	trace_track_foreign_dirty(folio, wb);
4876
4877	/*
4878	 * Pick the slot to use.  If there is already a slot for @wb, keep
4879	 * using it.  If not, replace the oldest one which isn't being
4880	 * written out.
4881	 */
4882	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4883		frn = &memcg->cgwb_frn[i];
4884		if (frn->bdi_id == wb->bdi->id &&
4885		    frn->memcg_id == wb->memcg_css->id)
4886			break;
4887		if (time_before64(frn->at, oldest_at) &&
4888		    atomic_read(&frn->done.cnt) == 1) {
4889			oldest = i;
4890			oldest_at = frn->at;
4891		}
4892	}
4893
4894	if (i < MEMCG_CGWB_FRN_CNT) {
4895		/*
4896		 * Re-using an existing one.  Update timestamp lazily to
4897		 * avoid making the cacheline hot.  We want them to be
4898		 * reasonably up-to-date and significantly shorter than
4899		 * dirty_expire_interval as that's what expires the record.
4900		 * Use the shorter of 1s and dirty_expire_interval / 8.
4901		 */
4902		unsigned long update_intv =
4903			min_t(unsigned long, HZ,
4904			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4905
4906		if (time_before64(frn->at, now - update_intv))
4907			frn->at = now;
4908	} else if (oldest >= 0) {
4909		/* replace the oldest free one */
4910		frn = &memcg->cgwb_frn[oldest];
4911		frn->bdi_id = wb->bdi->id;
4912		frn->memcg_id = wb->memcg_css->id;
4913		frn->at = now;
4914	}
4915}
4916
4917/* issue foreign writeback flushes for recorded foreign dirtying events */
4918void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4919{
4920	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4921	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4922	u64 now = jiffies_64;
4923	int i;
4924
4925	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4926		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4927
4928		/*
4929		 * If the record is older than dirty_expire_interval,
4930		 * writeback on it has already started.  No need to kick it
4931		 * off again.  Also, don't start a new one if there's
4932		 * already one in flight.
4933		 */
4934		if (time_after64(frn->at, now - intv) &&
4935		    atomic_read(&frn->done.cnt) == 1) {
4936			frn->at = 0;
4937			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4938			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
4939					       WB_REASON_FOREIGN_FLUSH,
4940					       &frn->done);
4941		}
4942	}
4943}
4944
4945#else	/* CONFIG_CGROUP_WRITEBACK */
4946
4947static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4948{
4949	return 0;
4950}
4951
4952static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4953{
4954}
4955
4956static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4957{
4958}
4959
4960#endif	/* CONFIG_CGROUP_WRITEBACK */
4961
4962/*
4963 * DO NOT USE IN NEW FILES.
4964 *
4965 * "cgroup.event_control" implementation.
4966 *
4967 * This is way over-engineered.  It tries to support fully configurable
4968 * events for each user.  Such level of flexibility is completely
4969 * unnecessary especially in the light of the planned unified hierarchy.
4970 *
4971 * Please deprecate this and replace with something simpler if at all
4972 * possible.
4973 */
4974
4975/*
4976 * Unregister event and free resources.
4977 *
4978 * Gets called from workqueue.
4979 */
4980static void memcg_event_remove(struct work_struct *work)
4981{
4982	struct mem_cgroup_event *event =
4983		container_of(work, struct mem_cgroup_event, remove);
4984	struct mem_cgroup *memcg = event->memcg;
4985
4986	remove_wait_queue(event->wqh, &event->wait);
4987
4988	event->unregister_event(memcg, event->eventfd);
4989
4990	/* Notify userspace the event is going away. */
4991	eventfd_signal(event->eventfd);
4992
4993	eventfd_ctx_put(event->eventfd);
4994	kfree(event);
4995	css_put(&memcg->css);
4996}
4997
4998/*
4999 * Gets called on EPOLLHUP on eventfd when user closes it.
5000 *
5001 * Called with wqh->lock held and interrupts disabled.
5002 */
5003static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
5004			    int sync, void *key)
5005{
5006	struct mem_cgroup_event *event =
5007		container_of(wait, struct mem_cgroup_event, wait);
5008	struct mem_cgroup *memcg = event->memcg;
5009	__poll_t flags = key_to_poll(key);
5010
5011	if (flags & EPOLLHUP) {
5012		/*
5013		 * If the event has been detached at cgroup removal, we
5014		 * can simply return knowing the other side will cleanup
5015		 * for us.
5016		 *
5017		 * We can't race against event freeing since the other
5018		 * side will require wqh->lock via remove_wait_queue(),
5019		 * which we hold.
5020		 */
5021		spin_lock(&memcg->event_list_lock);
5022		if (!list_empty(&event->list)) {
5023			list_del_init(&event->list);
5024			/*
5025			 * We are in atomic context, but cgroup_event_remove()
5026			 * may sleep, so we have to call it in workqueue.
5027			 */
5028			schedule_work(&event->remove);
5029		}
5030		spin_unlock(&memcg->event_list_lock);
5031	}
5032
5033	return 0;
5034}
5035
5036static void memcg_event_ptable_queue_proc(struct file *file,
5037		wait_queue_head_t *wqh, poll_table *pt)
5038{
5039	struct mem_cgroup_event *event =
5040		container_of(pt, struct mem_cgroup_event, pt);
5041
5042	event->wqh = wqh;
5043	add_wait_queue(wqh, &event->wait);
5044}
5045
5046/*
5047 * DO NOT USE IN NEW FILES.
5048 *
5049 * Parse input and register new cgroup event handler.
5050 *
5051 * Input must be in format '<event_fd> <control_fd> <args>'.
5052 * Interpretation of args is defined by control file implementation.
5053 */
5054static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
5055					 char *buf, size_t nbytes, loff_t off)
5056{
5057	struct cgroup_subsys_state *css = of_css(of);
5058	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5059	struct mem_cgroup_event *event;
5060	struct cgroup_subsys_state *cfile_css;
5061	unsigned int efd, cfd;
5062	struct fd efile;
5063	struct fd cfile;
5064	struct dentry *cdentry;
5065	const char *name;
5066	char *endp;
5067	int ret;
5068
5069	if (IS_ENABLED(CONFIG_PREEMPT_RT))
5070		return -EOPNOTSUPP;
5071
5072	buf = strstrip(buf);
5073
5074	efd = simple_strtoul(buf, &endp, 10);
5075	if (*endp != ' ')
5076		return -EINVAL;
5077	buf = endp + 1;
5078
5079	cfd = simple_strtoul(buf, &endp, 10);
5080	if ((*endp != ' ') && (*endp != '\0'))
5081		return -EINVAL;
5082	buf = endp + 1;
5083
5084	event = kzalloc(sizeof(*event), GFP_KERNEL);
5085	if (!event)
5086		return -ENOMEM;
5087
5088	event->memcg = memcg;
5089	INIT_LIST_HEAD(&event->list);
5090	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
5091	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
5092	INIT_WORK(&event->remove, memcg_event_remove);
5093
5094	efile = fdget(efd);
5095	if (!efile.file) {
5096		ret = -EBADF;
5097		goto out_kfree;
5098	}
5099
5100	event->eventfd = eventfd_ctx_fileget(efile.file);
5101	if (IS_ERR(event->eventfd)) {
5102		ret = PTR_ERR(event->eventfd);
5103		goto out_put_efile;
5104	}
5105
5106	cfile = fdget(cfd);
5107	if (!cfile.file) {
5108		ret = -EBADF;
5109		goto out_put_eventfd;
5110	}
5111
5112	/* the process needs read permission on the control file */
5113	/* AV: shouldn't we check that it's been opened for read instead? */
5114	ret = file_permission(cfile.file, MAY_READ);
5115	if (ret < 0)
5116		goto out_put_cfile;
5117
5118	/*
5119	 * The control file must be a regular cgroup1 file. As a regular cgroup
5120	 * file can't be renamed, it's safe to access its name afterwards.
5121	 */
5122	cdentry = cfile.file->f_path.dentry;
5123	if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
5124		ret = -EINVAL;
5125		goto out_put_cfile;
5126	}
5127
5128	/*
5129	 * Determine the event callbacks and set them in @event.  This used
5130	 * to be done via struct cftype but cgroup core no longer knows
5131	 * about these events.  The following is crude but the whole thing
5132	 * is for compatibility anyway.
5133	 *
5134	 * DO NOT ADD NEW FILES.
5135	 */
5136	name = cdentry->d_name.name;
5137
5138	if (!strcmp(name, "memory.usage_in_bytes")) {
5139		event->register_event = mem_cgroup_usage_register_event;
5140		event->unregister_event = mem_cgroup_usage_unregister_event;
5141	} else if (!strcmp(name, "memory.oom_control")) {
5142		event->register_event = mem_cgroup_oom_register_event;
5143		event->unregister_event = mem_cgroup_oom_unregister_event;
5144	} else if (!strcmp(name, "memory.pressure_level")) {
5145		event->register_event = vmpressure_register_event;
5146		event->unregister_event = vmpressure_unregister_event;
5147	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
5148		event->register_event = memsw_cgroup_usage_register_event;
5149		event->unregister_event = memsw_cgroup_usage_unregister_event;
5150	} else {
5151		ret = -EINVAL;
5152		goto out_put_cfile;
5153	}
5154
5155	/*
5156	 * Verify that @cfile belongs to @css.  Also, remaining events are
5157	 * automatically removed on cgroup destruction but the removal is
5158	 * asynchronous, so take an extra ref on @css.
5159	 */
5160	cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
5161					       &memory_cgrp_subsys);
5162	ret = -EINVAL;
5163	if (IS_ERR(cfile_css))
5164		goto out_put_cfile;
5165	if (cfile_css != css) {
5166		css_put(cfile_css);
5167		goto out_put_cfile;
5168	}
5169
5170	ret = event->register_event(memcg, event->eventfd, buf);
5171	if (ret)
5172		goto out_put_css;
5173
5174	vfs_poll(efile.file, &event->pt);
5175
5176	spin_lock_irq(&memcg->event_list_lock);
5177	list_add(&event->list, &memcg->event_list);
5178	spin_unlock_irq(&memcg->event_list_lock);
5179
5180	fdput(cfile);
5181	fdput(efile);
5182
5183	return nbytes;
5184
5185out_put_css:
5186	css_put(css);
5187out_put_cfile:
5188	fdput(cfile);
5189out_put_eventfd:
5190	eventfd_ctx_put(event->eventfd);
5191out_put_efile:
5192	fdput(efile);
5193out_kfree:
5194	kfree(event);
5195
5196	return ret;
5197}
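
/*
 * Minimal userspace sketch of this interface (illustrative, error
 * handling omitted):
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("memory.oom_control", O_RDONLY);
 *	int ecfd = open("cgroup.event_control", O_WRONLY);
 *	uint64_t cnt;
 *
 *	dprintf(ecfd, "%d %d", efd, cfd);
 *	read(efd, &cnt, sizeof(cnt));	// blocks until an OOM event
 *
 * The name of the control file selects the register/unregister
 * callbacks in the strcmp() chain above.
 */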
5198
5199#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
5200static int mem_cgroup_slab_show(struct seq_file *m, void *p)
5201{
5202	/*
5203	 * Deprecated.
5204	 * Please, take a look at tools/cgroup/memcg_slabinfo.py .
5205	 */
5206	return 0;
5207}
5208#endif
5209
5210static int memory_stat_show(struct seq_file *m, void *v);
5211
5212static struct cftype mem_cgroup_legacy_files[] = {
5213	{
5214		.name = "usage_in_bytes",
5215		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
5216		.read_u64 = mem_cgroup_read_u64,
5217	},
5218	{
5219		.name = "max_usage_in_bytes",
5220		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
5221		.write = mem_cgroup_reset,
5222		.read_u64 = mem_cgroup_read_u64,
5223	},
5224	{
5225		.name = "limit_in_bytes",
5226		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
5227		.write = mem_cgroup_write,
5228		.read_u64 = mem_cgroup_read_u64,
5229	},
5230	{
5231		.name = "soft_limit_in_bytes",
5232		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
5233		.write = mem_cgroup_write,
5234		.read_u64 = mem_cgroup_read_u64,
5235	},
5236	{
5237		.name = "failcnt",
5238		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
5239		.write = mem_cgroup_reset,
5240		.read_u64 = mem_cgroup_read_u64,
5241	},
5242	{
5243		.name = "stat",
5244		.seq_show = memory_stat_show,
5245	},
5246	{
5247		.name = "force_empty",
5248		.write = mem_cgroup_force_empty_write,
5249	},
5250	{
5251		.name = "use_hierarchy",
5252		.write_u64 = mem_cgroup_hierarchy_write,
5253		.read_u64 = mem_cgroup_hierarchy_read,
5254	},
5255	{
5256		.name = "cgroup.event_control",		/* XXX: for compat */
5257		.write = memcg_write_event_control,
5258		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
5259	},
5260	{
5261		.name = "swappiness",
5262		.read_u64 = mem_cgroup_swappiness_read,
5263		.write_u64 = mem_cgroup_swappiness_write,
5264	},
5265	{
5266		.name = "move_charge_at_immigrate",
5267		.read_u64 = mem_cgroup_move_charge_read,
5268		.write_u64 = mem_cgroup_move_charge_write,
5269	},
5270	{
5271		.name = "oom_control",
5272		.seq_show = mem_cgroup_oom_control_read,
5273		.write_u64 = mem_cgroup_oom_control_write,
5274	},
5275	{
5276		.name = "pressure_level",
5277		.seq_show = mem_cgroup_dummy_seq_show,
5278	},
5279#ifdef CONFIG_NUMA
5280	{
5281		.name = "numa_stat",
5282		.seq_show = memcg_numa_stat_show,
5283	},
5284#endif
5285	{
5286		.name = "kmem.limit_in_bytes",
5287		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5288		.write = mem_cgroup_write,
5289		.read_u64 = mem_cgroup_read_u64,
5290	},
5291	{
5292		.name = "kmem.usage_in_bytes",
5293		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5294		.read_u64 = mem_cgroup_read_u64,
5295	},
5296	{
5297		.name = "kmem.failcnt",
5298		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5299		.write = mem_cgroup_reset,
5300		.read_u64 = mem_cgroup_read_u64,
5301	},
5302	{
5303		.name = "kmem.max_usage_in_bytes",
5304		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5305		.write = mem_cgroup_reset,
5306		.read_u64 = mem_cgroup_read_u64,
5307	},
5308#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
5309	{
5310		.name = "kmem.slabinfo",
5311		.seq_show = mem_cgroup_slab_show,
5312	},
5313#endif
5314	{
5315		.name = "kmem.tcp.limit_in_bytes",
5316		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5317		.write = mem_cgroup_write,
5318		.read_u64 = mem_cgroup_read_u64,
5319	},
5320	{
5321		.name = "kmem.tcp.usage_in_bytes",
5322		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5323		.read_u64 = mem_cgroup_read_u64,
5324	},
5325	{
5326		.name = "kmem.tcp.failcnt",
5327		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5328		.write = mem_cgroup_reset,
5329		.read_u64 = mem_cgroup_read_u64,
5330	},
5331	{
5332		.name = "kmem.tcp.max_usage_in_bytes",
5333		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5334		.write = mem_cgroup_reset,
5335		.read_u64 = mem_cgroup_read_u64,
5336	},
5337	{ },	/* terminate */
5338};
5339
5340/*
5341 * Private memory cgroup IDR
5342 *
5343 * Swap-out records and page cache shadow entries need to store memcg
5344 * references in constrained space, so we maintain an ID space that is
5345 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
5346 * memory-controlled cgroups to 64k.
5347 *
5348 * However, there usually are many references to the offline CSS after
5349 * the cgroup has been destroyed, such as page cache or reclaimable
5350 * slab objects, that don't need to hang on to the ID. We want to keep
5351 * those dead CSS from occupying IDs, or we might quickly exhaust the
5352 * relatively small ID space and prevent the creation of new cgroups
5353 * even when there are much fewer than 64k cgroups - possibly none.
5354 *
5355 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5356 * be freed and recycled when it's no longer needed, which is usually
5357 * when the CSS is offlined.
5358 *
5359 * The only exception to that are records of swapped out tmpfs/shmem
5360 * pages that need to be attributed to live ancestors on swapin. But
5361 * those references are manageable from userspace.
5362 */
5363
5364#define MEM_CGROUP_ID_MAX	((1UL << MEM_CGROUP_ID_SHIFT) - 1)
5365static DEFINE_IDR(mem_cgroup_idr);
5366
5367static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5368{
5369	if (memcg->id.id > 0) {
5370		idr_remove(&mem_cgroup_idr, memcg->id.id);
5371		memcg->id.id = 0;
5372	}
5373}
5374
5375static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5376						  unsigned int n)
5377{
5378	refcount_add(n, &memcg->id.ref);
5379}
5380
5381static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5382{
5383	if (refcount_sub_and_test(n, &memcg->id.ref)) {
5384		mem_cgroup_id_remove(memcg);
5385
5386		/* Memcg ID pins CSS */
5387		css_put(&memcg->css);
5388	}
5389}
5390
5391static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5392{
5393	mem_cgroup_id_put_many(memcg, 1);
5394}
5395
5396/**
5397 * mem_cgroup_from_id - look up a memcg from a memcg id
5398 * @id: the memcg id to look up
5399 *
5400 * Caller must hold rcu_read_lock().
5401 */
5402struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5403{
5404	WARN_ON_ONCE(!rcu_read_lock_held());
5405	return idr_find(&mem_cgroup_idr, id);
5406}
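
/*
 * Typical caller pattern (sketch): resolve an ID stored in a swap
 * record or shadow entry, then pin the css before use:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 *
 * Since IDs are recycled, a stale ID may resolve to a different
 * cgroup or to NULL; callers must tolerate both.
 */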
5407
5408#ifdef CONFIG_SHRINKER_DEBUG
5409struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
5410{
5411	struct cgroup *cgrp;
5412	struct cgroup_subsys_state *css;
5413	struct mem_cgroup *memcg;
5414
5415	cgrp = cgroup_get_from_id(ino);
5416	if (IS_ERR(cgrp))
5417		return ERR_CAST(cgrp);
5418
5419	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
5420	if (css)
5421		memcg = container_of(css, struct mem_cgroup, css);
5422	else
5423		memcg = ERR_PTR(-ENOENT);
5424
5425	cgroup_put(cgrp);
5426
5427	return memcg;
5428}
5429#endif
5430
5431static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5432{
5433	struct mem_cgroup_per_node *pn;
5434
5435	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
5436	if (!pn)
5437		return 1;
5438
5439	pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5440						   GFP_KERNEL_ACCOUNT);
5441	if (!pn->lruvec_stats_percpu) {
5442		kfree(pn);
5443		return 1;
5444	}
5445
5446	lruvec_init(&pn->lruvec);
5447	pn->memcg = memcg;
5448
5449	memcg->nodeinfo[node] = pn;
5450	return 0;
5451}
5452
5453static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5454{
5455	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5456
5457	if (!pn)
5458		return;
5459
5460	free_percpu(pn->lruvec_stats_percpu);
5461	kfree(pn);
5462}
5463
5464static void __mem_cgroup_free(struct mem_cgroup *memcg)
5465{
5466	int node;
5467
5468	if (memcg->orig_objcg)
5469		obj_cgroup_put(memcg->orig_objcg);
5470
5471	for_each_node(node)
5472		free_mem_cgroup_per_node_info(memcg, node);
5473	kfree(memcg->vmstats);
5474	free_percpu(memcg->vmstats_percpu);
5475	kfree(memcg);
5476}
5477
5478static void mem_cgroup_free(struct mem_cgroup *memcg)
5479{
5480	lru_gen_exit_memcg(memcg);
5481	memcg_wb_domain_exit(memcg);
5482	__mem_cgroup_free(memcg);
5483}
5484
5485static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
5486{
5487	struct memcg_vmstats_percpu *statc, *pstatc;
5488	struct mem_cgroup *memcg;
5489	int node, cpu;
5490	int __maybe_unused i;
5491	long error = -ENOMEM;
5492
5493	memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
5494	if (!memcg)
5495		return ERR_PTR(error);
5496
5497	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5498				 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
5499	if (memcg->id.id < 0) {
5500		error = memcg->id.id;
5501		goto fail;
5502	}
5503
5504	memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), GFP_KERNEL);
5505	if (!memcg->vmstats)
5506		goto fail;
5507
5508	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5509						 GFP_KERNEL_ACCOUNT);
5510	if (!memcg->vmstats_percpu)
5511		goto fail;
5512
5513	for_each_possible_cpu(cpu) {
5514		if (parent)
5515			pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
5516		statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5517		statc->parent = parent ? pstatc : NULL;
5518		statc->vmstats = memcg->vmstats;
5519	}
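
	/*
	 * A sketch of what the loop above built, for one CPU in a
	 * two-level hierarchy (illustrative only):
	 *
	 *	statc(child, cpu)->parent == statc(parent, cpu)
	 *	statc(parent, cpu)->parent == NULL   (parent is root)
	 *
	 * so flushing can follow the chain upwards without touching
	 * another CPU's percpu data.
	 */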
5520
5521	for_each_node(node)
5522		if (alloc_mem_cgroup_per_node_info(memcg, node))
5523			goto fail;
5524
5525	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5526		goto fail;
5527
5528	INIT_WORK(&memcg->high_work, high_work_func);
5529	INIT_LIST_HEAD(&memcg->oom_notify);
5530	mutex_init(&memcg->thresholds_lock);
5531	spin_lock_init(&memcg->move_lock);
5532	vmpressure_init(&memcg->vmpressure);
5533	INIT_LIST_HEAD(&memcg->event_list);
5534	spin_lock_init(&memcg->event_list_lock);
5535	memcg->socket_pressure = jiffies;
5536#ifdef CONFIG_MEMCG_KMEM
5537	memcg->kmemcg_id = -1;
5538	INIT_LIST_HEAD(&memcg->objcg_list);
5539#endif
5540#ifdef CONFIG_CGROUP_WRITEBACK
5541	INIT_LIST_HEAD(&memcg->cgwb_list);
5542	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5543		memcg->cgwb_frn[i].done =
5544			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5545#endif
5546#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5547	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5548	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5549	memcg->deferred_split_queue.split_queue_len = 0;
5550#endif
5551	lru_gen_init_memcg(memcg);
5552	return memcg;
5553fail:
5554	mem_cgroup_id_remove(memcg);
5555	__mem_cgroup_free(memcg);
5556	return ERR_PTR(error);
5557}
5558
5559static struct cgroup_subsys_state * __ref
5560mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5561{
5562	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5563	struct mem_cgroup *memcg, *old_memcg;
5564
5565	old_memcg = set_active_memcg(parent);
5566	memcg = mem_cgroup_alloc(parent);
5567	set_active_memcg(old_memcg);
5568	if (IS_ERR(memcg))
5569		return ERR_CAST(memcg);
5570
5571	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5572	WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
5573#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
5574	memcg->zswap_max = PAGE_COUNTER_MAX;
5575	WRITE_ONCE(memcg->zswap_writeback,
5576		!parent || READ_ONCE(parent->zswap_writeback));
5577#endif
5578	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5579	if (parent) {
5580		WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
5581		WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
5582
5583		page_counter_init(&memcg->memory, &parent->memory);
5584		page_counter_init(&memcg->swap, &parent->swap);
5585		page_counter_init(&memcg->kmem, &parent->kmem);
5586		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5587	} else {
5588		init_memcg_events();
5589		page_counter_init(&memcg->memory, NULL);
5590		page_counter_init(&memcg->swap, NULL);
5591		page_counter_init(&memcg->kmem, NULL);
5592		page_counter_init(&memcg->tcpmem, NULL);
5593
5594		root_mem_cgroup = memcg;
5595		return &memcg->css;
5596	}
5597
5598	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5599		static_branch_inc(&memcg_sockets_enabled_key);
5600
5601#if defined(CONFIG_MEMCG_KMEM)
5602	if (!cgroup_memory_nobpf)
5603		static_branch_inc(&memcg_bpf_enabled_key);
5604#endif
5605
5606	return &memcg->css;
5607}
5608
5609static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5610{
5611	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5612
5613	if (memcg_online_kmem(memcg))
5614		goto remove_id;
5615
5616	/*
5617	 * A memcg must be visible for expand_shrinker_info()
5618	 * by the time the maps are allocated. So, we allocate maps
5619	 * here, when for_each_mem_cgroup() can't skip it.
5620	 */
5621	if (alloc_shrinker_info(memcg))
5622		goto offline_kmem;
5623
5624	if (unlikely(mem_cgroup_is_root(memcg)))
5625		queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
5626				   FLUSH_TIME);
5627	lru_gen_online_memcg(memcg);
5628
5629	/* Online state pins memcg ID, memcg ID pins CSS */
5630	refcount_set(&memcg->id.ref, 1);
5631	css_get(css);
5632
5633	/*
5634	 * Ensure mem_cgroup_from_id() works once we're fully online.
5635	 *
5636	 * We could do this earlier and require callers to filter with
5637	 * css_tryget_online(). But right now there are no users that
5638	 * need earlier access, and the workingset code relies on the
5639	 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
5640	 * publish it here at the end of onlining. This matches the
5641	 * regular ID destruction during offlining.
5642	 */
5643	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5644
5645	return 0;
5646offline_kmem:
5647	memcg_offline_kmem(memcg);
5648remove_id:
5649	mem_cgroup_id_remove(memcg);
5650	return -ENOMEM;
5651}
5652
5653static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5654{
5655	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5656	struct mem_cgroup_event *event, *tmp;
5657
5658	/*
5659	 * Unregister events and notify userspace.
5660	 * Notify userspace about the cgroup's removal only after rmdir of the
5661	 * cgroup directory, to avoid races between userspace and the kernel.
5662	 */
5663	spin_lock_irq(&memcg->event_list_lock);
5664	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5665		list_del_init(&event->list);
5666		schedule_work(&event->remove);
5667	}
5668	spin_unlock_irq(&memcg->event_list_lock);
5669
5670	page_counter_set_min(&memcg->memory, 0);
5671	page_counter_set_low(&memcg->memory, 0);
5672
5673	zswap_memcg_offline_cleanup(memcg);
5674
5675	memcg_offline_kmem(memcg);
5676	reparent_shrinker_deferred(memcg);
5677	wb_memcg_offline(memcg);
5678	lru_gen_offline_memcg(memcg);
5679
5680	drain_all_stock(memcg);
5681
5682	mem_cgroup_id_put(memcg);
5683}
5684
5685static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5686{
5687	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5688
5689	invalidate_reclaim_iterators(memcg);
5690	lru_gen_release_memcg(memcg);
5691}
5692
5693static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5694{
5695	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5696	int __maybe_unused i;
5697
5698#ifdef CONFIG_CGROUP_WRITEBACK
5699	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5700		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5701#endif
5702	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5703		static_branch_dec(&memcg_sockets_enabled_key);
5704
5705	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5706		static_branch_dec(&memcg_sockets_enabled_key);
5707
5708#if defined(CONFIG_MEMCG_KMEM)
5709	if (!cgroup_memory_nobpf)
5710		static_branch_dec(&memcg_bpf_enabled_key);
5711#endif
5712
5713	vmpressure_cleanup(&memcg->vmpressure);
5714	cancel_work_sync(&memcg->high_work);
5715	mem_cgroup_remove_from_trees(memcg);
5716	free_shrinker_info(memcg);
5717	mem_cgroup_free(memcg);
5718}
5719
5720/**
5721 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5722 * @css: the target css
5723 *
5724 * Reset the states of the mem_cgroup associated with @css.  This is
5725 * invoked when the userland requests disabling on the default hierarchy
5726 * but the memcg is pinned through dependency.  The memcg should stop
5727 * applying policies and should revert to the vanilla state as it may be
5728 * made visible again.
5729 *
5730 * The current implementation only resets the essential configurations.
5731 * This needs to be expanded to cover all the visible parts.
5732 */
5733static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5734{
5735	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5736
5737	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5738	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5739	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5740	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5741	page_counter_set_min(&memcg->memory, 0);
5742	page_counter_set_low(&memcg->memory, 0);
5743	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5744	WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
5745	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5746	memcg_wb_domain_size_changed(memcg);
5747}
5748
5749static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5750{
5751	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5752	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5753	struct memcg_vmstats_percpu *statc;
5754	long delta, delta_cpu, v;
5755	int i, nid;
5756
5757	statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5758
5759	for (i = 0; i < MEMCG_NR_STAT; i++) {
5760		/*
5761		 * Collect the aggregated propagation counts of groups
5762		 * below us. We're in a per-cpu loop here and this is
5763		 * a global counter, so the first cycle will get them.
5764		 */
5765		delta = memcg->vmstats->state_pending[i];
5766		if (delta)
5767			memcg->vmstats->state_pending[i] = 0;
5768
5769		/* Add CPU changes on this level since the last flush */
5770		delta_cpu = 0;
5771		v = READ_ONCE(statc->state[i]);
5772		if (v != statc->state_prev[i]) {
5773			delta_cpu = v - statc->state_prev[i];
5774			delta += delta_cpu;
5775			statc->state_prev[i] = v;
5776		}
5777
5778		/* Aggregate counts on this level and propagate upwards */
5779		if (delta_cpu)
5780			memcg->vmstats->state_local[i] += delta_cpu;
5781
5782		if (delta) {
5783			memcg->vmstats->state[i] += delta;
5784			if (parent)
5785				parent->vmstats->state_pending[i] += delta;
5786		}
5787	}
5788
5789	for (i = 0; i < NR_MEMCG_EVENTS; i++) {
5790		delta = memcg->vmstats->events_pending[i];
5791		if (delta)
5792			memcg->vmstats->events_pending[i] = 0;
5793
5794		delta_cpu = 0;
5795		v = READ_ONCE(statc->events[i]);
5796		if (v != statc->events_prev[i]) {
5797			delta_cpu = v - statc->events_prev[i];
5798			delta += delta_cpu;
5799			statc->events_prev[i] = v;
5800		}
5801
5802		if (delta_cpu)
5803			memcg->vmstats->events_local[i] += delta_cpu;
5804
5805		if (delta) {
5806			memcg->vmstats->events[i] += delta;
5807			if (parent)
5808				parent->vmstats->events_pending[i] += delta;
5809		}
5810	}
5811
5812	for_each_node_state(nid, N_MEMORY) {
5813		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
5814		struct mem_cgroup_per_node *ppn = NULL;
5815		struct lruvec_stats_percpu *lstatc;
5816
5817		if (parent)
5818			ppn = parent->nodeinfo[nid];
5819
5820		lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
5821
5822		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
5823			delta = pn->lruvec_stats.state_pending[i];
5824			if (delta)
5825				pn->lruvec_stats.state_pending[i] = 0;
5826
5827			delta_cpu = 0;
5828			v = READ_ONCE(lstatc->state[i]);
5829			if (v != lstatc->state_prev[i]) {
5830				delta_cpu = v - lstatc->state_prev[i];
5831				delta += delta_cpu;
5832				lstatc->state_prev[i] = v;
5833			}
5834
5835			if (delta_cpu)
5836				pn->lruvec_stats.state_local[i] += delta_cpu;
5837
5838			if (delta) {
5839				pn->lruvec_stats.state[i] += delta;
5840				if (ppn)
5841					ppn->lruvec_stats.state_pending[i] += delta;
5842			}
5843		}
5844	}
5845	statc->stats_updates = 0;
5846	/* We are in a per-cpu loop here, only do the atomic write once */
5847	if (atomic64_read(&memcg->vmstats->stats_updates))
5848		atomic64_set(&memcg->vmstats->stats_updates, 0);
5849}
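
/*
 * Worked example for the flush above (numbers invented): suppose a
 * child already pushed 100 pages of some stat into our
 * state_pending[i], and this CPU's counter moved from 40 to 70 since
 * the previous flush. Then for this CPU:
 *
 *	delta_cpu = 70 - 40 = 30   -> vmstats->state_local[i] += 30
 *	delta     = 100 + 30 = 130 -> vmstats->state[i] += 130
 *	parent->vmstats->state_pending[i] += 130
 *
 * state[] thus aggregates the whole subtree, state_local[] only this
 * level, and pending[] carries deltas one level up per flush.
 */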
5850
5851#ifdef CONFIG_MMU
5852/* Handlers for move charge at task migration. */
5853static int mem_cgroup_do_precharge(unsigned long count)
5854{
5855	int ret;
5856
5857	/* Try a single bulk charge without reclaim first, kswapd may wake */
5858	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5859	if (!ret) {
5860		mc.precharge += count;
5861		return ret;
5862	}
5863
5864	/* Try charges one by one with reclaim, but do not retry */
5865	while (count--) {
5866		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5867		if (ret)
5868			return ret;
5869		mc.precharge++;
5870		cond_resched();
5871	}
5872	return 0;
5873}
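
/*
 * Illustrative flow (not an additional code path): for count == 512,
 * the first try_charge() attempts all 512 pages in one go without
 * entering direct reclaim (kswapd may still be woken). Only if that
 * fails does the loop charge page by page with reclaim allowed but
 * __GFP_NORETRY set, so one hopeless charge aborts the remaining
 * precharge instead of retrying reclaim hundreds of times.
 */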
5874
5875union mc_target {
5876	struct page	*page;
5877	swp_entry_t	ent;
5878};
5879
5880enum mc_target_type {
5881	MC_TARGET_NONE = 0,
5882	MC_TARGET_PAGE,
5883	MC_TARGET_SWAP,
5884	MC_TARGET_DEVICE,
5885};
5886
5887static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5888						unsigned long addr, pte_t ptent)
5889{
5890	struct page *page = vm_normal_page(vma, addr, ptent);
5891
5892	if (!page)
5893		return NULL;
5894	if (PageAnon(page)) {
5895		if (!(mc.flags & MOVE_ANON))
5896			return NULL;
5897	} else {
5898		if (!(mc.flags & MOVE_FILE))
5899			return NULL;
5900	}
5901	get_page(page);
5902
5903	return page;
5904}
5905
5906#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5907static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5908			pte_t ptent, swp_entry_t *entry)
5909{
5910	struct page *page = NULL;
5911	swp_entry_t ent = pte_to_swp_entry(ptent);
5912
5913	if (!(mc.flags & MOVE_ANON))
5914		return NULL;
5915
5916	/*
5917	 * Handle device private pages that are not accessible by the CPU, but
5918	 * stored as special swap entries in the page table.
5919	 */
5920	if (is_device_private_entry(ent)) {
5921		page = pfn_swap_entry_to_page(ent);
5922		if (!get_page_unless_zero(page))
5923			return NULL;
5924		return page;
5925	}
5926
5927	if (non_swap_entry(ent))
5928		return NULL;
5929
5930	/*
5931	 * Because swap_cache_get_folio() updates some statistics counters,
5932	 * we call find_get_page() with swapper_space directly.
5933	 */
5934	page = find_get_page(swap_address_space(ent), swp_offset(ent));
5935	entry->val = ent.val;
5936
5937	return page;
5938}
5939#else
5940static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5941			pte_t ptent, swp_entry_t *entry)
5942{
5943	return NULL;
5944}
5945#endif
5946
5947static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5948			unsigned long addr, pte_t ptent)
5949{
5950	unsigned long index;
5951	struct folio *folio;
5952
5953	if (!vma->vm_file) /* anonymous vma */
5954		return NULL;
5955	if (!(mc.flags & MOVE_FILE))
5956		return NULL;
5957
5958	/* folio is moved even if it's not RSS of this task (page-faulted). */
5959	/* shmem/tmpfs may report page out on swap: account for that too. */
5960	index = linear_page_index(vma, addr);
5961	folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index);
5962	if (IS_ERR(folio))
5963		return NULL;
5964	return folio_file_page(folio, index);
5965}
5966
5967/**
5968 * mem_cgroup_move_account - move account of the page
5969 * @page: the page
5970 * @compound: charge the page as compound or small page
5971 * @from: mem_cgroup which the page is moved from.
5972 * @to:	mem_cgroup which the page is moved to. @from != @to.
5973 *
5974 * The page must be locked and not on the LRU.
5975 *
5976 * This function doesn't "charge" the new cgroup and doesn't "uncharge"
5977 * the old cgroup.
5978 */
5979static int mem_cgroup_move_account(struct page *page,
5980				   bool compound,
5981				   struct mem_cgroup *from,
5982				   struct mem_cgroup *to)
5983{
5984	struct folio *folio = page_folio(page);
5985	struct lruvec *from_vec, *to_vec;
5986	struct pglist_data *pgdat;
5987	unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
5988	int nid, ret;
5989
5990	VM_BUG_ON(from == to);
5991	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5992	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
5993	VM_BUG_ON(compound && !folio_test_large(folio));
5994
5995	ret = -EINVAL;
5996	if (folio_memcg(folio) != from)
5997		goto out;
5998
5999	pgdat = folio_pgdat(folio);
6000	from_vec = mem_cgroup_lruvec(from, pgdat);
6001	to_vec = mem_cgroup_lruvec(to, pgdat);
6002
6003	folio_memcg_lock(folio);
6004
6005	if (folio_test_anon(folio)) {
6006		if (folio_mapped(folio)) {
6007			__mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
6008			__mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
6009			if (folio_test_pmd_mappable(folio)) {
6010				__mod_lruvec_state(from_vec, NR_ANON_THPS,
6011						   -nr_pages);
6012				__mod_lruvec_state(to_vec, NR_ANON_THPS,
6013						   nr_pages);
6014			}
6015		}
6016	} else {
6017		__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
6018		__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
6019
6020		if (folio_test_swapbacked(folio)) {
6021			__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
6022			__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
6023		}
6024
6025		if (folio_mapped(folio)) {
6026			__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
6027			__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
6028		}
6029
6030		if (folio_test_dirty(folio)) {
6031			struct address_space *mapping = folio_mapping(folio);
6032
6033			if (mapping_can_writeback(mapping)) {
6034				__mod_lruvec_state(from_vec, NR_FILE_DIRTY,
6035						   -nr_pages);
6036				__mod_lruvec_state(to_vec, NR_FILE_DIRTY,
6037						   nr_pages);
6038			}
6039		}
6040	}
6041
6042#ifdef CONFIG_SWAP
6043	if (folio_test_swapcache(folio)) {
6044		__mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages);
6045		__mod_lruvec_state(to_vec, NR_SWAPCACHE, nr_pages);
6046	}
6047#endif
6048	if (folio_test_writeback(folio)) {
6049		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
6050		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
6051	}
6052
6053	/*
6054	 * All state has been migrated, let's switch to the new memcg.
6055	 *
6056	 * It is safe to change page's memcg here because the page
6057	 * is referenced, charged, isolated, and locked: we can't race
6058	 * with (un)charging, migration, LRU putback, or anything else
6059	 * that would rely on a stable page's memory cgroup.
6060	 *
6061	 * Note that folio_memcg_lock is a memcg lock, not a page lock,
6062	 * to save space. As soon as we switch page's memory cgroup to a
6063	 * new memcg that isn't locked, the above state can change
6064	 * concurrently again. Make sure we're truly done with it.
6065	 */
6066	smp_mb();
6067
6068	css_get(&to->css);
6069	css_put(&from->css);
6070
6071	folio->memcg_data = (unsigned long)to;
6072
6073	__folio_memcg_unlock(from);
6074
6075	ret = 0;
6076	nid = folio_nid(folio);
6077
6078	local_irq_disable();
6079	mem_cgroup_charge_statistics(to, nr_pages);
6080	memcg_check_events(to, nid);
6081	mem_cgroup_charge_statistics(from, -nr_pages);
6082	memcg_check_events(from, nid);
6083	local_irq_enable();
6084out:
6085	return ret;
6086}
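
/*
 * Caller protocol in a nutshell - a sketch mirroring the move-charge
 * walker further below, not a separate API:
 *
 *	if (trylock_page(page)) {
 *		if (isolate_lru_page(page)) {
 *			if (!mem_cgroup_move_account(page, false,
 *						     mc.from, mc.to))
 *				mc.moved_charge++;
 *			putback_lru_page(page);
 *		}
 *		unlock_page(page);
 *	}
 *
 * The page must be locked and off the LRU for the move itself; the
 * page counters are only settled later in __mem_cgroup_clear_mc().
 */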
6087
6088/**
6089 * get_mctgt_type - get target type of moving charge
6090 * @vma: the vma the pte to be checked belongs to
6091 * @addr: the address corresponding to the pte to be checked
6092 * @ptent: the pte to be checked
6093 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
6094 *
6095 * Context: Called with pte lock held.
6096 * Return:
6097 * * MC_TARGET_NONE - If the pte is not a target for move charge.
6098 * * MC_TARGET_PAGE - If the page corresponding to this pte is a target for
6099 *   move charge. If @target is not NULL, the page is stored in target->page
6100 *   with extra refcnt taken (Caller should release it).
6101 * * MC_TARGET_SWAP - If the swap entry corresponding to this pte is a
6102 *   target for charge migration.  If @target is not NULL, the entry is
6103 *   stored in target->ent.
6104 * * MC_TARGET_DEVICE - Like MC_TARGET_PAGE but page is device memory and
6105 *   thus not on the lru. For now such a page is charged like a regular
6106 *   page would be, as it is just special memory taking the place of a
6107 *   regular page. See Documentation/vm/hmm.txt and include/linux/hmm.h.
6108 */
6109static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
6110		unsigned long addr, pte_t ptent, union mc_target *target)
6111{
6112	struct page *page = NULL;
6113	enum mc_target_type ret = MC_TARGET_NONE;
6114	swp_entry_t ent = { .val = 0 };
6115
6116	if (pte_present(ptent))
6117		page = mc_handle_present_pte(vma, addr, ptent);
6118	else if (pte_none_mostly(ptent))
6119		/*
6120		 * PTE markers should be treated as a none pte here, separated
6121		 * from other swap handling below.
6122		 */
6123		page = mc_handle_file_pte(vma, addr, ptent);
6124	else if (is_swap_pte(ptent))
6125		page = mc_handle_swap_pte(vma, ptent, &ent);
6126
6127	if (target && page) {
6128		if (!trylock_page(page)) {
6129			put_page(page);
6130			return ret;
6131		}
6132		/*
6133		 * page_mapped() must be stable during the move. This
6134		 * pte is locked, so if it's present, the page cannot
6135		 * become unmapped. If it isn't, we have only partial
6136		 * control over the mapped state: the page lock will
6137		 * prevent new faults against pagecache and swapcache,
6138		 * so an unmapped page cannot become mapped. However,
6139		 * if the page is already mapped elsewhere, it can
6140		 * unmap, and there is nothing we can do about it.
6141		 * Alas, skip moving the page in this case.
6142		 */
6143		if (!pte_present(ptent) && page_mapped(page)) {
6144			unlock_page(page);
6145			put_page(page);
6146			return ret;
6147		}
6148	}
6149
6150	if (!page && !ent.val)
6151		return ret;
6152	if (page) {
6153		/*
6154		 * Do only a loose check without serialization;
6155		 * mem_cgroup_move_account() checks whether the page is
6156		 * valid under LRU exclusion.
6157		 */
6158		if (page_memcg(page) == mc.from) {
6159			ret = MC_TARGET_PAGE;
6160			if (is_device_private_page(page) ||
6161			    is_device_coherent_page(page))
6162				ret = MC_TARGET_DEVICE;
6163			if (target)
6164				target->page = page;
6165		}
6166		if (!ret || !target) {
6167			if (target)
6168				unlock_page(page);
6169			put_page(page);
6170		}
6171	}
6172	/*
6173	 * There is a swap entry but the page doesn't exist or isn't charged.
6174	 * But we cannot move a tail-page in a THP.
6175	 */
6176	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
6177	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
6178		ret = MC_TARGET_SWAP;
6179		if (target)
6180			target->ent = ent;
6181	}
6182	return ret;
6183}
6184
6185#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6186/*
6187 * We don't consider PMD-mapped swap or file-mapped pages because THP does
6188 * not support them for now.
6189 * Caller should make sure that pmd_trans_huge(pmd) is true.
6190 */
6191static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6192		unsigned long addr, pmd_t pmd, union mc_target *target)
6193{
6194	struct page *page = NULL;
6195	enum mc_target_type ret = MC_TARGET_NONE;
6196
6197	if (unlikely(is_swap_pmd(pmd))) {
6198		VM_BUG_ON(thp_migration_supported() &&
6199				  !is_pmd_migration_entry(pmd));
6200		return ret;
6201	}
6202	page = pmd_page(pmd);
6203	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
6204	if (!(mc.flags & MOVE_ANON))
6205		return ret;
6206	if (page_memcg(page) == mc.from) {
6207		ret = MC_TARGET_PAGE;
6208		if (target) {
6209			get_page(page);
6210			if (!trylock_page(page)) {
6211				put_page(page);
6212				return MC_TARGET_NONE;
6213			}
6214			target->page = page;
6215		}
6216	}
6217	return ret;
6218}
6219#else
6220static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6221		unsigned long addr, pmd_t pmd, union mc_target *target)
6222{
6223	return MC_TARGET_NONE;
6224}
6225#endif
6226
6227static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
6228					unsigned long addr, unsigned long end,
6229					struct mm_walk *walk)
6230{
6231	struct vm_area_struct *vma = walk->vma;
6232	pte_t *pte;
6233	spinlock_t *ptl;
6234
6235	ptl = pmd_trans_huge_lock(pmd, vma);
6236	if (ptl) {
6237		/*
6238		 * Note there cannot be MC_TARGET_DEVICE for now, as we do not
6239		 * support transparent huge pages with MEMORY_DEVICE_PRIVATE,
6240		 * but this might change.
6241		 */
6242		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
6243			mc.precharge += HPAGE_PMD_NR;
6244		spin_unlock(ptl);
6245		return 0;
6246	}
6247
6248	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6249	if (!pte)
6250		return 0;
6251	for (; addr != end; pte++, addr += PAGE_SIZE)
6252		if (get_mctgt_type(vma, addr, ptep_get(pte), NULL))
6253			mc.precharge++;	/* increment precharge temporarily */
6254	pte_unmap_unlock(pte - 1, ptl);
6255	cond_resched();
6256
6257	return 0;
6258}
6259
6260static const struct mm_walk_ops precharge_walk_ops = {
6261	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
6262	.walk_lock	= PGWALK_RDLOCK,
6263};
6264
6265static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
6266{
6267	unsigned long precharge;
6268
6269	mmap_read_lock(mm);
6270	walk_page_range(mm, 0, ULONG_MAX, &precharge_walk_ops, NULL);
6271	mmap_read_unlock(mm);
6272
6273	precharge = mc.precharge;
6274	mc.precharge = 0;
6275
6276	return precharge;
6277}
6278
6279static int mem_cgroup_precharge_mc(struct mm_struct *mm)
6280{
6281	unsigned long precharge = mem_cgroup_count_precharge(mm);
6282
6283	VM_BUG_ON(mc.moving_task);
6284	mc.moving_task = current;
6285	return mem_cgroup_do_precharge(precharge);
6286}
6287
6288/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6289static void __mem_cgroup_clear_mc(void)
6290{
6291	struct mem_cgroup *from = mc.from;
6292	struct mem_cgroup *to = mc.to;
6293
6294	/* we must uncharge all the leftover precharges from mc.to */
6295	if (mc.precharge) {
6296		mem_cgroup_cancel_charge(mc.to, mc.precharge);
6297		mc.precharge = 0;
6298	}
6299	/*
6300	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6301	 * we must uncharge here.
6302	 */
6303	if (mc.moved_charge) {
6304		mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
6305		mc.moved_charge = 0;
6306	}
6307	/* we must fixup refcnts and charges */
6308	if (mc.moved_swap) {
6309		/* uncharge swap account from the old cgroup */
6310		if (!mem_cgroup_is_root(mc.from))
6311			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
6312
6313		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
6314
6315		/*
6316		 * we charged both to->memory and to->memsw, so we
6317		 * should uncharge to->memory.
6318		 */
6319		if (!mem_cgroup_is_root(mc.to))
6320			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
6321
6322		mc.moved_swap = 0;
6323	}
6324	memcg_oom_recover(from);
6325	memcg_oom_recover(to);
6326	wake_up_all(&mc.waitq);
6327}
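
/*
 * Worked example for the moved_swap fixup above (illustrative): each
 * moved swap entry consumed one precharge, which charged both
 * to->memory and to->memsw, yet a swapped-out entry only needs the
 * memsw half. For moved_swap == 3, the fixup therefore uncharges 3
 * from mc.from->memsw (the old owner of the entries), drops 3 of
 * mc.from's swap-record ID references, and returns the 3 superfluous
 * charges to mc.to->memory - leaving one memsw charge on mc.to per
 * moved entry.
 */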
6328
6329static void mem_cgroup_clear_mc(void)
6330{
6331	struct mm_struct *mm = mc.mm;
6332
6333	/*
6334	 * we must clear moving_task before waking up waiters at the end of
6335	 * task migration.
6336	 */
6337	mc.moving_task = NULL;
6338	__mem_cgroup_clear_mc();
6339	spin_lock(&mc.lock);
6340	mc.from = NULL;
6341	mc.to = NULL;
6342	mc.mm = NULL;
6343	spin_unlock(&mc.lock);
6344
6345	mmput(mm);
6346}
6347
6348static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6349{
6350	struct cgroup_subsys_state *css;
6351	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
6352	struct mem_cgroup *from;
6353	struct task_struct *leader, *p;
6354	struct mm_struct *mm;
6355	unsigned long move_flags;
6356	int ret = 0;
6357
6358	/* charge immigration isn't supported on the default hierarchy */
6359	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6360		return 0;
6361
6362	/*
6363	 * Multi-process migrations only happen on the default hierarchy
6364	 * where charge immigration is not used.  Perform charge
6365	 * immigration if @tset contains a leader and whine if there are
6366	 * multiple.
6367	 */
6368	p = NULL;
6369	cgroup_taskset_for_each_leader(leader, css, tset) {
6370		WARN_ON_ONCE(p);
6371		p = leader;
6372		memcg = mem_cgroup_from_css(css);
6373	}
6374	if (!p)
6375		return 0;
6376
6377	/*
6378	 * We are now committed to this value whatever it is. Changes in this
6379	 * tunable will only affect upcoming migrations, not the current one.
6380	 * So we need to save it and use it for the rest of the migration.
6381	 */
6382	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
6383	if (!move_flags)
6384		return 0;
6385
6386	from = mem_cgroup_from_task(p);
6387
6388	VM_BUG_ON(from == memcg);
6389
6390	mm = get_task_mm(p);
6391	if (!mm)
6392		return 0;
6393	/* We move charges only when we move an owner of the mm */
6394	if (mm->owner == p) {
6395		VM_BUG_ON(mc.from);
6396		VM_BUG_ON(mc.to);
6397		VM_BUG_ON(mc.precharge);
6398		VM_BUG_ON(mc.moved_charge);
6399		VM_BUG_ON(mc.moved_swap);
6400
6401		spin_lock(&mc.lock);
6402		mc.mm = mm;
6403		mc.from = from;
6404		mc.to = memcg;
6405		mc.flags = move_flags;
6406		spin_unlock(&mc.lock);
6407		/* We set mc.moving_task later */
6408
6409		ret = mem_cgroup_precharge_mc(mm);
6410		if (ret)
6411			mem_cgroup_clear_mc();
6412	} else {
6413		mmput(mm);
6414	}
6415	return ret;
6416}
6417
6418static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6419{
6420	if (mc.to)
6421		mem_cgroup_clear_mc();
6422}
6423
6424static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6425				unsigned long addr, unsigned long end,
6426				struct mm_walk *walk)
6427{
6428	int ret = 0;
6429	struct vm_area_struct *vma = walk->vma;
6430	pte_t *pte;
6431	spinlock_t *ptl;
6432	enum mc_target_type target_type;
6433	union mc_target target;
6434	struct page *page;
6435
6436	ptl = pmd_trans_huge_lock(pmd, vma);
6437	if (ptl) {
6438		if (mc.precharge < HPAGE_PMD_NR) {
6439			spin_unlock(ptl);
6440			return 0;
6441		}
6442		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6443		if (target_type == MC_TARGET_PAGE) {
6444			page = target.page;
6445			if (isolate_lru_page(page)) {
6446				if (!mem_cgroup_move_account(page, true,
6447							     mc.from, mc.to)) {
6448					mc.precharge -= HPAGE_PMD_NR;
6449					mc.moved_charge += HPAGE_PMD_NR;
6450				}
6451				putback_lru_page(page);
6452			}
6453			unlock_page(page);
6454			put_page(page);
6455		} else if (target_type == MC_TARGET_DEVICE) {
6456			page = target.page;
6457			if (!mem_cgroup_move_account(page, true,
6458						     mc.from, mc.to)) {
6459				mc.precharge -= HPAGE_PMD_NR;
6460				mc.moved_charge += HPAGE_PMD_NR;
6461			}
6462			unlock_page(page);
6463			put_page(page);
6464		}
6465		spin_unlock(ptl);
6466		return 0;
6467	}
6468
6469retry:
6470	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6471	if (!pte)
6472		return 0;
6473	for (; addr != end; addr += PAGE_SIZE) {
6474		pte_t ptent = ptep_get(pte++);
6475		bool device = false;
6476		swp_entry_t ent;
6477
6478		if (!mc.precharge)
6479			break;
6480
6481		switch (get_mctgt_type(vma, addr, ptent, &target)) {
6482		case MC_TARGET_DEVICE:
6483			device = true;
6484			fallthrough;
6485		case MC_TARGET_PAGE:
6486			page = target.page;
6487			/*
6488			 * We can have part of a split pmd here. Moving it
6489			 * could be done, but it would be too convoluted, so
6490			 * simply ignore such a partial THP and keep it in its
6491			 * original memcg. There should be somebody mapping the head.
6492			 */
6493			if (PageTransCompound(page))
6494				goto put;
6495			if (!device && !isolate_lru_page(page))
6496				goto put;
6497			if (!mem_cgroup_move_account(page, false,
6498						mc.from, mc.to)) {
6499				mc.precharge--;
6500				/* we uncharge from mc.from later. */
6501				mc.moved_charge++;
6502			}
6503			if (!device)
6504				putback_lru_page(page);
6505put:			/* get_mctgt_type() gets & locks the page */
6506			unlock_page(page);
6507			put_page(page);
6508			break;
6509		case MC_TARGET_SWAP:
6510			ent = target.ent;
6511			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6512				mc.precharge--;
6513				mem_cgroup_id_get_many(mc.to, 1);
6514				/* we fixup other refcnts and charges later. */
6515				mc.moved_swap++;
6516			}
6517			break;
6518		default:
6519			break;
6520		}
6521	}
6522	pte_unmap_unlock(pte - 1, ptl);
6523	cond_resched();
6524
6525	if (addr != end) {
6526		/*
6527		 * We have consumed all the precharges we got in can_attach().
6528		 * We try to charge one by one, but don't do any additional
6529		 * charges to mc.to once a charge has failed during the
6530		 * attach() phase.
6531		 */
6532		ret = mem_cgroup_do_precharge(1);
6533		if (!ret)
6534			goto retry;
6535	}
6536
6537	return ret;
6538}
6539
6540static const struct mm_walk_ops charge_walk_ops = {
6541	.pmd_entry	= mem_cgroup_move_charge_pte_range,
6542	.walk_lock	= PGWALK_RDLOCK,
6543};
6544
6545static void mem_cgroup_move_charge(void)
6546{
6547	lru_add_drain_all();
6548	/*
6549	 * Signal folio_memcg_lock() to take the memcg's move_lock
6550	 * while we're moving its pages to another memcg. Then wait
6551	 * for already started RCU-only updates to finish.
6552	 */
6553	atomic_inc(&mc.from->moving_account);
6554	synchronize_rcu();
6555retry:
6556	if (unlikely(!mmap_read_trylock(mc.mm))) {
6557		/*
6558		 * Someone who is holding the mmap_lock might be waiting in
6559		 * waitq. So we cancel all extra charges, wake up all waiters,
6560		 * and retry. Because we cancel precharges, we might not be able
6561		 * to move enough charges, but moving charge is a best-effort
6562		 * feature anyway, so it wouldn't be a big problem.
6563		 */
6564		__mem_cgroup_clear_mc();
6565		cond_resched();
6566		goto retry;
6567	}
6568	/*
6569	 * When we have consumed all precharges and failed in doing
6570	 * additional charge, the page walk just aborts.
6571	 */
6572	walk_page_range(mc.mm, 0, ULONG_MAX, &charge_walk_ops, NULL);
6573	mmap_read_unlock(mc.mm);
6574	atomic_dec(&mc.from->moving_account);
6575}
6576
6577static void mem_cgroup_move_task(void)
6578{
6579	if (mc.to) {
6580		mem_cgroup_move_charge();
6581		mem_cgroup_clear_mc();
6582	}
6583}
6584
6585#else	/* !CONFIG_MMU */
6586static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6587{
6588	return 0;
6589}
6590static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6591{
6592}
6593static void mem_cgroup_move_task(void)
6594{
6595}
6596#endif
6597
6598#ifdef CONFIG_MEMCG_KMEM
6599static void mem_cgroup_fork(struct task_struct *task)
6600{
6601	/*
6602	 * Set the update flag to cause task->objcg to be initialized lazily
6603	 * on the first allocation. It can be done without any synchronization
6604	 * because it's always performed on the current task, as is
6605	 * current_objcg_update().
6606	 */
6607	task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
6608}
6609
6610static void mem_cgroup_exit(struct task_struct *task)
6611{
6612	struct obj_cgroup *objcg = task->objcg;
6613
6614	objcg = (struct obj_cgroup *)
6615		((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
6616	if (objcg)
6617		obj_cgroup_put(objcg);
6618
6619	/*
6620	 * Some kernel allocations can happen after this point,
6621	 * but let's ignore them. It can be done without any synchronization
6622	 * because it's always performed on the current task, as is
6623	 * current_objcg_update().
6624	 */
6625	task->objcg = NULL;
6626}
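
/*
 * A sketch of the encoding used above (assuming the update flag is
 * bit 0, which is free because obj_cgroup pointers are word-aligned):
 *
 *	task->objcg = objcg | CURRENT_OBJCG_UPDATE_FLAG;   (set on fork/attach)
 *	objcg = task->objcg & ~CURRENT_OBJCG_UPDATE_FLAG;  (strip for use)
 *
 * The next allocation on the task notices the flag and refreshes the
 * cached objcg via current_objcg_update().
 */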
6627#endif
6628
6629#ifdef CONFIG_LRU_GEN
6630static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
6631{
6632	struct task_struct *task;
6633	struct cgroup_subsys_state *css;
6634
6635	/* find the first leader if there is any */
6636	cgroup_taskset_for_each_leader(task, css, tset)
6637		break;
6638
6639	if (!task)
6640		return;
6641
6642	task_lock(task);
6643	if (task->mm && READ_ONCE(task->mm->owner) == task)
6644		lru_gen_migrate_mm(task->mm);
6645	task_unlock(task);
6646}
6647#else
6648static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
6649#endif /* CONFIG_LRU_GEN */
6650
6651#ifdef CONFIG_MEMCG_KMEM
6652static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
6653{
6654	struct task_struct *task;
6655	struct cgroup_subsys_state *css;
6656
6657	cgroup_taskset_for_each(task, css, tset) {
6658		/* atomically set the update bit */
6659		set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
6660	}
6661}
6662#else
6663static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset) {}
6664#endif /* CONFIG_MEMCG_KMEM */
6665
6666#if defined(CONFIG_LRU_GEN) || defined(CONFIG_MEMCG_KMEM)
6667static void mem_cgroup_attach(struct cgroup_taskset *tset)
6668{
6669	mem_cgroup_lru_gen_attach(tset);
6670	mem_cgroup_kmem_attach(tset);
6671}
6672#endif
6673
6674static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6675{
6676	if (value == PAGE_COUNTER_MAX)
6677		seq_puts(m, "max\n");
6678	else
6679		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6680
6681	return 0;
6682}
6683
6684static u64 memory_current_read(struct cgroup_subsys_state *css,
6685			       struct cftype *cft)
6686{
6687	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6688
6689	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6690}
6691
6692static u64 memory_peak_read(struct cgroup_subsys_state *css,
6693			    struct cftype *cft)
6694{
6695	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6696
6697	return (u64)memcg->memory.watermark * PAGE_SIZE;
6698}
6699
6700static int memory_min_show(struct seq_file *m, void *v)
6701{
6702	return seq_puts_memcg_tunable(m,
6703		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6704}
6705
6706static ssize_t memory_min_write(struct kernfs_open_file *of,
6707				char *buf, size_t nbytes, loff_t off)
6708{
6709	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6710	unsigned long min;
6711	int err;
6712
6713	buf = strstrip(buf);
6714	err = page_counter_memparse(buf, "max", &min);
6715	if (err)
6716		return err;
6717
6718	page_counter_set_min(&memcg->memory, min);
6719
6720	return nbytes;
6721}
6722
6723static int memory_low_show(struct seq_file *m, void *v)
6724{
6725	return seq_puts_memcg_tunable(m,
6726		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6727}
6728
6729static ssize_t memory_low_write(struct kernfs_open_file *of,
6730				char *buf, size_t nbytes, loff_t off)
6731{
6732	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6733	unsigned long low;
6734	int err;
6735
6736	buf = strstrip(buf);
6737	err = page_counter_memparse(buf, "max", &low);
6738	if (err)
6739		return err;
6740
6741	page_counter_set_low(&memcg->memory, low);
6742
6743	return nbytes;
6744}
6745
6746static int memory_high_show(struct seq_file *m, void *v)
6747{
6748	return seq_puts_memcg_tunable(m,
6749		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6750}
6751
6752static ssize_t memory_high_write(struct kernfs_open_file *of,
6753				 char *buf, size_t nbytes, loff_t off)
6754{
6755	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6756	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6757	bool drained = false;
6758	unsigned long high;
6759	int err;
6760
6761	buf = strstrip(buf);
6762	err = page_counter_memparse(buf, "max", &high);
6763	if (err)
6764		return err;
6765
6766	page_counter_set_high(&memcg->memory, high);
6767
6768	for (;;) {
6769		unsigned long nr_pages = page_counter_read(&memcg->memory);
6770		unsigned long reclaimed;
6771
6772		if (nr_pages <= high)
6773			break;
6774
6775		if (signal_pending(current))
6776			break;
6777
6778		if (!drained) {
6779			drain_all_stock(memcg);
6780			drained = true;
6781			continue;
6782		}
6783
6784		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6785					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP);
6786
6787		if (!reclaimed && !nr_retries--)
6788			break;
6789	}
6790
6791	memcg_wb_domain_size_changed(memcg);
6792	return nbytes;
6793}
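
/*
 * Illustrative behaviour of the write above: lowering memory.high to
 * 512M on a group currently using 640M sets the limit first, drains
 * the percpu stocks once, then calls try_to_free_mem_cgroup_pages()
 * for the 128M excess, retrying up to MAX_RECLAIM_RETRIES times when
 * nothing was reclaimed. A pending signal or exhausted retries ends
 * the loop early; the new limit stays in place either way and any
 * remaining excess is handled by the normal high-limit throttling.
 */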
6794
6795static int memory_max_show(struct seq_file *m, void *v)
6796{
6797	return seq_puts_memcg_tunable(m,
6798		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6799}
6800
6801static ssize_t memory_max_write(struct kernfs_open_file *of,
6802				char *buf, size_t nbytes, loff_t off)
6803{
6804	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6805	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6806	bool drained = false;
6807	unsigned long max;
6808	int err;
6809
6810	buf = strstrip(buf);
6811	err = page_counter_memparse(buf, "max", &max);
6812	if (err)
6813		return err;
6814
6815	xchg(&memcg->memory.max, max);
6816
6817	for (;;) {
6818		unsigned long nr_pages = page_counter_read(&memcg->memory);
6819
6820		if (nr_pages <= max)
6821			break;
6822
6823		if (signal_pending(current))
6824			break;
6825
6826		if (!drained) {
6827			drain_all_stock(memcg);
6828			drained = true;
6829			continue;
6830		}
6831
6832		if (nr_reclaims) {
6833			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6834					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP))
6835				nr_reclaims--;
6836			continue;
6837		}
6838
6839		memcg_memory_event(memcg, MEMCG_OOM);
6840		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6841			break;
6842	}
6843
6844	memcg_wb_domain_size_changed(memcg);
6845	return nbytes;
6846}
6847
6848/*
6849 * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
6850 * if any new events become available.
6851 */
6852static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6853{
6854	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6855	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6856	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6857	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6858	seq_printf(m, "oom_kill %lu\n",
6859		   atomic_long_read(&events[MEMCG_OOM_KILL]));
6860	seq_printf(m, "oom_group_kill %lu\n",
6861		   atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
6862}
6863
6864static int memory_events_show(struct seq_file *m, void *v)
6865{
6866	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6867
6868	__memory_events_show(m, memcg->memory_events);
6869	return 0;
6870}
6871
6872static int memory_events_local_show(struct seq_file *m, void *v)
6873{
6874	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6875
6876	__memory_events_show(m, memcg->memory_events_local);
6877	return 0;
6878}
6879
6880static int memory_stat_show(struct seq_file *m, void *v)
6881{
6882	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6883	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
6884	struct seq_buf s;
6885
6886	if (!buf)
6887		return -ENOMEM;
6888	seq_buf_init(&s, buf, PAGE_SIZE);
6889	memory_stat_format(memcg, &s);
6890	seq_puts(m, buf);
6891	kfree(buf);
6892	return 0;
6893}
6894
6895#ifdef CONFIG_NUMA
6896static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6897						     int item)
6898{
6899	return lruvec_page_state(lruvec, item) *
6900		memcg_page_state_output_unit(item);
6901}
6902
6903static int memory_numa_stat_show(struct seq_file *m, void *v)
6904{
6905	int i;
6906	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6907
6908	mem_cgroup_flush_stats(memcg);
6909
6910	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6911		int nid;
6912
6913		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6914			continue;
6915
6916		seq_printf(m, "%s", memory_stats[i].name);
6917		for_each_node_state(nid, N_MEMORY) {
6918			u64 size;
6919			struct lruvec *lruvec;
6920
6921			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6922			size = lruvec_page_state_output(lruvec,
6923							memory_stats[i].idx);
6924			seq_printf(m, " N%d=%llu", nid, size);
6925		}
6926		seq_putc(m, '\n');
6927	}
6928
6929	return 0;
6930}
6931#endif
6932
6933static int memory_oom_group_show(struct seq_file *m, void *v)
6934{
6935	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6936
6937	seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
6938
6939	return 0;
6940}
6941
6942static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6943				      char *buf, size_t nbytes, loff_t off)
6944{
6945	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6946	int ret, oom_group;
6947
6948	buf = strstrip(buf);
6949	if (!buf)
6950		return -EINVAL;
6951
6952	ret = kstrtoint(buf, 0, &oom_group);
6953	if (ret)
6954		return ret;
6955
6956	if (oom_group != 0 && oom_group != 1)
6957		return -EINVAL;
6958
6959	WRITE_ONCE(memcg->oom_group, oom_group);
6960
6961	return nbytes;
6962}
6963
6964static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
6965			      size_t nbytes, loff_t off)
6966{
6967	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6968	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6969	unsigned long nr_to_reclaim, nr_reclaimed = 0;
6970	unsigned int reclaim_options;
6971	int err;
6972
6973	buf = strstrip(buf);
6974	err = page_counter_memparse(buf, "", &nr_to_reclaim);
6975	if (err)
6976		return err;
6977
6978	reclaim_options	= MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
6979	while (nr_reclaimed < nr_to_reclaim) {
6980		unsigned long reclaimed;
6981
6982		if (signal_pending(current))
6983			return -EINTR;
6984
6985		/*
6986		 * This is the final attempt; drain percpu lru caches in the
6987		 * hope of introducing more evictable pages for
6988		 * try_to_free_mem_cgroup_pages().
6989		 */
6990		if (!nr_retries)
6991			lru_add_drain_all();
6992
6993		reclaimed = try_to_free_mem_cgroup_pages(memcg,
6994					min(nr_to_reclaim - nr_reclaimed, SWAP_CLUSTER_MAX),
6995					GFP_KERNEL, reclaim_options);
6996
6997		if (!reclaimed && !nr_retries--)
6998			return -EAGAIN;
6999
7000		nr_reclaimed += reclaimed;
7001	}
7002
7003	return nbytes;
7004}
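
/*
 * Worked example (illustrative): writing "64M" here requests 16384
 * pages with 4k pages. Each iteration asks for at most
 * SWAP_CLUSTER_MAX of the remainder and accumulates whatever reclaim
 * actually returned; the MAX_RECLAIM_RETRIES budget only shrinks on
 * iterations that reclaim nothing, and the last attempt is preceded
 * by lru_add_drain_all() to flush the percpu LRU caches.
 */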
7005
7006static struct cftype memory_files[] = {
7007	{
7008		.name = "current",
7009		.flags = CFTYPE_NOT_ON_ROOT,
7010		.read_u64 = memory_current_read,
7011	},
7012	{
7013		.name = "peak",
7014		.flags = CFTYPE_NOT_ON_ROOT,
7015		.read_u64 = memory_peak_read,
7016	},
7017	{
7018		.name = "min",
7019		.flags = CFTYPE_NOT_ON_ROOT,
7020		.seq_show = memory_min_show,
7021		.write = memory_min_write,
7022	},
7023	{
7024		.name = "low",
7025		.flags = CFTYPE_NOT_ON_ROOT,
7026		.seq_show = memory_low_show,
7027		.write = memory_low_write,
7028	},
7029	{
7030		.name = "high",
7031		.flags = CFTYPE_NOT_ON_ROOT,
7032		.seq_show = memory_high_show,
7033		.write = memory_high_write,
7034	},
7035	{
7036		.name = "max",
7037		.flags = CFTYPE_NOT_ON_ROOT,
7038		.seq_show = memory_max_show,
7039		.write = memory_max_write,
7040	},
7041	{
7042		.name = "events",
7043		.flags = CFTYPE_NOT_ON_ROOT,
7044		.file_offset = offsetof(struct mem_cgroup, events_file),
7045		.seq_show = memory_events_show,
7046	},
7047	{
7048		.name = "events.local",
7049		.flags = CFTYPE_NOT_ON_ROOT,
7050		.file_offset = offsetof(struct mem_cgroup, events_local_file),
7051		.seq_show = memory_events_local_show,
7052	},
7053	{
7054		.name = "stat",
7055		.seq_show = memory_stat_show,
7056	},
7057#ifdef CONFIG_NUMA
7058	{
7059		.name = "numa_stat",
7060		.seq_show = memory_numa_stat_show,
7061	},
7062#endif
7063	{
7064		.name = "oom.group",
7065		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
7066		.seq_show = memory_oom_group_show,
7067		.write = memory_oom_group_write,
7068	},
7069	{
7070		.name = "reclaim",
7071		.flags = CFTYPE_NS_DELEGATABLE,
7072		.write = memory_reclaim,
7073	},
7074	{ }	/* terminate */
7075};
7076
7077struct cgroup_subsys memory_cgrp_subsys = {
7078	.css_alloc = mem_cgroup_css_alloc,
7079	.css_online = mem_cgroup_css_online,
7080	.css_offline = mem_cgroup_css_offline,
7081	.css_released = mem_cgroup_css_released,
7082	.css_free = mem_cgroup_css_free,
7083	.css_reset = mem_cgroup_css_reset,
7084	.css_rstat_flush = mem_cgroup_css_rstat_flush,
7085	.can_attach = mem_cgroup_can_attach,
7086#if defined(CONFIG_LRU_GEN) || defined(CONFIG_MEMCG_KMEM)
7087	.attach = mem_cgroup_attach,
7088#endif
7089	.cancel_attach = mem_cgroup_cancel_attach,
7090	.post_attach = mem_cgroup_move_task,
7091#ifdef CONFIG_MEMCG_KMEM
7092	.fork = mem_cgroup_fork,
7093	.exit = mem_cgroup_exit,
7094#endif
7095	.dfl_cftypes = memory_files,
7096	.legacy_cftypes = mem_cgroup_legacy_files,
7097	.early_init = 0,
7098};
7099
7100/*
7101 * This function calculates an individual cgroup's effective
7102 * protection which is derived from its own memory.min/low, its
7103 * parent's and siblings' settings, as well as the actual memory
7104 * distribution in the tree.
7105 *
7106 * The following rules apply to the effective protection values:
7107 *
7108 * 1. At the first level of reclaim, effective protection is equal to
7109 *    the declared protection in memory.min and memory.low.
7110 *
7111 * 2. To enable safe delegation of the protection configuration, at
7112 *    subsequent levels the effective protection is capped to the
7113 *    parent's effective protection.
7114 *
7115 * 3. To make complex and dynamic subtrees easier to configure, the
7116 *    user is allowed to overcommit the declared protection at a given
7117 *    level. If that is the case, the parent's effective protection is
7118 *    distributed to the children in proportion to how much protection
7119 *    they have declared and how much of it they are utilizing.
7120 *
7121 *    This makes distribution proportional, but also work-conserving:
7122 *    if one cgroup claims much more protection than it uses memory,
7123 *    the unused remainder is available to its siblings.
7124 *
7125 * 4. Conversely, when the declared protection is undercommitted at a
7126 *    given level, the distribution of the larger parental protection
7127 *    budget is NOT proportional. A cgroup's protection from a sibling
7128 *    is capped to its own memory.min/low setting.
7129 *
7130 * 5. However, to allow protecting recursive subtrees from each other
7131 *    without having to declare each individual cgroup's fixed share
7132 *    of the ancestor's claim to protection, any unutilized -
7133 *    "floating" - protection from up the tree is distributed in
7134 *    proportion to each cgroup's *usage*. This makes the protection
7135 *    neutral wrt sibling cgroups and lets them compete freely over
7136 *    the shared parental protection budget, but it protects the
7137 *    subtree as a whole from neighboring subtrees.
7138 *
7139 * Note that 4. and 5. are not in conflict: 4. is about protecting
7140 * against immediate siblings whereas 5. is about protecting against
7141 * neighboring subtrees.
7142 */
7143static unsigned long effective_protection(unsigned long usage,
7144					  unsigned long parent_usage,
7145					  unsigned long setting,
7146					  unsigned long parent_effective,
7147					  unsigned long siblings_protected)
7148{
7149	unsigned long protected;
7150	unsigned long ep;
7151
7152	protected = min(usage, setting);
7153	/*
7154	 * If all cgroups at this level combined claim and use more
7155	 * protection than what the parent affords them, distribute
7156	 * shares in proportion to utilization.
7157	 *
7158	 * We are using actual utilization rather than the statically
7159	 * claimed protection in order to be work-conserving: claimed
7160	 * but unused protection is available to siblings that would
7161	 * otherwise get a smaller chunk than what they claimed.
7162	 */
7163	if (siblings_protected > parent_effective)
7164		return protected * parent_effective / siblings_protected;
7165
7166	/*
7167	 * Ok, utilized protection of all children is within what the
7168	 * parent affords them, so we know whatever this child claims
7169	 * and utilizes is effectively protected.
7170	 *
7171	 * If there is unprotected usage beyond this value, reclaim
7172	 * will apply pressure in proportion to that amount.
7173	 *
7174	 * If there is unutilized protection, the cgroup will be fully
7175	 * shielded from reclaim, but we do return a smaller value for
7176	 * protection than what the group could enjoy in theory. This
7177	 * is okay. With the overcommit distribution above, effective
7178	 * protection is always dependent on how memory is actually
7179	 * consumed among the siblings anyway.
7180	 */
7181	ep = protected;
7182
7183	/*
7184	 * If the children aren't claiming (all of) the protection
7185	 * afforded to them by the parent, distribute the remainder in
7186	 * proportion to the (unprotected) memory of each cgroup. That
7187	 * way, cgroups that aren't explicitly prioritized wrt each
7188	 * other compete freely over the allowance, but they are
7189	 * collectively protected from neighboring trees.
7190	 *
7191	 * We're using unprotected memory for the weight so that if
7192	 * some cgroups DO claim explicit protection, we don't protect
7193	 * the same bytes twice.
7194	 *
7195	 * Check both usage and parent_usage against the respective
7196	 * protected values. One should imply the other, but they
7197	 * aren't read atomically - make sure the division is sane.
7198	 */
7199	if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
7200		return ep;
7201	if (parent_effective > siblings_protected &&
7202	    parent_usage > siblings_protected &&
7203	    usage > protected) {
7204		unsigned long unclaimed;
7205
7206		unclaimed = parent_effective - siblings_protected;
7207		unclaimed *= usage - protected;
7208		unclaimed /= parent_usage - siblings_protected;
7209
7210		ep += unclaimed;
7211	}
7212
7213	return ep;
7214}
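
/*
 * Worked example for the overcommit branch above (numbers invented):
 * parent_effective = 100 pages; two children each declare
 * setting = 80 but have usage = 60 and 100 respectively, so
 * protected = 60 and 80 and siblings_protected = 140 > 100. Each
 * child then receives
 *
 *	ep = protected * parent_effective / siblings_protected
 *
 * i.e. 60*100/140 = 42 and 80*100/140 = 57 pages - proportional to
 * utilized protection, and never exceeding the parent's budget.
 */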
7215
7216/**
7217 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
7218 * @root: the top ancestor of the sub-tree being checked
7219 * @memcg: the memory cgroup to check
7220 *
7221 * WARNING: This function is not stateless! It can only be used as part
7222 *          of a top-down tree iteration, not for isolated queries.
7223 */
7224void mem_cgroup_calculate_protection(struct mem_cgroup *root,
7225				     struct mem_cgroup *memcg)
7226{
7227	unsigned long usage, parent_usage;
7228	struct mem_cgroup *parent;
7229
7230	if (mem_cgroup_disabled())
7231		return;
7232
7233	if (!root)
7234		root = root_mem_cgroup;
7235
7236	/*
7237	 * Effective values of the reclaim targets are ignored so they
7238	 * can be stale. Have a look at mem_cgroup_protection for more
7239	 * details.
7240	 * TODO: calculation should be more robust so that we do not need
7241	 * that special casing.
7242	 */
7243	if (memcg == root)
7244		return;
7245
7246	usage = page_counter_read(&memcg->memory);
7247	if (!usage)
7248		return;
7249
7250	parent = parent_mem_cgroup(memcg);
7251
7252	if (parent == root) {
7253		memcg->memory.emin = READ_ONCE(memcg->memory.min);
7254		memcg->memory.elow = READ_ONCE(memcg->memory.low);
7255		return;
7256	}
7257
7258	parent_usage = page_counter_read(&parent->memory);
7259
7260	WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
7261			READ_ONCE(memcg->memory.min),
7262			READ_ONCE(parent->memory.emin),
7263			atomic_long_read(&parent->memory.children_min_usage)));
7264
7265	WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
7266			READ_ONCE(memcg->memory.low),
7267			READ_ONCE(parent->memory.elow),
7268			atomic_long_read(&parent->memory.children_low_usage)));
7269}
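
/*
 * Usage sketch (illustrative, in the vein of shrink_node_memcgs()):
 * the protection values are only meaningful when this is called from
 * a top-down walk of the tree:
 *
 *	memcg = mem_cgroup_iter(root, NULL, NULL);
 *	do {
 *		mem_cgroup_calculate_protection(root, memcg);
 *		if (mem_cgroup_below_min(root, memcg))
 *			continue;
 *		...
 *	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 *
 * since each child's emin/elow is derived from effective values the
 * same walk computed for its parent moments earlier.
 */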
7270
7271static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
7272			gfp_t gfp)
7273{
7274	int ret;
7275
7276	ret = try_charge(memcg, gfp, folio_nr_pages(folio));
7277	if (ret)
7278		goto out;
7279
7280	mem_cgroup_commit_charge(folio, memcg);
7281out:
7282	return ret;
7283}
7284
7285int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
7286{
7287	struct mem_cgroup *memcg;
7288	int ret;
7289
7290	memcg = get_mem_cgroup_from_mm(mm);
7291	ret = charge_memcg(folio, memcg, gfp);
7292	css_put(&memcg->css);
7293
7294	return ret;
7295}
7296
7297/**
7298 * mem_cgroup_hugetlb_try_charge - try to charge the memcg for a hugetlb folio
7299 * @memcg: memcg to charge.
7300 * @gfp: reclaim mode.
7301 * @nr_pages: number of pages to charge.
7302 *
7303 * This function is called when allocating a huge page folio to determine if
7304 * the memcg has the capacity for it. It does not commit the charge yet,
7305 * as the hugetlb folio itself has not been obtained from the hugetlb pool.
7306 *
7307 * Once we have obtained the hugetlb folio, we can call
7308 * mem_cgroup_commit_charge() to commit the charge. If we fail to obtain the
7309 * folio, we should instead call mem_cgroup_cancel_charge() to undo the effect
7310 * of try_charge().
7311 *
7312 * Returns 0 on success. Otherwise, an error code is returned.
7313 */
7314int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
7315			long nr_pages)
7316{
7317	/*
7318	 * If hugetlb memcg charging is not enabled, do not fail hugetlb allocation,
7319	 * but do not attempt to commit charge later (or cancel on error) either.
7320	 */
7321	if (mem_cgroup_disabled() || !memcg ||
7322		!cgroup_subsys_on_dfl(memory_cgrp_subsys) ||
7323		!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
7324		return -EOPNOTSUPP;
7325
7326	if (try_charge(memcg, gfp, nr_pages))
7327		return -ENOMEM;
7328
7329	return 0;
7330}
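/*
 * Usage sketch of the try/commit/cancel protocol documented above
 * (illustrative; dequeue_folio() stands in for a hugetlb pool helper):
 *
 *	ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages);
 *	if (ret == -ENOMEM)
 *		return NULL;			// over the limit
 *	folio = dequeue_folio();
 *	if (ret == 0) {				// charging is enabled
 *		if (folio)
 *			mem_cgroup_commit_charge(folio, memcg);
 *		else
 *			mem_cgroup_cancel_charge(memcg, nr_pages);
 *	}
 *	// on ret == -EOPNOTSUPP the allocation proceeds uncharged
 */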
7331
7332/**
7333 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
7334 * @folio: folio to charge.
7335 * @mm: mm context of the victim
7336 * @gfp: reclaim mode
7337 * @entry: swap entry for which the folio is allocated
7338 *
7339 * This function charges a folio allocated for swapin. Please call this before
7340 * adding the folio to the swapcache.
7341 *
7342 * Returns 0 on success. Otherwise, an error code is returned.
7343 */
7344int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
7345				  gfp_t gfp, swp_entry_t entry)
7346{
7347	struct mem_cgroup *memcg;
7348	unsigned short id;
7349	int ret;
7350
7351	if (mem_cgroup_disabled())
7352		return 0;
7353
7354	id = lookup_swap_cgroup_id(entry);
7355	rcu_read_lock();
7356	memcg = mem_cgroup_from_id(id);
7357	if (!memcg || !css_tryget_online(&memcg->css))
7358		memcg = get_mem_cgroup_from_mm(mm);
7359	rcu_read_unlock();
7360
7361	ret = charge_memcg(folio, memcg, gfp);
7362
7363	css_put(&memcg->css);
7364	return ret;
7365}
7366
7367/*
7368 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
7369 * @entry: swap entry for which the page is charged
7370 *
7371 * Call this function after successfully adding the charged page to swapcache.
7372 *
7373 * Note: This function assumes the page for which the swap slot is being
7374 * uncharged is an order-0 page.
7375 */
7376void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
7377{
7378	/*
7379	 * Cgroup1's unified memory+swap counter has been charged with the
7380	 * new swapcache page, finish the transfer by uncharging the swap
7381	 * slot. The swap slot would also get uncharged when it dies, but
7382	 * it can stick around indefinitely and we'd count the page twice
7383	 * the entire time.
7384	 *
7385	 * Cgroup2 has separate resource counters for memory and swap,
7386	 * so this is a non-issue here. Memory and swap charge lifetimes
7387	 * correspond 1:1 to page and swap slot lifetimes: we charge the
7388	 * page to memory here, and uncharge swap when the slot is freed.
7389	 */
7390	if (!mem_cgroup_disabled() && do_memsw_account()) {
7391		/*
7392		 * The swap entry might not get freed for a long time,
7393		 * let's not wait for it.  The page already received a
7394		 * memory+swap charge, drop the swap entry duplicate.
7395		 */
7396		mem_cgroup_uncharge_swap(entry, 1);
7397	}
7398}
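/*
 * Ordering sketch (illustrative): combining the two helpers above, a
 * swapin path charges the new folio before it enters the swapcache and
 * drops the swap-slot charge only afterwards (add_to_swap_cache() here
 * is assumed):
 *
 *	if (mem_cgroup_swapin_charge_folio(folio, mm, gfp, entry))
 *		goto fail;
 *	if (add_to_swap_cache(folio, entry, gfp, &shadow))
 *		goto fail_uncharge_folio;
 *	mem_cgroup_swapin_uncharge_swap(entry);
 */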
7399
7400struct uncharge_gather {
7401	struct mem_cgroup *memcg;
7402	unsigned long nr_memory;
7403	unsigned long pgpgout;
7404	unsigned long nr_kmem;
7405	int nid;
7406};
7407
7408static inline void uncharge_gather_clear(struct uncharge_gather *ug)
7409{
7410	memset(ug, 0, sizeof(*ug));
7411}
7412
7413static void uncharge_batch(const struct uncharge_gather *ug)
7414{
7415	unsigned long flags;
7416
7417	if (ug->nr_memory) {
7418		page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
7419		if (do_memsw_account())
7420			page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
7421		if (ug->nr_kmem)
7422			memcg_account_kmem(ug->memcg, -ug->nr_kmem);
7423		memcg_oom_recover(ug->memcg);
7424	}
7425
7426	local_irq_save(flags);
7427	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
7428	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
7429	memcg_check_events(ug->memcg, ug->nid);
7430	local_irq_restore(flags);
7431
7432	/* drop reference from uncharge_folio */
7433	css_put(&ug->memcg->css);
7434}
7435
7436static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
7437{
7438	long nr_pages;
7439	struct mem_cgroup *memcg;
7440	struct obj_cgroup *objcg;
7441
7442	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7443
7444	/*
7445	 * Nobody should be changing or seriously looking at
7446	 * folio memcg or objcg at this point; we have fully
7447	 * exclusive access to the folio.
7448	 */
7449	if (folio_memcg_kmem(folio)) {
7450		objcg = __folio_objcg(folio);
7451		/*
7452		 * This get matches the put at the end of the function and
7453		 * kmem pages do not hold memcg references anymore.
7454		 */
7455		memcg = get_mem_cgroup_from_objcg(objcg);
7456	} else {
7457		memcg = __folio_memcg(folio);
7458	}
7459
7460	if (!memcg)
7461		return;
7462
7463	if (ug->memcg != memcg) {
7464		if (ug->memcg) {
7465			uncharge_batch(ug);
7466			uncharge_gather_clear(ug);
7467		}
7468		ug->memcg = memcg;
7469		ug->nid = folio_nid(folio);
7470
7471		/* pairs with css_put in uncharge_batch */
7472		css_get(&memcg->css);
7473	}
7474
7475	nr_pages = folio_nr_pages(folio);
7476
7477	if (folio_memcg_kmem(folio)) {
7478		ug->nr_memory += nr_pages;
7479		ug->nr_kmem += nr_pages;
7480
7481		folio->memcg_data = 0;
7482		obj_cgroup_put(objcg);
7483	} else {
7484		/* LRU pages aren't accounted at the root level */
7485		if (!mem_cgroup_is_root(memcg))
7486			ug->nr_memory += nr_pages;
7487		ug->pgpgout++;
7488
7489		folio->memcg_data = 0;
7490	}
7491
7492	css_put(&memcg->css);
7493}
7494
7495void __mem_cgroup_uncharge(struct folio *folio)
7496{
7497	struct uncharge_gather ug;
7498
7499	/* Don't touch folio->lru of any random page, pre-check: */
7500	if (!folio_memcg(folio))
7501		return;
7502
7503	uncharge_gather_clear(&ug);
7504	uncharge_folio(folio, &ug);
7505	uncharge_batch(&ug);
7506}
7507
7508/**
7509 * __mem_cgroup_uncharge_list - uncharge a list of pages
7510 * @page_list: list of pages to uncharge
7511 *
7512 * Uncharge a list of pages previously charged with
7513 * __mem_cgroup_charge().
7514 */
7515void __mem_cgroup_uncharge_list(struct list_head *page_list)
7516{
7517	struct uncharge_gather ug;
7518	struct folio *folio;
7519
7520	uncharge_gather_clear(&ug);
7521	list_for_each_entry(folio, page_list, lru)
7522		uncharge_folio(folio, &ug);
7523	if (ug.memcg)
7524		uncharge_batch(&ug);
7525}
7526
7527/**
7528 * mem_cgroup_replace_folio - Charge a folio's replacement.
7529 * @old: Currently circulating folio.
7530 * @new: Replacement folio.
7531 *
7532 * Charge @new as a replacement folio for @old. @old will
7533 * be uncharged upon free. This is only used by the page cache
7534 * (in replace_page_cache_folio()).
7535 *
7536 * Both folios must be locked, @new->mapping must be set up.
7537 */
7538void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
7539{
7540	struct mem_cgroup *memcg;
7541	long nr_pages = folio_nr_pages(new);
7542	unsigned long flags;
7543
7544	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7545	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7546	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7547	VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
7548
7549	if (mem_cgroup_disabled())
7550		return;
7551
7552	/* Page cache replacement: new folio already charged? */
7553	if (folio_memcg(new))
7554		return;
7555
7556	memcg = folio_memcg(old);
7557	VM_WARN_ON_ONCE_FOLIO(!memcg, old);
7558	if (!memcg)
7559		return;
7560
7561	/* Force-charge the new page. The old one will be freed soon */
7562	if (!mem_cgroup_is_root(memcg)) {
7563		page_counter_charge(&memcg->memory, nr_pages);
7564		if (do_memsw_account())
7565			page_counter_charge(&memcg->memsw, nr_pages);
7566	}
7567
7568	css_get(&memcg->css);
7569	commit_charge(new, memcg);
7570
7571	local_irq_save(flags);
7572	mem_cgroup_charge_statistics(memcg, nr_pages);
7573	memcg_check_events(memcg, folio_nid(new));
7574	local_irq_restore(flags);
7575}
7576
7577/**
7578 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
7579 * @old: Currently circulating folio.
7580 * @new: Replacement folio.
7581 *
7582 * Transfer the memcg data from the old folio to the new folio for migration.
7583 * The old folio's memcg data will be cleared. Note that the memory counters
7584 * will remain unchanged throughout the process.
7585 *
7586 * Both folios must be locked, @new->mapping must be set up.
7587 */
7588void mem_cgroup_migrate(struct folio *old, struct folio *new)
7589{
7590	struct mem_cgroup *memcg;
7591
7592	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7593	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7594	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7595	VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
7596
7597	if (mem_cgroup_disabled())
7598		return;
7599
7600	memcg = folio_memcg(old);
7601	/*
7602	 * Note that it is normal to see !memcg for a hugetlb folio.
7603	 * E.g. it could have been allocated when memory_hugetlb_accounting
7604	 * was not selected.
7605	 */
7606	VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
7607	if (!memcg)
7608		return;
7609
7610	/* Transfer the charge and the css ref */
7611	commit_charge(new, memcg);
7612	/*
7613	 * If the old folio is a large folio and is in the split queue, it needs
7614	 * to be removed from the split queue now, to avoid looking up an
7615	 * incorrect split queue in destroy_large_folio() after the memcg of
7616	 * the old folio is cleared.
7617	 *
7618	 * In addition, the old folio is about to be freed after migration, so
7619	 * removing from the split queue a bit earlier seems reasonable.
7620	 */
7621	if (folio_test_large(old) && folio_test_large_rmappable(old))
7622		folio_undo_large_rmappable(old);
7623	old->memcg_data = 0;
7624}
7625
7626DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
7627EXPORT_SYMBOL(memcg_sockets_enabled_key);
7628
7629void mem_cgroup_sk_alloc(struct sock *sk)
7630{
7631	struct mem_cgroup *memcg;
7632
7633	if (!mem_cgroup_sockets_enabled)
7634		return;
7635
7636	/* Do not associate the sock with an unrelated interrupted task's memcg. */
7637	if (!in_task())
7638		return;
7639
7640	rcu_read_lock();
7641	memcg = mem_cgroup_from_task(current);
7642	if (mem_cgroup_is_root(memcg))
7643		goto out;
7644	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
7645		goto out;
7646	if (css_tryget(&memcg->css))
7647		sk->sk_memcg = memcg;
7648out:
7649	rcu_read_unlock();
7650}
7651
7652void mem_cgroup_sk_free(struct sock *sk)
7653{
7654	if (sk->sk_memcg)
7655		css_put(&sk->sk_memcg->css);
7656}
7657
7658/**
7659 * mem_cgroup_charge_skmem - charge socket memory
7660 * @memcg: memcg to charge
7661 * @nr_pages: number of pages to charge
7662 * @gfp_mask: reclaim mode
7663 *
7664 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7665 * @memcg's configured limit, %false if it doesn't.
7666 */
7667bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
7668			     gfp_t gfp_mask)
7669{
7670	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7671		struct page_counter *fail;
7672
7673		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7674			memcg->tcpmem_pressure = 0;
7675			return true;
7676		}
7677		memcg->tcpmem_pressure = 1;
7678		if (gfp_mask & __GFP_NOFAIL) {
7679			page_counter_charge(&memcg->tcpmem, nr_pages);
7680			return true;
7681		}
7682		return false;
7683	}
7684
7685	if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
7686		mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7687		return true;
7688	}
7689
7690	return false;
7691}
7692
7693/**
7694 * mem_cgroup_uncharge_skmem - uncharge socket memory
7695 * @memcg: memcg to uncharge
7696 * @nr_pages: number of pages to uncharge
7697 */
7698void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7699{
7700	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7701		page_counter_uncharge(&memcg->tcpmem, nr_pages);
7702		return;
7703	}
7704
7705	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7706
7707	refill_stock(memcg, nr_pages);
7708}
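/*
 * Usage sketch (illustrative): the network stack pairs these two calls
 * around socket buffer accounting, using the memcg that
 * mem_cgroup_sk_alloc() attached to the socket:
 *
 *	if (sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages, gfp))
 *		// over the limit: shrink or drop
 *	...
 *	if (sk->sk_memcg)
 *		mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
 */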
7709
7710static int __init cgroup_memory(char *s)
7711{
7712	char *token;
7713
7714	while ((token = strsep(&s, ",")) != NULL) {
7715		if (!*token)
7716			continue;
7717		if (!strcmp(token, "nosocket"))
7718			cgroup_memory_nosocket = true;
7719		if (!strcmp(token, "nokmem"))
7720			cgroup_memory_nokmem = true;
7721		if (!strcmp(token, "nobpf"))
7722			cgroup_memory_nobpf = true;
7723	}
7724	return 1;
7725}
7726__setup("cgroup.memory=", cgroup_memory);
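/*
 * Example (matching the tokens parsed above): booting with
 *
 *	cgroup.memory=nosocket,nokmem
 *
 * disables socket and kernel memory accounting while leaving BPF
 * accounting enabled.
 */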
7727
7728/*
7729 * subsys_initcall() for memory controller.
7730 *
7731 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7732 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7733 * basically everything that doesn't depend on a specific mem_cgroup structure
7734 * should be initialized from here.
7735 */
7736static int __init mem_cgroup_init(void)
7737{
7738	int cpu, node;
7739
7740	/*
7741	 * An s32 (see struct batched_lruvec_stat) is currently used for
7742	 * per-memcg-per-cpu caching of per-node statistics. For this to
7743	 * work correctly, the overfill threshold must not exceed
7744	 * S32_MAX / PAGE_SIZE.
7745	 */
7746	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7747
7748	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7749				  memcg_hotplug_cpu_dead);
7750
7751	for_each_possible_cpu(cpu)
7752		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7753			  drain_local_stock);
7754
7755	for_each_node(node) {
7756		struct mem_cgroup_tree_per_node *rtpn;
7757
7758		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, node);
7759
7760		rtpn->rb_root = RB_ROOT;
7761		rtpn->rb_rightmost = NULL;
7762		spin_lock_init(&rtpn->lock);
7763		soft_limit_tree.rb_tree_per_node[node] = rtpn;
7764	}
7765
7766	return 0;
7767}
7768subsys_initcall(mem_cgroup_init);
7769
7770#ifdef CONFIG_SWAP
7771static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7772{
7773	while (!refcount_inc_not_zero(&memcg->id.ref)) {
7774		/*
7775		 * The root cgroup cannot be destroyed, so its refcount must
7776		 * always be >= 1.
7777		 */
7778		if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
7779			VM_BUG_ON(1);
7780			break;
7781		}
7782		memcg = parent_mem_cgroup(memcg);
7783		if (!memcg)
7784			memcg = root_mem_cgroup;
7785	}
7786	return memcg;
7787}
7788
7789/**
7790 * mem_cgroup_swapout - transfer a memsw charge to swap
7791 * @folio: folio whose memsw charge to transfer
7792 * @entry: swap entry to move the charge to
7793 *
7794 * Transfer the memsw charge of @folio to @entry.
7795 */
7796void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
7797{
7798	struct mem_cgroup *memcg, *swap_memcg;
7799	unsigned int nr_entries;
7800	unsigned short oldid;
7801
7802	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7803	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
7804
7805	if (mem_cgroup_disabled())
7806		return;
7807
7808	if (!do_memsw_account())
7809		return;
7810
7811	memcg = folio_memcg(folio);
7812
7813	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7814	if (!memcg)
7815		return;
7816
7817	/*
7818	 * In case the memcg owning these pages has been offlined and doesn't
7819	 * have an ID allocated to it anymore, charge the closest online
7820	 * ancestor for the swap instead and transfer the memory+swap charge.
7821	 */
7822	swap_memcg = mem_cgroup_id_get_online(memcg);
7823	nr_entries = folio_nr_pages(folio);
7824	/* Get references for the tail pages, too */
7825	if (nr_entries > 1)
7826		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7827	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7828				   nr_entries);
7829	VM_BUG_ON_FOLIO(oldid, folio);
7830	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7831
7832	folio->memcg_data = 0;
7833
7834	if (!mem_cgroup_is_root(memcg))
7835		page_counter_uncharge(&memcg->memory, nr_entries);
7836
7837	if (memcg != swap_memcg) {
7838		if (!mem_cgroup_is_root(swap_memcg))
7839			page_counter_charge(&swap_memcg->memsw, nr_entries);
7840		page_counter_uncharge(&memcg->memsw, nr_entries);
7841	}
7842
7843	/*
7844	 * Interrupts should be disabled here because the caller holds the
7845	 * i_pages lock which is taken with interrupts-off. It is
7846	 * important here to have the interrupts disabled because it is the
7847	 * only synchronisation we have for updating the per-CPU variables.
7848	 */
7849	memcg_stats_lock();
7850	mem_cgroup_charge_statistics(memcg, -nr_entries);
7851	memcg_stats_unlock();
7852	memcg_check_events(memcg, folio_nid(folio));
7853
7854	css_put(&memcg->css);
7855}
7856
7857/**
7858 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
7859 * @folio: folio being added to swap
7860 * @entry: swap entry to charge
7861 *
7862 * Try to charge @folio's memcg for the swap space at @entry.
7863 *
7864 * Returns 0 on success, -ENOMEM on failure.
7865 */
7866int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
7867{
7868	unsigned int nr_pages = folio_nr_pages(folio);
7869	struct page_counter *counter;
7870	struct mem_cgroup *memcg;
7871	unsigned short oldid;
7872
7873	if (do_memsw_account())
7874		return 0;
7875
7876	memcg = folio_memcg(folio);
7877
7878	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7879	if (!memcg)
7880		return 0;
7881
7882	if (!entry.val) {
7883		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7884		return 0;
7885	}
7886
7887	memcg = mem_cgroup_id_get_online(memcg);
7888
7889	if (!mem_cgroup_is_root(memcg) &&
7890	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7891		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7892		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7893		mem_cgroup_id_put(memcg);
7894		return -ENOMEM;
7895	}
7896
7897	/* Get references for the tail pages, too */
7898	if (nr_pages > 1)
7899		mem_cgroup_id_get_many(memcg, nr_pages - 1);
7900	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7901	VM_BUG_ON_FOLIO(oldid, folio);
7902	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7903
7904	return 0;
7905}
7906
7907/**
7908 * __mem_cgroup_uncharge_swap - uncharge swap space
7909 * @entry: swap entry to uncharge
7910 * @nr_pages: the amount of swap space to uncharge
7911 */
7912void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7913{
7914	struct mem_cgroup *memcg;
7915	unsigned short id;
7916
7917	id = swap_cgroup_record(entry, 0, nr_pages);
7918	rcu_read_lock();
7919	memcg = mem_cgroup_from_id(id);
7920	if (memcg) {
7921		if (!mem_cgroup_is_root(memcg)) {
7922			if (do_memsw_account())
7923				page_counter_uncharge(&memcg->memsw, nr_pages);
7924			else
7925				page_counter_uncharge(&memcg->swap, nr_pages);
7926		}
7927		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7928		mem_cgroup_id_put_many(memcg, nr_pages);
7929	}
7930	rcu_read_unlock();
7931}
7932
7933long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7934{
7935	long nr_swap_pages = get_nr_swap_pages();
7936
7937	if (mem_cgroup_disabled() || do_memsw_account())
7938		return nr_swap_pages;
7939	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
7940		nr_swap_pages = min_t(long, nr_swap_pages,
7941				      READ_ONCE(memcg->swap.max) -
7942				      page_counter_read(&memcg->swap));
7943	return nr_swap_pages;
7944}
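/*
 * Worked example (illustrative numbers): with 1000 free swap pages
 * system-wide, a memcg at swap.max = 100 using 70 pages, and its
 * parent at swap.max = 500 using 480 pages, the walk above returns
 * min(1000, 100 - 70, 500 - 480) = 20 pages.
 */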
7945
7946bool mem_cgroup_swap_full(struct folio *folio)
7947{
7948	struct mem_cgroup *memcg;
7949
7950	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
7951
7952	if (vm_swap_full())
7953		return true;
7954	if (do_memsw_account())
7955		return false;
7956
7957	memcg = folio_memcg(folio);
7958	if (!memcg)
7959		return false;
7960
7961	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
7962		unsigned long usage = page_counter_read(&memcg->swap);
7963
7964		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7965		    usage * 2 >= READ_ONCE(memcg->swap.max))
7966			return true;
7967	}
7968
7969	return false;
7970}
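/*
 * Note on the thresholds above: a cgroup counts as "swap full" once
 * usage reaches half of swap.high or swap.max anywhere in its
 * hierarchy, e.g. 512 pages used against swap.max = 1024 already
 * returns true.
 */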
7971
7972static int __init setup_swap_account(char *s)
7973{
7974	bool res;
7975
7976	if (!kstrtobool(s, &res) && !res)
7977		pr_warn_once("The swapaccount=0 commandline option is deprecated "
7978			     "in favor of configuring swap control via cgroupfs. "
7979			     "Please report your usecase to linux-mm@kvack.org if you "
7980			     "depend on this functionality.\n");
7981	return 1;
7982}
7983__setup("swapaccount=", setup_swap_account);
7984
7985static u64 swap_current_read(struct cgroup_subsys_state *css,
7986			     struct cftype *cft)
7987{
7988	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7989
7990	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7991}
7992
7993static u64 swap_peak_read(struct cgroup_subsys_state *css,
7994			  struct cftype *cft)
7995{
7996	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7997
7998	return (u64)memcg->swap.watermark * PAGE_SIZE;
7999}
8000
8001static int swap_high_show(struct seq_file *m, void *v)
8002{
8003	return seq_puts_memcg_tunable(m,
8004		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
8005}
8006
8007static ssize_t swap_high_write(struct kernfs_open_file *of,
8008			       char *buf, size_t nbytes, loff_t off)
8009{
8010	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8011	unsigned long high;
8012	int err;
8013
8014	buf = strstrip(buf);
8015	err = page_counter_memparse(buf, "max", &high);
8016	if (err)
8017		return err;
8018
8019	page_counter_set_high(&memcg->swap, high);
8020
8021	return nbytes;
8022}
8023
8024static int swap_max_show(struct seq_file *m, void *v)
8025{
8026	return seq_puts_memcg_tunable(m,
8027		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
8028}
8029
8030static ssize_t swap_max_write(struct kernfs_open_file *of,
8031			      char *buf, size_t nbytes, loff_t off)
8032{
8033	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8034	unsigned long max;
8035	int err;
8036
8037	buf = strstrip(buf);
8038	err = page_counter_memparse(buf, "max", &max);
8039	if (err)
8040		return err;
8041
8042	xchg(&memcg->swap.max, max);
8043
8044	return nbytes;
8045}
8046
8047static int swap_events_show(struct seq_file *m, void *v)
8048{
8049	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
8050
8051	seq_printf(m, "high %lu\n",
8052		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
8053	seq_printf(m, "max %lu\n",
8054		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
8055	seq_printf(m, "fail %lu\n",
8056		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
8057
8058	return 0;
8059}
8060
8061static struct cftype swap_files[] = {
8062	{
8063		.name = "swap.current",
8064		.flags = CFTYPE_NOT_ON_ROOT,
8065		.read_u64 = swap_current_read,
8066	},
8067	{
8068		.name = "swap.high",
8069		.flags = CFTYPE_NOT_ON_ROOT,
8070		.seq_show = swap_high_show,
8071		.write = swap_high_write,
8072	},
8073	{
8074		.name = "swap.max",
8075		.flags = CFTYPE_NOT_ON_ROOT,
8076		.seq_show = swap_max_show,
8077		.write = swap_max_write,
8078	},
8079	{
8080		.name = "swap.peak",
8081		.flags = CFTYPE_NOT_ON_ROOT,
8082		.read_u64 = swap_peak_read,
8083	},
8084	{
8085		.name = "swap.events",
8086		.flags = CFTYPE_NOT_ON_ROOT,
8087		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
8088		.seq_show = swap_events_show,
8089	},
8090	{ }	/* terminate */
8091};
8092
8093static struct cftype memsw_files[] = {
8094	{
8095		.name = "memsw.usage_in_bytes",
8096		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
8097		.read_u64 = mem_cgroup_read_u64,
8098	},
8099	{
8100		.name = "memsw.max_usage_in_bytes",
8101		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
8102		.write = mem_cgroup_reset,
8103		.read_u64 = mem_cgroup_read_u64,
8104	},
8105	{
8106		.name = "memsw.limit_in_bytes",
8107		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
8108		.write = mem_cgroup_write,
8109		.read_u64 = mem_cgroup_read_u64,
8110	},
8111	{
8112		.name = "memsw.failcnt",
8113		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
8114		.write = mem_cgroup_reset,
8115		.read_u64 = mem_cgroup_read_u64,
8116	},
8117	{ },	/* terminate */
8118};
8119
8120#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
8121/**
8122 * obj_cgroup_may_zswap - check if this cgroup can zswap
8123 * @objcg: the object cgroup
8124 *
8125 * Check if the hierarchical zswap limit has been reached.
8126 *
8127 * This doesn't check for specific headroom, and it is not atomic
8128 * either. But with zswap, the size of the allocation is only known
8129 * once compression has occurred, and this optimistic pre-check avoids
8130 * spending cycles on compression when there is already no room left
8131 * or zswap is disabled altogether somewhere in the hierarchy.
8132 */
8133bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
8134{
8135	struct mem_cgroup *memcg, *original_memcg;
8136	bool ret = true;
8137
8138	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8139		return true;
8140
8141	original_memcg = get_mem_cgroup_from_objcg(objcg);
8142	for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
8143	     memcg = parent_mem_cgroup(memcg)) {
8144		unsigned long max = READ_ONCE(memcg->zswap_max);
8145		unsigned long pages;
8146
8147		if (max == PAGE_COUNTER_MAX)
8148			continue;
8149		if (max == 0) {
8150			ret = false;
8151			break;
8152		}
8153
8154		/*
8155		 * mem_cgroup_flush_stats() ignores small changes. Use
8156		 * do_flush_stats() directly to get accurate stats for charging.
8157		 */
8158		do_flush_stats(memcg);
8159		pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
8160		if (pages < max)
8161			continue;
8162		ret = false;
8163		break;
8164	}
8165	mem_cgroup_put(original_memcg);
8166	return ret;
8167}
8168
8169/**
8170 * obj_cgroup_charge_zswap - charge compression backend memory
8171 * @objcg: the object cgroup
8172 * @size: size of compressed object
8173 *
8174 * This forces the charge after obj_cgroup_may_zswap() allowed
8175 * compression and storage in zswap for this cgroup to go ahead.
8176 */
8177void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
8178{
8179	struct mem_cgroup *memcg;
8180
8181	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8182		return;
8183
8184	VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
8185
8186	/* PF_MEMALLOC context, charging must succeed */
8187	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
8188		VM_WARN_ON_ONCE(1);
8189
8190	rcu_read_lock();
8191	memcg = obj_cgroup_memcg(objcg);
8192	mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
8193	mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
8194	rcu_read_unlock();
8195}
8196
8197/**
8198 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
8199 * @objcg: the object cgroup
8200 * @size: size of compressed object
8201 *
8202 * Uncharges zswap memory on page in.
8203 */
8204void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
8205{
8206	struct mem_cgroup *memcg;
8207
8208	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8209		return;
8210
8211	obj_cgroup_uncharge(objcg, size);
8212
8213	rcu_read_lock();
8214	memcg = obj_cgroup_memcg(objcg);
8215	mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
8216	mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
8217	rcu_read_unlock();
8218}
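/*
 * Usage sketch (illustrative): a zswap store path runs the optimistic
 * pre-check above before spending cycles on compression, then charges
 * the actual compressed size, and uncharges it again on load or
 * writeback:
 *
 *	if (!obj_cgroup_may_zswap(objcg))
 *		goto reject;
 *	// ... compress the page, producing compressed_len bytes ...
 *	obj_cgroup_charge_zswap(objcg, compressed_len);
 *	...
 *	obj_cgroup_uncharge_zswap(objcg, compressed_len);
 */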
8219
8220bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
8221{
8222	/* if zswap is disabled, do not block pages going to the swapping device */
8223	return !is_zswap_enabled() || !memcg || READ_ONCE(memcg->zswap_writeback);
8224}
8225
8226static u64 zswap_current_read(struct cgroup_subsys_state *css,
8227			      struct cftype *cft)
8228{
8229	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
8230
8231	mem_cgroup_flush_stats(memcg);
8232	return memcg_page_state(memcg, MEMCG_ZSWAP_B);
8233}
8234
8235static int zswap_max_show(struct seq_file *m, void *v)
8236{
8237	return seq_puts_memcg_tunable(m,
8238		READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
8239}
8240
8241static ssize_t zswap_max_write(struct kernfs_open_file *of,
8242			       char *buf, size_t nbytes, loff_t off)
8243{
8244	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8245	unsigned long max;
8246	int err;
8247
8248	buf = strstrip(buf);
8249	err = page_counter_memparse(buf, "max", &max);
8250	if (err)
8251		return err;
8252
8253	xchg(&memcg->zswap_max, max);
8254
8255	return nbytes;
8256}
8257
8258static int zswap_writeback_show(struct seq_file *m, void *v)
8259{
8260	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
8261
8262	seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
8263	return 0;
8264}
8265
8266static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
8267				char *buf, size_t nbytes, loff_t off)
8268{
8269	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8270	int zswap_writeback;
8271	ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);
8272
8273	if (parse_ret)
8274		return parse_ret;
8275
8276	if (zswap_writeback != 0 && zswap_writeback != 1)
8277		return -EINVAL;
8278
8279	WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
8280	return nbytes;
8281}
8282
8283static struct cftype zswap_files[] = {
8284	{
8285		.name = "zswap.current",
8286		.flags = CFTYPE_NOT_ON_ROOT,
8287		.read_u64 = zswap_current_read,
8288	},
8289	{
8290		.name = "zswap.max",
8291		.flags = CFTYPE_NOT_ON_ROOT,
8292		.seq_show = zswap_max_show,
8293		.write = zswap_max_write,
8294	},
8295	{
8296		.name = "zswap.writeback",
8297		.seq_show = zswap_writeback_show,
8298		.write = zswap_writeback_write,
8299	},
8300	{ }	/* terminate */
8301};
8302#endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
8303
8304static int __init mem_cgroup_swap_init(void)
8305{
8306	if (mem_cgroup_disabled())
8307		return 0;
8308
8309	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
8310	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
8311#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
8312	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
8313#endif
8314	return 0;
8315}
8316subsys_initcall(mem_cgroup_swap_init);
8317
8318#endif /* CONFIG_SWAP */