Linux v4.17 - mm/memcontrol.c
   1/* memcontrol.c - Memory Controller
   2 *
   3 * Copyright IBM Corporation, 2007
   4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
   5 *
   6 * Copyright 2007 OpenVZ SWsoft Inc
   7 * Author: Pavel Emelianov <xemul@openvz.org>
   8 *
   9 * Memory thresholds
  10 * Copyright (C) 2009 Nokia Corporation
  11 * Author: Kirill A. Shutemov
  12 *
  13 * Kernel Memory Controller
  14 * Copyright (C) 2012 Parallels Inc. and Google Inc.
  15 * Authors: Glauber Costa and Suleiman Souhlal
  16 *
  17 * Native page reclaim
  18 * Charge lifetime sanitation
  19 * Lockless page tracking & accounting
  20 * Unified hierarchy configuration model
  21 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
  22 *
  23 * This program is free software; you can redistribute it and/or modify
  24 * it under the terms of the GNU General Public License as published by
  25 * the Free Software Foundation; either version 2 of the License, or
  26 * (at your option) any later version.
  27 *
  28 * This program is distributed in the hope that it will be useful,
  29 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  31 * GNU General Public License for more details.
  32 */
  33
  34#include <linux/page_counter.h>
  35#include <linux/memcontrol.h>
  36#include <linux/cgroup.h>
  37#include <linux/mm.h>
  38#include <linux/sched/mm.h>
  39#include <linux/shmem_fs.h>
  40#include <linux/hugetlb.h>
  41#include <linux/pagemap.h>
  42#include <linux/smp.h>
  43#include <linux/page-flags.h>
  44#include <linux/backing-dev.h>
  45#include <linux/bit_spinlock.h>
  46#include <linux/rcupdate.h>
  47#include <linux/limits.h>
  48#include <linux/export.h>
  49#include <linux/mutex.h>
  50#include <linux/rbtree.h>
  51#include <linux/slab.h>
  52#include <linux/swap.h>
  53#include <linux/swapops.h>
  54#include <linux/spinlock.h>
  55#include <linux/eventfd.h>
  56#include <linux/poll.h>
  57#include <linux/sort.h>
  58#include <linux/fs.h>
  59#include <linux/seq_file.h>
  60#include <linux/vmpressure.h>
  61#include <linux/mm_inline.h>
  62#include <linux/swap_cgroup.h>
  63#include <linux/cpu.h>
  64#include <linux/oom.h>
  65#include <linux/lockdep.h>
  66#include <linux/file.h>
  67#include <linux/tracehook.h>
  68#include "internal.h"
  69#include <net/sock.h>
  70#include <net/ip.h>
  71#include "slab.h"
  72
  73#include <linux/uaccess.h>
  74
  75#include <trace/events/vmscan.h>
  76
  77struct cgroup_subsys memory_cgrp_subsys __read_mostly;
  78EXPORT_SYMBOL(memory_cgrp_subsys);
  79
  80struct mem_cgroup *root_mem_cgroup __read_mostly;
  81
  82#define MEM_CGROUP_RECLAIM_RETRIES	5
  83
  84/* Socket memory accounting disabled? */
  85static bool cgroup_memory_nosocket;
  86
  87/* Kernel memory accounting disabled? */
  88static bool cgroup_memory_nokmem;
  89
  90/* Whether the swap controller is active */
  91#ifdef CONFIG_MEMCG_SWAP
  92int do_swap_account __read_mostly;
  93#else
  94#define do_swap_account		0
  95#endif
  96
  97/* Whether legacy memory+swap accounting is active */
  98static bool do_memsw_account(void)
  99{
 100	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
 101}
 102
 103static const char *const mem_cgroup_lru_names[] = {
 104	"inactive_anon",
 105	"active_anon",
 106	"inactive_file",
 107	"active_file",
 108	"unevictable",
 109};
 110
 111#define THRESHOLDS_EVENTS_TARGET 128
 112#define SOFTLIMIT_EVENTS_TARGET 1024
 113#define NUMAINFO_EVENTS_TARGET	1024
 114
 115/*
 116 * Cgroups above their limits are maintained in a RB-Tree, independent of
 117 * their hierarchy representation
 118 */
 119
 120struct mem_cgroup_tree_per_node {
 121	struct rb_root rb_root;
 122	struct rb_node *rb_rightmost;
 123	spinlock_t lock;
 124};
 125
 126struct mem_cgroup_tree {
 127	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
 128};
 129
 130static struct mem_cgroup_tree soft_limit_tree __read_mostly;
 131
 132/* for OOM */
 133struct mem_cgroup_eventfd_list {
 134	struct list_head list;
 135	struct eventfd_ctx *eventfd;
 136};
 137
 138/*
 139 * cgroup_event represents events which userspace want to receive.
 140 */
 141struct mem_cgroup_event {
 142	/*
 143	 * memcg which the event belongs to.
 144	 */
 145	struct mem_cgroup *memcg;
 146	/*
 147	 * eventfd to signal userspace about the event.
 148	 */
 149	struct eventfd_ctx *eventfd;
 150	/*
 151	 * Each of these stored in a list by the cgroup.
 152	 */
 153	struct list_head list;
 154	/*
 155	 * register_event() callback will be used to add new userspace
 156	 * waiter for changes related to this event.  Use eventfd_signal()
 157	 * on eventfd to send notification to userspace.
 158	 */
 159	int (*register_event)(struct mem_cgroup *memcg,
 160			      struct eventfd_ctx *eventfd, const char *args);
 161	/*
 162	 * unregister_event() callback will be called when userspace closes
 163	 * the eventfd or when the cgroup is removed.  This callback must be
 164	 * set if you want to provide notification functionality.
 165	 */
 166	void (*unregister_event)(struct mem_cgroup *memcg,
 167				 struct eventfd_ctx *eventfd);
 168	/*
 169	 * All fields below needed to unregister event when
 170	 * userspace closes eventfd.
 171	 */
 172	poll_table pt;
 173	wait_queue_head_t *wqh;
 174	wait_queue_entry_t wait;
 175	struct work_struct remove;
 176};
 177
 178static void mem_cgroup_threshold(struct mem_cgroup *memcg);
 179static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
 180
 181/* Stuff for move charges at task migration. */
 182/*
 183 * Types of charges to be moved.
 184 */
 185#define MOVE_ANON	0x1U
 186#define MOVE_FILE	0x2U
 187#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
 188
 189/* "mc" and its members are protected by cgroup_mutex */
 190static struct move_charge_struct {
 191	spinlock_t	  lock; /* for from, to */
 192	struct mm_struct  *mm;
 193	struct mem_cgroup *from;
 194	struct mem_cgroup *to;
 195	unsigned long flags;
 196	unsigned long precharge;
 197	unsigned long moved_charge;
 198	unsigned long moved_swap;
 199	struct task_struct *moving_task;	/* a task moving charges */
 200	wait_queue_head_t waitq;		/* a waitq for other context */
 201} mc = {
 202	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
 203	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
 204};
 205
 206/*
 207 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 208 * limit reclaim to prevent infinite loops, if they ever occur.
 209 */
 210#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
 211#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
 212
 213enum charge_type {
 214	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
 215	MEM_CGROUP_CHARGE_TYPE_ANON,
 216	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
 217	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
 218	NR_CHARGE_TYPE,
 219};
 220
 221/* for encoding cft->private value on file */
 222enum res_type {
 223	_MEM,
 224	_MEMSWAP,
 225	_OOM_TYPE,
 226	_KMEM,
 227	_TCP,
 228};
 229
 230#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
 231#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
 232#define MEMFILE_ATTR(val)	((val) & 0xffff)
 233/* Used for OOM notifier */
 234#define OOM_CONTROL		(0)
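/*
 * Worked example (illustrative): the legacy "oom_control" file stores
 * MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL) in cft->private.  With
 * _OOM_TYPE == 2 and OOM_CONTROL == 0 this packs to 2 << 16 | 0 ==
 * 0x20000, from which MEMFILE_TYPE() recovers 2 and MEMFILE_ATTR()
 * recovers 0.
 */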
 235
 236/* Some nice accessors for the vmpressure. */
 237struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
 238{
 239	if (!memcg)
 240		memcg = root_mem_cgroup;
 241	return &memcg->vmpressure;
 242}
 243
 244struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
 245{
 246	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
 247}
 248
 249static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
 250{
 251	return (memcg == root_mem_cgroup);
 252}
 253
 254#ifndef CONFIG_SLOB
 255/*
 256 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 257 * The main reason for not using cgroup id for this:
 258 *  this works better in sparse environments, where we have a lot of memcgs,
 259 *  but only a few kmem-limited. Or also, if we have, for instance, 200
 260 *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
 261 *  200 entry array for that.
 262 *
 263 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 264 * will double each time we have to increase it.
 265 */
 266static DEFINE_IDA(memcg_cache_ida);
 267int memcg_nr_cache_ids;
 268
 269/* Protects memcg_nr_cache_ids */
 270static DECLARE_RWSEM(memcg_cache_ids_sem);
 271
 272void memcg_get_cache_ids(void)
 273{
 274	down_read(&memcg_cache_ids_sem);
 275}
 276
 277void memcg_put_cache_ids(void)
 278{
 279	up_read(&memcg_cache_ids_sem);
 280}
 281
 282/*
 283 * MIN_SIZE is different than 1, because we would like to avoid going through
 284 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 285 * cgroups is a reasonable guess. In the future, it could be a parameter or
 286 * tunable, but that is strictly not necessary.
 287 *
 288 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 289 * this constant directly from cgroup, but it is understandable that this is
 290 * better kept as an internal representation in cgroup.c. In any case, the
 291 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 292 * increase ours as well if it increases.
 293 */
 294#define MEMCG_CACHES_MIN_SIZE 4
 295#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
 296
 297/*
 298 * A lot of the calls to the cache allocation functions are expected to be
 299 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 300 * conditional to this static branch, we'll have to allow modules that do
 301 * kmem_cache_alloc and the like to see this symbol as well.
 302 */
 303DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
 304EXPORT_SYMBOL(memcg_kmem_enabled_key);
 305
 306struct workqueue_struct *memcg_kmem_cache_wq;
 307
 308#endif /* !CONFIG_SLOB */
 309
 310/**
 311 * mem_cgroup_css_from_page - css of the memcg associated with a page
 312 * @page: page of interest
 313 *
 314 * If memcg is bound to the default hierarchy, css of the memcg associated
 315 * with @page is returned.  The returned css remains associated with @page
 316 * until it is released.
 317 *
 318 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 319 * is returned.
 320 */
 321struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
 322{
 323	struct mem_cgroup *memcg;
 324
 325	memcg = page->mem_cgroup;
 326
 327	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
 328		memcg = root_mem_cgroup;
 329
 330	return &memcg->css;
 331}
 332
 333/**
 334 * page_cgroup_ino - return inode number of the memcg a page is charged to
 335 * @page: the page
 336 *
 337 * Look up the closest online ancestor of the memory cgroup @page is charged to
 338 * and return its inode number or 0 if @page is not charged to any cgroup. It
 339 * is safe to call this function without holding a reference to @page.
 340 *
 341 * Note, this function is inherently racy, because there is nothing to prevent
 342 * the cgroup inode from getting torn down and potentially reallocated a moment
 343 * after page_cgroup_ino() returns, so it only should be used by callers that
 344 * do not care (such as procfs interfaces).
 345 */
 346ino_t page_cgroup_ino(struct page *page)
 347{
 348	struct mem_cgroup *memcg;
 349	unsigned long ino = 0;
 350
 351	rcu_read_lock();
 352	memcg = READ_ONCE(page->mem_cgroup);
 353	while (memcg && !(memcg->css.flags & CSS_ONLINE))
 354		memcg = parent_mem_cgroup(memcg);
 355	if (memcg)
 356		ino = cgroup_ino(memcg->css.cgroup);
 357	rcu_read_unlock();
 358	return ino;
 359}
 360
 361static struct mem_cgroup_per_node *
 362mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
 363{
 364	int nid = page_to_nid(page);
 365
 366	return memcg->nodeinfo[nid];
 367}
 368
 369static struct mem_cgroup_tree_per_node *
 370soft_limit_tree_node(int nid)
 371{
 372	return soft_limit_tree.rb_tree_per_node[nid];
 373}
 374
 375static struct mem_cgroup_tree_per_node *
 376soft_limit_tree_from_page(struct page *page)
 377{
 378	int nid = page_to_nid(page);
 379
 380	return soft_limit_tree.rb_tree_per_node[nid];
 381}
 382
 383static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
 384					 struct mem_cgroup_tree_per_node *mctz,
 385					 unsigned long new_usage_in_excess)
 386{
 387	struct rb_node **p = &mctz->rb_root.rb_node;
 388	struct rb_node *parent = NULL;
 389	struct mem_cgroup_per_node *mz_node;
 390	bool rightmost = true;
 391
 392	if (mz->on_tree)
 393		return;
 394
 395	mz->usage_in_excess = new_usage_in_excess;
 396	if (!mz->usage_in_excess)
 397		return;
 398	while (*p) {
 399		parent = *p;
 400		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
 401					tree_node);
 402		if (mz->usage_in_excess < mz_node->usage_in_excess) {
 403			p = &(*p)->rb_left;
 404			rightmost = false;
 405		}
 406
 407		/*
 408		 * We can't avoid mem cgroups that are over their soft
 409		 * limit by the same amount
 410		 */
 411		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
 412			p = &(*p)->rb_right;
 413	}
 414
 415	if (rightmost)
 416		mctz->rb_rightmost = &mz->tree_node;
 417
 418	rb_link_node(&mz->tree_node, parent, p);
 419	rb_insert_color(&mz->tree_node, &mctz->rb_root);
 420	mz->on_tree = true;
 421}
 422
 423static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
 424					 struct mem_cgroup_tree_per_node *mctz)
 425{
 426	if (!mz->on_tree)
 427		return;
 428
 429	if (&mz->tree_node == mctz->rb_rightmost)
 430		mctz->rb_rightmost = rb_prev(&mz->tree_node);
 431
 432	rb_erase(&mz->tree_node, &mctz->rb_root);
 433	mz->on_tree = false;
 434}
 435
 436static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
 437				       struct mem_cgroup_tree_per_node *mctz)
 438{
 439	unsigned long flags;
 440
 441	spin_lock_irqsave(&mctz->lock, flags);
 442	__mem_cgroup_remove_exceeded(mz, mctz);
 443	spin_unlock_irqrestore(&mctz->lock, flags);
 444}
 445
 446static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
 447{
 448	unsigned long nr_pages = page_counter_read(&memcg->memory);
 449	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
 450	unsigned long excess = 0;
 451
 452	if (nr_pages > soft_limit)
 453		excess = nr_pages - soft_limit;
 454
 455	return excess;
 456}
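/*
 * For example, with page_counter_read() returning 1536 pages against a
 * soft_limit of 1024 pages, soft_limit_excess() reports 512; at or
 * below the soft limit it reports 0.
 */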
 457
 458static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
 459{
 460	unsigned long excess;
 461	struct mem_cgroup_per_node *mz;
 462	struct mem_cgroup_tree_per_node *mctz;
 463
 464	mctz = soft_limit_tree_from_page(page);
 465	if (!mctz)
 466		return;
 467	/*
 468	 * Necessary to update all ancestors when hierarchy is used,
 469	 * because their event counter is not touched.
 470	 */
 471	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
 472		mz = mem_cgroup_page_nodeinfo(memcg, page);
 473		excess = soft_limit_excess(memcg);
 474		/*
 475		 * We have to update the tree if mz is on RB-tree or
 476		 * mem is over its softlimit.
 477		 */
 478		if (excess || mz->on_tree) {
 479			unsigned long flags;
 480
 481			spin_lock_irqsave(&mctz->lock, flags);
 482			/* if on-tree, remove it */
 483			if (mz->on_tree)
 484				__mem_cgroup_remove_exceeded(mz, mctz);
 485			/*
 486			 * Insert again. mz->usage_in_excess will be updated.
 487			 * If excess is 0, no tree ops.
 488			 */
 489			__mem_cgroup_insert_exceeded(mz, mctz, excess);
 490			spin_unlock_irqrestore(&mctz->lock, flags);
 491		}
 492	}
 493}
 494
 495static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
 496{
 497	struct mem_cgroup_tree_per_node *mctz;
 498	struct mem_cgroup_per_node *mz;
 499	int nid;
 500
 501	for_each_node(nid) {
 502		mz = mem_cgroup_nodeinfo(memcg, nid);
 503		mctz = soft_limit_tree_node(nid);
 504		if (mctz)
 505			mem_cgroup_remove_exceeded(mz, mctz);
 506	}
 507}
 508
 509static struct mem_cgroup_per_node *
 510__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
 511{
 512	struct mem_cgroup_per_node *mz;
 513
 514retry:
 515	mz = NULL;
 516	if (!mctz->rb_rightmost)
 517		goto done;		/* Nothing to reclaim from */
 518
 519	mz = rb_entry(mctz->rb_rightmost,
 520		      struct mem_cgroup_per_node, tree_node);
 521	/*
 522	 * Remove the node now but someone else can add it back,
 523	 * we will add it back at the end of reclaim to its correct
 524	 * position in the tree.
 525	 */
 526	__mem_cgroup_remove_exceeded(mz, mctz);
 527	if (!soft_limit_excess(mz->memcg) ||
 528	    !css_tryget_online(&mz->memcg->css))
 529		goto retry;
 530done:
 531	return mz;
 532}
 533
 534static struct mem_cgroup_per_node *
 535mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
 536{
 537	struct mem_cgroup_per_node *mz;
 538
 539	spin_lock_irq(&mctz->lock);
 540	mz = __mem_cgroup_largest_soft_limit_node(mctz);
 541	spin_unlock_irq(&mctz->lock);
 542	return mz;
 543}
 544
 545static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
 546				      int event)
 547{
 548	return atomic_long_read(&memcg->events[event]);
 549}
 550
 551static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 552					 struct page *page,
 553					 bool compound, int nr_pages)
 554{
 555	/*
 556	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
 557	 * counted as CACHE even if it's on ANON LRU.
 558	 */
 559	if (PageAnon(page))
 560		__mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
 561	else {
 562		__mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
 563		if (PageSwapBacked(page))
 564			__mod_memcg_state(memcg, NR_SHMEM, nr_pages);
 565	}
 566
 567	if (compound) {
 568		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 569		__mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
 570	}
 571
 572	/* pagein of a big page is an event. So, ignore page size */
 573	if (nr_pages > 0)
 574		__count_memcg_events(memcg, PGPGIN, 1);
 575	else {
 576		__count_memcg_events(memcg, PGPGOUT, 1);
 577		nr_pages = -nr_pages; /* for event */
 578	}
 579
 580	__this_cpu_add(memcg->stat_cpu->nr_page_events, nr_pages);
 581}
 582
 583unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
 584					   int nid, unsigned int lru_mask)
 585{
 586	struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
 587	unsigned long nr = 0;
 588	enum lru_list lru;
 589
 590	VM_BUG_ON((unsigned)nid >= nr_node_ids);
 591
 592	for_each_lru(lru) {
 593		if (!(BIT(lru) & lru_mask))
 594			continue;
 595		nr += mem_cgroup_get_lru_size(lruvec, lru);
 596	}
 597	return nr;
 598}
 599
 600static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
 601			unsigned int lru_mask)
 602{
 603	unsigned long nr = 0;
 604	int nid;
 605
 606	for_each_node_state(nid, N_MEMORY)
 607		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
 608	return nr;
 609}
 610
 611static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
 612				       enum mem_cgroup_events_target target)
 613{
 614	unsigned long val, next;
 615
 616	val = __this_cpu_read(memcg->stat_cpu->nr_page_events);
 617	next = __this_cpu_read(memcg->stat_cpu->targets[target]);
 618	/* from time_after() in jiffies.h */
 619	if ((long)(next - val) < 0) {
 620		switch (target) {
 621		case MEM_CGROUP_TARGET_THRESH:
 622			next = val + THRESHOLDS_EVENTS_TARGET;
 623			break;
 624		case MEM_CGROUP_TARGET_SOFTLIMIT:
 625			next = val + SOFTLIMIT_EVENTS_TARGET;
 626			break;
 627		case MEM_CGROUP_TARGET_NUMAINFO:
 628			next = val + NUMAINFO_EVENTS_TARGET;
 629			break;
 630		default:
 631			break;
 632		}
 633		__this_cpu_write(memcg->stat_cpu->targets[target], next);
 634		return true;
 635	}
 636	return false;
 637}
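/*
 * The (long)(next - val) < 0 test above is the wrap-safe comparison
 * also used by time_after().  For example, next == 1000 and val == 1100
 * gives a negative difference, so the target has been passed; and if
 * val has wrapped around to 5 while next == ULONG_MAX - 50, next - val
 * is still negative as a signed long, so the wrapped counter fires
 * correctly.
 */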
 638
 639/*
 640 * Check events in order.
 641 *
 642 */
 643static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 644{
 645	/* threshold event is triggered in finer grain than soft limit */
 646	if (unlikely(mem_cgroup_event_ratelimit(memcg,
 647						MEM_CGROUP_TARGET_THRESH))) {
 648		bool do_softlimit;
 649		bool do_numainfo __maybe_unused;
 650
 651		do_softlimit = mem_cgroup_event_ratelimit(memcg,
 652						MEM_CGROUP_TARGET_SOFTLIMIT);
 653#if MAX_NUMNODES > 1
 654		do_numainfo = mem_cgroup_event_ratelimit(memcg,
 655						MEM_CGROUP_TARGET_NUMAINFO);
 656#endif
 657		mem_cgroup_threshold(memcg);
 658		if (unlikely(do_softlimit))
 659			mem_cgroup_update_tree(memcg, page);
 660#if MAX_NUMNODES > 1
 661		if (unlikely(do_numainfo))
 662			atomic_inc(&memcg->numainfo_events);
 663#endif
 664	}
 665}
 666
 667struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 668{
 669	/*
 670	 * mm_update_next_owner() may clear mm->owner to NULL
 671	 * if it races with swapoff, page migration, etc.
 672	 * So this can be called with p == NULL.
 673	 */
 674	if (unlikely(!p))
 675		return NULL;
 676
 677	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
 678}
 679EXPORT_SYMBOL(mem_cgroup_from_task);
 680
 681static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
 682{
 683	struct mem_cgroup *memcg = NULL;
 684
 685	rcu_read_lock();
 686	do {
 687		/*
 688		 * Page cache insertions can happen without an
 689		 * actual mm context, e.g. during disk probing
 690		 * on boot, loopback IO, acct() writes etc.
 691		 */
 692		if (unlikely(!mm))
 693			memcg = root_mem_cgroup;
 694		else {
 695			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
 696			if (unlikely(!memcg))
 697				memcg = root_mem_cgroup;
 698		}
 699	} while (!css_tryget_online(&memcg->css));
 700	rcu_read_unlock();
 701	return memcg;
 702}
 703
 704/**
 705 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 706 * @root: hierarchy root
 707 * @prev: previously returned memcg, NULL on first invocation
 708 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 709 *
 710 * Returns references to children of the hierarchy below @root, or
 711 * @root itself, or %NULL after a full round-trip.
 712 *
 713 * Caller must pass the return value in @prev on subsequent
 714 * invocations for reference counting, or use mem_cgroup_iter_break()
 715 * to cancel a hierarchy walk before the round-trip is complete.
 716 *
 717 * Reclaimers can specify a node and a priority level in @reclaim to
 718 * divide up the memcgs in the hierarchy among all concurrent
 719 * reclaimers operating on the same node and priority.
 720 */
 721struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 722				   struct mem_cgroup *prev,
 723				   struct mem_cgroup_reclaim_cookie *reclaim)
 724{
 725	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
 726	struct cgroup_subsys_state *css = NULL;
 727	struct mem_cgroup *memcg = NULL;
 728	struct mem_cgroup *pos = NULL;
 729
 730	if (mem_cgroup_disabled())
 731		return NULL;
 732
 733	if (!root)
 734		root = root_mem_cgroup;
 735
 736	if (prev && !reclaim)
 737		pos = prev;
 738
 739	if (!root->use_hierarchy && root != root_mem_cgroup) {
 740		if (prev)
 741			goto out;
 742		return root;
 743	}
 744
 745	rcu_read_lock();
 746
 747	if (reclaim) {
 748		struct mem_cgroup_per_node *mz;
 749
 750		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
 751		iter = &mz->iter[reclaim->priority];
 752
 753		if (prev && reclaim->generation != iter->generation)
 754			goto out_unlock;
 755
 756		while (1) {
 757			pos = READ_ONCE(iter->position);
 758			if (!pos || css_tryget(&pos->css))
 759				break;
 760			/*
 761			 * css reference reached zero, so iter->position will
 762			 * be cleared by ->css_released. However, we should not
 763			 * rely on this happening soon, because ->css_released
 764			 * is called from a work queue, and by busy-waiting we
 765			 * might block it. So we clear iter->position right
 766			 * away.
 767			 */
 768			(void)cmpxchg(&iter->position, pos, NULL);
 769		}
 770	}
 771
 772	if (pos)
 773		css = &pos->css;
 774
 775	for (;;) {
 776		css = css_next_descendant_pre(css, &root->css);
 777		if (!css) {
 778			/*
 779			 * Reclaimers share the hierarchy walk, and a
 780			 * new one might jump in right at the end of
 781			 * the hierarchy - make sure they see at least
 782			 * one group and restart from the beginning.
 783			 */
 784			if (!prev)
 785				continue;
 786			break;
 787		}
 788
 789		/*
 790		 * Verify the css and acquire a reference.  The root
 791		 * is provided by the caller, so we know it's alive
 792		 * and kicking, and don't take an extra reference.
 793		 */
 794		memcg = mem_cgroup_from_css(css);
 795
 796		if (css == &root->css)
 797			break;
 798
 799		if (css_tryget(css))
 800			break;
 801
 802		memcg = NULL;
 803	}
 804
 805	if (reclaim) {
 806		/*
 807		 * The position could have already been updated by a competing
 808		 * thread, so check that the value hasn't changed since we read
 809		 * it to avoid reclaiming from the same cgroup twice.
 810		 */
 811		(void)cmpxchg(&iter->position, pos, memcg);
 812
 813		if (pos)
 814			css_put(&pos->css);
 815
 816		if (!memcg)
 817			iter->generation++;
 818		else if (!prev)
 819			reclaim->generation = iter->generation;
 820	}
 821
 822out_unlock:
 823	rcu_read_unlock();
 824out:
 825	if (prev && prev != root)
 826		css_put(&prev->css);
 827
 828	return memcg;
 829}
 830
 831/**
 832 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 833 * @root: hierarchy root
 834 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 835 */
 836void mem_cgroup_iter_break(struct mem_cgroup *root,
 837			   struct mem_cgroup *prev)
 838{
 839	if (!root)
 840		root = root_mem_cgroup;
 841	if (prev && prev != root)
 842		css_put(&prev->css);
 843}
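/*
 * Illustrative full walk with an early exit, equivalent to what the
 * for_each_mem_cgroup_tree() helper below expands to (the "stop_here"
 * predicate is hypothetical):
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL); iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		if (stop_here(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */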
 844
 845static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
 846{
 847	struct mem_cgroup *memcg = dead_memcg;
 848	struct mem_cgroup_reclaim_iter *iter;
 849	struct mem_cgroup_per_node *mz;
 850	int nid;
 851	int i;
 852
 853	while ((memcg = parent_mem_cgroup(memcg))) {
 854		for_each_node(nid) {
 855			mz = mem_cgroup_nodeinfo(memcg, nid);
 856			for (i = 0; i <= DEF_PRIORITY; i++) {
 857				iter = &mz->iter[i];
 858				cmpxchg(&iter->position,
 859					dead_memcg, NULL);
 860			}
 861		}
 862	}
 863}
 864
 865/*
 866 * Iteration constructs for visiting all cgroups (under a tree).  If
 867 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 868 * be used for reference counting.
 869 */
 870#define for_each_mem_cgroup_tree(iter, root)		\
 871	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
 872	     iter != NULL;				\
 873	     iter = mem_cgroup_iter(root, iter, NULL))
 874
 875#define for_each_mem_cgroup(iter)			\
 876	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
 877	     iter != NULL;				\
 878	     iter = mem_cgroup_iter(NULL, iter, NULL))
 879
 880/**
 881 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 882 * @memcg: hierarchy root
 883 * @fn: function to call for each task
 884 * @arg: argument passed to @fn
 885 *
 886 * This function iterates over tasks attached to @memcg or to any of its
 887 * descendants and calls @fn for each task. If @fn returns a non-zero
 888 * value, the function breaks the iteration loop and returns the value.
 889 * Otherwise, it will iterate over all tasks and return 0.
 890 *
 891 * This function must not be called for the root memory cgroup.
 892 */
 893int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
 894			  int (*fn)(struct task_struct *, void *), void *arg)
 895{
 896	struct mem_cgroup *iter;
 897	int ret = 0;
 898
 899	BUG_ON(memcg == root_mem_cgroup);
 900
 901	for_each_mem_cgroup_tree(iter, memcg) {
 902		struct css_task_iter it;
 903		struct task_struct *task;
 904
 905		css_task_iter_start(&iter->css, 0, &it);
 906		while (!ret && (task = css_task_iter_next(&it)))
 907			ret = fn(task, arg);
 908		css_task_iter_end(&it);
 909		if (ret) {
 910			mem_cgroup_iter_break(memcg, iter);
 911			break;
 912		}
 913	}
 914	return ret;
 915}
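/*
 * Illustrative use (the callback and counter are hypothetical): count
 * every task attached to a non-root memcg or any of its descendants.
 *
 *	static int count_task(struct task_struct *task, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;
 *	}
 *
 *	int nr_tasks = 0;
 *	mem_cgroup_scan_tasks(memcg, count_task, &nr_tasks);
 */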
 916
 917/**
 918 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 919 * @page: the page
 920 * @pgdat: pgdat of the page
 921 *
 922 * This function is only safe when following the LRU page isolation
 923 * and putback protocol: the LRU lock must be held, and the page must
 924 * either be PageLRU() or the caller must have isolated/allocated it.
 925 */
 926struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
 927{
 928	struct mem_cgroup_per_node *mz;
 929	struct mem_cgroup *memcg;
 930	struct lruvec *lruvec;
 931
 932	if (mem_cgroup_disabled()) {
 933		lruvec = &pgdat->lruvec;
 934		goto out;
 935	}
 936
 937	memcg = page->mem_cgroup;
 938	/*
 939	 * Swapcache readahead pages are added to the LRU - and
 940	 * possibly migrated - before they are charged.
 941	 */
 942	if (!memcg)
 943		memcg = root_mem_cgroup;
 944
 945	mz = mem_cgroup_page_nodeinfo(memcg, page);
 946	lruvec = &mz->lruvec;
 947out:
 948	/*
 949	 * Since a node can be onlined after the mem_cgroup was created,
 950	 * we have to be prepared to initialize lruvec->pgdat here;
 951	 * and if offlined then reonlined, we need to reinitialize it.
 952	 */
 953	if (unlikely(lruvec->pgdat != pgdat))
 954		lruvec->pgdat = pgdat;
 955	return lruvec;
 956}
 957
 958/**
 959 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 960 * @lruvec: mem_cgroup per zone lru vector
 961 * @lru: index of lru list the page is sitting on
 962 * @zid: zone id of the accounted pages
 963 * @nr_pages: positive when adding or negative when removing
 964 *
 965 * This function must be called under lru_lock, just before a page is added
 966 * to or just after a page is removed from an lru list (that ordering being
 967 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 968 */
 969void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
 970				int zid, int nr_pages)
 971{
 972	struct mem_cgroup_per_node *mz;
 973	unsigned long *lru_size;
 974	long size;
 975
 976	if (mem_cgroup_disabled())
 977		return;
 978
 979	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 980	lru_size = &mz->lru_zone_size[zid][lru];
 981
 982	if (nr_pages < 0)
 983		*lru_size += nr_pages;
 984
 985	size = *lru_size;
 986	if (WARN_ONCE(size < 0,
 987		"%s(%p, %d, %d): lru_size %ld\n",
 988		__func__, lruvec, lru, nr_pages, size)) {
 989		VM_BUG_ON(1);
 990		*lru_size = 0;
 991	}
 992
 993	if (nr_pages > 0)
 994		*lru_size += nr_pages;
 995}
 996
 997bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
 998{
 999	struct mem_cgroup *task_memcg;
1000	struct task_struct *p;
1001	bool ret;
1002
1003	p = find_lock_task_mm(task);
1004	if (p) {
1005		task_memcg = get_mem_cgroup_from_mm(p->mm);
1006		task_unlock(p);
1007	} else {
1008		/*
1009		 * All threads may have already detached their mm's, but the oom
1010		 * killer still needs to detect if they have already been oom
1011		 * killed to prevent needlessly killing additional tasks.
1012		 */
1013		rcu_read_lock();
1014		task_memcg = mem_cgroup_from_task(task);
1015		css_get(&task_memcg->css);
1016		rcu_read_unlock();
1017	}
1018	ret = mem_cgroup_is_descendant(task_memcg, memcg);
1019	css_put(&task_memcg->css);
1020	return ret;
1021}
1022
1023/**
1024 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1025 * @memcg: the memory cgroup
1026 *
1027 * Returns the maximum amount of memory @mem can be charged with, in
1028 * pages.
1029 */
1030static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1031{
1032	unsigned long margin = 0;
1033	unsigned long count;
1034	unsigned long limit;
1035
1036	count = page_counter_read(&memcg->memory);
1037	limit = READ_ONCE(memcg->memory.limit);
1038	if (count < limit)
1039		margin = limit - count;
1040
1041	if (do_memsw_account()) {
1042		count = page_counter_read(&memcg->memsw);
1043		limit = READ_ONCE(memcg->memsw.limit);
1044		if (count <= limit)
1045			margin = min(margin, limit - count);
1046		else
1047			margin = 0;
1048	}
1049
1050	return margin;
1051}
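/*
 * For example, with memory usage at 800 of a 1000 page limit and, under
 * legacy memory+swap accounting, memsw usage at 1150 of a 1200 page
 * limit, the margin is min(1000 - 800, 1200 - 1150) == 50 pages.
 */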
1052
1053/*
1054 * A routine for checking "mem" is under move_account() or not.
1055 *
1056 * Checking a cgroup is mc.from or mc.to or under hierarchy of
1057 * moving cgroups. This is for waiting at high-memory pressure
1058 * caused by "move".
1059 */
1060static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1061{
1062	struct mem_cgroup *from;
1063	struct mem_cgroup *to;
1064	bool ret = false;
1065	/*
1066	 * Unlike task_move routines, we access mc.to, mc.from not under
1067	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1068	 */
1069	spin_lock(&mc.lock);
1070	from = mc.from;
1071	to = mc.to;
1072	if (!from)
1073		goto unlock;
1074
1075	ret = mem_cgroup_is_descendant(from, memcg) ||
1076		mem_cgroup_is_descendant(to, memcg);
1077unlock:
1078	spin_unlock(&mc.lock);
1079	return ret;
1080}
1081
1082static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1083{
1084	if (mc.moving_task && current != mc.moving_task) {
1085		if (mem_cgroup_under_move(memcg)) {
1086			DEFINE_WAIT(wait);
1087			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1088			/* moving charge context might have finished. */
1089			if (mc.moving_task)
1090				schedule();
1091			finish_wait(&mc.waitq, &wait);
1092			return true;
1093		}
1094	}
1095	return false;
1096}
1097
1098static const unsigned int memcg1_stats[] = {
1099	MEMCG_CACHE,
1100	MEMCG_RSS,
1101	MEMCG_RSS_HUGE,
1102	NR_SHMEM,
1103	NR_FILE_MAPPED,
1104	NR_FILE_DIRTY,
1105	NR_WRITEBACK,
1106	MEMCG_SWAP,
1107};
1108
1109static const char *const memcg1_stat_names[] = {
1110	"cache",
1111	"rss",
1112	"rss_huge",
1113	"shmem",
1114	"mapped_file",
1115	"dirty",
1116	"writeback",
1117	"swap",
1118};
1119
1120#define K(x) ((x) << (PAGE_SHIFT-10))
1121/**
1122 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
1123 * @memcg: The memory cgroup that went over limit
1124 * @p: Task that is going to be killed
1125 *
1126 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1127 * enabled
1128 */
1129void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1130{
1131	struct mem_cgroup *iter;
1132	unsigned int i;
1133
1134	rcu_read_lock();
1135
1136	if (p) {
1137		pr_info("Task in ");
1138		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1139		pr_cont(" killed as a result of limit of ");
1140	} else {
1141		pr_info("Memory limit reached of cgroup ");
1142	}
1143
1144	pr_cont_cgroup_path(memcg->css.cgroup);
1145	pr_cont("\n");
1146
1147	rcu_read_unlock();
1148
1149	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1150		K((u64)page_counter_read(&memcg->memory)),
1151		K((u64)memcg->memory.limit), memcg->memory.failcnt);
1152	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1153		K((u64)page_counter_read(&memcg->memsw)),
1154		K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
1155	pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1156		K((u64)page_counter_read(&memcg->kmem)),
1157		K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
1158
1159	for_each_mem_cgroup_tree(iter, memcg) {
1160		pr_info("Memory cgroup stats for ");
1161		pr_cont_cgroup_path(iter->css.cgroup);
1162		pr_cont(":");
1163
1164		for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
1165			if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account)
1166				continue;
1167			pr_cont(" %s:%luKB", memcg1_stat_names[i],
1168				K(memcg_page_state(iter, memcg1_stats[i])));
1169		}
1170
1171		for (i = 0; i < NR_LRU_LISTS; i++)
1172			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1173				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1174
1175		pr_cont("\n");
1176	}
1177}
1178
1179/*
1180 * Return the memory (and swap, if configured) limit for a memcg.
1181 */
1182unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
1183{
1184	unsigned long limit;
1185
1186	limit = memcg->memory.limit;
1187	if (mem_cgroup_swappiness(memcg)) {
1188		unsigned long memsw_limit;
1189		unsigned long swap_limit;
1190
1191		memsw_limit = memcg->memsw.limit;
1192		swap_limit = memcg->swap.limit;
1193		swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
1194		limit = min(limit + swap_limit, memsw_limit);
1195	}
1196	return limit;
1197}
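/*
 * For example, with memory.limit == 1000 pages, swap.limit == 400 pages
 * but only 300 pages of swap present (total_swap_pages), and
 * memsw.limit == 1200 pages, the result is min(1000 + 300, 1200) ==
 * 1200 pages.  With swappiness 0 it is just memory.limit.
 */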
1198
1199static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1200				     int order)
1201{
1202	struct oom_control oc = {
1203		.zonelist = NULL,
1204		.nodemask = NULL,
1205		.memcg = memcg,
1206		.gfp_mask = gfp_mask,
1207		.order = order,
1208	};
1209	bool ret;
1210
1211	mutex_lock(&oom_lock);
1212	ret = out_of_memory(&oc);
1213	mutex_unlock(&oom_lock);
1214	return ret;
1215}
1216
1217#if MAX_NUMNODES > 1
1218
1219/**
1220 * test_mem_cgroup_node_reclaimable
1221 * @memcg: the target memcg
1222 * @nid: the node ID to be checked.
1223 * @noswap : specify true here if the user wants file-only information.
1224 *
1225 * This function returns whether the specified memcg contains any
1226 * reclaimable pages on a node. Returns true if there are any reclaimable
1227 * pages in the node.
1228 */
1229static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1230		int nid, bool noswap)
1231{
1232	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1233		return true;
1234	if (noswap || !total_swap_pages)
1235		return false;
1236	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1237		return true;
1238	return false;
1239
1240}
1241
1242/*
1243 * Always updating the nodemask is not very good - even if we have an empty
1244 * list or the wrong list here, we can start from some node and traverse all
1245 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1246 *
1247 */
1248static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1249{
1250	int nid;
1251	/*
1252	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1253	 * pagein/pageout changes since the last update.
1254	 */
1255	if (!atomic_read(&memcg->numainfo_events))
1256		return;
1257	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1258		return;
1259
1260	/* make a nodemask where this memcg uses memory from */
1261	memcg->scan_nodes = node_states[N_MEMORY];
1262
1263	for_each_node_mask(nid, node_states[N_MEMORY]) {
1264
1265		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1266			node_clear(nid, memcg->scan_nodes);
1267	}
1268
1269	atomic_set(&memcg->numainfo_events, 0);
1270	atomic_set(&memcg->numainfo_updating, 0);
1271}
1272
1273/*
1274 * Selecting a node where we start reclaim from. Because all we need is to
1275 * reduce the usage counter, starting from anywhere is OK. Considering
1276 * memory reclaim from current node, there are pros. and cons.
1277 *
1278 * Freeing memory from current node means freeing memory from a node which
1279 * we'll use or we've used. So, it may make LRU bad. And if several threads
1280 * hit limits, they will contend on a node. But freeing from remote
1281 * node means more costs for memory reclaim because of memory latency.
1282 *
1283 * Now, we use round-robin. Better algorithm is welcomed.
1284 */
1285int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1286{
1287	int node;
1288
1289	mem_cgroup_may_update_nodemask(memcg);
1290	node = memcg->last_scanned_node;
1291
1292	node = next_node_in(node, memcg->scan_nodes);
1293	/*
1294	 * mem_cgroup_may_update_nodemask might have seen no reclaimable pages
1295	 * last time it really checked all the LRUs due to rate limiting.
1296	 * Fallback to the current node in that case for simplicity.
1297	 */
1298	if (unlikely(node == MAX_NUMNODES))
1299		node = numa_node_id();
1300
1301	memcg->last_scanned_node = node;
1302	return node;
1303}
1304#else
1305int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1306{
1307	return 0;
1308}
1309#endif
1310
1311static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1312				   pg_data_t *pgdat,
1313				   gfp_t gfp_mask,
1314				   unsigned long *total_scanned)
1315{
1316	struct mem_cgroup *victim = NULL;
1317	int total = 0;
1318	int loop = 0;
1319	unsigned long excess;
1320	unsigned long nr_scanned;
1321	struct mem_cgroup_reclaim_cookie reclaim = {
1322		.pgdat = pgdat,
1323		.priority = 0,
1324	};
1325
1326	excess = soft_limit_excess(root_memcg);
1327
1328	while (1) {
1329		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1330		if (!victim) {
1331			loop++;
1332			if (loop >= 2) {
1333				/*
1334				 * If we have not been able to reclaim
1335				 * anything, it might be because there are
1336				 * no reclaimable pages under this hierarchy
1337				 */
1338				if (!total)
1339					break;
1340				/*
1341				 * We want to do more targeted reclaim.
1342				 * excess >> 2 is not too excessive, so we do not
1343				 * reclaim too much, nor too little, which would keep
1344				 * us coming back to reclaim from this cgroup
1345				 */
1346				if (total >= (excess >> 2) ||
1347					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1348					break;
1349			}
1350			continue;
1351		}
1352		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1353					pgdat, &nr_scanned);
1354		*total_scanned += nr_scanned;
1355		if (!soft_limit_excess(root_memcg))
1356			break;
1357	}
1358	mem_cgroup_iter_break(root_memcg, victim);
1359	return total;
1360}
1361
1362#ifdef CONFIG_LOCKDEP
1363static struct lockdep_map memcg_oom_lock_dep_map = {
1364	.name = "memcg_oom_lock",
1365};
1366#endif
1367
1368static DEFINE_SPINLOCK(memcg_oom_lock);
1369
1370/*
1371 * Check whether the OOM killer is already running under our hierarchy.
1372 * If someone is running, return false.
1373 */
1374static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1375{
1376	struct mem_cgroup *iter, *failed = NULL;
1377
1378	spin_lock(&memcg_oom_lock);
1379
1380	for_each_mem_cgroup_tree(iter, memcg) {
1381		if (iter->oom_lock) {
1382			/*
1383			 * this subtree of our hierarchy is already locked
1384			 * so we cannot give a lock.
1385			 */
1386			failed = iter;
1387			mem_cgroup_iter_break(memcg, iter);
1388			break;
1389		} else
1390			iter->oom_lock = true;
1391	}
1392
1393	if (failed) {
1394		/*
1395		 * OK, we failed to lock the whole subtree so we have
1396		 * to clean up what we set up to the failing subtree
1397		 */
1398		for_each_mem_cgroup_tree(iter, memcg) {
1399			if (iter == failed) {
1400				mem_cgroup_iter_break(memcg, iter);
1401				break;
1402			}
1403			iter->oom_lock = false;
1404		}
1405	} else
1406		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1407
1408	spin_unlock(&memcg_oom_lock);
1409
1410	return !failed;
1411}
1412
1413static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1414{
1415	struct mem_cgroup *iter;
1416
1417	spin_lock(&memcg_oom_lock);
1418	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
1419	for_each_mem_cgroup_tree(iter, memcg)
1420		iter->oom_lock = false;
1421	spin_unlock(&memcg_oom_lock);
1422}
1423
1424static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1425{
1426	struct mem_cgroup *iter;
1427
1428	spin_lock(&memcg_oom_lock);
1429	for_each_mem_cgroup_tree(iter, memcg)
1430		iter->under_oom++;
1431	spin_unlock(&memcg_oom_lock);
1432}
1433
1434static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1435{
1436	struct mem_cgroup *iter;
1437
1438	/*
1439	 * When a new child is created while the hierarchy is under oom,
1440	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
1441	 */
1442	spin_lock(&memcg_oom_lock);
1443	for_each_mem_cgroup_tree(iter, memcg)
1444		if (iter->under_oom > 0)
1445			iter->under_oom--;
1446	spin_unlock(&memcg_oom_lock);
1447}
1448
1449static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1450
1451struct oom_wait_info {
1452	struct mem_cgroup *memcg;
1453	wait_queue_entry_t	wait;
1454};
1455
1456static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1457	unsigned mode, int sync, void *arg)
1458{
1459	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1460	struct mem_cgroup *oom_wait_memcg;
1461	struct oom_wait_info *oom_wait_info;
1462
1463	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1464	oom_wait_memcg = oom_wait_info->memcg;
1465
1466	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1467	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1468		return 0;
1469	return autoremove_wake_function(wait, mode, sync, arg);
1470}
1471
1472static void memcg_oom_recover(struct mem_cgroup *memcg)
1473{
1474	/*
1475	 * For the following lockless ->under_oom test, the only required
1476	 * guarantee is that it must see the state asserted by an OOM when
1477	 * this function is called as a result of userland actions
1478	 * triggered by the notification of the OOM.  This is trivially
1479	 * achieved by invoking mem_cgroup_mark_under_oom() before
1480	 * triggering notification.
1481	 */
1482	if (memcg && memcg->under_oom)
1483		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1484}
1485
1486static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1487{
1488	if (!current->memcg_may_oom || order > PAGE_ALLOC_COSTLY_ORDER)
1489		return;
1490	/*
1491	 * We are in the middle of the charge context here, so we
1492	 * don't want to block when potentially sitting on a callstack
1493	 * that holds all kinds of filesystem and mm locks.
1494	 *
1495	 * Also, the caller may handle a failed allocation gracefully
1496	 * (like optional page cache readahead) and so an OOM killer
1497	 * invocation might not even be necessary.
1498	 *
1499	 * That's why we don't do anything here except remember the
1500	 * OOM context and then deal with it at the end of the page
1501	 * fault when the stack is unwound, the locks are released,
1502	 * and when we know whether the fault was overall successful.
1503	 */
1504	css_get(&memcg->css);
1505	current->memcg_in_oom = memcg;
1506	current->memcg_oom_gfp_mask = mask;
1507	current->memcg_oom_order = order;
1508}
1509
1510/**
1511 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1512 * @handle: actually kill/wait or just clean up the OOM state
1513 *
1514 * This has to be called at the end of a page fault if the memcg OOM
1515 * handler was enabled.
1516 *
1517 * Memcg supports userspace OOM handling where failed allocations must
1518 * sleep on a waitqueue until the userspace task resolves the
1519 * situation.  Sleeping directly in the charge context with all kinds
1520 * of locks held is not a good idea, instead we remember an OOM state
1521 * in the task and mem_cgroup_oom_synchronize() has to be called at
1522 * the end of the page fault to complete the OOM handling.
1523 *
1524 * Returns %true if an ongoing memcg OOM situation was detected and
1525 * completed, %false otherwise.
1526 */
1527bool mem_cgroup_oom_synchronize(bool handle)
1528{
1529	struct mem_cgroup *memcg = current->memcg_in_oom;
1530	struct oom_wait_info owait;
1531	bool locked;
1532
1533	/* OOM is global, do not handle */
1534	if (!memcg)
1535		return false;
1536
1537	if (!handle)
1538		goto cleanup;
1539
1540	owait.memcg = memcg;
1541	owait.wait.flags = 0;
1542	owait.wait.func = memcg_oom_wake_function;
1543	owait.wait.private = current;
1544	INIT_LIST_HEAD(&owait.wait.entry);
1545
1546	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1547	mem_cgroup_mark_under_oom(memcg);
1548
1549	locked = mem_cgroup_oom_trylock(memcg);
1550
1551	if (locked)
1552		mem_cgroup_oom_notify(memcg);
1553
1554	if (locked && !memcg->oom_kill_disable) {
1555		mem_cgroup_unmark_under_oom(memcg);
1556		finish_wait(&memcg_oom_waitq, &owait.wait);
1557		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1558					 current->memcg_oom_order);
1559	} else {
1560		schedule();
1561		mem_cgroup_unmark_under_oom(memcg);
1562		finish_wait(&memcg_oom_waitq, &owait.wait);
1563	}
1564
1565	if (locked) {
1566		mem_cgroup_oom_unlock(memcg);
1567		/*
1568		 * There is no guarantee that an OOM-lock contender
1569		 * sees the wakeups triggered by the OOM kill
1570		 * uncharges.  Wake any sleepers explicitly.
1571		 */
1572		memcg_oom_recover(memcg);
1573	}
1574cleanup:
1575	current->memcg_in_oom = NULL;
1576	css_put(&memcg->css);
1577	return true;
1578}
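/*
 * Rough sketch of the caller-side protocol (simplified; the real
 * callers sit in the page fault paths, and "fault_was_oom" stands in
 * for their VM_FAULT_OOM check):
 *
 *	if (task_in_memcg_oom(current)) {
 *		if (fault_was_oom)
 *			mem_cgroup_oom_synchronize(true);
 *		else
 *			mem_cgroup_oom_synchronize(false);
 *	}
 */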
1579
1580/**
1581 * lock_page_memcg - lock a page->mem_cgroup binding
1582 * @page: the page
1583 *
1584 * This function protects unlocked LRU pages from being moved to
1585 * another cgroup.
1586 *
1587 * It ensures lifetime of the returned memcg. Caller is responsible
1588 * for the lifetime of the page; __unlock_page_memcg() is available
1589 * when @page might get freed inside the locked section.
1590 */
1591struct mem_cgroup *lock_page_memcg(struct page *page)
1592{
1593	struct mem_cgroup *memcg;
1594	unsigned long flags;
1595
1596	/*
1597	 * The RCU lock is held throughout the transaction.  The fast
1598	 * path can get away without acquiring the memcg->move_lock
1599	 * because page moving starts with an RCU grace period.
1600	 *
1601	 * The RCU lock also protects the memcg from being freed when
1602	 * the page state that is going to change is the only thing
1603	 * preventing the page itself from being freed. E.g. writeback
1604	 * doesn't hold a page reference and relies on PG_writeback to
1605	 * keep off truncation, migration and so forth.
1606         */
1607	rcu_read_lock();
1608
1609	if (mem_cgroup_disabled())
1610		return NULL;
1611again:
1612	memcg = page->mem_cgroup;
1613	if (unlikely(!memcg))
1614		return NULL;
1615
1616	if (atomic_read(&memcg->moving_account) <= 0)
1617		return memcg;
1618
1619	spin_lock_irqsave(&memcg->move_lock, flags);
1620	if (memcg != page->mem_cgroup) {
1621		spin_unlock_irqrestore(&memcg->move_lock, flags);
1622		goto again;
1623	}
1624
1625	/*
1626	 * When charge migration first begins, we can have locked and
1627	 * unlocked page stat updates happening concurrently.  Track
1628	 * the task who has the lock for unlock_page_memcg().
1629	 */
1630	memcg->move_lock_task = current;
1631	memcg->move_lock_flags = flags;
1632
1633	return memcg;
1634}
1635EXPORT_SYMBOL(lock_page_memcg);
1636
1637/**
1638 * __unlock_page_memcg - unlock and unpin a memcg
1639 * @memcg: the memcg
1640 *
1641 * Unlock and unpin a memcg returned by lock_page_memcg().
1642 */
1643void __unlock_page_memcg(struct mem_cgroup *memcg)
1644{
1645	if (memcg && memcg->move_lock_task == current) {
1646		unsigned long flags = memcg->move_lock_flags;
1647
1648		memcg->move_lock_task = NULL;
1649		memcg->move_lock_flags = 0;
1650
1651		spin_unlock_irqrestore(&memcg->move_lock, flags);
1652	}
1653
1654	rcu_read_unlock();
1655}
1656
1657/**
1658 * unlock_page_memcg - unlock a page->mem_cgroup binding
1659 * @page: the page
1660 */
1661void unlock_page_memcg(struct page *page)
1662{
1663	__unlock_page_memcg(page->mem_cgroup);
1664}
1665EXPORT_SYMBOL(unlock_page_memcg);
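/*
 * Illustrative caller pattern (the stat item updated here is only an
 * example): pin the page's memcg binding around a page-state update so
 * that charge migration cannot move the page underneath it.
 *
 *	struct mem_cgroup *memcg;
 *
 *	memcg = lock_page_memcg(page);
 *	if (memcg)
 *		__mod_memcg_state(memcg, NR_FILE_MAPPED, -1);
 *	unlock_page_memcg(page);
 */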
1666
1667struct memcg_stock_pcp {
1668	struct mem_cgroup *cached; /* this never be root cgroup */
1669	unsigned int nr_pages;
1670	struct work_struct work;
1671	unsigned long flags;
1672#define FLUSHING_CACHED_CHARGE	0
1673};
1674static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1675static DEFINE_MUTEX(percpu_charge_mutex);
1676
1677/**
1678 * consume_stock: Try to consume stocked charge on this cpu.
1679 * @memcg: memcg to consume from.
1680 * @nr_pages: how many pages to charge.
1681 *
1682 * The charges will only happen if @memcg matches the current cpu's memcg
1683 * stock, and at least @nr_pages are available in that stock.  Failure to
1684 * service an allocation will refill the stock.
1685 *
1686 * returns true if successful, false otherwise.
1687 */
1688static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1689{
1690	struct memcg_stock_pcp *stock;
1691	unsigned long flags;
1692	bool ret = false;
1693
1694	if (nr_pages > MEMCG_CHARGE_BATCH)
1695		return ret;
1696
1697	local_irq_save(flags);
1698
1699	stock = this_cpu_ptr(&memcg_stock);
1700	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
1701		stock->nr_pages -= nr_pages;
1702		ret = true;
1703	}
1704
1705	local_irq_restore(flags);
1706
1707	return ret;
1708}
1709
1710/*
1711 * Return stocked charges to the page counters and reset cached information.
1712 */
1713static void drain_stock(struct memcg_stock_pcp *stock)
1714{
1715	struct mem_cgroup *old = stock->cached;
1716
1717	if (stock->nr_pages) {
1718		page_counter_uncharge(&old->memory, stock->nr_pages);
1719		if (do_memsw_account())
1720			page_counter_uncharge(&old->memsw, stock->nr_pages);
1721		css_put_many(&old->css, stock->nr_pages);
1722		stock->nr_pages = 0;
1723	}
1724	stock->cached = NULL;
1725}
1726
1727static void drain_local_stock(struct work_struct *dummy)
1728{
1729	struct memcg_stock_pcp *stock;
1730	unsigned long flags;
1731
1732	/*
1733	 * The only protection from memory hotplug vs. drain_stock races is
1734	 * that we always operate on local CPU stock here with IRQ disabled
1735	 */
1736	local_irq_save(flags);
1737
1738	stock = this_cpu_ptr(&memcg_stock);
1739	drain_stock(stock);
1740	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1741
1742	local_irq_restore(flags);
1743}
1744
1745/*
1746 * Cache charges(val) to local per_cpu area.
1747 * This will be consumed by consume_stock() function, later.
1748 */
1749static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1750{
1751	struct memcg_stock_pcp *stock;
1752	unsigned long flags;
1753
1754	local_irq_save(flags);
1755
1756	stock = this_cpu_ptr(&memcg_stock);
1757	if (stock->cached != memcg) { /* reset if necessary */
1758		drain_stock(stock);
1759		stock->cached = memcg;
1760	}
1761	stock->nr_pages += nr_pages;
1762
1763	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
1764		drain_stock(stock);
1765
1766	local_irq_restore(flags);
1767}
1768
1769/*
1770 * Drains all per-CPU charge caches for given root_memcg resp. subtree
1771 * of the hierarchy under it.
1772 */
1773static void drain_all_stock(struct mem_cgroup *root_memcg)
1774{
1775	int cpu, curcpu;
1776
1777	/* If someone's already draining, avoid adding more workers. */
1778	if (!mutex_trylock(&percpu_charge_mutex))
1779		return;
1780	/*
1781	 * Notify other cpus that system-wide "drain" is running
1782	 * We do not care about races with the cpu hotplug because cpu down
1783	 * as well as workers from this path always operate on the local
1784	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
1785	 */
1786	curcpu = get_cpu();
1787	for_each_online_cpu(cpu) {
1788		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1789		struct mem_cgroup *memcg;
1790
1791		memcg = stock->cached;
1792		if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css))
1793			continue;
1794		if (!mem_cgroup_is_descendant(memcg, root_memcg)) {
1795			css_put(&memcg->css);
1796			continue;
1797		}
1798		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1799			if (cpu == curcpu)
1800				drain_local_stock(&stock->work);
1801			else
1802				schedule_work_on(cpu, &stock->work);
1803		}
1804		css_put(&memcg->css);
1805	}
1806	put_cpu();
1807	mutex_unlock(&percpu_charge_mutex);
1808}
1809
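/*
 * CPU hotplug (dead) callback: drain the offlined CPU's charge stock and
 * fold pending per-cpu statistics and events into the atomic per-memcg
 * (and per-node) counters so nothing accumulated is lost.
 */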
1810static int memcg_hotplug_cpu_dead(unsigned int cpu)
1811{
1812	struct memcg_stock_pcp *stock;
1813	struct mem_cgroup *memcg;
1814
1815	stock = &per_cpu(memcg_stock, cpu);
1816	drain_stock(stock);
1817
1818	for_each_mem_cgroup(memcg) {
1819		int i;
1820
1821		for (i = 0; i < MEMCG_NR_STAT; i++) {
1822			int nid;
1823			long x;
1824
1825			x = this_cpu_xchg(memcg->stat_cpu->count[i], 0);
1826			if (x)
1827				atomic_long_add(x, &memcg->stat[i]);
1828
1829			if (i >= NR_VM_NODE_STAT_ITEMS)
1830				continue;
1831
1832			for_each_node(nid) {
1833				struct mem_cgroup_per_node *pn;
1834
1835				pn = mem_cgroup_nodeinfo(memcg, nid);
1836				x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
1837				if (x)
1838					atomic_long_add(x, &pn->lruvec_stat[i]);
1839			}
1840		}
1841
1842		for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
1843			long x;
1844
1845			x = this_cpu_xchg(memcg->stat_cpu->events[i], 0);
1846			if (x)
1847				atomic_long_add(x, &memcg->events[i]);
1848		}
1849	}
1850
1851	return 0;
1852}
1853
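/*
 * Walk up the hierarchy from @memcg and, for every level whose usage is
 * above its high limit, record a MEMCG_HIGH event and reclaim up to
 * @nr_pages from it.
 */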
1854static void reclaim_high(struct mem_cgroup *memcg,
1855			 unsigned int nr_pages,
1856			 gfp_t gfp_mask)
1857{
 
 
1858	do {
1859		if (page_counter_read(&memcg->memory) <= memcg->high)
1860			continue;
 
1861		memcg_memory_event(memcg, MEMCG_HIGH);
1862		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
1863	} while ((memcg = parent_mem_cgroup(memcg)));
1864}
1865
1866static void high_work_func(struct work_struct *work)
1867{
1868	struct mem_cgroup *memcg;
1869
1870	memcg = container_of(work, struct mem_cgroup, high_work);
1871	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
1872}
1873
1874/*
1875 * Scheduled by try_charge() to be executed from the userland return path
1876 * and reclaims memory over the high limit.
 
1877 */
1878void mem_cgroup_handle_over_high(void)
1879{
1880	unsigned int nr_pages = current->memcg_nr_pages_over_high;
 
1881	struct mem_cgroup *memcg;
 
1882
1883	if (likely(!nr_pages))
1884		return;
1885
1886	memcg = get_mem_cgroup_from_mm(current->mm);
1887	reclaim_high(memcg, nr_pages, GFP_KERNEL);
1888	css_put(&memcg->css);
1889	current->memcg_nr_pages_over_high = 0;
1890}
1891
1892static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
1893		      unsigned int nr_pages)
1894{
1895	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
1896	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1897	struct mem_cgroup *mem_over_limit;
1898	struct page_counter *counter;
1899	unsigned long nr_reclaimed;
1900	bool may_swap = true;
 
1901	bool drained = false;
 
 
1902
1903	if (mem_cgroup_is_root(memcg))
1904		return 0;
1905retry:
1906	if (consume_stock(memcg, nr_pages))
1907		return 0;
1908
1909	if (!do_memsw_account() ||
1910	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
1911		if (page_counter_try_charge(&memcg->memory, batch, &counter))
1912			goto done_restock;
1913		if (do_memsw_account())
1914			page_counter_uncharge(&memcg->memsw, batch);
1915		mem_over_limit = mem_cgroup_from_counter(counter, memory);
1916	} else {
1917		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
1918		may_swap = false;
1919	}
1920
1921	if (batch > nr_pages) {
1922		batch = nr_pages;
1923		goto retry;
1924	}
1925
1926	/*
1927	 * Unlike in global OOM situations, memcg is not in a physical
1928	 * memory shortage.  Allow dying and OOM-killed tasks to
1929	 * bypass the last charges so that they can exit quickly and
1930	 * free their memory.
1931	 */
1932	if (unlikely(tsk_is_oom_victim(current) ||
1933		     fatal_signal_pending(current) ||
1934		     current->flags & PF_EXITING))
1935		goto force;
1936
1937	/*
1938	 * Prevent unbounded recursion when reclaim operations need to
1939	 * allocate memory. This might exceed the limits temporarily,
1940	 * but we prefer facilitating memory reclaim and getting back
1941	 * under the limit over triggering OOM kills in these cases.
1942	 */
1943	if (unlikely(current->flags & PF_MEMALLOC))
1944		goto force;
1945
1946	if (unlikely(task_in_memcg_oom(current)))
1947		goto nomem;
1948
1949	if (!gfpflags_allow_blocking(gfp_mask))
1950		goto nomem;
1951
1952	memcg_memory_event(mem_over_limit, MEMCG_MAX);
 
1953
 
1954	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
1955						    gfp_mask, may_swap);
 
1956
1957	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
1958		goto retry;
1959
1960	if (!drained) {
1961		drain_all_stock(mem_over_limit);
1962		drained = true;
1963		goto retry;
1964	}
1965
1966	if (gfp_mask & __GFP_NORETRY)
1967		goto nomem;
1968	/*
1969	 * Even though the limit is exceeded at this point, reclaim
1970	 * may have been able to free some pages.  Retry the charge
1971	 * before killing the task.
1972	 *
1973	 * Only for regular pages, though: huge pages are rather
1974	 * unlikely to succeed so close to the limit, and we fall back
1975	 * to regular pages anyway in case of failure.
1976	 */
1977	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
1978		goto retry;
1979	/*
1980	 * During a task move, charges can be counted twice, so it's better
1981	 * to wait until the end of task_move if one is in progress.
1982	 */
1983	if (mem_cgroup_wait_acct_move(mem_over_limit))
1984		goto retry;
1985
1986	if (nr_retries--)
1987		goto retry;
1988
1989	if (gfp_mask & __GFP_NOFAIL)
1990		goto force;
1991
1992	if (fatal_signal_pending(current))
1993		goto force;
1994
1995	memcg_memory_event(mem_over_limit, MEMCG_OOM);
 
 
1996
1997	mem_cgroup_oom(mem_over_limit, gfp_mask,
1998		       get_order(nr_pages * PAGE_SIZE));
1999nomem:
2000	if (!(gfp_mask & __GFP_NOFAIL))
2001		return -ENOMEM;
2002force:
2003	/*
2004	 * The allocation either can't fail or will lead to more memory
2005	 * being freed very soon.  Allow memory usage to go over the limit
2006	 * temporarily by force charging it.
2007	 */
2008	page_counter_charge(&memcg->memory, nr_pages);
2009	if (do_memsw_account())
2010		page_counter_charge(&memcg->memsw, nr_pages);
2011	css_get_many(&memcg->css, nr_pages);
2012
2013	return 0;
2014
2015done_restock:
2016	css_get_many(&memcg->css, batch);
2017	if (batch > nr_pages)
2018		refill_stock(memcg, batch - nr_pages);
2019
2020	/*
2021	 * If the hierarchy is above the normal consumption range, schedule
2022	 * reclaim on returning to userland.  We can perform reclaim here
2023	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2024	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2025	 * not recorded as it most likely matches current's and won't
2026	 * change in the meantime.  As high limit is checked again before
2027	 * reclaim, the cost of mismatch is negligible.
2028	 */
2029	do {
2030		if (page_counter_read(&memcg->memory) > memcg->high) {
2031			/* Don't bother a random interrupted task */
2032			if (in_interrupt()) {
2033				schedule_work(&memcg->high_work);
2034				break;
2035			}
2036			current->memcg_nr_pages_over_high += batch;
2037			set_notify_resume(current);
2038			break;
2039		}
2040	} while ((memcg = parent_mem_cgroup(memcg)));
2041
2042	return 0;
2043}
2044
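/*
 * Undo a successful try_charge(): uncharge the memory (and memsw) page
 * counters and drop the css references taken for @nr_pages.
 */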
2045static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2046{
2047	if (mem_cgroup_is_root(memcg))
2048		return;
2049
2050	page_counter_uncharge(&memcg->memory, nr_pages);
2051	if (do_memsw_account())
2052		page_counter_uncharge(&memcg->memsw, nr_pages);
2053
2054	css_put_many(&memcg->css, nr_pages);
2055}
2056
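/*
 * lock_page_lru() and unlock_page_lru() let commit_charge() switch
 * page->mem_cgroup on a page that may already sit on an LRU list: the
 * page is isolated from its current list under zone_lru_lock and put
 * back (now on the new memcg's lruvec) once the switch is done.
 */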
2057static void lock_page_lru(struct page *page, int *isolated)
2058{
2059	struct zone *zone = page_zone(page);
2060
2061	spin_lock_irq(zone_lru_lock(zone));
2062	if (PageLRU(page)) {
2063		struct lruvec *lruvec;
2064
2065		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
2066		ClearPageLRU(page);
2067		del_page_from_lru_list(page, lruvec, page_lru(page));
2068		*isolated = 1;
2069	} else
2070		*isolated = 0;
2071}
2072
2073static void unlock_page_lru(struct page *page, int isolated)
2074{
2075	struct zone *zone = page_zone(page);
2076
2077	if (isolated) {
2078		struct lruvec *lruvec;
2079
2080		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
2081		VM_BUG_ON_PAGE(PageLRU(page), page);
2082		SetPageLRU(page);
2083		add_page_to_lru_list(page, lruvec, page_lru(page));
2084	}
2085	spin_unlock_irq(zone_lru_lock(zone));
 
 
2086}
2087
2088static void commit_charge(struct page *page, struct mem_cgroup *memcg,
2089			  bool lrucare)
2090{
2091	int isolated;
2092
2093	VM_BUG_ON_PAGE(page->mem_cgroup, page);
 
 
2094
2095	/*
2096	 * In some cases, e.g. SwapCache and FUSE (splice_buf->radix tree), the
2097	 * page may already be on some other mem_cgroup's LRU.  Take care of it.
2098	 */
2099	if (lrucare)
2100		lock_page_lru(page, &isolated);
2101
2102	/*
2103	 * Nobody should be changing or seriously looking at
2104	 * page->mem_cgroup at this point:
2105	 *
2106	 * - the page is uncharged
2107	 *
2108	 * - the page is off-LRU
2109	 *
2110	 * - an anonymous fault has exclusive page access, except for
2111	 *   a locked page table
2112	 *
2113	 * - a page cache insertion, a swapin fault, or a migration
2114	 *   have the page locked
2115	 */
2116	page->mem_cgroup = memcg;
2117
2118	if (lrucare)
2119		unlock_page_lru(page, isolated);
2120}
2121
2122#ifndef CONFIG_SLOB
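/*
 * Allocate an id in [0, MEMCG_CACHES_MAX_SIZE) for a kmem-active memcg.
 * If the id does not fit into the current memcg_caches arrays, grow the
 * arrays and the list_lrus under memcg_cache_ids_sem first.
 */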
2123static int memcg_alloc_cache_id(void)
2124{
2125	int id, size;
2126	int err;
2127
2128	id = ida_simple_get(&memcg_cache_ida,
2129			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2130	if (id < 0)
2131		return id;
2132
2133	if (id < memcg_nr_cache_ids)
2134		return id;
 
 
2135
2136	/*
2137	 * There's no space for the new id in memcg_caches arrays,
2138	 * so we have to grow them.
2139	 */
2140	down_write(&memcg_cache_ids_sem);
2141
2142	size = 2 * (id + 1);
2143	if (size < MEMCG_CACHES_MIN_SIZE)
2144		size = MEMCG_CACHES_MIN_SIZE;
2145	else if (size > MEMCG_CACHES_MAX_SIZE)
2146		size = MEMCG_CACHES_MAX_SIZE;
2147
2148	err = memcg_update_all_caches(size);
2149	if (!err)
2150		err = memcg_update_all_list_lrus(size);
2151	if (!err)
2152		memcg_nr_cache_ids = size;
2153
2154	up_write(&memcg_cache_ids_sem);
 
 
2155
2156	if (err) {
2157		ida_simple_remove(&memcg_cache_ida, id);
2158		return err;
 
 
2159	}
2160	return id;
2161}
2162
2163static void memcg_free_cache_id(int id)
2164{
2165	ida_simple_remove(&memcg_cache_ida, id);
2166}
2167
2168struct memcg_kmem_cache_create_work {
2169	struct mem_cgroup *memcg;
2170	struct kmem_cache *cachep;
2171	struct work_struct work;
2172};
2173
2174static void memcg_kmem_cache_create_func(struct work_struct *w)
2175{
2176	struct memcg_kmem_cache_create_work *cw =
2177		container_of(w, struct memcg_kmem_cache_create_work, work);
2178	struct mem_cgroup *memcg = cw->memcg;
2179	struct kmem_cache *cachep = cw->cachep;
2180
2181	memcg_create_kmem_cache(memcg, cachep);
 
 
2182
2183	css_put(&memcg->css);
2184	kfree(cw);
2185}
2186
2187/*
2188 * Enqueue the creation of a per-memcg kmem_cache.
2189 */
2190static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2191					       struct kmem_cache *cachep)
2192{
2193	struct memcg_kmem_cache_create_work *cw;
 
2194
2195	cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
2196	if (!cw)
2197		return;
2198
2199	css_get(&memcg->css);
 
 
2200
2201	cw->memcg = memcg;
2202	cw->cachep = cachep;
2203	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
2204
2205	queue_work(memcg_kmem_cache_wq, &cw->work);
2206}
2207
2208static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2209					     struct kmem_cache *cachep)
2210{
2211	/*
2212	 * We need to stop accounting when we kmalloc, because if the
2213	 * corresponding kmalloc cache is not yet created, the first allocation
2214	 * in __memcg_schedule_kmem_cache_create will recurse.
2215	 *
2216	 * However, it is better to enclose the whole function. Depending on
2217	 * the debugging options enabled, INIT_WORK(), for instance, can
2218	 * trigger an allocation. This, too, will make us recurse. Because at
2219	 * this point we can't allow ourselves back into memcg_kmem_get_cache,
2220	 * the safest choice is to do it like this, wrapping the whole function.
2221	 */
2222	current->memcg_kmem_skip_account = 1;
2223	__memcg_schedule_kmem_cache_create(memcg, cachep);
2224	current->memcg_kmem_skip_account = 0;
2225}
2226
2227static inline bool memcg_kmem_bypass(void)
2228{
2229	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
2230		return true;
2231	return false;
2232}
2233
2234/**
2235 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
2236 * @cachep: the original global kmem cache
2237 *
2238 * Return the kmem_cache we're supposed to use for a slab allocation.
2239 * We try to use the current memcg's version of the cache.
2240 *
2241 * If the cache does not exist yet and we are the first user of it, we
2242 * create it asynchronously in a workqueue and let the current allocation
2243 * go through with the original cache.
2244 *
2245 * This function takes a reference to the cache it returns to assure it
2246 * won't get destroyed while we are working with it. Once the caller is
2247 * done with it, memcg_kmem_put_cache() must be called to release the
2248 * reference.
2249 */
2250struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
 
2251{
2252	struct mem_cgroup *memcg;
2253	struct kmem_cache *memcg_cachep;
2254	int kmemcg_id;
2255
2256	VM_BUG_ON(!is_root_cache(cachep));
2257
2258	if (memcg_kmem_bypass())
2259		return cachep;
2260
2261	if (current->memcg_kmem_skip_account)
2262		return cachep;
2263
2264	memcg = get_mem_cgroup_from_mm(current->mm);
2265	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2266	if (kmemcg_id < 0)
2267		goto out;
2268
2269	memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
2270	if (likely(memcg_cachep))
2271		return memcg_cachep;
2272
2273	/*
2274	 * If we are in a safe context (can wait, and not in interrupt
2275	 * context), we could be predictable and return right away.
2276	 * This would guarantee that the allocation being performed
2277	 * already belongs in the new cache.
2278	 *
2279	 * However, there are some clashes that can arise from locking.
2280	 * For instance, because we acquire the slab_mutex while doing
2281	 * memcg_create_kmem_cache, this means no further allocation
2282	 * could happen with the slab_mutex held. So it's better to
2283	 * defer everything.
2284	 */
2285	memcg_schedule_kmem_cache_create(memcg, cachep);
2286out:
2287	css_put(&memcg->css);
2288	return cachep;
2289}
2290
2291/**
2292 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
2293 * @cachep: the cache returned by memcg_kmem_get_cache
2294 */
2295void memcg_kmem_put_cache(struct kmem_cache *cachep)
2296{
2297	if (!is_root_cache(cachep))
2298		css_put(&cachep->memcg_params.memcg->css);
2299}
2300
2301/**
2302 * memcg_kmem_charge_memcg: charge a kmem page
2303 * @page: page to charge
2304 * @gfp: reclaim mode
2305 * @order: allocation order
2306 * @memcg: memory cgroup to charge
2307 *
2308 * Returns 0 on success, an error code on failure.
2309 */
2310int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2311			    struct mem_cgroup *memcg)
2312{
2313	unsigned int nr_pages = 1 << order;
2314	struct page_counter *counter;
2315	int ret;
2316
2317	ret = try_charge(memcg, gfp, nr_pages);
2318	if (ret)
2319		return ret;
2320
2321	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2322	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2323		cancel_charge(memcg, nr_pages);
2324		return -ENOMEM;
2325	}
2326
2327	page->mem_cgroup = memcg;
 
 
2328
2329	return 0;
2330}
2331
2332/**
2333 * memcg_kmem_charge: charge a kmem page to the current memory cgroup
2334 * @page: page to charge
2335 * @gfp: reclaim mode
2336 * @order: allocation order
2337 *
2338 * Returns 0 on success, an error code on failure.
2339 */
2340int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
2341{
2342	struct mem_cgroup *memcg;
2343	int ret = 0;
2344
2345	if (memcg_kmem_bypass())
2346		return 0;
2347
2348	memcg = get_mem_cgroup_from_mm(current->mm);
2349	if (!mem_cgroup_is_root(memcg)) {
2350		ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
2351		if (!ret)
2352			__SetPageKmemcg(page);
 
2353	}
2354	css_put(&memcg->css);
2355	return ret;
2356}
 
2357/**
2358 * memcg_kmem_uncharge: uncharge a kmem page
2359 * @page: page to uncharge
2360 * @order: allocation order
2361 */
2362void memcg_kmem_uncharge(struct page *page, int order)
2363{
2364	struct mem_cgroup *memcg = page->mem_cgroup;
 
2365	unsigned int nr_pages = 1 << order;
2366
2367	if (!memcg)
2368		return;
2369
2370	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
2371
2372	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2373		page_counter_uncharge(&memcg->kmem, nr_pages);
2374
2375	page_counter_uncharge(&memcg->memory, nr_pages);
2376	if (do_memsw_account())
2377		page_counter_uncharge(&memcg->memsw, nr_pages);
2378
2379	page->mem_cgroup = NULL;
2380
2381	/* slab pages do not have PageKmemcg flag set */
2382	if (PageKmemcg(page))
2383		__ClearPageKmemcg(page);
2384
2385	css_put_many(&memcg->css, nr_pages);
 
 
2386}
2387#endif /* !CONFIG_SLOB */
2388
2389#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2390
2391/*
2392 * Because tail pages are not marked as "used", set them. We're under
2393 * zone_lru_lock and migration entries are set up in all page mappings.
2394 */
2395void mem_cgroup_split_huge_fixup(struct page *head)
2396{
 
 
2397	int i;
2398
2399	if (mem_cgroup_disabled())
2400		return;
2401
2402	for (i = 1; i < HPAGE_PMD_NR; i++)
2403		head[i].mem_cgroup = head->mem_cgroup;
2404
2405	__mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR);
2406}
2407#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2408
2409#ifdef CONFIG_MEMCG_SWAP
2410/**
2411 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2412 * @entry: swap entry to be moved
2413 * @from:  mem_cgroup which the entry is moved from
2414 * @to:  mem_cgroup which the entry is moved to
2415 *
2416 * It succeeds only when the swap_cgroup's record for this entry is the same
2417 * as the mem_cgroup's id of @from.
2418 *
2419 * Returns 0 on success, -EINVAL on failure.
2420 *
2421 * The caller must have charged to @to, IOW, called page_counter_charge() about
2422 * both res and memsw, and called css_get().
2423 */
2424static int mem_cgroup_move_swap_account(swp_entry_t entry,
2425				struct mem_cgroup *from, struct mem_cgroup *to)
2426{
2427	unsigned short old_id, new_id;
2428
2429	old_id = mem_cgroup_id(from);
2430	new_id = mem_cgroup_id(to);
2431
2432	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2433		mod_memcg_state(from, MEMCG_SWAP, -1);
2434		mod_memcg_state(to, MEMCG_SWAP, 1);
2435		return 0;
2436	}
2437	return -EINVAL;
2438}
2439#else
2440static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2441				struct mem_cgroup *from, struct mem_cgroup *to)
2442{
2443	return -EINVAL;
2444}
2445#endif
2446
2447static DEFINE_MUTEX(memcg_limit_mutex);
2448
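/*
 * Set a new memory or memsw limit. The memory.limit <= memsw.limit
 * invariant is enforced, and if current usage is above the new limit the
 * function keeps reclaiming until the limit fits, the caller is
 * signalled, or reclaim stops making progress (-EBUSY).
 */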
2449static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2450				   unsigned long limit, bool memsw)
2451{
2452	bool enlarge = false;
 
2453	int ret;
2454	bool limits_invariant;
2455	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
2456
2457	do {
2458		if (signal_pending(current)) {
2459			ret = -EINTR;
2460			break;
2461		}
2462
2463		mutex_lock(&memcg_limit_mutex);
2464		/*
2465		 * Make sure that the new limit (memsw or memory limit) doesn't
2466		 * break our basic invariant rule memory.limit <= memsw.limit.
2467		 */
2468		limits_invariant = memsw ? limit >= memcg->memory.limit :
2469					   limit <= memcg->memsw.limit;
2470		if (!limits_invariant) {
2471			mutex_unlock(&memcg_limit_mutex);
2472			ret = -EINVAL;
2473			break;
2474		}
2475		if (limit > counter->limit)
2476			enlarge = true;
2477		ret = page_counter_limit(counter, limit);
2478		mutex_unlock(&memcg_limit_mutex);
2479
2480		if (!ret)
2481			break;
2482
2483		if (!try_to_free_mem_cgroup_pages(memcg, 1,
2484					GFP_KERNEL, !memsw)) {
2485			ret = -EBUSY;
2486			break;
2487		}
2488	} while (true);
2489
2490	if (!ret && enlarge)
2491		memcg_oom_recover(memcg);
2492
2493	return ret;
2494}
2495
2496unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
2497					    gfp_t gfp_mask,
2498					    unsigned long *total_scanned)
2499{
2500	unsigned long nr_reclaimed = 0;
2501	struct mem_cgroup_per_node *mz, *next_mz = NULL;
2502	unsigned long reclaimed;
2503	int loop = 0;
2504	struct mem_cgroup_tree_per_node *mctz;
2505	unsigned long excess;
2506	unsigned long nr_scanned;
 
 
2507
2508	if (order > 0)
2509		return 0;
2510
2511	mctz = soft_limit_tree_node(pgdat->node_id);
2512
2513	/*
2514	 * Do not even bother to check the largest node if the root
2515	 * is empty. Do it lockless to prevent lock bouncing. Races
2516	 * are acceptable as soft limit is best effort anyway.
2517	 */
2518	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
2519		return 0;
2520
2521	/*
2522	 * This loop can run for a while, especially if mem_cgroups
2523	 * continuously keep exceeding their soft limit and putting the
2524	 * system under pressure.
2525	 */
2526	do {
2527		if (next_mz)
2528			mz = next_mz;
2529		else
2530			mz = mem_cgroup_largest_soft_limit_node(mctz);
2531		if (!mz)
2532			break;
2533
2534		nr_scanned = 0;
2535		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
2536						    gfp_mask, &nr_scanned);
2537		nr_reclaimed += reclaimed;
2538		*total_scanned += nr_scanned;
2539		spin_lock_irq(&mctz->lock);
2540		__mem_cgroup_remove_exceeded(mz, mctz);
2541
2542		/*
2543		 * If we failed to reclaim anything from this memory cgroup
2544		 * it is time to move on to the next cgroup
2545		 */
2546		next_mz = NULL;
2547		if (!reclaimed)
2548			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2549
2550		excess = soft_limit_excess(mz->memcg);
2551		/*
2552		 * One school of thought says that we should not add
2553		 * back the node to the tree if reclaim returns 0.
2554		 * But our reclaim could return 0 simply because, due
2555		 * to priority, we are exposing a smaller subset of
2556		 * memory to reclaim from. Consider this as a longer
2557		 * term TODO.
2558		 */
2559		/* If excess == 0, no tree ops */
2560		__mem_cgroup_insert_exceeded(mz, mctz, excess);
2561		spin_unlock_irq(&mctz->lock);
2562		css_put(&mz->memcg->css);
2563		loop++;
2564		/*
2565		 * Could not reclaim anything and there are no more
2566		 * mem cgroups to try or we seem to be looping without
2567		 * reclaiming anything.
2568		 */
2569		if (!nr_reclaimed &&
2570			(next_mz == NULL ||
2571			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2572			break;
2573	} while (!nr_reclaimed);
2574	if (next_mz)
2575		css_put(&next_mz->memcg->css);
2576	return nr_reclaimed;
2577}
2578
2579/*
2580 * Test whether @memcg has children, dead or alive.  Note that this
2581 * function doesn't care whether @memcg has use_hierarchy enabled and
2582 * returns %true if there are child csses according to the cgroup
2583 * hierarchy.  Testing use_hierarchy is the caller's responsibility.
2584 */
2585static inline bool memcg_has_children(struct mem_cgroup *memcg)
2586{
2587	bool ret;
2588
2589	rcu_read_lock();
2590	ret = css_next_child(NULL, &memcg->css);
2591	rcu_read_unlock();
2592	return ret;
2593}
2594
2595/*
2596 * Reclaims as many pages from the given memcg as possible.
2597 *
2598 * Caller is responsible for holding css reference for memcg.
2599 */
2600static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2601{
2602	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2603
2604	/* we call try-to-free pages to make this cgroup empty */
2605	lru_add_drain_all();
2606	/* try to free all pages in this cgroup */
2607	while (nr_retries && page_counter_read(&memcg->memory)) {
2608		int progress;
2609
2610		if (signal_pending(current))
2611			return -EINTR;
2612
2613		progress = try_to_free_mem_cgroup_pages(memcg, 1,
2614							GFP_KERNEL, true);
2615		if (!progress) {
2616			nr_retries--;
2617			/* maybe some writeback is necessary */
2618			congestion_wait(BLK_RW_ASYNC, HZ/10);
2619		}
2620
2621	}
2622
2623	return 0;
2624}
2625
2626static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2627					    char *buf, size_t nbytes,
2628					    loff_t off)
2629{
2630	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2631
2632	if (mem_cgroup_is_root(memcg))
2633		return -EINVAL;
2634	return mem_cgroup_force_empty(memcg) ?: nbytes;
2635}
2636
2637static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2638				     struct cftype *cft)
2639{
2640	return mem_cgroup_from_css(css)->use_hierarchy;
2641}
2642
2643static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2644				      struct cftype *cft, u64 val)
2645{
2646	int retval = 0;
2647	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2648	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
2649
2650	if (memcg->use_hierarchy == val)
2651		return 0;
2652
2653	/*
2654	 * If parent's use_hierarchy is set, we can't make any modifications
2655	 * in the child subtrees. If it is unset, then the change can
2656	 * occur, provided the current cgroup has no children.
2657	 *
2658	 * For the root cgroup, parent_mem is NULL, so we allow the value to
2659	 * be set if there are no children.
2660	 */
2661	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
2662				(val == 1 || val == 0)) {
2663		if (!memcg_has_children(memcg))
2664			memcg->use_hierarchy = val;
2665		else
2666			retval = -EBUSY;
2667	} else
2668		retval = -EINVAL;
2669
2670	return retval;
2671}
2672
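/*
 * tree_stat() and tree_events() sum each statistic/event counter over
 * @memcg and all of its descendants.
 */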
2673static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
2674{
2675	struct mem_cgroup *iter;
2676	int i;
2677
2678	memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT);
2679
2680	for_each_mem_cgroup_tree(iter, memcg) {
2681		for (i = 0; i < MEMCG_NR_STAT; i++)
2682			stat[i] += memcg_page_state(iter, i);
2683	}
2684}
2685
2686static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
2687{
2688	struct mem_cgroup *iter;
2689	int i;
2690
2691	memset(events, 0, sizeof(*events) * NR_VM_EVENT_ITEMS);
2692
2693	for_each_mem_cgroup_tree(iter, memcg) {
2694		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
2695			events[i] += memcg_sum_events(iter, i);
2696	}
2697}
2698
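/*
 * For the root cgroup the page counters are not charged (try_charge()
 * bails out early), so usage is summed up from the per-memcg stats of
 * the whole tree; for everybody else it is read from the memory or
 * memsw page counter directly.
 */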
2699static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
2700{
2701	unsigned long val = 0;
2702
2703	if (mem_cgroup_is_root(memcg)) {
2704		struct mem_cgroup *iter;
2705
2706		for_each_mem_cgroup_tree(iter, memcg) {
2707			val += memcg_page_state(iter, MEMCG_CACHE);
2708			val += memcg_page_state(iter, MEMCG_RSS);
2709			if (swap)
2710				val += memcg_page_state(iter, MEMCG_SWAP);
2711		}
2712	} else {
2713		if (!swap)
2714			val = page_counter_read(&memcg->memory);
2715		else
2716			val = page_counter_read(&memcg->memsw);
2717	}
2718	return val;
2719}
2720
2721enum {
2722	RES_USAGE,
2723	RES_LIMIT,
2724	RES_MAX_USAGE,
2725	RES_FAILCNT,
2726	RES_SOFT_LIMIT,
2727};
2728
2729static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
2730			       struct cftype *cft)
2731{
2732	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2733	struct page_counter *counter;
2734
2735	switch (MEMFILE_TYPE(cft->private)) {
2736	case _MEM:
2737		counter = &memcg->memory;
2738		break;
2739	case _MEMSWAP:
2740		counter = &memcg->memsw;
2741		break;
2742	case _KMEM:
2743		counter = &memcg->kmem;
2744		break;
2745	case _TCP:
2746		counter = &memcg->tcpmem;
2747		break;
2748	default:
2749		BUG();
2750	}
2751
2752	switch (MEMFILE_ATTR(cft->private)) {
2753	case RES_USAGE:
2754		if (counter == &memcg->memory)
2755			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
2756		if (counter == &memcg->memsw)
2757			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
2758		return (u64)page_counter_read(counter) * PAGE_SIZE;
2759	case RES_LIMIT:
2760		return (u64)counter->limit * PAGE_SIZE;
2761	case RES_MAX_USAGE:
2762		return (u64)counter->watermark * PAGE_SIZE;
2763	case RES_FAILCNT:
2764		return counter->failcnt;
2765	case RES_SOFT_LIMIT:
2766		return (u64)memcg->soft_limit * PAGE_SIZE;
2767	default:
2768		BUG();
2769	}
2770}
2771
2772#ifndef CONFIG_SLOB
2773static int memcg_online_kmem(struct mem_cgroup *memcg)
2774{
2775	int memcg_id;
2776
2777	if (cgroup_memory_nokmem)
2778		return 0;
2779
2780	BUG_ON(memcg->kmemcg_id >= 0);
2781	BUG_ON(memcg->kmem_state);
2782
2783	memcg_id = memcg_alloc_cache_id();
2784	if (memcg_id < 0)
2785		return memcg_id;
2786
2787	static_branch_inc(&memcg_kmem_enabled_key);
2788	/*
2789	 * A memory cgroup is considered kmem-online as soon as it gets
2790	 * kmemcg_id. Setting the id after enabling static branching will
2791	 * guarantee no one starts accounting before all call sites are
2792	 * patched.
2793	 */
2794	memcg->kmemcg_id = memcg_id;
2795	memcg->kmem_state = KMEM_ONLINE;
2796	INIT_LIST_HEAD(&memcg->kmem_caches);
2797
2798	return 0;
2799}
2800
2801static void memcg_offline_kmem(struct mem_cgroup *memcg)
2802{
2803	struct cgroup_subsys_state *css;
2804	struct mem_cgroup *parent, *child;
2805	int kmemcg_id;
2806
2807	if (memcg->kmem_state != KMEM_ONLINE)
2808		return;
2809	/*
2810	 * Clear the online state before clearing memcg_caches array
2811	 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
2812	 * guarantees that no cache will be created for this cgroup
2813	 * after we are done (see memcg_create_kmem_cache()).
2814	 */
2815	memcg->kmem_state = KMEM_ALLOCATED;
2816
2817	memcg_deactivate_kmem_caches(memcg);
2818
2819	kmemcg_id = memcg->kmemcg_id;
2820	BUG_ON(kmemcg_id < 0);
2821
2822	parent = parent_mem_cgroup(memcg);
2823	if (!parent)
2824		parent = root_mem_cgroup;
2825
2826	/*
2827	 * Change kmemcg_id of this cgroup and all its descendants to the
2828	 * parent's id, and then move all entries from this cgroup's list_lrus
2829	 * to ones of the parent. After we have finished, all list_lrus
2830	 * corresponding to this cgroup are guaranteed to remain empty. The
2831	 * ordering is imposed by list_lru_node->lock taken by
2832	 * memcg_drain_all_list_lrus().
2833	 */
2834	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
2835	css_for_each_descendant_pre(css, &memcg->css) {
2836		child = mem_cgroup_from_css(css);
2837		BUG_ON(child->kmemcg_id != kmemcg_id);
2838		child->kmemcg_id = parent->kmemcg_id;
2839		if (!memcg->use_hierarchy)
2840			break;
2841	}
2842	rcu_read_unlock();
2843
2844	memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
2845
2846	memcg_free_cache_id(kmemcg_id);
2847}
2848
2849static void memcg_free_kmem(struct mem_cgroup *memcg)
2850{
2851	/* css_alloc() failed, offlining didn't happen */
2852	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
2853		memcg_offline_kmem(memcg);
2854
2855	if (memcg->kmem_state == KMEM_ALLOCATED) {
2856		memcg_destroy_kmem_caches(memcg);
2857		static_branch_dec(&memcg_kmem_enabled_key);
2858		WARN_ON(page_counter_read(&memcg->kmem));
2859	}
2860}
2861#else
2862static int memcg_online_kmem(struct mem_cgroup *memcg)
2863{
2864	return 0;
2865}
2866static void memcg_offline_kmem(struct mem_cgroup *memcg)
2867{
2868}
2869static void memcg_free_kmem(struct mem_cgroup *memcg)
2870{
2871}
2872#endif /* !CONFIG_SLOB */
2873
2874static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
2875				   unsigned long limit)
2876{
2877	int ret;
2878
2879	mutex_lock(&memcg_limit_mutex);
2880	ret = page_counter_limit(&memcg->kmem, limit);
2881	mutex_unlock(&memcg_limit_mutex);
2882	return ret;
2883}
2884
2885static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
2886{
2887	int ret;
2888
2889	mutex_lock(&memcg_limit_mutex);
2890
2891	ret = page_counter_limit(&memcg->tcpmem, limit);
2892	if (ret)
2893		goto out;
2894
2895	if (!memcg->tcpmem_active) {
2896		/*
2897		 * The active flag needs to be written after the static_key
2898		 * update. This is what guarantees that the socket activation
2899		 * function is the last one to run. See mem_cgroup_sk_alloc()
2900		 * for details, and note that we don't mark any socket as
2901		 * belonging to this memcg until that flag is up.
2902		 *
2903		 * We need to do this, because static_keys will span multiple
2904		 * sites, but we can't control their order. If we mark a socket
2905		 * as accounted, but the accounting functions are not patched in
2906		 * yet, we'll lose accounting.
2907		 *
2908		 * We never race with the readers in mem_cgroup_sk_alloc(),
2909		 * because when this value changes, the code to process it is not
2910		 * patched in yet.
2911		 */
2912		static_branch_inc(&memcg_sockets_enabled_key);
2913		memcg->tcpmem_active = true;
2914	}
2915out:
2916	mutex_unlock(&memcg_limit_mutex);
2917	return ret;
2918}
2919
2920/*
2921 * The user of this function is...
2922 * RES_LIMIT.
2923 */
2924static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
2925				char *buf, size_t nbytes, loff_t off)
2926{
2927	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2928	unsigned long nr_pages;
2929	int ret;
2930
2931	buf = strstrip(buf);
2932	ret = page_counter_memparse(buf, "-1", &nr_pages);
2933	if (ret)
2934		return ret;
2935
2936	switch (MEMFILE_ATTR(of_cft(of)->private)) {
2937	case RES_LIMIT:
2938		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
2939			ret = -EINVAL;
2940			break;
2941		}
2942		switch (MEMFILE_TYPE(of_cft(of)->private)) {
2943		case _MEM:
2944			ret = mem_cgroup_resize_limit(memcg, nr_pages, false);
2945			break;
2946		case _MEMSWAP:
2947			ret = mem_cgroup_resize_limit(memcg, nr_pages, true);
2948			break;
2949		case _KMEM:
2950			ret = memcg_update_kmem_limit(memcg, nr_pages);
2951			break;
2952		case _TCP:
2953			ret = memcg_update_tcp_limit(memcg, nr_pages);
2954			break;
2955		}
2956		break;
2957	case RES_SOFT_LIMIT:
2958		memcg->soft_limit = nr_pages;
2959		ret = 0;
2960		break;
2961	}
2962	return ret ?: nbytes;
2963}
2964
2965static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
2966				size_t nbytes, loff_t off)
2967{
2968	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2969	struct page_counter *counter;
2970
2971	switch (MEMFILE_TYPE(of_cft(of)->private)) {
2972	case _MEM:
2973		counter = &memcg->memory;
2974		break;
2975	case _MEMSWAP:
2976		counter = &memcg->memsw;
2977		break;
2978	case _KMEM:
2979		counter = &memcg->kmem;
2980		break;
2981	case _TCP:
2982		counter = &memcg->tcpmem;
2983		break;
2984	default:
2985		BUG();
2986	}
2987
2988	switch (MEMFILE_ATTR(of_cft(of)->private)) {
2989	case RES_MAX_USAGE:
2990		page_counter_reset_watermark(counter);
2991		break;
2992	case RES_FAILCNT:
2993		counter->failcnt = 0;
2994		break;
2995	default:
2996		BUG();
2997	}
2998
2999	return nbytes;
3000}
3001
3002static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3003					struct cftype *cft)
3004{
3005	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3006}
3007
3008#ifdef CONFIG_MMU
3009static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3010					struct cftype *cft, u64 val)
3011{
3012	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3013
3014	if (val & ~MOVE_MASK)
3015		return -EINVAL;
3016
3017	/*
3018	 * No kind of locking is needed in here, because ->can_attach() will
3019	 * check this value once in the beginning of the process, and then carry
3020	 * on with stale data. This means that changes to this value will only
3021	 * affect task migrations starting after the change.
3022	 */
3023	memcg->move_charge_at_immigrate = val;
3024	return 0;
3025}
3026#else
3027static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3028					struct cftype *cft, u64 val)
3029{
3030	return -ENOSYS;
3031}
3032#endif
3033
3034#ifdef CONFIG_NUMA
3035static int memcg_numa_stat_show(struct seq_file *m, void *v)
3036{
3037	struct numa_stat {
3038		const char *name;
3039		unsigned int lru_mask;
3040	};
3041
3042	static const struct numa_stat stats[] = {
3043		{ "total", LRU_ALL },
3044		{ "file", LRU_ALL_FILE },
3045		{ "anon", LRU_ALL_ANON },
3046		{ "unevictable", BIT(LRU_UNEVICTABLE) },
3047	};
3048	const struct numa_stat *stat;
3049	int nid;
3050	unsigned long nr;
3051	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
 
3052
3053	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3054		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3055		seq_printf(m, "%s=%lu", stat->name, nr);
3056		for_each_node_state(nid, N_MEMORY) {
3057			nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3058							  stat->lru_mask);
3059			seq_printf(m, " N%d=%lu", nid, nr);
3060		}
3061		seq_putc(m, '\n');
3062	}
3063
3064	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3065		struct mem_cgroup *iter;
3066
3067		nr = 0;
3068		for_each_mem_cgroup_tree(iter, memcg)
3069			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3070		seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3071		for_each_node_state(nid, N_MEMORY) {
3072			nr = 0;
3073			for_each_mem_cgroup_tree(iter, memcg)
3074				nr += mem_cgroup_node_nr_lru_pages(
3075					iter, nid, stat->lru_mask);
3076			seq_printf(m, " N%d=%lu", nid, nr);
3077		}
3078		seq_putc(m, '\n');
3079	}
3080
3081	return 0;
3082}
3083#endif /* CONFIG_NUMA */
3084
3085/* Universal VM events cgroup1 shows, original sort order */
3086unsigned int memcg1_events[] = {
3087	PGPGIN,
3088	PGPGOUT,
3089	PGFAULT,
3090	PGMAJFAULT,
3091};
3092
3093static const char *const memcg1_event_names[] = {
3094	"pgpgin",
3095	"pgpgout",
3096	"pgfault",
3097	"pgmajfault",
3098};
3099
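/*
 * Implements the cgroup1 memory.stat file: local counters, events and
 * LRU sizes, the hierarchical limits, the hierarchical ("total_")
 * counters, and, with CONFIG_DEBUG_VM, the recent_rotated/scanned
 * reclaim statistics.
 */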
3100static int memcg_stat_show(struct seq_file *m, void *v)
3101{
3102	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3103	unsigned long memory, memsw;
3104	struct mem_cgroup *mi;
3105	unsigned int i;
3106
3107	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
3108	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
 
3109
3110	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3111		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3112			continue;
3113		seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
3114			   memcg_page_state(memcg, memcg1_stats[i]) *
3115			   PAGE_SIZE);
3116	}
3117
3118	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3119		seq_printf(m, "%s %lu\n", memcg1_event_names[i],
3120			   memcg_sum_events(memcg, memcg1_events[i]));
3121
3122	for (i = 0; i < NR_LRU_LISTS; i++)
3123		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3124			   mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
 
3125
3126	/* Hierarchical information */
3127	memory = memsw = PAGE_COUNTER_MAX;
3128	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3129		memory = min(memory, mi->memory.limit);
3130		memsw = min(memsw, mi->memsw.limit);
3131	}
3132	seq_printf(m, "hierarchical_memory_limit %llu\n",
3133		   (u64)memory * PAGE_SIZE);
3134	if (do_memsw_account())
3135		seq_printf(m, "hierarchical_memsw_limit %llu\n",
3136			   (u64)memsw * PAGE_SIZE);
3137
3138	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3139		unsigned long long val = 0;
3140
3141		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3142			continue;
3143		for_each_mem_cgroup_tree(mi, memcg)
3144			val += memcg_page_state(mi, memcg1_stats[i]) *
3145			PAGE_SIZE;
3146		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], val);
3147	}
3148
3149	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) {
3150		unsigned long long val = 0;
3151
3152		for_each_mem_cgroup_tree(mi, memcg)
3153			val += memcg_sum_events(mi, memcg1_events[i]);
3154		seq_printf(m, "total_%s %llu\n", memcg1_event_names[i], val);
3155	}
3156
3157	for (i = 0; i < NR_LRU_LISTS; i++) {
3158		unsigned long long val = 0;
 
 
3159
3160		for_each_mem_cgroup_tree(mi, memcg)
3161			val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3162		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
3163	}
3164
3165#ifdef CONFIG_DEBUG_VM
3166	{
3167		pg_data_t *pgdat;
3168		struct mem_cgroup_per_node *mz;
3169		struct zone_reclaim_stat *rstat;
3170		unsigned long recent_rotated[2] = {0, 0};
3171		unsigned long recent_scanned[2] = {0, 0};
3172
3173		for_each_online_pgdat(pgdat) {
3174			mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
3175			rstat = &mz->lruvec.reclaim_stat;
3176
3177			recent_rotated[0] += rstat->recent_rotated[0];
3178			recent_rotated[1] += rstat->recent_rotated[1];
3179			recent_scanned[0] += rstat->recent_scanned[0];
3180			recent_scanned[1] += rstat->recent_scanned[1];
3181		}
3182		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3183		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3184		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3185		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
3186	}
3187#endif
3188
3189	return 0;
3190}
3191
3192static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3193				      struct cftype *cft)
3194{
3195	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3196
3197	return mem_cgroup_swappiness(memcg);
3198}
3199
3200static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3201				       struct cftype *cft, u64 val)
3202{
3203	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3204
3205	if (val > 100)
3206		return -EINVAL;
3207
3208	if (css->parent)
3209		memcg->swappiness = val;
3210	else
3211		vm_swappiness = val;
3212
3213	return 0;
3214}
3215
3216static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3217{
3218	struct mem_cgroup_threshold_ary *t;
3219	unsigned long usage;
3220	int i;
3221
3222	rcu_read_lock();
3223	if (!swap)
3224		t = rcu_dereference(memcg->thresholds.primary);
3225	else
3226		t = rcu_dereference(memcg->memsw_thresholds.primary);
3227
3228	if (!t)
3229		goto unlock;
3230
3231	usage = mem_cgroup_usage(memcg, swap);
3232
3233	/*
3234	 * current_threshold points to the threshold just below or equal to
3235	 * usage. If that is not the case, a threshold was crossed after the
3236	 * last call of __mem_cgroup_threshold().
3237	 */
3238	i = t->current_threshold;
3239
3240	/*
3241	 * Iterate backward over array of thresholds starting from
3242	 * current_threshold and check if a threshold is crossed.
3243	 * If none of thresholds below usage is crossed, we read
3244	 * only one element of the array here.
3245	 */
3246	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3247		eventfd_signal(t->entries[i].eventfd, 1);
3248
3249	/* i = current_threshold + 1 */
3250	i++;
3251
3252	/*
3253	 * Iterate forward over array of thresholds starting from
3254	 * current_threshold+1 and check if a threshold is crossed.
3255	 * If none of thresholds above usage is crossed, we read
3256	 * only one element of the array here.
3257	 */
3258	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3259		eventfd_signal(t->entries[i].eventfd, 1);
3260
3261	/* Update current_threshold */
3262	t->current_threshold = i - 1;
3263unlock:
3264	rcu_read_unlock();
3265}
3266
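/*
 * Re-check the usage thresholds of @memcg and all of its ancestors
 * after a usage change, signalling the eventfds of any thresholds that
 * were crossed.
 */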
3267static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3268{
3269	while (memcg) {
3270		__mem_cgroup_threshold(memcg, false);
3271		if (do_memsw_account())
3272			__mem_cgroup_threshold(memcg, true);
3273
3274		memcg = parent_mem_cgroup(memcg);
3275	}
3276}
3277
3278static int compare_thresholds(const void *a, const void *b)
3279{
3280	const struct mem_cgroup_threshold *_a = a;
3281	const struct mem_cgroup_threshold *_b = b;
3282
3283	if (_a->threshold > _b->threshold)
3284		return 1;
3285
3286	if (_a->threshold < _b->threshold)
3287		return -1;
3288
3289	return 0;
3290}
3291
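/*
 * Signal every eventfd registered for OOM notification on @memcg;
 * mem_cgroup_oom_notify() below does the same for the whole subtree.
 */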
3292static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
3293{
3294	struct mem_cgroup_eventfd_list *ev;
3295
3296	spin_lock(&memcg_oom_lock);
3297
3298	list_for_each_entry(ev, &memcg->oom_notify, list)
3299		eventfd_signal(ev->eventfd, 1);
3300
3301	spin_unlock(&memcg_oom_lock);
3302	return 0;
3303}
3304
3305static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
3306{
3307	struct mem_cgroup *iter;
3308
3309	for_each_mem_cgroup_tree(iter, memcg)
3310		mem_cgroup_oom_notify_cb(iter);
3311}
3312
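/*
 * Register an eventfd to be signalled when memory (or memsw) usage
 * crosses the threshold given in @args: a new, sorted thresholds array
 * is built and published under RCU, and the old primary array is kept
 * as a spare.
 */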
3313static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3314	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
3315{
3316	struct mem_cgroup_thresholds *thresholds;
3317	struct mem_cgroup_threshold_ary *new;
3318	unsigned long threshold;
3319	unsigned long usage;
3320	int i, size, ret;
3321
3322	ret = page_counter_memparse(args, "-1", &threshold);
3323	if (ret)
3324		return ret;
3325
3326	mutex_lock(&memcg->thresholds_lock);
3327
3328	if (type == _MEM) {
3329		thresholds = &memcg->thresholds;
3330		usage = mem_cgroup_usage(memcg, false);
3331	} else if (type == _MEMSWAP) {
3332		thresholds = &memcg->memsw_thresholds;
3333		usage = mem_cgroup_usage(memcg, true);
3334	} else
3335		BUG();
3336
3337	/* Check if a threshold crossed before adding a new one */
3338	if (thresholds->primary)
3339		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
3340
3341	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3342
3343	/* Allocate memory for new array of thresholds */
3344	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3345			GFP_KERNEL);
3346	if (!new) {
3347		ret = -ENOMEM;
3348		goto unlock;
3349	}
3350	new->size = size;
3351
3352	/* Copy thresholds (if any) to new array */
3353	if (thresholds->primary) {
3354		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3355				sizeof(struct mem_cgroup_threshold));
3356	}
3357
3358	/* Add new threshold */
3359	new->entries[size - 1].eventfd = eventfd;
3360	new->entries[size - 1].threshold = threshold;
3361
3362	/* Sort thresholds. Registering a new threshold isn't time-critical */
3363	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3364			compare_thresholds, NULL);
3365
3366	/* Find current threshold */
3367	new->current_threshold = -1;
3368	for (i = 0; i < size; i++) {
3369		if (new->entries[i].threshold <= usage) {
3370			/*
3371			 * new->current_threshold will not be used until
3372			 * rcu_assign_pointer(), so it's safe to increment
3373			 * it here.
3374			 */
3375			++new->current_threshold;
3376		} else
3377			break;
3378	}
3379
3380	/* Free old spare buffer and save old primary buffer as spare */
3381	kfree(thresholds->spare);
3382	thresholds->spare = thresholds->primary;
3383
3384	rcu_assign_pointer(thresholds->primary, new);
3385
3386	/* To be sure that nobody uses thresholds */
3387	synchronize_rcu();
3388
3389unlock:
3390	mutex_unlock(&memcg->thresholds_lock);
3391
3392	return ret;
3393}
3394
3395static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3396	struct eventfd_ctx *eventfd, const char *args)
3397{
3398	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
3399}
3400
3401static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
3402	struct eventfd_ctx *eventfd, const char *args)
3403{
3404	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
3405}
3406
3407static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3408	struct eventfd_ctx *eventfd, enum res_type type)
3409{
3410	struct mem_cgroup_thresholds *thresholds;
3411	struct mem_cgroup_threshold_ary *new;
3412	unsigned long usage;
3413	int i, j, size;
3414
3415	mutex_lock(&memcg->thresholds_lock);
3416
3417	if (type == _MEM) {
3418		thresholds = &memcg->thresholds;
3419		usage = mem_cgroup_usage(memcg, false);
3420	} else if (type == _MEMSWAP) {
3421		thresholds = &memcg->memsw_thresholds;
3422		usage = mem_cgroup_usage(memcg, true);
3423	} else
3424		BUG();
3425
3426	if (!thresholds->primary)
3427		goto unlock;
3428
3429	/* Check if a threshold crossed before removing */
3430	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
3431
3432	/* Calculate the new number of thresholds */
3433	size = 0;
3434	for (i = 0; i < thresholds->primary->size; i++) {
3435		if (thresholds->primary->entries[i].eventfd != eventfd)
3436			size++;
 
 
3437	}
3438
3439	new = thresholds->spare;
3440
3441	/* Set thresholds array to NULL if we don't have thresholds */
3442	if (!size) {
3443		kfree(new);
3444		new = NULL;
3445		goto swap_buffers;
3446	}
3447
3448	new->size = size;
3449
3450	/* Copy thresholds and find current threshold */
3451	new->current_threshold = -1;
3452	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3453		if (thresholds->primary->entries[i].eventfd == eventfd)
3454			continue;
3455
3456		new->entries[j] = thresholds->primary->entries[i];
3457		if (new->entries[j].threshold <= usage) {
3458			/*
3459			 * new->current_threshold will not be used
3460			 * until rcu_assign_pointer(), so it's safe to increment
3461			 * it here.
3462			 */
3463			++new->current_threshold;
3464		}
3465		j++;
3466	}
3467
3468swap_buffers:
3469	/* Swap primary and spare array */
3470	thresholds->spare = thresholds->primary;
3471
3472	rcu_assign_pointer(thresholds->primary, new);
3473
3474	/* To be sure that nobody uses thresholds */
3475	synchronize_rcu();
3476
3477	/* If all events are unregistered, free the spare array */
3478	if (!new) {
3479		kfree(thresholds->spare);
3480		thresholds->spare = NULL;
3481	}
3482unlock:
3483	mutex_unlock(&memcg->thresholds_lock);
3484}
3485
3486static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3487	struct eventfd_ctx *eventfd)
3488{
3489	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
3490}
3491
3492static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3493	struct eventfd_ctx *eventfd)
3494{
3495	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
3496}
3497
3498static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
3499	struct eventfd_ctx *eventfd, const char *args)
3500{
3501	struct mem_cgroup_eventfd_list *event;
3502
3503	event = kmalloc(sizeof(*event),	GFP_KERNEL);
3504	if (!event)
3505		return -ENOMEM;
3506
3507	spin_lock(&memcg_oom_lock);
3508
3509	event->eventfd = eventfd;
3510	list_add(&event->list, &memcg->oom_notify);
3511
3512	/* already in OOM ? */
3513	if (memcg->under_oom)
3514		eventfd_signal(eventfd, 1);
3515	spin_unlock(&memcg_oom_lock);
3516
3517	return 0;
3518}
3519
3520static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
3521	struct eventfd_ctx *eventfd)
3522{
3523	struct mem_cgroup_eventfd_list *ev, *tmp;
3524
3525	spin_lock(&memcg_oom_lock);
3526
3527	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
3528		if (ev->eventfd == eventfd) {
3529			list_del(&ev->list);
3530			kfree(ev);
3531		}
3532	}
3533
3534	spin_unlock(&memcg_oom_lock);
3535}
3536
3537static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3538{
3539	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3540
3541	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
3542	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
3543	seq_printf(sf, "oom_kill %lu\n", memcg_sum_events(memcg, OOM_KILL));
 
3544	return 0;
3545}
3546
3547static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3548	struct cftype *cft, u64 val)
3549{
3550	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3551
3552	/* cannot set to root cgroup and only 0 and 1 are allowed */
3553	if (!css->parent || !((val == 0) || (val == 1)))
3554		return -EINVAL;
3555
3556	memcg->oom_kill_disable = val;
3557	if (!val)
3558		memcg_oom_recover(memcg);
3559
3560	return 0;
3561}
3562
3563#ifdef CONFIG_CGROUP_WRITEBACK
3564
3565struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
3566{
3567	return &memcg->cgwb_list;
3568}
3569
3570static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3571{
3572	return wb_domain_init(&memcg->cgwb_domain, gfp);
3573}
3574
3575static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3576{
3577	wb_domain_exit(&memcg->cgwb_domain);
3578}
3579
3580static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3581{
3582	wb_domain_size_changed(&memcg->cgwb_domain);
3583}
3584
3585struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3586{
3587	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3588
3589	if (!memcg->css.parent)
3590		return NULL;
3591
3592	return &memcg->cgwb_domain;
3593}
3594
3595/**
3596 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3597 * @wb: bdi_writeback in question
3598 * @pfilepages: out parameter for number of file pages
3599 * @pheadroom: out parameter for number of allocatable pages according to memcg
3600 * @pdirty: out parameter for number of dirty pages
3601 * @pwriteback: out parameter for number of pages under writeback
3602 *
3603 * Determine the numbers of file, headroom, dirty, and writeback pages in
3604 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
3605 * is a bit more involved.
3606 *
3607 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
3608 * headroom is calculated as the lowest headroom of itself and the
3609 * ancestors.  Note that this doesn't consider the actual amount of
3610 * available memory in the system.  The caller should further cap
3611 * *@pheadroom accordingly.
3612 */
3613void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3614			 unsigned long *pheadroom, unsigned long *pdirty,
3615			 unsigned long *pwriteback)
3616{
3617	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3618	struct mem_cgroup *parent;
3619
3620	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
3621
3622	/* this should eventually include NR_UNSTABLE_NFS */
3623	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
3624	*pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3625						     (1 << LRU_ACTIVE_FILE));
3626	*pheadroom = PAGE_COUNTER_MAX;
3627
 
3628	while ((parent = parent_mem_cgroup(memcg))) {
3629		unsigned long ceiling = min(memcg->memory.limit, memcg->high);
 
3630		unsigned long used = page_counter_read(&memcg->memory);
3631
3632		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3633		memcg = parent;
3634	}
3635}
3636
3637#else	/* CONFIG_CGROUP_WRITEBACK */
3638
3639static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3640{
3641	return 0;
3642}
3643
3644static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3645{
3646}
3647
3648static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3649{
3650}
3651
3652#endif	/* CONFIG_CGROUP_WRITEBACK */
3653
3654/*
3655 * DO NOT USE IN NEW FILES.
3656 *
3657 * "cgroup.event_control" implementation.
3658 *
3659 * This is way over-engineered.  It tries to support fully configurable
3660 * events for each user.  Such level of flexibility is completely
3661 * unnecessary especially in the light of the planned unified hierarchy.
3662 *
3663 * Please deprecate this and replace with something simpler if at all
3664 * possible.
3665 */
3666
3667/*
3668 * Unregister event and free resources.
3669 *
3670 * Gets called from workqueue.
3671 */
3672static void memcg_event_remove(struct work_struct *work)
3673{
3674	struct mem_cgroup_event *event =
3675		container_of(work, struct mem_cgroup_event, remove);
3676	struct mem_cgroup *memcg = event->memcg;
3677
3678	remove_wait_queue(event->wqh, &event->wait);
3679
3680	event->unregister_event(memcg, event->eventfd);
3681
3682	/* Notify userspace the event is going away. */
3683	eventfd_signal(event->eventfd, 1);
3684
3685	eventfd_ctx_put(event->eventfd);
3686	kfree(event);
3687	css_put(&memcg->css);
3688}
3689
3690/*
3691 * Gets called on EPOLLHUP on eventfd when user closes it.
3692 *
3693 * Called with wqh->lock held and interrupts disabled.
3694 */
3695static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
3696			    int sync, void *key)
3697{
3698	struct mem_cgroup_event *event =
3699		container_of(wait, struct mem_cgroup_event, wait);
3700	struct mem_cgroup *memcg = event->memcg;
3701	__poll_t flags = key_to_poll(key);
3702
3703	if (flags & EPOLLHUP) {
3704		/*
3705		 * If the event has been detached at cgroup removal, we
3706		 * can simply return knowing the other side will clean up
3707		 * for us.
3708		 *
3709		 * We can't race against event freeing since the other
3710		 * side will require wqh->lock via remove_wait_queue(),
3711		 * which we hold.
3712		 */
3713		spin_lock(&memcg->event_list_lock);
3714		if (!list_empty(&event->list)) {
3715			list_del_init(&event->list);
3716			/*
3717			 * We are in atomic context, but memcg_event_remove()
3718			 * may sleep, so we have to call it from a workqueue.
3719			 */
3720			schedule_work(&event->remove);
3721		}
3722		spin_unlock(&memcg->event_list_lock);
3723	}
3724
3725	return 0;
3726}
3727
3728static void memcg_event_ptable_queue_proc(struct file *file,
3729		wait_queue_head_t *wqh, poll_table *pt)
3730{
3731	struct mem_cgroup_event *event =
3732		container_of(pt, struct mem_cgroup_event, pt);
3733
3734	event->wqh = wqh;
3735	add_wait_queue(wqh, &event->wait);
3736}
3737
3738/*
3739 * DO NOT USE IN NEW FILES.
3740 *
3741 * Parse input and register new cgroup event handler.
3742 *
3743 * Input must be in format '<event_fd> <control_fd> <args>'.
3744 * Interpretation of args is defined by control file implementation.
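 *
 * For example (illustrative only), arming an 8M usage threshold could be
 * done from a shell roughly as:
 *   echo "$event_fd $control_fd 8388608" > cgroup.event_control
 * where $event_fd refers to an eventfd and $control_fd to an open
 * memory.usage_in_bytes file in the writing process.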
3745 */
3746static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
3747					 char *buf, size_t nbytes, loff_t off)
3748{
3749	struct cgroup_subsys_state *css = of_css(of);
3750	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3751	struct mem_cgroup_event *event;
3752	struct cgroup_subsys_state *cfile_css;
3753	unsigned int efd, cfd;
3754	struct fd efile;
3755	struct fd cfile;
 
3756	const char *name;
3757	char *endp;
3758	int ret;
3759
3760	buf = strstrip(buf);
3761
3762	efd = simple_strtoul(buf, &endp, 10);
3763	if (*endp != ' ')
3764		return -EINVAL;
3765	buf = endp + 1;
3766
3767	cfd = simple_strtoul(buf, &endp, 10);
3768	if ((*endp != ' ') && (*endp != '\0'))
3769		return -EINVAL;
3770	buf = endp + 1;
3771
3772	event = kzalloc(sizeof(*event), GFP_KERNEL);
3773	if (!event)
3774		return -ENOMEM;
3775
3776	event->memcg = memcg;
3777	INIT_LIST_HEAD(&event->list);
3778	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
3779	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
3780	INIT_WORK(&event->remove, memcg_event_remove);
3781
3782	efile = fdget(efd);
3783	if (!efile.file) {
3784		ret = -EBADF;
3785		goto out_kfree;
3786	}
3787
3788	event->eventfd = eventfd_ctx_fileget(efile.file);
3789	if (IS_ERR(event->eventfd)) {
3790		ret = PTR_ERR(event->eventfd);
3791		goto out_put_efile;
3792	}
3793
3794	cfile = fdget(cfd);
3795	if (!cfile.file) {
3796		ret = -EBADF;
3797		goto out_put_eventfd;
3798	}
3799
3800	/* the process needs read permission on the control file */
3801	/* AV: shouldn't we check that it's been opened for read instead? */
3802	ret = inode_permission(file_inode(cfile.file), MAY_READ);
3803	if (ret < 0)
3804		goto out_put_cfile;
3805
3806	/*
3807	 * Determine the event callbacks and set them in @event.  This used
3808	 * to be done via struct cftype but cgroup core no longer knows
3809	 * about these events.  The following is crude but the whole thing
3810	 * is for compatibility anyway.
3811	 *
3812	 * DO NOT ADD NEW FILES.
3813	 */
3814	name = cfile.file->f_path.dentry->d_name.name;
3815
3816	if (!strcmp(name, "memory.usage_in_bytes")) {
3817		event->register_event = mem_cgroup_usage_register_event;
3818		event->unregister_event = mem_cgroup_usage_unregister_event;
3819	} else if (!strcmp(name, "memory.oom_control")) {
3820		event->register_event = mem_cgroup_oom_register_event;
3821		event->unregister_event = mem_cgroup_oom_unregister_event;
3822	} else if (!strcmp(name, "memory.pressure_level")) {
3823		event->register_event = vmpressure_register_event;
3824		event->unregister_event = vmpressure_unregister_event;
3825	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
3826		event->register_event = memsw_cgroup_usage_register_event;
3827		event->unregister_event = memsw_cgroup_usage_unregister_event;
3828	} else {
3829		ret = -EINVAL;
3830		goto out_put_cfile;
3831	}
3832
3833	/*
3834	 * Verify @cfile should belong to @css.  Also, remaining events are
3835	 * automatically removed on cgroup destruction but the removal is
3836	 * asynchronous, so take an extra ref on @css.
3837	 */
3838	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
3839					       &memory_cgrp_subsys);
3840	ret = -EINVAL;
3841	if (IS_ERR(cfile_css))
3842		goto out_put_cfile;
3843	if (cfile_css != css) {
3844		css_put(cfile_css);
3845		goto out_put_cfile;
3846	}
3847
3848	ret = event->register_event(memcg, event->eventfd, buf);
3849	if (ret)
3850		goto out_put_css;
3851
3852	efile.file->f_op->poll(efile.file, &event->pt);
3853
3854	spin_lock(&memcg->event_list_lock);
3855	list_add(&event->list, &memcg->event_list);
3856	spin_unlock(&memcg->event_list_lock);
3857
3858	fdput(cfile);
3859	fdput(efile);
3860
3861	return nbytes;
3862
3863out_put_css:
3864	css_put(css);
3865out_put_cfile:
3866	fdput(cfile);
3867out_put_eventfd:
3868	eventfd_ctx_put(event->eventfd);
3869out_put_efile:
3870	fdput(efile);
3871out_kfree:
3872	kfree(event);
3873
3874	return ret;
3875}
3876
3877static struct cftype mem_cgroup_legacy_files[] = {
3878	{
3879		.name = "usage_in_bytes",
3880		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3881		.read_u64 = mem_cgroup_read_u64,
3882	},
3883	{
3884		.name = "max_usage_in_bytes",
3885		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
3886		.write = mem_cgroup_reset,
3887		.read_u64 = mem_cgroup_read_u64,
3888	},
3889	{
3890		.name = "limit_in_bytes",
3891		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
3892		.write = mem_cgroup_write,
3893		.read_u64 = mem_cgroup_read_u64,
3894	},
3895	{
3896		.name = "soft_limit_in_bytes",
3897		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3898		.write = mem_cgroup_write,
3899		.read_u64 = mem_cgroup_read_u64,
3900	},
3901	{
3902		.name = "failcnt",
3903		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
3904		.write = mem_cgroup_reset,
3905		.read_u64 = mem_cgroup_read_u64,
3906	},
3907	{
3908		.name = "stat",
3909		.seq_show = memcg_stat_show,
3910	},
3911	{
3912		.name = "force_empty",
3913		.write = mem_cgroup_force_empty_write,
3914	},
3915	{
3916		.name = "use_hierarchy",
3917		.write_u64 = mem_cgroup_hierarchy_write,
3918		.read_u64 = mem_cgroup_hierarchy_read,
3919	},
3920	{
3921		.name = "cgroup.event_control",		/* XXX: for compat */
3922		.write = memcg_write_event_control,
3923		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
3924	},
3925	{
3926		.name = "swappiness",
3927		.read_u64 = mem_cgroup_swappiness_read,
3928		.write_u64 = mem_cgroup_swappiness_write,
3929	},
3930	{
3931		.name = "move_charge_at_immigrate",
3932		.read_u64 = mem_cgroup_move_charge_read,
3933		.write_u64 = mem_cgroup_move_charge_write,
3934	},
3935	{
3936		.name = "oom_control",
3937		.seq_show = mem_cgroup_oom_control_read,
3938		.write_u64 = mem_cgroup_oom_control_write,
3939		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3940	},
3941	{
3942		.name = "pressure_level",
 
3943	},
3944#ifdef CONFIG_NUMA
3945	{
3946		.name = "numa_stat",
3947		.seq_show = memcg_numa_stat_show,
3948	},
3949#endif
3950	{
3951		.name = "kmem.limit_in_bytes",
3952		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
3953		.write = mem_cgroup_write,
3954		.read_u64 = mem_cgroup_read_u64,
3955	},
3956	{
3957		.name = "kmem.usage_in_bytes",
3958		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
3959		.read_u64 = mem_cgroup_read_u64,
3960	},
3961	{
3962		.name = "kmem.failcnt",
3963		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
3964		.write = mem_cgroup_reset,
3965		.read_u64 = mem_cgroup_read_u64,
3966	},
3967	{
3968		.name = "kmem.max_usage_in_bytes",
3969		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
3970		.write = mem_cgroup_reset,
3971		.read_u64 = mem_cgroup_read_u64,
3972	},
3973#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
3974	{
3975		.name = "kmem.slabinfo",
3976		.seq_start = memcg_slab_start,
3977		.seq_next = memcg_slab_next,
3978		.seq_stop = memcg_slab_stop,
3979		.seq_show = memcg_slab_show,
3980	},
3981#endif
3982	{
3983		.name = "kmem.tcp.limit_in_bytes",
3984		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
3985		.write = mem_cgroup_write,
3986		.read_u64 = mem_cgroup_read_u64,
3987	},
3988	{
3989		.name = "kmem.tcp.usage_in_bytes",
3990		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
3991		.read_u64 = mem_cgroup_read_u64,
3992	},
3993	{
3994		.name = "kmem.tcp.failcnt",
3995		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
3996		.write = mem_cgroup_reset,
3997		.read_u64 = mem_cgroup_read_u64,
3998	},
3999	{
4000		.name = "kmem.tcp.max_usage_in_bytes",
4001		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4002		.write = mem_cgroup_reset,
4003		.read_u64 = mem_cgroup_read_u64,
4004	},
4005	{ },	/* terminate */
4006};
4007
4008/*
4009 * Private memory cgroup IDR
4010 *
4011 * Swap-out records and page cache shadow entries need to store memcg
4012 * references in constrained space, so we maintain an ID space that is
4013 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
4014 * memory-controlled cgroups to 64k.
4015 *
4016 * However, there usually are many references to the offline CSS after
4017 * the cgroup has been destroyed, such as page cache or reclaimable
4018 * slab objects, that don't need to hang on to the ID. We want to keep
4019 * those dead CSS from occupying IDs, or we might quickly exhaust the
4020 * relatively small ID space and prevent the creation of new cgroups
4021 * even when there are far fewer than 64k cgroups - possibly none.
4022 *
4023 * Maintain a private 16-bit ID space for memcg, and allow the ID to
4024 * be freed and recycled when it's no longer needed, which is usually
4025 * when the CSS is offlined.
4026 *
4027 * The only exception to that are records of swapped out tmpfs/shmem
4028 * pages that need to be attributed to live ancestors on swapin. But
4029 * those references are manageable from userspace.
4030 */
4031
 
4032static DEFINE_IDR(mem_cgroup_idr);
4033
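/*
 * The ID reference count pins the IDR entry and, transitively, the CSS:
 * mem_cgroup_id_put_many() releases the ID and drops the CSS reference
 * once the last user is gone.
 */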
4034static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
4035{
4036	VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0);
4037	atomic_add(n, &memcg->id.ref);
4038}
4039
4040static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
4041{
4042	VM_BUG_ON(atomic_read(&memcg->id.ref) < n);
4043	if (atomic_sub_and_test(n, &memcg->id.ref)) {
4044		idr_remove(&mem_cgroup_idr, memcg->id.id);
4045		memcg->id.id = 0;
4046
4047		/* Memcg ID pins CSS */
4048		css_put(&memcg->css);
4049	}
4050}
4051
4052static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
4053{
4054	mem_cgroup_id_get_many(memcg, 1);
4055}
4056
4057static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
4058{
4059	mem_cgroup_id_put_many(memcg, 1);
4060}
4061
4062/**
4063 * mem_cgroup_from_id - look up a memcg from a memcg id
4064 * @id: the memcg id to look up
4065 *
4066 * Caller must hold rcu_read_lock().
4067 */
4068struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
4069{
4070	WARN_ON_ONCE(!rcu_read_lock_held());
4071	return idr_find(&mem_cgroup_idr, id);
4072}
4073
4074static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4075{
4076	struct mem_cgroup_per_node *pn;
4077	int tmp = node;
4078	/*
4079	 * This routine is called against all possible nodes, but it is a
4080	 * BUG to call kmalloc() against an offline node.
4081	 *
4082	 * TODO: this routine can waste a lot of memory for nodes which will
4083	 *       never be onlined. It would be better to use a memory hotplug
4084	 *       callback function.
4085	 */
4086	if (!node_state(node, N_NORMAL_MEMORY))
4087		tmp = -1;
4088	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4089	if (!pn)
4090		return 1;
4091
4092	pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
4093	if (!pn->lruvec_stat_cpu) {
 
4094		kfree(pn);
4095		return 1;
4096	}
4097
4098	lruvec_init(&pn->lruvec);
4099	pn->usage_in_excess = 0;
4100	pn->on_tree = false;
4101	pn->memcg = memcg;
4102
4103	memcg->nodeinfo[node] = pn;
4104	return 0;
4105}
4106
4107static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4108{
4109	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
4110
4111	if (!pn)
4112		return;
4113
4114	free_percpu(pn->lruvec_stat_cpu);
4115	kfree(pn);
4116}
4117
4118static void __mem_cgroup_free(struct mem_cgroup *memcg)
4119{
4120	int node;
4121
4122	for_each_node(node)
4123		free_mem_cgroup_per_node_info(memcg, node);
4124	free_percpu(memcg->stat_cpu);
 
4125	kfree(memcg);
4126}
4127
4128static void mem_cgroup_free(struct mem_cgroup *memcg)
4129{
 
4130	memcg_wb_domain_exit(memcg);
4131	__mem_cgroup_free(memcg);
4132}
4133
4134static struct mem_cgroup *mem_cgroup_alloc(void)
4135{
 
4136	struct mem_cgroup *memcg;
4137	size_t size;
4138	int node;
4139
4140	size = sizeof(struct mem_cgroup);
4141	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
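	/* struct mem_cgroup ends in a flexible array of per-node pointers */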
4142
4143	memcg = kzalloc(size, GFP_KERNEL);
4144	if (!memcg)
4145		return NULL;
4146
4147	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
4148				 1, MEM_CGROUP_ID_MAX,
4149				 GFP_KERNEL);
4150	if (memcg->id.id < 0)
4151		goto fail;
4152
4153	memcg->stat_cpu = alloc_percpu(struct mem_cgroup_stat_cpu);
4154	if (!memcg->stat_cpu)
4155		goto fail;
4156
4157	for_each_node(node)
4158		if (alloc_mem_cgroup_per_node_info(memcg, node))
4159			goto fail;
4160
4161	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4162		goto fail;
4163
4164	INIT_WORK(&memcg->high_work, high_work_func);
4165	memcg->last_scanned_node = MAX_NUMNODES;
4166	INIT_LIST_HEAD(&memcg->oom_notify);
4167	mutex_init(&memcg->thresholds_lock);
4168	spin_lock_init(&memcg->move_lock);
4169	vmpressure_init(&memcg->vmpressure);
4170	INIT_LIST_HEAD(&memcg->event_list);
4171	spin_lock_init(&memcg->event_list_lock);
4172	memcg->socket_pressure = jiffies;
4173#ifndef CONFIG_SLOB
4174	memcg->kmemcg_id = -1;
 
4175#endif
4176#ifdef CONFIG_CGROUP_WRITEBACK
4177	INIT_LIST_HEAD(&memcg->cgwb_list);
4178#endif
4179	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
4180	return memcg;
4181fail:
4182	if (memcg->id.id > 0)
4183		idr_remove(&mem_cgroup_idr, memcg->id.id);
4184	__mem_cgroup_free(memcg);
4185	return NULL;
4186}
4187
4188static struct cgroup_subsys_state * __ref
4189mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4190{
4191	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4192	struct mem_cgroup *memcg;
4193	long error = -ENOMEM;
4194
4195	memcg = mem_cgroup_alloc();
4196	if (!memcg)
4197		return ERR_PTR(error);
4198
4199	memcg->high = PAGE_COUNTER_MAX;
4200	memcg->soft_limit = PAGE_COUNTER_MAX;
4201	if (parent) {
4202		memcg->swappiness = mem_cgroup_swappiness(parent);
4203		memcg->oom_kill_disable = parent->oom_kill_disable;
4204	}
4205	if (parent && parent->use_hierarchy) {
4206		memcg->use_hierarchy = true;
4207		page_counter_init(&memcg->memory, &parent->memory);
4208		page_counter_init(&memcg->swap, &parent->swap);
4209		page_counter_init(&memcg->memsw, &parent->memsw);
4210		page_counter_init(&memcg->kmem, &parent->kmem);
4211		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
4212	} else {
 
4213		page_counter_init(&memcg->memory, NULL);
4214		page_counter_init(&memcg->swap, NULL);
4215		page_counter_init(&memcg->memsw, NULL);
4216		page_counter_init(&memcg->kmem, NULL);
4217		page_counter_init(&memcg->tcpmem, NULL);
4218		/*
4219		 * A deeper hierarchy with use_hierarchy == false doesn't make
4220		 * much sense, so let the cgroup subsystem know about this
4221		 * unfortunate state in our controller.
4222		 */
4223		if (parent != root_mem_cgroup)
4224			memory_cgrp_subsys.broken_hierarchy = true;
4225	}
4226
4227	/* The following stuff does not apply to the root */
4228	if (!parent) {
4229		root_mem_cgroup = memcg;
4230		return &memcg->css;
4231	}
4232
4233	error = memcg_online_kmem(memcg);
4234	if (error)
4235		goto fail;
4236
4237	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4238		static_branch_inc(&memcg_sockets_enabled_key);
4239
4240	return &memcg->css;
4241fail:
4242	mem_cgroup_free(memcg);
4243	return ERR_PTR(-ENOMEM);
4244}
4245
4246static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
4247{
4248	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4249
4250	/* Online state pins memcg ID, memcg ID pins CSS */
4251	atomic_set(&memcg->id.ref, 1);
4252	css_get(css);
4253	return 0;
4254}
4255
4256static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4257{
4258	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4259	struct mem_cgroup_event *event, *tmp;
4260
4261	/*
4262	 * Unregister events and notify userspace.
4263	 * Notify userspace about cgroup removing only after rmdir of cgroup
4264	 * directory to avoid race between userspace and kernelspace.
4265	 */
4266	spin_lock(&memcg->event_list_lock);
4267	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
4268		list_del_init(&event->list);
4269		schedule_work(&event->remove);
4270	}
4271	spin_unlock(&memcg->event_list_lock);
4272
4273	memcg->low = 0;
4274
4275	memcg_offline_kmem(memcg);
4276	wb_memcg_offline(memcg);
4277
4278	mem_cgroup_id_put(memcg);
4279}
4280
4281static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4282{
4283	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4284
4285	invalidate_reclaim_iterators(memcg);
 
4286}
4287
4288static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
4289{
4290	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4291
4292	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4293		static_branch_dec(&memcg_sockets_enabled_key);
4294
4295	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
4296		static_branch_dec(&memcg_sockets_enabled_key);
4297
4298	vmpressure_cleanup(&memcg->vmpressure);
4299	cancel_work_sync(&memcg->high_work);
4300	mem_cgroup_remove_from_trees(memcg);
4301	memcg_free_kmem(memcg);
4302	mem_cgroup_free(memcg);
4303}
4304
4305/**
4306 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4307 * @css: the target css
4308 *
4309 * Reset the states of the mem_cgroup associated with @css.  This is
4310 * invoked when the userland requests disabling on the default hierarchy
4311 * but the memcg is pinned through dependency.  The memcg should stop
4312 * applying policies and should revert to the vanilla state as it may be
4313 * made visible again.
4314 *
4315 * The current implementation only resets the essential configurations.
4316 * This needs to be expanded to cover all the visible parts.
4317 */
4318static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4319{
4320	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4321
4322	page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX);
4323	page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX);
4324	page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX);
4325	page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX);
4326	page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX);
4327	memcg->low = 0;
4328	memcg->high = PAGE_COUNTER_MAX;
4329	memcg->soft_limit = PAGE_COUNTER_MAX;
 
4330	memcg_wb_domain_size_changed(memcg);
4331}
4332
4333#ifdef CONFIG_MMU
4334/* Handlers for move charge at task migration. */
4335static int mem_cgroup_do_precharge(unsigned long count)
4336{
4337	int ret;
4338
4339	/* Try a single bulk charge without reclaim first, kswapd may wake */
4340	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
4341	if (!ret) {
4342		mc.precharge += count;
4343		return ret;
4344	}
4345
4346	/* Try charges one by one with reclaim, but do not retry */
4347	while (count--) {
4348		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
4349		if (ret)
4350			return ret;
4351		mc.precharge++;
4352		cond_resched();
4353	}
4354	return 0;
4355}
4356
4357union mc_target {
4358	struct page	*page;
4359	swp_entry_t	ent;
4360};
4361
4362enum mc_target_type {
4363	MC_TARGET_NONE = 0,
4364	MC_TARGET_PAGE,
4365	MC_TARGET_SWAP,
4366	MC_TARGET_DEVICE,
4367};
4368
4369static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4370						unsigned long addr, pte_t ptent)
4371{
4372	struct page *page = _vm_normal_page(vma, addr, ptent, true);
4373
4374	if (!page || !page_mapped(page))
4375		return NULL;
4376	if (PageAnon(page)) {
4377		if (!(mc.flags & MOVE_ANON))
4378			return NULL;
4379	} else {
4380		if (!(mc.flags & MOVE_FILE))
4381			return NULL;
4382	}
4383	if (!get_page_unless_zero(page))
4384		return NULL;
4385
4386	return page;
4387}
4388
4389#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
4390static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4391			pte_t ptent, swp_entry_t *entry)
4392{
4393	struct page *page = NULL;
4394	swp_entry_t ent = pte_to_swp_entry(ptent);
4395
4396	if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
4397		return NULL;
4398
4399	/*
4400	 * Handle MEMORY_DEVICE_PRIVATE pages, which are ZONE_DEVICE pages
4401	 * belonging to a device; because they are not accessible by the CPU,
4402	 * they are stored as special swap entries in the CPU page table.
4403	 */
4404	if (is_device_private_entry(ent)) {
4405		page = device_private_entry_to_page(ent);
4406		/*
4407		 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has
4408		 * a refcount of 1 when free (unlike a normal page).
4409		 */
4410		if (!page_ref_add_unless(page, 1, 1))
4411			return NULL;
4412		return page;
4413	}
4414
4415	/*
4416	 * Because lookup_swap_cache() updates some statistics counters,
4417	 * we call find_get_page() on the swap address space directly.
4418	 */
4419	page = find_get_page(swap_address_space(ent), swp_offset(ent));
4420	if (do_memsw_account())
4421		entry->val = ent.val;
4422
4423	return page;
4424}
4425#else
4426static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4427			pte_t ptent, swp_entry_t *entry)
4428{
4429	return NULL;
4430}
4431#endif
4432
4433static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4434			unsigned long addr, pte_t ptent, swp_entry_t *entry)
4435{
4436	struct page *page = NULL;
4437	struct address_space *mapping;
4438	pgoff_t pgoff;
4439
4440	if (!vma->vm_file) /* anonymous vma */
4441		return NULL;
4442	if (!(mc.flags & MOVE_FILE))
4443		return NULL;
4444
4445	mapping = vma->vm_file->f_mapping;
4446	pgoff = linear_page_index(vma, addr);
4447
4448	/* page is moved even if it's not RSS of this task (page-faulted). */
4449#ifdef CONFIG_SWAP
4450	/* shmem/tmpfs may report page out on swap: account for that too. */
4451	if (shmem_mapping(mapping)) {
4452		page = find_get_entry(mapping, pgoff);
4453		if (radix_tree_exceptional_entry(page)) {
4454			swp_entry_t swp = radix_to_swp_entry(page);
4455			if (do_memsw_account())
4456				*entry = swp;
4457			page = find_get_page(swap_address_space(swp),
4458					     swp_offset(swp));
4459		}
4460	} else
4461		page = find_get_page(mapping, pgoff);
4462#else
4463	page = find_get_page(mapping, pgoff);
4464#endif
4465	return page;
4466}
4467
4468/**
4469 * mem_cgroup_move_account - move account of the page
4470 * @page: the page
4471 * @compound: charge the page as compound or small page
4472 * @from: mem_cgroup which the page is moved from.
4473 * @to:	mem_cgroup which the page is moved to. @from != @to.
4474 *
4475 * The caller must make sure the page is not on LRU (isolate_page() is useful.)
4476 *
4477 * This function doesn't do "charge" to the new cgroup and doesn't do
4478 * "uncharge" from the old cgroup.
4479 */
4480static int mem_cgroup_move_account(struct page *page,
4481				   bool compound,
4482				   struct mem_cgroup *from,
4483				   struct mem_cgroup *to)
4484{
4485	unsigned long flags;
4486	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
4487	int ret;
4488	bool anon;
 
4489
4490	VM_BUG_ON(from == to);
4491	VM_BUG_ON_PAGE(PageLRU(page), page);
4492	VM_BUG_ON(compound && !PageTransHuge(page));
4493
4494	/*
4495	 * Prevent mem_cgroup_migrate() from looking at
4496	 * page->mem_cgroup of its source page while we change it.
4497	 */
4498	ret = -EBUSY;
4499	if (!trylock_page(page))
4500		goto out;
4501
4502	ret = -EINVAL;
4503	if (page->mem_cgroup != from)
4504		goto out_unlock;
4505
4506	anon = PageAnon(page);
4507
4508	spin_lock_irqsave(&from->move_lock, flags);
4509
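	/* Move the page's mapped, dirty and writeback counters from @from to @to */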
4510	if (!anon && page_mapped(page)) {
4511		__mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages);
4512		__mod_memcg_state(to, NR_FILE_MAPPED, nr_pages);
4513	}
4514
4515	/*
4516	 * move_lock is grabbed above and the caller set from->moving_account,
4517	 * so mod_memcg_page_state() will serialize updates to PageDirty.
4518	 * So the mapping should be stable for dirty pages.
4519	 */
4520	if (!anon && PageDirty(page)) {
4521		struct address_space *mapping = page_mapping(page);
4522
4523		if (mapping_cap_account_dirty(mapping)) {
4524			__mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages);
4525			__mod_memcg_state(to, NR_FILE_DIRTY, nr_pages);
4526		}
4527	}
4528
4529	if (PageWriteback(page)) {
4530		__mod_memcg_state(from, NR_WRITEBACK, -nr_pages);
4531		__mod_memcg_state(to, NR_WRITEBACK, nr_pages);
4532	}
4533
4534	/*
4535	 * It is safe to change page->mem_cgroup here because the page
4536	 * is referenced, charged, and isolated - we can't race with
4537	 * uncharging, charging, migration, or LRU putback.
4538	 */
4539
4540	/* caller should have done css_get */
4541	page->mem_cgroup = to;
4542	spin_unlock_irqrestore(&from->move_lock, flags);
4543
4544	ret = 0;
 
4545
4546	local_irq_disable();
4547	mem_cgroup_charge_statistics(to, page, compound, nr_pages);
4548	memcg_check_events(to, page);
4549	mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
4550	memcg_check_events(from, page);
4551	local_irq_enable();
4552out_unlock:
4553	unlock_page(page);
4554out:
4555	return ret;
4556}
4557
4558/**
4559 * get_mctgt_type - get target type of moving charge
4560 * @vma: the vma the pte to be checked belongs
4561 * @addr: the address corresponding to the pte to be checked
4562 * @ptent: the pte to be checked
4563 * @target: the pointer the target page or swap ent will be stored(can be NULL)
4564 *
4565 * Returns
4566 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
4567 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4568 *     move charge. If @target is not NULL, the page is stored in target->page
4569 *     with an extra refcount taken (callers should handle it).
4570 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4571 *     target for charge migration. if @target is not NULL, the entry is stored
4572 *     in target->ent.
4573 *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is MEMORY_DEVICE_PUBLIC
4574 *     or MEMORY_DEVICE_PRIVATE (so a ZONE_DEVICE page and thus not on the lru).
4575 *     For now such a page is charged like a regular page would be, as for all
4576 *     intents and purposes it is just special memory taking the place of a
4577 *     regular page.
4578 *
4579 *     See Documentation/vm/hmm.txt and include/linux/hmm.h
4580 *
4581 * Called with pte lock held.
4582 */
4583
4584static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
4585		unsigned long addr, pte_t ptent, union mc_target *target)
4586{
4587	struct page *page = NULL;
4588	enum mc_target_type ret = MC_TARGET_NONE;
4589	swp_entry_t ent = { .val = 0 };
4590
4591	if (pte_present(ptent))
4592		page = mc_handle_present_pte(vma, addr, ptent);
4593	else if (is_swap_pte(ptent))
4594		page = mc_handle_swap_pte(vma, ptent, &ent);
4595	else if (pte_none(ptent))
4596		page = mc_handle_file_pte(vma, addr, ptent, &ent);
4597
4598	if (!page && !ent.val)
4599		return ret;
4600	if (page) {
4601		/*
4602		 * Only do a loose check without serialization.
4603		 * mem_cgroup_move_account() checks whether the page is valid
4604		 * under LRU exclusion.
4605		 */
4606		if (page->mem_cgroup == mc.from) {
4607			ret = MC_TARGET_PAGE;
4608			if (is_device_private_page(page) ||
4609			    is_device_public_page(page))
4610				ret = MC_TARGET_DEVICE;
4611			if (target)
4612				target->page = page;
4613		}
4614		if (!ret || !target)
4615			put_page(page);
4616	}
4617	/*
4618	 * There is a swap entry and the page doesn't exist or isn't charged.
4619	 * But we cannot move a tail page of a THP.
4620	 */
4621	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
4622	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
4623		ret = MC_TARGET_SWAP;
4624		if (target)
4625			target->ent = ent;
4626	}
4627	return ret;
4628}
4629
4630#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4631/*
4632 * We don't consider PMD mapped swapping or file mapped pages because THP does
4633 * not support them for now.
4634 * Caller should make sure that pmd_trans_huge(pmd) is true.
4635 */
4636static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4637		unsigned long addr, pmd_t pmd, union mc_target *target)
4638{
4639	struct page *page = NULL;
4640	enum mc_target_type ret = MC_TARGET_NONE;
4641
4642	if (unlikely(is_swap_pmd(pmd))) {
4643		VM_BUG_ON(thp_migration_supported() &&
4644				  !is_pmd_migration_entry(pmd));
4645		return ret;
4646	}
4647	page = pmd_page(pmd);
4648	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
4649	if (!(mc.flags & MOVE_ANON))
4650		return ret;
4651	if (page->mem_cgroup == mc.from) {
4652		ret = MC_TARGET_PAGE;
4653		if (target) {
4654			get_page(page);
4655			target->page = page;
4656		}
4657	}
4658	return ret;
4659}
4660#else
4661static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4662		unsigned long addr, pmd_t pmd, union mc_target *target)
4663{
4664	return MC_TARGET_NONE;
4665}
4666#endif
4667
4668static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4669					unsigned long addr, unsigned long end,
4670					struct mm_walk *walk)
4671{
4672	struct vm_area_struct *vma = walk->vma;
4673	pte_t *pte;
4674	spinlock_t *ptl;
4675
4676	ptl = pmd_trans_huge_lock(pmd, vma);
4677	if (ptl) {
4678		/*
4679		 * Note there cannot be MC_TARGET_DEVICE for now as we do not
4680		 * support transparent huge pages with MEMORY_DEVICE_PUBLIC or
4681		 * MEMORY_DEVICE_PRIVATE, but this might change.
4682		 */
4683		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
4684			mc.precharge += HPAGE_PMD_NR;
4685		spin_unlock(ptl);
4686		return 0;
4687	}
4688
4689	if (pmd_trans_unstable(pmd))
4690		return 0;
4691	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 
 
4692	for (; addr != end; pte++, addr += PAGE_SIZE)
4693		if (get_mctgt_type(vma, addr, *pte, NULL))
4694			mc.precharge++;	/* increment precharge temporarily */
4695	pte_unmap_unlock(pte - 1, ptl);
4696	cond_resched();
4697
4698	return 0;
4699}
4700
4701static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4702{
4703	unsigned long precharge;
4704
4705	struct mm_walk mem_cgroup_count_precharge_walk = {
4706		.pmd_entry = mem_cgroup_count_precharge_pte_range,
4707		.mm = mm,
4708	};
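	/* Walk every VMA in the mm and count pages/swap entries eligible for moving */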
4709	down_read(&mm->mmap_sem);
4710	walk_page_range(0, mm->highest_vm_end,
4711			&mem_cgroup_count_precharge_walk);
4712	up_read(&mm->mmap_sem);
4713
4714	precharge = mc.precharge;
4715	mc.precharge = 0;
4716
4717	return precharge;
4718}
4719
4720static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4721{
4722	unsigned long precharge = mem_cgroup_count_precharge(mm);
4723
4724	VM_BUG_ON(mc.moving_task);
4725	mc.moving_task = current;
4726	return mem_cgroup_do_precharge(precharge);
4727}
4728
4729/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
4730static void __mem_cgroup_clear_mc(void)
4731{
4732	struct mem_cgroup *from = mc.from;
4733	struct mem_cgroup *to = mc.to;
4734
4735	/* we must uncharge all the leftover precharges from mc.to */
4736	if (mc.precharge) {
4737		cancel_charge(mc.to, mc.precharge);
4738		mc.precharge = 0;
4739	}
4740	/*
4741	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4742	 * we must uncharge here.
4743	 */
4744	if (mc.moved_charge) {
4745		cancel_charge(mc.from, mc.moved_charge);
4746		mc.moved_charge = 0;
4747	}
4748	/* we must fixup refcnts and charges */
4749	if (mc.moved_swap) {
4750		/* uncharge swap account from the old cgroup */
4751		if (!mem_cgroup_is_root(mc.from))
4752			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
4753
4754		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
4755
4756		/*
4757		 * we charged both to->memory and to->memsw, so we
4758		 * should uncharge to->memory.
4759		 */
4760		if (!mem_cgroup_is_root(mc.to))
4761			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
4762
4763		mem_cgroup_id_get_many(mc.to, mc.moved_swap);
4764		css_put_many(&mc.to->css, mc.moved_swap);
4765
4766		mc.moved_swap = 0;
4767	}
4768	memcg_oom_recover(from);
4769	memcg_oom_recover(to);
4770	wake_up_all(&mc.waitq);
4771}
4772
4773static void mem_cgroup_clear_mc(void)
4774{
4775	struct mm_struct *mm = mc.mm;
4776
4777	/*
4778	 * we must clear moving_task before waking up waiters at the end of
4779	 * task migration.
4780	 */
4781	mc.moving_task = NULL;
4782	__mem_cgroup_clear_mc();
4783	spin_lock(&mc.lock);
4784	mc.from = NULL;
4785	mc.to = NULL;
4786	mc.mm = NULL;
4787	spin_unlock(&mc.lock);
4788
4789	mmput(mm);
4790}
4791
4792static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4793{
4794	struct cgroup_subsys_state *css;
4795	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
4796	struct mem_cgroup *from;
4797	struct task_struct *leader, *p;
4798	struct mm_struct *mm;
4799	unsigned long move_flags;
4800	int ret = 0;
4801
4802	/* charge immigration isn't supported on the default hierarchy */
4803	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4804		return 0;
4805
4806	/*
4807	 * Multi-process migrations only happen on the default hierarchy
4808	 * where charge immigration is not used.  Perform charge
4809	 * immigration if @tset contains a leader and whine if there are
4810	 * multiple.
4811	 */
4812	p = NULL;
4813	cgroup_taskset_for_each_leader(leader, css, tset) {
4814		WARN_ON_ONCE(p);
4815		p = leader;
4816		memcg = mem_cgroup_from_css(css);
4817	}
4818	if (!p)
4819		return 0;
4820
4821	/*
4822	 * We are now committed to this value whatever it is. Changes in this
4823	 * tunable will only affect upcoming migrations, not the current one.
4824	 * So we need to save it, and keep it going.
4825	 */
4826	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4827	if (!move_flags)
4828		return 0;
4829
4830	from = mem_cgroup_from_task(p);
4831
4832	VM_BUG_ON(from == memcg);
4833
4834	mm = get_task_mm(p);
4835	if (!mm)
4836		return 0;
4837	/* We move charges only when we move an owner of the mm */
4838	if (mm->owner == p) {
4839		VM_BUG_ON(mc.from);
4840		VM_BUG_ON(mc.to);
4841		VM_BUG_ON(mc.precharge);
4842		VM_BUG_ON(mc.moved_charge);
4843		VM_BUG_ON(mc.moved_swap);
4844
4845		spin_lock(&mc.lock);
4846		mc.mm = mm;
4847		mc.from = from;
4848		mc.to = memcg;
4849		mc.flags = move_flags;
4850		spin_unlock(&mc.lock);
4851		/* We set mc.moving_task later */
4852
4853		ret = mem_cgroup_precharge_mc(mm);
4854		if (ret)
4855			mem_cgroup_clear_mc();
4856	} else {
4857		mmput(mm);
4858	}
4859	return ret;
4860}
4861
4862static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4863{
4864	if (mc.to)
4865		mem_cgroup_clear_mc();
4866}
4867
4868static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4869				unsigned long addr, unsigned long end,
4870				struct mm_walk *walk)
4871{
4872	int ret = 0;
4873	struct vm_area_struct *vma = walk->vma;
4874	pte_t *pte;
4875	spinlock_t *ptl;
4876	enum mc_target_type target_type;
4877	union mc_target target;
4878	struct page *page;
4879
4880	ptl = pmd_trans_huge_lock(pmd, vma);
4881	if (ptl) {
4882		if (mc.precharge < HPAGE_PMD_NR) {
4883			spin_unlock(ptl);
4884			return 0;
4885		}
4886		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
4887		if (target_type == MC_TARGET_PAGE) {
4888			page = target.page;
4889			if (!isolate_lru_page(page)) {
4890				if (!mem_cgroup_move_account(page, true,
4891							     mc.from, mc.to)) {
4892					mc.precharge -= HPAGE_PMD_NR;
4893					mc.moved_charge += HPAGE_PMD_NR;
4894				}
4895				putback_lru_page(page);
4896			}
 
4897			put_page(page);
4898		} else if (target_type == MC_TARGET_DEVICE) {
4899			page = target.page;
4900			if (!mem_cgroup_move_account(page, true,
4901						     mc.from, mc.to)) {
4902				mc.precharge -= HPAGE_PMD_NR;
4903				mc.moved_charge += HPAGE_PMD_NR;
4904			}
 
4905			put_page(page);
4906		}
4907		spin_unlock(ptl);
4908		return 0;
4909	}
4910
4911	if (pmd_trans_unstable(pmd))
4912		return 0;
4913retry:
4914	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 
 
4915	for (; addr != end; addr += PAGE_SIZE) {
4916		pte_t ptent = *(pte++);
4917		bool device = false;
4918		swp_entry_t ent;
4919
4920		if (!mc.precharge)
4921			break;
4922
4923		switch (get_mctgt_type(vma, addr, ptent, &target)) {
4924		case MC_TARGET_DEVICE:
4925			device = true;
4926			/* fall through */
4927		case MC_TARGET_PAGE:
4928			page = target.page;
4929			/*
4930			 * We can have a part of the split pmd here. Moving it
4931			 * can be done but it would be too convoluted so simply
4932			 * ignore such a partial THP and keep it in the original
4933			 * memcg. There should be somebody mapping the head.
4934			 */
4935			if (PageTransCompound(page))
4936				goto put;
4937			if (!device && isolate_lru_page(page))
4938				goto put;
4939			if (!mem_cgroup_move_account(page, false,
4940						mc.from, mc.to)) {
4941				mc.precharge--;
4942				/* we uncharge from mc.from later. */
4943				mc.moved_charge++;
4944			}
4945			if (!device)
4946				putback_lru_page(page);
4947put:			/* get_mctgt_type() gets the page */
 
4948			put_page(page);
4949			break;
4950		case MC_TARGET_SWAP:
4951			ent = target.ent;
4952			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
4953				mc.precharge--;
4954				/* we fixup refcnts and charges later. */
 
4955				mc.moved_swap++;
4956			}
4957			break;
4958		default:
4959			break;
4960		}
4961	}
4962	pte_unmap_unlock(pte - 1, ptl);
4963	cond_resched();
4964
4965	if (addr != end) {
4966		/*
4967		 * We have consumed all precharges we got in can_attach().
4968		 * We try charging one by one, but don't do any additional
4969		 * charges to mc.to if we have already failed to charge once
4970		 * in the attach() phase.
4971		 */
4972		ret = mem_cgroup_do_precharge(1);
4973		if (!ret)
4974			goto retry;
4975	}
4976
4977	return ret;
4978}
4979
4980static void mem_cgroup_move_charge(void)
4981{
4982	struct mm_walk mem_cgroup_move_charge_walk = {
4983		.pmd_entry = mem_cgroup_move_charge_pte_range,
4984		.mm = mc.mm,
4985	};
4986
4987	lru_add_drain_all();
4988	/*
4989	 * Signal lock_page_memcg() to take the memcg's move_lock
4990	 * while we're moving its pages to another memcg. Then wait
4991	 * for already started RCU-only updates to finish.
4992	 */
4993	atomic_inc(&mc.from->moving_account);
4994	synchronize_rcu();
4995retry:
4996	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
4997		/*
4998		 * Someone holding the mmap_sem might be waiting on the
4999		 * waitq. So we cancel all extra charges, wake up all waiters,
5000		 * and retry. Because we cancel precharges, we might not be able
5001		 * to move enough charges, but moving charge is a best-effort
5002		 * feature anyway, so it wouldn't be a big problem.
5003		 */
5004		__mem_cgroup_clear_mc();
5005		cond_resched();
5006		goto retry;
5007	}
5008	/*
5009	 * When we have consumed all precharges and failed in doing
5010	 * additional charge, the page walk just aborts.
5011	 */
5012	walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk);
5013
5014	up_read(&mc.mm->mmap_sem);
5015	atomic_dec(&mc.from->moving_account);
5016}
5017
5018static void mem_cgroup_move_task(void)
5019{
5020	if (mc.to) {
5021		mem_cgroup_move_charge();
5022		mem_cgroup_clear_mc();
5023	}
5024}
 
5025#else	/* !CONFIG_MMU */
5026static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5027{
5028	return 0;
5029}
5030static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5031{
5032}
5033static void mem_cgroup_move_task(void)
5034{
5035}
5036#endif
5037
5038/*
5039 * Cgroup retains root cgroups across [un]mount cycles making it necessary
5040 * to verify whether we're attached to the default hierarchy on each mount
5041 * attempt.
5042 */
5043static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
5044{
5045	/*
5046	 * use_hierarchy is forced on the default hierarchy.  cgroup core
5047	 * guarantees that @root doesn't have any children, so turning it
5048	 * on for the root memcg is enough.
5049	 */
5050	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5051		root_mem_cgroup->use_hierarchy = true;
5052	else
5053		root_mem_cgroup->use_hierarchy = false;
5054}
5055
5056static u64 memory_current_read(struct cgroup_subsys_state *css,
5057			       struct cftype *cft)
5058{
5059	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5060
5061	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
5062}
5063
5064static int memory_low_show(struct seq_file *m, void *v)
5065{
5066	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5067	unsigned long low = READ_ONCE(memcg->low);
5068
5069	if (low == PAGE_COUNTER_MAX)
5070		seq_puts(m, "max\n");
5071	else
5072		seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
5073
5074	return 0;
5075}
5076
5077static ssize_t memory_low_write(struct kernfs_open_file *of,
5078				char *buf, size_t nbytes, loff_t off)
5079{
5080	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5081	unsigned long low;
5082	int err;
5083
5084	buf = strstrip(buf);
5085	err = page_counter_memparse(buf, "max", &low);
5086	if (err)
5087		return err;
5088
5089	memcg->low = low;
5090
5091	return nbytes;
5092}
5093
5094static int memory_high_show(struct seq_file *m, void *v)
5095{
5096	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5097	unsigned long high = READ_ONCE(memcg->high);
5098
5099	if (high == PAGE_COUNTER_MAX)
5100		seq_puts(m, "max\n");
5101	else
5102		seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
5103
5104	return 0;
5105}
5106
5107static ssize_t memory_high_write(struct kernfs_open_file *of,
5108				 char *buf, size_t nbytes, loff_t off)
5109{
5110	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5111	unsigned long nr_pages;
 
5112	unsigned long high;
5113	int err;
5114
5115	buf = strstrip(buf);
5116	err = page_counter_memparse(buf, "max", &high);
5117	if (err)
5118		return err;
5119
5120	memcg->high = high;
5121
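	/* Try to bring usage down to the new high watermark right away */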
5122	nr_pages = page_counter_read(&memcg->memory);
5123	if (nr_pages > high)
5124		try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
5125					     GFP_KERNEL, true);
5126
5127	memcg_wb_domain_size_changed(memcg);
5128	return nbytes;
5129}
5130
5131static int memory_max_show(struct seq_file *m, void *v)
5132{
5133	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5134	unsigned long max = READ_ONCE(memcg->memory.limit);
5135
5136	if (max == PAGE_COUNTER_MAX)
5137		seq_puts(m, "max\n");
5138	else
5139		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5140
5141	return 0;
5142}
5143
5144static ssize_t memory_max_write(struct kernfs_open_file *of,
5145				char *buf, size_t nbytes, loff_t off)
5146{
5147	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5148	unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
5149	bool drained = false;
5150	unsigned long max;
5151	int err;
5152
5153	buf = strstrip(buf);
5154	err = page_counter_memparse(buf, "max", &max);
5155	if (err)
5156		return err;
5157
5158	xchg(&memcg->memory.limit, max);
5159
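	/*
	 * Enforce the new limit: drain per-cpu charge caches once, then
	 * retry direct reclaim a bounded number of times, and finally
	 * fall back to the memcg OOM killer.
	 */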
5160	for (;;) {
5161		unsigned long nr_pages = page_counter_read(&memcg->memory);
5162
5163		if (nr_pages <= max)
5164			break;
5165
5166		if (signal_pending(current)) {
5167			err = -EINTR;
5168			break;
5169		}
5170
5171		if (!drained) {
5172			drain_all_stock(memcg);
5173			drained = true;
5174			continue;
5175		}
5176
5177		if (nr_reclaims) {
5178			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
5179							  GFP_KERNEL, true))
5180				nr_reclaims--;
5181			continue;
5182		}
5183
5184		memcg_memory_event(memcg, MEMCG_OOM);
5185		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
5186			break;
5187	}
5188
5189	memcg_wb_domain_size_changed(memcg);
5190	return nbytes;
5191}
5192
5193static int memory_events_show(struct seq_file *m, void *v)
5194{
5195	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5196
5197	seq_printf(m, "low %lu\n",
5198		   atomic_long_read(&memcg->memory_events[MEMCG_LOW]));
5199	seq_printf(m, "high %lu\n",
5200		   atomic_long_read(&memcg->memory_events[MEMCG_HIGH]));
5201	seq_printf(m, "max %lu\n",
5202		   atomic_long_read(&memcg->memory_events[MEMCG_MAX]));
5203	seq_printf(m, "oom %lu\n",
5204		   atomic_long_read(&memcg->memory_events[MEMCG_OOM]));
5205	seq_printf(m, "oom_kill %lu\n", memcg_sum_events(memcg, OOM_KILL));
5206
 
5207	return 0;
5208}
5209
5210static int memory_stat_show(struct seq_file *m, void *v)
5211{
5212	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5213	unsigned long stat[MEMCG_NR_STAT];
5214	unsigned long events[NR_VM_EVENT_ITEMS];
5215	int i;
5216
5217	/*
5218	 * Provide statistics on the state of the memory subsystem as
5219	 * well as cumulative event counters that show past behavior.
5220	 *
5221	 * This list is ordered following a combination of these gradients:
5222	 * 1) generic big picture -> specifics and details
5223	 * 2) reflecting userspace activity -> reflecting kernel heuristics
5224	 *
5225	 * Current memory state:
5226	 */
5227
5228	tree_stat(memcg, stat);
5229	tree_events(memcg, events);
5230
5231	seq_printf(m, "anon %llu\n",
5232		   (u64)stat[MEMCG_RSS] * PAGE_SIZE);
5233	seq_printf(m, "file %llu\n",
5234		   (u64)stat[MEMCG_CACHE] * PAGE_SIZE);
5235	seq_printf(m, "kernel_stack %llu\n",
5236		   (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
5237	seq_printf(m, "slab %llu\n",
5238		   (u64)(stat[NR_SLAB_RECLAIMABLE] +
5239			 stat[NR_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
5240	seq_printf(m, "sock %llu\n",
5241		   (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
5242
5243	seq_printf(m, "shmem %llu\n",
5244		   (u64)stat[NR_SHMEM] * PAGE_SIZE);
5245	seq_printf(m, "file_mapped %llu\n",
5246		   (u64)stat[NR_FILE_MAPPED] * PAGE_SIZE);
5247	seq_printf(m, "file_dirty %llu\n",
5248		   (u64)stat[NR_FILE_DIRTY] * PAGE_SIZE);
5249	seq_printf(m, "file_writeback %llu\n",
5250		   (u64)stat[NR_WRITEBACK] * PAGE_SIZE);
5251
5252	for (i = 0; i < NR_LRU_LISTS; i++) {
5253		struct mem_cgroup *mi;
5254		unsigned long val = 0;
5255
5256		for_each_mem_cgroup_tree(mi, memcg)
5257			val += mem_cgroup_nr_lru_pages(mi, BIT(i));
5258		seq_printf(m, "%s %llu\n",
5259			   mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
5260	}
5261
5262	seq_printf(m, "slab_reclaimable %llu\n",
5263		   (u64)stat[NR_SLAB_RECLAIMABLE] * PAGE_SIZE);
5264	seq_printf(m, "slab_unreclaimable %llu\n",
5265		   (u64)stat[NR_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
5266
5267	/* Accumulated memory events */
5268
5269	seq_printf(m, "pgfault %lu\n", events[PGFAULT]);
5270	seq_printf(m, "pgmajfault %lu\n", events[PGMAJFAULT]);
5271
5272	seq_printf(m, "pgrefill %lu\n", events[PGREFILL]);
5273	seq_printf(m, "pgscan %lu\n", events[PGSCAN_KSWAPD] +
5274		   events[PGSCAN_DIRECT]);
5275	seq_printf(m, "pgsteal %lu\n", events[PGSTEAL_KSWAPD] +
5276		   events[PGSTEAL_DIRECT]);
5277	seq_printf(m, "pgactivate %lu\n", events[PGACTIVATE]);
5278	seq_printf(m, "pgdeactivate %lu\n", events[PGDEACTIVATE]);
5279	seq_printf(m, "pglazyfree %lu\n", events[PGLAZYFREE]);
5280	seq_printf(m, "pglazyfreed %lu\n", events[PGLAZYFREED]);
5281
5282	seq_printf(m, "workingset_refault %lu\n",
5283		   stat[WORKINGSET_REFAULT]);
5284	seq_printf(m, "workingset_activate %lu\n",
5285		   stat[WORKINGSET_ACTIVATE]);
5286	seq_printf(m, "workingset_nodereclaim %lu\n",
5287		   stat[WORKINGSET_NODERECLAIM]);
5288
5289	return 0;
5290}
5291
5292static struct cftype memory_files[] = {
5293	{
5294		.name = "current",
5295		.flags = CFTYPE_NOT_ON_ROOT,
5296		.read_u64 = memory_current_read,
5297	},
5298	{
5299		.name = "low",
5300		.flags = CFTYPE_NOT_ON_ROOT,
5301		.seq_show = memory_low_show,
5302		.write = memory_low_write,
5303	},
5304	{
5305		.name = "high",
5306		.flags = CFTYPE_NOT_ON_ROOT,
5307		.seq_show = memory_high_show,
5308		.write = memory_high_write,
5309	},
5310	{
5311		.name = "max",
5312		.flags = CFTYPE_NOT_ON_ROOT,
5313		.seq_show = memory_max_show,
5314		.write = memory_max_write,
5315	},
5316	{
5317		.name = "events",
5318		.flags = CFTYPE_NOT_ON_ROOT,
5319		.file_offset = offsetof(struct mem_cgroup, events_file),
5320		.seq_show = memory_events_show,
5321	},
5322	{
5323		.name = "stat",
5324		.flags = CFTYPE_NOT_ON_ROOT,
5325		.seq_show = memory_stat_show,
5326	},
5327	{ }	/* terminate */
5328};
5329
5330struct cgroup_subsys memory_cgrp_subsys = {
5331	.css_alloc = mem_cgroup_css_alloc,
5332	.css_online = mem_cgroup_css_online,
5333	.css_offline = mem_cgroup_css_offline,
5334	.css_released = mem_cgroup_css_released,
5335	.css_free = mem_cgroup_css_free,
5336	.css_reset = mem_cgroup_css_reset,
5337	.can_attach = mem_cgroup_can_attach,
5338	.cancel_attach = mem_cgroup_cancel_attach,
5339	.post_attach = mem_cgroup_move_task,
5340	.bind = mem_cgroup_bind,
5341	.dfl_cftypes = memory_files,
5342	.legacy_cftypes = mem_cgroup_legacy_files,
5343	.early_init = 0,
5344};
5345
5346/**
5347 * mem_cgroup_low - check if memory consumption is below the normal range
5348 * @root: the top ancestor of the sub-tree being checked
5349 * @memcg: the memory cgroup to check
5350 *
5351 * Returns %true if memory consumption of @memcg, and that of all
5352 * ancestors up to (but not including) @root, is below the normal range.
5353 *
5354 * @root is exclusive; it is never low when looked at directly and isn't
5355 * checked when traversing the hierarchy.
5356 *
5357 * Excluding @root enables using memory.low to prioritize memory usage
5358 * between cgroups within a subtree of the hierarchy that is limited by
5359 * memory.high or memory.max.
5360 *
5361 * For example, given cgroup A with children B and C:
5362 *
5363 *    A
5364 *   / \
5365 *  B   C
5366 *
5367 * and
5368 *
5369 *  1. A/memory.current > A/memory.high
5370 *  2. A/B/memory.current < A/B/memory.low
5371 *  3. A/C/memory.current >= A/C/memory.low
5372 *
5373 * As 'A' is high, i.e. triggers reclaim from 'A', and 'B' is low, we
5374 * should reclaim from 'C' until 'A' is no longer high or until we can
5375 * no longer reclaim from 'C'.  If 'A', i.e. @root, isn't excluded by
5376 * mem_cgroup_low when reclaiming from 'A', then 'B' won't be considered
5377 * low and we will reclaim indiscriminately from both 'B' and 'C'.
5378 */
5379bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
5380{
5381	if (mem_cgroup_disabled())
5382		return false;
5383
5384	if (!root)
5385		root = root_mem_cgroup;
5386	if (memcg == root)
5387		return false;
5388
5389	for (; memcg != root; memcg = parent_mem_cgroup(memcg)) {
5390		if (page_counter_read(&memcg->memory) >= memcg->low)
5391			return false;
 
5392	}
5393
5394	return true;
5395}
5396
5397/**
5398 * mem_cgroup_try_charge - try charging a page
5399 * @page: page to charge
5400 * @mm: mm context of the victim
5401 * @gfp_mask: reclaim mode
5402 * @memcgp: charged memcg return
5403 * @compound: charge the page as compound or small page
5404 *
5405 * Try to charge @page to the memcg that @mm belongs to, reclaiming
5406 * pages according to @gfp_mask if necessary.
5407 *
5408 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5409 * Otherwise, an error code is returned.
5410 *
5411 * After page->mapping has been set up, the caller must finalize the
5412 * charge with mem_cgroup_commit_charge().  Or abort the transaction
5413 * with mem_cgroup_cancel_charge() in case page instantiation fails.
5414 */
5415int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
5416			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
5417			  bool compound)
5418{
5419	struct mem_cgroup *memcg = NULL;
5420	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5421	int ret = 0;
5422
5423	if (mem_cgroup_disabled())
5424		goto out;
5425
5426	if (PageSwapCache(page)) {
5427		/*
5428		 * Every swap fault against a single page tries to charge the
5429		 * page, bail as early as possible.  shmem_unuse() encounters
5430		 * already charged pages, too.  The USED bit is protected by
5431		 * the page lock, which serializes swap cache removal, which
5432		 * in turn serializes uncharging.
5433		 */
5434		VM_BUG_ON_PAGE(!PageLocked(page), page);
5435		if (compound_head(page)->mem_cgroup)
5436			goto out;
5437
5438		if (do_swap_account) {
5439			swp_entry_t ent = { .val = page_private(page), };
5440			unsigned short id = lookup_swap_cgroup_id(ent);
5441
5442			rcu_read_lock();
5443			memcg = mem_cgroup_from_id(id);
5444			if (memcg && !css_tryget_online(&memcg->css))
5445				memcg = NULL;
5446			rcu_read_unlock();
5447		}
5448	}
5449
5450	if (!memcg)
5451		memcg = get_mem_cgroup_from_mm(mm);
5452
5453	ret = try_charge(memcg, gfp_mask, nr_pages);
5454
5455	css_put(&memcg->css);
5456out:
5457	*memcgp = memcg;
5458	return ret;
5459}
5460
5461/**
5462 * mem_cgroup_commit_charge - commit a page charge
5463 * @page: page to charge
5464 * @memcg: memcg to charge the page to
5465 * @lrucare: page might be on LRU already
5466 * @compound: charge the page as compound or small page
5467 *
5468 * Finalize a charge transaction started by mem_cgroup_try_charge(),
5469 * after page->mapping has been set up.  This must happen atomically
5470 * as part of the page instantiation, i.e. under the page table lock
5471 * for anonymous pages, under the page lock for page and swap cache.
5472 *
5473 * In addition, the page must not be on the LRU during the commit, to
5474 * prevent racing with task migration.  If it might be, use @lrucare.
5475 *
5476 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
5477 */
5478void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
5479			      bool lrucare, bool compound)
5480{
5481	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5482
5483	VM_BUG_ON_PAGE(!page->mapping, page);
5484	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5485
5486	if (mem_cgroup_disabled())
5487		return;
5488	/*
5489	 * Swap faults will attempt to charge the same page multiple
5490	 * times.  But reuse_swap_page() might have removed the page
5491	 * from swapcache already, so we can't check PageSwapCache().
5492	 */
5493	if (!memcg)
5494		return;
 
 
5495
5496	commit_charge(page, memcg, lrucare);
5497
5498	local_irq_disable();
5499	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
5500	memcg_check_events(memcg, page);
5501	local_irq_enable();
5502
5503	if (do_memsw_account() && PageSwapCache(page)) {
5504		swp_entry_t entry = { .val = page_private(page) };
5505		/*
5506		 * The swap entry might not get freed for a long time,
5507		 * let's not wait for it.  The page already received a
5508		 * memory+swap charge, drop the swap entry duplicate.
5509		 */
5510		mem_cgroup_uncharge_swap(entry, nr_pages);
5511	}
5512}
5513
5514/**
5515 * mem_cgroup_cancel_charge - cancel a page charge
5516 * @page: page to charge
5517 * @memcg: memcg to charge the page to
5518 * @compound: charge the page as compound or small page
5519 *
5520 * Cancel a charge transaction started by mem_cgroup_try_charge().
5521 */
5522void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
5523		bool compound)
5524{
5525	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
 
 
5526
5527	if (mem_cgroup_disabled())
5528		return;
5529	/*
5530	 * Swap faults will attempt to charge the same page multiple
5531	 * times.  But reuse_swap_page() might have removed the page
5532	 * from swapcache already, so we can't check PageSwapCache().
5533	 */
5534	if (!memcg)
5535		return;
5536
5537	cancel_charge(memcg, nr_pages);
5538}
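
/*
 * Illustrative sketch (hypothetical, not part of this file): the
 * try/commit/cancel transaction described above, for a freshly
 * allocated page that is about to be added to a mapping.  The helper
 * name and the add_to_mapping() step are placeholders.
 */
#if 0
static int example_charge_new_page(struct page *page, struct mm_struct *mm,
				   gfp_t gfp_mask)
{
	struct mem_cgroup *memcg;
	int err;

	err = mem_cgroup_try_charge(page, mm, gfp_mask, &memcg, false);
	if (err)
		return err;

	err = add_to_mapping(page);	/* hypothetical: sets page->mapping */
	if (err) {
		mem_cgroup_cancel_charge(page, memcg, false);
		return err;
	}

	mem_cgroup_commit_charge(page, memcg, false, false);
	return 0;
}
#endif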
5539
5540struct uncharge_gather {
5541	struct mem_cgroup *memcg;
 
5542	unsigned long pgpgout;
5543	unsigned long nr_anon;
5544	unsigned long nr_file;
5545	unsigned long nr_kmem;
5546	unsigned long nr_huge;
5547	unsigned long nr_shmem;
5548	struct page *dummy_page;
5549};
5550
5551static inline void uncharge_gather_clear(struct uncharge_gather *ug)
5552{
5553	memset(ug, 0, sizeof(*ug));
5554}
5555
5556static void uncharge_batch(const struct uncharge_gather *ug)
5557{
5558	unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem;
5559	unsigned long flags;
5560
5561	if (!mem_cgroup_is_root(ug->memcg)) {
5562		page_counter_uncharge(&ug->memcg->memory, nr_pages);
5563		if (do_memsw_account())
5564			page_counter_uncharge(&ug->memcg->memsw, nr_pages);
5565		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
5566			page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
5567		memcg_oom_recover(ug->memcg);
5568	}
5569
5570	local_irq_save(flags);
5571	__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
5572	__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
5573	__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
5574	__mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
5575	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
5576	__this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
5577	memcg_check_events(ug->memcg, ug->dummy_page);
5578	local_irq_restore(flags);
5579
5580	if (!mem_cgroup_is_root(ug->memcg))
5581		css_put_many(&ug->memcg->css, nr_pages);
5582}
5583
5584static void uncharge_page(struct page *page, struct uncharge_gather *ug)
5585{
5586	VM_BUG_ON_PAGE(PageLRU(page), page);
5587	VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) &&
5588			!PageHWPoison(page) , page);
5589
5590	if (!page->mem_cgroup)
5591		return;
5592
5593	/*
5594	 * Nobody should be changing or seriously looking at
5595	 * page->mem_cgroup at this point, we have fully
5596	 * exclusive access to the page.
5597	 */
5598
5599	if (ug->memcg != page->mem_cgroup) {
5600		if (ug->memcg) {
5601			uncharge_batch(ug);
5602			uncharge_gather_clear(ug);
5603		}
5604		ug->memcg = page->mem_cgroup;
5605	}
5606
5607	if (!PageKmemcg(page)) {
5608		unsigned int nr_pages = 1;
5609
5610		if (PageTransHuge(page)) {
5611			nr_pages <<= compound_order(page);
5612			ug->nr_huge += nr_pages;
5613		}
5614		if (PageAnon(page))
5615			ug->nr_anon += nr_pages;
5616		else {
5617			ug->nr_file += nr_pages;
5618			if (PageSwapBacked(page))
5619				ug->nr_shmem += nr_pages;
5620		}
5621		ug->pgpgout++;
5622	} else {
5623		ug->nr_kmem += 1 << compound_order(page);
5624		__ClearPageKmemcg(page);
5625	}
5626
5627	ug->dummy_page = page;
5628	page->mem_cgroup = NULL;
5629}
5630
5631static void uncharge_list(struct list_head *page_list)
5632{
5633	struct uncharge_gather ug;
5634	struct list_head *next;
5635
5636	uncharge_gather_clear(&ug);
 
 
5637
5638	/*
5639	 * Note that the list can be a single page->lru; hence the
5640	 * do-while loop instead of a simple list_for_each_entry().
5641	 */
5642	next = page_list->next;
5643	do {
5644		struct page *page;
5645
5646		page = list_entry(next, struct page, lru);
5647		next = page->lru.next;
5648
5649		uncharge_page(page, &ug);
5650	} while (next != page_list);
5651
5652	if (ug.memcg)
5653		uncharge_batch(&ug);
5654}
5655
5656/**
5657 * mem_cgroup_uncharge - uncharge a page
5658 * @page: page to uncharge
5659 *
5660 * Uncharge a page previously charged with mem_cgroup_try_charge() and
5661 * mem_cgroup_commit_charge().
5662 */
5663void mem_cgroup_uncharge(struct page *page)
5664{
5665	struct uncharge_gather ug;
5666
5667	if (mem_cgroup_disabled())
5668		return;
5669
5670	/* Don't touch page->lru of any random page, pre-check: */
5671	if (!page->mem_cgroup)
5672		return;
5673
5674	uncharge_gather_clear(&ug);
5675	uncharge_page(page, &ug);
5676	uncharge_batch(&ug);
5677}
5678
5679/**
5680 * mem_cgroup_uncharge_list - uncharge a list of page
5681 * @page_list: list of pages to uncharge
5682 *
5683 * Uncharge a list of pages previously charged with
5684 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
5685 */
5686void mem_cgroup_uncharge_list(struct list_head *page_list)
5687{
5688	if (mem_cgroup_disabled())
5689		return;
5690
5691	if (!list_empty(page_list))
5692		uncharge_list(page_list);
5693}
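
/*
 * Illustrative sketch (hypothetical, not part of this file): batching
 * uncharges by collecting pages on a private list before they are
 * finally released; release_pages_somehow() is a placeholder for
 * whatever frees the pages afterwards.
 */
#if 0
static void example_uncharge_batch(struct page **pages, int nr)
{
	LIST_HEAD(free_list);
	int i;

	for (i = 0; i < nr; i++)
		list_add(&pages[i]->lru, &free_list);

	mem_cgroup_uncharge_list(&free_list);
	release_pages_somehow(&free_list);	/* hypothetical */
}
#endif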
5694
5695/**
5696 * mem_cgroup_migrate - charge a page's replacement
5697 * @oldpage: currently circulating page
5698 * @newpage: replacement page
5699 *
5700 * Charge @newpage as a replacement page for @oldpage. @oldpage will
5701 * be uncharged upon free.
 
5702 *
5703 * Both pages must be locked, @newpage->mapping must be set up.
5704 */
5705void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
5706{
5707	struct mem_cgroup *memcg;
5708	unsigned int nr_pages;
5709	bool compound;
5710	unsigned long flags;
5711
5712	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
5713	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
5714	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
5715	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
5716		       newpage);
5717
5718	if (mem_cgroup_disabled())
5719		return;
5720
5721	/* Page cache replacement: new page already charged? */
5722	if (newpage->mem_cgroup)
5723		return;
5724
5725	/* Swapcache readahead pages can get replaced before being charged */
5726	memcg = oldpage->mem_cgroup;
5727	if (!memcg)
5728		return;
5729
5730	/* Force-charge the new page. The old one will be freed soon */
5731	compound = PageTransHuge(newpage);
5732	nr_pages = compound ? hpage_nr_pages(newpage) : 1;
5733
5734	page_counter_charge(&memcg->memory, nr_pages);
5735	if (do_memsw_account())
5736		page_counter_charge(&memcg->memsw, nr_pages);
5737	css_get_many(&memcg->css, nr_pages);
5738
5739	commit_charge(newpage, memcg, false);
 
5740
5741	local_irq_save(flags);
5742	mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
5743	memcg_check_events(memcg, newpage);
5744	local_irq_restore(flags);
5745}
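
/*
 * Illustrative sketch (hypothetical, not part of this file): a page
 * cache replacement path handing the charge over to the new page.
 * Both pages are assumed locked and newpage->mapping set up, as the
 * comment above requires.
 */
#if 0
static void example_replace_cache_page(struct page *oldpage, struct page *newpage)
{
	/* newpage inherits the charge; oldpage is uncharged when freed */
	mem_cgroup_migrate(oldpage, newpage);
}
#endif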
5746
5747DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
5748EXPORT_SYMBOL(memcg_sockets_enabled_key);
5749
5750void mem_cgroup_sk_alloc(struct sock *sk)
5751{
5752	struct mem_cgroup *memcg;
5753
5754	if (!mem_cgroup_sockets_enabled)
5755		return;
5756
5757	/*
5758	 * Socket cloning can throw us here with sk_memcg already
5759	 * filled. It won't, however, necessarily happen from
5760	 * process context. So the test for root memcg given
5761	 * the current task's memcg won't help us in this case.
5762	 *
5763	 * Respecting the original socket's memcg is a better
5764	 * decision in this case.
5765	 */
5766	if (sk->sk_memcg) {
5767		css_get(&sk->sk_memcg->css);
5768		return;
5769	}
5770
5771	rcu_read_lock();
5772	memcg = mem_cgroup_from_task(current);
5773	if (memcg == root_mem_cgroup)
5774		goto out;
5775	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
5776		goto out;
5777	if (css_tryget_online(&memcg->css))
5778		sk->sk_memcg = memcg;
5779out:
5780	rcu_read_unlock();
5781}
5782
5783void mem_cgroup_sk_free(struct sock *sk)
5784{
5785	if (sk->sk_memcg)
5786		css_put(&sk->sk_memcg->css);
5787}
5788
5789/**
5790 * mem_cgroup_charge_skmem - charge socket memory
5791 * @memcg: memcg to charge
5792 * @nr_pages: number of pages to charge
 
5793 *
5794 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
5795 * @memcg's configured limit, %false if the charge had to be forced.
5796 */
5797bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 
5798{
5799	gfp_t gfp_mask = GFP_KERNEL;
5800
5801	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5802		struct page_counter *fail;
5803
5804		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
5805			memcg->tcpmem_pressure = 0;
5806			return true;
5807		}
5808		page_counter_charge(&memcg->tcpmem, nr_pages);
5809	memcg->tcpmem_pressure = 1;
5810		return false;
5811	}
5812
5813	/* Don't block in the packet receive path */
5814	if (in_softirq())
5815		gfp_mask = GFP_NOWAIT;
5816
5817	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
5818
5819	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
5820		return true;
 
5821
5822	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
5823	return false;
5824}
5825
5826/**
5827 * mem_cgroup_uncharge_skmem - uncharge socket memory
5828 * @memcg: memcg to uncharge
5829 * @nr_pages: number of pages to uncharge
5830 */
5831void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5832{
5833	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5834		page_counter_uncharge(&memcg->tcpmem, nr_pages);
5835		return;
5836	}
5837
5838	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
5839
5840	refill_stock(memcg, nr_pages);
5841}
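
/*
 * Illustrative sketch (hypothetical, not part of this file): a
 * networking path accounting buffer pages against a socket's memcg,
 * assuming sk->sk_memcg was set up by mem_cgroup_sk_alloc() above.
 * Backing off when the charge had to be forced is the caller's choice.
 */
#if 0
static bool example_account_socket_pages(struct sock *sk, unsigned int nr_pages)
{
	if (!sk->sk_memcg)
		return true;

	if (mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages))
		return true;	/* fit within the configured limit */

	/* charge was forced; undo it and signal pressure to the caller */
	mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
	return false;
}
#endif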
5842
5843static int __init cgroup_memory(char *s)
5844{
5845	char *token;
5846
5847	while ((token = strsep(&s, ",")) != NULL) {
5848		if (!*token)
5849			continue;
5850		if (!strcmp(token, "nosocket"))
5851			cgroup_memory_nosocket = true;
5852		if (!strcmp(token, "nokmem"))
5853			cgroup_memory_nokmem = true;
 
 
5854	}
5855	return 0;
5856}
5857__setup("cgroup.memory=", cgroup_memory);
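
/*
 * Example (hypothetical usage note, not part of this file): the tokens
 * parsed above come from the kernel command line, e.g.
 *
 *	cgroup.memory=nosocket,nokmem
 *
 * which disables socket and kernel memory accounting respectively.
 */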
5858
5859/*
5860 * subsys_initcall() for memory controller.
5861 *
5862 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
5863 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
5864 * basically everything that doesn't depend on a specific mem_cgroup structure
5865 * should be initialized from here.
5866 */
5867static int __init mem_cgroup_init(void)
5868{
5869	int cpu, node;
5870
5871#ifndef CONFIG_SLOB
5872	/*
5873	 * Kmem cache creation is mostly done with the slab_mutex held,
5874	 * so use a workqueue with limited concurrency to avoid stalling
5875	 * all worker threads in case lots of cgroups are created and
5876	 * destroyed simultaneously.
5877	 */
5878	memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
5879	BUG_ON(!memcg_kmem_cache_wq);
5880#endif
5881
5882	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
5883				  memcg_hotplug_cpu_dead);
5884
5885	for_each_possible_cpu(cpu)
5886		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5887			  drain_local_stock);
5888
5889	for_each_node(node) {
5890		struct mem_cgroup_tree_per_node *rtpn;
5891
5892		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
5893				    node_online(node) ? node : NUMA_NO_NODE);
5894
5895		rtpn->rb_root = RB_ROOT;
5896		rtpn->rb_rightmost = NULL;
5897		spin_lock_init(&rtpn->lock);
5898		soft_limit_tree.rb_tree_per_node[node] = rtpn;
5899	}
5900
5901	return 0;
5902}
5903subsys_initcall(mem_cgroup_init);
5904
5905#ifdef CONFIG_MEMCG_SWAP
5906static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
5907{
5908	while (!atomic_inc_not_zero(&memcg->id.ref)) {
5909		/*
5910		 * The root cgroup cannot be destroyed, so its refcount must
5911		 * always be >= 1.
5912		 */
5913		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
5914			VM_BUG_ON(1);
5915			break;
5916		}
5917		memcg = parent_mem_cgroup(memcg);
5918		if (!memcg)
5919			memcg = root_mem_cgroup;
5920	}
5921	return memcg;
5922}
5923
5924/**
5925 * mem_cgroup_swapout - transfer a memsw charge to swap
5926 * @page: page whose memsw charge to transfer
5927 * @entry: swap entry to move the charge to
5928 *
5929 * Transfer the memsw charge of @page to @entry.
5930 */
5931void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5932{
5933	struct mem_cgroup *memcg, *swap_memcg;
5934	unsigned int nr_entries;
5935	unsigned short oldid;
5936
5937	VM_BUG_ON_PAGE(PageLRU(page), page);
5938	VM_BUG_ON_PAGE(page_count(page), page);
5939
5940	if (!do_memsw_account())
5941		return;
5942
5943	memcg = page->mem_cgroup;
5944
5945	/* Readahead page, never charged */
5946	if (!memcg)
5947		return;
5948
5949	/*
5950	 * In case the memcg owning these pages has been offlined and doesn't
5951	 * have an ID allocated to it anymore, charge the closest online
5952	 * ancestor for the swap instead and transfer the memory+swap charge.
5953	 */
5954	swap_memcg = mem_cgroup_id_get_online(memcg);
5955	nr_entries = hpage_nr_pages(page);
5956	/* Get references for the tail pages, too */
5957	if (nr_entries > 1)
5958		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
5959	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
5960				   nr_entries);
5961	VM_BUG_ON_PAGE(oldid, page);
5962	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
5963
5964	page->mem_cgroup = NULL;
5965
5966	if (!mem_cgroup_is_root(memcg))
5967		page_counter_uncharge(&memcg->memory, nr_entries);
5968
5969	if (memcg != swap_memcg) {
5970		if (!mem_cgroup_is_root(swap_memcg))
5971			page_counter_charge(&swap_memcg->memsw, nr_entries);
5972		page_counter_uncharge(&memcg->memsw, nr_entries);
5973	}
5974
5975	/*
5976	 * Interrupts should be disabled here because the caller holds the
5977	 * i_pages lock which is taken with interrupts-off. It is
5978	 * important here to have the interrupts disabled because it is the
5979	 * only synchronisation we have for updating the per-CPU variables.
5980	 */
5981	VM_BUG_ON(!irqs_disabled());
5982	mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page),
5983				     -nr_entries);
5984	memcg_check_events(memcg, page);
5985
5986	if (!mem_cgroup_is_root(memcg))
5987		css_put_many(&memcg->css, nr_entries);
5988}
5989
5990/**
5991 * mem_cgroup_try_charge_swap - try charging swap space for a page
5992 * @page: page being added to swap
5993 * @entry: swap entry to charge
5994 *
5995 * Try to charge @page's memcg for the swap space at @entry.
5996 *
5997 * Returns 0 on success, -ENOMEM on failure.
5998 */
5999int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
6000{
6001	unsigned int nr_pages = hpage_nr_pages(page);
6002	struct page_counter *counter;
6003	struct mem_cgroup *memcg;
6004	unsigned short oldid;
6005
6006	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
6007		return 0;
6008
6009	memcg = page->mem_cgroup;
6010
6011	/* Readahead page, never charged */
6012	if (!memcg)
6013		return 0;
6014
6015	memcg = mem_cgroup_id_get_online(memcg);
6016
6017	if (!mem_cgroup_is_root(memcg) &&
6018	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
 
 
6019		mem_cgroup_id_put(memcg);
6020		return -ENOMEM;
6021	}
6022
6023	/* Get references for the tail pages, too */
6024	if (nr_pages > 1)
6025		mem_cgroup_id_get_many(memcg, nr_pages - 1);
6026	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
6027	VM_BUG_ON_PAGE(oldid, page);
6028	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
6029
6030	return 0;
6031}
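
/*
 * Illustrative sketch (hypothetical, not part of this file): a swap-out
 * style caller charging swap space before recording the entry and
 * backing out on failure; put_swap_entry_somehow() is a placeholder.
 */
#if 0
static int example_charge_swap_slot(struct page *page, swp_entry_t entry)
{
	if (mem_cgroup_try_charge_swap(page, entry)) {
		put_swap_entry_somehow(entry);	/* hypothetical */
		return -ENOMEM;
	}
	/* proceed to add the page to the swap cache */
	return 0;
}
#endif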
6032
6033/**
6034 * mem_cgroup_uncharge_swap - uncharge swap space
6035 * @entry: swap entry to uncharge
6036 * @nr_pages: the amount of swap space to uncharge
6037 */
6038void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
6039{
6040	struct mem_cgroup *memcg;
6041	unsigned short id;
6042
6043	if (!do_swap_account)
6044		return;
6045
6046	id = swap_cgroup_record(entry, 0, nr_pages);
6047	rcu_read_lock();
6048	memcg = mem_cgroup_from_id(id);
6049	if (memcg) {
6050		if (!mem_cgroup_is_root(memcg)) {
6051			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6052				page_counter_uncharge(&memcg->swap, nr_pages);
6053			else
6054				page_counter_uncharge(&memcg->memsw, nr_pages);
 
 
6055		}
6056		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
6057		mem_cgroup_id_put_many(memcg, nr_pages);
6058	}
6059	rcu_read_unlock();
6060}
6061
6062long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
6063{
6064	long nr_swap_pages = get_nr_swap_pages();
6065
6066	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
6067		return nr_swap_pages;
6068	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
6069		nr_swap_pages = min_t(long, nr_swap_pages,
6070				      READ_ONCE(memcg->swap.limit) -
6071				      page_counter_read(&memcg->swap));
6072	return nr_swap_pages;
6073}
6074
6075bool mem_cgroup_swap_full(struct page *page)
6076{
6077	struct mem_cgroup *memcg;
6078
6079	VM_BUG_ON_PAGE(!PageLocked(page), page);
6080
6081	if (vm_swap_full())
6082		return true;
6083	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
6084		return false;
6085
6086	memcg = page->mem_cgroup;
6087	if (!memcg)
6088		return false;
6089
6090	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
6091		if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
6092			return true;
6093
6094	return false;
6095}
6096
6097/* for remembering the boot option */
6098#ifdef CONFIG_MEMCG_SWAP_ENABLED
6099static int really_do_swap_account __initdata = 1;
6100#else
6101static int really_do_swap_account __initdata;
6102#endif
6103
6104static int __init enable_swap_account(char *s)
6105{
6106	if (!strcmp(s, "1"))
6107		really_do_swap_account = 1;
6108	else if (!strcmp(s, "0"))
6109		really_do_swap_account = 0;
6110	return 1;
6111}
6112__setup("swapaccount=", enable_swap_account);
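
/*
 * Example (hypothetical usage note, not part of this file): swap
 * accounting can be toggled at boot with "swapaccount=1" or
 * "swapaccount=0" on the kernel command line, which is what the
 * parser above consumes.
 */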
6113
6114static u64 swap_current_read(struct cgroup_subsys_state *css,
6115			     struct cftype *cft)
6116{
6117	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6118
6119	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
6120}
6121
6122static int swap_max_show(struct seq_file *m, void *v)
 
6123{
6124	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
6125	unsigned long max = READ_ONCE(memcg->swap.limit);
6126
6127	if (max == PAGE_COUNTER_MAX)
6128		seq_puts(m, "max\n");
6129	else
6130		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
6131
6132	return 0;
6133}
6134
6135static ssize_t swap_max_write(struct kernfs_open_file *of,
6136			      char *buf, size_t nbytes, loff_t off)
6137{
6138	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6139	unsigned long max;
6140	int err;
6141
6142	buf = strstrip(buf);
6143	err = page_counter_memparse(buf, "max", &max);
6144	if (err)
6145		return err;
6146
6147	mutex_lock(&memcg_limit_mutex);
6148	err = page_counter_limit(&memcg->swap, max);
6149	mutex_unlock(&memcg_limit_mutex);
6150	if (err)
6151		return err;
6152
6153	return nbytes;
6154}
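
/*
 * Example (hypothetical usage note, not part of this file): on the
 * unified hierarchy the handlers above back the memory.swap.current
 * and memory.swap.max files, so a group's swap usage can be capped
 * with e.g.
 *
 *	echo 1G > /sys/fs/cgroup/<group>/memory.swap.max
 */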
6155
6156static struct cftype swap_files[] = {
6157	{
6158		.name = "swap.current",
6159		.flags = CFTYPE_NOT_ON_ROOT,
6160		.read_u64 = swap_current_read,
6161	},
6162	{
6163		.name = "swap.max",
6164		.flags = CFTYPE_NOT_ON_ROOT,
6165		.seq_show = swap_max_show,
6166		.write = swap_max_write,
6167	},
6168	{ }	/* terminate */
6169};
6170
6171static struct cftype memsw_cgroup_files[] = {
6172	{
6173		.name = "memsw.usage_in_bytes",
6174		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
6175		.read_u64 = mem_cgroup_read_u64,
6176	},
6177	{
6178		.name = "memsw.max_usage_in_bytes",
6179		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
6180		.write = mem_cgroup_reset,
6181		.read_u64 = mem_cgroup_read_u64,
6182	},
6183	{
6184		.name = "memsw.limit_in_bytes",
6185		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
6186		.write = mem_cgroup_write,
6187		.read_u64 = mem_cgroup_read_u64,
6188	},
6189	{
6190		.name = "memsw.failcnt",
6191		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
6192		.write = mem_cgroup_reset,
6193		.read_u64 = mem_cgroup_read_u64,
6194	},
6195	{ },	/* terminate */
6196};
6197
6198static int __init mem_cgroup_swap_init(void)
6199{
6200	if (!mem_cgroup_disabled() && really_do_swap_account) {
6201		do_swap_account = 1;
6202		WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
6203					       swap_files));
6204		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
6205						  memsw_cgroup_files));
6206	}
6207	return 0;
6208}
6209subsys_initcall(mem_cgroup_swap_init);
6210
6211#endif /* CONFIG_MEMCG_SWAP */
v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* memcontrol.c - Memory Controller
   3 *
   4 * Copyright IBM Corporation, 2007
   5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
   6 *
   7 * Copyright 2007 OpenVZ SWsoft Inc
   8 * Author: Pavel Emelianov <xemul@openvz.org>
   9 *
  10 * Memory thresholds
  11 * Copyright (C) 2009 Nokia Corporation
  12 * Author: Kirill A. Shutemov
  13 *
  14 * Kernel Memory Controller
  15 * Copyright (C) 2012 Parallels Inc. and Google Inc.
  16 * Authors: Glauber Costa and Suleiman Souhlal
  17 *
  18 * Native page reclaim
  19 * Charge lifetime sanitation
  20 * Lockless page tracking & accounting
  21 * Unified hierarchy configuration model
  22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
  23 *
  24 * Per memcg lru locking
  25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
  26 */
  27
  28#include <linux/page_counter.h>
  29#include <linux/memcontrol.h>
  30#include <linux/cgroup.h>
  31#include <linux/pagewalk.h>
  32#include <linux/sched/mm.h>
  33#include <linux/shmem_fs.h>
  34#include <linux/hugetlb.h>
  35#include <linux/pagemap.h>
  36#include <linux/vm_event_item.h>
  37#include <linux/smp.h>
  38#include <linux/page-flags.h>
  39#include <linux/backing-dev.h>
  40#include <linux/bit_spinlock.h>
  41#include <linux/rcupdate.h>
  42#include <linux/limits.h>
  43#include <linux/export.h>
  44#include <linux/mutex.h>
  45#include <linux/rbtree.h>
  46#include <linux/slab.h>
  47#include <linux/swap.h>
  48#include <linux/swapops.h>
  49#include <linux/spinlock.h>
  50#include <linux/eventfd.h>
  51#include <linux/poll.h>
  52#include <linux/sort.h>
  53#include <linux/fs.h>
  54#include <linux/seq_file.h>
  55#include <linux/vmpressure.h>
  56#include <linux/memremap.h>
  57#include <linux/mm_inline.h>
  58#include <linux/swap_cgroup.h>
  59#include <linux/cpu.h>
  60#include <linux/oom.h>
  61#include <linux/lockdep.h>
  62#include <linux/file.h>
  63#include <linux/resume_user_mode.h>
  64#include <linux/psi.h>
  65#include <linux/seq_buf.h>
  66#include <linux/sched/isolation.h>
  67#include <linux/kmemleak.h>
  68#include "internal.h"
  69#include <net/sock.h>
  70#include <net/ip.h>
  71#include "slab.h"
  72#include "swap.h"
  73
  74#include <linux/uaccess.h>
  75
  76#include <trace/events/vmscan.h>
  77
  78struct cgroup_subsys memory_cgrp_subsys __read_mostly;
  79EXPORT_SYMBOL(memory_cgrp_subsys);
  80
  81struct mem_cgroup *root_mem_cgroup __read_mostly;
  82
  83/* Active memory cgroup to use from an interrupt context */
  84DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
  85EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
  86
  87/* Socket memory accounting disabled? */
  88static bool cgroup_memory_nosocket __ro_after_init;
  89
  90/* Kernel memory accounting disabled? */
  91static bool cgroup_memory_nokmem __ro_after_init;
  92
  93/* BPF memory accounting disabled? */
  94static bool cgroup_memory_nobpf __ro_after_init;
  95
  96#ifdef CONFIG_CGROUP_WRITEBACK
  97static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
  98#endif
  99
 100/* Whether legacy memory+swap accounting is active */
 101static bool do_memsw_account(void)
 102{
 103	return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
 104}
  105
  106#define THRESHOLDS_EVENTS_TARGET 128
  107#define SOFTLIMIT_EVENTS_TARGET 1024
  108
 109/*
 110 * Cgroups above their limits are maintained in a RB-Tree, independent of
 111 * their hierarchy representation
 112 */
 113
 114struct mem_cgroup_tree_per_node {
 115	struct rb_root rb_root;
 116	struct rb_node *rb_rightmost;
 117	spinlock_t lock;
 118};
 119
 120struct mem_cgroup_tree {
 121	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
 122};
 123
 124static struct mem_cgroup_tree soft_limit_tree __read_mostly;
 125
 126/* for OOM */
 127struct mem_cgroup_eventfd_list {
 128	struct list_head list;
 129	struct eventfd_ctx *eventfd;
 130};
 131
 132/*
 133 * cgroup_event represents events which userspace want to receive.
 134 */
 135struct mem_cgroup_event {
 136	/*
 137	 * memcg which the event belongs to.
 138	 */
 139	struct mem_cgroup *memcg;
 140	/*
 141	 * eventfd to signal userspace about the event.
 142	 */
 143	struct eventfd_ctx *eventfd;
 144	/*
 145	 * Each of these stored in a list by the cgroup.
 146	 */
 147	struct list_head list;
 148	/*
 149	 * register_event() callback will be used to add new userspace
 150	 * waiter for changes related to this event.  Use eventfd_signal()
 151	 * on eventfd to send notification to userspace.
 152	 */
 153	int (*register_event)(struct mem_cgroup *memcg,
 154			      struct eventfd_ctx *eventfd, const char *args);
 155	/*
 156	 * unregister_event() callback will be called when userspace closes
 157	 * the eventfd or on cgroup removing.  This callback must be set,
  158	 * the eventfd or on cgroup removal.  This callback must be set
  159	 * if you want to provide notification functionality.
 160	void (*unregister_event)(struct mem_cgroup *memcg,
 161				 struct eventfd_ctx *eventfd);
 162	/*
 163	 * All fields below needed to unregister event when
 164	 * userspace closes eventfd.
 165	 */
 166	poll_table pt;
 167	wait_queue_head_t *wqh;
 168	wait_queue_entry_t wait;
 169	struct work_struct remove;
 170};
 171
 172static void mem_cgroup_threshold(struct mem_cgroup *memcg);
 173static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
 174
 175/* Stuffs for move charges at task migration. */
 176/*
 177 * Types of charges to be moved.
 178 */
 179#define MOVE_ANON	0x1U
 180#define MOVE_FILE	0x2U
 181#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
 182
 183/* "mc" and its members are protected by cgroup_mutex */
 184static struct move_charge_struct {
 185	spinlock_t	  lock; /* for from, to */
 186	struct mm_struct  *mm;
 187	struct mem_cgroup *from;
 188	struct mem_cgroup *to;
 189	unsigned long flags;
 190	unsigned long precharge;
 191	unsigned long moved_charge;
 192	unsigned long moved_swap;
 193	struct task_struct *moving_task;	/* a task moving charges */
 194	wait_queue_head_t waitq;		/* a waitq for other context */
 195} mc = {
 196	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
 197	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
 198};
 199
 200/*
 201 * Maximum loops in mem_cgroup_soft_reclaim(), used for soft
 202 * limit reclaim to prevent infinite loops, if they ever occur.
 203 */
 204#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
 205#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
  206
  207/* for encoding cft->private value on file */
 208enum res_type {
 209	_MEM,
 210	_MEMSWAP,
 
 211	_KMEM,
 212	_TCP,
 213};
 214
 215#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
 216#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
 217#define MEMFILE_ATTR(val)	((val) & 0xffff)
 218
 219/*
 220 * Iteration constructs for visiting all cgroups (under a tree).  If
 221 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 222 * be used for reference counting.
 223 */
 224#define for_each_mem_cgroup_tree(iter, root)		\
 225	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
 226	     iter != NULL;				\
 227	     iter = mem_cgroup_iter(root, iter, NULL))
 228
 229#define for_each_mem_cgroup(iter)			\
 230	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
 231	     iter != NULL;				\
 232	     iter = mem_cgroup_iter(NULL, iter, NULL))
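
/*
 * Illustrative sketch (hypothetical, not part of this file): walking a
 * subtree with the construct above.  Because the walk below is exited
 * early, the last returned memcg must be handed to
 * mem_cgroup_iter_break(), as the comment requires; wants_to_stop() is
 * a placeholder predicate.
 */
#if 0
static void example_walk_subtree(struct mem_cgroup *root)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, root) {
		if (wants_to_stop(iter)) {	/* hypothetical */
			mem_cgroup_iter_break(root, iter);
			break;
		}
	}
}
#endif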
 233
 234static inline bool task_is_dying(void)
 235{
 236	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
 237		(current->flags & PF_EXITING);
 238}
 239
 240/* Some nice accessors for the vmpressure. */
 241struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
 242{
 243	if (!memcg)
 244		memcg = root_mem_cgroup;
 245	return &memcg->vmpressure;
 246}
 247
 248struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
 249{
 250	return container_of(vmpr, struct mem_cgroup, vmpressure);
 251}
 252
 253#define CURRENT_OBJCG_UPDATE_BIT 0
 254#define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)
 255
 256#ifdef CONFIG_MEMCG_KMEM
 257static DEFINE_SPINLOCK(objcg_lock);
 258
 259bool mem_cgroup_kmem_disabled(void)
 260{
 261	return cgroup_memory_nokmem;
 262}
 263
 264static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
  265				      unsigned int nr_pages);
  266
 267static void obj_cgroup_release(struct percpu_ref *ref)
 268{
 269	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
 270	unsigned int nr_bytes;
 271	unsigned int nr_pages;
 272	unsigned long flags;
 273
 274	/*
 275	 * At this point all allocated objects are freed, and
 276	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
 277	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
 278	 *
 279	 * The following sequence can lead to it:
 280	 * 1) CPU0: objcg == stock->cached_objcg
 281	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
 282	 *          PAGE_SIZE bytes are charged
 283	 * 3) CPU1: a process from another memcg is allocating something,
  284	 *          the stock is flushed,
  285	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
  286	 * 4) CPU0: we release this object,
  287	 *          92 bytes are added to stock->nr_bytes
  288	 * 5) CPU0: stock is flushed,
  289	 *          92 bytes are added to objcg->nr_charged_bytes
 290	 *
 291	 * In the result, nr_charged_bytes == PAGE_SIZE.
 292	 * This page will be uncharged in obj_cgroup_release().
 293	 */
 294	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
 295	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
 296	nr_pages = nr_bytes >> PAGE_SHIFT;
 297
 298	if (nr_pages)
 299		obj_cgroup_uncharge_pages(objcg, nr_pages);
 300
 301	spin_lock_irqsave(&objcg_lock, flags);
 302	list_del(&objcg->list);
 303	spin_unlock_irqrestore(&objcg_lock, flags);
 304
 305	percpu_ref_exit(ref);
 306	kfree_rcu(objcg, rcu);
 307}
 308
 309static struct obj_cgroup *obj_cgroup_alloc(void)
 310{
 311	struct obj_cgroup *objcg;
 312	int ret;
 313
 314	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
 315	if (!objcg)
 316		return NULL;
 317
 318	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
 319			      GFP_KERNEL);
 320	if (ret) {
 321		kfree(objcg);
 322		return NULL;
 323	}
 324	INIT_LIST_HEAD(&objcg->list);
 325	return objcg;
 326}
 327
 328static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
 329				  struct mem_cgroup *parent)
 330{
 331	struct obj_cgroup *objcg, *iter;
 332
 333	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
 334
 335	spin_lock_irq(&objcg_lock);
 336
 337	/* 1) Ready to reparent active objcg. */
 338	list_add(&objcg->list, &memcg->objcg_list);
 339	/* 2) Reparent active objcg and already reparented objcgs to parent. */
 340	list_for_each_entry(iter, &memcg->objcg_list, list)
 341		WRITE_ONCE(iter->memcg, parent);
 342	/* 3) Move already reparented objcgs to the parent's list */
 343	list_splice(&memcg->objcg_list, &parent->objcg_list);
 344
 345	spin_unlock_irq(&objcg_lock);
 346
 347	percpu_ref_kill(&objcg->refcnt);
 348}
 349
 350/*
 351 * A lot of the calls to the cache allocation functions are expected to be
 352 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
  353 * conditional to this static branch, we'll have to allow modules that do
  354 * kmem_cache_alloc and the like to see this symbol as well.
 355 */
 356DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
 357EXPORT_SYMBOL(memcg_kmem_online_key);
 
 
 358
 359DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
 360EXPORT_SYMBOL(memcg_bpf_enabled_key);
 361#endif
 362
 363/**
 364 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
 365 * @folio: folio of interest
 366 *
 367 * If memcg is bound to the default hierarchy, css of the memcg associated
 368 * with @folio is returned.  The returned css remains associated with @folio
 369 * until it is released.
 370 *
 371 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 372 * is returned.
 373 */
 374struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
 375{
 376	struct mem_cgroup *memcg = folio_memcg(folio);
 
 
 377
 378	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
 379		memcg = root_mem_cgroup;
 380
 381	return &memcg->css;
 382}
 383
 384/**
 385 * page_cgroup_ino - return inode number of the memcg a page is charged to
 386 * @page: the page
 387 *
 388 * Look up the closest online ancestor of the memory cgroup @page is charged to
 389 * and return its inode number or 0 if @page is not charged to any cgroup. It
 390 * is safe to call this function without holding a reference to @page.
 391 *
 392 * Note, this function is inherently racy, because there is nothing to prevent
 393 * the cgroup inode from getting torn down and potentially reallocated a moment
  394 * after page_cgroup_ino() returns, so it should only be used by callers that
 395 * do not care (such as procfs interfaces).
 396 */
 397ino_t page_cgroup_ino(struct page *page)
 398{
 399	struct mem_cgroup *memcg;
 400	unsigned long ino = 0;
 401
 402	rcu_read_lock();
 403	/* page_folio() is racy here, but the entire function is racy anyway */
 404	memcg = folio_memcg_check(page_folio(page));
 405
 406	while (memcg && !(memcg->css.flags & CSS_ONLINE))
 407		memcg = parent_mem_cgroup(memcg);
 408	if (memcg)
 409		ino = cgroup_ino(memcg->css.cgroup);
 410	rcu_read_unlock();
 411	return ino;
 412}
  413
  414static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
 415					 struct mem_cgroup_tree_per_node *mctz,
 416					 unsigned long new_usage_in_excess)
 417{
 418	struct rb_node **p = &mctz->rb_root.rb_node;
 419	struct rb_node *parent = NULL;
 420	struct mem_cgroup_per_node *mz_node;
 421	bool rightmost = true;
 422
 423	if (mz->on_tree)
 424		return;
 425
 426	mz->usage_in_excess = new_usage_in_excess;
 427	if (!mz->usage_in_excess)
 428		return;
 429	while (*p) {
 430		parent = *p;
 431		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
 432					tree_node);
 433		if (mz->usage_in_excess < mz_node->usage_in_excess) {
 434			p = &(*p)->rb_left;
 435			rightmost = false;
  436		} else {
  437			p = &(*p)->rb_right;
 438		}
 439	}
 440
 441	if (rightmost)
 442		mctz->rb_rightmost = &mz->tree_node;
 443
 444	rb_link_node(&mz->tree_node, parent, p);
 445	rb_insert_color(&mz->tree_node, &mctz->rb_root);
 446	mz->on_tree = true;
 447}
 448
 449static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
 450					 struct mem_cgroup_tree_per_node *mctz)
 451{
 452	if (!mz->on_tree)
 453		return;
 454
 455	if (&mz->tree_node == mctz->rb_rightmost)
 456		mctz->rb_rightmost = rb_prev(&mz->tree_node);
 457
 458	rb_erase(&mz->tree_node, &mctz->rb_root);
 459	mz->on_tree = false;
 460}
 461
 462static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
 463				       struct mem_cgroup_tree_per_node *mctz)
 464{
 465	unsigned long flags;
 466
 467	spin_lock_irqsave(&mctz->lock, flags);
 468	__mem_cgroup_remove_exceeded(mz, mctz);
 469	spin_unlock_irqrestore(&mctz->lock, flags);
 470}
 471
 472static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
 473{
 474	unsigned long nr_pages = page_counter_read(&memcg->memory);
 475	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
 476	unsigned long excess = 0;
 477
 478	if (nr_pages > soft_limit)
 479		excess = nr_pages - soft_limit;
 480
 481	return excess;
 482}
 483
 484static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
 485{
 486	unsigned long excess;
 487	struct mem_cgroup_per_node *mz;
 488	struct mem_cgroup_tree_per_node *mctz;
 489
 490	if (lru_gen_enabled()) {
 491		if (soft_limit_excess(memcg))
 492			lru_gen_soft_reclaim(memcg, nid);
 493		return;
 494	}
 495
 496	mctz = soft_limit_tree.rb_tree_per_node[nid];
 497	if (!mctz)
 498		return;
 499	/*
  500	 * Necessary to update all ancestors when hierarchy is used,
  501	 * because their event counter is not touched.
 502	 */
 503	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
 504		mz = memcg->nodeinfo[nid];
 505		excess = soft_limit_excess(memcg);
 506		/*
 507		 * We have to update the tree if mz is on RB-tree or
 508		 * mem is over its softlimit.
 509		 */
 510		if (excess || mz->on_tree) {
 511			unsigned long flags;
 512
 513			spin_lock_irqsave(&mctz->lock, flags);
 514			/* if on-tree, remove it */
 515			if (mz->on_tree)
 516				__mem_cgroup_remove_exceeded(mz, mctz);
 517			/*
 518			 * Insert again. mz->usage_in_excess will be updated.
 519			 * If excess is 0, no tree ops.
 520			 */
 521			__mem_cgroup_insert_exceeded(mz, mctz, excess);
 522			spin_unlock_irqrestore(&mctz->lock, flags);
 523		}
 524	}
 525}
 526
 527static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
 528{
 529	struct mem_cgroup_tree_per_node *mctz;
 530	struct mem_cgroup_per_node *mz;
 531	int nid;
 532
 533	for_each_node(nid) {
 534		mz = memcg->nodeinfo[nid];
 535		mctz = soft_limit_tree.rb_tree_per_node[nid];
 536		if (mctz)
 537			mem_cgroup_remove_exceeded(mz, mctz);
 538	}
 539}
 540
 541static struct mem_cgroup_per_node *
 542__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
 543{
 544	struct mem_cgroup_per_node *mz;
 545
 546retry:
 547	mz = NULL;
 548	if (!mctz->rb_rightmost)
 549		goto done;		/* Nothing to reclaim from */
 550
 551	mz = rb_entry(mctz->rb_rightmost,
 552		      struct mem_cgroup_per_node, tree_node);
 553	/*
 554	 * Remove the node now but someone else can add it back,
  555	 * we will add it back at the end of reclaim to its correct
 556	 * position in the tree.
 557	 */
 558	__mem_cgroup_remove_exceeded(mz, mctz);
 559	if (!soft_limit_excess(mz->memcg) ||
 560	    !css_tryget(&mz->memcg->css))
 561		goto retry;
 562done:
 563	return mz;
 564}
 565
 566static struct mem_cgroup_per_node *
 567mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
 568{
 569	struct mem_cgroup_per_node *mz;
 570
 571	spin_lock_irq(&mctz->lock);
 572	mz = __mem_cgroup_largest_soft_limit_node(mctz);
 573	spin_unlock_irq(&mctz->lock);
 574	return mz;
 575}
 576
 577/* Subset of vm_event_item to report for memcg event stats */
 578static const unsigned int memcg_vm_event_stat[] = {
 579	PGPGIN,
 580	PGPGOUT,
 581	PGSCAN_KSWAPD,
 582	PGSCAN_DIRECT,
 583	PGSCAN_KHUGEPAGED,
 584	PGSTEAL_KSWAPD,
 585	PGSTEAL_DIRECT,
 586	PGSTEAL_KHUGEPAGED,
 587	PGFAULT,
 588	PGMAJFAULT,
 589	PGREFILL,
 590	PGACTIVATE,
 591	PGDEACTIVATE,
 592	PGLAZYFREE,
 593	PGLAZYFREED,
 594#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
 595	ZSWPIN,
 596	ZSWPOUT,
 597	ZSWPWB,
 598#endif
 599#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 600	THP_FAULT_ALLOC,
 601	THP_COLLAPSE_ALLOC,
 602	THP_SWPOUT,
 603	THP_SWPOUT_FALLBACK,
 604#endif
 605};
 606
 607#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
 608static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
 609
 610static void init_memcg_events(void)
 611{
 612	int i;
 613
 614	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
 615		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
 616}
 617
 618static inline int memcg_events_index(enum vm_event_item idx)
 619{
 620	return mem_cgroup_events_index[idx] - 1;
 621}
 622
 623struct memcg_vmstats_percpu {
 624	/* Stats updates since the last flush */
 625	unsigned int			stats_updates;
 626
 627	/* Cached pointers for fast iteration in memcg_rstat_updated() */
 628	struct memcg_vmstats_percpu	*parent;
 629	struct memcg_vmstats		*vmstats;
 630
 631	/* The above should fit a single cacheline for memcg_rstat_updated() */
 632
 633	/* Local (CPU and cgroup) page state & events */
 634	long			state[MEMCG_NR_STAT];
 635	unsigned long		events[NR_MEMCG_EVENTS];
 636
 637	/* Delta calculation for lockless upward propagation */
 638	long			state_prev[MEMCG_NR_STAT];
 639	unsigned long		events_prev[NR_MEMCG_EVENTS];
 640
 641	/* Cgroup1: threshold notifications & softlimit tree updates */
 642	unsigned long		nr_page_events;
 643	unsigned long		targets[MEM_CGROUP_NTARGETS];
 644} ____cacheline_aligned;
 645
 646struct memcg_vmstats {
 647	/* Aggregated (CPU and subtree) page state & events */
 648	long			state[MEMCG_NR_STAT];
 649	unsigned long		events[NR_MEMCG_EVENTS];
 650
 651	/* Non-hierarchical (CPU aggregated) page state & events */
 652	long			state_local[MEMCG_NR_STAT];
 653	unsigned long		events_local[NR_MEMCG_EVENTS];
 654
 655	/* Pending child counts during tree propagation */
 656	long			state_pending[MEMCG_NR_STAT];
 657	unsigned long		events_pending[NR_MEMCG_EVENTS];
 658
 659	/* Stats updates since the last flush */
 660	atomic64_t		stats_updates;
 661};
 662
 663/*
 664 * memcg and lruvec stats flushing
 665 *
 666 * Many codepaths leading to stats update or read are performance sensitive and
 667 * adding stats flushing in such codepaths is not desirable. So, to optimize the
 668 * flushing the kernel does:
 669 *
 670 * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
 671 *    rstat update tree grow unbounded.
 672 *
 673 * 2) Flush the stats synchronously on reader side only when there are more than
  674 *    (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization
  675 *    can let the stats be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus),
  676 *    but only for 2 seconds due to (1).
 677 */
 678static void flush_memcg_stats_dwork(struct work_struct *w);
 679static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
 680static u64 flush_last_time;
 681
 682#define FLUSH_TIME (2UL*HZ)
 683
 684/*
  685 * Accessors to ensure that preemption is disabled on PREEMPT_RT because it
  686 * cannot rely on this as part of an acquired spinlock_t lock. These functions
  687 * are never used in hardirq context on PREEMPT_RT and therefore disabling
  688 * preemption is sufficient.
 689 */
 690static void memcg_stats_lock(void)
 691{
 692	preempt_disable_nested();
 693	VM_WARN_ON_IRQS_ENABLED();
 694}
 695
 696static void __memcg_stats_lock(void)
 697{
 698	preempt_disable_nested();
 699}
 700
 701static void memcg_stats_unlock(void)
 702{
 703	preempt_enable_nested();
 704}
 705
 706
 707static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
 708{
 709	return atomic64_read(&vmstats->stats_updates) >
 710		MEMCG_CHARGE_BATCH * num_online_cpus();
 711}
 712
 713static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
 714{
 715	struct memcg_vmstats_percpu *statc;
 716	int cpu = smp_processor_id();
 717
 718	if (!val)
 719		return;
 720
 721	cgroup_rstat_updated(memcg->css.cgroup, cpu);
 722	statc = this_cpu_ptr(memcg->vmstats_percpu);
 723	for (; statc; statc = statc->parent) {
 724		statc->stats_updates += abs(val);
 725		if (statc->stats_updates < MEMCG_CHARGE_BATCH)
 726			continue;
 727
 728		/*
 729		 * If @memcg is already flush-able, increasing stats_updates is
 730		 * redundant. Avoid the overhead of the atomic update.
 731		 */
 732		if (!memcg_vmstats_needs_flush(statc->vmstats))
 733			atomic64_add(statc->stats_updates,
 734				     &statc->vmstats->stats_updates);
 735		statc->stats_updates = 0;
 736	}
 737}
 738
 739static void do_flush_stats(struct mem_cgroup *memcg)
 740{
 741	if (mem_cgroup_is_root(memcg))
 742		WRITE_ONCE(flush_last_time, jiffies_64);
 743
 744	cgroup_rstat_flush(memcg->css.cgroup);
 745}
 746
 747/*
 748 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
 749 * @memcg: root of the subtree to flush
 750 *
 751 * Flushing is serialized by the underlying global rstat lock. There is also a
 752 * minimum amount of work to be done even if there are no stat updates to flush.
 753 * Hence, we only flush the stats if the updates delta exceeds a threshold. This
 754 * avoids unnecessary work and contention on the underlying lock.
 755 */
 756void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
 757{
 758	if (mem_cgroup_disabled())
 759		return;
 760
 761	if (!memcg)
 762		memcg = root_mem_cgroup;
 763
 764	if (memcg_vmstats_needs_flush(memcg->vmstats))
 765		do_flush_stats(memcg);
 766}
 767
 768void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
 769{
 770	/* Only flush if the periodic flusher is one full cycle late */
 771	if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
 772		mem_cgroup_flush_stats(memcg);
 773}
 774
 775static void flush_memcg_stats_dwork(struct work_struct *w)
 776{
 777	/*
 778	 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
 779	 * in latency-sensitive paths is as cheap as possible.
 780	 */
 781	do_flush_stats(root_mem_cgroup);
 782	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
 783}
 784
 785unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
 786{
 787	long x = READ_ONCE(memcg->vmstats->state[idx]);
 788#ifdef CONFIG_SMP
 789	if (x < 0)
 790		x = 0;
 791#endif
 792	return x;
 793}
 794
 795static int memcg_page_state_unit(int item);
 796
 797/*
 798 * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
 799 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
 800 */
 801static int memcg_state_val_in_pages(int idx, int val)
 802{
 803	int unit = memcg_page_state_unit(idx);
 804
 805	if (!val || unit == PAGE_SIZE)
 806		return val;
 807	else
 808		return max(val * unit / PAGE_SIZE, 1UL);
 809}
 810
 811/**
 812 * __mod_memcg_state - update cgroup memory statistics
 813 * @memcg: the memory cgroup
 814 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 815 * @val: delta to add to the counter, can be negative
 816 */
 817void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
 818{
 819	if (mem_cgroup_disabled())
 820		return;
 821
 822	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
 823	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
 824}
 825
 826/* idx can be of type enum memcg_stat_item or node_stat_item. */
 827static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
 828{
 829	long x = READ_ONCE(memcg->vmstats->state_local[idx]);
 830
 831#ifdef CONFIG_SMP
 832	if (x < 0)
 833		x = 0;
 834#endif
 835	return x;
 836}
 837
 838void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 839			      int val)
 840{
 841	struct mem_cgroup_per_node *pn;
 842	struct mem_cgroup *memcg;
 843
 844	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 845	memcg = pn->memcg;
 846
 847	/*
  848	 * The callers from rmap rely on disabled preemption because they never
  849	 * update their counter from in-interrupt context. For these counters
  850	 * we check that the update is never performed from an interrupt
  851	 * context, while other callers need to have interrupts disabled.
 852	 */
 853	__memcg_stats_lock();
 854	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
 855		switch (idx) {
 856		case NR_ANON_MAPPED:
 857		case NR_FILE_MAPPED:
 858		case NR_ANON_THPS:
 859		case NR_SHMEM_PMDMAPPED:
 860		case NR_FILE_PMDMAPPED:
 861			WARN_ON_ONCE(!in_task());
 862			break;
 863		default:
 864			VM_WARN_ON_IRQS_ENABLED();
 865		}
 866	}
 867
 868	/* Update memcg */
 869	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
 870
 871	/* Update lruvec */
 872	__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
 873
 874	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
 875	memcg_stats_unlock();
 876}
 877
 878/**
 879 * __mod_lruvec_state - update lruvec memory statistics
 880 * @lruvec: the lruvec
 881 * @idx: the stat item
 882 * @val: delta to add to the counter, can be negative
 883 *
 884 * The lruvec is the intersection of the NUMA node and a cgroup. This
  885 * function updates all three counters that are affected by a
 886 * change of state at this level: per-node, per-cgroup, per-lruvec.
 887 */
 888void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 889			int val)
 890{
 891	/* Update node */
 892	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
 893
 894	/* Update memcg and lruvec */
 895	if (!mem_cgroup_disabled())
 896		__mod_memcg_lruvec_state(lruvec, idx, val);
 897}
 898
 899void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
 900			     int val)
 901{
 902	struct mem_cgroup *memcg;
 903	pg_data_t *pgdat = folio_pgdat(folio);
 904	struct lruvec *lruvec;
 905
 906	rcu_read_lock();
 907	memcg = folio_memcg(folio);
 908	/* Untracked pages have no memcg, no lruvec. Update only the node */
 909	if (!memcg) {
 910		rcu_read_unlock();
 911		__mod_node_page_state(pgdat, idx, val);
 912		return;
 913	}
 914
 915	lruvec = mem_cgroup_lruvec(memcg, pgdat);
 916	__mod_lruvec_state(lruvec, idx, val);
 917	rcu_read_unlock();
 918}
 919EXPORT_SYMBOL(__lruvec_stat_mod_folio);
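
/*
 * Illustrative sketch (hypothetical, not part of this file): a caller
 * bumping a node_stat_item for a folio it has just dirtied; the
 * function above resolves the memcg and lruvec internally.
 */
#if 0
static void example_account_dirty(struct folio *folio)
{
	__lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, folio_nr_pages(folio));
}
#endif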
 920
 921void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
 
 922{
 923	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
 924	struct mem_cgroup *memcg;
 925	struct lruvec *lruvec;
 926
 927	rcu_read_lock();
 928	memcg = mem_cgroup_from_slab_obj(p);
 929
 930	/*
 931	 * Untracked pages have no memcg, no lruvec. Update only the
 932	 * node. If we reparent the slab objects to the root memcg,
 933	 * when we free the slab object, we need to update the per-memcg
 934	 * vmstats to keep it correct for the root memcg.
 935	 */
 936	if (!memcg) {
 937		__mod_node_page_state(pgdat, idx, val);
 938	} else {
 939		lruvec = mem_cgroup_lruvec(memcg, pgdat);
 940		__mod_lruvec_state(lruvec, idx, val);
 941	}
 942	rcu_read_unlock();
 943}
 944
 945/**
 946 * __count_memcg_events - account VM events in a cgroup
 947 * @memcg: the memory cgroup
 948 * @idx: the event item
 949 * @count: the number of events that occurred
 950 */
 951void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
 952			  unsigned long count)
 953{
 954	int index = memcg_events_index(idx);
 
 955
 956	if (mem_cgroup_disabled() || index < 0)
 957		return;
 958
 959	memcg_stats_lock();
 960	__this_cpu_add(memcg->vmstats_percpu->events[index], count);
 961	memcg_rstat_updated(memcg, count);
 962	memcg_stats_unlock();
 963}
 964
 965static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
 966{
 967	int index = memcg_events_index(event);
 968
 969	if (index < 0)
 970		return 0;
 971	return READ_ONCE(memcg->vmstats->events[index]);
 972}
 973
 974static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
 975{
 976	int index = memcg_events_index(event);
 977
 978	if (index < 0)
 979		return 0;
 980
 981	return READ_ONCE(memcg->vmstats->events_local[index]);
 982}
 983
 984static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 985					 int nr_pages)
 986{
 987	/* pagein of a big page is an event. So, ignore page size */
 988	if (nr_pages > 0)
 989		__count_memcg_events(memcg, PGPGIN, 1);
 990	else {
 991		__count_memcg_events(memcg, PGPGOUT, 1);
 992		nr_pages = -nr_pages; /* for event */
 993	}
 994
 995	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
 996}
 997
 998static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
 999				       enum mem_cgroup_events_target target)
1000{
1001	unsigned long val, next;
1002
1003	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
1004	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
1005	/* from time_after() in jiffies.h */
1006	if ((long)(next - val) < 0) {
1007		switch (target) {
1008		case MEM_CGROUP_TARGET_THRESH:
1009			next = val + THRESHOLDS_EVENTS_TARGET;
1010			break;
1011		case MEM_CGROUP_TARGET_SOFTLIMIT:
1012			next = val + SOFTLIMIT_EVENTS_TARGET;
 1013			break;
 1014		default:
1015			break;
1016		}
1017		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
1018		return true;
1019	}
1020	return false;
1021}
1022
1023/*
1024 * Check events in order.
1025 *
1026 */
1027static void memcg_check_events(struct mem_cgroup *memcg, int nid)
1028{
1029	if (IS_ENABLED(CONFIG_PREEMPT_RT))
1030		return;
1031
1032	/* threshold event is triggered in finer grain than soft limit */
1033	if (unlikely(mem_cgroup_event_ratelimit(memcg,
1034						MEM_CGROUP_TARGET_THRESH))) {
 1035		bool do_softlimit;
 1036
 1037		do_softlimit = mem_cgroup_event_ratelimit(memcg,
 1038						MEM_CGROUP_TARGET_SOFTLIMIT);
 1039		mem_cgroup_threshold(memcg);
 1040		if (unlikely(do_softlimit))
 1041			mem_cgroup_update_tree(memcg, nid);
 1042	}
1043}
1044
1045struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
1046{
1047	/*
1048	 * mm_update_next_owner() may clear mm->owner to NULL
1049	 * if it races with swapoff, page migration, etc.
1050	 * So this can be called with p == NULL.
1051	 */
1052	if (unlikely(!p))
1053		return NULL;
1054
1055	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
1056}
1057EXPORT_SYMBOL(mem_cgroup_from_task);
1058
1059static __always_inline struct mem_cgroup *active_memcg(void)
1060{
1061	if (!in_task())
1062		return this_cpu_read(int_active_memcg);
1063	else
1064		return current->active_memcg;
1065}
1066
1067/**
1068 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
1069 * @mm: mm from which memcg should be extracted. It can be NULL.
1070 *
1071 * Obtain a reference on mm->memcg and returns it if successful. If mm
1072 * is NULL, then the memcg is chosen as follows:
1073 * 1) The active memcg, if set.
1074 * 2) current->mm->memcg, if available
1075 * 3) root memcg
1076 * If mem_cgroup is disabled, NULL is returned.
1077 */
1078struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1079{
1080	struct mem_cgroup *memcg;
1081
1082	if (mem_cgroup_disabled())
1083		return NULL;
1084
1085	/*
1086	 * Page cache insertions can happen without an
1087	 * actual mm context, e.g. during disk probing
1088	 * on boot, loopback IO, acct() writes etc.
1089	 *
1090	 * No need to css_get on root memcg as the reference
1091	 * counting is disabled on the root level in the
1092	 * cgroup core. See CSS_NO_REF.
1093	 */
1094	if (unlikely(!mm)) {
1095		memcg = active_memcg();
1096		if (unlikely(memcg)) {
1097			/* remote memcg must hold a ref */
1098			css_get(&memcg->css);
1099			return memcg;
1100		}
1101		mm = current->mm;
1102		if (unlikely(!mm))
1103			return root_mem_cgroup;
1104	}
1105
1106	rcu_read_lock();
1107	do {
1108		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
 1109		if (unlikely(!memcg))
 1110			memcg = root_mem_cgroup;
1111	} while (!css_tryget(&memcg->css));
1112	rcu_read_unlock();
1113	return memcg;
1114}
1115EXPORT_SYMBOL(get_mem_cgroup_from_mm);
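
/*
 * Illustrative sketch (hypothetical, not part of this file): taking a
 * temporary reference on an mm's memcg and dropping it when done, per
 * the rules documented above.
 */
#if 0
static void example_inspect_mm_memcg(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);

	if (!memcg)
		return;		/* memory cgroups are disabled */

	/* ... inspect or charge against memcg ... */
	css_put(&memcg->css);
}
#endif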
1116
1117/**
1118 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
1119 */
1120struct mem_cgroup *get_mem_cgroup_from_current(void)
1121{
1122	struct mem_cgroup *memcg;
1123
1124	if (mem_cgroup_disabled())
1125		return NULL;
1126
1127again:
1128	rcu_read_lock();
1129	memcg = mem_cgroup_from_task(current);
1130	if (!css_tryget(&memcg->css)) {
1131		rcu_read_unlock();
1132		goto again;
1133	}
1134	rcu_read_unlock();
1135	return memcg;
1136}
1137
1138/**
1139 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1140 * @root: hierarchy root
1141 * @prev: previously returned memcg, NULL on first invocation
1142 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1143 *
1144 * Returns references to children of the hierarchy below @root, or
1145 * @root itself, or %NULL after a full round-trip.
1146 *
1147 * Caller must pass the return value in @prev on subsequent
1148 * invocations for reference counting, or use mem_cgroup_iter_break()
1149 * to cancel a hierarchy walk before the round-trip is complete.
1150 *
1151 * Reclaimers can specify a node in @reclaim to divide up the memcgs
1152 * in the hierarchy among all concurrent reclaimers operating on the
1153 * same node.
1154 */
1155struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1156				   struct mem_cgroup *prev,
1157				   struct mem_cgroup_reclaim_cookie *reclaim)
1158{
1159	struct mem_cgroup_reclaim_iter *iter;
1160	struct cgroup_subsys_state *css = NULL;
1161	struct mem_cgroup *memcg = NULL;
1162	struct mem_cgroup *pos = NULL;
1163
1164	if (mem_cgroup_disabled())
1165		return NULL;
1166
1167	if (!root)
1168		root = root_mem_cgroup;
1169
1170	rcu_read_lock();
1171
1172	if (reclaim) {
1173		struct mem_cgroup_per_node *mz;
1174
1175		mz = root->nodeinfo[reclaim->pgdat->node_id];
1176		iter = &mz->iter;
1177
1178		/*
1179		 * On start, join the current reclaim iteration cycle.
1180		 * Exit when a concurrent walker completes it.
1181		 */
1182		if (!prev)
1183			reclaim->generation = iter->generation;
1184		else if (reclaim->generation != iter->generation)
1185			goto out_unlock;
1186
1187		while (1) {
1188			pos = READ_ONCE(iter->position);
1189			if (!pos || css_tryget(&pos->css))
1190				break;
1191			/*
1192			 * css reference reached zero, so iter->position will
1193			 * be cleared by ->css_released. However, we should not
1194			 * rely on this happening soon, because ->css_released
1195			 * is called from a work queue, and by busy-waiting we
1196			 * might block it. So we clear iter->position right
1197			 * away.
1198			 */
1199			(void)cmpxchg(&iter->position, pos, NULL);
1200		}
1201	} else if (prev) {
1202		pos = prev;
1203	}
1204
1205	if (pos)
1206		css = &pos->css;
1207
1208	for (;;) {
1209		css = css_next_descendant_pre(css, &root->css);
1210		if (!css) {
1211			/*
1212			 * Reclaimers share the hierarchy walk, and a
1213			 * new one might jump in right at the end of
1214			 * the hierarchy - make sure they see at least
1215			 * one group and restart from the beginning.
1216			 */
1217			if (!prev)
1218				continue;
1219			break;
1220		}
1221
1222		/*
1223		 * Verify the css and acquire a reference.  The root
1224		 * is provided by the caller, so we know it's alive
1225		 * and kicking, and don't take an extra reference.
1226		 */
1227		if (css == &root->css || css_tryget(css)) {
1228			memcg = mem_cgroup_from_css(css);
1229			break;
1230		}
1231	}
1232
1233	if (reclaim) {
1234		/*
1235		 * The position could have already been updated by a competing
1236		 * thread, so check that the value hasn't changed since we read
1237		 * it to avoid reclaiming from the same cgroup twice.
1238		 */
1239		(void)cmpxchg(&iter->position, pos, memcg);
1240
1241		if (pos)
1242			css_put(&pos->css);
1243
1244		if (!memcg)
1245			iter->generation++;
1246	}
1247
1248out_unlock:
1249	rcu_read_unlock();
1250	if (prev && prev != root)
1251		css_put(&prev->css);
1252
1253	return memcg;
1254}
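
/*
 * Usage sketch for the iteration protocol documented above (illustrative
 * only; should_stop() stands in for caller-specific logic): the previous
 * return value is passed back in, and mem_cgroup_iter_break() cancels the
 * walk so the outstanding reference is dropped.
 *
 *	struct mem_cgroup *iter = NULL;
 *
 *	while ((iter = mem_cgroup_iter(root, iter, NULL))) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */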
1255
1256/**
1257 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1258 * @root: hierarchy root
1259 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1260 */
1261void mem_cgroup_iter_break(struct mem_cgroup *root,
1262			   struct mem_cgroup *prev)
1263{
1264	if (!root)
1265		root = root_mem_cgroup;
1266	if (prev && prev != root)
1267		css_put(&prev->css);
1268}
1269
1270static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1271					struct mem_cgroup *dead_memcg)
1272{
1273	struct mem_cgroup_reclaim_iter *iter;
1274	struct mem_cgroup_per_node *mz;
1275	int nid;
1276
1277	for_each_node(nid) {
1278		mz = from->nodeinfo[nid];
1279		iter = &mz->iter;
1280		cmpxchg(&iter->position, dead_memcg, NULL);
1281	}
1282}
1283
1284static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1285{
1286	struct mem_cgroup *memcg = dead_memcg;
1287	struct mem_cgroup *last;
1288
1289	do {
1290		__invalidate_reclaim_iterators(memcg, dead_memcg);
1291		last = memcg;
1292	} while ((memcg = parent_mem_cgroup(memcg)));
1293
1294	/*
1295	 * When cgroup1 non-hierarchy mode is used,
1296	 * parent_mem_cgroup() does not walk all the way up to the
1297	 * cgroup root (root_mem_cgroup). So we have to handle
1298	 * dead_memcg from cgroup root separately.
1299	 */
1300	if (!mem_cgroup_is_root(last))
1301		__invalidate_reclaim_iterators(root_mem_cgroup,
1302						dead_memcg);
1303}
1304
1305/**
1306 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1307 * @memcg: hierarchy root
1308 * @fn: function to call for each task
1309 * @arg: argument passed to @fn
1310 *
1311 * This function iterates over tasks attached to @memcg or to any of its
1312 * descendants and calls @fn for each task. If @fn returns a non-zero
1313 * value, the function breaks the iteration loop. Otherwise, it will iterate
1314 * over all tasks and return 0.
1315 *
1316 * This function must not be called for the root memory cgroup.
1317 */
1318void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1319			   int (*fn)(struct task_struct *, void *), void *arg)
1320{
1321	struct mem_cgroup *iter;
1322	int ret = 0;
1323
1324	BUG_ON(mem_cgroup_is_root(memcg));
1325
1326	for_each_mem_cgroup_tree(iter, memcg) {
1327		struct css_task_iter it;
1328		struct task_struct *task;
1329
1330		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1331		while (!ret && (task = css_task_iter_next(&it)))
1332			ret = fn(task, arg);
1333		css_task_iter_end(&it);
1334		if (ret) {
1335			mem_cgroup_iter_break(memcg, iter);
1336			break;
1337		}
1338	}
1339}
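
/*
 * Illustrative shape of an @fn callback for mem_cgroup_scan_tasks()
 * (count_tasks() is a made-up example, not an existing helper): a
 * non-zero return value stops the walk early.
 *
 *	static int count_tasks(struct task_struct *task, void *arg)
 *	{
 *		unsigned int *count = arg;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr = 0;
 *	mem_cgroup_scan_tasks(memcg, count_tasks, &nr);
 */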
1340
1341#ifdef CONFIG_DEBUG_VM
1342void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1343{
1344	struct mem_cgroup *memcg;
1345
1346	if (mem_cgroup_disabled())
1347		return;
1348
1349	memcg = folio_memcg(folio);
1350
1351	if (!memcg)
1352		VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1353	else
1354		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1355}
1356#endif
1357
1358/**
1359 * folio_lruvec_lock - Lock the lruvec for a folio.
1360 * @folio: Pointer to the folio.
1361 *
1362 * These functions are safe to use under any of the following conditions:
1363 * - folio locked
1364 * - folio_test_lru false
1365 * - folio_memcg_lock()
1366 * - folio frozen (refcount of 0)
1367 *
1368 * Return: The lruvec this folio is on with its lock held.
1369 */
1370struct lruvec *folio_lruvec_lock(struct folio *folio)
1371{
1372	struct lruvec *lruvec = folio_lruvec(folio);
1373
1374	spin_lock(&lruvec->lru_lock);
1375	lruvec_memcg_debug(lruvec, folio);
1376
1377	return lruvec;
1378}
1379
1380/**
1381 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1382 * @folio: Pointer to the folio.
1383 *
1384 * These functions are safe to use under any of the following conditions:
1385 * - folio locked
1386 * - folio_test_lru false
1387 * - folio_memcg_lock()
1388 * - folio frozen (refcount of 0)
1389 *
1390 * Return: The lruvec this folio is on with its lock held and interrupts
1391 * disabled.
1392 */
1393struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1394{
1395	struct lruvec *lruvec = folio_lruvec(folio);
1396
1397	spin_lock_irq(&lruvec->lru_lock);
1398	lruvec_memcg_debug(lruvec, folio);
1399
1400	return lruvec;
1401}
1402
1403/**
1404 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1405 * @folio: Pointer to the folio.
1406 * @flags: Pointer to irqsave flags.
1407 *
1408 * These functions are safe to use under any of the following conditions:
1409 * - folio locked
1410 * - folio_test_lru false
1411 * - folio_memcg_lock()
1412 * - folio frozen (refcount of 0)
1413 *
1414 * Return: The lruvec this folio is on with its lock held and interrupts
1415 * disabled.
1416 */
1417struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1418		unsigned long *flags)
1419{
1420	struct lruvec *lruvec = folio_lruvec(folio);
1421
1422	spin_lock_irqsave(&lruvec->lru_lock, *flags);
1423	lruvec_memcg_debug(lruvec, folio);
1424
1425	return lruvec;
1426}
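
/*
 * Pairing sketch for the three locking helpers above: the returned
 * lruvec's lru_lock must be released with the matching spin_unlock
 * variant, e.g. for the irqsave flavour:
 *
 *	unsigned long flags;
 *	struct lruvec *lruvec = folio_lruvec_lock_irqsave(folio, &flags);
 *
 *	(manipulate the folio's LRU state here)
 *
 *	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
 */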
1427
1428/**
1429 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1430 * @lruvec: mem_cgroup per zone lru vector
1431 * @lru: index of lru list the page is sitting on
1432 * @zid: zone id of the accounted pages
1433 * @nr_pages: positive when adding or negative when removing
1434 *
1435 * This function must be called under lru_lock, just before a page is added
1436 * to or just after a page is removed from an lru list.
1437 */
1438void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1439				int zid, int nr_pages)
1440{
1441	struct mem_cgroup_per_node *mz;
1442	unsigned long *lru_size;
1443	long size;
1444
1445	if (mem_cgroup_disabled())
1446		return;
1447
1448	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1449	lru_size = &mz->lru_zone_size[zid][lru];
1450
1451	if (nr_pages < 0)
1452		*lru_size += nr_pages;
1453
1454	size = *lru_size;
1455	if (WARN_ONCE(size < 0,
1456		"%s(%p, %d, %d): lru_size %ld\n",
1457		__func__, lruvec, lru, nr_pages, size)) {
1458		VM_BUG_ON(1);
1459		*lru_size = 0;
1460	}
1461
1462	if (nr_pages > 0)
1463		*lru_size += nr_pages;
1464}
1465
1466/**
1467 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1468 * @memcg: the memory cgroup
1469 *
1470 * Returns the maximum amount of memory @memcg can be charged with, in
1471 * pages.
1472 */
1473static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1474{
1475	unsigned long margin = 0;
1476	unsigned long count;
1477	unsigned long limit;
1478
1479	count = page_counter_read(&memcg->memory);
1480	limit = READ_ONCE(memcg->memory.max);
1481	if (count < limit)
1482		margin = limit - count;
1483
1484	if (do_memsw_account()) {
1485		count = page_counter_read(&memcg->memsw);
1486		limit = READ_ONCE(memcg->memsw.max);
1487		if (count < limit)
1488			margin = min(margin, limit - count);
1489		else
1490			margin = 0;
1491	}
1492
1493	return margin;
1494}
1495
1496/*
1497 * A routine for checking whether "memcg" is under move_account() or not.
1498 *
1499 * Checks whether a cgroup is mc.from, mc.to, or in the hierarchy of the
1500 * moving cgroups. This is used for waiting at high memory pressure
1501 * caused by "move".
1502 */
1503static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1504{
1505	struct mem_cgroup *from;
1506	struct mem_cgroup *to;
1507	bool ret = false;
1508	/*
1509	 * Unlike task_move routines, we access mc.to, mc.from not under
1510	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1511	 */
1512	spin_lock(&mc.lock);
1513	from = mc.from;
1514	to = mc.to;
1515	if (!from)
1516		goto unlock;
1517
1518	ret = mem_cgroup_is_descendant(from, memcg) ||
1519		mem_cgroup_is_descendant(to, memcg);
1520unlock:
1521	spin_unlock(&mc.lock);
1522	return ret;
1523}
1524
1525static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1526{
1527	if (mc.moving_task && current != mc.moving_task) {
1528		if (mem_cgroup_under_move(memcg)) {
1529			DEFINE_WAIT(wait);
1530			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1531			/* moving charge context might have finished. */
1532			if (mc.moving_task)
1533				schedule();
1534			finish_wait(&mc.waitq, &wait);
1535			return true;
1536		}
1537	}
1538	return false;
1539}
1540
1541struct memory_stat {
1542	const char *name;
1543	unsigned int idx;
1544};
1545
1546static const struct memory_stat memory_stats[] = {
1547	{ "anon",			NR_ANON_MAPPED			},
1548	{ "file",			NR_FILE_PAGES			},
1549	{ "kernel",			MEMCG_KMEM			},
1550	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
1551	{ "pagetables",			NR_PAGETABLE			},
1552	{ "sec_pagetables",		NR_SECONDARY_PAGETABLE		},
1553	{ "percpu",			MEMCG_PERCPU_B			},
1554	{ "sock",			MEMCG_SOCK			},
1555	{ "vmalloc",			MEMCG_VMALLOC			},
1556	{ "shmem",			NR_SHMEM			},
1557#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
1558	{ "zswap",			MEMCG_ZSWAP_B			},
1559	{ "zswapped",			MEMCG_ZSWAPPED			},
1560#endif
1561	{ "file_mapped",		NR_FILE_MAPPED			},
1562	{ "file_dirty",			NR_FILE_DIRTY			},
1563	{ "file_writeback",		NR_WRITEBACK			},
1564#ifdef CONFIG_SWAP
1565	{ "swapcached",			NR_SWAPCACHE			},
1566#endif
1567#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1568	{ "anon_thp",			NR_ANON_THPS			},
1569	{ "file_thp",			NR_FILE_THPS			},
1570	{ "shmem_thp",			NR_SHMEM_THPS			},
1571#endif
1572	{ "inactive_anon",		NR_INACTIVE_ANON		},
1573	{ "active_anon",		NR_ACTIVE_ANON			},
1574	{ "inactive_file",		NR_INACTIVE_FILE		},
1575	{ "active_file",		NR_ACTIVE_FILE			},
1576	{ "unevictable",		NR_UNEVICTABLE			},
1577	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B		},
1578	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B		},
1579
1580	/* The memory events */
1581	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON		},
1582	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE		},
1583	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON	},
1584	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE	},
1585	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON		},
1586	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE		},
1587	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM		},
1588};
1589
1590/* The actual unit of the state item, not the same as the output unit */
1591static int memcg_page_state_unit(int item)
1592{
1593	switch (item) {
1594	case MEMCG_PERCPU_B:
1595	case MEMCG_ZSWAP_B:
1596	case NR_SLAB_RECLAIMABLE_B:
1597	case NR_SLAB_UNRECLAIMABLE_B:
1598		return 1;
1599	case NR_KERNEL_STACK_KB:
1600		return SZ_1K;
1601	default:
1602		return PAGE_SIZE;
1603	}
1604}
1605
1606/* Translate stat items to the correct unit for memory.stat output */
1607static int memcg_page_state_output_unit(int item)
1608{
1609	/*
1610	 * Workingset state is actually in pages, but we export it to userspace
1611	 * as a scalar count of events, so special case it here.
1612	 */
1613	switch (item) {
1614	case WORKINGSET_REFAULT_ANON:
1615	case WORKINGSET_REFAULT_FILE:
1616	case WORKINGSET_ACTIVATE_ANON:
1617	case WORKINGSET_ACTIVATE_FILE:
1618	case WORKINGSET_RESTORE_ANON:
1619	case WORKINGSET_RESTORE_FILE:
1620	case WORKINGSET_NODERECLAIM:
1621		return 1;
1622	default:
1623		return memcg_page_state_unit(item);
1624	}
1625}
1626
1627static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1628						    int item)
1629{
1630	return memcg_page_state(memcg, item) *
1631		memcg_page_state_output_unit(item);
1632}
1633
1634static inline unsigned long memcg_page_state_local_output(
1635		struct mem_cgroup *memcg, int item)
1636{
1637	return memcg_page_state_local(memcg, item) *
1638		memcg_page_state_output_unit(item);
1639}
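
/*
 * Worked examples of the unit handling above: NR_KERNEL_STACK_KB is
 * stored in kilobytes and reported in bytes, the workingset counters are
 * reported as raw event counts, and everything else is stored in pages
 * and reported in bytes:
 *
 *	kernel_stack bytes  = memcg_page_state(memcg, NR_KERNEL_STACK_KB) * SZ_1K
 *	refault event count = memcg_page_state(memcg, WORKINGSET_REFAULT_FILE)
 *	anon bytes          = memcg_page_state(memcg, NR_ANON_MAPPED) * PAGE_SIZE
 */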
1640
1641static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1642{
1643	int i;
1644
1645	/*
1646	 * Provide statistics on the state of the memory subsystem as
1647	 * well as cumulative event counters that show past behavior.
1648	 *
1649	 * This list is ordered following a combination of these gradients:
1650	 * 1) generic big picture -> specifics and details
1651	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1652	 *
1653	 * Current memory state:
1654	 */
1655	mem_cgroup_flush_stats(memcg);
1656
1657	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1658		u64 size;
1659
1660		size = memcg_page_state_output(memcg, memory_stats[i].idx);
1661		seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
1662
1663		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1664			size += memcg_page_state_output(memcg,
1665							NR_SLAB_RECLAIMABLE_B);
1666			seq_buf_printf(s, "slab %llu\n", size);
1667		}
1668	}
1669
1670	/* Accumulated memory events */
1671	seq_buf_printf(s, "pgscan %lu\n",
1672		       memcg_events(memcg, PGSCAN_KSWAPD) +
1673		       memcg_events(memcg, PGSCAN_DIRECT) +
1674		       memcg_events(memcg, PGSCAN_KHUGEPAGED));
1675	seq_buf_printf(s, "pgsteal %lu\n",
1676		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1677		       memcg_events(memcg, PGSTEAL_DIRECT) +
1678		       memcg_events(memcg, PGSTEAL_KHUGEPAGED));
1679
1680	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1681		if (memcg_vm_event_stat[i] == PGPGIN ||
1682		    memcg_vm_event_stat[i] == PGPGOUT)
1683			continue;
1684
1685		seq_buf_printf(s, "%s %lu\n",
1686			       vm_event_name(memcg_vm_event_stat[i]),
1687			       memcg_events(memcg, memcg_vm_event_stat[i]));
1688	}
1689
1690	/* The above should easily fit into one page */
1691	WARN_ON_ONCE(seq_buf_has_overflowed(s));
1692}
1693
1694static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);
1695
1696static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1697{
1698	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1699		memcg_stat_format(memcg, s);
1700	else
1701		memcg1_stat_format(memcg, s);
1702	WARN_ON_ONCE(seq_buf_has_overflowed(s));
1703}
1704
1705/**
1706 * mem_cgroup_print_oom_context: Print OOM information relevant to
1707 * memory controller.
1708 * @memcg: The memory cgroup that went over limit
1709 * @p: Task that is going to be killed
1710 *
1711 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1712 * enabled
1713 */
1714void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1715{
1716	rcu_read_lock();
1717
1718	if (memcg) {
1719		pr_cont(",oom_memcg=");
1720		pr_cont_cgroup_path(memcg->css.cgroup);
1721	} else
1722		pr_cont(",global_oom");
1723	if (p) {
1724		pr_cont(",task_memcg=");
1725		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1726	}
1727	rcu_read_unlock();
1728}
1729
1730/**
1731 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1732 * memory controller.
1733 * @memcg: The memory cgroup that went over limit
1734 */
1735void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1736{
1737	/* Use a static buffer; the caller is holding oom_lock. */
1738	static char buf[PAGE_SIZE];
1739	struct seq_buf s;
1740
1741	lockdep_assert_held(&oom_lock);
1742
1743	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1744		K((u64)page_counter_read(&memcg->memory)),
1745		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1746	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1747		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1748			K((u64)page_counter_read(&memcg->swap)),
1749			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1750	else {
1751		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1752			K((u64)page_counter_read(&memcg->memsw)),
1753			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1754		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1755			K((u64)page_counter_read(&memcg->kmem)),
1756			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1757	}
1758
1759	pr_info("Memory cgroup stats for ");
1760	pr_cont_cgroup_path(memcg->css.cgroup);
1761	pr_cont(":");
1762	seq_buf_init(&s, buf, sizeof(buf));
1763	memory_stat_format(memcg, &s);
1764	seq_buf_do_printk(&s, KERN_INFO);
1765}
1766
1767/*
1768 * Return the memory (and swap, if configured) limit for a memcg.
1769 */
1770unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1771{
1772	unsigned long max = READ_ONCE(memcg->memory.max);
1773
1774	if (do_memsw_account()) {
1775		if (mem_cgroup_swappiness(memcg)) {
1776			/* Calculate swap excess capacity from memsw limit */
1777			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1778
1779			max += min(swap, (unsigned long)total_swap_pages);
1780		}
1781	} else {
1782		if (mem_cgroup_swappiness(memcg))
1783			max += min(READ_ONCE(memcg->swap.max),
1784				   (unsigned long)total_swap_pages);
1785	}
1786	return max;
1787}
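
/*
 * Worked example for the calculation above, assuming cgroup2 swap
 * accounting, a non-zero swappiness and plenty of physical swap: with
 * memory.max = 1G and swap.max = 512M on 4K pages,
 *
 *	max = 262144 + min(131072, total_swap_pages) = 393216 pages (1.5G)
 *
 * With legacy memsw accounting the swap headroom is memsw.max - memory.max
 * instead.
 */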
1788
1789unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1790{
1791	return page_counter_read(&memcg->memory);
1792}
1793
1794static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1795				     int order)
1796{
1797	struct oom_control oc = {
1798		.zonelist = NULL,
1799		.nodemask = NULL,
1800		.memcg = memcg,
1801		.gfp_mask = gfp_mask,
1802		.order = order,
1803	};
1804	bool ret = true;
1805
1806	if (mutex_lock_killable(&oom_lock))
1807		return true;
1808
1809	if (mem_cgroup_margin(memcg) >= (1 << order))
1810		goto unlock;
1811
1812	/*
1813	 * A few threads which were not waiting at mutex_lock_killable() can
1814	 * fail to bail out. Therefore, check again after holding oom_lock.
1815	 */
1816	ret = task_is_dying() || out_of_memory(&oc);
1817
1818unlock:
1819	mutex_unlock(&oom_lock);
1820	return ret;
1821}
1822
1823static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1824				   pg_data_t *pgdat,
1825				   gfp_t gfp_mask,
1826				   unsigned long *total_scanned)
1827{
1828	struct mem_cgroup *victim = NULL;
1829	int total = 0;
1830	int loop = 0;
1831	unsigned long excess;
1832	unsigned long nr_scanned;
1833	struct mem_cgroup_reclaim_cookie reclaim = {
1834		.pgdat = pgdat,
1835	};
1836
1837	excess = soft_limit_excess(root_memcg);
1838
1839	while (1) {
1840		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1841		if (!victim) {
1842			loop++;
1843			if (loop >= 2) {
1844				/*
1845				 * If we have not been able to reclaim
1846				 * anything, it might be because there are
1847				 * no reclaimable pages under this hierarchy.
1848				 */
1849				if (!total)
1850					break;
1851				/*
1852				 * We want to do more targeted reclaim.
1853				 * excess >> 2 is not so large that we reclaim
1854				 * too much, nor so small that we keep coming
1855				 * back to reclaim from this cgroup.
1856				 */
1857				if (total >= (excess >> 2) ||
1858					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1859					break;
1860			}
1861			continue;
1862		}
1863		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1864					pgdat, &nr_scanned);
1865		*total_scanned += nr_scanned;
1866		if (!soft_limit_excess(root_memcg))
1867			break;
1868	}
1869	mem_cgroup_iter_break(root_memcg, victim);
1870	return total;
1871}
1872
1873#ifdef CONFIG_LOCKDEP
1874static struct lockdep_map memcg_oom_lock_dep_map = {
1875	.name = "memcg_oom_lock",
1876};
1877#endif
1878
1879static DEFINE_SPINLOCK(memcg_oom_lock);
1880
1881/*
1882 * Check whether the OOM killer is already running under our hierarchy.
1883 * If someone else is running it, return false.
1884 */
1885static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1886{
1887	struct mem_cgroup *iter, *failed = NULL;
1888
1889	spin_lock(&memcg_oom_lock);
1890
1891	for_each_mem_cgroup_tree(iter, memcg) {
1892		if (iter->oom_lock) {
1893			/*
1894			 * this subtree of our hierarchy is already locked
1895			 * so we cannot take the lock.
1896			 */
1897			failed = iter;
1898			mem_cgroup_iter_break(memcg, iter);
1899			break;
1900		} else
1901			iter->oom_lock = true;
1902	}
1903
1904	if (failed) {
1905		/*
1906		 * OK, we failed to lock the whole subtree so we have
1907		 * to clean up what we have locked so far, up to the failing cgroup.
1908		 */
1909		for_each_mem_cgroup_tree(iter, memcg) {
1910			if (iter == failed) {
1911				mem_cgroup_iter_break(memcg, iter);
1912				break;
1913			}
1914			iter->oom_lock = false;
1915		}
1916	} else
1917		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1918
1919	spin_unlock(&memcg_oom_lock);
1920
1921	return !failed;
1922}
1923
1924static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1925{
1926	struct mem_cgroup *iter;
1927
1928	spin_lock(&memcg_oom_lock);
1929	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1930	for_each_mem_cgroup_tree(iter, memcg)
1931		iter->oom_lock = false;
1932	spin_unlock(&memcg_oom_lock);
1933}
1934
1935static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1936{
1937	struct mem_cgroup *iter;
1938
1939	spin_lock(&memcg_oom_lock);
1940	for_each_mem_cgroup_tree(iter, memcg)
1941		iter->under_oom++;
1942	spin_unlock(&memcg_oom_lock);
1943}
1944
1945static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1946{
1947	struct mem_cgroup *iter;
1948
1949	/*
1950	 * Be careful about under_oom underflows because a child memcg
1951	 * could have been added after mem_cgroup_mark_under_oom.
1952	 */
1953	spin_lock(&memcg_oom_lock);
1954	for_each_mem_cgroup_tree(iter, memcg)
1955		if (iter->under_oom > 0)
1956			iter->under_oom--;
1957	spin_unlock(&memcg_oom_lock);
1958}
1959
1960static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1961
1962struct oom_wait_info {
1963	struct mem_cgroup *memcg;
1964	wait_queue_entry_t	wait;
1965};
1966
1967static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1968	unsigned mode, int sync, void *arg)
1969{
1970	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1971	struct mem_cgroup *oom_wait_memcg;
1972	struct oom_wait_info *oom_wait_info;
1973
1974	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1975	oom_wait_memcg = oom_wait_info->memcg;
1976
1977	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1978	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1979		return 0;
1980	return autoremove_wake_function(wait, mode, sync, arg);
1981}
1982
1983static void memcg_oom_recover(struct mem_cgroup *memcg)
1984{
1985	/*
1986	 * For the following lockless ->under_oom test, the only required
1987	 * guarantee is that it must see the state asserted by an OOM when
1988	 * this function is called as a result of userland actions
1989	 * triggered by the notification of the OOM.  This is trivially
1990	 * achieved by invoking mem_cgroup_mark_under_oom() before
1991	 * triggering notification.
1992	 */
1993	if (memcg && memcg->under_oom)
1994		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1995}
1996
1997/*
1998 * Returns true if successfully killed one or more processes. Though in some
1999 * corner cases it can return true even without killing any process.
2000 */
2001static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
2002{
2003	bool locked, ret;
2004
2005	if (order > PAGE_ALLOC_COSTLY_ORDER)
2006		return false;
2007
2008	memcg_memory_event(memcg, MEMCG_OOM);
2009
2010	/*
2011	 * We are in the middle of the charge context here, so we
2012	 * don't want to block when potentially sitting on a callstack
2013	 * that holds all kinds of filesystem and mm locks.
2014	 *
2015	 * cgroup1 allows disabling the OOM killer and waiting for outside
2016	 * handling until the charge can succeed; remember the context and put
2017	 * the task to sleep at the end of the page fault when all locks are
2018	 * released.
2019	 *
2020	 * On the other hand, in-kernel OOM killer allows for an async victim
2021	 * memory reclaim (oom_reaper) and that means that we are not solely
2022	 * relying on the oom victim to make a forward progress and we can
2023	 * invoke the oom killer here.
2024	 *
2025	 * Please note that mem_cgroup_out_of_memory might fail to find a
2026	 * victim and then we have to bail out from the charge path.
2027	 */
2028	if (READ_ONCE(memcg->oom_kill_disable)) {
2029		if (current->in_user_fault) {
2030			css_get(&memcg->css);
2031			current->memcg_in_oom = memcg;
2032			current->memcg_oom_gfp_mask = mask;
2033			current->memcg_oom_order = order;
2034		}
2035		return false;
2036	}
2037
2038	mem_cgroup_mark_under_oom(memcg);
2039
2040	locked = mem_cgroup_oom_trylock(memcg);
2041
2042	if (locked)
2043		mem_cgroup_oom_notify(memcg);
2044
2045	mem_cgroup_unmark_under_oom(memcg);
2046	ret = mem_cgroup_out_of_memory(memcg, mask, order);
2047
2048	if (locked)
2049		mem_cgroup_oom_unlock(memcg);
2050
2051	return ret;
2052}
2053
2054/**
2055 * mem_cgroup_oom_synchronize - complete memcg OOM handling
2056 * @handle: actually kill/wait or just clean up the OOM state
2057 *
2058 * This has to be called at the end of a page fault if the memcg OOM
2059 * handler was enabled.
2060 *
2061 * Memcg supports userspace OOM handling where failed allocations must
2062 * sleep on a waitqueue until the userspace task resolves the
2063 * situation.  Sleeping directly in the charge context with all kinds
2064 * of locks held is not a good idea, instead we remember an OOM state
2065 * in the task and mem_cgroup_oom_synchronize() has to be called at
2066 * the end of the page fault to complete the OOM handling.
2067 *
2068 * Returns %true if an ongoing memcg OOM situation was detected and
2069 * completed, %false otherwise.
2070 */
2071bool mem_cgroup_oom_synchronize(bool handle)
2072{
2073	struct mem_cgroup *memcg = current->memcg_in_oom;
2074	struct oom_wait_info owait;
2075	bool locked;
2076
2077	/* OOM is global, do not handle */
2078	if (!memcg)
2079		return false;
2080
2081	if (!handle)
2082		goto cleanup;
2083
2084	owait.memcg = memcg;
2085	owait.wait.flags = 0;
2086	owait.wait.func = memcg_oom_wake_function;
2087	owait.wait.private = current;
2088	INIT_LIST_HEAD(&owait.wait.entry);
2089
2090	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
2091	mem_cgroup_mark_under_oom(memcg);
2092
2093	locked = mem_cgroup_oom_trylock(memcg);
2094
2095	if (locked)
2096		mem_cgroup_oom_notify(memcg);
2097
2098	schedule();
2099	mem_cgroup_unmark_under_oom(memcg);
2100	finish_wait(&memcg_oom_waitq, &owait.wait);
2101
2102	if (locked)
2103		mem_cgroup_oom_unlock(memcg);
2104cleanup:
2105	current->memcg_in_oom = NULL;
2106	css_put(&memcg->css);
2107	return true;
2108}
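
/*
 * Call-site sketch: the arch-independent fault exit path (currently
 * pagefault_out_of_memory() in mm/oom_kill.c) completes a memcg OOM that
 * mem_cgroup_oom() remembered in the task, before falling back to the
 * global OOM killer:
 *
 *	if (mem_cgroup_oom_synchronize(true))
 *		return;
 */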
2109
2110/**
2111 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2112 * @victim: task to be killed by the OOM killer
2113 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2114 *
2115 * Returns a pointer to a memory cgroup, which has to be cleaned up
2116 * by killing all of its OOM-killable tasks.
2117 *
2118 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2119 */
2120struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2121					    struct mem_cgroup *oom_domain)
2122{
2123	struct mem_cgroup *oom_group = NULL;
2124	struct mem_cgroup *memcg;
2125
2126	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2127		return NULL;
2128
2129	if (!oom_domain)
2130		oom_domain = root_mem_cgroup;
2131
2132	rcu_read_lock();
2133
2134	memcg = mem_cgroup_from_task(victim);
2135	if (mem_cgroup_is_root(memcg))
2136		goto out;
2137
2138	/*
2139	 * If the victim task has been asynchronously moved to a different
2140	 * memory cgroup, we might end up killing tasks outside oom_domain.
2141	 * In this case it's better to ignore memory.group.oom.
2142	 */
2143	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2144		goto out;
2145
2146	/*
2147	 * Traverse the memory cgroup hierarchy from the victim task's
2148	 * cgroup up to the OOMing cgroup (or root) to find the
2149	 * highest-level memory cgroup with oom.group set.
2150	 */
2151	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2152		if (READ_ONCE(memcg->oom_group))
2153			oom_group = memcg;
2154
2155		if (memcg == oom_domain)
2156			break;
2157	}
2158
2159	if (oom_group)
2160		css_get(&oom_group->css);
2161out:
2162	rcu_read_unlock();
2163
2164	return oom_group;
2165}
2166
2167void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2168{
2169	pr_info("Tasks in ");
2170	pr_cont_cgroup_path(memcg->css.cgroup);
2171	pr_cont(" are going to be killed due to memory.oom.group set\n");
2172}
2173
2174/**
2175 * folio_memcg_lock - Bind a folio to its memcg.
2176 * @folio: The folio.
2177 *
2178 * This function prevents unlocked LRU folios from being moved to
2179 * another cgroup.
2180 *
2181 * It ensures lifetime of the bound memcg.  The caller is responsible
2182 * for the lifetime of the folio.
2183 */
2184void folio_memcg_lock(struct folio *folio)
2185{
2186	struct mem_cgroup *memcg;
2187	unsigned long flags;
2188
2189	/*
2190	 * The RCU lock is held throughout the transaction.  The fast
2191	 * path can get away without acquiring the memcg->move_lock
2192	 * because page moving starts with an RCU grace period.
2193	 */
2194	rcu_read_lock();
2195
2196	if (mem_cgroup_disabled())
2197		return;
2198again:
2199	memcg = folio_memcg(folio);
2200	if (unlikely(!memcg))
2201		return;
2202
2203#ifdef CONFIG_PROVE_LOCKING
2204	local_irq_save(flags);
2205	might_lock(&memcg->move_lock);
2206	local_irq_restore(flags);
2207#endif
2208
2209	if (atomic_read(&memcg->moving_account) <= 0)
2210		return;
2211
2212	spin_lock_irqsave(&memcg->move_lock, flags);
2213	if (memcg != folio_memcg(folio)) {
2214		spin_unlock_irqrestore(&memcg->move_lock, flags);
2215		goto again;
2216	}
2217
2218	/*
2219	 * When charge migration first begins, we can have multiple
2220	 * critical sections holding the fast-path RCU lock and one
2221	 * holding the slowpath move_lock. Track the task that holds the
2222	 * move_lock for folio_memcg_unlock().
2223	 */
2224	memcg->move_lock_task = current;
2225	memcg->move_lock_flags = flags;
2226}
2227
2228static void __folio_memcg_unlock(struct mem_cgroup *memcg)
2229{
2230	if (memcg && memcg->move_lock_task == current) {
2231		unsigned long flags = memcg->move_lock_flags;
2232
2233		memcg->move_lock_task = NULL;
2234		memcg->move_lock_flags = 0;
2235
2236		spin_unlock_irqrestore(&memcg->move_lock, flags);
2237	}
2238
2239	rcu_read_unlock();
2240}
2241
2242/**
2243 * folio_memcg_unlock - Release the binding between a folio and its memcg.
2244 * @folio: The folio.
2245 *
2246 * This releases the binding created by folio_memcg_lock().  This does
2247 * not change the accounting of this folio to its memcg, but it does
2248 * permit others to change it.
2249 */
2250void folio_memcg_unlock(struct folio *folio)
2251{
2252	__folio_memcg_unlock(folio_memcg(folio));
2253}
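
/*
 * Pairing sketch for the two helpers above (update_folio_state() is a
 * placeholder): the memcg binding of the folio stays stable between the
 * lock and unlock calls.
 *
 *	folio_memcg_lock(folio);
 *	update_folio_state(folio);
 *	folio_memcg_unlock(folio);
 */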
2254
2255struct memcg_stock_pcp {
2256	local_lock_t stock_lock;
2257	struct mem_cgroup *cached; /* this is never the root cgroup */
2258	unsigned int nr_pages;
2259
2260#ifdef CONFIG_MEMCG_KMEM
2261	struct obj_cgroup *cached_objcg;
2262	struct pglist_data *cached_pgdat;
2263	unsigned int nr_bytes;
2264	int nr_slab_reclaimable_b;
2265	int nr_slab_unreclaimable_b;
2266#endif
2267
2268	struct work_struct work;
2269	unsigned long flags;
2270#define FLUSHING_CACHED_CHARGE	0
2271};
2272static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
2273	.stock_lock = INIT_LOCAL_LOCK(stock_lock),
2274};
2275static DEFINE_MUTEX(percpu_charge_mutex);
2276
2277#ifdef CONFIG_MEMCG_KMEM
2278static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
2279static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2280				     struct mem_cgroup *root_memcg);
2281static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages);
2282
2283#else
2284static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2285{
2286	return NULL;
2287}
2288static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2289				     struct mem_cgroup *root_memcg)
2290{
2291	return false;
2292}
2293static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2294{
2295}
2296#endif
2297
2298/**
2299 * consume_stock: Try to consume stocked charge on this cpu.
2300 * @memcg: memcg to consume from.
2301 * @nr_pages: how many pages to charge.
2302 *
2303 * The charges will only happen if @memcg matches the current cpu's memcg
2304 * stock, and at least @nr_pages are available in that stock.  Failure to
2305 * service an allocation will refill the stock.
2306 *
2307 * returns true if successful, false otherwise.
2308 */
2309static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2310{
2311	struct memcg_stock_pcp *stock;
2312	unsigned long flags;
2313	bool ret = false;
2314
2315	if (nr_pages > MEMCG_CHARGE_BATCH)
2316		return ret;
2317
2318	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2319
2320	stock = this_cpu_ptr(&memcg_stock);
2321	if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) {
2322		stock->nr_pages -= nr_pages;
2323		ret = true;
2324	}
2325
2326	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2327
2328	return ret;
2329}
2330
2331/*
2332 * Return the stock cached in the per-cpu area to the counters and reset the cached information.
2333 */
2334static void drain_stock(struct memcg_stock_pcp *stock)
2335{
2336	struct mem_cgroup *old = READ_ONCE(stock->cached);
2337
2338	if (!old)
2339		return;
2340
2341	if (stock->nr_pages) {
2342		page_counter_uncharge(&old->memory, stock->nr_pages);
2343		if (do_memsw_account())
2344			page_counter_uncharge(&old->memsw, stock->nr_pages);
2345		stock->nr_pages = 0;
2346	}
2347
2348	css_put(&old->css);
2349	WRITE_ONCE(stock->cached, NULL);
2350}
2351
2352static void drain_local_stock(struct work_struct *dummy)
2353{
2354	struct memcg_stock_pcp *stock;
2355	struct obj_cgroup *old = NULL;
2356	unsigned long flags;
2357
2358	/*
2359	 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2360	 * drain_stock races is that we always operate on local CPU stock
2361	 * here with IRQ disabled
2362	 */
2363	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2364
2365	stock = this_cpu_ptr(&memcg_stock);
2366	old = drain_obj_stock(stock);
2367	drain_stock(stock);
2368	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2369
2370	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2371	if (old)
2372		obj_cgroup_put(old);
2373}
2374
2375/*
2376 * Cache charges to the local per-cpu area.
2377 * They will be consumed by consume_stock() later.
2378 */
2379static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2380{
2381	struct memcg_stock_pcp *stock;
2382
2383	stock = this_cpu_ptr(&memcg_stock);
2384	if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
2385		drain_stock(stock);
2386		css_get(&memcg->css);
2387		WRITE_ONCE(stock->cached, memcg);
2388	}
2389	stock->nr_pages += nr_pages;
2390
2391	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2392		drain_stock(stock);
2393}
2394
2395static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2396{
2397	unsigned long flags;
2398
2399	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2400	__refill_stock(memcg, nr_pages);
2401	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2402}
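
/*
 * How the pieces above fit together (simplified sketch of the flow in
 * try_charge_memcg() further down): a charge first tries the local stock,
 * and on a miss charges a whole batch against the page counters and parks
 * the surplus in the stock for later consume_stock() calls.
 *
 *	if (consume_stock(memcg, nr_pages))
 *		return 0;
 *	(charge batch >= nr_pages against the counters)
 *	if (batch > nr_pages)
 *		refill_stock(memcg, batch - nr_pages);
 */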
2403
2404/*
2405 * Drains all per-CPU charge caches for given root_memcg resp. subtree
2406 * of the hierarchy under it.
2407 */
2408static void drain_all_stock(struct mem_cgroup *root_memcg)
2409{
2410	int cpu, curcpu;
2411
2412	/* If someone's already draining, avoid adding more workers. */
2413	if (!mutex_trylock(&percpu_charge_mutex))
2414		return;
2415	/*
2416	 * Notify other cpus that system-wide "drain" is running.
2417	 * We do not care about races with the cpu hotplug because cpu down
2418	 * as well as workers from this path always operate on the local
2419	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2420	 */
2421	migrate_disable();
2422	curcpu = smp_processor_id();
2423	for_each_online_cpu(cpu) {
2424		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2425		struct mem_cgroup *memcg;
2426		bool flush = false;
2427
2428		rcu_read_lock();
2429		memcg = READ_ONCE(stock->cached);
2430		if (memcg && stock->nr_pages &&
2431		    mem_cgroup_is_descendant(memcg, root_memcg))
2432			flush = true;
2433		else if (obj_stock_flush_required(stock, root_memcg))
2434			flush = true;
2435		rcu_read_unlock();
2436
2437		if (flush &&
2438		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2439			if (cpu == curcpu)
2440				drain_local_stock(&stock->work);
2441			else if (!cpu_is_isolated(cpu))
2442				schedule_work_on(cpu, &stock->work);
2443		}
2444	}
2445	migrate_enable();
2446	mutex_unlock(&percpu_charge_mutex);
2447}
2448
2449static int memcg_hotplug_cpu_dead(unsigned int cpu)
2450{
2451	struct memcg_stock_pcp *stock;
2452
2453	stock = &per_cpu(memcg_stock, cpu);
2454	drain_stock(stock);
2455
2456	return 0;
2457}
2458
2459static unsigned long reclaim_high(struct mem_cgroup *memcg,
2460				  unsigned int nr_pages,
2461				  gfp_t gfp_mask)
2462{
2463	unsigned long nr_reclaimed = 0;
2464
2465	do {
2466		unsigned long pflags;
2467
2468		if (page_counter_read(&memcg->memory) <=
2469		    READ_ONCE(memcg->memory.high))
2470			continue;
2471
2472		memcg_memory_event(memcg, MEMCG_HIGH);
2473
2474		psi_memstall_enter(&pflags);
2475		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2476							gfp_mask,
2477							MEMCG_RECLAIM_MAY_SWAP);
2478		psi_memstall_leave(&pflags);
2479	} while ((memcg = parent_mem_cgroup(memcg)) &&
2480		 !mem_cgroup_is_root(memcg));
2481
2482	return nr_reclaimed;
2483}
2484
2485static void high_work_func(struct work_struct *work)
2486{
2487	struct mem_cgroup *memcg;
2488
2489	memcg = container_of(work, struct mem_cgroup, high_work);
2490	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2491}
2492
2493/*
2494 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2495 * enough to still cause a significant slowdown in most cases, while still
2496 * allowing diagnostics and tracing to proceed without becoming stuck.
2497 */
2498#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2499
2500/*
2501 * When calculating the delay, we use these on either side of the exponentiation to
2502 * maintain precision and scale to a reasonable number of jiffies (see the table
2503 * below).
2504 *
2505 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2506 *   overage ratio to a delay.
2507 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2508 *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2509 *   to produce a reasonable delay curve.
2510 *
2511 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2512 * reasonable delay curve compared to precision-adjusted overage, not
2513 * penalising heavily at first, but still making sure that growth beyond the
2514 * limit penalises misbehaving cgroups by slowing them down exponentially. For
2515 * example, with a high of 100 megabytes:
2516 *
2517 *  +-------+------------------------+
2518 *  | usage | time to allocate in ms |
2519 *  +-------+------------------------+
2520 *  | 100M  |                      0 |
2521 *  | 101M  |                      6 |
2522 *  | 102M  |                     25 |
2523 *  | 103M  |                     57 |
2524 *  | 104M  |                    102 |
2525 *  | 105M  |                    159 |
2526 *  | 106M  |                    230 |
2527 *  | 107M  |                    313 |
2528 *  | 108M  |                    409 |
2529 *  | 109M  |                    518 |
2530 *  | 110M  |                    639 |
2531 *  | 111M  |                    774 |
2532 *  | 112M  |                    921 |
2533 *  | 113M  |                   1081 |
2534 *  | 114M  |                   1254 |
2535 *  | 115M  |                   1439 |
2536 *  | 116M  |                   1638 |
2537 *  | 117M  |                   1849 |
2538 *  | 118M  |                   2000 |
2539 *  | 119M  |                   2000 |
2540 *  | 120M  |                   2000 |
2541 *  +-------+------------------------+
2542 */
2543#define MEMCG_DELAY_PRECISION_SHIFT 20
2544#define MEMCG_DELAY_SCALING_SHIFT 14
2545
2546static u64 calculate_overage(unsigned long usage, unsigned long high)
2547{
2548	u64 overage;
2549
2550	if (usage <= high)
2551		return 0;
2552
2553	/*
2554	 * Prevent division by 0 in overage calculation by acting as if
2555	 * it was a threshold of 1 page
2556	 */
2557	high = max(high, 1UL);
2558
2559	overage = usage - high;
2560	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2561	return div64_u64(overage, high);
2562}
2563
2564static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2565{
2566	u64 overage, max_overage = 0;
2567
2568	do {
2569		overage = calculate_overage(page_counter_read(&memcg->memory),
2570					    READ_ONCE(memcg->memory.high));
2571		max_overage = max(overage, max_overage);
2572	} while ((memcg = parent_mem_cgroup(memcg)) &&
2573		 !mem_cgroup_is_root(memcg));
2574
2575	return max_overage;
2576}
2577
2578static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2579{
2580	u64 overage, max_overage = 0;
2581
2582	do {
2583		overage = calculate_overage(page_counter_read(&memcg->swap),
2584					    READ_ONCE(memcg->swap.high));
2585		if (overage)
2586			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2587		max_overage = max(overage, max_overage);
2588	} while ((memcg = parent_mem_cgroup(memcg)) &&
2589		 !mem_cgroup_is_root(memcg));
2590
2591	return max_overage;
2592}
2593
2594/*
2595 * Get the number of jiffies that we should penalise a mischievous cgroup which
2596 * is exceeding its memory.high by checking both it and its ancestors.
2597 */
2598static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2599					  unsigned int nr_pages,
2600					  u64 max_overage)
2601{
2602	unsigned long penalty_jiffies;
2603
2604	if (!max_overage)
2605		return 0;
2606
2607	/*
2608	 * We use overage compared to memory.high to calculate the number of
2609	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2610	 * fairly lenient on small overages, and increasingly harsh when the
2611	 * memcg in question makes it clear that it has no intention of stopping
2612	 * its crazy behaviour, so we exponentially increase the delay based on
2613	 * overage amount.
2614	 */
2615	penalty_jiffies = max_overage * max_overage * HZ;
2616	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2617	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2618
2619	/*
2620	 * Factor in the task's own contribution to the overage, such that four
2621	 * N-sized allocations are throttled approximately the same as one
2622	 * 4N-sized allocation.
2623	 *
2624	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2625	 * larger the current charge batch is than that.
2626	 */
2627	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2628}
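
/*
 * Worked example matching the 104M row of the table further up, with
 * high = 100M and HZ = 1000:
 *
 *	max_overage     = (4M << 20) / 100M                    =~ 41943
 *	penalty_jiffies = (41943 * 41943 * 1000) >> 20 >> 14   =~ 102
 *
 * i.e. roughly 102ms per MEMCG_CHARGE_BATCH worth of charges, then scaled
 * by nr_pages / MEMCG_CHARGE_BATCH for the actual allocation size.
 */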
2629
2630/*
2631 * Reclaims memory over the high limit. Called directly from
2632 * try_charge() (context permitting), as well as from the userland
2633 * return path where reclaim is always able to block.
2634 */
2635void mem_cgroup_handle_over_high(gfp_t gfp_mask)
2636{
2637	unsigned long penalty_jiffies;
2638	unsigned long pflags;
2639	unsigned long nr_reclaimed;
2640	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2641	int nr_retries = MAX_RECLAIM_RETRIES;
2642	struct mem_cgroup *memcg;
2643	bool in_retry = false;
2644
2645	if (likely(!nr_pages))
2646		return;
2647
2648	memcg = get_mem_cgroup_from_mm(current->mm);
2649	current->memcg_nr_pages_over_high = 0;
2650
2651retry_reclaim:
2652	/*
2653	 * Bail if the task is already exiting. Unlike memory.max,
2654	 * memory.high enforcement isn't as strict, and there is no
2655	 * OOM killer involved, which means the excess could already
2656	 * be much bigger (and still growing) than it could for
2657	 * memory.max; the dying task could get stuck in fruitless
2658	 * reclaim for a long time, which isn't desirable.
2659	 */
2660	if (task_is_dying())
2661		goto out;
2662
2663	/*
2664	 * The allocating task should reclaim at least the batch size, but for
2665	 * subsequent retries we only want to do what's necessary to prevent oom
2666	 * or breaching resource isolation.
2667	 *
2668	 * This is distinct from memory.max or page allocator behaviour because
2669	 * memory.high is currently batched, whereas memory.max and the page
2670	 * allocator run every time an allocation is made.
2671	 */
2672	nr_reclaimed = reclaim_high(memcg,
2673				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2674				    gfp_mask);
2675
2676	/*
2677	 * memory.high is breached and reclaim is unable to keep up. Throttle
2678	 * allocators proactively to slow down excessive growth.
2679	 */
2680	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2681					       mem_find_max_overage(memcg));
2682
2683	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2684						swap_find_max_overage(memcg));
2685
2686	/*
2687	 * Clamp the max delay per usermode return so as to still keep the
2688	 * application moving forwards and also permit diagnostics, albeit
2689	 * extremely slowly.
2690	 */
2691	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2692
2693	/*
2694	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2695	 * that it's not even worth doing, in an attempt to be nice to those who
2696	 * go only a small amount over their memory.high value and maybe haven't
2697	 * been aggressively reclaimed enough yet.
2698	 */
2699	if (penalty_jiffies <= HZ / 100)
2700		goto out;
2701
2702	/*
2703	 * If reclaim is making forward progress but we're still over
2704	 * memory.high, we want to encourage that rather than doing allocator
2705	 * throttling.
2706	 */
2707	if (nr_reclaimed || nr_retries--) {
2708		in_retry = true;
2709		goto retry_reclaim;
2710	}
2711
2712	/*
2713	 * Reclaim didn't manage to push usage below the limit, slow
2714	 * this allocating task down.
2715	 *
2716	 * If we exit early, we're guaranteed to die (since
2717	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2718	 * need to account for any ill-begotten jiffies to pay them off later.
2719	 */
2720	psi_memstall_enter(&pflags);
2721	schedule_timeout_killable(penalty_jiffies);
2722	psi_memstall_leave(&pflags);
2723
2724out:
2725	css_put(&memcg->css);
2726}
2727
2728static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2729			unsigned int nr_pages)
2730{
2731	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2732	int nr_retries = MAX_RECLAIM_RETRIES;
2733	struct mem_cgroup *mem_over_limit;
2734	struct page_counter *counter;
2735	unsigned long nr_reclaimed;
2736	bool passed_oom = false;
2737	unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2738	bool drained = false;
2739	bool raised_max_event = false;
2740	unsigned long pflags;
2741
2742retry:
2743	if (consume_stock(memcg, nr_pages))
2744		return 0;
2745
2746	if (!do_memsw_account() ||
2747	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2748		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2749			goto done_restock;
2750		if (do_memsw_account())
2751			page_counter_uncharge(&memcg->memsw, batch);
2752		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2753	} else {
2754		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2755		reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2756	}
2757
2758	if (batch > nr_pages) {
2759		batch = nr_pages;
2760		goto retry;
2761	}
2762
2763	/*
2764	 * Prevent unbounded recursion when reclaim operations need to
2765	 * allocate memory. This might exceed the limits temporarily,
2766	 * but we prefer facilitating memory reclaim and getting back
2767	 * under the limit over triggering OOM kills in these cases.
2768	 */
2769	if (unlikely(current->flags & PF_MEMALLOC))
2770		goto force;
2771
2772	if (unlikely(task_in_memcg_oom(current)))
2773		goto nomem;
2774
2775	if (!gfpflags_allow_blocking(gfp_mask))
2776		goto nomem;
2777
2778	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2779	raised_max_event = true;
2780
2781	psi_memstall_enter(&pflags);
2782	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2783						    gfp_mask, reclaim_options);
2784	psi_memstall_leave(&pflags);
2785
2786	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2787		goto retry;
2788
2789	if (!drained) {
2790		drain_all_stock(mem_over_limit);
2791		drained = true;
2792		goto retry;
2793	}
2794
2795	if (gfp_mask & __GFP_NORETRY)
2796		goto nomem;
2797	/*
2798	 * Even though the limit is exceeded at this point, reclaim
2799	 * may have been able to free some pages.  Retry the charge
2800	 * before killing the task.
2801	 *
2802	 * Only for regular pages, though: huge pages are rather
2803	 * unlikely to succeed so close to the limit, and we fall back
2804	 * to regular pages anyway in case of failure.
2805	 */
2806	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2807		goto retry;
2808	/*
2809	 * During task move, charges can be double-counted. So it's
2810	 * better to wait until the end of task_move if one is in progress.
2811	 */
2812	if (mem_cgroup_wait_acct_move(mem_over_limit))
2813		goto retry;
2814
2815	if (nr_retries--)
2816		goto retry;
2817
2818	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2819		goto nomem;
2820
2821	/* Avoid endless loop for tasks bypassed by the oom killer */
2822	if (passed_oom && task_is_dying())
2823		goto nomem;
2824
2825	/*
2826	 * keep retrying as long as the memcg oom killer is able to make
2827	 * a forward progress or bypass the charge if the oom killer
2828	 * couldn't make any progress.
2829	 */
2830	if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2831			   get_order(nr_pages * PAGE_SIZE))) {
2832		passed_oom = true;
2833		nr_retries = MAX_RECLAIM_RETRIES;
2834		goto retry;
2835	}
2836nomem:
2837	/*
2838	 * Memcg doesn't have a dedicated reserve for atomic
2839	 * allocations. But like the global atomic pool, we need to
2840	 * put the burden of reclaim on regular allocation requests
2841	 * and let these go through as privileged allocations.
2842	 */
2843	if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2844		return -ENOMEM;
2845force:
2846	/*
2847	 * If the allocation has to be enforced, don't forget to raise
2848	 * a MEMCG_MAX event.
2849	 */
2850	if (!raised_max_event)
2851		memcg_memory_event(mem_over_limit, MEMCG_MAX);
2852
2853	/*
2854	 * The allocation either can't fail or will lead to more memory
2855	 * being freed very soon.  Allow memory usage to go over the limit
2856	 * temporarily by force charging it.
2857	 */
2858	page_counter_charge(&memcg->memory, nr_pages);
2859	if (do_memsw_account())
2860		page_counter_charge(&memcg->memsw, nr_pages);
2861
2862	return 0;
2863
2864done_restock:
2865	if (batch > nr_pages)
2866		refill_stock(memcg, batch - nr_pages);
2867
2868	/*
2869	 * If the hierarchy is above the normal consumption range, schedule
2870	 * reclaim on returning to userland.  We can perform reclaim here
2871	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2872	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2873	 * not recorded as it most likely matches current's and won't
2874	 * change in the meantime.  As high limit is checked again before
2875	 * reclaim, the cost of mismatch is negligible.
2876	 */
2877	do {
2878		bool mem_high, swap_high;
2879
2880		mem_high = page_counter_read(&memcg->memory) >
2881			READ_ONCE(memcg->memory.high);
2882		swap_high = page_counter_read(&memcg->swap) >
2883			READ_ONCE(memcg->swap.high);
2884
2885		/* Don't bother a random interrupted task */
2886		if (!in_task()) {
2887			if (mem_high) {
2888				schedule_work(&memcg->high_work);
2889				break;
2890			}
2891			continue;
2892		}
2893
2894		if (mem_high || swap_high) {
2895			/*
2896			 * The allocating tasks in this cgroup will need to do
2897			 * reclaim or be throttled to prevent further growth
2898			 * of the memory or swap footprints.
2899			 *
2900			 * Target some best-effort fairness between the tasks,
2901			 * and distribute reclaim work and delay penalties
2902			 * based on how much each task is actually allocating.
2903			 */
2904			current->memcg_nr_pages_over_high += batch;
2905			set_notify_resume(current);
2906			break;
2907		}
2908	} while ((memcg = parent_mem_cgroup(memcg)));
2909
2910	/*
2911	 * Reclaim is set up above to be called from the userland
2912	 * return path. But also attempt synchronous reclaim to avoid
2913	 * excessive overrun while the task is still inside the
2914	 * kernel. If this is successful, the return path will see it
2915	 * when it rechecks the overage and simply bail out.
2916	 */
2917	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2918	    !(current->flags & PF_MEMALLOC) &&
2919	    gfpflags_allow_blocking(gfp_mask))
2920		mem_cgroup_handle_over_high(gfp_mask);
2921	return 0;
2922}
2923
2924static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2925			     unsigned int nr_pages)
2926{
2927	if (mem_cgroup_is_root(memcg))
2928		return 0;
2929
2930	return try_charge_memcg(memcg, gfp_mask, nr_pages);
2931}
2932
2933/**
2934 * mem_cgroup_cancel_charge() - cancel an uncommitted try_charge() call.
2935 * @memcg: memcg previously charged.
2936 * @nr_pages: number of pages previously charged.
2937 */
2938void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2939{
2940	if (mem_cgroup_is_root(memcg))
2941		return;
2942
2943	page_counter_uncharge(&memcg->memory, nr_pages);
2944	if (do_memsw_account())
2945		page_counter_uncharge(&memcg->memsw, nr_pages);
2946}
2947
2948static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2949{
2950	VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
2951	/*
2952	 * Any of the following ensures page's memcg stability:
2953	 *
2954	 * - the page lock
2955	 * - LRU isolation
2956	 * - folio_memcg_lock()
2957	 * - exclusive reference
2958	 * - mem_cgroup_trylock_pages()
2959	 */
2960	folio->memcg_data = (unsigned long)memcg;
2961}
2962
2963/**
2964 * mem_cgroup_commit_charge - commit a previously successful try_charge().
2965 * @folio: folio to commit the charge to.
2966 * @memcg: memcg previously charged.
2967 */
2968void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2969{
2970	css_get(&memcg->css);
2971	commit_charge(folio, memcg);
2972
2973	local_irq_disable();
2974	mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
2975	memcg_check_events(memcg, folio_nid(folio));
2976	local_irq_enable();
2977}
2978
2979#ifdef CONFIG_MEMCG_KMEM
2980/*
2981 * The allocated objcg pointers array is not accounted directly.
2982 * Moreover, it should not come from a DMA buffer and is not readily
2983 * reclaimable. So those GFP bits should be masked off.
2984 */
2985#define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
2986				 __GFP_ACCOUNT | __GFP_NOFAIL)
2987
2988/*
2989 * mod_objcg_mlstate() may be called with irq enabled, so
2990 * mod_memcg_lruvec_state() should be used.
2991 */
2992static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2993				     struct pglist_data *pgdat,
2994				     enum node_stat_item idx, int nr)
2995{
2996	struct mem_cgroup *memcg;
2997	struct lruvec *lruvec;
2998
2999	rcu_read_lock();
3000	memcg = obj_cgroup_memcg(objcg);
3001	lruvec = mem_cgroup_lruvec(memcg, pgdat);
3002	mod_memcg_lruvec_state(lruvec, idx, nr);
3003	rcu_read_unlock();
3004}
3005
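/*
 * Allocate and install the objcg pointer array for a slab, one entry per
 * object.  If another CPU raced and installed a vector first, the existing
 * one is kept and the freshly allocated one is freed.
 */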
3006int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
3007				 gfp_t gfp, bool new_slab)
3008{
3009	unsigned int objects = objs_per_slab(s, slab);
3010	unsigned long memcg_data;
3011	void *vec;
3012
3013	gfp &= ~OBJCGS_CLEAR_MASK;
3014	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
3015			   slab_nid(slab));
3016	if (!vec)
3017		return -ENOMEM;
3018
3019	memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
3020	if (new_slab) {
3021		/*
3022		 * If the slab is brand new and nobody can yet access its
3023		 * memcg_data, no synchronization is required and memcg_data can
3024		 * be simply assigned.
3025		 */
3026		slab->memcg_data = memcg_data;
3027	} else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) {
3028		/*
3029		 * If the slab is already in use, somebody can allocate and
3030		 * assign obj_cgroups in parallel. In this case the existing
3031		 * objcg vector should be reused.
3032		 */
3033		kfree(vec);
3034		return 0;
3035	}
3036
3037	kmemleak_not_leak(vec);
3038	return 0;
3039}
3040
3041static __always_inline
3042struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
3043{
3044	/*
3045	 * Slab objects are accounted individually, not per-page.
3046	 * Memcg membership data for each individual object is saved in
3047	 * slab->memcg_data.
3048	 */
3049	if (folio_test_slab(folio)) {
3050		struct obj_cgroup **objcgs;
3051		struct slab *slab;
3052		unsigned int off;
3053
3054		slab = folio_slab(folio);
3055		objcgs = slab_objcgs(slab);
3056		if (!objcgs)
3057			return NULL;
3058
3059		off = obj_to_index(slab->slab_cache, slab, p);
3060		if (objcgs[off])
3061			return obj_cgroup_memcg(objcgs[off]);
3062
3063		return NULL;
3064	}
3065
3066	/*
3067	 * folio_memcg_check() is used here, because in theory we can encounter
3068	 * a folio where the slab flag has been cleared already, but
3069	 * slab->memcg_data has not been freed yet.
3070	 * folio_memcg_check() will guarantee that a proper memory
3071	 * cgroup pointer or NULL will be returned.
3072	 */
3073	return folio_memcg_check(folio);
3074}
3075
3076/*
3077 * Returns a pointer to the memory cgroup to which the kernel object is charged.
3078 *
3079 * A passed kernel object can be a slab object, vmalloc object or a generic
3080 * kernel page, so different mechanisms for getting the memory cgroup pointer
3081 * should be used.
3082 *
3083 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
3084 * cannot know for sure how the kernel object is implemented.
3085 * mem_cgroup_from_obj() can be safely used in such cases.
3086 *
3087 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
3088 * cgroup_mutex, etc.
3089 */
3090struct mem_cgroup *mem_cgroup_from_obj(void *p)
3091{
3092	struct folio *folio;
 
3093
3094	if (mem_cgroup_disabled())
3095		return NULL;
3096
3097	if (unlikely(is_vmalloc_addr(p)))
3098		folio = page_folio(vmalloc_to_page(p));
3099	else
3100		folio = virt_to_folio(p);
3101
3102	return mem_cgroup_from_obj_folio(folio, p);
3103}
3104
3105/*
3106 * Returns a pointer to the memory cgroup to which the kernel object is charged.
3107 * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects
3108 * allocated using vmalloc().
3109 *
3110 * A passed kernel object must be a slab object or a generic kernel page.
3111 *
3112 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
3113 * cgroup_mutex, etc.
3114 */
3115struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
3116{
3117	if (mem_cgroup_disabled())
3118		return NULL;
3119
3120	return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
3121}
3122
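/*
 * Walk up the hierarchy from @memcg and return the first objcg that can be
 * pinned with obj_cgroup_tryget(), or NULL if none is found.  Callers hold
 * rcu_read_lock() or otherwise keep @memcg stable.
 */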
3123static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
3124{
3125	struct obj_cgroup *objcg = NULL;
3126
3127	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
3128		objcg = rcu_dereference(memcg->objcg);
3129		if (likely(objcg && obj_cgroup_tryget(objcg)))
3130			break;
3131		objcg = NULL;
3132	}
3133	return objcg;
3134}
3135
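/*
 * Re-resolve current->objcg after the CURRENT_OBJCG_UPDATE_FLAG has been set
 * (e.g. after the task was moved to a different memcg): drop the stale
 * reference, look up the new objcg and publish it with try_cmpxchg(),
 * repeating the whole procedure if the flag was set again concurrently.
 */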
3136static struct obj_cgroup *current_objcg_update(void)
3137{
3138	struct mem_cgroup *memcg;
3139	struct obj_cgroup *old, *objcg = NULL;
3140
3141	do {
3142		/* Atomically drop the update bit. */
3143		old = xchg(&current->objcg, NULL);
3144		if (old) {
3145			old = (struct obj_cgroup *)
3146				((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
3147			if (old)
3148				obj_cgroup_put(old);
3149
3150			old = NULL;
3151		}
3152
3153		/* If new objcg is NULL, no reason for the second atomic update. */
3154		if (!current->mm || (current->flags & PF_KTHREAD))
3155			return NULL;
3156
3157		/*
3158		 * Release the objcg pointer from the previous iteration,
3159		 * if try_cmpxchg() below fails.
3160		 */
3161		if (unlikely(objcg)) {
3162			obj_cgroup_put(objcg);
3163			objcg = NULL;
3164		}
3165
3166		/*
3167		 * Obtain the new objcg pointer. The current task can be
3168		 * asynchronously moved to another memcg and the previous
3169		 * memcg can be offlined. So let's get the memcg pointer
3170		 * and try to get a reference to the objcg under an rcu read lock.
3171		 */
3172
3173		rcu_read_lock();
3174		memcg = mem_cgroup_from_task(current);
3175		objcg = __get_obj_cgroup_from_memcg(memcg);
3176		rcu_read_unlock();
3177
3178		/*
3179		 * Try to set up a new objcg pointer atomically. If it
3180		 * fails, it means the update flag was set concurrently, so
3181		 * the whole procedure should be repeated.
3182		 */
3183	} while (!try_cmpxchg(&current->objcg, &old, objcg));
3184
3185	return objcg;
3186}
3187
3188__always_inline struct obj_cgroup *current_obj_cgroup(void)
3189{
3190	struct mem_cgroup *memcg;
3191	struct obj_cgroup *objcg;
3192
3193	if (in_task()) {
3194		memcg = current->active_memcg;
3195		if (unlikely(memcg))
3196			goto from_memcg;
3197
3198		objcg = READ_ONCE(current->objcg);
3199		if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
3200			objcg = current_objcg_update();
3201		/*
3202		 * The objcg reference is kept by the task, so it's safe
3203		 * for the current task to use the objcg.
3204		 */
3205		return objcg;
3206	}
3207
3208	memcg = this_cpu_read(int_active_memcg);
3209	if (unlikely(memcg))
3210		goto from_memcg;
3211
3212	return NULL;
3213
3214from_memcg:
3215	objcg = NULL;
3216	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
3217		/*
3218		 * Memcg pointer is protected by scope (see set_active_memcg())
3219		 * and is pinning the corresponding objcg, so objcg can't go
3220		 * away and can be used within the scope without any additional
3221		 * protection.
3222		 */
3223		objcg = rcu_dereference_check(memcg->objcg, 1);
3224		if (likely(objcg))
3225			break;
3226	}
3227
3228	return objcg;
3229}
3230
3231struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
3232{
3233	struct obj_cgroup *objcg;
3234
3235	if (!memcg_kmem_online())
3236		return NULL;
3237
3238	if (folio_memcg_kmem(folio)) {
3239		objcg = __folio_objcg(folio);
3240		obj_cgroup_get(objcg);
3241	} else {
3242		struct mem_cgroup *memcg;
3243
3244		rcu_read_lock();
3245		memcg = __folio_memcg(folio);
3246		if (memcg)
3247			objcg = __get_obj_cgroup_from_memcg(memcg);
3248		else
3249			objcg = NULL;
3250		rcu_read_unlock();
3251	}
3252	return objcg;
3253}
3254
3255static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
3256{
3257	mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
3258	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
3259		if (nr_pages > 0)
3260			page_counter_charge(&memcg->kmem, nr_pages);
3261		else
3262			page_counter_uncharge(&memcg->kmem, -nr_pages);
3263	}
3264}
3265
3266
3267/*
3268 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
3269 * @objcg: object cgroup to uncharge
3270 * @nr_pages: number of pages to uncharge
3271 */
3272static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
3273				      unsigned int nr_pages)
3274{
3275	struct mem_cgroup *memcg;
3276
3277	memcg = get_mem_cgroup_from_objcg(objcg);
3278
3279	memcg_account_kmem(memcg, -nr_pages);
3280	refill_stock(memcg, nr_pages);
 
3281
3282	css_put(&memcg->css);
3283}
3284
3285/*
3286 * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
3287 * @objcg: object cgroup to charge
3288 * @gfp: reclaim mode
3289 * @nr_pages: number of pages to charge
3290 *
3291 * Returns 0 on success, an error code on failure.
3292 */
3293static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3294				   unsigned int nr_pages)
3295{
3296	struct mem_cgroup *memcg;
3297	int ret;
3298
3299	memcg = get_mem_cgroup_from_objcg(objcg);
3300
3301	ret = try_charge_memcg(memcg, gfp, nr_pages);
3302	if (ret)
3303		goto out;
3304
3305	memcg_account_kmem(memcg, nr_pages);
3306out:
3307	css_put(&memcg->css);
3308
3309	return ret;
3310}
3311
3312/**
3313 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3314 * @page: page to charge
3315 * @gfp: reclaim mode
3316 * @order: allocation order
3317 *
3318 * Returns 0 on success, an error code on failure.
3319 */
3320int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3321{
3322	struct obj_cgroup *objcg;
3323	int ret = 0;
3324
3325	objcg = current_obj_cgroup();
3326	if (objcg) {
3327		ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
3328		if (!ret) {
3329			obj_cgroup_get(objcg);
3330			page->memcg_data = (unsigned long)objcg |
3331				MEMCG_DATA_KMEM;
3332			return 0;
3333		}
3334	}
3335	return ret;
3336}
3337
3338/**
3339 * __memcg_kmem_uncharge_page: uncharge a kmem page
3340 * @page: page to uncharge
3341 * @order: allocation order
3342 */
3343void __memcg_kmem_uncharge_page(struct page *page, int order)
3344{
3345	struct folio *folio = page_folio(page);
3346	struct obj_cgroup *objcg;
3347	unsigned int nr_pages = 1 << order;
3348
3349	if (!folio_memcg_kmem(folio))
3350		return;
3351
3352	objcg = __folio_objcg(folio);
3353	obj_cgroup_uncharge_pages(objcg, nr_pages);
3354	folio->memcg_data = 0;
3355	obj_cgroup_put(objcg);
3356}
3357
3358void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3359		     enum node_stat_item idx, int nr)
3360{
3361	struct memcg_stock_pcp *stock;
3362	struct obj_cgroup *old = NULL;
3363	unsigned long flags;
3364	int *bytes;
3365
3366	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3367	stock = this_cpu_ptr(&memcg_stock);
 
3368
3369	/*
3370	 * Save vmstat data in stock and skip vmstat array update unless
3371	 * accumulating over a page of vmstat data or when pgdat or idx
3372	 * changes.
3373	 */
3374	if (READ_ONCE(stock->cached_objcg) != objcg) {
3375		old = drain_obj_stock(stock);
3376		obj_cgroup_get(objcg);
3377		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3378				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3379		WRITE_ONCE(stock->cached_objcg, objcg);
3380		stock->cached_pgdat = pgdat;
3381	} else if (stock->cached_pgdat != pgdat) {
3382		/* Flush the existing cached vmstat data */
3383		struct pglist_data *oldpg = stock->cached_pgdat;
3384
3385		if (stock->nr_slab_reclaimable_b) {
3386			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3387					  stock->nr_slab_reclaimable_b);
3388			stock->nr_slab_reclaimable_b = 0;
3389		}
3390		if (stock->nr_slab_unreclaimable_b) {
3391			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3392					  stock->nr_slab_unreclaimable_b);
3393			stock->nr_slab_unreclaimable_b = 0;
3394		}
3395		stock->cached_pgdat = pgdat;
3396	}
3397
3398	bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3399					       : &stock->nr_slab_unreclaimable_b;
3400	/*
3401	 * Even for a large object >= PAGE_SIZE, the vmstat data will still be
3402	 * cached locally at least once before pushing it out.
3403	 */
3404	if (!*bytes) {
3405		*bytes = nr;
3406		nr = 0;
3407	} else {
3408		*bytes += nr;
3409		if (abs(*bytes) > PAGE_SIZE) {
3410			nr = *bytes;
3411			*bytes = 0;
3412		} else {
3413			nr = 0;
3414		}
3415	}
3416	if (nr)
3417		mod_objcg_mlstate(objcg, pgdat, idx, nr);
3418
3419	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3420	if (old)
3421		obj_cgroup_put(old);
3422}
 
3423
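/*
 * Try to satisfy an object charge of @nr_bytes from the per-cpu byte stock.
 * Returns true if the cached objcg matches and enough bytes are stocked;
 * otherwise the caller falls back to the slower page-charge path.
 */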
3424static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3425{
3426	struct memcg_stock_pcp *stock;
3427	unsigned long flags;
3428	bool ret = false;
3429
3430	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3431
3432	stock = this_cpu_ptr(&memcg_stock);
3433	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
3434		stock->nr_bytes -= nr_bytes;
3435		ret = true;
3436	}
3437
3438	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3439
3440	return ret;
3441}
3442
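/*
 * Flush the cached objcg state out of a per-cpu stock: whole pages are
 * handed back to the regular per-cpu page stock, the sub-page remainder is
 * moved to objcg->nr_charged_bytes, and cached vmstat deltas are pushed out.
 * Runs under stock_lock; the old objcg is returned so the caller can drop
 * its reference outside the lock.
 */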
3443static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
3444{
3445	struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
3446
3447	if (!old)
3448		return NULL;
3449
3450	if (stock->nr_bytes) {
3451		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3452		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3453
3454		if (nr_pages) {
3455			struct mem_cgroup *memcg;
3456
3457			memcg = get_mem_cgroup_from_objcg(old);
3458
3459			memcg_account_kmem(memcg, -nr_pages);
3460			__refill_stock(memcg, nr_pages);
3461
3462			css_put(&memcg->css);
3463		}
3464
3465		/*
3466		 * The leftover is flushed to the centralized per-memcg value.
3467		 * On the next attempt to refill obj stock it will be moved
3468		 * to a per-cpu stock (probably on another CPU), see
3469		 * refill_obj_stock().
3470		 *
3471		 * How often it's flushed is a trade-off between the memory
3472		 * limit enforcement accuracy and potential CPU contention,
3473		 * so it might be changed in the future.
3474		 */
3475		atomic_add(nr_bytes, &old->nr_charged_bytes);
3476		stock->nr_bytes = 0;
3477	}
3478
3479	/*
3480	 * Flush the vmstat data in current stock
3481	 */
3482	if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3483		if (stock->nr_slab_reclaimable_b) {
3484			mod_objcg_mlstate(old, stock->cached_pgdat,
3485					  NR_SLAB_RECLAIMABLE_B,
3486					  stock->nr_slab_reclaimable_b);
3487			stock->nr_slab_reclaimable_b = 0;
3488		}
3489		if (stock->nr_slab_unreclaimable_b) {
3490			mod_objcg_mlstate(old, stock->cached_pgdat,
3491					  NR_SLAB_UNRECLAIMABLE_B,
3492					  stock->nr_slab_unreclaimable_b);
3493			stock->nr_slab_unreclaimable_b = 0;
3494		}
3495		stock->cached_pgdat = NULL;
3496	}
3497
3498	WRITE_ONCE(stock->cached_objcg, NULL);
3499	/*
3500	 * The `old' objcg needs to be released by the caller via
3501	 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
3502	 */
3503	return old;
3504}
3505
3506static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3507				     struct mem_cgroup *root_memcg)
3508{
3509	struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
3510	struct mem_cgroup *memcg;
3511
3512	if (objcg) {
3513		memcg = obj_cgroup_memcg(objcg);
3514		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3515			return true;
3516	}
3517
3518	return false;
3519}
3520
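/*
 * Return @nr_bytes of object charge to the per-cpu byte stock.  When the
 * stock grows beyond a page and @allow_uncharge is set, whole pages are
 * uncharged back to the memcg counters.
 */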
3521static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3522			     bool allow_uncharge)
3523{
3524	struct memcg_stock_pcp *stock;
3525	struct obj_cgroup *old = NULL;
3526	unsigned long flags;
3527	unsigned int nr_pages = 0;
3528
3529	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3530
3531	stock = this_cpu_ptr(&memcg_stock);
3532	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
3533		old = drain_obj_stock(stock);
3534		obj_cgroup_get(objcg);
3535		WRITE_ONCE(stock->cached_objcg, objcg);
3536		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3537				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3538		allow_uncharge = true;	/* Allow uncharge when objcg changes */
3539	}
3540	stock->nr_bytes += nr_bytes;
3541
3542	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3543		nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3544		stock->nr_bytes &= (PAGE_SIZE - 1);
3545	}
3546
3547	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3548	if (old)
3549		obj_cgroup_put(old);
3550
3551	if (nr_pages)
3552		obj_cgroup_uncharge_pages(objcg, nr_pages);
3553}
3554
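/*
 * Charge @size bytes of kernel objects to @objcg.  Sub-page amounts are
 * batched through the per-cpu byte stock: e.g. the first 700-byte charge
 * typically charges a full page and banks the remainder in the stock, and
 * subsequent small charges are then served from the stock until it runs dry.
 */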
3555int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3556{
3557	unsigned int nr_pages, nr_bytes;
3558	int ret;
3559
3560	if (consume_obj_stock(objcg, size))
3561		return 0;
3562
3563	/*
3564	 * In theory, objcg->nr_charged_bytes can have enough
3565	 * pre-charged bytes to satisfy the allocation. However,
3566	 * flushing objcg->nr_charged_bytes requires two atomic
3567	 * operations, and objcg->nr_charged_bytes can't be big.
3568	 * The shared objcg->nr_charged_bytes can also become a
3569	 * performance bottleneck if all tasks of the same memcg are
3570	 * trying to update it. So it's better to ignore it and try
3571	 * grab some new pages. The stock's nr_bytes will be flushed to
3572	 * objcg->nr_charged_bytes later on when objcg changes.
3573	 *
3574	 * The stock's nr_bytes may contain enough pre-charged bytes
3575	 * to allow one less page from being charged, but we can't rely
3576	 * on the pre-charged bytes not being changed outside of
3577	 * consume_obj_stock() or refill_obj_stock(). So ignore those
3578	 * pre-charged bytes as well when charging pages. To avoid a
3579	 * page uncharge right after a page charge, we set the
3580	 * allow_uncharge flag to false when calling refill_obj_stock()
3581	 * to temporarily allow the pre-charged bytes to exceed the page
3582	 * size limit. The maximum reachable value of the pre-charged
3583	 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3584	 * race.
3585	 */
3586	nr_pages = size >> PAGE_SHIFT;
3587	nr_bytes = size & (PAGE_SIZE - 1);
3588
3589	if (nr_bytes)
3590		nr_pages += 1;
3591
3592	ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3593	if (!ret && nr_bytes)
3594		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3595
3596	return ret;
3597}
3598
3599void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3600{
3601	refill_obj_stock(objcg, size, true);
3602}
3603
3604#endif /* CONFIG_MEMCG_KMEM */
3605
3606/*
3607 * Because page_memcg(head) is not set on tails, set it now.
3608 */
3609void split_page_memcg(struct page *head, unsigned int nr)
3610{
3611	struct folio *folio = page_folio(head);
3612	struct mem_cgroup *memcg = folio_memcg(folio);
3613	int i;
3614
3615	if (mem_cgroup_disabled() || !memcg)
3616		return;
3617
3618	for (i = 1; i < nr; i++)
3619		folio_page(folio, i)->memcg_data = folio->memcg_data;
3620
3621	if (folio_memcg_kmem(folio))
3622		obj_cgroup_get_many(__folio_objcg(folio), nr - 1);
3623	else
3624		css_get_many(&memcg->css, nr - 1);
3625}
 
3626
3627#ifdef CONFIG_SWAP
3628/**
3629 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3630 * @entry: swap entry to be moved
3631 * @from:  mem_cgroup which the entry is moved from
3632 * @to:  mem_cgroup which the entry is moved to
3633 *
3634 * It succeeds only when the swap_cgroup's record for this entry is the same
3635 * as the mem_cgroup's id of @from.
3636 *
3637 * Returns 0 on success, -EINVAL on failure.
3638 *
3639 * The caller must have charged to @to, IOW, called page_counter_charge() for
3640 * both res and memsw, and called css_get().
3641 */
3642static int mem_cgroup_move_swap_account(swp_entry_t entry,
3643				struct mem_cgroup *from, struct mem_cgroup *to)
3644{
3645	unsigned short old_id, new_id;
3646
3647	old_id = mem_cgroup_id(from);
3648	new_id = mem_cgroup_id(to);
3649
3650	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3651		mod_memcg_state(from, MEMCG_SWAP, -1);
3652		mod_memcg_state(to, MEMCG_SWAP, 1);
3653		return 0;
3654	}
3655	return -EINVAL;
3656}
3657#else
3658static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3659				struct mem_cgroup *from, struct mem_cgroup *to)
3660{
3661	return -EINVAL;
3662}
3663#endif
3664
3665static DEFINE_MUTEX(memcg_max_mutex);
3666
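/*
 * Set a new hard limit (memory or memsw).  If current usage is above the new
 * limit, drain the per-cpu stocks and reclaim in a loop until the limit can
 * be installed, the task is signalled, or reclaim stops making progress
 * (-EBUSY).
 */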
3667static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3668				 unsigned long max, bool memsw)
3669{
3670	bool enlarge = false;
3671	bool drained = false;
3672	int ret;
3673	bool limits_invariant;
3674	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3675
3676	do {
3677		if (signal_pending(current)) {
3678			ret = -EINTR;
3679			break;
3680		}
3681
3682		mutex_lock(&memcg_max_mutex);
3683		/*
3684		 * Make sure that the new limit (memsw or memory limit) doesn't
3685		 * break our basic invariant rule memory.max <= memsw.max.
3686		 */
3687		limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3688					   max <= memcg->memsw.max;
3689		if (!limits_invariant) {
3690			mutex_unlock(&memcg_max_mutex);
3691			ret = -EINVAL;
3692			break;
3693		}
3694		if (max > counter->max)
3695			enlarge = true;
3696		ret = page_counter_set_max(counter, max);
3697		mutex_unlock(&memcg_max_mutex);
3698
3699		if (!ret)
3700			break;
3701
3702		if (!drained) {
3703			drain_all_stock(memcg);
3704			drained = true;
3705			continue;
3706		}
3707
3708		if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3709					memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP)) {
3710			ret = -EBUSY;
3711			break;
3712		}
3713	} while (true);
3714
3715	if (!ret && enlarge)
3716		memcg_oom_recover(memcg);
3717
3718	return ret;
3719}
3720
3721unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3722					    gfp_t gfp_mask,
3723					    unsigned long *total_scanned)
3724{
3725	unsigned long nr_reclaimed = 0;
3726	struct mem_cgroup_per_node *mz, *next_mz = NULL;
3727	unsigned long reclaimed;
3728	int loop = 0;
3729	struct mem_cgroup_tree_per_node *mctz;
3730	unsigned long excess;
3731
3732	if (lru_gen_enabled())
3733		return 0;
3734
3735	if (order > 0)
3736		return 0;
3737
3738	mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];
3739
3740	/*
3741	 * Do not even bother to check the largest node if the root
3742	 * is empty. Do it lockless to prevent lock bouncing. Races
3743	 * are acceptable as soft limit is best effort anyway.
3744	 */
3745	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3746		return 0;
3747
3748	/*
3749	 * This loop can run for a while, especially if mem_cgroups continuously
3750	 * keep exceeding their soft limit and putting the system under
3751	 * pressure.
3752	 */
3753	do {
3754		if (next_mz)
3755			mz = next_mz;
3756		else
3757			mz = mem_cgroup_largest_soft_limit_node(mctz);
3758		if (!mz)
3759			break;
3760
 
3761		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3762						    gfp_mask, total_scanned);
3763		nr_reclaimed += reclaimed;
 
3764		spin_lock_irq(&mctz->lock);
 
3765
3766		/*
3767		 * If we failed to reclaim anything from this memory cgroup
3768		 * it is time to move on to the next cgroup
3769		 */
3770		next_mz = NULL;
3771		if (!reclaimed)
3772			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3773
3774		excess = soft_limit_excess(mz->memcg);
3775		/*
3776		 * One school of thought says that we should not add
3777		 * back the node to the tree if reclaim returns 0.
3778		 * But our reclaim could return 0 simply because, due
3779		 * to priority, we are exposing a smaller subset of
3780		 * memory to reclaim from. Consider this a longer-term
3781		 * TODO.
3782		 */
3783		/* If excess == 0, no tree ops */
3784		__mem_cgroup_insert_exceeded(mz, mctz, excess);
3785		spin_unlock_irq(&mctz->lock);
3786		css_put(&mz->memcg->css);
3787		loop++;
3788		/*
3789		 * Could not reclaim anything and there are no more
3790		 * mem cgroups to try or we seem to be looping without
3791		 * reclaiming anything.
3792		 */
3793		if (!nr_reclaimed &&
3794			(next_mz == NULL ||
3795			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3796			break;
3797	} while (!nr_reclaimed);
3798	if (next_mz)
3799		css_put(&next_mz->memcg->css);
3800	return nr_reclaimed;
3801}
3802
3803/*
3804 * Reclaims as many pages from the given memcg as possible.
3805 *
3806 * Caller is responsible for holding css reference for memcg.
3807 */
3808static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3809{
3810	int nr_retries = MAX_RECLAIM_RETRIES;
3811
3812	/* we call try-to-free pages to make this cgroup empty */
3813	lru_add_drain_all();
3814
3815	drain_all_stock(memcg);
3816
3817	/* try to free all pages in this cgroup */
3818	while (nr_retries && page_counter_read(&memcg->memory)) {
3819		if (signal_pending(current))
3820			return -EINTR;
3821
3822		if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3823						  MEMCG_RECLAIM_MAY_SWAP))
3824			nr_retries--;
3825	}
3826
3827	return 0;
3828}
3829
3830static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3831					    char *buf, size_t nbytes,
3832					    loff_t off)
3833{
3834	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3835
3836	if (mem_cgroup_is_root(memcg))
3837		return -EINVAL;
3838	return mem_cgroup_force_empty(memcg) ?: nbytes;
3839}
3840
3841static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3842				     struct cftype *cft)
3843{
3844	return 1;
3845}
3846
3847static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3848				      struct cftype *cft, u64 val)
3849{
3850	if (val == 1)
3851		return 0;
3852
3853	pr_warn_once("Non-hierarchical mode is deprecated. "
3854		     "Please report your usecase to linux-mm@kvack.org if you "
3855		     "depend on this functionality.\n");
3856
3857	return -EINVAL;
3858}
3859
3860static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3861{
3862	unsigned long val;
3863
3864	if (mem_cgroup_is_root(memcg)) {
3865		/*
3866		 * Approximate root's usage from global state. This isn't
3867		 * perfect, but the root usage was always an approximation.
3868		 */
3869		val = global_node_page_state(NR_FILE_PAGES) +
3870			global_node_page_state(NR_ANON_MAPPED);
3871		if (swap)
3872			val += total_swap_pages - get_nr_swap_pages();
3873	} else {
3874		if (!swap)
3875			val = page_counter_read(&memcg->memory);
3876		else
3877			val = page_counter_read(&memcg->memsw);
3878	}
3879	return val;
3880}
3881
3882enum {
3883	RES_USAGE,
3884	RES_LIMIT,
3885	RES_MAX_USAGE,
3886	RES_FAILCNT,
3887	RES_SOFT_LIMIT,
3888};
3889
3890static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3891			       struct cftype *cft)
3892{
3893	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3894	struct page_counter *counter;
3895
3896	switch (MEMFILE_TYPE(cft->private)) {
3897	case _MEM:
3898		counter = &memcg->memory;
3899		break;
3900	case _MEMSWAP:
3901		counter = &memcg->memsw;
3902		break;
3903	case _KMEM:
3904		counter = &memcg->kmem;
3905		break;
3906	case _TCP:
3907		counter = &memcg->tcpmem;
3908		break;
3909	default:
3910		BUG();
3911	}
3912
3913	switch (MEMFILE_ATTR(cft->private)) {
3914	case RES_USAGE:
3915		if (counter == &memcg->memory)
3916			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3917		if (counter == &memcg->memsw)
3918			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3919		return (u64)page_counter_read(counter) * PAGE_SIZE;
3920	case RES_LIMIT:
3921		return (u64)counter->max * PAGE_SIZE;
3922	case RES_MAX_USAGE:
3923		return (u64)counter->watermark * PAGE_SIZE;
3924	case RES_FAILCNT:
3925		return counter->failcnt;
3926	case RES_SOFT_LIMIT:
3927		return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE;
3928	default:
3929		BUG();
3930	}
3931}
3932
3933/*
3934 * This function doesn't do anything useful. Its only job is to provide a read
3935 * handler for a file so that cgroup_file_mode() will add read permissions.
3936 */
3937static int mem_cgroup_dummy_seq_show(__always_unused struct seq_file *m,
3938				     __always_unused void *v)
3939{
3940	return -EINVAL;
3941}
3942
3943#ifdef CONFIG_MEMCG_KMEM
3944static int memcg_online_kmem(struct mem_cgroup *memcg)
3945{
3946	struct obj_cgroup *objcg;
3947
3948	if (mem_cgroup_kmem_disabled())
3949		return 0;
3950
3951	if (unlikely(mem_cgroup_is_root(memcg)))
3952		return 0;
3953
3954	objcg = obj_cgroup_alloc();
3955	if (!objcg)
3956		return -ENOMEM;
3957
3958	objcg->memcg = memcg;
3959	rcu_assign_pointer(memcg->objcg, objcg);
3960	obj_cgroup_get(objcg);
3961	memcg->orig_objcg = objcg;
3962
3963	static_branch_enable(&memcg_kmem_online_key);
3964
3965	memcg->kmemcg_id = memcg->id.id;
3966
3967	return 0;
3968}
3969
3970static void memcg_offline_kmem(struct mem_cgroup *memcg)
3971{
3972	struct mem_cgroup *parent;
3973
3974	if (mem_cgroup_kmem_disabled())
3975		return;
3976
3977	if (unlikely(mem_cgroup_is_root(memcg)))
3978		return;
3979
3980	parent = parent_mem_cgroup(memcg);
3981	if (!parent)
3982		parent = root_mem_cgroup;
3983
3984	memcg_reparent_objcgs(memcg, parent);
3985
3986	/*
3987	 * After we have finished memcg_reparent_objcgs(), all list_lrus
3988	 * corresponding to this cgroup are guaranteed to remain empty.
3989	 * The ordering is imposed by list_lru_node->lock taken by
3990	 * memcg_reparent_list_lrus().
3991	 */
3992	memcg_reparent_list_lrus(memcg, parent);
3993}
3994#else
3995static int memcg_online_kmem(struct mem_cgroup *memcg)
3996{
3997	return 0;
3998}
3999static void memcg_offline_kmem(struct mem_cgroup *memcg)
4000{
4001}
4002#endif /* CONFIG_MEMCG_KMEM */
4003
4004static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
4005{
4006	int ret;
4007
4008	mutex_lock(&memcg_max_mutex);
4009
4010	ret = page_counter_set_max(&memcg->tcpmem, max);
4011	if (ret)
4012		goto out;
4013
4014	if (!memcg->tcpmem_active) {
4015		/*
4016		 * The active flag needs to be written after the static_key
4017		 * update. This is what guarantees that the socket activation
4018		 * function is the last one to run. See mem_cgroup_sk_alloc()
4019		 * for details, and note that we don't mark any socket as
4020		 * belonging to this memcg until that flag is up.
4021		 *
4022		 * We need to do this, because static_keys will span multiple
4023		 * sites, but we can't control their order. If we mark a socket
4024		 * as accounted, but the accounting functions are not patched in
4025		 * yet, we'll lose accounting.
4026		 *
4027		 * We never race with the readers in mem_cgroup_sk_alloc(),
4028		 * because when this value change, the code to process it is not
4029		 * patched in yet.
4030		 */
4031		static_branch_inc(&memcg_sockets_enabled_key);
4032		memcg->tcpmem_active = true;
4033	}
4034out:
4035	mutex_unlock(&memcg_max_mutex);
4036	return ret;
4037}
4038
4039/*
4040 * The user of this function is...
4041 * RES_LIMIT.
4042 */
4043static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
4044				char *buf, size_t nbytes, loff_t off)
4045{
4046	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4047	unsigned long nr_pages;
4048	int ret;
4049
4050	buf = strstrip(buf);
4051	ret = page_counter_memparse(buf, "-1", &nr_pages);
4052	if (ret)
4053		return ret;
4054
4055	switch (MEMFILE_ATTR(of_cft(of)->private)) {
4056	case RES_LIMIT:
4057		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
4058			ret = -EINVAL;
4059			break;
4060		}
4061		switch (MEMFILE_TYPE(of_cft(of)->private)) {
4062		case _MEM:
4063			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
4064			break;
4065		case _MEMSWAP:
4066			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
4067			break;
4068		case _KMEM:
4069			pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
4070				     "Writing any value to this file has no effect. "
4071				     "Please report your usecase to linux-mm@kvack.org if you "
4072				     "depend on this functionality.\n");
4073			ret = 0;
4074			break;
4075		case _TCP:
4076			ret = memcg_update_tcp_max(memcg, nr_pages);
4077			break;
4078		}
4079		break;
4080	case RES_SOFT_LIMIT:
4081		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
4082			ret = -EOPNOTSUPP;
4083		} else {
4084			WRITE_ONCE(memcg->soft_limit, nr_pages);
4085			ret = 0;
4086		}
4087		break;
4088	}
4089	return ret ?: nbytes;
4090}
4091
4092static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
4093				size_t nbytes, loff_t off)
4094{
4095	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4096	struct page_counter *counter;
4097
4098	switch (MEMFILE_TYPE(of_cft(of)->private)) {
4099	case _MEM:
4100		counter = &memcg->memory;
4101		break;
4102	case _MEMSWAP:
4103		counter = &memcg->memsw;
4104		break;
4105	case _KMEM:
4106		counter = &memcg->kmem;
4107		break;
4108	case _TCP:
4109		counter = &memcg->tcpmem;
4110		break;
4111	default:
4112		BUG();
4113	}
4114
4115	switch (MEMFILE_ATTR(of_cft(of)->private)) {
4116	case RES_MAX_USAGE:
4117		page_counter_reset_watermark(counter);
4118		break;
4119	case RES_FAILCNT:
4120		counter->failcnt = 0;
4121		break;
4122	default:
4123		BUG();
4124	}
4125
4126	return nbytes;
4127}
4128
4129static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
4130					struct cftype *cft)
4131{
4132	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
4133}
4134
4135#ifdef CONFIG_MMU
4136static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
4137					struct cftype *cft, u64 val)
4138{
4139	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4140
4141	pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
4142		     "Please report your usecase to linux-mm@kvack.org if you "
4143		     "depend on this functionality.\n");
4144
4145	if (val & ~MOVE_MASK)
4146		return -EINVAL;
4147
4148	/*
4149	 * No kind of locking is needed in here, because ->can_attach() will
4150	 * check this value once in the beginning of the process, and then carry
4151	 * on with stale data. This means that changes to this value will only
4152	 * affect task migrations starting after the change.
4153	 */
4154	memcg->move_charge_at_immigrate = val;
4155	return 0;
4156}
4157#else
4158static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
4159					struct cftype *cft, u64 val)
4160{
4161	return -ENOSYS;
4162}
4163#endif
4164
4165#ifdef CONFIG_NUMA
4166
4167#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
4168#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
4169#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
4170
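/*
 * Sum the LRU page counts selected by @lru_mask for one node, either for
 * @memcg alone (@tree == false) or including all of its descendants
 * (@tree == true).
 */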
4171static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
4172				int nid, unsigned int lru_mask, bool tree)
4173{
4174	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4175	unsigned long nr = 0;
4176	enum lru_list lru;
4177
4178	VM_BUG_ON((unsigned)nid >= nr_node_ids);
4179
4180	for_each_lru(lru) {
4181		if (!(BIT(lru) & lru_mask))
4182			continue;
4183		if (tree)
4184			nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
4185		else
4186			nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
4187	}
4188	return nr;
4189}
4190
4191static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
4192					     unsigned int lru_mask,
4193					     bool tree)
4194{
4195	unsigned long nr = 0;
4196	enum lru_list lru;
4197
4198	for_each_lru(lru) {
4199		if (!(BIT(lru) & lru_mask))
4200			continue;
4201		if (tree)
4202			nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
4203		else
4204			nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
4205	}
4206	return nr;
4207}
4208
4209static int memcg_numa_stat_show(struct seq_file *m, void *v)
4210{
4211	struct numa_stat {
4212		const char *name;
4213		unsigned int lru_mask;
4214	};
4215
4216	static const struct numa_stat stats[] = {
4217		{ "total", LRU_ALL },
4218		{ "file", LRU_ALL_FILE },
4219		{ "anon", LRU_ALL_ANON },
4220		{ "unevictable", BIT(LRU_UNEVICTABLE) },
4221	};
4222	const struct numa_stat *stat;
4223	int nid;
4224	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4225
4226	mem_cgroup_flush_stats(memcg);
4227
4228	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4229		seq_printf(m, "%s=%lu", stat->name,
4230			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4231						   false));
4232		for_each_node_state(nid, N_MEMORY)
4233			seq_printf(m, " N%d=%lu", nid,
4234				   mem_cgroup_node_nr_lru_pages(memcg, nid,
4235							stat->lru_mask, false));
4236		seq_putc(m, '\n');
4237	}
4238
4239	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4240
4241		seq_printf(m, "hierarchical_%s=%lu", stat->name,
4242			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4243						   true));
4244		for_each_node_state(nid, N_MEMORY)
4245			seq_printf(m, " N%d=%lu", nid,
4246				   mem_cgroup_node_nr_lru_pages(memcg, nid,
4247							stat->lru_mask, true));
4248		seq_putc(m, '\n');
4249	}
4250
4251	return 0;
4252}
4253#endif /* CONFIG_NUMA */
4254
4255static const unsigned int memcg1_stats[] = {
4256	NR_FILE_PAGES,
4257	NR_ANON_MAPPED,
4258#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4259	NR_ANON_THPS,
4260#endif
4261	NR_SHMEM,
4262	NR_FILE_MAPPED,
4263	NR_FILE_DIRTY,
4264	NR_WRITEBACK,
4265	WORKINGSET_REFAULT_ANON,
4266	WORKINGSET_REFAULT_FILE,
4267#ifdef CONFIG_SWAP
4268	MEMCG_SWAP,
4269	NR_SWAPCACHE,
4270#endif
4271};
4272
4273static const char *const memcg1_stat_names[] = {
4274	"cache",
4275	"rss",
4276#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4277	"rss_huge",
4278#endif
4279	"shmem",
4280	"mapped_file",
4281	"dirty",
4282	"writeback",
4283	"workingset_refault_anon",
4284	"workingset_refault_file",
4285#ifdef CONFIG_SWAP
4286	"swap",
4287	"swapcached",
4288#endif
4289};
4290
4291/* Universal VM events cgroup1 shows, original sort order */
4292static const unsigned int memcg1_events[] = {
4293	PGPGIN,
4294	PGPGOUT,
4295	PGFAULT,
4296	PGMAJFAULT,
4297};
4298
4299static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
4300{
4301	unsigned long memory, memsw;
4302	struct mem_cgroup *mi;
4303	unsigned int i;
4304
4305	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4306
4307	mem_cgroup_flush_stats(memcg);
4308
4309	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4310		unsigned long nr;
4311
4312		nr = memcg_page_state_local_output(memcg, memcg1_stats[i]);
4313		seq_buf_printf(s, "%s %lu\n", memcg1_stat_names[i], nr);
 
4314	}
4315
4316	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4317		seq_buf_printf(s, "%s %lu\n", vm_event_name(memcg1_events[i]),
4318			       memcg_events_local(memcg, memcg1_events[i]));
4319
4320	for (i = 0; i < NR_LRU_LISTS; i++)
4321		seq_buf_printf(s, "%s %lu\n", lru_list_name(i),
4322			       memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4323			       PAGE_SIZE);
4324
4325	/* Hierarchical information */
4326	memory = memsw = PAGE_COUNTER_MAX;
4327	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4328		memory = min(memory, READ_ONCE(mi->memory.max));
4329		memsw = min(memsw, READ_ONCE(mi->memsw.max));
4330	}
4331	seq_buf_printf(s, "hierarchical_memory_limit %llu\n",
4332		       (u64)memory * PAGE_SIZE);
4333	seq_buf_printf(s, "hierarchical_memsw_limit %llu\n",
4334		       (u64)memsw * PAGE_SIZE);
 
4335
4336	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4337		unsigned long nr;
4338
4339		nr = memcg_page_state_output(memcg, memcg1_stats[i]);
4340		seq_buf_printf(s, "total_%s %llu\n", memcg1_stat_names[i],
4341			       (u64)nr);
4342	}
4343
4344	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4345		seq_buf_printf(s, "total_%s %llu\n",
4346			       vm_event_name(memcg1_events[i]),
4347			       (u64)memcg_events(memcg, memcg1_events[i]));
4348
4349	for (i = 0; i < NR_LRU_LISTS; i++)
4350		seq_buf_printf(s, "total_%s %llu\n", lru_list_name(i),
4351			       (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4352			       PAGE_SIZE);
4353
4354#ifdef CONFIG_DEBUG_VM
4355	{
4356		pg_data_t *pgdat;
4357		struct mem_cgroup_per_node *mz;
4358		unsigned long anon_cost = 0;
4359		unsigned long file_cost = 0;
 
4360
4361		for_each_online_pgdat(pgdat) {
4362			mz = memcg->nodeinfo[pgdat->node_id];
 
4363
4364			anon_cost += mz->lruvec.anon_cost;
4365			file_cost += mz->lruvec.file_cost;
4366		}
4367		seq_buf_printf(s, "anon_cost %lu\n", anon_cost);
4368		seq_buf_printf(s, "file_cost %lu\n", file_cost);
4369	}
4370#endif
4371}
4372
4373static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4374				      struct cftype *cft)
4375{
4376	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4377
4378	return mem_cgroup_swappiness(memcg);
4379}
4380
4381static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4382				       struct cftype *cft, u64 val)
4383{
4384	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4385
4386	if (val > 200)
4387		return -EINVAL;
4388
4389	if (!mem_cgroup_is_root(memcg))
4390		WRITE_ONCE(memcg->swappiness, val);
4391	else
4392		WRITE_ONCE(vm_swappiness, val);
4393
4394	return 0;
4395}
4396
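/*
 * Compare current usage against the sorted threshold array and signal the
 * eventfd of every threshold crossed since the last call, in either
 * direction, then record the new position in the array.
 */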
4397static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4398{
4399	struct mem_cgroup_threshold_ary *t;
4400	unsigned long usage;
4401	int i;
4402
4403	rcu_read_lock();
4404	if (!swap)
4405		t = rcu_dereference(memcg->thresholds.primary);
4406	else
4407		t = rcu_dereference(memcg->memsw_thresholds.primary);
4408
4409	if (!t)
4410		goto unlock;
4411
4412	usage = mem_cgroup_usage(memcg, swap);
4413
4414	/*
4415	 * current_threshold points to the threshold just below or equal to usage.
4416	 * If that is not the case, a threshold was crossed after the last
4417	 * call of __mem_cgroup_threshold().
4418	 */
4419	i = t->current_threshold;
4420
4421	/*
4422	 * Iterate backward over array of thresholds starting from
4423	 * current_threshold and check if a threshold is crossed.
4424	 * If none of thresholds below usage is crossed, we read
4425	 * only one element of the array here.
4426	 */
4427	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4428		eventfd_signal(t->entries[i].eventfd);
4429
4430	/* i = current_threshold + 1 */
4431	i++;
4432
4433	/*
4434	 * Iterate forward over array of thresholds starting from
4435	 * current_threshold+1 and check if a threshold is crossed.
4436	 * If none of thresholds above usage is crossed, we read
4437	 * only one element of the array here.
4438	 */
4439	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4440		eventfd_signal(t->entries[i].eventfd);
4441
4442	/* Update current_threshold */
4443	t->current_threshold = i - 1;
4444unlock:
4445	rcu_read_unlock();
4446}
4447
4448static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4449{
4450	while (memcg) {
4451		__mem_cgroup_threshold(memcg, false);
4452		if (do_memsw_account())
4453			__mem_cgroup_threshold(memcg, true);
4454
4455		memcg = parent_mem_cgroup(memcg);
4456	}
4457}
4458
4459static int compare_thresholds(const void *a, const void *b)
4460{
4461	const struct mem_cgroup_threshold *_a = a;
4462	const struct mem_cgroup_threshold *_b = b;
4463
4464	if (_a->threshold > _b->threshold)
4465		return 1;
4466
4467	if (_a->threshold < _b->threshold)
4468		return -1;
4469
4470	return 0;
4471}
4472
4473static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4474{
4475	struct mem_cgroup_eventfd_list *ev;
4476
4477	spin_lock(&memcg_oom_lock);
4478
4479	list_for_each_entry(ev, &memcg->oom_notify, list)
4480		eventfd_signal(ev->eventfd);
4481
4482	spin_unlock(&memcg_oom_lock);
4483	return 0;
4484}
4485
4486static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4487{
4488	struct mem_cgroup *iter;
4489
4490	for_each_mem_cgroup_tree(iter, memcg)
4491		mem_cgroup_oom_notify_cb(iter);
4492}
4493
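/*
 * Register a usage threshold event: build a new, larger threshold array
 * containing the existing entries plus the new one, sort it, and publish it
 * with rcu_assign_pointer().  The previous primary array is kept as a spare
 * for the next unregister/register cycle.
 */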
4494static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4495	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4496{
4497	struct mem_cgroup_thresholds *thresholds;
4498	struct mem_cgroup_threshold_ary *new;
4499	unsigned long threshold;
4500	unsigned long usage;
4501	int i, size, ret;
4502
4503	ret = page_counter_memparse(args, "-1", &threshold);
4504	if (ret)
4505		return ret;
4506
4507	mutex_lock(&memcg->thresholds_lock);
4508
4509	if (type == _MEM) {
4510		thresholds = &memcg->thresholds;
4511		usage = mem_cgroup_usage(memcg, false);
4512	} else if (type == _MEMSWAP) {
4513		thresholds = &memcg->memsw_thresholds;
4514		usage = mem_cgroup_usage(memcg, true);
4515	} else
4516		BUG();
4517
4518	/* Check if a threshold crossed before adding a new one */
4519	if (thresholds->primary)
4520		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4521
4522	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4523
4524	/* Allocate memory for new array of thresholds */
4525	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4526	if (!new) {
4527		ret = -ENOMEM;
4528		goto unlock;
4529	}
4530	new->size = size;
4531
4532	/* Copy thresholds (if any) to new array */
4533	if (thresholds->primary)
4534		memcpy(new->entries, thresholds->primary->entries,
4535		       flex_array_size(new, entries, size - 1));
4536
4537	/* Add new threshold */
4538	new->entries[size - 1].eventfd = eventfd;
4539	new->entries[size - 1].threshold = threshold;
4540
4541	/* Sort thresholds. Registering of new threshold isn't time-critical */
4542	sort(new->entries, size, sizeof(*new->entries),
4543			compare_thresholds, NULL);
4544
4545	/* Find current threshold */
4546	new->current_threshold = -1;
4547	for (i = 0; i < size; i++) {
4548		if (new->entries[i].threshold <= usage) {
4549			/*
4550			 * new->current_threshold will not be used until
4551			 * rcu_assign_pointer(), so it's safe to increment
4552			 * it here.
4553			 */
4554			++new->current_threshold;
4555		} else
4556			break;
4557	}
4558
4559	/* Free old spare buffer and save old primary buffer as spare */
4560	kfree(thresholds->spare);
4561	thresholds->spare = thresholds->primary;
4562
4563	rcu_assign_pointer(thresholds->primary, new);
4564
4565	/* To be sure that nobody uses thresholds */
4566	synchronize_rcu();
4567
4568unlock:
4569	mutex_unlock(&memcg->thresholds_lock);
4570
4571	return ret;
4572}
4573
4574static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4575	struct eventfd_ctx *eventfd, const char *args)
4576{
4577	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4578}
4579
4580static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4581	struct eventfd_ctx *eventfd, const char *args)
4582{
4583	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4584}
4585
4586static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4587	struct eventfd_ctx *eventfd, enum res_type type)
4588{
4589	struct mem_cgroup_thresholds *thresholds;
4590	struct mem_cgroup_threshold_ary *new;
4591	unsigned long usage;
4592	int i, j, size, entries;
4593
4594	mutex_lock(&memcg->thresholds_lock);
4595
4596	if (type == _MEM) {
4597		thresholds = &memcg->thresholds;
4598		usage = mem_cgroup_usage(memcg, false);
4599	} else if (type == _MEMSWAP) {
4600		thresholds = &memcg->memsw_thresholds;
4601		usage = mem_cgroup_usage(memcg, true);
4602	} else
4603		BUG();
4604
4605	if (!thresholds->primary)
4606		goto unlock;
4607
4608	/* Check if a threshold crossed before removing */
4609	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4610
4611	/* Calculate the new number of thresholds */
4612	size = entries = 0;
4613	for (i = 0; i < thresholds->primary->size; i++) {
4614		if (thresholds->primary->entries[i].eventfd != eventfd)
4615			size++;
4616		else
4617			entries++;
4618	}
4619
4620	new = thresholds->spare;
4621
4622	/* If no items related to eventfd have been cleared, nothing to do */
4623	if (!entries)
4624		goto unlock;
4625
4626	/* Set thresholds array to NULL if we don't have thresholds */
4627	if (!size) {
4628		kfree(new);
4629		new = NULL;
4630		goto swap_buffers;
4631	}
4632
4633	new->size = size;
4634
4635	/* Copy thresholds and find current threshold */
4636	new->current_threshold = -1;
4637	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4638		if (thresholds->primary->entries[i].eventfd == eventfd)
4639			continue;
4640
4641		new->entries[j] = thresholds->primary->entries[i];
4642		if (new->entries[j].threshold <= usage) {
4643			/*
4644			 * new->current_threshold will not be used
4645			 * until rcu_assign_pointer(), so it's safe to increment
4646			 * it here.
4647			 */
4648			++new->current_threshold;
4649		}
4650		j++;
4651	}
4652
4653swap_buffers:
4654	/* Swap primary and spare array */
4655	thresholds->spare = thresholds->primary;
4656
4657	rcu_assign_pointer(thresholds->primary, new);
4658
4659	/* To be sure that nobody uses thresholds */
4660	synchronize_rcu();
4661
4662	/* If all events are unregistered, free the spare array */
4663	if (!new) {
4664		kfree(thresholds->spare);
4665		thresholds->spare = NULL;
4666	}
4667unlock:
4668	mutex_unlock(&memcg->thresholds_lock);
4669}
4670
4671static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4672	struct eventfd_ctx *eventfd)
4673{
4674	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4675}
4676
4677static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4678	struct eventfd_ctx *eventfd)
4679{
4680	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4681}
4682
4683static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4684	struct eventfd_ctx *eventfd, const char *args)
4685{
4686	struct mem_cgroup_eventfd_list *event;
4687
4688	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4689	if (!event)
4690		return -ENOMEM;
4691
4692	spin_lock(&memcg_oom_lock);
4693
4694	event->eventfd = eventfd;
4695	list_add(&event->list, &memcg->oom_notify);
4696
4697	/* already in OOM ? */
4698	if (memcg->under_oom)
4699		eventfd_signal(eventfd);
4700	spin_unlock(&memcg_oom_lock);
4701
4702	return 0;
4703}
4704
4705static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4706	struct eventfd_ctx *eventfd)
4707{
4708	struct mem_cgroup_eventfd_list *ev, *tmp;
4709
4710	spin_lock(&memcg_oom_lock);
4711
4712	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4713		if (ev->eventfd == eventfd) {
4714			list_del(&ev->list);
4715			kfree(ev);
4716		}
4717	}
4718
4719	spin_unlock(&memcg_oom_lock);
4720}
4721
4722static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4723{
4724	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4725
4726	seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable));
4727	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4728	seq_printf(sf, "oom_kill %lu\n",
4729		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4730	return 0;
4731}
4732
4733static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4734	struct cftype *cft, u64 val)
4735{
4736	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4737
4738	/* cannot set to root cgroup and only 0 and 1 are allowed */
4739	if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
4740		return -EINVAL;
4741
4742	WRITE_ONCE(memcg->oom_kill_disable, val);
4743	if (!val)
4744		memcg_oom_recover(memcg);
4745
4746	return 0;
4747}
4748
4749#ifdef CONFIG_CGROUP_WRITEBACK
4750
4751#include <trace/events/writeback.h>
4752
4753static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4754{
4755	return wb_domain_init(&memcg->cgwb_domain, gfp);
4756}
4757
4758static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4759{
4760	wb_domain_exit(&memcg->cgwb_domain);
4761}
4762
4763static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4764{
4765	wb_domain_size_changed(&memcg->cgwb_domain);
4766}
4767
4768struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4769{
4770	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4771
4772	if (!memcg->css.parent)
4773		return NULL;
4774
4775	return &memcg->cgwb_domain;
4776}
4777
4778/**
4779 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4780 * @wb: bdi_writeback in question
4781 * @pfilepages: out parameter for number of file pages
4782 * @pheadroom: out parameter for number of allocatable pages according to memcg
4783 * @pdirty: out parameter for number of dirty pages
4784 * @pwriteback: out parameter for number of pages under writeback
4785 *
4786 * Determine the numbers of file, headroom, dirty, and writeback pages in
4787 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4788 * is a bit more involved.
4789 *
4790 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4791 * headroom is calculated as the lowest headroom of itself and the
4792 * ancestors.  Note that this doesn't consider the actual amount of
4793 * available memory in the system.  The caller should further cap
4794 * *@pheadroom accordingly.
4795 */
4796void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4797			 unsigned long *pheadroom, unsigned long *pdirty,
4798			 unsigned long *pwriteback)
4799{
4800	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4801	struct mem_cgroup *parent;
4802
4803	mem_cgroup_flush_stats(memcg);
4804
4805	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4806	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4807	*pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4808			memcg_page_state(memcg, NR_ACTIVE_FILE);
 
4809
4810	*pheadroom = PAGE_COUNTER_MAX;
4811	while ((parent = parent_mem_cgroup(memcg))) {
4812		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4813					    READ_ONCE(memcg->memory.high));
4814		unsigned long used = page_counter_read(&memcg->memory);
4815
4816		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4817		memcg = parent;
4818	}
4819}
4820
4821/*
4822 * Foreign dirty flushing
4823 *
4824 * There's an inherent mismatch between memcg and writeback.  The former
4825 * tracks ownership per-page while the latter per-inode.  This was a
4826 * deliberate design decision because honoring per-page ownership in the
4827 * writeback path is complicated, may lead to higher CPU and IO overheads
4828 * and deemed unnecessary given that write-sharing an inode across
4829 * different cgroups isn't a common use-case.
4830 *
4831 * Combined with inode majority-writer ownership switching, this works well
4832 * enough in most cases but there are some pathological cases.  For
4833 * example, let's say there are two cgroups A and B which keep writing to
4834 * different but confined parts of the same inode.  B owns the inode and
4835 * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4836 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4837 * triggering background writeback.  A will be slowed down without a way to
4838 * make writeback of the dirty pages happen.
4839 *
4840 * Conditions like the above can lead to a cgroup getting repeatedly and
4841 * severely throttled after making some progress after each
4842 * dirty_expire_interval while the underlying IO device is almost
4843 * completely idle.
4844 *
4845 * Solving this problem completely requires matching the ownership tracking
4846 * granularities between memcg and writeback in either direction.  However,
4847 * the more egregious behaviors can be avoided by simply remembering the
4848 * most recent foreign dirtying events and initiating remote flushes on
4849 * them when local writeback isn't enough to keep the memory clean enough.
4850 *
4851 * The following two functions implement such a mechanism.  When a foreign
4852 * page - a page whose memcg and writeback ownerships don't match - is
4853 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4854 * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4855 * decides that the memcg needs to sleep due to high dirty ratio, it calls
4856 * mem_cgroup_flush_foreign() which queues writeback on the recorded
4857 * foreign bdi_writebacks which haven't expired.  Both the numbers of
4858 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4859 * limited to MEMCG_CGWB_FRN_CNT.
4860 *
4861 * The mechanism only remembers IDs and doesn't hold any object references.
4862 * As being wrong occasionally doesn't matter, updates and accesses to the
4863 * records are lockless and racy.
4864 */
4865void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
4866					     struct bdi_writeback *wb)
4867{
4868	struct mem_cgroup *memcg = folio_memcg(folio);
4869	struct memcg_cgwb_frn *frn;
4870	u64 now = get_jiffies_64();
4871	u64 oldest_at = now;
4872	int oldest = -1;
4873	int i;
4874
4875	trace_track_foreign_dirty(folio, wb);
4876
4877	/*
4878	 * Pick the slot to use.  If there is already a slot for @wb, keep
4879	 * using it.  If not, replace the oldest one which isn't being
4880	 * written out.
4881	 */
4882	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4883		frn = &memcg->cgwb_frn[i];
4884		if (frn->bdi_id == wb->bdi->id &&
4885		    frn->memcg_id == wb->memcg_css->id)
4886			break;
4887		if (time_before64(frn->at, oldest_at) &&
4888		    atomic_read(&frn->done.cnt) == 1) {
4889			oldest = i;
4890			oldest_at = frn->at;
4891		}
4892	}
4893
4894	if (i < MEMCG_CGWB_FRN_CNT) {
4895		/*
4896		 * Re-using an existing one.  Update timestamp lazily to
4897		 * avoid making the cacheline hot.  We want them to be
4898		 * reasonably up-to-date and significantly shorter than
4899		 * dirty_expire_interval as that's what expires the record.
4900		 * Use the shorter of 1s and dirty_expire_interval / 8.
4901		 */
4902		unsigned long update_intv =
4903			min_t(unsigned long, HZ,
4904			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4905
4906		if (time_before64(frn->at, now - update_intv))
4907			frn->at = now;
4908	} else if (oldest >= 0) {
4909		/* replace the oldest free one */
4910		frn = &memcg->cgwb_frn[oldest];
4911		frn->bdi_id = wb->bdi->id;
4912		frn->memcg_id = wb->memcg_css->id;
4913		frn->at = now;
4914	}
4915}
4916
4917/* issue foreign writeback flushes for recorded foreign dirtying events */
4918void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4919{
4920	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4921	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4922	u64 now = jiffies_64;
4923	int i;
4924
4925	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4926		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4927
4928		/*
4929		 * If the record is older than dirty_expire_interval,
4930		 * writeback on it has already started.  No need to kick it
4931		 * off again.  Also, don't start a new one if there's
4932		 * already one in flight.
4933		 */
4934		if (time_after64(frn->at, now - intv) &&
4935		    atomic_read(&frn->done.cnt) == 1) {
4936			frn->at = 0;
4937			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4938			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
4939					       WB_REASON_FOREIGN_FLUSH,
4940					       &frn->done);
4941		}
4942	}
4943}
4944
4945#else	/* CONFIG_CGROUP_WRITEBACK */
4946
4947static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4948{
4949	return 0;
4950}
4951
4952static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4953{
4954}
4955
4956static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4957{
4958}
4959
4960#endif	/* CONFIG_CGROUP_WRITEBACK */
4961
4962/*
4963 * DO NOT USE IN NEW FILES.
4964 *
4965 * "cgroup.event_control" implementation.
4966 *
4967 * This is way over-engineered.  It tries to support fully configurable
4968 * events for each user.  Such a level of flexibility is completely
4969 * unnecessary, especially in light of the planned unified hierarchy.
4970 *
4971 * Please deprecate this and replace with something simpler if at all
4972 * possible.
4973 */
4974
4975/*
4976 * Unregister event and free resources.
4977 *
4978 * Gets called from workqueue.
4979 */
4980static void memcg_event_remove(struct work_struct *work)
4981{
4982	struct mem_cgroup_event *event =
4983		container_of(work, struct mem_cgroup_event, remove);
4984	struct mem_cgroup *memcg = event->memcg;
4985
4986	remove_wait_queue(event->wqh, &event->wait);
4987
4988	event->unregister_event(memcg, event->eventfd);
4989
4990	/* Notify userspace the event is going away. */
4991	eventfd_signal(event->eventfd);
4992
4993	eventfd_ctx_put(event->eventfd);
4994	kfree(event);
4995	css_put(&memcg->css);
4996}
4997
4998/*
4999 * Gets called on EPOLLHUP on eventfd when user closes it.
5000 *
5001 * Called with wqh->lock held and interrupts disabled.
5002 */
5003static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
5004			    int sync, void *key)
5005{
5006	struct mem_cgroup_event *event =
5007		container_of(wait, struct mem_cgroup_event, wait);
5008	struct mem_cgroup *memcg = event->memcg;
5009	__poll_t flags = key_to_poll(key);
5010
5011	if (flags & EPOLLHUP) {
5012		/*
5013		 * If the event has been detached at cgroup removal, we
5014		 * can simply return knowing the other side will cleanup
5015		 * for us.
5016		 *
5017		 * We can't race against event freeing since the other
5018		 * side will require wqh->lock via remove_wait_queue(),
5019		 * which we hold.
5020		 */
5021		spin_lock(&memcg->event_list_lock);
5022		if (!list_empty(&event->list)) {
5023			list_del_init(&event->list);
5024			/*
5025			 * We are in atomic context, but memcg_event_remove()
5026			 * may sleep, so we have to call it from a workqueue.
5027			 */
5028			schedule_work(&event->remove);
5029		}
5030		spin_unlock(&memcg->event_list_lock);
5031	}
5032
5033	return 0;
5034}
5035
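/*
 * poll_table queue callback: remember the waitqueue head and add the
 * event's wait entry to it so memcg_event_wake() gets called on wakeups.
 */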
5036static void memcg_event_ptable_queue_proc(struct file *file,
5037		wait_queue_head_t *wqh, poll_table *pt)
5038{
5039	struct mem_cgroup_event *event =
5040		container_of(pt, struct mem_cgroup_event, pt);
5041
5042	event->wqh = wqh;
5043	add_wait_queue(wqh, &event->wait);
5044}
5045
5046/*
5047 * DO NOT USE IN NEW FILES.
5048 *
5049 * Parse input and register new cgroup event handler.
5050 *
5051 * Input must be in the format '<event_fd> <control_fd> <args>'.
5052 * Interpretation of args is defined by control file implementation.
5053 */
5054static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
5055					 char *buf, size_t nbytes, loff_t off)
5056{
5057	struct cgroup_subsys_state *css = of_css(of);
5058	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5059	struct mem_cgroup_event *event;
5060	struct cgroup_subsys_state *cfile_css;
5061	unsigned int efd, cfd;
5062	struct fd efile;
5063	struct fd cfile;
5064	struct dentry *cdentry;
5065	const char *name;
5066	char *endp;
5067	int ret;
5068
5069	if (IS_ENABLED(CONFIG_PREEMPT_RT))
5070		return -EOPNOTSUPP;
5071
5072	buf = strstrip(buf);
5073
5074	efd = simple_strtoul(buf, &endp, 10);
5075	if (*endp != ' ')
5076		return -EINVAL;
5077	buf = endp + 1;
5078
5079	cfd = simple_strtoul(buf, &endp, 10);
5080	if ((*endp != ' ') && (*endp != '\0'))
5081		return -EINVAL;
5082	buf = endp + 1;
5083
5084	event = kzalloc(sizeof(*event), GFP_KERNEL);
5085	if (!event)
5086		return -ENOMEM;
5087
5088	event->memcg = memcg;
5089	INIT_LIST_HEAD(&event->list);
5090	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
5091	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
5092	INIT_WORK(&event->remove, memcg_event_remove);
5093
5094	efile = fdget(efd);
5095	if (!efile.file) {
5096		ret = -EBADF;
5097		goto out_kfree;
5098	}
5099
5100	event->eventfd = eventfd_ctx_fileget(efile.file);
5101	if (IS_ERR(event->eventfd)) {
5102		ret = PTR_ERR(event->eventfd);
5103		goto out_put_efile;
5104	}
5105
5106	cfile = fdget(cfd);
5107	if (!cfile.file) {
5108		ret = -EBADF;
5109		goto out_put_eventfd;
5110	}
5111
5113	/* the process needs read permission on the control file */
5113	/* AV: shouldn't we check that it's been opened for read instead? */
5114	ret = file_permission(cfile.file, MAY_READ);
5115	if (ret < 0)
5116		goto out_put_cfile;
5117
5118	/*
5119	 * The control file must be a regular cgroup1 file. As a regular cgroup
5120	 * file can't be renamed, it's safe to access its name afterwards.
5121	 */
5122	cdentry = cfile.file->f_path.dentry;
5123	if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
5124		ret = -EINVAL;
5125		goto out_put_cfile;
5126	}
5127
5128	/*
5129	 * Determine the event callbacks and set them in @event.  This used
5130	 * to be done via struct cftype but cgroup core no longer knows
5131	 * about these events.  The following is crude but the whole thing
5132	 * is for compatibility anyway.
5133	 *
5134	 * DO NOT ADD NEW FILES.
5135	 */
5136	name = cdentry->d_name.name;
5137
5138	if (!strcmp(name, "memory.usage_in_bytes")) {
5139		event->register_event = mem_cgroup_usage_register_event;
5140		event->unregister_event = mem_cgroup_usage_unregister_event;
5141	} else if (!strcmp(name, "memory.oom_control")) {
5142		event->register_event = mem_cgroup_oom_register_event;
5143		event->unregister_event = mem_cgroup_oom_unregister_event;
5144	} else if (!strcmp(name, "memory.pressure_level")) {
5145		event->register_event = vmpressure_register_event;
5146		event->unregister_event = vmpressure_unregister_event;
5147	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
5148		event->register_event = memsw_cgroup_usage_register_event;
5149		event->unregister_event = memsw_cgroup_usage_unregister_event;
5150	} else {
5151		ret = -EINVAL;
5152		goto out_put_cfile;
5153	}
5154
5155	/*
5156	 * Verify that @cfile belongs to @css.  Also, remaining events are
5157	 * automatically removed on cgroup destruction but the removal is
5158	 * asynchronous, so take an extra ref on @css.
5159	 */
5160	cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
5161					       &memory_cgrp_subsys);
5162	ret = -EINVAL;
5163	if (IS_ERR(cfile_css))
5164		goto out_put_cfile;
5165	if (cfile_css != css) {
5166		css_put(cfile_css);
5167		goto out_put_cfile;
5168	}
5169
5170	ret = event->register_event(memcg, event->eventfd, buf);
5171	if (ret)
5172		goto out_put_css;
5173
5174	vfs_poll(efile.file, &event->pt);
5175
5176	spin_lock_irq(&memcg->event_list_lock);
5177	list_add(&event->list, &memcg->event_list);
5178	spin_unlock_irq(&memcg->event_list_lock);
5179
5180	fdput(cfile);
5181	fdput(efile);
5182
5183	return nbytes;
5184
5185out_put_css:
5186	css_put(css);
5187out_put_cfile:
5188	fdput(cfile);
5189out_put_eventfd:
5190	eventfd_ctx_put(event->eventfd);
5191out_put_efile:
5192	fdput(efile);
5193out_kfree:
5194	kfree(event);
5195
5196	return ret;
5197}
5198
5199#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
5200static int mem_cgroup_slab_show(struct seq_file *m, void *p)
5201{
5202	/*
5203	 * Deprecated.
5204	 * Please take a look at tools/cgroup/memcg_slabinfo.py instead.
5205	 */
5206	return 0;
5207}
5208#endif
5209
5210static int memory_stat_show(struct seq_file *m, void *v);
5211
5212static struct cftype mem_cgroup_legacy_files[] = {
5213	{
5214		.name = "usage_in_bytes",
5215		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
5216		.read_u64 = mem_cgroup_read_u64,
5217	},
5218	{
5219		.name = "max_usage_in_bytes",
5220		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
5221		.write = mem_cgroup_reset,
5222		.read_u64 = mem_cgroup_read_u64,
5223	},
5224	{
5225		.name = "limit_in_bytes",
5226		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
5227		.write = mem_cgroup_write,
5228		.read_u64 = mem_cgroup_read_u64,
5229	},
5230	{
5231		.name = "soft_limit_in_bytes",
5232		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
5233		.write = mem_cgroup_write,
5234		.read_u64 = mem_cgroup_read_u64,
5235	},
5236	{
5237		.name = "failcnt",
5238		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
5239		.write = mem_cgroup_reset,
5240		.read_u64 = mem_cgroup_read_u64,
5241	},
5242	{
5243		.name = "stat",
5244		.seq_show = memory_stat_show,
5245	},
5246	{
5247		.name = "force_empty",
5248		.write = mem_cgroup_force_empty_write,
5249	},
5250	{
5251		.name = "use_hierarchy",
5252		.write_u64 = mem_cgroup_hierarchy_write,
5253		.read_u64 = mem_cgroup_hierarchy_read,
5254	},
5255	{
5256		.name = "cgroup.event_control",		/* XXX: for compat */
5257		.write = memcg_write_event_control,
5258		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
5259	},
5260	{
5261		.name = "swappiness",
5262		.read_u64 = mem_cgroup_swappiness_read,
5263		.write_u64 = mem_cgroup_swappiness_write,
5264	},
5265	{
5266		.name = "move_charge_at_immigrate",
5267		.read_u64 = mem_cgroup_move_charge_read,
5268		.write_u64 = mem_cgroup_move_charge_write,
5269	},
5270	{
5271		.name = "oom_control",
5272		.seq_show = mem_cgroup_oom_control_read,
5273		.write_u64 = mem_cgroup_oom_control_write,
5274	},
5275	{
5276		.name = "pressure_level",
5277		.seq_show = mem_cgroup_dummy_seq_show,
5278	},
5279#ifdef CONFIG_NUMA
5280	{
5281		.name = "numa_stat",
5282		.seq_show = memcg_numa_stat_show,
5283	},
5284#endif
5285	{
5286		.name = "kmem.limit_in_bytes",
5287		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5288		.write = mem_cgroup_write,
5289		.read_u64 = mem_cgroup_read_u64,
5290	},
5291	{
5292		.name = "kmem.usage_in_bytes",
5293		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5294		.read_u64 = mem_cgroup_read_u64,
5295	},
5296	{
5297		.name = "kmem.failcnt",
5298		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5299		.write = mem_cgroup_reset,
5300		.read_u64 = mem_cgroup_read_u64,
5301	},
5302	{
5303		.name = "kmem.max_usage_in_bytes",
5304		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5305		.write = mem_cgroup_reset,
5306		.read_u64 = mem_cgroup_read_u64,
5307	},
5308#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
5309	{
5310		.name = "kmem.slabinfo",
5311		.seq_show = mem_cgroup_slab_show,
5312	},
5313#endif
5314	{
5315		.name = "kmem.tcp.limit_in_bytes",
5316		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5317		.write = mem_cgroup_write,
5318		.read_u64 = mem_cgroup_read_u64,
5319	},
5320	{
5321		.name = "kmem.tcp.usage_in_bytes",
5322		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5323		.read_u64 = mem_cgroup_read_u64,
5324	},
5325	{
5326		.name = "kmem.tcp.failcnt",
5327		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5328		.write = mem_cgroup_reset,
5329		.read_u64 = mem_cgroup_read_u64,
5330	},
5331	{
5332		.name = "kmem.tcp.max_usage_in_bytes",
5333		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5334		.write = mem_cgroup_reset,
5335		.read_u64 = mem_cgroup_read_u64,
5336	},
5337	{ },	/* terminate */
5338};
5339
5340/*
5341 * Private memory cgroup IDR
5342 *
5343 * Swap-out records and page cache shadow entries need to store memcg
5344 * references in constrained space, so we maintain an ID space that is
5345 * limited to 16 bits (MEM_CGROUP_ID_MAX), limiting the total number of
5346 * memory-controlled cgroups to 64k.
5347 *
5348 * However, there usually are many references to the offline CSS after
5349 * the cgroup has been destroyed, such as page cache or reclaimable
5350 * slab objects, that don't need to hang on to the ID. We want to keep
5351 * those dead CSS from occupying IDs, or we might quickly exhaust the
5352 * relatively small ID space and prevent the creation of new cgroups
5353 * even when there are much fewer than 64k cgroups - possibly none.
5354 *
5355 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5356 * be freed and recycled when it's no longer needed, which is usually
5357 * when the CSS is offlined.
5358 *
5359 * The only exception to that are records of swapped out tmpfs/shmem
5360 * pages that need to be attributed to live ancestors on swapin. But
5361 * those references are manageable from userspace.
5362 */
5363
5364#define MEM_CGROUP_ID_MAX	((1UL << MEM_CGROUP_ID_SHIFT) - 1)
5365static DEFINE_IDR(mem_cgroup_idr);
5366
5367static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5368{
5369	if (memcg->id.id > 0) {
5370		idr_remove(&mem_cgroup_idr, memcg->id.id);
5371		memcg->id.id = 0;
5372	}
5373}
5374
5375static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5376						  unsigned int n)
5377{
5378	refcount_add(n, &memcg->id.ref);
5379}
5380
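/*
 * Drop @n references on the memcg ID.  Once the last reference is gone,
 * the ID is released back to the IDR and the CSS reference pinned by
 * the ID is dropped.
 */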
5381static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5382{
5383	if (refcount_sub_and_test(n, &memcg->id.ref)) {
5384		mem_cgroup_id_remove(memcg);
5385
5386		/* Memcg ID pins CSS */
5387		css_put(&memcg->css);
5388	}
5389}
5390
5391static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5392{
5393	mem_cgroup_id_put_many(memcg, 1);
5394}
5395
5396/**
5397 * mem_cgroup_from_id - look up a memcg from a memcg id
5398 * @id: the memcg id to look up
5399 *
5400 * Caller must hold rcu_read_lock().
5401 */
5402struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5403{
5404	WARN_ON_ONCE(!rcu_read_lock_held());
5405	return idr_find(&mem_cgroup_idr, id);
5406}
5407
5408#ifdef CONFIG_SHRINKER_DEBUG
5409struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
5410{
5411	struct cgroup *cgrp;
5412	struct cgroup_subsys_state *css;
5413	struct mem_cgroup *memcg;
5414
5415	cgrp = cgroup_get_from_id(ino);
5416	if (IS_ERR(cgrp))
5417		return ERR_CAST(cgrp);
5418
5419	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
5420	if (css)
5421		memcg = container_of(css, struct mem_cgroup, css);
5422	else
5423		memcg = ERR_PTR(-ENOENT);
5424
5425	cgroup_put(cgrp);
5426
5427	return memcg;
5428}
5429#endif
5430
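/*
 * Allocate the per-node info and its per-cpu lruvec statistics for
 * @memcg on @node.  Returns 0 on success and 1 on allocation failure.
 */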
5431static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5432{
5433	struct mem_cgroup_per_node *pn;
5434
5435	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
5436	if (!pn)
5437		return 1;
5438
5439	pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5440						   GFP_KERNEL_ACCOUNT);
5441	if (!pn->lruvec_stats_percpu) {
5442		kfree(pn);
5443		return 1;
5444	}
5445
5446	lruvec_init(&pn->lruvec);
5447	pn->memcg = memcg;
5448
5449	memcg->nodeinfo[node] = pn;
5450	return 0;
5451}
5452
5453static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5454{
5455	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5456
5457	if (!pn)
5458		return;
5459
5460	free_percpu(pn->lruvec_stats_percpu);
5461	kfree(pn);
5462}
5463
5464static void __mem_cgroup_free(struct mem_cgroup *memcg)
5465{
5466	int node;
5467
5468	if (memcg->orig_objcg)
5469		obj_cgroup_put(memcg->orig_objcg);
5470
5471	for_each_node(node)
5472		free_mem_cgroup_per_node_info(memcg, node);
5473	kfree(memcg->vmstats);
5474	free_percpu(memcg->vmstats_percpu);
5475	kfree(memcg);
5476}
5477
5478static void mem_cgroup_free(struct mem_cgroup *memcg)
5479{
5480	lru_gen_exit_memcg(memcg);
5481	memcg_wb_domain_exit(memcg);
5482	__mem_cgroup_free(memcg);
5483}
5484
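/*
 * Allocate and initialize a new memcg: reserve an ID in the IDR (it is
 * only made visible to mem_cgroup_from_id() once the css goes online),
 * allocate vmstats and their per-cpu counterparts, the per-node info
 * and the writeback domain.  Returns an ERR_PTR() on failure.
 */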
5485static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
5486{
5487	struct memcg_vmstats_percpu *statc, *pstatc;
5488	struct mem_cgroup *memcg;
5489	int node, cpu;
5490	int __maybe_unused i;
5491	long error = -ENOMEM;
5492
5493	memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
5494	if (!memcg)
5495		return ERR_PTR(error);
5496
5497	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5498				 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
5499	if (memcg->id.id < 0) {
5500		error = memcg->id.id;
5501		goto fail;
5502	}
5503
5504	memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), GFP_KERNEL);
5505	if (!memcg->vmstats)
5506		goto fail;
5507
5508	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5509						 GFP_KERNEL_ACCOUNT);
5510	if (!memcg->vmstats_percpu)
5511		goto fail;
5512
5513	for_each_possible_cpu(cpu) {
5514		if (parent)
5515			pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
5516		statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5517		statc->parent = parent ? pstatc : NULL;
5518		statc->vmstats = memcg->vmstats;
5519	}
5520
5521	for_each_node(node)
5522		if (alloc_mem_cgroup_per_node_info(memcg, node))
5523			goto fail;
5524
5525	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5526		goto fail;
5527
5528	INIT_WORK(&memcg->high_work, high_work_func);
5529	INIT_LIST_HEAD(&memcg->oom_notify);
5530	mutex_init(&memcg->thresholds_lock);
5531	spin_lock_init(&memcg->move_lock);
5532	vmpressure_init(&memcg->vmpressure);
5533	INIT_LIST_HEAD(&memcg->event_list);
5534	spin_lock_init(&memcg->event_list_lock);
5535	memcg->socket_pressure = jiffies;
5536#ifdef CONFIG_MEMCG_KMEM
5537	memcg->kmemcg_id = -1;
5538	INIT_LIST_HEAD(&memcg->objcg_list);
5539#endif
5540#ifdef CONFIG_CGROUP_WRITEBACK
5541	INIT_LIST_HEAD(&memcg->cgwb_list);
5542	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5543		memcg->cgwb_frn[i].done =
5544			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5545#endif
5546#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5547	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5548	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5549	memcg->deferred_split_queue.split_queue_len = 0;
5550#endif
5551	lru_gen_init_memcg(memcg);
5552	return memcg;
5553fail:
5554	mem_cgroup_id_remove(memcg);
5555	__mem_cgroup_free(memcg);
5556	return ERR_PTR(error);
5557}
5558
5559static struct cgroup_subsys_state * __ref
5560mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5561{
5562	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5563	struct mem_cgroup *memcg, *old_memcg;
5564
5565	old_memcg = set_active_memcg(parent);
5566	memcg = mem_cgroup_alloc(parent);
5567	set_active_memcg(old_memcg);
5568	if (IS_ERR(memcg))
5569		return ERR_CAST(memcg);
5570
5571	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5572	WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
5573#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
5574	memcg->zswap_max = PAGE_COUNTER_MAX;
5575	WRITE_ONCE(memcg->zswap_writeback,
5576		!parent || READ_ONCE(parent->zswap_writeback));
5577#endif
5578	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5579	if (parent) {
5580		WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
5581		WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
5582
5583		page_counter_init(&memcg->memory, &parent->memory);
5584		page_counter_init(&memcg->swap, &parent->swap);
5585		page_counter_init(&memcg->kmem, &parent->kmem);
5586		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5587	} else {
5588		init_memcg_events();
5589		page_counter_init(&memcg->memory, NULL);
5590		page_counter_init(&memcg->swap, NULL);
5591		page_counter_init(&memcg->kmem, NULL);
5592		page_counter_init(&memcg->tcpmem, NULL);
5593
5594		root_mem_cgroup = memcg;
5595		return &memcg->css;
5596	}
5597
5598	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5599		static_branch_inc(&memcg_sockets_enabled_key);
5600
5601#if defined(CONFIG_MEMCG_KMEM)
5602	if (!cgroup_memory_nobpf)
5603		static_branch_inc(&memcg_bpf_enabled_key);
5604#endif
5605
5606	return &memcg->css;
5607}
5608
5609static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5610{
5611	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5612
5613	if (memcg_online_kmem(memcg))
5614		goto remove_id;
5615
5616	/*
5617	 * A memcg must be visible for expand_shrinker_info()
5618	 * by the time the maps are allocated. So, we allocate maps
5619	 * here, when for_each_mem_cgroup() can't skip it.
5620	 */
5621	if (alloc_shrinker_info(memcg))
5622		goto offline_kmem;
5623
5624	if (unlikely(mem_cgroup_is_root(memcg)))
5625		queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
5626				   FLUSH_TIME);
5627	lru_gen_online_memcg(memcg);
5628
5629	/* Online state pins memcg ID, memcg ID pins CSS */
5630	refcount_set(&memcg->id.ref, 1);
5631	css_get(css);
5632
5633	/*
5634	 * Ensure mem_cgroup_from_id() works once we're fully online.
5635	 *
5636	 * We could do this earlier and require callers to filter with
5637	 * css_tryget_online(). But right now there are no users that
5638	 * need earlier access, and the workingset code relies on the
5639	 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
5640	 * publish it here at the end of onlining. This matches the
5641	 * regular ID destruction during offlining.
5642	 */
5643	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5644
5645	return 0;
5646offline_kmem:
5647	memcg_offline_kmem(memcg);
5648remove_id:
5649	mem_cgroup_id_remove(memcg);
5650	return -ENOMEM;
5651}
5652
5653static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5654{
5655	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5656	struct mem_cgroup_event *event, *tmp;
5657
5658	/*
5659	 * Unregister events and notify userspace.
5660	 * Notify userspace about cgroup removal only after rmdir of the cgroup
5661	 * directory, to avoid a race between userspace and kernelspace.
5662	 */
5663	spin_lock_irq(&memcg->event_list_lock);
5664	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5665		list_del_init(&event->list);
5666		schedule_work(&event->remove);
5667	}
5668	spin_unlock_irq(&memcg->event_list_lock);
5669
5670	page_counter_set_min(&memcg->memory, 0);
5671	page_counter_set_low(&memcg->memory, 0);
5672
5673	zswap_memcg_offline_cleanup(memcg);
5674
5675	memcg_offline_kmem(memcg);
5676	reparent_shrinker_deferred(memcg);
5677	wb_memcg_offline(memcg);
5678	lru_gen_offline_memcg(memcg);
5679
5680	drain_all_stock(memcg);
5681
5682	mem_cgroup_id_put(memcg);
5683}
5684
5685static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5686{
5687	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5688
5689	invalidate_reclaim_iterators(memcg);
5690	lru_gen_release_memcg(memcg);
5691}
5692
5693static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5694{
5695	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5696	int __maybe_unused i;
5697
5698#ifdef CONFIG_CGROUP_WRITEBACK
5699	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5700		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5701#endif
5702	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5703		static_branch_dec(&memcg_sockets_enabled_key);
5704
5705	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5706		static_branch_dec(&memcg_sockets_enabled_key);
5707
5708#if defined(CONFIG_MEMCG_KMEM)
5709	if (!cgroup_memory_nobpf)
5710		static_branch_dec(&memcg_bpf_enabled_key);
5711#endif
5712
5713	vmpressure_cleanup(&memcg->vmpressure);
5714	cancel_work_sync(&memcg->high_work);
5715	mem_cgroup_remove_from_trees(memcg);
5716	free_shrinker_info(memcg);
5717	mem_cgroup_free(memcg);
5718}
5719
5720/**
5721 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5722 * @css: the target css
5723 *
5724 * Reset the states of the mem_cgroup associated with @css.  This is
5725 * invoked when the userland requests disabling on the default hierarchy
5726 * but the memcg is pinned through dependency.  The memcg should stop
5727 * applying policies and should revert to the vanilla state as it may be
5728 * made visible again.
5729 *
5730 * The current implementation only resets the essential configurations.
5731 * This needs to be expanded to cover all the visible parts.
5732 */
5733static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5734{
5735	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5736
5737	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5738	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5739	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5740	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5741	page_counter_set_min(&memcg->memory, 0);
5742	page_counter_set_low(&memcg->memory, 0);
5743	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5744	WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
5745	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5746	memcg_wb_domain_size_changed(memcg);
5747}
5748
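/*
 * Flush the per-cpu statistics of @css's memcg for @cpu: fold the
 * deltas since the last flush into the memcg's aggregated state and
 * event counters, propagate them to the parent as pending, and do the
 * same for the per-node lruvec statistics.
 */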
5749static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5750{
5751	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5752	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5753	struct memcg_vmstats_percpu *statc;
5754	long delta, delta_cpu, v;
5755	int i, nid;
5756
5757	statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5758
5759	for (i = 0; i < MEMCG_NR_STAT; i++) {
5760		/*
5761		 * Collect the aggregated propagation counts of groups
5762		 * below us. We're in a per-cpu loop here and this is
5763		 * a global counter, so the first cycle will get them.
5764		 */
5765		delta = memcg->vmstats->state_pending[i];
5766		if (delta)
5767			memcg->vmstats->state_pending[i] = 0;
5768
5769		/* Add CPU changes on this level since the last flush */
5770		delta_cpu = 0;
5771		v = READ_ONCE(statc->state[i]);
5772		if (v != statc->state_prev[i]) {
5773			delta_cpu = v - statc->state_prev[i];
5774			delta += delta_cpu;
5775			statc->state_prev[i] = v;
5776		}
5777
5778		/* Aggregate counts on this level and propagate upwards */
5779		if (delta_cpu)
5780			memcg->vmstats->state_local[i] += delta_cpu;
5781
5782		if (delta) {
5783			memcg->vmstats->state[i] += delta;
5784			if (parent)
5785				parent->vmstats->state_pending[i] += delta;
5786		}
5787	}
5788
5789	for (i = 0; i < NR_MEMCG_EVENTS; i++) {
5790		delta = memcg->vmstats->events_pending[i];
5791		if (delta)
5792			memcg->vmstats->events_pending[i] = 0;
5793
5794		delta_cpu = 0;
5795		v = READ_ONCE(statc->events[i]);
5796		if (v != statc->events_prev[i]) {
5797			delta_cpu = v - statc->events_prev[i];
5798			delta += delta_cpu;
5799			statc->events_prev[i] = v;
5800		}
5801
5802		if (delta_cpu)
5803			memcg->vmstats->events_local[i] += delta_cpu;
5804
5805		if (delta) {
5806			memcg->vmstats->events[i] += delta;
5807			if (parent)
5808				parent->vmstats->events_pending[i] += delta;
5809		}
5810	}
5811
5812	for_each_node_state(nid, N_MEMORY) {
5813		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
5814		struct mem_cgroup_per_node *ppn = NULL;
5815		struct lruvec_stats_percpu *lstatc;
5816
5817		if (parent)
5818			ppn = parent->nodeinfo[nid];
5819
5820		lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
5821
5822		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
5823			delta = pn->lruvec_stats.state_pending[i];
5824			if (delta)
5825				pn->lruvec_stats.state_pending[i] = 0;
5826
5827			delta_cpu = 0;
5828			v = READ_ONCE(lstatc->state[i]);
5829			if (v != lstatc->state_prev[i]) {
5830				delta_cpu = v - lstatc->state_prev[i];
5831				delta += delta_cpu;
5832				lstatc->state_prev[i] = v;
5833			}
5834
5835			if (delta_cpu)
5836				pn->lruvec_stats.state_local[i] += delta_cpu;
5837
5838			if (delta) {
5839				pn->lruvec_stats.state[i] += delta;
5840				if (ppn)
5841					ppn->lruvec_stats.state_pending[i] += delta;
5842			}
5843		}
5844	}
5845	statc->stats_updates = 0;
5846	/* We are in a per-cpu loop here, only do the atomic write once */
5847	if (atomic64_read(&memcg->vmstats->stats_updates))
5848		atomic64_set(&memcg->vmstats->stats_updates, 0);
5849}
5850
5851#ifdef CONFIG_MMU
5852/* Handlers for move charge at task migration. */
5853static int mem_cgroup_do_precharge(unsigned long count)
5854{
5855	int ret;
5856
5857	/* Try a single bulk charge without reclaim first, kswapd may wake */
5858	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5859	if (!ret) {
5860		mc.precharge += count;
5861		return ret;
5862	}
5863
5864	/* Try charges one by one with reclaim, but do not retry */
5865	while (count--) {
5866		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5867		if (ret)
5868			return ret;
5869		mc.precharge++;
5870		cond_resched();
5871	}
5872	return 0;
5873}
5874
5875union mc_target {
5876	struct page	*page;
5877	swp_entry_t	ent;
5878};
5879
5880enum mc_target_type {
5881	MC_TARGET_NONE = 0,
5882	MC_TARGET_PAGE,
5883	MC_TARGET_SWAP,
5884	MC_TARGET_DEVICE,
5885};
5886
5887static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5888						unsigned long addr, pte_t ptent)
5889{
5890	struct page *page = vm_normal_page(vma, addr, ptent);
5891
5892	if (!page)
5893		return NULL;
5894	if (PageAnon(page)) {
5895		if (!(mc.flags & MOVE_ANON))
5896			return NULL;
5897	} else {
5898		if (!(mc.flags & MOVE_FILE))
5899			return NULL;
5900	}
5901	get_page(page);
5902
5903	return page;
5904}
5905
5906#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5907static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5908			pte_t ptent, swp_entry_t *entry)
5909{
5910	struct page *page = NULL;
5911	swp_entry_t ent = pte_to_swp_entry(ptent);
5912
5913	if (!(mc.flags & MOVE_ANON))
5914		return NULL;
5915
5916	/*
5917	 * Handle device private pages that are not accessible by the CPU, but
5918	 * stored as special swap entries in the page table.
5919	 */
5920	if (is_device_private_entry(ent)) {
5921		page = pfn_swap_entry_to_page(ent);
5922		if (!get_page_unless_zero(page))
5923			return NULL;
5924		return page;
5925	}
5926
5927	if (non_swap_entry(ent))
5928		return NULL;
5929
5930	/*
5931	 * Because swap_cache_get_folio() updates some statistics counter,
5932	 * we call find_get_page() with swapper_space directly.
5933	 */
5934	page = find_get_page(swap_address_space(ent), swp_offset(ent));
5935	entry->val = ent.val;
5936
5937	return page;
5938}
5939#else
5940static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5941			pte_t ptent, swp_entry_t *entry)
5942{
5943	return NULL;
5944}
5945#endif
5946
5947static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5948			unsigned long addr, pte_t ptent)
5949{
5950	unsigned long index;
5951	struct folio *folio;
5952
5953	if (!vma->vm_file) /* anonymous vma */
5954		return NULL;
5955	if (!(mc.flags & MOVE_FILE))
5956		return NULL;
5957
5958	/* folio is moved even if it's not RSS of this task (page-faulted). */
5959	/* shmem/tmpfs may report page out on swap: account for that too. */
5960	index = linear_page_index(vma, addr);
5961	folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index);
5962	if (IS_ERR(folio))
5963		return NULL;
5964	return folio_file_page(folio, index);
5965}
5966
5967/**
5968 * mem_cgroup_move_account - move account of the page
5969 * @page: the page
5970 * @compound: charge the page as compound or small page
5971 * @from: mem_cgroup which the page is moved from.
5972 * @to:	mem_cgroup which the page is moved to. @from != @to.
5973 *
5974 * The page must be locked and not on the LRU.
5975 *
5976 * This function doesn't do "charge" to the new cgroup and doesn't do
5977 * "uncharge" from the old cgroup.
5978 */
5979static int mem_cgroup_move_account(struct page *page,
5980				   bool compound,
5981				   struct mem_cgroup *from,
5982				   struct mem_cgroup *to)
5983{
5984	struct folio *folio = page_folio(page);
5985	struct lruvec *from_vec, *to_vec;
5986	struct pglist_data *pgdat;
5987	unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
5988	int nid, ret;
5989
5990	VM_BUG_ON(from == to);
5991	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5992	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
5993	VM_BUG_ON(compound && !folio_test_large(folio));
5994
5995	ret = -EINVAL;
5996	if (folio_memcg(folio) != from)
5997		goto out;
5998
5999	pgdat = folio_pgdat(folio);
6000	from_vec = mem_cgroup_lruvec(from, pgdat);
6001	to_vec = mem_cgroup_lruvec(to, pgdat);
6002
6003	folio_memcg_lock(folio);
6004
6005	if (folio_test_anon(folio)) {
6006		if (folio_mapped(folio)) {
6007			__mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
6008			__mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
6009			if (folio_test_pmd_mappable(folio)) {
6010				__mod_lruvec_state(from_vec, NR_ANON_THPS,
6011						   -nr_pages);
6012				__mod_lruvec_state(to_vec, NR_ANON_THPS,
6013						   nr_pages);
6014			}
6015		}
6016	} else {
6017		__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
6018		__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
6019
6020		if (folio_test_swapbacked(folio)) {
6021			__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
6022			__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
6023		}
6024
6025		if (folio_mapped(folio)) {
6026			__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
6027			__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
6028		}
6029
6030		if (folio_test_dirty(folio)) {
6031			struct address_space *mapping = folio_mapping(folio);
6032
6033			if (mapping_can_writeback(mapping)) {
6034				__mod_lruvec_state(from_vec, NR_FILE_DIRTY,
6035						   -nr_pages);
6036				__mod_lruvec_state(to_vec, NR_FILE_DIRTY,
6037						   nr_pages);
6038			}
6039		}
6040	}
6041
6042#ifdef CONFIG_SWAP
6043	if (folio_test_swapcache(folio)) {
6044		__mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages);
6045		__mod_lruvec_state(to_vec, NR_SWAPCACHE, nr_pages);
6046	}
6047#endif
6048	if (folio_test_writeback(folio)) {
6049		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
6050		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
6051	}
6052
6053	/*
6054	 * All state has been migrated, let's switch to the new memcg.
6055	 *
6056	 * It is safe to change page's memcg here because the page
6057	 * is referenced, charged, isolated, and locked: we can't race
6058	 * with (un)charging, migration, LRU putback, or anything else
6059	 * that would rely on a stable page's memory cgroup.
6060	 *
6061	 * Note that folio_memcg_lock is a memcg lock, not a page lock,
6062	 * to save space. As soon as we switch page's memory cgroup to a
6063	 * new memcg that isn't locked, the above state can change
6064	 * concurrently again. Make sure we're truly done with it.
6065	 */
6066	smp_mb();
6067
6068	css_get(&to->css);
6069	css_put(&from->css);
6070
6071	folio->memcg_data = (unsigned long)to;
6072
6073	__folio_memcg_unlock(from);
6074
6075	ret = 0;
6076	nid = folio_nid(folio);
6077
6078	local_irq_disable();
6079	mem_cgroup_charge_statistics(to, nr_pages);
6080	memcg_check_events(to, nid);
6081	mem_cgroup_charge_statistics(from, -nr_pages);
6082	memcg_check_events(from, nid);
6083	local_irq_enable();
6084out:
6085	return ret;
6086}
6087
6088/**
6089 * get_mctgt_type - get target type of moving charge
6090 * @vma: the vma the pte to be checked belongs to
6091 * @addr: the address corresponding to the pte to be checked
6092 * @ptent: the pte to be checked
6093 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
6094 *
6095 * Context: Called with pte lock held.
6096 * Return:
6097 * * MC_TARGET_NONE - If the pte is not a target for move charge.
6098 * * MC_TARGET_PAGE - If the page corresponding to this pte is a target for
6099 *   move charge. If @target is not NULL, the page is stored in target->page
6100 *   with an extra refcount taken (the caller should release it).
6101 * * MC_TARGET_SWAP - If the swap entry corresponding to this pte is a
6102 *   target for charge migration.  If @target is not NULL, the entry is
6103 *   stored in target->ent.
6104 * * MC_TARGET_DEVICE - Like MC_TARGET_PAGE but the page is device memory and
6105 *   thus not on the LRU.  For now such a page is charged like a regular page
6106 *   would be, as it is just special memory taking the place of a regular page.
6107 *   See Documentation/vm/hmm.txt and include/linux/hmm.h
6108 */
6109static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
6110		unsigned long addr, pte_t ptent, union mc_target *target)
6111{
6112	struct page *page = NULL;
6113	enum mc_target_type ret = MC_TARGET_NONE;
6114	swp_entry_t ent = { .val = 0 };
6115
6116	if (pte_present(ptent))
6117		page = mc_handle_present_pte(vma, addr, ptent);
6118	else if (pte_none_mostly(ptent))
6119		/*
6120		 * PTE markers should be treated as a none pte here, separated
6121		 * from other swap handling below.
6122		 */
6123		page = mc_handle_file_pte(vma, addr, ptent);
6124	else if (is_swap_pte(ptent))
6125		page = mc_handle_swap_pte(vma, ptent, &ent);
6126
6127	if (target && page) {
6128		if (!trylock_page(page)) {
6129			put_page(page);
6130			return ret;
6131		}
6132		/*
6133		 * page_mapped() must be stable during the move. This
6134		 * pte is locked, so if it's present, the page cannot
6135		 * become unmapped. If it isn't, we have only partial
6136		 * control over the mapped state: the page lock will
6137		 * prevent new faults against pagecache and swapcache,
6138		 * so an unmapped page cannot become mapped. However,
6139		 * if the page is already mapped elsewhere, it can
6140		 * unmap, and there is nothing we can do about it.
6141		 * Alas, skip moving the page in this case.
6142		 */
6143		if (!pte_present(ptent) && page_mapped(page)) {
6144			unlock_page(page);
6145			put_page(page);
6146			return ret;
6147		}
6148	}
6149
6150	if (!page && !ent.val)
6151		return ret;
6152	if (page) {
6153		/*
6154		 * Do only a loose check w/o serialization.
6155		 * mem_cgroup_move_account() checks the page is valid or
6156		 * not under LRU exclusion.
6157		 */
6158		if (page_memcg(page) == mc.from) {
6159			ret = MC_TARGET_PAGE;
6160			if (is_device_private_page(page) ||
6161			    is_device_coherent_page(page))
6162				ret = MC_TARGET_DEVICE;
6163			if (target)
6164				target->page = page;
6165		}
6166		if (!ret || !target) {
6167			if (target)
6168				unlock_page(page);
6169			put_page(page);
6170		}
6171	}
6172	/*
6173	 * There is a swap entry and a page doesn't exist or isn't charged.
6174	 * But we cannot move a tail-page in a THP.
6175	 */
6176	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
6177	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
6178		ret = MC_TARGET_SWAP;
6179		if (target)
6180			target->ent = ent;
6181	}
6182	return ret;
6183}
6184
6185#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6186/*
6187 * We don't consider PMD mapped swapping or file mapped pages because THP does
6188 * not support them for now.
6189 * Caller should make sure that pmd_trans_huge(pmd) is true.
6190 */
6191static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6192		unsigned long addr, pmd_t pmd, union mc_target *target)
6193{
6194	struct page *page = NULL;
6195	enum mc_target_type ret = MC_TARGET_NONE;
6196
6197	if (unlikely(is_swap_pmd(pmd))) {
6198		VM_BUG_ON(thp_migration_supported() &&
6199				  !is_pmd_migration_entry(pmd));
6200		return ret;
6201	}
6202	page = pmd_page(pmd);
6203	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
6204	if (!(mc.flags & MOVE_ANON))
6205		return ret;
6206	if (page_memcg(page) == mc.from) {
6207		ret = MC_TARGET_PAGE;
6208		if (target) {
6209			get_page(page);
6210			if (!trylock_page(page)) {
6211				put_page(page);
6212				return MC_TARGET_NONE;
6213			}
6214			target->page = page;
6215		}
6216	}
6217	return ret;
6218}
6219#else
6220static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6221		unsigned long addr, pmd_t pmd, union mc_target *target)
6222{
6223	return MC_TARGET_NONE;
6224}
6225#endif
6226
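/*
 * Page-walk callback for the precharge pass: count every pte (or huge
 * pmd) that is a move-charge target and accumulate the result in
 * mc.precharge.
 */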
6227static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
6228					unsigned long addr, unsigned long end,
6229					struct mm_walk *walk)
6230{
6231	struct vm_area_struct *vma = walk->vma;
6232	pte_t *pte;
6233	spinlock_t *ptl;
6234
6235	ptl = pmd_trans_huge_lock(pmd, vma);
6236	if (ptl) {
6237		/*
6238		 * Note there cannot be MC_TARGET_DEVICE for now as we do not
6239		 * support transparent huge pages with MEMORY_DEVICE_PRIVATE, but
6240		 * this might change.
6241		 */
6242		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
6243			mc.precharge += HPAGE_PMD_NR;
6244		spin_unlock(ptl);
6245		return 0;
6246	}
6247
6248	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6249	if (!pte)
6250		return 0;
6251	for (; addr != end; pte++, addr += PAGE_SIZE)
6252		if (get_mctgt_type(vma, addr, ptep_get(pte), NULL))
6253			mc.precharge++;	/* increment precharge temporarily */
6254	pte_unmap_unlock(pte - 1, ptl);
6255	cond_resched();
6256
6257	return 0;
6258}
6259
6260static const struct mm_walk_ops precharge_walk_ops = {
6261	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
6262	.walk_lock	= PGWALK_RDLOCK,
6263};
6264
6265static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
6266{
6267	unsigned long precharge;
6268
6269	mmap_read_lock(mm);
6270	walk_page_range(mm, 0, ULONG_MAX, &precharge_walk_ops, NULL);
6271	mmap_read_unlock(mm);
6272
6273	precharge = mc.precharge;
6274	mc.precharge = 0;
6275
6276	return precharge;
6277}
6278
6279static int mem_cgroup_precharge_mc(struct mm_struct *mm)
6280{
6281	unsigned long precharge = mem_cgroup_count_precharge(mm);
6282
6283	VM_BUG_ON(mc.moving_task);
6284	mc.moving_task = current;
6285	return mem_cgroup_do_precharge(precharge);
6286}
6287
6288/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6289static void __mem_cgroup_clear_mc(void)
6290{
6291	struct mem_cgroup *from = mc.from;
6292	struct mem_cgroup *to = mc.to;
6293
6294	/* we must uncharge all the leftover precharges from mc.to */
6295	if (mc.precharge) {
6296		mem_cgroup_cancel_charge(mc.to, mc.precharge);
6297		mc.precharge = 0;
6298	}
6299	/*
6300	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6301	 * we must uncharge here.
6302	 */
6303	if (mc.moved_charge) {
6304		mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
6305		mc.moved_charge = 0;
6306	}
6307	/* we must fixup refcnts and charges */
6308	if (mc.moved_swap) {
6309		/* uncharge swap account from the old cgroup */
6310		if (!mem_cgroup_is_root(mc.from))
6311			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
6312
6313		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
6314
6315		/*
6316		 * we charged both to->memory and to->memsw, so we
6317		 * should uncharge to->memory.
6318		 */
6319		if (!mem_cgroup_is_root(mc.to))
6320			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
6321
6322		mc.moved_swap = 0;
6323	}
6324	memcg_oom_recover(from);
6325	memcg_oom_recover(to);
6326	wake_up_all(&mc.waitq);
6327}
6328
6329static void mem_cgroup_clear_mc(void)
6330{
6331	struct mm_struct *mm = mc.mm;
6332
6333	/*
6334	 * we must clear moving_task before waking up waiters at the end of
6335	 * task migration.
6336	 */
6337	mc.moving_task = NULL;
6338	__mem_cgroup_clear_mc();
6339	spin_lock(&mc.lock);
6340	mc.from = NULL;
6341	mc.to = NULL;
6342	mc.mm = NULL;
6343	spin_unlock(&mc.lock);
6344
6345	mmput(mm);
6346}
6347
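/*
 * cgroup1 ->can_attach(): when the destination memcg has
 * move_charge_at_immigrate enabled and the migrating leader owns its
 * mm, record the move in the global "mc" state and precharge the
 * eligible pages so they can be moved in ->attach().
 */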
6348static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6349{
6350	struct cgroup_subsys_state *css;
6351	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
6352	struct mem_cgroup *from;
6353	struct task_struct *leader, *p;
6354	struct mm_struct *mm;
6355	unsigned long move_flags;
6356	int ret = 0;
6357
6358	/* charge immigration isn't supported on the default hierarchy */
6359	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6360		return 0;
6361
6362	/*
6363	 * Multi-process migrations only happen on the default hierarchy
6364	 * where charge immigration is not used.  Perform charge
6365	 * immigration if @tset contains a leader and whine if there are
6366	 * multiple.
6367	 */
6368	p = NULL;
6369	cgroup_taskset_for_each_leader(leader, css, tset) {
6370		WARN_ON_ONCE(p);
6371		p = leader;
6372		memcg = mem_cgroup_from_css(css);
6373	}
6374	if (!p)
6375		return 0;
6376
6377	/*
6378	 * We are now committed to this value whatever it is. Changes in this
6379	 * tunable will only affect upcoming migrations, not the current one.
6380	 * So we need to save it, and keep it going.
6381	 */
6382	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
6383	if (!move_flags)
6384		return 0;
6385
6386	from = mem_cgroup_from_task(p);
6387
6388	VM_BUG_ON(from == memcg);
6389
6390	mm = get_task_mm(p);
6391	if (!mm)
6392		return 0;
6393	/* We move charges only when we move an owner of the mm */
6394	if (mm->owner == p) {
6395		VM_BUG_ON(mc.from);
6396		VM_BUG_ON(mc.to);
6397		VM_BUG_ON(mc.precharge);
6398		VM_BUG_ON(mc.moved_charge);
6399		VM_BUG_ON(mc.moved_swap);
6400
6401		spin_lock(&mc.lock);
6402		mc.mm = mm;
6403		mc.from = from;
6404		mc.to = memcg;
6405		mc.flags = move_flags;
6406		spin_unlock(&mc.lock);
6407		/* We set mc.moving_task later */
6408
6409		ret = mem_cgroup_precharge_mc(mm);
6410		if (ret)
6411			mem_cgroup_clear_mc();
6412	} else {
6413		mmput(mm);
6414	}
6415	return ret;
6416}
6417
6418static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6419{
6420	if (mc.to)
6421		mem_cgroup_clear_mc();
6422}
6423
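/*
 * Page-walk callback for the move pass: for each move-charge target,
 * isolate the page (unless it is device memory) and move its charge
 * from mc.from to mc.to, consuming the precharge taken in can_attach().
 * Swap entries only have their swap-cgroup record switched here; the
 * counters are fixed up later in __mem_cgroup_clear_mc().
 */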
6424static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6425				unsigned long addr, unsigned long end,
6426				struct mm_walk *walk)
6427{
6428	int ret = 0;
6429	struct vm_area_struct *vma = walk->vma;
6430	pte_t *pte;
6431	spinlock_t *ptl;
6432	enum mc_target_type target_type;
6433	union mc_target target;
6434	struct page *page;
6435
6436	ptl = pmd_trans_huge_lock(pmd, vma);
6437	if (ptl) {
6438		if (mc.precharge < HPAGE_PMD_NR) {
6439			spin_unlock(ptl);
6440			return 0;
6441		}
6442		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6443		if (target_type == MC_TARGET_PAGE) {
6444			page = target.page;
6445			if (isolate_lru_page(page)) {
6446				if (!mem_cgroup_move_account(page, true,
6447							     mc.from, mc.to)) {
6448					mc.precharge -= HPAGE_PMD_NR;
6449					mc.moved_charge += HPAGE_PMD_NR;
6450				}
6451				putback_lru_page(page);
6452			}
6453			unlock_page(page);
6454			put_page(page);
6455		} else if (target_type == MC_TARGET_DEVICE) {
6456			page = target.page;
6457			if (!mem_cgroup_move_account(page, true,
6458						     mc.from, mc.to)) {
6459				mc.precharge -= HPAGE_PMD_NR;
6460				mc.moved_charge += HPAGE_PMD_NR;
6461			}
6462			unlock_page(page);
6463			put_page(page);
6464		}
6465		spin_unlock(ptl);
6466		return 0;
6467	}
6468
6469retry:
6470	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6471	if (!pte)
6472		return 0;
6473	for (; addr != end; addr += PAGE_SIZE) {
6474		pte_t ptent = ptep_get(pte++);
6475		bool device = false;
6476		swp_entry_t ent;
6477
6478		if (!mc.precharge)
6479			break;
6480
6481		switch (get_mctgt_type(vma, addr, ptent, &target)) {
6482		case MC_TARGET_DEVICE:
6483			device = true;
6484			fallthrough;
6485		case MC_TARGET_PAGE:
6486			page = target.page;
6487			/*
6488			 * We can have a part of the split pmd here. Moving it
6489			 * can be done but it would be too convoluted so simply
6490			 * ignore such a partial THP and keep it in the original
6491			 * memcg. There should be somebody mapping the head.
6492			 */
6493			if (PageTransCompound(page))
6494				goto put;
6495			if (!device && !isolate_lru_page(page))
6496				goto put;
6497			if (!mem_cgroup_move_account(page, false,
6498						mc.from, mc.to)) {
6499				mc.precharge--;
6500				/* we uncharge from mc.from later. */
6501				mc.moved_charge++;
6502			}
6503			if (!device)
6504				putback_lru_page(page);
6505put:			/* get_mctgt_type() gets & locks the page */
6506			unlock_page(page);
6507			put_page(page);
6508			break;
6509		case MC_TARGET_SWAP:
6510			ent = target.ent;
6511			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6512				mc.precharge--;
6513				mem_cgroup_id_get_many(mc.to, 1);
6514				/* we fixup other refcnts and charges later. */
6515				mc.moved_swap++;
6516			}
6517			break;
6518		default:
6519			break;
6520		}
6521	}
6522	pte_unmap_unlock(pte - 1, ptl);
6523	cond_resched();
6524
6525	if (addr != end) {
6526		/*
6527		 * We have consumed all precharges we got in can_attach().
6528		 * We try to charge one by one, but don't do any additional
6529		 * charges to mc.to if we have already failed a charge once in the
6530		 * attach() phase.
6531		 */
6532		ret = mem_cgroup_do_precharge(1);
6533		if (!ret)
6534			goto retry;
6535	}
6536
6537	return ret;
6538}
6539
6540static const struct mm_walk_ops charge_walk_ops = {
6541	.pmd_entry	= mem_cgroup_move_charge_pte_range,
6542	.walk_lock	= PGWALK_RDLOCK,
6543};
6544
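/*
 * Walk all of mc.mm and move the charges of its pages from mc.from to
 * mc.to, using the precharges taken in mem_cgroup_can_attach().
 */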
6545static void mem_cgroup_move_charge(void)
6546{
6547	lru_add_drain_all();
6548	/*
6549	 * Signal folio_memcg_lock() to take the memcg's move_lock
6550	 * while we're moving its pages to another memcg. Then wait
6551	 * for already started RCU-only updates to finish.
6552	 */
6553	atomic_inc(&mc.from->moving_account);
6554	synchronize_rcu();
6555retry:
6556	if (unlikely(!mmap_read_trylock(mc.mm))) {
6557		/*
6558		 * Someone who is holding the mmap_lock might be waiting in
6559		 * waitq. So we cancel all extra charges, wake up all waiters,
6560		 * and retry. Because we cancel precharges, we might not be able
6561		 * to move enough charges, but moving charge is a best-effort
6562		 * feature anyway, so it wouldn't be a big problem.
6563		 */
6564		__mem_cgroup_clear_mc();
6565		cond_resched();
6566		goto retry;
6567	}
6568	/*
6569	 * When we have consumed all precharges and failed in doing
6570	 * additional charge, the page walk just aborts.
6571	 */
6572	walk_page_range(mc.mm, 0, ULONG_MAX, &charge_walk_ops, NULL);
6573	mmap_read_unlock(mc.mm);
6574	atomic_dec(&mc.from->moving_account);
6575}
6576
6577static void mem_cgroup_move_task(void)
6578{
6579	if (mc.to) {
6580		mem_cgroup_move_charge();
6581		mem_cgroup_clear_mc();
6582	}
6583}
6584
6585#else	/* !CONFIG_MMU */
6586static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6587{
6588	return 0;
6589}
6590static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6591{
6592}
6593static void mem_cgroup_move_task(void)
6594{
6595}
6596#endif
6597
6598#ifdef CONFIG_MEMCG_KMEM
6599static void mem_cgroup_fork(struct task_struct *task)
6600{
6601	/*
6602	 * Set the update flag to cause task->objcg to be initialized lazily
6603	 * on the first allocation. It can be done without any synchronization
6604	 * because it's always performed on the current task, as is
6605	 * current_objcg_update().
6606	 */
6607	task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
6608}
6609
6610static void mem_cgroup_exit(struct task_struct *task)
6611{
6612	struct obj_cgroup *objcg = task->objcg;
6613
6614	objcg = (struct obj_cgroup *)
6615		((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
6616	if (objcg)
6617		obj_cgroup_put(objcg);
6618
6619	/*
6620	 * Some kernel allocations can happen after this point,
6621	 * but let's ignore them. It can be done without any synchronization
6622	 * because it's always performed on the current task, as is
6623	 * current_objcg_update().
6624	 */
6625	task->objcg = NULL;
6626}
6627#endif
6628
6629#ifdef CONFIG_LRU_GEN
6630static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
6631{
6632	struct task_struct *task;
6633	struct cgroup_subsys_state *css;
6634
6635	/* find the first leader if there is any */
6636	cgroup_taskset_for_each_leader(task, css, tset)
6637		break;
6638
6639	if (!task)
6640		return;
6641
6642	task_lock(task);
6643	if (task->mm && READ_ONCE(task->mm->owner) == task)
6644		lru_gen_migrate_mm(task->mm);
6645	task_unlock(task);
6646}
6647#else
6648static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
6649#endif /* CONFIG_LRU_GEN */
6650
6651#ifdef CONFIG_MEMCG_KMEM
6652static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
6653{
6654	struct task_struct *task;
6655	struct cgroup_subsys_state *css;
6656
6657	cgroup_taskset_for_each(task, css, tset) {
6658		/* atomically set the update bit */
6659		set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
6660	}
6661}
6662#else
6663static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset) {}
6664#endif /* CONFIG_MEMCG_KMEM */
6665
6666#if defined(CONFIG_LRU_GEN) || defined(CONFIG_MEMCG_KMEM)
6667static void mem_cgroup_attach(struct cgroup_taskset *tset)
6668{
6669	mem_cgroup_lru_gen_attach(tset);
6670	mem_cgroup_kmem_attach(tset);
6671}
6672#endif
6673
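/* Print a page_counter tunable: "max" for PAGE_COUNTER_MAX, else bytes. */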
6674static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6675{
6676	if (value == PAGE_COUNTER_MAX)
6677		seq_puts(m, "max\n");
6678	else
6679		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6680
6681	return 0;
6682}
6683
6684static u64 memory_current_read(struct cgroup_subsys_state *css,
6685			       struct cftype *cft)
6686{
6687	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6688
6689	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6690}
6691
6692static u64 memory_peak_read(struct cgroup_subsys_state *css,
6693			    struct cftype *cft)
6694{
6695	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6696
6697	return (u64)memcg->memory.watermark * PAGE_SIZE;
6698}
6699
6700static int memory_min_show(struct seq_file *m, void *v)
6701{
6702	return seq_puts_memcg_tunable(m,
6703		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6704}
6705
6706static ssize_t memory_min_write(struct kernfs_open_file *of,
6707				char *buf, size_t nbytes, loff_t off)
6708{
6709	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6710	unsigned long min;
6711	int err;
6712
6713	buf = strstrip(buf);
6714	err = page_counter_memparse(buf, "max", &min);
6715	if (err)
6716		return err;
6717
6718	page_counter_set_min(&memcg->memory, min);
6719
6720	return nbytes;
6721}
6722
6723static int memory_low_show(struct seq_file *m, void *v)
6724{
6725	return seq_puts_memcg_tunable(m,
6726		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6727}
6728
6729static ssize_t memory_low_write(struct kernfs_open_file *of,
6730				char *buf, size_t nbytes, loff_t off)
6731{
6732	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6733	unsigned long low;
6734	int err;
6735
6736	buf = strstrip(buf);
6737	err = page_counter_memparse(buf, "max", &low);
6738	if (err)
6739		return err;
6740
6741	page_counter_set_low(&memcg->memory, low);
6742
6743	return nbytes;
6744}
6745
6746static int memory_high_show(struct seq_file *m, void *v)
6747{
6748	return seq_puts_memcg_tunable(m,
6749		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6750}
6751
6752static ssize_t memory_high_write(struct kernfs_open_file *of,
6753				 char *buf, size_t nbytes, loff_t off)
6754{
6755	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6756	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6757	bool drained = false;
6758	unsigned long high;
6759	int err;
6760
6761	buf = strstrip(buf);
6762	err = page_counter_memparse(buf, "max", &high);
6763	if (err)
6764		return err;
6765
6766	page_counter_set_high(&memcg->memory, high);
6767
6768	for (;;) {
6769		unsigned long nr_pages = page_counter_read(&memcg->memory);
6770		unsigned long reclaimed;
6771
6772		if (nr_pages <= high)
6773			break;
6774
6775		if (signal_pending(current))
6776			break;
6777
6778		if (!drained) {
6779			drain_all_stock(memcg);
6780			drained = true;
6781			continue;
6782		}
6783
6784		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6785					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP);
6786
6787		if (!reclaimed && !nr_retries--)
6788			break;
6789	}
6790
6791	memcg_wb_domain_size_changed(memcg);
6792	return nbytes;
6793}
6794
6795static int memory_max_show(struct seq_file *m, void *v)
6796{
6797	return seq_puts_memcg_tunable(m,
6798		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6799}
6800
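/*
 * memory.max accepts a byte count (parsed with memparse, so suffixes such as
 * "M" and "G" work) or the string "max". After lowering the limit, the loop
 * below drains per-cpu stocks, reclaims, and as a last resort OOM-kills until
 * usage fits under the new limit or the writer is interrupted by a signal.
 */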
6801static ssize_t memory_max_write(struct kernfs_open_file *of,
6802				char *buf, size_t nbytes, loff_t off)
6803{
6804	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6805	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6806	bool drained = false;
6807	unsigned long max;
6808	int err;
6809
6810	buf = strstrip(buf);
6811	err = page_counter_memparse(buf, "max", &max);
6812	if (err)
6813		return err;
6814
6815	xchg(&memcg->memory.max, max);
6816
6817	for (;;) {
6818		unsigned long nr_pages = page_counter_read(&memcg->memory);
6819
6820		if (nr_pages <= max)
6821			break;
6822
6823		if (signal_pending(current))
6824			break;
6825
6826		if (!drained) {
6827			drain_all_stock(memcg);
6828			drained = true;
6829			continue;
6830		}
6831
6832		if (nr_reclaims) {
6833			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6834					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP))
6835				nr_reclaims--;
6836			continue;
6837		}
6838
6839		memcg_memory_event(memcg, MEMCG_OOM);
6840		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6841			break;
6842	}
6843
6844	memcg_wb_domain_size_changed(memcg);
6845	return nbytes;
6846}
6847
6848/*
6849 * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
6850 * if any new events become available.
6851 */
6852static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6853{
6854	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6855	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6856	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6857	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6858	seq_printf(m, "oom_kill %lu\n",
6859		   atomic_long_read(&events[MEMCG_OOM_KILL]));
6860	seq_printf(m, "oom_group_kill %lu\n",
6861		   atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
6862}
6863
6864static int memory_events_show(struct seq_file *m, void *v)
6865{
6866	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6867
6868	__memory_events_show(m, memcg->memory_events);
6869	return 0;
6870}
6871
6872static int memory_events_local_show(struct seq_file *m, void *v)
6873{
6874	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6875
6876	__memory_events_show(m, memcg->memory_events_local);
6877	return 0;
6878}
6879
6880static int memory_stat_show(struct seq_file *m, void *v)
6881{
6882	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6883	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
6884	struct seq_buf s;
6885
6886	if (!buf)
6887		return -ENOMEM;
6888	seq_buf_init(&s, buf, PAGE_SIZE);
6889	memory_stat_format(memcg, &s);
6890	seq_puts(m, buf);
6891	kfree(buf);
6892	return 0;
6893}
6894
6895#ifdef CONFIG_NUMA
6896static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6897						     int item)
6898{
6899	return lruvec_page_state(lruvec, item) *
6900		memcg_page_state_output_unit(item);
6901}
6902
6903static int memory_numa_stat_show(struct seq_file *m, void *v)
6904{
6905	int i;
6906	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6907
6908	mem_cgroup_flush_stats(memcg);
6909
6910	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6911		int nid;
6912
6913		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6914			continue;
6915
6916		seq_printf(m, "%s", memory_stats[i].name);
6917		for_each_node_state(nid, N_MEMORY) {
6918			u64 size;
6919			struct lruvec *lruvec;
6920
6921			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6922			size = lruvec_page_state_output(lruvec,
6923							memory_stats[i].idx);
6924			seq_printf(m, " N%d=%llu", nid, size);
6925		}
6926		seq_putc(m, '\n');
6927	}
6928
6929	return 0;
6930}
6931#endif
6932
6933static int memory_oom_group_show(struct seq_file *m, void *v)
6934{
6935	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6936
6937	seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
6938
6939	return 0;
6940}
6941
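/*
 * memory.oom.group takes 0 or 1. When set, an OOM kill inside this memcg is
 * expected to take down the whole group (reported via the oom_group_kill
 * event above) instead of picking a single task.
 */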
6942static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6943				      char *buf, size_t nbytes, loff_t off)
6944{
6945	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6946	int ret, oom_group;
6947
6948	buf = strstrip(buf);
6949	if (!buf)
6950		return -EINVAL;
6951
6952	ret = kstrtoint(buf, 0, &oom_group);
6953	if (ret)
6954		return ret;
6955
6956	if (oom_group != 0 && oom_group != 1)
6957		return -EINVAL;
6958
6959	WRITE_ONCE(memcg->oom_group, oom_group);
6960
6961	return nbytes;
6962}
6963
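/*
 * memory.reclaim: writing a byte amount (e.g. "echo 256M > memory.reclaim")
 * triggers proactive reclaim of roughly that much memory from the cgroup.
 * The write fails with -EAGAIN if the target cannot be met after the retry
 * budget is exhausted, and with -EINTR if interrupted by a signal.
 */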
6964static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
6965			      size_t nbytes, loff_t off)
6966{
6967	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6968	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6969	unsigned long nr_to_reclaim, nr_reclaimed = 0;
6970	unsigned int reclaim_options;
6971	int err;
6972
6973	buf = strstrip(buf);
6974	err = page_counter_memparse(buf, "", &nr_to_reclaim);
6975	if (err)
6976		return err;
6977
6978	reclaim_options	= MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
6979	while (nr_reclaimed < nr_to_reclaim) {
6980		unsigned long reclaimed;
6981
6982		if (signal_pending(current))
6983			return -EINTR;
6984
6985		/*
6986		 * This is the final attempt; drain percpu lru caches in the
6987		 * hope of introducing more evictable pages for
6988		 * try_to_free_mem_cgroup_pages().
6989		 */
6990		if (!nr_retries)
6991			lru_add_drain_all();
6992
6993		reclaimed = try_to_free_mem_cgroup_pages(memcg,
6994					min(nr_to_reclaim - nr_reclaimed, SWAP_CLUSTER_MAX),
6995					GFP_KERNEL, reclaim_options);
6996
6997		if (!reclaimed && !nr_retries--)
6998			return -EAGAIN;
6999
7000		nr_reclaimed += reclaimed;
7001	}
7002
7003	return nbytes;
7004}
7005
7006static struct cftype memory_files[] = {
7007	{
7008		.name = "current",
7009		.flags = CFTYPE_NOT_ON_ROOT,
7010		.read_u64 = memory_current_read,
7011	},
7012	{
7013		.name = "peak",
7014		.flags = CFTYPE_NOT_ON_ROOT,
7015		.read_u64 = memory_peak_read,
7016	},
7017	{
7018		.name = "min",
7019		.flags = CFTYPE_NOT_ON_ROOT,
7020		.seq_show = memory_min_show,
7021		.write = memory_min_write,
7022	},
7023	{
7024		.name = "low",
7025		.flags = CFTYPE_NOT_ON_ROOT,
7026		.seq_show = memory_low_show,
7027		.write = memory_low_write,
7028	},
7029	{
7030		.name = "high",
7031		.flags = CFTYPE_NOT_ON_ROOT,
7032		.seq_show = memory_high_show,
7033		.write = memory_high_write,
7034	},
7035	{
7036		.name = "max",
7037		.flags = CFTYPE_NOT_ON_ROOT,
7038		.seq_show = memory_max_show,
7039		.write = memory_max_write,
7040	},
7041	{
7042		.name = "events",
7043		.flags = CFTYPE_NOT_ON_ROOT,
7044		.file_offset = offsetof(struct mem_cgroup, events_file),
7045		.seq_show = memory_events_show,
7046	},
7047	{
7048		.name = "events.local",
7049		.flags = CFTYPE_NOT_ON_ROOT,
7050		.file_offset = offsetof(struct mem_cgroup, events_local_file),
7051		.seq_show = memory_events_local_show,
7052	},
7053	{
7054		.name = "stat",
7055		.seq_show = memory_stat_show,
7056	},
7057#ifdef CONFIG_NUMA
7058	{
7059		.name = "numa_stat",
7060		.seq_show = memory_numa_stat_show,
7061	},
7062#endif
7063	{
7064		.name = "oom.group",
7065		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
7066		.seq_show = memory_oom_group_show,
7067		.write = memory_oom_group_write,
7068	},
7069	{
7070		.name = "reclaim",
7071		.flags = CFTYPE_NS_DELEGATABLE,
7072		.write = memory_reclaim,
7073	},
7074	{ }	/* terminate */
7075};
7076
7077struct cgroup_subsys memory_cgrp_subsys = {
7078	.css_alloc = mem_cgroup_css_alloc,
7079	.css_online = mem_cgroup_css_online,
7080	.css_offline = mem_cgroup_css_offline,
7081	.css_released = mem_cgroup_css_released,
7082	.css_free = mem_cgroup_css_free,
7083	.css_reset = mem_cgroup_css_reset,
7084	.css_rstat_flush = mem_cgroup_css_rstat_flush,
7085	.can_attach = mem_cgroup_can_attach,
7086#if defined(CONFIG_LRU_GEN) || defined(CONFIG_MEMCG_KMEM)
7087	.attach = mem_cgroup_attach,
7088#endif
7089	.cancel_attach = mem_cgroup_cancel_attach,
7090	.post_attach = mem_cgroup_move_task,
7091#ifdef CONFIG_MEMCG_KMEM
7092	.fork = mem_cgroup_fork,
7093	.exit = mem_cgroup_exit,
7094#endif
7095	.dfl_cftypes = memory_files,
7096	.legacy_cftypes = mem_cgroup_legacy_files,
7097	.early_init = 0,
7098};
7099
7100/*
7101 * This function calculates an individual cgroup's effective
7102 * protection which is derived from its own memory.min/low, its
7103 * parent's and siblings' settings, as well as the actual memory
7104 * distribution in the tree.
7105 *
7106 * The following rules apply to the effective protection values:
7107 *
7108 * 1. At the first level of reclaim, effective protection is equal to
7109 *    the declared protection in memory.min and memory.low.
7110 *
7111 * 2. To enable safe delegation of the protection configuration, at
7112 *    subsequent levels the effective protection is capped to the
7113 *    parent's effective protection.
7114 *
7115 * 3. To make complex and dynamic subtrees easier to configure, the
7116 *    user is allowed to overcommit the declared protection at a given
7117 *    level. If that is the case, the parent's effective protection is
7118 *    distributed to the children in proportion to how much protection
7119 *    they have declared and how much of it they are utilizing.
7120 *
7121 *    This makes distribution proportional, but also work-conserving:
7122 *    if one cgroup claims much more protection than it uses memory,
7123 *    the unused remainder is available to its siblings.
7124 *
7125 * 4. Conversely, when the declared protection is undercommitted at a
7126 *    given level, the distribution of the larger parental protection
7127 *    budget is NOT proportional. A cgroup's protection from a sibling
7128 *    is capped to its own memory.min/low setting.
7129 *
7130 * 5. However, to allow protecting recursive subtrees from each other
7131 *    without having to declare each individual cgroup's fixed share
7132 *    of the ancestor's claim to protection, any unutilized -
7133 *    "floating" - protection from up the tree is distributed in
7134 *    proportion to each cgroup's *usage*. This makes the protection
7135 *    neutral wrt sibling cgroups and lets them compete freely over
7136 *    the shared parental protection budget, but it protects the
7137 *    subtree as a whole from neighboring subtrees.
7138 *
7139 * Note that 4. and 5. are not in conflict: 4. is about protecting
7140 * against immediate siblings whereas 5. is about protecting against
7141 * neighboring subtrees.
7142 */
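/*
 * Worked example for rule 3 (overcommit), as a rough illustration only: a
 * parent with an effective protection of 100 pages has two children that
 * each declare and use 80 protected pages. siblings_protected (160) exceeds
 * parent_effective (100), so each child ends up with 80 * 100 / 160 = 50
 * effectively protected pages.
 */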
7143static unsigned long effective_protection(unsigned long usage,
7144					  unsigned long parent_usage,
7145					  unsigned long setting,
7146					  unsigned long parent_effective,
7147					  unsigned long siblings_protected)
7148{
7149	unsigned long protected;
7150	unsigned long ep;
7151
7152	protected = min(usage, setting);
7153	/*
7154	 * If all cgroups at this level combined claim and use more
7155	 * protection than what the parent affords them, distribute
7156	 * shares in proportion to utilization.
7157	 *
7158	 * We are using actual utilization rather than the statically
7159	 * claimed protection in order to be work-conserving: claimed
7160	 * but unused protection is available to siblings that would
7161	 * otherwise get a smaller chunk than what they claimed.
7162	 */
7163	if (siblings_protected > parent_effective)
7164		return protected * parent_effective / siblings_protected;
7165
7166	/*
7167	 * Ok, utilized protection of all children is within what the
7168	 * parent affords them, so we know whatever this child claims
7169	 * and utilizes is effectively protected.
7170	 *
7171	 * If there is unprotected usage beyond this value, reclaim
7172	 * will apply pressure in proportion to that amount.
7173	 *
7174	 * If there is unutilized protection, the cgroup will be fully
7175	 * shielded from reclaim, but we do return a smaller value for
7176	 * protection than what the group could enjoy in theory. This
7177	 * is okay. With the overcommit distribution above, effective
7178	 * protection is always dependent on how memory is actually
7179	 * consumed among the siblings anyway.
7180	 */
7181	ep = protected;
7182
7183	/*
7184	 * If the children aren't claiming (all of) the protection
7185	 * afforded to them by the parent, distribute the remainder in
7186	 * proportion to the (unprotected) memory of each cgroup. That
7187	 * way, cgroups that aren't explicitly prioritized wrt each
7188	 * other compete freely over the allowance, but they are
7189	 * collectively protected from neighboring trees.
7190	 *
7191	 * We're using unprotected memory for the weight so that if
7192	 * some cgroups DO claim explicit protection, we don't protect
7193	 * the same bytes twice.
7194	 *
7195	 * Check both usage and parent_usage against the respective
7196	 * protected values. One should imply the other, but they
7197	 * aren't read atomically - make sure the division is sane.
7198	 */
7199	if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
7200		return ep;
7201	if (parent_effective > siblings_protected &&
7202	    parent_usage > siblings_protected &&
7203	    usage > protected) {
7204		unsigned long unclaimed;
7205
7206		unclaimed = parent_effective - siblings_protected;
7207		unclaimed *= usage - protected;
7208		unclaimed /= parent_usage - siblings_protected;
7209
7210		ep += unclaimed;
7211	}
7212
7213	return ep;
7214}
7215
7216/**
7217 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
7218 * @root: the top ancestor of the sub-tree being checked
7219 * @memcg: the memory cgroup to check
7220 *
7221 * WARNING: This function is not stateless! It can only be used as part
7222 *          of a top-down tree iteration, not for isolated queries.
7223 */
7224void mem_cgroup_calculate_protection(struct mem_cgroup *root,
7225				     struct mem_cgroup *memcg)
7226{
7227	unsigned long usage, parent_usage;
7228	struct mem_cgroup *parent;
7229
7230	if (mem_cgroup_disabled())
7231		return;
7232
7233	if (!root)
7234		root = root_mem_cgroup;
7235
7236	/*
7237	 * Effective values of the reclaim targets are ignored so they
7238	 * can be stale. Have a look at mem_cgroup_protection for more
7239	 * details.
7240	 * TODO: calculation should be more robust so that we do not need
7241	 * that special casing.
7242	 */
7243	if (memcg == root)
7244		return;
7245
7246	usage = page_counter_read(&memcg->memory);
7247	if (!usage)
7248		return;
7249
7250	parent = parent_mem_cgroup(memcg);
7251
7252	if (parent == root) {
7253		memcg->memory.emin = READ_ONCE(memcg->memory.min);
7254		memcg->memory.elow = READ_ONCE(memcg->memory.low);
7255		return;
7256	}
7257
7258	parent_usage = page_counter_read(&parent->memory);
7259
7260	WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
7261			READ_ONCE(memcg->memory.min),
7262			READ_ONCE(parent->memory.emin),
7263			atomic_long_read(&parent->memory.children_min_usage)));
7264
7265	WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
7266			READ_ONCE(memcg->memory.low),
7267			READ_ONCE(parent->memory.elow),
7268			atomic_long_read(&parent->memory.children_low_usage)));
7269}
7270
7271static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
7272			gfp_t gfp)
7273{
7274	int ret;
7275
7276	ret = try_charge(memcg, gfp, folio_nr_pages(folio));
7277	if (ret)
7278		goto out;
7279
7280	mem_cgroup_commit_charge(folio, memcg);
7281out:
7282	return ret;
7283}
7284
7285int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
7286{
7287	struct mem_cgroup *memcg;
7288	int ret;
7289
7290	memcg = get_mem_cgroup_from_mm(mm);
7291	ret = charge_memcg(folio, memcg, gfp);
7292	css_put(&memcg->css);
7293
7294	return ret;
7295}
7296
7297/**
7298 * mem_cgroup_hugetlb_try_charge - try to charge the memcg for a hugetlb folio
7299 * @memcg: memcg to charge.
7300 * @gfp: reclaim mode.
7301 * @nr_pages: number of pages to charge.
7302 *
7303 * This function is called when allocating a huge page folio to determine if
7304 * the memcg has the capacity for it. It does not commit the charge yet,
7305 * as the hugetlb folio itself has not been obtained from the hugetlb pool.
7306 *
7307 * Once we have obtained the hugetlb folio, we can call
7308 * mem_cgroup_commit_charge() to commit the charge. If we fail to obtain the
7309 * folio, we should instead call mem_cgroup_cancel_charge() to undo the effect
7310 * of try_charge().
7311 *
7312 * Returns 0 on success. Otherwise, an error code is returned.
7313 */
7314int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
7315			long nr_pages)
7316{
7317	/*
7318	 * If hugetlb memcg charging is not enabled, do not fail hugetlb allocation,
7319	 * but do not attempt to commit charge later (or cancel on error) either.
7320	 */
7321	if (mem_cgroup_disabled() || !memcg ||
7322		!cgroup_subsys_on_dfl(memory_cgrp_subsys) ||
7323		!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
7324		return -EOPNOTSUPP;
7325
7326	if (try_charge(memcg, gfp, nr_pages))
7327		return -ENOMEM;
7328
7329	return 0;
7330}
7331
7332/**
7333 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
7334 * @folio: folio to charge.
7335 * @mm: mm context of the victim
7336 * @gfp: reclaim mode
7337 * @entry: swap entry for which the folio is allocated
7338 *
7339 * This function charges a folio allocated for swapin. Please call this before
7340 * adding the folio to the swapcache.
7341 *
7342 * Returns 0 on success. Otherwise, an error code is returned.
7343 */
7344int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
7345				  gfp_t gfp, swp_entry_t entry)
7346{
7347	struct mem_cgroup *memcg;
7348	unsigned short id;
7349	int ret;
7350
7351	if (mem_cgroup_disabled())
7352		return 0;
7353
7354	id = lookup_swap_cgroup_id(entry);
7355	rcu_read_lock();
7356	memcg = mem_cgroup_from_id(id);
7357	if (!memcg || !css_tryget_online(&memcg->css))
7358		memcg = get_mem_cgroup_from_mm(mm);
7359	rcu_read_unlock();
7360
7361	ret = charge_memcg(folio, memcg, gfp);
7362
7363	css_put(&memcg->css);
7364	return ret;
7365}
7366
7367/*
7368 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
7369 * @entry: swap entry for which the page is charged
7370 *
7371 * Call this function after successfully adding the charged page to swapcache.
7372 *
7373 * Note: This function assumes that the page for which the swap slot is being
7374 * uncharged is an order-0 page.
7375 */
7376void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
7377{
7378	/*
7379	 * Cgroup1's unified memory+swap counter has been charged with the
7380	 * new swapcache page; finish the transfer by uncharging the swap
7381	 * slot. The swap slot would also get uncharged when it dies, but
7382	 * it can stick around indefinitely and we'd count the page twice
7383	 * the entire time.
7384	 *
7385	 * Cgroup2 has separate resource counters for memory and swap,
7386	 * so this is a non-issue here. Memory and swap charge lifetimes
7387	 * correspond 1:1 to page and swap slot lifetimes: we charge the
7388	 * page to memory here, and uncharge swap when the slot is freed.
7389	 */
7390	if (!mem_cgroup_disabled() && do_memsw_account()) {
7391		/*
7392		 * The swap entry might not get freed for a long time,
7393		 * let's not wait for it.  The page already received a
7394		 * memory+swap charge, drop the swap entry duplicate.
7395		 */
7396		mem_cgroup_uncharge_swap(entry, 1);
7397	}
7398}
7399
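/*
 * State for batching uncharges: uncharge_folio() below accumulates counts
 * here and uncharge_batch() flushes them to the page counters and vmstats
 * once per memcg, rather than once per folio.
 */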
7400struct uncharge_gather {
7401	struct mem_cgroup *memcg;
7402	unsigned long nr_memory;
7403	unsigned long pgpgout;
7404	unsigned long nr_kmem;
7405	int nid;
7406};
7407
7408static inline void uncharge_gather_clear(struct uncharge_gather *ug)
7409{
7410	memset(ug, 0, sizeof(*ug));
7411}
7412
7413static void uncharge_batch(const struct uncharge_gather *ug)
7414{
7415	unsigned long flags;
7416
7417	if (ug->nr_memory) {
7418		page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
7419		if (do_memsw_account())
7420			page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
7421		if (ug->nr_kmem)
7422			memcg_account_kmem(ug->memcg, -ug->nr_kmem);
7423		memcg_oom_recover(ug->memcg);
7424	}
7425
7426	local_irq_save(flags);
7427	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
7428	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
7429	memcg_check_events(ug->memcg, ug->nid);
7430	local_irq_restore(flags);
7431
7432	/* drop reference from uncharge_folio */
7433	css_put(&ug->memcg->css);
7434}
7435
7436static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
7437{
7438	long nr_pages;
7439	struct mem_cgroup *memcg;
7440	struct obj_cgroup *objcg;
7441
7442	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7443
7444	/*
7445	 * Nobody should be changing or seriously looking at
7446	 * folio memcg or objcg at this point, we have fully
7447	 * exclusive access to the folio.
7448	 */
7449	if (folio_memcg_kmem(folio)) {
7450		objcg = __folio_objcg(folio);
7451		/*
7452		 * This get matches the put at the end of the function and
7453		 * kmem pages do not hold memcg references anymore.
7454		 */
7455		memcg = get_mem_cgroup_from_objcg(objcg);
7456	} else {
7457		memcg = __folio_memcg(folio);
7458	}
7459
7460	if (!memcg)
7461		return;
7462
7463	if (ug->memcg != memcg) {
7464		if (ug->memcg) {
7465			uncharge_batch(ug);
7466			uncharge_gather_clear(ug);
7467		}
7468		ug->memcg = memcg;
7469		ug->nid = folio_nid(folio);
7470
7471		/* pairs with css_put in uncharge_batch */
7472		css_get(&memcg->css);
7473	}
7474
7475	nr_pages = folio_nr_pages(folio);
7476
7477	if (folio_memcg_kmem(folio)) {
7478		ug->nr_memory += nr_pages;
7479		ug->nr_kmem += nr_pages;
7480
7481		folio->memcg_data = 0;
7482		obj_cgroup_put(objcg);
7483	} else {
7484		/* LRU pages aren't accounted at the root level */
7485		if (!mem_cgroup_is_root(memcg))
7486			ug->nr_memory += nr_pages;
7487		ug->pgpgout++;
7488
7489		folio->memcg_data = 0;
7490	}
7491
7492	css_put(&memcg->css);
7493}
7494
7495void __mem_cgroup_uncharge(struct folio *folio)
7496{
7497	struct uncharge_gather ug;
7498
7499	/* Don't touch folio->lru of any random page, pre-check: */
7500	if (!folio_memcg(folio))
7501		return;
7502
7503	uncharge_gather_clear(&ug);
7504	uncharge_folio(folio, &ug);
7505	uncharge_batch(&ug);
7506}
7507
7508/**
7509 * __mem_cgroup_uncharge_list - uncharge a list of pages
7510 * @page_list: list of pages to uncharge
7511 *
7512 * Uncharge a list of pages previously charged with
7513 * __mem_cgroup_charge().
7514 */
7515void __mem_cgroup_uncharge_list(struct list_head *page_list)
7516{
7517	struct uncharge_gather ug;
7518	struct folio *folio;
7519
7520	uncharge_gather_clear(&ug);
7521	list_for_each_entry(folio, page_list, lru)
7522		uncharge_folio(folio, &ug);
7523	if (ug.memcg)
7524		uncharge_batch(&ug);
7525}
7526
7527/**
7528 * mem_cgroup_replace_folio - Charge a folio's replacement.
7529 * @old: Currently circulating folio.
7530 * @new: Replacement folio.
7531 *
7532 * Charge @new as a replacement folio for @old. @old will
7533 * be uncharged upon free. This is only used by the page cache
7534 * (in replace_page_cache_folio()).
7535 *
7536 * Both folios must be locked, @new->mapping must be set up.
7537 */
7538void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
7539{
7540	struct mem_cgroup *memcg;
7541	long nr_pages = folio_nr_pages(new);
7542	unsigned long flags;
7543
7544	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7545	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7546	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7547	VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
7548
7549	if (mem_cgroup_disabled())
7550		return;
7551
7552	/* Page cache replacement: new folio already charged? */
7553	if (folio_memcg(new))
7554		return;
7555
7556	memcg = folio_memcg(old);
7557	VM_WARN_ON_ONCE_FOLIO(!memcg, old);
7558	if (!memcg)
7559		return;
7560
7561	/* Force-charge the new page. The old one will be freed soon */
7562	if (!mem_cgroup_is_root(memcg)) {
7563		page_counter_charge(&memcg->memory, nr_pages);
7564		if (do_memsw_account())
7565			page_counter_charge(&memcg->memsw, nr_pages);
7566	}
7567
7568	css_get(&memcg->css);
7569	commit_charge(new, memcg);
7570
7571	local_irq_save(flags);
7572	mem_cgroup_charge_statistics(memcg, nr_pages);
7573	memcg_check_events(memcg, folio_nid(new));
7574	local_irq_restore(flags);
7575}
7576
7577/**
7578 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
7579 * @old: Currently circulating folio.
7580 * @new: Replacement folio.
7581 *
7582 * Transfer the memcg data from the old folio to the new folio for migration.
7583 * The old folio's memcg data will be cleared. Note that the memory counters
7584 * will remain unchanged throughout the process.
7585 *
7586 * Both folios must be locked, @new->mapping must be set up.
7587 */
7588void mem_cgroup_migrate(struct folio *old, struct folio *new)
7589{
7590	struct mem_cgroup *memcg;
7591
7592	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7593	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7594	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7595	VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
7596
7597	if (mem_cgroup_disabled())
7598		return;
7599
7600	memcg = folio_memcg(old);
7601	/*
7602	 * Note that it is normal to see !memcg for a hugetlb folio.
7603	 * For example, it could have been allocated when memory_hugetlb_accounting
7604	 * was not selected.
7605	 */
7606	VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
7607	if (!memcg)
7608		return;
7609
7610	/* Transfer the charge and the css ref */
7611	commit_charge(new, memcg);
7612	/*
7613	 * If the old folio is a large folio and is in the split queue, it needs
7614	 * to be removed from the split queue now, to avoid using an incorrect
7615	 * split queue in destroy_large_folio() after the memcg of the old folio
7616	 * is cleared.
7617	 *
7618	 * In addition, the old folio is about to be freed after migration, so
7619	 * removing from the split queue a bit earlier seems reasonable.
7620	 */
7621	if (folio_test_large(old) && folio_test_large_rmappable(old))
7622		folio_undo_large_rmappable(old);
7623	old->memcg_data = 0;
7624}
7625
7626DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
7627EXPORT_SYMBOL(memcg_sockets_enabled_key);
7628
7629void mem_cgroup_sk_alloc(struct sock *sk)
7630{
7631	struct mem_cgroup *memcg;
7632
7633	if (!mem_cgroup_sockets_enabled)
7634		return;
7635
7636	/* Do not associate the sock with unrelated interrupted task's memcg. */
7637	if (!in_task())
7638		return;
7639
7640	rcu_read_lock();
7641	memcg = mem_cgroup_from_task(current);
7642	if (mem_cgroup_is_root(memcg))
7643		goto out;
7644	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
7645		goto out;
7646	if (css_tryget(&memcg->css))
7647		sk->sk_memcg = memcg;
7648out:
7649	rcu_read_unlock();
7650}
7651
7652void mem_cgroup_sk_free(struct sock *sk)
7653{
7654	if (sk->sk_memcg)
7655		css_put(&sk->sk_memcg->css);
7656}
7657
7658/**
7659 * mem_cgroup_charge_skmem - charge socket memory
7660 * @memcg: memcg to charge
7661 * @nr_pages: number of pages to charge
7662 * @gfp_mask: reclaim mode
7663 *
7664 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7665 * @memcg's configured limit, %false if it doesn't.
7666 */
7667bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
7668			     gfp_t gfp_mask)
7669{
7670	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7671		struct page_counter *fail;
7672
7673		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7674			memcg->tcpmem_pressure = 0;
7675			return true;
7676		}
7677		memcg->tcpmem_pressure = 1;
7678		if (gfp_mask & __GFP_NOFAIL) {
7679			page_counter_charge(&memcg->tcpmem, nr_pages);
7680			return true;
7681		}
7682		return false;
7683	}
7684
7685	if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
7686		mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7687		return true;
7688	}
7689
7690	return false;
7691}
7692
7693/**
7694 * mem_cgroup_uncharge_skmem - uncharge socket memory
7695 * @memcg: memcg to uncharge
7696 * @nr_pages: number of pages to uncharge
7697 */
7698void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7699{
7700	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7701		page_counter_uncharge(&memcg->tcpmem, nr_pages);
7702		return;
7703	}
7704
7705	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7706
7707	refill_stock(memcg, nr_pages);
7708}
7709
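/*
 * Boot-time switches: e.g. "cgroup.memory=nosocket,nokmem" on the kernel
 * command line disables socket and kernel memory accounting, and "nobpf"
 * does the same for BPF memory accounting.
 */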
7710static int __init cgroup_memory(char *s)
7711{
7712	char *token;
7713
7714	while ((token = strsep(&s, ",")) != NULL) {
7715		if (!*token)
7716			continue;
7717		if (!strcmp(token, "nosocket"))
7718			cgroup_memory_nosocket = true;
7719		if (!strcmp(token, "nokmem"))
7720			cgroup_memory_nokmem = true;
7721		if (!strcmp(token, "nobpf"))
7722			cgroup_memory_nobpf = true;
7723	}
7724	return 1;
7725}
7726__setup("cgroup.memory=", cgroup_memory);
7727
7728/*
7729 * subsys_initcall() for memory controller.
7730 *
7731 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7732 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7733 * basically everything that doesn't depend on a specific mem_cgroup structure
7734 * should be initialized from here.
7735 */
7736static int __init mem_cgroup_init(void)
7737{
7738	int cpu, node;
7739
7740	/*
7741	 * Currently s32 type (can refer to struct batched_lruvec_stat) is
7742	 * used for per-memcg-per-cpu caching of per-node statistics. In order
7743	 * to work fine, we should make sure that the overfill threshold can't
7744	 * exceed S32_MAX / PAGE_SIZE.
7745	 */
7746	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7747
7748	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7749				  memcg_hotplug_cpu_dead);
7750
7751	for_each_possible_cpu(cpu)
7752		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7753			  drain_local_stock);
7754
7755	for_each_node(node) {
7756		struct mem_cgroup_tree_per_node *rtpn;
7757
7758		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, node);
7759
7760		rtpn->rb_root = RB_ROOT;
7761		rtpn->rb_rightmost = NULL;
7762		spin_lock_init(&rtpn->lock);
7763		soft_limit_tree.rb_tree_per_node[node] = rtpn;
7764	}
7765
7766	return 0;
7767}
7768subsys_initcall(mem_cgroup_init);
7769
7770#ifdef CONFIG_SWAP
7771static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7772{
7773	while (!refcount_inc_not_zero(&memcg->id.ref)) {
7774		/*
7775		 * The root cgroup cannot be destroyed, so its refcount must
7776		 * always be >= 1.
7777		 */
7778		if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
7779			VM_BUG_ON(1);
7780			break;
7781		}
7782		memcg = parent_mem_cgroup(memcg);
7783		if (!memcg)
7784			memcg = root_mem_cgroup;
7785	}
7786	return memcg;
7787}
7788
7789/**
7790 * mem_cgroup_swapout - transfer a memsw charge to swap
7791 * @folio: folio whose memsw charge to transfer
7792 * @entry: swap entry to move the charge to
7793 *
7794 * Transfer the memsw charge of @folio to @entry.
7795 */
7796void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
7797{
7798	struct mem_cgroup *memcg, *swap_memcg;
7799	unsigned int nr_entries;
7800	unsigned short oldid;
7801
7802	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7803	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
7804
7805	if (mem_cgroup_disabled())
7806		return;
7807
7808	if (!do_memsw_account())
7809		return;
7810
7811	memcg = folio_memcg(folio);
7812
7813	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7814	if (!memcg)
7815		return;
7816
7817	/*
7818	 * In case the memcg owning these pages has been offlined and doesn't
7819	 * have an ID allocated to it anymore, charge the closest online
7820	 * ancestor for the swap instead and transfer the memory+swap charge.
7821	 */
7822	swap_memcg = mem_cgroup_id_get_online(memcg);
7823	nr_entries = folio_nr_pages(folio);
7824	/* Get references for the tail pages, too */
7825	if (nr_entries > 1)
7826		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7827	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7828				   nr_entries);
7829	VM_BUG_ON_FOLIO(oldid, folio);
7830	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7831
7832	folio->memcg_data = 0;
7833
7834	if (!mem_cgroup_is_root(memcg))
7835		page_counter_uncharge(&memcg->memory, nr_entries);
7836
7837	if (memcg != swap_memcg) {
7838		if (!mem_cgroup_is_root(swap_memcg))
7839			page_counter_charge(&swap_memcg->memsw, nr_entries);
7840		page_counter_uncharge(&memcg->memsw, nr_entries);
7841	}
7842
7843	/*
7844	 * Interrupts should be disabled here because the caller holds the
7845	 * i_pages lock which is taken with interrupts-off. It is
7846	 * important here to have the interrupts disabled because it is the
7847	 * only synchronisation we have for updating the per-CPU variables.
7848	 */
7849	memcg_stats_lock();
7850	mem_cgroup_charge_statistics(memcg, -nr_entries);
7851	memcg_stats_unlock();
7852	memcg_check_events(memcg, folio_nid(folio));
7853
7854	css_put(&memcg->css);
7855}
7856
7857/**
7858 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
7859 * @folio: folio being added to swap
7860 * @entry: swap entry to charge
7861 *
7862 * Try to charge @folio's memcg for the swap space at @entry.
7863 *
7864 * Returns 0 on success, -ENOMEM on failure.
7865 */
7866int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
7867{
7868	unsigned int nr_pages = folio_nr_pages(folio);
7869	struct page_counter *counter;
7870	struct mem_cgroup *memcg;
7871	unsigned short oldid;
7872
7873	if (do_memsw_account())
7874		return 0;
7875
7876	memcg = folio_memcg(folio);
7877
7878	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7879	if (!memcg)
7880		return 0;
7881
7882	if (!entry.val) {
7883		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7884		return 0;
7885	}
7886
7887	memcg = mem_cgroup_id_get_online(memcg);
7888
7889	if (!mem_cgroup_is_root(memcg) &&
7890	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7891		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7892		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7893		mem_cgroup_id_put(memcg);
7894		return -ENOMEM;
7895	}
7896
7897	/* Get references for the tail pages, too */
7898	if (nr_pages > 1)
7899		mem_cgroup_id_get_many(memcg, nr_pages - 1);
7900	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7901	VM_BUG_ON_FOLIO(oldid, folio);
7902	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7903
7904	return 0;
7905}
7906
7907/**
7908 * __mem_cgroup_uncharge_swap - uncharge swap space
7909 * @entry: swap entry to uncharge
7910 * @nr_pages: the amount of swap space to uncharge
7911 */
7912void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7913{
7914	struct mem_cgroup *memcg;
7915	unsigned short id;
7916
7917	id = swap_cgroup_record(entry, 0, nr_pages);
7918	rcu_read_lock();
7919	memcg = mem_cgroup_from_id(id);
7920	if (memcg) {
7921		if (!mem_cgroup_is_root(memcg)) {
7922			if (do_memsw_account())
7923				page_counter_uncharge(&memcg->memsw, nr_pages);
7924			else
7925				page_counter_uncharge(&memcg->swap, nr_pages);
7926		}
7927		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7928		mem_cgroup_id_put_many(memcg, nr_pages);
7929	}
7930	rcu_read_unlock();
7931}
7932
7933long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7934{
7935	long nr_swap_pages = get_nr_swap_pages();
7936
7937	if (mem_cgroup_disabled() || do_memsw_account())
7938		return nr_swap_pages;
7939	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
7940		nr_swap_pages = min_t(long, nr_swap_pages,
7941				      READ_ONCE(memcg->swap.max) -
7942				      page_counter_read(&memcg->swap));
7943	return nr_swap_pages;
7944}
7945
7946bool mem_cgroup_swap_full(struct folio *folio)
7947{
7948	struct mem_cgroup *memcg;
7949
7950	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
7951
7952	if (vm_swap_full())
7953		return true;
7954	if (do_memsw_account())
7955		return false;
7956
7957	memcg = folio_memcg(folio);
7958	if (!memcg)
7959		return false;
7960
7961	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
7962		unsigned long usage = page_counter_read(&memcg->swap);
7963
7964		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7965		    usage * 2 >= READ_ONCE(memcg->swap.max))
7966			return true;
7967	}
7968
7969	return false;
7970}
7971
7972static int __init setup_swap_account(char *s)
7973{
7974	bool res;
7975
7976	if (!kstrtobool(s, &res) && !res)
7977		pr_warn_once("The swapaccount=0 commandline option is deprecated "
7978			     "in favor of configuring swap control via cgroupfs. "
7979			     "Please report your usecase to linux-mm@kvack.org if you "
7980			     "depend on this functionality.\n");
7981	return 1;
7982}
7983__setup("swapaccount=", setup_swap_account);
7984
7985static u64 swap_current_read(struct cgroup_subsys_state *css,
7986			     struct cftype *cft)
7987{
7988	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7989
7990	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7991}
7992
7993static u64 swap_peak_read(struct cgroup_subsys_state *css,
7994			  struct cftype *cft)
7995{
7996	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7997
7998	return (u64)memcg->swap.watermark * PAGE_SIZE;
7999}
8000
8001static int swap_high_show(struct seq_file *m, void *v)
8002{
8003	return seq_puts_memcg_tunable(m,
8004		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
8005}
8006
8007static ssize_t swap_high_write(struct kernfs_open_file *of,
8008			       char *buf, size_t nbytes, loff_t off)
8009{
8010	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8011	unsigned long high;
8012	int err;
8013
8014	buf = strstrip(buf);
8015	err = page_counter_memparse(buf, "max", &high);
8016	if (err)
8017		return err;
8018
8019	page_counter_set_high(&memcg->swap, high);
8020
8021	return nbytes;
8022}
8023
8024static int swap_max_show(struct seq_file *m, void *v)
8025{
8026	return seq_puts_memcg_tunable(m,
8027		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
8028}
8029
8030static ssize_t swap_max_write(struct kernfs_open_file *of,
8031			      char *buf, size_t nbytes, loff_t off)
8032{
8033	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8034	unsigned long max;
8035	int err;
8036
8037	buf = strstrip(buf);
8038	err = page_counter_memparse(buf, "max", &max);
8039	if (err)
8040		return err;
8041
8042	xchg(&memcg->swap.max, max);
8043
8044	return nbytes;
8045}
8046
8047static int swap_events_show(struct seq_file *m, void *v)
8048{
8049	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
8050
8051	seq_printf(m, "high %lu\n",
8052		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
8053	seq_printf(m, "max %lu\n",
8054		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
8055	seq_printf(m, "fail %lu\n",
8056		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
8057
8058	return 0;
8059}
8060
8061static struct cftype swap_files[] = {
8062	{
8063		.name = "swap.current",
8064		.flags = CFTYPE_NOT_ON_ROOT,
8065		.read_u64 = swap_current_read,
8066	},
8067	{
8068		.name = "swap.high",
8069		.flags = CFTYPE_NOT_ON_ROOT,
8070		.seq_show = swap_high_show,
8071		.write = swap_high_write,
8072	},
8073	{
8074		.name = "swap.max",
8075		.flags = CFTYPE_NOT_ON_ROOT,
8076		.seq_show = swap_max_show,
8077		.write = swap_max_write,
8078	},
8079	{
8080		.name = "swap.peak",
8081		.flags = CFTYPE_NOT_ON_ROOT,
8082		.read_u64 = swap_peak_read,
8083	},
8084	{
8085		.name = "swap.events",
8086		.flags = CFTYPE_NOT_ON_ROOT,
8087		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
8088		.seq_show = swap_events_show,
8089	},
8090	{ }	/* terminate */
8091};
8092
8093static struct cftype memsw_files[] = {
8094	{
8095		.name = "memsw.usage_in_bytes",
8096		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
8097		.read_u64 = mem_cgroup_read_u64,
8098	},
8099	{
8100		.name = "memsw.max_usage_in_bytes",
8101		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
8102		.write = mem_cgroup_reset,
8103		.read_u64 = mem_cgroup_read_u64,
8104	},
8105	{
8106		.name = "memsw.limit_in_bytes",
8107		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
8108		.write = mem_cgroup_write,
8109		.read_u64 = mem_cgroup_read_u64,
8110	},
8111	{
8112		.name = "memsw.failcnt",
8113		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
8114		.write = mem_cgroup_reset,
8115		.read_u64 = mem_cgroup_read_u64,
8116	},
8117	{ },	/* terminate */
8118};
8119
8120#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
8121/**
8122 * obj_cgroup_may_zswap - check if this cgroup can zswap
8123 * @objcg: the object cgroup
8124 *
8125 * Check if the hierarchical zswap limit has been reached.
8126 *
8127 * This doesn't check for specific headroom, and it is not atomic
8128 * either. But with zswap, the size of the allocation is only known
8129 * once compression has occurred, and this optimistic pre-check avoids
8130 * spending cycles on compression when there is already no room left
8131 * or zswap is disabled altogether somewhere in the hierarchy.
8132 */
8133bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
8134{
8135	struct mem_cgroup *memcg, *original_memcg;
8136	bool ret = true;
8137
8138	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8139		return true;
8140
8141	original_memcg = get_mem_cgroup_from_objcg(objcg);
8142	for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
8143	     memcg = parent_mem_cgroup(memcg)) {
8144		unsigned long max = READ_ONCE(memcg->zswap_max);
8145		unsigned long pages;
8146
8147		if (max == PAGE_COUNTER_MAX)
8148			continue;
8149		if (max == 0) {
8150			ret = false;
8151			break;
8152		}
8153
8154		/*
8155		 * mem_cgroup_flush_stats() ignores small changes. Use
8156		 * do_flush_stats() directly to get accurate stats for charging.
8157		 */
8158		do_flush_stats(memcg);
8159		pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
8160		if (pages < max)
8161			continue;
8162		ret = false;
8163		break;
8164	}
8165	mem_cgroup_put(original_memcg);
8166	return ret;
8167}
8168
8169/**
8170 * obj_cgroup_charge_zswap - charge compression backend memory
8171 * @objcg: the object cgroup
8172 * @size: size of compressed object
8173 *
8174 * This forces the charge after obj_cgroup_may_zswap() allowed
8175 * compression and storage in zswap for this cgroup to go ahead.
8176 */
8177void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
8178{
8179	struct mem_cgroup *memcg;
8180
8181	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8182		return;
8183
8184	VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
8185
8186	/* PF_MEMALLOC context, charging must succeed */
8187	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
8188		VM_WARN_ON_ONCE(1);
8189
8190	rcu_read_lock();
8191	memcg = obj_cgroup_memcg(objcg);
8192	mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
8193	mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
8194	rcu_read_unlock();
8195}
8196
8197/**
8198 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
8199 * @objcg: the object cgroup
8200 * @size: size of compressed object
8201 *
8202 * Uncharges zswap memory on page in.
8203 */
8204void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
8205{
8206	struct mem_cgroup *memcg;
8207
8208	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8209		return;
8210
8211	obj_cgroup_uncharge(objcg, size);
8212
8213	rcu_read_lock();
8214	memcg = obj_cgroup_memcg(objcg);
8215	mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
8216	mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
8217	rcu_read_unlock();
8218}
8219
8220bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
8221{
8222	/* if zswap is disabled, do not block pages going to the swapping device */
8223	return !is_zswap_enabled() || !memcg || READ_ONCE(memcg->zswap_writeback);
8224}
8225
8226static u64 zswap_current_read(struct cgroup_subsys_state *css,
8227			      struct cftype *cft)
8228{
8229	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
8230
8231	mem_cgroup_flush_stats(memcg);
8232	return memcg_page_state(memcg, MEMCG_ZSWAP_B);
8233}
8234
8235static int zswap_max_show(struct seq_file *m, void *v)
8236{
8237	return seq_puts_memcg_tunable(m,
8238		READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
8239}
8240
8241static ssize_t zswap_max_write(struct kernfs_open_file *of,
8242			       char *buf, size_t nbytes, loff_t off)
8243{
8244	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8245	unsigned long max;
8246	int err;
8247
8248	buf = strstrip(buf);
8249	err = page_counter_memparse(buf, "max", &max);
8250	if (err)
8251		return err;
8252
8253	xchg(&memcg->zswap_max, max);
8254
8255	return nbytes;
8256}
8257
8258static int zswap_writeback_show(struct seq_file *m, void *v)
8259{
8260	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
8261
8262	seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
8263	return 0;
8264}
8265
8266static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
8267				char *buf, size_t nbytes, loff_t off)
8268{
8269	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8270	int zswap_writeback;
8271	ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);
8272
8273	if (parse_ret)
8274		return parse_ret;
8275
8276	if (zswap_writeback != 0 && zswap_writeback != 1)
8277		return -EINVAL;
8278
8279	WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
8280	return nbytes;
8281}
8282
8283static struct cftype zswap_files[] = {
8284	{
8285		.name = "zswap.current",
8286		.flags = CFTYPE_NOT_ON_ROOT,
8287		.read_u64 = zswap_current_read,
8288	},
8289	{
8290		.name = "zswap.max",
8291		.flags = CFTYPE_NOT_ON_ROOT,
8292		.seq_show = zswap_max_show,
8293		.write = zswap_max_write,
8294	},
8295	{
8296		.name = "zswap.writeback",
8297		.seq_show = zswap_writeback_show,
8298		.write = zswap_writeback_write,
8299	},
8300	{ }	/* terminate */
8301};
8302#endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
8303
8304static int __init mem_cgroup_swap_init(void)
8305{
8306	if (mem_cgroup_disabled())
8307		return 0;
8308
8309	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
8310	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
8311#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
8312	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
8313#endif
8314	return 0;
8315}
8316subsys_initcall(mem_cgroup_swap_init);
8317
8318#endif /* CONFIG_SWAP */