   1/* memcontrol.c - Memory Controller
   2 *
   3 * Copyright IBM Corporation, 2007
   4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
   5 *
   6 * Copyright 2007 OpenVZ SWsoft Inc
   7 * Author: Pavel Emelianov <xemul@openvz.org>
   8 *
   9 * Memory thresholds
  10 * Copyright (C) 2009 Nokia Corporation
  11 * Author: Kirill A. Shutemov
  12 *
  13 * Kernel Memory Controller
  14 * Copyright (C) 2012 Parallels Inc. and Google Inc.
  15 * Authors: Glauber Costa and Suleiman Souhlal
  16 *
  17 * Native page reclaim
  18 * Charge lifetime sanitation
  19 * Lockless page tracking & accounting
  20 * Unified hierarchy configuration model
  21 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
  22 *
  23 * This program is free software; you can redistribute it and/or modify
  24 * it under the terms of the GNU General Public License as published by
  25 * the Free Software Foundation; either version 2 of the License, or
  26 * (at your option) any later version.
  27 *
  28 * This program is distributed in the hope that it will be useful,
  29 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  31 * GNU General Public License for more details.
  32 */
  33
  34#include <linux/page_counter.h>
  35#include <linux/memcontrol.h>
  36#include <linux/cgroup.h>
  37#include <linux/mm.h>
  38#include <linux/hugetlb.h>
  39#include <linux/pagemap.h>
  40#include <linux/smp.h>
  41#include <linux/page-flags.h>
  42#include <linux/backing-dev.h>
  43#include <linux/bit_spinlock.h>
  44#include <linux/rcupdate.h>
  45#include <linux/limits.h>
  46#include <linux/export.h>
  47#include <linux/mutex.h>
  48#include <linux/rbtree.h>
  49#include <linux/slab.h>
  50#include <linux/swap.h>
  51#include <linux/swapops.h>
  52#include <linux/spinlock.h>
  53#include <linux/eventfd.h>
  54#include <linux/poll.h>
  55#include <linux/sort.h>
  56#include <linux/fs.h>
  57#include <linux/seq_file.h>
  58#include <linux/vmpressure.h>
  59#include <linux/mm_inline.h>
  60#include <linux/swap_cgroup.h>
  61#include <linux/cpu.h>
  62#include <linux/oom.h>
  63#include <linux/lockdep.h>
  64#include <linux/file.h>
  65#include <linux/tracehook.h>
  66#include "internal.h"
  67#include <net/sock.h>
  68#include <net/ip.h>
  69#include "slab.h"
  70
  71#include <asm/uaccess.h>
  72
  73#include <trace/events/vmscan.h>
  74
  75struct cgroup_subsys memory_cgrp_subsys __read_mostly;
  76EXPORT_SYMBOL(memory_cgrp_subsys);
  77
  78struct mem_cgroup *root_mem_cgroup __read_mostly;
  79
  80#define MEM_CGROUP_RECLAIM_RETRIES	5
  81
  82/* Socket memory accounting disabled? */
  83static bool cgroup_memory_nosocket;
  84
  85/* Kernel memory accounting disabled? */
  86static bool cgroup_memory_nokmem;
  87
  88/* Whether the swap controller is active */
  89#ifdef CONFIG_MEMCG_SWAP
  90int do_swap_account __read_mostly;
  91#else
  92#define do_swap_account		0
  93#endif
  94
  95/* Whether legacy memory+swap accounting is active */
  96static bool do_memsw_account(void)
  97{
  98	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
  99}
 100
 101static const char * const mem_cgroup_stat_names[] = {
 102	"cache",
 103	"rss",
 104	"rss_huge",
 105	"mapped_file",
 106	"dirty",
 107	"writeback",
 108	"swap",
 109};
 110
 111static const char * const mem_cgroup_events_names[] = {
 112	"pgpgin",
 113	"pgpgout",
 114	"pgfault",
 115	"pgmajfault",
 116};
 117
 118static const char * const mem_cgroup_lru_names[] = {
 119	"inactive_anon",
 120	"active_anon",
 121	"inactive_file",
 122	"active_file",
 123	"unevictable",
 124};
 125
 126#define THRESHOLDS_EVENTS_TARGET 128
 127#define SOFTLIMIT_EVENTS_TARGET 1024
 128#define NUMAINFO_EVENTS_TARGET	1024
 129
 130/*
  131 * Cgroups above their limits are maintained in an RB-Tree, independent of
 132 * their hierarchy representation
 133 */
 134
 135struct mem_cgroup_tree_per_zone {
 136	struct rb_root rb_root;
 137	spinlock_t lock;
 138};
 139
 140struct mem_cgroup_tree_per_node {
 141	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
 142};
 143
 144struct mem_cgroup_tree {
 145	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
 146};
 147
 148static struct mem_cgroup_tree soft_limit_tree __read_mostly;
 149
 150/* for OOM */
 151struct mem_cgroup_eventfd_list {
 152	struct list_head list;
 153	struct eventfd_ctx *eventfd;
 154};
 155
 156/*
  157 * cgroup_event represents events which userspace wants to receive.
 158 */
 159struct mem_cgroup_event {
 160	/*
 161	 * memcg which the event belongs to.
 162	 */
 163	struct mem_cgroup *memcg;
 164	/*
 165	 * eventfd to signal userspace about the event.
 166	 */
 167	struct eventfd_ctx *eventfd;
 168	/*
  169	 * Each of these is stored in a list by the cgroup.
 170	 */
 171	struct list_head list;
 172	/*
 173	 * register_event() callback will be used to add new userspace
 174	 * waiter for changes related to this event.  Use eventfd_signal()
 175	 * on eventfd to send notification to userspace.
 176	 */
 177	int (*register_event)(struct mem_cgroup *memcg,
 178			      struct eventfd_ctx *eventfd, const char *args);
 179	/*
 180	 * unregister_event() callback will be called when userspace closes
  181	 * the eventfd or on cgroup removal.  This callback must be set
  182	 * if you want to provide notification functionality.
 183	 */
 184	void (*unregister_event)(struct mem_cgroup *memcg,
 185				 struct eventfd_ctx *eventfd);
 186	/*
  187	 * All fields below are needed to unregister the event when
  188	 * userspace closes the eventfd.
 189	 */
 190	poll_table pt;
 191	wait_queue_head_t *wqh;
 192	wait_queue_t wait;
 193	struct work_struct remove;
 194};
 195
 196static void mem_cgroup_threshold(struct mem_cgroup *memcg);
 197static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
 198
  199/* Stuff for move charges at task migration. */
 200/*
 201 * Types of charges to be moved.
 202 */
 203#define MOVE_ANON	0x1U
 204#define MOVE_FILE	0x2U
 205#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
 206
 207/* "mc" and its members are protected by cgroup_mutex */
 208static struct move_charge_struct {
 209	spinlock_t	  lock; /* for from, to */
 210	struct mm_struct  *mm;
 211	struct mem_cgroup *from;
 212	struct mem_cgroup *to;
 213	unsigned long flags;
 214	unsigned long precharge;
 215	unsigned long moved_charge;
 216	unsigned long moved_swap;
 217	struct task_struct *moving_task;	/* a task moving charges */
 218	wait_queue_head_t waitq;		/* a waitq for other context */
 219} mc = {
 220	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
 221	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
 222};
 223
 224/*
 225 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 226 * limit reclaim to prevent infinite loops, if they ever occur.
 227 */
 228#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
 229#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
 230
 231enum charge_type {
 232	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
 233	MEM_CGROUP_CHARGE_TYPE_ANON,
 234	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
 235	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
 236	NR_CHARGE_TYPE,
 237};
 238
 239/* for encoding cft->private value on file */
 240enum res_type {
 241	_MEM,
 242	_MEMSWAP,
 243	_OOM_TYPE,
 244	_KMEM,
 245	_TCP,
 246};
 247
 248#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
 249#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
 250#define MEMFILE_ATTR(val)	((val) & 0xffff)
  251/* Used for OOM notifier */
 252#define OOM_CONTROL		(0)
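     /*
      * For example, MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL) evaluates to
      * (_OOM_TYPE << 16) | 0; MEMFILE_TYPE() then recovers _OOM_TYPE from the
      * upper 16 bits and MEMFILE_ATTR() recovers OOM_CONTROL from the lower
      * 16 bits.
      */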
 253
 254/* Some nice accessors for the vmpressure. */
 255struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
 256{
 257	if (!memcg)
 258		memcg = root_mem_cgroup;
 259	return &memcg->vmpressure;
 260}
 261
 262struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
 263{
 264	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
 265}
 266
 267static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
 268{
 269	return (memcg == root_mem_cgroup);
 270}
 271
 272#ifndef CONFIG_SLOB
 273/*
 274 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
  275 * The main reason for not using the cgroup id for this is that it works
  276 *  better in sparse environments, where we have a lot of memcgs but only a
  277 *  few of them are kmem-limited. For instance, with 200 memcgs of which only
  278 *  the 200th is kmem-limited, a cgroup-id-based index would still require a
  279 *  200-entry array.
 280 *
 281 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 282 * will double each time we have to increase it.
 283 */
 284static DEFINE_IDA(memcg_cache_ida);
 285int memcg_nr_cache_ids;
 286
 287/* Protects memcg_nr_cache_ids */
 288static DECLARE_RWSEM(memcg_cache_ids_sem);
 289
 290void memcg_get_cache_ids(void)
 291{
 292	down_read(&memcg_cache_ids_sem);
 293}
 294
 295void memcg_put_cache_ids(void)
 296{
 297	up_read(&memcg_cache_ids_sem);
 298}
 299
 300/*
  301 * MIN_SIZE is different from 1, because we would like to avoid going through
 302 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 303 * cgroups is a reasonable guess. In the future, it could be a parameter or
 304 * tunable, but that is strictly not necessary.
 305 *
 306 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 307 * this constant directly from cgroup, but it is understandable that this is
 308 * better kept as an internal representation in cgroup.c. In any case, the
  309 * cgrp_id space is not getting any smaller, and we don't necessarily have to
  310 * increase ours if it increases.
 311 */
 312#define MEMCG_CACHES_MIN_SIZE 4
 313#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
 314
 315/*
 316 * A lot of the calls to the cache allocation functions are expected to be
 317 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
  318 * conditional on this static branch, we have to allow modules that do
  319 * kmem_cache_alloc and the like to see this symbol as well.
 320 */
 321DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
 322EXPORT_SYMBOL(memcg_kmem_enabled_key);
 323
 324#endif /* !CONFIG_SLOB */
 325
 326static struct mem_cgroup_per_zone *
 327mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
 328{
 329	int nid = zone_to_nid(zone);
 330	int zid = zone_idx(zone);
 331
 332	return &memcg->nodeinfo[nid]->zoneinfo[zid];
 333}
 334
 335/**
 336 * mem_cgroup_css_from_page - css of the memcg associated with a page
 337 * @page: page of interest
 338 *
 339 * If memcg is bound to the default hierarchy, css of the memcg associated
 340 * with @page is returned.  The returned css remains associated with @page
 341 * until it is released.
 342 *
 343 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 344 * is returned.
 345 */
 346struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
 347{
 348	struct mem_cgroup *memcg;
 349
 350	memcg = page->mem_cgroup;
 351
 352	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
 353		memcg = root_mem_cgroup;
 354
 355	return &memcg->css;
 356}
 357
 358/**
 359 * page_cgroup_ino - return inode number of the memcg a page is charged to
 360 * @page: the page
 361 *
 362 * Look up the closest online ancestor of the memory cgroup @page is charged to
 363 * and return its inode number or 0 if @page is not charged to any cgroup. It
 364 * is safe to call this function without holding a reference to @page.
 365 *
 366 * Note, this function is inherently racy, because there is nothing to prevent
 367 * the cgroup inode from getting torn down and potentially reallocated a moment
 368 * after page_cgroup_ino() returns, so it only should be used by callers that
 369 * do not care (such as procfs interfaces).
 370 */
 371ino_t page_cgroup_ino(struct page *page)
 372{
 373	struct mem_cgroup *memcg;
 374	unsigned long ino = 0;
 375
 376	rcu_read_lock();
 377	memcg = READ_ONCE(page->mem_cgroup);
 378	while (memcg && !(memcg->css.flags & CSS_ONLINE))
 379		memcg = parent_mem_cgroup(memcg);
 380	if (memcg)
 381		ino = cgroup_ino(memcg->css.cgroup);
 382	rcu_read_unlock();
 383	return ino;
 384}
 385
 386static struct mem_cgroup_per_zone *
 387mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
 388{
 389	int nid = page_to_nid(page);
 390	int zid = page_zonenum(page);
 391
 392	return &memcg->nodeinfo[nid]->zoneinfo[zid];
 393}
 394
 395static struct mem_cgroup_tree_per_zone *
 396soft_limit_tree_node_zone(int nid, int zid)
 397{
 398	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
 399}
 400
 401static struct mem_cgroup_tree_per_zone *
 402soft_limit_tree_from_page(struct page *page)
 403{
 404	int nid = page_to_nid(page);
 405	int zid = page_zonenum(page);
 406
 407	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
 408}
 409
 410static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz,
 411					 struct mem_cgroup_tree_per_zone *mctz,
 412					 unsigned long new_usage_in_excess)
 413{
 414	struct rb_node **p = &mctz->rb_root.rb_node;
 415	struct rb_node *parent = NULL;
 416	struct mem_cgroup_per_zone *mz_node;
 417
 418	if (mz->on_tree)
 419		return;
 420
 421	mz->usage_in_excess = new_usage_in_excess;
 422	if (!mz->usage_in_excess)
 423		return;
 424	while (*p) {
 425		parent = *p;
 426		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
 427					tree_node);
 428		if (mz->usage_in_excess < mz_node->usage_in_excess)
 429			p = &(*p)->rb_left;
 430		/*
 431		 * We can't avoid mem cgroups that are over their soft
 432		 * limit by the same amount
 433		 */
 434		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
 435			p = &(*p)->rb_right;
 436	}
 437	rb_link_node(&mz->tree_node, parent, p);
 438	rb_insert_color(&mz->tree_node, &mctz->rb_root);
 439	mz->on_tree = true;
 440}
 441
 442static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
 443					 struct mem_cgroup_tree_per_zone *mctz)
 444{
 445	if (!mz->on_tree)
 446		return;
 447	rb_erase(&mz->tree_node, &mctz->rb_root);
 448	mz->on_tree = false;
 449}
 450
 451static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
 452				       struct mem_cgroup_tree_per_zone *mctz)
 453{
 454	unsigned long flags;
 455
 456	spin_lock_irqsave(&mctz->lock, flags);
 457	__mem_cgroup_remove_exceeded(mz, mctz);
 458	spin_unlock_irqrestore(&mctz->lock, flags);
 459}
 460
 461static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
 462{
 463	unsigned long nr_pages = page_counter_read(&memcg->memory);
 464	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
 465	unsigned long excess = 0;
 466
 467	if (nr_pages > soft_limit)
 468		excess = nr_pages - soft_limit;
 469
 470	return excess;
 471}
 472
 473static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
 474{
 475	unsigned long excess;
 476	struct mem_cgroup_per_zone *mz;
 477	struct mem_cgroup_tree_per_zone *mctz;
 478
 479	mctz = soft_limit_tree_from_page(page);
 480	/*
  481	 * Necessary to update all ancestors when hierarchy is used,
 482	 * because their event counter is not touched.
 483	 */
 484	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
 485		mz = mem_cgroup_page_zoneinfo(memcg, page);
 486		excess = soft_limit_excess(memcg);
 487		/*
 488		 * We have to update the tree if mz is on RB-tree or
 489		 * mem is over its softlimit.
 490		 */
 491		if (excess || mz->on_tree) {
 492			unsigned long flags;
 493
 494			spin_lock_irqsave(&mctz->lock, flags);
 495			/* if on-tree, remove it */
 496			if (mz->on_tree)
 497				__mem_cgroup_remove_exceeded(mz, mctz);
 498			/*
 499			 * Insert again. mz->usage_in_excess will be updated.
 500			 * If excess is 0, no tree ops.
 501			 */
 502			__mem_cgroup_insert_exceeded(mz, mctz, excess);
 503			spin_unlock_irqrestore(&mctz->lock, flags);
 504		}
 505	}
 506}
 507
 508static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
 509{
 510	struct mem_cgroup_tree_per_zone *mctz;
 511	struct mem_cgroup_per_zone *mz;
 512	int nid, zid;
 513
 514	for_each_node(nid) {
 515		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
 516			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
 517			mctz = soft_limit_tree_node_zone(nid, zid);
 518			mem_cgroup_remove_exceeded(mz, mctz);
 519		}
 520	}
 521}
 522
 523static struct mem_cgroup_per_zone *
 524__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 525{
 526	struct rb_node *rightmost = NULL;
 527	struct mem_cgroup_per_zone *mz;
 528
 529retry:
 530	mz = NULL;
 531	rightmost = rb_last(&mctz->rb_root);
 532	if (!rightmost)
 533		goto done;		/* Nothing to reclaim from */
 534
 535	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
 536	/*
 537	 * Remove the node now but someone else can add it back,
  538	 * we will add it back at the end of reclaim to its correct
 539	 * position in the tree.
 540	 */
 541	__mem_cgroup_remove_exceeded(mz, mctz);
 542	if (!soft_limit_excess(mz->memcg) ||
 543	    !css_tryget_online(&mz->memcg->css))
 544		goto retry;
 545done:
 546	return mz;
 547}
 548
 549static struct mem_cgroup_per_zone *
 550mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 551{
 552	struct mem_cgroup_per_zone *mz;
 553
 554	spin_lock_irq(&mctz->lock);
 555	mz = __mem_cgroup_largest_soft_limit_node(mctz);
 556	spin_unlock_irq(&mctz->lock);
 557	return mz;
 558}
 559
 560/*
 561 * Return page count for single (non recursive) @memcg.
 562 *
 563 * Implementation Note: reading percpu statistics for memcg.
 564 *
  565 * Both vmstat[] and percpu_counter use a threshold and periodic
  566 * synchronization to implement "quick" reads. There is a trade-off between
  567 * reading cost and precision of the value, so we could implement a similar
  568 * periodic synchronization for memcg's counters.
  569 *
  570 * But this _read() function is currently used for the user interface. The
  571 * user accounts memory usage by memory cgroup and _always_ requires an exact
  572 * value, because the numbers are used for accounting. Even with a
  573 * quick-and-fuzzy read we would still have to visit all online cpus and sum
  574 * the values, so for now the extra synchronization is not implemented
  575 * (it is only done for cpu hotplug).
  576 *
  577 * If kernel-internal users could make do with a not-exact value, and reading
  578 * all cpu values became a performance bottleneck in some common workload, a
  579 * threshold and synchronization scheme like vmstat[]'s should be implemented.
 580 */
 581static unsigned long
 582mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
 583{
 584	long val = 0;
 585	int cpu;
 586
 587	/* Per-cpu values can be negative, use a signed accumulator */
 588	for_each_possible_cpu(cpu)
 589		val += per_cpu(memcg->stat->count[idx], cpu);
 590	/*
 591	 * Summing races with updates, so val may be negative.  Avoid exposing
 592	 * transient negative values.
 593	 */
 594	if (val < 0)
 595		val = 0;
 596	return val;
 597}
 598
 599static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
 600					    enum mem_cgroup_events_index idx)
 601{
 602	unsigned long val = 0;
 603	int cpu;
 604
 605	for_each_possible_cpu(cpu)
 606		val += per_cpu(memcg->stat->events[idx], cpu);
 607	return val;
 608}
 609
 610static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 611					 struct page *page,
 612					 bool compound, int nr_pages)
 613{
 614	/*
 615	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
 616	 * counted as CACHE even if it's on ANON LRU.
 617	 */
 618	if (PageAnon(page))
 619		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
 620				nr_pages);
 621	else
 622		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
 623				nr_pages);
 624
 625	if (compound) {
 626		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 627		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
 628				nr_pages);
 629	}
 630
 631	/* pagein of a big page is an event. So, ignore page size */
 632	if (nr_pages > 0)
 633		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
 634	else {
 635		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
 636		nr_pages = -nr_pages; /* for event */
 637	}
 638
 639	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 640}
 641
 642unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
 643					   int nid, unsigned int lru_mask)
 644{
 645	unsigned long nr = 0;
 646	int zid;
 647
 648	VM_BUG_ON((unsigned)nid >= nr_node_ids);
 649
 650	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
 651		struct mem_cgroup_per_zone *mz;
 652		enum lru_list lru;
 653
 654		for_each_lru(lru) {
 655			if (!(BIT(lru) & lru_mask))
 656				continue;
 657			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
 658			nr += mz->lru_size[lru];
 659		}
 660	}
 661	return nr;
 662}
 663
 664static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
 665			unsigned int lru_mask)
 666{
 667	unsigned long nr = 0;
 668	int nid;
 669
 670	for_each_node_state(nid, N_MEMORY)
 671		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
 672	return nr;
 673}
 674
 675static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
 676				       enum mem_cgroup_events_target target)
 677{
 678	unsigned long val, next;
 679
 680	val = __this_cpu_read(memcg->stat->nr_page_events);
 681	next = __this_cpu_read(memcg->stat->targets[target]);
 682	/* from time_after() in jiffies.h */
 683	if ((long)next - (long)val < 0) {
 684		switch (target) {
 685		case MEM_CGROUP_TARGET_THRESH:
 686			next = val + THRESHOLDS_EVENTS_TARGET;
 687			break;
 688		case MEM_CGROUP_TARGET_SOFTLIMIT:
 689			next = val + SOFTLIMIT_EVENTS_TARGET;
 690			break;
 691		case MEM_CGROUP_TARGET_NUMAINFO:
 692			next = val + NUMAINFO_EVENTS_TARGET;
 693			break;
 694		default:
 695			break;
 696		}
 697		__this_cpu_write(memcg->stat->targets[target], next);
 698		return true;
 699	}
 700	return false;
 701}
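     /*
      * Worked example for MEM_CGROUP_TARGET_THRESH: if nr_page_events has
      * reached 130 while the stored target is still 128, the wrap-safe
      * comparison above fires, the target is advanced to
      * 130 + THRESHOLDS_EVENTS_TARGET == 258, and true is returned; further
      * calls return false until another 128 page events have accumulated.
      */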
 702
 703/*
 704 * Check events in order.
 705 *
 706 */
 707static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 708{
 709	/* threshold event is triggered in finer grain than soft limit */
 710	if (unlikely(mem_cgroup_event_ratelimit(memcg,
 711						MEM_CGROUP_TARGET_THRESH))) {
 712		bool do_softlimit;
 713		bool do_numainfo __maybe_unused;
 714
 715		do_softlimit = mem_cgroup_event_ratelimit(memcg,
 716						MEM_CGROUP_TARGET_SOFTLIMIT);
 717#if MAX_NUMNODES > 1
 718		do_numainfo = mem_cgroup_event_ratelimit(memcg,
 719						MEM_CGROUP_TARGET_NUMAINFO);
 720#endif
 721		mem_cgroup_threshold(memcg);
 722		if (unlikely(do_softlimit))
 723			mem_cgroup_update_tree(memcg, page);
 724#if MAX_NUMNODES > 1
 725		if (unlikely(do_numainfo))
 726			atomic_inc(&memcg->numainfo_events);
 727#endif
 728	}
 729}
 730
 731struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 732{
 733	/*
 734	 * mm_update_next_owner() may clear mm->owner to NULL
 735	 * if it races with swapoff, page migration, etc.
 736	 * So this can be called with p == NULL.
 737	 */
 738	if (unlikely(!p))
 739		return NULL;
 740
 741	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
 742}
 743EXPORT_SYMBOL(mem_cgroup_from_task);
 744
 745static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
 746{
 747	struct mem_cgroup *memcg = NULL;
 748
 749	rcu_read_lock();
 750	do {
 751		/*
  752		 * Page cache insertions can happen without an
 753		 * actual mm context, e.g. during disk probing
 754		 * on boot, loopback IO, acct() writes etc.
 755		 */
 756		if (unlikely(!mm))
 757			memcg = root_mem_cgroup;
 758		else {
 759			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
 760			if (unlikely(!memcg))
 761				memcg = root_mem_cgroup;
 762		}
 763	} while (!css_tryget_online(&memcg->css));
 764	rcu_read_unlock();
 765	return memcg;
 766}
 767
 768/**
 769 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 770 * @root: hierarchy root
 771 * @prev: previously returned memcg, NULL on first invocation
 772 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 773 *
 774 * Returns references to children of the hierarchy below @root, or
 775 * @root itself, or %NULL after a full round-trip.
 776 *
 777 * Caller must pass the return value in @prev on subsequent
 778 * invocations for reference counting, or use mem_cgroup_iter_break()
 779 * to cancel a hierarchy walk before the round-trip is complete.
 780 *
 781 * Reclaimers can specify a zone and a priority level in @reclaim to
 782 * divide up the memcgs in the hierarchy among all concurrent
 783 * reclaimers operating on the same zone and priority.
 784 */
 785struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 786				   struct mem_cgroup *prev,
 787				   struct mem_cgroup_reclaim_cookie *reclaim)
 788{
 789	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
 790	struct cgroup_subsys_state *css = NULL;
 791	struct mem_cgroup *memcg = NULL;
 792	struct mem_cgroup *pos = NULL;
 793
 794	if (mem_cgroup_disabled())
 795		return NULL;
 796
 797	if (!root)
 798		root = root_mem_cgroup;
 799
 800	if (prev && !reclaim)
 801		pos = prev;
 802
 803	if (!root->use_hierarchy && root != root_mem_cgroup) {
 804		if (prev)
 805			goto out;
 806		return root;
 807	}
 808
 809	rcu_read_lock();
 810
 811	if (reclaim) {
 812		struct mem_cgroup_per_zone *mz;
 813
 814		mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
 815		iter = &mz->iter[reclaim->priority];
 816
 817		if (prev && reclaim->generation != iter->generation)
 818			goto out_unlock;
 819
 820		while (1) {
 821			pos = READ_ONCE(iter->position);
 822			if (!pos || css_tryget(&pos->css))
 823				break;
 824			/*
 825			 * css reference reached zero, so iter->position will
 826			 * be cleared by ->css_released. However, we should not
 827			 * rely on this happening soon, because ->css_released
 828			 * is called from a work queue, and by busy-waiting we
 829			 * might block it. So we clear iter->position right
 830			 * away.
 831			 */
 832			(void)cmpxchg(&iter->position, pos, NULL);
 833		}
 834	}
 835
 836	if (pos)
 837		css = &pos->css;
 838
 839	for (;;) {
 840		css = css_next_descendant_pre(css, &root->css);
 841		if (!css) {
 842			/*
 843			 * Reclaimers share the hierarchy walk, and a
 844			 * new one might jump in right at the end of
 845			 * the hierarchy - make sure they see at least
 846			 * one group and restart from the beginning.
 847			 */
 848			if (!prev)
 849				continue;
 850			break;
 851		}
 852
 853		/*
 854		 * Verify the css and acquire a reference.  The root
 855		 * is provided by the caller, so we know it's alive
 856		 * and kicking, and don't take an extra reference.
 857		 */
 858		memcg = mem_cgroup_from_css(css);
 859
 860		if (css == &root->css)
 861			break;
 862
 863		if (css_tryget(css))
 864			break;
 865
 866		memcg = NULL;
 867	}
 868
 869	if (reclaim) {
 870		/*
 871		 * The position could have already been updated by a competing
 872		 * thread, so check that the value hasn't changed since we read
 873		 * it to avoid reclaiming from the same cgroup twice.
 874		 */
 875		(void)cmpxchg(&iter->position, pos, memcg);
 876
 877		if (pos)
 878			css_put(&pos->css);
 879
 880		if (!memcg)
 881			iter->generation++;
 882		else if (!prev)
 883			reclaim->generation = iter->generation;
 884	}
 885
 886out_unlock:
 887	rcu_read_unlock();
 888out:
 889	if (prev && prev != root)
 890		css_put(&prev->css);
 891
 892	return memcg;
 893}
 894
 895/**
 896 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 897 * @root: hierarchy root
 898 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 899 */
 900void mem_cgroup_iter_break(struct mem_cgroup *root,
 901			   struct mem_cgroup *prev)
 902{
 903	if (!root)
 904		root = root_mem_cgroup;
 905	if (prev && prev != root)
 906		css_put(&prev->css);
 907}
 908
 909static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
 910{
 911	struct mem_cgroup *memcg = dead_memcg;
 912	struct mem_cgroup_reclaim_iter *iter;
 913	struct mem_cgroup_per_zone *mz;
 914	int nid, zid;
 915	int i;
 916
 917	while ((memcg = parent_mem_cgroup(memcg))) {
 918		for_each_node(nid) {
 919			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
 920				mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
 921				for (i = 0; i <= DEF_PRIORITY; i++) {
 922					iter = &mz->iter[i];
 923					cmpxchg(&iter->position,
 924						dead_memcg, NULL);
 925				}
 926			}
 927		}
 928	}
 929}
 930
 931/*
 932 * Iteration constructs for visiting all cgroups (under a tree).  If
 933 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 934 * be used for reference counting.
 935 */
 936#define for_each_mem_cgroup_tree(iter, root)		\
 937	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
 938	     iter != NULL;				\
 939	     iter = mem_cgroup_iter(root, iter, NULL))
 940
 941#define for_each_mem_cgroup(iter)			\
 942	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
 943	     iter != NULL;				\
 944	     iter = mem_cgroup_iter(NULL, iter, NULL))
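     /*
      * Minimal usage sketch (should_stop() is an illustrative predicate only):
      * a walk that stops early must drop the iterator's css reference via
      * mem_cgroup_iter_break(), e.g.
      *
      *	for_each_mem_cgroup_tree(iter, root) {
      *		if (should_stop(iter)) {
      *			mem_cgroup_iter_break(root, iter);
      *			break;
      *		}
      *	}
      */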
 945
 946/**
 947 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
 948 * @zone: zone of the wanted lruvec
 949 * @memcg: memcg of the wanted lruvec
 950 *
 951 * Returns the lru list vector holding pages for the given @zone and
  952 * @memcg.  This can be the global zone lruvec, if the memory controller
 953 * is disabled.
 954 */
 955struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
 956				      struct mem_cgroup *memcg)
 957{
 958	struct mem_cgroup_per_zone *mz;
 959	struct lruvec *lruvec;
 960
 961	if (mem_cgroup_disabled()) {
 962		lruvec = &zone->lruvec;
 963		goto out;
 964	}
 965
 966	mz = mem_cgroup_zone_zoneinfo(memcg, zone);
 967	lruvec = &mz->lruvec;
 968out:
 969	/*
 970	 * Since a node can be onlined after the mem_cgroup was created,
 971	 * we have to be prepared to initialize lruvec->zone here;
 972	 * and if offlined then reonlined, we need to reinitialize it.
 973	 */
 974	if (unlikely(lruvec->zone != zone))
 975		lruvec->zone = zone;
 976	return lruvec;
 977}
 978
 979/**
 980 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 981 * @page: the page
 982 * @zone: zone of the page
 983 *
 984 * This function is only safe when following the LRU page isolation
 985 * and putback protocol: the LRU lock must be held, and the page must
 986 * either be PageLRU() or the caller must have isolated/allocated it.
 987 */
 988struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 989{
 990	struct mem_cgroup_per_zone *mz;
 991	struct mem_cgroup *memcg;
 992	struct lruvec *lruvec;
 993
 994	if (mem_cgroup_disabled()) {
 995		lruvec = &zone->lruvec;
 996		goto out;
 997	}
 998
 999	memcg = page->mem_cgroup;
1000	/*
1001	 * Swapcache readahead pages are added to the LRU - and
1002	 * possibly migrated - before they are charged.
1003	 */
1004	if (!memcg)
1005		memcg = root_mem_cgroup;
1006
1007	mz = mem_cgroup_page_zoneinfo(memcg, page);
1008	lruvec = &mz->lruvec;
1009out:
1010	/*
1011	 * Since a node can be onlined after the mem_cgroup was created,
1012	 * we have to be prepared to initialize lruvec->zone here;
1013	 * and if offlined then reonlined, we need to reinitialize it.
1014	 */
1015	if (unlikely(lruvec->zone != zone))
1016		lruvec->zone = zone;
1017	return lruvec;
1018}
1019
1020/**
1021 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1022 * @lruvec: mem_cgroup per zone lru vector
1023 * @lru: index of lru list the page is sitting on
1024 * @nr_pages: positive when adding or negative when removing
1025 *
1026 * This function must be called when a page is added to or removed from an
1027 * lru list.
1028 */
1029void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1030				int nr_pages)
1031{
1032	struct mem_cgroup_per_zone *mz;
1033	unsigned long *lru_size;
1034
1035	if (mem_cgroup_disabled())
1036		return;
1037
1038	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1039	lru_size = mz->lru_size + lru;
1040	*lru_size += nr_pages;
1041	VM_BUG_ON((long)(*lru_size) < 0);
1042}
1043
1044bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
1045{
1046	struct mem_cgroup *task_memcg;
1047	struct task_struct *p;
1048	bool ret;
1049
1050	p = find_lock_task_mm(task);
1051	if (p) {
1052		task_memcg = get_mem_cgroup_from_mm(p->mm);
1053		task_unlock(p);
1054	} else {
1055		/*
1056		 * All threads may have already detached their mm's, but the oom
1057		 * killer still needs to detect if they have already been oom
1058		 * killed to prevent needlessly killing additional tasks.
1059		 */
1060		rcu_read_lock();
1061		task_memcg = mem_cgroup_from_task(task);
1062		css_get(&task_memcg->css);
1063		rcu_read_unlock();
1064	}
1065	ret = mem_cgroup_is_descendant(task_memcg, memcg);
1066	css_put(&task_memcg->css);
1067	return ret;
1068}
1069
1070/**
1071 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1072 * @memcg: the memory cgroup
1073 *
 1074 * Returns the maximum amount of memory @memcg can be charged with, in
1075 * pages.
1076 */
1077static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1078{
1079	unsigned long margin = 0;
1080	unsigned long count;
1081	unsigned long limit;
1082
1083	count = page_counter_read(&memcg->memory);
1084	limit = READ_ONCE(memcg->memory.limit);
1085	if (count < limit)
1086		margin = limit - count;
1087
1088	if (do_memsw_account()) {
1089		count = page_counter_read(&memcg->memsw);
1090		limit = READ_ONCE(memcg->memsw.limit);
1091		if (count <= limit)
1092			margin = min(margin, limit - count);
1093	}
1094
1095	return margin;
1096}
1097
1098/*
 1099 * A routine for checking whether "memcg" is under move_account() or not.
 1100 *
 1101 * Checks whether a cgroup is mc.from, mc.to, or in the hierarchy of the
 1102 * moving cgroups. This is used for waiting at high memory pressure
 1103 * caused by "move".
1104 */
1105static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1106{
1107	struct mem_cgroup *from;
1108	struct mem_cgroup *to;
1109	bool ret = false;
1110	/*
1111	 * Unlike task_move routines, we access mc.to, mc.from not under
1112	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1113	 */
1114	spin_lock(&mc.lock);
1115	from = mc.from;
1116	to = mc.to;
1117	if (!from)
1118		goto unlock;
1119
1120	ret = mem_cgroup_is_descendant(from, memcg) ||
1121		mem_cgroup_is_descendant(to, memcg);
1122unlock:
1123	spin_unlock(&mc.lock);
1124	return ret;
1125}
1126
1127static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1128{
1129	if (mc.moving_task && current != mc.moving_task) {
1130		if (mem_cgroup_under_move(memcg)) {
1131			DEFINE_WAIT(wait);
1132			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1133			/* moving charge context might have finished. */
1134			if (mc.moving_task)
1135				schedule();
1136			finish_wait(&mc.waitq, &wait);
1137			return true;
1138		}
1139	}
1140	return false;
1141}
1142
1143#define K(x) ((x) << (PAGE_SHIFT-10))
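     /*
      * K() converts a page count to kilobytes; e.g. with 4KB pages
      * (PAGE_SHIFT == 12) it is a left shift by 2, so K(10) == 40.
      */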
1144/**
1145 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
1146 * @memcg: The memory cgroup that went over limit
1147 * @p: Task that is going to be killed
1148 *
1149 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1150 * enabled
1151 */
1152void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1153{
1154	struct mem_cgroup *iter;
1155	unsigned int i;
1156
1157	rcu_read_lock();
1158
1159	if (p) {
1160		pr_info("Task in ");
1161		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1162		pr_cont(" killed as a result of limit of ");
1163	} else {
1164		pr_info("Memory limit reached of cgroup ");
1165	}
1166
1167	pr_cont_cgroup_path(memcg->css.cgroup);
1168	pr_cont("\n");
1169
1170	rcu_read_unlock();
1171
1172	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1173		K((u64)page_counter_read(&memcg->memory)),
1174		K((u64)memcg->memory.limit), memcg->memory.failcnt);
1175	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1176		K((u64)page_counter_read(&memcg->memsw)),
1177		K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
1178	pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1179		K((u64)page_counter_read(&memcg->kmem)),
1180		K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
1181
1182	for_each_mem_cgroup_tree(iter, memcg) {
1183		pr_info("Memory cgroup stats for ");
1184		pr_cont_cgroup_path(iter->css.cgroup);
1185		pr_cont(":");
1186
1187		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1188			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1189				continue;
1190			pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
1191				K(mem_cgroup_read_stat(iter, i)));
1192		}
1193
1194		for (i = 0; i < NR_LRU_LISTS; i++)
1195			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1196				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1197
1198		pr_cont("\n");
1199	}
1200}
1201
1202/*
 1203 * This function returns the number of memcgs under the hierarchy tree.
 1204 * Returns 1 (self count) if there are no children.
1205 */
1206static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1207{
1208	int num = 0;
1209	struct mem_cgroup *iter;
1210
1211	for_each_mem_cgroup_tree(iter, memcg)
1212		num++;
1213	return num;
1214}
1215
1216/*
1217 * Return the memory (and swap, if configured) limit for a memcg.
1218 */
1219static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
1220{
1221	unsigned long limit;
1222
1223	limit = memcg->memory.limit;
1224	if (mem_cgroup_swappiness(memcg)) {
1225		unsigned long memsw_limit;
1226		unsigned long swap_limit;
1227
1228		memsw_limit = memcg->memsw.limit;
1229		swap_limit = memcg->swap.limit;
1230		swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
1231		limit = min(limit + swap_limit, memsw_limit);
1232	}
1233	return limit;
1234}
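     /*
      * Worked example: with memory.limit == 1000 pages, swap.limit == 400,
      * total_swap_pages == 300 and memsw.limit == 1200, the usable swap is
      * min(400, 300) == 300 pages, so the returned limit is
      * min(1000 + 300, 1200) == 1200 pages; with swappiness == 0 it would
      * simply be 1000.
      */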
1235
1236static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1237				     int order)
1238{
1239	struct oom_control oc = {
1240		.zonelist = NULL,
1241		.nodemask = NULL,
1242		.gfp_mask = gfp_mask,
1243		.order = order,
1244	};
1245	struct mem_cgroup *iter;
1246	unsigned long chosen_points = 0;
1247	unsigned long totalpages;
1248	unsigned int points = 0;
1249	struct task_struct *chosen = NULL;
1250
1251	mutex_lock(&oom_lock);
1252
1253	/*
1254	 * If current has a pending SIGKILL or is exiting, then automatically
1255	 * select it.  The goal is to allow it to allocate so that it may
1256	 * quickly exit and free its memory.
1257	 */
1258	if (fatal_signal_pending(current) || task_will_free_mem(current)) {
1259		mark_oom_victim(current);
1260		goto unlock;
1261	}
1262
1263	check_panic_on_oom(&oc, CONSTRAINT_MEMCG, memcg);
1264	totalpages = mem_cgroup_get_limit(memcg) ? : 1;
1265	for_each_mem_cgroup_tree(iter, memcg) {
1266		struct css_task_iter it;
1267		struct task_struct *task;
1268
1269		css_task_iter_start(&iter->css, &it);
1270		while ((task = css_task_iter_next(&it))) {
1271			switch (oom_scan_process_thread(&oc, task, totalpages)) {
1272			case OOM_SCAN_SELECT:
1273				if (chosen)
1274					put_task_struct(chosen);
1275				chosen = task;
1276				chosen_points = ULONG_MAX;
1277				get_task_struct(chosen);
1278				/* fall through */
1279			case OOM_SCAN_CONTINUE:
1280				continue;
1281			case OOM_SCAN_ABORT:
1282				css_task_iter_end(&it);
1283				mem_cgroup_iter_break(memcg, iter);
1284				if (chosen)
1285					put_task_struct(chosen);
1286				goto unlock;
1287			case OOM_SCAN_OK:
1288				break;
1289			};
1290			points = oom_badness(task, memcg, NULL, totalpages);
1291			if (!points || points < chosen_points)
1292				continue;
1293			/* Prefer thread group leaders for display purposes */
1294			if (points == chosen_points &&
1295			    thread_group_leader(chosen))
1296				continue;
1297
1298			if (chosen)
1299				put_task_struct(chosen);
1300			chosen = task;
1301			chosen_points = points;
1302			get_task_struct(chosen);
1303		}
1304		css_task_iter_end(&it);
1305	}
1306
1307	if (chosen) {
1308		points = chosen_points * 1000 / totalpages;
1309		oom_kill_process(&oc, chosen, points, totalpages, memcg,
1310				 "Memory cgroup out of memory");
1311	}
1312unlock:
1313	mutex_unlock(&oom_lock);
1314	return chosen;
1315}
1316
1317#if MAX_NUMNODES > 1
1318
1319/**
1320 * test_mem_cgroup_node_reclaimable
1321 * @memcg: the target memcg
1322 * @nid: the node ID to be checked.
 1323 * @noswap: specify true here if the user wants file-only information.
1324 *
1325 * This function returns whether the specified memcg contains any
1326 * reclaimable pages on a node. Returns true if there are any reclaimable
1327 * pages in the node.
1328 */
1329static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1330		int nid, bool noswap)
1331{
1332	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1333		return true;
1334	if (noswap || !total_swap_pages)
1335		return false;
1336	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1337		return true;
1338	return false;
1339
1340}
1341
1342/*
1343 * Always updating the nodemask is not very good - even if we have an empty
1344 * list or the wrong list here, we can start from some node and traverse all
1345 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1346 *
1347 */
1348static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1349{
1350	int nid;
1351	/*
 1352	 * numainfo_events > 0 means there were at least NUMAINFO_EVENTS_TARGET
1353	 * pagein/pageout changes since the last update.
1354	 */
1355	if (!atomic_read(&memcg->numainfo_events))
1356		return;
1357	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1358		return;
1359
1360	/* make a nodemask where this memcg uses memory from */
1361	memcg->scan_nodes = node_states[N_MEMORY];
1362
1363	for_each_node_mask(nid, node_states[N_MEMORY]) {
1364
1365		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1366			node_clear(nid, memcg->scan_nodes);
1367	}
1368
1369	atomic_set(&memcg->numainfo_events, 0);
1370	atomic_set(&memcg->numainfo_updating, 0);
1371}
1372
1373/*
1374 * Selecting a node where we start reclaim from. Because what we need is just
 1375 * reducing the usage counter, starting from anywhere is O.K. Considering
 1376 * memory reclaim from the current node, there are pros and cons.
1377 *
1378 * Freeing memory from current node means freeing memory from a node which
 1379 * we'll use or we've used. So, it may make the LRU bad. And if several
 1380 * threads hit limits, they will contend on one node. But freeing from a
 1381 * remote node means more cost for memory reclaim because of memory latency.
1382 *
1383 * Now, we use round-robin. Better algorithm is welcomed.
1384 */
1385int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1386{
1387	int node;
1388
1389	mem_cgroup_may_update_nodemask(memcg);
1390	node = memcg->last_scanned_node;
1391
1392	node = next_node(node, memcg->scan_nodes);
1393	if (node == MAX_NUMNODES)
1394		node = first_node(memcg->scan_nodes);
1395	/*
1396	 * We call this when we hit limit, not when pages are added to LRU.
1397	 * No LRU may hold pages because all pages are UNEVICTABLE or
1398	 * memcg is too small and all pages are not on LRU. In that case,
 1399	 * we use the current node.
1400	 */
1401	if (unlikely(node == MAX_NUMNODES))
1402		node = numa_node_id();
1403
1404	memcg->last_scanned_node = node;
1405	return node;
1406}
1407#else
1408int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1409{
1410	return 0;
1411}
1412#endif
1413
1414static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1415				   struct zone *zone,
1416				   gfp_t gfp_mask,
1417				   unsigned long *total_scanned)
1418{
1419	struct mem_cgroup *victim = NULL;
1420	int total = 0;
1421	int loop = 0;
1422	unsigned long excess;
1423	unsigned long nr_scanned;
1424	struct mem_cgroup_reclaim_cookie reclaim = {
1425		.zone = zone,
1426		.priority = 0,
1427	};
1428
1429	excess = soft_limit_excess(root_memcg);
1430
1431	while (1) {
1432		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1433		if (!victim) {
1434			loop++;
1435			if (loop >= 2) {
1436				/*
1437				 * If we have not been able to reclaim
 1438				 * anything, it might be because there are
1439				 * no reclaimable pages under this hierarchy
1440				 */
1441				if (!total)
1442					break;
1443				/*
1444				 * We want to do more targeted reclaim.
 1445				 * excess >> 2 is not too excessive, so we do not
 1446				 * reclaim too much, nor too little, so we do not
 1447				 * keep coming back to reclaim from this cgroup
1448				 */
1449				if (total >= (excess >> 2) ||
1450					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1451					break;
1452			}
1453			continue;
1454		}
1455		total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
1456						     zone, &nr_scanned);
1457		*total_scanned += nr_scanned;
1458		if (!soft_limit_excess(root_memcg))
1459			break;
1460	}
1461	mem_cgroup_iter_break(root_memcg, victim);
1462	return total;
1463}
1464
1465#ifdef CONFIG_LOCKDEP
1466static struct lockdep_map memcg_oom_lock_dep_map = {
1467	.name = "memcg_oom_lock",
1468};
1469#endif
1470
1471static DEFINE_SPINLOCK(memcg_oom_lock);
1472
1473/*
 1474 * Check whether the OOM-Killer is already running under our hierarchy.
1475 * If someone is running, return false.
1476 */
1477static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1478{
1479	struct mem_cgroup *iter, *failed = NULL;
1480
1481	spin_lock(&memcg_oom_lock);
1482
1483	for_each_mem_cgroup_tree(iter, memcg) {
1484		if (iter->oom_lock) {
1485			/*
1486			 * this subtree of our hierarchy is already locked
1487			 * so we cannot give a lock.
1488			 */
1489			failed = iter;
1490			mem_cgroup_iter_break(memcg, iter);
1491			break;
1492		} else
1493			iter->oom_lock = true;
1494	}
1495
1496	if (failed) {
1497		/*
1498		 * OK, we failed to lock the whole subtree so we have
 1499		 * to clean up what we set up, up to the failing subtree
1500		 */
1501		for_each_mem_cgroup_tree(iter, memcg) {
1502			if (iter == failed) {
1503				mem_cgroup_iter_break(memcg, iter);
1504				break;
1505			}
1506			iter->oom_lock = false;
1507		}
1508	} else
1509		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1510
1511	spin_unlock(&memcg_oom_lock);
1512
1513	return !failed;
1514}
1515
1516static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1517{
1518	struct mem_cgroup *iter;
1519
1520	spin_lock(&memcg_oom_lock);
1521	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
1522	for_each_mem_cgroup_tree(iter, memcg)
1523		iter->oom_lock = false;
1524	spin_unlock(&memcg_oom_lock);
1525}
1526
1527static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1528{
1529	struct mem_cgroup *iter;
1530
1531	spin_lock(&memcg_oom_lock);
1532	for_each_mem_cgroup_tree(iter, memcg)
1533		iter->under_oom++;
1534	spin_unlock(&memcg_oom_lock);
1535}
1536
1537static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1538{
1539	struct mem_cgroup *iter;
1540
1541	/*
1542	 * When a new child is created while the hierarchy is under oom,
1543	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
1544	 */
1545	spin_lock(&memcg_oom_lock);
1546	for_each_mem_cgroup_tree(iter, memcg)
1547		if (iter->under_oom > 0)
1548			iter->under_oom--;
1549	spin_unlock(&memcg_oom_lock);
1550}
1551
1552static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1553
1554struct oom_wait_info {
1555	struct mem_cgroup *memcg;
1556	wait_queue_t	wait;
1557};
1558
1559static int memcg_oom_wake_function(wait_queue_t *wait,
1560	unsigned mode, int sync, void *arg)
1561{
1562	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1563	struct mem_cgroup *oom_wait_memcg;
1564	struct oom_wait_info *oom_wait_info;
1565
1566	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1567	oom_wait_memcg = oom_wait_info->memcg;
1568
1569	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1570	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1571		return 0;
1572	return autoremove_wake_function(wait, mode, sync, arg);
1573}
1574
1575static void memcg_oom_recover(struct mem_cgroup *memcg)
1576{
1577	/*
1578	 * For the following lockless ->under_oom test, the only required
1579	 * guarantee is that it must see the state asserted by an OOM when
1580	 * this function is called as a result of userland actions
1581	 * triggered by the notification of the OOM.  This is trivially
1582	 * achieved by invoking mem_cgroup_mark_under_oom() before
1583	 * triggering notification.
1584	 */
1585	if (memcg && memcg->under_oom)
1586		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1587}
1588
1589static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1590{
1591	if (!current->memcg_may_oom)
1592		return;
1593	/*
1594	 * We are in the middle of the charge context here, so we
1595	 * don't want to block when potentially sitting on a callstack
1596	 * that holds all kinds of filesystem and mm locks.
1597	 *
1598	 * Also, the caller may handle a failed allocation gracefully
1599	 * (like optional page cache readahead) and so an OOM killer
1600	 * invocation might not even be necessary.
1601	 *
1602	 * That's why we don't do anything here except remember the
1603	 * OOM context and then deal with it at the end of the page
1604	 * fault when the stack is unwound, the locks are released,
1605	 * and when we know whether the fault was overall successful.
1606	 */
1607	css_get(&memcg->css);
1608	current->memcg_in_oom = memcg;
1609	current->memcg_oom_gfp_mask = mask;
1610	current->memcg_oom_order = order;
1611}
1612
1613/**
1614 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1615 * @handle: actually kill/wait or just clean up the OOM state
1616 *
1617 * This has to be called at the end of a page fault if the memcg OOM
1618 * handler was enabled.
1619 *
1620 * Memcg supports userspace OOM handling where failed allocations must
1621 * sleep on a waitqueue until the userspace task resolves the
1622 * situation.  Sleeping directly in the charge context with all kinds
1623 * of locks held is not a good idea, instead we remember an OOM state
1624 * in the task and mem_cgroup_oom_synchronize() has to be called at
1625 * the end of the page fault to complete the OOM handling.
1626 *
1627 * Returns %true if an ongoing memcg OOM situation was detected and
1628 * completed, %false otherwise.
1629 */
1630bool mem_cgroup_oom_synchronize(bool handle)
1631{
1632	struct mem_cgroup *memcg = current->memcg_in_oom;
1633	struct oom_wait_info owait;
1634	bool locked;
1635
1636	/* OOM is global, do not handle */
1637	if (!memcg)
1638		return false;
1639
1640	if (!handle || oom_killer_disabled)
1641		goto cleanup;
1642
1643	owait.memcg = memcg;
1644	owait.wait.flags = 0;
1645	owait.wait.func = memcg_oom_wake_function;
1646	owait.wait.private = current;
1647	INIT_LIST_HEAD(&owait.wait.task_list);
1648
1649	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1650	mem_cgroup_mark_under_oom(memcg);
1651
1652	locked = mem_cgroup_oom_trylock(memcg);
1653
1654	if (locked)
1655		mem_cgroup_oom_notify(memcg);
1656
1657	if (locked && !memcg->oom_kill_disable) {
1658		mem_cgroup_unmark_under_oom(memcg);
1659		finish_wait(&memcg_oom_waitq, &owait.wait);
1660		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1661					 current->memcg_oom_order);
1662	} else {
1663		schedule();
1664		mem_cgroup_unmark_under_oom(memcg);
1665		finish_wait(&memcg_oom_waitq, &owait.wait);
1666	}
1667
1668	if (locked) {
1669		mem_cgroup_oom_unlock(memcg);
1670		/*
1671		 * There is no guarantee that an OOM-lock contender
1672		 * sees the wakeups triggered by the OOM kill
 1673		 * uncharges.  Wake any sleepers explicitly.
1674		 */
1675		memcg_oom_recover(memcg);
1676	}
1677cleanup:
1678	current->memcg_in_oom = NULL;
1679	css_put(&memcg->css);
1680	return true;
1681}
1682
1683/**
1684 * lock_page_memcg - lock a page->mem_cgroup binding
1685 * @page: the page
1686 *
1687 * This function protects unlocked LRU pages from being moved to
1688 * another cgroup and stabilizes their page->mem_cgroup binding.
1689 */
1690void lock_page_memcg(struct page *page)
1691{
1692	struct mem_cgroup *memcg;
1693	unsigned long flags;
1694
1695	/*
1696	 * The RCU lock is held throughout the transaction.  The fast
1697	 * path can get away without acquiring the memcg->move_lock
1698	 * because page moving starts with an RCU grace period.
1699	 */
1700	rcu_read_lock();
1701
1702	if (mem_cgroup_disabled())
1703		return;
1704again:
1705	memcg = page->mem_cgroup;
1706	if (unlikely(!memcg))
1707		return;
1708
1709	if (atomic_read(&memcg->moving_account) <= 0)
1710		return;
1711
1712	spin_lock_irqsave(&memcg->move_lock, flags);
1713	if (memcg != page->mem_cgroup) {
1714		spin_unlock_irqrestore(&memcg->move_lock, flags);
1715		goto again;
1716	}
1717
1718	/*
1719	 * When charge migration first begins, we can have locked and
1720	 * unlocked page stat updates happening concurrently.  Track
1721	 * the task who has the lock for unlock_page_memcg().
1722	 */
1723	memcg->move_lock_task = current;
1724	memcg->move_lock_flags = flags;
1725
1726	return;
1727}
1728EXPORT_SYMBOL(lock_page_memcg);
1729
1730/**
1731 * unlock_page_memcg - unlock a page->mem_cgroup binding
1732 * @page: the page
1733 */
1734void unlock_page_memcg(struct page *page)
1735{
1736	struct mem_cgroup *memcg = page->mem_cgroup;
1737
1738	if (memcg && memcg->move_lock_task == current) {
1739		unsigned long flags = memcg->move_lock_flags;
1740
1741		memcg->move_lock_task = NULL;
1742		memcg->move_lock_flags = 0;
1743
1744		spin_unlock_irqrestore(&memcg->move_lock, flags);
1745	}
1746
1747	rcu_read_unlock();
1748}
1749EXPORT_SYMBOL(unlock_page_memcg);
1750
1751/*
 1752 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
 1753 * TODO: it may be necessary to use bigger numbers on big iron.
1754 */
1755#define CHARGE_BATCH	32U
1756struct memcg_stock_pcp {
 1757	struct mem_cgroup *cached; /* this is never the root cgroup */
1758	unsigned int nr_pages;
1759	struct work_struct work;
1760	unsigned long flags;
1761#define FLUSHING_CACHED_CHARGE	0
1762};
1763static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1764static DEFINE_MUTEX(percpu_charge_mutex);
1765
1766/**
1767 * consume_stock: Try to consume stocked charge on this cpu.
1768 * @memcg: memcg to consume from.
1769 * @nr_pages: how many pages to charge.
1770 *
1771 * The charges will only happen if @memcg matches the current cpu's memcg
1772 * stock, and at least @nr_pages are available in that stock.  Failure to
1773 * service an allocation will refill the stock.
1774 *
1775 * returns true if successful, false otherwise.
1776 */
1777static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1778{
1779	struct memcg_stock_pcp *stock;
1780	bool ret = false;
1781
1782	if (nr_pages > CHARGE_BATCH)
1783		return ret;
1784
1785	stock = &get_cpu_var(memcg_stock);
1786	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
1787		stock->nr_pages -= nr_pages;
1788		ret = true;
1789	}
1790	put_cpu_var(memcg_stock);
1791	return ret;
1792}
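     /*
      * Sketch of the intended use (simplified): a charge attempt first tries
      * consume_stock(memcg, nr_pages); only if that fails does it charge the
      * page counters directly, and a surplus charged in that slow path is
      * handed back to the per-cpu cache with refill_stock().
      */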
1793
1794/*
 1795 * Drain stocks cached in percpu and reset the cached information.
1796 */
1797static void drain_stock(struct memcg_stock_pcp *stock)
1798{
1799	struct mem_cgroup *old = stock->cached;
1800
1801	if (stock->nr_pages) {
1802		page_counter_uncharge(&old->memory, stock->nr_pages);
1803		if (do_memsw_account())
1804			page_counter_uncharge(&old->memsw, stock->nr_pages);
1805		css_put_many(&old->css, stock->nr_pages);
1806		stock->nr_pages = 0;
1807	}
1808	stock->cached = NULL;
1809}
1810
1811/*
1812 * This must be called with preemption disabled, or by a thread
1813 * pinned to the local cpu.
1814 */
1815static void drain_local_stock(struct work_struct *dummy)
1816{
1817	struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
1818	drain_stock(stock);
1819	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1820}
1821
1822/*
1823 * Cache charges (nr_pages) in the local per-cpu area.
1824 * They will be consumed by consume_stock() later.
1825 */
1826static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1827{
1828	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
1829
 
1830	if (stock->cached != memcg) { /* reset if necessary */
1831		drain_stock(stock);
 
1832		stock->cached = memcg;
1833	}
1834	stock->nr_pages += nr_pages;
1835	put_cpu_var(memcg_stock);
1836}
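/*
 * Illustrative flow of the batched charging above (a sketch of what
 * try_charge() below ends up doing, not a literal trace; values assume
 * CHARGE_BATCH of 32 as defined earlier):
 *
 *   try_charge(memcg, gfp, 1)
 *     consume_stock(memcg, 1)          - miss: stock empty or foreign memcg
 *     page_counter_try_charge(.., 32)  - charge a full batch instead
 *     refill_stock(memcg, 31)          - park the surplus in this cpu's stock
 *
 *   try_charge(memcg, gfp, 1)          - next charge on the same cpu
 *     consume_stock(memcg, 1)          - hit: no page_counter traffic at all
 */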
1837
1838/*
1839 * Drains all per-CPU charge caches for given root_memcg resp. subtree
1840 * of the hierarchy under it.
1841 */
1842static void drain_all_stock(struct mem_cgroup *root_memcg)
1843{
1844	int cpu, curcpu;
1845
1846	/* If someone's already draining, avoid adding more workers. */
1847	if (!mutex_trylock(&percpu_charge_mutex))
1848		return;
1849	/* Notify other cpus that system-wide "drain" is running */
1850	get_online_cpus();
1851	curcpu = get_cpu();
1852	for_each_online_cpu(cpu) {
1853		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1854		struct mem_cgroup *memcg;
 
1855
 
1856		memcg = stock->cached;
1857		if (!memcg || !stock->nr_pages)
1858			continue;
1859		if (!mem_cgroup_is_descendant(memcg, root_memcg))
1860			continue;
1861		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1862			if (cpu == curcpu)
1863				drain_local_stock(&stock->work);
1864			else
1865				schedule_work_on(cpu, &stock->work);
1866		}
1867	}
1868	put_cpu();
1869	put_online_cpus();
1870	mutex_unlock(&percpu_charge_mutex);
1871}
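/*
 * Example of how the drain fans out (illustrative): when reclaim for a
 * memcg keeps failing, try_charge() calls drain_all_stock() on it.  Every
 * online cpu whose stock caches that memcg or one of its descendants is
 * flushed back to the page counters - synchronously for the calling cpu,
 * via schedule_work_on() for the others - so charges parked in per-cpu
 * stocks become visible to the limit checks again.
 */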
1872
1873static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
1874					unsigned long action,
1875					void *hcpu)
1876{
1877	int cpu = (unsigned long)hcpu;
1878	struct memcg_stock_pcp *stock;
1879
1880	if (action == CPU_ONLINE)
1881		return NOTIFY_OK;
1882
1883	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
1884		return NOTIFY_OK;
1885
1886	stock = &per_cpu(memcg_stock, cpu);
1887	drain_stock(stock);
1888	return NOTIFY_OK;
 
1889}
1890
1891static void reclaim_high(struct mem_cgroup *memcg,
1892			 unsigned int nr_pages,
1893			 gfp_t gfp_mask)
1894{
1895	do {
1896		if (page_counter_read(&memcg->memory) <= memcg->high)
1897			continue;
1898		mem_cgroup_events(memcg, MEMCG_HIGH, 1);
1899		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
1900	} while ((memcg = parent_mem_cgroup(memcg)));
1901}
1902
1903static void high_work_func(struct work_struct *work)
1904{
1905	struct mem_cgroup *memcg;
1906
1907	memcg = container_of(work, struct mem_cgroup, high_work);
1908	reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
1909}
1910
1911/*
1912 * Scheduled by try_charge() to be executed from the userland return path
1913 * and reclaims memory over the high limit.
1914 */
1915void mem_cgroup_handle_over_high(void)
1916{
1917	unsigned int nr_pages = current->memcg_nr_pages_over_high;
 
1918	struct mem_cgroup *memcg;
 
1919
1920	if (likely(!nr_pages))
1921		return;
1922
1923	memcg = get_mem_cgroup_from_mm(current->mm);
1924	reclaim_high(memcg, nr_pages, GFP_KERNEL);
1925	css_put(&memcg->css);
1926	current->memcg_nr_pages_over_high = 0;
1927}
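/*
 * Sketch of the over-high path (illustrative): try_charge() succeeds but
 * leaves usage above memcg->high, so it bumps
 * current->memcg_nr_pages_over_high and calls set_notify_resume().  On
 * the way back to userspace the resume-notification hook ends up here,
 * and reclaim_high() then walks from the charged memcg towards the root,
 * reclaiming nr_pages from every ancestor still above its high limit.
 */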
1928
1929static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
1930		      unsigned int nr_pages)
1931{
1932	unsigned int batch = max(CHARGE_BATCH, nr_pages);
1933	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1934	struct mem_cgroup *mem_over_limit;
1935	struct page_counter *counter;
1936	unsigned long nr_reclaimed;
1937	bool may_swap = true;
 
1938	bool drained = false;
1939
1940	if (mem_cgroup_is_root(memcg))
1941		return 0;
1942retry:
1943	if (consume_stock(memcg, nr_pages))
1944		return 0;
1945
1946	if (!do_memsw_account() ||
1947	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
1948		if (page_counter_try_charge(&memcg->memory, batch, &counter))
1949			goto done_restock;
1950		if (do_memsw_account())
1951			page_counter_uncharge(&memcg->memsw, batch);
1952		mem_over_limit = mem_cgroup_from_counter(counter, memory);
1953	} else {
1954		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
1955		may_swap = false;
1956	}
1957
1958	if (batch > nr_pages) {
1959		batch = nr_pages;
1960		goto retry;
1961	}
1962
1963	/*
1964	 * Unlike in global OOM situations, memcg is not in a physical
1965	 * memory shortage.  Allow dying and OOM-killed tasks to
1966	 * bypass the last charges so that they can exit quickly and
1967	 * free their memory.
1968	 */
1969	if (unlikely(test_thread_flag(TIF_MEMDIE) ||
1970		     fatal_signal_pending(current) ||
1971		     current->flags & PF_EXITING))
1972		goto force;
1973
1974	if (unlikely(task_in_memcg_oom(current)))
1975		goto nomem;
1976
1977	if (!gfpflags_allow_blocking(gfp_mask))
1978		goto nomem;
1979
1980	mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);
 
1981
 
1982	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
1983						    gfp_mask, may_swap);
 
1984
1985	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
1986		goto retry;
1987
1988	if (!drained) {
1989		drain_all_stock(mem_over_limit);
1990		drained = true;
1991		goto retry;
1992	}
1993
1994	if (gfp_mask & __GFP_NORETRY)
1995		goto nomem;
1996	/*
1997	 * Even though the limit is exceeded at this point, reclaim
1998	 * may have been able to free some pages.  Retry the charge
1999	 * before killing the task.
2000	 *
2001	 * Only for regular pages, though: huge pages are rather
2002	 * unlikely to succeed so close to the limit, and we fall back
2003	 * to regular pages anyway in case of failure.
2004	 */
2005	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2006		goto retry;
2007	/*
2008	 * At task move, charges can be double-counted.  So, it's better
2009	 * to wait until the end of task_move if something is going on.
2010	 */
2011	if (mem_cgroup_wait_acct_move(mem_over_limit))
2012		goto retry;
2013
2014	if (nr_retries--)
2015		goto retry;
2016
2017	if (gfp_mask & __GFP_NOFAIL)
2018		goto force;
2019
2020	if (fatal_signal_pending(current))
2021		goto force;
2022
2023	mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);
2024
2025	mem_cgroup_oom(mem_over_limit, gfp_mask,
2026		       get_order(nr_pages * PAGE_SIZE));
2027nomem:
2028	if (!(gfp_mask & __GFP_NOFAIL))
2029		return -ENOMEM;
2030force:
2031	/*
2032	 * The allocation either can't fail or will lead to more memory
2033	 * being freed very soon.  Allow memory usage go over the limit
2034	 * temporarily by force charging it.
2035	 */
2036	page_counter_charge(&memcg->memory, nr_pages);
2037	if (do_memsw_account())
2038		page_counter_charge(&memcg->memsw, nr_pages);
2039	css_get_many(&memcg->css, nr_pages);
2040
2041	return 0;
2042
2043done_restock:
2044	css_get_many(&memcg->css, batch);
2045	if (batch > nr_pages)
2046		refill_stock(memcg, batch - nr_pages);
2047
2048	/*
2049	 * If the hierarchy is above the normal consumption range, schedule
2050	 * reclaim on returning to userland.  We can perform reclaim here
2051	 * if __GFP_RECLAIM is set, but let's always punt for simplicity and so that
2052	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2053	 * not recorded as it most likely matches current's and won't
2054	 * change in the meantime.  As high limit is checked again before
2055	 * reclaim, the cost of mismatch is negligible.
2056	 */
2057	do {
2058		if (page_counter_read(&memcg->memory) > memcg->high) {
2059			/* Don't bother a random interrupted task */
2060			if (in_interrupt()) {
2061				schedule_work(&memcg->high_work);
2062				break;
2063			}
2064			current->memcg_nr_pages_over_high += batch;
2065			set_notify_resume(current);
2066			break;
2067		}
2068	} while ((memcg = parent_mem_cgroup(memcg)));
2069
2070	return 0;
2071}
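/*
 * Worked example of the retry logic above (illustrative): for an order-0
 * charge, nr_pages = 1 and batch = max(CHARGE_BATCH, 1) = 32.  If the
 * stock misses and the 32-page batch bumps into the limit, batch is
 * lowered to nr_pages and the charge retried, so a single page still fits
 * when less than a full batch of room remains.  Only when even that fails
 * do we proceed to direct reclaim, stock draining, the costly-order check
 * and, after MEM_CGROUP_RECLAIM_RETRIES attempts, the memcg OOM path.
 */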
2072
2073static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2074{
2075	if (mem_cgroup_is_root(memcg))
2076		return;
2077
2078	page_counter_uncharge(&memcg->memory, nr_pages);
2079	if (do_memsw_account())
2080		page_counter_uncharge(&memcg->memsw, nr_pages);
2081
2082	css_put_many(&memcg->css, nr_pages);
2083}
2084
2085static void lock_page_lru(struct page *page, int *isolated)
2086{
2087	struct zone *zone = page_zone(page);
2088
2089	spin_lock_irq(&zone->lru_lock);
2090	if (PageLRU(page)) {
2091		struct lruvec *lruvec;
2092
2093		lruvec = mem_cgroup_page_lruvec(page, zone);
2094		ClearPageLRU(page);
2095		del_page_from_lru_list(page, lruvec, page_lru(page));
2096		*isolated = 1;
2097	} else
2098		*isolated = 0;
2099}
2100
2101static void unlock_page_lru(struct page *page, int isolated)
 
2102{
2103	struct zone *zone = page_zone(page);
2104
2105	if (isolated) {
2106		struct lruvec *lruvec;
2107
2108		lruvec = mem_cgroup_page_lruvec(page, zone);
2109		VM_BUG_ON_PAGE(PageLRU(page), page);
2110		SetPageLRU(page);
2111		add_page_to_lru_list(page, lruvec, page_lru(page));
2112	}
2113	spin_unlock_irq(&zone->lru_lock);
2114}
2115
2116static void commit_charge(struct page *page, struct mem_cgroup *memcg,
2117			  bool lrucare)
2118{
2119	int isolated;
2120
2121	VM_BUG_ON_PAGE(page->mem_cgroup, page);
2122
2123	/*
2124	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
2125	 * may already be on some other mem_cgroup's LRU.  Take care of it.
2126	 */
2127	if (lrucare)
2128		lock_page_lru(page, &isolated);
2129
2130	/*
2131	 * Nobody should be changing or seriously looking at
2132	 * page->mem_cgroup at this point:
2133	 *
2134	 * - the page is uncharged
2135	 *
2136	 * - the page is off-LRU
2137	 *
2138	 * - an anonymous fault has exclusive page access, except for
2139	 *   a locked page table
2140	 *
2141	 * - a page cache insertion, a swapin fault, or a migration
2142	 *   have the page locked
2143	 */
2144	page->mem_cgroup = memcg;
2145
2146	if (lrucare)
2147		unlock_page_lru(page, isolated);
2148}
2149
2150#ifndef CONFIG_SLOB
2151static int memcg_alloc_cache_id(void)
2152{
2153	int id, size;
2154	int err;
2155
2156	id = ida_simple_get(&memcg_cache_ida,
2157			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2158	if (id < 0)
2159		return id;
2160
2161	if (id < memcg_nr_cache_ids)
2162		return id;
 
2163
2164	/*
2165	 * There's no space for the new id in memcg_caches arrays,
2166	 * so we have to grow them.
2167	 */
2168	down_write(&memcg_cache_ids_sem);
2169
2170	size = 2 * (id + 1);
2171	if (size < MEMCG_CACHES_MIN_SIZE)
2172		size = MEMCG_CACHES_MIN_SIZE;
2173	else if (size > MEMCG_CACHES_MAX_SIZE)
2174		size = MEMCG_CACHES_MAX_SIZE;
2175
2176	err = memcg_update_all_caches(size);
2177	if (!err)
2178		err = memcg_update_all_list_lrus(size);
2179	if (!err)
2180		memcg_nr_cache_ids = size;
2181
2182	up_write(&memcg_cache_ids_sem);
2183
2184	if (err) {
2185		ida_simple_remove(&memcg_cache_ida, id);
2186		return err;
2187	}
2188	return id;
2189}
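/*
 * Example of the array growth above (illustrative): if the ida hands out
 * id 64 while memcg_nr_cache_ids is still 64, size becomes 2 * (64 + 1) =
 * 130, clamped to the MEMCG_CACHES_MIN_SIZE..MEMCG_CACHES_MAX_SIZE range
 * (defined elsewhere), and every root cache's memcg_caches array plus all
 * list_lrus are resized to that many slots before the id is returned.
 */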
2190
2191static void memcg_free_cache_id(int id)
2192{
2193	ida_simple_remove(&memcg_cache_ida, id);
2194}
2195
2196struct memcg_kmem_cache_create_work {
2197	struct mem_cgroup *memcg;
2198	struct kmem_cache *cachep;
2199	struct work_struct work;
2200};
2201
2202static void memcg_kmem_cache_create_func(struct work_struct *w)
2203{
2204	struct memcg_kmem_cache_create_work *cw =
2205		container_of(w, struct memcg_kmem_cache_create_work, work);
2206	struct mem_cgroup *memcg = cw->memcg;
2207	struct kmem_cache *cachep = cw->cachep;
2208
2209	memcg_create_kmem_cache(memcg, cachep);
 
2210
2211	css_put(&memcg->css);
2212	kfree(cw);
2213}
2214
2215/*
2216 * Enqueue the creation of a per-memcg kmem_cache.
2217 */
2218static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2219					       struct kmem_cache *cachep)
2220{
2221	struct memcg_kmem_cache_create_work *cw;
 
2222
2223	cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
2224	if (!cw)
2225		return;
2226
2227	css_get(&memcg->css);
2228
2229	cw->memcg = memcg;
2230	cw->cachep = cachep;
2231	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
2232
2233	schedule_work(&cw->work);
2234}
2235
2236static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2237					     struct kmem_cache *cachep)
2238{
2239	/*
2240	 * We need to stop accounting when we kmalloc, because if the
2241	 * corresponding kmalloc cache is not yet created, the first allocation
2242	 * in __memcg_schedule_kmem_cache_create will recurse.
2243	 *
2244	 * However, it is better to enclose the whole function. Depending on
2245	 * the debugging options enabled, INIT_WORK(), for instance, can
2246	 * trigger an allocation. This too, will make us recurse. Because at
2247	 * this point we can't allow ourselves back into memcg_kmem_get_cache,
2248	 * the safest choice is to do it like this, wrapping the whole function.
2249	 */
2250	current->memcg_kmem_skip_account = 1;
2251	__memcg_schedule_kmem_cache_create(memcg, cachep);
2252	current->memcg_kmem_skip_account = 0;
2253}
2254
2255/*
2256 * Return the kmem_cache we're supposed to use for a slab allocation.
2257 * We try to use the current memcg's version of the cache.
2258 *
2259 * If the cache does not exist yet and we are the first user of it,
2260 * we either create it immediately, if possible, or create it asynchronously
2261 * in a workqueue.
2262 * In the latter case, we will let the current allocation go through with
2263 * the original cache.
2264 *
2265 * Can't be called in interrupt context or from kernel threads.
2266 * This function needs to be called with rcu_read_lock() held.
2267 */
2268struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
2269{
2270	struct mem_cgroup *memcg;
2271	struct kmem_cache *memcg_cachep;
2272	int kmemcg_id;
2273
2274	VM_BUG_ON(!is_root_cache(cachep));
 
2275
2276	if (cachep->flags & SLAB_ACCOUNT)
2277		gfp |= __GFP_ACCOUNT;
2278
2279	if (!(gfp & __GFP_ACCOUNT))
2280		return cachep;
2281
2282	if (current->memcg_kmem_skip_account)
2283		return cachep;
2284
2285	memcg = get_mem_cgroup_from_mm(current->mm);
2286	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2287	if (kmemcg_id < 0)
2288		goto out;
2289
2290	memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
2291	if (likely(memcg_cachep))
2292		return memcg_cachep;
2293
2294	/*
2295	 * If we are in a safe context (can wait, and not in interrupt
2296	 * context), we could be predictable and return right away.
2297	 * This would guarantee that the allocation being performed
2298	 * already belongs in the new cache.
2299	 *
2300	 * However, there are some clashes that can arrive from locking.
2301	 * For instance, because we acquire the slab_mutex while doing
2302	 * memcg_create_kmem_cache, this means no further allocation
2303	 * could happen with the slab_mutex held. So it's better to
2304	 * defer everything.
2305	 */
2306	memcg_schedule_kmem_cache_create(memcg, cachep);
2307out:
2308	css_put(&memcg->css);
2309	return cachep;
2310}
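/*
 * Illustrative lookup (a sketch, not a literal trace): kmalloc() from a
 * task in memcg M on a SLAB_ACCOUNT cache adds __GFP_ACCOUNT, reads M's
 * kmemcg_id and asks cache_from_memcg_idx() for M's clone of the root
 * cache.  If the clone exists it is returned and the allocation is
 * accounted to M; otherwise creation is queued to a workqueue and this
 * particular allocation quietly falls back to the root cache.
 */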
2311
2312void __memcg_kmem_put_cache(struct kmem_cache *cachep)
2313{
2314	if (!is_root_cache(cachep))
2315		css_put(&cachep->memcg_params.memcg->css);
2316}
2317
2318int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2319			      struct mem_cgroup *memcg)
2320{
2321	unsigned int nr_pages = 1 << order;
2322	struct page_counter *counter;
2323	int ret;
2324
2325	ret = try_charge(memcg, gfp, nr_pages);
2326	if (ret)
2327		return ret;
2328
2329	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2330	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2331		cancel_charge(memcg, nr_pages);
2332		return -ENOMEM;
2333	}
2334
2335	page->mem_cgroup = memcg;
2336
2337	return 0;
2338}
2339
2340int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
 
2341{
2342	struct mem_cgroup *memcg;
2343	int ret = 0;
2344
2345	memcg = get_mem_cgroup_from_mm(current->mm);
2346	if (!mem_cgroup_is_root(memcg))
2347		ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
2348	css_put(&memcg->css);
2349	return ret;
2350}
2351
2352void __memcg_kmem_uncharge(struct page *page, int order)
 
2353{
2354	struct mem_cgroup *memcg = page->mem_cgroup;
2355	unsigned int nr_pages = 1 << order;
2356
2357	if (!memcg)
2358		return;
2359
2360	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
2361
2362	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2363		page_counter_uncharge(&memcg->kmem, nr_pages);
2364
2365	page_counter_uncharge(&memcg->memory, nr_pages);
2366	if (do_memsw_account())
2367		page_counter_uncharge(&memcg->memsw, nr_pages);
2368
2369	page->mem_cgroup = NULL;
2370	css_put_many(&memcg->css, nr_pages);
2371}
2372#endif /* !CONFIG_SLOB */
2373
2374#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2375
2376/*
2377 * Because tail pages are not marked as "used", set them.  We're under
2378 * zone->lru_lock and migration entries are set up in all page mappings.
2379 */
2380void mem_cgroup_split_huge_fixup(struct page *head)
2381{
2382	int i;
2383
2384	if (mem_cgroup_disabled())
2385		return;
2386
2387	for (i = 1; i < HPAGE_PMD_NR; i++)
2388		head[i].mem_cgroup = head->mem_cgroup;
2389
2390	__this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
2391		       HPAGE_PMD_NR);
2392}
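/*
 * Example (illustrative, x86-64 numbers): splitting a 2MB THP of 512
 * 4KB pages copies head->mem_cgroup into the 511 tail pages above and
 * subtracts HPAGE_PMD_NR (512) from the RSS_HUGE counter, since the
 * memory stays charged but is no longer mapped as a huge page.
 */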
2393#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2394
2395#ifdef CONFIG_MEMCG_SWAP
2396static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
2397					 bool charge)
2398{
2399	int val = (charge) ? 1 : -1;
2400	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
2401}
2402
 
2403/**
2404 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2405 * @entry: swap entry to be moved
2406 * @from:  mem_cgroup which the entry is moved from
2407 * @to:  mem_cgroup which the entry is moved to
2408 *
2409 * It succeeds only when the swap_cgroup's record for this entry is the same
2410 * as the mem_cgroup's id of @from.
2411 *
2412 * Returns 0 on success, -EINVAL on failure.
2413 *
2414 * The caller must have charged to @to, IOW, called page_counter_charge() about
2415 * both res and memsw, and called css_get().
2416 */
2417static int mem_cgroup_move_swap_account(swp_entry_t entry,
2418				struct mem_cgroup *from, struct mem_cgroup *to)
2419{
2420	unsigned short old_id, new_id;
2421
2422	old_id = mem_cgroup_id(from);
2423	new_id = mem_cgroup_id(to);
2424
2425	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2426		mem_cgroup_swap_statistics(from, false);
2427		mem_cgroup_swap_statistics(to, true);
2428		return 0;
2429	}
2430	return -EINVAL;
2431}
2432#else
2433static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2434				struct mem_cgroup *from, struct mem_cgroup *to)
2435{
2436	return -EINVAL;
2437}
2438#endif
2439
2440static DEFINE_MUTEX(memcg_limit_mutex);
2441
2442static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2443				   unsigned long limit)
2444{
2445	unsigned long curusage;
2446	unsigned long oldusage;
2447	bool enlarge = false;
2448	int retry_count;
2449	int ret;
2450
2451	/*
2452	 * To keep hierarchical_reclaim simple, how long we should retry
2453	 * depends on the caller.  We set the retry count to be a function
2454	 * of the number of children we should visit in this loop.
2455	 */
2456	retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2457		      mem_cgroup_count_children(memcg);
2458
2459	oldusage = page_counter_read(&memcg->memory);
2460
2461	do {
2462		if (signal_pending(current)) {
2463			ret = -EINTR;
2464			break;
2465		}
2466
2467		mutex_lock(&memcg_limit_mutex);
2468		if (limit > memcg->memsw.limit) {
2469			mutex_unlock(&memcg_limit_mutex);
2470			ret = -EINVAL;
2471			break;
2472		}
2473		if (limit > memcg->memory.limit)
2474			enlarge = true;
2475		ret = page_counter_limit(&memcg->memory, limit);
2476		mutex_unlock(&memcg_limit_mutex);
2477
2478		if (!ret)
2479			break;
2480
2481		try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
2482
2483		curusage = page_counter_read(&memcg->memory);
2484		/* Usage is reduced ? */
2485		if (curusage >= oldusage)
2486			retry_count--;
2487		else
2488			oldusage = curusage;
2489	} while (retry_count);
2490
2491	if (!ret && enlarge)
2492		memcg_oom_recover(memcg);
2493
2494	return ret;
2495}
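/*
 * Example of the retry budget above (illustrative): with
 * MEM_CGROUP_RECLAIM_RETRIES of 5 and mem_cgroup_count_children()
 * returning 3, retry_count starts at 15.  Each reclaim pass that fails to
 * reduce usage costs one retry, so a shrink that makes no progress at all
 * gives up after 15 passes and returns the last page_counter_limit()
 * error.
 */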
2496
2497static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2498					 unsigned long limit)
2499{
2500	unsigned long curusage;
2501	unsigned long oldusage;
2502	bool enlarge = false;
2503	int retry_count;
2504	int ret;
2505
2506	/* see mem_cgroup_resize_limit() */
2507	retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2508		      mem_cgroup_count_children(memcg);
2509
2510	oldusage = page_counter_read(&memcg->memsw);
2511
2512	do {
2513		if (signal_pending(current)) {
2514			ret = -EINTR;
2515			break;
2516		}
2517
2518		mutex_lock(&memcg_limit_mutex);
2519		if (limit < memcg->memory.limit) {
2520			mutex_unlock(&memcg_limit_mutex);
2521			ret = -EINVAL;
2522			break;
2523		}
2524		if (limit > memcg->memsw.limit)
2525			enlarge = true;
2526		ret = page_counter_limit(&memcg->memsw, limit);
2527		mutex_unlock(&memcg_limit_mutex);
2528
2529		if (!ret)
2530			break;
2531
2532		try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
2533
2534		curusage = page_counter_read(&memcg->memsw);
2535		/* Usage is reduced ? */
2536		if (curusage >= oldusage)
2537			retry_count--;
2538		else
2539			oldusage = curusage;
2540	} while (retry_count);
2541
2542	if (!ret && enlarge)
2543		memcg_oom_recover(memcg);
2544
2545	return ret;
2546}
2547
2548unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2549					    gfp_t gfp_mask,
2550					    unsigned long *total_scanned)
2551{
2552	unsigned long nr_reclaimed = 0;
2553	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2554	unsigned long reclaimed;
2555	int loop = 0;
2556	struct mem_cgroup_tree_per_zone *mctz;
2557	unsigned long excess;
2558	unsigned long nr_scanned;
2559
2560	if (order > 0)
2561		return 0;
2562
2563	mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
2564	/*
2565	 * This loop can run for a while, especially if mem_cgroups continuously
2566	 * keep exceeding their soft limit and putting the system under
2567	 * pressure.
2568	 */
2569	do {
2570		if (next_mz)
2571			mz = next_mz;
2572		else
2573			mz = mem_cgroup_largest_soft_limit_node(mctz);
2574		if (!mz)
2575			break;
2576
2577		nr_scanned = 0;
2578		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
2579						    gfp_mask, &nr_scanned);
2580		nr_reclaimed += reclaimed;
2581		*total_scanned += nr_scanned;
2582		spin_lock_irq(&mctz->lock);
2583		__mem_cgroup_remove_exceeded(mz, mctz);
2584
2585		/*
2586		 * If we failed to reclaim anything from this memory cgroup
2587		 * it is time to move on to the next cgroup
2588		 */
2589		next_mz = NULL;
2590		if (!reclaimed)
2591			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2592
2593		excess = soft_limit_excess(mz->memcg);
2594		/*
2595		 * One school of thought says that we should not add
2596		 * back the node to the tree if reclaim returns 0.
2597		 * But our reclaim could return 0 simply because, due
2598		 * to priority, we are exposing a smaller subset of
2599		 * memory to reclaim from. Consider this as a longer
2600		 * term TODO.
2601		 */
2602		/* If excess == 0, no tree ops */
2603		__mem_cgroup_insert_exceeded(mz, mctz, excess);
2604		spin_unlock_irq(&mctz->lock);
2605		css_put(&mz->memcg->css);
2606		loop++;
2607		/*
2608		 * Could not reclaim anything and there are no more
2609		 * mem cgroups to try or we seem to be looping without
2610		 * reclaiming anything.
2611		 */
2612		if (!nr_reclaimed &&
2613			(next_mz == NULL ||
2614			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2615			break;
2616	} while (!nr_reclaimed);
2617	if (next_mz)
2618		css_put(&next_mz->memcg->css);
2619	return nr_reclaimed;
2620}
2621
2622/*
2623 * Test whether @memcg has children, dead or alive.  Note that this
2624 * function doesn't care whether @memcg has use_hierarchy enabled and
2625 * returns %true if there are child csses according to the cgroup
2626 * hierarchy.  Testing use_hierarchy is the caller's responsiblity.
2627 */
2628static inline bool memcg_has_children(struct mem_cgroup *memcg)
2629{
2630	bool ret;
2631
2632	rcu_read_lock();
2633	ret = css_next_child(NULL, &memcg->css);
2634	rcu_read_unlock();
2635	return ret;
2636}
2637
2638/*
2639 * Reclaims as many pages from the given memcg as possible and moves
2640 * the rest to the parent.
2641 *
2642 * Caller is responsible for holding css reference for memcg.
2643 */
2644static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2645{
2646	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2647
2648	/* we call try-to-free pages to make this cgroup empty */
2649	lru_add_drain_all();
2650	/* try to free all pages in this cgroup */
2651	while (nr_retries && page_counter_read(&memcg->memory)) {
2652		int progress;
2653
2654		if (signal_pending(current))
2655			return -EINTR;
2656
2657		progress = try_to_free_mem_cgroup_pages(memcg, 1,
2658							GFP_KERNEL, true);
2659		if (!progress) {
2660			nr_retries--;
2661			/* maybe some writeback is necessary */
2662			congestion_wait(BLK_RW_ASYNC, HZ/10);
2663		}
2664
2665	}
2666
2667	return 0;
2668}
2669
2670static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2671					    char *buf, size_t nbytes,
2672					    loff_t off)
2673{
2674	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2675
2676	if (mem_cgroup_is_root(memcg))
2677		return -EINVAL;
2678	return mem_cgroup_force_empty(memcg) ?: nbytes;
2679}
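/*
 * Legacy interface usage (illustrative, assuming the v1 hierarchy is
 * mounted at /sys/fs/cgroup/memory):
 *
 *   echo > /sys/fs/cgroup/memory/<group>/memory.force_empty
 *
 * Any write triggers mem_cgroup_force_empty(); it is rejected with
 * -EINVAL on the root cgroup and aborts with -EINTR if a signal arrives
 * while reclaiming.
 */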
2680
2681static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2682				     struct cftype *cft)
2683{
2684	return mem_cgroup_from_css(css)->use_hierarchy;
2685}
2686
2687static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2688				      struct cftype *cft, u64 val)
2689{
2690	int retval = 0;
2691	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2692	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
2693
2694	if (memcg->use_hierarchy == val)
2695		return 0;
2696
2697	/*
2698	 * If parent's use_hierarchy is set, we can't make any modifications
2699	 * in the child subtrees. If it is unset, then the change can
2700	 * occur, provided the current cgroup has no children.
2701	 *
2702	 * For the root cgroup, parent_memcg is NULL; we allow the value to be
2703	 * set if there are no children.
2704	 */
2705	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
2706				(val == 1 || val == 0)) {
2707		if (!memcg_has_children(memcg))
2708			memcg->use_hierarchy = val;
2709		else
2710			retval = -EBUSY;
2711	} else
2712		retval = -EINVAL;
2713
2714	return retval;
2715}
2716
2717static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
2718{
2719	struct mem_cgroup *iter;
2720	int i;
2721
2722	memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT);
2723
2724	for_each_mem_cgroup_tree(iter, memcg) {
2725		for (i = 0; i < MEMCG_NR_STAT; i++)
2726			stat[i] += mem_cgroup_read_stat(iter, i);
2727	}
2728}
2729
2730static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
2731{
2732	struct mem_cgroup *iter;
2733	int i;
2734
2735	memset(events, 0, sizeof(*events) * MEMCG_NR_EVENTS);
2736
2737	for_each_mem_cgroup_tree(iter, memcg) {
2738		for (i = 0; i < MEMCG_NR_EVENTS; i++)
2739			events[i] += mem_cgroup_read_events(iter, i);
2740	}
2741}
2742
2743static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
2744{
2745	unsigned long val = 0;
2746
2747	if (mem_cgroup_is_root(memcg)) {
2748		struct mem_cgroup *iter;
2749
2750		for_each_mem_cgroup_tree(iter, memcg) {
2751			val += mem_cgroup_read_stat(iter,
2752					MEM_CGROUP_STAT_CACHE);
2753			val += mem_cgroup_read_stat(iter,
2754					MEM_CGROUP_STAT_RSS);
2755			if (swap)
2756				val += mem_cgroup_read_stat(iter,
2757						MEM_CGROUP_STAT_SWAP);
2758		}
2759	} else {
2760		if (!swap)
2761			val = page_counter_read(&memcg->memory);
2762		else
2763			val = page_counter_read(&memcg->memsw);
2764	}
2765	return val;
2766}
2767
2768enum {
2769	RES_USAGE,
2770	RES_LIMIT,
2771	RES_MAX_USAGE,
2772	RES_FAILCNT,
2773	RES_SOFT_LIMIT,
2774};
2775
2776static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
2777			       struct cftype *cft)
2778{
2779	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2780	struct page_counter *counter;
2781
2782	switch (MEMFILE_TYPE(cft->private)) {
2783	case _MEM:
2784		counter = &memcg->memory;
2785		break;
2786	case _MEMSWAP:
2787		counter = &memcg->memsw;
2788		break;
2789	case _KMEM:
2790		counter = &memcg->kmem;
2791		break;
2792	case _TCP:
2793		counter = &memcg->tcpmem;
2794		break;
2795	default:
2796		BUG();
2797	}
2798
2799	switch (MEMFILE_ATTR(cft->private)) {
2800	case RES_USAGE:
2801		if (counter == &memcg->memory)
2802			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
2803		if (counter == &memcg->memsw)
2804			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
2805		return (u64)page_counter_read(counter) * PAGE_SIZE;
2806	case RES_LIMIT:
2807		return (u64)counter->limit * PAGE_SIZE;
2808	case RES_MAX_USAGE:
2809		return (u64)counter->watermark * PAGE_SIZE;
2810	case RES_FAILCNT:
2811		return counter->failcnt;
2812	case RES_SOFT_LIMIT:
2813		return (u64)memcg->soft_limit * PAGE_SIZE;
2814	default:
2815		BUG();
2816	}
2817}
2818
2819#ifndef CONFIG_SLOB
2820static int memcg_online_kmem(struct mem_cgroup *memcg)
2821{
2822	int memcg_id;
2823
2824	if (cgroup_memory_nokmem)
2825		return 0;
2826
2827	BUG_ON(memcg->kmemcg_id >= 0);
2828	BUG_ON(memcg->kmem_state);
2829
2830	memcg_id = memcg_alloc_cache_id();
2831	if (memcg_id < 0)
2832		return memcg_id;
2833
2834	static_branch_inc(&memcg_kmem_enabled_key);
2835	/*
2836	 * A memory cgroup is considered kmem-online as soon as it gets
2837	 * kmemcg_id. Setting the id after enabling static branching will
2838	 * guarantee no one starts accounting before all call sites are
2839	 * patched.
2840	 */
2841	memcg->kmemcg_id = memcg_id;
2842	memcg->kmem_state = KMEM_ONLINE;
2843
2844	return 0;
2845}
2846
2847static void memcg_offline_kmem(struct mem_cgroup *memcg)
2848{
2849	struct cgroup_subsys_state *css;
2850	struct mem_cgroup *parent, *child;
2851	int kmemcg_id;
2852
2853	if (memcg->kmem_state != KMEM_ONLINE)
2854		return;
2855	/*
2856	 * Clear the online state before clearing memcg_caches array
2857	 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
2858	 * guarantees that no cache will be created for this cgroup
2859	 * after we are done (see memcg_create_kmem_cache()).
2860	 */
2861	memcg->kmem_state = KMEM_ALLOCATED;
2862
2863	memcg_deactivate_kmem_caches(memcg);
2864
2865	kmemcg_id = memcg->kmemcg_id;
2866	BUG_ON(kmemcg_id < 0);
2867
2868	parent = parent_mem_cgroup(memcg);
2869	if (!parent)
2870		parent = root_mem_cgroup;
2871
2872	/*
2873	 * Change kmemcg_id of this cgroup and all its descendants to the
2874	 * parent's id, and then move all entries from this cgroup's list_lrus
2875	 * to ones of the parent. After we have finished, all list_lrus
2876	 * corresponding to this cgroup are guaranteed to remain empty. The
2877	 * ordering is imposed by list_lru_node->lock taken by
2878	 * memcg_drain_all_list_lrus().
2879	 */
2880	css_for_each_descendant_pre(css, &memcg->css) {
2881		child = mem_cgroup_from_css(css);
2882		BUG_ON(child->kmemcg_id != kmemcg_id);
2883		child->kmemcg_id = parent->kmemcg_id;
2884		if (!memcg->use_hierarchy)
2885			break;
2886	}
2887	memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
2888
2889	memcg_free_cache_id(kmemcg_id);
2890}
2891
2892static void memcg_free_kmem(struct mem_cgroup *memcg)
2893{
2894	/* css_alloc() failed, offlining didn't happen */
2895	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
2896		memcg_offline_kmem(memcg);
2897
2898	if (memcg->kmem_state == KMEM_ALLOCATED) {
2899		memcg_destroy_kmem_caches(memcg);
2900		static_branch_dec(&memcg_kmem_enabled_key);
2901		WARN_ON(page_counter_read(&memcg->kmem));
2902	}
2903}
2904#else
2905static int memcg_online_kmem(struct mem_cgroup *memcg)
2906{
2907	return 0;
2908}
2909static void memcg_offline_kmem(struct mem_cgroup *memcg)
2910{
2911}
2912static void memcg_free_kmem(struct mem_cgroup *memcg)
2913{
2914}
2915#endif /* !CONFIG_SLOB */
2916
2917static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
2918				   unsigned long limit)
2919{
2920	int ret;
2921
2922	mutex_lock(&memcg_limit_mutex);
2923	ret = page_counter_limit(&memcg->kmem, limit);
2924	mutex_unlock(&memcg_limit_mutex);
2925	return ret;
2926}
2927
2928static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
2929{
2930	int ret;
2931
2932	mutex_lock(&memcg_limit_mutex);
2933
2934	ret = page_counter_limit(&memcg->tcpmem, limit);
2935	if (ret)
2936		goto out;
2937
2938	if (!memcg->tcpmem_active) {
2939		/*
2940		 * The active flag needs to be written after the static_key
2941		 * update. This is what guarantees that the socket activation
2942		 * function is the last one to run. See sock_update_memcg() for
2943		 * details, and note that we don't mark any socket as belonging
2944		 * to this memcg until that flag is up.
2945		 *
2946		 * We need to do this, because static_keys will span multiple
2947		 * sites, but we can't control their order. If we mark a socket
2948		 * as accounted, but the accounting functions are not patched in
2949		 * yet, we'll lose accounting.
2950		 *
2951		 * We never race with the readers in sock_update_memcg(),
2952		 * because when this value changes, the code to process it is not
2953		 * patched in yet.
2954		 */
2955		static_branch_inc(&memcg_sockets_enabled_key);
2956		memcg->tcpmem_active = true;
2957	}
2958out:
2959	mutex_unlock(&memcg_limit_mutex);
2960	return ret;
2961}
2962
2963/*
2964 * The user of this function is...
2965 * RES_LIMIT.
2966 */
2967static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
2968				char *buf, size_t nbytes, loff_t off)
2969{
2970	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2971	unsigned long nr_pages;
2972	int ret;
2973
2974	buf = strstrip(buf);
2975	ret = page_counter_memparse(buf, "-1", &nr_pages);
2976	if (ret)
2977		return ret;
2978
2979	switch (MEMFILE_ATTR(of_cft(of)->private)) {
2980	case RES_LIMIT:
2981		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
2982			ret = -EINVAL;
2983			break;
2984		}
2985		switch (MEMFILE_TYPE(of_cft(of)->private)) {
2986		case _MEM:
2987			ret = mem_cgroup_resize_limit(memcg, nr_pages);
2988			break;
2989		case _MEMSWAP:
2990			ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
2991			break;
2992		case _KMEM:
2993			ret = memcg_update_kmem_limit(memcg, nr_pages);
 
2994			break;
2995		case _TCP:
2996			ret = memcg_update_tcp_limit(memcg, nr_pages);
2997			break;
2998		}
2999		break;
3000	case RES_SOFT_LIMIT:
3001		memcg->soft_limit = nr_pages;
3002		ret = 0;
3003		break;
3004	}
3005	return ret ?: nbytes;
3006}
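/*
 * Legacy (cgroup v1) usage of the write handler above (illustrative,
 * assuming the controller is mounted at /sys/fs/cgroup/memory):
 *
 *   echo 512M > memory.limit_in_bytes         - _MEM, RES_LIMIT
 *   echo 1G   > memory.memsw.limit_in_bytes   - _MEMSWAP, RES_LIMIT
 *   echo 256M > memory.kmem.limit_in_bytes    - _KMEM, RES_LIMIT
 *   echo 64M  > memory.soft_limit_in_bytes    - RES_SOFT_LIMIT
 *
 * page_counter_memparse() also accepts "-1" to mean "unlimited".
 */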
3007
3008static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3009				size_t nbytes, loff_t off)
3010{
3011	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3012	struct page_counter *counter;
3013
3014	switch (MEMFILE_TYPE(of_cft(of)->private)) {
3015	case _MEM:
3016		counter = &memcg->memory;
3017		break;
3018	case _MEMSWAP:
3019		counter = &memcg->memsw;
3020		break;
3021	case _KMEM:
3022		counter = &memcg->kmem;
3023		break;
3024	case _TCP:
3025		counter = &memcg->tcpmem;
3026		break;
3027	default:
3028		BUG();
3029	}
3030
3031	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3032	case RES_MAX_USAGE:
3033		page_counter_reset_watermark(counter);
3034		break;
3035	case RES_FAILCNT:
3036		counter->failcnt = 0;
3037		break;
3038	default:
3039		BUG();
3040	}
3041
3042	return nbytes;
3043}
3044
3045static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3046					struct cftype *cft)
3047{
3048	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3049}
3050
3051#ifdef CONFIG_MMU
3052static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3053					struct cftype *cft, u64 val)
3054{
3055	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3056
3057	if (val & ~MOVE_MASK)
3058		return -EINVAL;
3059
3060	/*
3061	 * No kind of locking is needed in here, because ->can_attach() will
3062	 * check this value once in the beginning of the process, and then carry
3063	 * on with stale data. This means that changes to this value will only
3064	 * affect task migrations starting after the change.
3065	 */
3066	memcg->move_charge_at_immigrate = val;
3067	return 0;
3068}
3069#else
3070static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3071					struct cftype *cft, u64 val)
3072{
3073	return -ENOSYS;
3074}
3075#endif
3076
3077#ifdef CONFIG_NUMA
3078static int memcg_numa_stat_show(struct seq_file *m, void *v)
3079{
3080	struct numa_stat {
3081		const char *name;
3082		unsigned int lru_mask;
3083	};
3084
3085	static const struct numa_stat stats[] = {
3086		{ "total", LRU_ALL },
3087		{ "file", LRU_ALL_FILE },
3088		{ "anon", LRU_ALL_ANON },
3089		{ "unevictable", BIT(LRU_UNEVICTABLE) },
3090	};
3091	const struct numa_stat *stat;
3092	int nid;
3093	unsigned long nr;
3094	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
 
3095
3096	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3097		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3098		seq_printf(m, "%s=%lu", stat->name, nr);
3099		for_each_node_state(nid, N_MEMORY) {
3100			nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3101							  stat->lru_mask);
3102			seq_printf(m, " N%d=%lu", nid, nr);
3103		}
3104		seq_putc(m, '\n');
3105	}
3106
3107	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3108		struct mem_cgroup *iter;
3109
3110		nr = 0;
3111		for_each_mem_cgroup_tree(iter, memcg)
3112			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3113		seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3114		for_each_node_state(nid, N_MEMORY) {
3115			nr = 0;
3116			for_each_mem_cgroup_tree(iter, memcg)
3117				nr += mem_cgroup_node_nr_lru_pages(
3118					iter, nid, stat->lru_mask);
3119			seq_printf(m, " N%d=%lu", nid, nr);
3120		}
3121		seq_putc(m, '\n');
3122	}
3123
3124	return 0;
3125}
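/*
 * Example memory.numa_stat output on a two-node machine (illustrative
 * numbers, in pages):
 *
 *   total=3456 N0=2048 N1=1408
 *   file=2100 N0=1300 N1=800
 *   anon=1300 N0=700 N1=600
 *   unevictable=56 N0=48 N1=8
 *   hierarchical_total=4000 N0=2400 N1=1600
 *   ... (one hierarchical_* line per entry in stats[], summed over the subtree)
 */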
3126#endif /* CONFIG_NUMA */
3127
3128static int memcg_stat_show(struct seq_file *m, void *v)
3129{
3130	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3131	unsigned long memory, memsw;
3132	struct mem_cgroup *mi;
3133	unsigned int i;
3134
3135	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
3136		     MEM_CGROUP_STAT_NSTATS);
3137	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
3138		     MEM_CGROUP_EVENTS_NSTATS);
3139	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3140
3141	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3142		if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
3143			continue;
3144		seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
3145			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
 
3146	}
3147
3148	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
3149		seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
3150			   mem_cgroup_read_events(memcg, i));
3151
3152	for (i = 0; i < NR_LRU_LISTS; i++)
3153		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3154			   mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
 
3155
3156	/* Hierarchical information */
3157	memory = memsw = PAGE_COUNTER_MAX;
3158	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3159		memory = min(memory, mi->memory.limit);
3160		memsw = min(memsw, mi->memsw.limit);
3161	}
3162	seq_printf(m, "hierarchical_memory_limit %llu\n",
3163		   (u64)memory * PAGE_SIZE);
3164	if (do_memsw_account())
3165		seq_printf(m, "hierarchical_memsw_limit %llu\n",
3166			   (u64)memsw * PAGE_SIZE);
3167
3168	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3169		unsigned long long val = 0;
3170
3171		if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
3172			continue;
3173		for_each_mem_cgroup_tree(mi, memcg)
3174			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
3175		seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
3176	}
3177
3178	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
3179		unsigned long long val = 0;
3180
3181		for_each_mem_cgroup_tree(mi, memcg)
3182			val += mem_cgroup_read_events(mi, i);
3183		seq_printf(m, "total_%s %llu\n",
3184			   mem_cgroup_events_names[i], val);
3185	}
3186
3187	for (i = 0; i < NR_LRU_LISTS; i++) {
3188		unsigned long long val = 0;
3189
3190		for_each_mem_cgroup_tree(mi, memcg)
3191			val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3192		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
3193	}
3194
3195#ifdef CONFIG_DEBUG_VM
3196	{
3197		int nid, zid;
3198		struct mem_cgroup_per_zone *mz;
3199		struct zone_reclaim_stat *rstat;
3200		unsigned long recent_rotated[2] = {0, 0};
3201		unsigned long recent_scanned[2] = {0, 0};
3202
3203		for_each_online_node(nid)
3204			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
3205				mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
3206				rstat = &mz->lruvec.reclaim_stat;
3207
3208				recent_rotated[0] += rstat->recent_rotated[0];
3209				recent_rotated[1] += rstat->recent_rotated[1];
3210				recent_scanned[0] += rstat->recent_scanned[0];
3211				recent_scanned[1] += rstat->recent_scanned[1];
3212			}
3213		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3214		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3215		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3216		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
3217	}
3218#endif
3219
3220	return 0;
3221}
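/*
 * Example memory.stat output (illustrative numbers; stat lines are in
 * bytes, event lines are plain counts):
 *
 *   cache 1048576
 *   rss 4194304
 *   ...
 *   pgpgin 12345
 *   pgfault 6789
 *   ...
 *   hierarchical_memory_limit 536870912
 *   total_cache 2097152
 *   ...
 *
 * The "swap" lines are only printed when do_memsw_account() is true, and
 * the recent_rotated/recent_scanned block only with CONFIG_DEBUG_VM.
 */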
3222
3223static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3224				      struct cftype *cft)
3225{
3226	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3227
3228	return mem_cgroup_swappiness(memcg);
3229}
3230
3231static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3232				       struct cftype *cft, u64 val)
3233{
3234	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3235
3236	if (val > 100)
3237		return -EINVAL;
3238
3239	if (css->parent)
3240		memcg->swappiness = val;
3241	else
3242		vm_swappiness = val;
3243
3244	return 0;
3245}
3246
3247static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3248{
3249	struct mem_cgroup_threshold_ary *t;
3250	unsigned long usage;
3251	int i;
3252
3253	rcu_read_lock();
3254	if (!swap)
3255		t = rcu_dereference(memcg->thresholds.primary);
3256	else
3257		t = rcu_dereference(memcg->memsw_thresholds.primary);
3258
3259	if (!t)
3260		goto unlock;
3261
3262	usage = mem_cgroup_usage(memcg, swap);
3263
3264	/*
3265	 * current_threshold points to threshold just below or equal to usage.
3266	 * If that is no longer true, a threshold was crossed after the last
3267	 * call of __mem_cgroup_threshold().
3268	 */
3269	i = t->current_threshold;
3270
3271	/*
3272	 * Iterate backward over array of thresholds starting from
3273	 * current_threshold and check if a threshold is crossed.
3274	 * If none of thresholds below usage is crossed, we read
3275	 * only one element of the array here.
3276	 */
3277	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3278		eventfd_signal(t->entries[i].eventfd, 1);
3279
3280	/* i = current_threshold + 1 */
3281	i++;
3282
3283	/*
3284	 * Iterate forward over array of thresholds starting from
3285	 * current_threshold+1 and check if a threshold is crossed.
3286	 * If none of thresholds above usage is crossed, we read
3287	 * only one element of the array here.
3288	 */
3289	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3290		eventfd_signal(t->entries[i].eventfd, 1);
3291
3292	/* Update current_threshold */
3293	t->current_threshold = i - 1;
3294unlock:
3295	rcu_read_unlock();
3296}
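/*
 * Worked example (illustrative): with sorted thresholds {4M, 8M, 16M} and
 * current_threshold on the 4M entry, a usage reading of 10M makes the
 * backward loop signal nothing (4M is not above usage), the forward loop
 * signal the 8M eventfd and stop at 16M, and current_threshold ends up on
 * the 8M entry.
 */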
3297
3298static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3299{
3300	while (memcg) {
3301		__mem_cgroup_threshold(memcg, false);
3302		if (do_memsw_account())
3303			__mem_cgroup_threshold(memcg, true);
3304
3305		memcg = parent_mem_cgroup(memcg);
3306	}
3307}
3308
3309static int compare_thresholds(const void *a, const void *b)
3310{
3311	const struct mem_cgroup_threshold *_a = a;
3312	const struct mem_cgroup_threshold *_b = b;
3313
3314	if (_a->threshold > _b->threshold)
3315		return 1;
3316
3317	if (_a->threshold < _b->threshold)
3318		return -1;
3319
3320	return 0;
3321}
3322
3323static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
3324{
3325	struct mem_cgroup_eventfd_list *ev;
3326
3327	spin_lock(&memcg_oom_lock);
3328
3329	list_for_each_entry(ev, &memcg->oom_notify, list)
3330		eventfd_signal(ev->eventfd, 1);
3331
3332	spin_unlock(&memcg_oom_lock);
3333	return 0;
3334}
3335
3336static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
3337{
3338	struct mem_cgroup *iter;
3339
3340	for_each_mem_cgroup_tree(iter, memcg)
3341		mem_cgroup_oom_notify_cb(iter);
3342}
3343
3344static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3345	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
3346{
3347	struct mem_cgroup_thresholds *thresholds;
3348	struct mem_cgroup_threshold_ary *new;
3349	unsigned long threshold;
3350	unsigned long usage;
3351	int i, size, ret;
3352
3353	ret = page_counter_memparse(args, "-1", &threshold);
3354	if (ret)
3355		return ret;
3356
3357	mutex_lock(&memcg->thresholds_lock);
3358
3359	if (type == _MEM) {
3360		thresholds = &memcg->thresholds;
3361		usage = mem_cgroup_usage(memcg, false);
3362	} else if (type == _MEMSWAP) {
3363		thresholds = &memcg->memsw_thresholds;
3364		usage = mem_cgroup_usage(memcg, true);
3365	} else
3366		BUG();
3367
3368	/* Check if a threshold crossed before adding a new one */
3369	if (thresholds->primary)
3370		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
3371
3372	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3373
3374	/* Allocate memory for new array of thresholds */
3375	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3376			GFP_KERNEL);
3377	if (!new) {
3378		ret = -ENOMEM;
3379		goto unlock;
3380	}
3381	new->size = size;
3382
3383	/* Copy thresholds (if any) to new array */
3384	if (thresholds->primary) {
3385		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3386				sizeof(struct mem_cgroup_threshold));
3387	}
3388
3389	/* Add new threshold */
3390	new->entries[size - 1].eventfd = eventfd;
3391	new->entries[size - 1].threshold = threshold;
3392
3393	/* Sort thresholds. Registering of new threshold isn't time-critical */
3394	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3395			compare_thresholds, NULL);
3396
3397	/* Find current threshold */
3398	new->current_threshold = -1;
3399	for (i = 0; i < size; i++) {
3400		if (new->entries[i].threshold <= usage) {
3401			/*
3402			 * new->current_threshold will not be used until
3403			 * rcu_assign_pointer(), so it's safe to increment
3404			 * it here.
3405			 */
3406			++new->current_threshold;
3407		} else
3408			break;
3409	}
3410
3411	/* Free old spare buffer and save old primary buffer as spare */
3412	kfree(thresholds->spare);
3413	thresholds->spare = thresholds->primary;
3414
3415	rcu_assign_pointer(thresholds->primary, new);
3416
3417	/* To be sure that nobody uses thresholds */
3418	synchronize_rcu();
3419
3420unlock:
3421	mutex_unlock(&memcg->thresholds_lock);
3422
3423	return ret;
3424}
3425
3426static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3427	struct eventfd_ctx *eventfd, const char *args)
3428{
3429	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
3430}
3431
3432static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
3433	struct eventfd_ctx *eventfd, const char *args)
3434{
3435	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
3436}
3437
3438static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3439	struct eventfd_ctx *eventfd, enum res_type type)
3440{
3441	struct mem_cgroup_thresholds *thresholds;
3442	struct mem_cgroup_threshold_ary *new;
3443	unsigned long usage;
3444	int i, j, size;
3445
3446	mutex_lock(&memcg->thresholds_lock);
3447
3448	if (type == _MEM) {
3449		thresholds = &memcg->thresholds;
3450		usage = mem_cgroup_usage(memcg, false);
3451	} else if (type == _MEMSWAP) {
3452		thresholds = &memcg->memsw_thresholds;
3453		usage = mem_cgroup_usage(memcg, true);
3454	} else
3455		BUG();
3456
3457	if (!thresholds->primary)
3458		goto unlock;
3459
3460	/* Check if a threshold crossed before removing */
3461	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
3462
3463	/* Calculate the new number of thresholds */
3464	size = 0;
3465	for (i = 0; i < thresholds->primary->size; i++) {
3466		if (thresholds->primary->entries[i].eventfd != eventfd)
3467			size++;
3468	}
3469
3470	new = thresholds->spare;
3471
3472	/* Set thresholds array to NULL if we don't have thresholds */
3473	if (!size) {
3474		kfree(new);
3475		new = NULL;
3476		goto swap_buffers;
3477	}
3478
3479	new->size = size;
3480
3481	/* Copy thresholds and find current threshold */
3482	new->current_threshold = -1;
3483	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3484		if (thresholds->primary->entries[i].eventfd == eventfd)
3485			continue;
3486
3487		new->entries[j] = thresholds->primary->entries[i];
3488		if (new->entries[j].threshold <= usage) {
3489			/*
3490			 * new->current_threshold will not be used
3491			 * until rcu_assign_pointer(), so it's safe to increment
3492			 * it here.
3493			 */
3494			++new->current_threshold;
3495		}
3496		j++;
3497	}
3498
3499swap_buffers:
3500	/* Swap primary and spare array */
3501	thresholds->spare = thresholds->primary;
3502
3503	rcu_assign_pointer(thresholds->primary, new);
3504
3505	/* To be sure that nobody uses thresholds */
3506	synchronize_rcu();
3507
3508	/* If all events are unregistered, free the spare array */
3509	if (!new) {
3510		kfree(thresholds->spare);
3511		thresholds->spare = NULL;
3512	}
3513unlock:
3514	mutex_unlock(&memcg->thresholds_lock);
3515}
3516
3517static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3518	struct eventfd_ctx *eventfd)
3519{
3520	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
3521}
3522
3523static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3524	struct eventfd_ctx *eventfd)
3525{
3526	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
3527}
3528
3529static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
3530	struct eventfd_ctx *eventfd, const char *args)
3531{
3532	struct mem_cgroup_eventfd_list *event;
3533
3534	event = kmalloc(sizeof(*event),	GFP_KERNEL);
3535	if (!event)
3536		return -ENOMEM;
3537
3538	spin_lock(&memcg_oom_lock);
3539
3540	event->eventfd = eventfd;
3541	list_add(&event->list, &memcg->oom_notify);
3542
3543	/* already in OOM ? */
3544	if (memcg->under_oom)
3545		eventfd_signal(eventfd, 1);
3546	spin_unlock(&memcg_oom_lock);
3547
3548	return 0;
3549}
3550
3551static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
3552	struct eventfd_ctx *eventfd)
3553{
3554	struct mem_cgroup_eventfd_list *ev, *tmp;
3555
3556	spin_lock(&memcg_oom_lock);
3557
3558	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
3559		if (ev->eventfd == eventfd) {
3560			list_del(&ev->list);
3561			kfree(ev);
3562		}
3563	}
3564
3565	spin_unlock(&memcg_oom_lock);
3566}
3567
3568static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3569{
3570	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3571
3572	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
3573	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
3574	return 0;
3575}
3576
3577static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3578	struct cftype *cft, u64 val)
3579{
3580	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3581
3582	/* cannot set to root cgroup and only 0 and 1 are allowed */
3583	if (!css->parent || !((val == 0) || (val == 1)))
3584		return -EINVAL;
3585
3586	memcg->oom_kill_disable = val;
3587	if (!val)
3588		memcg_oom_recover(memcg);
3589
3590	return 0;
3591}
3592
3593#ifdef CONFIG_CGROUP_WRITEBACK
3594
3595struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
3596{
3597	return &memcg->cgwb_list;
3598}
3599
3600static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3601{
3602	return wb_domain_init(&memcg->cgwb_domain, gfp);
3603}
3604
3605static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3606{
3607	wb_domain_exit(&memcg->cgwb_domain);
3608}
3609
3610static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3611{
3612	wb_domain_size_changed(&memcg->cgwb_domain);
3613}
3614
3615struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3616{
3617	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3618
3619	if (!memcg->css.parent)
3620		return NULL;
3621
3622	return &memcg->cgwb_domain;
3623}
3624
3625/**
3626 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3627 * @wb: bdi_writeback in question
3628 * @pfilepages: out parameter for number of file pages
3629 * @pheadroom: out parameter for number of allocatable pages according to memcg
3630 * @pdirty: out parameter for number of dirty pages
3631 * @pwriteback: out parameter for number of pages under writeback
3632 *
3633 * Determine the numbers of file, headroom, dirty, and writeback pages in
3634 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
3635 * is a bit more involved.
3636 *
3637 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
3638 * headroom is calculated as the lowest headroom of itself and the
3639 * ancestors.  Note that this doesn't consider the actual amount of
3640 * available memory in the system.  The caller should further cap
3641 * *@pheadroom accordingly.
3642 */
3643void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3644			 unsigned long *pheadroom, unsigned long *pdirty,
3645			 unsigned long *pwriteback)
3646{
3647	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3648	struct mem_cgroup *parent;
3649
3650	*pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
3651
3652	/* this should eventually include NR_UNSTABLE_NFS */
3653	*pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
3654	*pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3655						     (1 << LRU_ACTIVE_FILE));
3656	*pheadroom = PAGE_COUNTER_MAX;
3657
 
3658	while ((parent = parent_mem_cgroup(memcg))) {
3659		unsigned long ceiling = min(memcg->memory.limit, memcg->high);
 
3660		unsigned long used = page_counter_read(&memcg->memory);
3661
3662		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3663		memcg = parent;
3664	}
3665}
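/*
 * Worked example (illustrative): wb's memcg has a 1G limit, no high limit
 * (PAGE_COUNTER_MAX) and 700M used, giving it 300M of headroom.  Its
 * non-root parent has a 2G limit with 1.9G used, contributing only 100M,
 * so *pheadroom ends up at 100M.  The root level is never evaluated since
 * the walk stops once no parent is left.
 */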
3666
3667#else	/* CONFIG_CGROUP_WRITEBACK */
3668
3669static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3670{
3671	return 0;
3672}
3673
3674static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3675{
3676}
3677
3678static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3679{
3680}
3681
3682#endif	/* CONFIG_CGROUP_WRITEBACK */
3683
3684/*
3685 * DO NOT USE IN NEW FILES.
3686 *
3687 * "cgroup.event_control" implementation.
3688 *
3689 * This is way over-engineered.  It tries to support fully configurable
3690 * events for each user.  Such a level of flexibility is completely
3691 * unnecessary, especially in light of the planned unified hierarchy.
3692 *
3693 * Please deprecate this and replace with something simpler if at all
3694 * possible.
3695 */
3696
3697/*
3698 * Unregister event and free resources.
3699 *
3700 * Gets called from workqueue.
3701 */
3702static void memcg_event_remove(struct work_struct *work)
3703{
3704	struct mem_cgroup_event *event =
3705		container_of(work, struct mem_cgroup_event, remove);
3706	struct mem_cgroup *memcg = event->memcg;
3707
3708	remove_wait_queue(event->wqh, &event->wait);
3709
3710	event->unregister_event(memcg, event->eventfd);
3711
3712	/* Notify userspace the event is going away. */
3713	eventfd_signal(event->eventfd, 1);
3714
3715	eventfd_ctx_put(event->eventfd);
3716	kfree(event);
3717	css_put(&memcg->css);
3718}
3719
3720/*
3721 * Gets called on POLLHUP on eventfd when user closes it.
3722 *
3723 * Called with wqh->lock held and interrupts disabled.
3724 */
3725static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
3726			    int sync, void *key)
3727{
3728	struct mem_cgroup_event *event =
3729		container_of(wait, struct mem_cgroup_event, wait);
3730	struct mem_cgroup *memcg = event->memcg;
3731	unsigned long flags = (unsigned long)key;
3732
3733	if (flags & POLLHUP) {
3734		/*
3735		 * If the event has been detached at cgroup removal, we
3736		 * can simply return knowing the other side will cleanup
3737		 * for us.
3738		 *
3739		 * We can't race against event freeing since the other
3740		 * side will require wqh->lock via remove_wait_queue(),
3741		 * which we hold.
3742		 */
3743		spin_lock(&memcg->event_list_lock);
3744		if (!list_empty(&event->list)) {
3745			list_del_init(&event->list);
3746			/*
3747			 * We are in atomic context, but memcg_event_remove()
3748			 * may sleep, so we have to call it from a workqueue.
3749			 */
3750			schedule_work(&event->remove);
3751		}
3752		spin_unlock(&memcg->event_list_lock);
3753	}
3754
3755	return 0;
3756}
3757
3758static void memcg_event_ptable_queue_proc(struct file *file,
3759		wait_queue_head_t *wqh, poll_table *pt)
3760{
3761	struct mem_cgroup_event *event =
3762		container_of(pt, struct mem_cgroup_event, pt);
3763
3764	event->wqh = wqh;
3765	add_wait_queue(wqh, &event->wait);
3766}
3767
3768/*
3769 * DO NOT USE IN NEW FILES.
3770 *
3771 * Parse input and register new cgroup event handler.
3772 *
3773 * Input must be in format '<event_fd> <control_fd> <args>'.
3774 * Interpretation of args is defined by control file implementation.
3775 */
3776static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
3777					 char *buf, size_t nbytes, loff_t off)
3778{
3779	struct cgroup_subsys_state *css = of_css(of);
3780	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3781	struct mem_cgroup_event *event;
3782	struct cgroup_subsys_state *cfile_css;
3783	unsigned int efd, cfd;
3784	struct fd efile;
3785	struct fd cfile;
 
3786	const char *name;
3787	char *endp;
3788	int ret;
3789
 
 
 
3790	buf = strstrip(buf);
3791
3792	efd = simple_strtoul(buf, &endp, 10);
3793	if (*endp != ' ')
3794		return -EINVAL;
3795	buf = endp + 1;
3796
3797	cfd = simple_strtoul(buf, &endp, 10);
3798	if ((*endp != ' ') && (*endp != '\0'))
3799		return -EINVAL;
3800	buf = endp + 1;
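	/*
	 * Whatever remains in @buf after the two fds is passed verbatim
	 * to the control file's register_event() callback below.
	 */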
3801
3802	event = kzalloc(sizeof(*event), GFP_KERNEL);
3803	if (!event)
3804		return -ENOMEM;
3805
3806	event->memcg = memcg;
3807	INIT_LIST_HEAD(&event->list);
3808	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
3809	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
3810	INIT_WORK(&event->remove, memcg_event_remove);
3811
3812	efile = fdget(efd);
3813	if (!efile.file) {
3814		ret = -EBADF;
3815		goto out_kfree;
3816	}
3817
3818	event->eventfd = eventfd_ctx_fileget(efile.file);
3819	if (IS_ERR(event->eventfd)) {
3820		ret = PTR_ERR(event->eventfd);
3821		goto out_put_efile;
3822	}
3823
3824	cfile = fdget(cfd);
3825	if (!cfile.file) {
3826		ret = -EBADF;
3827		goto out_put_eventfd;
3828	}
3829
3830	/* the process needs read permission on the control file */
3831	/* AV: shouldn't we check that it's been opened for read instead? */
3832	ret = inode_permission(file_inode(cfile.file), MAY_READ);
3833	if (ret < 0)
3834		goto out_put_cfile;
3835
3836	/*
3837	 * Determine the event callbacks and set them in @event.  This used
3838	 * to be done via struct cftype but cgroup core no longer knows
3839	 * about these events.  The following is crude but the whole thing
3840	 * is for compatibility anyway.
3841	 *
3842	 * DO NOT ADD NEW FILES.
3843	 */
3844	name = cfile.file->f_path.dentry->d_name.name;
3845
3846	if (!strcmp(name, "memory.usage_in_bytes")) {
3847		event->register_event = mem_cgroup_usage_register_event;
3848		event->unregister_event = mem_cgroup_usage_unregister_event;
3849	} else if (!strcmp(name, "memory.oom_control")) {
3850		event->register_event = mem_cgroup_oom_register_event;
3851		event->unregister_event = mem_cgroup_oom_unregister_event;
3852	} else if (!strcmp(name, "memory.pressure_level")) {
3853		event->register_event = vmpressure_register_event;
3854		event->unregister_event = vmpressure_unregister_event;
3855	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
3856		event->register_event = memsw_cgroup_usage_register_event;
3857		event->unregister_event = memsw_cgroup_usage_unregister_event;
3858	} else {
3859		ret = -EINVAL;
3860		goto out_put_cfile;
3861	}
3862
3863	/*
3864	 * Verify @cfile should belong to @css.  Also, remaining events are
3865	 * automatically removed on cgroup destruction but the removal is
3866	 * asynchronous, so take an extra ref on @css.
3867	 */
3868	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
3869					       &memory_cgrp_subsys);
3870	ret = -EINVAL;
3871	if (IS_ERR(cfile_css))
3872		goto out_put_cfile;
3873	if (cfile_css != css) {
3874		css_put(cfile_css);
3875		goto out_put_cfile;
3876	}
3877
3878	ret = event->register_event(memcg, event->eventfd, buf);
3879	if (ret)
3880		goto out_put_css;
3881
3882	efile.file->f_op->poll(efile.file, &event->pt);
3883
3884	spin_lock(&memcg->event_list_lock);
3885	list_add(&event->list, &memcg->event_list);
3886	spin_unlock(&memcg->event_list_lock);
3887
3888	fdput(cfile);
3889	fdput(efile);
3890
3891	return nbytes;
3892
3893out_put_css:
3894	css_put(css);
3895out_put_cfile:
3896	fdput(cfile);
3897out_put_eventfd:
3898	eventfd_ctx_put(event->eventfd);
3899out_put_efile:
3900	fdput(efile);
3901out_kfree:
3902	kfree(event);
3903
3904	return ret;
3905}
3906
3907static struct cftype mem_cgroup_legacy_files[] = {
3908	{
3909		.name = "usage_in_bytes",
3910		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3911		.read_u64 = mem_cgroup_read_u64,
3912	},
3913	{
3914		.name = "max_usage_in_bytes",
3915		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
3916		.write = mem_cgroup_reset,
3917		.read_u64 = mem_cgroup_read_u64,
3918	},
3919	{
3920		.name = "limit_in_bytes",
3921		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
3922		.write = mem_cgroup_write,
3923		.read_u64 = mem_cgroup_read_u64,
3924	},
3925	{
3926		.name = "soft_limit_in_bytes",
3927		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3928		.write = mem_cgroup_write,
3929		.read_u64 = mem_cgroup_read_u64,
3930	},
3931	{
3932		.name = "failcnt",
3933		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
3934		.write = mem_cgroup_reset,
3935		.read_u64 = mem_cgroup_read_u64,
3936	},
3937	{
3938		.name = "stat",
3939		.seq_show = memcg_stat_show,
3940	},
3941	{
3942		.name = "force_empty",
3943		.write = mem_cgroup_force_empty_write,
3944	},
3945	{
3946		.name = "use_hierarchy",
3947		.write_u64 = mem_cgroup_hierarchy_write,
3948		.read_u64 = mem_cgroup_hierarchy_read,
3949	},
3950	{
3951		.name = "cgroup.event_control",		/* XXX: for compat */
3952		.write = memcg_write_event_control,
3953		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
3954	},
3955	{
3956		.name = "swappiness",
3957		.read_u64 = mem_cgroup_swappiness_read,
3958		.write_u64 = mem_cgroup_swappiness_write,
3959	},
3960	{
3961		.name = "move_charge_at_immigrate",
3962		.read_u64 = mem_cgroup_move_charge_read,
3963		.write_u64 = mem_cgroup_move_charge_write,
3964	},
3965	{
3966		.name = "oom_control",
3967		.seq_show = mem_cgroup_oom_control_read,
3968		.write_u64 = mem_cgroup_oom_control_write,
3969		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3970	},
3971	{
3972		.name = "pressure_level",
3973	},
3974#ifdef CONFIG_NUMA
3975	{
3976		.name = "numa_stat",
3977		.seq_show = memcg_numa_stat_show,
3978	},
3979#endif
3980	{
3981		.name = "kmem.limit_in_bytes",
3982		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
3983		.write = mem_cgroup_write,
3984		.read_u64 = mem_cgroup_read_u64,
3985	},
3986	{
3987		.name = "kmem.usage_in_bytes",
3988		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
3989		.read_u64 = mem_cgroup_read_u64,
3990	},
3991	{
3992		.name = "kmem.failcnt",
3993		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
3994		.write = mem_cgroup_reset,
3995		.read_u64 = mem_cgroup_read_u64,
3996	},
3997	{
3998		.name = "kmem.max_usage_in_bytes",
3999		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4000		.write = mem_cgroup_reset,
4001		.read_u64 = mem_cgroup_read_u64,
4002	},
4003#ifdef CONFIG_SLABINFO
 
4004	{
4005		.name = "kmem.slabinfo",
4006		.seq_start = slab_start,
4007		.seq_next = slab_next,
4008		.seq_stop = slab_stop,
4009		.seq_show = memcg_slab_show,
4010	},
4011#endif
4012	{
4013		.name = "kmem.tcp.limit_in_bytes",
4014		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4015		.write = mem_cgroup_write,
4016		.read_u64 = mem_cgroup_read_u64,
4017	},
4018	{
4019		.name = "kmem.tcp.usage_in_bytes",
4020		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4021		.read_u64 = mem_cgroup_read_u64,
4022	},
4023	{
4024		.name = "kmem.tcp.failcnt",
4025		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4026		.write = mem_cgroup_reset,
4027		.read_u64 = mem_cgroup_read_u64,
4028	},
4029	{
4030		.name = "kmem.tcp.max_usage_in_bytes",
4031		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4032		.write = mem_cgroup_reset,
4033		.read_u64 = mem_cgroup_read_u64,
4034	},
4035	{ },	/* terminate */
4036};
4037
4038static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4039{
4040	struct mem_cgroup_per_node *pn;
4041	struct mem_cgroup_per_zone *mz;
4042	int zone, tmp = node;
4043	/*
4044	 * This routine is called against possible nodes.
4045	 * But it's a BUG to call kmalloc() against an offline node.
4046	 *
4047	 * TODO: this routine can waste much memory for nodes which will
4048	 *       never be onlined. It's better to use memory hotplug callback
4049	 *       function.
4050	 */
4051	if (!node_state(node, N_NORMAL_MEMORY))
4052		tmp = -1;
4053	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4054	if (!pn)
4055		return 1;
4056
4057	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4058		mz = &pn->zoneinfo[zone];
4059		lruvec_init(&mz->lruvec);
4060		mz->usage_in_excess = 0;
4061		mz->on_tree = false;
4062		mz->memcg = memcg;
4063	}
4064	memcg->nodeinfo[node] = pn;
4065	return 0;
4066}
4067
4068static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4069{
4070	kfree(memcg->nodeinfo[node]);
4071}
4072
4073static void mem_cgroup_free(struct mem_cgroup *memcg)
4074{
4075	int node;
4076
4077	memcg_wb_domain_exit(memcg);
4078	for_each_node(node)
4079		free_mem_cgroup_per_zone_info(memcg, node);
4080	free_percpu(memcg->stat);
 
4081	kfree(memcg);
4082}
4083
4084static struct mem_cgroup *mem_cgroup_alloc(void)
4085{
4086	struct mem_cgroup *memcg;
4087	size_t size;
4088	int node;
 
 
4089
4090	size = sizeof(struct mem_cgroup);
4091	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
4092
4093	memcg = kzalloc(size, GFP_KERNEL);
4094	if (!memcg)
4095		return NULL;
4096
4097	memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4098	if (!memcg->stat)
4099		goto fail;
4100
4101	for_each_node(node)
4102		if (alloc_mem_cgroup_per_zone_info(memcg, node))
4103			goto fail;
4104
4105	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4106		goto fail;
4107
4108	INIT_WORK(&memcg->high_work, high_work_func);
4109	memcg->last_scanned_node = MAX_NUMNODES;
4110	INIT_LIST_HEAD(&memcg->oom_notify);
4111	mutex_init(&memcg->thresholds_lock);
4112	spin_lock_init(&memcg->move_lock);
4113	vmpressure_init(&memcg->vmpressure);
4114	INIT_LIST_HEAD(&memcg->event_list);
4115	spin_lock_init(&memcg->event_list_lock);
4116	memcg->socket_pressure = jiffies;
4117#ifndef CONFIG_SLOB
4118	memcg->kmemcg_id = -1;
 
4119#endif
4120#ifdef CONFIG_CGROUP_WRITEBACK
4121	INIT_LIST_HEAD(&memcg->cgwb_list);
4122#endif
 
 
4123	return memcg;
4124fail:
4125	mem_cgroup_free(memcg);
4126	return NULL;
 
4127}
4128
4129static struct cgroup_subsys_state * __ref
4130mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4131{
4132	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4133	struct mem_cgroup *memcg;
4134	long error = -ENOMEM;
4135
 
4136	memcg = mem_cgroup_alloc();
4137	if (!memcg)
4138		return ERR_PTR(error);
 
4139
4140	memcg->high = PAGE_COUNTER_MAX;
4141	memcg->soft_limit = PAGE_COUNTER_MAX;
4142	if (parent) {
4143		memcg->swappiness = mem_cgroup_swappiness(parent);
4144		memcg->oom_kill_disable = parent->oom_kill_disable;
4145	}
4146	if (parent && parent->use_hierarchy) {
4147		memcg->use_hierarchy = true;
4148		page_counter_init(&memcg->memory, &parent->memory);
4149		page_counter_init(&memcg->swap, &parent->swap);
4150		page_counter_init(&memcg->memsw, &parent->memsw);
4151		page_counter_init(&memcg->kmem, &parent->kmem);
4152		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
4153	} else {
 
4154		page_counter_init(&memcg->memory, NULL);
4155		page_counter_init(&memcg->swap, NULL);
4156		page_counter_init(&memcg->memsw, NULL);
4157		page_counter_init(&memcg->kmem, NULL);
4158		page_counter_init(&memcg->tcpmem, NULL);
4159		/*
4160		 * Deeper hierarchy with use_hierarchy == false doesn't make
4161		 * much sense so let cgroup subsystem know about this
4162		 * unfortunate state in our controller.
4163		 */
4164		if (parent != root_mem_cgroup)
4165			memory_cgrp_subsys.broken_hierarchy = true;
4166	}
4167
4168	/* The following stuff does not apply to the root */
4169	if (!parent) {
4170		root_mem_cgroup = memcg;
4171		return &memcg->css;
4172	}
4173
4174	error = memcg_online_kmem(memcg);
4175	if (error)
4176		goto fail;
4177
4178	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4179		static_branch_inc(&memcg_sockets_enabled_key);
4180
4181	return &memcg->css;
4182fail:
4183	mem_cgroup_free(memcg);
4184	return NULL;
4185}
4186
4187static int
4188mem_cgroup_css_online(struct cgroup_subsys_state *css)
4189{
4190	if (css->id > MEM_CGROUP_ID_MAX)
4191		return -ENOSPC;
 
 
4192
4193	return 0;
4194}
4195
4196static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4197{
4198	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4199	struct mem_cgroup_event *event, *tmp;
4200
4201	/*
4202	 * Unregister events and notify userspace.
4203	 * Notify userspace about the cgroup's removal only after rmdir of the
4204	 * cgroup directory, to avoid a race between userspace and kernelspace.
4205	 */
4206	spin_lock(&memcg->event_list_lock);
4207	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
4208		list_del_init(&event->list);
4209		schedule_work(&event->remove);
4210	}
4211	spin_unlock(&memcg->event_list_lock);
 
 
 
4212
4213	memcg_offline_kmem(memcg);
 
4214	wb_memcg_offline(memcg);
4215}
4216
4217static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4218{
4219	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4220
4221	invalidate_reclaim_iterators(memcg);
4222}
4223
4224static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
4225{
4226	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
4227
4228	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4229		static_branch_dec(&memcg_sockets_enabled_key);
4230
4231	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
4232		static_branch_dec(&memcg_sockets_enabled_key);
4233
4234	vmpressure_cleanup(&memcg->vmpressure);
4235	cancel_work_sync(&memcg->high_work);
4236	mem_cgroup_remove_from_trees(memcg);
4237	memcg_free_kmem(memcg);
4238	mem_cgroup_free(memcg);
4239}
4240
4241/**
4242 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4243 * @css: the target css
4244 *
4245 * Reset the states of the mem_cgroup associated with @css.  This is
4246 * invoked when the userland requests disabling on the default hierarchy
4247 * but the memcg is pinned through dependency.  The memcg should stop
4248 * applying policies and should revert to the vanilla state as it may be
4249 * made visible again.
4250 *
4251 * The current implementation only resets the essential configurations.
4252 * This needs to be expanded to cover all the visible parts.
4253 */
4254static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4255{
4256	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4257
4258	page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX);
4259	page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX);
4260	page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX);
4261	page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX);
4262	page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX);
4263	memcg->low = 0;
4264	memcg->high = PAGE_COUNTER_MAX;
4265	memcg->soft_limit = PAGE_COUNTER_MAX;
 
4266	memcg_wb_domain_size_changed(memcg);
4267}
4268
4269#ifdef CONFIG_MMU
4270/* Handlers for move charge at task migration. */
4271static int mem_cgroup_do_precharge(unsigned long count)
4272{
4273	int ret;
4274
4275	/* Try a single bulk charge without reclaim first, kswapd may wake */
4276	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
4277	if (!ret) {
4278		mc.precharge += count;
4279		return ret;
4280	}
4281
4282	/* Try charges one by one with reclaim */
4283	while (count--) {
4284		ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
4285		if (ret)
4286			return ret;
4287		mc.precharge++;
4288		cond_resched();
4289	}
4290	return 0;
4291}
4292
4293/**
4294 * get_mctgt_type - get target type of moving charge
4295 * @vma: the vma to which the pte to be checked belongs
4296 * @addr: the address corresponding to the pte to be checked
4297 * @ptent: the pte to be checked
4298 * @target: where the target page or swap entry will be stored (can be NULL)
4299 *
4300 * Returns
4301 *   0 (MC_TARGET_NONE): if the pte is not a target for move charge.
4302 *   1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4303 *     move charge. If @target is not NULL, the page is stored in target->page
4304 *     with an extra refcount taken (callers should handle it).
4305 *   2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4306 *     target for charge migration. If @target is not NULL, the entry is stored
4307 *     in target->ent.
4308 *
4309 * Called with pte lock held.
4310 */
4311union mc_target {
4312	struct page	*page;
4313	swp_entry_t	ent;
4314};
4315
4316enum mc_target_type {
4317	MC_TARGET_NONE = 0,
4318	MC_TARGET_PAGE,
4319	MC_TARGET_SWAP,
 
4320};
4321
4322static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4323						unsigned long addr, pte_t ptent)
4324{
4325	struct page *page = vm_normal_page(vma, addr, ptent);
4326
4327	if (!page || !page_mapped(page))
4328		return NULL;
4329	if (PageAnon(page)) {
4330		if (!(mc.flags & MOVE_ANON))
4331			return NULL;
4332	} else {
4333		if (!(mc.flags & MOVE_FILE))
4334			return NULL;
4335	}
4336	if (!get_page_unless_zero(page))
4337		return NULL;
4338
4339	return page;
4340}
4341
4342#ifdef CONFIG_SWAP
4343static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4344			unsigned long addr, pte_t ptent, swp_entry_t *entry)
4345{
4346	struct page *page = NULL;
4347	swp_entry_t ent = pte_to_swp_entry(ptent);
4348
4349	if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
4350		return NULL;
 
4351	/*
4352	 * Because lookup_swap_cache() updates some statistics counter,
4353	 * we call find_get_page() with swapper_space directly.
4354	 */
4355	page = find_get_page(swap_address_space(ent), ent.val);
4356	if (do_memsw_account())
4357		entry->val = ent.val;
4358
4359	return page;
4360}
4361#else
4362static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4363			unsigned long addr, pte_t ptent, swp_entry_t *entry)
4364{
4365	return NULL;
4366}
4367#endif
4368
4369static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4370			unsigned long addr, pte_t ptent, swp_entry_t *entry)
4371{
4372	struct page *page = NULL;
4373	struct address_space *mapping;
4374	pgoff_t pgoff;
4375
4376	if (!vma->vm_file) /* anonymous vma */
4377		return NULL;
4378	if (!(mc.flags & MOVE_FILE))
4379		return NULL;
4380
4381	mapping = vma->vm_file->f_mapping;
4382	pgoff = linear_page_index(vma, addr);
4383
4384	/* page is moved even if it's not RSS of this task (page-faulted). */
4385#ifdef CONFIG_SWAP
4386	/* shmem/tmpfs may report page out on swap: account for that too. */
4387	if (shmem_mapping(mapping)) {
4388		page = find_get_entry(mapping, pgoff);
4389		if (radix_tree_exceptional_entry(page)) {
4390			swp_entry_t swp = radix_to_swp_entry(page);
4391			if (do_memsw_account())
4392				*entry = swp;
4393			page = find_get_page(swap_address_space(swp), swp.val);
4394		}
4395	} else
4396		page = find_get_page(mapping, pgoff);
4397#else
4398	page = find_get_page(mapping, pgoff);
4399#endif
4400	return page;
4401}
4402
4403/**
4404 * mem_cgroup_move_account - move account of the page
4405 * @page: the page
4406 * @nr_pages: number of regular pages (>1 for huge pages)
4407 * @from: mem_cgroup which the page is moved from.
4408 * @to:	mem_cgroup which the page is moved to. @from != @to.
4409 *
4410 * The caller must make sure the page is not on the LRU (isolate_lru_page() is useful.)
4411 *
4412 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
4413 * from old cgroup.
4414 */
4415static int mem_cgroup_move_account(struct page *page,
4416				   bool compound,
4417				   struct mem_cgroup *from,
4418				   struct mem_cgroup *to)
4419{
4420	unsigned long flags;
4421	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
4422	int ret;
4423	bool anon;
 
4424
4425	VM_BUG_ON(from == to);
4426	VM_BUG_ON_PAGE(PageLRU(page), page);
4427	VM_BUG_ON(compound && !PageTransHuge(page));
4428
4429	/*
4430	 * Prevent mem_cgroup_migrate() from looking at
4431	 * page->mem_cgroup of its source page while we change it.
4432	 */
4433	ret = -EBUSY;
4434	if (!trylock_page(page))
4435		goto out;
4436
4437	ret = -EINVAL;
4438	if (page->mem_cgroup != from)
4439		goto out_unlock;
4440
4441	anon = PageAnon(page);
4442
4443	spin_lock_irqsave(&from->move_lock, flags);
 
 
 
4444
4445	if (!anon && page_mapped(page)) {
4446		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4447			       nr_pages);
4448		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4449			       nr_pages);
4450	}
4451
4452	/*
4453	 * move_lock grabbed above and caller set from->moving_account, so
4454	 * mem_cgroup_update_page_stat() will serialize updates to PageDirty.
4455	 * So mapping should be stable for dirty pages.
4456	 */
4457	if (!anon && PageDirty(page)) {
4458		struct address_space *mapping = page_mapping(page);
4459
4460		if (mapping_cap_account_dirty(mapping)) {
4461			__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
4462				       nr_pages);
4463			__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
4464				       nr_pages);
 
4465		}
4466	}
4467
4468	if (PageWriteback(page)) {
4469		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4470			       nr_pages);
4471		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4472			       nr_pages);
4473	}
4474
4475	/*
4476	 * It is safe to change page->mem_cgroup here because the page
4477	 * is referenced, charged, and isolated - we can't race with
4478	 * uncharging, charging, migration, or LRU putback.
4479	 */
4480
4481	/* caller should have done css_get */
4482	page->mem_cgroup = to;
4483	spin_unlock_irqrestore(&from->move_lock, flags);
4484
4485	ret = 0;
 
4486
4487	local_irq_disable();
4488	mem_cgroup_charge_statistics(to, page, compound, nr_pages);
4489	memcg_check_events(to, page);
4490	mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
4491	memcg_check_events(from, page);
4492	local_irq_enable();
4493out_unlock:
4494	unlock_page(page);
4495out:
4496	return ret;
4497}
4498
4499static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
4500		unsigned long addr, pte_t ptent, union mc_target *target)
4501{
4502	struct page *page = NULL;
4503	enum mc_target_type ret = MC_TARGET_NONE;
4504	swp_entry_t ent = { .val = 0 };
4505
4506	if (pte_present(ptent))
4507		page = mc_handle_present_pte(vma, addr, ptent);
4508	else if (is_swap_pte(ptent))
4509		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
4510	else if (pte_none(ptent))
4511		page = mc_handle_file_pte(vma, addr, ptent, &ent);
4512
4513	if (!page && !ent.val)
4514		return ret;
4515	if (page) {
4516		/*
4517		 * Do only a loose check w/o serialization.
4518		 * mem_cgroup_move_account() checks whether the page is valid
4519		 * under LRU exclusion.
4520		 */
4521		if (page->mem_cgroup == mc.from) {
4522			ret = MC_TARGET_PAGE;
 
 
 
4523			if (target)
4524				target->page = page;
4525		}
4526		if (!ret || !target)
4527			put_page(page);
4528	}
4529	/* There is a swap entry and a page doesn't exist or isn't charged */
4530	if (ent.val && !ret &&
 
 
 
4531	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
4532		ret = MC_TARGET_SWAP;
4533		if (target)
4534			target->ent = ent;
4535	}
4536	return ret;
4537}
4538
4539#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4540/*
4541 * We don't consider swapping or file mapped pages because THP does not
4542 * support them for now.
4543 * Caller should make sure that pmd_trans_huge(pmd) is true.
4544 */
4545static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4546		unsigned long addr, pmd_t pmd, union mc_target *target)
4547{
4548	struct page *page = NULL;
4549	enum mc_target_type ret = MC_TARGET_NONE;
4550
4551	page = pmd_page(pmd);
4552	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
4553	if (!(mc.flags & MOVE_ANON))
4554		return ret;
4555	if (page->mem_cgroup == mc.from) {
4556		ret = MC_TARGET_PAGE;
4557		if (target) {
4558			get_page(page);
4559			target->page = page;
4560		}
4561	}
4562	return ret;
4563}
4564#else
4565static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4566		unsigned long addr, pmd_t pmd, union mc_target *target)
4567{
4568	return MC_TARGET_NONE;
4569}
4570#endif
4571
4572static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4573					unsigned long addr, unsigned long end,
4574					struct mm_walk *walk)
4575{
4576	struct vm_area_struct *vma = walk->vma;
4577	pte_t *pte;
4578	spinlock_t *ptl;
4579
4580	ptl = pmd_trans_huge_lock(pmd, vma);
4581	if (ptl) {
4582		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
4583			mc.precharge += HPAGE_PMD_NR;
4584		spin_unlock(ptl);
4585		return 0;
4586	}
4587
4588	if (pmd_trans_unstable(pmd))
4589		return 0;
4590	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4591	for (; addr != end; pte++, addr += PAGE_SIZE)
4592		if (get_mctgt_type(vma, addr, *pte, NULL))
4593			mc.precharge++;	/* increment precharge temporarily */
4594	pte_unmap_unlock(pte - 1, ptl);
4595	cond_resched();
4596
4597	return 0;
4598}
4599
4600static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4601{
4602	unsigned long precharge;
4603
4604	struct mm_walk mem_cgroup_count_precharge_walk = {
4605		.pmd_entry = mem_cgroup_count_precharge_pte_range,
4606		.mm = mm,
4607	};
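	/*
	 * Walk the whole address space; the pte handler above accumulates
	 * the number of movable pages in mc.precharge.
	 */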
4608	down_read(&mm->mmap_sem);
4609	walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
4610	up_read(&mm->mmap_sem);
4611
4612	precharge = mc.precharge;
4613	mc.precharge = 0;
4614
4615	return precharge;
4616}
4617
4618static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4619{
4620	unsigned long precharge = mem_cgroup_count_precharge(mm);
4621
4622	VM_BUG_ON(mc.moving_task);
4623	mc.moving_task = current;
4624	return mem_cgroup_do_precharge(precharge);
4625}
4626
4627/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
4628static void __mem_cgroup_clear_mc(void)
4629{
4630	struct mem_cgroup *from = mc.from;
4631	struct mem_cgroup *to = mc.to;
4632
4633	/* we must uncharge all the leftover precharges from mc.to */
4634	if (mc.precharge) {
4635		cancel_charge(mc.to, mc.precharge);
4636		mc.precharge = 0;
4637	}
4638	/*
4639	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4640	 * we must uncharge here.
4641	 */
4642	if (mc.moved_charge) {
4643		cancel_charge(mc.from, mc.moved_charge);
4644		mc.moved_charge = 0;
4645	}
4646	/* we must fixup refcnts and charges */
4647	if (mc.moved_swap) {
4648		/* uncharge swap account from the old cgroup */
4649		if (!mem_cgroup_is_root(mc.from))
4650			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
4651
 
 
4652		/*
4653		 * we charged both to->memory and to->memsw, so we
4654		 * should uncharge to->memory.
4655		 */
4656		if (!mem_cgroup_is_root(mc.to))
4657			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
4658
4659		css_put_many(&mc.from->css, mc.moved_swap);
4660
4661		/* we've already done css_get(mc.to) */
4662		mc.moved_swap = 0;
4663	}
4664	memcg_oom_recover(from);
4665	memcg_oom_recover(to);
4666	wake_up_all(&mc.waitq);
4667}
4668
4669static void mem_cgroup_clear_mc(void)
4670{
4671	struct mm_struct *mm = mc.mm;
4672
4673	/*
4674	 * we must clear moving_task before waking up waiters at the end of
4675	 * task migration.
4676	 */
4677	mc.moving_task = NULL;
4678	__mem_cgroup_clear_mc();
4679	spin_lock(&mc.lock);
4680	mc.from = NULL;
4681	mc.to = NULL;
4682	mc.mm = NULL;
4683	spin_unlock(&mc.lock);
4684
4685	mmput(mm);
4686}
4687
4688static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4689{
4690	struct cgroup_subsys_state *css;
4691	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
4692	struct mem_cgroup *from;
4693	struct task_struct *leader, *p;
4694	struct mm_struct *mm;
4695	unsigned long move_flags;
4696	int ret = 0;
4697
4698	/* charge immigration isn't supported on the default hierarchy */
4699	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4700		return 0;
4701
4702	/*
4703	 * Multi-process migrations only happen on the default hierarchy
4704	 * where charge immigration is not used.  Perform charge
4705	 * immigration if @tset contains a leader and whine if there are
4706	 * multiple.
4707	 */
4708	p = NULL;
4709	cgroup_taskset_for_each_leader(leader, css, tset) {
4710		WARN_ON_ONCE(p);
4711		p = leader;
4712		memcg = mem_cgroup_from_css(css);
4713	}
4714	if (!p)
4715		return 0;
4716
4717	/*
4718	 * We are now committed to this value whatever it is. Changes in this
4719	 * tunable will only affect upcoming migrations, not the current one.
4720	 * So we need to save it, and keep it going.
4721	 */
4722	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4723	if (!move_flags)
4724		return 0;
4725
4726	from = mem_cgroup_from_task(p);
4727
4728	VM_BUG_ON(from == memcg);
4729
4730	mm = get_task_mm(p);
4731	if (!mm)
4732		return 0;
4733	/* We move charges only when we move the owner of the mm */
4734	if (mm->owner == p) {
4735		VM_BUG_ON(mc.from);
4736		VM_BUG_ON(mc.to);
4737		VM_BUG_ON(mc.precharge);
4738		VM_BUG_ON(mc.moved_charge);
4739		VM_BUG_ON(mc.moved_swap);
4740
4741		spin_lock(&mc.lock);
4742		mc.mm = mm;
4743		mc.from = from;
4744		mc.to = memcg;
4745		mc.flags = move_flags;
4746		spin_unlock(&mc.lock);
4747		/* We set mc.moving_task later */
4748
4749		ret = mem_cgroup_precharge_mc(mm);
4750		if (ret)
4751			mem_cgroup_clear_mc();
4752	} else {
4753		mmput(mm);
4754	}
4755	return ret;
4756}
4757
4758static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4759{
4760	if (mc.to)
4761		mem_cgroup_clear_mc();
4762}
4763
4764static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4765				unsigned long addr, unsigned long end,
4766				struct mm_walk *walk)
4767{
4768	int ret = 0;
4769	struct vm_area_struct *vma = walk->vma;
4770	pte_t *pte;
4771	spinlock_t *ptl;
4772	enum mc_target_type target_type;
4773	union mc_target target;
4774	struct page *page;
4775
4776	ptl = pmd_trans_huge_lock(pmd, vma);
4777	if (ptl) {
4778		if (mc.precharge < HPAGE_PMD_NR) {
4779			spin_unlock(ptl);
4780			return 0;
4781		}
4782		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
4783		if (target_type == MC_TARGET_PAGE) {
4784			page = target.page;
4785			if (!isolate_lru_page(page)) {
4786				if (!mem_cgroup_move_account(page, true,
4787							     mc.from, mc.to)) {
4788					mc.precharge -= HPAGE_PMD_NR;
4789					mc.moved_charge += HPAGE_PMD_NR;
4790				}
4791				putback_lru_page(page);
4792			}
4793			put_page(page);
4794		}
4795		spin_unlock(ptl);
4796		return 0;
4797	}
4798
4799	if (pmd_trans_unstable(pmd))
4800		return 0;
4801retry:
4802	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4803	for (; addr != end; addr += PAGE_SIZE) {
4804		pte_t ptent = *(pte++);
 
4805		swp_entry_t ent;
4806
4807		if (!mc.precharge)
4808			break;
4809
4810		switch (get_mctgt_type(vma, addr, ptent, &target)) {
 
 
 
4811		case MC_TARGET_PAGE:
4812			page = target.page;
4813			/*
4814			 * We can have a part of the split pmd here. Moving it
4815			 * can be done but it would be too convoluted so simply
4816			 * ignore such a partial THP and keep it in original
4817			 * memcg. There should be somebody mapping the head.
4818			 */
4819			if (PageTransCompound(page))
4820				goto put;
4821			if (isolate_lru_page(page))
4822				goto put;
4823			if (!mem_cgroup_move_account(page, false,
4824						mc.from, mc.to)) {
4825				mc.precharge--;
4826				/* we uncharge from mc.from later. */
4827				mc.moved_charge++;
4828			}
4829			putback_lru_page(page);
 
4830put:			/* get_mctgt_type() gets the page */
4831			put_page(page);
4832			break;
4833		case MC_TARGET_SWAP:
4834			ent = target.ent;
4835			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
4836				mc.precharge--;
4837				/* we fixup refcnts and charges later. */
 
4838				mc.moved_swap++;
4839			}
4840			break;
4841		default:
4842			break;
4843		}
4844	}
4845	pte_unmap_unlock(pte - 1, ptl);
4846	cond_resched();
4847
4848	if (addr != end) {
4849		/*
4850		 * We have consumed all precharges we got in can_attach().
4851		 * We try to charge one by one, but don't do any additional
4852		 * charges to mc.to if we have already failed to charge once
4853		 * in the attach() phase.
4854		 */
4855		ret = mem_cgroup_do_precharge(1);
4856		if (!ret)
4857			goto retry;
4858	}
4859
4860	return ret;
4861}
4862
4863static void mem_cgroup_move_charge(void)
4864{
4865	struct mm_walk mem_cgroup_move_charge_walk = {
4866		.pmd_entry = mem_cgroup_move_charge_pte_range,
4867		.mm = mc.mm,
4868	};
4869
4870	lru_add_drain_all();
4871	/*
4872	 * Signal lock_page_memcg() to take the memcg's move_lock
4873	 * while we're moving its pages to another memcg. Then wait
4874	 * for already started RCU-only updates to finish.
4875	 */
4876	atomic_inc(&mc.from->moving_account);
4877	synchronize_rcu();
4878retry:
4879	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
4880		/*
4881		 * Someone who is holding the mmap_sem might be waiting in
4882		 * the waitq. So we cancel all extra charges, wake up all waiters,
4883		 * and retry. Because we cancel precharges, we might not be able
4884		 * to move enough charges, but moving charge is a best-effort
4885		 * feature anyway, so it wouldn't be a big problem.
4886		 */
4887		__mem_cgroup_clear_mc();
4888		cond_resched();
4889		goto retry;
4890	}
4891	/*
4892	 * When we have consumed all precharges and failed in doing
4893	 * additional charge, the page walk just aborts.
4894	 */
4895	walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
4896	up_read(&mc.mm->mmap_sem);
4897	atomic_dec(&mc.from->moving_account);
4898}
4899
4900static void mem_cgroup_move_task(void)
4901{
4902	if (mc.to) {
4903		mem_cgroup_move_charge();
4904		mem_cgroup_clear_mc();
4905	}
4906}
4907#else	/* !CONFIG_MMU */
4908static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4909{
4910	return 0;
4911}
4912static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4913{
4914}
4915static void mem_cgroup_move_task(void)
4916{
4917}
4918#endif
4919
4920/*
4921 * Cgroup retains root cgroups across [un]mount cycles making it necessary
4922 * to verify whether we're attached to the default hierarchy on each mount
4923 * attempt.
4924 */
4925static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
4926{
4927	/*
4928	 * use_hierarchy is forced on the default hierarchy.  cgroup core
4929	 * guarantees that @root doesn't have any children, so turning it
4930	 * on for the root memcg is enough.
4931	 */
4932	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4933		root_mem_cgroup->use_hierarchy = true;
4934	else
4935		root_mem_cgroup->use_hierarchy = false;
 
 
4936}
4937
4938static u64 memory_current_read(struct cgroup_subsys_state *css,
4939			       struct cftype *cft)
4940{
4941	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4942
4943	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
4944}
4945
4946static int memory_low_show(struct seq_file *m, void *v)
 
4947{
4948	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
4949	unsigned long low = READ_ONCE(memcg->low);
4950
4951	if (low == PAGE_COUNTER_MAX)
4952		seq_puts(m, "max\n");
4953	else
4954		seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
4955
4956	return 0;
4957}
4958
4959static ssize_t memory_low_write(struct kernfs_open_file *of,
4960				char *buf, size_t nbytes, loff_t off)
4961{
4962	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4963	unsigned long low;
4964	int err;
4965
4966	buf = strstrip(buf);
4967	err = page_counter_memparse(buf, "max", &low);
4968	if (err)
4969		return err;
4970
4971	memcg->low = low;
4972
4973	return nbytes;
4974}
4975
4976static int memory_high_show(struct seq_file *m, void *v)
4977{
4978	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
4979	unsigned long high = READ_ONCE(memcg->high);
4980
4981	if (high == PAGE_COUNTER_MAX)
4982		seq_puts(m, "max\n");
4983	else
4984		seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
4985
4986	return 0;
4987}
4988
4989static ssize_t memory_high_write(struct kernfs_open_file *of,
4990				 char *buf, size_t nbytes, loff_t off)
4991{
4992	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4993	unsigned long nr_pages;
 
4994	unsigned long high;
4995	int err;
4996
4997	buf = strstrip(buf);
4998	err = page_counter_memparse(buf, "max", &high);
4999	if (err)
5000		return err;
5001
5002	memcg->high = high;
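	/*
	 * If usage is already above the new high boundary, reclaim the
	 * excess right away rather than waiting for the next charge.
	 */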
5003
5004	nr_pages = page_counter_read(&memcg->memory);
5005	if (nr_pages > high)
5006		try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
5007					     GFP_KERNEL, true);
 
 
5008
5009	memcg_wb_domain_size_changed(memcg);
5010	return nbytes;
5011}
5012
5013static int memory_max_show(struct seq_file *m, void *v)
5014{
5015	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5016	unsigned long max = READ_ONCE(memcg->memory.limit);
5017
5018	if (max == PAGE_COUNTER_MAX)
5019		seq_puts(m, "max\n");
5020	else
5021		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5022
5023	return 0;
5024}
5025
5026static ssize_t memory_max_write(struct kernfs_open_file *of,
5027				char *buf, size_t nbytes, loff_t off)
5028{
5029	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5030	unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
5031	bool drained = false;
5032	unsigned long max;
5033	int err;
5034
5035	buf = strstrip(buf);
5036	err = page_counter_memparse(buf, "max", &max);
5037	if (err)
5038		return err;
5039
5040	xchg(&memcg->memory.limit, max);
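	/*
	 * The new limit is set first; the loop below then tries to push
	 * usage underneath it by draining the per-cpu stocks, reclaiming,
	 * and finally invoking the OOM killer if reclaim makes no progress.
	 */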
5041
5042	for (;;) {
5043		unsigned long nr_pages = page_counter_read(&memcg->memory);
5044
5045		if (nr_pages <= max)
5046			break;
5047
5048		if (signal_pending(current)) {
5049			err = -EINTR;
5050			break;
5051		}
5052
5053		if (!drained) {
5054			drain_all_stock(memcg);
5055			drained = true;
5056			continue;
5057		}
5058
5059		if (nr_reclaims) {
5060			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
5061							  GFP_KERNEL, true))
5062				nr_reclaims--;
5063			continue;
5064		}
5065
5066		mem_cgroup_events(memcg, MEMCG_OOM, 1);
5067		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
5068			break;
5069	}
5070
5071	memcg_wb_domain_size_changed(memcg);
5072	return nbytes;
5073}
5074
5075static int memory_events_show(struct seq_file *m, void *v)
5076{
5077	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5078
5079	seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
5080	seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
5081	seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
5082	seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
 
 
 
5083
 
5084	return 0;
5085}
5086
5087static int memory_stat_show(struct seq_file *m, void *v)
5088{
5089	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5090	unsigned long stat[MEMCG_NR_STAT];
5091	unsigned long events[MEMCG_NR_EVENTS];
5092	int i;
 
5093
5094	/*
5095	 * Provide statistics on the state of the memory subsystem as
5096	 * well as cumulative event counters that show past behavior.
5097	 *
5098	 * This list is ordered following a combination of these gradients:
5099	 * 1) generic big picture -> specifics and details
5100	 * 2) reflecting userspace activity -> reflecting kernel heuristics
5101	 *
5102	 * Current memory state:
5103	 */
5104
5105	tree_stat(memcg, stat);
5106	tree_events(memcg, events);
5107
5108	seq_printf(m, "anon %llu\n",
5109		   (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
5110	seq_printf(m, "file %llu\n",
5111		   (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
5112	seq_printf(m, "kernel_stack %llu\n",
5113		   (u64)stat[MEMCG_KERNEL_STACK] * PAGE_SIZE);
5114	seq_printf(m, "slab %llu\n",
5115		   (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
5116			 stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
5117	seq_printf(m, "sock %llu\n",
5118		   (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
5119
5120	seq_printf(m, "file_mapped %llu\n",
5121		   (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE);
5122	seq_printf(m, "file_dirty %llu\n",
5123		   (u64)stat[MEM_CGROUP_STAT_DIRTY] * PAGE_SIZE);
5124	seq_printf(m, "file_writeback %llu\n",
5125		   (u64)stat[MEM_CGROUP_STAT_WRITEBACK] * PAGE_SIZE);
5126
5127	for (i = 0; i < NR_LRU_LISTS; i++) {
5128		struct mem_cgroup *mi;
5129		unsigned long val = 0;
5130
5131		for_each_mem_cgroup_tree(mi, memcg)
5132			val += mem_cgroup_nr_lru_pages(mi, BIT(i));
5133		seq_printf(m, "%s %llu\n",
5134			   mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
5135	}
5136
5137	seq_printf(m, "slab_reclaimable %llu\n",
5138		   (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE);
5139	seq_printf(m, "slab_unreclaimable %llu\n",
5140		   (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
5141
5142	/* Accumulated memory events */
 
 
 
5143
5144	seq_printf(m, "pgfault %lu\n",
5145		   events[MEM_CGROUP_EVENTS_PGFAULT]);
5146	seq_printf(m, "pgmajfault %lu\n",
5147		   events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
 
 
 
5148
5149	return 0;
5150}
5151
5152static struct cftype memory_files[] = {
5153	{
5154		.name = "current",
5155		.flags = CFTYPE_NOT_ON_ROOT,
5156		.read_u64 = memory_current_read,
5157	},
5158	{
5159		.name = "low",
5160		.flags = CFTYPE_NOT_ON_ROOT,
5161		.seq_show = memory_low_show,
5162		.write = memory_low_write,
5163	},
5164	{
5165		.name = "high",
5166		.flags = CFTYPE_NOT_ON_ROOT,
5167		.seq_show = memory_high_show,
5168		.write = memory_high_write,
5169	},
5170	{
5171		.name = "max",
5172		.flags = CFTYPE_NOT_ON_ROOT,
5173		.seq_show = memory_max_show,
5174		.write = memory_max_write,
5175	},
5176	{
5177		.name = "events",
5178		.flags = CFTYPE_NOT_ON_ROOT,
5179		.file_offset = offsetof(struct mem_cgroup, events_file),
5180		.seq_show = memory_events_show,
5181	},
5182	{
5183		.name = "stat",
5184		.flags = CFTYPE_NOT_ON_ROOT,
5185		.seq_show = memory_stat_show,
5186	},
5187	{ }	/* terminate */
5188};
5189
5190struct cgroup_subsys memory_cgrp_subsys = {
5191	.css_alloc = mem_cgroup_css_alloc,
5192	.css_online = mem_cgroup_css_online,
5193	.css_offline = mem_cgroup_css_offline,
5194	.css_released = mem_cgroup_css_released,
5195	.css_free = mem_cgroup_css_free,
5196	.css_reset = mem_cgroup_css_reset,
 
5197	.can_attach = mem_cgroup_can_attach,
 
5198	.cancel_attach = mem_cgroup_cancel_attach,
5199	.post_attach = mem_cgroup_move_task,
5200	.bind = mem_cgroup_bind,
5201	.dfl_cftypes = memory_files,
5202	.legacy_cftypes = mem_cgroup_legacy_files,
5203	.early_init = 0,
5204};
5205
5206/**
5207 * mem_cgroup_low - check if memory consumption is below the normal range
5208 * @root: the highest ancestor to consider
5209 * @memcg: the memory cgroup to check
5210 *
5211 * Returns %true if memory consumption of @memcg, and that of all
5212 * configurable ancestors up to @root, is below the normal range.
5213 */
5214bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
 
5215{
 
 
 
5216	if (mem_cgroup_disabled())
5217		return false;
 
 
 
5218
5219	/*
5220	 * The toplevel group doesn't have a configurable range, so
5221	 * it's never low when looked at directly, and it is not
5222	 * considered an ancestor when assessing the hierarchy.
 
 
5223	 */
 
 
5224
5225	if (memcg == root_mem_cgroup)
5226		return false;
 
5227
5228	if (page_counter_read(&memcg->memory) >= memcg->low)
5229		return false;
5230
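	/*
	 * Every configurable ancestor up to @root must also be below its
	 * own low boundary; the global root has none and is skipped.
	 */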
5231	while (memcg != root) {
5232		memcg = parent_mem_cgroup(memcg);
 
 
 
5233
5234		if (memcg == root_mem_cgroup)
5235			break;
5236
5237		if (page_counter_read(&memcg->memory) >= memcg->low)
5238			return false;
5239	}
5240	return true;
5241}
5242
5243/**
5244 * mem_cgroup_try_charge - try charging a page
5245 * @page: page to charge
5246 * @mm: mm context of the victim
5247 * @gfp_mask: reclaim mode
5248 * @memcgp: charged memcg return
5249 *
5250 * Try to charge @page to the memcg that @mm belongs to, reclaiming
5251 * pages according to @gfp_mask if necessary.
5252 *
5253 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5254 * Otherwise, an error code is returned.
5255 *
5256 * After page->mapping has been set up, the caller must finalize the
5257 * charge with mem_cgroup_commit_charge().  Or abort the transaction
5258 * with mem_cgroup_cancel_charge() in case page instantiation fails.
5259 */
5260int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
5261			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
5262			  bool compound)
5263{
5264	struct mem_cgroup *memcg = NULL;
5265	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5266	int ret = 0;
5267
5268	if (mem_cgroup_disabled())
5269		goto out;
5270
5271	if (PageSwapCache(page)) {
5272		/*
5273		 * Every swap fault against a single page tries to charge the
5274		 * page, bail as early as possible.  shmem_unuse() encounters
5275		 * already charged pages, too.  The USED bit is protected by
5276		 * the page lock, which serializes swap cache removal, which
5277		 * in turn serializes uncharging.
5278		 */
5279		VM_BUG_ON_PAGE(!PageLocked(page), page);
5280		if (page->mem_cgroup)
5281			goto out;
5282
5283		if (do_swap_account) {
5284			swp_entry_t ent = { .val = page_private(page), };
5285			unsigned short id = lookup_swap_cgroup_id(ent);
5286
5287			rcu_read_lock();
5288			memcg = mem_cgroup_from_id(id);
5289			if (memcg && !css_tryget_online(&memcg->css))
5290				memcg = NULL;
5291			rcu_read_unlock();
5292		}
5293	}
5294
5295	if (!memcg)
 
 
 
5296		memcg = get_mem_cgroup_from_mm(mm);
 
5297
5298	ret = try_charge(memcg, gfp_mask, nr_pages);
5299
5300	css_put(&memcg->css);
5301out:
5302	*memcgp = memcg;
5303	return ret;
5304}
5305
5306/**
5307 * mem_cgroup_commit_charge - commit a page charge
5308 * @page: page to charge
5309 * @memcg: memcg to charge the page to
5310 * @lrucare: page might be on LRU already
5311 *
5312 * Finalize a charge transaction started by mem_cgroup_try_charge(),
5313 * after page->mapping has been set up.  This must happen atomically
5314 * as part of the page instantiation, i.e. under the page table lock
5315 * for anonymous pages, under the page lock for page and swap cache.
5316 *
5317 * In addition, the page must not be on the LRU during the commit, to
5318 * prevent racing with task migration.  If it might be, use @lrucare.
5319 *
5320 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
 
5321 */
5322void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
5323			      bool lrucare, bool compound)
5324{
5325	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5326
5327	VM_BUG_ON_PAGE(!page->mapping, page);
5328	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5329
5330	if (mem_cgroup_disabled())
5331		return;
5332	/*
5333	 * Swap faults will attempt to charge the same page multiple
5334	 * times.  But reuse_swap_page() might have removed the page
5335	 * from swapcache already, so we can't check PageSwapCache().
5336	 */
5337	if (!memcg)
5338		return;
5339
5340	commit_charge(page, memcg, lrucare);
5341
5342	local_irq_disable();
5343	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
5344	memcg_check_events(memcg, page);
5345	local_irq_enable();
5346
5347	if (do_memsw_account() && PageSwapCache(page)) {
5348		swp_entry_t entry = { .val = page_private(page) };
5349		/*
5350		 * The swap entry might not get freed for a long time,
5351		 * let's not wait for it.  The page already received a
5352		 * memory+swap charge, drop the swap entry duplicate.
5353		 */
5354		mem_cgroup_uncharge_swap(entry);
5355	}
5356}
5357
5358/**
5359 * mem_cgroup_cancel_charge - cancel a page charge
5360 * @page: page to charge
5361 * @memcg: memcg to charge the page to
5362 *
5363 * Cancel a charge transaction started by mem_cgroup_try_charge().
5364 */
5365void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
5366		bool compound)
5367{
5368	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5369
5370	if (mem_cgroup_disabled())
5371		return;
5372	/*
5373	 * Swap faults will attempt to charge the same page multiple
5374	 * times.  But reuse_swap_page() might have removed the page
5375	 * from swapcache already, so we can't check PageSwapCache().
5376	 */
5377	if (!memcg)
5378		return;
5379
5380	cancel_charge(memcg, nr_pages);
 
 
5381}
5382
5383static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
5384			   unsigned long nr_anon, unsigned long nr_file,
5385			   unsigned long nr_huge, struct page *dummy_page)
5386{
5387	unsigned long nr_pages = nr_anon + nr_file;
5388	unsigned long flags;
5389
5390	if (!mem_cgroup_is_root(memcg)) {
5391		page_counter_uncharge(&memcg->memory, nr_pages);
5392		if (do_memsw_account())
5393			page_counter_uncharge(&memcg->memsw, nr_pages);
5394		memcg_oom_recover(memcg);
 
 
5395	}
5396
5397	local_irq_save(flags);
5398	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
5399	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
5400	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
5401	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
5402	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
5403	memcg_check_events(memcg, dummy_page);
5404	local_irq_restore(flags);
5405
5406	if (!mem_cgroup_is_root(memcg))
5407		css_put_many(&memcg->css, nr_pages);
5408}
5409
5410static void uncharge_list(struct list_head *page_list)
5411{
5412	struct mem_cgroup *memcg = NULL;
5413	unsigned long nr_anon = 0;
5414	unsigned long nr_file = 0;
5415	unsigned long nr_huge = 0;
5416	unsigned long pgpgout = 0;
5417	struct list_head *next;
5418	struct page *page;
5419
5420	/*
5421	 * Note that the list can be a single page->lru; hence the
5422	 * do-while loop instead of a simple list_for_each_entry().
 
5423	 */
5424	next = page_list->next;
5425	do {
5426		unsigned int nr_pages = 1;
5427
5428		page = list_entry(next, struct page, lru);
5429		next = page->lru.next;
5430
5431		VM_BUG_ON_PAGE(PageLRU(page), page);
5432		VM_BUG_ON_PAGE(page_count(page), page);
5433
5434		if (!page->mem_cgroup)
5435			continue;
5436
5437		/*
5438		 * Nobody should be changing or seriously looking at
5439		 * page->mem_cgroup at this point, we have fully
5440		 * exclusive access to the page.
5441		 */
5442
5443		if (memcg != page->mem_cgroup) {
5444			if (memcg) {
5445				uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5446					       nr_huge, page);
5447				pgpgout = nr_anon = nr_file = nr_huge = 0;
5448			}
5449			memcg = page->mem_cgroup;
5450		}
5451
5452		if (PageTransHuge(page)) {
5453			nr_pages <<= compound_order(page);
5454			VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5455			nr_huge += nr_pages;
5456		}
 
 
5457
5458		if (PageAnon(page))
5459			nr_anon += nr_pages;
5460		else
5461			nr_file += nr_pages;
5462
5463		page->mem_cgroup = NULL;
5464
5465		pgpgout++;
5466	} while (next != page_list);
 
5467
5468	if (memcg)
5469		uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5470			       nr_huge, page);
5471}
5472
5473/**
5474 * mem_cgroup_uncharge - uncharge a page
5475 * @page: page to uncharge
5476 *
5477 * Uncharge a page previously charged with mem_cgroup_try_charge() and
5478 * mem_cgroup_commit_charge().
5479 */
5480void mem_cgroup_uncharge(struct page *page)
5481{
5482	if (mem_cgroup_disabled())
5483		return;
5484
5485	/* Don't touch page->lru of any random page, pre-check: */
5486	if (!page->mem_cgroup)
5487		return;
5488
5489	INIT_LIST_HEAD(&page->lru);
5490	uncharge_list(&page->lru);
 
5491}
5492
5493/**
5494 * mem_cgroup_uncharge_list - uncharge a list of page
5495 * @page_list: list of pages to uncharge
5496 *
5497 * Uncharge a list of pages previously charged with
5498 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
5499 */
5500void mem_cgroup_uncharge_list(struct list_head *page_list)
5501{
5502	if (mem_cgroup_disabled())
5503		return;
5504
5505	if (!list_empty(page_list))
5506		uncharge_list(page_list);
 
 
 
5507}
5508
5509/**
5510 * mem_cgroup_migrate - charge a page's replacement
5511 * @oldpage: currently circulating page
5512 * @newpage: replacement page
5513 *
5514 * Charge @newpage as a replacement page for @oldpage. @oldpage will
5515 * be uncharged upon free.
5516 *
5517 * Both pages must be locked, @newpage->mapping must be set up.
5518 */
5519void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
5520{
5521	struct mem_cgroup *memcg;
5522	unsigned int nr_pages;
5523	bool compound;
5524
5525	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
5526	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
5527	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
5528	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
5529		       newpage);
5530
5531	if (mem_cgroup_disabled())
5532		return;
5533
5534	/* Page cache replacement: new page already charged? */
5535	if (newpage->mem_cgroup)
5536		return;
5537
5538	/* Swapcache readahead pages can get replaced before being charged */
5539	memcg = oldpage->mem_cgroup;
5540	if (!memcg)
5541		return;
5542
5543	/* Force-charge the new page. The old one will be freed soon */
5544	compound = PageTransHuge(newpage);
5545	nr_pages = compound ? hpage_nr_pages(newpage) : 1;
5546
5547	page_counter_charge(&memcg->memory, nr_pages);
5548	if (do_memsw_account())
5549		page_counter_charge(&memcg->memsw, nr_pages);
5550	css_get_many(&memcg->css, nr_pages);
5551
5552	commit_charge(newpage, memcg, false);
 
5553
5554	local_irq_disable();
5555	mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
5556	memcg_check_events(memcg, newpage);
5557	local_irq_enable();
5558}
5559
5560DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
5561EXPORT_SYMBOL(memcg_sockets_enabled_key);
5562
5563void sock_update_memcg(struct sock *sk)
5564{
5565	struct mem_cgroup *memcg;
5566
5567	/* Socket cloning can throw us here with sk->sk_memcg already
5568	 * filled. It won't, however, necessarily happen from
5569	 * process context. So the test for root memcg given
5570	 * the current task's memcg won't help us in this case.
5571	 *
5572	 * Respecting the original socket's memcg is a better
5573	 * decision in this case.
5574	 */
5575	if (sk->sk_memcg) {
5576		BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
5577		css_get(&sk->sk_memcg->css);
5578		return;
5579	}
5580
5581	rcu_read_lock();
5582	memcg = mem_cgroup_from_task(current);
5583	if (memcg == root_mem_cgroup)
5584		goto out;
5585	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
5586		goto out;
5587	if (css_tryget_online(&memcg->css))
5588		sk->sk_memcg = memcg;
5589out:
5590	rcu_read_unlock();
5591}
5592EXPORT_SYMBOL(sock_update_memcg);
5593
5594void sock_release_memcg(struct sock *sk)
5595{
5596	WARN_ON(!sk->sk_memcg);
5597	css_put(&sk->sk_memcg->css);
5598}
5599
5600/**
5601 * mem_cgroup_charge_skmem - charge socket memory
5602 * @memcg: memcg to charge
5603 * @nr_pages: number of pages to charge
 
5604 *
5605 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
5606 * @memcg's configured limit, %false if the charge had to be forced.
5607 */
5608bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 
5609{
5610	gfp_t gfp_mask = GFP_KERNEL;
5611
5612	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5613		struct page_counter *fail;
5614
5615		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
5616			memcg->tcpmem_pressure = 0;
5617			return true;
5618		}
5619		page_counter_charge(&memcg->tcpmem, nr_pages);
5620		memcg->tcpmem_pressure = 1;
 
 
 
 
5621		return false;
5622	}
5623
5624	/* Don't block in the packet receive path */
5625	if (in_softirq())
5626		gfp_mask = GFP_NOWAIT;
5627
5628	this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages);
5629
5630	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
5631		return true;
 
5632
5633	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
5634	return false;
5635}
5636
5637/**
5638 * mem_cgroup_uncharge_skmem - uncharge socket memory
5639 * @memcg: memcg to uncharge
5640 * @nr_pages: number of pages to uncharge
5641 */
5642void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5643{
5644	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5645		page_counter_uncharge(&memcg->tcpmem, nr_pages);
5646		return;
5647	}
5648
5649	this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages);
5650
5651	page_counter_uncharge(&memcg->memory, nr_pages);
5652	css_put_many(&memcg->css, nr_pages);
5653}
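
/*
 * Illustrative sketch (not part of memcontrol.c): how a networking path
 * might pair the two helpers above.  sk->sk_memcg is set up by
 * sock_update_memcg(); every charge must be balanced by an uncharge of the
 * same number of pages when the buffer space is released.  The helper and
 * its policy are hypothetical.
 */
static bool example_charge_sk_buffer(struct sock *sk, unsigned int nr_pages)
{
	if (!sk->sk_memcg)
		return true;	/* no memcg: nothing to account */

	if (mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages))
		return true;	/* the charge fit within the limit */

	/*
	 * The charge was forced past the limit; a caller may treat this as
	 * memory pressure, shrink its buffers, and give the pages back:
	 */
	mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
	return false;
}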
5654
5655static int __init cgroup_memory(char *s)
5656{
5657	char *token;
5658
5659	while ((token = strsep(&s, ",")) != NULL) {
5660		if (!*token)
5661			continue;
5662		if (!strcmp(token, "nosocket"))
5663			cgroup_memory_nosocket = true;
5664		if (!strcmp(token, "nokmem"))
5665			cgroup_memory_nokmem = true;
5666	}
5667	return 0;
5668}
5669__setup("cgroup.memory=", cgroup_memory);
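
/*
 * Usage note (illustration, not part of memcontrol.c): the parser above
 * accepts a comma-separated list on the kernel command line, e.g.
 *
 *	cgroup.memory=nosocket,nokmem
 *
 * which disables socket memory accounting and kernel memory accounting
 * respectively; unknown tokens are silently ignored.
 */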
5670
5671/*
5672 * subsys_initcall() for memory controller.
5673 *
5674 * Some parts like hotcpu_notifier() have to be initialized from this context
5675 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
5676 * everything that doesn't depend on a specific mem_cgroup structure should
5677 * be initialized from here.
5678 */
5679static int __init mem_cgroup_init(void)
5680{
5681	int cpu, node;
5682
5683	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
 
 
 
 
 
 
 
 
 
5684
5685	for_each_possible_cpu(cpu)
5686		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5687			  drain_local_stock);
5688
5689	for_each_node(node) {
5690		struct mem_cgroup_tree_per_node *rtpn;
5691		int zone;
5692
5693		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
5694				    node_online(node) ? node : NUMA_NO_NODE);
5695
5696		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
5697			struct mem_cgroup_tree_per_zone *rtpz;
5698
5699			rtpz = &rtpn->rb_tree_per_zone[zone];
5700			rtpz->rb_root = RB_ROOT;
5701			spin_lock_init(&rtpz->lock);
5702		}
5703		soft_limit_tree.rb_tree_per_node[node] = rtpn;
5704	}
5705
5706	return 0;
5707}
5708subsys_initcall(mem_cgroup_init);
5709
5710#ifdef CONFIG_MEMCG_SWAP
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5711/**
5712 * mem_cgroup_swapout - transfer a memsw charge to swap
5713 * @page: page whose memsw charge to transfer
5714 * @entry: swap entry to move the charge to
5715 *
5716 * Transfer the memsw charge of @page to @entry.
5717 */
5718void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5719{
5720	struct mem_cgroup *memcg;
 
5721	unsigned short oldid;
5722
5723	VM_BUG_ON_PAGE(PageLRU(page), page);
5724	VM_BUG_ON_PAGE(page_count(page), page);
 
 
 
5725
5726	if (!do_memsw_account())
5727		return;
5728
5729	memcg = page->mem_cgroup;
5730
5731	/* Readahead page, never charged */
5732	if (!memcg)
5733		return;
5734
5735	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5736	VM_BUG_ON_PAGE(oldid, page);
5737	mem_cgroup_swap_statistics(memcg, true);
 
 
 
 
 
 
 
 
 
 
 
5738
5739	page->mem_cgroup = NULL;
5740
5741	if (!mem_cgroup_is_root(memcg))
5742		page_counter_uncharge(&memcg->memory, 1);
 
 
 
 
 
 
5743
5744	/*
5745	 * Interrupts should be disabled here because the caller holds the
5746	 * mapping->tree_lock, which is taken with interrupts off. It is
5747	 * important here to have the interrupts disabled because it is the
5748	 * only synchronisation we have for updating the per-CPU variables.
5749	 */
5750	VM_BUG_ON(!irqs_disabled());
5751	mem_cgroup_charge_statistics(memcg, page, false, -1);
5752	memcg_check_events(memcg, page);
 
 
 
5753}
5754
5755/**
5756 * mem_cgroup_try_charge_swap - try charging a swap entry
5757 * @page: page being added to swap
5758 * @entry: swap entry to charge
5759 *
5760 * Try to charge @entry to the memcg that @page belongs to.
5761 *
5762 * Returns 0 on success, -ENOMEM on failure.
5763 */
5764int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
5765{
5766	struct mem_cgroup *memcg;
5767	struct page_counter *counter;
 
5768	unsigned short oldid;
5769
5770	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
5771		return 0;
5772
5773	memcg = page->mem_cgroup;
5774
5775	/* Readahead page, never charged */
5776	if (!memcg)
5777		return 0;
5778
 
 
 
 
 
 
 
5779	if (!mem_cgroup_is_root(memcg) &&
5780	    !page_counter_try_charge(&memcg->swap, 1, &counter))
 
 
 
5781		return -ENOMEM;
 
5782
5783	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5784	VM_BUG_ON_PAGE(oldid, page);
5785	mem_cgroup_swap_statistics(memcg, true);
 
 
 
5786
5787	css_get(&memcg->css);
5788	return 0;
5789}
5790
5791/**
5792 * mem_cgroup_uncharge_swap - uncharge a swap entry
5793 * @entry: swap entry to uncharge
5794 *
5795 * Drop the swap charge associated with @entry.
5796 */
5797void mem_cgroup_uncharge_swap(swp_entry_t entry)
5798{
5799	struct mem_cgroup *memcg;
5800	unsigned short id;
5801
5802	if (!do_swap_account)
5803		return;
5804
5805	id = swap_cgroup_record(entry, 0);
5806	rcu_read_lock();
5807	memcg = mem_cgroup_from_id(id);
5808	if (memcg) {
5809		if (!mem_cgroup_is_root(memcg)) {
5810			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5811				page_counter_uncharge(&memcg->swap, 1);
5812			else
5813				page_counter_uncharge(&memcg->memsw, 1);
5814		}
5815		mem_cgroup_swap_statistics(memcg, false);
5816		css_put(&memcg->css);
5817	}
5818	rcu_read_unlock();
5819}
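
/*
 * Illustrative sketch (not part of memcontrol.c): the swap-entry charge
 * lifecycle implemented by the three functions above.  The helper and call
 * sites below are hypothetical; on the default hierarchy a swap slot is
 * charged when a page is headed for swap and uncharged when the slot is
 * finally freed, while mem_cgroup_swapout() handles the legacy memory+swap
 * accounting at swap-out time.
 */
static int example_swap_out(struct page *page, swp_entry_t entry)
{
	/* About to place @page into the swap cache: charge the slot. */
	if (mem_cgroup_try_charge_swap(page, entry))
		return -ENOMEM;

	/* ... the page is written out and eventually reclaimed ... */

	/* Much later, when the swap slot is freed again: */
	mem_cgroup_uncharge_swap(entry);
	return 0;
}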
5820
5821long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5822{
5823	long nr_swap_pages = get_nr_swap_pages();
5824
5825	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5826		return nr_swap_pages;
5827	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
5828		nr_swap_pages = min_t(long, nr_swap_pages,
5829				      READ_ONCE(memcg->swap.limit) -
5830				      page_counter_read(&memcg->swap));
5831	return nr_swap_pages;
5832}
5833
5834bool mem_cgroup_swap_full(struct page *page)
5835{
5836	struct mem_cgroup *memcg;
5837
5838	VM_BUG_ON_PAGE(!PageLocked(page), page);
5839
5840	if (vm_swap_full())
5841		return true;
5842	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5843		return false;
5844
5845	memcg = page->mem_cgroup;
5846	if (!memcg)
5847		return false;
5848
5849	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
5850		if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
 
 
 
5851			return true;
 
5852
5853	return false;
5854}
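
/*
 * Worked example for the check above (illustration only): with
 * memcg->swap.limit = 262144 pages (1 GiB with 4 KiB pages), the cgroup's
 * swap is reported as "full" once page_counter_read(&memcg->swap) reaches
 * 131072 pages, i.e. usage * 2 >= limit, mirroring the global
 * vm_swap_full() heuristic of treating swap as full at roughly half
 * utilisation.
 */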
5855
5856/* for remembering the boot option */
5857#ifdef CONFIG_MEMCG_SWAP_ENABLED
5858static int really_do_swap_account __initdata = 1;
5859#else
5860static int really_do_swap_account __initdata;
5861#endif
5862
5863static int __init enable_swap_account(char *s)
5864{
5865	if (!strcmp(s, "1"))
5866		really_do_swap_account = 1;
5867	else if (!strcmp(s, "0"))
5868		really_do_swap_account = 0;
5869	return 1;
5870}
5871__setup("swapaccount=", enable_swap_account);
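
/*
 * Usage note (illustration, not part of memcontrol.c): with
 * CONFIG_MEMCG_SWAP built in, swap accounting can still be toggled at boot.
 * For example,
 *
 *	swapaccount=0
 *
 * on the kernel command line clears really_do_swap_account, so
 * mem_cgroup_swap_init() below leaves do_swap_account off, while
 * swapaccount=1 force-enables it.
 */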
5872
5873static u64 swap_current_read(struct cgroup_subsys_state *css,
5874			     struct cftype *cft)
5875{
5876	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5877
5878	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
5879}
5880
5881static int swap_max_show(struct seq_file *m, void *v)
5882{
5883	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5884	unsigned long max = READ_ONCE(memcg->swap.limit);
 
5885
5886	if (max == PAGE_COUNTER_MAX)
5887		seq_puts(m, "max\n");
5888	else
5889		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
 
 
5890
5891	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
5892}
5893
5894static ssize_t swap_max_write(struct kernfs_open_file *of,
5895			      char *buf, size_t nbytes, loff_t off)
5896{
5897	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5898	unsigned long max;
5899	int err;
5900
5901	buf = strstrip(buf);
5902	err = page_counter_memparse(buf, "max", &max);
5903	if (err)
5904		return err;
5905
5906	mutex_lock(&memcg_limit_mutex);
5907	err = page_counter_limit(&memcg->swap, max);
5908	mutex_unlock(&memcg_limit_mutex);
5909	if (err)
5910		return err;
5911
5912	return nbytes;
5913}
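
/*
 * Usage note (illustration, not part of memcontrol.c): on the unified (v2)
 * hierarchy these handlers back the memory.swap.current and memory.swap.max
 * files, so from userspace a 512 MiB limit could be set with something like:
 *
 *	echo 512M > /sys/fs/cgroup/<group>/memory.swap.max
 *	cat /sys/fs/cgroup/<group>/memory.swap.current
 *
 * Writing "max" removes the limit again (PAGE_COUNTER_MAX).  The mount point
 * is an assumption; it depends on how cgroup2 is mounted on the system.
 */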
5914
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5915static struct cftype swap_files[] = {
5916	{
5917		.name = "swap.current",
5918		.flags = CFTYPE_NOT_ON_ROOT,
5919		.read_u64 = swap_current_read,
5920	},
5921	{
 
 
 
 
 
 
5922		.name = "swap.max",
5923		.flags = CFTYPE_NOT_ON_ROOT,
5924		.seq_show = swap_max_show,
5925		.write = swap_max_write,
5926	},
 
 
 
 
 
 
5927	{ }	/* terminate */
5928};
5929
5930static struct cftype memsw_cgroup_files[] = {
5931	{
5932		.name = "memsw.usage_in_bytes",
5933		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
5934		.read_u64 = mem_cgroup_read_u64,
5935	},
5936	{
5937		.name = "memsw.max_usage_in_bytes",
5938		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
5939		.write = mem_cgroup_reset,
5940		.read_u64 = mem_cgroup_read_u64,
5941	},
5942	{
5943		.name = "memsw.limit_in_bytes",
5944		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
5945		.write = mem_cgroup_write,
5946		.read_u64 = mem_cgroup_read_u64,
5947	},
5948	{
5949		.name = "memsw.failcnt",
5950		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
5951		.write = mem_cgroup_reset,
5952		.read_u64 = mem_cgroup_read_u64,
5953	},
5954	{ },	/* terminate */
5955};
5956
5957static int __init mem_cgroup_swap_init(void)
 
 
 
 
 
 
 
 
 
 
 
 
 
5958{
5959	if (!mem_cgroup_disabled() && really_do_swap_account) {
5960		do_swap_account = 1;
5961		WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
5962					       swap_files));
5963		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
5964						  memsw_cgroup_files));
5965	}
5966	return 0;
5967}
5968subsys_initcall(mem_cgroup_swap_init);
5969
5970#endif /* CONFIG_MEMCG_SWAP */
v6.2
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* memcontrol.c - Memory Controller
   3 *
   4 * Copyright IBM Corporation, 2007
   5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
   6 *
   7 * Copyright 2007 OpenVZ SWsoft Inc
   8 * Author: Pavel Emelianov <xemul@openvz.org>
   9 *
  10 * Memory thresholds
  11 * Copyright (C) 2009 Nokia Corporation
  12 * Author: Kirill A. Shutemov
  13 *
  14 * Kernel Memory Controller
  15 * Copyright (C) 2012 Parallels Inc. and Google Inc.
  16 * Authors: Glauber Costa and Suleiman Souhlal
  17 *
  18 * Native page reclaim
  19 * Charge lifetime sanitation
  20 * Lockless page tracking & accounting
  21 * Unified hierarchy configuration model
  22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
  23 *
  24 * Per memcg lru locking
  25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
 
 
 
 
 
 
 
  26 */
  27
  28#include <linux/page_counter.h>
  29#include <linux/memcontrol.h>
  30#include <linux/cgroup.h>
  31#include <linux/pagewalk.h>
  32#include <linux/sched/mm.h>
  33#include <linux/shmem_fs.h>
  34#include <linux/hugetlb.h>
  35#include <linux/pagemap.h>
  36#include <linux/vm_event_item.h>
  37#include <linux/smp.h>
  38#include <linux/page-flags.h>
  39#include <linux/backing-dev.h>
  40#include <linux/bit_spinlock.h>
  41#include <linux/rcupdate.h>
  42#include <linux/limits.h>
  43#include <linux/export.h>
  44#include <linux/mutex.h>
  45#include <linux/rbtree.h>
  46#include <linux/slab.h>
  47#include <linux/swap.h>
  48#include <linux/swapops.h>
  49#include <linux/spinlock.h>
  50#include <linux/eventfd.h>
  51#include <linux/poll.h>
  52#include <linux/sort.h>
  53#include <linux/fs.h>
  54#include <linux/seq_file.h>
  55#include <linux/vmpressure.h>
  56#include <linux/memremap.h>
  57#include <linux/mm_inline.h>
  58#include <linux/swap_cgroup.h>
  59#include <linux/cpu.h>
  60#include <linux/oom.h>
  61#include <linux/lockdep.h>
  62#include <linux/file.h>
  63#include <linux/resume_user_mode.h>
  64#include <linux/psi.h>
  65#include <linux/seq_buf.h>
  66#include "internal.h"
  67#include <net/sock.h>
  68#include <net/ip.h>
  69#include "slab.h"
  70#include "swap.h"
  71
  72#include <linux/uaccess.h>
  73
  74#include <trace/events/vmscan.h>
  75
  76struct cgroup_subsys memory_cgrp_subsys __read_mostly;
  77EXPORT_SYMBOL(memory_cgrp_subsys);
  78
  79struct mem_cgroup *root_mem_cgroup __read_mostly;
  80
  81/* Active memory cgroup to use from an interrupt context */
  82DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
  83EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
  84
  85/* Socket memory accounting disabled? */
  86static bool cgroup_memory_nosocket __ro_after_init;
  87
  88/* Kernel memory accounting disabled? */
  89static bool cgroup_memory_nokmem __ro_after_init;
  90
  91#ifdef CONFIG_CGROUP_WRITEBACK
  92static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
 
 
 
  93#endif
  94
  95/* Whether legacy memory+swap accounting is active */
  96static bool do_memsw_account(void)
  97{
  98	return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
  99}
 100
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 101#define THRESHOLDS_EVENTS_TARGET 128
 102#define SOFTLIMIT_EVENTS_TARGET 1024
 
 103
 104/*
 105 * Cgroups above their limits are maintained in a RB-Tree, independent of
 106 * their hierarchy representation
 107 */
 108
 109struct mem_cgroup_tree_per_node {
 110	struct rb_root rb_root;
 111	struct rb_node *rb_rightmost;
 112	spinlock_t lock;
 113};
 114
 
 
 
 
 115struct mem_cgroup_tree {
 116	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
 117};
 118
 119static struct mem_cgroup_tree soft_limit_tree __read_mostly;
 120
 121/* for OOM */
 122struct mem_cgroup_eventfd_list {
 123	struct list_head list;
 124	struct eventfd_ctx *eventfd;
 125};
 126
 127/*
 128 * cgroup_event represents events which userspace wants to receive.
 129 */
 130struct mem_cgroup_event {
 131	/*
 132	 * memcg which the event belongs to.
 133	 */
 134	struct mem_cgroup *memcg;
 135	/*
 136	 * eventfd to signal userspace about the event.
 137	 */
 138	struct eventfd_ctx *eventfd;
 139	/*
 140	 * Each of these stored in a list by the cgroup.
 141	 */
 142	struct list_head list;
 143	/*
 144	 * register_event() callback will be used to add new userspace
 145	 * waiter for changes related to this event.  Use eventfd_signal()
 146	 * on eventfd to send notification to userspace.
 147	 */
 148	int (*register_event)(struct mem_cgroup *memcg,
 149			      struct eventfd_ctx *eventfd, const char *args);
 150	/*
 151	 * unregister_event() callback will be called when userspace closes
 152	 * the eventfd or when the cgroup is removed.  This callback must be
 153	 * set if you want to provide notification functionality.
 154	 */
 155	void (*unregister_event)(struct mem_cgroup *memcg,
 156				 struct eventfd_ctx *eventfd);
 157	/*
 158	 * All fields below needed to unregister event when
 159	 * userspace closes eventfd.
 160	 */
 161	poll_table pt;
 162	wait_queue_head_t *wqh;
 163	wait_queue_entry_t wait;
 164	struct work_struct remove;
 165};
 166
 167static void mem_cgroup_threshold(struct mem_cgroup *memcg);
 168static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
 169
 170/* Stuffs for move charges at task migration. */
 171/*
 172 * Types of charges to be moved.
 173 */
 174#define MOVE_ANON	0x1U
 175#define MOVE_FILE	0x2U
 176#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
 177
 178/* "mc" and its members are protected by cgroup_mutex */
 179static struct move_charge_struct {
 180	spinlock_t	  lock; /* for from, to */
 181	struct mm_struct  *mm;
 182	struct mem_cgroup *from;
 183	struct mem_cgroup *to;
 184	unsigned long flags;
 185	unsigned long precharge;
 186	unsigned long moved_charge;
 187	unsigned long moved_swap;
 188	struct task_struct *moving_task;	/* a task moving charges */
 189	wait_queue_head_t waitq;		/* a waitq for other context */
 190} mc = {
 191	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
 192	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
 193};
 194
 195/*
 196 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 197 * limit reclaim to prevent infinite loops, if they ever occur.
 198 */
 199#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
 200#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
 201
 
 
 
 
 
 
 
 
 202/* for encoding cft->private value on file */
 203enum res_type {
 204	_MEM,
 205	_MEMSWAP,
 
 206	_KMEM,
 207	_TCP,
 208};
 209
 210#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
 211#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
 212#define MEMFILE_ATTR(val)	((val) & 0xffff)
 213
 214/*
 215 * Iteration constructs for visiting all cgroups (under a tree).  If
 216 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 217 * be used for reference counting.
 218 */
 219#define for_each_mem_cgroup_tree(iter, root)		\
 220	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
 221	     iter != NULL;				\
 222	     iter = mem_cgroup_iter(root, iter, NULL))
 223
 224#define for_each_mem_cgroup(iter)			\
 225	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
 226	     iter != NULL;				\
 227	     iter = mem_cgroup_iter(NULL, iter, NULL))
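
/*
 * Illustrative sketch (not part of memcontrol.c): using the iterators
 * defined above.  Because each step takes a css reference, a premature exit
 * from the loop must hand the last returned memcg to mem_cgroup_iter_break(),
 * exactly as the comment above requires.  The helper and threshold are
 * hypothetical.
 */
static bool example_usage_above(struct mem_cgroup *root, unsigned long nr_pages)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, root) {
		if (page_counter_read(&iter->memory) > nr_pages) {
			/* Early exit: drop the reference held on @iter. */
			mem_cgroup_iter_break(root, iter);
			return true;
		}
	}
	return false;
}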
 228
 229static inline bool task_is_dying(void)
 230{
 231	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
 232		(current->flags & PF_EXITING);
 233}
 234
 235/* Some nice accessors for the vmpressure. */
 236struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
 237{
 238	if (!memcg)
 239		memcg = root_mem_cgroup;
 240	return &memcg->vmpressure;
 241}
 242
 243struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
 244{
 245	return container_of(vmpr, struct mem_cgroup, vmpressure);
 246}
 247
 248#ifdef CONFIG_MEMCG_KMEM
 249static DEFINE_SPINLOCK(objcg_lock);
 250
 251bool mem_cgroup_kmem_disabled(void)
 252{
 253	return cgroup_memory_nokmem;
 254}
 255
 256static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
 257				      unsigned int nr_pages);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 258
 259static void obj_cgroup_release(struct percpu_ref *ref)
 260{
 261	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
 262	unsigned int nr_bytes;
 263	unsigned int nr_pages;
 264	unsigned long flags;
 265
 266	/*
 267	 * At this point all allocated objects are freed, and
 268	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
 269	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
 270	 *
 271	 * The following sequence can lead to it:
 272	 * 1) CPU0: objcg == stock->cached_objcg
 273	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
 274	 *          PAGE_SIZE bytes are charged
 275	 * 3) CPU1: a process from another memcg is allocating something,
 276	 *          the stock is flushed,
 277	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
 278	 * 4) CPU0: we release this object,
 279	 *          92 bytes are added to stock->nr_bytes
 280	 * 5) CPU0: stock is flushed,
 281	 *          92 bytes are added to objcg->nr_charged_bytes
 282	 *
 283	 * As a result, nr_charged_bytes == PAGE_SIZE.
 284	 * This page will be uncharged in obj_cgroup_release().
 285	 */
 286	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
 287	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
 288	nr_pages = nr_bytes >> PAGE_SHIFT;
 289
 290	if (nr_pages)
 291		obj_cgroup_uncharge_pages(objcg, nr_pages);
 292
 293	spin_lock_irqsave(&objcg_lock, flags);
 294	list_del(&objcg->list);
 295	spin_unlock_irqrestore(&objcg_lock, flags);
 296
 297	percpu_ref_exit(ref);
 298	kfree_rcu(objcg, rcu);
 299}
 300
 301static struct obj_cgroup *obj_cgroup_alloc(void)
 302{
 303	struct obj_cgroup *objcg;
 304	int ret;
 305
 306	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
 307	if (!objcg)
 308		return NULL;
 309
 310	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
 311			      GFP_KERNEL);
 312	if (ret) {
 313		kfree(objcg);
 314		return NULL;
 315	}
 316	INIT_LIST_HEAD(&objcg->list);
 317	return objcg;
 318}
 319
 320static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
 321				  struct mem_cgroup *parent)
 322{
 323	struct obj_cgroup *objcg, *iter;
 324
 325	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
 326
 327	spin_lock_irq(&objcg_lock);
 328
 329	/* 1) Ready to reparent active objcg. */
 330	list_add(&objcg->list, &memcg->objcg_list);
 331	/* 2) Reparent active objcg and already reparented objcgs to parent. */
 332	list_for_each_entry(iter, &memcg->objcg_list, list)
 333		WRITE_ONCE(iter->memcg, parent);
 334	/* 3) Move already reparented objcgs to the parent's list */
 335	list_splice(&memcg->objcg_list, &parent->objcg_list);
 336
 337	spin_unlock_irq(&objcg_lock);
 338
 339	percpu_ref_kill(&objcg->refcnt);
 340}
 341
 342/*
 343 * A lot of the calls to the cache allocation functions are expected to be
 344 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
 345 * conditional to this static branch, we'll have to allow modules that do
 346 * kmem_cache_alloc and such to see this symbol as well
 347 */
 348DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
 349EXPORT_SYMBOL(memcg_kmem_enabled_key);
 350#endif
 
 
 
 
 
 
 
 
 
 
 351
 352/**
 353 * mem_cgroup_css_from_page - css of the memcg associated with a page
 354 * @page: page of interest
 355 *
 356 * If memcg is bound to the default hierarchy, css of the memcg associated
 357 * with @page is returned.  The returned css remains associated with @page
 358 * until it is released.
 359 *
 360 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 361 * is returned.
 362 */
 363struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
 364{
 365	struct mem_cgroup *memcg;
 366
 367	memcg = page_memcg(page);
 368
 369	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
 370		memcg = root_mem_cgroup;
 371
 372	return &memcg->css;
 373}
 374
 375/**
 376 * page_cgroup_ino - return inode number of the memcg a page is charged to
 377 * @page: the page
 378 *
 379 * Look up the closest online ancestor of the memory cgroup @page is charged to
 380 * and return its inode number or 0 if @page is not charged to any cgroup. It
 381 * is safe to call this function without holding a reference to @page.
 382 *
 383 * Note, this function is inherently racy, because there is nothing to prevent
 384 * the cgroup inode from getting torn down and potentially reallocated a moment
 385 * after page_cgroup_ino() returns, so it only should be used by callers that
 386 * do not care (such as procfs interfaces).
 387 */
 388ino_t page_cgroup_ino(struct page *page)
 389{
 390	struct mem_cgroup *memcg;
 391	unsigned long ino = 0;
 392
 393	rcu_read_lock();
 394	memcg = page_memcg_check(page);
 395
 396	while (memcg && !(memcg->css.flags & CSS_ONLINE))
 397		memcg = parent_mem_cgroup(memcg);
 398	if (memcg)
 399		ino = cgroup_ino(memcg->css.cgroup);
 400	rcu_read_unlock();
 401	return ino;
 402}
 403
 404static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
  405					 struct mem_cgroup_tree_per_node *mctz,
  406					 unsigned long new_usage_in_excess)
 407{
 408	struct rb_node **p = &mctz->rb_root.rb_node;
 409	struct rb_node *parent = NULL;
 410	struct mem_cgroup_per_node *mz_node;
 411	bool rightmost = true;
 412
 413	if (mz->on_tree)
 414		return;
 415
 416	mz->usage_in_excess = new_usage_in_excess;
 417	if (!mz->usage_in_excess)
 418		return;
 419	while (*p) {
 420		parent = *p;
 421		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
 422					tree_node);
 423		if (mz->usage_in_excess < mz_node->usage_in_excess) {
 424			p = &(*p)->rb_left;
 425			rightmost = false;
 426		} else {
 
 
 
 427			p = &(*p)->rb_right;
 428		}
 429	}
 430
 431	if (rightmost)
 432		mctz->rb_rightmost = &mz->tree_node;
 433
 434	rb_link_node(&mz->tree_node, parent, p);
 435	rb_insert_color(&mz->tree_node, &mctz->rb_root);
 436	mz->on_tree = true;
 437}
 438
 439static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
 440					 struct mem_cgroup_tree_per_node *mctz)
 441{
 442	if (!mz->on_tree)
 443		return;
 444
 445	if (&mz->tree_node == mctz->rb_rightmost)
 446		mctz->rb_rightmost = rb_prev(&mz->tree_node);
 447
 448	rb_erase(&mz->tree_node, &mctz->rb_root);
 449	mz->on_tree = false;
 450}
 451
 452static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
 453				       struct mem_cgroup_tree_per_node *mctz)
 454{
 455	unsigned long flags;
 456
 457	spin_lock_irqsave(&mctz->lock, flags);
 458	__mem_cgroup_remove_exceeded(mz, mctz);
 459	spin_unlock_irqrestore(&mctz->lock, flags);
 460}
 461
 462static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
 463{
 464	unsigned long nr_pages = page_counter_read(&memcg->memory);
 465	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
 466	unsigned long excess = 0;
 467
 468	if (nr_pages > soft_limit)
 469		excess = nr_pages - soft_limit;
 470
 471	return excess;
 472}
 473
 474static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
 475{
 476	unsigned long excess;
 477	struct mem_cgroup_per_node *mz;
 478	struct mem_cgroup_tree_per_node *mctz;
 479
 480	mctz = soft_limit_tree.rb_tree_per_node[nid];
 481	if (!mctz)
 482		return;
 483	/*
 484	 * Necessary to update all ancestors when hierarchy is used,
 485	 * because their event counter is not touched.
 486	 */
 487	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
 488		mz = memcg->nodeinfo[nid];
 489		excess = soft_limit_excess(memcg);
 490		/*
 491		 * We have to update the tree if mz is on RB-tree or
 492		 * mem is over its softlimit.
 493		 */
 494		if (excess || mz->on_tree) {
 495			unsigned long flags;
 496
 497			spin_lock_irqsave(&mctz->lock, flags);
 498			/* if on-tree, remove it */
 499			if (mz->on_tree)
 500				__mem_cgroup_remove_exceeded(mz, mctz);
 501			/*
 502			 * Insert again. mz->usage_in_excess will be updated.
 503			 * If excess is 0, no tree ops.
 504			 */
 505			__mem_cgroup_insert_exceeded(mz, mctz, excess);
 506			spin_unlock_irqrestore(&mctz->lock, flags);
 507		}
 508	}
 509}
 510
 511static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
 512{
 513	struct mem_cgroup_tree_per_node *mctz;
 514	struct mem_cgroup_per_node *mz;
 515	int nid;
 516
 517	for_each_node(nid) {
 518		mz = memcg->nodeinfo[nid];
 519		mctz = soft_limit_tree.rb_tree_per_node[nid];
 520		if (mctz)
 521			mem_cgroup_remove_exceeded(mz, mctz);
 
 522	}
 523}
 524
 525static struct mem_cgroup_per_node *
 526__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
 527{
 528	struct mem_cgroup_per_node *mz;
 
 529
 530retry:
 531	mz = NULL;
 532	if (!mctz->rb_rightmost)
 
 533		goto done;		/* Nothing to reclaim from */
 534
 535	mz = rb_entry(mctz->rb_rightmost,
 536		      struct mem_cgroup_per_node, tree_node);
 537	/*
 538	 * Remove the node now but someone else can add it back,
 539	 * we will add it back at the end of reclaim to its correct
 540	 * position in the tree.
 541	 */
 542	__mem_cgroup_remove_exceeded(mz, mctz);
 543	if (!soft_limit_excess(mz->memcg) ||
 544	    !css_tryget(&mz->memcg->css))
 545		goto retry;
 546done:
 547	return mz;
 548}
 549
 550static struct mem_cgroup_per_node *
 551mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
 552{
 553	struct mem_cgroup_per_node *mz;
 554
 555	spin_lock_irq(&mctz->lock);
 556	mz = __mem_cgroup_largest_soft_limit_node(mctz);
 557	spin_unlock_irq(&mctz->lock);
 558	return mz;
 559}
 560
 561/*
 562 * memcg and lruvec stats flushing
 563 *
 564 * Many codepaths leading to stats update or read are performance sensitive and
 565 * adding stats flushing in such codepaths is not desirable. So, to optimize the
 566 * flushing the kernel does:
 567 *
 568 * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
 569 *    rstat update tree grow unbounded.
 570 *
 571 * 2) Flush the stats synchronously on reader side only when there are more than
 572 *    (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization can let
 573 *    the stats be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus) events,
 574 *    but only for 2 seconds due to (1).
 575 */
 576static void flush_memcg_stats_dwork(struct work_struct *w);
 577static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
 578static DEFINE_SPINLOCK(stats_flush_lock);
 579static DEFINE_PER_CPU(unsigned int, stats_updates);
 580static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
 581static u64 flush_next_time;
 582
 583#define FLUSH_TIME (2UL*HZ)
 584
 585/*
 586 * Accessors to ensure that preemption is disabled on PREEMPT_RT because it can
 587 * not rely on this as part of an acquired spinlock_t lock. These functions are
 588 * never used in hardirq context on PREEMPT_RT and therefore disabling preemption
 589 * is sufficient.
 590 */
 591static void memcg_stats_lock(void)
 
 592{
 593	preempt_disable_nested();
 594	VM_WARN_ON_IRQS_ENABLED();
 595}
 596
 597static void __memcg_stats_lock(void)
 598{
 599	preempt_disable_nested();
 600}
 601
 602static void memcg_stats_unlock(void)
 603{
 604	preempt_enable_nested();
 605}
 606
 607static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
 608{
 609	unsigned int x;
 610
 611	cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
 612
 613	x = __this_cpu_add_return(stats_updates, abs(val));
 614	if (x > MEMCG_CHARGE_BATCH) {
 615		/*
 616		 * If stats_flush_threshold exceeds the threshold
 617		 * (>num_online_cpus()), cgroup stats update will be triggered
 618		 * in __mem_cgroup_flush_stats(). Increasing this var further
 619		 * is redundant and simply adds overhead in atomic update.
 620		 */
 621		if (atomic_read(&stats_flush_threshold) <= num_online_cpus())
 622			atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold);
 623		__this_cpu_write(stats_updates, 0);
 624	}
 625}
 626
 627static void __mem_cgroup_flush_stats(void)
 628{
 629	unsigned long flag;
 630
 631	if (!spin_trylock_irqsave(&stats_flush_lock, flag))
 632		return;
 633
 634	flush_next_time = jiffies_64 + 2*FLUSH_TIME;
 635	cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
 636	atomic_set(&stats_flush_threshold, 0);
 637	spin_unlock_irqrestore(&stats_flush_lock, flag);
 638}
 639
 640void mem_cgroup_flush_stats(void)
 641{
 642	if (atomic_read(&stats_flush_threshold) > num_online_cpus())
 643		__mem_cgroup_flush_stats();
 644}
 645
 646void mem_cgroup_flush_stats_delayed(void)
 647{
 648	if (time_after64(jiffies_64, flush_next_time))
 649		mem_cgroup_flush_stats();
 650}
 651
 652static void flush_memcg_stats_dwork(struct work_struct *w)
 653{
 654	__mem_cgroup_flush_stats();
 655	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
 656}
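
/*
 * Illustrative sketch (not part of memcontrol.c): the reader-side pattern
 * the flushing comment above describes.  A stats consumer asks for a flush
 * first, so the values it reads are at most one FLUSH_TIME interval (or
 * MEMCG_CHARGE_BATCH * nr_cpus update events) out of date, and then samples
 * the aggregated counters.  NR_FILE_PAGES is only an example item.
 */
static unsigned long example_read_file_pages(struct mem_cgroup *memcg)
{
	mem_cgroup_flush_stats();
	return memcg_page_state(memcg, NR_FILE_PAGES);
}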
 657
 658/* Subset of vm_event_item to report for memcg event stats */
 659static const unsigned int memcg_vm_event_stat[] = {
 660	PGPGIN,
 661	PGPGOUT,
 662	PGSCAN_KSWAPD,
 663	PGSCAN_DIRECT,
 664	PGSCAN_KHUGEPAGED,
 665	PGSTEAL_KSWAPD,
 666	PGSTEAL_DIRECT,
 667	PGSTEAL_KHUGEPAGED,
 668	PGFAULT,
 669	PGMAJFAULT,
 670	PGREFILL,
 671	PGACTIVATE,
 672	PGDEACTIVATE,
 673	PGLAZYFREE,
 674	PGLAZYFREED,
 675#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
 676	ZSWPIN,
 677	ZSWPOUT,
 678#endif
 679#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 680	THP_FAULT_ALLOC,
 681	THP_COLLAPSE_ALLOC,
 682#endif
 683};
 684
 685#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
 686static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
 687
 688static void init_memcg_events(void)
 689{
 690	int i;
 691
 692	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
 693		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
 694}
 695
 696static inline int memcg_events_index(enum vm_event_item idx)
 697{
 698	return mem_cgroup_events_index[idx] - 1;
 699}
 700
 701struct memcg_vmstats_percpu {
 702	/* Local (CPU and cgroup) page state & events */
 703	long			state[MEMCG_NR_STAT];
 704	unsigned long		events[NR_MEMCG_EVENTS];
 705
 706	/* Delta calculation for lockless upward propagation */
 707	long			state_prev[MEMCG_NR_STAT];
 708	unsigned long		events_prev[NR_MEMCG_EVENTS];
 709
 710	/* Cgroup1: threshold notifications & softlimit tree updates */
 711	unsigned long		nr_page_events;
 712	unsigned long		targets[MEM_CGROUP_NTARGETS];
 713};
 714
 715struct memcg_vmstats {
 716	/* Aggregated (CPU and subtree) page state & events */
 717	long			state[MEMCG_NR_STAT];
 718	unsigned long		events[NR_MEMCG_EVENTS];
 719
 720	/* Pending child counts during tree propagation */
 721	long			state_pending[MEMCG_NR_STAT];
 722	unsigned long		events_pending[NR_MEMCG_EVENTS];
 723};
 724
 725unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
 726{
 727	long x = READ_ONCE(memcg->vmstats->state[idx]);
 728#ifdef CONFIG_SMP
 729	if (x < 0)
 730		x = 0;
 731#endif
 732	return x;
 733}
 734
 735/**
 736 * __mod_memcg_state - update cgroup memory statistics
 737 * @memcg: the memory cgroup
 738 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 739 * @val: delta to add to the counter, can be negative
 740 */
 741void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
 742{
 743	if (mem_cgroup_disabled())
 744		return;
 745
 746	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
 747	memcg_rstat_updated(memcg, val);
 748}
 749
 750/* idx can be of type enum memcg_stat_item or node_stat_item. */
 751static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
 752{
 753	long x = 0;
 754	int cpu;
 755
 756	for_each_possible_cpu(cpu)
 757		x += per_cpu(memcg->vmstats_percpu->state[idx], cpu);
 758#ifdef CONFIG_SMP
 759	if (x < 0)
 760		x = 0;
 761#endif
 762	return x;
 763}
 764
 765void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 766			      int val)
 
 767{
 768	struct mem_cgroup_per_node *pn;
 769	struct mem_cgroup *memcg;
 770
 771	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 772	memcg = pn->memcg;
 773
 774	/*
 775	 * The callers from rmap rely on disabled preemption because they never
 776	 * update their counters from interrupt context. For those
 777	 * counters we check that the update is never performed from an
 778	 * interrupt context, while other callers need to have interrupts disabled.
 779	 */
 780	__memcg_stats_lock();
 781	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
 782		switch (idx) {
 783		case NR_ANON_MAPPED:
 784		case NR_FILE_MAPPED:
 785		case NR_ANON_THPS:
 786		case NR_SHMEM_PMDMAPPED:
 787		case NR_FILE_PMDMAPPED:
 788			WARN_ON_ONCE(!in_task());
 789			break;
 790		default:
 791			VM_WARN_ON_IRQS_ENABLED();
 792		}
 793	}
 794
 795	/* Update memcg */
 796	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
 797
 798	/* Update lruvec */
 799	__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
 800
 801	memcg_rstat_updated(memcg, val);
 802	memcg_stats_unlock();
 803}
 804
 805/**
 806 * __mod_lruvec_state - update lruvec memory statistics
 807 * @lruvec: the lruvec
 808 * @idx: the stat item
 809 * @val: delta to add to the counter, can be negative
 810 *
 811 * The lruvec is the intersection of the NUMA node and a cgroup. This
 812 * function updates all three counters that are affected by a
 813 * change of state at this level: per-node, per-cgroup, per-lruvec.
 814 */
 815void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 816			int val)
 817{
 818	/* Update node */
 819	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
 820
 821	/* Update memcg and lruvec */
 822	if (!mem_cgroup_disabled())
 823		__mod_memcg_lruvec_state(lruvec, idx, val);
 824}
 825
 826void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
 827			     int val)
 828{
 829	struct page *head = compound_head(page); /* rmap on tail pages */
 830	struct mem_cgroup *memcg;
 831	pg_data_t *pgdat = page_pgdat(page);
 832	struct lruvec *lruvec;
 833
 834	rcu_read_lock();
 835	memcg = page_memcg(head);
 836	/* Untracked pages have no memcg, no lruvec. Update only the node */
 837	if (!memcg) {
 838		rcu_read_unlock();
 839		__mod_node_page_state(pgdat, idx, val);
 840		return;
 841	}
 842
 843	lruvec = mem_cgroup_lruvec(memcg, pgdat);
 844	__mod_lruvec_state(lruvec, idx, val);
 845	rcu_read_unlock();
 846}
 847EXPORT_SYMBOL(__mod_lruvec_page_state);
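
/*
 * Illustrative sketch (not part of memcontrol.c): a typical caller of the
 * double-underscore helper above.  Per the checks in
 * __mod_memcg_lruvec_state(), most items must be updated with interrupts
 * disabled; the non-underscored wrapper in the headers is expected to do
 * this, and here it is spelled out by hand.  NR_FILE_DIRTY is only an
 * example item, and the helper itself is hypothetical.
 */
static void example_account_dirtied(struct page *page)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, NR_FILE_DIRTY, 1);
	local_irq_restore(flags);
}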
 848
 849void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
 850{
 851	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
 852	struct mem_cgroup *memcg;
 853	struct lruvec *lruvec;
 854
 855	rcu_read_lock();
 856	memcg = mem_cgroup_from_slab_obj(p);
 857
 858	/*
 859	 * Untracked pages have no memcg, no lruvec. Update only the
 860	 * node. If we reparent the slab objects to the root memcg,
 861	 * when we free the slab object, we need to update the per-memcg
 862	 * vmstats to keep it correct for the root memcg.
 863	 */
 864	if (!memcg) {
 865		__mod_node_page_state(pgdat, idx, val);
 866	} else {
 867		lruvec = mem_cgroup_lruvec(memcg, pgdat);
 868		__mod_lruvec_state(lruvec, idx, val);
 869	}
 870	rcu_read_unlock();
 871}
 872
 873/**
 874 * __count_memcg_events - account VM events in a cgroup
 875 * @memcg: the memory cgroup
 876 * @idx: the event item
 877 * @count: the number of events that occurred
 878 */
 879void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
 880			  unsigned long count)
 881{
 882	int index = memcg_events_index(idx);
 883
 884	if (mem_cgroup_disabled() || index < 0)
 885		return;
 886
 887	memcg_stats_lock();
 888	__this_cpu_add(memcg->vmstats_percpu->events[index], count);
 889	memcg_rstat_updated(memcg, count);
 890	memcg_stats_unlock();
 891}
 892
 893static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
 
 894{
 895	int index = memcg_events_index(event);
 
 896
 897	if (index < 0)
 898		return 0;
 899	return READ_ONCE(memcg->vmstats->events[index]);
 900}
 901
 902static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
 903{
 904	long x = 0;
 905	int cpu;
 906	int index = memcg_events_index(event);
 907
 908	if (index < 0)
 909		return 0;
 910
 911	for_each_possible_cpu(cpu)
 912		x += per_cpu(memcg->vmstats_percpu->events[index], cpu);
 913	return x;
 
 
 914}
 915
 916static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 917					 int nr_pages)
 918{
 919	/* pagein of a big page is an event. So, ignore page size */
 920	if (nr_pages > 0)
 921		__count_memcg_events(memcg, PGPGIN, 1);
 922	else {
 923		__count_memcg_events(memcg, PGPGOUT, 1);
 924		nr_pages = -nr_pages; /* for event */
 925	}
 926
 927	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
 
 
 928}
 929
 930static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
 931				       enum mem_cgroup_events_target target)
 932{
 933	unsigned long val, next;
 934
 935	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
 936	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
 937	/* from time_after() in jiffies.h */
 938	if ((long)(next - val) < 0) {
 939		switch (target) {
 940		case MEM_CGROUP_TARGET_THRESH:
 941			next = val + THRESHOLDS_EVENTS_TARGET;
 942			break;
 943		case MEM_CGROUP_TARGET_SOFTLIMIT:
 944			next = val + SOFTLIMIT_EVENTS_TARGET;
 945			break;
 
 
 
 946		default:
 947			break;
 948		}
 949		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
 950		return true;
 951	}
 952	return false;
 953}
 954
 955/*
 956 * Check events in order.
 957 *
 958 */
 959static void memcg_check_events(struct mem_cgroup *memcg, int nid)
 960{
 961	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 962		return;
 963
 964	/* threshold event is triggered in finer grain than soft limit */
 965	if (unlikely(mem_cgroup_event_ratelimit(memcg,
 966						MEM_CGROUP_TARGET_THRESH))) {
 967		bool do_softlimit;
 
 968
 969		do_softlimit = mem_cgroup_event_ratelimit(memcg,
 970						MEM_CGROUP_TARGET_SOFTLIMIT);
 
 
 
 
 971		mem_cgroup_threshold(memcg);
 972		if (unlikely(do_softlimit))
 973			mem_cgroup_update_tree(memcg, nid);
 
 
 
 
 974	}
 975}
 976
 977struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 978{
 979	/*
 980	 * mm_update_next_owner() may clear mm->owner to NULL
 981	 * if it races with swapoff, page migration, etc.
 982	 * So this can be called with p == NULL.
 983	 */
 984	if (unlikely(!p))
 985		return NULL;
 986
 987	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
 988}
 989EXPORT_SYMBOL(mem_cgroup_from_task);
 990
 991static __always_inline struct mem_cgroup *active_memcg(void)
 992{
 993	if (!in_task())
 994		return this_cpu_read(int_active_memcg);
 995	else
 996		return current->active_memcg;
 997}
 998
 999/**
1000 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
1001 * @mm: mm from which memcg should be extracted. It can be NULL.
1002 *
1003 * Obtain a reference on mm->memcg and returns it if successful. If mm
1004 * is NULL, then the memcg is chosen as follows:
1005 * 1) The active memcg, if set.
1006 * 2) current->mm->memcg, if available
1007 * 3) root memcg
1008 * If mem_cgroup is disabled, NULL is returned.
1009 */
1010struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1011{
1012	struct mem_cgroup *memcg;
1013
1014	if (mem_cgroup_disabled())
1015		return NULL;
1016
1017	/*
1018	 * Page cache insertions can happen without an
1019	 * actual mm context, e.g. during disk probing
1020	 * on boot, loopback IO, acct() writes etc.
1021	 *
1022	 * No need to css_get on root memcg as the reference
1023	 * counting is disabled on the root level in the
1024	 * cgroup core. See CSS_NO_REF.
1025	 */
1026	if (unlikely(!mm)) {
1027		memcg = active_memcg();
1028		if (unlikely(memcg)) {
1029			/* remote memcg must hold a ref */
1030			css_get(&memcg->css);
1031			return memcg;
1032		}
1033		mm = current->mm;
1034		if (unlikely(!mm))
1035			return root_mem_cgroup;
1036	}
1037
1038	rcu_read_lock();
1039	do {
1040		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1041		if (unlikely(!memcg))
 
 
 
 
1042			memcg = root_mem_cgroup;
1043	} while (!css_tryget(&memcg->css));
 
 
 
 
 
1044	rcu_read_unlock();
1045	return memcg;
1046}
1047EXPORT_SYMBOL(get_mem_cgroup_from_mm);
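
/*
 * Illustrative sketch (not part of memcontrol.c): the reference produced by
 * get_mem_cgroup_from_mm() must be dropped with css_put() once the caller is
 * done with the memcg, as in this hypothetical helper.
 */
static unsigned long example_mm_memory_usage(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
	unsigned long nr_pages = 0;

	if (memcg) {
		nr_pages = page_counter_read(&memcg->memory);
		css_put(&memcg->css);
	}
	return nr_pages;
}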
1048
1049static __always_inline bool memcg_kmem_bypass(void)
1050{
1051	/* Allow remote memcg charging from any context. */
1052	if (unlikely(active_memcg()))
1053		return false;
1054
1055	/* Memcg to charge can't be determined. */
1056	if (!in_task() || !current->mm || (current->flags & PF_KTHREAD))
1057		return true;
1058
1059	return false;
1060}
1061
1062/**
1063 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1064 * @root: hierarchy root
1065 * @prev: previously returned memcg, NULL on first invocation
1066 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1067 *
1068 * Returns references to children of the hierarchy below @root, or
1069 * @root itself, or %NULL after a full round-trip.
1070 *
1071 * Caller must pass the return value in @prev on subsequent
1072 * invocations for reference counting, or use mem_cgroup_iter_break()
1073 * to cancel a hierarchy walk before the round-trip is complete.
1074 *
1075 * Reclaimers can specify a node in @reclaim to divide up the memcgs
1076 * in the hierarchy among all concurrent reclaimers operating on the
1077 * same node.
1078 */
1079struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1080				   struct mem_cgroup *prev,
1081				   struct mem_cgroup_reclaim_cookie *reclaim)
1082{
1083	struct mem_cgroup_reclaim_iter *iter;
1084	struct cgroup_subsys_state *css = NULL;
1085	struct mem_cgroup *memcg = NULL;
1086	struct mem_cgroup *pos = NULL;
1087
1088	if (mem_cgroup_disabled())
1089		return NULL;
1090
1091	if (!root)
1092		root = root_mem_cgroup;
1093
 
 
 
 
 
 
 
 
 
1094	rcu_read_lock();
1095
1096	if (reclaim) {
1097		struct mem_cgroup_per_node *mz;
1098
1099		mz = root->nodeinfo[reclaim->pgdat->node_id];
1100		iter = &mz->iter;
1101
1102		/*
1103		 * On start, join the current reclaim iteration cycle.
1104		 * Exit when a concurrent walker completes it.
1105		 */
1106		if (!prev)
1107			reclaim->generation = iter->generation;
1108		else if (reclaim->generation != iter->generation)
1109			goto out_unlock;
1110
1111		while (1) {
1112			pos = READ_ONCE(iter->position);
1113			if (!pos || css_tryget(&pos->css))
1114				break;
1115			/*
1116			 * css reference reached zero, so iter->position will
1117			 * be cleared by ->css_released. However, we should not
1118			 * rely on this happening soon, because ->css_released
1119			 * is called from a work queue, and by busy-waiting we
1120			 * might block it. So we clear iter->position right
1121			 * away.
1122			 */
1123			(void)cmpxchg(&iter->position, pos, NULL);
1124		}
1125	} else if (prev) {
1126		pos = prev;
1127	}
1128
1129	if (pos)
1130		css = &pos->css;
1131
1132	for (;;) {
1133		css = css_next_descendant_pre(css, &root->css);
1134		if (!css) {
1135			/*
1136			 * Reclaimers share the hierarchy walk, and a
1137			 * new one might jump in right at the end of
1138			 * the hierarchy - make sure they see at least
1139			 * one group and restart from the beginning.
1140			 */
1141			if (!prev)
1142				continue;
1143			break;
1144		}
1145
1146		/*
1147		 * Verify the css and acquire a reference.  The root
1148		 * is provided by the caller, so we know it's alive
1149		 * and kicking, and don't take an extra reference.
1150		 */
1151		if (css == &root->css || css_tryget(css)) {
1152			memcg = mem_cgroup_from_css(css);
 
1153			break;
1154		}
 
 
 
 
1155	}
1156
1157	if (reclaim) {
1158		/*
1159		 * The position could have already been updated by a competing
1160		 * thread, so check that the value hasn't changed since we read
1161		 * it to avoid reclaiming from the same cgroup twice.
1162		 */
1163		(void)cmpxchg(&iter->position, pos, memcg);
1164
1165		if (pos)
1166			css_put(&pos->css);
1167
1168		if (!memcg)
1169			iter->generation++;
 
 
1170	}
1171
1172out_unlock:
1173	rcu_read_unlock();
 
1174	if (prev && prev != root)
1175		css_put(&prev->css);
1176
1177	return memcg;
1178}
1179
1180/**
1181 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1182 * @root: hierarchy root
1183 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1184 */
1185void mem_cgroup_iter_break(struct mem_cgroup *root,
1186			   struct mem_cgroup *prev)
1187{
1188	if (!root)
1189		root = root_mem_cgroup;
1190	if (prev && prev != root)
1191		css_put(&prev->css);
1192}
1193
1194static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1195					struct mem_cgroup *dead_memcg)
1196{
1197	struct mem_cgroup_reclaim_iter *iter;
1198	struct mem_cgroup_per_node *mz;
1199	int nid;
1200
1201	for_each_node(nid) {
1202		mz = from->nodeinfo[nid];
1203		iter = &mz->iter;
1204		cmpxchg(&iter->position, dead_memcg, NULL);
1205	}
1206}
1207
1208static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1209{
1210	struct mem_cgroup *memcg = dead_memcg;
1211	struct mem_cgroup *last;
 
 
 
1212
1213	do {
1214		__invalidate_reclaim_iterators(memcg, dead_memcg);
1215		last = memcg;
1216	} while ((memcg = parent_mem_cgroup(memcg)));
1217
1218	/*
1219	 * When cgroup1 non-hierarchy mode is used,
1220	 * parent_mem_cgroup() does not walk all the way up to the
1221	 * cgroup root (root_mem_cgroup). So we have to handle
1222	 * dead_memcg from cgroup root separately.
1223	 */
1224	if (!mem_cgroup_is_root(last))
1225		__invalidate_reclaim_iterators(root_mem_cgroup,
1226						dead_memcg);
1227}
1228
1229/**
1230 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1231 * @memcg: hierarchy root
1232 * @fn: function to call for each task
1233 * @arg: argument passed to @fn
1234 *
1235 * This function iterates over tasks attached to @memcg or to any of its
1236 * descendants and calls @fn for each task. If @fn returns a non-zero
1237 * value, the function breaks the iteration loop and returns the value.
1238 * Otherwise, it will iterate over all tasks and return 0.
1239 *
1240 * This function must not be called for the root memory cgroup.
1241 */
1242int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1243			  int (*fn)(struct task_struct *, void *), void *arg)
1244{
1245	struct mem_cgroup *iter;
1246	int ret = 0;
1247
1248	BUG_ON(mem_cgroup_is_root(memcg));
1249
1250	for_each_mem_cgroup_tree(iter, memcg) {
1251		struct css_task_iter it;
1252		struct task_struct *task;
1253
1254		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1255		while (!ret && (task = css_task_iter_next(&it)))
1256			ret = fn(task, arg);
1257		css_task_iter_end(&it);
1258		if (ret) {
1259			mem_cgroup_iter_break(memcg, iter);
1260			break;
1261		}
1262	}
1263	return ret;
1264}
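
/*
 * Illustrative sketch (not part of memcontrol.c): a callback suitable for
 * mem_cgroup_scan_tasks(), used as
 *	mem_cgroup_scan_tasks(memcg, example_find_task_cb, &victim);
 * Returning non-zero stops the walk.  The predicate is hypothetical, and a
 * real caller would also take a task reference before the iterator moves on.
 */
static int example_find_task_cb(struct task_struct *task, void *arg)
{
	struct task_struct **found = arg;

	if (task->flags & PF_EXITING)	/* example predicate: skip exiting tasks */
		return 0;

	*found = task;
	return 1;	/* non-zero: break out of mem_cgroup_scan_tasks() */
}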
1265
1266#ifdef CONFIG_DEBUG_VM
1267void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1268{
1269	struct mem_cgroup *memcg;
 
 
 
 
 
1270
1271	if (mem_cgroup_disabled())
1272		return;
1273
1274	memcg = folio_memcg(folio);
1275
1276	if (!memcg)
1277		VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1278	else
1279		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1280}
1281#endif
1282
1283/**
1284 * folio_lruvec_lock - Lock the lruvec for a folio.
1285 * @folio: Pointer to the folio.
1286 *
1287 * These functions are safe to use under any of the following conditions:
1288 * - folio locked
1289 * - folio_test_lru false
1290 * - folio_memcg_lock()
1291 * - folio frozen (refcount of 0)
1292 *
1293 * Return: The lruvec this folio is on with its lock held.
1294 */
1295struct lruvec *folio_lruvec_lock(struct folio *folio)
 
1296{
1297	struct lruvec *lruvec = folio_lruvec(folio);
 
1298
1299	spin_lock(&lruvec->lru_lock);
1300	lruvec_memcg_debug(lruvec, folio);
 
 
1301
 
 
 
 
 
 
 
 
 
 
1302	return lruvec;
1303}
1304
1305/**
1306 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1307 * @folio: Pointer to the folio.
 
1308 *
1309 * These functions are safe to use under any of the following conditions:
1310 * - folio locked
1311 * - folio_test_lru false
1312 * - folio_memcg_lock()
1313 * - folio frozen (refcount of 0)
1314 *
1315 * Return: The lruvec this folio is on with its lock held and interrupts
1316 * disabled.
1317 */
1318struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1319{
1320	struct lruvec *lruvec = folio_lruvec(folio);
 
 
1321
1322	spin_lock_irq(&lruvec->lru_lock);
1323	lruvec_memcg_debug(lruvec, folio);
 
 
1324
1325	return lruvec;
1326}
1327
1328/**
1329 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1330 * @folio: Pointer to the folio.
1331 * @flags: Pointer to irqsave flags.
1332 *
1333 * These functions are safe to use under any of the following conditions:
1334 * - folio locked
1335 * - folio_test_lru false
1336 * - folio_memcg_lock()
1337 * - folio frozen (refcount of 0)
1338 *
1339 * Return: The lruvec this folio is on with its lock held and interrupts
1340 * disabled.
1341 */
1342struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1343		unsigned long *flags)
1344{
1345	struct lruvec *lruvec = folio_lruvec(folio);
1346
1347	spin_lock_irqsave(&lruvec->lru_lock, *flags);
1348	lruvec_memcg_debug(lruvec, folio);
1349
 
 
 
 
 
 
 
 
 
 
1350	return lruvec;
1351}
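
/*
 * Illustrative sketch (not part of memcontrol.c): pairing the lock helpers
 * above with their unlock side.  The lruvec returned by
 * folio_lruvec_lock_irqsave() is locked via lruvec->lru_lock, so the caller
 * releases the same spinlock with the saved flags.
 */
static void example_touch_folio_lru(struct folio *folio)
{
	struct lruvec *lruvec;
	unsigned long flags;

	lruvec = folio_lruvec_lock_irqsave(folio, &flags);
	/* ... operate on the folio's LRU state under the lock ... */
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}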
1352
1353/**
1354 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1355 * @lruvec: mem_cgroup per zone lru vector
1356 * @lru: index of lru list the page is sitting on
1357 * @zid: zone id of the accounted pages
1358 * @nr_pages: positive when adding or negative when removing
1359 *
1360 * This function must be called under lru_lock, just before a page is added
1361 * to or just after a page is removed from an lru list.
1362 */
1363void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1364				int zid, int nr_pages)
1365{
1366	struct mem_cgroup_per_node *mz;
1367	unsigned long *lru_size;
1368	long size;
1369
1370	if (mem_cgroup_disabled())
1371		return;
1372
1373	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1374	lru_size = &mz->lru_zone_size[zid][lru];
 
 
 
1375
1376	if (nr_pages < 0)
1377		*lru_size += nr_pages;
 
 
 
1378
1379	size = *lru_size;
1380	if (WARN_ONCE(size < 0,
1381		"%s(%p, %d, %d): lru_size %ld\n",
1382		__func__, lruvec, lru, nr_pages, size)) {
1383		VM_BUG_ON(1);
1384		*lru_size = 0;
 
 
 
 
 
 
 
 
1385	}
1386
1387	if (nr_pages > 0)
1388		*lru_size += nr_pages;
1389}
1390
1391/**
1392 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1393 * @memcg: the memory cgroup
1394 *
 1395 * Returns the maximum amount of memory @memcg can be charged with, in
1396 * pages.
1397 */
1398static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1399{
1400	unsigned long margin = 0;
1401	unsigned long count;
1402	unsigned long limit;
1403
1404	count = page_counter_read(&memcg->memory);
1405	limit = READ_ONCE(memcg->memory.max);
1406	if (count < limit)
1407		margin = limit - count;
1408
1409	if (do_memsw_account()) {
1410		count = page_counter_read(&memcg->memsw);
1411		limit = READ_ONCE(memcg->memsw.max);
1412		if (count < limit)
1413			margin = min(margin, limit - count);
1414		else
1415			margin = 0;
1416	}
1417
1418	return margin;
1419}
1420
1421/*
1422 * A routine for checking "mem" is under move_account() or not.
1423 *
1424 * Checking a cgroup is mc.from or mc.to or under hierarchy of
1425 * moving cgroups. This is for waiting at high-memory pressure
1426 * caused by "move".
1427 */
1428static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1429{
1430	struct mem_cgroup *from;
1431	struct mem_cgroup *to;
1432	bool ret = false;
1433	/*
 1434	 * Unlike the task_move routines, we access mc.to and mc.from without
 1435	 * the mutual exclusion of cgroup_mutex. Here, we take the spinlock instead.
1436	 */
1437	spin_lock(&mc.lock);
1438	from = mc.from;
1439	to = mc.to;
1440	if (!from)
1441		goto unlock;
1442
1443	ret = mem_cgroup_is_descendant(from, memcg) ||
1444		mem_cgroup_is_descendant(to, memcg);
1445unlock:
1446	spin_unlock(&mc.lock);
1447	return ret;
1448}
1449
1450static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1451{
1452	if (mc.moving_task && current != mc.moving_task) {
1453		if (mem_cgroup_under_move(memcg)) {
1454			DEFINE_WAIT(wait);
1455			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1456			/* moving charge context might have finished. */
1457			if (mc.moving_task)
1458				schedule();
1459			finish_wait(&mc.waitq, &wait);
1460			return true;
1461		}
1462	}
1463	return false;
1464}
1465
1466struct memory_stat {
1467	const char *name;
1468	unsigned int idx;
1469};
1470
1471static const struct memory_stat memory_stats[] = {
1472	{ "anon",			NR_ANON_MAPPED			},
1473	{ "file",			NR_FILE_PAGES			},
1474	{ "kernel",			MEMCG_KMEM			},
1475	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
1476	{ "pagetables",			NR_PAGETABLE			},
1477	{ "sec_pagetables",		NR_SECONDARY_PAGETABLE		},
1478	{ "percpu",			MEMCG_PERCPU_B			},
1479	{ "sock",			MEMCG_SOCK			},
1480	{ "vmalloc",			MEMCG_VMALLOC			},
1481	{ "shmem",			NR_SHMEM			},
1482#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
1483	{ "zswap",			MEMCG_ZSWAP_B			},
1484	{ "zswapped",			MEMCG_ZSWAPPED			},
1485#endif
1486	{ "file_mapped",		NR_FILE_MAPPED			},
1487	{ "file_dirty",			NR_FILE_DIRTY			},
1488	{ "file_writeback",		NR_WRITEBACK			},
1489#ifdef CONFIG_SWAP
1490	{ "swapcached",			NR_SWAPCACHE			},
1491#endif
1492#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1493	{ "anon_thp",			NR_ANON_THPS			},
1494	{ "file_thp",			NR_FILE_THPS			},
1495	{ "shmem_thp",			NR_SHMEM_THPS			},
1496#endif
1497	{ "inactive_anon",		NR_INACTIVE_ANON		},
1498	{ "active_anon",		NR_ACTIVE_ANON			},
1499	{ "inactive_file",		NR_INACTIVE_FILE		},
1500	{ "active_file",		NR_ACTIVE_FILE			},
1501	{ "unevictable",		NR_UNEVICTABLE			},
1502	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B		},
1503	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B		},
1504
1505	/* The memory events */
1506	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON		},
1507	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE		},
1508	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON	},
1509	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE	},
1510	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON		},
1511	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE		},
1512	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM		},
1513};
1514
1515/* Translate stat items to the correct unit for memory.stat output */
1516static int memcg_page_state_unit(int item)
1517{
1518	switch (item) {
1519	case MEMCG_PERCPU_B:
1520	case MEMCG_ZSWAP_B:
1521	case NR_SLAB_RECLAIMABLE_B:
1522	case NR_SLAB_UNRECLAIMABLE_B:
1523	case WORKINGSET_REFAULT_ANON:
1524	case WORKINGSET_REFAULT_FILE:
1525	case WORKINGSET_ACTIVATE_ANON:
1526	case WORKINGSET_ACTIVATE_FILE:
1527	case WORKINGSET_RESTORE_ANON:
1528	case WORKINGSET_RESTORE_FILE:
1529	case WORKINGSET_NODERECLAIM:
1530		return 1;
1531	case NR_KERNEL_STACK_KB:
1532		return SZ_1K;
1533	default:
1534		return PAGE_SIZE;
1535	}
1536}
1537
1538static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1539						    int item)
1540{
1541	return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
1542}
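/*
 * Illustrative conversion: NR_SLAB_RECLAIMABLE_B is tracked in bytes
 * (unit 1) and is reported unchanged, NR_KERNEL_STACK_KB is scaled by
 * SZ_1K, and page-based counters are scaled by PAGE_SIZE, so the sizes
 * in memory.stat all come out in bytes while the workingset counters
 * remain raw event counts.
 */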
1543
1544static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize)
1545{
1546	struct seq_buf s;
1547	int i;
1548
1549	seq_buf_init(&s, buf, bufsize);
1550
1551	/*
1552	 * Provide statistics on the state of the memory subsystem as
1553	 * well as cumulative event counters that show past behavior.
1554	 *
1555	 * This list is ordered following a combination of these gradients:
1556	 * 1) generic big picture -> specifics and details
1557	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1558	 *
1559	 * Current memory state:
1560	 */
1561	mem_cgroup_flush_stats();
1562
1563	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1564		u64 size;
1565
1566		size = memcg_page_state_output(memcg, memory_stats[i].idx);
1567		seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);
1568
1569		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1570			size += memcg_page_state_output(memcg,
1571							NR_SLAB_RECLAIMABLE_B);
1572			seq_buf_printf(&s, "slab %llu\n", size);
1573		}
1574	}
1575
1576	/* Accumulated memory events */
1577	seq_buf_printf(&s, "pgscan %lu\n",
1578		       memcg_events(memcg, PGSCAN_KSWAPD) +
1579		       memcg_events(memcg, PGSCAN_DIRECT) +
1580		       memcg_events(memcg, PGSCAN_KHUGEPAGED));
1581	seq_buf_printf(&s, "pgsteal %lu\n",
1582		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1583		       memcg_events(memcg, PGSTEAL_DIRECT) +
1584		       memcg_events(memcg, PGSTEAL_KHUGEPAGED));
1585
1586	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1587		if (memcg_vm_event_stat[i] == PGPGIN ||
1588		    memcg_vm_event_stat[i] == PGPGOUT)
1589			continue;
1590
1591		seq_buf_printf(&s, "%s %lu\n",
1592			       vm_event_name(memcg_vm_event_stat[i]),
1593			       memcg_events(memcg, memcg_vm_event_stat[i]));
1594	}
1595
1596	/* The above should easily fit into one page */
1597	WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1598}
1599
1600#define K(x) ((x) << (PAGE_SHIFT-10))
1601/**
1602 * mem_cgroup_print_oom_context: Print OOM information relevant to
 1603 * the memory controller.
1604 * @memcg: The memory cgroup that went over limit
1605 * @p: Task that is going to be killed
1606 *
1607 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1608 * enabled
1609 */
1610void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1611{
1612	rcu_read_lock();
1613
1614	if (memcg) {
1615		pr_cont(",oom_memcg=");
1616		pr_cont_cgroup_path(memcg->css.cgroup);
1617	} else
1618		pr_cont(",global_oom");
1619	if (p) {
1620		pr_cont(",task_memcg=");
1621		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1622	}
1623	rcu_read_unlock();
1624}
1625
1626/**
1627 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
 1628 * the memory controller.
1629 * @memcg: The memory cgroup that went over limit
1630 */
1631void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1632{
 1633	/* Use a static buffer, as the caller is holding oom_lock. */
1634	static char buf[PAGE_SIZE];
1635
1636	lockdep_assert_held(&oom_lock);
1637
1638	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1639		K((u64)page_counter_read(&memcg->memory)),
1640		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1641	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1642		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1643			K((u64)page_counter_read(&memcg->swap)),
1644			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1645	else {
1646		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1647			K((u64)page_counter_read(&memcg->memsw)),
1648			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1649		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1650			K((u64)page_counter_read(&memcg->kmem)),
1651			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1652	}
1653
1654	pr_info("Memory cgroup stats for ");
1655	pr_cont_cgroup_path(memcg->css.cgroup);
1656	pr_cont(":");
1657	memory_stat_format(memcg, buf, sizeof(buf));
1658	pr_info("%s", buf);
1659}
1660
1661/*
1662 * Return the memory (and swap, if configured) limit for a memcg.
1663 */
1664unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1665{
1666	unsigned long max = READ_ONCE(memcg->memory.max);
1667
1668	if (do_memsw_account()) {
1669		if (mem_cgroup_swappiness(memcg)) {
1670			/* Calculate swap excess capacity from memsw limit */
1671			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1672
1673			max += min(swap, (unsigned long)total_swap_pages);
1674		}
1675	} else {
1676		if (mem_cgroup_swappiness(memcg))
1677			max += min(READ_ONCE(memcg->swap.max),
1678				   (unsigned long)total_swap_pages);
1679	}
1680	return max;
1681}
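/*
 * Worked example: on cgroup2 with memory.max = 1 GiB, swap.max = 512 MiB,
 * ample physical swap and a non-zero swappiness, the reported maximum is
 * 1 GiB + 512 MiB worth of pages; with swappiness == 0 the swap headroom
 * is ignored and only memory.max counts.
 */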
1682
1683unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1684{
1685	return page_counter_read(&memcg->memory);
1686}
1687
1688static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1689				     int order)
1690{
1691	struct oom_control oc = {
1692		.zonelist = NULL,
1693		.nodemask = NULL,
1694		.memcg = memcg,
1695		.gfp_mask = gfp_mask,
1696		.order = order,
1697	};
1698	bool ret = true;
1699
1700	if (mutex_lock_killable(&oom_lock))
1701		return true;
1702
1703	if (mem_cgroup_margin(memcg) >= (1 << order))
1704		goto unlock;
1705
1706	/*
1707	 * A few threads which were not waiting at mutex_lock_killable() can
1708	 * fail to bail out. Therefore, check again after holding oom_lock.
1709	 */
1710	ret = task_is_dying() || out_of_memory(&oc);
1711
1712unlock:
1713	mutex_unlock(&oom_lock);
1714	return ret;
1715}
1716
1717static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1718				   pg_data_t *pgdat,
1719				   gfp_t gfp_mask,
1720				   unsigned long *total_scanned)
1721{
1722	struct mem_cgroup *victim = NULL;
1723	int total = 0;
1724	int loop = 0;
1725	unsigned long excess;
1726	unsigned long nr_scanned;
1727	struct mem_cgroup_reclaim_cookie reclaim = {
1728		.pgdat = pgdat,
1729	};
1730
1731	excess = soft_limit_excess(root_memcg);
1732
1733	while (1) {
1734		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1735		if (!victim) {
1736			loop++;
1737			if (loop >= 2) {
1738				/*
1739				 * If we have not been able to reclaim
1740				 * anything, it might because there are
1741				 * no reclaimable pages under this hierarchy
1742				 */
1743				if (!total)
1744					break;
1745				/*
1746				 * We want to do more targeted reclaim.
 1747				 * excess >> 2 is not too excessive, so as not to
 1748				 * reclaim too much, nor too little, so that we don't
 1749				 * keep coming back to reclaim from this cgroup
1750				 */
1751				if (total >= (excess >> 2) ||
1752					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1753					break;
1754			}
1755			continue;
1756		}
1757		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1758					pgdat, &nr_scanned);
1759		*total_scanned += nr_scanned;
1760		if (!soft_limit_excess(root_memcg))
1761			break;
1762	}
1763	mem_cgroup_iter_break(root_memcg, victim);
1764	return total;
1765}
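/*
 * Illustrative behaviour: if the hierarchy exceeds its soft limit by
 * 1000 pages, the loop above keeps picking victims until it has
 * reclaimed at least excess >> 2 = 250 pages, the excess is gone, no
 * progress is being made, or MEM_CGROUP_MAX_RECLAIM_LOOPS is exceeded.
 */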
1766
1767#ifdef CONFIG_LOCKDEP
1768static struct lockdep_map memcg_oom_lock_dep_map = {
1769	.name = "memcg_oom_lock",
1770};
1771#endif
1772
1773static DEFINE_SPINLOCK(memcg_oom_lock);
1774
1775/*
 1776 * Check whether the OOM killer is already running under our hierarchy.
1777 * If someone is running, return false.
1778 */
1779static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1780{
1781	struct mem_cgroup *iter, *failed = NULL;
1782
1783	spin_lock(&memcg_oom_lock);
1784
1785	for_each_mem_cgroup_tree(iter, memcg) {
1786		if (iter->oom_lock) {
1787			/*
1788			 * this subtree of our hierarchy is already locked
 1789			 * so we cannot take the lock.
1790			 */
1791			failed = iter;
1792			mem_cgroup_iter_break(memcg, iter);
1793			break;
1794		} else
1795			iter->oom_lock = true;
1796	}
1797
1798	if (failed) {
1799		/*
1800		 * OK, we failed to lock the whole subtree so we have
 1801		 * to clean up what we set up, up to the failing subtree
1802		 */
1803		for_each_mem_cgroup_tree(iter, memcg) {
1804			if (iter == failed) {
1805				mem_cgroup_iter_break(memcg, iter);
1806				break;
1807			}
1808			iter->oom_lock = false;
1809		}
1810	} else
1811		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1812
1813	spin_unlock(&memcg_oom_lock);
1814
1815	return !failed;
1816}
1817
1818static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1819{
1820	struct mem_cgroup *iter;
1821
1822	spin_lock(&memcg_oom_lock);
1823	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1824	for_each_mem_cgroup_tree(iter, memcg)
1825		iter->oom_lock = false;
1826	spin_unlock(&memcg_oom_lock);
1827}
1828
1829static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1830{
1831	struct mem_cgroup *iter;
1832
1833	spin_lock(&memcg_oom_lock);
1834	for_each_mem_cgroup_tree(iter, memcg)
1835		iter->under_oom++;
1836	spin_unlock(&memcg_oom_lock);
1837}
1838
1839static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1840{
1841	struct mem_cgroup *iter;
1842
1843	/*
1844	 * Be careful about under_oom underflows because a child memcg
1845	 * could have been added after mem_cgroup_mark_under_oom.
1846	 */
1847	spin_lock(&memcg_oom_lock);
1848	for_each_mem_cgroup_tree(iter, memcg)
1849		if (iter->under_oom > 0)
1850			iter->under_oom--;
1851	spin_unlock(&memcg_oom_lock);
1852}
1853
1854static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1855
1856struct oom_wait_info {
1857	struct mem_cgroup *memcg;
1858	wait_queue_entry_t	wait;
1859};
1860
1861static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1862	unsigned mode, int sync, void *arg)
1863{
1864	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1865	struct mem_cgroup *oom_wait_memcg;
1866	struct oom_wait_info *oom_wait_info;
1867
1868	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1869	oom_wait_memcg = oom_wait_info->memcg;
1870
1871	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1872	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1873		return 0;
1874	return autoremove_wake_function(wait, mode, sync, arg);
1875}
1876
1877static void memcg_oom_recover(struct mem_cgroup *memcg)
1878{
1879	/*
1880	 * For the following lockless ->under_oom test, the only required
1881	 * guarantee is that it must see the state asserted by an OOM when
1882	 * this function is called as a result of userland actions
1883	 * triggered by the notification of the OOM.  This is trivially
1884	 * achieved by invoking mem_cgroup_mark_under_oom() before
1885	 * triggering notification.
1886	 */
1887	if (memcg && memcg->under_oom)
1888		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1889}
1890
1891/*
1892 * Returns true if successfully killed one or more processes. Though in some
1893 * corner cases it can return true even without killing any process.
1894 */
1895static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1896{
1897	bool locked, ret;
1898
1899	if (order > PAGE_ALLOC_COSTLY_ORDER)
1900		return false;
1901
1902	memcg_memory_event(memcg, MEMCG_OOM);
1903
1904	/*
1905	 * We are in the middle of the charge context here, so we
1906	 * don't want to block when potentially sitting on a callstack
1907	 * that holds all kinds of filesystem and mm locks.
1908	 *
1909	 * cgroup1 allows disabling the OOM killer and waiting for outside
1910	 * handling until the charge can succeed; remember the context and put
1911	 * the task to sleep at the end of the page fault when all locks are
1912	 * released.
1913	 *
1914	 * On the other hand, in-kernel OOM killer allows for an async victim
1915	 * memory reclaim (oom_reaper) and that means that we are not solely
1916	 * relying on the oom victim to make a forward progress and we can
1917	 * invoke the oom killer here.
1918	 *
1919	 * Please note that mem_cgroup_out_of_memory might fail to find a
1920	 * victim and then we have to bail out from the charge path.
1921	 */
1922	if (memcg->oom_kill_disable) {
1923		if (current->in_user_fault) {
1924			css_get(&memcg->css);
1925			current->memcg_in_oom = memcg;
1926			current->memcg_oom_gfp_mask = mask;
1927			current->memcg_oom_order = order;
1928		}
1929		return false;
1930	}
1931
1932	mem_cgroup_mark_under_oom(memcg);
1933
1934	locked = mem_cgroup_oom_trylock(memcg);
1935
1936	if (locked)
1937		mem_cgroup_oom_notify(memcg);
1938
1939	mem_cgroup_unmark_under_oom(memcg);
1940	ret = mem_cgroup_out_of_memory(memcg, mask, order);
1941
1942	if (locked)
1943		mem_cgroup_oom_unlock(memcg);
1944
1945	return ret;
1946}
1947
1948/**
1949 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1950 * @handle: actually kill/wait or just clean up the OOM state
1951 *
1952 * This has to be called at the end of a page fault if the memcg OOM
1953 * handler was enabled.
1954 *
1955 * Memcg supports userspace OOM handling where failed allocations must
1956 * sleep on a waitqueue until the userspace task resolves the
1957 * situation.  Sleeping directly in the charge context with all kinds
1958 * of locks held is not a good idea, instead we remember an OOM state
1959 * in the task and mem_cgroup_oom_synchronize() has to be called at
1960 * the end of the page fault to complete the OOM handling.
1961 *
1962 * Returns %true if an ongoing memcg OOM situation was detected and
1963 * completed, %false otherwise.
1964 */
1965bool mem_cgroup_oom_synchronize(bool handle)
1966{
1967	struct mem_cgroup *memcg = current->memcg_in_oom;
1968	struct oom_wait_info owait;
1969	bool locked;
1970
1971	/* OOM is global, do not handle */
1972	if (!memcg)
1973		return false;
1974
1975	if (!handle)
1976		goto cleanup;
1977
1978	owait.memcg = memcg;
1979	owait.wait.flags = 0;
1980	owait.wait.func = memcg_oom_wake_function;
1981	owait.wait.private = current;
1982	INIT_LIST_HEAD(&owait.wait.entry);
1983
1984	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1985	mem_cgroup_mark_under_oom(memcg);
1986
1987	locked = mem_cgroup_oom_trylock(memcg);
1988
1989	if (locked)
1990		mem_cgroup_oom_notify(memcg);
1991
1992	if (locked && !memcg->oom_kill_disable) {
1993		mem_cgroup_unmark_under_oom(memcg);
1994		finish_wait(&memcg_oom_waitq, &owait.wait);
1995		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1996					 current->memcg_oom_order);
1997	} else {
1998		schedule();
1999		mem_cgroup_unmark_under_oom(memcg);
2000		finish_wait(&memcg_oom_waitq, &owait.wait);
2001	}
2002
2003	if (locked) {
2004		mem_cgroup_oom_unlock(memcg);
2005		/*
2006		 * There is no guarantee that an OOM-lock contender
2007		 * sees the wakeups triggered by the OOM kill
2008		 * uncharges.  Wake any sleepers explicitly.
2009		 */
2010		memcg_oom_recover(memcg);
2011	}
2012cleanup:
2013	current->memcg_in_oom = NULL;
2014	css_put(&memcg->css);
2015	return true;
2016}
2017
2018/**
2019 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2020 * @victim: task to be killed by the OOM killer
2021 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2022 *
2023 * Returns a pointer to a memory cgroup, which has to be cleaned up
 2024 * by killing all of its OOM-killable tasks.
2025 *
2026 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2027 */
2028struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2029					    struct mem_cgroup *oom_domain)
2030{
2031	struct mem_cgroup *oom_group = NULL;
2032	struct mem_cgroup *memcg;
2033
2034	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2035		return NULL;
2036
2037	if (!oom_domain)
2038		oom_domain = root_mem_cgroup;
2039
2040	rcu_read_lock();
2041
2042	memcg = mem_cgroup_from_task(victim);
2043	if (mem_cgroup_is_root(memcg))
2044		goto out;
2045
2046	/*
2047	 * If the victim task has been asynchronously moved to a different
2048	 * memory cgroup, we might end up killing tasks outside oom_domain.
2049	 * In this case it's better to ignore memory.group.oom.
2050	 */
2051	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2052		goto out;
2053
2054	/*
2055	 * Traverse the memory cgroup hierarchy from the victim task's
2056	 * cgroup up to the OOMing cgroup (or root) to find the
2057	 * highest-level memory cgroup with oom.group set.
2058	 */
2059	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2060		if (memcg->oom_group)
2061			oom_group = memcg;
2062
2063		if (memcg == oom_domain)
2064			break;
2065	}
2066
2067	if (oom_group)
2068		css_get(&oom_group->css);
2069out:
2070	rcu_read_unlock();
2071
2072	return oom_group;
2073}
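/*
 * Worked example: in a hierarchy A/B/C with memory.oom.group set on A,
 * a victim task in C and an OOM scoped to A (oom_domain == A), the walk
 * from C up to A records A as the highest-level cgroup with oom.group
 * set, so all OOM-killable tasks in A's subtree are torn down together.
 */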
2074
2075void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2076{
2077	pr_info("Tasks in ");
2078	pr_cont_cgroup_path(memcg->css.cgroup);
2079	pr_cont(" are going to be killed due to memory.oom.group set\n");
2080}
2081
2082/**
2083 * folio_memcg_lock - Bind a folio to its memcg.
2084 * @folio: The folio.
2085 *
2086 * This function prevents unlocked LRU folios from being moved to
2087 * another cgroup.
2088 *
2089 * It ensures lifetime of the bound memcg.  The caller is responsible
2090 * for the lifetime of the folio.
2091 */
2092void folio_memcg_lock(struct folio *folio)
2093{
2094	struct mem_cgroup *memcg;
2095	unsigned long flags;
2096
2097	/*
2098	 * The RCU lock is held throughout the transaction.  The fast
2099	 * path can get away without acquiring the memcg->move_lock
2100	 * because page moving starts with an RCU grace period.
 2101	 */
2102	rcu_read_lock();
2103
2104	if (mem_cgroup_disabled())
2105		return;
2106again:
2107	memcg = folio_memcg(folio);
2108	if (unlikely(!memcg))
2109		return;
2110
2111#ifdef CONFIG_PROVE_LOCKING
2112	local_irq_save(flags);
2113	might_lock(&memcg->move_lock);
2114	local_irq_restore(flags);
2115#endif
2116
2117	if (atomic_read(&memcg->moving_account) <= 0)
2118		return;
2119
2120	spin_lock_irqsave(&memcg->move_lock, flags);
2121	if (memcg != folio_memcg(folio)) {
2122		spin_unlock_irqrestore(&memcg->move_lock, flags);
2123		goto again;
2124	}
2125
2126	/*
2127	 * When charge migration first begins, we can have multiple
2128	 * critical sections holding the fast-path RCU lock and one
 2129		 * holding the slowpath move_lock. Track the task that holds the
2130	 * move_lock for unlock_page_memcg().
2131	 */
2132	memcg->move_lock_task = current;
2133	memcg->move_lock_flags = flags;
2134}
2135
2136void lock_page_memcg(struct page *page)
2137{
2138	folio_memcg_lock(page_folio(page));
2139}
2140
2141static void __folio_memcg_unlock(struct mem_cgroup *memcg)
2142{
2143	if (memcg && memcg->move_lock_task == current) {
2144		unsigned long flags = memcg->move_lock_flags;
2145
2146		memcg->move_lock_task = NULL;
2147		memcg->move_lock_flags = 0;
2148
2149		spin_unlock_irqrestore(&memcg->move_lock, flags);
2150	}
2151
2152	rcu_read_unlock();
2153}
2154
2155/**
2156 * folio_memcg_unlock - Release the binding between a folio and its memcg.
2157 * @folio: The folio.
2158 *
2159 * This releases the binding created by folio_memcg_lock().  This does
2160 * not change the accounting of this folio to its memcg, but it does
2161 * permit others to change it.
2162 */
2163void folio_memcg_unlock(struct folio *folio)
2164{
2165	__folio_memcg_unlock(folio_memcg(folio));
2166}
2167
2168void unlock_page_memcg(struct page *page)
2169{
2170	folio_memcg_unlock(page_folio(page));
2171}
2172
2173struct memcg_stock_pcp {
2174	local_lock_t stock_lock;
 2175	struct mem_cgroup *cached; /* this is never the root cgroup */
2176	unsigned int nr_pages;
2177
2178#ifdef CONFIG_MEMCG_KMEM
2179	struct obj_cgroup *cached_objcg;
2180	struct pglist_data *cached_pgdat;
2181	unsigned int nr_bytes;
2182	int nr_slab_reclaimable_b;
2183	int nr_slab_unreclaimable_b;
2184#endif
2185
2186	struct work_struct work;
2187	unsigned long flags;
2188#define FLUSHING_CACHED_CHARGE	0
2189};
2190static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
2191	.stock_lock = INIT_LOCAL_LOCK(stock_lock),
2192};
2193static DEFINE_MUTEX(percpu_charge_mutex);
2194
2195#ifdef CONFIG_MEMCG_KMEM
2196static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
2197static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2198				     struct mem_cgroup *root_memcg);
2199static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages);
2200
2201#else
2202static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2203{
2204	return NULL;
2205}
2206static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2207				     struct mem_cgroup *root_memcg)
2208{
2209	return false;
2210}
2211static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2212{
2213}
2214#endif
2215
2216/**
2217 * consume_stock: Try to consume stocked charge on this cpu.
2218 * @memcg: memcg to consume from.
2219 * @nr_pages: how many pages to charge.
2220 *
2221 * The charges will only happen if @memcg matches the current cpu's memcg
2222 * stock, and at least @nr_pages are available in that stock.  Failure to
2223 * service an allocation will refill the stock.
2224 *
2225 * returns true if successful, false otherwise.
2226 */
2227static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2228{
2229	struct memcg_stock_pcp *stock;
2230	unsigned long flags;
2231	bool ret = false;
2232
2233	if (nr_pages > MEMCG_CHARGE_BATCH)
2234		return ret;
2235
2236	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2237
2238	stock = this_cpu_ptr(&memcg_stock);
2239	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2240		stock->nr_pages -= nr_pages;
2241		ret = true;
2242	}
2243
2244	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2245
2246	return ret;
2247}
2248
2249/*
 2250 * Drain stocks cached in the percpu area and reset the cached information.
2251 */
2252static void drain_stock(struct memcg_stock_pcp *stock)
2253{
2254	struct mem_cgroup *old = stock->cached;
2255
2256	if (!old)
2257		return;
2258
2259	if (stock->nr_pages) {
2260		page_counter_uncharge(&old->memory, stock->nr_pages);
2261		if (do_memsw_account())
2262			page_counter_uncharge(&old->memsw, stock->nr_pages);
2263		stock->nr_pages = 0;
2264	}
2265
2266	css_put(&old->css);
2267	stock->cached = NULL;
2268}
2269
2270static void drain_local_stock(struct work_struct *dummy)
2271{
2272	struct memcg_stock_pcp *stock;
2273	struct obj_cgroup *old = NULL;
2274	unsigned long flags;
2275
2276	/*
2277	 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2278	 * drain_stock races is that we always operate on local CPU stock
 2279	 * here with IRQs disabled.
2280	 */
2281	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2282
2283	stock = this_cpu_ptr(&memcg_stock);
2284	old = drain_obj_stock(stock);
2285	drain_stock(stock);
2286	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2287
2288	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2289	if (old)
2290		obj_cgroup_put(old);
2291}
2292
2293/*
 2294 * Cache charges (nr_pages) in the local per-cpu area.
2295 * This will be consumed by consume_stock() function, later.
2296 */
2297static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2298{
2299	struct memcg_stock_pcp *stock;
2300
2301	stock = this_cpu_ptr(&memcg_stock);
2302	if (stock->cached != memcg) { /* reset if necessary */
2303		drain_stock(stock);
2304		css_get(&memcg->css);
2305		stock->cached = memcg;
2306	}
2307	stock->nr_pages += nr_pages;
2308
2309	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2310		drain_stock(stock);
2311}
2312
2313static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2314{
2315	unsigned long flags;
2316
2317	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2318	__refill_stock(memcg, nr_pages);
2319	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2320}
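/*
 * Sketch of the stock protocol: try_charge_memcg() charges the page
 * counters in batches of MEMCG_CHARGE_BATCH pages and parks the surplus
 * here via refill_stock(); subsequent small charges for the same memcg
 * on this CPU are then served from consume_stock() without touching the
 * shared counters, and drain_all_stock() hands the surplus back when a
 * cgroup in the hierarchy is struggling to charge.
 */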
2321
2322/*
2323 * Drains all per-CPU charge caches for given root_memcg resp. subtree
2324 * of the hierarchy under it.
2325 */
2326static void drain_all_stock(struct mem_cgroup *root_memcg)
2327{
2328	int cpu, curcpu;
2329
 2330	/* If someone's already draining, avoid running more workers. */
2331	if (!mutex_trylock(&percpu_charge_mutex))
2332		return;
2333	/*
 2334	 * Notify other cpus that system-wide "drain" is running.
2335	 * We do not care about races with the cpu hotplug because cpu down
2336	 * as well as workers from this path always operate on the local
2337	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2338	 */
2339	migrate_disable();
2340	curcpu = smp_processor_id();
2341	for_each_online_cpu(cpu) {
2342		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2343		struct mem_cgroup *memcg;
2344		bool flush = false;
2345
2346		rcu_read_lock();
2347		memcg = stock->cached;
2348		if (memcg && stock->nr_pages &&
2349		    mem_cgroup_is_descendant(memcg, root_memcg))
2350			flush = true;
2351		else if (obj_stock_flush_required(stock, root_memcg))
2352			flush = true;
2353		rcu_read_unlock();
2354
2355		if (flush &&
2356		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2357			if (cpu == curcpu)
2358				drain_local_stock(&stock->work);
2359			else
2360				schedule_work_on(cpu, &stock->work);
2361		}
2362	}
2363	migrate_enable();
2364	mutex_unlock(&percpu_charge_mutex);
2365}
2366
2367static int memcg_hotplug_cpu_dead(unsigned int cpu)
2368{
2369	struct memcg_stock_pcp *stock;
2370
2371	stock = &per_cpu(memcg_stock, cpu);
2372	drain_stock(stock);
2373
2374	return 0;
2375}
2376
2377static unsigned long reclaim_high(struct mem_cgroup *memcg,
2378				  unsigned int nr_pages,
2379				  gfp_t gfp_mask)
2380{
2381	unsigned long nr_reclaimed = 0;
2382
2383	do {
2384		unsigned long pflags;
2385
2386		if (page_counter_read(&memcg->memory) <=
2387		    READ_ONCE(memcg->memory.high))
2388			continue;
2389
2390		memcg_memory_event(memcg, MEMCG_HIGH);
2391
2392		psi_memstall_enter(&pflags);
2393		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2394							gfp_mask,
2395							MEMCG_RECLAIM_MAY_SWAP);
2396		psi_memstall_leave(&pflags);
2397	} while ((memcg = parent_mem_cgroup(memcg)) &&
2398		 !mem_cgroup_is_root(memcg));
2399
2400	return nr_reclaimed;
2401}
2402
2403static void high_work_func(struct work_struct *work)
2404{
2405	struct mem_cgroup *memcg;
2406
2407	memcg = container_of(work, struct mem_cgroup, high_work);
2408	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2409}
2410
2411/*
2412 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2413 * enough to still cause a significant slowdown in most cases, while still
2414 * allowing diagnostics and tracing to proceed without becoming stuck.
2415 */
2416#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2417
2418/*
 2419 * When calculating the delay, we use these on either side of the exponentiation to
 2420 * maintain precision and scale to a reasonable number of jiffies (see the table
 2421 * below).
2422 *
2423 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2424 *   overage ratio to a delay.
2425 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2426 *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2427 *   to produce a reasonable delay curve.
2428 *
2429 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2430 * reasonable delay curve compared to precision-adjusted overage, not
2431 * penalising heavily at first, but still making sure that growth beyond the
 2432 * limit penalises misbehaving cgroups by slowing them down exponentially. For
2433 * example, with a high of 100 megabytes:
2434 *
2435 *  +-------+------------------------+
2436 *  | usage | time to allocate in ms |
2437 *  +-------+------------------------+
2438 *  | 100M  |                      0 |
2439 *  | 101M  |                      6 |
2440 *  | 102M  |                     25 |
2441 *  | 103M  |                     57 |
2442 *  | 104M  |                    102 |
2443 *  | 105M  |                    159 |
2444 *  | 106M  |                    230 |
2445 *  | 107M  |                    313 |
2446 *  | 108M  |                    409 |
2447 *  | 109M  |                    518 |
2448 *  | 110M  |                    639 |
2449 *  | 111M  |                    774 |
2450 *  | 112M  |                    921 |
2451 *  | 113M  |                   1081 |
2452 *  | 114M  |                   1254 |
2453 *  | 115M  |                   1439 |
2454 *  | 116M  |                   1638 |
2455 *  | 117M  |                   1849 |
2456 *  | 118M  |                   2000 |
2457 *  | 119M  |                   2000 |
2458 *  | 120M  |                   2000 |
2459 *  +-------+------------------------+
2460 */
 2461#define MEMCG_DELAY_PRECISION_SHIFT 20
 2462#define MEMCG_DELAY_SCALING_SHIFT 14
2463
2464static u64 calculate_overage(unsigned long usage, unsigned long high)
2465{
2466	u64 overage;
2467
2468	if (usage <= high)
2469		return 0;
2470
2471	/*
2472	 * Prevent division by 0 in overage calculation by acting as if
2473	 * it was a threshold of 1 page
2474	 */
2475	high = max(high, 1UL);
2476
2477	overage = usage - high;
2478	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2479	return div64_u64(overage, high);
2480}
2481
2482static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2483{
2484	u64 overage, max_overage = 0;
2485
2486	do {
2487		overage = calculate_overage(page_counter_read(&memcg->memory),
2488					    READ_ONCE(memcg->memory.high));
2489		max_overage = max(overage, max_overage);
2490	} while ((memcg = parent_mem_cgroup(memcg)) &&
2491		 !mem_cgroup_is_root(memcg));
2492
2493	return max_overage;
2494}
2495
2496static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2497{
2498	u64 overage, max_overage = 0;
2499
2500	do {
2501		overage = calculate_overage(page_counter_read(&memcg->swap),
2502					    READ_ONCE(memcg->swap.high));
2503		if (overage)
2504			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2505		max_overage = max(overage, max_overage);
2506	} while ((memcg = parent_mem_cgroup(memcg)) &&
2507		 !mem_cgroup_is_root(memcg));
2508
2509	return max_overage;
2510}
2511
2512/*
2513 * Get the number of jiffies that we should penalise a mischievous cgroup which
2514 * is exceeding its memory.high by checking both it and its ancestors.
2515 */
2516static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2517					  unsigned int nr_pages,
2518					  u64 max_overage)
2519{
2520	unsigned long penalty_jiffies;
2521
2522	if (!max_overage)
2523		return 0;
2524
2525	/*
2526	 * We use overage compared to memory.high to calculate the number of
2527	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2528	 * fairly lenient on small overages, and increasingly harsh when the
2529	 * memcg in question makes it clear that it has no intention of stopping
2530	 * its crazy behaviour, so we exponentially increase the delay based on
2531	 * overage amount.
2532	 */
2533	penalty_jiffies = max_overage * max_overage * HZ;
2534	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2535	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2536
2537	/*
2538	 * Factor in the task's own contribution to the overage, such that four
2539	 * N-sized allocations are throttled approximately the same as one
2540	 * 4N-sized allocation.
2541	 *
2542	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
 2543	 * larger the current charge batch is than that.
2544	 */
2545	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2546}
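/*
 * Worked example matching the table above, assuming HZ=1000: with
 * memory.high = 100M and usage = 110M the overage is
 * (10M / 100M) << MEMCG_DELAY_PRECISION_SHIFT ~= 104858; squaring it,
 * multiplying by HZ and shifting right by 20 + 14 bits yields roughly
 * 640 jiffies (~640ms) for a MEMCG_CHARGE_BATCH-sized charge, before
 * the MEMCG_MAX_HIGH_DELAY_JIFFIES clamp is applied by the caller.
 */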
2547
2548/*
2549 * Scheduled by try_charge() to be executed from the userland return path
2550 * and reclaims memory over the high limit.
2551 */
2552void mem_cgroup_handle_over_high(void)
2553{
2554	unsigned long penalty_jiffies;
2555	unsigned long pflags;
2556	unsigned long nr_reclaimed;
2557	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2558	int nr_retries = MAX_RECLAIM_RETRIES;
2559	struct mem_cgroup *memcg;
2560	bool in_retry = false;
2561
2562	if (likely(!nr_pages))
2563		return;
2564
2565	memcg = get_mem_cgroup_from_mm(current->mm);
2566	current->memcg_nr_pages_over_high = 0;
2567
2568retry_reclaim:
2569	/*
2570	 * The allocating task should reclaim at least the batch size, but for
2571	 * subsequent retries we only want to do what's necessary to prevent oom
2572	 * or breaching resource isolation.
2573	 *
2574	 * This is distinct from memory.max or page allocator behaviour because
2575	 * memory.high is currently batched, whereas memory.max and the page
2576	 * allocator run every time an allocation is made.
2577	 */
2578	nr_reclaimed = reclaim_high(memcg,
2579				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2580				    GFP_KERNEL);
2581
2582	/*
2583	 * memory.high is breached and reclaim is unable to keep up. Throttle
2584	 * allocators proactively to slow down excessive growth.
2585	 */
2586	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2587					       mem_find_max_overage(memcg));
2588
2589	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2590						swap_find_max_overage(memcg));
2591
2592	/*
2593	 * Clamp the max delay per usermode return so as to still keep the
2594	 * application moving forwards and also permit diagnostics, albeit
2595	 * extremely slowly.
2596	 */
2597	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2598
2599	/*
2600	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2601	 * that it's not even worth doing, in an attempt to be nice to those who
2602	 * go only a small amount over their memory.high value and maybe haven't
2603	 * been aggressively reclaimed enough yet.
2604	 */
2605	if (penalty_jiffies <= HZ / 100)
2606		goto out;
2607
2608	/*
2609	 * If reclaim is making forward progress but we're still over
2610	 * memory.high, we want to encourage that rather than doing allocator
2611	 * throttling.
2612	 */
2613	if (nr_reclaimed || nr_retries--) {
2614		in_retry = true;
2615		goto retry_reclaim;
2616	}
2617
2618	/*
2619	 * If we exit early, we're guaranteed to die (since
2620	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2621	 * need to account for any ill-begotten jiffies to pay them off later.
2622	 */
2623	psi_memstall_enter(&pflags);
2624	schedule_timeout_killable(penalty_jiffies);
2625	psi_memstall_leave(&pflags);
2626
2627out:
2628	css_put(&memcg->css);
2629}
2630
2631static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2632			unsigned int nr_pages)
2633{
2634	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2635	int nr_retries = MAX_RECLAIM_RETRIES;
2636	struct mem_cgroup *mem_over_limit;
2637	struct page_counter *counter;
2638	unsigned long nr_reclaimed;
2639	bool passed_oom = false;
2640	unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2641	bool drained = false;
2642	bool raised_max_event = false;
2643	unsigned long pflags;
2644
2645retry:
2646	if (consume_stock(memcg, nr_pages))
2647		return 0;
2648
2649	if (!do_memsw_account() ||
2650	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2651		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2652			goto done_restock;
2653		if (do_memsw_account())
2654			page_counter_uncharge(&memcg->memsw, batch);
2655		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2656	} else {
2657		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2658		reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2659	}
2660
2661	if (batch > nr_pages) {
2662		batch = nr_pages;
2663		goto retry;
2664	}
2665
2666	/*
2667	 * Prevent unbounded recursion when reclaim operations need to
2668	 * allocate memory. This might exceed the limits temporarily,
2669	 * but we prefer facilitating memory reclaim and getting back
2670	 * under the limit over triggering OOM kills in these cases.
2671	 */
2672	if (unlikely(current->flags & PF_MEMALLOC))
2673		goto force;
2674
2675	if (unlikely(task_in_memcg_oom(current)))
2676		goto nomem;
2677
2678	if (!gfpflags_allow_blocking(gfp_mask))
2679		goto nomem;
2680
2681	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2682	raised_max_event = true;
2683
2684	psi_memstall_enter(&pflags);
2685	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2686						    gfp_mask, reclaim_options);
2687	psi_memstall_leave(&pflags);
2688
2689	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2690		goto retry;
2691
2692	if (!drained) {
2693		drain_all_stock(mem_over_limit);
2694		drained = true;
2695		goto retry;
2696	}
2697
2698	if (gfp_mask & __GFP_NORETRY)
2699		goto nomem;
2700	/*
2701	 * Even though the limit is exceeded at this point, reclaim
2702	 * may have been able to free some pages.  Retry the charge
2703	 * before killing the task.
2704	 *
2705	 * Only for regular pages, though: huge pages are rather
2706	 * unlikely to succeed so close to the limit, and we fall back
2707	 * to regular pages anyway in case of failure.
2708	 */
2709	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2710		goto retry;
2711	/*
2712	 * At task move, charge accounts can be doubly counted. So, it's
2713	 * better to wait until the end of task_move if something is going on.
2714	 */
2715	if (mem_cgroup_wait_acct_move(mem_over_limit))
2716		goto retry;
2717
2718	if (nr_retries--)
2719		goto retry;
2720
2721	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2722		goto nomem;
2723
2724	/* Avoid endless loop for tasks bypassed by the oom killer */
2725	if (passed_oom && task_is_dying())
2726		goto nomem;
2727
2728	/*
2729	 * keep retrying as long as the memcg oom killer is able to make
2730	 * a forward progress or bypass the charge if the oom killer
2731	 * couldn't make any progress.
2732	 */
2733	if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2734			   get_order(nr_pages * PAGE_SIZE))) {
2735		passed_oom = true;
2736		nr_retries = MAX_RECLAIM_RETRIES;
2737		goto retry;
2738	}
2739nomem:
2740	/*
2741	 * Memcg doesn't have a dedicated reserve for atomic
2742	 * allocations. But like the global atomic pool, we need to
2743	 * put the burden of reclaim on regular allocation requests
2744	 * and let these go through as privileged allocations.
2745	 */
2746	if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2747		return -ENOMEM;
2748force:
2749	/*
2750	 * If the allocation has to be enforced, don't forget to raise
2751	 * a MEMCG_MAX event.
2752	 */
2753	if (!raised_max_event)
2754		memcg_memory_event(mem_over_limit, MEMCG_MAX);
2755
2756	/*
2757	 * The allocation either can't fail or will lead to more memory
2758	 * being freed very soon.  Allow memory usage go over the limit
2759	 * temporarily by force charging it.
2760	 */
2761	page_counter_charge(&memcg->memory, nr_pages);
2762	if (do_memsw_account())
2763		page_counter_charge(&memcg->memsw, nr_pages);
2764
2765	return 0;
2766
2767done_restock:
2768	if (batch > nr_pages)
2769		refill_stock(memcg, batch - nr_pages);
2770
2771	/*
2772	 * If the hierarchy is above the normal consumption range, schedule
2773	 * reclaim on returning to userland.  We can perform reclaim here
2774	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2775	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2776	 * not recorded as it most likely matches current's and won't
2777	 * change in the meantime.  As high limit is checked again before
2778	 * reclaim, the cost of mismatch is negligible.
2779	 */
2780	do {
2781		bool mem_high, swap_high;
2782
2783		mem_high = page_counter_read(&memcg->memory) >
2784			READ_ONCE(memcg->memory.high);
2785		swap_high = page_counter_read(&memcg->swap) >
2786			READ_ONCE(memcg->swap.high);
2787
2788		/* Don't bother a random interrupted task */
2789		if (!in_task()) {
2790			if (mem_high) {
2791				schedule_work(&memcg->high_work);
2792				break;
2793			}
2794			continue;
2795		}
2796
2797		if (mem_high || swap_high) {
2798			/*
2799			 * The allocating tasks in this cgroup will need to do
2800			 * reclaim or be throttled to prevent further growth
2801			 * of the memory or swap footprints.
2802			 *
2803			 * Target some best-effort fairness between the tasks,
2804			 * and distribute reclaim work and delay penalties
2805			 * based on how much each task is actually allocating.
2806			 */
2807			current->memcg_nr_pages_over_high += batch;
2808			set_notify_resume(current);
2809			break;
2810		}
2811	} while ((memcg = parent_mem_cgroup(memcg)));
2812
2813	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2814	    !(current->flags & PF_MEMALLOC) &&
2815	    gfpflags_allow_blocking(gfp_mask)) {
2816		mem_cgroup_handle_over_high();
2817	}
2818	return 0;
2819}
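/*
 * Summary of the slow path above: a charge that misses the percpu stock
 * and the page counters escalates through direct reclaim, draining all
 * percpu stocks, bounded retries (including a wait for a pending task
 * move), and finally the memcg OOM killer; only __GFP_HIGH/__GFP_NOFAIL
 * requests are force-charged past the limit.
 */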
2820
2821static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2822			     unsigned int nr_pages)
2823{
2824	if (mem_cgroup_is_root(memcg))
2825		return 0;
2826
2827	return try_charge_memcg(memcg, gfp_mask, nr_pages);
2828}
2829
2830static inline void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2831{
2832	if (mem_cgroup_is_root(memcg))
2833		return;
2834
2835	page_counter_uncharge(&memcg->memory, nr_pages);
2836	if (do_memsw_account())
2837		page_counter_uncharge(&memcg->memsw, nr_pages);
2838}
2839
2840static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2841{
2842	VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
2843	/*
2844	 * Any of the following ensures page's memcg stability:
2845	 *
2846	 * - the page lock
2847	 * - LRU isolation
2848	 * - lock_page_memcg()
2849	 * - exclusive reference
2850	 * - mem_cgroup_trylock_pages()
2851	 */
2852	folio->memcg_data = (unsigned long)memcg;
2853}
2854
2855#ifdef CONFIG_MEMCG_KMEM
2856/*
2857 * The allocated objcg pointers array is not accounted directly.
 2858 * Moreover, it should not come from a DMA buffer and is not readily
2859 * reclaimable. So those GFP bits should be masked off.
2860 */
2861#define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
2862
2863/*
2864 * mod_objcg_mlstate() may be called with irq enabled, so
2865 * mod_memcg_lruvec_state() should be used.
2866 */
2867static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2868				     struct pglist_data *pgdat,
2869				     enum node_stat_item idx, int nr)
2870{
2871	struct mem_cgroup *memcg;
2872	struct lruvec *lruvec;
2873
2874	rcu_read_lock();
2875	memcg = obj_cgroup_memcg(objcg);
2876	lruvec = mem_cgroup_lruvec(memcg, pgdat);
2877	mod_memcg_lruvec_state(lruvec, idx, nr);
2878	rcu_read_unlock();
2879}
2880
2881int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
2882				 gfp_t gfp, bool new_slab)
2883{
2884	unsigned int objects = objs_per_slab(s, slab);
2885	unsigned long memcg_data;
2886	void *vec;
2887
2888	gfp &= ~OBJCGS_CLEAR_MASK;
2889	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
2890			   slab_nid(slab));
2891	if (!vec)
2892		return -ENOMEM;
2893
2894	memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
2895	if (new_slab) {
2896		/*
2897		 * If the slab is brand new and nobody can yet access its
2898		 * memcg_data, no synchronization is required and memcg_data can
2899		 * be simply assigned.
2900		 */
2901		slab->memcg_data = memcg_data;
2902	} else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) {
2903		/*
2904		 * If the slab is already in use, somebody can allocate and
2905		 * assign obj_cgroups in parallel. In this case the existing
2906		 * objcg vector should be reused.
2907		 */
2908		kfree(vec);
2909		return 0;
2910	}
2911
2912	kmemleak_not_leak(vec);
2913	return 0;
2914}
2915
2916static __always_inline
2917struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
2918{
2919	/*
2920	 * Slab objects are accounted individually, not per-page.
2921	 * Memcg membership data for each individual object is saved in
2922	 * slab->memcg_data.
2923	 */
2924	if (folio_test_slab(folio)) {
2925		struct obj_cgroup **objcgs;
2926		struct slab *slab;
2927		unsigned int off;
2928
2929		slab = folio_slab(folio);
2930		objcgs = slab_objcgs(slab);
2931		if (!objcgs)
2932			return NULL;
2933
2934		off = obj_to_index(slab->slab_cache, slab, p);
2935		if (objcgs[off])
2936			return obj_cgroup_memcg(objcgs[off]);
2937
2938		return NULL;
2939	}
2940
2941	/*
2942	 * page_memcg_check() is used here, because in theory we can encounter
2943	 * a folio where the slab flag has been cleared already, but
 2944	 * slab->memcg_data has not been freed yet.
2945	 * page_memcg_check(page) will guarantee that a proper memory
2946	 * cgroup pointer or NULL will be returned.
2947	 */
2948	return page_memcg_check(folio_page(folio, 0));
2949}
2950
2951/*
2952 * Returns a pointer to the memory cgroup to which the kernel object is charged.
2953 *
2954 * A passed kernel object can be a slab object, vmalloc object or a generic
2955 * kernel page, so different mechanisms for getting the memory cgroup pointer
2956 * should be used.
2957 *
2958 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
 2959 * cannot know for sure how the kernel object is implemented.
2960 * mem_cgroup_from_obj() can be safely used in such cases.
2961 *
2962 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2963 * cgroup_mutex, etc.
2964 */
2965struct mem_cgroup *mem_cgroup_from_obj(void *p)
2966{
2967	struct folio *folio;
2968
2969	if (mem_cgroup_disabled())
2970		return NULL;
2971
2972	if (unlikely(is_vmalloc_addr(p)))
2973		folio = page_folio(vmalloc_to_page(p));
2974	else
2975		folio = virt_to_folio(p);
2976
2977	return mem_cgroup_from_obj_folio(folio, p);
2978}
2979
2980/*
2981 * Returns a pointer to the memory cgroup to which the kernel object is charged.
 2982 * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects
2983 * allocated using vmalloc().
2984 *
2985 * A passed kernel object must be a slab object or a generic kernel page.
2986 *
2987 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2988 * cgroup_mutex, etc.
2989 */
2990struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
2991{
2992	if (mem_cgroup_disabled())
2993		return NULL;
2994
2995	return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
2996}
2997
2998static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
2999{
3000	struct obj_cgroup *objcg = NULL;
3001
3002	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
3003		objcg = rcu_dereference(memcg->objcg);
3004		if (objcg && obj_cgroup_tryget(objcg))
3005			break;
3006		objcg = NULL;
3007	}
3008	return objcg;
3009}
3010
3011__always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
3012{
3013	struct obj_cgroup *objcg = NULL;
3014	struct mem_cgroup *memcg;
3015
3016	if (memcg_kmem_bypass())
3017		return NULL;
3018
3019	rcu_read_lock();
3020	if (unlikely(active_memcg()))
3021		memcg = active_memcg();
3022	else
3023		memcg = mem_cgroup_from_task(current);
3024	objcg = __get_obj_cgroup_from_memcg(memcg);
3025	rcu_read_unlock();
3026	return objcg;
3027}
3028
3029struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
3030{
3031	struct obj_cgroup *objcg;
3032
3033	if (!memcg_kmem_enabled())
3034		return NULL;
3035
3036	if (PageMemcgKmem(page)) {
3037		objcg = __folio_objcg(page_folio(page));
3038		obj_cgroup_get(objcg);
3039	} else {
3040		struct mem_cgroup *memcg;
3041
3042		rcu_read_lock();
3043		memcg = __folio_memcg(page_folio(page));
3044		if (memcg)
3045			objcg = __get_obj_cgroup_from_memcg(memcg);
3046		else
3047			objcg = NULL;
3048		rcu_read_unlock();
3049	}
3050	return objcg;
3051}
3052
3053static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
3054{
3055	mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
3056	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
3057		if (nr_pages > 0)
3058			page_counter_charge(&memcg->kmem, nr_pages);
3059		else
3060			page_counter_uncharge(&memcg->kmem, -nr_pages);
3061	}
3062}
3063
3064
3065/*
 3066 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
3067 * @objcg: object cgroup to uncharge
3068 * @nr_pages: number of pages to uncharge
3069 */
3070static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
3071				      unsigned int nr_pages)
3072{
3073	struct mem_cgroup *memcg;
3074
3075	memcg = get_mem_cgroup_from_objcg(objcg);
3076
3077	memcg_account_kmem(memcg, -nr_pages);
3078	refill_stock(memcg, nr_pages);
3079
3080	css_put(&memcg->css);
3081}
3082
3083/*
 3084 * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
3085 * @objcg: object cgroup to charge
3086 * @gfp: reclaim mode
3087 * @nr_pages: number of pages to charge
3088 *
3089 * Returns 0 on success, an error code on failure.
3090 */
3091static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3092				   unsigned int nr_pages)
3093{
3094	struct mem_cgroup *memcg;
3095	int ret;
3096
3097	memcg = get_mem_cgroup_from_objcg(objcg);
3098
3099	ret = try_charge_memcg(memcg, gfp, nr_pages);
3100	if (ret)
3101		goto out;
3102
3103	memcg_account_kmem(memcg, nr_pages);
3104out:
3105	css_put(&memcg->css);
3106
3107	return ret;
3108}
3109
3110/**
3111 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3112 * @page: page to charge
3113 * @gfp: reclaim mode
3114 * @order: allocation order
3115 *
3116 * Returns 0 on success, an error code on failure.
3117 */
3118int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3119{
3120	struct obj_cgroup *objcg;
3121	int ret = 0;
3122
3123	objcg = get_obj_cgroup_from_current();
3124	if (objcg) {
3125		ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
3126		if (!ret) {
3127			page->memcg_data = (unsigned long)objcg |
3128				MEMCG_DATA_KMEM;
3129			return 0;
3130		}
3131		obj_cgroup_put(objcg);
3132	}
3133	return ret;
3134}
3135
3136/**
3137 * __memcg_kmem_uncharge_page: uncharge a kmem page
3138 * @page: page to uncharge
3139 * @order: allocation order
3140 */
3141void __memcg_kmem_uncharge_page(struct page *page, int order)
3142{
3143	struct folio *folio = page_folio(page);
3144	struct obj_cgroup *objcg;
3145	unsigned int nr_pages = 1 << order;
3146
3147	if (!folio_memcg_kmem(folio))
3148		return;
3149
3150	objcg = __folio_objcg(folio);
3151	obj_cgroup_uncharge_pages(objcg, nr_pages);
3152	folio->memcg_data = 0;
3153	obj_cgroup_put(objcg);
3154}
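/*
 * Illustrative usage (the page allocator normally calls these two
 * helpers on the caller's behalf for __GFP_ACCOUNT allocations):
 *
 *	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, order);
 *	...
 *	__free_pages(page, order);	// uncharged via the free path
 */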
3155
3156void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3157		     enum node_stat_item idx, int nr)
3158{
3159	struct memcg_stock_pcp *stock;
3160	struct obj_cgroup *old = NULL;
3161	unsigned long flags;
3162	int *bytes;
3163
3164	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3165	stock = this_cpu_ptr(&memcg_stock);
3166
3167	/*
3168	 * Save vmstat data in stock and skip vmstat array update unless
3169	 * accumulating over a page of vmstat data or when pgdat or idx
3170	 * changes.
3171	 */
3172	if (stock->cached_objcg != objcg) {
3173		old = drain_obj_stock(stock);
3174		obj_cgroup_get(objcg);
3175		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3176				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3177		stock->cached_objcg = objcg;
3178		stock->cached_pgdat = pgdat;
3179	} else if (stock->cached_pgdat != pgdat) {
3180		/* Flush the existing cached vmstat data */
3181		struct pglist_data *oldpg = stock->cached_pgdat;
3182
3183		if (stock->nr_slab_reclaimable_b) {
3184			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3185					  stock->nr_slab_reclaimable_b);
3186			stock->nr_slab_reclaimable_b = 0;
3187		}
3188		if (stock->nr_slab_unreclaimable_b) {
3189			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3190					  stock->nr_slab_unreclaimable_b);
3191			stock->nr_slab_unreclaimable_b = 0;
3192		}
3193		stock->cached_pgdat = pgdat;
3194	}
3195
3196	bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3197					       : &stock->nr_slab_unreclaimable_b;
3198	/*
 3199	 * Even for a large object >= PAGE_SIZE, the vmstat data will still be
3200	 * cached locally at least once before pushing it out.
3201	 */
3202	if (!*bytes) {
3203		*bytes = nr;
3204		nr = 0;
3205	} else {
3206		*bytes += nr;
3207		if (abs(*bytes) > PAGE_SIZE) {
3208			nr = *bytes;
3209			*bytes = 0;
3210		} else {
3211			nr = 0;
3212		}
3213	}
3214	if (nr)
3215		mod_objcg_mlstate(objcg, pgdat, idx, nr);
3216
3217	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3218	if (old)
3219		obj_cgroup_put(old);
3220}
3221
3222static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3223{
3224	struct memcg_stock_pcp *stock;
3225	unsigned long flags;
3226	bool ret = false;
3227
3228	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3229
3230	stock = this_cpu_ptr(&memcg_stock);
3231	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
3232		stock->nr_bytes -= nr_bytes;
3233		ret = true;
3234	}
3235
3236	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3237
3238	return ret;
3239}
3240
3241static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
3242{
3243	struct obj_cgroup *old = stock->cached_objcg;
3244
3245	if (!old)
3246		return NULL;
3247
3248	if (stock->nr_bytes) {
3249		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3250		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3251
3252		if (nr_pages) {
3253			struct mem_cgroup *memcg;
3254
3255			memcg = get_mem_cgroup_from_objcg(old);
3256
3257			memcg_account_kmem(memcg, -nr_pages);
3258			__refill_stock(memcg, nr_pages);
3259
3260			css_put(&memcg->css);
3261		}
3262
3263		/*
3264		 * The leftover is flushed to the centralized per-memcg value.
3265		 * On the next attempt to refill obj stock it will be moved
3266		 * to a per-cpu stock (probably on another CPU), see
3267		 * refill_obj_stock().
3268		 *
3269		 * How often it's flushed is a trade-off between the memory
3270		 * limit enforcement accuracy and potential CPU contention,
3271		 * so it might be changed in the future.
3272		 */
3273		atomic_add(nr_bytes, &old->nr_charged_bytes);
3274		stock->nr_bytes = 0;
3275	}
3276
3277	/*
3278	 * Flush the vmstat data in current stock
3279	 */
3280	if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3281		if (stock->nr_slab_reclaimable_b) {
3282			mod_objcg_mlstate(old, stock->cached_pgdat,
3283					  NR_SLAB_RECLAIMABLE_B,
3284					  stock->nr_slab_reclaimable_b);
3285			stock->nr_slab_reclaimable_b = 0;
3286		}
3287		if (stock->nr_slab_unreclaimable_b) {
3288			mod_objcg_mlstate(old, stock->cached_pgdat,
3289					  NR_SLAB_UNRECLAIMABLE_B,
3290					  stock->nr_slab_unreclaimable_b);
3291			stock->nr_slab_unreclaimable_b = 0;
3292		}
3293		stock->cached_pgdat = NULL;
3294	}
3295
3296	stock->cached_objcg = NULL;
3297	/*
3298	 * The `old' objcg needs to be released by the caller via
3299	 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
3300	 */
3301	return old;
3302}
3303
3304static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3305				     struct mem_cgroup *root_memcg)
3306{
3307	struct mem_cgroup *memcg;
3308
3309	if (stock->cached_objcg) {
3310		memcg = obj_cgroup_memcg(stock->cached_objcg);
3311		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3312			return true;
3313	}
3314
3315	return false;
3316}
3317
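/*
 * Return @nr_bytes to the per-cpu objcg stock.  If @allow_uncharge is
 * true and more than a page's worth of bytes has accumulated, whole
 * pages are uncharged back to the objcg's memcg.
 */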
3318static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3319			     bool allow_uncharge)
3320{
3321	struct memcg_stock_pcp *stock;
3322	struct obj_cgroup *old = NULL;
3323	unsigned long flags;
3324	unsigned int nr_pages = 0;
3325
3326	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3327
3328	stock = this_cpu_ptr(&memcg_stock);
3329	if (stock->cached_objcg != objcg) { /* reset if necessary */
3330		old = drain_obj_stock(stock);
3331		obj_cgroup_get(objcg);
3332		stock->cached_objcg = objcg;
3333		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3334				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3335		allow_uncharge = true;	/* Allow uncharge when objcg changes */
3336	}
3337	stock->nr_bytes += nr_bytes;
3338
3339	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3340		nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3341		stock->nr_bytes &= (PAGE_SIZE - 1);
3342	}
3343
3344	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3345	if (old)
3346		obj_cgroup_put(old);
3347
3348	if (nr_pages)
3349		obj_cgroup_uncharge_pages(objcg, nr_pages);
3350}
3351
3352int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3353{
3354	unsigned int nr_pages, nr_bytes;
3355	int ret;
3356
3357	if (consume_obj_stock(objcg, size))
3358		return 0;
3359
3360	/*
3361	 * In theory, objcg->nr_charged_bytes can have enough
3362	 * pre-charged bytes to satisfy the allocation. However,
3363	 * flushing objcg->nr_charged_bytes requires two atomic
3364	 * operations, and objcg->nr_charged_bytes can't be big.
3365	 * The shared objcg->nr_charged_bytes can also become a
3366	 * performance bottleneck if all tasks of the same memcg are
3367	 * trying to update it. So it's better to ignore it and try to
3368	 * grab some new pages. The stock's nr_bytes will be flushed to
3369	 * objcg->nr_charged_bytes later on when objcg changes.
3370	 *
3371	 * The stock's nr_bytes may contain enough pre-charged bytes
3372	 * to allow one less page to be charged, but we can't rely
3373	 * on the pre-charged bytes not being changed outside of
3374	 * consume_obj_stock() or refill_obj_stock(). So ignore those
3375	 * pre-charged bytes as well when charging pages. To avoid a
3376	 * page uncharge right after a page charge, we set the
3377	 * allow_uncharge flag to false when calling refill_obj_stock()
3378	 * to temporarily allow the pre-charged bytes to exceed the page
3379	 * size limit. The maximum reachable value of the pre-charged
3380	 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3381	 * race.
3382	 */
3383	nr_pages = size >> PAGE_SHIFT;
3384	nr_bytes = size & (PAGE_SIZE - 1);
3385
3386	if (nr_bytes)
3387		nr_pages += 1;
3388
3389	ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3390	if (!ret && nr_bytes)
3391		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3392
3393	return ret;
3394}
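
/*
 * Worked example (illustrative, PAGE_SIZE == 4096): a 3000-byte charge
 * that misses the per-cpu stock charges one full page through
 * obj_cgroup_charge_pages() and then parks the unused remainder,
 * PAGE_SIZE - 3000 = 1096 bytes, in the stock via refill_obj_stock()
 * with allow_uncharge == false, so the remainder is not immediately
 * returned to the page counter.
 */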
3395
3396void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3397{
3398	refill_obj_stock(objcg, size, true);
3399}
3400
3401#endif /* CONFIG_MEMCG_KMEM */
3402
3403/*
3404 * Because page_memcg(head) is not set on tails, set it now.
3405 */
3406void split_page_memcg(struct page *head, unsigned int nr)
3407{
3408	struct folio *folio = page_folio(head);
3409	struct mem_cgroup *memcg = folio_memcg(folio);
3410	int i;
3411
3412	if (mem_cgroup_disabled() || !memcg)
3413		return;
3414
3415	for (i = 1; i < nr; i++)
3416		folio_page(folio, i)->memcg_data = folio->memcg_data;
3417
3418	if (folio_memcg_kmem(folio))
3419		obj_cgroup_get_many(__folio_objcg(folio), nr - 1);
3420	else
3421		css_get_many(&memcg->css, nr - 1);
3422}
3423
3424#ifdef CONFIG_SWAP
3425/**
3426 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3427 * @entry: swap entry to be moved
3428 * @from:  mem_cgroup which the entry is moved from
3429 * @to:  mem_cgroup which the entry is moved to
3430 *
3431 * It succeeds only when the swap_cgroup's record for this entry is the same
3432 * as the mem_cgroup's id of @from.
3433 *
3434 * Returns 0 on success, -EINVAL on failure.
3435 *
3436 * The caller must have charged to @to, IOW, called page_counter_charge() on
3437 * both res and memsw, and called css_get().
3438 */
3439static int mem_cgroup_move_swap_account(swp_entry_t entry,
3440				struct mem_cgroup *from, struct mem_cgroup *to)
3441{
3442	unsigned short old_id, new_id;
3443
3444	old_id = mem_cgroup_id(from);
3445	new_id = mem_cgroup_id(to);
3446
3447	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3448		mod_memcg_state(from, MEMCG_SWAP, -1);
3449		mod_memcg_state(to, MEMCG_SWAP, 1);
3450		return 0;
3451	}
3452	return -EINVAL;
3453}
3454#else
3455static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3456				struct mem_cgroup *from, struct mem_cgroup *to)
3457{
3458	return -EINVAL;
3459}
3460#endif
3461
3462static DEFINE_MUTEX(memcg_max_mutex);
3463
3464static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3465				 unsigned long max, bool memsw)
3466{
3467	bool enlarge = false;
3468	bool drained = false;
3469	int ret;
3470	bool limits_invariant;
3471	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3472
3473	do {
3474		if (signal_pending(current)) {
3475			ret = -EINTR;
3476			break;
3477		}
3478
3479		mutex_lock(&memcg_max_mutex);
3480		/*
3481		 * Make sure that the new limit (memsw or memory limit) doesn't
3482		 * break our basic invariant rule memory.max <= memsw.max.
3483		 */
3484		limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3485					   max <= memcg->memsw.max;
3486		if (!limits_invariant) {
3487			mutex_unlock(&memcg_max_mutex);
3488			ret = -EINVAL;
3489			break;
3490		}
3491		if (max > counter->max)
3492			enlarge = true;
3493		ret = page_counter_set_max(counter, max);
3494		mutex_unlock(&memcg_max_mutex);
3495
3496		if (!ret)
3497			break;
3498
3499		if (!drained) {
3500			drain_all_stock(memcg);
3501			drained = true;
3502			continue;
3503		}
3504
3505		if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3506					memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP)) {
3507			ret = -EBUSY;
3508			break;
3509		}
3510	} while (true);
3511
3512	if (!ret && enlarge)
3513		memcg_oom_recover(memcg);
3514
3515	return ret;
3516}
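
/*
 * Illustrative example of the invariant enforced above: with memory.max
 * at 512M, an attempt to set memsw.max to 256M fails with -EINVAL
 * because memory.max <= memsw.max would no longer hold; lowering
 * memory.max first makes the same memsw.max write succeed.
 */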
3517
3518unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3519					    gfp_t gfp_mask,
3520					    unsigned long *total_scanned)
3521{
3522	unsigned long nr_reclaimed = 0;
3523	struct mem_cgroup_per_node *mz, *next_mz = NULL;
3524	unsigned long reclaimed;
3525	int loop = 0;
3526	struct mem_cgroup_tree_per_node *mctz;
3527	unsigned long excess;
3528
3529	if (order > 0)
3530		return 0;
3531
3532	mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];
3533
3534	/*
3535	 * Do not even bother to check the largest node if the root
3536	 * is empty. Do it lockless to prevent lock bouncing. Races
3537	 * are acceptable as soft limit is best effort anyway.
3538	 */
3539	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3540		return 0;
3541
3542	/*
3543	 * This loop can run for a while, especially if mem_cgroups continuously
3544	 * keep exceeding their soft limit and putting the system under
3545	 * pressure.
3546	 */
3547	do {
3548		if (next_mz)
3549			mz = next_mz;
3550		else
3551			mz = mem_cgroup_largest_soft_limit_node(mctz);
3552		if (!mz)
3553			break;
3554
3555		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3556						    gfp_mask, total_scanned);
3557		nr_reclaimed += reclaimed;
3558		spin_lock_irq(&mctz->lock);
3559
3560		/*
3561		 * If we failed to reclaim anything from this memory cgroup
3562		 * it is time to move on to the next cgroup
3563		 */
3564		next_mz = NULL;
3565		if (!reclaimed)
3566			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3567
3568		excess = soft_limit_excess(mz->memcg);
3569		/*
3570		 * One school of thought says that we should not add
3571		 * back the node to the tree if reclaim returns 0.
3572		 * But our reclaim could return 0 simply because, due
3573		 * to priority, we are exposing a smaller subset of
3574		 * memory to reclaim from. Consider this as a longer
3575		 * term TODO.
3576		 */
3577		/* If excess == 0, no tree ops */
3578		__mem_cgroup_insert_exceeded(mz, mctz, excess);
3579		spin_unlock_irq(&mctz->lock);
3580		css_put(&mz->memcg->css);
3581		loop++;
3582		/*
3583		 * Could not reclaim anything and there are no more
3584		 * mem cgroups to try or we seem to be looping without
3585		 * reclaiming anything.
3586		 */
3587		if (!nr_reclaimed &&
3588			(next_mz == NULL ||
3589			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3590			break;
3591	} while (!nr_reclaimed);
3592	if (next_mz)
3593		css_put(&next_mz->memcg->css);
3594	return nr_reclaimed;
3595}
3596
3597/*
3598 * Reclaims as many pages from the given memcg as possible.
3599 *
3600 * Caller is responsible for holding css reference for memcg.
3601 */
3602static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3603{
3604	int nr_retries = MAX_RECLAIM_RETRIES;
3605
3606	/* we call try-to-free pages to make this cgroup empty */
3607	lru_add_drain_all();
3608
3609	drain_all_stock(memcg);
3610
3611	/* try to free all pages in this cgroup */
3612	while (nr_retries && page_counter_read(&memcg->memory)) {
3613		if (signal_pending(current))
3614			return -EINTR;
3615
3616		if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3617						  MEMCG_RECLAIM_MAY_SWAP))
3618			nr_retries--;
3619	}
3620
3621	return 0;
3622}
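
/*
 * Illustrative userspace usage (hypothetical path, not kernel code):
 *
 *	int fd = open("/sys/fs/cgroup/memory/<group>/memory.force_empty",
 *		      O_WRONLY);
 *	write(fd, "0", 1);
 *
 * Any write ends up in mem_cgroup_force_empty_write() below and keeps
 * reclaiming until the charge drops to zero, a signal arrives or the
 * retries are exhausted.
 */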
3623
3624static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3625					    char *buf, size_t nbytes,
3626					    loff_t off)
3627{
3628	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3629
3630	if (mem_cgroup_is_root(memcg))
3631		return -EINVAL;
3632	return mem_cgroup_force_empty(memcg) ?: nbytes;
3633}
3634
3635static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3636				     struct cftype *cft)
3637{
3638	return 1;
3639}
3640
3641static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3642				      struct cftype *cft, u64 val)
3643{
3644	if (val == 1)
3645		return 0;
3646
3647	pr_warn_once("Non-hierarchical mode is deprecated. "
3648		     "Please report your usecase to linux-mm@kvack.org if you "
3649		     "depend on this functionality.\n");
3650
3651	return -EINVAL;
3652}
3653
3654static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3655{
3656	unsigned long val;
3657
3658	if (mem_cgroup_is_root(memcg)) {
3659		mem_cgroup_flush_stats();
3660		val = memcg_page_state(memcg, NR_FILE_PAGES) +
3661			memcg_page_state(memcg, NR_ANON_MAPPED);
3662		if (swap)
3663			val += memcg_page_state(memcg, MEMCG_SWAP);
3664	} else {
3665		if (!swap)
3666			val = page_counter_read(&memcg->memory);
3667		else
3668			val = page_counter_read(&memcg->memsw);
3669	}
3670	return val;
3671}
3672
3673enum {
3674	RES_USAGE,
3675	RES_LIMIT,
3676	RES_MAX_USAGE,
3677	RES_FAILCNT,
3678	RES_SOFT_LIMIT,
3679};
3680
3681static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3682			       struct cftype *cft)
3683{
3684	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3685	struct page_counter *counter;
3686
3687	switch (MEMFILE_TYPE(cft->private)) {
3688	case _MEM:
3689		counter = &memcg->memory;
3690		break;
3691	case _MEMSWAP:
3692		counter = &memcg->memsw;
3693		break;
3694	case _KMEM:
3695		counter = &memcg->kmem;
3696		break;
3697	case _TCP:
3698		counter = &memcg->tcpmem;
3699		break;
3700	default:
3701		BUG();
3702	}
3703
3704	switch (MEMFILE_ATTR(cft->private)) {
3705	case RES_USAGE:
3706		if (counter == &memcg->memory)
3707			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3708		if (counter == &memcg->memsw)
3709			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3710		return (u64)page_counter_read(counter) * PAGE_SIZE;
3711	case RES_LIMIT:
3712		return (u64)counter->max * PAGE_SIZE;
3713	case RES_MAX_USAGE:
3714		return (u64)counter->watermark * PAGE_SIZE;
3715	case RES_FAILCNT:
3716		return counter->failcnt;
3717	case RES_SOFT_LIMIT:
3718		return (u64)memcg->soft_limit * PAGE_SIZE;
3719	default:
3720		BUG();
3721	}
3722}
3723
3724#ifdef CONFIG_MEMCG_KMEM
3725static int memcg_online_kmem(struct mem_cgroup *memcg)
3726{
3727	struct obj_cgroup *objcg;
3728
3729	if (mem_cgroup_kmem_disabled())
3730		return 0;
3731
3732	if (unlikely(mem_cgroup_is_root(memcg)))
3733		return 0;
3734
3735	objcg = obj_cgroup_alloc();
3736	if (!objcg)
3737		return -ENOMEM;
3738
3739	objcg->memcg = memcg;
3740	rcu_assign_pointer(memcg->objcg, objcg);
3741
3742	static_branch_enable(&memcg_kmem_enabled_key);
3743
3744	memcg->kmemcg_id = memcg->id.id;
3745
3746	return 0;
3747}
3748
3749static void memcg_offline_kmem(struct mem_cgroup *memcg)
3750{
3751	struct mem_cgroup *parent;
3752
3753	if (mem_cgroup_kmem_disabled())
3754		return;
3755
3756	if (unlikely(mem_cgroup_is_root(memcg)))
3757		return;
3758
3759	parent = parent_mem_cgroup(memcg);
3760	if (!parent)
3761		parent = root_mem_cgroup;
3762
3763	memcg_reparent_objcgs(memcg, parent);
3764
3765	/*
3766	 * After we have finished memcg_reparent_objcgs(), all list_lrus
3767	 * corresponding to this cgroup are guaranteed to remain empty.
3768	 * The ordering is imposed by list_lru_node->lock taken by
3769	 * memcg_reparent_list_lrus().
3770	 */
3771	memcg_reparent_list_lrus(memcg, parent);
3772}
3773#else
3774static int memcg_online_kmem(struct mem_cgroup *memcg)
3775{
3776	return 0;
3777}
3778static void memcg_offline_kmem(struct mem_cgroup *memcg)
3779{
3780}
3781#endif /* CONFIG_MEMCG_KMEM */
3782
3783static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3784{
3785	int ret;
3786
3787	mutex_lock(&memcg_max_mutex);
3788
3789	ret = page_counter_set_max(&memcg->tcpmem, max);
3790	if (ret)
3791		goto out;
3792
3793	if (!memcg->tcpmem_active) {
3794		/*
3795		 * The active flag needs to be written after the static_key
3796		 * update. This is what guarantees that the socket activation
3797		 * function is the last one to run. See mem_cgroup_sk_alloc()
3798		 * for details, and note that we don't mark any socket as
3799		 * belonging to this memcg until that flag is up.
3800		 *
3801		 * We need to do this, because static_keys will span multiple
3802		 * sites, but we can't control their order. If we mark a socket
3803		 * as accounted, but the accounting functions are not patched in
3804		 * yet, we'll lose accounting.
3805		 *
3806		 * We never race with the readers in mem_cgroup_sk_alloc(),
3807		 * because when this value changes, the code to process it is not
3808		 * patched in yet.
3809		 */
3810		static_branch_inc(&memcg_sockets_enabled_key);
3811		memcg->tcpmem_active = true;
3812	}
3813out:
3814	mutex_unlock(&memcg_max_mutex);
3815	return ret;
3816}
3817
3818/*
3819 * Write handler for the limit files (RES_LIMIT) and the soft limit
3820 * (RES_SOFT_LIMIT).
3821 */
3822static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3823				char *buf, size_t nbytes, loff_t off)
3824{
3825	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3826	unsigned long nr_pages;
3827	int ret;
3828
3829	buf = strstrip(buf);
3830	ret = page_counter_memparse(buf, "-1", &nr_pages);
3831	if (ret)
3832		return ret;
3833
3834	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3835	case RES_LIMIT:
3836		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3837			ret = -EINVAL;
3838			break;
3839		}
3840		switch (MEMFILE_TYPE(of_cft(of)->private)) {
3841		case _MEM:
3842			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3843			break;
3844		case _MEMSWAP:
3845			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3846			break;
3847		case _KMEM:
3848			/* kmem.limit_in_bytes is deprecated. */
3849			ret = -EOPNOTSUPP;
3850			break;
3851		case _TCP:
3852			ret = memcg_update_tcp_max(memcg, nr_pages);
3853			break;
3854		}
3855		break;
3856	case RES_SOFT_LIMIT:
3857		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
3858			ret = -EOPNOTSUPP;
3859		} else {
3860			memcg->soft_limit = nr_pages;
3861			ret = 0;
3862		}
3863		break;
3864	}
3865	return ret ?: nbytes;
3866}
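
/*
 * Illustrative userspace usage (hypothetical path, not kernel code):
 * the handler above accepts the usual memparse suffixes, so
 *
 *	int fd = open("/sys/fs/cgroup/memory/<group>/memory.limit_in_bytes",
 *		      O_WRONLY);
 *	write(fd, "512M", 4);
 *
 * resizes memcg->memory to 131072 pages with 4K pages, while writing
 * "-1" restores the PAGE_COUNTER_MAX (unlimited) default.
 */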
3867
3868static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3869				size_t nbytes, loff_t off)
3870{
3871	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3872	struct page_counter *counter;
3873
3874	switch (MEMFILE_TYPE(of_cft(of)->private)) {
3875	case _MEM:
3876		counter = &memcg->memory;
3877		break;
3878	case _MEMSWAP:
3879		counter = &memcg->memsw;
3880		break;
3881	case _KMEM:
3882		counter = &memcg->kmem;
3883		break;
3884	case _TCP:
3885		counter = &memcg->tcpmem;
3886		break;
3887	default:
3888		BUG();
3889	}
3890
3891	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3892	case RES_MAX_USAGE:
3893		page_counter_reset_watermark(counter);
3894		break;
3895	case RES_FAILCNT:
3896		counter->failcnt = 0;
3897		break;
3898	default:
3899		BUG();
3900	}
3901
3902	return nbytes;
3903}
3904
3905static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3906					struct cftype *cft)
3907{
3908	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3909}
3910
3911#ifdef CONFIG_MMU
3912static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3913					struct cftype *cft, u64 val)
3914{
3915	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3916
3917	if (val & ~MOVE_MASK)
3918		return -EINVAL;
3919
3920	/*
3921	 * No kind of locking is needed in here, because ->can_attach() will
3922	 * check this value once in the beginning of the process, and then carry
3923	 * on with stale data. This means that changes to this value will only
3924	 * affect task migrations starting after the change.
3925	 */
3926	memcg->move_charge_at_immigrate = val;
3927	return 0;
3928}
3929#else
3930static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3931					struct cftype *cft, u64 val)
3932{
3933	return -ENOSYS;
3934}
3935#endif
3936
3937#ifdef CONFIG_NUMA
3938
3939#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3940#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3941#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
3942
3943static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3944				int nid, unsigned int lru_mask, bool tree)
3945{
3946	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
3947	unsigned long nr = 0;
3948	enum lru_list lru;
3949
3950	VM_BUG_ON((unsigned)nid >= nr_node_ids);
3951
3952	for_each_lru(lru) {
3953		if (!(BIT(lru) & lru_mask))
3954			continue;
3955		if (tree)
3956			nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
3957		else
3958			nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
3959	}
3960	return nr;
3961}
3962
3963static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3964					     unsigned int lru_mask,
3965					     bool tree)
3966{
3967	unsigned long nr = 0;
3968	enum lru_list lru;
3969
3970	for_each_lru(lru) {
3971		if (!(BIT(lru) & lru_mask))
3972			continue;
3973		if (tree)
3974			nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
3975		else
3976			nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
3977	}
3978	return nr;
3979}
3980
3981static int memcg_numa_stat_show(struct seq_file *m, void *v)
3982{
3983	struct numa_stat {
3984		const char *name;
3985		unsigned int lru_mask;
3986	};
3987
3988	static const struct numa_stat stats[] = {
3989		{ "total", LRU_ALL },
3990		{ "file", LRU_ALL_FILE },
3991		{ "anon", LRU_ALL_ANON },
3992		{ "unevictable", BIT(LRU_UNEVICTABLE) },
3993	};
3994	const struct numa_stat *stat;
3995	int nid;
3996	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3997
3998	mem_cgroup_flush_stats();
3999
4000	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4001		seq_printf(m, "%s=%lu", stat->name,
4002			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4003						   false));
4004		for_each_node_state(nid, N_MEMORY)
4005			seq_printf(m, " N%d=%lu", nid,
4006				   mem_cgroup_node_nr_lru_pages(memcg, nid,
4007							stat->lru_mask, false));
4008		seq_putc(m, '\n');
4009	}
4010
4011	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4012
4013		seq_printf(m, "hierarchical_%s=%lu", stat->name,
4014			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4015						   true));
4016		for_each_node_state(nid, N_MEMORY)
4017			seq_printf(m, " N%d=%lu", nid,
4018				   mem_cgroup_node_nr_lru_pages(memcg, nid,
4019							stat->lru_mask, true));
4020		seq_putc(m, '\n');
4021	}
4022
4023	return 0;
4024}
4025#endif /* CONFIG_NUMA */
4026
4027static const unsigned int memcg1_stats[] = {
4028	NR_FILE_PAGES,
4029	NR_ANON_MAPPED,
4030#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4031	NR_ANON_THPS,
4032#endif
4033	NR_SHMEM,
4034	NR_FILE_MAPPED,
4035	NR_FILE_DIRTY,
4036	NR_WRITEBACK,
4037	WORKINGSET_REFAULT_ANON,
4038	WORKINGSET_REFAULT_FILE,
4039	MEMCG_SWAP,
4040};
4041
4042static const char *const memcg1_stat_names[] = {
4043	"cache",
4044	"rss",
4045#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4046	"rss_huge",
4047#endif
4048	"shmem",
4049	"mapped_file",
4050	"dirty",
4051	"writeback",
4052	"workingset_refault_anon",
4053	"workingset_refault_file",
4054	"swap",
4055};
4056
4057/* Universal VM events cgroup1 shows, original sort order */
4058static const unsigned int memcg1_events[] = {
4059	PGPGIN,
4060	PGPGOUT,
4061	PGFAULT,
4062	PGMAJFAULT,
4063};
4064
4065static int memcg_stat_show(struct seq_file *m, void *v)
4066{
4067	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4068	unsigned long memory, memsw;
4069	struct mem_cgroup *mi;
4070	unsigned int i;
4071
4072	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4073
4074	mem_cgroup_flush_stats();
4075
4076	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4077		unsigned long nr;
4078
4079		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4080			continue;
4081		nr = memcg_page_state_local(memcg, memcg1_stats[i]);
4082		seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
4083			   nr * memcg_page_state_unit(memcg1_stats[i]));
4084	}
4085
4086	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4087		seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
4088			   memcg_events_local(memcg, memcg1_events[i]));
4089
4090	for (i = 0; i < NR_LRU_LISTS; i++)
4091		seq_printf(m, "%s %lu\n", lru_list_name(i),
4092			   memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4093			   PAGE_SIZE);
4094
4095	/* Hierarchical information */
4096	memory = memsw = PAGE_COUNTER_MAX;
4097	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4098		memory = min(memory, READ_ONCE(mi->memory.max));
4099		memsw = min(memsw, READ_ONCE(mi->memsw.max));
4100	}
4101	seq_printf(m, "hierarchical_memory_limit %llu\n",
4102		   (u64)memory * PAGE_SIZE);
4103	if (do_memsw_account())
4104		seq_printf(m, "hierarchical_memsw_limit %llu\n",
4105			   (u64)memsw * PAGE_SIZE);
4106
4107	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4108		unsigned long nr;
4109
4110		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4111			continue;
4112		nr = memcg_page_state(memcg, memcg1_stats[i]);
4113		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
4114			   (u64)nr * memcg_page_state_unit(memcg1_stats[i]));
4115	}
4116
4117	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4118		seq_printf(m, "total_%s %llu\n",
4119			   vm_event_name(memcg1_events[i]),
4120			   (u64)memcg_events(memcg, memcg1_events[i]));
4121
4122	for (i = 0; i < NR_LRU_LISTS; i++)
4123		seq_printf(m, "total_%s %llu\n", lru_list_name(i),
4124			   (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4125			   PAGE_SIZE);
4126
4127#ifdef CONFIG_DEBUG_VM
4128	{
4129		pg_data_t *pgdat;
4130		struct mem_cgroup_per_node *mz;
4131		unsigned long anon_cost = 0;
4132		unsigned long file_cost = 0;
4133
4134		for_each_online_pgdat(pgdat) {
4135			mz = memcg->nodeinfo[pgdat->node_id];
4136
4137			anon_cost += mz->lruvec.anon_cost;
4138			file_cost += mz->lruvec.file_cost;
4139		}
4140		seq_printf(m, "anon_cost %lu\n", anon_cost);
4141		seq_printf(m, "file_cost %lu\n", file_cost);
4142	}
4143#endif
4144
4145	return 0;
4146}
4147
4148static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4149				      struct cftype *cft)
4150{
4151	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4152
4153	return mem_cgroup_swappiness(memcg);
4154}
4155
4156static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4157				       struct cftype *cft, u64 val)
4158{
4159	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4160
4161	if (val > 200)
4162		return -EINVAL;
4163
4164	if (!mem_cgroup_is_root(memcg))
4165		memcg->swappiness = val;
4166	else
4167		vm_swappiness = val;
4168
4169	return 0;
4170}
4171
4172static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4173{
4174	struct mem_cgroup_threshold_ary *t;
4175	unsigned long usage;
4176	int i;
4177
4178	rcu_read_lock();
4179	if (!swap)
4180		t = rcu_dereference(memcg->thresholds.primary);
4181	else
4182		t = rcu_dereference(memcg->memsw_thresholds.primary);
4183
4184	if (!t)
4185		goto unlock;
4186
4187	usage = mem_cgroup_usage(memcg, swap);
4188
4189	/*
4190	 * current_threshold points to the threshold just below or equal to usage.
4191	 * If that is no longer true, a threshold was crossed after the last
4192	 * call of __mem_cgroup_threshold().
4193	 */
4194	i = t->current_threshold;
4195
4196	/*
4197	 * Iterate backward over array of thresholds starting from
4198	 * current_threshold and check if a threshold is crossed.
4199	 * If none of the thresholds below usage is crossed, we read
4200	 * only one element of the array here.
4201	 */
4202	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4203		eventfd_signal(t->entries[i].eventfd, 1);
4204
4205	/* i = current_threshold + 1 */
4206	i++;
4207
4208	/*
4209	 * Iterate forward over array of thresholds starting from
4210	 * current_threshold+1 and check if a threshold is crossed.
4211	 * If none of the thresholds above usage is crossed, we read
4212	 * only one element of the array here.
4213	 */
4214	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4215		eventfd_signal(t->entries[i].eventfd, 1);
4216
4217	/* Update current_threshold */
4218	t->current_threshold = i - 1;
4219unlock:
4220	rcu_read_unlock();
4221}
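
/*
 * Worked example (illustrative): with thresholds registered at 4M, 8M
 * and 16M and current_threshold pointing at the 8M entry, a usage
 * reading of 5M walks backwards once, signals the 8M eventfd and leaves
 * current_threshold at the 4M entry; a reading of 20M instead walks
 * forward, signals the 16M eventfd and settles on that entry.
 */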
4222
4223static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4224{
4225	while (memcg) {
4226		__mem_cgroup_threshold(memcg, false);
4227		if (do_memsw_account())
4228			__mem_cgroup_threshold(memcg, true);
4229
4230		memcg = parent_mem_cgroup(memcg);
4231	}
4232}
4233
4234static int compare_thresholds(const void *a, const void *b)
4235{
4236	const struct mem_cgroup_threshold *_a = a;
4237	const struct mem_cgroup_threshold *_b = b;
4238
4239	if (_a->threshold > _b->threshold)
4240		return 1;
4241
4242	if (_a->threshold < _b->threshold)
4243		return -1;
4244
4245	return 0;
4246}
4247
4248static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4249{
4250	struct mem_cgroup_eventfd_list *ev;
4251
4252	spin_lock(&memcg_oom_lock);
4253
4254	list_for_each_entry(ev, &memcg->oom_notify, list)
4255		eventfd_signal(ev->eventfd, 1);
4256
4257	spin_unlock(&memcg_oom_lock);
4258	return 0;
4259}
4260
4261static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4262{
4263	struct mem_cgroup *iter;
4264
4265	for_each_mem_cgroup_tree(iter, memcg)
4266		mem_cgroup_oom_notify_cb(iter);
4267}
4268
4269static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4270	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4271{
4272	struct mem_cgroup_thresholds *thresholds;
4273	struct mem_cgroup_threshold_ary *new;
4274	unsigned long threshold;
4275	unsigned long usage;
4276	int i, size, ret;
4277
4278	ret = page_counter_memparse(args, "-1", &threshold);
4279	if (ret)
4280		return ret;
4281
4282	mutex_lock(&memcg->thresholds_lock);
4283
4284	if (type == _MEM) {
4285		thresholds = &memcg->thresholds;
4286		usage = mem_cgroup_usage(memcg, false);
4287	} else if (type == _MEMSWAP) {
4288		thresholds = &memcg->memsw_thresholds;
4289		usage = mem_cgroup_usage(memcg, true);
4290	} else
4291		BUG();
4292
4293	/* Check if a threshold crossed before adding a new one */
4294	if (thresholds->primary)
4295		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4296
4297	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4298
4299	/* Allocate memory for new array of thresholds */
4300	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4301	if (!new) {
4302		ret = -ENOMEM;
4303		goto unlock;
4304	}
4305	new->size = size;
4306
4307	/* Copy thresholds (if any) to new array */
4308	if (thresholds->primary)
4309		memcpy(new->entries, thresholds->primary->entries,
4310		       flex_array_size(new, entries, size - 1));
4311
4312	/* Add new threshold */
4313	new->entries[size - 1].eventfd = eventfd;
4314	new->entries[size - 1].threshold = threshold;
4315
4316	/* Sort thresholds. Registering a new threshold isn't time-critical */
4317	sort(new->entries, size, sizeof(*new->entries),
4318			compare_thresholds, NULL);
4319
4320	/* Find current threshold */
4321	new->current_threshold = -1;
4322	for (i = 0; i < size; i++) {
4323		if (new->entries[i].threshold <= usage) {
4324			/*
4325			 * new->current_threshold will not be used until
4326			 * rcu_assign_pointer(), so it's safe to increment
4327			 * it here.
4328			 */
4329			++new->current_threshold;
4330		} else
4331			break;
4332	}
4333
4334	/* Free old spare buffer and save old primary buffer as spare */
4335	kfree(thresholds->spare);
4336	thresholds->spare = thresholds->primary;
4337
4338	rcu_assign_pointer(thresholds->primary, new);
4339
4340	/* To be sure that nobody uses thresholds */
4341	synchronize_rcu();
4342
4343unlock:
4344	mutex_unlock(&memcg->thresholds_lock);
4345
4346	return ret;
4347}
4348
4349static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4350	struct eventfd_ctx *eventfd, const char *args)
4351{
4352	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4353}
4354
4355static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4356	struct eventfd_ctx *eventfd, const char *args)
4357{
4358	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4359}
4360
4361static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4362	struct eventfd_ctx *eventfd, enum res_type type)
4363{
4364	struct mem_cgroup_thresholds *thresholds;
4365	struct mem_cgroup_threshold_ary *new;
4366	unsigned long usage;
4367	int i, j, size, entries;
4368
4369	mutex_lock(&memcg->thresholds_lock);
4370
4371	if (type == _MEM) {
4372		thresholds = &memcg->thresholds;
4373		usage = mem_cgroup_usage(memcg, false);
4374	} else if (type == _MEMSWAP) {
4375		thresholds = &memcg->memsw_thresholds;
4376		usage = mem_cgroup_usage(memcg, true);
4377	} else
4378		BUG();
4379
4380	if (!thresholds->primary)
4381		goto unlock;
4382
4383	/* Check if a threshold crossed before removing */
4384	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4385
4386	/* Calculate the new number of thresholds */
4387	size = entries = 0;
4388	for (i = 0; i < thresholds->primary->size; i++) {
4389		if (thresholds->primary->entries[i].eventfd != eventfd)
4390			size++;
4391		else
4392			entries++;
4393	}
4394
4395	new = thresholds->spare;
4396
4397	/* If no items related to eventfd have been cleared, nothing to do */
4398	if (!entries)
4399		goto unlock;
4400
4401	/* Set thresholds array to NULL if we don't have thresholds */
4402	if (!size) {
4403		kfree(new);
4404		new = NULL;
4405		goto swap_buffers;
4406	}
4407
4408	new->size = size;
4409
4410	/* Copy thresholds and find current threshold */
4411	new->current_threshold = -1;
4412	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4413		if (thresholds->primary->entries[i].eventfd == eventfd)
4414			continue;
4415
4416		new->entries[j] = thresholds->primary->entries[i];
4417		if (new->entries[j].threshold <= usage) {
4418			/*
4419			 * new->current_threshold will not be used
4420			 * until rcu_assign_pointer(), so it's safe to increment
4421			 * it here.
4422			 */
4423			++new->current_threshold;
4424		}
4425		j++;
4426	}
4427
4428swap_buffers:
4429	/* Swap primary and spare array */
4430	thresholds->spare = thresholds->primary;
4431
4432	rcu_assign_pointer(thresholds->primary, new);
4433
4434	/* To be sure that nobody uses thresholds */
4435	synchronize_rcu();
4436
4437	/* If all events are unregistered, free the spare array */
4438	if (!new) {
4439		kfree(thresholds->spare);
4440		thresholds->spare = NULL;
4441	}
4442unlock:
4443	mutex_unlock(&memcg->thresholds_lock);
4444}
4445
4446static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4447	struct eventfd_ctx *eventfd)
4448{
4449	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4450}
4451
4452static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4453	struct eventfd_ctx *eventfd)
4454{
4455	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4456}
4457
4458static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4459	struct eventfd_ctx *eventfd, const char *args)
4460{
4461	struct mem_cgroup_eventfd_list *event;
4462
4463	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4464	if (!event)
4465		return -ENOMEM;
4466
4467	spin_lock(&memcg_oom_lock);
4468
4469	event->eventfd = eventfd;
4470	list_add(&event->list, &memcg->oom_notify);
4471
4472	/* already in OOM ? */
4473	if (memcg->under_oom)
4474		eventfd_signal(eventfd, 1);
4475	spin_unlock(&memcg_oom_lock);
4476
4477	return 0;
4478}
4479
4480static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4481	struct eventfd_ctx *eventfd)
4482{
4483	struct mem_cgroup_eventfd_list *ev, *tmp;
4484
4485	spin_lock(&memcg_oom_lock);
4486
4487	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4488		if (ev->eventfd == eventfd) {
4489			list_del(&ev->list);
4490			kfree(ev);
4491		}
4492	}
4493
4494	spin_unlock(&memcg_oom_lock);
4495}
4496
4497static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4498{
4499	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4500
4501	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4502	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4503	seq_printf(sf, "oom_kill %lu\n",
4504		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4505	return 0;
4506}
4507
4508static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4509	struct cftype *cft, u64 val)
4510{
4511	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4512
4513	/* cannot set to root cgroup and only 0 and 1 are allowed */
4514	if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
4515		return -EINVAL;
4516
4517	memcg->oom_kill_disable = val;
4518	if (!val)
4519		memcg_oom_recover(memcg);
4520
4521	return 0;
4522}
4523
4524#ifdef CONFIG_CGROUP_WRITEBACK
4525
4526#include <trace/events/writeback.h>
4527
4528static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4529{
4530	return wb_domain_init(&memcg->cgwb_domain, gfp);
4531}
4532
4533static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4534{
4535	wb_domain_exit(&memcg->cgwb_domain);
4536}
4537
4538static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4539{
4540	wb_domain_size_changed(&memcg->cgwb_domain);
4541}
4542
4543struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4544{
4545	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4546
4547	if (!memcg->css.parent)
4548		return NULL;
4549
4550	return &memcg->cgwb_domain;
4551}
4552
4553/**
4554 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4555 * @wb: bdi_writeback in question
4556 * @pfilepages: out parameter for number of file pages
4557 * @pheadroom: out parameter for number of allocatable pages according to memcg
4558 * @pdirty: out parameter for number of dirty pages
4559 * @pwriteback: out parameter for number of pages under writeback
4560 *
4561 * Determine the numbers of file, headroom, dirty, and writeback pages in
4562 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4563 * is a bit more involved.
4564 *
4565 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4566 * headroom is calculated as the lowest headroom of itself and the
4567 * ancestors.  Note that this doesn't consider the actual amount of
4568 * available memory in the system.  The caller should further cap
4569 * *@pheadroom accordingly.
4570 */
4571void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4572			 unsigned long *pheadroom, unsigned long *pdirty,
4573			 unsigned long *pwriteback)
4574{
4575	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4576	struct mem_cgroup *parent;
4577
4578	mem_cgroup_flush_stats();
4579
4580	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4581	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4582	*pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4583			memcg_page_state(memcg, NR_ACTIVE_FILE);
4584
4585	*pheadroom = PAGE_COUNTER_MAX;
4586	while ((parent = parent_mem_cgroup(memcg))) {
4587		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4588					    READ_ONCE(memcg->memory.high));
4589		unsigned long used = page_counter_read(&memcg->memory);
4590
4591		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4592		memcg = parent;
4593	}
4594}
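
/*
 * Worked example for the headroom walk above (illustrative; figures
 * given in bytes for readability, the counters are in pages): a child
 * with max = 512M, high = 256M, usage = 200M under a parent with
 * max = 1G, high unset, usage = 768M contributes min(512M, 256M) - 200M
 * = 56M, the parent contributes 1G - 768M = 256M, and *pheadroom ends
 * up as the smaller of the two, 56M.
 */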
4595
4596/*
4597 * Foreign dirty flushing
4598 *
4599 * There's an inherent mismatch between memcg and writeback.  The former
4600 * tracks ownership per-page while the latter per-inode.  This was a
4601 * deliberate design decision because honoring per-page ownership in the
4602 * writeback path is complicated, may lead to higher CPU and IO overheads
4603 * and deemed unnecessary given that write-sharing an inode across
4604 * different cgroups isn't a common use-case.
4605 *
4606 * Combined with inode majority-writer ownership switching, this works well
4607 * enough in most cases but there are some pathological cases.  For
4608 * example, let's say there are two cgroups A and B which keep writing to
4609 * different but confined parts of the same inode.  B owns the inode and
4610 * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4611 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4612 * triggering background writeback.  A will be slowed down without a way to
4613 * make writeback of the dirty pages happen.
4614 *
4615 * Conditions like the above can lead to a cgroup getting repeatedly and
4616 * severely throttled after making some progress after each
4617 * dirty_expire_interval while the underlying IO device is almost
4618 * completely idle.
4619 *
4620 * Solving this problem completely requires matching the ownership tracking
4621 * granularities between memcg and writeback in either direction.  However,
4622 * the more egregious behaviors can be avoided by simply remembering the
4623 * most recent foreign dirtying events and initiating remote flushes on
4624 * them when local writeback isn't enough to keep the memory clean enough.
4625 *
4626 * The following two functions implement such mechanism.  When a foreign
4627 * page - a page whose memcg and writeback ownerships don't match - is
4628 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4629 * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4630 * decides that the memcg needs to sleep due to high dirty ratio, it calls
4631 * mem_cgroup_flush_foreign() which queues writeback on the recorded
4632 * foreign bdi_writebacks which haven't expired.  Both the numbers of
4633 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4634 * limited to MEMCG_CGWB_FRN_CNT.
4635 *
4636 * The mechanism only remembers IDs and doesn't hold any object references.
4637 * As being wrong occasionally doesn't matter, updates and accesses to the
4638 * records are lockless and racy.
4639 */
4640void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
4641					     struct bdi_writeback *wb)
4642{
4643	struct mem_cgroup *memcg = folio_memcg(folio);
4644	struct memcg_cgwb_frn *frn;
4645	u64 now = get_jiffies_64();
4646	u64 oldest_at = now;
4647	int oldest = -1;
4648	int i;
4649
4650	trace_track_foreign_dirty(folio, wb);
4651
4652	/*
4653	 * Pick the slot to use.  If there is already a slot for @wb, keep
4654	 * using it.  If not replace the oldest one which isn't being
4655	 * written out.
4656	 */
4657	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4658		frn = &memcg->cgwb_frn[i];
4659		if (frn->bdi_id == wb->bdi->id &&
4660		    frn->memcg_id == wb->memcg_css->id)
4661			break;
4662		if (time_before64(frn->at, oldest_at) &&
4663		    atomic_read(&frn->done.cnt) == 1) {
4664			oldest = i;
4665			oldest_at = frn->at;
4666		}
4667	}
4668
4669	if (i < MEMCG_CGWB_FRN_CNT) {
4670		/*
4671		 * Re-using an existing one.  Update timestamp lazily to
4672		 * avoid making the cacheline hot.  We want them to be
4673		 * reasonably up-to-date and significantly shorter than
4674		 * dirty_expire_interval as that's what expires the record.
4675		 * Use the shorter of 1s and dirty_expire_interval / 8.
4676		 */
4677		unsigned long update_intv =
4678			min_t(unsigned long, HZ,
4679			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4680
4681		if (time_before64(frn->at, now - update_intv))
4682			frn->at = now;
4683	} else if (oldest >= 0) {
4684		/* replace the oldest free one */
4685		frn = &memcg->cgwb_frn[oldest];
4686		frn->bdi_id = wb->bdi->id;
4687		frn->memcg_id = wb->memcg_css->id;
4688		frn->at = now;
4689	}
4690}
4691
4692/* issue foreign writeback flushes for recorded foreign dirtying events */
4693void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4694{
4695	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4696	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4697	u64 now = jiffies_64;
4698	int i;
4699
4700	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4701		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4702
4703		/*
4704		 * If the record is older than dirty_expire_interval,
4705		 * writeback on it has already started.  No need to kick it
4706		 * off again.  Also, don't start a new one if there's
4707		 * already one in flight.
4708		 */
4709		if (time_after64(frn->at, now - intv) &&
4710		    atomic_read(&frn->done.cnt) == 1) {
4711			frn->at = 0;
4712			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4713			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
4714					       WB_REASON_FOREIGN_FLUSH,
4715					       &frn->done);
4716		}
4717	}
4718}
4719
4720#else	/* CONFIG_CGROUP_WRITEBACK */
4721
4722static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4723{
4724	return 0;
4725}
4726
4727static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4728{
4729}
4730
4731static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4732{
4733}
4734
4735#endif	/* CONFIG_CGROUP_WRITEBACK */
4736
4737/*
4738 * DO NOT USE IN NEW FILES.
4739 *
4740 * "cgroup.event_control" implementation.
4741 *
4742 * This is way over-engineered.  It tries to support fully configurable
4743 * events for each user.  Such level of flexibility is completely
4744 * unnecessary especially in the light of the planned unified hierarchy.
4745 *
4746 * Please deprecate this and replace with something simpler if at all
4747 * possible.
4748 */
4749
4750/*
4751 * Unregister event and free resources.
4752 *
4753 * Gets called from workqueue.
4754 */
4755static void memcg_event_remove(struct work_struct *work)
4756{
4757	struct mem_cgroup_event *event =
4758		container_of(work, struct mem_cgroup_event, remove);
4759	struct mem_cgroup *memcg = event->memcg;
4760
4761	remove_wait_queue(event->wqh, &event->wait);
4762
4763	event->unregister_event(memcg, event->eventfd);
4764
4765	/* Notify userspace the event is going away. */
4766	eventfd_signal(event->eventfd, 1);
4767
4768	eventfd_ctx_put(event->eventfd);
4769	kfree(event);
4770	css_put(&memcg->css);
4771}
4772
4773/*
4774 * Gets called on EPOLLHUP on eventfd when user closes it.
4775 *
4776 * Called with wqh->lock held and interrupts disabled.
4777 */
4778static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4779			    int sync, void *key)
4780{
4781	struct mem_cgroup_event *event =
4782		container_of(wait, struct mem_cgroup_event, wait);
4783	struct mem_cgroup *memcg = event->memcg;
4784	__poll_t flags = key_to_poll(key);
4785
4786	if (flags & EPOLLHUP) {
4787		/*
4788		 * If the event has been detached at cgroup removal, we
4789		 * can simply return knowing the other side will cleanup
4790		 * for us.
4791		 *
4792		 * We can't race against event freeing since the other
4793		 * side will require wqh->lock via remove_wait_queue(),
4794		 * which we hold.
4795		 */
4796		spin_lock(&memcg->event_list_lock);
4797		if (!list_empty(&event->list)) {
4798			list_del_init(&event->list);
4799			/*
4800			 * We are in atomic context, but memcg_event_remove()
4801			 * may sleep, so we have to call it from a workqueue.
4802			 */
4803			schedule_work(&event->remove);
4804		}
4805		spin_unlock(&memcg->event_list_lock);
4806	}
4807
4808	return 0;
4809}
4810
4811static void memcg_event_ptable_queue_proc(struct file *file,
4812		wait_queue_head_t *wqh, poll_table *pt)
4813{
4814	struct mem_cgroup_event *event =
4815		container_of(pt, struct mem_cgroup_event, pt);
4816
4817	event->wqh = wqh;
4818	add_wait_queue(wqh, &event->wait);
4819}
4820
4821/*
4822 * DO NOT USE IN NEW FILES.
4823 *
4824 * Parse input and register new cgroup event handler.
4825 *
4826 * Input must be in format '<event_fd> <control_fd> <args>'.
4827 * Interpretation of args is defined by control file implementation.
4828 */
4829static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4830					 char *buf, size_t nbytes, loff_t off)
4831{
4832	struct cgroup_subsys_state *css = of_css(of);
4833	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4834	struct mem_cgroup_event *event;
4835	struct cgroup_subsys_state *cfile_css;
4836	unsigned int efd, cfd;
4837	struct fd efile;
4838	struct fd cfile;
4839	struct dentry *cdentry;
4840	const char *name;
4841	char *endp;
4842	int ret;
4843
4844	if (IS_ENABLED(CONFIG_PREEMPT_RT))
4845		return -EOPNOTSUPP;
4846
4847	buf = strstrip(buf);
4848
4849	efd = simple_strtoul(buf, &endp, 10);
4850	if (*endp != ' ')
4851		return -EINVAL;
4852	buf = endp + 1;
4853
4854	cfd = simple_strtoul(buf, &endp, 10);
4855	if ((*endp != ' ') && (*endp != '\0'))
4856		return -EINVAL;
4857	buf = endp + 1;
4858
4859	event = kzalloc(sizeof(*event), GFP_KERNEL);
4860	if (!event)
4861		return -ENOMEM;
4862
4863	event->memcg = memcg;
4864	INIT_LIST_HEAD(&event->list);
4865	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4866	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4867	INIT_WORK(&event->remove, memcg_event_remove);
4868
4869	efile = fdget(efd);
4870	if (!efile.file) {
4871		ret = -EBADF;
4872		goto out_kfree;
4873	}
4874
4875	event->eventfd = eventfd_ctx_fileget(efile.file);
4876	if (IS_ERR(event->eventfd)) {
4877		ret = PTR_ERR(event->eventfd);
4878		goto out_put_efile;
4879	}
4880
4881	cfile = fdget(cfd);
4882	if (!cfile.file) {
4883		ret = -EBADF;
4884		goto out_put_eventfd;
4885	}
4886
4887	/* the process needs read permission on the control file */
4888	/* AV: shouldn't we check that it's been opened for read instead? */
4889	ret = file_permission(cfile.file, MAY_READ);
4890	if (ret < 0)
4891		goto out_put_cfile;
4892
4893	/*
4894	 * The control file must be a regular cgroup1 file. As a regular cgroup
4895	 * file can't be renamed, it's safe to access its name afterwards.
4896	 */
4897	cdentry = cfile.file->f_path.dentry;
4898	if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
4899		ret = -EINVAL;
4900		goto out_put_cfile;
4901	}
4902
4903	/*
4904	 * Determine the event callbacks and set them in @event.  This used
4905	 * to be done via struct cftype but cgroup core no longer knows
4906	 * about these events.  The following is crude but the whole thing
4907	 * is for compatibility anyway.
4908	 *
4909	 * DO NOT ADD NEW FILES.
4910	 */
4911	name = cdentry->d_name.name;
4912
4913	if (!strcmp(name, "memory.usage_in_bytes")) {
4914		event->register_event = mem_cgroup_usage_register_event;
4915		event->unregister_event = mem_cgroup_usage_unregister_event;
4916	} else if (!strcmp(name, "memory.oom_control")) {
4917		event->register_event = mem_cgroup_oom_register_event;
4918		event->unregister_event = mem_cgroup_oom_unregister_event;
4919	} else if (!strcmp(name, "memory.pressure_level")) {
4920		event->register_event = vmpressure_register_event;
4921		event->unregister_event = vmpressure_unregister_event;
4922	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4923		event->register_event = memsw_cgroup_usage_register_event;
4924		event->unregister_event = memsw_cgroup_usage_unregister_event;
4925	} else {
4926		ret = -EINVAL;
4927		goto out_put_cfile;
4928	}
4929
4930	/*
4931	 * Verify that @cfile belongs to @css.  Also, remaining events are
4932	 * automatically removed on cgroup destruction but the removal is
4933	 * asynchronous, so take an extra ref on @css.
4934	 */
4935	cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
4936					       &memory_cgrp_subsys);
4937	ret = -EINVAL;
4938	if (IS_ERR(cfile_css))
4939		goto out_put_cfile;
4940	if (cfile_css != css) {
4941		css_put(cfile_css);
4942		goto out_put_cfile;
4943	}
4944
4945	ret = event->register_event(memcg, event->eventfd, buf);
4946	if (ret)
4947		goto out_put_css;
4948
4949	vfs_poll(efile.file, &event->pt);
4950
4951	spin_lock_irq(&memcg->event_list_lock);
4952	list_add(&event->list, &memcg->event_list);
4953	spin_unlock_irq(&memcg->event_list_lock);
4954
4955	fdput(cfile);
4956	fdput(efile);
4957
4958	return nbytes;
4959
4960out_put_css:
4961	css_put(css);
4962out_put_cfile:
4963	fdput(cfile);
4964out_put_eventfd:
4965	eventfd_ctx_put(event->eventfd);
4966out_put_efile:
4967	fdput(efile);
4968out_kfree:
4969	kfree(event);
4970
4971	return ret;
4972}
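
/*
 * Illustrative userspace usage (hypothetical paths, not kernel code):
 * registering a 64M usage threshold through the legacy interface parsed
 * above roughly looks like
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open(".../memory.usage_in_bytes", O_RDONLY);
 *	int ecfd = open(".../cgroup.event_control", O_WRONLY);
 *	dprintf(ecfd, "%d %d %llu", efd, cfd, 64ULL << 20);
 *
 * after which a read() on efd blocks until the usage threshold is
 * crossed.
 */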
4973
4974#if defined(CONFIG_MEMCG_KMEM) && (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
4975static int mem_cgroup_slab_show(struct seq_file *m, void *p)
4976{
4977	/*
4978	 * Deprecated.
4979	 * Please, take a look at tools/cgroup/memcg_slabinfo.py .
4980	 */
4981	return 0;
4982}
4983#endif
4984
4985static struct cftype mem_cgroup_legacy_files[] = {
4986	{
4987		.name = "usage_in_bytes",
4988		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4989		.read_u64 = mem_cgroup_read_u64,
4990	},
4991	{
4992		.name = "max_usage_in_bytes",
4993		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4994		.write = mem_cgroup_reset,
4995		.read_u64 = mem_cgroup_read_u64,
4996	},
4997	{
4998		.name = "limit_in_bytes",
4999		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
5000		.write = mem_cgroup_write,
5001		.read_u64 = mem_cgroup_read_u64,
5002	},
5003	{
5004		.name = "soft_limit_in_bytes",
5005		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
5006		.write = mem_cgroup_write,
5007		.read_u64 = mem_cgroup_read_u64,
5008	},
5009	{
5010		.name = "failcnt",
5011		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
5012		.write = mem_cgroup_reset,
5013		.read_u64 = mem_cgroup_read_u64,
5014	},
5015	{
5016		.name = "stat",
5017		.seq_show = memcg_stat_show,
5018	},
5019	{
5020		.name = "force_empty",
5021		.write = mem_cgroup_force_empty_write,
5022	},
5023	{
5024		.name = "use_hierarchy",
5025		.write_u64 = mem_cgroup_hierarchy_write,
5026		.read_u64 = mem_cgroup_hierarchy_read,
5027	},
5028	{
5029		.name = "cgroup.event_control",		/* XXX: for compat */
5030		.write = memcg_write_event_control,
5031		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
5032	},
5033	{
5034		.name = "swappiness",
5035		.read_u64 = mem_cgroup_swappiness_read,
5036		.write_u64 = mem_cgroup_swappiness_write,
5037	},
5038	{
5039		.name = "move_charge_at_immigrate",
5040		.read_u64 = mem_cgroup_move_charge_read,
5041		.write_u64 = mem_cgroup_move_charge_write,
5042	},
5043	{
5044		.name = "oom_control",
5045		.seq_show = mem_cgroup_oom_control_read,
5046		.write_u64 = mem_cgroup_oom_control_write,
5047	},
5048	{
5049		.name = "pressure_level",
5050	},
5051#ifdef CONFIG_NUMA
5052	{
5053		.name = "numa_stat",
5054		.seq_show = memcg_numa_stat_show,
5055	},
5056#endif
5057	{
5058		.name = "kmem.limit_in_bytes",
5059		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5060		.write = mem_cgroup_write,
5061		.read_u64 = mem_cgroup_read_u64,
5062	},
5063	{
5064		.name = "kmem.usage_in_bytes",
5065		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5066		.read_u64 = mem_cgroup_read_u64,
5067	},
5068	{
5069		.name = "kmem.failcnt",
5070		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5071		.write = mem_cgroup_reset,
5072		.read_u64 = mem_cgroup_read_u64,
5073	},
5074	{
5075		.name = "kmem.max_usage_in_bytes",
5076		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5077		.write = mem_cgroup_reset,
5078		.read_u64 = mem_cgroup_read_u64,
5079	},
5080#if defined(CONFIG_MEMCG_KMEM) && \
5081	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
5082	{
5083		.name = "kmem.slabinfo",
5084		.seq_show = mem_cgroup_slab_show,
5085	},
5086#endif
5087	{
5088		.name = "kmem.tcp.limit_in_bytes",
5089		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5090		.write = mem_cgroup_write,
5091		.read_u64 = mem_cgroup_read_u64,
5092	},
5093	{
5094		.name = "kmem.tcp.usage_in_bytes",
5095		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5096		.read_u64 = mem_cgroup_read_u64,
5097	},
5098	{
5099		.name = "kmem.tcp.failcnt",
5100		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5101		.write = mem_cgroup_reset,
5102		.read_u64 = mem_cgroup_read_u64,
5103	},
5104	{
5105		.name = "kmem.tcp.max_usage_in_bytes",
5106		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5107		.write = mem_cgroup_reset,
5108		.read_u64 = mem_cgroup_read_u64,
5109	},
5110	{ },	/* terminate */
5111};
5112
5113/*
5114 * Private memory cgroup IDR
5115 *
5116 * Swap-out records and page cache shadow entries need to store memcg
5117 * references in constrained space, so we maintain an ID space that is
5118 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
5119 * memory-controlled cgroups to 64k.
5120 *
5121 * However, there usually are many references to the offline CSS after
5122 * the cgroup has been destroyed, such as page cache or reclaimable
5123 * slab objects, that don't need to hang on to the ID. We want to keep
5124 * those dead CSS from occupying IDs, or we might quickly exhaust the
5125 * relatively small ID space and prevent the creation of new cgroups
5126 * even when there are far fewer than 64k cgroups - possibly none.
5127 *
5128 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5129 * be freed and recycled when it's no longer needed, which is usually
5130 * when the CSS is offlined.
5131 *
5132 * The only exception to that are records of swapped out tmpfs/shmem
5133 * pages that need to be attributed to live ancestors on swapin. But
5134 * those references are manageable from userspace.
5135 */
5136
5137static DEFINE_IDR(mem_cgroup_idr);
5138
5139static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5140{
5141	if (memcg->id.id > 0) {
5142		idr_remove(&mem_cgroup_idr, memcg->id.id);
5143		memcg->id.id = 0;
5144	}
5145}
5146
5147static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5148						  unsigned int n)
5149{
5150	refcount_add(n, &memcg->id.ref);
5151}
5152
5153static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5154{
5155	if (refcount_sub_and_test(n, &memcg->id.ref)) {
5156		mem_cgroup_id_remove(memcg);
5157
5158		/* Memcg ID pins CSS */
5159		css_put(&memcg->css);
5160	}
5161}
5162
5163static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5164{
5165	mem_cgroup_id_put_many(memcg, 1);
5166}
5167
5168/**
5169 * mem_cgroup_from_id - look up a memcg from a memcg id
5170 * @id: the memcg id to look up
5171 *
5172 * Caller must hold rcu_read_lock().
5173 */
5174struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5175{
5176	WARN_ON_ONCE(!rcu_read_lock_held());
5177	return idr_find(&mem_cgroup_idr, id);
5178}
5179
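/*
 * Illustrative usage sketch (not part of the original file): how a stored
 * memcg ID - e.g. from a swap-out record - is typically resolved back to a
 * live memcg.  The helper name lookup_and_pin_memcg() is hypothetical; the
 * calls it makes are the same ones used later in this file, e.g. in
 * mem_cgroup_swapin_charge_folio().
 */
static inline struct mem_cgroup *lookup_and_pin_memcg(unsigned short id)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);		/* requires rcu_read_lock() */
	if (memcg && !css_tryget_online(&memcg->css))
		memcg = NULL;			/* cgroup already went away */
	rcu_read_unlock();

	return memcg;	/* caller must css_put(&memcg->css) when non-NULL */
}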
5180#ifdef CONFIG_SHRINKER_DEBUG
5181struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
5182{
5183	struct cgroup *cgrp;
5184	struct cgroup_subsys_state *css;
5185	struct mem_cgroup *memcg;
5186
5187	cgrp = cgroup_get_from_id(ino);
5188	if (IS_ERR(cgrp))
5189		return ERR_CAST(cgrp);
5190
5191	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
5192	if (css)
5193		memcg = container_of(css, struct mem_cgroup, css);
5194	else
5195		memcg = ERR_PTR(-ENOENT);
5196
5197	cgroup_put(cgrp);
5198
5199	return memcg;
5200}
5201#endif
5202
5203static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5204{
5205	struct mem_cgroup_per_node *pn;
5206
5207	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
5208	if (!pn)
5209		return 1;
5210
5211	pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5212						   GFP_KERNEL_ACCOUNT);
5213	if (!pn->lruvec_stats_percpu) {
5214		kfree(pn);
5215		return 1;
5216	}
5217
5218	lruvec_init(&pn->lruvec);
5219	pn->memcg = memcg;
5220
5221	memcg->nodeinfo[node] = pn;
5222	return 0;
5223}
5224
5225static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5226{
5227	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5228
5229	if (!pn)
5230		return;
5231
5232	free_percpu(pn->lruvec_stats_percpu);
5233	kfree(pn);
5234}
5235
5236static void __mem_cgroup_free(struct mem_cgroup *memcg)
5237{
5238	int node;
5239
5240	for_each_node(node)
5241		free_mem_cgroup_per_node_info(memcg, node);
5242	kfree(memcg->vmstats);
5243	free_percpu(memcg->vmstats_percpu);
5244	kfree(memcg);
5245}
5246
5247static void mem_cgroup_free(struct mem_cgroup *memcg)
5248{
5249	lru_gen_exit_memcg(memcg);
5250	memcg_wb_domain_exit(memcg);
5251	__mem_cgroup_free(memcg);
5252}
5253
5254static struct mem_cgroup *mem_cgroup_alloc(void)
5255{
5256	struct mem_cgroup *memcg;
5257	int node;
5258	int __maybe_unused i;
5259	long error = -ENOMEM;
5260
5261	memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
5262	if (!memcg)
5263		return ERR_PTR(error);
5264
5265	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5266				 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
5267	if (memcg->id.id < 0) {
5268		error = memcg->id.id;
5269		goto fail;
5270	}
5271
5272	memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), GFP_KERNEL);
5273	if (!memcg->vmstats)
5274		goto fail;
5275
5276	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5277						 GFP_KERNEL_ACCOUNT);
5278	if (!memcg->vmstats_percpu)
5279		goto fail;
5280
5281	for_each_node(node)
5282		if (alloc_mem_cgroup_per_node_info(memcg, node))
5283			goto fail;
5284
5285	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5286		goto fail;
5287
5288	INIT_WORK(&memcg->high_work, high_work_func);
5289	INIT_LIST_HEAD(&memcg->oom_notify);
5290	mutex_init(&memcg->thresholds_lock);
5291	spin_lock_init(&memcg->move_lock);
5292	vmpressure_init(&memcg->vmpressure);
5293	INIT_LIST_HEAD(&memcg->event_list);
5294	spin_lock_init(&memcg->event_list_lock);
5295	memcg->socket_pressure = jiffies;
5296#ifdef CONFIG_MEMCG_KMEM
5297	memcg->kmemcg_id = -1;
5298	INIT_LIST_HEAD(&memcg->objcg_list);
5299#endif
5300#ifdef CONFIG_CGROUP_WRITEBACK
5301	INIT_LIST_HEAD(&memcg->cgwb_list);
5302	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5303		memcg->cgwb_frn[i].done =
5304			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5305#endif
5306#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5307	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5308	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5309	memcg->deferred_split_queue.split_queue_len = 0;
5310#endif
5311	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5312	lru_gen_init_memcg(memcg);
5313	return memcg;
5314fail:
5315	mem_cgroup_id_remove(memcg);
5316	__mem_cgroup_free(memcg);
5317	return ERR_PTR(error);
5318}
5319
5320static struct cgroup_subsys_state * __ref
5321mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5322{
5323	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5324	struct mem_cgroup *memcg, *old_memcg;
5325
5326	old_memcg = set_active_memcg(parent);
5327	memcg = mem_cgroup_alloc();
5328	set_active_memcg(old_memcg);
5329	if (IS_ERR(memcg))
5330		return ERR_CAST(memcg);
5331
5332	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5333	memcg->soft_limit = PAGE_COUNTER_MAX;
5334#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
5335	memcg->zswap_max = PAGE_COUNTER_MAX;
5336#endif
5337	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5338	if (parent) {
5339		memcg->swappiness = mem_cgroup_swappiness(parent);
5340		memcg->oom_kill_disable = parent->oom_kill_disable;
5341
5342		page_counter_init(&memcg->memory, &parent->memory);
5343		page_counter_init(&memcg->swap, &parent->swap);
5344		page_counter_init(&memcg->kmem, &parent->kmem);
5345		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5346	} else {
5347		init_memcg_events();
5348		page_counter_init(&memcg->memory, NULL);
5349		page_counter_init(&memcg->swap, NULL);
5350		page_counter_init(&memcg->kmem, NULL);
5351		page_counter_init(&memcg->tcpmem, NULL);
5352
5353		root_mem_cgroup = memcg;
5354		return &memcg->css;
5355	}
5356
5357	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5358		static_branch_inc(&memcg_sockets_enabled_key);
5359
5360	return &memcg->css;
5361}
5362
5363static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5364{
5365	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5366
5367	if (memcg_online_kmem(memcg))
5368		goto remove_id;
5369
5370	/*
5371	 * A memcg must be visible to expand_shrinker_info()
5372	 * by the time the shrinker maps are allocated. So, we allocate the maps
5373	 * here, where for_each_mem_cgroup() can't skip it.
5374	 */
5375	if (alloc_shrinker_info(memcg))
5376		goto offline_kmem;
5377
5378	/* Online state pins memcg ID, memcg ID pins CSS */
5379	refcount_set(&memcg->id.ref, 1);
5380	css_get(css);
5381
5382	if (unlikely(mem_cgroup_is_root(memcg)))
5383		queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
5384				   2UL*HZ);
5385	return 0;
5386offline_kmem:
5387	memcg_offline_kmem(memcg);
5388remove_id:
5389	mem_cgroup_id_remove(memcg);
5390	return -ENOMEM;
5391}
5392
5393static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5394{
5395	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5396	struct mem_cgroup_event *event, *tmp;
5397
5398	/*
5399	 * Unregister events and notify userspace.
5400	 * Notify userspace about cgroup removal only after rmdir of the cgroup
5401	 * directory, to avoid a race between userspace and kernelspace.
5402	 */
5403	spin_lock_irq(&memcg->event_list_lock);
5404	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5405		list_del_init(&event->list);
5406		schedule_work(&event->remove);
5407	}
5408	spin_unlock_irq(&memcg->event_list_lock);
5409
5410	page_counter_set_min(&memcg->memory, 0);
5411	page_counter_set_low(&memcg->memory, 0);
5412
5413	memcg_offline_kmem(memcg);
5414	reparent_shrinker_deferred(memcg);
5415	wb_memcg_offline(memcg);
5416
5417	drain_all_stock(memcg);
5418
5419	mem_cgroup_id_put(memcg);
5420}
5421
5422static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5423{
5424	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5425
5426	invalidate_reclaim_iterators(memcg);
5427}
5428
5429static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5430{
5431	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5432	int __maybe_unused i;
5433
5434#ifdef CONFIG_CGROUP_WRITEBACK
5435	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5436		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5437#endif
5438	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5439		static_branch_dec(&memcg_sockets_enabled_key);
5440
5441	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5442		static_branch_dec(&memcg_sockets_enabled_key);
5443
5444	vmpressure_cleanup(&memcg->vmpressure);
5445	cancel_work_sync(&memcg->high_work);
5446	mem_cgroup_remove_from_trees(memcg);
5447	free_shrinker_info(memcg);
5448	mem_cgroup_free(memcg);
5449}
5450
5451/**
5452 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5453 * @css: the target css
5454 *
5455 * Reset the states of the mem_cgroup associated with @css.  This is
5456 * invoked when the userland requests disabling on the default hierarchy
5457 * but the memcg is pinned through dependency.  The memcg should stop
5458 * applying policies and should revert to the vanilla state as it may be
5459 * made visible again.
5460 *
5461 * The current implementation only resets the essential configurations.
5462 * This needs to be expanded to cover all the visible parts.
5463 */
5464static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5465{
5466	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5467
5468	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5469	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5470	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5471	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5472	page_counter_set_min(&memcg->memory, 0);
5473	page_counter_set_low(&memcg->memory, 0);
5474	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5475	memcg->soft_limit = PAGE_COUNTER_MAX;
5476	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5477	memcg_wb_domain_size_changed(memcg);
5478}
5479
5480static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5481{
5482	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5483	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5484	struct memcg_vmstats_percpu *statc;
5485	long delta, v;
5486	int i, nid;
5487
5488	statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5489
5490	for (i = 0; i < MEMCG_NR_STAT; i++) {
5491		/*
5492		 * Collect the aggregated propagation counts of groups
5493		 * below us. We're in a per-cpu loop here and this is
5494		 * a global counter, so the first cycle will get them.
5495		 */
5496		delta = memcg->vmstats->state_pending[i];
5497		if (delta)
5498			memcg->vmstats->state_pending[i] = 0;
5499
5500		/* Add CPU changes on this level since the last flush */
5501		v = READ_ONCE(statc->state[i]);
5502		if (v != statc->state_prev[i]) {
5503			delta += v - statc->state_prev[i];
5504			statc->state_prev[i] = v;
5505		}
5506
5507		if (!delta)
5508			continue;
5509
5510		/* Aggregate counts on this level and propagate upwards */
5511		memcg->vmstats->state[i] += delta;
5512		if (parent)
5513			parent->vmstats->state_pending[i] += delta;
5514	}
5515
5516	for (i = 0; i < NR_MEMCG_EVENTS; i++) {
5517		delta = memcg->vmstats->events_pending[i];
5518		if (delta)
5519			memcg->vmstats->events_pending[i] = 0;
5520
5521		v = READ_ONCE(statc->events[i]);
5522		if (v != statc->events_prev[i]) {
5523			delta += v - statc->events_prev[i];
5524			statc->events_prev[i] = v;
5525		}
5526
5527		if (!delta)
5528			continue;
5529
5530		memcg->vmstats->events[i] += delta;
5531		if (parent)
5532			parent->vmstats->events_pending[i] += delta;
5533	}
5534
5535	for_each_node_state(nid, N_MEMORY) {
5536		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
5537		struct mem_cgroup_per_node *ppn = NULL;
5538		struct lruvec_stats_percpu *lstatc;
5539
5540		if (parent)
5541			ppn = parent->nodeinfo[nid];
5542
5543		lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
5544
5545		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
5546			delta = pn->lruvec_stats.state_pending[i];
5547			if (delta)
5548				pn->lruvec_stats.state_pending[i] = 0;
5549
5550			v = READ_ONCE(lstatc->state[i]);
5551			if (v != lstatc->state_prev[i]) {
5552				delta += v - lstatc->state_prev[i];
5553				lstatc->state_prev[i] = v;
5554			}
5555
5556			if (!delta)
5557				continue;
5558
5559			pn->lruvec_stats.state[i] += delta;
5560			if (ppn)
5561				ppn->lruvec_stats.state_pending[i] += delta;
5562		}
5563	}
5564}
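/*
 * Minimal sketch (illustrative, not from the original source): the flush
 * pattern used above, reduced to a single counter.  flush_one_counter() and
 * its parameters are hypothetical; the point is that only the delta since
 * the previous flush is added to this level and queued as "pending" for the
 * parent, so repeated per-cpu flushes never double-count.
 */
static __maybe_unused long flush_one_counter(long *prev, long cur,
					     long *level_total,
					     long *parent_pending)
{
	long delta = cur - *prev;	/* CPU changes since the last flush */

	*prev = cur;
	if (delta) {
		*level_total += delta;		/* aggregate on this level */
		if (parent_pending)
			*parent_pending += delta;	/* propagate upwards */
	}
	return delta;
}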
5565
5566#ifdef CONFIG_MMU
5567/* Handlers for move charge at task migration. */
5568static int mem_cgroup_do_precharge(unsigned long count)
5569{
5570	int ret;
5571
5572	/* Try a single bulk charge without reclaim first, kswapd may wake */
5573	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5574	if (!ret) {
5575		mc.precharge += count;
5576		return ret;
5577	}
5578
5579	/* Try charges one by one with reclaim, but do not retry */
5580	while (count--) {
5581		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5582		if (ret)
5583			return ret;
5584		mc.precharge++;
5585		cond_resched();
5586	}
5587	return 0;
5588}
5589
5590union mc_target {
5591	struct page	*page;
5592	swp_entry_t	ent;
5593};
5594
5595enum mc_target_type {
5596	MC_TARGET_NONE = 0,
5597	MC_TARGET_PAGE,
5598	MC_TARGET_SWAP,
5599	MC_TARGET_DEVICE,
5600};
5601
5602static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5603						unsigned long addr, pte_t ptent)
5604{
5605	struct page *page = vm_normal_page(vma, addr, ptent);
5606
5607	if (!page || !page_mapped(page))
5608		return NULL;
5609	if (PageAnon(page)) {
5610		if (!(mc.flags & MOVE_ANON))
5611			return NULL;
5612	} else {
5613		if (!(mc.flags & MOVE_FILE))
5614			return NULL;
5615	}
5616	if (!get_page_unless_zero(page))
5617		return NULL;
5618
5619	return page;
5620}
5621
5622#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5623static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5624			pte_t ptent, swp_entry_t *entry)
5625{
5626	struct page *page = NULL;
5627	swp_entry_t ent = pte_to_swp_entry(ptent);
5628
5629	if (!(mc.flags & MOVE_ANON))
5630		return NULL;
5631
5632	/*
5633	 * Handle device private pages that are not accessible by the CPU, but
5634	 * stored as special swap entries in the page table.
5635	 */
5636	if (is_device_private_entry(ent)) {
5637		page = pfn_swap_entry_to_page(ent);
5638		if (!get_page_unless_zero(page))
5639			return NULL;
5640		return page;
5641	}
5642
5643	if (non_swap_entry(ent))
5644		return NULL;
5645
5646	/*
5647	 * Because swap_cache_get_folio() updates some statistics counters,
5648	 * we call find_get_page() with swapper_space directly.
5649	 */
5650	page = find_get_page(swap_address_space(ent), swp_offset(ent));
5651	entry->val = ent.val;
5652
5653	return page;
5654}
5655#else
5656static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5657			pte_t ptent, swp_entry_t *entry)
5658{
5659	return NULL;
5660}
5661#endif
5662
5663static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5664			unsigned long addr, pte_t ptent)
5665{
5666	unsigned long index;
5667	struct folio *folio;
5668
5669	if (!vma->vm_file) /* anonymous vma */
5670		return NULL;
5671	if (!(mc.flags & MOVE_FILE))
5672		return NULL;
5673
5674	/* The folio is moved even if it's not RSS of this task (page-faulted). */
5675	/* shmem/tmpfs may report page out on swap: account for that too. */
5676	index = linear_page_index(vma, addr);
5677	folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index);
5678	if (!folio)
5679		return NULL;
5680	return folio_file_page(folio, index);
5681}
5682
5683/**
5684 * mem_cgroup_move_account - move account of the page
5685 * @page: the page
5686 * @compound: charge the page as compound or small page
5687 * @from: mem_cgroup which the page is moved from.
5688 * @to:	mem_cgroup which the page is moved to. @from != @to.
5689 *
5690 * The caller must make sure the page is not on the LRU (isolate_lru_page() is useful).
5691 *
5692 * This function doesn't do "charge" to the new cgroup and doesn't do "uncharge"
5693 * from the old cgroup.
5694 */
5695static int mem_cgroup_move_account(struct page *page,
5696				   bool compound,
5697				   struct mem_cgroup *from,
5698				   struct mem_cgroup *to)
5699{
5700	struct folio *folio = page_folio(page);
5701	struct lruvec *from_vec, *to_vec;
5702	struct pglist_data *pgdat;
5703	unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
5704	int nid, ret;
5705
5706	VM_BUG_ON(from == to);
5707	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
5708	VM_BUG_ON(compound && !folio_test_large(folio));
5709
5710	/*
5711	 * Prevent mem_cgroup_migrate() from looking at the source
5712	 * page's memory cgroup while we change it.
5713	 */
5714	ret = -EBUSY;
5715	if (!folio_trylock(folio))
5716		goto out;
5717
5718	ret = -EINVAL;
5719	if (folio_memcg(folio) != from)
5720		goto out_unlock;
5721
5722	pgdat = folio_pgdat(folio);
5723	from_vec = mem_cgroup_lruvec(from, pgdat);
5724	to_vec = mem_cgroup_lruvec(to, pgdat);
5725
5726	folio_memcg_lock(folio);
5727
5728	if (folio_test_anon(folio)) {
5729		if (folio_mapped(folio)) {
5730			__mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5731			__mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5732			if (folio_test_transhuge(folio)) {
5733				__mod_lruvec_state(from_vec, NR_ANON_THPS,
5734						   -nr_pages);
5735				__mod_lruvec_state(to_vec, NR_ANON_THPS,
5736						   nr_pages);
5737			}
5738		}
5739	} else {
5740		__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5741		__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5742
5743		if (folio_test_swapbacked(folio)) {
5744			__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5745			__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5746		}
5747
5748		if (folio_mapped(folio)) {
5749			__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5750			__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5751		}
5752
5753		if (folio_test_dirty(folio)) {
5754			struct address_space *mapping = folio_mapping(folio);
5755
5756			if (mapping_can_writeback(mapping)) {
5757				__mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5758						   -nr_pages);
5759				__mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5760						   nr_pages);
5761			}
5762		}
5763	}
5764
5765#ifdef CONFIG_SWAP
5766	if (folio_test_swapcache(folio)) {
5767		__mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages);
5768		__mod_lruvec_state(to_vec, NR_SWAPCACHE, nr_pages);
5769	}
5770#endif
5771	if (folio_test_writeback(folio)) {
5772		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5773		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5774	}
5775
5776	/*
5777	 * All state has been migrated, let's switch to the new memcg.
5778	 *
5779	 * It is safe to change page's memcg here because the page
5780	 * is referenced, charged, isolated, and locked: we can't race
5781	 * with (un)charging, migration, LRU putback, or anything else
5782	 * that would rely on a stable page's memory cgroup.
5783	 *
5784	 * Note that lock_page_memcg is a memcg lock, not a page lock,
5785	 * to save space. As soon as we switch page's memory cgroup to a
5786	 * new memcg that isn't locked, the above state can change
5787	 * concurrently again. Make sure we're truly done with it.
5788	 */
5789	smp_mb();
5790
5791	css_get(&to->css);
5792	css_put(&from->css);
5793
5794	folio->memcg_data = (unsigned long)to;
5795
5796	__folio_memcg_unlock(from);
5797
5798	ret = 0;
5799	nid = folio_nid(folio);
5800
5801	local_irq_disable();
5802	mem_cgroup_charge_statistics(to, nr_pages);
5803	memcg_check_events(to, nid);
5804	mem_cgroup_charge_statistics(from, -nr_pages);
5805	memcg_check_events(from, nid);
5806	local_irq_enable();
5807out_unlock:
5808	folio_unlock(folio);
5809out:
5810	return ret;
5811}
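/*
 * Minimal usage sketch (illustrative only): how a caller is expected to
 * drive mem_cgroup_move_account() for a base page, mirroring the PTE walk
 * in mem_cgroup_move_charge_pte_range() further below.  move_one_page() is
 * a hypothetical helper; @page must already carry a reference, e.g. the one
 * taken by get_mctgt_type().
 */
static __maybe_unused int move_one_page(struct page *page,
					struct mem_cgroup *from,
					struct mem_cgroup *to)
{
	int ret = -EBUSY;

	if (isolate_lru_page(page))	/* must be off the LRU during the move */
		goto out;
	ret = mem_cgroup_move_account(page, false, from, to);
	putback_lru_page(page);
out:
	put_page(page);			/* drop the caller's reference */
	return ret;
}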
5812
5813/**
5814 * get_mctgt_type - get target type of moving charge
5815 * @vma: the vma to which the pte to be checked belongs
5816 * @addr: the address corresponding to the pte to be checked
5817 * @ptent: the pte to be checked
5818 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
5819 *
5820 * Returns
5821 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
5822 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5823 *     move charge. If @target is not NULL, the page is stored in target->page
5824 *     with an extra refcount taken (callers should handle it).
5825 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5826 *     target for charge migration. If @target is not NULL, the entry is stored
5827 *     in target->ent.
5828 *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is device memory and
5829 *     thus not on the LRU.
5830 *     For now such a page is charged like a regular page would be, as for all
5831 *     intents and purposes it is just special memory taking the place of a
5832 *     regular page.
5833 *
5834 *     See Documentation/vm/hmm.txt and include/linux/hmm.h
5835 *
5836 * Called with pte lock held.
5837 */
5838
5839static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5840		unsigned long addr, pte_t ptent, union mc_target *target)
5841{
5842	struct page *page = NULL;
5843	enum mc_target_type ret = MC_TARGET_NONE;
5844	swp_entry_t ent = { .val = 0 };
5845
5846	if (pte_present(ptent))
5847		page = mc_handle_present_pte(vma, addr, ptent);
5848	else if (pte_none_mostly(ptent))
5849		/*
5850		 * PTE markers should be treated as a none pte here, separated
5851		 * from other swap handling below.
5852		 */
5853		page = mc_handle_file_pte(vma, addr, ptent);
5854	else if (is_swap_pte(ptent))
5855		page = mc_handle_swap_pte(vma, ptent, &ent);
5856
5857	if (!page && !ent.val)
5858		return ret;
5859	if (page) {
5860		/*
5861		 * Do only a loose check w/o serialization.
5862		 * mem_cgroup_move_account() checks whether the page is valid
5863		 * or not under LRU exclusion.
5864		 */
5865		if (page_memcg(page) == mc.from) {
5866			ret = MC_TARGET_PAGE;
5867			if (is_device_private_page(page) ||
5868			    is_device_coherent_page(page))
5869				ret = MC_TARGET_DEVICE;
5870			if (target)
5871				target->page = page;
5872		}
5873		if (!ret || !target)
5874			put_page(page);
5875	}
5876	/*
5877	 * There is a swap entry and the page doesn't exist or isn't charged.
5878	 * But we cannot move a tail page of a THP.
5879	 */
5880	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5881	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5882		ret = MC_TARGET_SWAP;
5883		if (target)
5884			target->ent = ent;
5885	}
5886	return ret;
5887}
5888
5889#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5890/*
5891 * We don't consider PMD mapped swapping or file mapped pages because THP does
5892 * not support them for now.
5893 * Caller should make sure that pmd_trans_huge(pmd) is true.
5894 */
5895static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5896		unsigned long addr, pmd_t pmd, union mc_target *target)
5897{
5898	struct page *page = NULL;
5899	enum mc_target_type ret = MC_TARGET_NONE;
5900
5901	if (unlikely(is_swap_pmd(pmd))) {
5902		VM_BUG_ON(thp_migration_supported() &&
5903				  !is_pmd_migration_entry(pmd));
5904		return ret;
5905	}
5906	page = pmd_page(pmd);
5907	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5908	if (!(mc.flags & MOVE_ANON))
5909		return ret;
5910	if (page_memcg(page) == mc.from) {
5911		ret = MC_TARGET_PAGE;
5912		if (target) {
5913			get_page(page);
5914			target->page = page;
5915		}
5916	}
5917	return ret;
5918}
5919#else
5920static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5921		unsigned long addr, pmd_t pmd, union mc_target *target)
5922{
5923	return MC_TARGET_NONE;
5924}
5925#endif
5926
5927static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5928					unsigned long addr, unsigned long end,
5929					struct mm_walk *walk)
5930{
5931	struct vm_area_struct *vma = walk->vma;
5932	pte_t *pte;
5933	spinlock_t *ptl;
5934
5935	ptl = pmd_trans_huge_lock(pmd, vma);
5936	if (ptl) {
5937		/*
5938		 * Note there cannot be MC_TARGET_DEVICE for now as we do not
5939		 * support transparent huge pages with MEMORY_DEVICE_PRIVATE,
5940		 * but this might change.
5941		 */
5942		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5943			mc.precharge += HPAGE_PMD_NR;
5944		spin_unlock(ptl);
5945		return 0;
5946	}
5947
5948	if (pmd_trans_unstable(pmd))
5949		return 0;
5950	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5951	for (; addr != end; pte++, addr += PAGE_SIZE)
5952		if (get_mctgt_type(vma, addr, *pte, NULL))
5953			mc.precharge++;	/* increment precharge temporarily */
5954	pte_unmap_unlock(pte - 1, ptl);
5955	cond_resched();
5956
5957	return 0;
5958}
5959
5960static const struct mm_walk_ops precharge_walk_ops = {
5961	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
5962};
5963
5964static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5965{
5966	unsigned long precharge;
5967
5968	mmap_read_lock(mm);
5969	walk_page_range(mm, 0, ULONG_MAX, &precharge_walk_ops, NULL);
5970	mmap_read_unlock(mm);
5971
5972	precharge = mc.precharge;
5973	mc.precharge = 0;
5974
5975	return precharge;
5976}
5977
5978static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5979{
5980	unsigned long precharge = mem_cgroup_count_precharge(mm);
5981
5982	VM_BUG_ON(mc.moving_task);
5983	mc.moving_task = current;
5984	return mem_cgroup_do_precharge(precharge);
5985}
5986
5987/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5988static void __mem_cgroup_clear_mc(void)
5989{
5990	struct mem_cgroup *from = mc.from;
5991	struct mem_cgroup *to = mc.to;
5992
5993	/* we must uncharge all the leftover precharges from mc.to */
5994	if (mc.precharge) {
5995		cancel_charge(mc.to, mc.precharge);
5996		mc.precharge = 0;
5997	}
5998	/*
5999	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6000	 * we must uncharge here.
6001	 */
6002	if (mc.moved_charge) {
6003		cancel_charge(mc.from, mc.moved_charge);
6004		mc.moved_charge = 0;
6005	}
6006	/* we must fixup refcnts and charges */
6007	if (mc.moved_swap) {
6008		/* uncharge swap account from the old cgroup */
6009		if (!mem_cgroup_is_root(mc.from))
6010			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
6011
6012		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
6013
6014		/*
6015		 * we charged both to->memory and to->memsw, so we
6016		 * should uncharge to->memory.
6017		 */
6018		if (!mem_cgroup_is_root(mc.to))
6019			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
6020
6021		mc.moved_swap = 0;
6022	}
6023	memcg_oom_recover(from);
6024	memcg_oom_recover(to);
6025	wake_up_all(&mc.waitq);
6026}
6027
6028static void mem_cgroup_clear_mc(void)
6029{
6030	struct mm_struct *mm = mc.mm;
6031
6032	/*
6033	 * we must clear moving_task before waking up waiters at the end of
6034	 * task migration.
6035	 */
6036	mc.moving_task = NULL;
6037	__mem_cgroup_clear_mc();
6038	spin_lock(&mc.lock);
6039	mc.from = NULL;
6040	mc.to = NULL;
6041	mc.mm = NULL;
6042	spin_unlock(&mc.lock);
6043
6044	mmput(mm);
6045}
6046
6047static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6048{
6049	struct cgroup_subsys_state *css;
6050	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
6051	struct mem_cgroup *from;
6052	struct task_struct *leader, *p;
6053	struct mm_struct *mm;
6054	unsigned long move_flags;
6055	int ret = 0;
6056
6057	/* charge immigration isn't supported on the default hierarchy */
6058	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6059		return 0;
6060
6061	/*
6062	 * Multi-process migrations only happen on the default hierarchy
6063	 * where charge immigration is not used.  Perform charge
6064	 * immigration if @tset contains a leader and whine if there are
6065	 * multiple.
6066	 */
6067	p = NULL;
6068	cgroup_taskset_for_each_leader(leader, css, tset) {
6069		WARN_ON_ONCE(p);
6070		p = leader;
6071		memcg = mem_cgroup_from_css(css);
6072	}
6073	if (!p)
6074		return 0;
6075
6076	/*
6077	 * We are now committed to this value, whatever it is. Changes to this
6078	 * tunable will only affect upcoming migrations, not the current one,
6079	 * so we need to save it and keep using it.
6080	 */
6081	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
6082	if (!move_flags)
6083		return 0;
6084
6085	from = mem_cgroup_from_task(p);
6086
6087	VM_BUG_ON(from == memcg);
6088
6089	mm = get_task_mm(p);
6090	if (!mm)
6091		return 0;
6092	/* We move charges only when we move an owner of the mm */
6093	if (mm->owner == p) {
6094		VM_BUG_ON(mc.from);
6095		VM_BUG_ON(mc.to);
6096		VM_BUG_ON(mc.precharge);
6097		VM_BUG_ON(mc.moved_charge);
6098		VM_BUG_ON(mc.moved_swap);
6099
6100		spin_lock(&mc.lock);
6101		mc.mm = mm;
6102		mc.from = from;
6103		mc.to = memcg;
6104		mc.flags = move_flags;
6105		spin_unlock(&mc.lock);
6106		/* We set mc.moving_task later */
6107
6108		ret = mem_cgroup_precharge_mc(mm);
6109		if (ret)
6110			mem_cgroup_clear_mc();
6111	} else {
6112		mmput(mm);
6113	}
6114	return ret;
6115}
6116
6117static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6118{
6119	if (mc.to)
6120		mem_cgroup_clear_mc();
6121}
6122
6123static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6124				unsigned long addr, unsigned long end,
6125				struct mm_walk *walk)
6126{
6127	int ret = 0;
6128	struct vm_area_struct *vma = walk->vma;
6129	pte_t *pte;
6130	spinlock_t *ptl;
6131	enum mc_target_type target_type;
6132	union mc_target target;
6133	struct page *page;
6134
6135	ptl = pmd_trans_huge_lock(pmd, vma);
6136	if (ptl) {
6137		if (mc.precharge < HPAGE_PMD_NR) {
6138			spin_unlock(ptl);
6139			return 0;
6140		}
6141		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6142		if (target_type == MC_TARGET_PAGE) {
6143			page = target.page;
6144			if (!isolate_lru_page(page)) {
6145				if (!mem_cgroup_move_account(page, true,
6146							     mc.from, mc.to)) {
6147					mc.precharge -= HPAGE_PMD_NR;
6148					mc.moved_charge += HPAGE_PMD_NR;
6149				}
6150				putback_lru_page(page);
6151			}
6152			put_page(page);
6153		} else if (target_type == MC_TARGET_DEVICE) {
6154			page = target.page;
6155			if (!mem_cgroup_move_account(page, true,
6156						     mc.from, mc.to)) {
6157				mc.precharge -= HPAGE_PMD_NR;
6158				mc.moved_charge += HPAGE_PMD_NR;
6159			}
6160			put_page(page);
6161		}
6162		spin_unlock(ptl);
6163		return 0;
6164	}
6165
6166	if (pmd_trans_unstable(pmd))
6167		return 0;
6168retry:
6169	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6170	for (; addr != end; addr += PAGE_SIZE) {
6171		pte_t ptent = *(pte++);
6172		bool device = false;
6173		swp_entry_t ent;
6174
6175		if (!mc.precharge)
6176			break;
6177
6178		switch (get_mctgt_type(vma, addr, ptent, &target)) {
6179		case MC_TARGET_DEVICE:
6180			device = true;
6181			fallthrough;
6182		case MC_TARGET_PAGE:
6183			page = target.page;
6184			/*
6185			 * We can have a part of the split pmd here. Moving it
6186			 * could be done, but it would be too convoluted, so simply
6187			 * ignore such a partial THP and keep it in the original
6188			 * memcg. There should be somebody mapping the head.
6189			 */
6190			if (PageTransCompound(page))
6191				goto put;
6192			if (!device && isolate_lru_page(page))
6193				goto put;
6194			if (!mem_cgroup_move_account(page, false,
6195						mc.from, mc.to)) {
6196				mc.precharge--;
6197				/* we uncharge from mc.from later. */
6198				mc.moved_charge++;
6199			}
6200			if (!device)
6201				putback_lru_page(page);
6202put:			/* get_mctgt_type() gets the page */
6203			put_page(page);
6204			break;
6205		case MC_TARGET_SWAP:
6206			ent = target.ent;
6207			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6208				mc.precharge--;
6209				mem_cgroup_id_get_many(mc.to, 1);
6210				/* we fixup other refcnts and charges later. */
6211				mc.moved_swap++;
6212			}
6213			break;
6214		default:
6215			break;
6216		}
6217	}
6218	pte_unmap_unlock(pte - 1, ptl);
6219	cond_resched();
6220
6221	if (addr != end) {
6222		/*
6223		 * We have consumed all the precharges we got in can_attach().
6224		 * We try to charge one by one, but don't do any additional
6225		 * charges to mc.to if we have already failed a charge once in
6226		 * the attach() phase.
6227		 */
6228		ret = mem_cgroup_do_precharge(1);
6229		if (!ret)
6230			goto retry;
6231	}
6232
6233	return ret;
6234}
6235
6236static const struct mm_walk_ops charge_walk_ops = {
6237	.pmd_entry	= mem_cgroup_move_charge_pte_range,
6238};
6239
6240static void mem_cgroup_move_charge(void)
6241{
6242	lru_add_drain_all();
6243	/*
6244	 * Signal lock_page_memcg() to take the memcg's move_lock
6245	 * while we're moving its pages to another memcg. Then wait
6246	 * for already started RCU-only updates to finish.
6247	 */
6248	atomic_inc(&mc.from->moving_account);
6249	synchronize_rcu();
6250retry:
6251	if (unlikely(!mmap_read_trylock(mc.mm))) {
6252		/*
6253		 * Someone who is holding the mmap_lock might be waiting in
6254		 * the waitq. So we cancel all extra charges, wake up all waiters,
6255		 * and retry. Because we cancel precharges, we might not be able
6256		 * to move enough charges, but moving charges is a best-effort
6257		 * feature anyway, so it wouldn't be a big problem.
6258		 */
6259		__mem_cgroup_clear_mc();
6260		cond_resched();
6261		goto retry;
6262	}
6263	/*
6264	 * When we have consumed all precharges and failed in doing
6265	 * additional charge, the page walk just aborts.
6266	 */
6267	walk_page_range(mc.mm, 0, ULONG_MAX, &charge_walk_ops, NULL);
6268	mmap_read_unlock(mc.mm);
6269	atomic_dec(&mc.from->moving_account);
6270}
6271
6272static void mem_cgroup_move_task(void)
6273{
6274	if (mc.to) {
6275		mem_cgroup_move_charge();
6276		mem_cgroup_clear_mc();
6277	}
6278}
6279#else	/* !CONFIG_MMU */
6280static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6281{
6282	return 0;
6283}
6284static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6285{
6286}
6287static void mem_cgroup_move_task(void)
6288{
6289}
6290#endif
6291
6292#ifdef CONFIG_LRU_GEN
6293static void mem_cgroup_attach(struct cgroup_taskset *tset)
6294{
6295	struct task_struct *task;
6296	struct cgroup_subsys_state *css;
6297
6298	/* find the first leader if there is any */
6299	cgroup_taskset_for_each_leader(task, css, tset)
6300		break;
6301
6302	if (!task)
6303		return;
6304
6305	task_lock(task);
6306	if (task->mm && READ_ONCE(task->mm->owner) == task)
6307		lru_gen_migrate_mm(task->mm);
6308	task_unlock(task);
6309}
6310#else
6311static void mem_cgroup_attach(struct cgroup_taskset *tset)
6312{
6313}
6314#endif /* CONFIG_LRU_GEN */
6315
6316static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6317{
6318	if (value == PAGE_COUNTER_MAX)
6319		seq_puts(m, "max\n");
6320	else
6321		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6322
6323	return 0;
6324}
6325
6326static u64 memory_current_read(struct cgroup_subsys_state *css,
6327			       struct cftype *cft)
6328{
6329	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6330
6331	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6332}
6333
6334static u64 memory_peak_read(struct cgroup_subsys_state *css,
6335			    struct cftype *cft)
6336{
6337	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6338
6339	return (u64)memcg->memory.watermark * PAGE_SIZE;
6340}
6341
6342static int memory_min_show(struct seq_file *m, void *v)
6343{
6344	return seq_puts_memcg_tunable(m,
6345		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6346}
6347
6348static ssize_t memory_min_write(struct kernfs_open_file *of,
6349				char *buf, size_t nbytes, loff_t off)
6350{
6351	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6352	unsigned long min;
6353	int err;
6354
6355	buf = strstrip(buf);
6356	err = page_counter_memparse(buf, "max", &min);
6357	if (err)
6358		return err;
6359
6360	page_counter_set_min(&memcg->memory, min);
6361
6362	return nbytes;
6363}
6364
6365static int memory_low_show(struct seq_file *m, void *v)
6366{
6367	return seq_puts_memcg_tunable(m,
6368		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6369}
6370
6371static ssize_t memory_low_write(struct kernfs_open_file *of,
6372				char *buf, size_t nbytes, loff_t off)
6373{
6374	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6375	unsigned long low;
6376	int err;
6377
6378	buf = strstrip(buf);
6379	err = page_counter_memparse(buf, "max", &low);
6380	if (err)
6381		return err;
6382
6383	page_counter_set_low(&memcg->memory, low);
6384
6385	return nbytes;
6386}
6387
6388static int memory_high_show(struct seq_file *m, void *v)
6389{
6390	return seq_puts_memcg_tunable(m,
6391		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6392}
6393
6394static ssize_t memory_high_write(struct kernfs_open_file *of,
6395				 char *buf, size_t nbytes, loff_t off)
6396{
6397	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6398	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6399	bool drained = false;
6400	unsigned long high;
6401	int err;
6402
6403	buf = strstrip(buf);
6404	err = page_counter_memparse(buf, "max", &high);
6405	if (err)
6406		return err;
6407
6408	page_counter_set_high(&memcg->memory, high);
6409
6410	for (;;) {
6411		unsigned long nr_pages = page_counter_read(&memcg->memory);
6412		unsigned long reclaimed;
6413
6414		if (nr_pages <= high)
6415			break;
6416
6417		if (signal_pending(current))
6418			break;
6419
6420		if (!drained) {
6421			drain_all_stock(memcg);
6422			drained = true;
6423			continue;
6424		}
6425
6426		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6427					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP);
6428
6429		if (!reclaimed && !nr_retries--)
6430			break;
6431	}
6432
6433	memcg_wb_domain_size_changed(memcg);
6434	return nbytes;
6435}
6436
6437static int memory_max_show(struct seq_file *m, void *v)
6438{
6439	return seq_puts_memcg_tunable(m,
6440		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6441}
6442
6443static ssize_t memory_max_write(struct kernfs_open_file *of,
6444				char *buf, size_t nbytes, loff_t off)
6445{
6446	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6447	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6448	bool drained = false;
6449	unsigned long max;
6450	int err;
6451
6452	buf = strstrip(buf);
6453	err = page_counter_memparse(buf, "max", &max);
6454	if (err)
6455		return err;
6456
6457	xchg(&memcg->memory.max, max);
6458
6459	for (;;) {
6460		unsigned long nr_pages = page_counter_read(&memcg->memory);
6461
6462		if (nr_pages <= max)
6463			break;
6464
6465		if (signal_pending(current))
6466			break;
6467
6468		if (!drained) {
6469			drain_all_stock(memcg);
6470			drained = true;
6471			continue;
6472		}
6473
6474		if (nr_reclaims) {
6475			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6476					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP))
6477				nr_reclaims--;
6478			continue;
6479		}
6480
6481		memcg_memory_event(memcg, MEMCG_OOM);
6482		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6483			break;
6484	}
6485
6486	memcg_wb_domain_size_changed(memcg);
6487	return nbytes;
6488}
6489
6490static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6491{
6492	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6493	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6494	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6495	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6496	seq_printf(m, "oom_kill %lu\n",
6497		   atomic_long_read(&events[MEMCG_OOM_KILL]));
6498	seq_printf(m, "oom_group_kill %lu\n",
6499		   atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
6500}
6501
6502static int memory_events_show(struct seq_file *m, void *v)
6503{
6504	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6505
6506	__memory_events_show(m, memcg->memory_events);
6507	return 0;
6508}
6509
6510static int memory_events_local_show(struct seq_file *m, void *v)
6511{
6512	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6513
6514	__memory_events_show(m, memcg->memory_events_local);
6515	return 0;
6516}
6517
6518static int memory_stat_show(struct seq_file *m, void *v)
6519{
6520	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6521	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
6522
6523	if (!buf)
6524		return -ENOMEM;
6525	memory_stat_format(memcg, buf, PAGE_SIZE);
6526	seq_puts(m, buf);
6527	kfree(buf);
6528	return 0;
6529}
6530
6531#ifdef CONFIG_NUMA
6532static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6533						     int item)
6534{
6535	return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item);
6536}
6537
6538static int memory_numa_stat_show(struct seq_file *m, void *v)
6539{
6540	int i;
6541	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6542
6543	mem_cgroup_flush_stats();
6544
6545	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6546		int nid;
6547
6548		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6549			continue;
6550
6551		seq_printf(m, "%s", memory_stats[i].name);
6552		for_each_node_state(nid, N_MEMORY) {
6553			u64 size;
6554			struct lruvec *lruvec;
6555
6556			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6557			size = lruvec_page_state_output(lruvec,
6558							memory_stats[i].idx);
6559			seq_printf(m, " N%d=%llu", nid, size);
6560		}
6561		seq_putc(m, '\n');
6562	}
6563
6564	return 0;
6565}
6566#endif
6567
6568static int memory_oom_group_show(struct seq_file *m, void *v)
6569{
6570	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6571
6572	seq_printf(m, "%d\n", memcg->oom_group);
6573
6574	return 0;
6575}
6576
6577static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6578				      char *buf, size_t nbytes, loff_t off)
6579{
6580	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6581	int ret, oom_group;
6582
6583	buf = strstrip(buf);
6584	if (!buf)
6585		return -EINVAL;
6586
6587	ret = kstrtoint(buf, 0, &oom_group);
6588	if (ret)
6589		return ret;
6590
6591	if (oom_group != 0 && oom_group != 1)
6592		return -EINVAL;
6593
6594	memcg->oom_group = oom_group;
6595
6596	return nbytes;
6597}
6598
6599static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
6600			      size_t nbytes, loff_t off)
6601{
6602	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6603	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6604	unsigned long nr_to_reclaim, nr_reclaimed = 0;
6605	unsigned int reclaim_options;
6606	int err;
6607
6608	buf = strstrip(buf);
6609	err = page_counter_memparse(buf, "", &nr_to_reclaim);
6610	if (err)
6611		return err;
6612
6613	reclaim_options	= MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
6614	while (nr_reclaimed < nr_to_reclaim) {
6615		unsigned long reclaimed;
6616
6617		if (signal_pending(current))
6618			return -EINTR;
6619
6620		/*
6621		 * This is the final attempt; drain the per-CPU LRU caches in the
6622		 * hope of introducing more evictable pages for
6623		 * try_to_free_mem_cgroup_pages().
6624		 */
6625		if (!nr_retries)
6626			lru_add_drain_all();
6627
6628		reclaimed = try_to_free_mem_cgroup_pages(memcg,
6629						nr_to_reclaim - nr_reclaimed,
6630						GFP_KERNEL, reclaim_options);
6631
6632		if (!reclaimed && !nr_retries--)
6633			return -EAGAIN;
6634
6635		nr_reclaimed += reclaimed;
6636	}
6637
6638	return nbytes;
6639}
6640
6641static struct cftype memory_files[] = {
6642	{
6643		.name = "current",
6644		.flags = CFTYPE_NOT_ON_ROOT,
6645		.read_u64 = memory_current_read,
6646	},
6647	{
6648		.name = "peak",
6649		.flags = CFTYPE_NOT_ON_ROOT,
6650		.read_u64 = memory_peak_read,
6651	},
6652	{
6653		.name = "min",
6654		.flags = CFTYPE_NOT_ON_ROOT,
6655		.seq_show = memory_min_show,
6656		.write = memory_min_write,
6657	},
6658	{
6659		.name = "low",
6660		.flags = CFTYPE_NOT_ON_ROOT,
6661		.seq_show = memory_low_show,
6662		.write = memory_low_write,
6663	},
6664	{
6665		.name = "high",
6666		.flags = CFTYPE_NOT_ON_ROOT,
6667		.seq_show = memory_high_show,
6668		.write = memory_high_write,
6669	},
6670	{
6671		.name = "max",
6672		.flags = CFTYPE_NOT_ON_ROOT,
6673		.seq_show = memory_max_show,
6674		.write = memory_max_write,
6675	},
6676	{
6677		.name = "events",
6678		.flags = CFTYPE_NOT_ON_ROOT,
6679		.file_offset = offsetof(struct mem_cgroup, events_file),
6680		.seq_show = memory_events_show,
6681	},
6682	{
6683		.name = "events.local",
6684		.flags = CFTYPE_NOT_ON_ROOT,
6685		.file_offset = offsetof(struct mem_cgroup, events_local_file),
6686		.seq_show = memory_events_local_show,
6687	},
6688	{
6689		.name = "stat",
6690		.seq_show = memory_stat_show,
6691	},
6692#ifdef CONFIG_NUMA
6693	{
6694		.name = "numa_stat",
6695		.seq_show = memory_numa_stat_show,
6696	},
6697#endif
6698	{
6699		.name = "oom.group",
6700		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6701		.seq_show = memory_oom_group_show,
6702		.write = memory_oom_group_write,
6703	},
6704	{
6705		.name = "reclaim",
6706		.flags = CFTYPE_NS_DELEGATABLE,
6707		.write = memory_reclaim,
6708	},
6709	{ }	/* terminate */
6710};
6711
6712struct cgroup_subsys memory_cgrp_subsys = {
6713	.css_alloc = mem_cgroup_css_alloc,
6714	.css_online = mem_cgroup_css_online,
6715	.css_offline = mem_cgroup_css_offline,
6716	.css_released = mem_cgroup_css_released,
6717	.css_free = mem_cgroup_css_free,
6718	.css_reset = mem_cgroup_css_reset,
6719	.css_rstat_flush = mem_cgroup_css_rstat_flush,
6720	.can_attach = mem_cgroup_can_attach,
6721	.attach = mem_cgroup_attach,
6722	.cancel_attach = mem_cgroup_cancel_attach,
6723	.post_attach = mem_cgroup_move_task,
 
6724	.dfl_cftypes = memory_files,
6725	.legacy_cftypes = mem_cgroup_legacy_files,
6726	.early_init = 0,
6727};
6728
6729/*
6730 * This function calculates an individual cgroup's effective
6731 * protection which is derived from its own memory.min/low, its
6732 * parent's and siblings' settings, as well as the actual memory
6733 * distribution in the tree.
6734 *
6735 * The following rules apply to the effective protection values:
6736 *
6737 * 1. At the first level of reclaim, effective protection is equal to
6738 *    the declared protection in memory.min and memory.low.
6739 *
6740 * 2. To enable safe delegation of the protection configuration, at
6741 *    subsequent levels the effective protection is capped to the
6742 *    parent's effective protection.
6743 *
6744 * 3. To make complex and dynamic subtrees easier to configure, the
6745 *    user is allowed to overcommit the declared protection at a given
6746 *    level. If that is the case, the parent's effective protection is
6747 *    distributed to the children in proportion to how much protection
6748 *    they have declared and how much of it they are utilizing.
6749 *
6750 *    This makes distribution proportional, but also work-conserving:
6751 *    if one cgroup claims much more protection than it uses memory,
6752 *    the unused remainder is available to its siblings.
6753 *
6754 * 4. Conversely, when the declared protection is undercommitted at a
6755 *    given level, the distribution of the larger parental protection
6756 *    budget is NOT proportional. A cgroup's protection from a sibling
6757 *    is capped to its own memory.min/low setting.
6758 *
6759 * 5. However, to allow protecting recursive subtrees from each other
6760 *    without having to declare each individual cgroup's fixed share
6761 *    of the ancestor's claim to protection, any unutilized -
6762 *    "floating" - protection from up the tree is distributed in
6763 *    proportion to each cgroup's *usage*. This makes the protection
6764 *    neutral wrt sibling cgroups and lets them compete freely over
6765 *    the shared parental protection budget, but it protects the
6766 *    subtree as a whole from neighboring subtrees.
6767 *
6768 * Note that 4. and 5. are not in conflict: 4. is about protecting
6769 * against immediate siblings whereas 5. is about protecting against
6770 * neighboring subtrees.
6771 */
6772static unsigned long effective_protection(unsigned long usage,
6773					  unsigned long parent_usage,
6774					  unsigned long setting,
6775					  unsigned long parent_effective,
6776					  unsigned long siblings_protected)
6777{
6778	unsigned long protected;
6779	unsigned long ep;
6780
6781	protected = min(usage, setting);
6782	/*
6783	 * If all cgroups at this level combined claim and use more
6784	 * protection than what the parent affords them, distribute
6785	 * shares in proportion to utilization.
6786	 *
6787	 * We are using actual utilization rather than the statically
6788	 * claimed protection in order to be work-conserving: claimed
6789	 * but unused protection is available to siblings that would
6790	 * otherwise get a smaller chunk than what they claimed.
6791	 */
6792	if (siblings_protected > parent_effective)
6793		return protected * parent_effective / siblings_protected;
6794
6795	/*
6796	 * Ok, utilized protection of all children is within what the
6797	 * parent affords them, so we know whatever this child claims
6798	 * and utilizes is effectively protected.
6799	 *
6800	 * If there is unprotected usage beyond this value, reclaim
6801	 * will apply pressure in proportion to that amount.
6802	 *
6803	 * If there is unutilized protection, the cgroup will be fully
6804	 * shielded from reclaim, but we do return a smaller value for
6805	 * protection than what the group could enjoy in theory. This
6806	 * is okay. With the overcommit distribution above, effective
6807	 * protection is always dependent on how memory is actually
6808	 * consumed among the siblings anyway.
6809	 */
6810	ep = protected;
6811
6812	/*
6813	 * If the children aren't claiming (all of) the protection
6814	 * afforded to them by the parent, distribute the remainder in
6815	 * proportion to the (unprotected) memory of each cgroup. That
6816	 * way, cgroups that aren't explicitly prioritized wrt each
6817	 * other compete freely over the allowance, but they are
6818	 * collectively protected from neighboring trees.
6819	 *
6820	 * We're using unprotected memory for the weight so that if
6821	 * some cgroups DO claim explicit protection, we don't protect
6822	 * the same bytes twice.
6823	 *
6824	 * Check both usage and parent_usage against the respective
6825	 * protected values. One should imply the other, but they
6826	 * aren't read atomically - make sure the division is sane.
6827	 */
6828	if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6829		return ep;
6830	if (parent_effective > siblings_protected &&
6831	    parent_usage > siblings_protected &&
6832	    usage > protected) {
6833		unsigned long unclaimed;
6834
6835		unclaimed = parent_effective - siblings_protected;
6836		unclaimed *= usage - protected;
6837		unclaimed /= parent_usage - siblings_protected;
6838
6839		ep += unclaimed;
6840	}
6841
6842	return ep;
6843}
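/*
 * Worked example (illustrative only, the numbers are hypothetical): rule 3
 * from the comment block above in action.  Suppose the parent's effective
 * low protection is 100 pages and its two children declare and fully use
 * 80 and 40 pages respectively.  Together they claim 120 > 100, so each
 * share is scaled down by the overcommit branch of effective_protection():
 *
 *   child A: 80 * 100 / 120 = 66 pages effective protection
 *   child B: 40 * 100 / 120 = 33 pages effective protection
 *
 * i.e. the "protected * parent_effective / siblings_protected" case above.
 */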
6844
6845/**
6846 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
6847 * @root: the top ancestor of the sub-tree being checked
6848 * @memcg: the memory cgroup to check
6849 *
6850 * WARNING: This function is not stateless! It can only be used as part
6851 *          of a top-down tree iteration, not for isolated queries.
6852 */
6853void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6854				     struct mem_cgroup *memcg)
6855{
6856	unsigned long usage, parent_usage;
6857	struct mem_cgroup *parent;
6858
6859	if (mem_cgroup_disabled())
6860		return;
6861
6862	if (!root)
6863		root = root_mem_cgroup;
6864
6865	/*
6866	 * Effective values of the reclaim targets are ignored so they
6867	 * can be stale. Have a look at mem_cgroup_protection for more
6868	 * details.
6869	 * TODO: calculation should be more robust so that we do not need
6870	 * that special casing.
6871	 */
6872	if (memcg == root)
6873		return;
6874
6875	usage = page_counter_read(&memcg->memory);
6876	if (!usage)
6877		return;
6878
6879	parent = parent_mem_cgroup(memcg);
6880
6881	if (parent == root) {
6882		memcg->memory.emin = READ_ONCE(memcg->memory.min);
6883		memcg->memory.elow = READ_ONCE(memcg->memory.low);
6884		return;
6885	}
6886
6887	parent_usage = page_counter_read(&parent->memory);
6888
6889	WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6890			READ_ONCE(memcg->memory.min),
6891			READ_ONCE(parent->memory.emin),
6892			atomic_long_read(&parent->memory.children_min_usage)));
6893
6894	WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6895			READ_ONCE(memcg->memory.low),
6896			READ_ONCE(parent->memory.elow),
6897			atomic_long_read(&parent->memory.children_low_usage)));
6898}
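/*
 * Usage sketch (illustrative, not part of the original file): reclaim is
 * expected to call this as part of a top-down walk with mem_cgroup_iter(),
 * never for an isolated cgroup, so that a parent's emin/elow are already
 * computed when its children are visited.  walk_protection() is a
 * hypothetical helper.
 */
static __maybe_unused void walk_protection(struct mem_cgroup *root)
{
	struct mem_cgroup *memcg;

	for (memcg = mem_cgroup_iter(root, NULL, NULL); memcg;
	     memcg = mem_cgroup_iter(root, memcg, NULL)) {
		mem_cgroup_calculate_protection(root, memcg);

		/* e.g. skip cgroups whose usage is within their protection */
		if (page_counter_read(&memcg->memory) <=
		    READ_ONCE(memcg->memory.emin))
			continue;
	}
}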
6899
6900static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
6901			gfp_t gfp)
6902{
6903	long nr_pages = folio_nr_pages(folio);
6904	int ret;
6905
6906	ret = try_charge(memcg, gfp, nr_pages);
6907	if (ret)
6908		goto out;
6909
6910	css_get(&memcg->css);
6911	commit_charge(folio, memcg);
6912
6913	local_irq_disable();
6914	mem_cgroup_charge_statistics(memcg, nr_pages);
6915	memcg_check_events(memcg, folio_nid(folio));
6916	local_irq_enable();
6917out:
6918	return ret;
6919}
6920
6921int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
6922{
6923	struct mem_cgroup *memcg;
6924	int ret;
6925
6926	memcg = get_mem_cgroup_from_mm(mm);
6927	ret = charge_memcg(folio, memcg, gfp);
6928	css_put(&memcg->css);
6929
6930	return ret;
6931}
6932
6933/**
6934 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
6935 * @folio: folio to charge.
6936 * @mm: mm context of the victim
6937 * @gfp: reclaim mode
6938 * @entry: swap entry for which the folio is allocated
6939 *
6940 * This function charges a folio allocated for swapin. Please call this before
6941 * adding the folio to the swapcache.
6942 *
6943 * Returns 0 on success. Otherwise, an error code is returned.
6944 */
6945int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
6946				  gfp_t gfp, swp_entry_t entry)
6947{
6948	struct mem_cgroup *memcg;
6949	unsigned short id;
6950	int ret;
6951
6952	if (mem_cgroup_disabled())
6953		return 0;
6954
6955	id = lookup_swap_cgroup_id(entry);
6956	rcu_read_lock();
6957	memcg = mem_cgroup_from_id(id);
6958	if (!memcg || !css_tryget_online(&memcg->css))
6959		memcg = get_mem_cgroup_from_mm(mm);
6960	rcu_read_unlock();
6961
6962	ret = charge_memcg(folio, memcg, gfp);
6963
6964	css_put(&memcg->css);
6965	return ret;
6966}
6967
6968/*
6969 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
6970 * @entry: swap entry for which the page is charged
6971 *
6972 * Call this function after successfully adding the charged page to the swapcache.
6973 *
6974 * Note: This function assumes the page for which the swap slot is being
6975 * uncharged is an order-0 page.
6976 */
6977void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
6978{
6979	/*
6980	 * Cgroup1's unified memory+swap counter has been charged with the
6981	 * new swapcache page, finish the transfer by uncharging the swap
6982	 * slot. The swap slot would also get uncharged when it dies, but
6983	 * it can stick around indefinitely and we'd count the page twice
6984	 * the entire time.
6985	 *
6986	 * Cgroup2 has separate resource counters for memory and swap,
6987	 * so this is a non-issue here. Memory and swap charge lifetimes
6988	 * correspond 1:1 to page and swap slot lifetimes: we charge the
6989	 * page to memory here, and uncharge swap when the slot is freed.
6990	 */
6991	if (!mem_cgroup_disabled() && do_memsw_account()) {
6992		/*
6993		 * The swap entry might not get freed for a long time,
6994		 * let's not wait for it.  The page already received a
6995		 * memory+swap charge, drop the swap entry duplicate.
6996		 */
6997		mem_cgroup_uncharge_swap(entry, 1);
6998	}
6999}
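
/*
 * Minimal usage sketch (illustrative only, not part of this file): the
 * ordering the two swapin helpers above document - charge the new folio
 * first, add it to the swapcache, then drop the swap slot's duplicate
 * charge. The swapcache helper named below is hypothetical.
 */
#if 0	/* illustration only */
static int example_swapin_setup(struct folio *folio, struct mm_struct *mm,
				gfp_t gfp, swp_entry_t entry)
{
	int ret;

	/* Must run before the folio becomes visible in the swapcache. */
	ret = mem_cgroup_swapin_charge_folio(folio, mm, gfp, entry);
	if (ret)
		return ret;

	ret = example_add_to_swap_cache(folio, entry);	/* hypothetical */
	if (ret)
		return ret;

	/* Transfer done: on cgroup1 this drops the memsw duplicate. */
	mem_cgroup_swapin_uncharge_swap(entry);
	return 0;
}
#endif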
7000
7001struct uncharge_gather {
7002	struct mem_cgroup *memcg;
7003	unsigned long nr_memory;
7004	unsigned long pgpgout;
7005	unsigned long nr_kmem;
7006	int nid;
7007};
7008
7009static inline void uncharge_gather_clear(struct uncharge_gather *ug)
7010{
7011	memset(ug, 0, sizeof(*ug));
7012}
7013
7014static void uncharge_batch(const struct uncharge_gather *ug)
7015{
7016	unsigned long flags;
7017
7018	if (ug->nr_memory) {
7019		page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
7020		if (do_memsw_account())
7021			page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
7022		if (ug->nr_kmem)
7023			memcg_account_kmem(ug->memcg, -ug->nr_kmem);
7024		memcg_oom_recover(ug->memcg);
7025	}
7026
7027	local_irq_save(flags);
7028	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
7029	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
7030	memcg_check_events(ug->memcg, ug->nid);
7031	local_irq_restore(flags);
7032
7033	/* drop reference from uncharge_folio */
7034	css_put(&ug->memcg->css);
7035}
7036
7037static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
7038{
7039	long nr_pages;
7040	struct mem_cgroup *memcg;
7041	struct obj_cgroup *objcg;
7042
7043	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7044
7045	/*
7046	 * Nobody should be changing or seriously looking at
7047	 * folio memcg or objcg at this point, we have fully
7048	 * exclusive access to the folio.
7049	 */
7050	if (folio_memcg_kmem(folio)) {
7051		objcg = __folio_objcg(folio);
7052		/*
7053		 * This get matches the put at the end of the function and
7054		 * kmem pages do not hold memcg references anymore.
7055		 */
7056		memcg = get_mem_cgroup_from_objcg(objcg);
7057	} else {
7058		memcg = __folio_memcg(folio);
7059	}
7060
7061	if (!memcg)
7062		return;
7063
7064	if (ug->memcg != memcg) {
7065		if (ug->memcg) {
7066			uncharge_batch(ug);
7067			uncharge_gather_clear(ug);
7068		}
7069		ug->memcg = memcg;
7070		ug->nid = folio_nid(folio);
7071
7072		/* pairs with css_put in uncharge_batch */
7073		css_get(&memcg->css);
7074	}
7075
7076	nr_pages = folio_nr_pages(folio);
7077
7078	if (folio_memcg_kmem(folio)) {
7079		ug->nr_memory += nr_pages;
7080		ug->nr_kmem += nr_pages;
7081
7082		folio->memcg_data = 0;
7083		obj_cgroup_put(objcg);
7084	} else {
7085		/* LRU pages aren't accounted at the root level */
7086		if (!mem_cgroup_is_root(memcg))
7087			ug->nr_memory += nr_pages;
7088		ug->pgpgout++;
7089
7090		folio->memcg_data = 0;
7091	}
7092
7093	css_put(&memcg->css);
7094}
7095
7096void __mem_cgroup_uncharge(struct folio *folio)
7097{
7098	struct uncharge_gather ug;
7099
7100	/* Don't touch folio->lru of any random page, pre-check: */
7101	if (!folio_memcg(folio))
7102		return;
7103
7104	uncharge_gather_clear(&ug);
7105	uncharge_folio(folio, &ug);
7106	uncharge_batch(&ug);
7107}
7108
7109/**
7110 * __mem_cgroup_uncharge_list - uncharge a list of pages
7111 * @page_list: list of pages to uncharge
7112 *
7113 * Uncharge a list of pages previously charged with
7114 * __mem_cgroup_charge().
7115 */
7116void __mem_cgroup_uncharge_list(struct list_head *page_list)
7117{
7118	struct uncharge_gather ug;
7119	struct folio *folio;
7120
7121	uncharge_gather_clear(&ug);
7122	list_for_each_entry(folio, page_list, lru)
7123		uncharge_folio(folio, &ug);
7124	if (ug.memcg)
7125		uncharge_batch(&ug);
7126}
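
/*
 * Minimal usage sketch (illustrative only, not part of this file): batching
 * uncharges through the list interface above. uncharge_folio() coalesces
 * consecutive folios of the same memcg into one gather, so a release path
 * freeing many folios pays for a single counter update per memcg run.
 * The helper name is invented.
 */
#if 0	/* illustration only */
static void example_release_folios(struct folio **folios, int nr)
{
	LIST_HEAD(batch);
	int i;

	/* Folios being freed; they are already off the LRU. */
	for (i = 0; i < nr; i++)
		list_add(&folios[i]->lru, &batch);

	__mem_cgroup_uncharge_list(&batch);
}
#endif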
7127
7128/**
7129 * mem_cgroup_migrate - Charge a folio's replacement.
7130 * @old: Currently circulating folio.
7131 * @new: Replacement folio.
7132 *
7133 * Charge @new as a replacement folio for @old. @old will
7134 * be uncharged upon free.
7135 *
7136 * Both folios must be locked, @new->mapping must be set up.
7137 */
7138void mem_cgroup_migrate(struct folio *old, struct folio *new)
7139{
7140	struct mem_cgroup *memcg;
7141	long nr_pages = folio_nr_pages(new);
7142	unsigned long flags;
7143
7144	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7145	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7146	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7147	VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
7148
7149	if (mem_cgroup_disabled())
7150		return;
7151
7152	/* Page cache replacement: new folio already charged? */
7153	if (folio_memcg(new))
7154		return;
7155
7156	memcg = folio_memcg(old);
7157	VM_WARN_ON_ONCE_FOLIO(!memcg, old);
7158	if (!memcg)
7159		return;
7160
7161	/* Force-charge the new page. The old one will be freed soon */
7162	if (!mem_cgroup_is_root(memcg)) {
7163		page_counter_charge(&memcg->memory, nr_pages);
7164		if (do_memsw_account())
7165			page_counter_charge(&memcg->memsw, nr_pages);
7166	}
7167
7168	css_get(&memcg->css);
7169	commit_charge(new, memcg);
7170
7171	local_irq_save(flags);
7172	mem_cgroup_charge_statistics(memcg, nr_pages);
7173	memcg_check_events(memcg, folio_nid(new));
7174	local_irq_restore(flags);
7175}
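
/*
 * Minimal usage sketch (illustrative only, not part of this file): a
 * replacement path honouring the rules documented above - both folios
 * locked and @new->mapping set up before the charge is transferred.
 * The helper name is invented.
 */
#if 0	/* illustration only */
static void example_replace_folio(struct folio *old, struct folio *new)
{
	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);

	new->mapping = old->mapping;	/* required before the transfer */
	new->index = old->index;

	mem_cgroup_migrate(old, new);
	/* @new is charged now; @old keeps its charge until it is freed. */
}
#endif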
7176
7177DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
7178EXPORT_SYMBOL(memcg_sockets_enabled_key);
7179
7180void mem_cgroup_sk_alloc(struct sock *sk)
7181{
7182	struct mem_cgroup *memcg;
7183
7184	if (!mem_cgroup_sockets_enabled)
7185		return;
7186
7187	/* Do not associate the sock with unrelated interrupted task's memcg. */
7188	if (!in_task())
7189		return;
7190
7191	rcu_read_lock();
7192	memcg = mem_cgroup_from_task(current);
7193	if (mem_cgroup_is_root(memcg))
7194		goto out;
7195	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
7196		goto out;
7197	if (css_tryget(&memcg->css))
7198		sk->sk_memcg = memcg;
7199out:
7200	rcu_read_unlock();
7201}
7202
7203void mem_cgroup_sk_free(struct sock *sk)
7204{
7205	if (sk->sk_memcg)
7206		css_put(&sk->sk_memcg->css);
7207}
7208
7209/**
7210 * mem_cgroup_charge_skmem - charge socket memory
7211 * @memcg: memcg to charge
7212 * @nr_pages: number of pages to charge
7213 * @gfp_mask: reclaim mode
7214 *
7215 * Charges @nr_pages to @memcg. Returns %true if the charge fits within
7216 * @memcg's configured limit, %false if it doesn't.
7217 */
7218bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
7219			     gfp_t gfp_mask)
7220{
7221	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7222		struct page_counter *fail;
7223
7224		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7225			memcg->tcpmem_pressure = 0;
7226			return true;
7227		}
7228		memcg->tcpmem_pressure = 1;
7229		if (gfp_mask & __GFP_NOFAIL) {
7230			page_counter_charge(&memcg->tcpmem, nr_pages);
7231			return true;
7232		}
7233		return false;
7234	}
7235
7236	if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
7237		mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7238		return true;
7239	}
7240
7241	return false;
7242}
7243
7244/**
7245 * mem_cgroup_uncharge_skmem - uncharge socket memory
7246 * @memcg: memcg to uncharge
7247 * @nr_pages: number of pages to uncharge
7248 */
7249void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7250{
7251	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7252		page_counter_uncharge(&memcg->tcpmem, nr_pages);
7253		return;
7254	}
7255
7256	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7257
7258	refill_stock(memcg, nr_pages);
7259}
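
/*
 * Minimal usage sketch (illustrative only, not part of this file): pairing
 * the two socket-memory helpers above around a hypothetical buffer
 * allocation, gated on the socket having a memcg attached by
 * mem_cgroup_sk_alloc(). Helper names are invented.
 */
#if 0	/* illustration only */
static bool example_sk_account(struct sock *sk, unsigned int nr_pages,
			       gfp_t gfp)
{
	if (!mem_cgroup_sockets_enabled || !sk->sk_memcg)
		return true;	/* nothing to charge against */

	/* May return false once the memcg limit is exceeded. */
	return mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages, gfp);
}

static void example_sk_unaccount(struct sock *sk, unsigned int nr_pages)
{
	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
		mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
}
#endif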
7260
7261static int __init cgroup_memory(char *s)
7262{
7263	char *token;
7264
7265	while ((token = strsep(&s, ",")) != NULL) {
7266		if (!*token)
7267			continue;
7268		if (!strcmp(token, "nosocket"))
7269			cgroup_memory_nosocket = true;
7270		if (!strcmp(token, "nokmem"))
7271			cgroup_memory_nokmem = true;
7272	}
7273	return 1;
7274}
7275__setup("cgroup.memory=", cgroup_memory);
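
/*
 * Usage note (illustrative, not part of this file): the parser above takes
 * a comma-separated list on the kernel command line, e.g.
 *
 *	cgroup.memory=nosocket,nokmem
 *
 * which disables socket memory accounting and kernel memory accounting
 * respectively; unrecognized tokens are silently ignored here.
 */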
7276
7277/*
7278 * subsys_initcall() for memory controller.
7279 *
7280 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7281 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7282 * basically everything that doesn't depend on a specific mem_cgroup structure
7283 * should be initialized from here.
7284 */
7285static int __init mem_cgroup_init(void)
7286{
7287	int cpu, node;
7288
7289	/*
7290	 * Currently s32 type (can refer to struct batched_lruvec_stat) is
7291	 * used for per-memcg-per-cpu caching of per-node statistics. In order
7292	 * to work fine, we should make sure that the overfill threshold can't
7293	 * exceed S32_MAX / PAGE_SIZE.
7294	 */
7295	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7296
7297	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7298				  memcg_hotplug_cpu_dead);
7299
7300	for_each_possible_cpu(cpu)
7301		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7302			  drain_local_stock);
7303
7304	for_each_node(node) {
7305		struct mem_cgroup_tree_per_node *rtpn;
7306
7307		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
7308				    node_online(node) ? node : NUMA_NO_NODE);
7309
7310		rtpn->rb_root = RB_ROOT;
7311		rtpn->rb_rightmost = NULL;
7312		spin_lock_init(&rtpn->lock);
7313		soft_limit_tree.rb_tree_per_node[node] = rtpn;
7314	}
7315
7316	return 0;
7317}
7318subsys_initcall(mem_cgroup_init);
7319
7320#ifdef CONFIG_SWAP
7321static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7322{
7323	while (!refcount_inc_not_zero(&memcg->id.ref)) {
7324		/*
7325		 * The root cgroup cannot be destroyed, so its refcount must
7326		 * always be >= 1.
7327		 */
7328		if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
7329			VM_BUG_ON(1);
7330			break;
7331		}
7332		memcg = parent_mem_cgroup(memcg);
7333		if (!memcg)
7334			memcg = root_mem_cgroup;
7335	}
7336	return memcg;
7337}
7338
7339/**
7340 * mem_cgroup_swapout - transfer a memsw charge to swap
7341 * @folio: folio whose memsw charge to transfer
7342 * @entry: swap entry to move the charge to
7343 *
7344 * Transfer the memsw charge of @folio to @entry.
7345 */
7346void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
7347{
7348	struct mem_cgroup *memcg, *swap_memcg;
7349	unsigned int nr_entries;
7350	unsigned short oldid;
7351
7352	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7353	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
7354
7355	if (mem_cgroup_disabled())
7356		return;
7357
7358	if (!do_memsw_account())
7359		return;
7360
7361	memcg = folio_memcg(folio);
7362
7363	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7364	if (!memcg)
7365		return;
7366
7367	/*
7368	 * In case the memcg owning these pages has been offlined and doesn't
7369	 * have an ID allocated to it anymore, charge the closest online
7370	 * ancestor for the swap instead and transfer the memory+swap charge.
7371	 */
7372	swap_memcg = mem_cgroup_id_get_online(memcg);
7373	nr_entries = folio_nr_pages(folio);
7374	/* Get references for the tail pages, too */
7375	if (nr_entries > 1)
7376		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7377	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7378				   nr_entries);
7379	VM_BUG_ON_FOLIO(oldid, folio);
7380	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7381
7382	folio->memcg_data = 0;
7383
7384	if (!mem_cgroup_is_root(memcg))
7385		page_counter_uncharge(&memcg->memory, nr_entries);
7386
7387	if (memcg != swap_memcg) {
7388		if (!mem_cgroup_is_root(swap_memcg))
7389			page_counter_charge(&swap_memcg->memsw, nr_entries);
7390		page_counter_uncharge(&memcg->memsw, nr_entries);
7391	}
7392
7393	/*
7394	 * Interrupts should be disabled here because the caller holds the
7395	 * i_pages lock which is taken with interrupts-off. It is
7396	 * important here to have the interrupts disabled because it is the
7397	 * only synchronisation we have for updating the per-CPU variables.
7398	 */
7399	memcg_stats_lock();
7400	mem_cgroup_charge_statistics(memcg, -nr_entries);
7401	memcg_stats_unlock();
7402	memcg_check_events(memcg, folio_nid(folio));
7403
7404	css_put(&memcg->css);
7405}
7406
7407/**
7408 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
7409 * @folio: folio being added to swap
7410 * @entry: swap entry to charge
7411 *
7412 * Try to charge @folio's memcg for the swap space at @entry.
7413 *
7414 * Returns 0 on success, -ENOMEM on failure.
7415 */
7416int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
7417{
7418	unsigned int nr_pages = folio_nr_pages(folio);
7419	struct page_counter *counter;
7420	struct mem_cgroup *memcg;
7421	unsigned short oldid;
7422
7423	if (do_memsw_account())
7424		return 0;
7425
7426	memcg = folio_memcg(folio);
7427
7428	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7429	if (!memcg)
7430		return 0;
7431
7432	if (!entry.val) {
7433		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7434		return 0;
7435	}
7436
7437	memcg = mem_cgroup_id_get_online(memcg);
7438
7439	if (!mem_cgroup_is_root(memcg) &&
7440	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7441		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7442		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7443		mem_cgroup_id_put(memcg);
7444		return -ENOMEM;
7445	}
7446
7447	/* Get references for the tail pages, too */
7448	if (nr_pages > 1)
7449		mem_cgroup_id_get_many(memcg, nr_pages - 1);
7450	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7451	VM_BUG_ON_FOLIO(oldid, folio);
7452	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7453
7454	return 0;
7455}
7456
7457/**
7458 * __mem_cgroup_uncharge_swap - uncharge swap space
7459 * @entry: swap entry to uncharge
7460 * @nr_pages: the amount of swap space to uncharge
7461 */
7462void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7463{
7464	struct mem_cgroup *memcg;
7465	unsigned short id;
7466
7467	if (mem_cgroup_disabled())
7468		return;
7469
7470	id = swap_cgroup_record(entry, 0, nr_pages);
7471	rcu_read_lock();
7472	memcg = mem_cgroup_from_id(id);
7473	if (memcg) {
7474		if (!mem_cgroup_is_root(memcg)) {
7475			if (do_memsw_account())
7476				page_counter_uncharge(&memcg->memsw, nr_pages);
7477			else
7478				page_counter_uncharge(&memcg->swap, nr_pages);
7479		}
7480		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7481		mem_cgroup_id_put_many(memcg, nr_pages);
7482	}
7483	rcu_read_unlock();
7484}
7485
7486long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7487{
7488	long nr_swap_pages = get_nr_swap_pages();
7489
7490	if (mem_cgroup_disabled() || do_memsw_account())
7491		return nr_swap_pages;
7492	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
7493		nr_swap_pages = min_t(long, nr_swap_pages,
7494				      READ_ONCE(memcg->swap.max) -
7495				      page_counter_read(&memcg->swap));
7496	return nr_swap_pages;
7497}
7498
7499bool mem_cgroup_swap_full(struct folio *folio)
7500{
7501	struct mem_cgroup *memcg;
7502
7503	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
7504
7505	if (vm_swap_full())
7506		return true;
7507	if (do_memsw_account())
7508		return false;
7509
7510	memcg = folio_memcg(folio);
7511	if (!memcg)
7512		return false;
7513
7514	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
7515		unsigned long usage = page_counter_read(&memcg->swap);
7516
7517		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7518		    usage * 2 >= READ_ONCE(memcg->swap.max))
7519			return true;
7520	}
7521
7522	return false;
7523}
7524
7525static int __init setup_swap_account(char *s)
7526{
7527	pr_warn_once("The swapaccount= commandline option is deprecated. "
7528		     "Please report your usecase to linux-mm@kvack.org if you "
7529		     "depend on this functionality.\n");
7530	return 1;
7531}
7532__setup("swapaccount=", setup_swap_account);
7533
7534static u64 swap_current_read(struct cgroup_subsys_state *css,
7535			     struct cftype *cft)
7536{
7537	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7538
7539	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7540}
7541
7542static int swap_high_show(struct seq_file *m, void *v)
7543{
7544	return seq_puts_memcg_tunable(m,
7545		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7546}
7547
7548static ssize_t swap_high_write(struct kernfs_open_file *of,
7549			       char *buf, size_t nbytes, loff_t off)
7550{
7551	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7552	unsigned long high;
7553	int err;
7554
7555	buf = strstrip(buf);
7556	err = page_counter_memparse(buf, "max", &high);
7557	if (err)
7558		return err;
7559
7560	page_counter_set_high(&memcg->swap, high);
7561
7562	return nbytes;
7563}
7564
7565static int swap_max_show(struct seq_file *m, void *v)
7566{
7567	return seq_puts_memcg_tunable(m,
7568		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7569}
7570
7571static ssize_t swap_max_write(struct kernfs_open_file *of,
7572			      char *buf, size_t nbytes, loff_t off)
7573{
7574	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7575	unsigned long max;
7576	int err;
7577
7578	buf = strstrip(buf);
7579	err = page_counter_memparse(buf, "max", &max);
7580	if (err)
7581		return err;
7582
7583	xchg(&memcg->swap.max, max);
7584
7585	return nbytes;
7586}
7587
7588static int swap_events_show(struct seq_file *m, void *v)
7589{
7590	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7591
7592	seq_printf(m, "high %lu\n",
7593		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
7594	seq_printf(m, "max %lu\n",
7595		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7596	seq_printf(m, "fail %lu\n",
7597		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7598
7599	return 0;
7600}
7601
7602static struct cftype swap_files[] = {
7603	{
7604		.name = "swap.current",
7605		.flags = CFTYPE_NOT_ON_ROOT,
7606		.read_u64 = swap_current_read,
7607	},
7608	{
7609		.name = "swap.high",
7610		.flags = CFTYPE_NOT_ON_ROOT,
7611		.seq_show = swap_high_show,
7612		.write = swap_high_write,
7613	},
7614	{
7615		.name = "swap.max",
7616		.flags = CFTYPE_NOT_ON_ROOT,
7617		.seq_show = swap_max_show,
7618		.write = swap_max_write,
7619	},
7620	{
7621		.name = "swap.events",
7622		.flags = CFTYPE_NOT_ON_ROOT,
7623		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
7624		.seq_show = swap_events_show,
7625	},
7626	{ }	/* terminate */
7627};
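
/*
 * Usage note (illustrative, not part of this file): the cftypes above show
 * up as per-cgroup files on the cgroup2 hierarchy, e.g. (paths invented):
 *
 *	cat /sys/fs/cgroup/foo/memory.swap.current
 *	echo 512M > /sys/fs/cgroup/foo/memory.swap.max
 *	echo max > /sys/fs/cgroup/foo/memory.swap.high
 *
 * Limits are parsed by page_counter_memparse(), so byte values with
 * K/M/G suffixes and the literal "max" are accepted.
 */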
7628
7629static struct cftype memsw_files[] = {
7630	{
7631		.name = "memsw.usage_in_bytes",
7632		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7633		.read_u64 = mem_cgroup_read_u64,
7634	},
7635	{
7636		.name = "memsw.max_usage_in_bytes",
7637		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7638		.write = mem_cgroup_reset,
7639		.read_u64 = mem_cgroup_read_u64,
7640	},
7641	{
7642		.name = "memsw.limit_in_bytes",
7643		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7644		.write = mem_cgroup_write,
7645		.read_u64 = mem_cgroup_read_u64,
7646	},
7647	{
7648		.name = "memsw.failcnt",
7649		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7650		.write = mem_cgroup_reset,
7651		.read_u64 = mem_cgroup_read_u64,
7652	},
7653	{ },	/* terminate */
7654};
7655
7656#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
7657/**
7658 * obj_cgroup_may_zswap - check if this cgroup can zswap
7659 * @objcg: the object cgroup
7660 *
7661 * Check if the hierarchical zswap limit has been reached.
7662 *
7663 * This doesn't check for specific headroom, and it is not atomic
7664 * either. But with zswap, the size of the allocation is only known
7665 * once compression has occurred, and this optimistic pre-check avoids
7666 * spending cycles on compression when there is already no room left
7667 * or zswap is disabled altogether somewhere in the hierarchy.
7668 */
7669bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
7670{
7671	struct mem_cgroup *memcg, *original_memcg;
7672	bool ret = true;
7673
7674	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7675		return true;
7676
7677	original_memcg = get_mem_cgroup_from_objcg(objcg);
7678	for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
7679	     memcg = parent_mem_cgroup(memcg)) {
7680		unsigned long max = READ_ONCE(memcg->zswap_max);
7681		unsigned long pages;
7682
7683		if (max == PAGE_COUNTER_MAX)
7684			continue;
7685		if (max == 0) {
7686			ret = false;
7687			break;
7688		}
7689
7690		cgroup_rstat_flush(memcg->css.cgroup);
7691		pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
7692		if (pages < max)
7693			continue;
7694		ret = false;
7695		break;
7696	}
7697	mem_cgroup_put(original_memcg);
7698	return ret;
7699}
7700
7701/**
7702 * obj_cgroup_charge_zswap - charge compression backend memory
7703 * @objcg: the object cgroup
7704 * @size: size of compressed object
7705 *
7706 * This forces the charge after obj_cgroup_may_zswap() allowed
7707 * compression and storage in zswap for this cgroup to go ahead.
7708 */
7709void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
7710{
7711	struct mem_cgroup *memcg;
7712
7713	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7714		return;
7715
7716	VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
7717
7718	/* PF_MEMALLOC context, charging must succeed */
7719	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
7720		VM_WARN_ON_ONCE(1);
7721
7722	rcu_read_lock();
7723	memcg = obj_cgroup_memcg(objcg);
7724	mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
7725	mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
7726	rcu_read_unlock();
7727}
7728
7729/**
7730 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
7731 * @objcg: the object cgroup
7732 * @size: size of compressed object
7733 *
7734 * Uncharges zswap memory on page in.
7735 */
7736void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
7737{
7738	struct mem_cgroup *memcg;
7739
7740	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7741		return;
7742
7743	obj_cgroup_uncharge(objcg, size);
7744
7745	rcu_read_lock();
7746	memcg = obj_cgroup_memcg(objcg);
7747	mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
7748	mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
7749	rcu_read_unlock();
7750}
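
/*
 * Minimal usage sketch (illustrative only, not part of this file): how a
 * zswap store path could use the helpers above per their kernel-doc -
 * optimistic limit check before compressing, charge with the compressed
 * size afterwards, obj_cgroup_uncharge_zswap() later on page-in. The
 * compression helper named below is hypothetical.
 */
#if 0	/* illustration only */
static int example_zswap_store(struct obj_cgroup *objcg, struct page *page)
{
	size_t compressed_len;

	/* Cheap pre-check; the real size is unknown until compression. */
	if (!obj_cgroup_may_zswap(objcg))
		return -ENOMEM;

	compressed_len = example_compress(page);	/* hypothetical */

	/* The real path runs under PF_MEMALLOC, so this must succeed. */
	obj_cgroup_charge_zswap(objcg, compressed_len);
	return 0;
}
#endif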
7751
7752static u64 zswap_current_read(struct cgroup_subsys_state *css,
7753			      struct cftype *cft)
7754{
7755	cgroup_rstat_flush(css->cgroup);
7756	return memcg_page_state(mem_cgroup_from_css(css), MEMCG_ZSWAP_B);
7757}
7758
7759static int zswap_max_show(struct seq_file *m, void *v)
7760{
7761	return seq_puts_memcg_tunable(m,
7762		READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
7763}
7764
7765static ssize_t zswap_max_write(struct kernfs_open_file *of,
7766			       char *buf, size_t nbytes, loff_t off)
7767{
7768	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7769	unsigned long max;
7770	int err;
7771
7772	buf = strstrip(buf);
7773	err = page_counter_memparse(buf, "max", &max);
7774	if (err)
7775		return err;
7776
7777	xchg(&memcg->zswap_max, max);
7778
7779	return nbytes;
7780}
7781
7782static struct cftype zswap_files[] = {
7783	{
7784		.name = "zswap.current",
7785		.flags = CFTYPE_NOT_ON_ROOT,
7786		.read_u64 = zswap_current_read,
7787	},
7788	{
7789		.name = "zswap.max",
7790		.flags = CFTYPE_NOT_ON_ROOT,
7791		.seq_show = zswap_max_show,
7792		.write = zswap_max_write,
7793	},
7794	{ }	/* terminate */
7795};
7796#endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
7797
7798static int __init mem_cgroup_swap_init(void)
7799{
7800	if (mem_cgroup_disabled())
7801		return 0;
7802
7803	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
7804	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
7805#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
7806	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
7807#endif
7808	return 0;
7809}
7810subsys_initcall(mem_cgroup_swap_init);
7811
7812#endif /* CONFIG_SWAP */