v3.5.6
   1/* memcontrol.c - Memory Controller
   2 *
   3 * Copyright IBM Corporation, 2007
   4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
   5 *
   6 * Copyright 2007 OpenVZ SWsoft Inc
   7 * Author: Pavel Emelianov <xemul@openvz.org>
   8 *
   9 * Memory thresholds
  10 * Copyright (C) 2009 Nokia Corporation
  11 * Author: Kirill A. Shutemov
  12 *
  13 * This program is free software; you can redistribute it and/or modify
  14 * it under the terms of the GNU General Public License as published by
  15 * the Free Software Foundation; either version 2 of the License, or
  16 * (at your option) any later version.
  17 *
  18 * This program is distributed in the hope that it will be useful,
  19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  21 * GNU General Public License for more details.
  22 */
  23
  24#include <linux/res_counter.h>
 
  25#include <linux/memcontrol.h>
  26#include <linux/cgroup.h>
  27#include <linux/mm.h>
 
  28#include <linux/hugetlb.h>
  29#include <linux/pagemap.h>
  30#include <linux/smp.h>
  31#include <linux/page-flags.h>
  32#include <linux/backing-dev.h>
  33#include <linux/bit_spinlock.h>
  34#include <linux/rcupdate.h>
  35#include <linux/limits.h>
  36#include <linux/export.h>
 
  37#include <linux/mutex.h>
  38#include <linux/rbtree.h>
  39#include <linux/slab.h>
  40#include <linux/swap.h>
  41#include <linux/swapops.h>
  42#include <linux/spinlock.h>
  43#include <linux/eventfd.h>
  44#include <linux/sort.h>
  45#include <linux/fs.h>
  46#include <linux/seq_file.h>
  47#include <linux/vmalloc.h>
  48#include <linux/mm_inline.h>
  49#include <linux/page_cgroup.h>
  50#include <linux/cpu.h>
  51#include <linux/oom.h>
  52#include "internal.h"
  53#include <net/sock.h>
  54#include <net/tcp_memcontrol.h>
  55
  56#include <asm/uaccess.h>
  57
  58#include <trace/events/vmscan.h>
  59
  60struct cgroup_subsys mem_cgroup_subsys __read_mostly;
  61#define MEM_CGROUP_RECLAIM_RETRIES	5
  62static struct mem_cgroup *root_mem_cgroup __read_mostly;
  63
  64#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
  65/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
  66int do_swap_account __read_mostly;
  67
   68/* for remembering the boot option */
  69#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
  70static int really_do_swap_account __initdata = 1;
  71#else
  72static int really_do_swap_account __initdata = 0;
  73#endif
  74
  75#else
  76#define do_swap_account		0
  77#endif
  78
  79
  80/*
  81 * Statistics for memory cgroup.
  82 */
  83enum mem_cgroup_stat_index {
  84	/*
  85	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
  86	 */
  87	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
  88	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as anon rss */
  89	MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
  90	MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
  91	MEM_CGROUP_STAT_NSTATS,
  92};
  93
  94static const char * const mem_cgroup_stat_names[] = {
  95	"cache",
  96	"rss",
  97	"mapped_file",
  98	"swap",
  99};
 100
 101enum mem_cgroup_events_index {
 102	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
 103	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
 104	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
 105	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
 106	MEM_CGROUP_EVENTS_NSTATS,
 107};
 108
 109static const char * const mem_cgroup_events_names[] = {
 110	"pgpgin",
 111	"pgpgout",
 112	"pgfault",
 113	"pgmajfault",
 114};
 115
 116/*
 117 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 118 * it will be incremented by the number of pages. This counter is used
 119 * to trigger some periodic events. This is straightforward and better
 120 * than using jiffies etc. to handle periodic memcg events.
 121 */
 122enum mem_cgroup_events_target {
 123	MEM_CGROUP_TARGET_THRESH,
 124	MEM_CGROUP_TARGET_SOFTLIMIT,
 125	MEM_CGROUP_TARGET_NUMAINFO,
 126	MEM_CGROUP_NTARGETS,
 127};
 128#define THRESHOLDS_EVENTS_TARGET 128
 129#define SOFTLIMIT_EVENTS_TARGET 1024
 130#define NUMAINFO_EVENTS_TARGET	1024
 131
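/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how the targets above are consumed. Every charge/uncharge adds the number
 * of pages to the per-cpu nr_page_events counter, and a target fires once
 * that counter passes its per-cpu "next" value, roughly:
 *
 *	val  = __this_cpu_read(memcg->stat->nr_page_events);
 *	next = __this_cpu_read(memcg->stat->targets[MEM_CGROUP_TARGET_THRESH]);
 *	if ((long)next - (long)val < 0)			/* target passed */
 *		next = val + THRESHOLDS_EVENTS_TARGET;	/* i.e. ~every 128 pages */
 *
 * See mem_cgroup_event_ratelimit() below for the real implementation. With
 * THP, a single huge-page charge (512 base pages with 4KB pages) advances
 * the counter by 512, so it may cross several threshold windows at once.
 */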
 132struct mem_cgroup_stat_cpu {
 133	long count[MEM_CGROUP_STAT_NSTATS];
 134	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
 135	unsigned long nr_page_events;
 136	unsigned long targets[MEM_CGROUP_NTARGETS];
 137};
 138
 139struct mem_cgroup_reclaim_iter {
 140	/* css_id of the last scanned hierarchy member */
 141	int position;
 142	/* scan generation, increased every round-trip */
 143	unsigned int generation;
 144};
 
 145
 146/*
 147 * per-zone information in memory controller.
 148 */
 149struct mem_cgroup_per_zone {
 150	struct lruvec		lruvec;
 151	unsigned long		lru_size[NR_LRU_LISTS];
 152
 153	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
 154
 155	struct rb_node		tree_node;	/* RB tree node */
 156	unsigned long long	usage_in_excess;/* Set to the value by which */
 157						/* the soft limit is exceeded*/
 158	bool			on_tree;
 159	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
 160						/* use container_of	   */
 161};
 162
 163struct mem_cgroup_per_node {
 164	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
 165};
 166
 167struct mem_cgroup_lru_info {
 168	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
 169};
 170
 171/*
 172 * Cgroups above their limits are maintained in a RB-Tree, independent of
 173 * their hierarchy representation
 174 */
 175
 176struct mem_cgroup_tree_per_zone {
 177	struct rb_root rb_root;
 178	spinlock_t lock;
 179};
 180
 181struct mem_cgroup_tree_per_node {
 182	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
 183};
 184
 185struct mem_cgroup_tree {
 186	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
 187};
 188
 189static struct mem_cgroup_tree soft_limit_tree __read_mostly;
 
 190
 191struct mem_cgroup_threshold {
 192	struct eventfd_ctx *eventfd;
 193	u64 threshold;
 194};
 195
 196/* For threshold */
 197struct mem_cgroup_threshold_ary {
 198	/* An array index points to threshold just below or equal to usage. */
 199	int current_threshold;
 200	/* Size of entries[] */
 201	unsigned int size;
 202	/* Array of thresholds */
 203	struct mem_cgroup_threshold entries[0];
 204};
 205
 206struct mem_cgroup_thresholds {
 207	/* Primary thresholds array */
 208	struct mem_cgroup_threshold_ary *primary;
 209	/*
 210	 * Spare threshold array.
 211	 * This is needed to make mem_cgroup_unregister_event() "never fail".
 212	 * It must be able to store at least primary->size - 1 entries.
 213	 */
 214	struct mem_cgroup_threshold_ary *spare;
 215};
 216
 217/* for OOM */
 218struct mem_cgroup_eventfd_list {
 219	struct list_head list;
 220	struct eventfd_ctx *eventfd;
 221};
 222
 223static void mem_cgroup_threshold(struct mem_cgroup *memcg);
 224static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
 225
 226/*
 227 * The memory controller data structure. The memory controller controls both
 228 * page cache and RSS per cgroup. We would eventually like to provide
 229 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 230 * to help the administrator determine what knobs to tune.
 231 *
 232 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 233 * we hit the water mark. Maybe even add a low water mark, such that
 234 * no reclaim occurs from a cgroup at its low water mark; this is
 235 * a feature that will be implemented much later in the future.
 236 */
 237struct mem_cgroup {
 238	struct cgroup_subsys_state css;
 239	/*
 240	 * the counter to account for memory usage
 241	 */
 242	struct res_counter res;
 243
 244	union {
 245		/*
 246		 * the counter to account for mem+swap usage.
 247		 */
 248		struct res_counter memsw;
 249
 250		/*
 251		 * rcu_freeing is used only when freeing struct mem_cgroup,
 252		 * so put it into a union to avoid wasting more memory.
 253		 * It must be disjoint from the css field.  It could be
 254		 * in a union with the res field, but res plays a much
 255		 * larger part in mem_cgroup life than memsw, and might
 256		 * be of interest, even at time of free, when debugging.
 257		 * So share rcu_head with the less interesting memsw.
 258		 */
 259		struct rcu_head rcu_freeing;
 260		/*
 261		 * We also need some space for a worker in deferred freeing.
 262		 * By the time we call it, rcu_freeing is no longer in use.
 263		 */
 264		struct work_struct work_freeing;
 265	};
 266
 267	/*
 268	 * Per cgroup active and inactive list, similar to the
 269	 * per zone LRU lists.
 270	 */
 271	struct mem_cgroup_lru_info info;
 272	int last_scanned_node;
 273#if MAX_NUMNODES > 1
 274	nodemask_t	scan_nodes;
 275	atomic_t	numainfo_events;
 276	atomic_t	numainfo_updating;
 277#endif
 278	/*
 279	 * Should the accounting and control be hierarchical, per subtree?
 280	 */
 281	bool use_hierarchy;
 282
 283	bool		oom_lock;
 284	atomic_t	under_oom;
 285
 286	atomic_t	refcnt;
 
 287
 288	int	swappiness;
 289	/* OOM-Killer disable */
 290	int		oom_kill_disable;
 291
 292	/* set when res.limit == memsw.limit */
 293	bool		memsw_is_minimum;
 294
 295	/* protect arrays of thresholds */
 296	struct mutex thresholds_lock;
 297
 298	/* thresholds for memory usage. RCU-protected */
 299	struct mem_cgroup_thresholds thresholds;
 300
 301	/* thresholds for mem+swap usage. RCU-protected */
 302	struct mem_cgroup_thresholds memsw_thresholds;
 303
 304	/* For oom notifier event fd */
 305	struct list_head oom_notify;
 306
 307	/*
 308	 * Should we move charges of a task when a task is moved into this
 309	 * mem_cgroup ? And what type of charges should we move ?
 310	 */
 311	unsigned long 	move_charge_at_immigrate;
 312	/*
 313	 * set > 0 if pages under this cgroup are moving to other cgroup.
 314	 */
 315	atomic_t	moving_account;
 316	/* taken only while moving_account > 0 */
 317	spinlock_t	move_lock;
 318	/*
 319	 * percpu counter.
 320	 */
 321	struct mem_cgroup_stat_cpu __percpu *stat;
 322	/*
 323	 * used when a cpu is offlined or other synchronizations
 324	 * See mem_cgroup_read_stat().
 325	 */
 326	struct mem_cgroup_stat_cpu nocpu_base;
 327	spinlock_t pcp_counter_lock;
 328
 329#ifdef CONFIG_INET
 330	struct tcp_memcontrol tcp_mem;
 331#endif
 332};
 333
 334/* Stuffs for move charges at task migration. */
 335/*
 336 * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
 337 * left-shifted bitmap of these types.
 338 */
 339enum move_type {
 340	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
 341	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
 342	NR_MOVE_TYPE,
 343};
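/*
 * Example (illustrative, added by the editor): since move_charge_at_immigrate
 * is a left-shifted bitmap of the types above, the values written by
 * userspace combine as
 *
 *	1 << MOVE_CHARGE_TYPE_ANON			== 1	(anon only)
 *	1 << MOVE_CHARGE_TYPE_FILE			== 2	(file only)
 *	(1 << MOVE_CHARGE_TYPE_ANON) |
 *		(1 << MOVE_CHARGE_TYPE_FILE)		== 3	(both)
 *
 * and move_anon()/move_file() below simply test_bit() the corresponding bit
 * in mc.to->move_charge_at_immigrate.
 */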
 344
 345/* "mc" and its members are protected by cgroup_mutex */
 346static struct move_charge_struct {
 347	spinlock_t	  lock; /* for from, to */
 348	struct mem_cgroup *from;
 349	struct mem_cgroup *to;
 350	unsigned long precharge;
 351	unsigned long moved_charge;
 352	unsigned long moved_swap;
 353	struct task_struct *moving_task;	/* a task moving charges */
 354	wait_queue_head_t waitq;		/* a waitq for other context */
 355} mc = {
 356	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
 357	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
 358};
 359
 360static bool move_anon(void)
 361{
 362	return test_bit(MOVE_CHARGE_TYPE_ANON,
 363					&mc.to->move_charge_at_immigrate);
 364}
 365
 366static bool move_file(void)
 367{
 368	return test_bit(MOVE_CHARGE_TYPE_FILE,
 369					&mc.to->move_charge_at_immigrate);
 370}
 371
 372/*
 373 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 374 * limit reclaim to prevent infinite loops, if they ever occur.
 375 */
 376#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
 377#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
 378
 379enum charge_type {
 380	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
 381	MEM_CGROUP_CHARGE_TYPE_MAPPED,
 382	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
 383	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
 384	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
 385	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
 386	NR_CHARGE_TYPE,
 387};
 388
 389/* for encoding cft->private value on file */
 390#define _MEM			(0)
 391#define _MEMSWAP		(1)
 392#define _OOM_TYPE		(2)
 393#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
 394#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
 395#define MEMFILE_ATTR(val)	((val) & 0xffff)
 396/* Used for OOM notifier */
 397#define OOM_CONTROL		(0)
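/*
 * Example (illustrative, not in the original source): the MEMFILE_* macros
 * above pack a counter type and a res_counter attribute into one
 * cft->private value and unpack it again in the file handlers, e.g.
 *
 *	MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT)	== (1 << 16) | RES_LIMIT
 *	MEMFILE_TYPE((1 << 16) | RES_LIMIT)	== _MEMSWAP
 *	MEMFILE_ATTR((1 << 16) | RES_LIMIT)	== RES_LIMIT
 *
 * so a single read/write handler can tell which counter (memory vs.
 * mem+swap) and which attribute a control file refers to.
 */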
 398
 399/*
 400 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 401 */
 402#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
 403#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
 404#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
 405#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
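/*
 * Usage sketch (editor's illustration; the actual call sites are outside
 * this excerpt): limit-shrinking paths are expected to combine these flags,
 * e.g.
 *
 *	mem_cgroup_reclaim(memcg, gfp_mask,
 *			   MEM_CGROUP_RECLAIM_NOSWAP | MEM_CGROUP_RECLAIM_SHRINK);
 *
 * for a mem+swap limit shrink that must not swap anything out, while a plain
 * memory-limit shrink would pass only MEM_CGROUP_RECLAIM_SHRINK.
 */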
 406
 407static void mem_cgroup_get(struct mem_cgroup *memcg);
 408static void mem_cgroup_put(struct mem_cgroup *memcg);
 409
 410/* Writing them here to avoid exposing memcg's inner layout */
 411#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
 412#include <net/sock.h>
 413#include <net/ip.h>
 414
 415static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
 416void sock_update_memcg(struct sock *sk)
 417{
 418	if (mem_cgroup_sockets_enabled) {
 419		struct mem_cgroup *memcg;
 420		struct cg_proto *cg_proto;
 421
 422		BUG_ON(!sk->sk_prot->proto_cgroup);
 
 423
 424		/* Socket cloning can throw us here with sk_cgrp already
 425		 * filled. It won't, however, necessarily happen from
 426		 * process context. So the test for root memcg given
 427		 * the current task's memcg won't help us in this case.
 428		 *
 429		 * Respecting the original socket's memcg is a better
 430		 * decision in this case.
 431		 */
 432		if (sk->sk_cgrp) {
 433			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
 434			mem_cgroup_get(sk->sk_cgrp->memcg);
 435			return;
 436		}
 437
 438		rcu_read_lock();
 439		memcg = mem_cgroup_from_task(current);
 440		cg_proto = sk->sk_prot->proto_cgroup(memcg);
 441		if (!mem_cgroup_is_root(memcg) && memcg_proto_active(cg_proto)) {
 442			mem_cgroup_get(memcg);
 443			sk->sk_cgrp = cg_proto;
 444		}
 445		rcu_read_unlock();
 446	}
 447}
 448EXPORT_SYMBOL(sock_update_memcg);
 449
 450void sock_release_memcg(struct sock *sk)
 
 451{
 452	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
 453		struct mem_cgroup *memcg;
 454		WARN_ON(!sk->sk_cgrp->memcg);
 455		memcg = sk->sk_cgrp->memcg;
 456		mem_cgroup_put(memcg);
 457	}
 458}
 459
 460#ifdef CONFIG_INET
 461struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
 462{
 463	if (!memcg || mem_cgroup_is_root(memcg))
 464		return NULL;
 465
 466	return &memcg->tcp_mem.cg_proto;
 467}
 468EXPORT_SYMBOL(tcp_proto_cgroup);
 469#endif /* CONFIG_INET */
 470#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
 471
 472#if defined(CONFIG_INET) && defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM)
 473static void disarm_sock_keys(struct mem_cgroup *memcg)
 474{
 475	if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto))
 476		return;
 477	static_key_slow_dec(&memcg_socket_limit_enabled);
 478}
 479#else
 480static void disarm_sock_keys(struct mem_cgroup *memcg)
 481{
 482}
 483#endif
 484
 485static void drain_all_stock_async(struct mem_cgroup *memcg);
 
 486
 487static struct mem_cgroup_per_zone *
 488mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
 489{
 490	return &memcg->info.nodeinfo[nid]->zoneinfo[zid];
 491}
 492
 493struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
 494{
 495	return &memcg->css;
 496}
 497
 498static struct mem_cgroup_per_zone *
 499page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
 500{
 501	int nid = page_to_nid(page);
 502	int zid = page_zonenum(page);
 503
 504	return mem_cgroup_zoneinfo(memcg, nid, zid);
 505}
 506
 507static struct mem_cgroup_tree_per_zone *
 508soft_limit_tree_node_zone(int nid, int zid)
 509{
 510	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
 511}
 512
 513static struct mem_cgroup_tree_per_zone *
 514soft_limit_tree_from_page(struct page *page)
 515{
 516	int nid = page_to_nid(page);
 517	int zid = page_zonenum(page);
 518
 519	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
 520}
 521
 522static void
 523__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
 524				struct mem_cgroup_per_zone *mz,
 525				struct mem_cgroup_tree_per_zone *mctz,
 526				unsigned long long new_usage_in_excess)
 527{
 528	struct rb_node **p = &mctz->rb_root.rb_node;
 529	struct rb_node *parent = NULL;
 530	struct mem_cgroup_per_zone *mz_node;
 531
 532	if (mz->on_tree)
 533		return;
 534
 535	mz->usage_in_excess = new_usage_in_excess;
 536	if (!mz->usage_in_excess)
 537		return;
 538	while (*p) {
 539		parent = *p;
 540		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
 541					tree_node);
 542		if (mz->usage_in_excess < mz_node->usage_in_excess)
 543			p = &(*p)->rb_left;
 544		/*
 545		 * We can't avoid mem cgroups that are over their soft
 546		 * limit by the same amount
 547		 */
 548		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
 549			p = &(*p)->rb_right;
 550	}
 551	rb_link_node(&mz->tree_node, parent, p);
 552	rb_insert_color(&mz->tree_node, &mctz->rb_root);
 553	mz->on_tree = true;
 554}
 555
 556static void
 557__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
 558				struct mem_cgroup_per_zone *mz,
 559				struct mem_cgroup_tree_per_zone *mctz)
 560{
 561	if (!mz->on_tree)
 562		return;
 563	rb_erase(&mz->tree_node, &mctz->rb_root);
 564	mz->on_tree = false;
 565}
 566
 567static void
 568mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
 569				struct mem_cgroup_per_zone *mz,
 570				struct mem_cgroup_tree_per_zone *mctz)
 571{
 572	spin_lock(&mctz->lock);
 573	__mem_cgroup_remove_exceeded(memcg, mz, mctz);
 574	spin_unlock(&mctz->lock);
 575}
 576
 577
 578static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
 579{
 580	unsigned long long excess;
 581	struct mem_cgroup_per_zone *mz;
 582	struct mem_cgroup_tree_per_zone *mctz;
 583	int nid = page_to_nid(page);
 584	int zid = page_zonenum(page);
 585	mctz = soft_limit_tree_from_page(page);
 586
 587	/*
 588	 * Necessary to update all ancestors when hierarchy is used,
 589	 * because their event counter is not touched.
 590	 */
 591	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
 592		mz = mem_cgroup_zoneinfo(memcg, nid, zid);
 593		excess = res_counter_soft_limit_excess(&memcg->res);
 594		/*
 595		 * We have to update the tree if mz is on RB-tree or
 596		 * mem is over its softlimit.
 597		 */
 598		if (excess || mz->on_tree) {
 599			spin_lock(&mctz->lock);
 600			/* if on-tree, remove it */
 601			if (mz->on_tree)
 602				__mem_cgroup_remove_exceeded(memcg, mz, mctz);
 603			/*
 604			 * Insert again. mz->usage_in_excess will be updated.
 605			 * If excess is 0, no tree ops.
 606			 */
 607			__mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
 608			spin_unlock(&mctz->lock);
 609		}
 610	}
 611}
 612
 613static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
 614{
 615	int node, zone;
 616	struct mem_cgroup_per_zone *mz;
 617	struct mem_cgroup_tree_per_zone *mctz;
 618
 619	for_each_node(node) {
 620		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
 621			mz = mem_cgroup_zoneinfo(memcg, node, zone);
 622			mctz = soft_limit_tree_node_zone(node, zone);
 623			mem_cgroup_remove_exceeded(memcg, mz, mctz);
 624		}
 625	}
 626}
 627
 628static struct mem_cgroup_per_zone *
 629__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 630{
 631	struct rb_node *rightmost = NULL;
 632	struct mem_cgroup_per_zone *mz;
 633
 634retry:
 635	mz = NULL;
 636	rightmost = rb_last(&mctz->rb_root);
 637	if (!rightmost)
 638		goto done;		/* Nothing to reclaim from */
 639
 640	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
 641	/*
 642	 * Remove the node now but someone else can add it back,
 643	 * we will add it back at the end of reclaim to its correct
 644	 * position in the tree.
 645	 */
 646	__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
 647	if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
 648		!css_tryget(&mz->memcg->css))
 649		goto retry;
 650done:
 651	return mz;
 652}
 653
 654static struct mem_cgroup_per_zone *
 655mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 656{
 657	struct mem_cgroup_per_zone *mz;
 
 658
 659	spin_lock(&mctz->lock);
 660	mz = __mem_cgroup_largest_soft_limit_node(mctz);
 661	spin_unlock(&mctz->lock);
 662	return mz;
 663}
 664
 665/*
 666 * Implementation Note: reading percpu statistics for memcg.
 667 *
 668 * Both vmstat[] and percpu_counter use thresholds and periodic
 669 * synchronization to implement a "quick" read. There is a trade-off between
 670 * reading cost and precision of the value, so we may eventually implement
 671 * a similar periodic synchronization for memcg's counters.
 672 *
 673 * But this _read() function is currently used for the user interface. Users
 674 * account memory usage per memory cgroup and _always_ require an exact value.
 675 * Even if we provided a quick-and-fuzzy read, we would still have to visit
 676 * all online cpus and compute the sum. So, for now, the unnecessary
 677 * synchronization is not implemented (it is only done for cpu hotplug).
 678 *
 679 * If there are kernel-internal users which could make use of a not-exact
 680 * value, and reading all cpu values becomes a performance bottleneck in some
 681 * common workload, thresholds and synchronization as in vmstat[] should be
 682 * implemented.
 683 */
 684static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
 685				 enum mem_cgroup_stat_index idx)
 686{
 687	long val = 0;
 688	int cpu;
 689
 690	get_online_cpus();
 691	for_each_online_cpu(cpu)
 692		val += per_cpu(memcg->stat->count[idx], cpu);
 693#ifdef CONFIG_HOTPLUG_CPU
 694	spin_lock(&memcg->pcp_counter_lock);
 695	val += memcg->nocpu_base.count[idx];
 696	spin_unlock(&memcg->pcp_counter_lock);
 697#endif
 698	put_online_cpus();
 699	return val;
 700}
 701
 702static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
 703					 bool charge)
 704{
 705	int val = (charge) ? 1 : -1;
 706	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
 707}
 708
 709static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
 710					    enum mem_cgroup_events_index idx)
 711{
 712	unsigned long val = 0;
 713	int cpu;
 714
 715	for_each_online_cpu(cpu)
 716		val += per_cpu(memcg->stat->events[idx], cpu);
 717#ifdef CONFIG_HOTPLUG_CPU
 718	spin_lock(&memcg->pcp_counter_lock);
 719	val += memcg->nocpu_base.events[idx];
 720	spin_unlock(&memcg->pcp_counter_lock);
 721#endif
 722	return val;
 723}
 724
 725static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 726					 bool anon, int nr_pages)
 
 727{
 728	preempt_disable();
 729
 730	/*
 731	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
 732	 * counted as CACHE even if it's on ANON LRU.
 733	 */
 734	if (anon)
 735		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
 736				nr_pages);
 737	else
 738		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
 739				nr_pages);
 740
 741	/* pagein of a big page is an event. So, ignore page size */
 742	if (nr_pages > 0)
 743		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
 744	else {
 745		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
 746		nr_pages = -nr_pages; /* for event */
 747	}
 748
 749	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 750
 751	preempt_enable();
 752}
 753
 754unsigned long
 755mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 756{
 757	struct mem_cgroup_per_zone *mz;
 
 758
 759	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
 760	return mz->lru_size[lru];
 
 761}
 762
 763static unsigned long
 764mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
 765			unsigned int lru_mask)
 766{
 767	struct mem_cgroup_per_zone *mz;
 768	enum lru_list lru;
 769	unsigned long ret = 0;
 770
 771	mz = mem_cgroup_zoneinfo(memcg, nid, zid);
 772
 773	for_each_lru(lru) {
 774		if (BIT(lru) & lru_mask)
 775			ret += mz->lru_size[lru];
 776	}
 777	return ret;
 778}
 
 779
 780static unsigned long
 781mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
 782			int nid, unsigned int lru_mask)
 783{
 784	u64 total = 0;
 785	int zid;
 
 786
 787	for (zid = 0; zid < MAX_NR_ZONES; zid++)
 788		total += mem_cgroup_zone_nr_lru_pages(memcg,
 789						nid, zid, lru_mask);
 790
 791	return total;
 792}
 793
 794static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
 795			unsigned int lru_mask)
 796{
 797	int nid;
 798	u64 total = 0;
 799
 800	for_each_node_state(nid, N_HIGH_MEMORY)
 801		total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
 802	return total;
 803}
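/*
 * Usage example (illustrative, added by the editor): callers pass a bitmask
 * of enum lru_list entries, e.g.
 *
 *	mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE);
 *	mem_cgroup_nr_lru_pages(memcg, LRU_ALL_ANON);
 *
 * as test_mem_cgroup_node_reclaimable() does further down, so one helper can
 * sum an arbitrary subset of the per-zone lru_size[] counters.
 */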
 804
 805static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
 806				       enum mem_cgroup_events_target target)
 807{
 808	unsigned long val, next;
 809
 810	val = __this_cpu_read(memcg->stat->nr_page_events);
 811	next = __this_cpu_read(memcg->stat->targets[target]);
 812	/* from time_after() in jiffies.h */
 813	if ((long)next - (long)val < 0) {
 814		switch (target) {
 815		case MEM_CGROUP_TARGET_THRESH:
 816			next = val + THRESHOLDS_EVENTS_TARGET;
 817			break;
 818		case MEM_CGROUP_TARGET_SOFTLIMIT:
 819			next = val + SOFTLIMIT_EVENTS_TARGET;
 820			break;
 821		case MEM_CGROUP_TARGET_NUMAINFO:
 822			next = val + NUMAINFO_EVENTS_TARGET;
 823			break;
 824		default:
 825			break;
 826		}
 827		__this_cpu_write(memcg->stat->targets[target], next);
 828		return true;
 829	}
 830	return false;
 831}
 832
 833/*
 834 * Check events in order.
 835 *
 836 */
 837static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 838{
 839	preempt_disable();
 840	/* threshold event is triggered in finer grain than soft limit */
 841	if (unlikely(mem_cgroup_event_ratelimit(memcg,
 842						MEM_CGROUP_TARGET_THRESH))) {
 843		bool do_softlimit;
 844		bool do_numainfo __maybe_unused;
 845
 846		do_softlimit = mem_cgroup_event_ratelimit(memcg,
 847						MEM_CGROUP_TARGET_SOFTLIMIT);
 848#if MAX_NUMNODES > 1
 849		do_numainfo = mem_cgroup_event_ratelimit(memcg,
 850						MEM_CGROUP_TARGET_NUMAINFO);
 851#endif
 852		preempt_enable();
 853
 854		mem_cgroup_threshold(memcg);
 855		if (unlikely(do_softlimit))
 856			mem_cgroup_update_tree(memcg, page);
 857#if MAX_NUMNODES > 1
 858		if (unlikely(do_numainfo))
 859			atomic_inc(&memcg->numainfo_events);
 860#endif
 861	} else
 862		preempt_enable();
 863}
 864
 865struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
 866{
 867	return container_of(cgroup_subsys_state(cont,
 868				mem_cgroup_subsys_id), struct mem_cgroup,
 869				css);
 870}
 871
 872struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 873{
 874	/*
 875	 * mm_update_next_owner() may clear mm->owner to NULL
 876	 * if it races with swapoff, page migration, etc.
 877	 * So this can be called with p == NULL.
 878	 */
 879	if (unlikely(!p))
 880		return NULL;
 881
 882	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
 883				struct mem_cgroup, css);
 884}
 885
 886struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
 887{
 888	struct mem_cgroup *memcg = NULL;
 889
 890	if (!mm)
 891		return NULL;
 
 892	/*
 893	 * Because we have no locks, mm->owner may be being moved to another
 894	 * cgroup. We use css_tryget() here even if this looks
 895	 * pessimistic (rather than adding locks here).
 896	 */
 897	rcu_read_lock();
 898	do {
 899		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
 900		if (unlikely(!memcg))
 901			break;
 902	} while (!css_tryget(&memcg->css));
 903	rcu_read_unlock();
 904	return memcg;
 905}
 906
 907/**
 908 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 909 * @root: hierarchy root
 910 * @prev: previously returned memcg, NULL on first invocation
 911 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 912 *
 913 * Returns references to children of the hierarchy below @root, or
 914 * @root itself, or %NULL after a full round-trip.
 915 *
 916 * Caller must pass the return value in @prev on subsequent
 917 * invocations for reference counting, or use mem_cgroup_iter_break()
 918 * to cancel a hierarchy walk before the round-trip is complete.
 919 *
 920 * Reclaimers can specify a zone and a priority level in @reclaim to
 921 * divide up the memcgs in the hierarchy among all concurrent
 922 * reclaimers operating on the same zone and priority.
 923 */
 924struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 925				   struct mem_cgroup *prev,
 926				   struct mem_cgroup_reclaim_cookie *reclaim)
 927{
 928	struct mem_cgroup *memcg = NULL;
 929	int id = 0;
 930
 931	if (mem_cgroup_disabled())
 932		return NULL;
 933
 934	if (!root)
 935		root = root_mem_cgroup;
 936
 937	if (prev && !reclaim)
 938		id = css_id(&prev->css);
 
 939
 940	if (prev && prev != root)
 941		css_put(&prev->css);
 
 942
 943	if (!root->use_hierarchy && root != root_mem_cgroup) {
 944		if (prev)
 945			return NULL;
 946		return root;
 947	}
 948
 949	while (!memcg) {
 950		struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
 951		struct cgroup_subsys_state *css;
 952
 953		if (reclaim) {
 954			int nid = zone_to_nid(reclaim->zone);
 955			int zid = zone_idx(reclaim->zone);
 956			struct mem_cgroup_per_zone *mz;
 957
 958			mz = mem_cgroup_zoneinfo(root, nid, zid);
 959			iter = &mz->reclaim_iter[reclaim->priority];
 960			if (prev && reclaim->generation != iter->generation)
 961				return NULL;
 962			id = iter->position;
 963		}
 964
 965		rcu_read_lock();
 966		css = css_get_next(&mem_cgroup_subsys, id + 1, &root->css, &id);
 967		if (css) {
 968			if (css == &root->css || css_tryget(css))
 969				memcg = container_of(css,
 970						     struct mem_cgroup, css);
 971		} else
 972			id = 0;
 973		rcu_read_unlock();
 974
 975		if (reclaim) {
 976			iter->position = id;
 977			if (!css)
 978				iter->generation++;
 979			else if (!prev && memcg)
 980				reclaim->generation = iter->generation;
 981		}
 982
 983		if (prev && !css)
 984			return NULL;
 985	}
 986	return memcg;
 987}
 988
 989/**
 990 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 991 * @root: hierarchy root
 992 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 993 */
 994void mem_cgroup_iter_break(struct mem_cgroup *root,
 995			   struct mem_cgroup *prev)
 996{
 997	if (!root)
 998		root = root_mem_cgroup;
 999	if (prev && prev != root)
1000		css_put(&prev->css);
1001}
1002
1003/*
1004 * Iteration constructs for visiting all cgroups (under a tree).  If
1005 * loops are exited prematurely (break), mem_cgroup_iter_break() must
1006 * be used for reference counting.
1007 */
1008#define for_each_mem_cgroup_tree(iter, root)		\
1009	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
1010	     iter != NULL;				\
1011	     iter = mem_cgroup_iter(root, iter, NULL))
1012
1013#define for_each_mem_cgroup(iter)			\
1014	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
1015	     iter != NULL;				\
1016	     iter = mem_cgroup_iter(NULL, iter, NULL))
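/*
 * Example (illustrative, not from the original file): a walk that bails out
 * early must hand the last-returned memcg to mem_cgroup_iter_break() so its
 * css reference is dropped, as mem_cgroup_oom_lock() does below:
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (should_stop(iter)) {	/* hypothetical predicate */
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */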
1017
1018static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
1019{
1020	return (memcg == root_mem_cgroup);
1021}
1022
1023void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
1024{
1025	struct mem_cgroup *memcg;
1026
1027	if (!mm)
1028		return;
1029
1030	rcu_read_lock();
1031	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1032	if (unlikely(!memcg))
1033		goto out;
1034
1035	switch (idx) {
1036	case PGFAULT:
1037		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
1038		break;
1039	case PGMAJFAULT:
1040		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
1041		break;
1042	default:
1043		BUG();
1044	}
1045out:
1046	rcu_read_unlock();
1047}
1048EXPORT_SYMBOL(mem_cgroup_count_vm_event);
1049
1050/**
1051 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
1052 * @zone: zone of the wanted lruvec
1053 * @memcg: memcg of the wanted lruvec
1054 *
1055 * Returns the lru list vector holding pages for the given @zone and
1056 * @mem.  This can be the global zone lruvec, if the memory controller
1057 * is disabled.
1058 */
1059struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
1060				      struct mem_cgroup *memcg)
1061{
1062	struct mem_cgroup_per_zone *mz;
1063
1064	if (mem_cgroup_disabled())
1065		return &zone->lruvec;
1066
1067	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
1068	return &mz->lruvec;
1069}
 
1070
1071/*
1072 * Following LRU functions are allowed to be used without PCG_LOCK.
1073 * Operations are called by routine of global LRU independently from memcg.
1074 * What we have to take care of here is validness of pc->mem_cgroup.
1075 *
1076 * Changes to pc->mem_cgroup happens when
1077 * 1. charge
1078 * 2. moving account
1079 * In typical case, "charge" is done before add-to-lru. Exception is SwapCache.
1080 * It is added to LRU before charge.
1081 * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
1082 * When moving account, the page is not on LRU. It's isolated.
1083 */
1084
1085/**
1086 * mem_cgroup_page_lruvec - return lruvec for adding an lru page
1087 * @page: the page
1088 * @zone: zone of the page
1089 */
1090struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
1091{
1092	struct mem_cgroup_per_zone *mz;
1093	struct mem_cgroup *memcg;
1094	struct page_cgroup *pc;
1095
1096	if (mem_cgroup_disabled())
1097		return &zone->lruvec;
1098
1099	pc = lookup_page_cgroup(page);
1100	memcg = pc->mem_cgroup;
1101
1102	/*
1103	 * Surreptitiously switch any uncharged offlist page to root:
1104	 * an uncharged page off lru does nothing to secure
1105	 * its former mem_cgroup from sudden removal.
1106	 *
1107	 * Our caller holds lru_lock, and PageCgroupUsed is updated
1108	 * under page_cgroup lock: between them, they make all uses
1109	 * of pc->mem_cgroup safe.
1110	 */
1111	if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
1112		pc->mem_cgroup = memcg = root_mem_cgroup;
1113
1114	mz = page_cgroup_zoneinfo(memcg, page);
1115	return &mz->lruvec;
1116}
1117
1118/**
1119 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1120 * @lruvec: mem_cgroup per zone lru vector
1121 * @lru: index of lru list the page is sitting on
 
1122 * @nr_pages: positive when adding or negative when removing
1123 *
1124 * This function must be called when a page is added to or removed from an
1125 * lru list.
1126 */
1127void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1128				int nr_pages)
1129{
1130	struct mem_cgroup_per_zone *mz;
1131	unsigned long *lru_size;
 
1132
1133	if (mem_cgroup_disabled())
1134		return;
1135
1136	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1137	lru_size = mz->lru_size + lru;
1138	*lru_size += nr_pages;
1139	VM_BUG_ON((long)(*lru_size) < 0);
1140}
1141
1142/*
1143 * Checks whether given mem is same or in the root_mem_cgroup's
1144 * hierarchy subtree
1145 */
1146bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1147				  struct mem_cgroup *memcg)
1148{
1149	if (root_memcg == memcg)
1150		return true;
1151	if (!root_memcg->use_hierarchy || !memcg)
1152		return false;
1153	return css_is_ancestor(&memcg->css, &root_memcg->css);
1154}
1155
1156static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1157				       struct mem_cgroup *memcg)
1158{
1159	bool ret;
1160
1161	rcu_read_lock();
1162	ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
1163	rcu_read_unlock();
1164	return ret;
1165}
1166
1167int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
1168{
1169	int ret;
1170	struct mem_cgroup *curr = NULL;
1171	struct task_struct *p;
1172
1173	p = find_lock_task_mm(task);
1174	if (p) {
1175		curr = try_get_mem_cgroup_from_mm(p->mm);
1176		task_unlock(p);
1177	} else {
1178		/*
1179		 * All threads may have already detached their mm's, but the oom
1180		 * killer still needs to detect if they have already been oom
1181		 * killed to prevent needlessly killing additional tasks.
1182		 */
1183		task_lock(task);
1184		curr = mem_cgroup_from_task(task);
1185		if (curr)
1186			css_get(&curr->css);
1187		task_unlock(task);
1188	}
1189	if (!curr)
1190		return 0;
1191	/*
1192	 * We should check use_hierarchy of "memcg", not "curr", because checking
1193	 * use_hierarchy of "curr" here would make this function return true if
1194	 * hierarchy is enabled in "curr" and "curr" is a child of "memcg" in the
1195	 * *cgroup* hierarchy (even if use_hierarchy is disabled in "memcg").
1196	 */
1197	ret = mem_cgroup_same_or_subtree(memcg, curr);
1198	css_put(&curr->css);
1199	return ret;
1200}
1201
1202int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
1203{
1204	unsigned long inactive_ratio;
1205	unsigned long inactive;
1206	unsigned long active;
1207	unsigned long gb;
1208
1209	inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
1210	active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
1211
1212	gb = (inactive + active) >> (30 - PAGE_SHIFT);
1213	if (gb)
1214		inactive_ratio = int_sqrt(10 * gb);
1215	else
1216		inactive_ratio = 1;
1217
1218	return inactive * inactive_ratio < active;
1219}
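/*
 * Worked example (editor's illustration): with 4GB of anon pages on the LRUs
 * (and 4KB pages), gb = 4 and inactive_ratio = int_sqrt(40) = 6, so the
 * function above returns true, i.e. "the inactive anon list is low", once
 * inactive * 6 < active.
 */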
1220
1221int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
1222{
1223	unsigned long active;
1224	unsigned long inactive;
1225
1226	inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_FILE);
1227	active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_FILE);
1228
1229	return (active > inactive);
1230}
1231
1232#define mem_cgroup_from_res_counter(counter, member)	\
1233	container_of(counter, struct mem_cgroup, member)
1234
1235/**
1236 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1237 * @memcg: the memory cgroup
1238 *
1239 * Returns the maximum amount of memory @mem can be charged with, in
1240 * pages.
1241 */
1242static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1243{
1244	unsigned long long margin;
1245
1246	margin = res_counter_margin(&memcg->res);
1247	if (do_swap_account)
1248		margin = min(margin, res_counter_margin(&memcg->memsw));
1249	return margin >> PAGE_SHIFT;
1250}
1251
1252int mem_cgroup_swappiness(struct mem_cgroup *memcg)
1253{
1254	struct cgroup *cgrp = memcg->css.cgroup;
1255
1256	/* root ? */
1257	if (cgrp->parent == NULL)
1258		return vm_swappiness;
1259
1260	return memcg->swappiness;
1261}
1262
1263/*
1264 * memcg->moving_account is used to check the possibility that some thread is
1265 * calling move_account(). When a thread on CPU-A starts moving pages under
1266 * a memcg, other threads should check memcg->moving_account under
1267 * rcu_read_lock(), like this:
1268 *
1269 *         CPU-A                                    CPU-B
1270 *                                              rcu_read_lock()
1271 *         memcg->moving_account+1              if (memcg->moving_account)
1272 *                                                   take heavy locks.
1273 *         synchronize_rcu()                    update something.
1274 *                                              rcu_read_unlock()
1275 *         start move here.
1276 */
1277
1278/* for quick checking without looking up memcg */
1279atomic_t memcg_moving __read_mostly;
1280
1281static void mem_cgroup_start_move(struct mem_cgroup *memcg)
 
1282{
1283	atomic_inc(&memcg_moving);
1284	atomic_inc(&memcg->moving_account);
1285	synchronize_rcu();
1286}
1287
1288static void mem_cgroup_end_move(struct mem_cgroup *memcg)
 
1289{
1290	/*
1291	 * Now, mem_cgroup_clear_mc() may call this function with NULL.
1292	 * We check NULL in callee rather than caller.
1293	 */
1294	if (memcg) {
1295		atomic_dec(&memcg_moving);
1296		atomic_dec(&memcg->moving_account);
1297	}
1298}
1299
1300/*
1301 * 2 routines for checking whether "mem" is under move_account() or not.
1302 *
1303 * mem_cgroup_stolen() -  checking whether a cgroup is mc.from or not. This
1304 *			  is used for avoiding races in accounting.  If true,
1305 *			  pc->mem_cgroup may be overwritten.
1306 *
1307 * mem_cgroup_under_move() - checking whether a cgroup is mc.from or mc.to or
1308 *			  under the hierarchy of moving cgroups. This is for
1309 *			  waiting at high memory pressure caused by "move".
1310 */
1311
1312static bool mem_cgroup_stolen(struct mem_cgroup *memcg)
1313{
1314	VM_BUG_ON(!rcu_read_lock_held());
1315	return atomic_read(&memcg->moving_account) > 0;
1316}
1317
1318static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1319{
1320	struct mem_cgroup *from;
1321	struct mem_cgroup *to;
1322	bool ret = false;
1323	/*
1324	 * Unlike task_move routines, we access mc.to, mc.from not under
1325	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1326	 */
1327	spin_lock(&mc.lock);
1328	from = mc.from;
1329	to = mc.to;
1330	if (!from)
1331		goto unlock;
1332
1333	ret = mem_cgroup_same_or_subtree(memcg, from)
1334		|| mem_cgroup_same_or_subtree(memcg, to);
1335unlock:
1336	spin_unlock(&mc.lock);
1337	return ret;
1338}
1339
1340static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1341{
1342	if (mc.moving_task && current != mc.moving_task) {
1343		if (mem_cgroup_under_move(memcg)) {
1344			DEFINE_WAIT(wait);
1345			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1346			/* moving charge context might have finished. */
1347			if (mc.moving_task)
1348				schedule();
1349			finish_wait(&mc.waitq, &wait);
1350			return true;
 
1351		}
1352	}
1353	return false;
1354}
1355
1356/*
1357 * Take this lock when
1358 * - a code tries to modify page's memcg while it's USED.
1359 * - a code tries to modify page state accounting in a memcg.
1360 * see mem_cgroup_stolen(), too.
1361 */
1362static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
1363				  unsigned long *flags)
1364{
1365	spin_lock_irqsave(&memcg->move_lock, *flags);
1366}
1367
1368static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
1369				unsigned long *flags)
1370{
1371	spin_unlock_irqrestore(&memcg->move_lock, *flags);
1372}
1373
1374/**
1375 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
 
1376 * @memcg: The memory cgroup that went over limit
1377 * @p: Task that is going to be killed
1378 *
1379 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1380 * enabled
1381 */
1382void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1383{
1384	struct cgroup *task_cgrp;
1385	struct cgroup *mem_cgrp;
1386	/*
1387	 * Need a buffer in BSS, can't rely on allocations. The code relies
1388	 * on the assumption that OOM is serialized for memory controller.
1389	 * If this assumption is broken, revisit this code.
1390	 */
1391	static char memcg_name[PATH_MAX];
1392	int ret;
1393
1394	if (!memcg || !p)
1395		return;
1396
1397	rcu_read_lock();
1398
1399	mem_cgrp = memcg->css.cgroup;
1400	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
1401
1402	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
1403	if (ret < 0) {
1404		/*
1405		 * Unfortunately, we are unable to convert to a useful name,
1406		 * but we'll still print out the usage information.
1407		 */
1408		rcu_read_unlock();
1409		goto done;
1410	}
1411	rcu_read_unlock();
1412
1413	printk(KERN_INFO "Task in %s killed", memcg_name);
1414
1415	rcu_read_lock();
1416	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
1417	if (ret < 0) {
1418		rcu_read_unlock();
1419		goto done;
1420	}
1421	rcu_read_unlock();
1422
1423	/*
1424	 * Continues from above, so we don't need a KERN_ level
1425	 */
1426	printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
1427done:
1428
1429	printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
1430		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1431		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1432		res_counter_read_u64(&memcg->res, RES_FAILCNT));
1433	printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
1434		"failcnt %llu\n",
1435		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1436		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1437		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1438}
1439
1440/*
1441 * This function returns the number of memcgs under the hierarchy tree.
1442 * Returns 1 (self count) if there are no children.
 
1443 */
1444static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1445{
1446	int num = 0;
1447	struct mem_cgroup *iter;
1448
1449	for_each_mem_cgroup_tree(iter, memcg)
1450		num++;
1451	return num;
1452}
1453
1454/*
1455 * Return the memory (and swap, if configured) limit for a memcg.
1456 */
1457u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
1458{
1459	u64 limit;
1460	u64 memsw;
1461
1462	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1463	limit += total_swap_pages << PAGE_SHIFT;
1464
1465	memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1466	/*
1467	 * If memsw is finite and limits the amount of swap space available
1468	 * to this memcg, return that limit.
1469	 */
1470	return min(limit, memsw);
1471}
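/*
 * Worked example (editor's illustration): with a 500MB memory limit, a 700MB
 * mem+swap limit and 4GB of swap configured system-wide,
 * limit = 500MB + 4GB = 4.5GB and memsw = 700MB, so the function above
 * returns min(4.5GB, 700MB) = 700MB.
 */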
1472
1473static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
1474					gfp_t gfp_mask,
1475					unsigned long flags)
1476{
1477	unsigned long total = 0;
1478	bool noswap = false;
1479	int loop;
1480
1481	if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
1482		noswap = true;
1483	if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
1484		noswap = true;
1485
1486	for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
1487		if (loop)
1488			drain_all_stock_async(memcg);
1489		total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
1490		/*
1491		 * Allow limit shrinkers, which are triggered directly
1492		 * by userspace, to catch signals and stop reclaim
1493		 * after minimal progress, regardless of the margin.
1494		 */
1495		if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
1496			break;
1497		if (mem_cgroup_margin(memcg))
1498			break;
1499		/*
1500		 * If nothing was reclaimed after two attempts, there
1501		 * may be no reclaimable pages in this hierarchy.
1502		 */
1503		if (loop && !total)
1504			break;
1505	}
1506	return total;
1507}
1508
1509/**
1510 * test_mem_cgroup_node_reclaimable
1511 * @memcg: the target memcg
1512 * @nid: the node ID to be checked.
1513 * @noswap : specify true here if the user wants file only information.
1514 *
1515 * This function returns whether the specified memcg contains any
1516 * reclaimable pages on a node. Returns true if there are any reclaimable
1517 * pages in the node.
1518 */
1519static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1520		int nid, bool noswap)
1521{
1522	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1523		return true;
1524	if (noswap || !total_swap_pages)
1525		return false;
1526	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1527		return true;
1528	return false;
1529
1530}
1531#if MAX_NUMNODES > 1
1532
1533/*
1534 * Always updating the nodemask is not very good - even if we have an empty
1535 * list or the wrong list here, we can start from some node and traverse all
1536 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1537 *
1538 */
1539static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1540{
1541	int nid;
1542	/*
1543	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1544	 * pagein/pageout changes since the last update.
1545	 */
1546	if (!atomic_read(&memcg->numainfo_events))
1547		return;
1548	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1549		return;
1550
1551	/* make a nodemask where this memcg uses memory from */
1552	memcg->scan_nodes = node_states[N_HIGH_MEMORY];
1553
1554	for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {
1555
1556		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1557			node_clear(nid, memcg->scan_nodes);
1558	}
1559
1560	atomic_set(&memcg->numainfo_events, 0);
1561	atomic_set(&memcg->numainfo_updating, 0);
1562}
1563
1564/*
1565 * Selecting a node where we start reclaim from. Because what we need is just
1566 * reducing the usage counter, starting from anywhere is OK. Reclaiming from
1567 * the current node has both pros and cons.
1568 *
1569 * Freeing memory from the current node means freeing memory from a node which
1570 * we'll use or have used, so it may hurt the LRU. And if several threads
1571 * hit their limits, they will all contend on that node. But freeing from a
1572 * remote node means higher memory-reclaim costs because of memory latency.
1573 *
1574 * For now, we use round-robin. A better algorithm is welcome.
1575 */
1576int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
 
1577{
1578	int node;
 
1579
1580	mem_cgroup_may_update_nodemask(memcg);
1581	node = memcg->last_scanned_node;
1582
1583	node = next_node(node, memcg->scan_nodes);
1584	if (node == MAX_NUMNODES)
1585		node = first_node(memcg->scan_nodes);
1586	/*
1587	 * We call this when we hit limit, not when pages are added to LRU.
1588	 * No LRU may hold pages because all pages are UNEVICTABLE or
1589	 * memcg is too small and all pages are not on LRU. In that case,
1590	 * we use the current node.
1591	 */
1592	if (unlikely(node == MAX_NUMNODES))
1593		node = numa_node_id();
1594
1595	memcg->last_scanned_node = node;
1596	return node;
1597}
1598
1599/*
1600 * Check all nodes whether it contains reclaimable pages or not.
1601 * For quick scan, we make use of scan_nodes. This will allow us to skip
1602 * unused nodes. But scan_nodes is lazily updated and may not contain
1603 * enough new information. We need to do double check.
1604 */
1605static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1606{
1607	int nid;
1608
1609	/*
1610	 * quick check...making use of scan_node.
1611	 * We can skip unused nodes.
 
1612	 */
1613	if (!nodes_empty(memcg->scan_nodes)) {
1614		for (nid = first_node(memcg->scan_nodes);
1615		     nid < MAX_NUMNODES;
1616		     nid = next_node(nid, memcg->scan_nodes)) {
1617
1618			if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1619				return true;
1620		}
1621	}
1622	/*
1623	 * Check rest of nodes.
1624	 */
1625	for_each_node_state(nid, N_HIGH_MEMORY) {
1626		if (node_isset(nid, memcg->scan_nodes))
1627			continue;
1628		if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1629			return true;
 
1630	}
1631	return false;
1632}
1633
1634#else
1635int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1636{
1637	return 0;
1638}
1639
1640static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1641{
1642	return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
1643}
1644#endif
1645
1646static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1647				   struct zone *zone,
1648				   gfp_t gfp_mask,
1649				   unsigned long *total_scanned)
1650{
1651	struct mem_cgroup *victim = NULL;
1652	int total = 0;
1653	int loop = 0;
1654	unsigned long excess;
1655	unsigned long nr_scanned;
1656	struct mem_cgroup_reclaim_cookie reclaim = {
1657		.zone = zone,
1658		.priority = 0,
1659	};
1660
1661	excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
1662
1663	while (1) {
1664		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1665		if (!victim) {
1666			loop++;
1667			if (loop >= 2) {
1668				/*
1669				 * If we have not been able to reclaim
1670				 * anything, it might be because there are
1671				 * no reclaimable pages under this hierarchy
1672				 */
1673				if (!total)
1674					break;
1675				/*
1676				 * We want to do more targeted reclaim.
1677				 * excess >> 2 is not too large, so we don't
1678				 * reclaim too much, nor too small, so we don't
1679				 * keep coming back to reclaim from this cgroup
1680				 */
1681				if (total >= (excess >> 2) ||
1682					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1683					break;
1684			}
1685			continue;
1686		}
1687		if (!mem_cgroup_reclaimable(victim, false))
1688			continue;
1689		total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
1690						     zone, &nr_scanned);
1691		*total_scanned += nr_scanned;
1692		if (!res_counter_soft_limit_excess(&root_memcg->res))
1693			break;
1694	}
1695	mem_cgroup_iter_break(root_memcg, victim);
1696	return total;
1697}
1698
1699/*
1700 * Check whether the OOM killer is already running under our hierarchy.
1701 * If someone is running, return false.
1702 * Has to be called with memcg_oom_lock held.
1703 */
1704static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
1705{
1706	struct mem_cgroup *iter, *failed = NULL;
1707
1708	for_each_mem_cgroup_tree(iter, memcg) {
1709		if (iter->oom_lock) {
1710			/*
1711			 * this subtree of our hierarchy is already locked
1712			 * so we cannot give a lock.
1713			 */
1714			failed = iter;
1715			mem_cgroup_iter_break(memcg, iter);
1716			break;
1717		} else
1718			iter->oom_lock = true;
1719	}
1720
1721	if (!failed)
1722		return true;
1723
1724	/*
1725	 * OK, we failed to lock the whole subtree so we have to clean up
1726	 * what we set up to the failing subtree
1727	 */
1728	for_each_mem_cgroup_tree(iter, memcg) {
1729		if (iter == failed) {
1730			mem_cgroup_iter_break(memcg, iter);
1731			break;
1732		}
1733		iter->oom_lock = false;
1734	}
1735	return false;
1736}
1737
1738/*
1739 * Has to be called with memcg_oom_lock held.
1740 */
1741static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1742{
1743	struct mem_cgroup *iter;
 
1744
1745	for_each_mem_cgroup_tree(iter, memcg)
1746		iter->oom_lock = false;
1747	return 0;
1748}
1749
1750static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1751{
1752	struct mem_cgroup *iter;
1753
1754	for_each_mem_cgroup_tree(iter, memcg)
1755		atomic_inc(&iter->under_oom);
1756}
 
1757
1758static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1759{
1760	struct mem_cgroup *iter;
1761
1762	/*
1763	 * When a new child is created while the hierarchy is under oom,
1764	 * mem_cgroup_oom_lock() may not be called. We have to use
1765	 * atomic_add_unless() here.
1766	 */
1767	for_each_mem_cgroup_tree(iter, memcg)
1768		atomic_add_unless(&iter->under_oom, -1, 0);
1769}
1770
1771static DEFINE_SPINLOCK(memcg_oom_lock);
1772static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1773
1774struct oom_wait_info {
1775	struct mem_cgroup *memcg;
1776	wait_queue_t	wait;
1777};
1778
1779static int memcg_oom_wake_function(wait_queue_t *wait,
1780	unsigned mode, int sync, void *arg)
1781{
1782	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1783	struct mem_cgroup *oom_wait_memcg;
1784	struct oom_wait_info *oom_wait_info;
1785
1786	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1787	oom_wait_memcg = oom_wait_info->memcg;
1788
1789	/*
1790	 * Both of oom_wait_info->memcg and wake_memcg are stable under us.
1791	 * Then we can use css_is_ancestor without taking care of RCU.
 
1792	 */
1793	if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
1794		&& !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
1795		return 0;
1796	return autoremove_wake_function(wait, mode, sync, arg);
1797}
1798
1799static void memcg_wakeup_oom(struct mem_cgroup *memcg)
1800{
1801	/* for filtering, pass "memcg" as argument. */
1802	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1803}
1804
1805static void memcg_oom_recover(struct mem_cgroup *memcg)
1806{
1807	if (memcg && atomic_read(&memcg->under_oom))
1808		memcg_wakeup_oom(memcg);
1809}
1810
1811/*
1812 * Try to invoke the OOM killer. Returns false if we should exit the memory-reclaim loop.
 
1813 */
1814static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
1815				  int order)
1816{
1817	struct oom_wait_info owait;
1818	bool locked, need_to_kill;
1819
1820	owait.memcg = memcg;
1821	owait.wait.flags = 0;
1822	owait.wait.func = memcg_oom_wake_function;
1823	owait.wait.private = current;
1824	INIT_LIST_HEAD(&owait.wait.task_list);
1825	need_to_kill = true;
1826	mem_cgroup_mark_under_oom(memcg);
1827
1828	/* At first, try to OOM lock hierarchy under memcg.*/
1829	spin_lock(&memcg_oom_lock);
1830	locked = mem_cgroup_oom_lock(memcg);
1831	/*
1832	 * Even if signal_pending(), we can't quit charge() loop without
1833	 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
1834	 * under OOM is always welcomed, use TASK_KILLABLE here.
1835	 */
1836	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1837	if (!locked || memcg->oom_kill_disable)
1838		need_to_kill = false;
1839	if (locked)
1840		mem_cgroup_oom_notify(memcg);
1841	spin_unlock(&memcg_oom_lock);
1842
1843	if (need_to_kill) {
1844		finish_wait(&memcg_oom_waitq, &owait.wait);
1845		mem_cgroup_out_of_memory(memcg, mask, order);
1846	} else {
1847		schedule();
1848		finish_wait(&memcg_oom_waitq, &owait.wait);
1849	}
1850	spin_lock(&memcg_oom_lock);
1851	if (locked)
1852		mem_cgroup_oom_unlock(memcg);
1853	memcg_wakeup_oom(memcg);
1854	spin_unlock(&memcg_oom_lock);
1855
1856	mem_cgroup_unmark_under_oom(memcg);
 
 
1857
1858	if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
1859		return false;
1860	/* Give chance to dying process */
1861	schedule_timeout_uninterruptible(1);
1862	return true;
 
 
1863}
1864
1865/*
1866 * Currently used to update mapped file statistics, but the routine can be
1867 * generalized to update other statistics as well.
1868 *
1869 * Notes: Race condition
1870 *
1871 * We usually use page_cgroup_lock() for accessing page_cgroup members, but
1872 * it tends to be costly. Under some conditions, however, we don't need
1873 * to do so _always_.
1874 *
1875 * Considering "charge", lock_page_cgroup() is not required because all
1876 * file-stat operations happen after a page is attached to the radix-tree.
1877 * There is no race with "charge".
1878 *
1879 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
1880 * at "uncharge" intentionally. So, we always see a valid pc->mem_cgroup even
1881 * if there is a race with "uncharge". The statistics themselves are properly
1882 * handled by flags.
1883 *
1884 * Considering "move", this is the only case where we see a race. To keep the
1885 * race window small, we check mm->moving_account to detect the possibility of
1886 * a race; if there is one, we take a lock.
1887 */
1888
1889void __mem_cgroup_begin_update_page_stat(struct page *page,
1890				bool *locked, unsigned long *flags)
1891{
1892	struct mem_cgroup *memcg;
1893	struct page_cgroup *pc;
1894
1895	pc = lookup_page_cgroup(page);
1896again:
1897	memcg = pc->mem_cgroup;
1898	if (unlikely(!memcg || !PageCgroupUsed(pc)))
1899		return;
1900	/*
1901	 * If this memory cgroup is not under account moving, we don't
1902	 * need to take move_lock_page_cgroup(). Because we already hold
1903	 * rcu_read_lock(), any calls to move_account will be delayed until
1904	 * rcu_read_unlock() if mem_cgroup_stolen() == true.
1905	 */
1906	if (!mem_cgroup_stolen(memcg))
1907		return;
 
 
 
 
1908
1909	move_lock_mem_cgroup(memcg, flags);
1910	if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
1911		move_unlock_mem_cgroup(memcg, flags);
1912		goto again;
1913	}
1914	*locked = true;
 
1915}
1916
1917void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
1918{
1919	struct page_cgroup *pc = lookup_page_cgroup(page);
1920
1921	/*
1922	 * It's guaranteed that pc->mem_cgroup never changes while
1923	 * lock is held because a routine modifies pc->mem_cgroup
1924	 * should take move_lock_page_cgroup().
1925	 */
1926	move_unlock_mem_cgroup(pc->mem_cgroup, flags);
1927}
1928
1929void mem_cgroup_update_page_stat(struct page *page,
1930				 enum mem_cgroup_page_stat_item idx, int val)
 
1931{
1932	struct mem_cgroup *memcg;
1933	struct page_cgroup *pc = lookup_page_cgroup(page);
1934	unsigned long uninitialized_var(flags);
1935
1936	if (mem_cgroup_disabled())
1937		return;
1938
1939	memcg = pc->mem_cgroup;
1940	if (unlikely(!memcg || !PageCgroupUsed(pc)))
1941		return;
1942
1943	switch (idx) {
1944	case MEMCG_NR_FILE_MAPPED:
1945		idx = MEM_CGROUP_STAT_FILE_MAPPED;
1946		break;
1947	default:
1948		BUG();
1949	}
1950
1951	this_cpu_add(memcg->stat->count[idx], val);
1952}
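/*
 * Editor's sketch (not part of memcontrol.c): how a page-stat updater is
 * expected to pair the three helpers above.  Real callers (e.g. rmap) go
 * through wrappers in memcontrol.h; the direct calls, the rcu locking and
 * the local variable names below are assumptions for illustration only.
 */
#if 0	/* illustrative only */
	bool locked = false;
	unsigned long flags;

	rcu_read_lock();
	__mem_cgroup_begin_update_page_stat(page, &locked, &flags);
	mem_cgroup_update_page_stat(page, MEMCG_NR_FILE_MAPPED, 1);
	if (locked)
		__mem_cgroup_end_update_page_stat(page, &flags);
	rcu_read_unlock();
#endif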
 
 
 
 
 
 
1953
1954/*
1955 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
1956 * TODO: it may be necessary to use bigger numbers on big-iron machines.
1957 */
1958#define CHARGE_BATCH	32U
1959struct memcg_stock_pcp {
1960	struct mem_cgroup *cached; /* this is never the root cgroup */
1961	unsigned int nr_pages;
1962	struct work_struct work;
1963	unsigned long flags;
1964#define FLUSHING_CACHED_CHARGE	0
1965};
1966static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1967static DEFINE_MUTEX(percpu_charge_mutex);
1968
1969/*
1970 * Try to consume stocked charge on this cpu. On success, one page is consumed
1971 * from the local stock and true is returned. If the stock is empty or holds
1972 * charges from a cgroup other than the current target, false is returned.
1973 * The stock will be refilled later.
1974 */
1975static bool consume_stock(struct mem_cgroup *memcg)
1976{
1977	struct memcg_stock_pcp *stock;
1978	bool ret = true;
1979
1980	stock = &get_cpu_var(memcg_stock);
1981	if (memcg == stock->cached && stock->nr_pages)
1982		stock->nr_pages--;
1983	else /* need to call res_counter_charge */
1984		ret = false;
1985	put_cpu_var(memcg_stock);
1986	return ret;
1987}
1988
1989/*
1990 * Return the per-cpu cached stock to the res_counter and reset the cached information.
1991 */
1992static void drain_stock(struct memcg_stock_pcp *stock)
 
 
 
1993{
1994	struct mem_cgroup *old = stock->cached;
1995
1996	if (stock->nr_pages) {
1997		unsigned long bytes = stock->nr_pages * PAGE_SIZE;
1998
1999		res_counter_uncharge(&old->res, bytes);
2000		if (do_swap_account)
2001			res_counter_uncharge(&old->memsw, bytes);
2002		stock->nr_pages = 0;
2003	}
2004	stock->cached = NULL;
2005}
2006
2007/*
2008 * This must be called under preempt disabled or must be called by
2009 * a thread which is pinned to local cpu.
2010 */
2011static void drain_local_stock(struct work_struct *dummy)
2012{
2013	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
2014	drain_stock(stock);
2015	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2016}
2017
2018/*
2019 * Cache charges (nr_pages) obtained from the res_counter in the local per-cpu
2020 * area. They will be consumed by the consume_stock() function later.
2021 */
2022static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2023{
2024	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2025
2026	if (stock->cached != memcg) { /* reset if necessary */
2027		drain_stock(stock);
2028		stock->cached = memcg;
2029	}
2030	stock->nr_pages += nr_pages;
2031	put_cpu_var(memcg_stock);
 
 
2032}
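/*
 * Editor's sketch (not part of memcontrol.c): the intended life cycle of
 * the per-cpu stock above, mirroring the charge path in
 * __mem_cgroup_try_charge() further below.  "nr_pages", "batch" and the
 * "done" label are assumptions for illustration only.
 */
#if 0	/* illustrative only */
	if (nr_pages == 1 && consume_stock(memcg))
		goto done;		/* paid from this cpu's cached charge */
	/* ...otherwise charge a full CHARGE_BATCH from the res_counter... */
	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);	/* keep the surplus */
#endif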
2033
2034/*
2035 * Drains all per-CPU charge caches for the given root_memcg and the subtree
2036 * of the hierarchy under it. The sync flag says whether we should block
2037 * until the work is done.
2038 */
2039static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
2040{
2041	int cpu, curcpu;
2042
2043	/* Notify other cpus that system-wide "drain" is running */
2044	get_online_cpus();
2045	curcpu = get_cpu();
2046	for_each_online_cpu(cpu) {
2047		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2048		struct mem_cgroup *memcg;
2049
2050		memcg = stock->cached;
2051		if (!memcg || !stock->nr_pages)
2052			continue;
2053		if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
2054			continue;
2055		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2056			if (cpu == curcpu)
2057				drain_local_stock(&stock->work);
2058			else
2059				schedule_work_on(cpu, &stock->work);
2060		}
2061	}
2062	put_cpu();
2063
2064	if (!sync)
2065		goto out;
2066
2067	for_each_online_cpu(cpu) {
2068		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2069		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
2070			flush_work(&stock->work);
2071	}
2072out:
2073 	put_online_cpus();
2074}
2075
2076/*
2077 * Tries to drain stocked charges in other cpus. This function is asynchronous
2078 * and just put a work per cpu for draining localy on each cpu. Caller can
2079 * expects some charges will be back to res_counter later but cannot wait for
2080 * it.
2081 */
2082static void drain_all_stock_async(struct mem_cgroup *root_memcg)
 
 
2083{
 
 
 
 
 
2084	/*
2085	 * If someone is already draining, avoid adding more kworker runs.
2086	 */
2087	if (!mutex_trylock(&percpu_charge_mutex))
2088		return;
2089	drain_all_stock(root_memcg, false);
2090	mutex_unlock(&percpu_charge_mutex);
2091}
 
 
 
2092
2093/* This is a synchronous drain interface. */
2094static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
2095{
2096	/* called when force_empty is called */
2097	mutex_lock(&percpu_charge_mutex);
2098	drain_all_stock(root_memcg, true);
2099	mutex_unlock(&percpu_charge_mutex);
 
 
2100}
2101
2102/*
2103 * This function drains the percpu counter values from a DEAD cpu and
2104 * moves them to the local cpu. Note that this function can be preempted.
 
2105 */
2106static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
2107{
2108	int i;
 
 
 
 
 
 
2109
2110	spin_lock(&memcg->pcp_counter_lock);
2111	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
2112		long x = per_cpu(memcg->stat->count[i], cpu);
2113
2114		per_cpu(memcg->stat->count[i], cpu) = 0;
2115		memcg->nocpu_base.count[i] += x;
2116	}
2117	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
2118		unsigned long x = per_cpu(memcg->stat->events[i], cpu);
2119
2120		per_cpu(memcg->stat->events[i], cpu) = 0;
2121		memcg->nocpu_base.events[i] += x;
2122	}
2123	spin_unlock(&memcg->pcp_counter_lock);
2124}
 
 
 
 
 
 
2125
2126static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
2127					unsigned long action,
2128					void *hcpu)
2129{
2130	int cpu = (unsigned long)hcpu;
2131	struct memcg_stock_pcp *stock;
2132	struct mem_cgroup *iter;
 
 
 
 
 
2133
2134	if (action == CPU_ONLINE)
2135		return NOTIFY_OK;
 
 
 
 
2136
2137	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
2138		return NOTIFY_OK;
2139
2140	for_each_mem_cgroup(iter)
2141		mem_cgroup_drain_pcp_counter(iter, cpu);
 
 
 
 
2142
2143	stock = &per_cpu(memcg_stock, cpu);
2144	drain_stock(stock);
2145	return NOTIFY_OK;
2146}
 
 
 
 
2147
 
 
 
 
 
 
 
 
 
2148
2149/* See __mem_cgroup_try_charge() for details */
2150enum {
2151	CHARGE_OK,		/* success */
2152	CHARGE_RETRY,		/* need to retry but retry is not bad */
2153	CHARGE_NOMEM,		/* we can't do more. return -ENOMEM */
2154	CHARGE_WOULDBLOCK,	/* __GFP_WAIT wasn't set and not enough res. */
2155	CHARGE_OOM_DIE,		/* the current is killed because of OOM */
2156};
 
 
 
2157
2158static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2159				unsigned int nr_pages, bool oom_check)
 
 
 
 
2160{
2161	unsigned long csize = nr_pages * PAGE_SIZE;
 
2162	struct mem_cgroup *mem_over_limit;
2163	struct res_counter *fail_res;
2164	unsigned long flags = 0;
2165	int ret;
 
 
 
 
2166
2167	ret = res_counter_charge(&memcg->res, csize, &fail_res);
 
 
2168
2169	if (likely(!ret)) {
2170		if (!do_swap_account)
2171			return CHARGE_OK;
2172		ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
2173		if (likely(!ret))
2174			return CHARGE_OK;
2175
2176		res_counter_uncharge(&memcg->res, csize);
2177		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
2178		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
2179	} else
2180		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
2181	/*
2182	 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
2183	 * of regular pages (CHARGE_BATCH), or a single regular page (1).
2184	 *
2185	 * Never reclaim on behalf of optional batching, retry with a
2186	 * single page instead.
2187	 */
2188	if (nr_pages == CHARGE_BATCH)
2189		return CHARGE_RETRY;
2190
2191	if (!(gfp_mask & __GFP_WAIT))
2192		return CHARGE_WOULDBLOCK;
2193
2194	ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
2195	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2196		return CHARGE_RETRY;
 
 
 
 
 
 
 
 
 
2197	/*
2198	 * Even though the limit is exceeded at this point, reclaim
2199	 * may have been able to free some pages.  Retry the charge
2200	 * before killing the task.
2201	 *
2202	 * Only for regular pages, though: huge pages are rather
2203	 * unlikely to succeed so close to the limit, and we fall back
2204	 * to regular pages anyway in case of failure.
2205	 */
2206	if (nr_pages == 1 && ret)
2207		return CHARGE_RETRY;
2208
2209	/*
2210	 * At task move, charge accounts can be doubly counted. So, it's
2211	 * better to wait until the end of task_move if something is going on.
2212	 */
2213	if (mem_cgroup_wait_acct_move(mem_over_limit))
2214		return CHARGE_RETRY;
2215
2216	/* If we don't need to call the oom-killer at all, return immediately */
2217	if (!oom_check)
2218		return CHARGE_NOMEM;
2219	/* check OOM */
2220	if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask, get_order(csize)))
2221		return CHARGE_OOM_DIE;
 
 
2222
2223	return CHARGE_RETRY;
2224}
2225
2226/*
2227 * __mem_cgroup_try_charge() does
2228 * 1. detect memcg to be charged against from passed *mm and *ptr,
2229 * 2. update res_counter
2230 * 3. call memory reclaim if necessary.
2231 *
2232 * In some special cases, if the task is dying (fatal_signal_pending()) or
2233 * has TIF_MEMDIE set, this function returns -EINTR while writing root_mem_cgroup
2234 * to *ptr. There are two reasons for this. 1: dying threads should quit as soon
2235 * as possible without any hazards. 2: all pages should have a valid
2236 * pc->mem_cgroup. If mm is NULL and the caller doesn't pass a valid memcg
2237 * pointer, that is treated as a charge to root_mem_cgroup.
2238 *
2239 * So __mem_cgroup_try_charge() will return
2240 *  0       ...  on success, filling *ptr with a valid memcg pointer.
2241 *  -ENOMEM ...  charge failure because of resource limits.
2242 *  -EINTR  ...  if thread is fatal. *ptr is filled with root_mem_cgroup.
2243 *
2244 * Unlike the exported interface, an "oom" parameter is added. if oom==true,
2245 * the oom-killer can be invoked.
2246 */
2247static int __mem_cgroup_try_charge(struct mm_struct *mm,
2248				   gfp_t gfp_mask,
2249				   unsigned int nr_pages,
2250				   struct mem_cgroup **ptr,
2251				   bool oom)
2252{
2253	unsigned int batch = max(CHARGE_BATCH, nr_pages);
2254	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2255	struct mem_cgroup *memcg = NULL;
2256	int ret;
2257
2258	/*
2259	 * Unlike the global VM's OOM kill, we're not in a system-level
2260	 * memory shortage. So, allow a dying process to go ahead, in addition
2261	 * to a MEMDIE process.
2262	 */
2263	if (unlikely(test_thread_flag(TIF_MEMDIE)
2264		     || fatal_signal_pending(current)))
2265		goto bypass;
2266
2267	/*
2268	 * We always charge the cgroup the mm_struct belongs to.
2269	 * The mm_struct's mem_cgroup changes on task migration if the
2270	 * thread group leader migrates. It's possible that mm is not
2271	 * set, if so charge the init_mm (happens for pagecache usage).
2272	 */
2273	if (!*ptr && !mm)
2274		*ptr = root_mem_cgroup;
2275again:
2276	if (*ptr) { /* css should be a valid one */
2277		memcg = *ptr;
2278		VM_BUG_ON(css_is_removed(&memcg->css));
2279		if (mem_cgroup_is_root(memcg))
2280			goto done;
2281		if (nr_pages == 1 && consume_stock(memcg))
2282			goto done;
2283		css_get(&memcg->css);
2284	} else {
2285		struct task_struct *p;
2286
2287		rcu_read_lock();
2288		p = rcu_dereference(mm->owner);
2289		/*
2290		 * Because we don't hold task_lock(), "p" can exit.
2291		 * In that case, "memcg" can point to root or p can be NULL due
2292		 * to a race with swapoff. Then, we have a small risk of
2293		 * mis-accounting. But that kind of mis-accounting always happens
2294		 * because we don't hold cgroup_mutex(); taking it would be
2295		 * overkill, so we allow the small race here.
2296		 * (*) swapoff et al. will charge against the mm_struct, not the
2297		 * task_struct. So, mm->owner can be NULL.
2298		 */
2299		memcg = mem_cgroup_from_task(p);
2300		if (!memcg)
2301			memcg = root_mem_cgroup;
2302		if (mem_cgroup_is_root(memcg)) {
2303			rcu_read_unlock();
2304			goto done;
2305		}
2306		if (nr_pages == 1 && consume_stock(memcg)) {
2307			/*
2308			 * It seems dangerous to access memcg without css_get().
2309			 * But considering how consume_stock() works, it's not
2310			 * necessary. If consume_stock() succeeds, some charges
2311			 * from this memcg are cached on this cpu. So, we
2312			 * don't need to call css_get()/css_tryget() before
2313			 * calling consume_stock().
2314			 */
2315			rcu_read_unlock();
2316			goto done;
2317		}
2318		/* after here, we may be blocked. we need to get refcnt */
2319		if (!css_tryget(&memcg->css)) {
2320			rcu_read_unlock();
2321			goto again;
2322		}
2323		rcu_read_unlock();
2324	}
2325
2326	do {
2327		bool oom_check;
2328
2329		/* If killed, bypass charge */
2330		if (fatal_signal_pending(current)) {
2331			css_put(&memcg->css);
2332			goto bypass;
2333		}
2334
2335		oom_check = false;
2336		if (oom && !nr_oom_retries) {
2337			oom_check = true;
2338			nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
 
 
2339		}
2340
2341		ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, oom_check);
2342		switch (ret) {
2343		case CHARGE_OK:
2344			break;
2345		case CHARGE_RETRY: /* not in OOM situation but retry */
2346			batch = nr_pages;
2347			css_put(&memcg->css);
2348			memcg = NULL;
2349			goto again;
2350		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
2351			css_put(&memcg->css);
2352			goto nomem;
2353		case CHARGE_NOMEM: /* OOM routine works */
2354			if (!oom) {
2355				css_put(&memcg->css);
2356				goto nomem;
2357			}
2358			/* If oom, we never return -ENOMEM */
2359			nr_oom_retries--;
2360			break;
2361		case CHARGE_OOM_DIE: /* Killed by OOM Killer */
2362			css_put(&memcg->css);
2363			goto bypass;
2364		}
2365	} while (ret != CHARGE_OK);
2366
2367	if (batch > nr_pages)
2368		refill_stock(memcg, batch - nr_pages);
2369	css_put(&memcg->css);
2370done:
2371	*ptr = memcg;
 
 
 
 
 
 
2372	return 0;
2373nomem:
2374	*ptr = NULL;
2375	return -ENOMEM;
2376bypass:
2377	*ptr = root_mem_cgroup;
2378	return -EINTR;
2379}
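/*
 * Editor's sketch (not part of memcontrol.c): the calling convention
 * documented above, mirroring mem_cgroup_charge_common() further below.
 * "page", "mm", "gfp_mask" and "ctype" come from the caller and are
 * assumptions for illustration only.
 */
#if 0	/* illustrative only */
	struct mem_cgroup *memcg = NULL;
	int ret;

	ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &memcg, true);
	if (ret == -ENOMEM)
		return ret;	/* over the limit and reclaim did not help */
	/* ret == 0 or -EINTR: *memcg is valid (possibly root_mem_cgroup) */
	__mem_cgroup_commit_charge(memcg, page, 1, ctype, false);
#endif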
2380
2381/*
2382 * Sometimes we have to undo a charge we got by try_charge().
2383 * This function does the uncharge and puts the css refcount
2384 * obtained by try_charge().
2385 */
2386static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
2387				       unsigned int nr_pages)
2388{
2389	if (!mem_cgroup_is_root(memcg)) {
2390		unsigned long bytes = nr_pages * PAGE_SIZE;
2391
2392		res_counter_uncharge(&memcg->res, bytes);
2393		if (do_swap_account)
2394			res_counter_uncharge(&memcg->memsw, bytes);
2395	}
2396}
2397
2398/*
2399 * Cancel charges in this cgroup; this doesn't propagate to the parent cgroup.
2400 * This is useful when moving usage to parent cgroup.
2401 */
2402static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
2403					unsigned int nr_pages)
2404{
2405	unsigned long bytes = nr_pages * PAGE_SIZE;
2406
2407	if (mem_cgroup_is_root(memcg))
2408		return;
2409
2410	res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
2411	if (do_swap_account)
2412		res_counter_uncharge_until(&memcg->memsw,
2413						memcg->memsw.parent, bytes);
2414}
2415
2416/*
2417 * A helper function to get a mem_cgroup from an ID. Must be called under
2418 * rcu_read_lock(). The caller must check css_is_removed() or similar if
2419 * that is a concern. (Dropping a refcount from swap can be done against a
2420 * removed memcg.)
2421 */
2422static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2423{
2424	struct cgroup_subsys_state *css;
2425
2426	/* ID 0 is unused ID */
2427	if (!id)
2428		return NULL;
2429	css = css_lookup(&mem_cgroup_subsys, id);
2430	if (!css)
2431		return NULL;
2432	return container_of(css, struct mem_cgroup, css);
2433}
2434
2435struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
 
 
2436{
2437	struct mem_cgroup *memcg = NULL;
2438	struct page_cgroup *pc;
2439	unsigned short id;
2440	swp_entry_t ent;
2441
2442	VM_BUG_ON(!PageLocked(page));
2443
2444	pc = lookup_page_cgroup(page);
2445	lock_page_cgroup(pc);
2446	if (PageCgroupUsed(pc)) {
2447		memcg = pc->mem_cgroup;
2448		if (memcg && !css_tryget(&memcg->css))
2449			memcg = NULL;
2450	} else if (PageSwapCache(page)) {
2451		ent.val = page_private(page);
2452		id = lookup_swap_cgroup_id(ent);
2453		rcu_read_lock();
2454		memcg = mem_cgroup_lookup(id);
2455		if (memcg && !css_tryget(&memcg->css))
2456			memcg = NULL;
2457		rcu_read_unlock();
2458	}
2459	unlock_page_cgroup(pc);
2460	return memcg;
2461}
2462
2463static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
2464				       struct page *page,
2465				       unsigned int nr_pages,
2466				       enum charge_type ctype,
2467				       bool lrucare)
2468{
2469	struct page_cgroup *pc = lookup_page_cgroup(page);
2470	struct zone *uninitialized_var(zone);
2471	struct lruvec *lruvec;
2472	bool was_on_lru = false;
2473	bool anon;
2474
2475	lock_page_cgroup(pc);
2476	if (unlikely(PageCgroupUsed(pc))) {
2477		unlock_page_cgroup(pc);
2478		__mem_cgroup_cancel_charge(memcg, nr_pages);
2479		return;
2480	}
2481	/*
2482	 * we don't need page_cgroup_lock for tail pages, because they are not
2483	 * accessed by any other context at this point.
2484	 */
2485
2486	/*
2487	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
2488	 * may already be on some other mem_cgroup's LRU.  Take care of it.
2489	 */
2490	if (lrucare) {
2491		zone = page_zone(page);
2492		spin_lock_irq(&zone->lru_lock);
2493		if (PageLRU(page)) {
2494			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
2495			ClearPageLRU(page);
2496			del_page_from_lru_list(page, lruvec, page_lru(page));
2497			was_on_lru = true;
2498		}
2499	}
2500
2501	pc->mem_cgroup = memcg;
2502	/*
2503	 * We access a page_cgroup asynchronously without lock_page_cgroup().
2504	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
2505	 * is accessed after testing USED bit. To make pc->mem_cgroup visible
2506	 * before USED bit, we need memory barrier here.
2507	 * See mem_cgroup_add_lru_list(), etc.
2508 	 */
2509	smp_wmb();
2510	SetPageCgroupUsed(pc);
2511
2512	if (lrucare) {
2513		if (was_on_lru) {
2514			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
2515			VM_BUG_ON(PageLRU(page));
2516			SetPageLRU(page);
2517			add_page_to_lru_list(page, lruvec, page_lru(page));
2518		}
2519		spin_unlock_irq(&zone->lru_lock);
2520	}
2521
2522	if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
2523		anon = true;
2524	else
2525		anon = false;
2526
2527	mem_cgroup_charge_statistics(memcg, anon, nr_pages);
2528	unlock_page_cgroup(pc);
2529
2530	/*
2531	 * "charge_statistics" updated the event counter, so check it here.
2532	 * Insert the ancestor (and the ancestor's ancestors) into the softlimit
2533	 * RB-tree if they exceed their softlimit.
 
 
2534	 */
2535	memcg_check_events(memcg, page);
2536}
2537
2538#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2539
2540#define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
2541/*
2542 * Because tail pages are not marked as "used", set that flag. We're under
2543 * zone->lru_lock, 'splitting on pmd' and compound_lock.
2544 * charge/uncharge can never happen, and move_account() is done under
2545 * compound_lock(), so we don't have to worry about races.
 
 
 
2546 */
2547void mem_cgroup_split_huge_fixup(struct page *head)
2548{
2549	struct page_cgroup *head_pc = lookup_page_cgroup(head);
2550	struct page_cgroup *pc;
2551	int i;
2552
2553	if (mem_cgroup_disabled())
2554		return;
2555	for (i = 1; i < HPAGE_PMD_NR; i++) {
2556		pc = head_pc + i;
2557		pc->mem_cgroup = head_pc->mem_cgroup;
2558		smp_wmb();/* see __commit_charge() */
2559		pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
2560	}
2561}
2562#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2563
2564/**
2565 * mem_cgroup_move_account - move account of the page
2566 * @page: the page
2567 * @nr_pages: number of regular pages (>1 for huge pages)
2568 * @pc:	page_cgroup of the page.
2569 * @from: mem_cgroup which the page is moved from.
2570 * @to:	mem_cgroup which the page is moved to. @from != @to.
2571 *
2572 * The caller must confirm following.
2573 * - page is not on LRU (isolate_page() is useful.)
2574 * - compound_lock is held when nr_pages > 1
2575 *
2576 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
2577 * from old cgroup.
2578 */
2579static int mem_cgroup_move_account(struct page *page,
2580				   unsigned int nr_pages,
2581				   struct page_cgroup *pc,
2582				   struct mem_cgroup *from,
2583				   struct mem_cgroup *to)
2584{
2585	unsigned long flags;
2586	int ret;
2587	bool anon = PageAnon(page);
2588
2589	VM_BUG_ON(from == to);
2590	VM_BUG_ON(PageLRU(page));
2591	/*
2592	 * The page is isolated from LRU. So, collapse function
2593	 * will not handle this page. But page splitting can happen.
2594	 * Do this check under compound_page_lock(). The caller should
2595	 * hold it.
2596	 */
2597	ret = -EBUSY;
2598	if (nr_pages > 1 && !PageTransHuge(page))
2599		goto out;
2600
2601	lock_page_cgroup(pc);
 
 
 
 
 
 
 
2602
2603	ret = -EINVAL;
2604	if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
2605		goto unlock;
 
2606
2607	move_lock_mem_cgroup(from, &flags);
 
 
 
 
 
 
2608
2609	if (!anon && page_mapped(page)) {
2610		/* Update mapped_file data for mem_cgroup */
2611		preempt_disable();
2612		__this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2613		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2614		preempt_enable();
2615	}
2616	mem_cgroup_charge_statistics(from, anon, -nr_pages);
2617
2618	/* caller should have done css_get */
2619	pc->mem_cgroup = to;
2620	mem_cgroup_charge_statistics(to, anon, nr_pages);
2621	/*
2622	 * We charge against "to", which may not have any tasks. Then "to"
2623	 * can be under rmdir(). But in the current implementation, the callers
2624	 * of this function are just force_empty() and move charge, so it's
2625	 * guaranteed that "to" is never removed. So, we don't check the rmdir
2626	 * status here.
2627	 */
2628	move_unlock_mem_cgroup(from, &flags);
2629	ret = 0;
2630unlock:
2631	unlock_page_cgroup(pc);
2632	/*
2633	 * check events
2634	 */
2635	memcg_check_events(to, page);
2636	memcg_check_events(from, page);
2637out:
2638	return ret;
2639}
2640
2641/*
2642 * move charges to its parent.
2643 */
2644
2645static int mem_cgroup_move_parent(struct page *page,
2646				  struct page_cgroup *pc,
2647				  struct mem_cgroup *child,
2648				  gfp_t gfp_mask)
2649{
2650	struct mem_cgroup *parent;
2651	unsigned int nr_pages;
2652	unsigned long uninitialized_var(flags);
2653	int ret;
2654
2655	/* Is ROOT ? */
2656	if (mem_cgroup_is_root(child))
2657		return -EINVAL;
 
 
 
2658
2659	ret = -EBUSY;
2660	if (!get_page_unless_zero(page))
2661		goto out;
2662	if (isolate_lru_page(page))
2663		goto put;
2664
2665	nr_pages = hpage_nr_pages(page);
 
 
 
 
 
2666
2667	parent = parent_mem_cgroup(child);
2668	/*
2669	 * If no parent, move charges to root cgroup.
2670	 */
2671	if (!parent)
2672		parent = root_mem_cgroup;
2673
2674	if (nr_pages > 1)
2675		flags = compound_lock_irqsave(page);
 
 
2676
2677	ret = mem_cgroup_move_account(page, nr_pages,
2678				pc, child, parent);
2679	if (!ret)
2680		__mem_cgroup_cancel_local_charge(child, nr_pages);
2681
2682	if (nr_pages > 1)
2683		compound_unlock_irqrestore(page, flags);
2684	putback_lru_page(page);
2685put:
2686	put_page(page);
2687out:
2688	return ret;
2689}
2690
2691/*
2692 * Charge the memory controller for page usage.
2693 * Return
2694 * 0 if the charge was successful
2695 * < 0 if the cgroup is over its limit
2696 */
2697static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
2698				gfp_t gfp_mask, enum charge_type ctype)
2699{
2700	struct mem_cgroup *memcg = NULL;
2701	unsigned int nr_pages = 1;
2702	bool oom = true;
2703	int ret;
2704
2705	if (PageTransHuge(page)) {
2706		nr_pages <<= compound_order(page);
2707		VM_BUG_ON(!PageTransHuge(page));
2708		/*
2709		 * Never OOM-kill a process for a huge page.  The
2710		 * fault handler will fall back to regular pages.
 
 
2711		 */
2712		oom = false;
 
 
2713	}
2714
2715	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
2716	if (ret == -ENOMEM)
2717		return ret;
2718	__mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false);
2719	return 0;
2720}
2721
2722int mem_cgroup_newpage_charge(struct page *page,
2723			      struct mm_struct *mm, gfp_t gfp_mask)
2724{
2725	if (mem_cgroup_disabled())
2726		return 0;
2727	VM_BUG_ON(page_mapped(page));
2728	VM_BUG_ON(page->mapping && !PageAnon(page));
2729	VM_BUG_ON(!mm);
2730	return mem_cgroup_charge_common(page, mm, gfp_mask,
2731					MEM_CGROUP_CHARGE_TYPE_MAPPED);
2732}
2733
2734static void
2735__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2736					enum charge_type ctype);
2737
2738int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
2739				gfp_t gfp_mask)
2740{
2741	struct mem_cgroup *memcg = NULL;
2742	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
2743	int ret;
2744
2745	if (mem_cgroup_disabled())
2746		return 0;
2747	if (PageCompound(page))
2748		return 0;
 
2749
2750	if (unlikely(!mm))
2751		mm = &init_mm;
2752	if (!page_is_file_cache(page))
2753		type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
2754
2755	if (!PageSwapCache(page))
2756		ret = mem_cgroup_charge_common(page, mm, gfp_mask, type);
2757	else { /* page is swapcache/shmem */
2758		ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &memcg);
2759		if (!ret)
2760			__mem_cgroup_commit_charge_swapin(page, memcg, type);
2761	}
2762	return ret;
2763}
2764
2765/*
2766 * During swap-in (try_charge -> commit or cancel), the page is locked.
2767 * When try_charge() successfully returns, one refcnt to the memcg is
2768 * acquired without a struct page_cgroup. This refcnt will be consumed by
2769 * "commit()" or dropped by "cancel()".
2770 */
2771int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
2772				 struct page *page,
2773				 gfp_t mask, struct mem_cgroup **memcgp)
2774{
2775	struct mem_cgroup *memcg;
2776	int ret;
2777
2778	*memcgp = NULL;
2779
2780	if (mem_cgroup_disabled())
2781		return 0;
 
2782
2783	if (!do_swap_account)
2784		goto charge_cur_mm;
2785	/*
2786	 * A racing thread's fault, or swapoff, may have already updated
2787	 * the pte, and even removed page from swap cache: in those cases
2788	 * do_swap_page()'s pte_same() test will fail; but there's also a
2789	 * KSM case which does need to charge the page.
2790	 */
2791	if (!PageSwapCache(page))
2792		goto charge_cur_mm;
2793	memcg = try_get_mem_cgroup_from_page(page);
2794	if (!memcg)
2795		goto charge_cur_mm;
2796	*memcgp = memcg;
2797	ret = __mem_cgroup_try_charge(NULL, mask, 1, memcgp, true);
2798	css_put(&memcg->css);
2799	if (ret == -EINTR)
2800		ret = 0;
2801	return ret;
2802charge_cur_mm:
2803	if (unlikely(!mm))
2804		mm = &init_mm;
2805	ret = __mem_cgroup_try_charge(mm, mask, 1, memcgp, true);
2806	if (ret == -EINTR)
2807		ret = 0;
2808	return ret;
2809}
2810
2811static void
2812__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
2813					enum charge_type ctype)
 
 
 
 
 
 
2814{
2815	if (mem_cgroup_disabled())
2816		return;
2817	if (!memcg)
2818		return;
2819	cgroup_exclude_rmdir(&memcg->css);
2820
2821	__mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
2822	/*
2823	 * Now the swap page is in memory. This means the page may be
2824	 * counted both as mem and swap: a double count.
2825	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
2826	 * under lock_page(). But in do_swap_page() (memory.c), reuse_swap_page()
2827	 * may call delete_from_swap_cache() before we reach here.
2828	 */
2829	if (do_swap_account && PageSwapCache(page)) {
2830		swp_entry_t ent = {.val = page_private(page)};
2831		mem_cgroup_uncharge_swap(ent);
2832	}
2833	/*
2834	 * At swapin, we may charge against a cgroup which has no tasks.
2835	 * So, rmdir()->pre_destroy() can be called while we do this charge.
2836	 * In that case, we need to call pre_destroy() again; check it here.
2837	 */
2838	cgroup_release_and_wakeup_rmdir(&memcg->css);
2839}
2840
2841void mem_cgroup_commit_charge_swapin(struct page *page,
2842				     struct mem_cgroup *memcg)
 
 
 
 
2843{
2844	__mem_cgroup_commit_charge_swapin(page, memcg,
2845					  MEM_CGROUP_CHARGE_TYPE_MAPPED);
2846}
2847
2848void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
2849{
2850	if (mem_cgroup_disabled())
2851		return;
2852	if (!memcg)
2853		return;
2854	__mem_cgroup_cancel_charge(memcg, 1);
 
 
2855}
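/*
 * Editor's sketch (not part of memcontrol.c): the try/commit/cancel
 * protocol described above, roughly as a swap-in fault handler is
 * expected to use it.  "page" is the locked swap-cache page; the "out"
 * label and the surrounding pte checks are assumptions for illustration.
 */
#if 0	/* illustrative only */
	struct mem_cgroup *memcg;

	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
		goto out;				/* charge failed */
	/* ...if the pte is still as expected, commit the charge... */
	mem_cgroup_commit_charge_swapin(page, memcg);
	/* ...or, if the pte changed under us, give the charge back... */
	mem_cgroup_cancel_charge_swapin(memcg);
#endif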
2856
2857static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
2858				   unsigned int nr_pages,
2859				   const enum charge_type ctype)
2860{
2861	struct memcg_batch_info *batch = NULL;
2862	bool uncharge_memsw = true;
 
 
2863
2864	/* If swapout, usage of swap doesn't decrease */
2865	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2866		uncharge_memsw = false;
2867
2868	batch = &current->memcg_batch;
2869	/*
2870	 * Usually, we do css_get() when we remember a memcg pointer.
2871	 * But in this case, we keep res->usage until the end of a series of
2872	 * uncharges, so it's ok to ignore the memcg's refcnt.
2873	 */
2874	if (!batch->memcg)
2875		batch->memcg = memcg;
2876	/*
2877	 * do_batch > 0 when unmapping pages or during inode invalidate/truncate.
2878	 * In those cases, all pages freed continuously can be expected to be in
2879	 * the same cgroup and we have a chance to coalesce uncharges.
2880	 * But we uncharge one by one if the task was killed by OOM (TIF_MEMDIE)
2881	 * because we want to uncharge as soon as possible.
2882	 */
2883
2884	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
2885		goto direct_uncharge;
2886
2887	if (nr_pages > 1)
2888		goto direct_uncharge;
 
 
 
 
 
 
 
2889
 
 
2890	/*
2891	 * In the typical case, batch->memcg == memcg. This means we can
2892	 * merge a series of uncharges into one uncharge of the res_counter.
2893	 * If not, we uncharge the res_counter one by one.
2894	 */
2895	if (batch->memcg != memcg)
2896		goto direct_uncharge;
2897	/* remember freed charge and uncharge it later */
2898	batch->nr_pages++;
2899	if (uncharge_memsw)
2900		batch->memsw_nr_pages++;
2901	return;
2902direct_uncharge:
2903	res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
2904	if (uncharge_memsw)
2905		res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
2906	if (unlikely(batch->memcg != memcg))
2907		memcg_oom_recover(memcg);
 
 
 
2908}
2909
2910/*
2911 * uncharge if !page_mapped(page)
2912 */
2913static struct mem_cgroup *
2914__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
2915{
2916	struct mem_cgroup *memcg = NULL;
2917	unsigned int nr_pages = 1;
2918	struct page_cgroup *pc;
2919	bool anon;
2920
2921	if (mem_cgroup_disabled())
2922		return NULL;
2923
2924	if (PageSwapCache(page))
2925		return NULL;
2926
2927	if (PageTransHuge(page)) {
2928		nr_pages <<= compound_order(page);
2929		VM_BUG_ON(!PageTransHuge(page));
 
2930	}
2931	/*
2932	 * Check if our page_cgroup is valid
2933	 */
2934	pc = lookup_page_cgroup(page);
2935	if (unlikely(!PageCgroupUsed(pc)))
2936		return NULL;
2937
2938	lock_page_cgroup(pc);
 
 
 
 
 
 
 
2939
2940	memcg = pc->mem_cgroup;
 
2941
2942	if (!PageCgroupUsed(pc))
2943		goto unlock_out;
2944
2945	anon = PageAnon(page);
 
2946
2947	switch (ctype) {
2948	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2949		/*
2950		 * Generally PageAnon tells if it's the anon statistics to be
2951		 * updated; but sometimes e.g. mem_cgroup_uncharge_page() is
2952		 * used before the page has reached the stage of being marked PageAnon.
 
 
 
 
 
2953		 */
2954		anon = true;
2955		/* fallthrough */
2956	case MEM_CGROUP_CHARGE_TYPE_DROP:
2957		/* See mem_cgroup_prepare_migration() */
2958		if (page_mapped(page) || PageCgroupMigration(pc))
2959			goto unlock_out;
2960		break;
2961	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
2962		if (!PageAnon(page)) {	/* Shared memory */
2963			if (page->mapping && !page_is_file_cache(page))
2964				goto unlock_out;
2965		} else if (page_mapped(page)) /* Anon */
2966				goto unlock_out;
2967		break;
2968	default:
2969		break;
2970	}
2971
2972	mem_cgroup_charge_statistics(memcg, anon, -nr_pages);
2973
2974	ClearPageCgroupUsed(pc);
2975	/*
2976	 * pc->mem_cgroup is not cleared here. It will be accessed when the page
2977	 * is freed from the LRU. This is safe because an uncharged page is expected
2978	 * not to be reused (it is freed soon). The exception is SwapCache, which is
2979	 * handled by special functions.
2980	 */
2981
2982	unlock_page_cgroup(pc);
2983	/*
2984	 * even after unlock, we have memcg->res.usage here and this memcg
2985	 * will never be freed.
2986	 */
2987	memcg_check_events(memcg, page);
2988	if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
2989		mem_cgroup_swap_statistics(memcg, true);
2990		mem_cgroup_get(memcg);
2991	}
2992	if (!mem_cgroup_is_root(memcg))
2993		mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
2994
2995	return memcg;
2996
2997unlock_out:
2998	unlock_page_cgroup(pc);
2999	return NULL;
3000}
3001
3002void mem_cgroup_uncharge_page(struct page *page)
 
3003{
3004	/* early check. */
3005	if (page_mapped(page))
3006		return;
3007	VM_BUG_ON(page->mapping && !PageAnon(page));
3008	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
 
 
 
 
 
3009}
3010
3011void mem_cgroup_uncharge_cache_page(struct page *page)
 
3012{
3013	VM_BUG_ON(page_mapped(page));
3014	VM_BUG_ON(page->mapping);
3015	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
3016}
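/*
 * Editor's sketch (not part of memcontrol.c): the "uncharge if
 * !page_mapped(page)" rule above, roughly as the rmap code is expected
 * to apply it when the last mapping of an anon page goes away.  This is
 * a simplified assumption, not the real page_remove_rmap().
 */
#if 0	/* illustrative only */
	if (atomic_add_negative(-1, &page->_mapcount)) {
		/* the last pte mapping is gone */
		if (PageAnon(page))
			mem_cgroup_uncharge_page(page);
	}
#endif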
3017
3018/*
3019 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
3020 * In those cases, pages are freed continuously and we can expect the pages
3021 * to be in the same memcg. Each of these calls itself limits the number of
3022 * pages freed at once, so uncharge_start/end() is called properly.
3023 * This may be called multiple times in one context,
3024 */
3025
3026void mem_cgroup_uncharge_start(void)
3027{
3028	current->memcg_batch.do_batch++;
3029	/* Nesting is allowed. */
3030	if (current->memcg_batch.do_batch == 1) {
3031		current->memcg_batch.memcg = NULL;
3032		current->memcg_batch.nr_pages = 0;
3033		current->memcg_batch.memsw_nr_pages = 0;
 
 
 
 
 
 
3034	}
 
 
 
 
 
 
3035}
3036
3037void mem_cgroup_uncharge_end(void)
3038{
3039	struct memcg_batch_info *batch = &current->memcg_batch;
 
3040
3041	if (!batch->do_batch)
3042		return;
3043
3044	batch->do_batch--;
3045	if (batch->do_batch) /* If stacked, do nothing. */
3046		return;
3047
3048	if (!batch->memcg)
3049		return;
 
 
 
 
 
 
 
 
3050	/*
3051	 * This "batch->memcg" is valid without any css_get/put etc.
3052	 * because we hide charges behind us.
3053	 */
3054	if (batch->nr_pages)
3055		res_counter_uncharge(&batch->memcg->res,
3056				     batch->nr_pages * PAGE_SIZE);
3057	if (batch->memsw_nr_pages)
3058		res_counter_uncharge(&batch->memcg->memsw,
3059				     batch->memsw_nr_pages * PAGE_SIZE);
3060	memcg_oom_recover(batch->memcg);
3061	/* forget this pointer (for sanity check) */
3062	batch->memcg = NULL;
3063}
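/*
 * Editor's sketch (not part of memcontrol.c): the batching protocol
 * described above, as a truncate/unmap style caller is expected to use
 * it.  "pages_to_free" is a hypothetical list head used for illustration.
 */
#if 0	/* illustrative only */
	struct page *page, *next;

	mem_cgroup_uncharge_start();
	list_for_each_entry_safe(page, next, &pages_to_free, lru)
		mem_cgroup_uncharge_cache_page(page);	/* coalesced */
	mem_cgroup_uncharge_end();	/* res_counter uncharged once for the batch */
#endif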
3064
3065#ifdef CONFIG_SWAP
3066/*
3067 * Called after __delete_from_swap_cache() to drop the "page" account.
3068 * The memcg information is recorded in the swap_cgroup of "ent".
3069 */
3070void
3071mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
3072{
3073	struct mem_cgroup *memcg;
3074	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
3075
3076	if (!swapout) /* this was a swap cache but the swap is unused ! */
3077		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
3078
3079	memcg = __mem_cgroup_uncharge_common(page, ctype);
 
 
 
 
 
 
 
3080
3081	/*
3082	 * Record the memcg information. If swapout && memcg != NULL,
3083	 * mem_cgroup_get() was called in uncharge().
 
 
3084	 */
3085	if (do_swap_account && swapout && memcg)
3086		swap_cgroup_record(ent, css_id(&memcg->css));
3087}
3088#endif
3089
3090#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3091/*
3092 * Called from swap_entry_free(). Removes the record in swap_cgroup and
3093 * uncharges the "memsw" account.
3094 */
3095void mem_cgroup_uncharge_swap(swp_entry_t ent)
3096{
3097	struct mem_cgroup *memcg;
3098	unsigned short id;
3099
3100	if (!do_swap_account)
3101		return;
 
3102
3103	id = swap_cgroup_record(ent, 0);
3104	rcu_read_lock();
3105	memcg = mem_cgroup_lookup(id);
3106	if (memcg) {
3107		/*
3108		 * We uncharge this because swap is freed.
3109		 * This memcg can be obsolete one. We avoid calling css_tryget
3110		 */
3111		if (!mem_cgroup_is_root(memcg))
3112			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
3113		mem_cgroup_swap_statistics(memcg, false);
3114		mem_cgroup_put(memcg);
3115	}
3116	rcu_read_unlock();
3117}
3118
3119/**
3120 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3121 * @entry: swap entry to be moved
3122 * @from:  mem_cgroup which the entry is moved from
3123 * @to:  mem_cgroup which the entry is moved to
3124 *
3125 * It succeeds only when the swap_cgroup's record for this entry is the same
3126 * as the mem_cgroup's id of @from.
3127 *
3128 * Returns 0 on success, -EINVAL on failure.
3129 *
3130 * The caller must have charged to @to, IOW, called res_counter_charge() about
3131 * both res and memsw, and called css_get().
3132 */
3133static int mem_cgroup_move_swap_account(swp_entry_t entry,
3134				struct mem_cgroup *from, struct mem_cgroup *to)
3135{
3136	unsigned short old_id, new_id;
3137
3138	old_id = css_id(&from->css);
3139	new_id = css_id(&to->css);
3140
3141	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3142		mem_cgroup_swap_statistics(from, false);
3143		mem_cgroup_swap_statistics(to, true);
3144		/*
3145		 * This function is only called from task migration context now.
3146		 * It postpones res_counter and refcount handling till the end
3147		 * of task migration(mem_cgroup_clear_mc()) for performance
3148		 * improvement. But we cannot postpone mem_cgroup_get(to)
3149		 * because if the process that has been moved to @to does
3150		 * swap-in, the refcount of @to might be decreased to 0.
3151		 */
3152		mem_cgroup_get(to);
3153		return 0;
3154	}
3155	return -EINVAL;
 
3156}
3157#else
3158static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3159				struct mem_cgroup *from, struct mem_cgroup *to)
3160{
3161	return -EINVAL;
3162}
3163#endif
3164
3165/*
3166 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
3167 * page belongs to.
3168 */
3169int mem_cgroup_prepare_migration(struct page *page,
3170	struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask)
3171{
3172	struct mem_cgroup *memcg = NULL;
3173	struct page_cgroup *pc;
3174	enum charge_type ctype;
3175	int ret = 0;
3176
3177	*memcgp = NULL;
 
3178
3179	VM_BUG_ON(PageTransHuge(page));
3180	if (mem_cgroup_disabled())
3181		return 0;
3182
3183	pc = lookup_page_cgroup(page);
3184	lock_page_cgroup(pc);
3185	if (PageCgroupUsed(pc)) {
3186		memcg = pc->mem_cgroup;
3187		css_get(&memcg->css);
 
 
 
 
 
 
3188		/*
3189		 * At migrating an anonymous page, its mapcount goes down
3190		 * to 0 and uncharge() will be called. But, even if it's fully
3191		 * unmapped, migration may fail and this page has to be
3192		 * charged again. We set MIGRATION flag here and delay uncharge
3193		 * until end_migration() is called
3194		 *
3195		 * Corner Case Thinking
3196		 * A)
3197		 * When the old page was mapped as Anon and it's unmap-and-freed
3198		 * while migration was ongoing.
3199		 * If unmap finds the old page, uncharge() of it will be delayed
3200		 * until end_migration(). If unmap finds a new page, it's
3201		 * uncharged when its mapcount goes from 1 to 0. If the unmap code
3202		 * finds swap_migration_entry, the new page will not be mapped
3203		 * and end_migration() will find it (mapcount == 0).
3204		 *
3205		 * B)
3206		 * When the old page was mapped but migration fails, the kernel
3207		 * remaps it. A charge for it is kept by MIGRATION flag even
3208		 * if mapcount goes down to 0. We can do remap successfully
3209		 * without charging it again.
3210		 *
3211		 * C)
3212		 * The "old" page is under lock_page() until the end of
3213		 * migration, so, the old page itself will not be swapped-out.
3214		 * If the new page is swapped out before end_migration, our
3215		 * hook to usual swap-out path will catch the event.
3216		 */
3217		if (PageAnon(page))
3218			SetPageCgroupMigration(pc);
 
 
 
 
 
 
 
3219	}
3220	unlock_page_cgroup(pc);
3221	/*
3222	 * If the page is not charged at this point,
3223	 * we return here.
3224	 */
3225	if (!memcg)
 
 
3226		return 0;
3227
3228	*memcgp = memcg;
3229	ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, memcgp, false);
3230	css_put(&memcg->css);/* drop extra refcnt */
3231	if (ret) {
3232		if (PageAnon(page)) {
3233			lock_page_cgroup(pc);
3234			ClearPageCgroupMigration(pc);
3235			unlock_page_cgroup(pc);
3236			/*
3237			 * The old page may be fully unmapped while we kept it.
3238			 */
3239			mem_cgroup_uncharge_page(page);
3240		}
3241		/* we'll need to revisit this error code (we have -EINTR) */
3242		return -ENOMEM;
3243	}
3244	/*
3245	 * We charge new page before it's used/mapped. So, even if unlock_page()
3246	 * is called before end_migration, we can catch all events on this new
3247	 * page. In the case new page is migrated but not remapped, new page's
3248	 * mapcount will be finally 0 and we call uncharge in end_migration().
3249	 */
3250	if (PageAnon(page))
3251		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
3252	else if (page_is_file_cache(page))
3253		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
3254	else
3255		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
3256	__mem_cgroup_commit_charge(memcg, newpage, 1, ctype, false);
3257	return ret;
3258}
3259
3260/* remove redundant charge if migration failed*/
3261void mem_cgroup_end_migration(struct mem_cgroup *memcg,
3262	struct page *oldpage, struct page *newpage, bool migration_ok)
3263{
3264	struct page *used, *unused;
3265	struct page_cgroup *pc;
3266	bool anon;
3267
3268	if (!memcg)
3269		return;
3270	/* blocks rmdir() */
3271	cgroup_exclude_rmdir(&memcg->css);
3272	if (!migration_ok) {
3273		used = oldpage;
3274		unused = newpage;
3275	} else {
3276		used = newpage;
3277		unused = oldpage;
3278	}
3279	/*
3280	 * We disallowed uncharge of pages under migration because the mapcount
3281	 * of the page goes down to zero temporarily.
3282	 * Clear the flag and check whether the page should be charged.
3283	 */
3284	pc = lookup_page_cgroup(oldpage);
3285	lock_page_cgroup(pc);
3286	ClearPageCgroupMigration(pc);
3287	unlock_page_cgroup(pc);
3288	anon = PageAnon(used);
3289	__mem_cgroup_uncharge_common(unused,
3290		anon ? MEM_CGROUP_CHARGE_TYPE_MAPPED
3291		     : MEM_CGROUP_CHARGE_TYPE_CACHE);
3292
3293	/*
3294	 * If the page is file cache, the radix-tree replacement is atomic
3295	 * and we can skip this check. When it was an Anon page, its mapcount
3296	 * goes down to 0. But because we added the MIGRATION flag, it's not
3297	 * uncharged yet. There are several cases, but the page->mapcount check
3298	 * and the USED bit check in mem_cgroup_uncharge_page() do enough
3299	 * checking. (see prepare_charge() also)
3300	 */
3301	if (anon)
3302		mem_cgroup_uncharge_page(used);
3303	/*
3304	 * At migration, we may charge against a cgroup which has no
3305	 * tasks.
3306	 * So, rmdir()->pre_destroy() can be called while we do this charge.
3307	 * In that case, we need to call pre_destroy() again; check it here.
3308	 */
3309	cgroup_release_and_wakeup_rmdir(&memcg->css);
3310}
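/*
 * Editor's sketch (not part of memcontrol.c): how the migration core is
 * expected to bracket a page move with the two hooks above.  "page",
 * "newpage" and "rc" (the migration result) are assumptions for
 * illustration; see mm/migrate.c for the real caller.
 */
#if 0	/* illustrative only */
	struct mem_cgroup *memcg = NULL;
	int charge;

	charge = mem_cgroup_prepare_migration(page, newpage, &memcg, GFP_KERNEL);
	/* ...unmap the old page and copy its contents to newpage... */
	if (!charge)
		mem_cgroup_end_migration(memcg, page, newpage, rc == 0);
#endif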
3311
3312/*
3313 * At replace page cache, newpage is not under any memcg but it's on
3314 * LRU. So, this function doesn't touch res_counter but handles LRU
3315 * in correct way. Both pages are locked so we cannot race with uncharge.
3316 */
3317void mem_cgroup_replace_page_cache(struct page *oldpage,
3318				  struct page *newpage)
3319{
3320	struct mem_cgroup *memcg = NULL;
3321	struct page_cgroup *pc;
3322	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
3323
3324	if (mem_cgroup_disabled())
3325		return;
3326
3327	pc = lookup_page_cgroup(oldpage);
3328	/* fix accounting on old pages */
3329	lock_page_cgroup(pc);
3330	if (PageCgroupUsed(pc)) {
3331		memcg = pc->mem_cgroup;
3332		mem_cgroup_charge_statistics(memcg, false, -1);
3333		ClearPageCgroupUsed(pc);
3334	}
3335	unlock_page_cgroup(pc);
3336
3337	/*
3338	 * When called from shmem_replace_page(), in some cases the
3339	 * oldpage has already been charged, and in some cases not.
3340	 */
3341	if (!memcg)
3342		return;
3343
3344	if (PageSwapBacked(oldpage))
3345		type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
 
 
 
3346
3347	/*
3348	 * Even if newpage->mapping was NULL before starting replacement,
3349	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
3350	 * LRU while we overwrite pc->mem_cgroup.
3351	 */
3352	__mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
3353}
3354
3355#ifdef CONFIG_DEBUG_VM
3356static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
3357{
3358	struct page_cgroup *pc;
3359
3360	pc = lookup_page_cgroup(page);
3361	/*
3362	 * Can be NULL while feeding pages into the page allocator for
3363	 * the first time, i.e. during boot or memory hotplug;
3364	 * or when mem_cgroup_disabled().
3365	 */
3366	if (likely(pc) && PageCgroupUsed(pc))
3367		return pc;
3368	return NULL;
3369}
3370
3371bool mem_cgroup_bad_page_check(struct page *page)
3372{
3373	if (mem_cgroup_disabled())
3374		return false;
3375
3376	return lookup_page_cgroup_used(page) != NULL;
 
 
3377}
3378
3379void mem_cgroup_print_bad_page(struct page *page)
3380{
3381	struct page_cgroup *pc;
3382
3383	pc = lookup_page_cgroup_used(page);
3384	if (pc) {
3385		printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
3386		       pc, pc->flags, pc->mem_cgroup);
3387	}
3388}
3389#endif
3390
3391static DEFINE_MUTEX(set_limit_mutex);
 
3392
3393static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
3394				unsigned long long val)
3395{
3396	int retry_count;
3397	u64 memswlimit, memlimit;
3398	int ret = 0;
3399	int children = mem_cgroup_count_children(memcg);
3400	u64 curusage, oldusage;
3401	int enlarge;
3402
3403	/*
3404	 * For keeping hierarchical_reclaim simple, how long we should retry
3405	 * is depends on callers. We set our retry-count to be function
3406	 * of # of children which we should visit in this loop.
3407	 */
3408	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
3409
3410	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3411
3412	enlarge = 0;
3413	while (retry_count) {
3414		if (signal_pending(current)) {
3415			ret = -EINTR;
 
 
 
 
 
 
 
3416			break;
 
 
 
 
3417		}
 
 
 
3418		/*
3419		 * Rather than hiding all of this in some function, I do it in
3420		 * an open-coded manner so you can see what it really does.
3421		 * We have to guarantee memcg->res.limit < memcg->memsw.limit.
 
 
3422		 */
3423		mutex_lock(&set_limit_mutex);
3424		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3425		if (memswlimit < val) {
3426			ret = -EINVAL;
3427			mutex_unlock(&set_limit_mutex);
3428			break;
3429		}
 
 
 
 
 
 
 
3430
3431		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3432		if (memlimit < val)
3433			enlarge = 1;
 
 
 
 
3434
3435		ret = res_counter_set_limit(&memcg->res, val);
3436		if (!ret) {
3437			if (memswlimit == val)
3438				memcg->memsw_is_minimum = true;
3439			else
3440				memcg->memsw_is_minimum = false;
 
 
 
 
 
 
 
 
 
 
3441		}
3442		mutex_unlock(&set_limit_mutex);
 
3443
3444		if (!ret)
3445			break;
3446
3447		mem_cgroup_reclaim(memcg, GFP_KERNEL,
3448				   MEM_CGROUP_RECLAIM_SHRINK);
3449		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3450		/* Usage is reduced ? */
3451  		if (curusage >= oldusage)
3452			retry_count--;
3453		else
3454			oldusage = curusage;
3455	}
3456	if (!ret && enlarge)
3457		memcg_oom_recover(memcg);
3458
3459	return ret;
 
3460}
3461
3462static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
3463					unsigned long long val)
3464{
3465	int retry_count;
3466	u64 memlimit, memswlimit, oldusage, curusage;
3467	int children = mem_cgroup_count_children(memcg);
3468	int ret = -EBUSY;
3469	int enlarge = 0;
3470
3471	/* see mem_cgroup_resize_res_limit */
3472 	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
3473	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3474	while (retry_count) {
3475		if (signal_pending(current)) {
3476			ret = -EINTR;
3477			break;
3478		}
3479		/*
3480		 * Rather than hiding all of this in some function, I do it in
3481		 * an open-coded manner so you can see what it really does.
3482		 * We have to guarantee memcg->res.limit < memcg->memsw.limit.
3483		 */
3484		mutex_lock(&set_limit_mutex);
3485		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3486		if (memlimit > val) {
3487			ret = -EINVAL;
3488			mutex_unlock(&set_limit_mutex);
3489			break;
3490		}
3491		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3492		if (memswlimit < val)
3493			enlarge = 1;
3494		ret = res_counter_set_limit(&memcg->memsw, val);
3495		if (!ret) {
3496			if (memlimit == val)
3497				memcg->memsw_is_minimum = true;
3498			else
3499				memcg->memsw_is_minimum = false;
3500		}
3501		mutex_unlock(&set_limit_mutex);
3502
3503		if (!ret)
3504			break;
3505
3506		mem_cgroup_reclaim(memcg, GFP_KERNEL,
3507				   MEM_CGROUP_RECLAIM_NOSWAP |
3508				   MEM_CGROUP_RECLAIM_SHRINK);
3509		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3510		/* Usage is reduced ? */
3511		if (curusage >= oldusage)
3512			retry_count--;
3513		else
3514			oldusage = curusage;
3515	}
3516	if (!ret && enlarge)
3517		memcg_oom_recover(memcg);
3518	return ret;
3519}
3520
3521unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3522					    gfp_t gfp_mask,
3523					    unsigned long *total_scanned)
3524{
3525	unsigned long nr_reclaimed = 0;
3526	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
3527	unsigned long reclaimed;
3528	int loop = 0;
3529	struct mem_cgroup_tree_per_zone *mctz;
3530	unsigned long long excess;
3531	unsigned long nr_scanned;
3532
3533	if (order > 0)
3534		return 0;
3535
3536	mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
3537	/*
3538	 * This loop can run for a while, especially if mem_cgroups continuously
3539	 * keep exceeding their soft limit and putting the system under
3540	 * pressure.
3541	 */
3542	do {
3543		if (next_mz)
3544			mz = next_mz;
3545		else
3546			mz = mem_cgroup_largest_soft_limit_node(mctz);
3547		if (!mz)
3548			break;
3549
3550		nr_scanned = 0;
3551		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
3552						    gfp_mask, &nr_scanned);
3553		nr_reclaimed += reclaimed;
3554		*total_scanned += nr_scanned;
3555		spin_lock(&mctz->lock);
3556
3557		/*
3558		 * If we failed to reclaim anything from this memory cgroup
3559		 * it is time to move on to the next cgroup
3560		 */
3561		next_mz = NULL;
3562		if (!reclaimed) {
3563			do {
3564				/*
3565				 * Loop until we find yet another one.
3566				 *
3567				 * By the time we get the soft_limit lock
3568				 * again, someone might have added the
3569				 * group back on the RB tree. Iterate to
3570				 * make sure we get a different mem.
3571				 * mem_cgroup_largest_soft_limit_node returns
3572				 * NULL if no other cgroup is present on
3573				 * the tree
3574				 */
3575				next_mz =
3576				__mem_cgroup_largest_soft_limit_node(mctz);
3577				if (next_mz == mz)
3578					css_put(&next_mz->memcg->css);
3579				else /* next_mz == NULL or other memcg */
3580					break;
3581			} while (1);
3582		}
3583		__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
3584		excess = res_counter_soft_limit_excess(&mz->memcg->res);
3585		/*
3586		 * One school of thought says that we should not add
3587		 * back the node to the tree if reclaim returns 0.
3588		 * But our reclaim could return 0 simply because, due
3589		 * to priority, we are exposing a smaller subset of
3590		 * memory to reclaim from. Consider this a longer-term
3591		 * TODO.
3592		 */
3593		/* If excess == 0, no tree ops */
3594		__mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
3595		spin_unlock(&mctz->lock);
3596		css_put(&mz->memcg->css);
3597		loop++;
3598		/*
3599		 * Could not reclaim anything and there are no more
3600		 * mem cgroups to try or we seem to be looping without
3601		 * reclaiming anything.
3602		 */
3603		if (!nr_reclaimed &&
3604			(next_mz == NULL ||
3605			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3606			break;
3607	} while (!nr_reclaimed);
3608	if (next_mz)
3609		css_put(&next_mz->memcg->css);
3610	return nr_reclaimed;
3611}
3612
3613/*
3614 * This routine traverses the page_cgroups on the given list and drops them all.
3615 * *And* this routine doesn't reclaim the pages themselves, it just removes the page_cgroups.
3616 */
3617static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
3618				int node, int zid, enum lru_list lru)
3619{
3620	struct mem_cgroup_per_zone *mz;
3621	unsigned long flags, loop;
3622	struct list_head *list;
3623	struct page *busy;
3624	struct zone *zone;
3625	int ret = 0;
3626
3627	zone = &NODE_DATA(node)->node_zones[zid];
3628	mz = mem_cgroup_zoneinfo(memcg, node, zid);
3629	list = &mz->lruvec.lists[lru];
3630
3631	loop = mz->lru_size[lru];
3632	/* give some margin against EBUSY etc...*/
3633	loop += 256;
3634	busy = NULL;
3635	while (loop--) {
3636		struct page_cgroup *pc;
3637		struct page *page;
3638
3639		ret = 0;
3640		spin_lock_irqsave(&zone->lru_lock, flags);
3641		if (list_empty(list)) {
3642			spin_unlock_irqrestore(&zone->lru_lock, flags);
3643			break;
3644		}
3645		page = list_entry(list->prev, struct page, lru);
3646		if (busy == page) {
3647			list_move(&page->lru, list);
3648			busy = NULL;
3649			spin_unlock_irqrestore(&zone->lru_lock, flags);
3650			continue;
3651		}
3652		spin_unlock_irqrestore(&zone->lru_lock, flags);
3653
3654		pc = lookup_page_cgroup(page);
3655
3656		ret = mem_cgroup_move_parent(page, pc, memcg, GFP_KERNEL);
3657		if (ret == -ENOMEM || ret == -EINTR)
3658			break;
3659
3660		if (ret == -EBUSY || ret == -EINVAL) {
3661			/* found lock contention or "pc" is obsolete. */
3662			busy = page;
3663			cond_resched();
3664		} else
3665			busy = NULL;
3666	}
3667
3668	if (!ret && !list_empty(list))
3669		return -EBUSY;
3670	return ret;
3671}
 
3672
3673/*
3674 * Make the mem_cgroup's charge 0 if it contains no tasks.
3675 * This enables deleting the mem_cgroup.
3676 */
3677static int mem_cgroup_force_empty(struct mem_cgroup *memcg, bool free_all)
3678{
3679	int ret;
3680	int node, zid, shrink;
3681	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
3682	struct cgroup *cgrp = memcg->css.cgroup;
3683
3684	css_get(&memcg->css);
 
 
3685
3686	shrink = 0;
3687	/* should free all ? */
3688	if (free_all)
3689		goto try_to_free;
3690move_account:
3691	do {
3692		ret = -EBUSY;
3693		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
3694			goto out;
3695		ret = -EINTR;
3696		if (signal_pending(current))
3697			goto out;
3698		/* This is for making all *used* pages to be on LRU. */
3699		lru_add_drain_all();
3700		drain_all_stock_sync(memcg);
3701		ret = 0;
3702		mem_cgroup_start_move(memcg);
3703		for_each_node_state(node, N_HIGH_MEMORY) {
3704			for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
3705				enum lru_list lru;
3706				for_each_lru(lru) {
3707					ret = mem_cgroup_force_empty_list(memcg,
3708							node, zid, lru);
3709					if (ret)
3710						break;
3711				}
3712			}
3713			if (ret)
3714				break;
3715		}
3716		mem_cgroup_end_move(memcg);
3717		memcg_oom_recover(memcg);
3718		/* it seems parent cgroup doesn't have enough mem */
3719		if (ret == -ENOMEM)
3720			goto try_to_free;
3721		cond_resched();
3722	/* "ret" should also be checked to ensure all lists are empty. */
3723	} while (res_counter_read_u64(&memcg->res, RES_USAGE) > 0 || ret);
3724out:
3725	css_put(&memcg->css);
3726	return ret;
3727
3728try_to_free:
3729	/* returns EBUSY if there is a task or if we come here twice. */
3730	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
3731		ret = -EBUSY;
3732		goto out;
3733	}
3734	/* we call try-to-free pages to make this cgroup empty */
3735	lru_add_drain_all();
3736	/* try to free all pages in this cgroup */
3737	shrink = 1;
3738	while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
3739		int progress;
3740
3741		if (signal_pending(current)) {
3742			ret = -EINTR;
3743			goto out;
3744		}
3745		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
3746						false);
3747		if (!progress) {
3748			nr_retries--;
3749			/* maybe some writeback is necessary */
3750			congestion_wait(BLK_RW_ASYNC, HZ/10);
3751		}
3752
3753	}
3754	lru_add_drain();
3755	/* try move_account...there may be some *locked* pages. */
3756	goto move_account;
 
 
3757}
3758
3759static int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
3760{
3761	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
3762}
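/*
 * Illustrative trigger (cgroup v1, path assumed): writing any value to
 * memory.force_empty ends up in the handler above with free_all == true.
 *
 *   # echo 0 > /sys/fs/cgroup/memory/grp/memory.force_empty
 */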
3763
 
 
3764
3765static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
3766{
3767	return mem_cgroup_from_cont(cont)->use_hierarchy;
3768}
3769
3770static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
3771					u64 val)
3772{
3773	int retval = 0;
3774	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3775	struct cgroup *parent = cont->parent;
3776	struct mem_cgroup *parent_memcg = NULL;
3777
3778	if (parent)
3779		parent_memcg = mem_cgroup_from_cont(parent);
3780
3781	cgroup_lock();
3782	/*
3783	 * If parent's use_hierarchy is set, we can't make any modifications
3784	 * in the child subtrees. If it is unset, then the change can
3785	 * occur, provided the current cgroup has no children.
3786	 *
3787	 * For the root cgroup, parent_memcg is NULL; we allow the value to
3788	 * be set if there are no children.
3789	 */
3790	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
3791				(val == 1 || val == 0)) {
3792		if (list_empty(&cont->children))
3793			memcg->use_hierarchy = val;
3794		else
3795			retval = -EBUSY;
3796	} else
3797		retval = -EINVAL;
3798	cgroup_unlock();
3799
3800	return retval;
3801}
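/*
 * Illustrative usage (path assumed): hierarchical accounting can only be
 * toggled while this cgroup has no children and the parent does not
 * already enforce use_hierarchy, as checked above.
 *
 *   # echo 1 > /sys/fs/cgroup/memory/grp/memory.use_hierarchy
 */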
3802
3803
3804static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
3805					       enum mem_cgroup_stat_index idx)
3806{
3807	struct mem_cgroup *iter;
3808	long val = 0;
 
 
 
3809
3810	/* Per-cpu values can be negative, use a signed accumulator */
3811	for_each_mem_cgroup_tree(iter, memcg)
3812		val += mem_cgroup_read_stat(iter, idx);
3813
3814	if (val < 0) /* race ? */
3815		val = 0;
3816	return val;
3817}
3818
3819static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3820{
3821	u64 val;
3822
3823	if (!mem_cgroup_is_root(memcg)) {
3824		if (!swap)
3825			return res_counter_read_u64(&memcg->res, RES_USAGE);
3826		else
3827			return res_counter_read_u64(&memcg->memsw, RES_USAGE);
 
3828	}
3829
3830	val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
3831	val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
 
3832
3833	if (swap)
3834		val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);
3835
3836	return val << PAGE_SHIFT;
3837}
3838
3839static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
3840			       struct file *file, char __user *buf,
3841			       size_t nbytes, loff_t *ppos)
3842{
3843	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3844	char str[64];
3845	u64 val;
3846	int type, name, len;
3847
3848	type = MEMFILE_TYPE(cft->private);
3849	name = MEMFILE_ATTR(cft->private);
3850
3851	if (!do_swap_account && type == _MEMSWAP)
3852		return -EOPNOTSUPP;
3853
3854	switch (type) {
3855	case _MEM:
3856		if (name == RES_USAGE)
3857			val = mem_cgroup_usage(memcg, false);
3858		else
3859			val = res_counter_read_u64(&memcg->res, name);
3860		break;
3861	case _MEMSWAP:
3862		if (name == RES_USAGE)
3863			val = mem_cgroup_usage(memcg, true);
3864		else
3865			val = res_counter_read_u64(&memcg->memsw, name);
3866		break;
3867	default:
3868		BUG();
3869	}
3870
3871	len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val);
3872	return simple_read_from_buffer(buf, nbytes, ppos, str, len);
3873}
3874/*
3875 * The user of this function is...
3876 * RES_LIMIT.
3877 */
3878static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
3879			    const char *buffer)
3880{
3881	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3882	int type, name;
3883	unsigned long long val;
3884	int ret;
3885
3886	type = MEMFILE_TYPE(cft->private);
3887	name = MEMFILE_ATTR(cft->private);
3888
3889	if (!do_swap_account && type == _MEMSWAP)
3890		return -EOPNOTSUPP;
3891
3892	switch (name) {
3893	case RES_LIMIT:
3894		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3895			ret = -EINVAL;
3896			break;
3897		}
3898		/* This function does all necessary parse...reuse it */
3899		ret = res_counter_memparse_write_strategy(buffer, &val);
3900		if (ret)
3901			break;
3902		if (type == _MEM)
3903			ret = mem_cgroup_resize_limit(memcg, val);
3904		else
3905			ret = mem_cgroup_resize_memsw_limit(memcg, val);
3906		break;
3907	case RES_SOFT_LIMIT:
3908		ret = res_counter_memparse_write_strategy(buffer, &val);
3909		if (ret)
3910			break;
3911		/*
3912		 * For memsw, soft limits are hard to implement in terms
3913		 * of semantics; for now, we only support soft limits for
3914		 * memory control without swap.
3915		 */
3916		if (type == _MEM)
3917			ret = res_counter_set_soft_limit(&memcg->res, val);
3918		else
3919			ret = -EINVAL;
3920		break;
3921	default:
3922		ret = -EINVAL; /* should be BUG() ? */
3923		break;
3924	}
3925	return ret;
3926}
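/*
 * Illustrative writes routed to mem_cgroup_write() (paths assumed).
 * res_counter_memparse_write_strategy() accepts plain byte counts as
 * well as K/M/G suffixes and "-1" for unlimited.
 *
 *   # echo 64M > /sys/fs/cgroup/memory/grp/memory.limit_in_bytes
 *   # echo 32M > /sys/fs/cgroup/memory/grp/memory.soft_limit_in_bytes
 */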
3927
3928static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
3929		unsigned long long *mem_limit, unsigned long long *memsw_limit)
3930{
3931	struct cgroup *cgroup;
3932	unsigned long long min_limit, min_memsw_limit, tmp;
3933
3934	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3935	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3936	cgroup = memcg->css.cgroup;
3937	if (!memcg->use_hierarchy)
3938		goto out;
3939
3940	while (cgroup->parent) {
3941		cgroup = cgroup->parent;
3942		memcg = mem_cgroup_from_cont(cgroup);
3943		if (!memcg->use_hierarchy)
3944			break;
3945		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
3946		min_limit = min(min_limit, tmp);
3947		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3948		min_memsw_limit = min(min_memsw_limit, tmp);
3949	}
3950out:
3951	*mem_limit = min_limit;
3952	*memsw_limit = min_memsw_limit;
3953}
3954
3955static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
3956{
3957	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3958	int type, name;
3959
3960	type = MEMFILE_TYPE(event);
3961	name = MEMFILE_ATTR(event);
3962
3963	if (!do_swap_account && type == _MEMSWAP)
3964		return -EOPNOTSUPP;
3965
3966	switch (name) {
3967	case RES_MAX_USAGE:
3968		if (type == _MEM)
3969			res_counter_reset_max(&memcg->res);
3970		else
3971			res_counter_reset_max(&memcg->memsw);
3972		break;
3973	case RES_FAILCNT:
3974		if (type == _MEM)
3975			res_counter_reset_failcnt(&memcg->res);
3976		else
3977			res_counter_reset_failcnt(&memcg->memsw);
3978		break;
3979	}
3980
3981	return 0;
3982}
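/*
 * Illustrative resets routed to mem_cgroup_reset() (paths assumed):
 *
 *   # echo 0 > /sys/fs/cgroup/memory/grp/memory.max_usage_in_bytes
 *   # echo 0 > /sys/fs/cgroup/memory/grp/memory.failcnt
 */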
 
 
3983
3984static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
3985					struct cftype *cft)
3986{
3987	return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
3988}
3989
3990#ifdef CONFIG_MMU
3991static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3992					struct cftype *cft, u64 val)
3993{
3994	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3995
3996	if (val >= (1 << NR_MOVE_TYPE))
3997		return -EINVAL;
3998	/*
3999	 * We check this value several times in both can_attach() and
4000	 * attach(), so we need the cgroup lock to prevent this value from
4001	 * becoming inconsistent.
4002	 */
4003	cgroup_lock();
4004	memcg->move_charge_at_immigrate = val;
4005	cgroup_unlock();
4006
4007	return 0;
 
4008}
4009#else
4010static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
4011					struct cftype *cft, u64 val)
4012{
4013	return -ENOSYS;
4014}
 
 
 
 
4015#endif
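/*
 * Illustrative usage (path assumed): move_charge_at_immigrate is a bitmask;
 * in this version bit 0 selects anonymous pages and bit 1 file pages, so
 * "3" moves both kinds of charge when a task migrates into the cgroup.
 *
 *   # echo 3 > /sys/fs/cgroup/memory/grp/memory.move_charge_at_immigrate
 */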
 
 
4016
4017#ifdef CONFIG_NUMA
4018static int mem_control_numa_stat_show(struct cgroup *cont, struct cftype *cft,
4019				      struct seq_file *m)
4020{
4021	int nid;
4022	unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
4023	unsigned long node_nr;
4024	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
4025
4026	total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
4027	seq_printf(m, "total=%lu", total_nr);
4028	for_each_node_state(nid, N_HIGH_MEMORY) {
4029		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL);
4030		seq_printf(m, " N%d=%lu", nid, node_nr);
4031	}
4032	seq_putc(m, '\n');
4033
4034	file_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE);
4035	seq_printf(m, "file=%lu", file_nr);
4036	for_each_node_state(nid, N_HIGH_MEMORY) {
4037		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
4038				LRU_ALL_FILE);
4039		seq_printf(m, " N%d=%lu", nid, node_nr);
4040	}
4041	seq_putc(m, '\n');
4042
4043	anon_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_ANON);
4044	seq_printf(m, "anon=%lu", anon_nr);
4045	for_each_node_state(nid, N_HIGH_MEMORY) {
4046		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
4047				LRU_ALL_ANON);
4048		seq_printf(m, " N%d=%lu", nid, node_nr);
4049	}
4050	seq_putc(m, '\n');
4051
4052	unevictable_nr = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
4053	seq_printf(m, "unevictable=%lu", unevictable_nr);
4054	for_each_node_state(nid, N_HIGH_MEMORY) {
4055		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
4056				BIT(LRU_UNEVICTABLE));
4057		seq_printf(m, " N%d=%lu", nid, node_nr);
4058	}
4059	seq_putc(m, '\n');
4060	return 0;
4061}
4062#endif /* CONFIG_NUMA */
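/*
 * Illustrative memory.numa_stat output produced by the handler above
 * (per-node counts are in pages; values are examples only):
 *
 *   total=1500 N0=1000 N1=500
 *   file=900 N0=600 N1=300
 *   anon=550 N0=380 N1=170
 *   unevictable=50 N0=20 N1=30
 */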
4063
4064static const char * const mem_cgroup_lru_names[] = {
4065	"inactive_anon",
4066	"active_anon",
4067	"inactive_file",
4068	"active_file",
4069	"unevictable",
4070};
4071
4072static inline void mem_cgroup_lru_names_not_uptodate(void)
4073{
4074	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
 
 
4075}
4076
4077static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
4078				 struct seq_file *m)
4079{
4080	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
4081	struct mem_cgroup *mi;
4082	unsigned int i;
4083
4084	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
4085		if (i == MEM_CGROUP_STAT_SWAPOUT && !do_swap_account)
4086			continue;
4087		seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
4088			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
4089	}
 
 
 
4090
4091	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
4092		seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
4093			   mem_cgroup_read_events(memcg, i));
4094
4095	for (i = 0; i < NR_LRU_LISTS; i++)
4096		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
4097			   mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
4098
4099	/* Hierarchical information */
4100	{
4101		unsigned long long limit, memsw_limit;
4102		memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
4103		seq_printf(m, "hierarchical_memory_limit %llu\n", limit);
4104		if (do_swap_account)
4105			seq_printf(m, "hierarchical_memsw_limit %llu\n",
4106				   memsw_limit);
4107	}
 
4108
4109	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
4110		long long val = 0;
4111
4112		if (i == MEM_CGROUP_STAT_SWAPOUT && !do_swap_account)
4113			continue;
4114		for_each_mem_cgroup_tree(mi, memcg)
4115			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
4116		seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
4117	}
4118
4119	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
4120		unsigned long long val = 0;
4121
4122		for_each_mem_cgroup_tree(mi, memcg)
4123			val += mem_cgroup_read_events(mi, i);
4124		seq_printf(m, "total_%s %llu\n",
4125			   mem_cgroup_events_names[i], val);
4126	}
4127
4128	for (i = 0; i < NR_LRU_LISTS; i++) {
4129		unsigned long long val = 0;
 
 
 
4130
4131		for_each_mem_cgroup_tree(mi, memcg)
4132			val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
4133		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
4134	}
4135
4136#ifdef CONFIG_DEBUG_VM
4137	{
4138		int nid, zid;
4139		struct mem_cgroup_per_zone *mz;
4140		struct zone_reclaim_stat *rstat;
4141		unsigned long recent_rotated[2] = {0, 0};
4142		unsigned long recent_scanned[2] = {0, 0};
4143
4144		for_each_online_node(nid)
4145			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
4146				mz = mem_cgroup_zoneinfo(memcg, nid, zid);
4147				rstat = &mz->lruvec.reclaim_stat;
4148
4149				recent_rotated[0] += rstat->recent_rotated[0];
4150				recent_rotated[1] += rstat->recent_rotated[1];
4151				recent_scanned[0] += rstat->recent_scanned[0];
4152				recent_scanned[1] += rstat->recent_scanned[1];
4153			}
4154		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
4155		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
4156		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
4157		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
4158	}
4159#endif
 
 
 
 
4160
4161	return 0;
4162}
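/*
 * Illustrative excerpt of the memory.stat output assembled above (local
 * counters first, then events, LRU sizes and hierarchical totals; values
 * are examples only, byte counters are page counts * PAGE_SIZE):
 *
 *   cache 1048576
 *   rss 2097152
 *   mapped_file 0
 *   pgpgin 1024
 *   pgpgout 256
 *   active_anon 2097152
 *   hierarchical_memory_limit 268435456
 *   total_cache 1048576
 *   total_rss 2097152
 */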
4163
4164static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
4165{
4166	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
 
 
 
 
4167
4168	return mem_cgroup_swappiness(memcg);
4169}
4170
4171static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
4172				       u64 val)
4173{
4174	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4175	struct mem_cgroup *parent;
4176
4177	if (val > 100)
4178		return -EINVAL;
 
4179
4180	if (cgrp->parent == NULL)
4181		return -EINVAL;
4182
4183	parent = mem_cgroup_from_cont(cgrp->parent);
4184
4185	cgroup_lock();
 
 
 
4186
4187	/* If under hierarchy, only empty-root can set this value */
4188	if ((parent->use_hierarchy) ||
4189	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
4190		cgroup_unlock();
4191		return -EINVAL;
4192	}
 
4193
4194	memcg->swappiness = val;
 
 
 
 
4195
4196	cgroup_unlock();
4197
4198	return 0;
4199}
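/*
 * Illustrative usage (path assumed): per-cgroup swappiness accepts 0-100
 * and, as checked above, can only be changed while the cgroup is not part
 * of an enforced hierarchy that has children.
 *
 *   # echo 30 > /sys/fs/cgroup/memory/grp/memory.swappiness
 */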
4200
4201static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
 
4202{
4203	struct mem_cgroup_threshold_ary *t;
4204	u64 usage;
4205	int i;
4206
4207	rcu_read_lock();
4208	if (!swap)
4209		t = rcu_dereference(memcg->thresholds.primary);
4210	else
4211		t = rcu_dereference(memcg->memsw_thresholds.primary);
4212
4213	if (!t)
4214		goto unlock;
4215
4216	usage = mem_cgroup_usage(memcg, swap);
 
 
 
4217
4218	/*
4219	 * current_threshold points to the threshold just below or equal to usage.
4220	 * If that is not the case, a threshold was crossed after the last
4221	 * call of __mem_cgroup_threshold().
4222	 */
4223	i = t->current_threshold;
4224
4225	/*
4226	 * Iterate backward over array of thresholds starting from
4227	 * current_threshold and check if a threshold is crossed.
4228	 * If none of thresholds below usage is crossed, we read
4229	 * only one element of the array here.
4230	 */
4231	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4232		eventfd_signal(t->entries[i].eventfd, 1);
4233
4234	/* i = current_threshold + 1 */
4235	i++;
 
4236
4237	/*
4238	 * Iterate forward over array of thresholds starting from
4239	 * current_threshold+1 and check if a threshold is crossed.
4240	 * If none of thresholds above usage is crossed, we read
4241	 * only one element of the array here.
4242	 */
4243	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4244		eventfd_signal(t->entries[i].eventfd, 1);
4245
4246	/* Update current_threshold */
4247	t->current_threshold = i - 1;
4248unlock:
4249	rcu_read_unlock();
 
 
4250}
4251
4252static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4253{
4254	while (memcg) {
4255		__mem_cgroup_threshold(memcg, false);
4256		if (do_swap_account)
4257			__mem_cgroup_threshold(memcg, true);
4258
4259		memcg = parent_mem_cgroup(memcg);
 
 
4260	}
 
 
 
4261}
4262
4263static int compare_thresholds(const void *a, const void *b)
 
 
4264{
4265	const struct mem_cgroup_threshold *_a = a;
4266	const struct mem_cgroup_threshold *_b = b;
4267
4268	return _a->threshold - _b->threshold;
4269}
4270
4271static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
 
4272{
4273	struct mem_cgroup_eventfd_list *ev;
4274
4275	list_for_each_entry(ev, &memcg->oom_notify, list)
4276		eventfd_signal(ev->eventfd, 1);
4277	return 0;
4278}
4279
4280static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4281{
4282	struct mem_cgroup *iter;
4283
4284	for_each_mem_cgroup_tree(iter, memcg)
4285		mem_cgroup_oom_notify_cb(iter);
 
 
4286}
4287
4288static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
4289	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4290{
4291	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4292	struct mem_cgroup_thresholds *thresholds;
4293	struct mem_cgroup_threshold_ary *new;
4294	int type = MEMFILE_TYPE(cft->private);
4295	u64 threshold, usage;
4296	int i, size, ret;
4297
4298	ret = res_counter_memparse_write_strategy(args, &threshold);
4299	if (ret)
4300		return ret;
 
4301
4302	mutex_lock(&memcg->thresholds_lock);
4303
4304	if (type == _MEM)
4305		thresholds = &memcg->thresholds;
4306	else if (type == _MEMSWAP)
4307		thresholds = &memcg->memsw_thresholds;
4308	else
4309		BUG();
4310
4311	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
 
 
 
 
4312
4313	/* Check if a threshold crossed before adding a new one */
4314	if (thresholds->primary)
4315		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
 
 
 
4316
4317	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
 
 
 
4318
4319	/* Allocate memory for new array of thresholds */
4320	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
4321			GFP_KERNEL);
4322	if (!new) {
4323		ret = -ENOMEM;
4324		goto unlock;
4325	}
4326	new->size = size;
4327
4328	/* Copy thresholds (if any) to new array */
4329	if (thresholds->primary) {
4330		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
4331				sizeof(struct mem_cgroup_threshold));
4332	}
4333
4334	/* Add new threshold */
4335	new->entries[size - 1].eventfd = eventfd;
4336	new->entries[size - 1].threshold = threshold;
 
 
4337
4338	/* Sort thresholds. Registering of new threshold isn't time-critical */
4339	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
4340			compare_thresholds, NULL);
4341
4342	/* Find current threshold */
4343	new->current_threshold = -1;
4344	for (i = 0; i < size; i++) {
4345		if (new->entries[i].threshold <= usage) {
4346			/*
4347			 * new->current_threshold will not be used until
4348			 * rcu_assign_pointer(), so it's safe to increment
4349			 * it here.
4350			 */
4351			++new->current_threshold;
4352		} else
 
4353			break;
4354	}
4355
4356	/* Free old spare buffer and save old primary buffer as spare */
4357	kfree(thresholds->spare);
4358	thresholds->spare = thresholds->primary;
4359
4360	rcu_assign_pointer(thresholds->primary, new);
 
 
 
 
4361
4362	/* To be sure that nobody uses thresholds */
4363	synchronize_rcu();
4364
4365unlock:
4366	mutex_unlock(&memcg->thresholds_lock);
 
4367
4368	return ret;
 
4369}
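/*
 * Illustrative userspace registration that ends up in
 * mem_cgroup_usage_register_event() (cgroup v1 event_control interface;
 * paths and error handling assumed/omitted):
 *
 *   int efd = eventfd(0, 0);
 *   int ufd = open("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes", O_RDONLY);
 *   char buf[64];
 *   int n = snprintf(buf, sizeof(buf), "%d %d %s", efd, ufd, "100M");
 *   int cfd = open("/sys/fs/cgroup/memory/grp/cgroup.event_control", O_WRONLY);
 *   write(cfd, buf, n);
 *   ...
 *   uint64_t cnt;
 *   read(efd, &cnt, sizeof(cnt));   read returns when a threshold is crossed
 */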
4370
4371static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
4372	struct cftype *cft, struct eventfd_ctx *eventfd)
4373{
4374	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4375	struct mem_cgroup_thresholds *thresholds;
4376	struct mem_cgroup_threshold_ary *new;
4377	int type = MEMFILE_TYPE(cft->private);
4378	u64 usage;
4379	int i, j, size;
4380
4381	mutex_lock(&memcg->thresholds_lock);
4382	if (type == _MEM)
4383		thresholds = &memcg->thresholds;
4384	else if (type == _MEMSWAP)
4385		thresholds = &memcg->memsw_thresholds;
4386	else
4387		BUG();
4388
4389	if (!thresholds->primary)
4390		goto unlock;
4391
4392	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
 
 
 
4393
4394	/* Check if a threshold crossed before removing */
4395	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4396
4397	/* Calculate new number of threshold */
4398	size = 0;
4399	for (i = 0; i < thresholds->primary->size; i++) {
4400		if (thresholds->primary->entries[i].eventfd != eventfd)
4401			size++;
4402	}
4403
4404	new = thresholds->spare;
4405
4406	/* Set thresholds array to NULL if we don't have thresholds */
4407	if (!size) {
4408		kfree(new);
4409		new = NULL;
4410		goto swap_buffers;
4411	}
4412
4413	new->size = size;
 
4414
4415	/* Copy thresholds and find current threshold */
4416	new->current_threshold = -1;
4417	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4418		if (thresholds->primary->entries[i].eventfd == eventfd)
4419			continue;
 
4420
4421		new->entries[j] = thresholds->primary->entries[i];
4422		if (new->entries[j].threshold <= usage) {
4423			/*
4424			 * new->current_threshold will not be used
4425			 * until rcu_assign_pointer(), so it's safe to increment
4426			 * it here.
4427			 */
4428			++new->current_threshold;
4429		}
4430		j++;
4431	}
4432
4433swap_buffers:
4434	/* Swap primary and spare array */
4435	thresholds->spare = thresholds->primary;
4436	/* If all events are unregistered, free the spare array */
4437	if (!new) {
4438		kfree(thresholds->spare);
4439		thresholds->spare = NULL;
4440	}
4441
4442	rcu_assign_pointer(thresholds->primary, new);
 
 
4443
4444	/* To be sure that nobody uses thresholds */
4445	synchronize_rcu();
4446unlock:
4447	mutex_unlock(&memcg->thresholds_lock);
4448}
4449
4450static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
4451	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4452{
4453	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4454	struct mem_cgroup_eventfd_list *event;
4455	int type = MEMFILE_TYPE(cft->private);
4456
4457	BUG_ON(type != _OOM_TYPE);
4458	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4459	if (!event)
4460		return -ENOMEM;
4461
4462	spin_lock(&memcg_oom_lock);
 
 
4463
4464	event->eventfd = eventfd;
4465	list_add(&event->list, &memcg->oom_notify);
 
4466
4467	/* already in OOM ? */
4468	if (atomic_read(&memcg->under_oom))
4469		eventfd_signal(eventfd, 1);
4470	spin_unlock(&memcg_oom_lock);
 
4471
4472	return 0;
4473}
4474
4475static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
4476	struct cftype *cft, struct eventfd_ctx *eventfd)
4477{
4478	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4479	struct mem_cgroup_eventfd_list *ev, *tmp;
4480	int type = MEMFILE_TYPE(cft->private);
 
4481
4482	BUG_ON(type != _OOM_TYPE);
 
4483
4484	spin_lock(&memcg_oom_lock);
 
4485
4486	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4487		if (ev->eventfd == eventfd) {
4488			list_del(&ev->list);
4489			kfree(ev);
4490		}
 
4491	}
4492
4493	spin_unlock(&memcg_oom_lock);
4494}
 
4495
4496static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
4497	struct cftype *cft,  struct cgroup_map_cb *cb)
4498{
4499	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4500
4501	cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable);
4502
4503	if (atomic_read(&memcg->under_oom))
4504		cb->fill(cb, "under_oom", 1);
4505	else
4506		cb->fill(cb, "under_oom", 0);
4507	return 0;
4508}
4509
4510static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
4511	struct cftype *cft, u64 val)
4512{
4513	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4514	struct mem_cgroup *parent;
4515
4516	/* cannot set to root cgroup and only 0 and 1 are allowed */
4517	if (!cgrp->parent || !((val == 0) || (val == 1)))
4518		return -EINVAL;
4519
4520	parent = mem_cgroup_from_cont(cgrp->parent);
 
 
4521
4522	cgroup_lock();
4523	/* oom-kill-disable is a flag for subhierarchy. */
4524	if ((parent->use_hierarchy) ||
4525	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
4526		cgroup_unlock();
4527		return -EINVAL;
4528	}
4529	memcg->oom_kill_disable = val;
4530	if (!val)
4531		memcg_oom_recover(memcg);
4532	cgroup_unlock();
4533	return 0;
4534}
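/*
 * Illustrative usage (path assumed): writing 1 to memory.oom_control
 * disables the OOM killer for the (sub)hierarchy, and reading the file
 * reports oom_kill_disable and under_oom as filled in above.
 *
 *   # echo 1 > /sys/fs/cgroup/memory/grp/memory.oom_control
 *   # cat /sys/fs/cgroup/memory/grp/memory.oom_control
 *   oom_kill_disable 1
 *   under_oom 0
 */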
4535
4536#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
4537static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
4538{
4539	return mem_cgroup_sockets_init(memcg, ss);
4540};
4541
4542static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
4543{
4544	mem_cgroup_sockets_destroy(memcg);
4545}
4546#else
4547static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
4548{
4549	return 0;
4550}
4551
4552static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
 
4553{
4554}
4555#endif
4556
4557static struct cftype mem_cgroup_files[] = {
4558	{
4559		.name = "usage_in_bytes",
4560		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4561		.read = mem_cgroup_read,
4562		.register_event = mem_cgroup_usage_register_event,
4563		.unregister_event = mem_cgroup_usage_unregister_event,
4564	},
4565	{
4566		.name = "max_usage_in_bytes",
4567		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4568		.trigger = mem_cgroup_reset,
4569		.read = mem_cgroup_read,
4570	},
4571	{
4572		.name = "limit_in_bytes",
4573		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4574		.write_string = mem_cgroup_write,
4575		.read = mem_cgroup_read,
 
 
4576	},
4577	{
4578		.name = "soft_limit_in_bytes",
4579		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4580		.write_string = mem_cgroup_write,
4581		.read = mem_cgroup_read,
4582	},
4583	{
4584		.name = "failcnt",
4585		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4586		.trigger = mem_cgroup_reset,
4587		.read = mem_cgroup_read,
4588	},
4589	{
4590		.name = "stat",
4591		.read_seq_string = mem_control_stat_show,
4592	},
4593	{
4594		.name = "force_empty",
4595		.trigger = mem_cgroup_force_empty_write,
4596	},
4597	{
4598		.name = "use_hierarchy",
4599		.write_u64 = mem_cgroup_hierarchy_write,
4600		.read_u64 = mem_cgroup_hierarchy_read,
 
4601	},
4602	{
4603		.name = "swappiness",
4604		.read_u64 = mem_cgroup_swappiness_read,
4605		.write_u64 = mem_cgroup_swappiness_write,
 
4606	},
4607	{
4608		.name = "move_charge_at_immigrate",
4609		.read_u64 = mem_cgroup_move_charge_read,
4610		.write_u64 = mem_cgroup_move_charge_write,
 
4611	},
4612	{
4613		.name = "oom_control",
4614		.read_map = mem_cgroup_oom_control_read,
4615		.write_u64 = mem_cgroup_oom_control_write,
4616		.register_event = mem_cgroup_oom_register_event,
4617		.unregister_event = mem_cgroup_oom_unregister_event,
4618		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4619	},
4620#ifdef CONFIG_NUMA
4621	{
4622		.name = "numa_stat",
4623		.read_seq_string = mem_control_numa_stat_show,
4624	},
4625#endif
4626#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4627	{
4628		.name = "memsw.usage_in_bytes",
4629		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
4630		.read = mem_cgroup_read,
4631		.register_event = mem_cgroup_usage_register_event,
4632		.unregister_event = mem_cgroup_usage_unregister_event,
4633	},
4634	{
4635		.name = "memsw.max_usage_in_bytes",
4636		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
4637		.trigger = mem_cgroup_reset,
4638		.read = mem_cgroup_read,
4639	},
4640	{
4641		.name = "memsw.limit_in_bytes",
4642		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
4643		.write_string = mem_cgroup_write,
4644		.read = mem_cgroup_read,
4645	},
4646	{
4647		.name = "memsw.failcnt",
4648		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
4649		.trigger = mem_cgroup_reset,
4650		.read = mem_cgroup_read,
4651	},
4652#endif
4653	{ },	/* terminate */
4654};
4655
4656static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4657{
4658	struct mem_cgroup_per_node *pn;
4659	struct mem_cgroup_per_zone *mz;
4660	int zone, tmp = node;
4661	/*
4662	 * This routine is called for each possible node.
4663	 * But it's a BUG to call kmalloc() against an offline node.
4664	 *
4665	 * TODO: this routine can waste much memory for nodes which will
4666	 *       never be onlined. It's better to use memory hotplug callback
4667	 *       function.
4668	 */
4669	if (!node_state(node, N_NORMAL_MEMORY))
4670		tmp = -1;
4671	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4672	if (!pn)
4673		return 1;
4674
4675	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4676		mz = &pn->zoneinfo[zone];
4677		lruvec_init(&mz->lruvec, &NODE_DATA(node)->node_zones[zone]);
4678		mz->usage_in_excess = 0;
4679		mz->on_tree = false;
4680		mz->memcg = memcg;
4681	}
4682	memcg->info.nodeinfo[node] = pn;
4683	return 0;
4684}
4685
4686static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 
4687{
4688	kfree(memcg->info.nodeinfo[node]);
4689}
4690
4691static struct mem_cgroup *mem_cgroup_alloc(void)
4692{
4693	struct mem_cgroup *memcg;
4694	int size = sizeof(struct mem_cgroup);
4695
4696	/* Can be very big if MAX_NUMNODES is very big */
4697	if (size < PAGE_SIZE)
4698		memcg = kzalloc(size, GFP_KERNEL);
4699	else
4700		memcg = vzalloc(size);
4701
4702	if (!memcg)
4703		return NULL;
4704
4705	memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4706	if (!memcg->stat)
4707		goto out_free;
4708	spin_lock_init(&memcg->pcp_counter_lock);
4709	return memcg;
4710
4711out_free:
4712	if (size < PAGE_SIZE)
4713		kfree(memcg);
4714	else
4715		vfree(memcg);
4716	return NULL;
4717}
4718
4719/*
4720 * Helpers for freeing a kmalloc()ed/vzalloc()ed mem_cgroup by RCU,
4721 * but in process context.  The work_freeing structure is overlaid
4722 * on the rcu_freeing structure, which itself is overlaid on memsw.
4723 */
4724static void free_work(struct work_struct *work)
 
4725{
4726	struct mem_cgroup *memcg;
4727	int size = sizeof(struct mem_cgroup);
4728
4729	memcg = container_of(work, struct mem_cgroup, work_freeing);
4730	/*
4731	 * We need to make sure that (at least for now), the jump label
4732	 * destruction code runs outside of the cgroup lock. This is because
4733	 * get_online_cpus(), which is called from the static_branch update,
4734	 * can't be called inside the cgroup_lock. cpusets are the ones
4735	 * enforcing this dependency, so if they ever change, we might as well.
4736	 *
4737	 * schedule_work() will guarantee this happens. Be careful if you need
4738	 * to move this code around, and make sure it is outside
4739	 * the cgroup_lock.
4740	 */
4741	disarm_sock_keys(memcg);
4742	if (size < PAGE_SIZE)
4743		kfree(memcg);
4744	else
4745		vfree(memcg);
4746}
4747
4748static void free_rcu(struct rcu_head *rcu_head)
4749{
4750	struct mem_cgroup *memcg;
4751
4752	memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
4753	INIT_WORK(&memcg->work_freeing, free_work);
4754	schedule_work(&memcg->work_freeing);
4755}
4756
4757/*
4758 * When destroying a mem_cgroup, references from swap_cgroup can remain.
4759 * (Scanning everything at force_empty is too costly...)
 
4760 *
4761 * Instead of clearing all references at force_empty, we remember
4762 * the number of reference from swap_cgroup and free mem_cgroup when
4763 * it goes down to 0.
4764 *
4765 * Removal of cgroup itself succeeds regardless of refs from swap.
 
4766 */
4767
4768static void __mem_cgroup_free(struct mem_cgroup *memcg)
4769{
4770	int node;
4771
4772	mem_cgroup_remove_from_trees(memcg);
4773	free_css_id(&mem_cgroup_subsys, &memcg->css);
4774
4775	for_each_node(node)
4776		free_mem_cgroup_per_zone_info(memcg, node);
4777
4778	free_percpu(memcg->stat);
4779	call_rcu(&memcg->rcu_freeing, free_rcu);
 
4780}
4781
4782static void mem_cgroup_get(struct mem_cgroup *memcg)
4783{
4784	atomic_inc(&memcg->refcnt);
4785}
4786
4787static void __mem_cgroup_put(struct mem_cgroup *memcg, int count)
4788{
4789	if (atomic_sub_and_test(count, &memcg->refcnt)) {
4790		struct mem_cgroup *parent = parent_mem_cgroup(memcg);
4791		__mem_cgroup_free(memcg);
4792		if (parent)
4793			mem_cgroup_put(parent);
4794	}
4795}
4796
4797static void mem_cgroup_put(struct mem_cgroup *memcg)
4798{
4799	__mem_cgroup_put(memcg, 1);
4800}
4801
4802/*
4803 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
4804 */
4805struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
4806{
4807	if (!memcg->res.parent)
4808		return NULL;
4809	return mem_cgroup_from_res_counter(memcg->res.parent, res);
4810}
4811EXPORT_SYMBOL(parent_mem_cgroup);
4812
4813#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4814static void __init enable_swap_cgroup(void)
4815{
4816	if (!mem_cgroup_disabled() && really_do_swap_account)
4817		do_swap_account = 1;
4818}
4819#else
4820static void __init enable_swap_cgroup(void)
4821{
4822}
4823#endif
4824
4825static int mem_cgroup_soft_limit_tree_init(void)
 
 
 
4826{
4827	struct mem_cgroup_tree_per_node *rtpn;
4828	struct mem_cgroup_tree_per_zone *rtpz;
4829	int tmp, node, zone;
4830
4831	for_each_node(node) {
4832		tmp = node;
4833		if (!node_state(node, N_NORMAL_MEMORY))
4834			tmp = -1;
4835		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
4836		if (!rtpn)
4837			goto err_cleanup;
4838
4839		soft_limit_tree.rb_tree_per_node[node] = rtpn;
4840
4841		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4842			rtpz = &rtpn->rb_tree_per_zone[zone];
4843			rtpz->rb_root = RB_ROOT;
4844			spin_lock_init(&rtpz->lock);
4845		}
4846	}
4847	return 0;
4848
4849err_cleanup:
4850	for_each_node(node) {
4851		if (!soft_limit_tree.rb_tree_per_node[node])
4852			break;
4853		kfree(soft_limit_tree.rb_tree_per_node[node]);
4854		soft_limit_tree.rb_tree_per_node[node] = NULL;
4855	}
4856	return 1;
4857
4858}
4859
4860static struct cgroup_subsys_state * __ref
4861mem_cgroup_create(struct cgroup *cont)
4862{
4863	struct mem_cgroup *memcg, *parent;
4864	long error = -ENOMEM;
4865	int node;
4866
4867	memcg = mem_cgroup_alloc();
4868	if (!memcg)
4869		return ERR_PTR(error);
4870
4871	for_each_node(node)
4872		if (alloc_mem_cgroup_per_zone_info(memcg, node))
4873			goto free_out;
4874
4875	/* root ? */
4876	if (cont->parent == NULL) {
4877		int cpu;
4878		enable_swap_cgroup();
4879		parent = NULL;
4880		if (mem_cgroup_soft_limit_tree_init())
4881			goto free_out;
4882		root_mem_cgroup = memcg;
4883		for_each_possible_cpu(cpu) {
4884			struct memcg_stock_pcp *stock =
4885						&per_cpu(memcg_stock, cpu);
4886			INIT_WORK(&stock->work, drain_local_stock);
4887		}
4888		hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
4889	} else {
4890		parent = mem_cgroup_from_cont(cont->parent);
4891		memcg->use_hierarchy = parent->use_hierarchy;
4892		memcg->oom_kill_disable = parent->oom_kill_disable;
4893	}
4894
4895	if (parent && parent->use_hierarchy) {
4896		res_counter_init(&memcg->res, &parent->res);
4897		res_counter_init(&memcg->memsw, &parent->memsw);
4898		/*
4899		 * We increment refcnt of the parent to ensure that we can
4900		 * safely access it on res_counter_charge/uncharge.
4901		 * This refcnt will be decremented when freeing this
4902		 * mem_cgroup (see mem_cgroup_put).
4903		 */
4904		mem_cgroup_get(parent);
4905	} else {
4906		res_counter_init(&memcg->res, NULL);
4907		res_counter_init(&memcg->memsw, NULL);
4908	}
4909	memcg->last_scanned_node = MAX_NUMNODES;
4910	INIT_LIST_HEAD(&memcg->oom_notify);
4911
4912	if (parent)
4913		memcg->swappiness = mem_cgroup_swappiness(parent);
4914	atomic_set(&memcg->refcnt, 1);
4915	memcg->move_charge_at_immigrate = 0;
4916	mutex_init(&memcg->thresholds_lock);
4917	spin_lock_init(&memcg->move_lock);
4918
4919	error = memcg_init_kmem(memcg, &mem_cgroup_subsys);
4920	if (error) {
4921		/*
4922		 * We call put now because our (and parent's) refcnts
4923		 * are already in place. mem_cgroup_put() will internally
4924		 * call __mem_cgroup_free, so return directly
4925		 */
4926		mem_cgroup_put(memcg);
4927		return ERR_PTR(error);
4928	}
4929	return &memcg->css;
4930free_out:
4931	__mem_cgroup_free(memcg);
4932	return ERR_PTR(error);
4933}
4934
4935static int mem_cgroup_pre_destroy(struct cgroup *cont)
4936{
4937	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
4938
4939	return mem_cgroup_force_empty(memcg, false);
4940}
 
4941
4942static void mem_cgroup_destroy(struct cgroup *cont)
4943{
4944	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
4945
4946	kmem_cgroup_destroy(memcg);
4947
4948	mem_cgroup_put(memcg);
4949}
 
4950
4951#ifdef CONFIG_MMU
4952/* Handlers for move charge at task migration. */
4953#define PRECHARGE_COUNT_AT_ONCE	256
4954static int mem_cgroup_do_precharge(unsigned long count)
4955{
4956	int ret = 0;
4957	int batch_count = PRECHARGE_COUNT_AT_ONCE;
4958	struct mem_cgroup *memcg = mc.to;
4959
4960	if (mem_cgroup_is_root(memcg)) {
4961		mc.precharge += count;
4962		/* we don't need css_get for root */
4963		return ret;
4964	}
4965	/* try to charge at once */
4966	if (count > 1) {
4967		struct res_counter *dummy;
4968		/*
4969		 * "memcg" cannot be under rmdir() because we've already checked
4970		 * by cgroup_lock_live_cgroup() that it is not removed and we
4971		 * are still under the same cgroup_mutex. So we can postpone
4972		 * css_get().
4973		 */
4974		if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
4975			goto one_by_one;
4976		if (do_swap_account && res_counter_charge(&memcg->memsw,
4977						PAGE_SIZE * count, &dummy)) {
4978			res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
4979			goto one_by_one;
4980		}
4981		mc.precharge += count;
4982		return ret;
4983	}
4984one_by_one:
4985	/* fall back to one by one charge */
4986	while (count--) {
4987		if (signal_pending(current)) {
4988			ret = -EINTR;
4989			break;
4990		}
4991		if (!batch_count--) {
4992			batch_count = PRECHARGE_COUNT_AT_ONCE;
4993			cond_resched();
4994		}
4995		ret = __mem_cgroup_try_charge(NULL,
4996					GFP_KERNEL, 1, &memcg, false);
4997		if (ret)
4998			/* mem_cgroup_clear_mc() will do uncharge later */
4999			return ret;
5000		mc.precharge++;
5001	}
5002	return ret;
5003}
5004
5005/**
5006 * get_mctgt_type - get target type of moving charge
5007 * @vma: the vma that the pte to be checked belongs to
5008 * @addr: the address corresponding to the pte to be checked
5009 * @ptent: the pte to be checked
5010 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
5011 *
5012 * Returns
5013 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
5014 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5015 *     move charge. if @target is not NULL, the page is stored in target->page
5016 *     with an extra refcount taken (callers should handle it).
5017 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5018 *     target for charge migration. if @target is not NULL, the entry is stored
5019 *     in target->ent.
5020 *
5021 * Called with pte lock held.
5022 */
5023union mc_target {
5024	struct page	*page;
5025	swp_entry_t	ent;
5026};
5027
5028enum mc_target_type {
5029	MC_TARGET_NONE = 0,
5030	MC_TARGET_PAGE,
5031	MC_TARGET_SWAP,
5032};
5033
5034static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5035						unsigned long addr, pte_t ptent)
5036{
5037	struct page *page = vm_normal_page(vma, addr, ptent);
5038
5039	if (!page || !page_mapped(page))
5040		return NULL;
5041	if (PageAnon(page)) {
5042		/* we don't move shared anon */
5043		if (!move_anon())
5044			return NULL;
5045	} else if (!move_file())
5046		/* we ignore mapcount for file pages */
5047		return NULL;
5048	if (!get_page_unless_zero(page))
5049		return NULL;
5050
5051	return page;
5052}
5053
5054#ifdef CONFIG_SWAP
5055static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5056			unsigned long addr, pte_t ptent, swp_entry_t *entry)
5057{
5058	struct page *page = NULL;
5059	swp_entry_t ent = pte_to_swp_entry(ptent);
5060
5061	if (!move_anon() || non_swap_entry(ent))
5062		return NULL;
5063	/*
5064	 * Because lookup_swap_cache() updates some statistics counter,
5065	 * we call find_get_page() with swapper_space directly.
5066	 */
5067	page = find_get_page(&swapper_space, ent.val);
5068	if (do_swap_account)
5069		entry->val = ent.val;
5070
5071	return page;
5072}
5073#else
5074static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5075			unsigned long addr, pte_t ptent, swp_entry_t *entry)
5076{
5077	return NULL;
5078}
5079#endif
5080
5081static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5082			unsigned long addr, pte_t ptent, swp_entry_t *entry)
5083{
5084	struct page *page = NULL;
5085	struct address_space *mapping;
5086	pgoff_t pgoff;
 
5087
5088	if (!vma->vm_file) /* anonymous vma */
5089		return NULL;
5090	if (!move_file())
5091		return NULL;
5092
5093	mapping = vma->vm_file->f_mapping;
5094	if (pte_none(ptent))
5095		pgoff = linear_page_index(vma, addr);
5096	else /* pte_file(ptent) is true */
5097		pgoff = pte_to_pgoff(ptent);
5098
5099	/* page is moved even if it's not RSS of this task(page-faulted). */
5100	page = find_get_page(mapping, pgoff);
 
5101
5102#ifdef CONFIG_SWAP
5103	/* shmem/tmpfs may report page out on swap: account for that too. */
5104	if (radix_tree_exceptional_entry(page)) {
5105		swp_entry_t swap = radix_to_swp_entry(page);
5106		if (do_swap_account)
5107			*entry = swap;
5108		page = find_get_page(&swapper_space, swap.val);
5109	}
5110#endif
5111	return page;
5112}
5113
5114static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5115		unsigned long addr, pte_t ptent, union mc_target *target)
5116{
5117	struct page *page = NULL;
5118	struct page_cgroup *pc;
5119	enum mc_target_type ret = MC_TARGET_NONE;
5120	swp_entry_t ent = { .val = 0 };
5121
5122	if (pte_present(ptent))
5123		page = mc_handle_present_pte(vma, addr, ptent);
5124	else if (is_swap_pte(ptent))
5125		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
5126	else if (pte_none(ptent) || pte_file(ptent))
5127		page = mc_handle_file_pte(vma, addr, ptent, &ent);
5128
5129	if (!page && !ent.val)
5130		return ret;
5131	if (page) {
5132		pc = lookup_page_cgroup(page);
5133		/*
5134		 * Do only a loose check without the page_cgroup lock;
5135		 * mem_cgroup_move_account() checks whether the pc is valid
5136		 * under the lock.
5137		 */
5138		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
5139			ret = MC_TARGET_PAGE;
5140			if (target)
5141				target->page = page;
5142		}
5143		if (!ret || !target)
5144			put_page(page);
5145	}
5146	/* There is a swap entry and a page doesn't exist or isn't charged */
5147	if (ent.val && !ret &&
5148			css_id(&mc.from->css) == lookup_swap_cgroup_id(ent)) {
5149		ret = MC_TARGET_SWAP;
5150		if (target)
5151			target->ent = ent;
5152	}
5153	return ret;
5154}
5155
5156#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5157/*
5158 * We don't consider swapping or file mapped pages because THP does not
5159 * support them for now.
5160 * Caller should make sure that pmd_trans_huge(pmd) is true.
5161 */
5162static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5163		unsigned long addr, pmd_t pmd, union mc_target *target)
5164{
5165	struct page *page = NULL;
5166	struct page_cgroup *pc;
5167	enum mc_target_type ret = MC_TARGET_NONE;
5168
5169	page = pmd_page(pmd);
5170	VM_BUG_ON(!page || !PageHead(page));
5171	if (!move_anon())
5172		return ret;
5173	pc = lookup_page_cgroup(page);
5174	if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
5175		ret = MC_TARGET_PAGE;
5176		if (target) {
5177			get_page(page);
5178			target->page = page;
5179		}
 
 
5180	}
5181	return ret;
5182}
5183#else
5184static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5185		unsigned long addr, pmd_t pmd, union mc_target *target)
5186{
5187	return MC_TARGET_NONE;
5188}
5189#endif
5190
5191static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5192					unsigned long addr, unsigned long end,
5193					struct mm_walk *walk)
5194{
5195	struct vm_area_struct *vma = walk->private;
5196	pte_t *pte;
5197	spinlock_t *ptl;
5198
5199	if (pmd_trans_huge_lock(pmd, vma) == 1) {
5200		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5201			mc.precharge += HPAGE_PMD_NR;
5202		spin_unlock(&vma->vm_mm->page_table_lock);
5203		return 0;
5204	}
5205
5206	if (pmd_trans_unstable(pmd))
5207		return 0;
5208	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5209	for (; addr != end; pte++, addr += PAGE_SIZE)
5210		if (get_mctgt_type(vma, addr, *pte, NULL))
5211			mc.precharge++;	/* increment precharge temporarily */
5212	pte_unmap_unlock(pte - 1, ptl);
5213	cond_resched();
5214
5215	return 0;
5216}
5217
5218static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5219{
5220	unsigned long precharge;
5221	struct vm_area_struct *vma;
5222
5223	down_read(&mm->mmap_sem);
5224	for (vma = mm->mmap; vma; vma = vma->vm_next) {
5225		struct mm_walk mem_cgroup_count_precharge_walk = {
5226			.pmd_entry = mem_cgroup_count_precharge_pte_range,
5227			.mm = mm,
5228			.private = vma,
5229		};
5230		if (is_vm_hugetlb_page(vma))
5231			continue;
5232		walk_page_range(vma->vm_start, vma->vm_end,
5233					&mem_cgroup_count_precharge_walk);
5234	}
5235	up_read(&mm->mmap_sem);
5236
5237	precharge = mc.precharge;
5238	mc.precharge = 0;
 
 
5239
5240	return precharge;
5241}
5242
5243static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5244{
5245	unsigned long precharge = mem_cgroup_count_precharge(mm);
5246
5247	VM_BUG_ON(mc.moving_task);
5248	mc.moving_task = current;
5249	return mem_cgroup_do_precharge(precharge);
5250}
5251
5252/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5253static void __mem_cgroup_clear_mc(void)
5254{
5255	struct mem_cgroup *from = mc.from;
5256	struct mem_cgroup *to = mc.to;
5257
5258	/* we must uncharge all the leftover precharges from mc.to */
5259	if (mc.precharge) {
5260		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
5261		mc.precharge = 0;
5262	}
5263	/*
5264	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5265	 * we must uncharge here.
5266	 */
5267	if (mc.moved_charge) {
5268		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
5269		mc.moved_charge = 0;
5270	}
5271	/* we must fixup refcnts and charges */
5272	if (mc.moved_swap) {
5273		/* uncharge swap account from the old cgroup */
5274		if (!mem_cgroup_is_root(mc.from))
5275			res_counter_uncharge(&mc.from->memsw,
5276						PAGE_SIZE * mc.moved_swap);
5277		__mem_cgroup_put(mc.from, mc.moved_swap);
5278
5279		if (!mem_cgroup_is_root(mc.to)) {
5280			/*
5281			 * we charged both to->res and to->memsw, so we should
5282			 * uncharge to->res.
5283			 */
5284			res_counter_uncharge(&mc.to->res,
5285						PAGE_SIZE * mc.moved_swap);
5286		}
5287		/* we've already done mem_cgroup_get(mc.to) */
5288		mc.moved_swap = 0;
5289	}
5290	memcg_oom_recover(from);
5291	memcg_oom_recover(to);
5292	wake_up_all(&mc.waitq);
5293}
5294
5295static void mem_cgroup_clear_mc(void)
 
5296{
5297	struct mem_cgroup *from = mc.from;
 
 
5298
5299	/*
5300	 * we must clear moving_task before waking up waiters at the end of
5301	 * task migration.
5302	 */
5303	mc.moving_task = NULL;
5304	__mem_cgroup_clear_mc();
5305	spin_lock(&mc.lock);
5306	mc.from = NULL;
5307	mc.to = NULL;
5308	spin_unlock(&mc.lock);
5309	mem_cgroup_end_move(from);
5310}
5311
5312static int mem_cgroup_can_attach(struct cgroup *cgroup,
5313				 struct cgroup_taskset *tset)
5314{
5315	struct task_struct *p = cgroup_taskset_first(tset);
5316	int ret = 0;
5317	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);
5318
5319	if (memcg->move_charge_at_immigrate) {
5320		struct mm_struct *mm;
5321		struct mem_cgroup *from = mem_cgroup_from_task(p);
 
 
 
5322
5323		VM_BUG_ON(from == memcg);
 
 
 
5324
5325		mm = get_task_mm(p);
5326		if (!mm)
5327			return 0;
5328		/* We move charges only when we move an owner of the mm */
5329		if (mm->owner == p) {
5330			VM_BUG_ON(mc.from);
5331			VM_BUG_ON(mc.to);
5332			VM_BUG_ON(mc.precharge);
5333			VM_BUG_ON(mc.moved_charge);
5334			VM_BUG_ON(mc.moved_swap);
5335			mem_cgroup_start_move(from);
5336			spin_lock(&mc.lock);
5337			mc.from = from;
5338			mc.to = memcg;
5339			spin_unlock(&mc.lock);
5340			/* We set mc.moving_task later */
5341
5342			ret = mem_cgroup_precharge_mc(mm);
5343			if (ret)
5344				mem_cgroup_clear_mc();
5345		}
5346		mmput(mm);
5347	}
5348	return ret;
5349}
5350
5351static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
5352				     struct cgroup_taskset *tset)
5353{
5354	mem_cgroup_clear_mc();
5355}
5356
5357static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5358				unsigned long addr, unsigned long end,
5359				struct mm_walk *walk)
5360{
5361	int ret = 0;
5362	struct vm_area_struct *vma = walk->private;
5363	pte_t *pte;
5364	spinlock_t *ptl;
5365	enum mc_target_type target_type;
5366	union mc_target target;
5367	struct page *page;
5368	struct page_cgroup *pc;
5369
5370	/*
5371	 * We don't take compound_lock() here but no race with splitting thp
5372	 * happens because:
5373	 *  - if pmd_trans_huge_lock() returns 1, the relevant thp is not
5374	 *    under splitting, which means there's no concurrent thp split,
5375	 *  - if another thread runs into split_huge_page() just after we
5376	 *    entered this if-block, the thread must wait for page table lock
5377	 *    to be unlocked in __split_huge_page_splitting(), where the main
5378	 *    part of thp split is not executed yet.
5379	 */
5380	if (pmd_trans_huge_lock(pmd, vma) == 1) {
5381		if (mc.precharge < HPAGE_PMD_NR) {
5382			spin_unlock(&vma->vm_mm->page_table_lock);
5383			return 0;
5384		}
5385		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
5386		if (target_type == MC_TARGET_PAGE) {
5387			page = target.page;
5388			if (!isolate_lru_page(page)) {
5389				pc = lookup_page_cgroup(page);
5390				if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
5391							pc, mc.from, mc.to)) {
5392					mc.precharge -= HPAGE_PMD_NR;
5393					mc.moved_charge += HPAGE_PMD_NR;
5394				}
5395				putback_lru_page(page);
5396			}
5397			put_page(page);
5398		}
5399		spin_unlock(&vma->vm_mm->page_table_lock);
5400		return 0;
5401	}
5402
5403	if (pmd_trans_unstable(pmd))
5404		return 0;
5405retry:
5406	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5407	for (; addr != end; addr += PAGE_SIZE) {
5408		pte_t ptent = *(pte++);
5409		swp_entry_t ent;
5410
5411		if (!mc.precharge)
5412			break;
 
 
 
5413
5414		switch (get_mctgt_type(vma, addr, ptent, &target)) {
5415		case MC_TARGET_PAGE:
5416			page = target.page;
5417			if (isolate_lru_page(page))
5418				goto put;
5419			pc = lookup_page_cgroup(page);
5420			if (!mem_cgroup_move_account(page, 1, pc,
5421						     mc.from, mc.to)) {
5422				mc.precharge--;
5423				/* we uncharge from mc.from later. */
5424				mc.moved_charge++;
5425			}
5426			putback_lru_page(page);
5427put:			/* get_mctgt_type() gets the page */
5428			put_page(page);
5429			break;
5430		case MC_TARGET_SWAP:
5431			ent = target.ent;
5432			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
5433				mc.precharge--;
5434				/* we fixup refcnts and charges later. */
5435				mc.moved_swap++;
5436			}
5437			break;
5438		default:
5439			break;
5440		}
5441	}
5442	pte_unmap_unlock(pte - 1, ptl);
5443	cond_resched();
5444
5445	if (addr != end) {
5446		/*
5447		 * We have consumed all precharges we got in can_attach().
5448		 * We try to charge one by one, but don't do any additional
5449		 * charges to mc.to if we have already failed to charge once in
5450		 * the attach() phase.
5451		 */
5452		ret = mem_cgroup_do_precharge(1);
5453		if (!ret)
5454			goto retry;
5455	}
5456
5457	return ret;
5458}
5459
5460static void mem_cgroup_move_charge(struct mm_struct *mm)
5461{
5462	struct vm_area_struct *vma;
5463
5464	lru_add_drain_all();
5465retry:
5466	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
5467		/*
5468		 * Someone who is holding the mmap_sem might be waiting on the
5469		 * waitq. So we cancel all extra charges, wake up all waiters,
5470		 * and retry. Because we cancel precharges, we might not be able
5471		 * to move enough charges, but moving charge is a best-effort
5472		 * feature anyway, so it wouldn't be a big problem.
5473		 */
5474		__mem_cgroup_clear_mc();
5475		cond_resched();
5476		goto retry;
5477	}
5478	for (vma = mm->mmap; vma; vma = vma->vm_next) {
5479		int ret;
5480		struct mm_walk mem_cgroup_move_charge_walk = {
5481			.pmd_entry = mem_cgroup_move_charge_pte_range,
5482			.mm = mm,
5483			.private = vma,
5484		};
5485		if (is_vm_hugetlb_page(vma))
5486			continue;
5487		ret = walk_page_range(vma->vm_start, vma->vm_end,
5488						&mem_cgroup_move_charge_walk);
5489		if (ret)
5490			/*
5491			 * This means we have consumed all precharges and failed
5492			 * to do an additional charge. Just abandon here.
5493			 */
5494			break;
5495	}
5496	up_read(&mm->mmap_sem);
5497}
5498
5499static void mem_cgroup_move_task(struct cgroup *cont,
5500				 struct cgroup_taskset *tset)
5501{
5502	struct task_struct *p = cgroup_taskset_first(tset);
5503	struct mm_struct *mm = get_task_mm(p);
5504
5505	if (mm) {
5506		if (mc.to)
5507			mem_cgroup_move_charge(mm);
5508		mmput(mm);
5509	}
5510	if (mc.to)
5511		mem_cgroup_clear_mc();
5512}
5513#else	/* !CONFIG_MMU */
5514static int mem_cgroup_can_attach(struct cgroup *cgroup,
5515				 struct cgroup_taskset *tset)
5516{
5517	return 0;
5518}
5519static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
5520				     struct cgroup_taskset *tset)
 
5521{
5522}
5523static void mem_cgroup_move_task(struct cgroup *cont,
5524				 struct cgroup_taskset *tset)
5525{
 
 
5526}
5527#endif
5528
5529struct cgroup_subsys mem_cgroup_subsys = {
5530	.name = "memory",
5531	.subsys_id = mem_cgroup_subsys_id,
5532	.create = mem_cgroup_create,
5533	.pre_destroy = mem_cgroup_pre_destroy,
5534	.destroy = mem_cgroup_destroy,
5535	.can_attach = mem_cgroup_can_attach,
5536	.cancel_attach = mem_cgroup_cancel_attach,
5537	.attach = mem_cgroup_move_task,
5538	.base_cftypes = mem_cgroup_files,
5539	.early_init = 0,
5540	.use_id = 1,
5541	.__DEPRECATED_clear_css_refs = true,
5542};
5543
5544#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
5545static int __init enable_swap_account(char *s)
5546{
5547	/* consider enabled if no parameter or 1 is given */
5548	if (!strcmp(s, "1"))
5549		really_do_swap_account = 1;
5550	else if (!strcmp(s, "0"))
5551		really_do_swap_account = 0;
5552	return 1;
5553}
5554__setup("swapaccount=", enable_swap_account);
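/*
 * Usage example (editor's note, a sketch): with
 * CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED=y, booting with "swapaccount=0"
 * on the kernel command line clears really_do_swap_account and disables
 * swap accounting, while "swapaccount=1" forces it on even when the
 * _ENABLED option is not set.
 */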
5555
5556#endif
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* memcontrol.c - Memory Controller
   3 *
   4 * Copyright IBM Corporation, 2007
   5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
   6 *
   7 * Copyright 2007 OpenVZ SWsoft Inc
   8 * Author: Pavel Emelianov <xemul@openvz.org>
   9 *
  10 * Memory thresholds
  11 * Copyright (C) 2009 Nokia Corporation
  12 * Author: Kirill A. Shutemov
  13 *
  14 * Kernel Memory Controller
  15 * Copyright (C) 2012 Parallels Inc. and Google Inc.
  16 * Authors: Glauber Costa and Suleiman Souhlal
  17 *
  18 * Native page reclaim
  19 * Charge lifetime sanitation
  20 * Lockless page tracking & accounting
  21 * Unified hierarchy configuration model
  22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
  23 *
  24 * Per memcg lru locking
  25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
  26 */
  27
  28#include <linux/cgroup-defs.h>
  29#include <linux/page_counter.h>
  30#include <linux/memcontrol.h>
  31#include <linux/cgroup.h>
  32#include <linux/sched/mm.h>
  33#include <linux/shmem_fs.h>
  34#include <linux/hugetlb.h>
  35#include <linux/pagemap.h>
  36#include <linux/pagevec.h>
  37#include <linux/vm_event_item.h>
  38#include <linux/smp.h>
  39#include <linux/page-flags.h>
  40#include <linux/backing-dev.h>
  41#include <linux/bit_spinlock.h>
  42#include <linux/rcupdate.h>
  43#include <linux/limits.h>
  44#include <linux/export.h>
  45#include <linux/list.h>
  46#include <linux/mutex.h>
  47#include <linux/rbtree.h>
  48#include <linux/slab.h>
 
  49#include <linux/swapops.h>
  50#include <linux/spinlock.h>
 
 
  51#include <linux/fs.h>
  52#include <linux/seq_file.h>
  53#include <linux/parser.h>
  54#include <linux/vmpressure.h>
  55#include <linux/memremap.h>
  56#include <linux/mm_inline.h>
  57#include <linux/swap_cgroup.h>
  58#include <linux/cpu.h>
  59#include <linux/oom.h>
  60#include <linux/lockdep.h>
  61#include <linux/resume_user_mode.h>
  62#include <linux/psi.h>
  63#include <linux/seq_buf.h>
  64#include <linux/sched/isolation.h>
  65#include <linux/kmemleak.h>
  66#include "internal.h"
  67#include <net/sock.h>
  68#include <net/ip.h>
  69#include "slab.h"
  70#include "memcontrol-v1.h"
  71
  72#include <linux/uaccess.h>
  73
  74#define CREATE_TRACE_POINTS
  75#include <trace/events/memcg.h>
  76#undef CREATE_TRACE_POINTS
  77
  78#include <trace/events/vmscan.h>
  79
  80struct cgroup_subsys memory_cgrp_subsys __read_mostly;
  81EXPORT_SYMBOL(memory_cgrp_subsys);
  82
  83struct mem_cgroup *root_mem_cgroup __read_mostly;
 
 
  84
  85/* Active memory cgroup to use from an interrupt context */
  86DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
  87EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
  88
  89/* Socket memory accounting disabled? */
  90static bool cgroup_memory_nosocket __ro_after_init;
  91
  92/* Kernel memory accounting disabled? */
  93static bool cgroup_memory_nokmem __ro_after_init;
  94
  95/* BPF memory accounting disabled? */
  96static bool cgroup_memory_nobpf __ro_after_init;
  97
  98#ifdef CONFIG_CGROUP_WRITEBACK
  99static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
 100#endif
 101
 102static inline bool task_is_dying(void)
 103{
 104	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
 105		(current->flags & PF_EXITING);
 106}
 107
 108/* Some nice accessors for the vmpressure. */
 109struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
 110{
 111	if (!memcg)
 112		memcg = root_mem_cgroup;
 113	return &memcg->vmpressure;
 114}
 115
 116struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
 117{
 118	return container_of(vmpr, struct mem_cgroup, vmpressure);
 119}
 120
 121#define SEQ_BUF_SIZE SZ_4K
 122#define CURRENT_OBJCG_UPDATE_BIT 0
 123#define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)
 124
 125static DEFINE_SPINLOCK(objcg_lock);
 
 
 126
 127bool mem_cgroup_kmem_disabled(void)
 128{
 129	return cgroup_memory_nokmem;
 130}
 131
 132static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
 133				      unsigned int nr_pages);
 
 
 134
 135static void obj_cgroup_release(struct percpu_ref *ref)
 136{
 137	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
 138	unsigned int nr_bytes;
 139	unsigned int nr_pages;
 140	unsigned long flags;
 141
 142	/*
 143	 * At this point all allocated objects are freed, and
 144	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
 145	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
 146	 *
 147	 * The following sequence can lead to it:
 148	 * 1) CPU0: objcg == stock->cached_objcg
 149	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
 150	 *          PAGE_SIZE bytes are charged
  151	 * 3) CPU1: a process from another memcg is allocating something,
  152	 *          the stock is flushed,
  153	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
  154	 * 4) CPU0: we release this object,
  155	 *          92 bytes are added to stock->nr_bytes
  156	 * 5) CPU0: stock is flushed,
  157	 *          92 bytes are added to objcg->nr_charged_bytes
  158	 *
  159	 * As a result, nr_charged_bytes == PAGE_SIZE.
 160	 * This page will be uncharged in obj_cgroup_release().
 161	 */
 162	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
 163	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
 164	nr_pages = nr_bytes >> PAGE_SHIFT;
 165
 166	if (nr_pages)
 167		obj_cgroup_uncharge_pages(objcg, nr_pages);
 168
 169	spin_lock_irqsave(&objcg_lock, flags);
 170	list_del(&objcg->list);
 171	spin_unlock_irqrestore(&objcg_lock, flags);
 
 172
 173	percpu_ref_exit(ref);
 174	kfree_rcu(objcg, rcu);
 175}
 176
 177static struct obj_cgroup *obj_cgroup_alloc(void)
 178{
 179	struct obj_cgroup *objcg;
 180	int ret;
 181
 182	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
 183	if (!objcg)
 184		return NULL;
 
 
 185
 186	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
 187			      GFP_KERNEL);
 188	if (ret) {
 189		kfree(objcg);
 190		return NULL;
 191	}
 192	INIT_LIST_HEAD(&objcg->list);
 193	return objcg;
 194}
 195
 196static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
 197				  struct mem_cgroup *parent)
 198{
 199	struct obj_cgroup *objcg, *iter;
 200
 201	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
 202
 203	spin_lock_irq(&objcg_lock);
 204
 205	/* 1) Ready to reparent active objcg. */
 206	list_add(&objcg->list, &memcg->objcg_list);
 207	/* 2) Reparent active objcg and already reparented objcgs to parent. */
 208	list_for_each_entry(iter, &memcg->objcg_list, list)
 209		WRITE_ONCE(iter->memcg, parent);
 210	/* 3) Move already reparented objcgs to the parent's list */
 211	list_splice(&memcg->objcg_list, &parent->objcg_list);
 212
 213	spin_unlock_irq(&objcg_lock);
 
 214
 215	percpu_ref_kill(&objcg->refcnt);
 216}
 217
 218/*
 219 * A lot of the calls to the cache allocation functions are expected to be
  220 * inlined by the compiler. Since the calls to memcg_slab_post_alloc_hook() are
  221 * conditional on this static branch, we have to allow modules that do
  222 * kmem_cache_alloc and the like to see this symbol as well.
 223 */
 224DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
 225EXPORT_SYMBOL(memcg_kmem_online_key);
 226
 227DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
 228EXPORT_SYMBOL(memcg_bpf_enabled_key);
 229
 230/**
 231 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
 232 * @folio: folio of interest
 233 *
 234 * If memcg is bound to the default hierarchy, css of the memcg associated
 235 * with @folio is returned.  The returned css remains associated with @folio
 236 * until it is released.
 237 *
 238 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 239 * is returned.
 240 */
 241struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
 242{
 243	struct mem_cgroup *memcg = folio_memcg(folio);
 244
 245	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
 246		memcg = root_mem_cgroup;
 247
 248	return &memcg->css;
 249}
 250
 251/**
 252 * page_cgroup_ino - return inode number of the memcg a page is charged to
 253 * @page: the page
 254 *
 255 * Look up the closest online ancestor of the memory cgroup @page is charged to
 256 * and return its inode number or 0 if @page is not charged to any cgroup. It
 257 * is safe to call this function without holding a reference to @page.
 258 *
 259 * Note, this function is inherently racy, because there is nothing to prevent
 260 * the cgroup inode from getting torn down and potentially reallocated a moment
 261 * after page_cgroup_ino() returns, so it only should be used by callers that
 262 * do not care (such as procfs interfaces).
 263 */
 264ino_t page_cgroup_ino(struct page *page)
 265{
 266	struct mem_cgroup *memcg;
 267	unsigned long ino = 0;
 268
 269	rcu_read_lock();
 270	/* page_folio() is racy here, but the entire function is racy anyway */
 271	memcg = folio_memcg_check(page_folio(page));
 272
 273	while (memcg && !(memcg->css.flags & CSS_ONLINE))
 274		memcg = parent_mem_cgroup(memcg);
 275	if (memcg)
 276		ino = cgroup_ino(memcg->css.cgroup);
 277	rcu_read_unlock();
 278	return ino;
 279}
 280
 281/* Subset of node_stat_item for memcg stats */
 282static const unsigned int memcg_node_stat_items[] = {
 283	NR_INACTIVE_ANON,
 284	NR_ACTIVE_ANON,
 285	NR_INACTIVE_FILE,
 286	NR_ACTIVE_FILE,
 287	NR_UNEVICTABLE,
 288	NR_SLAB_RECLAIMABLE_B,
 289	NR_SLAB_UNRECLAIMABLE_B,
 290	WORKINGSET_REFAULT_ANON,
 291	WORKINGSET_REFAULT_FILE,
 292	WORKINGSET_ACTIVATE_ANON,
 293	WORKINGSET_ACTIVATE_FILE,
 294	WORKINGSET_RESTORE_ANON,
 295	WORKINGSET_RESTORE_FILE,
 296	WORKINGSET_NODERECLAIM,
 297	NR_ANON_MAPPED,
 298	NR_FILE_MAPPED,
 299	NR_FILE_PAGES,
 300	NR_FILE_DIRTY,
 301	NR_WRITEBACK,
 302	NR_SHMEM,
 303	NR_SHMEM_THPS,
 304	NR_FILE_THPS,
 305	NR_ANON_THPS,
 306	NR_KERNEL_STACK_KB,
 307	NR_PAGETABLE,
 308	NR_SECONDARY_PAGETABLE,
 309#ifdef CONFIG_SWAP
 310	NR_SWAPCACHE,
 311#endif
 312#ifdef CONFIG_NUMA_BALANCING
 313	PGPROMOTE_SUCCESS,
 314#endif
 315	PGDEMOTE_KSWAPD,
 316	PGDEMOTE_DIRECT,
 317	PGDEMOTE_KHUGEPAGED,
 318#ifdef CONFIG_HUGETLB_PAGE
 319	NR_HUGETLB,
 320#endif
 321};
 322
 323static const unsigned int memcg_stat_items[] = {
 324	MEMCG_SWAP,
 325	MEMCG_SOCK,
 326	MEMCG_PERCPU_B,
 327	MEMCG_VMALLOC,
 328	MEMCG_KMEM,
 329	MEMCG_ZSWAP_B,
 330	MEMCG_ZSWAPPED,
 
 331};
 332
 333#define NR_MEMCG_NODE_STAT_ITEMS ARRAY_SIZE(memcg_node_stat_items)
 334#define MEMCG_VMSTAT_SIZE (NR_MEMCG_NODE_STAT_ITEMS + \
 335			   ARRAY_SIZE(memcg_stat_items))
 336#define BAD_STAT_IDX(index) ((u32)(index) >= U8_MAX)
 337static u8 mem_cgroup_stats_index[MEMCG_NR_STAT] __read_mostly;
 338
 339static void init_memcg_stats(void)
 340{
 341	u8 i, j = 0;
 342
 343	BUILD_BUG_ON(MEMCG_NR_STAT >= U8_MAX);
 344
 345	memset(mem_cgroup_stats_index, U8_MAX, sizeof(mem_cgroup_stats_index));
 346
 347	for (i = 0; i < NR_MEMCG_NODE_STAT_ITEMS; ++i, ++j)
 348		mem_cgroup_stats_index[memcg_node_stat_items[i]] = j;
 349
 350	for (i = 0; i < ARRAY_SIZE(memcg_stat_items); ++i, ++j)
 351		mem_cgroup_stats_index[memcg_stat_items[i]] = j;
 352}
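/*
 * Worked example (editor's note): memcg_node_stat_items[0] is
 * NR_INACTIVE_ANON, so after init_memcg_stats() runs,
 * mem_cgroup_stats_index[NR_INACTIVE_ANON] == 0 and that counter lives in
 * slot 0 of the dense state[] arrays below; any vmstat item that is not
 * listed in the tables above keeps the U8_MAX sentinel and is rejected by
 * the BAD_STAT_IDX() checks in the accessors.
 */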
 353
 354static inline int memcg_stats_index(int idx)
 355{
 356	return mem_cgroup_stats_index[idx];
 
 357}
 358
 359struct lruvec_stats_percpu {
 360	/* Local (CPU and cgroup) state */
 361	long state[NR_MEMCG_NODE_STAT_ITEMS];
 362
 363	/* Delta calculation for lockless upward propagation */
 364	long state_prev[NR_MEMCG_NODE_STAT_ITEMS];
 365};
 366
 367struct lruvec_stats {
 368	/* Aggregated (CPU and subtree) state */
 369	long state[NR_MEMCG_NODE_STAT_ITEMS];
 370
 371	/* Non-hierarchical (CPU aggregated) state */
 372	long state_local[NR_MEMCG_NODE_STAT_ITEMS];
 373
 374	/* Pending child counts during tree propagation */
 375	long state_pending[NR_MEMCG_NODE_STAT_ITEMS];
 376};
 
 377
 378unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx)
 
 379{
 380	struct mem_cgroup_per_node *pn;
 381	long x;
 382	int i;
 383
 384	if (mem_cgroup_disabled())
 385		return node_page_state(lruvec_pgdat(lruvec), idx);
 386
 387	i = memcg_stats_index(idx);
 388	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
 389		return 0;
 390
 391	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 392	x = READ_ONCE(pn->lruvec_stats->state[i]);
 393#ifdef CONFIG_SMP
 394	if (x < 0)
 395		x = 0;
 396#endif
 397	return x;
 
 
 398}
 
 399
 400unsigned long lruvec_page_state_local(struct lruvec *lruvec,
 401				      enum node_stat_item idx)
 402{
 403	struct mem_cgroup_per_node *pn;
 404	long x;
 405	int i;
 406
 407	if (mem_cgroup_disabled())
 408		return node_page_state(lruvec_pgdat(lruvec), idx);
 409
 410	i = memcg_stats_index(idx);
 411	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
 412		return 0;
 
 
 413
 414	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 415	x = READ_ONCE(pn->lruvec_stats->state_local[i]);
 416#ifdef CONFIG_SMP
 417	if (x < 0)
 418		x = 0;
 419#endif
 420	return x;
 421}
 422
 423/* Subset of vm_event_item to report for memcg event stats */
 424static const unsigned int memcg_vm_event_stat[] = {
 425#ifdef CONFIG_MEMCG_V1
 426	PGPGIN,
 427	PGPGOUT,
 428#endif
 429	PSWPIN,
 430	PSWPOUT,
 431	PGSCAN_KSWAPD,
 432	PGSCAN_DIRECT,
 433	PGSCAN_KHUGEPAGED,
 434	PGSTEAL_KSWAPD,
 435	PGSTEAL_DIRECT,
 436	PGSTEAL_KHUGEPAGED,
 437	PGFAULT,
 438	PGMAJFAULT,
 439	PGREFILL,
 440	PGACTIVATE,
 441	PGDEACTIVATE,
 442	PGLAZYFREE,
 443	PGLAZYFREED,
 444#ifdef CONFIG_SWAP
 445	SWPIN_ZERO,
 446	SWPOUT_ZERO,
 447#endif
 448#ifdef CONFIG_ZSWAP
 449	ZSWPIN,
 450	ZSWPOUT,
 451	ZSWPWB,
 452#endif
 453#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 454	THP_FAULT_ALLOC,
 455	THP_COLLAPSE_ALLOC,
 456	THP_SWPOUT,
 457	THP_SWPOUT_FALLBACK,
 458#endif
 459#ifdef CONFIG_NUMA_BALANCING
 460	NUMA_PAGE_MIGRATE,
 461	NUMA_PTE_UPDATES,
 462	NUMA_HINT_FAULTS,
 463#endif
 464};
 465
 466#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
 467static u8 mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
 468
 469static void init_memcg_events(void)
 
 470{
 471	u8 i;
 472
 473	BUILD_BUG_ON(NR_VM_EVENT_ITEMS >= U8_MAX);
 474
 475	memset(mem_cgroup_events_index, U8_MAX,
 476	       sizeof(mem_cgroup_events_index));
 477
 478	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
 479		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i;
 480}
 481
 482static inline int memcg_events_index(enum vm_event_item idx)
 483{
 484	return mem_cgroup_events_index[idx];
 485}
 486
 487struct memcg_vmstats_percpu {
 488	/* Stats updates since the last flush */
 489	unsigned int			stats_updates;
 490
 491	/* Cached pointers for fast iteration in memcg_rstat_updated() */
 492	struct memcg_vmstats_percpu	*parent;
 493	struct memcg_vmstats		*vmstats;
 494
 495	/* The above should fit a single cacheline for memcg_rstat_updated() */
 496
 497	/* Local (CPU and cgroup) page state & events */
 498	long			state[MEMCG_VMSTAT_SIZE];
 499	unsigned long		events[NR_MEMCG_EVENTS];
 500
 501	/* Delta calculation for lockless upward propagation */
 502	long			state_prev[MEMCG_VMSTAT_SIZE];
 503	unsigned long		events_prev[NR_MEMCG_EVENTS];
 504} ____cacheline_aligned;
 505
 506struct memcg_vmstats {
 507	/* Aggregated (CPU and subtree) page state & events */
 508	long			state[MEMCG_VMSTAT_SIZE];
 509	unsigned long		events[NR_MEMCG_EVENTS];
 510
 511	/* Non-hierarchical (CPU aggregated) page state & events */
 512	long			state_local[MEMCG_VMSTAT_SIZE];
 513	unsigned long		events_local[NR_MEMCG_EVENTS];
 514
 515	/* Pending child counts during tree propagation */
 516	long			state_pending[MEMCG_VMSTAT_SIZE];
 517	unsigned long		events_pending[NR_MEMCG_EVENTS];
 518
 519	/* Stats updates since the last flush */
 520	atomic64_t		stats_updates;
 521};
 522
 523/*
 524 * memcg and lruvec stats flushing
 525 *
  526 * Many codepaths leading to stats updates or reads are performance sensitive,
  527 * and adding stats flushing in such codepaths is not desirable. So, to optimize
  528 * flushing, the kernel does the following:
  529 *
  530 * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
  531 *    the rstat update tree grow unbounded.
  532 *
  533 * 2) Flush the stats synchronously on the reader side only when there are more
  534 *    than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization can
  535 *    leave the stats out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus)
  536 *    events, but only for 2 seconds due to (1).
 537 */
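/*
 * Example of the reader-side threshold in (2) (editor's note; assumes
 * MEMCG_CHARGE_BATCH is 64 as defined in memcontrol.h): on a machine with
 * 8 online CPUs, memcg_vmstats_needs_flush() returns true once more than
 * 64 * 8 = 512 update events have accumulated since the last flush.
 */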
 538static void flush_memcg_stats_dwork(struct work_struct *w);
 539static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
 540static u64 flush_last_time;
 541
 542#define FLUSH_TIME (2UL*HZ)
 543
 544/*
  545 * Accessors to ensure that preemption is disabled on PREEMPT_RT, because the
  546 * code cannot rely on an acquired spinlock_t to disable preemption there. These
  547 * functions are never used in hardirq context on PREEMPT_RT and therefore
  548 * disabling preemption is sufficient.
 549 */
 550static void memcg_stats_lock(void)
 551{
 552	preempt_disable_nested();
 553	VM_WARN_ON_IRQS_ENABLED();
 554}
 555
 556static void __memcg_stats_lock(void)
 
 557{
 558	preempt_disable_nested();
 559}
 560
 561static void memcg_stats_unlock(void)
 
 562{
 563	preempt_enable_nested();
 564}
 565
 566
 567static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
 568{
 569	return atomic64_read(&vmstats->stats_updates) >
 570		MEMCG_CHARGE_BATCH * num_online_cpus();
 571}
 572
 573static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
 574{
 575	struct memcg_vmstats_percpu *statc;
 576	int cpu = smp_processor_id();
 577	unsigned int stats_updates;
 578
 579	if (!val)
 580		return;
 581
 582	cgroup_rstat_updated(memcg->css.cgroup, cpu);
 583	statc = this_cpu_ptr(memcg->vmstats_percpu);
 584	for (; statc; statc = statc->parent) {
 585		stats_updates = READ_ONCE(statc->stats_updates) + abs(val);
 586		WRITE_ONCE(statc->stats_updates, stats_updates);
 587		if (stats_updates < MEMCG_CHARGE_BATCH)
 588			continue;
 589
 
 590		/*
 591		 * If @memcg is already flush-able, increasing stats_updates is
 592		 * redundant. Avoid the overhead of the atomic update.
 593		 */
 594		if (!memcg_vmstats_needs_flush(statc->vmstats))
 595			atomic64_add(stats_updates,
 596				     &statc->vmstats->stats_updates);
 597		WRITE_ONCE(statc->stats_updates, 0);
 598	}
 599}
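/*
 * Editor's note: the loop above walks the cached statc->parent chain, so a
 * single update in a leaf cgroup bumps the per-cpu stats_updates of the
 * leaf and of every ancestor up to the root; each level contributes to the
 * global atomic counter only once its per-cpu batch reaches
 * MEMCG_CHARGE_BATCH.
 */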
 600
 601static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
 602{
 603	bool needs_flush = memcg_vmstats_needs_flush(memcg->vmstats);
 604
 605	trace_memcg_flush_stats(memcg, atomic64_read(&memcg->vmstats->stats_updates),
 606		force, needs_flush);
 607
 608	if (!force && !needs_flush)
 609		return;
 610
 611	if (mem_cgroup_is_root(memcg))
 612		WRITE_ONCE(flush_last_time, jiffies_64);
 613
 614	cgroup_rstat_flush(memcg->css.cgroup);
 615}
 616
 617/*
 618 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
 619 * @memcg: root of the subtree to flush
 620 *
 621 * Flushing is serialized by the underlying global rstat lock. There is also a
 622 * minimum amount of work to be done even if there are no stat updates to flush.
 623 * Hence, we only flush the stats if the updates delta exceeds a threshold. This
 624 * avoids unnecessary work and contention on the underlying lock.
 625 */
 626void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
 627{
 628	if (mem_cgroup_disabled())
 629		return;
 630
 631	if (!memcg)
 632		memcg = root_mem_cgroup;
 633
 634	__mem_cgroup_flush_stats(memcg, false);
 635}
 636
 637void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
 638{
 639	/* Only flush if the periodic flusher is one full cycle late */
 640	if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
 641		mem_cgroup_flush_stats(memcg);
 642}
 643
 644static void flush_memcg_stats_dwork(struct work_struct *w)
 
 645{
 646	/*
 647	 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
 648	 * in latency-sensitive paths is as cheap as possible.
 649	 */
 650	__mem_cgroup_flush_stats(root_mem_cgroup, true);
 651	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
 652}
 653
 654unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
 
 655{
 656	long x;
 657	int i = memcg_stats_index(idx);
 658
 659	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
 660		return 0;
 661
 662	x = READ_ONCE(memcg->vmstats->state[i]);
 663#ifdef CONFIG_SMP
 664	if (x < 0)
 665		x = 0;
 666#endif
 667	return x;
 668}
 669
 670static int memcg_page_state_unit(int item);
 671
 672/*
 673 * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
 674 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
 675 */
 676static int memcg_state_val_in_pages(int idx, int val)
 
 677{
 678	int unit = memcg_page_state_unit(idx);
 
 679
 680	if (!val || unit == PAGE_SIZE)
 681		return val;
 682	else
 683		return max(val * unit / PAGE_SIZE, 1UL);
 684}
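/*
 * Worked example (editor's note, assuming PAGE_SIZE == 4096): a +512 byte
 * update to NR_SLAB_RECLAIMABLE_B (unit 1) becomes max(512 / 4096, 1) == 1
 * page, while a +16 update to NR_KERNEL_STACK_KB (unit SZ_1K) becomes
 * 16 * 1024 / 4096 == 4 pages.
 */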
 685
 686/**
 687 * __mod_memcg_state - update cgroup memory statistics
 688 * @memcg: the memory cgroup
 689 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 690 * @val: delta to add to the counter, can be negative
 691 */
 692void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
 693		       int val)
 694{
 695	int i = memcg_stats_index(idx);
 696
 697	if (mem_cgroup_disabled())
 698		return;
 699
 700	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
 701		return;
 702
 703	__this_cpu_add(memcg->vmstats_percpu->state[i], val);
 704	val = memcg_state_val_in_pages(idx, val);
 705	memcg_rstat_updated(memcg, val);
 706	trace_mod_memcg_state(memcg, idx, val);
 707}
 708
 709/* idx can be of type enum memcg_stat_item or node_stat_item. */
 710unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
 711{
 712	long x;
 713	int i = memcg_stats_index(idx);
 714
 715	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
 716		return 0;
 717
 718	x = READ_ONCE(memcg->vmstats->state_local[i]);
 719#ifdef CONFIG_SMP
 720	if (x < 0)
 721		x = 0;
 
 
 722#endif
 723	return x;
 724}
 725
 726static void __mod_memcg_lruvec_state(struct lruvec *lruvec,
 727				     enum node_stat_item idx,
 728				     int val)
 729{
 730	struct mem_cgroup_per_node *pn;
 731	struct mem_cgroup *memcg;
 732	int i = memcg_stats_index(idx);
 733
 734	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
 735		return;
 736
 737	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 738	memcg = pn->memcg;
 739
 740	/*
  741	 * Callers from rmap rely on disabled preemption because they never
  742	 * update their counters from interrupt context. For those counters we
  743	 * check that the update is never performed from an interrupt context,
  744	 * while other callers need to have interrupts disabled.
 745	 */
 746	__memcg_stats_lock();
 747	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
 748		switch (idx) {
 749		case NR_ANON_MAPPED:
 750		case NR_FILE_MAPPED:
 751		case NR_ANON_THPS:
 752			WARN_ON_ONCE(!in_task());
 753			break;
 754		default:
 755			VM_WARN_ON_IRQS_ENABLED();
 756		}
 757	}
 758
 759	/* Update memcg */
 760	__this_cpu_add(memcg->vmstats_percpu->state[i], val);
 761
 762	/* Update lruvec */
 763	__this_cpu_add(pn->lruvec_stats_percpu->state[i], val);
 764
 765	val = memcg_state_val_in_pages(idx, val);
 766	memcg_rstat_updated(memcg, val);
 767	trace_mod_memcg_lruvec_state(memcg, idx, val);
 768	memcg_stats_unlock();
 769}
 770
 771/**
 772 * __mod_lruvec_state - update lruvec memory statistics
 773 * @lruvec: the lruvec
 774 * @idx: the stat item
 775 * @val: delta to add to the counter, can be negative
 776 *
 777 * The lruvec is the intersection of the NUMA node and a cgroup. This
 1778 * function updates all three counters that are affected by a
 779 * change of state at this level: per-node, per-cgroup, per-lruvec.
 780 */
 781void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 782			int val)
 783{
 784	/* Update node */
 785	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
 786
 787	/* Update memcg and lruvec */
 788	if (!mem_cgroup_disabled())
 789		__mod_memcg_lruvec_state(lruvec, idx, val);
 790}
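/*
 * Usage sketch (editor's note, not a new API): a caller that already has
 * the relevant lruvec adjusts a counter directly, e.g.
 *
 *	__mod_lruvec_state(lruvec, NR_FILE_DIRTY, 1);
 *
 * while folio-based callers normally go through __lruvec_stat_mod_folio()
 * below, which resolves the lruvec itself.
 */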
 791
 792void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
 793			     int val)
 
 794{
 795	struct mem_cgroup *memcg;
 796	pg_data_t *pgdat = folio_pgdat(folio);
 797	struct lruvec *lruvec;
 
 
 798
 799	rcu_read_lock();
 800	memcg = folio_memcg(folio);
 801	/* Untracked pages have no memcg, no lruvec. Update only the node */
 802	if (!memcg) {
 803		rcu_read_unlock();
 804		__mod_node_page_state(pgdat, idx, val);
 805		return;
 806	}
 807
 808	lruvec = mem_cgroup_lruvec(memcg, pgdat);
 809	__mod_lruvec_state(lruvec, idx, val);
 810	rcu_read_unlock();
 811}
 812EXPORT_SYMBOL(__lruvec_stat_mod_folio);
 813
 814void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
 
 
 815{
 816	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
 817	struct mem_cgroup *memcg;
 818	struct lruvec *lruvec;
 819
 820	rcu_read_lock();
 821	memcg = mem_cgroup_from_slab_obj(p);
 
 822
 823	/*
 824	 * Untracked pages have no memcg, no lruvec. Update only the
 825	 * node. If we reparent the slab objects to the root memcg,
 826	 * when we free the slab object, we need to update the per-memcg
 827	 * vmstats to keep it correct for the root memcg.
 828	 */
 829	if (!memcg) {
 830		__mod_node_page_state(pgdat, idx, val);
 831	} else {
 832		lruvec = mem_cgroup_lruvec(memcg, pgdat);
 833		__mod_lruvec_state(lruvec, idx, val);
 834	}
 835	rcu_read_unlock();
 836}
 837
 838/**
 839 * __count_memcg_events - account VM events in a cgroup
 840 * @memcg: the memory cgroup
 841 * @idx: the event item
 842 * @count: the number of events that occurred
 843 */
 844void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
 845			  unsigned long count)
 846{
 847	int i = memcg_events_index(idx);
 
 848
 849	if (mem_cgroup_disabled())
 850		return;
 
 
 851
 852	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, idx))
 853		return;
 
 
 854
 855	memcg_stats_lock();
 856	__this_cpu_add(memcg->vmstats_percpu->events[i], count);
 857	memcg_rstat_updated(memcg, count);
 858	trace_count_memcg_events(memcg, idx, count);
 859	memcg_stats_unlock();
 860}
 861
 862unsigned long memcg_events(struct mem_cgroup *memcg, int event)
 863{
 864	int i = memcg_events_index(event);
 865
 866	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
 867		return 0;
 868
 869	return READ_ONCE(memcg->vmstats->events[i]);
 870}
 871
 872unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
 873{
 874	int i = memcg_events_index(event);
 875
 876	if (WARN_ONCE(BAD_STAT_IDX(i), "%s: missing stat item %d\n", __func__, event))
 877		return 0;
 878
 879	return READ_ONCE(memcg->vmstats->events_local[i]);
 880}
 881
 882struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 883{
 884	/*
 885	 * mm_update_next_owner() may clear mm->owner to NULL
 886	 * if it races with swapoff, page migration, etc.
 887	 * So this can be called with p == NULL.
 888	 */
 889	if (unlikely(!p))
 890		return NULL;
 891
 892	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
 893}
 894EXPORT_SYMBOL(mem_cgroup_from_task);
 895
 896static __always_inline struct mem_cgroup *active_memcg(void)
 897{
 898	if (!in_task())
 899		return this_cpu_read(int_active_memcg);
 900	else
 901		return current->active_memcg;
 902}
 903
 904/**
 905 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 906 * @mm: mm from which memcg should be extracted. It can be NULL.
 907 *
  908 * Obtain a reference on mm->memcg and return it if successful. If mm
 909 * is NULL, then the memcg is chosen as follows:
 910 * 1) The active memcg, if set.
 911 * 2) current->mm->memcg, if available
 912 * 3) root memcg
 913 * If mem_cgroup is disabled, NULL is returned.
 914 */
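/*
 * Usage sketch (editor's note): callers drop the reference when done, e.g.
 *
 *	memcg = get_mem_cgroup_from_mm(mm);
 *	if (memcg) {
 *		... charge or inspect memcg ...
 *		css_put(&memcg->css);
 *	}
 */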
 915struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
 916{
 917	struct mem_cgroup *memcg;
 918
 919	if (mem_cgroup_disabled())
 920		return NULL;
 921
 922	/*
 923	 * Page cache insertions can happen without an
 924	 * actual mm context, e.g. during disk probing
 925	 * on boot, loopback IO, acct() writes etc.
 926	 *
 927	 * No need to css_get on root memcg as the reference
 928	 * counting is disabled on the root level in the
 929	 * cgroup core. See CSS_NO_REF.
 930	 */
 931	if (unlikely(!mm)) {
 932		memcg = active_memcg();
 933		if (unlikely(memcg)) {
 934			/* remote memcg must hold a ref */
 935			css_get(&memcg->css);
 936			return memcg;
 937		}
 938		mm = current->mm;
 939		if (unlikely(!mm))
 940			return root_mem_cgroup;
 941	}
 942
 943	rcu_read_lock();
 944	do {
 945		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
 946		if (unlikely(!memcg))
 947			memcg = root_mem_cgroup;
 948	} while (!css_tryget(&memcg->css));
 949	rcu_read_unlock();
 950	return memcg;
 951}
 952EXPORT_SYMBOL(get_mem_cgroup_from_mm);
 953
 954/**
 955 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
 956 */
 957struct mem_cgroup *get_mem_cgroup_from_current(void)
 958{
 959	struct mem_cgroup *memcg;
 960
 961	if (mem_cgroup_disabled())
 962		return NULL;
 963
 964again:
 965	rcu_read_lock();
 966	memcg = mem_cgroup_from_task(current);
 967	if (!css_tryget(&memcg->css)) {
 968		rcu_read_unlock();
 969		goto again;
 970	}
 971	rcu_read_unlock();
 972	return memcg;
 973}
 974
 975/**
 976 * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg.
 977 * @folio: folio from which memcg should be extracted.
 978 */
 979struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
 980{
 981	struct mem_cgroup *memcg = folio_memcg(folio);
 982
 983	if (mem_cgroup_disabled())
 984		return NULL;
 985
 986	rcu_read_lock();
 987	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
 988		memcg = root_mem_cgroup;
 989	rcu_read_unlock();
 990	return memcg;
 991}
 992
 993/**
 994 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 995 * @root: hierarchy root
 996 * @prev: previously returned memcg, NULL on first invocation
 997 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 998 *
 999 * Returns references to children of the hierarchy below @root, or
1000 * @root itself, or %NULL after a full round-trip.
1001 *
1002 * Caller must pass the return value in @prev on subsequent
1003 * invocations for reference counting, or use mem_cgroup_iter_break()
1004 * to cancel a hierarchy walk before the round-trip is complete.
1005 *
1006 * Reclaimers can specify a node in @reclaim to divide up the memcgs
1007 * in the hierarchy among all concurrent reclaimers operating on the
1008 * same node.
1009 */
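/*
 * Usage sketch (editor's note; this mirrors the for_each_mem_cgroup_tree()
 * pattern used elsewhere in this file, with visit() as a hypothetical
 * per-memcg callback):
 *
 *	struct mem_cgroup *iter = mem_cgroup_iter(root, NULL, NULL);
 *
 *	while (iter) {
 *		if (visit(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *		iter = mem_cgroup_iter(root, iter, NULL);
 *	}
 */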
1010struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1011				   struct mem_cgroup *prev,
1012				   struct mem_cgroup_reclaim_cookie *reclaim)
1013{
1014	struct mem_cgroup_reclaim_iter *iter;
1015	struct cgroup_subsys_state *css;
1016	struct mem_cgroup *pos;
1017	struct mem_cgroup *next;
1018
1019	if (mem_cgroup_disabled())
1020		return NULL;
1021
1022	if (!root)
1023		root = root_mem_cgroup;
1024
1025	rcu_read_lock();
1026restart:
1027	next = NULL;
1028
1029	if (reclaim) {
1030		int gen;
1031		int nid = reclaim->pgdat->node_id;
1032
1033		iter = &root->nodeinfo[nid]->iter;
1034		gen = atomic_read(&iter->generation);
1035
1036		/*
1037		 * On start, join the current reclaim iteration cycle.
1038		 * Exit when a concurrent walker completes it.
1039		 */
1040		if (!prev)
1041			reclaim->generation = gen;
1042		else if (reclaim->generation != gen)
1043			goto out_unlock;
1044
1045		pos = READ_ONCE(iter->position);
1046	} else
1047		pos = prev;
1048
1049	css = pos ? &pos->css : NULL;
1050
1051	while ((css = css_next_descendant_pre(css, &root->css))) {
1052		/*
1053		 * Verify the css and acquire a reference.  The root
1054		 * is provided by the caller, so we know it's alive
1055		 * and kicking, and don't take an extra reference.
1056		 */
1057		if (css == &root->css || css_tryget(css))
1058			break;
1059	}
1060
1061	next = mem_cgroup_from_css(css);
1062
1063	if (reclaim) {
1064		/*
1065		 * The position could have already been updated by a competing
1066		 * thread, so check that the value hasn't changed since we read
1067		 * it to avoid reclaiming from the same cgroup twice.
1068		 */
1069		if (cmpxchg(&iter->position, pos, next) != pos) {
1070			if (css && css != &root->css)
1071				css_put(css);
1072			goto restart;
 
 
1073		}
1074
1075		if (!next) {
1076			atomic_inc(&iter->generation);
1077
1078			/*
1079			 * Reclaimers share the hierarchy walk, and a
1080			 * new one might jump in right at the end of
1081			 * the hierarchy - make sure they see at least
1082			 * one group and restart from the beginning.
1083			 */
1084			if (!prev)
1085				goto restart;
1086		}
1087	}
1088
1089out_unlock:
1090	rcu_read_unlock();
1091	if (prev && prev != root)
1092		css_put(&prev->css);
1093
1094	return next;
1095}
1096
1097/**
1098 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1099 * @root: hierarchy root
1100 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1101 */
1102void mem_cgroup_iter_break(struct mem_cgroup *root,
1103			   struct mem_cgroup *prev)
1104{
1105	if (!root)
1106		root = root_mem_cgroup;
1107	if (prev && prev != root)
1108		css_put(&prev->css);
1109}
1110
1111static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1112					struct mem_cgroup *dead_memcg)
1113{
1114	struct mem_cgroup_reclaim_iter *iter;
1115	struct mem_cgroup_per_node *mz;
1116	int nid;
1117
1118	for_each_node(nid) {
1119		mz = from->nodeinfo[nid];
1120		iter = &mz->iter;
1121		cmpxchg(&iter->position, dead_memcg, NULL);
1122	}
1123}
1124
1125static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1126{
1127	struct mem_cgroup *memcg = dead_memcg;
1128	struct mem_cgroup *last;
1129
1130	do {
1131		__invalidate_reclaim_iterators(memcg, dead_memcg);
1132		last = memcg;
1133	} while ((memcg = parent_mem_cgroup(memcg)));
1134
1135	/*
1136	 * When cgroup1 non-hierarchy mode is used,
1137	 * parent_mem_cgroup() does not walk all the way up to the
1138	 * cgroup root (root_mem_cgroup). So we have to handle
1139	 * dead_memcg from cgroup root separately.
1140	 */
1141	if (!mem_cgroup_is_root(last))
1142		__invalidate_reclaim_iterators(root_mem_cgroup,
1143						dead_memcg);
1144}
1145
1146/**
1147 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1148 * @memcg: hierarchy root
1149 * @fn: function to call for each task
1150 * @arg: argument passed to @fn
1151 *
1152 * This function iterates over tasks attached to @memcg or to any of its
1153 * descendants and calls @fn for each task. If @fn returns a non-zero
1154 * value, the function breaks the iteration loop. Otherwise, it will iterate
1155 * over all tasks and return 0.
1156 *
1157 * This function must not be called for the root memory cgroup.
1158 */
1159void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1160			   int (*fn)(struct task_struct *, void *), void *arg)
1161{
1162	struct mem_cgroup *iter;
1163	int ret = 0;
1164	int i = 0;
1165
1166	BUG_ON(mem_cgroup_is_root(memcg));
 
1167
1168	for_each_mem_cgroup_tree(iter, memcg) {
1169		struct css_task_iter it;
1170		struct task_struct *task;
 
1171
1172		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1173		while (!ret && (task = css_task_iter_next(&it))) {
1174			/* Avoid potential softlockup warning */
1175			if ((++i & 1023) == 0)
1176				cond_resched();
1177			ret = fn(task, arg);
1178		}
1179		css_task_iter_end(&it);
1180		if (ret) {
1181			mem_cgroup_iter_break(memcg, iter);
1182			break;
1183		}
1184	}
 
 
1185}
 
1186
1187#ifdef CONFIG_DEBUG_VM
1188void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1189{
1190	struct mem_cgroup *memcg;
1191
1192	if (mem_cgroup_disabled())
1193		return;
1194
1195	memcg = folio_memcg(folio);
1196
1197	if (!memcg)
1198		VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1199	else
1200		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1201}
1202#endif
1203
1204/**
1205 * folio_lruvec_lock - Lock the lruvec for a folio.
1206 * @folio: Pointer to the folio.
1207 *
1208 * These functions are safe to use under any of the following conditions:
1209 * - folio locked
1210 * - folio_test_lru false
1211 * - folio frozen (refcount of 0)
1212 *
1213 * Return: The lruvec this folio is on with its lock held.
 
 
1214 */
1215struct lruvec *folio_lruvec_lock(struct folio *folio)
1216{
1217	struct lruvec *lruvec = folio_lruvec(folio);
1218
1219	spin_lock(&lruvec->lru_lock);
1220	lruvec_memcg_debug(lruvec, folio);
1221
1222	return lruvec;
1223}
1224
1225/**
1226 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1227 * @folio: Pointer to the folio.
1228 *
1229 * These functions are safe to use under any of the following conditions:
1230 * - folio locked
1231 * - folio_test_lru false
1232 * - folio frozen (refcount of 0)
1233 *
1234 * Return: The lruvec this folio is on with its lock held and interrupts
1235 * disabled.
1236 */
1237struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1238{
1239	struct lruvec *lruvec = folio_lruvec(folio);
 
 
1240
1241	spin_lock_irq(&lruvec->lru_lock);
1242	lruvec_memcg_debug(lruvec, folio);
1243
1244	return lruvec;
1245}
1246
1247/**
1248 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1249 * @folio: Pointer to the folio.
1250 * @flags: Pointer to irqsave flags.
1251 *
1252 * These functions are safe to use under any of the following conditions:
1253 * - folio locked
1254 * - folio_test_lru false
1255 * - folio frozen (refcount of 0)
1256 *
1257 * Return: The lruvec this folio is on with its lock held and interrupts
1258 * disabled.
1259 */
1260struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1261		unsigned long *flags)
1262{
1263	struct lruvec *lruvec = folio_lruvec(folio);
1264
1265	spin_lock_irqsave(&lruvec->lru_lock, *flags);
1266	lruvec_memcg_debug(lruvec, folio);
1267
1268	return lruvec;
1269}
1270
1271/**
1272 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1273 * @lruvec: mem_cgroup per zone lru vector
1274 * @lru: index of lru list the page is sitting on
1275 * @zid: zone id of the accounted pages
1276 * @nr_pages: positive when adding or negative when removing
1277 *
1278 * This function must be called under lru_lock, just before a page is added
1279 * to or just after a page is removed from an lru list.
1280 */
1281void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1282				int zid, int nr_pages)
1283{
1284	struct mem_cgroup_per_node *mz;
1285	unsigned long *lru_size;
1286	long size;
1287
1288	if (mem_cgroup_disabled())
1289		return;
1290
1291	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1292	lru_size = &mz->lru_zone_size[zid][lru];
1293
1294	if (nr_pages < 0)
1295		*lru_size += nr_pages;
1296
1297	size = *lru_size;
1298	if (WARN_ONCE(size < 0,
1299		"%s(%p, %d, %d): lru_size %ld\n",
1300		__func__, lruvec, lru, nr_pages, size)) {
1301		VM_BUG_ON(1);
1302		*lru_size = 0;
1303	}
1304
1305	if (nr_pages > 0)
1306		*lru_size += nr_pages;
1307}
1308
1309/**
1310 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1311 * @memcg: the memory cgroup
1312 *
1313 * Returns the maximum amount of memory @mem can be charged with, in
1314 * pages.
1315 */
1316static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1317{
1318	unsigned long margin = 0;
1319	unsigned long count;
1320	unsigned long limit;
1321
1322	count = page_counter_read(&memcg->memory);
1323	limit = READ_ONCE(memcg->memory.max);
1324	if (count < limit)
1325		margin = limit - count;
1326
1327	if (do_memsw_account()) {
1328		count = page_counter_read(&memcg->memsw);
1329		limit = READ_ONCE(memcg->memsw.max);
1330		if (count < limit)
1331			margin = min(margin, limit - count);
1332		else
1333			margin = 0;
1334	}
1335
1336	return margin;
1337}
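/*
 * Worked example (editor's note): with memory.max == 1000 pages and a
 * usage of 900 pages the margin is 100 pages; if memsw accounting is
 * active with memsw.max == 950 and a memsw usage of 940, the margin is
 * clamped to min(100, 10) == 10 pages.
 */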
1338
1339struct memory_stat {
1340	const char *name;
1341	unsigned int idx;
1342};
1343
1344static const struct memory_stat memory_stats[] = {
1345	{ "anon",			NR_ANON_MAPPED			},
1346	{ "file",			NR_FILE_PAGES			},
1347	{ "kernel",			MEMCG_KMEM			},
1348	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
1349	{ "pagetables",			NR_PAGETABLE			},
1350	{ "sec_pagetables",		NR_SECONDARY_PAGETABLE		},
1351	{ "percpu",			MEMCG_PERCPU_B			},
1352	{ "sock",			MEMCG_SOCK			},
1353	{ "vmalloc",			MEMCG_VMALLOC			},
1354	{ "shmem",			NR_SHMEM			},
1355#ifdef CONFIG_ZSWAP
1356	{ "zswap",			MEMCG_ZSWAP_B			},
1357	{ "zswapped",			MEMCG_ZSWAPPED			},
1358#endif
1359	{ "file_mapped",		NR_FILE_MAPPED			},
1360	{ "file_dirty",			NR_FILE_DIRTY			},
1361	{ "file_writeback",		NR_WRITEBACK			},
1362#ifdef CONFIG_SWAP
1363	{ "swapcached",			NR_SWAPCACHE			},
1364#endif
1365#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1366	{ "anon_thp",			NR_ANON_THPS			},
1367	{ "file_thp",			NR_FILE_THPS			},
1368	{ "shmem_thp",			NR_SHMEM_THPS			},
1369#endif
1370	{ "inactive_anon",		NR_INACTIVE_ANON		},
1371	{ "active_anon",		NR_ACTIVE_ANON			},
1372	{ "inactive_file",		NR_INACTIVE_FILE		},
1373	{ "active_file",		NR_ACTIVE_FILE			},
1374	{ "unevictable",		NR_UNEVICTABLE			},
1375	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B		},
1376	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B		},
1377#ifdef CONFIG_HUGETLB_PAGE
1378	{ "hugetlb",			NR_HUGETLB			},
1379#endif
1380
1381	/* The memory events */
1382	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON		},
1383	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE		},
1384	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON	},
1385	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE	},
1386	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON		},
1387	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE		},
1388	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM		},
1389
1390	{ "pgdemote_kswapd",		PGDEMOTE_KSWAPD		},
1391	{ "pgdemote_direct",		PGDEMOTE_DIRECT		},
1392	{ "pgdemote_khugepaged",	PGDEMOTE_KHUGEPAGED	},
1393#ifdef CONFIG_NUMA_BALANCING
1394	{ "pgpromote_success",		PGPROMOTE_SUCCESS	},
1395#endif
1396};
1397
1398/* The actual unit of the state item, not the same as the output unit */
1399static int memcg_page_state_unit(int item)
1400{
1401	switch (item) {
1402	case MEMCG_PERCPU_B:
1403	case MEMCG_ZSWAP_B:
1404	case NR_SLAB_RECLAIMABLE_B:
1405	case NR_SLAB_UNRECLAIMABLE_B:
1406		return 1;
1407	case NR_KERNEL_STACK_KB:
1408		return SZ_1K;
1409	default:
1410		return PAGE_SIZE;
1411	}
1412}
1413
1414/* Translate stat items to the correct unit for memory.stat output */
1415static int memcg_page_state_output_unit(int item)
1416{
1417	/*
1418	 * Workingset state is actually in pages, but we export it to userspace
1419	 * as a scalar count of events, so special case it here.
1420	 *
1421	 * Demotion and promotion activities are exported in pages, consistent
1422	 * with their global counterparts.
1423	 */
1424	switch (item) {
1425	case WORKINGSET_REFAULT_ANON:
1426	case WORKINGSET_REFAULT_FILE:
1427	case WORKINGSET_ACTIVATE_ANON:
1428	case WORKINGSET_ACTIVATE_FILE:
1429	case WORKINGSET_RESTORE_ANON:
1430	case WORKINGSET_RESTORE_FILE:
1431	case WORKINGSET_NODERECLAIM:
1432	case PGDEMOTE_KSWAPD:
1433	case PGDEMOTE_DIRECT:
1434	case PGDEMOTE_KHUGEPAGED:
1435#ifdef CONFIG_NUMA_BALANCING
1436	case PGPROMOTE_SUCCESS:
1437#endif
1438		return 1;
1439	default:
1440		return memcg_page_state_unit(item);
1441	}
1442}
1443
1444unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item)
1445{
1446	return memcg_page_state(memcg, item) *
1447		memcg_page_state_output_unit(item);
1448}
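/*
 * Example (editor's note): NR_KERNEL_STACK_KB is tracked in KiB, so a
 * stored value of 16 is reported as 16 * SZ_1K == 16384 bytes;
 * WORKINGSET_REFAULT_FILE has an output unit of 1 and is reported as a raw
 * event count; most other items are reported in bytes via PAGE_SIZE.
 */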
1449
1450unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item)
1451{
1452	return memcg_page_state_local(memcg, item) *
1453		memcg_page_state_output_unit(item);
1454}
1455
1456static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1457{
1458	int i;
1459
 
1460	/*
1461	 * Provide statistics on the state of the memory subsystem as
1462	 * well as cumulative event counters that show past behavior.
1463	 *
1464	 * This list is ordered following a combination of these gradients:
1465	 * 1) generic big picture -> specifics and details
1466	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1467	 *
1468	 * Current memory state:
1469	 */
1470	mem_cgroup_flush_stats(memcg);
1471
1472	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1473		u64 size;
1474
1475#ifdef CONFIG_HUGETLB_PAGE
1476		if (unlikely(memory_stats[i].idx == NR_HUGETLB) &&
1477		    !(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
1478			continue;
1479#endif
1480		size = memcg_page_state_output(memcg, memory_stats[i].idx);
1481		seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
1482
1483		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1484			size += memcg_page_state_output(memcg,
1485							NR_SLAB_RECLAIMABLE_B);
1486			seq_buf_printf(s, "slab %llu\n", size);
1487		}
1488	}
 
 
1489
1490	/* Accumulated memory events */
1491	seq_buf_printf(s, "pgscan %lu\n",
1492		       memcg_events(memcg, PGSCAN_KSWAPD) +
1493		       memcg_events(memcg, PGSCAN_DIRECT) +
1494		       memcg_events(memcg, PGSCAN_KHUGEPAGED));
1495	seq_buf_printf(s, "pgsteal %lu\n",
1496		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1497		       memcg_events(memcg, PGSTEAL_DIRECT) +
1498		       memcg_events(memcg, PGSTEAL_KHUGEPAGED));
1499
1500	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1501#ifdef CONFIG_MEMCG_V1
1502		if (memcg_vm_event_stat[i] == PGPGIN ||
1503		    memcg_vm_event_stat[i] == PGPGOUT)
1504			continue;
1505#endif
1506		seq_buf_printf(s, "%s %lu\n",
1507			       vm_event_name(memcg_vm_event_stat[i]),
1508			       memcg_events(memcg, memcg_vm_event_stat[i]));
1509	}
1510}
1511
1512static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
 
1513{
1514	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1515		memcg_stat_format(memcg, s);
1516	else
1517		memcg1_stat_format(memcg, s);
1518	if (seq_buf_has_overflowed(s))
1519		pr_warn("%s: Warning, stat buffer overflow, please report\n", __func__);
1520}
1521
1522/**
1523 * mem_cgroup_print_oom_context: Print OOM information relevant to
1524 * memory controller.
1525 * @memcg: The memory cgroup that went over limit
1526 * @p: Task that is going to be killed
1527 *
1528 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1529 * enabled
1530 */
1531void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1532{
1533	rcu_read_lock();
1534
1535	if (memcg) {
1536		pr_cont(",oom_memcg=");
1537		pr_cont_cgroup_path(memcg->css.cgroup);
1538	} else
1539		pr_cont(",global_oom");
1540	if (p) {
1541		pr_cont(",task_memcg=");
1542		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1543	}
1544	rcu_read_unlock();
1545}
1546
1547/**
1548 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1549 * memory controller.
1550 * @memcg: The memory cgroup that went over limit
1551 */
1552void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1553{
 1554	/* Use a static buffer, since the caller is holding oom_lock. */
1555	static char buf[SEQ_BUF_SIZE];
1556	struct seq_buf s;
1557
1558	lockdep_assert_held(&oom_lock);
1559
1560	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1561		K((u64)page_counter_read(&memcg->memory)),
1562		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1563	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1564		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1565			K((u64)page_counter_read(&memcg->swap)),
1566			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1567#ifdef CONFIG_MEMCG_V1
1568	else {
1569		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1570			K((u64)page_counter_read(&memcg->memsw)),
1571			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1572		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1573			K((u64)page_counter_read(&memcg->kmem)),
1574			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1575	}
1576#endif
1577
1578	pr_info("Memory cgroup stats for ");
1579	pr_cont_cgroup_path(memcg->css.cgroup);
1580	pr_cont(":");
1581	seq_buf_init(&s, buf, SEQ_BUF_SIZE);
1582	memory_stat_format(memcg, &s);
1583	seq_buf_do_printk(&s, KERN_INFO);
1584}
1585
1586/*
1587 * Return the memory (and swap, if configured) limit for a memcg.
1588 */
1589unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1590{
1591	unsigned long max = READ_ONCE(memcg->memory.max);
 
1592
1593	if (do_memsw_account()) {
1594		if (mem_cgroup_swappiness(memcg)) {
1595			/* Calculate swap excess capacity from memsw limit */
1596			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1597
1598			max += min(swap, (unsigned long)total_swap_pages);
1599		}
1600	} else {
1601		if (mem_cgroup_swappiness(memcg))
1602			max += min(READ_ONCE(memcg->swap.max),
1603				   (unsigned long)total_swap_pages);
1604	}
1605	return max;
1606}
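/*
 * Worked example (editor's note, in pages): with memory.max == 1000 and a
 * non-zero swappiness, memsw.max == 1500 leaves a swap excess of 500, so
 * the returned maximum is 1000 + min(500, total_swap_pages); without memsw
 * accounting (cgroup v2), swap.max is used directly instead.
 */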
1607
1608unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
 
 
1609{
1610	return page_counter_read(&memcg->memory);
1611}
1612
1613static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1614				     int order)
1615{
1616	struct oom_control oc = {
1617		.zonelist = NULL,
1618		.nodemask = NULL,
1619		.memcg = memcg,
1620		.gfp_mask = gfp_mask,
1621		.order = order,
1622	};
1623	bool ret = true;
1624
1625	if (mutex_lock_killable(&oom_lock))
1626		return true;
 
1627
1628	if (mem_cgroup_margin(memcg) >= (1 << order))
1629		goto unlock;
1630
1631	/*
1632	 * A few threads which were not waiting at mutex_lock_killable() can
1633	 * fail to bail out. Therefore, check again after holding oom_lock.
1634	 */
1635	ret = task_is_dying() || out_of_memory(&oc);
1636
1637unlock:
1638	mutex_unlock(&oom_lock);
1639	return ret;
1640}
 
1641
1642/*
1643 * Returns true if successfully killed one or more processes. Though in some
1644 * corner cases it can return true even without killing any process.
 
 
1645 */
1646static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1647{
1648	bool locked, ret;
1649
1650	if (order > PAGE_ALLOC_COSTLY_ORDER)
1651		return false;
1652
1653	memcg_memory_event(memcg, MEMCG_OOM);
1654
1655	if (!memcg1_oom_prepare(memcg, &locked))
1656		return false;
 
1657
1658	ret = mem_cgroup_out_of_memory(memcg, mask, order);
1659
1660	memcg1_oom_finish(memcg, locked);
1661
1662	return ret;
1663}
1664
1665/**
1666 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1667 * @victim: task to be killed by the OOM killer
1668 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1669 *
1670 * Returns a pointer to a memory cgroup, which has to be cleaned up
1671 * by killing all belonging OOM-killable tasks.
1672 *
1673 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1674 */
1675struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1676					    struct mem_cgroup *oom_domain)
1677{
1678	struct mem_cgroup *oom_group = NULL;
1679	struct mem_cgroup *memcg;
1680
1681	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1682		return NULL;
1683
1684	if (!oom_domain)
1685		oom_domain = root_mem_cgroup;
1686
1687	rcu_read_lock();
 
 
1688
1689	memcg = mem_cgroup_from_task(victim);
1690	if (mem_cgroup_is_root(memcg))
1691		goto out;
1692
1693	/*
1694	 * If the victim task has been asynchronously moved to a different
1695	 * memory cgroup, we might end up killing tasks outside oom_domain.
1696	 * In this case it's better to ignore memory.group.oom.
1697	 */
1698	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
1699		goto out;
 
 
1700
1701	/*
1702	 * Traverse the memory cgroup hierarchy from the victim task's
1703	 * cgroup up to the OOMing cgroup (or root) to find the
1704	 * highest-level memory cgroup with oom.group set.
1705	 */
1706	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1707		if (READ_ONCE(memcg->oom_group))
1708			oom_group = memcg;
1709
1710		if (memcg == oom_domain)
1711			break;
1712	}
 
 
1713
1714	if (oom_group)
1715		css_get(&oom_group->css);
1716out:
1717	rcu_read_unlock();
1718
1719	return oom_group;
1720}
1721
1722void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1723{
1724	pr_info("Tasks in ");
1725	pr_cont_cgroup_path(memcg->css.cgroup);
1726	pr_cont(" are going to be killed due to memory.oom.group set\n");
1727}
 
1728
1729struct memcg_stock_pcp {
1730	local_lock_t stock_lock;
 1731	struct mem_cgroup *cached; /* this is never the root cgroup */
1732	unsigned int nr_pages;
 
 
 
 
 
 
 
 
 
 
1733
1734	struct obj_cgroup *cached_objcg;
1735	struct pglist_data *cached_pgdat;
1736	unsigned int nr_bytes;
1737	int nr_slab_reclaimable_b;
1738	int nr_slab_unreclaimable_b;
1739
1740	struct work_struct work;
1741	unsigned long flags;
1742#define FLUSHING_CACHED_CHARGE	0
1743};
1744static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
1745	.stock_lock = INIT_LOCAL_LOCK(stock_lock),
1746};
1747static DEFINE_MUTEX(percpu_charge_mutex);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1748
1749static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
1750static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
1751				     struct mem_cgroup *root_memcg);
1752
1753/**
1754 * consume_stock: Try to consume stocked charge on this cpu.
1755 * @memcg: memcg to consume from.
1756 * @nr_pages: how many pages to charge.
1757 *
1758 * The charges will only happen if @memcg matches the current cpu's memcg
1759 * stock, and at least @nr_pages are available in that stock.  Failure to
1760 * service an allocation will refill the stock.
1761 *
1762 * returns true if successful, false otherwise.
1763 */
1764static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1765{
1766	struct memcg_stock_pcp *stock;
1767	unsigned int stock_pages;
1768	unsigned long flags;
1769	bool ret = false;
1770
1771	if (nr_pages > MEMCG_CHARGE_BATCH)
1772		return ret;
 
 
 
 
 
 
 
 
 
 
1773
1774	local_lock_irqsave(&memcg_stock.stock_lock, flags);
 
1775
1776	stock = this_cpu_ptr(&memcg_stock);
1777	stock_pages = READ_ONCE(stock->nr_pages);
1778	if (memcg == READ_ONCE(stock->cached) && stock_pages >= nr_pages) {
1779		WRITE_ONCE(stock->nr_pages, stock_pages - nr_pages);
1780		ret = true;
 
 
 
 
 
1781	}
1782
1783	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1784
1785	return ret;
1786}
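/*
 * A rough worked example of how this stock interacts with try_charge_memcg()
 * below, assuming the current MEMCG_CHARGE_BATCH of 64 pages: a single-page
 * GFP_KERNEL charge that misses an empty stock charges a full batch of 64
 * pages to the page counters, and the done_restock path then parks the unused
 * 63 pages here via refill_stock(). The next 63 single-page charges on this
 * CPU are served from the stock without touching the shared page counters.
 */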
1787
1788/*
1789 * Uncharge pages cached in the percpu stock and reset the cached information.
1790 */
1791static void drain_stock(struct memcg_stock_pcp *stock)
1792{
1793	unsigned int stock_pages = READ_ONCE(stock->nr_pages);
1794	struct mem_cgroup *old = READ_ONCE(stock->cached);
1795
1796	if (!old)
1797		return;
 
 
 
 
 
 
1798
1799	if (stock_pages) {
1800		page_counter_uncharge(&old->memory, stock_pages);
1801		if (do_memsw_account())
1802			page_counter_uncharge(&old->memsw, stock_pages);
1803
1804		WRITE_ONCE(stock->nr_pages, 0);
1805	}
 
1806
1807	css_put(&old->css);
1808	WRITE_ONCE(stock->cached, NULL);
 
 
 
 
 
1809}
1810
1811static void drain_local_stock(struct work_struct *dummy)
 
 
 
 
 
 
 
 
 
1812{
1813	struct memcg_stock_pcp *stock;
1814	struct obj_cgroup *old = NULL;
1815	unsigned long flags;
 
 
 
1816
1817	/*
1818	 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
1819	 * drain_stock races is that we always operate on local CPU stock
1820	 * here with IRQ disabled
1821	 */
1822	local_lock_irqsave(&memcg_stock.stock_lock, flags);
 
 
 
 
1823
1824	stock = this_cpu_ptr(&memcg_stock);
1825	old = drain_obj_stock(stock);
1826	drain_stock(stock);
1827	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
1828
1829	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1830	obj_cgroup_put(old);
 
 
1831}
1832
1833/*
1834 * Cache charges (nr_pages) in the local per-cpu area.
1835 * They will be consumed by consume_stock() later.
1836 */
1837static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 
1838{
1839	struct memcg_stock_pcp *stock;
1840	unsigned int stock_pages;
1841
1842	stock = this_cpu_ptr(&memcg_stock);
1843	if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
1844		drain_stock(stock);
1845		css_get(&memcg->css);
1846		WRITE_ONCE(stock->cached, memcg);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1847	}
1848	stock_pages = READ_ONCE(stock->nr_pages) + nr_pages;
1849	WRITE_ONCE(stock->nr_pages, stock_pages);
 
 
 
1850
1851	if (stock_pages > MEMCG_CHARGE_BATCH)
1852		drain_stock(stock);
1853}
1854
1855static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1856{
1857	unsigned long flags;
1858
1859	local_lock_irqsave(&memcg_stock.stock_lock, flags);
1860	__refill_stock(memcg, nr_pages);
1861	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
1862}
1863
1864/*
1865 * Drains all per-CPU charge caches for given root_memcg resp. subtree
1866 * of the hierarchy under it.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1867 */
1868void drain_all_stock(struct mem_cgroup *root_memcg)
 
 
1869{
1870	int cpu, curcpu;
 
1871
1872	/* If someone's already draining, avoid running more workers. */
1873	if (!mutex_trylock(&percpu_charge_mutex))
 
 
1874		return;
1875	/*
1876	 * Notify other cpus that system-wide "drain" is running
1877	 * We do not care about races with the cpu hotplug because cpu down
1878	 * as well as workers from this path always operate on the local
1879	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
1880	 */
1881	migrate_disable();
1882	curcpu = smp_processor_id();
1883	for_each_online_cpu(cpu) {
1884		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1885		struct mem_cgroup *memcg;
1886		bool flush = false;
1887
1888		rcu_read_lock();
1889		memcg = READ_ONCE(stock->cached);
1890		if (memcg && READ_ONCE(stock->nr_pages) &&
1891		    mem_cgroup_is_descendant(memcg, root_memcg))
1892			flush = true;
1893		else if (obj_stock_flush_required(stock, root_memcg))
1894			flush = true;
1895		rcu_read_unlock();
1896
1897		if (flush &&
1898		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1899			if (cpu == curcpu)
1900				drain_local_stock(&stock->work);
1901			else if (!cpu_is_isolated(cpu))
1902				schedule_work_on(cpu, &stock->work);
1903		}
1904	}
1905	migrate_enable();
1906	mutex_unlock(&percpu_charge_mutex);
1907}
1908
1909static int memcg_hotplug_cpu_dead(unsigned int cpu)
1910{
1911	struct memcg_stock_pcp *stock;
1912
1913	stock = &per_cpu(memcg_stock, cpu);
1914	drain_stock(stock);
1915
1916	return 0;
 
 
1917}
1918
1919static unsigned long reclaim_high(struct mem_cgroup *memcg,
1920				  unsigned int nr_pages,
1921				  gfp_t gfp_mask)
1922{
1923	unsigned long nr_reclaimed = 0;
 
 
1924
1925	do {
1926		unsigned long pflags;
1927
1928		if (page_counter_read(&memcg->memory) <=
1929		    READ_ONCE(memcg->memory.high))
1930			continue;
1931
1932		memcg_memory_event(memcg, MEMCG_HIGH);
 
 
 
 
 
 
1933
1934		psi_memstall_enter(&pflags);
1935		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
1936							gfp_mask,
1937							MEMCG_RECLAIM_MAY_SWAP,
1938							NULL);
1939		psi_memstall_leave(&pflags);
1940	} while ((memcg = parent_mem_cgroup(memcg)) &&
1941		 !mem_cgroup_is_root(memcg));
1942
1943	return nr_reclaimed;
1944}
 
 
 
 
 
 
 
 
 
 
 
 
1945
1946static void high_work_func(struct work_struct *work)
 
 
 
 
 
 
1947{
1948	struct mem_cgroup *memcg;
 
1949
1950	memcg = container_of(work, struct mem_cgroup, high_work);
1951	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
 
 
 
 
 
1952}
1953
1954/*
1955 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
1956 * enough to still cause a significant slowdown in most cases, while still
1957 * allowing diagnostics and tracing to proceed without becoming stuck.
1958 */
1959#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
1960
1961/*
1962 * When calculating the delay, we use these either side of the exponentiation to
1963 * maintain precision and scale to a reasonable number of jiffies (see the table
1964 * below).
1965 *
1966 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
1967 *   overage ratio to a delay.
1968 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
1969 *   proposed penalty in order to reduce to a reasonable number of jiffies, and
1970 *   to produce a reasonable delay curve.
1971 *
1972 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
1973 * reasonable delay curve compared to precision-adjusted overage, not
1974 * penalising heavily at first, but still making sure that growth beyond the
1975 * limit penalises misbehaving cgroups by slowing them down exponentially. For
1976 * example, with a high of 100 megabytes:
1977 *
1978 *  +-------+------------------------+
1979 *  | usage | time to allocate in ms |
1980 *  +-------+------------------------+
1981 *  | 100M  |                      0 |
1982 *  | 101M  |                      6 |
1983 *  | 102M  |                     25 |
1984 *  | 103M  |                     57 |
1985 *  | 104M  |                    102 |
1986 *  | 105M  |                    159 |
1987 *  | 106M  |                    230 |
1988 *  | 107M  |                    313 |
1989 *  | 108M  |                    409 |
1990 *  | 109M  |                    518 |
1991 *  | 110M  |                    639 |
1992 *  | 111M  |                    774 |
1993 *  | 112M  |                    921 |
1994 *  | 113M  |                   1081 |
1995 *  | 114M  |                   1254 |
1996 *  | 115M  |                   1439 |
1997 *  | 116M  |                   1638 |
1998 *  | 117M  |                   1849 |
1999 *  | 118M  |                   2000 |
2000 *  | 119M  |                   2000 |
2001 *  | 120M  |                   2000 |
2002 *  +-------+------------------------+
2003 */
2004#define MEMCG_DELAY_PRECISION_SHIFT 20
2005#define MEMCG_DELAY_SCALING_SHIFT 14
2006
2007static u64 calculate_overage(unsigned long usage, unsigned long high)
2008{
2009	u64 overage;
2010
2011	if (usage <= high)
2012		return 0;
2013
2014	/*
2015	 * Prevent division by 0 in overage calculation by acting as if
2016	 * it was a threshold of 1 page
2017	 */
2018	high = max(high, 1UL);
 
 
2019
2020	overage = usage - high;
2021	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2022	return div64_u64(overage, high);
 
 
 
 
 
 
2023}
2024
2025static u64 mem_find_max_overage(struct mem_cgroup *memcg)
 
 
 
 
2026{
2027	u64 overage, max_overage = 0;
2028
2029	do {
2030		overage = calculate_overage(page_counter_read(&memcg->memory),
2031					    READ_ONCE(memcg->memory.high));
2032		max_overage = max(overage, max_overage);
2033	} while ((memcg = parent_mem_cgroup(memcg)) &&
2034		 !mem_cgroup_is_root(memcg));
2035
2036	return max_overage;
2037}
2038
2039static u64 swap_find_max_overage(struct mem_cgroup *memcg)
 
 
 
 
 
2040{
2041	u64 overage, max_overage = 0;
 
 
 
 
 
 
 
2042
2043	do {
2044		overage = calculate_overage(page_counter_read(&memcg->swap),
2045					    READ_ONCE(memcg->swap.high));
2046		if (overage)
2047			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2048		max_overage = max(overage, max_overage);
2049	} while ((memcg = parent_mem_cgroup(memcg)) &&
2050		 !mem_cgroup_is_root(memcg));
 
 
 
 
 
 
 
 
2051
2052	return max_overage;
 
 
 
 
 
 
2053}
2054
2055/*
2056 * Get the number of jiffies that we should penalise a mischievous cgroup which
2057 * is exceeding its memory.high by checking both it and its ancestors.
 
 
2058 */
2059static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2060					  unsigned int nr_pages,
2061					  u64 max_overage)
2062{
2063	unsigned long penalty_jiffies;
2064
2065	if (!max_overage)
2066		return 0;
2067
2068	/*
2069	 * We use overage compared to memory.high to calculate the number of
2070	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2071	 * fairly lenient on small overages, and increasingly harsh when the
2072	 * memcg in question makes it clear that it has no intention of stopping
2073	 * its crazy behaviour, so we exponentially increase the delay based on
2074	 * overage amount.
2075	 */
2076	penalty_jiffies = max_overage * max_overage * HZ;
2077	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2078	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2079
2080	/*
2081	 * Factor in the task's own contribution to the overage, such that four
2082	 * N-sized allocations are throttled approximately the same as one
2083	 * 4N-sized allocation.
2084	 *
2085	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2086	 * larger the current charge batch is than that.
2087	 */
2088	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2089}
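/*
 * A rough standalone userspace sketch (not kernel code) that closely
 * reproduces the table above using the same shifts, up to rounding.
 * delay_ms(), usage_mb and high_mb are hypothetical names used only for this
 * illustration; it assumes 4K pages and a full MEMCG_CHARGE_BATCH worth of
 * nr_pages, and converts back to milliseconds so that HZ cancels out.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t delay_ms(uint64_t usage_mb, uint64_t high_mb)
{
	/* 1 MiB is 256 pages of 4 KiB */
	uint64_t usage = usage_mb << 8;
	uint64_t high = high_mb << 8;
	uint64_t overage, penalty;

	if (usage <= high)
		return 0;
	overage = ((usage - high) << 20) / high;	/* MEMCG_DELAY_PRECISION_SHIFT */
	/* overage^2 * HZ >> 20 >> 14, expressed in ms so HZ cancels out */
	penalty = overage * overage * 1000 >> 20 >> 14;	/* MEMCG_DELAY_SCALING_SHIFT */
	return penalty > 2000 ? 2000 : penalty;	/* MEMCG_MAX_HIGH_DELAY_JIFFIES */
}

int main(void)
{
	uint64_t mb;

	for (mb = 100; mb <= 120; mb++)
		printf("%lluM -> %llu ms\n", (unsigned long long)mb,
		       (unsigned long long)delay_ms(mb, 100));
	return 0;
}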
2090
2091/*
2092 * Reclaims memory over the high limit. Called directly from
2093 * try_charge() (context permitting), as well as from the userland
2094 * return path where reclaim is always able to block.
2095 */
2096void mem_cgroup_handle_over_high(gfp_t gfp_mask)
2097{
2098	unsigned long penalty_jiffies;
2099	unsigned long pflags;
2100	unsigned long nr_reclaimed;
2101	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2102	int nr_retries = MAX_RECLAIM_RETRIES;
2103	struct mem_cgroup *memcg;
2104	bool in_retry = false;
2105
2106	if (likely(!nr_pages))
2107		return;
 
2108
2109	memcg = get_mem_cgroup_from_mm(current->mm);
2110	current->memcg_nr_pages_over_high = 0;
 
 
 
2111
2112retry_reclaim:
2113	/*
2114	 * Bail if the task is already exiting. Unlike memory.max,
2115	 * memory.high enforcement isn't as strict, and there is no
2116	 * OOM killer involved, which means the excess could already
2117	 * be much bigger (and still growing) than it could for
2118	 * memory.max; the dying task could get stuck in fruitless
2119	 * reclaim for a long time, which isn't desirable.
2120	 */
2121	if (task_is_dying())
2122		goto out;
2123
2124	/*
2125	 * The allocating task should reclaim at least the batch size, but for
2126	 * subsequent retries we only want to do what's necessary to prevent oom
2127	 * or breaching resource isolation.
2128	 *
2129	 * This is distinct from memory.max or page allocator behaviour because
2130	 * memory.high is currently batched, whereas memory.max and the page
2131	 * allocator run every time an allocation is made.
2132	 */
2133	nr_reclaimed = reclaim_high(memcg,
2134				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2135				    gfp_mask);
2136
2137	/*
2138	 * memory.high is breached and reclaim is unable to keep up. Throttle
2139	 * allocators proactively to slow down excessive growth.
2140	 */
2141	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2142					       mem_find_max_overage(memcg));
2143
2144	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2145						swap_find_max_overage(memcg));
2146
2147	/*
2148	 * Clamp the max delay per usermode return so as to still keep the
2149	 * application moving forwards and also permit diagnostics, albeit
2150	 * extremely slowly.
2151	 */
2152	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2153
2154	/*
2155	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2156	 * that it's not even worth doing, in an attempt to be nice to those who
2157	 * go only a small amount over their memory.high value and maybe haven't
2158	 * been aggressively reclaimed enough yet.
2159	 */
2160	if (penalty_jiffies <= HZ / 100)
2161		goto out;
2162
2163	/*
2164	 * If reclaim is making forward progress but we're still over
2165	 * memory.high, we want to encourage that rather than doing allocator
2166	 * throttling.
2167	 */
2168	if (nr_reclaimed || nr_retries--) {
2169		in_retry = true;
2170		goto retry_reclaim;
2171	}
2172
2173	/*
2174	 * Reclaim didn't manage to push usage below the limit, slow
2175	 * this allocating task down.
2176	 *
2177	 * If we exit early, we're guaranteed to die (since
2178	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2179	 * need to account for any ill-begotten jiffies to pay them off later.
2180	 */
2181	psi_memstall_enter(&pflags);
2182	schedule_timeout_killable(penalty_jiffies);
2183	psi_memstall_leave(&pflags);
2184
2185out:
2186	css_put(&memcg->css);
2187}
2188
2189int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2190		     unsigned int nr_pages)
2191{
2192	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2193	int nr_retries = MAX_RECLAIM_RETRIES;
2194	struct mem_cgroup *mem_over_limit;
2195	struct page_counter *counter;
2196	unsigned long nr_reclaimed;
2197	bool passed_oom = false;
2198	unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2199	bool drained = false;
2200	bool raised_max_event = false;
2201	unsigned long pflags;
2202
2203retry:
2204	if (consume_stock(memcg, nr_pages))
2205		return 0;
2206
2207	if (!do_memsw_account() ||
2208	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2209		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2210			goto done_restock;
2211		if (do_memsw_account())
2212			page_counter_uncharge(&memcg->memsw, batch);
2213		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2214	} else {
2215		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2216		reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2217	}
 
 
 
 
 
 
 
 
 
 
2218
2219	if (batch > nr_pages) {
2220		batch = nr_pages;
2221		goto retry;
2222	}
2223
2224	/*
2225	 * Prevent unbounded recursion when reclaim operations need to
2226	 * allocate memory. This might exceed the limits temporarily,
2227	 * but we prefer facilitating memory reclaim and getting back
2228	 * under the limit over triggering OOM kills in these cases.
2229	 */
2230	if (unlikely(current->flags & PF_MEMALLOC))
2231		goto force;
2232
2233	if (unlikely(task_in_memcg_oom(current)))
2234		goto nomem;
2235
2236	if (!gfpflags_allow_blocking(gfp_mask))
2237		goto nomem;
2238
2239	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2240	raised_max_event = true;
2241
2242	psi_memstall_enter(&pflags);
2243	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2244						    gfp_mask, reclaim_options, NULL);
2245	psi_memstall_leave(&pflags);
2246
 
2247	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2248		goto retry;
2249
2250	if (!drained) {
2251		drain_all_stock(mem_over_limit);
2252		drained = true;
2253		goto retry;
2254	}
2255
2256	if (gfp_mask & __GFP_NORETRY)
2257		goto nomem;
2258	/*
2259	 * Even though the limit is exceeded at this point, reclaim
2260	 * may have been able to free some pages.  Retry the charge
2261	 * before killing the task.
2262	 *
2263	 * Only for regular pages, though: huge pages are rather
2264	 * unlikely to succeed so close to the limit, and we fall back
2265	 * to regular pages anyway in case of failure.
2266	 */
2267	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2268		goto retry;
2269
2270	if (nr_retries--)
2271		goto retry;
2272
2273	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2274		goto nomem;
2275
2276	/* Avoid endless loop for tasks bypassed by the oom killer */
2277	if (passed_oom && task_is_dying())
2278		goto nomem;
2279
2280	/*
2281	 * Keep retrying as long as the memcg oom killer is able to make
2282	 * forward progress, or bypass the charge if the oom killer
2283	 * couldn't make any progress.
2284	 */
2285	if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2286			   get_order(nr_pages * PAGE_SIZE))) {
2287		passed_oom = true;
2288		nr_retries = MAX_RECLAIM_RETRIES;
2289		goto retry;
2290	}
2291nomem:
2292	/*
2293	 * Memcg doesn't have a dedicated reserve for atomic
2294	 * allocations. But like the global atomic pool, we need to
2295	 * put the burden of reclaim on regular allocation requests
2296	 * and let these go through as privileged allocations.
2297	 */
2298	if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2299		return -ENOMEM;
2300force:
2301	/*
2302	 * If the allocation has to be enforced, don't forget to raise
2303	 * a MEMCG_MAX event.
2304	 */
2305	if (!raised_max_event)
2306		memcg_memory_event(mem_over_limit, MEMCG_MAX);
2307
2308	/*
2309	 * The allocation either can't fail or will lead to more memory
2310	 * being freed very soon.  Allow memory usage to go over the limit
2311	 * temporarily by force charging it.
2312	 */
2313	page_counter_charge(&memcg->memory, nr_pages);
2314	if (do_memsw_account())
2315		page_counter_charge(&memcg->memsw, nr_pages);
2316
2317	return 0;
 
2318
2319done_restock:
2320	if (batch > nr_pages)
2321		refill_stock(memcg, batch - nr_pages);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2322
2323	/*
2324	 * If the hierarchy is above the normal consumption range, schedule
2325	 * reclaim on returning to userland.  We can perform reclaim here
2326	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2327	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2328	 * not recorded as it most likely matches current's and won't
2329	 * change in the meantime.  As high limit is checked again before
2330	 * reclaim, the cost of mismatch is negligible.
 
 
 
 
 
 
2331	 */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2332	do {
2333		bool mem_high, swap_high;
2334
2335		mem_high = page_counter_read(&memcg->memory) >
2336			READ_ONCE(memcg->memory.high);
2337		swap_high = page_counter_read(&memcg->swap) >
2338			READ_ONCE(memcg->swap.high);
2339
2340		/* Don't bother a random interrupted task */
2341		if (!in_task()) {
2342			if (mem_high) {
2343				schedule_work(&memcg->high_work);
2344				break;
2345			}
2346			continue;
2347		}
2348
2349		if (mem_high || swap_high) {
2350			/*
2351			 * The allocating tasks in this cgroup will need to do
2352			 * reclaim or be throttled to prevent further growth
2353			 * of the memory or swap footprints.
2354			 *
2355			 * Target some best-effort fairness between the tasks,
2356			 * and distribute reclaim work and delay penalties
2357			 * based on how much each task is actually allocating.
2358			 */
2359			current->memcg_nr_pages_over_high += batch;
2360			set_notify_resume(current);
 
 
 
 
 
 
 
2361			break;
 
 
 
2362		}
2363	} while ((memcg = parent_mem_cgroup(memcg)));
2364
2365	/*
2366	 * Reclaim is set up above to be called from the userland
2367	 * return path. But also attempt synchronous reclaim to avoid
2368	 * excessive overrun while the task is still inside the
2369	 * kernel. If this is successful, the return path will see it
2370	 * when it rechecks the overage and simply bail out.
2371	 */
2372	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2373	    !(current->flags & PF_MEMALLOC) &&
2374	    gfpflags_allow_blocking(gfp_mask))
2375		mem_cgroup_handle_over_high(gfp_mask);
2376	return 0;
 
 
 
 
 
 
2377}
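/*
 * A rough trace of the fallbacks above for a single-page GFP_KERNEL charge
 * against a memcg that is at memory.max: the per-cpu stock misses, the page
 * counter charge fails, direct reclaim runs (and the per-cpu stocks are
 * drained once), the charge is retried up to MAX_RECLAIM_RETRIES times, and
 * only then is the memcg OOM killer invoked; if even that cannot make
 * progress the call returns -ENOMEM, while __GFP_NOFAIL, __GFP_HIGH and
 * PF_MEMALLOC callers are force-charged over the limit instead.
 */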
2378
2379/**
2380 * mem_cgroup_cancel_charge() - cancel an uncommitted try_charge() call.
2381 * @memcg: memcg previously charged.
2382 * @nr_pages: number of pages previously charged.
2383 */
2384void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
 
2385{
2386	if (mem_cgroup_is_root(memcg))
2387		return;
2388
2389	page_counter_uncharge(&memcg->memory, nr_pages);
2390	if (do_memsw_account())
2391		page_counter_uncharge(&memcg->memsw, nr_pages);
 
2392}
2393
2394static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
 
 
 
 
 
2395{
2396	VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio);
2397	/*
2398	 * Any of the following ensures page's memcg stability:
2399	 *
2400	 * - the page lock
2401	 * - LRU isolation
2402	 * - exclusive reference
2403	 */
2404	folio->memcg_data = (unsigned long)memcg;
2405}
2406
2407/**
2408 * mem_cgroup_commit_charge - commit a previously successful try_charge().
2409 * @folio: folio to commit the charge to.
2410 * @memcg: memcg previously charged.
 
2411 */
2412void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2413{
2414	css_get(&memcg->css);
2415	commit_charge(folio, memcg);
2416	memcg1_commit_charge(folio, memcg);
 
 
 
 
 
 
2417}
2418
2419static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg,
2420				       struct pglist_data *pgdat,
2421				       enum node_stat_item idx, int nr)
2422{
2423	struct mem_cgroup *memcg;
2424	struct lruvec *lruvec;
 
 
 
 
2425
2426	rcu_read_lock();
2427	memcg = obj_cgroup_memcg(objcg);
2428	lruvec = mem_cgroup_lruvec(memcg, pgdat);
2429	__mod_memcg_lruvec_state(lruvec, idx, nr);
2430	rcu_read_unlock();
 
 
 
 
 
 
 
 
 
 
 
 
2431}
2432
2433static __always_inline
2434struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
 
 
 
2435{
 
 
 
 
 
 
 
 
 
 
 
 
2436	/*
2437	 * Slab objects are accounted individually, not per-page.
2438	 * Memcg membership data for each individual object is saved in
2439	 * slab->obj_exts.
2440	 */
2441	if (folio_test_slab(folio)) {
2442		struct slabobj_ext *obj_exts;
2443		struct slab *slab;
2444		unsigned int off;
2445
2446		slab = folio_slab(folio);
2447		obj_exts = slab_obj_exts(slab);
2448		if (!obj_exts)
2449			return NULL;
2450
2451		off = obj_to_index(slab->slab_cache, slab, p);
2452		if (obj_exts[off].objcg)
2453			return obj_cgroup_memcg(obj_exts[off].objcg);
 
 
 
 
 
 
 
 
 
 
 
2454
2455		return NULL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2456	}
2457
 
 
 
 
 
 
 
 
2458	/*
2459	 * folio_memcg_check() is used here, because in theory we can encounter
2460	 * a folio where the slab flag has been cleared already, but
2461	 * slab->obj_exts has not been freed yet
2462	 * folio_memcg_check() will guarantee that a proper memory
2463	 * cgroup pointer or NULL will be returned.
2464	 */
2465	return folio_memcg_check(folio);
2466}
2467
 
 
 
2468/*
2469 * Returns a pointer to the memory cgroup to which the kernel object is charged.
2470 * It is not suitable for objects allocated using vmalloc().
2471 *
2472 * A passed kernel object must be a slab object or a generic kernel page.
2473 *
2474 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2475 * cgroup_mutex, etc.
2476 */
2477struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
2478{
 
 
 
 
2479	if (mem_cgroup_disabled())
2480		return NULL;
2481
2482	return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
 
 
 
 
2483}
 
2484
2485static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2486{
2487	struct obj_cgroup *objcg = NULL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2488
2489	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2490		objcg = rcu_dereference(memcg->objcg);
2491		if (likely(objcg && obj_cgroup_tryget(objcg)))
2492			break;
2493		objcg = NULL;
2494	}
2495	return objcg;
2496}
2497
2498static struct obj_cgroup *current_objcg_update(void)
2499{
2500	struct mem_cgroup *memcg;
2501	struct obj_cgroup *old, *objcg = NULL;
2502
2503	do {
2504		/* Atomically drop the update bit. */
2505		old = xchg(&current->objcg, NULL);
2506		if (old) {
2507			old = (struct obj_cgroup *)
2508				((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
2509			obj_cgroup_put(old);
2510
2511			old = NULL;
2512		}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2513
2514		/* If new objcg is NULL, no reason for the second atomic update. */
2515		if (!current->mm || (current->flags & PF_KTHREAD))
2516			return NULL;
2517
2518		/*
2519		 * Release the objcg pointer from the previous iteration,
2520		 * if try_cmpxchg() below fails.
2521		 */
2522		if (unlikely(objcg)) {
2523			obj_cgroup_put(objcg);
2524			objcg = NULL;
2525		}
 
2526
2527		/*
2528		 * Obtain the new objcg pointer. The current task can be
2529		 * asynchronously moved to another memcg and the previous
2530		 * memcg can be offlined. So let's get the memcg pointer
2531		 * and try to get a reference to the objcg under an RCU read lock.
2532		 */
2533
2534		rcu_read_lock();
2535		memcg = mem_cgroup_from_task(current);
2536		objcg = __get_obj_cgroup_from_memcg(memcg);
2537		rcu_read_unlock();
 
2538
2539		/*
2540		 * Try to set up a new objcg pointer atomically. If it
2541		 * fails, it means the update flag was set concurrently, so
2542		 * the whole procedure should be repeated.
2543		 */
2544	} while (!try_cmpxchg(&current->objcg, &old, objcg));
2545
2546	return objcg;
2547}
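/*
 * A rough sketch of the update protocol above: current->objcg normally
 * caches a reference to the task's objcg. When the task moves to another
 * memcg (presumably via the fork/attach paths, which are not shown here),
 * CURRENT_OBJCG_UPDATE_FLAG is set in the pointer's low bit; the next
 * current_obj_cgroup() call notices the flag and calls
 * current_objcg_update(), which drops the stale reference and re-resolves
 * the objcg from the task's current memcg. If another update races in,
 * try_cmpxchg() fails because the flag got set again and the procedure is
 * simply repeated.
 */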
 
 
 
 
2548
2549__always_inline struct obj_cgroup *current_obj_cgroup(void)
2550{
2551	struct mem_cgroup *memcg;
2552	struct obj_cgroup *objcg;
2553
2554	if (in_task()) {
2555		memcg = current->active_memcg;
2556		if (unlikely(memcg))
2557			goto from_memcg;
2558
2559		objcg = READ_ONCE(current->objcg);
2560		if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
2561			objcg = current_objcg_update();
2562		/*
2563		 * The objcg reference is kept by the task, so it's safe
2564		 * for the current task to use the objcg.
2565		 */
2566		return objcg;
2567	}
2568
2569	memcg = this_cpu_read(int_active_memcg);
2570	if (unlikely(memcg))
2571		goto from_memcg;
 
 
 
 
 
2572
2573	return NULL;
 
 
 
 
 
 
 
 
 
 
 
 
2574
2575from_memcg:
2576	objcg = NULL;
2577	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
2578		/*
2579		 * Memcg pointer is protected by scope (see set_active_memcg())
2580		 * and is pinning the corresponding objcg, so objcg can't go
2581		 * away and can be used within the scope without any additional
2582		 * protection.
2583		 */
2584		objcg = rcu_dereference_check(memcg->objcg, 1);
2585		if (likely(objcg))
2586			break;
2587	}
2588
2589	return objcg;
 
 
 
 
2590}
2591
2592struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
 
2593{
2594	struct obj_cgroup *objcg;
 
 
 
 
 
 
 
 
 
 
 
2595
2596	if (!memcg_kmem_online())
2597		return NULL;
 
 
 
 
2598
2599	if (folio_memcg_kmem(folio)) {
2600		objcg = __folio_objcg(folio);
2601		obj_cgroup_get(objcg);
2602	} else {
2603		struct mem_cgroup *memcg;
2604
2605		rcu_read_lock();
2606		memcg = __folio_memcg(folio);
2607		if (memcg)
2608			objcg = __get_obj_cgroup_from_memcg(memcg);
2609		else
2610			objcg = NULL;
2611		rcu_read_unlock();
 
 
 
 
2612	}
2613	return objcg;
2614}
2615
2616/*
2617 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
2618 * @objcg: object cgroup to uncharge
2619 * @nr_pages: number of pages to uncharge
2620 */
2621static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
2622				      unsigned int nr_pages)
2623{
2624	struct mem_cgroup *memcg;
2625
2626	memcg = get_mem_cgroup_from_objcg(objcg);
2627
2628	mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
2629	memcg1_account_kmem(memcg, -nr_pages);
2630	refill_stock(memcg, nr_pages);
2631
2632	css_put(&memcg->css);
2633}
2634
2635/*
2636 * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
2637 * @objcg: object cgroup to charge
2638 * @gfp: reclaim mode
2639 * @nr_pages: number of pages to charge
2640 *
2641 * Returns 0 on success, an error code on failure.
2642 */
2643static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
2644				   unsigned int nr_pages)
2645{
2646	struct mem_cgroup *memcg;
2647	int ret;
2648
2649	memcg = get_mem_cgroup_from_objcg(objcg);
2650
2651	ret = try_charge_memcg(memcg, gfp, nr_pages);
2652	if (ret)
2653		goto out;
2654
2655	mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
2656	memcg1_account_kmem(memcg, nr_pages);
2657out:
 
 
 
 
 
 
 
 
 
 
 
 
2658	css_put(&memcg->css);
2659
 
 
 
 
 
 
 
 
2660	return ret;
2661}
2662
2663/**
2664 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
2665 * @page: page to charge
2666 * @gfp: reclaim mode
2667 * @order: allocation order
2668 *
2669 * Returns 0 on success, an error code on failure.
2670 */
2671int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
2672{
2673	struct obj_cgroup *objcg;
2674	int ret = 0;
 
 
 
2675
2676	objcg = current_obj_cgroup();
2677	if (objcg) {
2678		ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
2679		if (!ret) {
2680			obj_cgroup_get(objcg);
2681			page->memcg_data = (unsigned long)objcg |
2682				MEMCG_DATA_KMEM;
2683			return 0;
2684		}
 
 
2685	}
2686	return ret;
 
 
 
 
 
2687}
2688
2689/**
2690 * __memcg_kmem_uncharge_page: uncharge a kmem page
2691 * @page: page to uncharge
2692 * @order: allocation order
2693 */
2694void __memcg_kmem_uncharge_page(struct page *page, int order)
2695{
2696	struct folio *folio = page_folio(page);
2697	struct obj_cgroup *objcg;
2698	unsigned int nr_pages = 1 << order;
2699
2700	if (!folio_memcg_kmem(folio))
 
 
2701		return;
2702
2703	objcg = __folio_objcg(folio);
2704	obj_cgroup_uncharge_pages(objcg, nr_pages);
2705	folio->memcg_data = 0;
2706	obj_cgroup_put(objcg);
2707}
2708
2709static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
2710		     enum node_stat_item idx, int nr)
 
2711{
2712	struct memcg_stock_pcp *stock;
2713	struct obj_cgroup *old = NULL;
2714	unsigned long flags;
2715	int *bytes;
2716
2717	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2718	stock = this_cpu_ptr(&memcg_stock);
 
2719
 
 
 
 
 
 
 
 
2720	/*
2721	 * Save vmstat data in stock and skip vmstat array update unless
2722	 * accumulating over a page of vmstat data or when pgdat or idx
2723	 * changes.
 
 
2724	 */
2725	if (READ_ONCE(stock->cached_objcg) != objcg) {
2726		old = drain_obj_stock(stock);
2727		obj_cgroup_get(objcg);
2728		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
2729				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
2730		WRITE_ONCE(stock->cached_objcg, objcg);
2731		stock->cached_pgdat = pgdat;
2732	} else if (stock->cached_pgdat != pgdat) {
2733		/* Flush the existing cached vmstat data */
2734		struct pglist_data *oldpg = stock->cached_pgdat;
2735
2736		if (stock->nr_slab_reclaimable_b) {
2737			__mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
2738					  stock->nr_slab_reclaimable_b);
2739			stock->nr_slab_reclaimable_b = 0;
2740		}
2741		if (stock->nr_slab_unreclaimable_b) {
2742			__mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
2743					  stock->nr_slab_unreclaimable_b);
2744			stock->nr_slab_unreclaimable_b = 0;
2745		}
2746		stock->cached_pgdat = pgdat;
2747	}
2748
2749	bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
2750					       : &stock->nr_slab_unreclaimable_b;
2751	/*
2752	 * Even for a large object >= PAGE_SIZE, the vmstat data will still be
2753	 * cached locally at least once before pushing it out.
2754	 */
2755	if (!*bytes) {
2756		*bytes = nr;
2757		nr = 0;
2758	} else {
2759		*bytes += nr;
2760		if (abs(*bytes) > PAGE_SIZE) {
2761			nr = *bytes;
2762			*bytes = 0;
2763		} else {
2764			nr = 0;
2765		}
2766	}
2767	if (nr)
2768		__mod_objcg_mlstate(objcg, pgdat, idx, nr);
2769
2770	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2771	obj_cgroup_put(old);
2772}
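/*
 * A small worked example of the caching above, assuming 4K pages: two
 * consecutive +256 byte NR_SLAB_RECLAIMABLE_B updates against the same pgdat
 * merely accumulate in stock->nr_slab_reclaimable_b (256, then 512 bytes);
 * __mod_objcg_mlstate() is only called once the accumulated value exceeds
 * PAGE_SIZE in magnitude, or when the cached objcg or pgdat changes.
 */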
2773
2774static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
 
 
 
 
2775{
2776	struct memcg_stock_pcp *stock;
2777	unsigned long flags;
2778	bool ret = false;
 
 
 
 
2779
2780	local_lock_irqsave(&memcg_stock.stock_lock, flags);
 
2781
2782	stock = this_cpu_ptr(&memcg_stock);
2783	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
2784		stock->nr_bytes -= nr_bytes;
2785		ret = true;
2786	}
 
 
 
 
 
 
2787
2788	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2789
2790	return ret;
2791}
2792
2793static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2794{
2795	struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
2796
2797	if (!old)
2798		return NULL;
2799
2800	if (stock->nr_bytes) {
2801		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
2802		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
2803
2804		if (nr_pages) {
2805			struct mem_cgroup *memcg;
2806
2807			memcg = get_mem_cgroup_from_objcg(old);
2808
2809			mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
2810			memcg1_account_kmem(memcg, -nr_pages);
2811			__refill_stock(memcg, nr_pages);
2812
2813			css_put(&memcg->css);
2814		}
2815
 
 
2816		/*
2817		 * The leftover is flushed to the centralized per-memcg value.
2818		 * On the next attempt to refill obj stock it will be moved
2819		 * to a per-cpu stock (probably on another CPU), see
2820		 * refill_obj_stock().
2821		 *
2822		 * How often it's flushed is a trade-off between the memory
2823		 * limit enforcement accuracy and potential CPU contention,
2824		 * so it might be changed in the future.
2825		 */
2826		atomic_add(nr_bytes, &old->nr_charged_bytes);
2827		stock->nr_bytes = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2828	}
2829
 
 
 
2830	/*
2831	 * Flush the vmstat data in current stock
 
 
 
2832	 */
2833	if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
2834		if (stock->nr_slab_reclaimable_b) {
2835			__mod_objcg_mlstate(old, stock->cached_pgdat,
2836					  NR_SLAB_RECLAIMABLE_B,
2837					  stock->nr_slab_reclaimable_b);
2838			stock->nr_slab_reclaimable_b = 0;
2839		}
2840		if (stock->nr_slab_unreclaimable_b) {
2841			__mod_objcg_mlstate(old, stock->cached_pgdat,
2842					  NR_SLAB_UNRECLAIMABLE_B,
2843					  stock->nr_slab_unreclaimable_b);
2844			stock->nr_slab_unreclaimable_b = 0;
2845		}
2846		stock->cached_pgdat = NULL;
2847	}
2848
2849	WRITE_ONCE(stock->cached_objcg, NULL);
2850	/*
2851	 * The `old' objcg needs to be released by the caller via
2852	 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
2853	 */
2854	return old;
 
 
 
 
 
 
 
 
 
 
 
 
2855}
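/*
 * A worked example of the split above, assuming 4K pages: with 9000 stocked
 * bytes, nr_pages = 9000 >> PAGE_SHIFT = 2 whole pages are returned to the
 * memcg counters via __refill_stock() (after adjusting MEMCG_KMEM), while
 * the remaining 9000 - 8192 = 808 bytes are parked in
 * objcg->nr_charged_bytes for a later refill_obj_stock() to pick up.
 */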
2856
2857static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2858				     struct mem_cgroup *root_memcg)
2859{
2860	struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
2861	struct mem_cgroup *memcg;
2862
2863	if (objcg) {
2864		memcg = obj_cgroup_memcg(objcg);
2865		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
2866			return true;
2867	}
2868
2869	return false;
2870}
2871
2872static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
2873			     bool allow_uncharge)
2874{
2875	struct memcg_stock_pcp *stock;
2876	struct obj_cgroup *old = NULL;
2877	unsigned long flags;
2878	unsigned int nr_pages = 0;
2879
2880	local_lock_irqsave(&memcg_stock.stock_lock, flags);
 
 
 
 
 
 
2881
2882	stock = this_cpu_ptr(&memcg_stock);
2883	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
2884		old = drain_obj_stock(stock);
2885		obj_cgroup_get(objcg);
2886		WRITE_ONCE(stock->cached_objcg, objcg);
2887		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
2888				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
2889		allow_uncharge = true;	/* Allow uncharge when objcg changes */
2890	}
2891	stock->nr_bytes += nr_bytes;
2892
2893	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
2894		nr_pages = stock->nr_bytes >> PAGE_SHIFT;
2895		stock->nr_bytes &= (PAGE_SIZE - 1);
2896	}
2897
2898	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2899	obj_cgroup_put(old);
2900
2901	if (nr_pages)
2902		obj_cgroup_uncharge_pages(objcg, nr_pages);
2903}
2904
2905int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
2906{
2907	unsigned int nr_pages, nr_bytes;
2908	int ret;
2909
2910	if (consume_obj_stock(objcg, size))
2911		return 0;
2912
2913	/*
2914	 * In theory, objcg->nr_charged_bytes can have enough
2915	 * pre-charged bytes to satisfy the allocation. However,
2916	 * flushing objcg->nr_charged_bytes requires two atomic
2917	 * operations, and objcg->nr_charged_bytes can't be big.
2918	 * The shared objcg->nr_charged_bytes can also become a
2919	 * performance bottleneck if all tasks of the same memcg are
2920	 * trying to update it. So it's better to ignore it and try
2921	 * grab some new pages. The stock's nr_bytes will be flushed to
2922	 * objcg->nr_charged_bytes later on when objcg changes.
2923	 *
2924	 * The stock's nr_bytes may contain enough pre-charged bytes
2925	 * to allow one less page from being charged, but we can't rely
2926	 * on the pre-charged bytes not being changed outside of
2927	 * consume_obj_stock() or refill_obj_stock(). So ignore those
2928	 * pre-charged bytes as well when charging pages. To avoid a
2929	 * page uncharge right after a page charge, we set the
2930	 * allow_uncharge flag to false when calling refill_obj_stock()
2931	 * to temporarily allow the pre-charged bytes to exceed the page
2932	 * size limit. The maximum reachable value of the pre-charged
2933	 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
2934	 * race.
2935	 */
2936	nr_pages = size >> PAGE_SHIFT;
2937	nr_bytes = size & (PAGE_SIZE - 1);
2938
2939	if (nr_bytes)
2940		nr_pages += 1;
2941
2942	ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
2943	if (!ret && nr_bytes)
2944		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
2945
2946	return ret;
2947}
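/*
 * A worked example of the byte-level charging above, assuming 4K pages:
 * charging a 700-byte object that misses the per-cpu obj stock rounds up to
 * nr_pages = 1, charges one full page via obj_cgroup_charge_pages(), and
 * then pre-charges the unused PAGE_SIZE - 700 = 3396 bytes into the stock
 * with refill_obj_stock(), where later small charges can consume them.
 */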
2948
2949void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
2950{
2951	refill_obj_stock(objcg, size, true);
2952}
2953
2954static inline size_t obj_full_size(struct kmem_cache *s)
2955{
2956	/*
2957	 * For each accounted object there is an extra space which is used
2958	 * to store obj_cgroup membership. Charge it too.
2959	 */
2960	return s->size + sizeof(struct obj_cgroup *);
 
 
 
 
 
 
 
 
2961}
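/*
 * For example, on a 64-bit kernel a cache with s->size == 64 is accounted
 * as 64 + sizeof(struct obj_cgroup *) = 72 bytes per object.
 */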
2962
2963bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
2964				  gfp_t flags, size_t size, void **p)
 
 
 
 
 
2965{
2966	struct obj_cgroup *objcg;
2967	struct slab *slab;
2968	unsigned long off;
2969	size_t i;
 
2970
2971	/*
2972	 * The obtained objcg pointer is safe to use within the current scope,
2973	 * defined by current task or set_active_memcg() pair.
2974	 * obj_cgroup_get() is used to get a permanent reference.
2975	 */
2976	objcg = current_obj_cgroup();
2977	if (!objcg)
2978		return true;
2979
2980	/*
2981	 * slab_alloc_node() avoids the NULL check, so we might be called with a
2982	 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
2983	 * the whole requested size.
2984	 * Return success as there's nothing to free back.
2985	 */
2986	if (unlikely(*p == NULL))
2987		return true;
 
 
2988
2989	flags &= gfp_allowed_mask;
 
 
 
 
 
 
 
 
2990
2991	if (lru) {
2992		int ret;
2993		struct mem_cgroup *memcg;
2994
2995		memcg = get_mem_cgroup_from_objcg(objcg);
2996		ret = memcg_list_lru_alloc(memcg, lru, flags);
2997		css_put(&memcg->css);
2998
2999		if (ret)
3000			return false;
 
 
 
 
 
 
3001	}
 
 
3002
3003	if (obj_cgroup_charge(objcg, flags, size * obj_full_size(s)))
3004		return false;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3005
3006	for (i = 0; i < size; i++) {
3007		slab = virt_to_slab(p[i]);
3008
3009		if (!slab_obj_exts(slab) &&
3010		    alloc_slab_obj_exts(slab, s, flags, false)) {
3011			obj_cgroup_uncharge(objcg, obj_full_size(s));
3012			continue;
3013		}
3014
3015		off = obj_to_index(s, slab, p[i]);
3016		obj_cgroup_get(objcg);
3017		slab_obj_exts(slab)[off].objcg = objcg;
3018		mod_objcg_state(objcg, slab_pgdat(slab),
3019				cache_vmstat_idx(s), obj_full_size(s));
 
 
3020	}
3021
3022	return true;
3023}
3024
3025void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
3026			    void **p, int objects, struct slabobj_ext *obj_exts)
3027{
3028	for (int i = 0; i < objects; i++) {
3029		struct obj_cgroup *objcg;
3030		unsigned int off;
3031
3032		off = obj_to_index(s, slab, p[i]);
3033		objcg = obj_exts[off].objcg;
3034		if (!objcg)
3035			continue;
3036
3037		obj_exts[off].objcg = NULL;
3038		obj_cgroup_uncharge(objcg, obj_full_size(s));
3039		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
3040				-obj_full_size(s));
3041		obj_cgroup_put(objcg);
3042	}
3043}
 
3044
3045/*
3046 * Because folio_memcg(head) is not set on tails, set it now.
 
3047 */
3048void split_page_memcg(struct page *head, int old_order, int new_order)
 
3049{
3050	struct folio *folio = page_folio(head);
3051	int i;
3052	unsigned int old_nr = 1 << old_order;
3053	unsigned int new_nr = 1 << new_order;
3054
3055	if (mem_cgroup_disabled() || !folio_memcg_charged(folio))
3056		return;
3057
3058	for (i = new_nr; i < old_nr; i += new_nr)
3059		folio_page(folio, i)->memcg_data = folio->memcg_data;
 
3060
3061	if (folio_memcg_kmem(folio))
3062		obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
3063	else
3064		css_get_many(&folio_memcg(folio)->css, old_nr / new_nr - 1);
3065}
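/*
 * A small worked example: splitting an order-9 folio (512 pages) into
 * order-0 pages copies folio->memcg_data to pages 1..511 and takes
 * 512 / 1 - 1 = 511 extra css or objcg references, so each resulting page
 * can later be uncharged independently.
 */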
3066
3067unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3068{
3069	unsigned long val;
3070
3071	if (mem_cgroup_is_root(memcg)) {
3072		/*
3073		 * Approximate root's usage from global state. This isn't
3074		 * perfect, but the root usage was always an approximation.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3075		 */
3076		val = global_node_page_state(NR_FILE_PAGES) +
3077			global_node_page_state(NR_ANON_MAPPED);
3078		if (swap)
3079			val += total_swap_pages - get_nr_swap_pages();
3080	} else {
3081		if (!swap)
3082			val = page_counter_read(&memcg->memory);
3083		else
3084			val = page_counter_read(&memcg->memsw);
3085	}
3086	return val;
3087}
3088
3089static int memcg_online_kmem(struct mem_cgroup *memcg)
3090{
3091	struct obj_cgroup *objcg;
3092
3093	if (mem_cgroup_kmem_disabled())
3094		return 0;
3095
3096	if (unlikely(mem_cgroup_is_root(memcg)))
3097		return 0;
3098
3099	objcg = obj_cgroup_alloc();
3100	if (!objcg)
 
 
 
 
 
 
 
 
 
3101		return -ENOMEM;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3102
3103	objcg->memcg = memcg;
3104	rcu_assign_pointer(memcg->objcg, objcg);
3105	obj_cgroup_get(objcg);
3106	memcg->orig_objcg = objcg;
 
 
 
3107
3108	static_branch_enable(&memcg_kmem_online_key);
3109
3110	memcg->kmemcg_id = memcg->id.id;
3111
3112	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3113}
3114
3115static void memcg_offline_kmem(struct mem_cgroup *memcg)
3116{
3117	struct mem_cgroup *parent;
 
 
 
 
 
 
 
 
3118
3119	if (mem_cgroup_kmem_disabled())
3120		return;
3121
3122	if (unlikely(mem_cgroup_is_root(memcg)))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3123		return;
3124
3125	parent = parent_mem_cgroup(memcg);
3126	if (!parent)
3127		parent = root_mem_cgroup;
3128
3129	memcg_reparent_list_lrus(memcg, parent);
3130
3131	/*
3132	 * Objcg's reparenting must be after list_lru's, so that list_lru
3133	 * helpers won't use the parent's list_lru until the child is drained.
 
3134	 */
3135	memcg_reparent_objcgs(memcg, parent);
3136}
3137
3138#ifdef CONFIG_CGROUP_WRITEBACK
 
 
 
3139
3140#include <trace/events/writeback.h>
3141
3142static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3143{
3144	return wb_domain_init(&memcg->cgwb_domain, gfp);
 
 
 
 
3145}
3146
3147static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3148{
3149	wb_domain_exit(&memcg->cgwb_domain);
3150}
3151
3152static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3153{
3154	wb_domain_size_changed(&memcg->cgwb_domain);
3155}
3156
3157struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3158{
3159	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3160
3161	if (!memcg->css.parent)
3162		return NULL;
 
 
 
 
 
3163
3164	return &memcg->cgwb_domain;
3165}
3166
3167/**
3168 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3169 * @wb: bdi_writeback in question
3170 * @pfilepages: out parameter for number of file pages
3171 * @pheadroom: out parameter for number of allocatable pages according to memcg
3172 * @pdirty: out parameter for number of dirty pages
3173 * @pwriteback: out parameter for number of pages under writeback
3174 *
3175 * Determine the numbers of file, headroom, dirty, and writeback pages in
3176 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
3177 * is a bit more involved.
3178 *
3179 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
3180 * headroom is calculated as the lowest headroom of itself and the
3181 * ancestors.  Note that this doesn't consider the actual amount of
3182 * available memory in the system.  The caller should further cap
3183 * *@pheadroom accordingly.
3184 */
3185void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3186			 unsigned long *pheadroom, unsigned long *pdirty,
3187			 unsigned long *pwriteback)
3188{
3189	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3190	struct mem_cgroup *parent;
 
 
 
 
3191
3192	mem_cgroup_flush_stats_ratelimited(memcg);
 
 
 
 
 
3193
3194	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
3195	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
3196	*pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
3197			memcg_page_state(memcg, NR_ACTIVE_FILE);
3198
3199	*pheadroom = PAGE_COUNTER_MAX;
3200	while ((parent = parent_mem_cgroup(memcg))) {
3201		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
3202					    READ_ONCE(memcg->memory.high));
3203		unsigned long used = page_counter_read(&memcg->memory);
3204
3205		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3206		memcg = parent;
3207	}
3208}
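/*
 * A rough worked example of the headroom walk above, with hypothetical
 * cgroups A (the wb's memcg) and B (its parent): if A has max = 1G,
 * high = 512M and 300M used, A contributes min(1G, 512M) - 300M = 212M; if B
 * has max = 400M, no high limit and 350M used, B contributes 50M, so
 * *pheadroom ends up as min(212M, 50M) = 50M (in pages).
 */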
3209
3210/*
3211 * Foreign dirty flushing
3212 *
3213 * There's an inherent mismatch between memcg and writeback.  The former
3214 * tracks ownership per-page while the latter per-inode.  This was a
3215 * deliberate design decision because honoring per-page ownership in the
3216 * writeback path is complicated, may lead to higher CPU and IO overheads
3217 * and deemed unnecessary given that write-sharing an inode across
3218 * different cgroups isn't a common use-case.
3219 *
3220 * Combined with inode majority-writer ownership switching, this works well
3221 * enough in most cases but there are some pathological cases.  For
3222 * example, let's say there are two cgroups A and B which keep writing to
3223 * different but confined parts of the same inode.  B owns the inode and
3224 * A's memory is limited far below B's.  A's dirty ratio can rise enough to
3225 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
3226 * triggering background writeback.  A will be slowed down without a way to
3227 * make writeback of the dirty pages happen.
3228 *
3229 * Conditions like the above can lead to a cgroup getting repeatedly and
3230 * severely throttled after making some progress after each
3231 * dirty_expire_interval while the underlying IO device is almost
3232 * completely idle.
3233 *
3234 * Solving this problem completely requires matching the ownership tracking
3235 * granularities between memcg and writeback in either direction.  However,
3236 * the more egregious behaviors can be avoided by simply remembering the
3237 * most recent foreign dirtying events and initiating remote flushes on
3238 * them when local writeback isn't enough to keep the memory clean enough.
3239 *
3240 * The following two functions implement such mechanism.  When a foreign
3241 * page - a page whose memcg and writeback ownerships don't match - is
3242 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
3243 * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
3244 * decides that the memcg needs to sleep due to high dirty ratio, it calls
3245 * mem_cgroup_flush_foreign() which queues writeback on the recorded
3246 * foreign bdi_writebacks which haven't expired.  Both the numbers of
3247 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
3248 * limited to MEMCG_CGWB_FRN_CNT.
3249 *
3250 * The mechanism only remembers IDs and doesn't hold any object references.
3251 * As being wrong occasionally doesn't matter, updates and accesses to the
3252 * records are lockless and racy.
3253 */
3254void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
3255					     struct bdi_writeback *wb)
3256{
3257	struct mem_cgroup *memcg = folio_memcg(folio);
3258	struct memcg_cgwb_frn *frn;
3259	u64 now = get_jiffies_64();
3260	u64 oldest_at = now;
3261	int oldest = -1;
3262	int i;
3263
3264	trace_track_foreign_dirty(folio, wb);
3265
3266	/*
3267	 * Pick the slot to use.  If there is already a slot for @wb, keep
3268	 * using it.  If not, replace the oldest one which isn't being
3269	 * written out.
3270	 */
3271	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3272		frn = &memcg->cgwb_frn[i];
3273		if (frn->bdi_id == wb->bdi->id &&
3274		    frn->memcg_id == wb->memcg_css->id)
3275			break;
3276		if (time_before64(frn->at, oldest_at) &&
3277		    atomic_read(&frn->done.cnt) == 1) {
3278			oldest = i;
3279			oldest_at = frn->at;
3280		}
3281	}
3282
3283	if (i < MEMCG_CGWB_FRN_CNT) {
3284		/*
3285		 * Re-using an existing one.  Update timestamp lazily to
3286		 * avoid making the cacheline hot.  We want them to be
3287		 * reasonably up-to-date and significantly shorter than
3288		 * dirty_expire_interval as that's what expires the record.
3289		 * Use the shorter of 1s and dirty_expire_interval / 8.
3290		 */
3291		unsigned long update_intv =
3292			min_t(unsigned long, HZ,
3293			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
3294
3295		if (time_before64(frn->at, now - update_intv))
3296			frn->at = now;
3297	} else if (oldest >= 0) {
3298		/* replace the oldest free one */
3299		frn = &memcg->cgwb_frn[oldest];
3300		frn->bdi_id = wb->bdi->id;
3301		frn->memcg_id = wb->memcg_css->id;
3302		frn->at = now;
3303	}
3304}
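/*
 * For instance, with the default dirty_expire_interval of 30 seconds (3000
 * centisecs, an assumption about the writeback defaults), the
 * dirty_expire_interval / 8 term above is 3.75s, so update_intv works out to
 * the 1s (HZ) cap and a reused slot's timestamp is refreshed at most about
 * once per second.
 */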
3305
3306/* issue foreign writeback flushes for recorded foreign dirtying events */
3307void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
3308{
3309	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3310	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
3311	u64 now = jiffies_64;
3312	int i;
3313
3314	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
3315		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
3316
3317		/*
3318		 * If the record is older than dirty_expire_interval,
3319		 * writeback on it has already started.  No need to kick it
3320		 * off again.  Also, don't start a new one if there's
3321		 * already one in flight.
3322		 */
3323		if (time_after64(frn->at, now - intv) &&
3324		    atomic_read(&frn->done.cnt) == 1) {
3325			frn->at = 0;
3326			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
3327			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
3328					       WB_REASON_FOREIGN_FLUSH,
3329					       &frn->done);
3330		}
3331	}
3332}
3333
3334#else	/* CONFIG_CGROUP_WRITEBACK */
 
3335
3336static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3337{
3338	return 0;
3339}
 
 
 
 
 
 
 
3340
3341static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3342{
3343}
3344
3345static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
 
3346{
3347}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3348
3349#endif	/* CONFIG_CGROUP_WRITEBACK */
 
3350
3351/*
3352 * Private memory cgroup IDR
3353 *
3354 * Swap-out records and page cache shadow entries need to store memcg
3355 * references in constrained space, so we maintain an ID space that is
3356 * limited to 16 bits (MEM_CGROUP_ID_MAX), limiting the total number of
3357 * memory-controlled cgroups to 64k.
3358 *
3359 * However, there usually are many references to the offline CSS after
3360 * the cgroup has been destroyed, such as page cache or reclaimable
3361 * slab objects, that don't need to hang on to the ID. We want to keep
3362 * those dead CSS from occupying IDs, or we might quickly exhaust the
3363 * relatively small ID space and prevent the creation of new cgroups
3364 * even when there are much fewer than 64k cgroups - possibly none.
3365 *
3366 * Maintain a private 16-bit ID space for memcg, and allow the ID to
3367 * be freed and recycled when it's no longer needed, which is usually
3368 * when the CSS is offlined.
3369 *
3370 * The only exception to that are records of swapped out tmpfs/shmem
3371 * pages that need to be attributed to live ancestors on swapin. But
3372 * those references are manageable from userspace.
3373 */
3374
3375#define MEM_CGROUP_ID_MAX	((1UL << MEM_CGROUP_ID_SHIFT) - 1)
3376static DEFINE_XARRAY_ALLOC1(mem_cgroup_ids);
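/*
 * For example, assuming the usual MEM_CGROUP_ID_SHIFT of 16,
 * MEM_CGROUP_ID_MAX is 65535 and xa_alloc() below hands out IDs from the
 * range [1, 65535]; ID 0 is never allocated, so a 16-bit field in a swap or
 * shadow entry can use it to mean "no memcg".
 */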
3377
3378static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
3379{
3380	if (memcg->id.id > 0) {
3381		xa_erase(&mem_cgroup_ids, memcg->id.id);
3382		memcg->id.id = 0;
3383	}
 
 
 
3384}
3385
3386void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
3387					   unsigned int n)
 
3388{
3389	refcount_add(n, &memcg->id.ref);
3390}
 
 
 
 
 
 
 
 
3391
3392void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
3393{
3394	if (refcount_sub_and_test(n, &memcg->id.ref)) {
3395		mem_cgroup_id_remove(memcg);
 
 
 
 
 
 
 
 
 
3396
3397		/* Memcg ID pins CSS */
3398		css_put(&memcg->css);
3399	}
3400}
 
 
3401
3402static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
3403{
3404	mem_cgroup_id_put_many(memcg, 1);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3405}
3406
3407/**
3408 * mem_cgroup_from_id - look up a memcg from a memcg id
3409 * @id: the memcg id to look up
3410 *
3411 * Caller must hold rcu_read_lock().
3412 */
3413struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
 
3414{
3415	WARN_ON_ONCE(!rcu_read_lock_held());
3416	return xa_load(&mem_cgroup_ids, id);
3417}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3418
3419#ifdef CONFIG_SHRINKER_DEBUG
3420struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
3421{
3422	struct cgroup *cgrp;
3423	struct cgroup_subsys_state *css;
3424	struct mem_cgroup *memcg;
3425
3426	cgrp = cgroup_get_from_id(ino);
3427	if (IS_ERR(cgrp))
3428		return ERR_CAST(cgrp);
3429
3430	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
3431	if (css)
3432		memcg = container_of(css, struct mem_cgroup, css);
3433	else
3434		memcg = ERR_PTR(-ENOENT);
3435
3436	cgroup_put(cgrp);
3437
3438	return memcg;
 
 
3439}
3440#endif
3441
3442static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
3443{
3444	struct mem_cgroup_per_node *pn;
3445
3446	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
3447	if (!pn)
3448		return false;
3449
3450	pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats),
3451					GFP_KERNEL_ACCOUNT, node);
3452	if (!pn->lruvec_stats)
3453		goto fail;
3454
3455	pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
3456						   GFP_KERNEL_ACCOUNT);
3457	if (!pn->lruvec_stats_percpu)
3458		goto fail;
3459
3460	lruvec_init(&pn->lruvec);
3461	pn->memcg = memcg;
3462
3463	memcg->nodeinfo[node] = pn;
3464	return true;
3465fail:
3466	kfree(pn->lruvec_stats);
3467	kfree(pn);
3468	return false;
3469}
3470
3471static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
3472{
3473	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
 
3474
3475	if (!pn)
3476		return;
3477
3478	free_percpu(pn->lruvec_stats_percpu);
3479	kfree(pn->lruvec_stats);
3480	kfree(pn);
3481}
3482
3483static void __mem_cgroup_free(struct mem_cgroup *memcg)
 
3484{
3485	int node;
3486
3487	obj_cgroup_put(memcg->orig_objcg);
 
3488
3489	for_each_node(node)
3490		free_mem_cgroup_per_node_info(memcg, node);
3491	memcg1_free_events(memcg);
3492	kfree(memcg->vmstats);
3493	free_percpu(memcg->vmstats_percpu);
3494	kfree(memcg);
3495}
3496
3497static void mem_cgroup_free(struct mem_cgroup *memcg)
3498{
3499	lru_gen_exit_memcg(memcg);
3500	memcg_wb_domain_exit(memcg);
3501	__mem_cgroup_free(memcg);
3502}
3503
3504static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
 
3505{
3506	struct memcg_vmstats_percpu *statc, *pstatc;
3507	struct mem_cgroup *memcg;
3508	int node, cpu;
3509	int __maybe_unused i;
3510	long error;
3511
3512	memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
3513	if (!memcg)
3514		return ERR_PTR(-ENOMEM);
3515
3516	error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL,
3517			 XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL);
3518	if (error)
3519		goto fail;
3520	error = -ENOMEM;
3521
3522	memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats),
3523				 GFP_KERNEL_ACCOUNT);
3524	if (!memcg->vmstats)
3525		goto fail;
3526
3527	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
3528						 GFP_KERNEL_ACCOUNT);
3529	if (!memcg->vmstats_percpu)
3530		goto fail;
3531
3532	if (!memcg1_alloc_events(memcg))
3533		goto fail;
 
3534
3535	for_each_possible_cpu(cpu) {
3536		if (parent)
3537			pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
3538		statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3539		statc->parent = parent ? pstatc : NULL;
3540		statc->vmstats = memcg->vmstats;
3541	}
3542
3543	for_each_node(node)
3544		if (!alloc_mem_cgroup_per_node_info(memcg, node))
3545			goto fail;
3546
3547	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
3548		goto fail;
3549
3550	INIT_WORK(&memcg->high_work, high_work_func);
3551	vmpressure_init(&memcg->vmpressure);
3552	INIT_LIST_HEAD(&memcg->memory_peaks);
3553	INIT_LIST_HEAD(&memcg->swap_peaks);
3554	spin_lock_init(&memcg->peaks_lock);
3555	memcg->socket_pressure = jiffies;
3556	memcg1_memcg_init(memcg);
3557	memcg->kmemcg_id = -1;
3558	INIT_LIST_HEAD(&memcg->objcg_list);
3559#ifdef CONFIG_CGROUP_WRITEBACK
3560	INIT_LIST_HEAD(&memcg->cgwb_list);
3561	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3562		memcg->cgwb_frn[i].done =
3563			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
3564#endif
3565#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3566	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
3567	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
3568	memcg->deferred_split_queue.split_queue_len = 0;
3569#endif
3570	lru_gen_init_memcg(memcg);
3571	return memcg;
3572fail:
3573	mem_cgroup_id_remove(memcg);
3574	__mem_cgroup_free(memcg);
3575	return ERR_PTR(error);
3576}
3577
3578static struct cgroup_subsys_state * __ref
3579mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 
3580{
3581	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
3582	struct mem_cgroup *memcg, *old_memcg;
 
 
3583
3584	old_memcg = set_active_memcg(parent);
3585	memcg = mem_cgroup_alloc(parent);
3586	set_active_memcg(old_memcg);
3587	if (IS_ERR(memcg))
3588		return ERR_CAST(memcg);
3589
3590	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3591	memcg1_soft_limit_reset(memcg);
3592#ifdef CONFIG_ZSWAP
3593	memcg->zswap_max = PAGE_COUNTER_MAX;
3594	WRITE_ONCE(memcg->zswap_writeback, true);
3595#endif
3596	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3597	if (parent) {
3598		WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
3599
3600		page_counter_init(&memcg->memory, &parent->memory, true);
3601		page_counter_init(&memcg->swap, &parent->swap, false);
3602#ifdef CONFIG_MEMCG_V1
3603		WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
3604		page_counter_init(&memcg->kmem, &parent->kmem, false);
3605		page_counter_init(&memcg->tcpmem, &parent->tcpmem, false);
3606#endif
3607	} else {
3608		init_memcg_stats();
3609		init_memcg_events();
3610		page_counter_init(&memcg->memory, NULL, true);
3611		page_counter_init(&memcg->swap, NULL, false);
3612#ifdef CONFIG_MEMCG_V1
3613		page_counter_init(&memcg->kmem, NULL, false);
3614		page_counter_init(&memcg->tcpmem, NULL, false);
3615#endif
3616		root_mem_cgroup = memcg;
3617		return &memcg->css;
3618	}
3619
3620	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
3621		static_branch_inc(&memcg_sockets_enabled_key);
3622
3623	if (!cgroup_memory_nobpf)
3624		static_branch_inc(&memcg_bpf_enabled_key);
3625
3626	return &memcg->css;
 
3627}
3628
3629static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
3630{
3631	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3632
3633	if (memcg_online_kmem(memcg))
3634		goto remove_id;
3635
3636	/*
3637	 * A memcg must be visible for expand_shrinker_info()
3638	 * by the time the maps are allocated. So, we allocate maps
3639	 * here, when for_each_mem_cgroup() can't skip it.
3640	 */
3641	if (alloc_shrinker_info(memcg))
3642		goto offline_kmem;
3643
3644	if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
3645		queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
3646				   FLUSH_TIME);
3647	lru_gen_online_memcg(memcg);
3648
3649	/* Online state pins memcg ID, memcg ID pins CSS */
3650	refcount_set(&memcg->id.ref, 1);
3651	css_get(css);
 
 
3652
3653	/*
3654	 * Ensure mem_cgroup_from_id() works once we're fully online.
3655	 *
3656	 * We could do this earlier and require callers to filter with
3657	 * css_tryget_online(). But right now there are no users that
3658	 * need earlier access, and the workingset code relies on the
3659	 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
3660	 * publish it here at the end of onlining. This matches the
3661	 * regular ID destruction during offlining.
3662	 */
3663	xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL);
3664
3665	return 0;
3666offline_kmem:
3667	memcg_offline_kmem(memcg);
3668remove_id:
3669	mem_cgroup_id_remove(memcg);
3670	return -ENOMEM;
3671}
3672
3673static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
3674{
3675	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
3676
3677	memcg1_css_offline(memcg);
 
3678
3679	page_counter_set_min(&memcg->memory, 0);
3680	page_counter_set_low(&memcg->memory, 0);
3681
3682	zswap_memcg_offline_cleanup(memcg);
3683
3684	memcg_offline_kmem(memcg);
3685	reparent_shrinker_deferred(memcg);
3686	wb_memcg_offline(memcg);
3687	lru_gen_offline_memcg(memcg);
3688
3689	drain_all_stock(memcg);
3690
3691	mem_cgroup_id_put(memcg);
 
3692}
3693
3694static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
 
 
3695{
3696	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3697
3698	invalidate_reclaim_iterators(memcg);
3699	lru_gen_release_memcg(memcg);
3700}
3701
3702static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
 
3703{
3704	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3705	int __maybe_unused i;
3706
3707#ifdef CONFIG_CGROUP_WRITEBACK
3708	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
3709		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
3710#endif
3711	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
3712		static_branch_dec(&memcg_sockets_enabled_key);
3713
3714	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg))
3715		static_branch_dec(&memcg_sockets_enabled_key);
3716
3717	if (!cgroup_memory_nobpf)
3718		static_branch_dec(&memcg_bpf_enabled_key);
3719
3720	vmpressure_cleanup(&memcg->vmpressure);
3721	cancel_work_sync(&memcg->high_work);
3722	memcg1_remove_from_trees(memcg);
3723	free_shrinker_info(memcg);
3724	mem_cgroup_free(memcg);
3725}
3726
3727/**
3728 * mem_cgroup_css_reset - reset the states of a mem_cgroup
3729 * @css: the target css
3730 *
3731 * Reset the states of the mem_cgroup associated with @css.  This is
3732 * invoked when the userland requests disabling on the default hierarchy
3733 * but the memcg is pinned through dependency.  The memcg should stop
3734 * applying policies and should revert to the vanilla state as it may be
3735 * made visible again.
3736 *
3737 * The current implementation only resets the essential configurations.
3738 * This needs to be expanded to cover all the visible parts.
3739 */
3740static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
3741{
3742	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3743
3744	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
3745	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
3746#ifdef CONFIG_MEMCG_V1
3747	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
3748	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
3749#endif
3750	page_counter_set_min(&memcg->memory, 0);
3751	page_counter_set_low(&memcg->memory, 0);
3752	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3753	memcg1_soft_limit_reset(memcg);
3754	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3755	memcg_wb_domain_size_changed(memcg);
3756}
3757
3758struct aggregate_control {
3759	/* pointer to the aggregated (CPU and subtree aggregated) counters */
3760	long *aggregate;
3761	/* pointer to the non-hierarchical (CPU aggregated) counters */
3762	long *local;
3763	/* pointer to the pending child counters during tree propagation */
3764	long *pending;
3765	/* pointer to the parent's pending counters, could be NULL */
3766	long *ppending;
3767	/* pointer to the percpu counters to be aggregated */
3768	long *cstat;
3769	/* pointer to the percpu counters of the last aggregation */
3770	long *cstat_prev;
3771	/* size of the above counters */
3772	int size;
3773};
3774
3775static void mem_cgroup_stat_aggregate(struct aggregate_control *ac)
3776{
3777	int i;
3778	long delta, delta_cpu, v;
 
3779
3780	for (i = 0; i < ac->size; i++) {
3781		/*
3782		 * Collect the aggregated propagation counts of groups
3783		 * below us. We're in a per-cpu loop here and this is
3784		 * a global counter, so the first cycle will get them.
3785		 */
3786		delta = ac->pending[i];
3787		if (delta)
3788			ac->pending[i] = 0;
3789
3790		/* Add CPU changes on this level since the last flush */
3791		delta_cpu = 0;
3792		v = READ_ONCE(ac->cstat[i]);
3793		if (v != ac->cstat_prev[i]) {
3794			delta_cpu = v - ac->cstat_prev[i];
3795			delta += delta_cpu;
3796			ac->cstat_prev[i] = v;
3797		}
3798
3799		/* Aggregate counts on this level and propagate upwards */
3800		if (delta_cpu)
3801			ac->local[i] += delta_cpu;
3802
3803		if (delta) {
3804			ac->aggregate[i] += delta;
3805			if (ac->ppending)
3806				ac->ppending[i] += delta;
3807		}
3808	}
3809}
3810
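/*
 * rstat flush callback: invoked by the cgroup rstat core for each CPU
 * that has pending updates. Fold that CPU's deltas into the memcg's
 * vmstat state, events and per-node lruvec stats via the three
 * aggregation passes below, propagating the deltas to the parent's
 * pending counters along the way.
 */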
3811static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
3812{
3813	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3814	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
3815	struct memcg_vmstats_percpu *statc;
3816	struct aggregate_control ac;
3817	int nid;
3818
3819	statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3820
3821	ac = (struct aggregate_control) {
3822		.aggregate = memcg->vmstats->state,
3823		.local = memcg->vmstats->state_local,
3824		.pending = memcg->vmstats->state_pending,
3825		.ppending = parent ? parent->vmstats->state_pending : NULL,
3826		.cstat = statc->state,
3827		.cstat_prev = statc->state_prev,
3828		.size = MEMCG_VMSTAT_SIZE,
3829	};
3830	mem_cgroup_stat_aggregate(&ac);
3831
3832	ac = (struct aggregate_control) {
3833		.aggregate = memcg->vmstats->events,
3834		.local = memcg->vmstats->events_local,
3835		.pending = memcg->vmstats->events_pending,
3836		.ppending = parent ? parent->vmstats->events_pending : NULL,
3837		.cstat = statc->events,
3838		.cstat_prev = statc->events_prev,
3839		.size = NR_MEMCG_EVENTS,
3840	};
3841	mem_cgroup_stat_aggregate(&ac);
3842
3843	for_each_node_state(nid, N_MEMORY) {
3844		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
3845		struct lruvec_stats *lstats = pn->lruvec_stats;
3846		struct lruvec_stats *plstats = NULL;
3847		struct lruvec_stats_percpu *lstatc;
3848
3849		if (parent)
3850			plstats = parent->nodeinfo[nid]->lruvec_stats;
3851
3852		lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
3853
3854		ac = (struct aggregate_control) {
3855			.aggregate = lstats->state,
3856			.local = lstats->state_local,
3857			.pending = lstats->state_pending,
3858			.ppending = plstats ? plstats->state_pending : NULL,
3859			.cstat = lstatc->state,
3860			.cstat_prev = lstatc->state_prev,
3861			.size = NR_MEMCG_NODE_STAT_ITEMS,
3862		};
3863		mem_cgroup_stat_aggregate(&ac);
3864
3865	}
3866	WRITE_ONCE(statc->stats_updates, 0);
3867	/* We are in a per-cpu loop here, only do the atomic write once */
3868	if (atomic64_read(&memcg->vmstats->stats_updates))
3869		atomic64_set(&memcg->vmstats->stats_updates, 0);
3870}
3871
3872static void mem_cgroup_fork(struct task_struct *task)
3873{
3874	/*
3875	 * Set the update flag to cause task->objcg to be initialized lazily
3876	 * on the first allocation. It can be done without any synchronization
3877	 * because it's always performed on the current task, as does
3878	 * current_objcg_update().
3879	 */
3880	task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
3881}
3882
3883static void mem_cgroup_exit(struct task_struct *task)
3884{
3885	struct obj_cgroup *objcg = task->objcg;
3886
3887	objcg = (struct obj_cgroup *)
3888		((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
3889	obj_cgroup_put(objcg);
3890
3891	/*
3892	 * Some kernel allocations can happen after this point,
3893	 * but let's ignore them. It can be done without any synchronization
3894	 * because it's always performed on the current task, as does
3895	 * current_objcg_update().
3896	 */
3897	task->objcg = NULL;
3898}
3899
3900#ifdef CONFIG_LRU_GEN
3901static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
3902{
3903	struct task_struct *task;
3904	struct cgroup_subsys_state *css;
3905
3906	/* find the first leader if there is any */
3907	cgroup_taskset_for_each_leader(task, css, tset)
3908		break;
3909
3910	if (!task)
3911		return;
3912
3913	task_lock(task);
3914	if (task->mm && READ_ONCE(task->mm->owner) == task)
3915		lru_gen_migrate_mm(task->mm);
3916	task_unlock(task);
3917}
3918#else
3919static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
3920#endif /* CONFIG_LRU_GEN */
3921
3922static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
3923{
3924	struct task_struct *task;
3925	struct cgroup_subsys_state *css;
3926
3927	cgroup_taskset_for_each(task, css, tset) {
3928		/* atomically set the update bit */
3929		set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
 
 
3930	}
3931}
3932
3933static void mem_cgroup_attach(struct cgroup_taskset *tset)
3934{
3935	mem_cgroup_lru_gen_attach(tset);
3936	mem_cgroup_kmem_attach(tset);
3937}
3938
3939static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
3940{
3941	if (value == PAGE_COUNTER_MAX)
3942		seq_puts(m, "max\n");
3943	else
3944		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
3945
3946	return 0;
3947}
3948
3949static u64 memory_current_read(struct cgroup_subsys_state *css,
3950			       struct cftype *cft)
3951{
3952	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
 
3953
3954	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
3955}
3956
3957#define OFP_PEAK_UNSET (((-1UL)))
 
3958
3959static int peak_show(struct seq_file *sf, void *v, struct page_counter *pc)
3960{
3961	struct cgroup_of_peak *ofp = of_peak(sf->private);
3962	u64 fd_peak = READ_ONCE(ofp->value), peak;
3963
3964	/* User wants global or local peak? */
3965	if (fd_peak == OFP_PEAK_UNSET)
3966		peak = pc->watermark;
3967	else
3968		peak = max(fd_peak, READ_ONCE(pc->local_watermark));
 
3969
3970	seq_printf(sf, "%llu\n", peak * PAGE_SIZE);
3971	return 0;
3972}
3973
3974static int memory_peak_show(struct seq_file *sf, void *v)
3975{
3976	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3977
3978	return peak_show(sf, v, &memcg->memory);
3979}
3980
3981static int peak_open(struct kernfs_open_file *of)
3982{
3983	struct cgroup_of_peak *ofp = of_peak(of);
3984
3985	ofp->value = OFP_PEAK_UNSET;
3986	return 0;
3987}
3988
3989static void peak_release(struct kernfs_open_file *of)
3990{
3991	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3992	struct cgroup_of_peak *ofp = of_peak(of);
 
 
3993
3994	if (ofp->value == OFP_PEAK_UNSET) {
3995		/* fast path (no writes on this fd) */
3996		return;
3997	}
3998	spin_lock(&memcg->peaks_lock);
3999	list_del(&ofp->list);
4000	spin_unlock(&memcg->peaks_lock);
4001}
4002
4003static ssize_t peak_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
4004			  loff_t off, struct page_counter *pc,
4005			  struct list_head *watchers)
4006{
4007	unsigned long usage;
4008	struct cgroup_of_peak *peer_ctx;
4009	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4010	struct cgroup_of_peak *ofp = of_peak(of);
4011
4012	spin_lock(&memcg->peaks_lock);
4013
4014	usage = page_counter_read(pc);
4015	WRITE_ONCE(pc->local_watermark, usage);
4016
4017	list_for_each_entry(peer_ctx, watchers, list)
4018		if (usage > peer_ctx->value)
4019			WRITE_ONCE(peer_ctx->value, usage);
4020
4021	/* initial write, register watcher */
4022	if (ofp->value == -1)
4023		list_add(&ofp->list, watchers);
4024
4025	WRITE_ONCE(ofp->value, usage);
4026	spin_unlock(&memcg->peaks_lock);
4027
4028	return nbytes;
4029}
4030
4031static ssize_t memory_peak_write(struct kernfs_open_file *of, char *buf,
4032				 size_t nbytes, loff_t off)
4033{
4034	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4035
4036	return peak_write(of, buf, nbytes, off, &memcg->memory,
4037			  &memcg->memory_peaks);
 
4038}
4039
4040#undef OFP_PEAK_UNSET
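/*
 * Summary of the peak file semantics implemented above: a read on a file
 * descriptor that has never been written returns the counter's global
 * high watermark. Writing any string resets the local watermark to the
 * current usage and registers the fd as a watcher; subsequent reads on
 * that same fd then report the highest usage observed since the write.
 */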
 
 
4041
4042static int memory_min_show(struct seq_file *m, void *v)
4043{
4044	return seq_puts_memcg_tunable(m,
4045		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
4046}
4047
4048static ssize_t memory_min_write(struct kernfs_open_file *of,
4049				char *buf, size_t nbytes, loff_t off)
4050{
4051	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4052	unsigned long min;
4053	int err;
4054
4055	buf = strstrip(buf);
4056	err = page_counter_memparse(buf, "max", &min);
4057	if (err)
4058		return err;
4059
4060	page_counter_set_min(&memcg->memory, min);
4061
4062	return nbytes;
4063}
4064
4065static int memory_low_show(struct seq_file *m, void *v)
4066{
4067	return seq_puts_memcg_tunable(m,
4068		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
4069}
4070
4071static ssize_t memory_low_write(struct kernfs_open_file *of,
4072				char *buf, size_t nbytes, loff_t off)
4073{
4074	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4075	unsigned long low;
4076	int err;
4077
4078	buf = strstrip(buf);
4079	err = page_counter_memparse(buf, "max", &low);
4080	if (err)
4081		return err;
4082
4083	page_counter_set_low(&memcg->memory, low);
4084
4085	return nbytes;
4086}
4087
4088static int memory_high_show(struct seq_file *m, void *v)
4089{
4090	return seq_puts_memcg_tunable(m,
4091		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
4092}
4093
4094static ssize_t memory_high_write(struct kernfs_open_file *of,
4095				 char *buf, size_t nbytes, loff_t off)
4096{
4097	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4098	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4099	bool drained = false;
4100	unsigned long high;
4101	int err;
4102
4103	buf = strstrip(buf);
4104	err = page_counter_memparse(buf, "max", &high);
4105	if (err)
4106		return err;
4107
4108	page_counter_set_high(&memcg->memory, high);
4109
4110	for (;;) {
4111		unsigned long nr_pages = page_counter_read(&memcg->memory);
4112		unsigned long reclaimed;
4113
4114		if (nr_pages <= high)
4115			break;
 
4116
4117		if (signal_pending(current))
4118			break;
 
4119
4120		if (!drained) {
4121			drain_all_stock(memcg);
4122			drained = true;
4123			continue;
4124		}
4125
4126		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
4127					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL);
4128
4129		if (!reclaimed && !nr_retries--)
4130			break;
4131	}
4132
4133	memcg_wb_domain_size_changed(memcg);
4134	return nbytes;
4135}
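/*
 * Example: lowering the limit below current usage, e.g.
 *
 *	echo 512M > memory.high
 *
 * makes the loop above reclaim the cgroup down towards the new value
 * before the write returns; unlike memory.max, memory.high never
 * invokes the OOM killer.
 */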
4136
4137static int memory_max_show(struct seq_file *m, void *v)
 
4138{
4139	return seq_puts_memcg_tunable(m,
4140		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
4141}
4142
4143static ssize_t memory_max_write(struct kernfs_open_file *of,
4144				char *buf, size_t nbytes, loff_t off)
4145{
4146	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4147	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
4148	bool drained = false;
4149	unsigned long max;
4150	int err;
4151
4152	buf = strstrip(buf);
4153	err = page_counter_memparse(buf, "max", &max);
4154	if (err)
4155		return err;
4156
4157	xchg(&memcg->memory.max, max);
 
4158
4159	for (;;) {
4160		unsigned long nr_pages = page_counter_read(&memcg->memory);
4161
4162		if (nr_pages <= max)
4163			break;
4164
4165		if (signal_pending(current))
4166			break;
4167
4168		if (!drained) {
4169			drain_all_stock(memcg);
4170			drained = true;
 
4171			continue;
4172		}
4173
4174		if (nr_reclaims) {
4175			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
4176					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL))
4177				nr_reclaims--;
4178			continue;
4179		}
 
 
4180
4181		memcg_memory_event(memcg, MEMCG_OOM);
4182		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
4183			break;
4184	}
4185
4186	memcg_wb_domain_size_changed(memcg);
4187	return nbytes;
4188}
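/*
 * Example: shrinking the hard limit, e.g. "echo 1G > memory.max", first
 * drains the percpu stock and retries direct reclaim; once the retries
 * are exhausted it raises MEMCG_OOM events and invokes the OOM killer
 * until usage fits under the new limit or the writer receives a signal.
 */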
4189
4190/*
4191 * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
4192 * if any new events become available.
4193 */
4194static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
4195{
4196	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
4197	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
4198	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
4199	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
4200	seq_printf(m, "oom_kill %lu\n",
4201		   atomic_long_read(&events[MEMCG_OOM_KILL]));
4202	seq_printf(m, "oom_group_kill %lu\n",
4203		   atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
4204}
4205
4206static int memory_events_show(struct seq_file *m, void *v)
 
4207{
4208	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
 
 
4209
4210	__memory_events_show(m, memcg->memory_events);
4211	return 0;
4212}
 
4213
4214static int memory_events_local_show(struct seq_file *m, void *v)
4215{
4216	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4217
4218	__memory_events_show(m, memcg->memory_events_local);
4219	return 0;
4220}
4221
4222int memory_stat_show(struct seq_file *m, void *v)
4223{
4224	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4225	char *buf = kmalloc(SEQ_BUF_SIZE, GFP_KERNEL);
4226	struct seq_buf s;
4227
4228	if (!buf)
4229		return -ENOMEM;
4230	seq_buf_init(&s, buf, SEQ_BUF_SIZE);
4231	memory_stat_format(memcg, &s);
4232	seq_puts(m, buf);
4233	kfree(buf);
4234	return 0;
4235}
4236
4237#ifdef CONFIG_NUMA
4238static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
4239						     int item)
4240{
4241	return lruvec_page_state(lruvec, item) *
4242		memcg_page_state_output_unit(item);
4243}
4244
4245static int memory_numa_stat_show(struct seq_file *m, void *v)
4246{
4247	int i;
4248	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4249
4250	mem_cgroup_flush_stats(memcg);
4251
4252	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
4253		int nid;
4254
4255		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
4256			continue;
4257
4258		seq_printf(m, "%s", memory_stats[i].name);
4259		for_each_node_state(nid, N_MEMORY) {
4260			u64 size;
4261			struct lruvec *lruvec;
4262
4263			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4264			size = lruvec_page_state_output(lruvec,
4265							memory_stats[i].idx);
4266			seq_printf(m, " N%d=%llu", nid, size);
4267		}
4268		seq_putc(m, '\n');
4269	}
4270
4271	return 0;
4272}
4273#endif
4274
4275static int memory_oom_group_show(struct seq_file *m, void *v)
 
4276{
4277	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4278
4279	seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
4280
4281	return 0;
4282}
4283
4284static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
4285				      char *buf, size_t nbytes, loff_t off)
4286{
4287	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4288	int ret, oom_group;
4289
4290	buf = strstrip(buf);
4291	if (!buf)
4292		return -EINVAL;
4293
4294	ret = kstrtoint(buf, 0, &oom_group);
4295	if (ret)
4296		return ret;
4297
4298	if (oom_group != 0 && oom_group != 1)
4299		return -EINVAL;
4300
4301	WRITE_ONCE(memcg->oom_group, oom_group);
4302
4303	return nbytes;
 
 
4304}
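/*
 * memory.oom.group accepts only "0" or "1". When set to 1, the OOM
 * killer treats the cgroup as an indivisible workload and kills all of
 * its tasks together instead of picking a single victim.
 */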
4305
4306enum {
4307	MEMORY_RECLAIM_SWAPPINESS = 0,
4308	MEMORY_RECLAIM_NULL,
 
4309};
4310
4311static const match_table_t tokens = {
4312	{ MEMORY_RECLAIM_SWAPPINESS, "swappiness=%d"},
4313	{ MEMORY_RECLAIM_NULL, NULL },
4314};
4315
4316static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
4317			      size_t nbytes, loff_t off)
4318{
4319	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4320	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
4321	unsigned long nr_to_reclaim, nr_reclaimed = 0;
4322	int swappiness = -1;
4323	unsigned int reclaim_options;
4324	char *old_buf, *start;
4325	substring_t args[MAX_OPT_ARGS];
4326
4327	buf = strstrip(buf);
4328
4329	old_buf = buf;
4330	nr_to_reclaim = memparse(buf, &buf) / PAGE_SIZE;
4331	if (buf == old_buf)
4332		return -EINVAL;
4333
4334	buf = strstrip(buf);
4335
4336	while ((start = strsep(&buf, " ")) != NULL) {
4337		if (!strlen(start))
4338			continue;
4339		switch (match_token(start, tokens, args)) {
4340		case MEMORY_RECLAIM_SWAPPINESS:
4341			if (match_int(&args[0], &swappiness))
4342				return -EINVAL;
4343			if (swappiness < MIN_SWAPPINESS || swappiness > MAX_SWAPPINESS)
4344				return -EINVAL;
4345			break;
4346		default:
4347			return -EINVAL;
4348		}
4349	}
4350
4351	reclaim_options	= MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
4352	while (nr_reclaimed < nr_to_reclaim) {
4353		/* Will converge on zero, but reclaim enforces a minimum */
4354		unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4;
4355		unsigned long reclaimed;
4356
4357		if (signal_pending(current))
4358			return -EINTR;
4359
4360		/*
4361		 * This is the final attempt, drain percpu lru caches in the
4362		 * hope of introducing more evictable pages for
4363		 * try_to_free_mem_cgroup_pages().
4364		 */
4365		if (!nr_retries)
4366			lru_add_drain_all();
4367
4368		reclaimed = try_to_free_mem_cgroup_pages(memcg,
4369					batch_size, GFP_KERNEL,
4370					reclaim_options,
4371					swappiness == -1 ? NULL : &swappiness);
4372
4373		if (!reclaimed && !nr_retries--)
4374			return -EAGAIN;
4375
4376		nr_reclaimed += reclaimed;
4377	}
4378
4379	return nbytes;
4380}
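/*
 * Example of the interface parsed above:
 *
 *	echo 256M > memory.reclaim
 *	echo "1G swappiness=0" > memory.reclaim
 *
 * The leading value is the amount of proactive reclaim requested; the
 * optional swappiness=<n> argument (MIN_SWAPPINESS..MAX_SWAPPINESS)
 * overrides the cgroup's swappiness for this reclaim attempt only.
 */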
 
4381
4382static struct cftype memory_files[] = {
4383	{
4384		.name = "current",
4385		.flags = CFTYPE_NOT_ON_ROOT,
4386		.read_u64 = memory_current_read,
 
4387	},
4388	{
4389		.name = "peak",
4390		.flags = CFTYPE_NOT_ON_ROOT,
4391		.open = peak_open,
4392		.release = peak_release,
4393		.seq_show = memory_peak_show,
4394		.write = memory_peak_write,
4395	},
4396	{
4397		.name = "min",
4398		.flags = CFTYPE_NOT_ON_ROOT,
4399		.seq_show = memory_min_show,
4400		.write = memory_min_write,
4401	},
4402	{
4403		.name = "low",
4404		.flags = CFTYPE_NOT_ON_ROOT,
4405		.seq_show = memory_low_show,
4406		.write = memory_low_write,
4407	},
4408	{
4409		.name = "high",
4410		.flags = CFTYPE_NOT_ON_ROOT,
4411		.seq_show = memory_high_show,
4412		.write = memory_high_write,
 
 
4413	},
4414	{
4415		.name = "max",
4416		.flags = CFTYPE_NOT_ON_ROOT,
4417		.seq_show = memory_max_show,
4418		.write = memory_max_write,
4419	},
4420	{
4421		.name = "events",
4422		.flags = CFTYPE_NOT_ON_ROOT,
4423		.file_offset = offsetof(struct mem_cgroup, events_file),
4424		.seq_show = memory_events_show,
4425	},
4426	{
4427		.name = "events.local",
4428		.flags = CFTYPE_NOT_ON_ROOT,
4429		.file_offset = offsetof(struct mem_cgroup, events_local_file),
4430		.seq_show = memory_events_local_show,
4431	},
4432	{
4433		.name = "stat",
4434		.seq_show = memory_stat_show,
4435	},
4436#ifdef CONFIG_NUMA
4437	{
4438		.name = "numa_stat",
4439		.seq_show = memory_numa_stat_show,
4440	},
4441#endif
 
4442	{
4443		.name = "oom.group",
4444		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
4445		.seq_show = memory_oom_group_show,
4446		.write = memory_oom_group_write,
 
4447	},
4448	{
4449		.name = "reclaim",
4450		.flags = CFTYPE_NS_DELEGATABLE,
4451		.write = memory_reclaim,
4452	},
4453	{ }	/* terminate */
4454};
4455
4456struct cgroup_subsys memory_cgrp_subsys = {
4457	.css_alloc = mem_cgroup_css_alloc,
4458	.css_online = mem_cgroup_css_online,
4459	.css_offline = mem_cgroup_css_offline,
4460	.css_released = mem_cgroup_css_released,
4461	.css_free = mem_cgroup_css_free,
4462	.css_reset = mem_cgroup_css_reset,
4463	.css_rstat_flush = mem_cgroup_css_rstat_flush,
4464	.attach = mem_cgroup_attach,
4465	.fork = mem_cgroup_fork,
4466	.exit = mem_cgroup_exit,
4467	.dfl_cftypes = memory_files,
4468#ifdef CONFIG_MEMCG_V1
4469	.legacy_cftypes = mem_cgroup_legacy_files,
4470#endif
4471	.early_init = 0,
4472};
4473
4474/**
4475 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
4476 * @root: the top ancestor of the sub-tree being checked
4477 * @memcg: the memory cgroup to check
4478 *
4479 * WARNING: This function is not stateless! It can only be used as part
4480 *          of a top-down tree iteration, not for isolated queries.
4481 */
4482void mem_cgroup_calculate_protection(struct mem_cgroup *root,
4483				     struct mem_cgroup *memcg)
4484{
4485	bool recursive_protection =
4486		cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT;
4487
4488	if (mem_cgroup_disabled())
4489		return;
4490
4491	if (!root)
4492		root = root_mem_cgroup;
4493
4494	page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection);
 
 
4495}
4496
4497static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
4498			gfp_t gfp)
4499{
4500	int ret;
4501
4502	ret = try_charge(memcg, gfp, folio_nr_pages(folio));
4503	if (ret)
4504		goto out;
4505
4506	mem_cgroup_commit_charge(folio, memcg);
4507out:
4508	return ret;
4509}
4510
4511int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
4512{
4513	struct mem_cgroup *memcg;
4514	int ret;
4515
4516	memcg = get_mem_cgroup_from_mm(mm);
4517	ret = charge_memcg(folio, memcg, gfp);
4518	css_put(&memcg->css);
 
 
4519
4520	return ret;
4521}
4522
4523/**
4524 * mem_cgroup_hugetlb_try_charge - try to charge the memcg for a hugetlb folio
4525 * @memcg: memcg to charge.
4526 * @gfp: reclaim mode.
4527 * @nr_pages: number of pages to charge.
4528 *
4529 * This function is called when allocating a huge page folio to determine if
4530 * the memcg has the capacity for it. It does not commit the charge yet,
4531 * as the hugetlb folio itself has not been obtained from the hugetlb pool.
4532 *
4533 * Once we have obtained the hugetlb folio, we can call
4534 * mem_cgroup_commit_charge() to commit the charge. If we fail to obtain the
4535 * folio, we should instead call mem_cgroup_cancel_charge() to undo the effect
4536 * of try_charge().
4537 *
4538 * Returns 0 on success. Otherwise, an error code is returned.
4539 */
4540int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
4541			long nr_pages)
4542{
4543	/*
4544	 * If hugetlb memcg charging is not enabled, do not fail hugetlb allocation,
4545	 * but do not attempt to commit charge later (or cancel on error) either.
4546	 */
4547	if (mem_cgroup_disabled() || !memcg ||
4548		!cgroup_subsys_on_dfl(memory_cgrp_subsys) ||
4549		!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
4550		return -EOPNOTSUPP;
4551
4552	if (try_charge(memcg, gfp, nr_pages))
4553		return -ENOMEM;
4554
4555	return 0;
4556}
4557
4558/**
4559 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
4560 * @folio: folio to charge.
4561 * @mm: mm context of the victim
4562 * @gfp: reclaim mode
4563 * @entry: swap entry for which the folio is allocated
4564 *
4565 * This function charges a folio allocated for swapin. Please call this before
4566 * adding the folio to the swapcache.
4567 *
4568 * Returns 0 on success. Otherwise, an error code is returned.
4569 */
4570int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
4571				  gfp_t gfp, swp_entry_t entry)
4572{
4573	struct mem_cgroup *memcg;
4574	unsigned short id;
4575	int ret;
4576
4577	if (mem_cgroup_disabled())
4578		return 0;
4579
4580	id = lookup_swap_cgroup_id(entry);
4581	rcu_read_lock();
4582	memcg = mem_cgroup_from_id(id);
4583	if (!memcg || !css_tryget_online(&memcg->css))
4584		memcg = get_mem_cgroup_from_mm(mm);
4585	rcu_read_unlock();
4586
4587	ret = charge_memcg(folio, memcg, gfp);
4588
4589	css_put(&memcg->css);
4590	return ret;
4591}
4592
4593/*
4594 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
4595 * @entry: the first swap entry for which the pages are charged
4596 * @nr_pages: number of pages which will be uncharged
4597 *
4598 * Call this function after successfully adding the charged page to swapcache.
 
 
4599 *
4600 * Note: This function assumes the page for which the swap slot is being
4601 * uncharged is an order-0 page.
4602 */
4603void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
 
4604{
4605	/*
4606	 * Cgroup1's unified memory+swap counter has been charged with the
4607	 * new swapcache page, finish the transfer by uncharging the swap
4608	 * slot. The swap slot would also get uncharged when it dies, but
4609	 * it can stick around indefinitely and we'd count the page twice
4610	 * the entire time.
4611	 *
4612	 * Cgroup2 has separate resource counters for memory and swap,
4613	 * so this is a non-issue here. Memory and swap charge lifetimes
4614	 * correspond 1:1 to page and swap slot lifetimes: we charge the
4615	 * page to memory here, and uncharge swap when the slot is freed.
4616	 */
4617	if (!mem_cgroup_disabled() && do_memsw_account()) {
4618		/*
4619		 * The swap entry might not get freed for a long time,
4620		 * let's not wait for it.  The page already received a
4621		 * memory+swap charge, drop the swap entry duplicate.
4622		 */
4623		mem_cgroup_uncharge_swap(entry, nr_pages);
4624	}
4625}
4626
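/*
 * Uncharging is batched: uncharge_folio() accumulates the counts for
 * consecutive folios that belong to the same memcg in an uncharge_gather,
 * and uncharge_batch() applies the whole batch to the page counters and
 * statistics whenever the memcg changes or the caller is done.
 */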
4627struct uncharge_gather {
4628	struct mem_cgroup *memcg;
4629	unsigned long nr_memory;
4630	unsigned long pgpgout;
4631	unsigned long nr_kmem;
4632	int nid;
4633};
4634
4635static inline void uncharge_gather_clear(struct uncharge_gather *ug)
4636{
4637	memset(ug, 0, sizeof(*ug));
4638}
4639
4640static void uncharge_batch(const struct uncharge_gather *ug)
4641{
4642	if (ug->nr_memory) {
4643		page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
4644		if (do_memsw_account())
4645			page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
4646		if (ug->nr_kmem) {
4647			mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem);
4648			memcg1_account_kmem(ug->memcg, -ug->nr_kmem);
4649		}
4650		memcg1_oom_recover(ug->memcg);
4651	}
4652
4653	memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid);
4654
4655	/* drop reference from uncharge_folio */
4656	css_put(&ug->memcg->css);
4657}
4658
4659static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
4660{
4661	long nr_pages;
4662	struct mem_cgroup *memcg;
4663	struct obj_cgroup *objcg;
4664
4665	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
4666
4667	/*
4668	 * Nobody should be changing or seriously looking at
4669	 * folio memcg or objcg at this point, we have fully
4670	 * exclusive access to the folio.
4671	 */
4672	if (folio_memcg_kmem(folio)) {
4673		objcg = __folio_objcg(folio);
4674		/*
4675		 * This get matches the put at the end of the function and
4676		 * kmem pages do not hold memcg references anymore.
4677		 */
4678		memcg = get_mem_cgroup_from_objcg(objcg);
4679	} else {
4680		memcg = __folio_memcg(folio);
4681	}
4682
4683	if (!memcg)
4684		return;
4685
4686	if (ug->memcg != memcg) {
4687		if (ug->memcg) {
4688			uncharge_batch(ug);
4689			uncharge_gather_clear(ug);
4690		}
4691		ug->memcg = memcg;
4692		ug->nid = folio_nid(folio);
4693
4694		/* pairs with css_put in uncharge_batch */
4695		css_get(&memcg->css);
4696	}
4697
4698	nr_pages = folio_nr_pages(folio);
4699
4700	if (folio_memcg_kmem(folio)) {
4701		ug->nr_memory += nr_pages;
4702		ug->nr_kmem += nr_pages;
4703
4704		folio->memcg_data = 0;
4705		obj_cgroup_put(objcg);
4706	} else {
4707		/* LRU pages aren't accounted at the root level */
4708		if (!mem_cgroup_is_root(memcg))
4709			ug->nr_memory += nr_pages;
4710		ug->pgpgout++;
4711
4712		WARN_ON_ONCE(folio_unqueue_deferred_split(folio));
4713		folio->memcg_data = 0;
4714	}
4715
4716	css_put(&memcg->css);
4717}
4718
4719void __mem_cgroup_uncharge(struct folio *folio)
4720{
4721	struct uncharge_gather ug;
4722
4723	/* Don't touch folio->lru of any random page, pre-check: */
4724	if (!folio_memcg_charged(folio))
4725		return;
4726
4727	uncharge_gather_clear(&ug);
4728	uncharge_folio(folio, &ug);
4729	uncharge_batch(&ug);
4730}
4731
4732void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
4733{
4734	struct uncharge_gather ug;
4735	unsigned int i;
4736
4737	uncharge_gather_clear(&ug);
4738	for (i = 0; i < folios->nr; i++)
4739		uncharge_folio(folios->folios[i], &ug);
4740	if (ug.memcg)
4741		uncharge_batch(&ug);
4742}
 
4743
4744/**
4745 * mem_cgroup_replace_folio - Charge a folio's replacement.
4746 * @old: Currently circulating folio.
4747 * @new: Replacement folio.
4748 *
4749 * Charge @new as a replacement folio for @old. @old will
4750 * be uncharged upon free.
4751 *
4752 * Both folios must be locked, @new->mapping must be set up.
4753 */
4754void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
4755{
4756	struct mem_cgroup *memcg;
4757	long nr_pages = folio_nr_pages(new);
4758
4759	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
4760	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
4761	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
4762	VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
4763
4764	if (mem_cgroup_disabled())
4765		return;
4766
4767	/* Page cache replacement: new folio already charged? */
4768	if (folio_memcg_charged(new))
4769		return;
4770
4771	memcg = folio_memcg(old);
4772	VM_WARN_ON_ONCE_FOLIO(!memcg, old);
4773	if (!memcg)
4774		return;
4775
4776	/* Force-charge the new page. The old one will be freed soon */
4777	if (!mem_cgroup_is_root(memcg)) {
4778		page_counter_charge(&memcg->memory, nr_pages);
4779		if (do_memsw_account())
4780			page_counter_charge(&memcg->memsw, nr_pages);
4781	}
4782
4783	css_get(&memcg->css);
4784	commit_charge(new, memcg);
4785	memcg1_commit_charge(new, memcg);
4786}
4787
4788/**
4789 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
4790 * @old: Currently circulating folio.
4791 * @new: Replacement folio.
4792 *
4793 * Transfer the memcg data from the old folio to the new folio for migration.
4794 * The old folio's data info will be cleared. Note that the memory counters
4795 * will remain unchanged throughout the process.
4796 *
4797 * Both folios must be locked, @new->mapping must be set up.
4798 */
4799void mem_cgroup_migrate(struct folio *old, struct folio *new)
4800{
4801	struct mem_cgroup *memcg;
4802
4803	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
4804	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
4805	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
4806	VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
4807	VM_BUG_ON_FOLIO(folio_test_lru(old), old);
4808
4809	if (mem_cgroup_disabled())
4810		return;
4811
4812	memcg = folio_memcg(old);
4813	/*
4814	 * Note that it is normal to see !memcg for a hugetlb folio.
4815	 * For example, it could have been allocated when memory_hugetlb_accounting
4816	 * was not selected.
4817	 */
4818	VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
4819	if (!memcg)
4820		return;
4821
4822	/* Transfer the charge and the css ref */
4823	commit_charge(new, memcg);
4824
4825	/* Warning should never happen, so don't worry about refcount non-0 */
4826	WARN_ON_ONCE(folio_unqueue_deferred_split(old));
4827	old->memcg_data = 0;
4828}
 
4829
4830DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
4831EXPORT_SYMBOL(memcg_sockets_enabled_key);
4832
4833void mem_cgroup_sk_alloc(struct sock *sk)
4834{
4835	struct mem_cgroup *memcg;
4836
4837	if (!mem_cgroup_sockets_enabled)
4838		return;
4839
4840	/* Do not associate the sock with unrelated interrupted task's memcg. */
4841	if (!in_task())
4842		return;
4843
4844	rcu_read_lock();
4845	memcg = mem_cgroup_from_task(current);
4846	if (mem_cgroup_is_root(memcg))
4847		goto out;
4848	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg))
4849		goto out;
4850	if (css_tryget(&memcg->css))
4851		sk->sk_memcg = memcg;
4852out:
4853	rcu_read_unlock();
4854}
4855
4856void mem_cgroup_sk_free(struct sock *sk)
 
4857{
4858	if (sk->sk_memcg)
4859		css_put(&sk->sk_memcg->css);
4860}
4861
4862/**
4863 * mem_cgroup_charge_skmem - charge socket memory
4864 * @memcg: memcg to charge
4865 * @nr_pages: number of pages to charge
4866 * @gfp_mask: reclaim mode
4867 *
4868 * Charges @nr_pages to @memcg. Returns %true if the charge fits within
4869 * @memcg's configured limit, %false if it doesn't.
4870 */
4871bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
4872			     gfp_t gfp_mask)
4873{
4874	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
4875		return memcg1_charge_skmem(memcg, nr_pages, gfp_mask);
4876
4877	if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
4878		mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
4879		return true;
4880	}
4881
4882	return false;
4883}
4884
4885/**
4886 * mem_cgroup_uncharge_skmem - uncharge socket memory
4887 * @memcg: memcg to uncharge
4888 * @nr_pages: number of pages to uncharge
4889 */
4890void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
4891{
4892	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
4893		memcg1_uncharge_skmem(memcg, nr_pages);
4894		return;
4895	}
 
 
4896
4897	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
4898
4899	refill_stock(memcg, nr_pages);
4900}
4901
4902static int __init cgroup_memory(char *s)
4903{
4904	char *token;
4905
4906	while ((token = strsep(&s, ",")) != NULL) {
4907		if (!*token)
4908			continue;
4909		if (!strcmp(token, "nosocket"))
4910			cgroup_memory_nosocket = true;
4911		if (!strcmp(token, "nokmem"))
4912			cgroup_memory_nokmem = true;
4913		if (!strcmp(token, "nobpf"))
4914			cgroup_memory_nobpf = true;
4915	}
4916	return 1;
4917}
4918__setup("cgroup.memory=", cgroup_memory);
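/*
 * Example: booting with "cgroup.memory=nosocket,nokmem" disables socket
 * memory accounting and kernel memory accounting; the "nobpf" token
 * likewise disables BPF memory accounting. Unrecognized tokens are
 * silently ignored.
 */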
4919
4920/*
4921 * subsys_initcall() for memory controller.
4922 *
4923 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
4924 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
4925 * basically everything that doesn't depend on a specific mem_cgroup structure
4926 * should be initialized from here.
4927 */
4928static int __init mem_cgroup_init(void)
4929{
4930	int cpu;
4931
4932	/*
4933	 * Currently s32 type (can refer to struct batched_lruvec_stat) is
4934	 * used for per-memcg-per-cpu caching of per-node statistics. In order
4935	 * to work fine, we should make sure that the overfill threshold can't
4936	 * exceed S32_MAX / PAGE_SIZE.
4937	 */
4938	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
4939
4940	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
4941				  memcg_hotplug_cpu_dead);
4942
4943	for_each_possible_cpu(cpu)
4944		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
4945			  drain_local_stock);
4946
4947	return 0;
4948}
4949subsys_initcall(mem_cgroup_init);
4950
4951#ifdef CONFIG_SWAP
4952static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
 
 
4953{
4954	while (!refcount_inc_not_zero(&memcg->id.ref)) {
4955		/*
4956		 * The root cgroup cannot be destroyed, so its refcount must
4957		 * always be >= 1.
 
 
4958		 */
4959		if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
4960			VM_BUG_ON(1);
4961			break;
4962		}
4963		memcg = parent_mem_cgroup(memcg);
4964		if (!memcg)
4965			memcg = root_mem_cgroup;
4966	}
4967	return memcg;
4968}
4969
4970/**
4971 * mem_cgroup_swapout - transfer a memsw charge to swap
4972 * @folio: folio whose memsw charge to transfer
4973 * @entry: swap entry to move the charge to
4974 *
4975 * Transfer the memsw charge of @folio to @entry.
4976 */
4977void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
4978{
4979	struct mem_cgroup *memcg, *swap_memcg;
4980	unsigned int nr_entries;
4981	unsigned short oldid;
4982
4983	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
4984	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
4985
4986	if (mem_cgroup_disabled())
4987		return;
 
 
4988
4989	if (!do_memsw_account())
4990		return;
4991
4992	memcg = folio_memcg(folio);
 
4993
4994	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
4995	if (!memcg)
4996		return;
4997
 
 
4998	/*
4999	 * In case the memcg owning these pages has been offlined and doesn't
5000	 * have an ID allocated to it anymore, charge the closest online
5001	 * ancestor for the swap instead and transfer the memory+swap charge.
5002	 */
5003	swap_memcg = mem_cgroup_id_get_online(memcg);
5004	nr_entries = folio_nr_pages(folio);
5005	/* Get references for the tail pages, too */
5006	if (nr_entries > 1)
5007		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
5008	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
5009				   nr_entries);
5010	VM_BUG_ON_FOLIO(oldid, folio);
5011	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
5012
5013	folio_unqueue_deferred_split(folio);
5014	folio->memcg_data = 0;
5015
5016	if (!mem_cgroup_is_root(memcg))
5017		page_counter_uncharge(&memcg->memory, nr_entries);
5018
5019	if (memcg != swap_memcg) {
5020		if (!mem_cgroup_is_root(swap_memcg))
5021			page_counter_charge(&swap_memcg->memsw, nr_entries);
5022		page_counter_uncharge(&memcg->memsw, nr_entries);
5023	}
5024
5025	memcg1_swapout(folio, memcg);
5026	css_put(&memcg->css);
5027}
 
5028
5029/**
5030 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
5031 * @folio: folio being added to swap
5032 * @entry: swap entry to charge
5033 *
5034 * Try to charge @folio's memcg for the swap space at @entry.
5035 *
5036 * Returns 0 on success, -ENOMEM on failure.
5037 */
5038int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
5039{
5040	unsigned int nr_pages = folio_nr_pages(folio);
5041	struct page_counter *counter;
5042	struct mem_cgroup *memcg;
5043	unsigned short oldid;
5044
5045	if (do_memsw_account())
5046		return 0;
 
 
5047
5048	memcg = folio_memcg(folio);
5049
5050	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
5051	if (!memcg)
5052		return 0;
5053
5054	if (!entry.val) {
5055		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5056		return 0;
5057	}
5058
5059	memcg = mem_cgroup_id_get_online(memcg);
5060
5061	if (!mem_cgroup_is_root(memcg) &&
5062	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
5063		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
5064		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
5065		mem_cgroup_id_put(memcg);
5066		return -ENOMEM;
5067	}
5068
5069	/* Get references for the tail pages, too */
5070	if (nr_pages > 1)
5071		mem_cgroup_id_get_many(memcg, nr_pages - 1);
5072	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
5073	VM_BUG_ON_FOLIO(oldid, folio);
5074	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
5075
5076	return 0;
5077}
5078
5079/**
5080 * __mem_cgroup_uncharge_swap - uncharge swap space
5081 * @entry: swap entry to uncharge
5082 * @nr_pages: the amount of swap space to uncharge
5083 */
5084void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
5085{
5086	struct mem_cgroup *memcg;
5087	unsigned short id;
5088
5089	id = swap_cgroup_record(entry, 0, nr_pages);
5090	rcu_read_lock();
5091	memcg = mem_cgroup_from_id(id);
5092	if (memcg) {
5093		if (!mem_cgroup_is_root(memcg)) {
5094			if (do_memsw_account())
5095				page_counter_uncharge(&memcg->memsw, nr_pages);
5096			else
5097				page_counter_uncharge(&memcg->swap, nr_pages);
5098		}
5099		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
5100		mem_cgroup_id_put_many(memcg, nr_pages);
5101	}
5102	rcu_read_unlock();
5103}
5104
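/*
 * Return how much swap @memcg may still use: the global amount of free
 * swap, clamped by the remaining headroom (swap.max - usage) of every
 * ancestor up to the root. With cgroup1 memsw accounting, only the
 * global value matters.
 */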
5105long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
 
5106{
5107	long nr_swap_pages = get_nr_swap_pages();
5108
5109	if (mem_cgroup_disabled() || do_memsw_account())
5110		return nr_swap_pages;
5111	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
5112		nr_swap_pages = min_t(long, nr_swap_pages,
5113				      READ_ONCE(memcg->swap.max) -
5114				      page_counter_read(&memcg->swap));
5115	return nr_swap_pages;
5116}
 
5117
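/*
 * Swap is considered "full" for @folio if the device-level heuristic
 * vm_swap_full() says so, or if any ancestor of the folio's memcg has
 * swap usage at or above half of its swap.high or swap.max limit.
 */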
5118bool mem_cgroup_swap_full(struct folio *folio)
5119{
5120	struct mem_cgroup *memcg;
5121
5122	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5123
5124	if (vm_swap_full())
5125		return true;
5126	if (do_memsw_account())
5127		return false;
5128
5129	memcg = folio_memcg(folio);
5130	if (!memcg)
5131		return false;
5132
5133	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
5134		unsigned long usage = page_counter_read(&memcg->swap);
5135
5136		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
5137		    usage * 2 >= READ_ONCE(memcg->swap.max))
5138			return true;
5139	}
5140
5141	return false;
5142}
5143
5144static int __init setup_swap_account(char *s)
5145{
5146	bool res;
 
5147
5148	if (!kstrtobool(s, &res) && !res)
5149		pr_warn_once("The swapaccount=0 commandline option is deprecated "
5150			     "in favor of configuring swap control via cgroupfs. "
5151			     "Please report your usecase to linux-mm@kvack.org if you "
5152			     "depend on this functionality.\n");
5153	return 1;
5154}
5155__setup("swapaccount=", setup_swap_account);
5156
5157static u64 swap_current_read(struct cgroup_subsys_state *css,
5158			     struct cftype *cft)
5159{
5160	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5161
5162	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
5163}
5164
5165static int swap_peak_show(struct seq_file *sf, void *v)
5166{
5167	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
5168
5169	return peak_show(sf, v, &memcg->swap);
 
 
5170}
5171
5172static ssize_t swap_peak_write(struct kernfs_open_file *of, char *buf,
5173			       size_t nbytes, loff_t off)
5174{
5175	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
 
5176
5177	return peak_write(of, buf, nbytes, off, &memcg->swap,
5178			  &memcg->swap_peaks);
5179}
5180
5181static int swap_high_show(struct seq_file *m, void *v)
5182{
5183	return seq_puts_memcg_tunable(m,
5184		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
5185}
5186
5187static ssize_t swap_high_write(struct kernfs_open_file *of,
5188			       char *buf, size_t nbytes, loff_t off)
5189{
5190	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5191	unsigned long high;
5192	int err;
5193
5194	buf = strstrip(buf);
5195	err = page_counter_memparse(buf, "max", &high);
5196	if (err)
5197		return err;
5198
5199	page_counter_set_high(&memcg->swap, high);
5200
5201	return nbytes;
5202}
5203
5204static int swap_max_show(struct seq_file *m, void *v)
 
5205{
5206	return seq_puts_memcg_tunable(m,
5207		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
5208}
5209
5210static ssize_t swap_max_write(struct kernfs_open_file *of,
5211			      char *buf, size_t nbytes, loff_t off)
5212{
5213	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5214	unsigned long max;
5215	int err;
5216
5217	buf = strstrip(buf);
5218	err = page_counter_memparse(buf, "max", &max);
5219	if (err)
5220		return err;
5221
5222	xchg(&memcg->swap.max, max);
5223
5224	return nbytes;
5225}
5226
5227static int swap_events_show(struct seq_file *m, void *v)
 
5228{
5229	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5230
5231	seq_printf(m, "high %lu\n",
5232		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
5233	seq_printf(m, "max %lu\n",
5234		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
5235	seq_printf(m, "fail %lu\n",
5236		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
5237
5238	return 0;
5239}
5240
5241static struct cftype swap_files[] = {
5242	{
5243		.name = "swap.current",
5244		.flags = CFTYPE_NOT_ON_ROOT,
5245		.read_u64 = swap_current_read,
5246	},
5247	{
5248		.name = "swap.high",
5249		.flags = CFTYPE_NOT_ON_ROOT,
5250		.seq_show = swap_high_show,
5251		.write = swap_high_write,
5252	},
5253	{
5254		.name = "swap.max",
5255		.flags = CFTYPE_NOT_ON_ROOT,
5256		.seq_show = swap_max_show,
5257		.write = swap_max_write,
5258	},
5259	{
5260		.name = "swap.peak",
5261		.flags = CFTYPE_NOT_ON_ROOT,
5262		.open = peak_open,
5263		.release = peak_release,
5264		.seq_show = swap_peak_show,
5265		.write = swap_peak_write,
5266	},
5267	{
5268		.name = "swap.events",
5269		.flags = CFTYPE_NOT_ON_ROOT,
5270		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
5271		.seq_show = swap_events_show,
5272	},
5273	{ }	/* terminate */
5274};
5275
5276#ifdef CONFIG_ZSWAP
5277/**
5278 * obj_cgroup_may_zswap - check if this cgroup can zswap
5279 * @objcg: the object cgroup
5280 *
5281 * Check if the hierarchical zswap limit has been reached.
5282 *
5283 * This doesn't check for specific headroom, and it is not atomic
5284 * either. But with zswap, the size of the allocation is only known
5285 * once compression has occurred, and this optimistic pre-check avoids
5286 * spending cycles on compression when there is already no room left
5287 * or zswap is disabled altogether somewhere in the hierarchy.
5288 */
5289bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
5290{
5291	struct mem_cgroup *memcg, *original_memcg;
5292	bool ret = true;
5293
5294	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5295		return true;
5296
5297	original_memcg = get_mem_cgroup_from_objcg(objcg);
5298	for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
5299	     memcg = parent_mem_cgroup(memcg)) {
5300		unsigned long max = READ_ONCE(memcg->zswap_max);
5301		unsigned long pages;
5302
5303		if (max == PAGE_COUNTER_MAX)
5304			continue;
5305		if (max == 0) {
5306			ret = false;
5307			break;
5308		}
5309
5310		/* Force flush to get accurate stats for charging */
5311		__mem_cgroup_flush_stats(memcg, true);
5312		pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
5313		if (pages < max)
5314			continue;
5315		ret = false;
5316		break;
5317	}
5318	mem_cgroup_put(original_memcg);
5319	return ret;
5320}
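/*
 * Sketch of the expected caller sequence (the store path itself lives in
 * mm/zswap.c, not here): call obj_cgroup_may_zswap() before compressing,
 * compress, then obj_cgroup_charge_zswap() with the compressed size; the
 * matching obj_cgroup_uncharge_zswap() runs when the entry is loaded back
 * or freed.
 */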
5321
5322/**
5323 * obj_cgroup_charge_zswap - charge compression backend memory
5324 * @objcg: the object cgroup
5325 * @size: size of compressed object
5326 *
5327 * This forces the charge after obj_cgroup_may_zswap() allowed
5328 * compression and storage in zswap for this cgroup to go ahead.
5329 */
5330void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
5331{
5332	struct mem_cgroup *memcg;
5333
5334	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5335		return;
5336
5337	VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
5338
5339	/* PF_MEMALLOC context, charging must succeed */
5340	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
5341		VM_WARN_ON_ONCE(1);
5342
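	/*
	 * The objcg->memcg association can change under reparenting, so
	 * the lookup and the stat updates are done under rcu_read_lock().
	 */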
5343	rcu_read_lock();
5344	memcg = obj_cgroup_memcg(objcg);
5345	mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
5346	mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
5347	rcu_read_unlock();
5348}
5349
5350/**
5351 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
5352 * @objcg: the object cgroup
5353 * @size: size of compressed object
5354 *
5355 * Uncharges zswap memory when the compressed object is loaded back in or freed.
5356 */
5357void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
5358{
5359	struct mem_cgroup *memcg;
5360
5361	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
5362		return;
5363
5364	obj_cgroup_uncharge(objcg, size);
5365
5366	rcu_read_lock();
5367	memcg = obj_cgroup_memcg(objcg);
5368	mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
5369	mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
5370	rcu_read_unlock();
5371}
5372
5373bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
5374{
5375	/* if zswap is disabled, do not block pages going to the swapping device */
5376	if (!zswap_is_enabled())
5377		return true;
5378
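	/*
	 * Writeback to the backing swap device is allowed only if it is
	 * enabled in this cgroup and in every ancestor up to the root.
	 */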
5379	for (; memcg; memcg = parent_mem_cgroup(memcg))
5380		if (!READ_ONCE(memcg->zswap_writeback))
5381			return false;
5382
5383	return true;
5384}
5385
5386static u64 zswap_current_read(struct cgroup_subsys_state *css,
5387			      struct cftype *cft)
5388{
5389	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5390
5391	mem_cgroup_flush_stats(memcg);
5392	return memcg_page_state(memcg, MEMCG_ZSWAP_B);
5393}
5394
5395static int zswap_max_show(struct seq_file *m, void *v)
5396{
5397	return seq_puts_memcg_tunable(m,
5398		READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
5399}
5400
5401static ssize_t zswap_max_write(struct kernfs_open_file *of,
5402			       char *buf, size_t nbytes, loff_t off)
5403{
5404	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5405	unsigned long max;
5406	int err;
5407
5408	buf = strstrip(buf);
5409	err = page_counter_memparse(buf, "max", &max);
5410	if (err)
5411		return err;
5412
5413	xchg(&memcg->zswap_max, max);
5414
5415	return nbytes;
5416}
5417
5418static int zswap_writeback_show(struct seq_file *m, void *v)
5419{
5420	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5421
5422	seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
5423	return 0;
5424}
5425
5426static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
5427				char *buf, size_t nbytes, loff_t off)
5428{
5429	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5430	int zswap_writeback;
5431	ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);
5432
5433	if (parse_ret)
5434		return parse_ret;
5435
5436	if (zswap_writeback != 0 && zswap_writeback != 1)
5437		return -EINVAL;
5438
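	/*
	 * Pairs with the READ_ONCE() in zswap_writeback_show() and
	 * mem_cgroup_zswap_writeback_enabled() above.
	 */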
5439	WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
5440	return nbytes;
5441}
5442
5443static struct cftype zswap_files[] = {
5444	{
5445		.name = "zswap.current",
5446		.flags = CFTYPE_NOT_ON_ROOT,
5447		.read_u64 = zswap_current_read,
5448	},
5449	{
5450		.name = "zswap.max",
5451		.flags = CFTYPE_NOT_ON_ROOT,
5452		.seq_show = zswap_max_show,
5453		.write = zswap_max_write,
5454	},
5455	{
5456		.name = "zswap.writeback",
5457		.seq_show = zswap_writeback_show,
5458		.write = zswap_writeback_write,
5459	},
5460	{ }	/* terminate */
5461};
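/*
 * Illustrative only: these surface as memory.zswap.current, memory.zswap.max
 * and memory.zswap.writeback.  The writeback knob has no CFTYPE_NOT_ON_ROOT
 * flag above, so it also exists in the root cgroup; writing 0 there, e.g.
 *
 *	echo 0 > /sys/fs/cgroup/memory.zswap.writeback
 *
 * keeps zswap entries from being written back to the swap device anywhere
 * in the hierarchy (the mount point is only an example).
 */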
5462#endif /* CONFIG_ZSWAP */
5463
5464static int __init mem_cgroup_swap_init(void)
5465{
5466	if (mem_cgroup_disabled())
5467		return 0;
5468
5469	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
5470#ifdef CONFIG_MEMCG_V1
5471	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
5472#endif
5473#ifdef CONFIG_ZSWAP
5474	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
5475#endif
5476	return 0;
5477}
5478subsys_initcall(mem_cgroup_swap_init);
5479
5480#endif /* CONFIG_SWAP */