memcontrol.c - Linux v3.15
   1/* memcontrol.c - Memory Controller
   2 *
   3 * Copyright IBM Corporation, 2007
   4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
   5 *
   6 * Copyright 2007 OpenVZ SWsoft Inc
   7 * Author: Pavel Emelianov <xemul@openvz.org>
   8 *
   9 * Memory thresholds
  10 * Copyright (C) 2009 Nokia Corporation
  11 * Author: Kirill A. Shutemov
  12 *
  13 * Kernel Memory Controller
  14 * Copyright (C) 2012 Parallels Inc. and Google Inc.
  15 * Authors: Glauber Costa and Suleiman Souhlal
  16 *
  17 * This program is free software; you can redistribute it and/or modify
  18 * it under the terms of the GNU General Public License as published by
  19 * the Free Software Foundation; either version 2 of the License, or
  20 * (at your option) any later version.
  21 *
  22 * This program is distributed in the hope that it will be useful,
  23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  25 * GNU General Public License for more details.
  26 */
  27
  28#include <linux/res_counter.h>
  29#include <linux/memcontrol.h>
  30#include <linux/cgroup.h>
  31#include <linux/mm.h>
  32#include <linux/hugetlb.h>
  33#include <linux/pagemap.h>
  34#include <linux/smp.h>
  35#include <linux/page-flags.h>
  36#include <linux/backing-dev.h>
  37#include <linux/bit_spinlock.h>
  38#include <linux/rcupdate.h>
  39#include <linux/limits.h>
  40#include <linux/export.h>
  41#include <linux/mutex.h>
  42#include <linux/rbtree.h>
  43#include <linux/slab.h>
  44#include <linux/swap.h>
  45#include <linux/swapops.h>
  46#include <linux/spinlock.h>
  47#include <linux/eventfd.h>
  48#include <linux/poll.h>
  49#include <linux/sort.h>
  50#include <linux/fs.h>
  51#include <linux/seq_file.h>
  52#include <linux/vmpressure.h>
  53#include <linux/mm_inline.h>
  54#include <linux/page_cgroup.h>
  55#include <linux/cpu.h>
  56#include <linux/oom.h>
  57#include <linux/lockdep.h>
  58#include <linux/file.h>
  59#include "internal.h"
  60#include <net/sock.h>
  61#include <net/ip.h>
  62#include <net/tcp_memcontrol.h>
  63#include "slab.h"
  64
  65#include <asm/uaccess.h>
  66
  67#include <trace/events/vmscan.h>
  68
  69struct cgroup_subsys memory_cgrp_subsys __read_mostly;
  70EXPORT_SYMBOL(memory_cgrp_subsys);
  71
  72#define MEM_CGROUP_RECLAIM_RETRIES	5
  73static struct mem_cgroup *root_mem_cgroup __read_mostly;
  74
  75#ifdef CONFIG_MEMCG_SWAP
  76/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
  77int do_swap_account __read_mostly;
  78
   79/* for remembering the boot option */
  80#ifdef CONFIG_MEMCG_SWAP_ENABLED
  81static int really_do_swap_account __initdata = 1;
  82#else
  83static int really_do_swap_account __initdata = 0;
  84#endif
  85
  86#else
  87#define do_swap_account		0
  88#endif
  89
  90
  91static const char * const mem_cgroup_stat_names[] = {
  92	"cache",
  93	"rss",
  94	"rss_huge",
  95	"mapped_file",
  96	"writeback",
  97	"swap",
  98};
  99
 100enum mem_cgroup_events_index {
 101	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
 102	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
 103	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
 104	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
 105	MEM_CGROUP_EVENTS_NSTATS,
 106};
 107
 108static const char * const mem_cgroup_events_names[] = {
 109	"pgpgin",
 110	"pgpgout",
 111	"pgfault",
 112	"pgmajfault",
 113};
 114
 115static const char * const mem_cgroup_lru_names[] = {
 116	"inactive_anon",
 117	"active_anon",
 118	"inactive_file",
 119	"active_file",
 120	"unevictable",
 121};
 122
 123/*
  124 * Per memcg event counter is incremented at every pagein/pageout. With THP,
  125 * it will be incremented by the number of pages. This counter is used to
  126 * trigger some periodic events. This is straightforward and better
 127 * than using jiffies etc. to handle periodic memcg event.
 128 */
 129enum mem_cgroup_events_target {
 130	MEM_CGROUP_TARGET_THRESH,
 131	MEM_CGROUP_TARGET_SOFTLIMIT,
 132	MEM_CGROUP_TARGET_NUMAINFO,
 133	MEM_CGROUP_NTARGETS,
 134};
 135#define THRESHOLDS_EVENTS_TARGET 128
 136#define SOFTLIMIT_EVENTS_TARGET 1024
 137#define NUMAINFO_EVENTS_TARGET	1024
 138
 139struct mem_cgroup_stat_cpu {
 140	long count[MEM_CGROUP_STAT_NSTATS];
 141	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
 142	unsigned long nr_page_events;
 143	unsigned long targets[MEM_CGROUP_NTARGETS];
 144};
 145
 146struct mem_cgroup_reclaim_iter {
 147	/*
 148	 * last scanned hierarchy member. Valid only if last_dead_count
 149	 * matches memcg->dead_count of the hierarchy root group.
 150	 */
 151	struct mem_cgroup *last_visited;
 152	int last_dead_count;
 153
 154	/* scan generation, increased every round-trip */
 155	unsigned int generation;
 156};
 157
 158/*
 159 * per-zone information in memory controller.
 160 */
 161struct mem_cgroup_per_zone {
 162	struct lruvec		lruvec;
 163	unsigned long		lru_size[NR_LRU_LISTS];
 164
 165	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
 166
 167	struct rb_node		tree_node;	/* RB tree node */
 168	unsigned long long	usage_in_excess;/* Set to the value by which */
 169						/* the soft limit is exceeded*/
 170	bool			on_tree;
 171	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
 172						/* use container_of	   */
 173};
 174
 175struct mem_cgroup_per_node {
 176	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
 177};
 178
 179/*
 180 * Cgroups above their limits are maintained in a RB-Tree, independent of
 181 * their hierarchy representation
 182 */
 183
 184struct mem_cgroup_tree_per_zone {
 185	struct rb_root rb_root;
 186	spinlock_t lock;
 187};
 188
 189struct mem_cgroup_tree_per_node {
 190	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
 191};
 192
 193struct mem_cgroup_tree {
 194	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
 195};
 196
 197static struct mem_cgroup_tree soft_limit_tree __read_mostly;
 198
 199struct mem_cgroup_threshold {
 200	struct eventfd_ctx *eventfd;
 201	u64 threshold;
 202};
 203
 204/* For threshold */
 205struct mem_cgroup_threshold_ary {
 206	/* An array index points to threshold just below or equal to usage. */
 207	int current_threshold;
 208	/* Size of entries[] */
 209	unsigned int size;
 210	/* Array of thresholds */
 211	struct mem_cgroup_threshold entries[0];
 212};
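/*
 * Illustrative example: with thresholds registered at 4M, 8M and 16M and a
 * current usage of 10M, entries[] is kept sorted in ascending order and
 * current_threshold is the index of the 8M entry, i.e. the largest
 * threshold that does not exceed the usage.
 */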
 213
 214struct mem_cgroup_thresholds {
 215	/* Primary thresholds array */
 216	struct mem_cgroup_threshold_ary *primary;
 217	/*
 218	 * Spare threshold array.
 219	 * This is needed to make mem_cgroup_unregister_event() "never fail".
 220	 * It must be able to store at least primary->size - 1 entries.
 221	 */
 222	struct mem_cgroup_threshold_ary *spare;
 223};
 224
 225/* for OOM */
 226struct mem_cgroup_eventfd_list {
 227	struct list_head list;
 228	struct eventfd_ctx *eventfd;
 229};
 230
 231/*
 232 * cgroup_event represents events which userspace want to receive.
 233 */
 234struct mem_cgroup_event {
 235	/*
 236	 * memcg which the event belongs to.
 237	 */
 238	struct mem_cgroup *memcg;
 239	/*
 240	 * eventfd to signal userspace about the event.
 241	 */
 242	struct eventfd_ctx *eventfd;
 243	/*
 244	 * Each of these stored in a list by the cgroup.
 245	 */
 246	struct list_head list;
 247	/*
 248	 * register_event() callback will be used to add new userspace
 249	 * waiter for changes related to this event.  Use eventfd_signal()
 250	 * on eventfd to send notification to userspace.
 251	 */
 252	int (*register_event)(struct mem_cgroup *memcg,
 253			      struct eventfd_ctx *eventfd, const char *args);
 254	/*
 255	 * unregister_event() callback will be called when userspace closes
 256	 * the eventfd or on cgroup removing.  This callback must be set,
  257	 * the eventfd or when the cgroup is removed.  This callback must be set
  258	 * if you want to provide notification functionality.
 259	void (*unregister_event)(struct mem_cgroup *memcg,
 260				 struct eventfd_ctx *eventfd);
 261	/*
 262	 * All fields below needed to unregister event when
 263	 * userspace closes eventfd.
 264	 */
 265	poll_table pt;
 266	wait_queue_head_t *wqh;
 267	wait_queue_t wait;
 268	struct work_struct remove;
 269};
 270
 271static void mem_cgroup_threshold(struct mem_cgroup *memcg);
 272static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
 273
 274/*
 275 * The memory controller data structure. The memory controller controls both
 276 * page cache and RSS per cgroup. We would eventually like to provide
 277 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 278 * to help the administrator determine what knobs to tune.
 279 *
 280 * TODO: Add a water mark for the memory controller. Reclaim will begin when
  281 * we hit the water mark. Maybe even add a low water mark, such that
  282 * no reclaim occurs from a cgroup at its low water mark; this is
  283 * a feature that will be implemented much later in the future.
 284 */
 285struct mem_cgroup {
 286	struct cgroup_subsys_state css;
 287	/*
 288	 * the counter to account for memory usage
 289	 */
 290	struct res_counter res;
 291
 292	/* vmpressure notifications */
 293	struct vmpressure vmpressure;
 294
 295	/*
 296	 * the counter to account for mem+swap usage.
 297	 */
 298	struct res_counter memsw;
 299
 300	/*
 301	 * the counter to account for kernel memory usage.
 302	 */
 303	struct res_counter kmem;
 304	/*
 305	 * Should the accounting and control be hierarchical, per subtree?
 306	 */
 307	bool use_hierarchy;
 308	unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */
 309
 310	bool		oom_lock;
 311	atomic_t	under_oom;
 312	atomic_t	oom_wakeups;
 313
 314	int	swappiness;
 315	/* OOM-Killer disable */
 316	int		oom_kill_disable;
 317
 318	/* set when res.limit == memsw.limit */
 319	bool		memsw_is_minimum;
 320
 321	/* protect arrays of thresholds */
 322	struct mutex thresholds_lock;
 323
 324	/* thresholds for memory usage. RCU-protected */
 325	struct mem_cgroup_thresholds thresholds;
 326
 327	/* thresholds for mem+swap usage. RCU-protected */
 328	struct mem_cgroup_thresholds memsw_thresholds;
 329
 330	/* For oom notifier event fd */
 331	struct list_head oom_notify;
 332
 333	/*
 334	 * Should we move charges of a task when a task is moved into this
 335	 * mem_cgroup ? And what type of charges should we move ?
 336	 */
 337	unsigned long move_charge_at_immigrate;
 338	/*
 339	 * set > 0 if pages under this cgroup are moving to other cgroup.
 340	 */
 341	atomic_t	moving_account;
 342	/* taken only while moving_account > 0 */
 343	spinlock_t	move_lock;
 344	/*
 345	 * percpu counter.
 346	 */
 347	struct mem_cgroup_stat_cpu __percpu *stat;
 348	/*
 349	 * used when a cpu is offlined or other synchronizations
 350	 * See mem_cgroup_read_stat().
 351	 */
 352	struct mem_cgroup_stat_cpu nocpu_base;
 353	spinlock_t pcp_counter_lock;
 354
 355	atomic_t	dead_count;
 356#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
 357	struct cg_proto tcp_mem;
 358#endif
 359#if defined(CONFIG_MEMCG_KMEM)
 360	/* analogous to slab_common's slab_caches list. per-memcg */
 361	struct list_head memcg_slab_caches;
 362	/* Not a spinlock, we can take a lot of time walking the list */
 363	struct mutex slab_caches_mutex;
 364        /* Index in the kmem_cache->memcg_params->memcg_caches array */
 365	int kmemcg_id;
 366#endif
 367
 368	int last_scanned_node;
 369#if MAX_NUMNODES > 1
 370	nodemask_t	scan_nodes;
 371	atomic_t	numainfo_events;
 372	atomic_t	numainfo_updating;
 373#endif
 374
 375	/* List of events which userspace want to receive */
 376	struct list_head event_list;
 377	spinlock_t event_list_lock;
 378
 379	struct mem_cgroup_per_node *nodeinfo[0];
 380	/* WARNING: nodeinfo must be the last member here */
 381};
 382
 383/* internal only representation about the status of kmem accounting. */
 384enum {
 385	KMEM_ACCOUNTED_ACTIVE, /* accounted by this cgroup itself */
 386	KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */
 387};
 388
 389#ifdef CONFIG_MEMCG_KMEM
 390static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
 391{
 392	set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
 393}
 394
 395static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
 396{
 397	return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
 398}
 399
 400static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
 401{
 402	/*
 403	 * Our caller must use css_get() first, because memcg_uncharge_kmem()
 404	 * will call css_put() if it sees the memcg is dead.
 405	 */
 406	smp_wmb();
 407	if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
 408		set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags);
 409}
 410
 411static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg)
 412{
 413	return test_and_clear_bit(KMEM_ACCOUNTED_DEAD,
 414				  &memcg->kmem_account_flags);
 415}
 416#endif
 417
 418/* Stuffs for move charges at task migration. */
 419/*
  420 * Types of charges to be moved. "move_charge_at_immigrate" and
 421 * "immigrate_flags" are treated as a left-shifted bitmap of these types.
 422 */
 423enum move_type {
 424	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
 425	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
 426	NR_MOVE_TYPE,
 427};
 428
 429/* "mc" and its members are protected by cgroup_mutex */
 430static struct move_charge_struct {
 431	spinlock_t	  lock; /* for from, to */
 432	struct mem_cgroup *from;
 433	struct mem_cgroup *to;
 434	unsigned long immigrate_flags;
 435	unsigned long precharge;
 436	unsigned long moved_charge;
 437	unsigned long moved_swap;
 438	struct task_struct *moving_task;	/* a task moving charges */
 439	wait_queue_head_t waitq;		/* a waitq for other context */
 440} mc = {
 441	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
 442	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
 443};
 444
 445static bool move_anon(void)
 446{
 447	return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.immigrate_flags);
 448}
 449
 450static bool move_file(void)
 451{
 452	return test_bit(MOVE_CHARGE_TYPE_FILE, &mc.immigrate_flags);
 453}
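/*
 * For example, writing 1 (bit MOVE_CHARGE_TYPE_ANON) to
 * memory.move_charge_at_immigrate lets only private anonymous pages be
 * moved along with a migrating task, while writing 3 sets both the ANON
 * and FILE bits so move_anon() and move_file() above both return true.
 */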
 454
 455/*
 456 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 457 * limit reclaim to prevent infinite loops, if they ever occur.
 458 */
 459#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
 460#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
 461
 462enum charge_type {
 463	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
 464	MEM_CGROUP_CHARGE_TYPE_ANON,
 465	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
 466	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
 467	NR_CHARGE_TYPE,
 468};
 469
 470/* for encoding cft->private value on file */
 471enum res_type {
 472	_MEM,
 473	_MEMSWAP,
 474	_OOM_TYPE,
 475	_KMEM,
 476};
 477
 478#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
 479#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
 480#define MEMFILE_ATTR(val)	((val) & 0xffff)
  481/* Used for OOM notifier */
 482#define OOM_CONTROL		(0)
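/*
 * For example, MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) packs the resource type
 * into the upper 16 bits and the attribute into the lower 16 bits of
 * cft->private; MEMFILE_TYPE() then recovers _MEMSWAP and MEMFILE_ATTR()
 * recovers RES_LIMIT.
 */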
 483
 484/*
 485 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 486 */
 487#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
 488#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
 489#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
 490#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
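/*
 * A caller shrinking a limit would typically pass MEM_CGROUP_RECLAIM_SHRINK;
 * combining it with MEM_CGROUP_RECLAIM_NOSWAP, e.g.
 *
 *	flags = MEM_CGROUP_RECLAIM_SHRINK | MEM_CGROUP_RECLAIM_NOSWAP;
 *
 * additionally forbids pushing anonymous pages to swap during that reclaim:
 * mem_cgroup_reclaim() below maps the NOSWAP bit to its "noswap" mode and
 * the SHRINK bit to stopping after minimal progress.
 */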
 491
 492/*
 493 * The memcg_create_mutex will be held whenever a new cgroup is created.
 494 * As a consequence, any change that needs to protect against new child cgroups
 495 * appearing has to hold it as well.
 496 */
 497static DEFINE_MUTEX(memcg_create_mutex);
 498
 499struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
 500{
 501	return s ? container_of(s, struct mem_cgroup, css) : NULL;
 502}
 503
 504/* Some nice accessors for the vmpressure. */
 505struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
 506{
 507	if (!memcg)
 508		memcg = root_mem_cgroup;
 509	return &memcg->vmpressure;
 510}
 511
 512struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
 513{
 514	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
 515}
 516
 517static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
 518{
 519	return (memcg == root_mem_cgroup);
 520}
 521
 522/*
 523 * We restrict the id in the range of [1, 65535], so it can fit into
 524 * an unsigned short.
 525 */
 526#define MEM_CGROUP_ID_MAX	USHRT_MAX
 527
 528static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
 529{
 530	/*
  531	 * The ID of the root cgroup is 0, but memcg treats 0 as an
 532	 * invalid ID, so we return (cgroup_id + 1).
 533	 */
 534	return memcg->css.cgroup->id + 1;
 535}
 536
 537static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
 538{
 539	struct cgroup_subsys_state *css;
 540
 541	css = css_from_id(id - 1, &memory_cgrp_subsys);
 542	return mem_cgroup_from_css(css);
 543}
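/*
 * The two helpers above are inverses while the memcg is alive:
 * mem_cgroup_from_id(mem_cgroup_id(memcg)) == memcg. Id 0 is reserved as
 * invalid, and ids 1..MEM_CGROUP_ID_MAX fit in an unsigned short, e.g. for
 * the swap accounting records.
 */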
 544
 545/* Writing them here to avoid exposing memcg's inner layout */
 546#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
 547
 548void sock_update_memcg(struct sock *sk)
 549{
 550	if (mem_cgroup_sockets_enabled) {
 551		struct mem_cgroup *memcg;
 552		struct cg_proto *cg_proto;
 553
 554		BUG_ON(!sk->sk_prot->proto_cgroup);
 555
 556		/* Socket cloning can throw us here with sk_cgrp already
  557		 * filled. It won't, however, necessarily happen from
 558		 * process context. So the test for root memcg given
 559		 * the current task's memcg won't help us in this case.
 560		 *
 561		 * Respecting the original socket's memcg is a better
 562		 * decision in this case.
 563		 */
 564		if (sk->sk_cgrp) {
 565			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
 566			css_get(&sk->sk_cgrp->memcg->css);
 567			return;
 568		}
 569
 570		rcu_read_lock();
 571		memcg = mem_cgroup_from_task(current);
 572		cg_proto = sk->sk_prot->proto_cgroup(memcg);
 573		if (!mem_cgroup_is_root(memcg) &&
 574		    memcg_proto_active(cg_proto) && css_tryget(&memcg->css)) {
 575			sk->sk_cgrp = cg_proto;
 576		}
 577		rcu_read_unlock();
 578	}
 579}
 580EXPORT_SYMBOL(sock_update_memcg);
 581
 582void sock_release_memcg(struct sock *sk)
 583{
 584	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
 585		struct mem_cgroup *memcg;
 586		WARN_ON(!sk->sk_cgrp->memcg);
 587		memcg = sk->sk_cgrp->memcg;
 588		css_put(&sk->sk_cgrp->memcg->css);
 589	}
 590}
 591
 592struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
 593{
 594	if (!memcg || mem_cgroup_is_root(memcg))
 595		return NULL;
 596
 597	return &memcg->tcp_mem;
 598}
 599EXPORT_SYMBOL(tcp_proto_cgroup);
 600
 601static void disarm_sock_keys(struct mem_cgroup *memcg)
 602{
 603	if (!memcg_proto_activated(&memcg->tcp_mem))
 604		return;
 605	static_key_slow_dec(&memcg_socket_limit_enabled);
 606}
 607#else
 608static void disarm_sock_keys(struct mem_cgroup *memcg)
 609{
 610}
 611#endif
 612
 613#ifdef CONFIG_MEMCG_KMEM
 614/*
 615 * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
 616 * The main reason for not using cgroup id for this:
 617 *  this works better in sparse environments, where we have a lot of memcgs,
 618 *  but only a few kmem-limited. Or also, if we have, for instance, 200
 619 *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
 620 *  200 entry array for that.
 621 *
 622 * The current size of the caches array is stored in
 623 * memcg_limited_groups_array_size.  It will double each time we have to
 624 * increase it.
 625 */
 626static DEFINE_IDA(kmem_limited_groups);
 627int memcg_limited_groups_array_size;
 628
 629/*
  630 * MIN_SIZE is different from 1, because we would like to avoid going through
 631 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 632 * cgroups is a reasonable guess. In the future, it could be a parameter or
 633 * tunable, but that is strictly not necessary.
 634 *
 635 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 636 * this constant directly from cgroup, but it is understandable that this is
 637 * better kept as an internal representation in cgroup.c. In any case, the
 638 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 639 * increase ours as well if it increases.
 640 */
 641#define MEMCG_CACHES_MIN_SIZE 4
 642#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
 643
 644/*
 645 * A lot of the calls to the cache allocation functions are expected to be
 646 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
  647 * conditional on this static branch, we have to allow modules that do
  648 * kmem_cache_alloc and the like to see this symbol as well.
 649 */
 650struct static_key memcg_kmem_enabled_key;
 651EXPORT_SYMBOL(memcg_kmem_enabled_key);
 652
 653static void disarm_kmem_keys(struct mem_cgroup *memcg)
 654{
 655	if (memcg_kmem_is_active(memcg)) {
 656		static_key_slow_dec(&memcg_kmem_enabled_key);
 657		ida_simple_remove(&kmem_limited_groups, memcg->kmemcg_id);
 658	}
 659	/*
 660	 * This check can't live in kmem destruction function,
 661	 * since the charges will outlive the cgroup
 662	 */
 663	WARN_ON(res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0);
 664}
 665#else
 666static void disarm_kmem_keys(struct mem_cgroup *memcg)
 667{
 668}
 669#endif /* CONFIG_MEMCG_KMEM */
 670
 671static void disarm_static_keys(struct mem_cgroup *memcg)
 672{
 673	disarm_sock_keys(memcg);
 674	disarm_kmem_keys(memcg);
 675}
 676
 677static void drain_all_stock_async(struct mem_cgroup *memcg);
 678
 679static struct mem_cgroup_per_zone *
 680mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
 681{
 682	VM_BUG_ON((unsigned)nid >= nr_node_ids);
 683	return &memcg->nodeinfo[nid]->zoneinfo[zid];
 684}
 685
 686struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
 687{
 688	return &memcg->css;
 689}
 690
 691static struct mem_cgroup_per_zone *
 692page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
 693{
 694	int nid = page_to_nid(page);
 695	int zid = page_zonenum(page);
 696
 697	return mem_cgroup_zoneinfo(memcg, nid, zid);
 698}
 699
 700static struct mem_cgroup_tree_per_zone *
 701soft_limit_tree_node_zone(int nid, int zid)
 702{
 703	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
 704}
 705
 706static struct mem_cgroup_tree_per_zone *
 707soft_limit_tree_from_page(struct page *page)
 708{
 709	int nid = page_to_nid(page);
 710	int zid = page_zonenum(page);
 711
 712	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
 713}
 714
 715static void
 716__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
 717				struct mem_cgroup_per_zone *mz,
 718				struct mem_cgroup_tree_per_zone *mctz,
 719				unsigned long long new_usage_in_excess)
 720{
 721	struct rb_node **p = &mctz->rb_root.rb_node;
 722	struct rb_node *parent = NULL;
 723	struct mem_cgroup_per_zone *mz_node;
 724
 725	if (mz->on_tree)
 726		return;
 727
 728	mz->usage_in_excess = new_usage_in_excess;
 729	if (!mz->usage_in_excess)
 730		return;
 731	while (*p) {
 732		parent = *p;
 733		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
 734					tree_node);
 735		if (mz->usage_in_excess < mz_node->usage_in_excess)
 736			p = &(*p)->rb_left;
 737		/*
 738		 * We can't avoid mem cgroups that are over their soft
 739		 * limit by the same amount
 740		 */
 741		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
 742			p = &(*p)->rb_right;
 743	}
 744	rb_link_node(&mz->tree_node, parent, p);
 745	rb_insert_color(&mz->tree_node, &mctz->rb_root);
 746	mz->on_tree = true;
 747}
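/*
 * The per-zone tree is therefore ordered by usage_in_excess, with the
 * right-most node belonging to the memcg furthest over its soft limit;
 * __mem_cgroup_largest_soft_limit_node() below simply picks rb_last() as
 * the next soft-limit reclaim victim.
 */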
 748
 749static void
 750__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
 751				struct mem_cgroup_per_zone *mz,
 752				struct mem_cgroup_tree_per_zone *mctz)
 753{
 754	if (!mz->on_tree)
 755		return;
 756	rb_erase(&mz->tree_node, &mctz->rb_root);
 757	mz->on_tree = false;
 758}
 759
 760static void
 761mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
 762				struct mem_cgroup_per_zone *mz,
 763				struct mem_cgroup_tree_per_zone *mctz)
 764{
 765	spin_lock(&mctz->lock);
 766	__mem_cgroup_remove_exceeded(memcg, mz, mctz);
 767	spin_unlock(&mctz->lock);
 768}
 769
 770
 771static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
 772{
 773	unsigned long long excess;
 774	struct mem_cgroup_per_zone *mz;
 775	struct mem_cgroup_tree_per_zone *mctz;
 776	int nid = page_to_nid(page);
 777	int zid = page_zonenum(page);
 778	mctz = soft_limit_tree_from_page(page);
 779
 780	/*
  781	 * Necessary to update all ancestors when hierarchy is used,
  782	 * because their event counter is not touched.
 783	 */
 784	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
 785		mz = mem_cgroup_zoneinfo(memcg, nid, zid);
 786		excess = res_counter_soft_limit_excess(&memcg->res);
 787		/*
 788		 * We have to update the tree if mz is on RB-tree or
 789		 * mem is over its softlimit.
 790		 */
 791		if (excess || mz->on_tree) {
 792			spin_lock(&mctz->lock);
 793			/* if on-tree, remove it */
 794			if (mz->on_tree)
 795				__mem_cgroup_remove_exceeded(memcg, mz, mctz);
 796			/*
 797			 * Insert again. mz->usage_in_excess will be updated.
 798			 * If excess is 0, no tree ops.
 799			 */
 800			__mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
 801			spin_unlock(&mctz->lock);
 802		}
 803	}
 804}
 805
 806static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
 807{
 808	int node, zone;
 809	struct mem_cgroup_per_zone *mz;
 810	struct mem_cgroup_tree_per_zone *mctz;
 811
 812	for_each_node(node) {
 813		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
 814			mz = mem_cgroup_zoneinfo(memcg, node, zone);
 815			mctz = soft_limit_tree_node_zone(node, zone);
 816			mem_cgroup_remove_exceeded(memcg, mz, mctz);
 817		}
 818	}
 819}
 820
 821static struct mem_cgroup_per_zone *
 822__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 823{
 824	struct rb_node *rightmost = NULL;
 825	struct mem_cgroup_per_zone *mz;
 826
 827retry:
 828	mz = NULL;
 829	rightmost = rb_last(&mctz->rb_root);
 830	if (!rightmost)
 831		goto done;		/* Nothing to reclaim from */
 832
 833	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
 834	/*
  835	 * Remove the node now but someone else can add it back;
  836	 * we will add it back at the end of reclaim to its correct
 837	 * position in the tree.
 838	 */
 839	__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
 840	if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
 841		!css_tryget(&mz->memcg->css))
 842		goto retry;
 843done:
 844	return mz;
 845}
 846
 847static struct mem_cgroup_per_zone *
 848mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 849{
 850	struct mem_cgroup_per_zone *mz;
 851
 852	spin_lock(&mctz->lock);
 853	mz = __mem_cgroup_largest_soft_limit_node(mctz);
 854	spin_unlock(&mctz->lock);
 855	return mz;
 856}
 857
 858/*
 859 * Implementation Note: reading percpu statistics for memcg.
 860 *
  861 * Both vmstat[] and percpu_counter use thresholds and do periodic
  862 * synchronization to implement "quick" reads. There is a trade-off between
  863 * reading cost and precision of the value, so we may eventually implement
  864 * periodic synchronization of the counters in memcg as well.
  865 *
  866 * But this _read() function is used for the user interface now. The user
  867 * accounts memory usage by memory cgroup and _always_ requires an exact
  868 * value for that accounting. Even with a quick-and-fuzzy read, we would
  869 * still have to visit all online cpus and compute the sum. So, for now,
  870 * the extra synchronization is not implemented (it is only done for
  871 * cpu hotplug).
  872 *
  873 * If there are kernel-internal users which could use a not-exact value,
  874 * and reading all cpu values becomes a performance bottleneck in some
  875 * common workload, thresholds and synchronization as in vmstat[] should be implemented.
 876 */
 877static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
 878				 enum mem_cgroup_stat_index idx)
 879{
 880	long val = 0;
 881	int cpu;
 882
 883	get_online_cpus();
 884	for_each_online_cpu(cpu)
 885		val += per_cpu(memcg->stat->count[idx], cpu);
 886#ifdef CONFIG_HOTPLUG_CPU
 887	spin_lock(&memcg->pcp_counter_lock);
 888	val += memcg->nocpu_base.count[idx];
 889	spin_unlock(&memcg->pcp_counter_lock);
 890#endif
 891	put_online_cpus();
 892	return val;
 893}
 894
 895static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
 896					 bool charge)
 897{
 898	int val = (charge) ? 1 : -1;
 899	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
 900}
 901
 902static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
 903					    enum mem_cgroup_events_index idx)
 904{
 905	unsigned long val = 0;
 906	int cpu;
 907
 908	get_online_cpus();
 909	for_each_online_cpu(cpu)
 910		val += per_cpu(memcg->stat->events[idx], cpu);
 911#ifdef CONFIG_HOTPLUG_CPU
 912	spin_lock(&memcg->pcp_counter_lock);
 913	val += memcg->nocpu_base.events[idx];
 914	spin_unlock(&memcg->pcp_counter_lock);
 915#endif
 916	put_online_cpus();
 917	return val;
 918}
 919
 920static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 921					 struct page *page,
 922					 bool anon, int nr_pages)
 923{
 924	/*
 925	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
 926	 * counted as CACHE even if it's on ANON LRU.
 927	 */
 928	if (anon)
 929		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
 930				nr_pages);
 931	else
 932		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
 933				nr_pages);
 934
 935	if (PageTransHuge(page))
 936		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
 937				nr_pages);
 938
 939	/* pagein of a big page is an event. So, ignore page size */
 940	if (nr_pages > 0)
 941		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
 942	else {
 943		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
 944		nr_pages = -nr_pages; /* for event */
 945	}
 946
 947	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 948}
 949
 950unsigned long
 951mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 952{
 953	struct mem_cgroup_per_zone *mz;
 954
 955	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
 956	return mz->lru_size[lru];
 957}
 958
 959static unsigned long
 960mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
 961			unsigned int lru_mask)
 962{
 963	struct mem_cgroup_per_zone *mz;
 964	enum lru_list lru;
 965	unsigned long ret = 0;
 966
 967	mz = mem_cgroup_zoneinfo(memcg, nid, zid);
 968
 969	for_each_lru(lru) {
 970		if (BIT(lru) & lru_mask)
 971			ret += mz->lru_size[lru];
 972	}
 973	return ret;
 974}
 975
 976static unsigned long
 977mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
 978			int nid, unsigned int lru_mask)
 979{
 980	u64 total = 0;
 981	int zid;
 982
 983	for (zid = 0; zid < MAX_NR_ZONES; zid++)
 984		total += mem_cgroup_zone_nr_lru_pages(memcg,
 985						nid, zid, lru_mask);
 986
 987	return total;
 988}
 989
 990static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
 991			unsigned int lru_mask)
 992{
 993	int nid;
 994	u64 total = 0;
 995
 996	for_each_node_state(nid, N_MEMORY)
 997		total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
 998	return total;
 999}
1000
1001static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
1002				       enum mem_cgroup_events_target target)
1003{
1004	unsigned long val, next;
1005
1006	val = __this_cpu_read(memcg->stat->nr_page_events);
1007	next = __this_cpu_read(memcg->stat->targets[target]);
1008	/* from time_after() in jiffies.h */
1009	if ((long)next - (long)val < 0) {
1010		switch (target) {
1011		case MEM_CGROUP_TARGET_THRESH:
1012			next = val + THRESHOLDS_EVENTS_TARGET;
1013			break;
1014		case MEM_CGROUP_TARGET_SOFTLIMIT:
1015			next = val + SOFTLIMIT_EVENTS_TARGET;
1016			break;
1017		case MEM_CGROUP_TARGET_NUMAINFO:
1018			next = val + NUMAINFO_EVENTS_TARGET;
1019			break;
1020		default:
1021			break;
1022		}
1023		__this_cpu_write(memcg->stat->targets[target], next);
1024		return true;
1025	}
1026	return false;
1027}
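/*
 * Example: with THRESHOLDS_EVENTS_TARGET == 128, once this cpu has accounted
 * 128 more page events (nr_page_events) than the stored target, the check
 * above fires, the target is advanced to val + 128, and the caller runs the
 * more expensive threshold notification path. The soft-limit and NUMA-info
 * targets work the same way at a 1024-event granularity.
 */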
1028
1029/*
1030 * Check events in order.
1031 *
1032 */
1033static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
1034{
1035	preempt_disable();
1036	/* threshold event is triggered in finer grain than soft limit */
1037	if (unlikely(mem_cgroup_event_ratelimit(memcg,
1038						MEM_CGROUP_TARGET_THRESH))) {
1039		bool do_softlimit;
1040		bool do_numainfo __maybe_unused;
1041
1042		do_softlimit = mem_cgroup_event_ratelimit(memcg,
1043						MEM_CGROUP_TARGET_SOFTLIMIT);
1044#if MAX_NUMNODES > 1
1045		do_numainfo = mem_cgroup_event_ratelimit(memcg,
1046						MEM_CGROUP_TARGET_NUMAINFO);
1047#endif
1048		preempt_enable();
1049
1050		mem_cgroup_threshold(memcg);
1051		if (unlikely(do_softlimit))
1052			mem_cgroup_update_tree(memcg, page);
1053#if MAX_NUMNODES > 1
1054		if (unlikely(do_numainfo))
1055			atomic_inc(&memcg->numainfo_events);
1056#endif
1057	} else
1058		preempt_enable();
1059}
1060
1061struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
1062{
1063	/*
1064	 * mm_update_next_owner() may clear mm->owner to NULL
1065	 * if it races with swapoff, page migration, etc.
1066	 * So this can be called with p == NULL.
1067	 */
1068	if (unlikely(!p))
1069		return NULL;
1070
1071	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
1072}
1073
1074static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1075{
1076	struct mem_cgroup *memcg = NULL;
1077
1078	rcu_read_lock();
1079	do {
1080		/*
 1081		 * Page cache insertions can happen without an
1082		 * actual mm context, e.g. during disk probing
1083		 * on boot, loopback IO, acct() writes etc.
1084		 */
1085		if (unlikely(!mm))
1086			memcg = root_mem_cgroup;
1087		else {
1088			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1089			if (unlikely(!memcg))
1090				memcg = root_mem_cgroup;
1091		}
1092	} while (!css_tryget(&memcg->css));
1093	rcu_read_unlock();
1094	return memcg;
1095}
1096
1097/*
1098 * Returns a next (in a pre-order walk) alive memcg (with elevated css
1099 * ref. count) or NULL if the whole root's subtree has been visited.
1100 *
1101 * helper function to be used by mem_cgroup_iter
1102 */
1103static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
1104		struct mem_cgroup *last_visited)
1105{
1106	struct cgroup_subsys_state *prev_css, *next_css;
1107
1108	prev_css = last_visited ? &last_visited->css : NULL;
1109skip_node:
1110	next_css = css_next_descendant_pre(prev_css, &root->css);
1111
1112	/*
1113	 * Even if we found a group we have to make sure it is
1114	 * alive. css && !memcg means that the groups should be
1115	 * skipped and we should continue the tree walk.
1116	 * last_visited css is safe to use because it is
1117	 * protected by css_get and the tree walk is rcu safe.
1118	 *
1119	 * We do not take a reference on the root of the tree walk
1120	 * because we might race with the root removal when it would
1121	 * be the only node in the iterated hierarchy and mem_cgroup_iter
1122	 * would end up in an endless loop because it expects that at
1123	 * least one valid node will be returned. Root cannot disappear
1124	 * because caller of the iterator should hold it already so
1125	 * skipping css reference should be safe.
1126	 */
1127	if (next_css) {
1128		if ((next_css == &root->css) ||
1129		    ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
1130			return mem_cgroup_from_css(next_css);
1131
1132		prev_css = next_css;
1133		goto skip_node;
1134	}
1135
1136	return NULL;
1137}
1138
1139static void mem_cgroup_iter_invalidate(struct mem_cgroup *root)
1140{
1141	/*
1142	 * When a group in the hierarchy below root is destroyed, the
1143	 * hierarchy iterator can no longer be trusted since it might
1144	 * have pointed to the destroyed group.  Invalidate it.
1145	 */
1146	atomic_inc(&root->dead_count);
1147}
1148
1149static struct mem_cgroup *
1150mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter,
1151		     struct mem_cgroup *root,
1152		     int *sequence)
1153{
1154	struct mem_cgroup *position = NULL;
1155	/*
1156	 * A cgroup destruction happens in two stages: offlining and
1157	 * release.  They are separated by a RCU grace period.
1158	 *
1159	 * If the iterator is valid, we may still race with an
1160	 * offlining.  The RCU lock ensures the object won't be
1161	 * released, tryget will fail if we lost the race.
1162	 */
1163	*sequence = atomic_read(&root->dead_count);
1164	if (iter->last_dead_count == *sequence) {
1165		smp_rmb();
1166		position = iter->last_visited;
1167
1168		/*
1169		 * We cannot take a reference to root because we might race
1170		 * with root removal and returning NULL would end up in
1171		 * an endless loop on the iterator user level when root
1172		 * would be returned all the time.
1173		 */
1174		if (position && position != root &&
1175				!css_tryget(&position->css))
1176			position = NULL;
1177	}
1178	return position;
1179}
1180
1181static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
1182				   struct mem_cgroup *last_visited,
1183				   struct mem_cgroup *new_position,
1184				   struct mem_cgroup *root,
1185				   int sequence)
1186{
1187	/* root reference counting symmetric to mem_cgroup_iter_load */
1188	if (last_visited && last_visited != root)
1189		css_put(&last_visited->css);
1190	/*
1191	 * We store the sequence count from the time @last_visited was
1192	 * loaded successfully instead of rereading it here so that we
1193	 * don't lose destruction events in between.  We could have
1194	 * raced with the destruction of @new_position after all.
1195	 */
1196	iter->last_visited = new_position;
1197	smp_wmb();
1198	iter->last_dead_count = sequence;
1199}
1200
1201/**
1202 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1203 * @root: hierarchy root
1204 * @prev: previously returned memcg, NULL on first invocation
1205 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1206 *
1207 * Returns references to children of the hierarchy below @root, or
1208 * @root itself, or %NULL after a full round-trip.
1209 *
1210 * Caller must pass the return value in @prev on subsequent
1211 * invocations for reference counting, or use mem_cgroup_iter_break()
1212 * to cancel a hierarchy walk before the round-trip is complete.
1213 *
1214 * Reclaimers can specify a zone and a priority level in @reclaim to
1215 * divide up the memcgs in the hierarchy among all concurrent
1216 * reclaimers operating on the same zone and priority.
1217 */
1218struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1219				   struct mem_cgroup *prev,
1220				   struct mem_cgroup_reclaim_cookie *reclaim)
1221{
1222	struct mem_cgroup *memcg = NULL;
1223	struct mem_cgroup *last_visited = NULL;
1224
1225	if (mem_cgroup_disabled())
1226		return NULL;
1227
1228	if (!root)
1229		root = root_mem_cgroup;
1230
1231	if (prev && !reclaim)
1232		last_visited = prev;
1233
1234	if (!root->use_hierarchy && root != root_mem_cgroup) {
1235		if (prev)
1236			goto out_css_put;
1237		return root;
1238	}
1239
1240	rcu_read_lock();
1241	while (!memcg) {
1242		struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
1243		int uninitialized_var(seq);
1244
1245		if (reclaim) {
1246			int nid = zone_to_nid(reclaim->zone);
1247			int zid = zone_idx(reclaim->zone);
1248			struct mem_cgroup_per_zone *mz;
1249
1250			mz = mem_cgroup_zoneinfo(root, nid, zid);
1251			iter = &mz->reclaim_iter[reclaim->priority];
1252			if (prev && reclaim->generation != iter->generation) {
1253				iter->last_visited = NULL;
1254				goto out_unlock;
1255			}
1256
1257			last_visited = mem_cgroup_iter_load(iter, root, &seq);
1258		}
1259
1260		memcg = __mem_cgroup_iter_next(root, last_visited);
1261
1262		if (reclaim) {
1263			mem_cgroup_iter_update(iter, last_visited, memcg, root,
1264					seq);
1265
1266			if (!memcg)
1267				iter->generation++;
1268			else if (!prev && memcg)
1269				reclaim->generation = iter->generation;
1270		}
1271
1272		if (prev && !memcg)
1273			goto out_unlock;
1274	}
1275out_unlock:
1276	rcu_read_unlock();
1277out_css_put:
1278	if (prev && prev != root)
1279		css_put(&prev->css);
1280
1281	return memcg;
1282}
1283
1284/**
1285 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1286 * @root: hierarchy root
1287 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1288 */
1289void mem_cgroup_iter_break(struct mem_cgroup *root,
1290			   struct mem_cgroup *prev)
1291{
1292	if (!root)
1293		root = root_mem_cgroup;
1294	if (prev && prev != root)
1295		css_put(&prev->css);
1296}
1297
1298/*
1299 * Iteration constructs for visiting all cgroups (under a tree).  If
1300 * loops are exited prematurely (break), mem_cgroup_iter_break() must
1301 * be used for reference counting.
1302 */
1303#define for_each_mem_cgroup_tree(iter, root)		\
1304	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
1305	     iter != NULL;				\
1306	     iter = mem_cgroup_iter(root, iter, NULL))
1307
1308#define for_each_mem_cgroup(iter)			\
1309	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
1310	     iter != NULL;				\
1311	     iter = mem_cgroup_iter(NULL, iter, NULL))
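/*
 * A typical walk therefore looks like this (sketch; "done_early" stands for
 * any caller-specific early-exit condition):
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (done_early) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *		...
 *	}
 *
 * so that the css reference held on the last returned memcg is dropped
 * whether the loop completes or is aborted early.
 */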
1312
1313void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
1314{
1315	struct mem_cgroup *memcg;
1316
1317	rcu_read_lock();
1318	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1319	if (unlikely(!memcg))
1320		goto out;
1321
1322	switch (idx) {
1323	case PGFAULT:
1324		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
1325		break;
1326	case PGMAJFAULT:
1327		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
1328		break;
1329	default:
1330		BUG();
1331	}
1332out:
1333	rcu_read_unlock();
1334}
1335EXPORT_SYMBOL(__mem_cgroup_count_vm_event);
1336
1337/**
1338 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
1339 * @zone: zone of the wanted lruvec
1340 * @memcg: memcg of the wanted lruvec
1341 *
1342 * Returns the lru list vector holding pages for the given @zone and
1343 * @mem.  This can be the global zone lruvec, if the memory controller
1344 * is disabled.
1345 */
1346struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
1347				      struct mem_cgroup *memcg)
1348{
1349	struct mem_cgroup_per_zone *mz;
1350	struct lruvec *lruvec;
1351
1352	if (mem_cgroup_disabled()) {
1353		lruvec = &zone->lruvec;
1354		goto out;
1355	}
1356
1357	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
1358	lruvec = &mz->lruvec;
1359out:
1360	/*
1361	 * Since a node can be onlined after the mem_cgroup was created,
1362	 * we have to be prepared to initialize lruvec->zone here;
1363	 * and if offlined then reonlined, we need to reinitialize it.
1364	 */
1365	if (unlikely(lruvec->zone != zone))
1366		lruvec->zone = zone;
1367	return lruvec;
1368}
1369
1370/*
1371 * Following LRU functions are allowed to be used without PCG_LOCK.
1372 * Operations are called by routine of global LRU independently from memcg.
1373 * What we have to take care of here is validness of pc->mem_cgroup.
1374 *
1375 * Changes to pc->mem_cgroup happens when
1376 * 1. charge
1377 * 2. moving account
1378 * In typical case, "charge" is done before add-to-lru. Exception is SwapCache.
1379 * It is added to LRU before charge.
1380 * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
1381 * When moving account, the page is not on LRU. It's isolated.
1382 */
1383
1384/**
1385 * mem_cgroup_page_lruvec - return lruvec for adding an lru page
1386 * @page: the page
1387 * @zone: zone of the page
1388 */
1389struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
1390{
1391	struct mem_cgroup_per_zone *mz;
1392	struct mem_cgroup *memcg;
1393	struct page_cgroup *pc;
1394	struct lruvec *lruvec;
1395
1396	if (mem_cgroup_disabled()) {
1397		lruvec = &zone->lruvec;
1398		goto out;
1399	}
1400
1401	pc = lookup_page_cgroup(page);
1402	memcg = pc->mem_cgroup;
1403
1404	/*
1405	 * Surreptitiously switch any uncharged offlist page to root:
1406	 * an uncharged page off lru does nothing to secure
1407	 * its former mem_cgroup from sudden removal.
1408	 *
1409	 * Our caller holds lru_lock, and PageCgroupUsed is updated
1410	 * under page_cgroup lock: between them, they make all uses
1411	 * of pc->mem_cgroup safe.
1412	 */
1413	if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
1414		pc->mem_cgroup = memcg = root_mem_cgroup;
1415
1416	mz = page_cgroup_zoneinfo(memcg, page);
1417	lruvec = &mz->lruvec;
1418out:
1419	/*
1420	 * Since a node can be onlined after the mem_cgroup was created,
1421	 * we have to be prepared to initialize lruvec->zone here;
1422	 * and if offlined then reonlined, we need to reinitialize it.
1423	 */
1424	if (unlikely(lruvec->zone != zone))
1425		lruvec->zone = zone;
1426	return lruvec;
1427}
1428
1429/**
1430 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1431 * @lruvec: mem_cgroup per zone lru vector
1432 * @lru: index of lru list the page is sitting on
1433 * @nr_pages: positive when adding or negative when removing
1434 *
1435 * This function must be called when a page is added to or removed from an
1436 * lru list.
1437 */
1438void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1439				int nr_pages)
1440{
1441	struct mem_cgroup_per_zone *mz;
1442	unsigned long *lru_size;
1443
1444	if (mem_cgroup_disabled())
1445		return;
1446
1447	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1448	lru_size = mz->lru_size + lru;
1449	*lru_size += nr_pages;
1450	VM_BUG_ON((long)(*lru_size) < 0);
1451}
1452
1453/*
1454 * Checks whether given mem is same or in the root_mem_cgroup's
1455 * hierarchy subtree
1456 */
1457bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1458				  struct mem_cgroup *memcg)
1459{
1460	if (root_memcg == memcg)
1461		return true;
1462	if (!root_memcg->use_hierarchy || !memcg)
1463		return false;
1464	return cgroup_is_descendant(memcg->css.cgroup, root_memcg->css.cgroup);
1465}
1466
1467static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1468				       struct mem_cgroup *memcg)
1469{
1470	bool ret;
1471
1472	rcu_read_lock();
1473	ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
1474	rcu_read_unlock();
1475	return ret;
1476}
1477
1478bool task_in_mem_cgroup(struct task_struct *task,
1479			const struct mem_cgroup *memcg)
1480{
1481	struct mem_cgroup *curr = NULL;
1482	struct task_struct *p;
1483	bool ret;
1484
1485	p = find_lock_task_mm(task);
1486	if (p) {
1487		curr = get_mem_cgroup_from_mm(p->mm);
1488		task_unlock(p);
1489	} else {
1490		/*
1491		 * All threads may have already detached their mm's, but the oom
1492		 * killer still needs to detect if they have already been oom
1493		 * killed to prevent needlessly killing additional tasks.
1494		 */
1495		rcu_read_lock();
1496		curr = mem_cgroup_from_task(task);
1497		if (curr)
1498			css_get(&curr->css);
1499		rcu_read_unlock();
1500	}
1501	/*
 1502	 * We should check use_hierarchy of "memcg" not "curr". Checking
 1503	 * use_hierarchy of "curr" here would make this function return true if
 1504	 * hierarchy is enabled in "curr" and "curr" is a child of "memcg" in the
 1505	 * *cgroup* hierarchy (even if use_hierarchy is disabled in "memcg").
1506	 */
1507	ret = mem_cgroup_same_or_subtree(memcg, curr);
1508	css_put(&curr->css);
1509	return ret;
1510}
1511
1512int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
1513{
1514	unsigned long inactive_ratio;
1515	unsigned long inactive;
1516	unsigned long active;
1517	unsigned long gb;
1518
1519	inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
1520	active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
1521
1522	gb = (inactive + active) >> (30 - PAGE_SHIFT);
1523	if (gb)
1524		inactive_ratio = int_sqrt(10 * gb);
1525	else
1526		inactive_ratio = 1;
1527
1528	return inactive * inactive_ratio < active;
1529}
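/*
 * Example: with 4GB of anonymous pages on this lruvec, gb == 4 and
 * inactive_ratio == int_sqrt(40) == 6, so the inactive anon list is
 * reported as low once it is smaller than roughly 1/6 of the active list;
 * below 1GB the ratio falls back to 1.
 */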
1530
1531#define mem_cgroup_from_res_counter(counter, member)	\
1532	container_of(counter, struct mem_cgroup, member)
1533
1534/**
1535 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1536 * @memcg: the memory cgroup
1537 *
1538 * Returns the maximum amount of memory @mem can be charged with, in
1539 * pages.
1540 */
1541static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1542{
1543	unsigned long long margin;
1544
1545	margin = res_counter_margin(&memcg->res);
1546	if (do_swap_account)
1547		margin = min(margin, res_counter_margin(&memcg->memsw));
1548	return margin >> PAGE_SHIFT;
1549}
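/*
 * Example: with a 100MB limit and 70MB of usage, res_counter_margin()
 * reports 30MB and this returns 7680 pages (with 4K pages); with swap
 * accounting enabled, the smaller of the memory and mem+swap margins wins.
 */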
1550
1551int mem_cgroup_swappiness(struct mem_cgroup *memcg)
1552{
1553	/* root ? */
1554	if (!css_parent(&memcg->css))
1555		return vm_swappiness;
1556
1557	return memcg->swappiness;
1558}
1559
1560/*
1561 * memcg->moving_account is used for checking possibility that some thread is
1562 * calling move_account(). When a thread on CPU-A starts moving pages under
1563 * a memcg, other threads should check memcg->moving_account under
1564 * rcu_read_lock(), like this:
1565 *
1566 *         CPU-A                                    CPU-B
1567 *                                              rcu_read_lock()
 1568 *         memcg->moving_account+1              if (memcg->moving_account)
1569 *                                                   take heavy locks.
1570 *         synchronize_rcu()                    update something.
1571 *                                              rcu_read_unlock()
1572 *         start move here.
1573 */
1574
1575/* for quick checking without looking up memcg */
1576atomic_t memcg_moving __read_mostly;
1577
1578static void mem_cgroup_start_move(struct mem_cgroup *memcg)
1579{
1580	atomic_inc(&memcg_moving);
1581	atomic_inc(&memcg->moving_account);
1582	synchronize_rcu();
1583}
1584
1585static void mem_cgroup_end_move(struct mem_cgroup *memcg)
1586{
1587	/*
1588	 * Now, mem_cgroup_clear_mc() may call this function with NULL.
1589	 * We check NULL in callee rather than caller.
1590	 */
1591	if (memcg) {
1592		atomic_dec(&memcg_moving);
1593		atomic_dec(&memcg->moving_account);
1594	}
1595}
1596
1597/*
1598 * 2 routines for checking "mem" is under move_account() or not.
1599 *
1600 * mem_cgroup_stolen() -  checking whether a cgroup is mc.from or not. This
1601 *			  is used for avoiding races in accounting.  If true,
1602 *			  pc->mem_cgroup may be overwritten.
1603 *
 1604 * mem_cgroup_under_move() - checking whether a cgroup is mc.from or mc.to or
 1605 *			  under the hierarchy of moving cgroups. This is for
 1606 *			  waiting at high memory pressure caused by "move".
1607 */
1608
1609static bool mem_cgroup_stolen(struct mem_cgroup *memcg)
1610{
1611	VM_BUG_ON(!rcu_read_lock_held());
1612	return atomic_read(&memcg->moving_account) > 0;
1613}
1614
1615static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1616{
1617	struct mem_cgroup *from;
1618	struct mem_cgroup *to;
1619	bool ret = false;
1620	/*
1621	 * Unlike task_move routines, we access mc.to, mc.from not under
1622	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1623	 */
1624	spin_lock(&mc.lock);
1625	from = mc.from;
1626	to = mc.to;
1627	if (!from)
1628		goto unlock;
1629
1630	ret = mem_cgroup_same_or_subtree(memcg, from)
1631		|| mem_cgroup_same_or_subtree(memcg, to);
1632unlock:
1633	spin_unlock(&mc.lock);
1634	return ret;
1635}
1636
1637static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1638{
1639	if (mc.moving_task && current != mc.moving_task) {
1640		if (mem_cgroup_under_move(memcg)) {
1641			DEFINE_WAIT(wait);
1642			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1643			/* moving charge context might have finished. */
1644			if (mc.moving_task)
1645				schedule();
1646			finish_wait(&mc.waitq, &wait);
1647			return true;
1648		}
1649	}
1650	return false;
1651}
1652
1653/*
1654 * Take this lock when
 1655 * - code tries to modify a page's memcg while it's USED.
 1656 * - code tries to modify page state accounting in a memcg.
1657 * see mem_cgroup_stolen(), too.
1658 */
1659static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
1660				  unsigned long *flags)
1661{
1662	spin_lock_irqsave(&memcg->move_lock, *flags);
1663}
1664
1665static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
1666				unsigned long *flags)
1667{
1668	spin_unlock_irqrestore(&memcg->move_lock, *flags);
1669}
1670
1671#define K(x) ((x) << (PAGE_SHIFT-10))
1672/**
1673 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
1674 * @memcg: The memory cgroup that went over limit
1675 * @p: Task that is going to be killed
1676 *
1677 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1678 * enabled
1679 */
1680void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1681{
1682	/* oom_info_lock ensures that parallel ooms do not interleave */
1683	static DEFINE_MUTEX(oom_info_lock);
1684	struct mem_cgroup *iter;
1685	unsigned int i;
1686
1687	if (!p)
1688		return;
1689
1690	mutex_lock(&oom_info_lock);
1691	rcu_read_lock();
1692
1693	pr_info("Task in ");
1694	pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1695	pr_info(" killed as a result of limit of ");
1696	pr_cont_cgroup_path(memcg->css.cgroup);
1697	pr_info("\n");
1698
1699	rcu_read_unlock();
1700
1701	pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
1702		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1703		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1704		res_counter_read_u64(&memcg->res, RES_FAILCNT));
1705	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %llu\n",
1706		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1707		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1708		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1709	pr_info("kmem: usage %llukB, limit %llukB, failcnt %llu\n",
1710		res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10,
1711		res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10,
1712		res_counter_read_u64(&memcg->kmem, RES_FAILCNT));
1713
1714	for_each_mem_cgroup_tree(iter, memcg) {
1715		pr_info("Memory cgroup stats for ");
1716		pr_cont_cgroup_path(iter->css.cgroup);
1717		pr_cont(":");
1718
1719		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1720			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1721				continue;
1722			pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
1723				K(mem_cgroup_read_stat(iter, i)));
1724		}
1725
1726		for (i = 0; i < NR_LRU_LISTS; i++)
1727			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1728				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1729
1730		pr_cont("\n");
1731	}
1732	mutex_unlock(&oom_info_lock);
1733}
1734
1735/*
 1736 * This function returns the number of memcgs under the hierarchy tree.
 1737 * Returns 1 (self count) if there are no children.
1738 */
1739static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1740{
1741	int num = 0;
1742	struct mem_cgroup *iter;
1743
1744	for_each_mem_cgroup_tree(iter, memcg)
1745		num++;
1746	return num;
1747}
1748
1749/*
1750 * Return the memory (and swap, if configured) limit for a memcg.
1751 */
1752static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
1753{
1754	u64 limit;
1755
1756	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1757
1758	/*
1759	 * Do not consider swap space if we cannot swap due to swappiness
1760	 */
1761	if (mem_cgroup_swappiness(memcg)) {
1762		u64 memsw;
1763
1764		limit += total_swap_pages << PAGE_SHIFT;
1765		memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1766
1767		/*
1768		 * If memsw is finite and limits the amount of swap space
1769		 * available to this memcg, return that limit.
1770		 */
1771		limit = min(limit, memsw);
1772	}
1773
1774	return limit;
1775}
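/*
 * Illustrative worked example (not part of the original file; the numbers
 * are hypothetical): with a 512MB memory limit, 1GB of total swap and a
 * 1GB memory+swap (memsw) limit, and a non-zero swappiness, this returns
 *
 *	min(512MB + 1GB, 1GB) = 1GB
 *
 * i.e. a finite memsw limit caps how much swap the OOM code below may
 * assume is usable when it derives totalpages for badness scoring.
 */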
1776
1777static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1778				     int order)
1779{
1780	struct mem_cgroup *iter;
1781	unsigned long chosen_points = 0;
1782	unsigned long totalpages;
1783	unsigned int points = 0;
1784	struct task_struct *chosen = NULL;
1785
1786	/*
1787	 * If current has a pending SIGKILL or is exiting, then automatically
1788	 * select it.  The goal is to allow it to allocate so that it may
1789	 * quickly exit and free its memory.
1790	 */
1791	if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
1792		set_thread_flag(TIF_MEMDIE);
1793		return;
1794	}
1795
1796	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
1797	totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
1798	for_each_mem_cgroup_tree(iter, memcg) {
1799		struct css_task_iter it;
1800		struct task_struct *task;
1801
1802		css_task_iter_start(&iter->css, &it);
1803		while ((task = css_task_iter_next(&it))) {
1804			switch (oom_scan_process_thread(task, totalpages, NULL,
1805							false)) {
1806			case OOM_SCAN_SELECT:
1807				if (chosen)
1808					put_task_struct(chosen);
1809				chosen = task;
1810				chosen_points = ULONG_MAX;
1811				get_task_struct(chosen);
1812				/* fall through */
1813			case OOM_SCAN_CONTINUE:
1814				continue;
1815			case OOM_SCAN_ABORT:
1816				css_task_iter_end(&it);
1817				mem_cgroup_iter_break(memcg, iter);
1818				if (chosen)
1819					put_task_struct(chosen);
1820				return;
1821			case OOM_SCAN_OK:
1822				break;
1823			};
1824			points = oom_badness(task, memcg, NULL, totalpages);
1825			if (!points || points < chosen_points)
1826				continue;
1827			/* Prefer thread group leaders for display purposes */
1828			if (points == chosen_points &&
1829			    thread_group_leader(chosen))
1830				continue;
1831
1832			if (chosen)
1833				put_task_struct(chosen);
1834			chosen = task;
1835			chosen_points = points;
1836			get_task_struct(chosen);
1837		}
1838		css_task_iter_end(&it);
1839	}
1840
1841	if (!chosen)
1842		return;
1843	points = chosen_points * 1000 / totalpages;
1844	oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
1845			 NULL, "Memory cgroup out of memory");
1846}
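/*
 * Illustrative sketch of the final scaling above (hypothetical numbers,
 * not part of the original file): with totalpages = 262144 (a 1GB limit)
 * and a chosen task whose oom_badness() score is 131072 pages, the value
 * handed to oom_kill_process() is
 *
 *	points = 131072 * 1000 / 262144 = 500
 *
 * i.e. badness is reported on a roughly 0-1000 scale relative to the
 * memcg limit rather than as a raw page count.
 */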
1847
1848static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
1849					gfp_t gfp_mask,
1850					unsigned long flags)
1851{
1852	unsigned long total = 0;
1853	bool noswap = false;
1854	int loop;
1855
1856	if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
1857		noswap = true;
1858	if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
1859		noswap = true;
1860
1861	for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
1862		if (loop)
1863			drain_all_stock_async(memcg);
1864		total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
1865		/*
1866		 * Allow limit shrinkers, which are triggered directly
1867		 * by userspace, to catch signals and stop reclaim
1868		 * after minimal progress, regardless of the margin.
1869		 */
1870		if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
1871			break;
1872		if (mem_cgroup_margin(memcg))
1873			break;
1874		/*
1875		 * If nothing was reclaimed after two attempts, there
1876		 * may be no reclaimable pages in this hierarchy.
1877		 */
1878		if (loop && !total)
1879			break;
1880	}
1881	return total;
1882}
1883
1884/**
1885 * test_mem_cgroup_node_reclaimable
1886 * @memcg: the target memcg
1887 * @nid: the node ID to be checked.
1888 * @noswap: specify true here if the user wants file-only information.
1889 *
1890 * This function returns whether the specified memcg contains any
1891 * reclaimable pages on a node. Returns true if there are any reclaimable
1892 * pages in the node.
1893 */
1894static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1895		int nid, bool noswap)
1896{
1897	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1898		return true;
1899	if (noswap || !total_swap_pages)
1900		return false;
1901	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1902		return true;
1903	return false;
1904
1905}
1906#if MAX_NUMNODES > 1
1907
1908/*
1909 * Always updating the nodemask is not very good - even if we have an empty
1910 * list or the wrong list here, we can start from some node and traverse all
1911 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1912 *
1913 */
1914static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1915{
1916	int nid;
1917	/*
1918	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1919	 * pagein/pageout changes since the last update.
1920	 */
1921	if (!atomic_read(&memcg->numainfo_events))
1922		return;
1923	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1924		return;
1925
1926	/* make a nodemask where this memcg uses memory from */
1927	memcg->scan_nodes = node_states[N_MEMORY];
1928
1929	for_each_node_mask(nid, node_states[N_MEMORY]) {
1930
1931		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1932			node_clear(nid, memcg->scan_nodes);
1933	}
1934
1935	atomic_set(&memcg->numainfo_events, 0);
1936	atomic_set(&memcg->numainfo_updating, 0);
1937}
1938
1939/*
1940 * Select a node to start reclaim from. Because what we need is just
1941 * reducing the usage counter, starting from anywhere is OK. Reclaiming
1942 * from the current node has both pros and cons.
1943 *
1944 * Freeing memory from the current node means freeing memory from a node which
1945 * we will use or have used, so it may hurt LRU behaviour. And if several
1946 * threads hit their limits, they will contend on that node. But freeing from a
1947 * remote node costs more for memory reclaim because of memory latency.
1948 *
1949 * For now we use round-robin. A better algorithm is welcome.
1950 */
1951int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1952{
1953	int node;
1954
1955	mem_cgroup_may_update_nodemask(memcg);
1956	node = memcg->last_scanned_node;
1957
1958	node = next_node(node, memcg->scan_nodes);
1959	if (node == MAX_NUMNODES)
1960		node = first_node(memcg->scan_nodes);
1961	/*
1962	 * We call this when we hit the limit, not when pages are added to the LRU.
1963	 * No LRU may hold pages because all pages are UNEVICTABLE, or the
1964	 * memcg is too small and none of its pages are on an LRU. In that case,
1965	 * we use the current node.
1966	 */
1967	if (unlikely(node == MAX_NUMNODES))
1968		node = numa_node_id();
1969
1970	memcg->last_scanned_node = node;
1971	return node;
1972}
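/*
 * Illustrative sketch (hypothetical values, not part of the original
 * file): assume scan_nodes = {0, 2} after mem_cgroup_may_update_nodemask().
 *
 *	last_scanned_node = 0  ->  next_node(0, scan_nodes) = 2
 *	last_scanned_node = 2  ->  next_node(2, scan_nodes) = MAX_NUMNODES,
 *	                           so we wrap to first_node(scan_nodes) = 0
 *
 * If scan_nodes is empty (e.g. everything is unevictable), both lookups
 * return MAX_NUMNODES and we fall back to numa_node_id().
 */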
1973
1974/*
1975 * Check all nodes for whether they contain reclaimable pages or not.
1976 * For a quick scan, we make use of scan_nodes. This allows us to skip
1977 * unused nodes. But scan_nodes is lazily updated and may not contain
1978 * enough new information, so we need to double check.
1979 */
1980static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1981{
1982	int nid;
1983
1984	/*
1985	 * quick check... making use of scan_nodes.
1986	 * We can skip unused nodes.
1987	 */
1988	if (!nodes_empty(memcg->scan_nodes)) {
1989		for (nid = first_node(memcg->scan_nodes);
1990		     nid < MAX_NUMNODES;
1991		     nid = next_node(nid, memcg->scan_nodes)) {
1992
1993			if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1994				return true;
1995		}
1996	}
1997	/*
1998	 * Check rest of nodes.
1999	 */
2000	for_each_node_state(nid, N_MEMORY) {
2001		if (node_isset(nid, memcg->scan_nodes))
2002			continue;
2003		if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
2004			return true;
2005	}
2006	return false;
2007}
2008
2009#else
2010int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
2011{
2012	return 0;
2013}
2014
2015static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
2016{
2017	return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
2018}
2019#endif
2020
2021static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
2022				   struct zone *zone,
2023				   gfp_t gfp_mask,
2024				   unsigned long *total_scanned)
2025{
2026	struct mem_cgroup *victim = NULL;
2027	int total = 0;
2028	int loop = 0;
2029	unsigned long excess;
2030	unsigned long nr_scanned;
2031	struct mem_cgroup_reclaim_cookie reclaim = {
2032		.zone = zone,
2033		.priority = 0,
2034	};
2035
2036	excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
2037
2038	while (1) {
2039		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
2040		if (!victim) {
2041			loop++;
2042			if (loop >= 2) {
2043				/*
2044				 * If we have not been able to reclaim
2045				 * anything, it might be because there are
2046				 * no reclaimable pages under this hierarchy
2047				 */
2048				if (!total)
2049					break;
2050				/*
2051				 * We want to do more targeted reclaim.
2052				 * excess >> 2 is not too excessive, so we neither
2053				 * reclaim too much, nor too little such that we keep
2054				 * coming back to reclaim from this cgroup
2055				 */
2056				if (total >= (excess >> 2) ||
2057					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
2058					break;
2059			}
2060			continue;
2061		}
2062		if (!mem_cgroup_reclaimable(victim, false))
2063			continue;
2064		total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
2065						     zone, &nr_scanned);
2066		*total_scanned += nr_scanned;
2067		if (!res_counter_soft_limit_excess(&root_memcg->res))
2068			break;
2069	}
2070	mem_cgroup_iter_break(root_memcg, victim);
2071	return total;
2072}
2073
2074#ifdef CONFIG_LOCKDEP
2075static struct lockdep_map memcg_oom_lock_dep_map = {
2076	.name = "memcg_oom_lock",
2077};
2078#endif
2079
2080static DEFINE_SPINLOCK(memcg_oom_lock);
2081
2082/*
2083 * Check whether the OOM killer is already running under our hierarchy.
2084 * If someone else is running it, return false.
2085 */
2086static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
2087{
2088	struct mem_cgroup *iter, *failed = NULL;
2089
2090	spin_lock(&memcg_oom_lock);
2091
2092	for_each_mem_cgroup_tree(iter, memcg) {
2093		if (iter->oom_lock) {
2094			/*
2095			 * this subtree of our hierarchy is already locked
2096			 * so we cannot give a lock.
2097			 */
2098			failed = iter;
2099			mem_cgroup_iter_break(memcg, iter);
2100			break;
2101		} else
2102			iter->oom_lock = true;
2103	}
2104
2105	if (failed) {
2106		/*
2107		 * OK, we failed to lock the whole subtree so we have
2108		 * to clean up what we set up to the failing subtree
2109		 */
2110		for_each_mem_cgroup_tree(iter, memcg) {
2111			if (iter == failed) {
2112				mem_cgroup_iter_break(memcg, iter);
2113				break;
2114			}
2115			iter->oom_lock = false;
2116		}
2117	} else
2118		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
2119
2120	spin_unlock(&memcg_oom_lock);
2121
2122	return !failed;
2123}
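/*
 * Sketch of the intended caller pattern (illustration only; see
 * mem_cgroup_oom_synchronize() further below for the in-tree user):
 *
 *	locked = mem_cgroup_oom_trylock(memcg);
 *	if (locked)
 *		mem_cgroup_oom_notify(memcg);
 *	if (locked && !memcg->oom_kill_disable)
 *		mem_cgroup_out_of_memory(memcg, gfp_mask, order);
 *	else
 *		schedule();		- wait for userspace or the lock holder
 *	if (locked)
 *		mem_cgroup_oom_unlock(memcg);
 *
 * Only one hierarchy-wide OOM handler runs at a time; contenders that lose
 * the trylock sleep on memcg_oom_waitq and are woken via memcg_oom_recover().
 */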
2124
2125static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
2126{
2127	struct mem_cgroup *iter;
2128
2129	spin_lock(&memcg_oom_lock);
2130	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
2131	for_each_mem_cgroup_tree(iter, memcg)
2132		iter->oom_lock = false;
2133	spin_unlock(&memcg_oom_lock);
2134}
2135
2136static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
2137{
2138	struct mem_cgroup *iter;
2139
 
2140	for_each_mem_cgroup_tree(iter, memcg)
2141		atomic_inc(&iter->under_oom);
 
2142}
2143
2144static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
2145{
2146	struct mem_cgroup *iter;
2147
2148	/*
2149	 * When a new child is created while the hierarchy is under oom,
2150	 * mem_cgroup_oom_lock() may not be called. We have to use
2151	 * atomic_add_unless() here.
2152	 */
 
2153	for_each_mem_cgroup_tree(iter, memcg)
2154		atomic_add_unless(&iter->under_oom, -1, 0);
2155}
2156
2157static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
2158
2159struct oom_wait_info {
2160	struct mem_cgroup *memcg;
2161	wait_queue_t	wait;
2162};
2163
2164static int memcg_oom_wake_function(wait_queue_t *wait,
2165	unsigned mode, int sync, void *arg)
2166{
2167	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
2168	struct mem_cgroup *oom_wait_memcg;
2169	struct oom_wait_info *oom_wait_info;
2170
2171	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
2172	oom_wait_memcg = oom_wait_info->memcg;
2173
2174	/*
2175	 * Both of oom_wait_info->memcg and wake_memcg are stable under us.
2176	 * Then we can use css_is_ancestor without taking care of RCU.
2177	 */
2178	if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
2179		&& !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
2180		return 0;
2181	return autoremove_wake_function(wait, mode, sync, arg);
2182}
2183
2184static void memcg_wakeup_oom(struct mem_cgroup *memcg)
2185{
2186	atomic_inc(&memcg->oom_wakeups);
2187	/* for filtering, pass "memcg" as argument. */
2188	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
2189}
2190
2191static void memcg_oom_recover(struct mem_cgroup *memcg)
2192{
2193	if (memcg && atomic_read(&memcg->under_oom))
2194		memcg_wakeup_oom(memcg);
2195}
2196
2197static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
2198{
2199	if (!current->memcg_oom.may_oom)
2200		return;
2201	/*
2202	 * We are in the middle of the charge context here, so we
2203	 * don't want to block when potentially sitting on a callstack
2204	 * that holds all kinds of filesystem and mm locks.
2205	 *
2206	 * Also, the caller may handle a failed allocation gracefully
2207	 * (like optional page cache readahead) and so an OOM killer
2208	 * invocation might not even be necessary.
2209	 *
2210	 * That's why we don't do anything here except remember the
2211	 * OOM context and then deal with it at the end of the page
2212	 * fault when the stack is unwound, the locks are released,
2213	 * and when we know whether the fault was overall successful.
2214	 */
2215	css_get(&memcg->css);
2216	current->memcg_oom.memcg = memcg;
2217	current->memcg_oom.gfp_mask = mask;
2218	current->memcg_oom.order = order;
2219}
2220
2221/**
2222 * mem_cgroup_oom_synchronize - complete memcg OOM handling
2223 * @handle: actually kill/wait or just clean up the OOM state
2224 *
2225 * This has to be called at the end of a page fault if the memcg OOM
2226 * handler was enabled.
2227 *
2228 * Memcg supports userspace OOM handling where failed allocations must
2229 * sleep on a waitqueue until the userspace task resolves the
2230 * situation.  Sleeping directly in the charge context with all kinds
2231 * of locks held is not a good idea, instead we remember an OOM state
2232 * in the task and mem_cgroup_oom_synchronize() has to be called at
2233 * the end of the page fault to complete the OOM handling.
2234 *
2235 * Returns %true if an ongoing memcg OOM situation was detected and
2236 * completed, %false otherwise.
2237 */
2238bool mem_cgroup_oom_synchronize(bool handle)
2239{
2240	struct mem_cgroup *memcg = current->memcg_oom.memcg;
2241	struct oom_wait_info owait;
2242	bool locked;
2243
2244	/* OOM is global, do not handle */
2245	if (!memcg)
2246		return false;
2247
2248	if (!handle)
2249		goto cleanup;
2250
2251	owait.memcg = memcg;
2252	owait.wait.flags = 0;
2253	owait.wait.func = memcg_oom_wake_function;
2254	owait.wait.private = current;
2255	INIT_LIST_HEAD(&owait.wait.task_list);
2256
2257	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
2258	mem_cgroup_mark_under_oom(memcg);
2259
2260	locked = mem_cgroup_oom_trylock(memcg);
2261
2262	if (locked)
2263		mem_cgroup_oom_notify(memcg);
2264
2265	if (locked && !memcg->oom_kill_disable) {
2266		mem_cgroup_unmark_under_oom(memcg);
2267		finish_wait(&memcg_oom_waitq, &owait.wait);
2268		mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
2269					 current->memcg_oom.order);
2270	} else {
2271		schedule();
2272		mem_cgroup_unmark_under_oom(memcg);
2273		finish_wait(&memcg_oom_waitq, &owait.wait);
2274	}
2275
2276	if (locked) {
2277		mem_cgroup_oom_unlock(memcg);
2278		/*
2279		 * There is no guarantee that an OOM-lock contender
2280		 * sees the wakeups triggered by the OOM kill
2281		 * uncharges.  Wake any sleepers explicitly.
2282		 */
2283		memcg_oom_recover(memcg);
2284	}
2285cleanup:
2286	current->memcg_oom.memcg = NULL;
2287	css_put(&memcg->css);
2288	return true;
2289}
2290
2291/*
2292 * Currently used to update mapped file statistics, but the routine can be
2293 * generalized to update other statistics as well.
2294 *
2295 * Notes: Race condition
2296 *
2297 * We usually use page_cgroup_lock() for accessing page_cgroup member but
2298 * it tends to be costly. But under some conditions, we don't need
2299 * to do so _always_.
2300 *
2301 * Considering "charge", lock_page_cgroup() is not required because all
2302 * file-stat operations happen after a page is attached to the radix tree. There
2303 * is no race with "charge".
2304 *
2305 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
2306 * at "uncharge" intentionally. So, we always see valid pc->mem_cgroup even
2307 * if there is a race with "uncharge". The statistics themselves are properly handled
2308 * by flags.
2309 *
2310 * Considering "move", this is the only case where we see a race. To keep the
2311 * race window small, we check mm->moving_account to detect a possible race.
2312 * If there is one, we take a lock.
2313 */
2314
2315void __mem_cgroup_begin_update_page_stat(struct page *page,
2316				bool *locked, unsigned long *flags)
2317{
 
2318	struct mem_cgroup *memcg;
2319	struct page_cgroup *pc;
2320
2321	pc = lookup_page_cgroup(page);
2322again:
2323	memcg = pc->mem_cgroup;
2324	if (unlikely(!memcg || !PageCgroupUsed(pc)))
2325		return;
2326	/*
2327	 * If this memory cgroup is not under account moving, we don't
2328	 * need to take move_lock_mem_cgroup(). Because we already hold
2329	 * rcu_read_lock(), any calls to move_account will be delayed until
2330	 * rcu_read_unlock() if mem_cgroup_stolen() == true.
2331	 */
2332	if (!mem_cgroup_stolen(memcg))
2333		return;
2334
2335	move_lock_mem_cgroup(memcg, flags);
2336	if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
2337		move_unlock_mem_cgroup(memcg, flags);
2338		goto again;
2339	}
2340	*locked = true;
2341}
2342
2343void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
2344{
2345	struct page_cgroup *pc = lookup_page_cgroup(page);
2346
2347	/*
2348	 * It's guaranteed that pc->mem_cgroup never changes while
2349	 * lock is held because a routine modifies pc->mem_cgroup
2350	 * should take move_lock_mem_cgroup().
2351	 */
2352	move_unlock_mem_cgroup(pc->mem_cgroup, flags);
2353}
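/*
 * Sketch of how a statistics updater brackets its change (illustration
 * only; the mem_cgroup_begin/end_update_page_stat() wrapper names are
 * assumed from include/linux/memcontrol.h of this series, where they add
 * the rcu_read_lock()/unlock() around the __ variants defined here):
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	if (the mapping state actually changed)
 *		mem_cgroup_update_page_stat(page,
 *					    MEM_CGROUP_STAT_FILE_MAPPED, 1);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 *
 * The begin/end pair only takes move_lock_mem_cgroup() while a task move
 * is in flight, so the common case stays a plain RCU-protected percpu add.
 */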
2354
2355void mem_cgroup_update_page_stat(struct page *page,
2356				 enum mem_cgroup_stat_index idx, int val)
2357{
2358	struct mem_cgroup *memcg;
2359	struct page_cgroup *pc = lookup_page_cgroup(page);
2360	unsigned long uninitialized_var(flags);
2361
2362	if (mem_cgroup_disabled())
2363		return;
2364
2365	VM_BUG_ON(!rcu_read_lock_held());
2366	memcg = pc->mem_cgroup;
2367	if (unlikely(!memcg || !PageCgroupUsed(pc)))
2368		return;
2369
2370	this_cpu_add(memcg->stat->count[idx], val);
2371}
2372
2373/*
2374 * size of first charge trial. "32" comes from vmscan.c's magic value.
2375 * TODO: it may be necessary to use bigger numbers on big iron.
2376 */
2377#define CHARGE_BATCH	32U
2378struct memcg_stock_pcp {
 
2379	struct mem_cgroup *cached; /* this is never the root cgroup */
2380	unsigned int nr_pages;
2381	struct work_struct work;
2382	unsigned long flags;
2383#define FLUSHING_CACHED_CHARGE	0
2384};
2385static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2386static DEFINE_MUTEX(percpu_charge_mutex);
2387
2388/**
2389 * consume_stock: Try to consume stocked charge on this cpu.
2390 * @memcg: memcg to consume from.
2391 * @nr_pages: how many pages to charge.
2392 *
2393 * The charges will only happen if @memcg matches the current cpu's memcg
2394 * stock, and at least @nr_pages are available in that stock.  Failure to
2395 * service an allocation will refill the stock.
2396 *
2397 * returns true if successful, false otherwise.
2398 */
2399static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2400{
2401	struct memcg_stock_pcp *stock;
2402	bool ret = true;
 
2403
2404	if (nr_pages > CHARGE_BATCH)
2405		return false;
2406
2407	stock = &get_cpu_var(memcg_stock);
2408	if (memcg == stock->cached && stock->nr_pages >= nr_pages)
2409		stock->nr_pages -= nr_pages;
2410	else /* need to call res_counter_charge */
2411		ret = false;
2412	put_cpu_var(memcg_stock);
2413	return ret;
2414}
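/*
 * Illustrative flow of the percpu stock (not part of the original file;
 * mem_cgroup_try_charge() further below is the real caller):
 *
 *	if (consume_stock(memcg, nr_pages))	- fast path, no res_counter op
 *		return 0;
 *	- otherwise the slow path charges a full CHARGE_BATCH:
 *	ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, nr_pages, ...);
 *	- and the surplus is parked locally for subsequent allocations:
 *	if (batch > nr_pages)
 *		refill_stock(memcg, batch - nr_pages);
 *
 * This keeps most single-page charges off the shared res_counter locks.
 */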
2415
2416/*
2417 * Return stocks cached in percpu caches to the res_counter and reset the cached information.
2418 */
2419static void drain_stock(struct memcg_stock_pcp *stock)
2420{
2421	struct mem_cgroup *old = stock->cached;
2422
2423	if (stock->nr_pages) {
2424		unsigned long bytes = stock->nr_pages * PAGE_SIZE;
2425
2426		res_counter_uncharge(&old->res, bytes);
2427		if (do_swap_account)
2428			res_counter_uncharge(&old->memsw, bytes);
 
2429		stock->nr_pages = 0;
2430	}
2431	stock->cached = NULL;
2432}
2433
2434/*
2435 * This must be called under preempt disabled or must be called by
2436 * a thread which is pinned to local cpu.
2437 */
2438static void drain_local_stock(struct work_struct *dummy)
2439{
2440	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
2441	drain_stock(stock);
2442	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2443}
2444
2445static void __init memcg_stock_init(void)
2446{
2447	int cpu;
2448
2449	for_each_possible_cpu(cpu) {
2450		struct memcg_stock_pcp *stock =
2451					&per_cpu(memcg_stock, cpu);
2452		INIT_WORK(&stock->work, drain_local_stock);
2453	}
2454}
2455
2456/*
2457 * Cache charges(val) which is from res_counter, to local per_cpu area.
2458 * This will be consumed by consume_stock() function, later.
2459 */
2460static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2461{
2462	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2463
2464	if (stock->cached != memcg) { /* reset if necessary */
 
2465		drain_stock(stock);
2466		stock->cached = memcg;
 
2467	}
2468	stock->nr_pages += nr_pages;
2469	put_cpu_var(memcg_stock);
2470}
2471
2472/*
2473 * Drains all per-CPU charge caches for given root_memcg resp. subtree
2474 * of the hierarchy under it. sync flag says whether we should block
2475 * until the work is done.
2476 */
2477static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
2478{
2479	int cpu, curcpu;
2480
2481	/* Notify other cpus that system-wide "drain" is running */
2482	get_online_cpus();
2483	curcpu = get_cpu();
2484	for_each_online_cpu(cpu) {
2485		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2486		struct mem_cgroup *memcg;
 
2487
2488		memcg = stock->cached;
2489		if (!memcg || !stock->nr_pages)
2490			continue;
2491		if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
2492			continue;
2493		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2494			if (cpu == curcpu)
2495				drain_local_stock(&stock->work);
2496			else
2497				schedule_work_on(cpu, &stock->work);
2498		}
2499	}
2500	put_cpu();
2501
2502	if (!sync)
2503		goto out;
2504
2505	for_each_online_cpu(cpu) {
2506		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2507		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
2508			flush_work(&stock->work);
2509	}
2510out:
2511	put_online_cpus();
2512}
2513
2514/*
2515 * Tries to drain stocked charges in other cpus. This function is asynchronous
2516 * and just schedules a work item per cpu to drain locally on each cpu. The
2517 * caller can expect some charges to be returned to the res_counter later, but
2518 * cannot wait for that.
2519 */
2520static void drain_all_stock_async(struct mem_cgroup *root_memcg)
2521{
2522	/*
2523	 * If someone calls draining, avoid adding more kworker runs.
2524	 */
2525	if (!mutex_trylock(&percpu_charge_mutex))
2526		return;
2527	drain_all_stock(root_memcg, false);
2528	mutex_unlock(&percpu_charge_mutex);
2529}
2530
2531/* This is a synchronous drain interface. */
2532static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
2533{
2534	/* called when force_empty is called */
2535	mutex_lock(&percpu_charge_mutex);
2536	drain_all_stock(root_memcg, true);
2537	mutex_unlock(&percpu_charge_mutex);
2538}
2539
2540/*
2541 * This function drains percpu counter value from DEAD cpu and
2542 * move it to local cpu. Note that this function can be preempted.
2543 */
2544static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
2545{
2546	int i;
2547
2548	spin_lock(&memcg->pcp_counter_lock);
2549	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
2550		long x = per_cpu(memcg->stat->count[i], cpu);
2551
2552		per_cpu(memcg->stat->count[i], cpu) = 0;
2553		memcg->nocpu_base.count[i] += x;
2554	}
2555	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
2556		unsigned long x = per_cpu(memcg->stat->events[i], cpu);
2557
2558		per_cpu(memcg->stat->events[i], cpu) = 0;
2559		memcg->nocpu_base.events[i] += x;
2560	}
2561	spin_unlock(&memcg->pcp_counter_lock);
2562}
2563
2564static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
2565					unsigned long action,
2566					void *hcpu)
2567{
2568	int cpu = (unsigned long)hcpu;
2569	struct memcg_stock_pcp *stock;
2570	struct mem_cgroup *iter;
2571
2572	if (action == CPU_ONLINE)
2573		return NOTIFY_OK;
2574
2575	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
2576		return NOTIFY_OK;
 
2577
2578	for_each_mem_cgroup(iter)
2579		mem_cgroup_drain_pcp_counter(iter, cpu);
2580
2581	stock = &per_cpu(memcg_stock, cpu);
2582	drain_stock(stock);
2583	return NOTIFY_OK;
2584}
2585
2586
2587/* See mem_cgroup_try_charge() for details */
2588enum {
2589	CHARGE_OK,		/* success */
2590	CHARGE_RETRY,		/* need to retry but retry is not bad */
2591	CHARGE_NOMEM,		/* we can't do more. return -ENOMEM */
2592	CHARGE_WOULDBLOCK,	/* GFP_WAIT wasn't set and no enough res. */
2593};
2594
2595static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2596				unsigned int nr_pages, unsigned int min_pages,
2597				bool invoke_oom)
2598{
2599	unsigned long csize = nr_pages * PAGE_SIZE;
2600	struct mem_cgroup *mem_over_limit;
2601	struct res_counter *fail_res;
2602	unsigned long flags = 0;
2603	int ret;
2604
2605	ret = res_counter_charge(&memcg->res, csize, &fail_res);
2606
2607	if (likely(!ret)) {
2608		if (!do_swap_account)
2609			return CHARGE_OK;
2610		ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
2611		if (likely(!ret))
2612			return CHARGE_OK;
2613
2614		res_counter_uncharge(&memcg->res, csize);
2615		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
2616		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
2617	} else
2618		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
2619	/*
2620	 * Never reclaim on behalf of optional batching, retry with a
2621	 * single page instead.
2622	 */
2623	if (nr_pages > min_pages)
2624		return CHARGE_RETRY;
2625
2626	if (!(gfp_mask & __GFP_WAIT))
2627		return CHARGE_WOULDBLOCK;
2628
2629	if (gfp_mask & __GFP_NORETRY)
2630		return CHARGE_NOMEM;
 
2631
2632	ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
2633	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2634		return CHARGE_RETRY;
2635	/*
2636	 * Even though the limit is exceeded at this point, reclaim
2637	 * may have been able to free some pages.  Retry the charge
2638	 * before killing the task.
2639	 *
2640	 * Only for regular pages, though: huge pages are rather
2641	 * unlikely to succeed so close to the limit, and we fall back
2642	 * to regular pages anyway in case of failure.
2643	 */
2644	if (nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER) && ret)
2645		return CHARGE_RETRY;
2646
2647	/*
2648	 * At task move, charge accounts can be doubly counted. So, it's
2649	 * better to wait until the end of task_move if something is going on.
2650	 */
2651	if (mem_cgroup_wait_acct_move(mem_over_limit))
2652		return CHARGE_RETRY;
2653
2654	if (invoke_oom)
2655		mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(csize));
2656
2657	return CHARGE_NOMEM;
2658}
2659
2660/**
2661 * mem_cgroup_try_charge - try charging a memcg
2662 * @memcg: memcg to charge
2663 * @nr_pages: number of pages to charge
2664 * @oom: trigger OOM if reclaim fails
2665 *
2666 * Returns 0 if @memcg was charged successfully, -EINTR if the charge
2667 * was bypassed to root_mem_cgroup, and -ENOMEM if the charge failed.
2668 */
2669static int mem_cgroup_try_charge(struct mem_cgroup *memcg,
2670				 gfp_t gfp_mask,
2671				 unsigned int nr_pages,
2672				 bool oom)
2673{
2674	unsigned int batch = max(CHARGE_BATCH, nr_pages);
2675	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2676	int ret;
2677
2678	if (mem_cgroup_is_root(memcg))
2679		goto done;
2680	/*
2681	 * Unlike in global OOM situations, memcg is not in a physical
2682	 * memory shortage.  Allow dying and OOM-killed tasks to
2683	 * bypass the last charges so that they can exit quickly and
2684	 * free their memory.
2685	 */
2686	if (unlikely(test_thread_flag(TIF_MEMDIE) ||
2687		     fatal_signal_pending(current)))
2688		goto bypass;
2689
2690	if (unlikely(task_in_memcg_oom(current)))
2691		goto nomem;
2692
2693	if (gfp_mask & __GFP_NOFAIL)
2694		oom = false;
2695again:
2696	if (consume_stock(memcg, nr_pages))
2697		goto done;
2698
2699	do {
2700		bool invoke_oom = oom && !nr_oom_retries;
2701
2702		/* If killed, bypass charge */
2703		if (fatal_signal_pending(current))
2704			goto bypass;
2705
2706		ret = mem_cgroup_do_charge(memcg, gfp_mask, batch,
2707					   nr_pages, invoke_oom);
2708		switch (ret) {
2709		case CHARGE_OK:
2710			break;
2711		case CHARGE_RETRY: /* not in OOM situation but retry */
2712			batch = nr_pages;
2713			goto again;
2714		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
2715			goto nomem;
2716		case CHARGE_NOMEM: /* OOM routine works */
2717			if (!oom || invoke_oom)
2718				goto nomem;
2719			nr_oom_retries--;
2720			break;
2721		}
2722	} while (ret != CHARGE_OK);
2723
2724	if (batch > nr_pages)
2725		refill_stock(memcg, batch - nr_pages);
2726done:
2727	return 0;
2728nomem:
2729	if (!(gfp_mask & __GFP_NOFAIL))
2730		return -ENOMEM;
2731bypass:
2732	return -EINTR;
2733}
2734
2735/**
2736 * mem_cgroup_try_charge_mm - try charging a mm
2737 * @mm: mm_struct to charge
2738 * @nr_pages: number of pages to charge
2739 * @oom: trigger OOM if reclaim fails
2740 *
2741 * Returns the charged mem_cgroup associated with the given mm_struct or
2742 * NULL if the charge failed.
2743 */
2744static struct mem_cgroup *mem_cgroup_try_charge_mm(struct mm_struct *mm,
2745				 gfp_t gfp_mask,
2746				 unsigned int nr_pages,
2747				 bool oom)
2748
2749{
2750	struct mem_cgroup *memcg;
2751	int ret;
2752
2753	memcg = get_mem_cgroup_from_mm(mm);
2754	ret = mem_cgroup_try_charge(memcg, gfp_mask, nr_pages, oom);
2755	css_put(&memcg->css);
2756	if (ret == -EINTR)
2757		memcg = root_mem_cgroup;
2758	else if (ret)
2759		memcg = NULL;
 
2760
2761	return memcg;
2762}
2763
2764/*
2765 * Sometimes we have to undo a charge we got by try_charge().
2766 * This function is for that: it uncharges what was charged
2767 * by try_charge().
2768 */
2769static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
2770				       unsigned int nr_pages)
2771{
2772	if (!mem_cgroup_is_root(memcg)) {
2773		unsigned long bytes = nr_pages * PAGE_SIZE;
2774
2775		res_counter_uncharge(&memcg->res, bytes);
2776		if (do_swap_account)
2777			res_counter_uncharge(&memcg->memsw, bytes);
2778	}
2779}
2780
2781/*
2782 * Cancel charges in this cgroup; this doesn't propagate to the parent cgroup.
2783 * This is useful when moving usage to parent cgroup.
2784 */
2785static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
2786					unsigned int nr_pages)
2787{
2788	unsigned long bytes = nr_pages * PAGE_SIZE;
2789
2790	if (mem_cgroup_is_root(memcg))
2791		return;
2792
2793	res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
2794	if (do_swap_account)
2795		res_counter_uncharge_until(&memcg->memsw,
2796						memcg->memsw.parent, bytes);
2797}
2798
2799/*
2800 * A helper function to get mem_cgroup from ID. must be called under
2801 * rcu_read_lock().  The caller is responsible for calling css_tryget if
2802 * the mem_cgroup is used for charging. (dropping refcnt from swap can be
2803 * called against removed memcg.)
2804 */
2805static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2806{
2807	/* ID 0 is unused ID */
2808	if (!id)
2809		return NULL;
2810	return mem_cgroup_from_id(id);
2811}
2812
2813struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
2814{
2815	struct mem_cgroup *memcg = NULL;
2816	struct page_cgroup *pc;
2817	unsigned short id;
2818	swp_entry_t ent;
2819
2820	VM_BUG_ON_PAGE(!PageLocked(page), page);
2821
2822	pc = lookup_page_cgroup(page);
2823	lock_page_cgroup(pc);
2824	if (PageCgroupUsed(pc)) {
2825		memcg = pc->mem_cgroup;
2826		if (memcg && !css_tryget(&memcg->css))
2827			memcg = NULL;
2828	} else if (PageSwapCache(page)) {
2829		ent.val = page_private(page);
2830		id = lookup_swap_cgroup_id(ent);
2831		rcu_read_lock();
2832		memcg = mem_cgroup_lookup(id);
2833		if (memcg && !css_tryget(&memcg->css))
2834			memcg = NULL;
2835		rcu_read_unlock();
2836	}
2837	unlock_page_cgroup(pc);
2838	return memcg;
2839}
2840
2841static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
2842				       struct page *page,
2843				       unsigned int nr_pages,
2844				       enum charge_type ctype,
2845				       bool lrucare)
2846{
2847	struct page_cgroup *pc = lookup_page_cgroup(page);
2848	struct zone *uninitialized_var(zone);
2849	struct lruvec *lruvec;
2850	bool was_on_lru = false;
2851	bool anon;
2852
2853	lock_page_cgroup(pc);
2854	VM_BUG_ON_PAGE(PageCgroupUsed(pc), page);
2855	/*
2856	 * we don't need the page_cgroup lock for tail pages, because they are not
2857	 * accessed by any other context at this point.
2858	 */
2859
2860	/*
2861	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
2862	 * may already be on some other mem_cgroup's LRU.  Take care of it.
2863	 */
2864	if (lrucare) {
2865		zone = page_zone(page);
2866		spin_lock_irq(&zone->lru_lock);
2867		if (PageLRU(page)) {
2868			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
2869			ClearPageLRU(page);
2870			del_page_from_lru_list(page, lruvec, page_lru(page));
2871			was_on_lru = true;
2872		}
2873	}
2874
2875	pc->mem_cgroup = memcg;
2876	/*
2877	 * We access a page_cgroup asynchronously without lock_page_cgroup().
2878	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
2879	 * is accessed after testing USED bit. To make pc->mem_cgroup visible
2880	 * before USED bit, we need memory barrier here.
2881	 * See mem_cgroup_add_lru_list(), etc.
2882	 */
2883	smp_wmb();
2884	SetPageCgroupUsed(pc);
2885
2886	if (lrucare) {
2887		if (was_on_lru) {
2888			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
2889			VM_BUG_ON_PAGE(PageLRU(page), page);
2890			SetPageLRU(page);
2891			add_page_to_lru_list(page, lruvec, page_lru(page));
2892		}
2893		spin_unlock_irq(&zone->lru_lock);
2894	}
2895
2896	if (ctype == MEM_CGROUP_CHARGE_TYPE_ANON)
2897		anon = true;
2898	else
2899		anon = false;
2900
2901	mem_cgroup_charge_statistics(memcg, page, anon, nr_pages);
2902	unlock_page_cgroup(pc);
2903
2904	/*
2905	 * "charge_statistics" updated event counter. Then, check it.
2906	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
2907	 * if they exceeds softlimit.
2908	 */
2909	memcg_check_events(memcg, page);
2910}
2911
2912static DEFINE_MUTEX(set_limit_mutex);
2913
2914#ifdef CONFIG_MEMCG_KMEM
2915static DEFINE_MUTEX(activate_kmem_mutex);
2916
2917static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg)
2918{
2919	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) &&
2920		memcg_kmem_is_active(memcg);
2921}
2922
2923/*
2924 * This is a bit cumbersome, but it is rarely used and avoids a backpointer
2925 * in the memcg_cache_params struct.
2926 */
2927static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
2928{
2929	struct kmem_cache *cachep;
2930
2931	VM_BUG_ON(p->is_root_cache);
2932	cachep = p->root_cache;
2933	return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
2934}
2935
2936#ifdef CONFIG_SLABINFO
2937static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v)
2938{
2939	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
2940	struct memcg_cache_params *params;
2941
2942	if (!memcg_can_account_kmem(memcg))
2943		return -EIO;
2944
2945	print_slabinfo_header(m);
2946
2947	mutex_lock(&memcg->slab_caches_mutex);
2948	list_for_each_entry(params, &memcg->memcg_slab_caches, list)
2949		cache_show(memcg_params_to_cache(params), m);
2950	mutex_unlock(&memcg->slab_caches_mutex);
2951
2952	return 0;
2953}
2954#endif
2955
2956static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
2957{
2958	struct res_counter *fail_res;
2959	int ret = 0;
2960
2961	ret = res_counter_charge(&memcg->kmem, size, &fail_res);
2962	if (ret)
2963		return ret;
2964
2965	ret = mem_cgroup_try_charge(memcg, gfp, size >> PAGE_SHIFT,
2966				    oom_gfp_allowed(gfp));
2967	if (ret == -EINTR)  {
2968		/*
2969		 * mem_cgroup_try_charge() chose to bypass to root due to
2970		 * OOM kill or fatal signal.  Since our only options are to
2971		 * either fail the allocation or charge it to this cgroup, do
2972		 * it as a temporary condition. But we can't fail. From a
2973		 * kmem/slab perspective, the cache has already been selected,
2974		 * by mem_cgroup_kmem_get_cache(), so it is too late to change
2975		 * our minds.
2976		 *
2977		 * This condition will only trigger if the task entered
2978		 * memcg_charge_kmem in a sane state, but was OOM-killed during
2979		 * mem_cgroup_try_charge() above. Tasks that were already
2980		 * dying when the allocation triggers should have been already
2981		 * directed to the root cgroup in memcontrol.h
2982		 */
2983		res_counter_charge_nofail(&memcg->res, size, &fail_res);
2984		if (do_swap_account)
2985			res_counter_charge_nofail(&memcg->memsw, size,
2986						  &fail_res);
2987		ret = 0;
2988	} else if (ret)
2989		res_counter_uncharge(&memcg->kmem, size);
2990
2991	return ret;
2992}
2993
2994static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
2995{
2996	res_counter_uncharge(&memcg->res, size);
2997	if (do_swap_account)
2998		res_counter_uncharge(&memcg->memsw, size);
2999
3000	/* Not down to 0 */
3001	if (res_counter_uncharge(&memcg->kmem, size))
3002		return;
3003
3004	/*
3005	 * Releases a reference taken in kmem_cgroup_css_offline in case
3006	 * this last uncharge is racing with the offlining code or it is
3007	 * outliving the memcg existence.
3008	 *
3009	 * The memory barrier imposed by test&clear is paired with the
3010	 * explicit one in memcg_kmem_mark_dead().
3011	 */
3012	if (memcg_kmem_test_and_clear_dead(memcg))
3013		css_put(&memcg->css);
3014}
3015
3016/*
3017 * helper for accessing a memcg's index. It will be used as an index in the
3018 * child cache array in kmem_cache, and also to derive its name. This function
3019 * will return -1 when this is not a kmem-limited memcg.
3020 */
3021int memcg_cache_id(struct mem_cgroup *memcg)
3022{
3023	return memcg ? memcg->kmemcg_id : -1;
3024}
3025
3026static size_t memcg_caches_array_size(int num_groups)
3027{
3028	ssize_t size;
3029	if (num_groups <= 0)
3030		return 0;
3031
3032	size = 2 * num_groups;
3033	if (size < MEMCG_CACHES_MIN_SIZE)
3034		size = MEMCG_CACHES_MIN_SIZE;
3035	else if (size > MEMCG_CACHES_MAX_SIZE)
3036		size = MEMCG_CACHES_MAX_SIZE;
3037
3038	return size;
3039}
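/*
 * Worked example (illustrative; only the relationships shown in the code
 * above are relied upon, the exact MIN/MAX values live in memcontrol.h):
 * num_groups = 3 sizes the per-root cache array to 2 * 3 = 6 slots, leaving
 * headroom for new cgroups; num_groups = 1 would give 2 and be rounded up
 * to MEMCG_CACHES_MIN_SIZE if that is larger, and very large counts are
 * clamped to MEMCG_CACHES_MAX_SIZE.
 */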
3040
3041/*
3042 * We should update the current array size iff all caches updates succeed. This
3043 * can only be done from the slab side. The slab mutex needs to be held when
3044 * calling this.
3045 */
3046void memcg_update_array_size(int num)
3047{
3048	if (num > memcg_limited_groups_array_size)
3049		memcg_limited_groups_array_size = memcg_caches_array_size(num);
3050}
3051
3052static void kmem_cache_destroy_work_func(struct work_struct *w);
3053
3054int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
3055{
3056	struct memcg_cache_params *cur_params = s->memcg_params;
3057
3058	VM_BUG_ON(!is_root_cache(s));
3059
3060	if (num_groups > memcg_limited_groups_array_size) {
3061		int i;
3062		struct memcg_cache_params *new_params;
3063		ssize_t size = memcg_caches_array_size(num_groups);
3064
3065		size *= sizeof(void *);
3066		size += offsetof(struct memcg_cache_params, memcg_caches);
3067
3068		new_params = kzalloc(size, GFP_KERNEL);
3069		if (!new_params)
3070			return -ENOMEM;
3071
3072		new_params->is_root_cache = true;
3073
3074		/*
3075		 * There is the chance it will be bigger than
3076		 * memcg_limited_groups_array_size, if we failed an allocation
3077		 * in a cache, in which case all caches updated before it, will
3078		 * have a bigger array.
3079		 *
3080		 * But if that is the case, the data after
3081		 * memcg_limited_groups_array_size is certainly unused
3082		 */
3083		for (i = 0; i < memcg_limited_groups_array_size; i++) {
3084			if (!cur_params->memcg_caches[i])
3085				continue;
3086			new_params->memcg_caches[i] =
3087						cur_params->memcg_caches[i];
3088		}
3089
3090		/*
3091		 * Ideally, we would wait until all caches succeed, and only
3092		 * then free the old one. But this is not worth the extra
3093		 * pointer per-cache we'd have to have for this.
3094		 *
3095		 * It is not a big deal if some caches are left with a size
3096		 * bigger than the others. And all updates will reset this
3097		 * anyway.
3098		 */
3099		rcu_assign_pointer(s->memcg_params, new_params);
3100		if (cur_params)
3101			kfree_rcu(cur_params, rcu_head);
3102	}
3103	return 0;
3104}
3105
3106char *memcg_create_cache_name(struct mem_cgroup *memcg,
3107			      struct kmem_cache *root_cache)
3108{
3109	static char *buf = NULL;
3110
3111	/*
3112	 * We need a mutex here to protect the shared buffer. Since this is
3113	 * expected to be called only on cache creation, we can employ the
3114	 * slab_mutex for that purpose.
3115	 */
3116	lockdep_assert_held(&slab_mutex);
3117
3118	if (!buf) {
3119		buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
3120		if (!buf)
3121			return NULL;
3122	}
3123
3124	cgroup_name(memcg->css.cgroup, buf, NAME_MAX + 1);
3125	return kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
3126			 memcg_cache_id(memcg), buf);
3127}
3128
3129int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
3130			     struct kmem_cache *root_cache)
3131{
3132	size_t size;
3133
3134	if (!memcg_kmem_enabled())
3135		return 0;
3136
3137	if (!memcg) {
3138		size = offsetof(struct memcg_cache_params, memcg_caches);
3139		size += memcg_limited_groups_array_size * sizeof(void *);
3140	} else
3141		size = sizeof(struct memcg_cache_params);
3142
3143	s->memcg_params = kzalloc(size, GFP_KERNEL);
3144	if (!s->memcg_params)
3145		return -ENOMEM;
3146
3147	if (memcg) {
3148		s->memcg_params->memcg = memcg;
3149		s->memcg_params->root_cache = root_cache;
3150		INIT_WORK(&s->memcg_params->destroy,
3151				kmem_cache_destroy_work_func);
3152		css_get(&memcg->css);
3153	} else
3154		s->memcg_params->is_root_cache = true;
3155
3156	return 0;
3157}
3158
3159void memcg_free_cache_params(struct kmem_cache *s)
3160{
3161	if (!s->memcg_params)
3162		return;
3163	if (!s->memcg_params->is_root_cache)
3164		css_put(&s->memcg_params->memcg->css);
3165	kfree(s->memcg_params);
3166}
3167
3168void memcg_register_cache(struct kmem_cache *s)
3169{
3170	struct kmem_cache *root;
3171	struct mem_cgroup *memcg;
3172	int id;
3173
3174	if (is_root_cache(s))
3175		return;
 
3176
3177	/*
3178	 * Holding the slab_mutex assures nobody will touch the memcg_caches
3179	 * array while we are modifying it.
3180	 */
3181	lockdep_assert_held(&slab_mutex);
3182
3183	root = s->memcg_params->root_cache;
3184	memcg = s->memcg_params->memcg;
3185	id = memcg_cache_id(memcg);
3186
3187	/*
3188	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
3189	 * barrier here to ensure nobody will see the kmem_cache partially
3190	 * initialized.
3191	 */
3192	smp_wmb();
 
3193
3194	/*
3195	 * Initialize the pointer to this cache in its parent's memcg_params
3196	 * before adding it to the memcg_slab_caches list, otherwise we can
3197	 * fail to convert memcg_params_to_cache() while traversing the list.
3198	 */
3199	VM_BUG_ON(root->memcg_params->memcg_caches[id]);
3200	root->memcg_params->memcg_caches[id] = s;
 
3201
3202	mutex_lock(&memcg->slab_caches_mutex);
3203	list_add(&s->memcg_params->list, &memcg->memcg_slab_caches);
3204	mutex_unlock(&memcg->slab_caches_mutex);
3205}
3206
3207void memcg_unregister_cache(struct kmem_cache *s)
3208{
3209	struct kmem_cache *root;
3210	struct mem_cgroup *memcg;
3211	int id;
3212
3213	if (is_root_cache(s))
3214		return;
 
3215
3216	/*
3217	 * Holding the slab_mutex assures nobody will touch the memcg_caches
3218	 * array while we are modifying it.
3219	 */
3220	lockdep_assert_held(&slab_mutex);
 
3221
3222	root = s->memcg_params->root_cache;
3223	memcg = s->memcg_params->memcg;
3224	id = memcg_cache_id(memcg);
3225
3226	mutex_lock(&memcg->slab_caches_mutex);
3227	list_del(&s->memcg_params->list);
3228	mutex_unlock(&memcg->slab_caches_mutex);
3229
3230	/*
3231	 * Clear the pointer to this cache in its parent's memcg_params only
3232	 * after removing it from the memcg_slab_caches list, otherwise we can
3233	 * fail to convert memcg_params_to_cache() while traversing the list.
3234	 */
3235	VM_BUG_ON(root->memcg_params->memcg_caches[id] != s);
3236	root->memcg_params->memcg_caches[id] = NULL;
3237}
3238
3239/*
3240 * During the creation of a new cache, we need to disable our accounting
3241 * mechanism altogether. This is true even if we are not creating, but rather
3242 * just enqueuing new caches to be created.
3243 *
3244 * This is because that process will trigger allocations; some visible, like
3245 * explicit kmallocs to auxiliary data structures, name strings and internal
3246 * cache structures; some well concealed, like INIT_WORK() that can allocate
3247 * objects during debug.
3248 *
3249 * If any allocation happens during memcg_kmem_get_cache, we will recurse back
3250 * to it. This may not be a bounded recursion: since the first cache creation
3251 * failed to complete (waiting on the allocation), we'll just try to create the
3252 * cache again, failing at the same point.
3253 *
3254 * memcg_kmem_get_cache is prepared to abort after seeing a positive count of
3255 * memcg_kmem_skip_account. So we enclose anything that might allocate memory
3256 * inside the following two functions.
3257 */
3258static inline void memcg_stop_kmem_account(void)
3259{
3260	VM_BUG_ON(!current->mm);
3261	current->memcg_kmem_skip_account++;
3262}
3263
3264static inline void memcg_resume_kmem_account(void)
3265{
3266	VM_BUG_ON(!current->mm);
3267	current->memcg_kmem_skip_account--;
3268}
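/*
 * Usage sketch (illustration only; memcg_create_cache_enqueue() further
 * below is the in-tree example of this bracket):
 *
 *	memcg_stop_kmem_account();
 *	... kmalloc(), INIT_WORK(), etc. done here skip memcg accounting
 *	    and are served from the root caches, so cache creation cannot
 *	    recurse into memcg_kmem_get_cache() ...
 *	memcg_resume_kmem_account();
 *
 * The skip counter lives in current->memcg_kmem_skip_account and nests.
 */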
3269
3270static void kmem_cache_destroy_work_func(struct work_struct *w)
3271{
3272	struct kmem_cache *cachep;
3273	struct memcg_cache_params *p;
3274
3275	p = container_of(w, struct memcg_cache_params, destroy);
3276
3277	cachep = memcg_params_to_cache(p);
3278
3279	/*
3280	 * If we get down to 0 after shrink, we could delete right away.
3281	 * However, memcg_release_pages() already puts us back in the workqueue
3282	 * in that case. If we proceed deleting, we'll get a dangling
3283	 * reference, and removing the object from the workqueue in that case
3284	 * is unnecessary complication. We are not a fast path.
3285	 *
3286	 * Note that this case is fundamentally different from racing with
3287	 * shrink_slab(): if memcg_cgroup_destroy_cache() is called in
3288	 * kmem_cache_shrink, not only we would be reinserting a dead cache
3289	 * into the queue, but doing so from inside the worker racing to
3290	 * destroy it.
3291	 *
3292	 * So if we aren't down to zero, we'll just schedule a worker and try
3293	 * again
3294	 */
3295	if (atomic_read(&cachep->memcg_params->nr_pages) != 0)
3296		kmem_cache_shrink(cachep);
3297	else
3298		kmem_cache_destroy(cachep);
3299}
3300
3301void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
3302{
3303	if (!cachep->memcg_params->dead)
3304		return;
3305
3306	/*
3307	 * There are many ways in which we can get here.
3308	 *
3309	 * We can get to a memory-pressure situation while the delayed work is
3310	 * still pending to run. The vmscan shrinkers can then release all
3311	 * cache memory and get us to destruction. If this is the case, we'll
3312	 * be executed twice, which is a bug (the second time will execute over
3313	 * bogus data). In this case, cancelling the work should be fine.
3314	 *
3315	 * But we can also get here from the worker itself, if
3316	 * kmem_cache_shrink is enough to shake all the remaining objects and
3317	 * get the page count to 0. In this case, we'll deadlock if we try to
3318	 * cancel the work (the worker runs with an internal lock held, which
3319	 * is the same lock we would hold for cancel_work_sync().)
3320	 *
3321	 * Since we can't possibly know who got us here, just refrain from
3322	 * running if there is already work pending
3323	 */
3324	if (work_pending(&cachep->memcg_params->destroy))
3325		return;
3326	/*
3327	 * We have to defer the actual destroying to a workqueue, because
3328	 * we might currently be in a context that cannot sleep.
3329	 */
3330	schedule_work(&cachep->memcg_params->destroy);
3331}
3332
3333int __kmem_cache_destroy_memcg_children(struct kmem_cache *s)
3334{
3335	struct kmem_cache *c;
3336	int i, failed = 0;
3337
3338	/*
3339	 * If the cache is being destroyed, we trust that there is no one else
3340	 * requesting objects from it. Even if there are, the sanity checks in
3341	 * kmem_cache_destroy should catch this ill case.
3342	 *
3343	 * Still, we don't want anyone else freeing memcg_caches under our
3344	 * noses, which can happen if a new memcg comes to life. As usual,
3345	 * we'll take the activate_kmem_mutex to protect ourselves against
3346	 * this.
3347	 */
3348	mutex_lock(&activate_kmem_mutex);
3349	for_each_memcg_cache_index(i) {
3350		c = cache_from_memcg_idx(s, i);
3351		if (!c)
3352			continue;
3353
3354		/*
3355		 * We will now manually delete the caches, so to avoid races
3356		 * we need to cancel all pending destruction workers and
3357		 * proceed with destruction ourselves.
3358		 *
3359		 * kmem_cache_destroy() will call kmem_cache_shrink internally,
3360		 * and that could spawn the workers again: it is likely that
3361		 * the cache may still have active pages at this very moment.
3362		 * This would lead us back to mem_cgroup_destroy_cache.
3363		 *
3364		 * But that will not execute at all if the "dead" flag is not
3365		 * set, so flip it down to guarantee we are in control.
3366		 */
3367		c->memcg_params->dead = false;
3368		cancel_work_sync(&c->memcg_params->destroy);
3369		kmem_cache_destroy(c);
3370
3371		if (cache_from_memcg_idx(s, i))
3372			failed++;
3373	}
3374	mutex_unlock(&activate_kmem_mutex);
3375	return failed;
 
3376}
3377
3378static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
3379{
3380	struct kmem_cache *cachep;
3381	struct memcg_cache_params *params;
3382
3383	if (!memcg_kmem_is_active(memcg))
3384		return;
 
3385
3386	mutex_lock(&memcg->slab_caches_mutex);
3387	list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
3388		cachep = memcg_params_to_cache(params);
3389		cachep->memcg_params->dead = true;
3390		schedule_work(&cachep->memcg_params->destroy);
3391	}
3392	mutex_unlock(&memcg->slab_caches_mutex);
3393}
3394
3395struct create_work {
3396	struct mem_cgroup *memcg;
3397	struct kmem_cache *cachep;
3398	struct work_struct work;
3399};
3400
3401static void memcg_create_cache_work_func(struct work_struct *w)
3402{
3403	struct create_work *cw = container_of(w, struct create_work, work);
3404	struct mem_cgroup *memcg = cw->memcg;
3405	struct kmem_cache *cachep = cw->cachep;
3406
3407	kmem_cache_create_memcg(memcg, cachep);
3408	css_put(&memcg->css);
3409	kfree(cw);
3410}
3411
3412/*
3413 * Enqueue the creation of a per-memcg kmem_cache.
3414 */
3415static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
3416					 struct kmem_cache *cachep)
3417{
3418	struct create_work *cw;
3419
3420	cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT);
3421	if (cw == NULL) {
3422		css_put(&memcg->css);
3423		return;
3424	}
3425
3426	cw->memcg = memcg;
3427	cw->cachep = cachep;
3428
3429	INIT_WORK(&cw->work, memcg_create_cache_work_func);
3430	schedule_work(&cw->work);
3431}
3432
3433static void memcg_create_cache_enqueue(struct mem_cgroup *memcg,
3434				       struct kmem_cache *cachep)
3435{
3436	/*
3437	 * We need to stop accounting when we kmalloc, because if the
3438	 * corresponding kmalloc cache is not yet created, the first allocation
3439	 * in __memcg_create_cache_enqueue will recurse.
3440	 *
3441	 * However, it is better to enclose the whole function. Depending on
3442	 * the debugging options enabled, INIT_WORK(), for instance, can
3443	 * trigger an allocation. This too, will make us recurse. Because at
3444	 * this point we can't allow ourselves back into memcg_kmem_get_cache,
3445	 * the safest choice is to do it like this, wrapping the whole function.
3446	 */
3447	memcg_stop_kmem_account();
3448	__memcg_create_cache_enqueue(memcg, cachep);
3449	memcg_resume_kmem_account();
3450}
3451/*
3452 * Return the kmem_cache we're supposed to use for a slab allocation.
3453 * We try to use the current memcg's version of the cache.
3454 *
3455 * If the cache does not exist yet, if we are the first user of it,
3456 * we either create it immediately, if possible, or create it asynchronously
3457 * in a workqueue.
3458 * In the latter case, we will let the current allocation go through with
3459 * the original cache.
3460 *
3461 * Can't be called in interrupt context or from kernel threads.
3462 * This function needs to be called with rcu_read_lock() held.
3463 */
3464struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
3465					  gfp_t gfp)
3466{
3467	struct mem_cgroup *memcg;
3468	struct kmem_cache *memcg_cachep;
3469
3470	VM_BUG_ON(!cachep->memcg_params);
3471	VM_BUG_ON(!cachep->memcg_params->is_root_cache);
3472
3473	if (!current->mm || current->memcg_kmem_skip_account)
3474		return cachep;
3475
3476	rcu_read_lock();
3477	memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
3478
3479	if (!memcg_can_account_kmem(memcg))
3480		goto out;
 
3481
3482	memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg));
3483	if (likely(memcg_cachep)) {
3484		cachep = memcg_cachep;
3485		goto out;
 
3486	}
3487
3488	/* The corresponding put will be done in the workqueue. */
3489	if (!css_tryget(&memcg->css))
3490		goto out;
3491	rcu_read_unlock();
3492
3493	/*
3494	 * If we are in a safe context (can wait, and not in interrupt
3495	 * context), we could be predictable and return right away.
3496	 * This would guarantee that the allocation being performed
3497	 * already belongs in the new cache.
3498	 *
3499	 * However, there are some clashes that can arrive from locking.
3500	 * For instance, because we acquire the slab_mutex while doing
3501	 * kmem_cache_dup, this means no further allocation could happen
3502	 * with the slab_mutex held.
3503	 *
3504	 * Also, because cache creation issue get_online_cpus(), this
3505	 * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
3506	 * that ends up reversed during cpu hotplug. (cpuset allocates
3507	 * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
3508	 * better to defer everything.
3509	 */
3510	memcg_create_cache_enqueue(memcg, cachep);
3511	return cachep;
3512out:
3513	rcu_read_unlock();
3514	return cachep;
3515}
3516EXPORT_SYMBOL(__memcg_kmem_get_cache);
3517
3518/*
3519 * We need to verify if the allocation against current->mm->owner's memcg is
3520 * possible for the given order. But the page is not allocated yet, so we'll
3521 * need a further commit step to do the final arrangements.
3522 *
3523 * It is possible for the task to switch cgroups in this mean time, so at
3524 * commit time, we can't rely on task conversion any longer.  We'll then use
3525 * the handle argument to return to the caller which cgroup we should commit
3526 * against. We could also return the memcg directly and avoid the pointer
3527 * passing, but a boolean return value gives better semantics considering
3528 * the compiled-out case as well.
3529 *
3530 * Returning true means the allocation is possible.
3531 */
3532bool
3533__memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
3534{
3535	struct mem_cgroup *memcg;
3536	int ret;
3537
3538	*_memcg = NULL;
3539
3540	/*
3541	 * Disabling accounting is only relevant for some specific memcg
3542	 * internal allocations. Therefore we would initially not have such a
3543	 * check here, since direct calls to the page allocator that are marked
3544	 * with GFP_KMEMCG only happen outside memcg core. We are mostly
3545	 * concerned with cache allocations, and by having this test at
3546	 * memcg_kmem_get_cache, we are already able to relay the allocation to
3547	 * the root cache and bypass the memcg cache altogether.
3548	 *
3549	 * There is one exception, though: the SLUB allocator does not create
3550	 * large order caches, but rather services large kmallocs directly from
3551	 * the page allocator. Therefore, the following sequence when backed by
3552	 * the SLUB allocator:
3553	 *
3554	 *	memcg_stop_kmem_account();
3555	 *	kmalloc(<large_number>)
3556	 *	memcg_resume_kmem_account();
3557	 *
3558	 * would effectively ignore the fact that we should skip accounting,
3559	 * since it will drive us directly to this function without passing
3560	 * through the cache selector memcg_kmem_get_cache. Such large
3561	 * allocations are extremely rare but can happen, for instance, for the
3562	 * cache arrays. We bring this test here.
3563	 */
3564	if (!current->mm || current->memcg_kmem_skip_account)
3565		return true;
3566
3567	memcg = get_mem_cgroup_from_mm(current->mm);
3568
3569	if (!memcg_can_account_kmem(memcg)) {
3570		css_put(&memcg->css);
3571		return true;
3572	}
3573
3574	ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order);
3575	if (!ret)
3576		*_memcg = memcg;
3577
3578	css_put(&memcg->css);
3579	return (ret == 0);
3580}
3581
3582void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
3583			      int order)
3584{
3585	struct page_cgroup *pc;
 
3586
3587	VM_BUG_ON(mem_cgroup_is_root(memcg));
3588
3589	/* The page allocation failed. Revert */
3590	if (!page) {
3591		memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
3592		return;
3593	}
3594
3595	pc = lookup_page_cgroup(page);
3596	lock_page_cgroup(pc);
3597	pc->mem_cgroup = memcg;
3598	SetPageCgroupUsed(pc);
3599	unlock_page_cgroup(pc);
3600}
3601
3602void __memcg_kmem_uncharge_pages(struct page *page, int order)
3603{
3604	struct mem_cgroup *memcg = NULL;
3605	struct page_cgroup *pc;
3606
3607
3608	pc = lookup_page_cgroup(page);
3609	/*
3610	 * Fast unlocked return. Theoretically it might have changed; we have to
3611	 * check again after locking.
3612	 */
3613	if (!PageCgroupUsed(pc))
3614		return;
3615
3616	lock_page_cgroup(pc);
3617	if (PageCgroupUsed(pc)) {
3618		memcg = pc->mem_cgroup;
3619		ClearPageCgroupUsed(pc);
3620	}
3621	unlock_page_cgroup(pc);
3622
3623	/*
3624	 * We trust that the allocation is valid only if there is a memcg
3625	 * associated with the page
3626	 */
3627	if (!memcg)
3628		return;
3629
3630	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
3631	memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
3632}
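/*
 * Illustrative sketch (not part of this file): the charge -> allocate ->
 * commit / uncharge sequence that the comments above describe for kmem page
 * accounting. Real callers normally go through wrappers in
 * <linux/memcontrol.h> that add static-key gating; the helpers below
 * (example_charged_alloc/example_charged_free) are hypothetical and only
 * show the intended calling order.
 */
#if 0	/* example only, never compiled as part of memcontrol.c */
static struct page *example_charged_alloc(gfp_t gfp, int order)
{
	struct mem_cgroup *memcg = NULL;
	struct page *page;

	/* Reserve the charge before the page exists. */
	if (!__memcg_kmem_newpage_charge(gfp, &memcg, order))
		return NULL;

	page = alloc_pages(gfp, order);

	/* Bind the page to the memcg, or revert the charge on failure. */
	if (memcg)
		__memcg_kmem_commit_charge(page, memcg, order);
	return page;
}

static void example_charged_free(struct page *page, int order)
{
	__memcg_kmem_uncharge_pages(page, order);
	__free_pages(page, order);
}
#endif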
3633#else
3634static inline void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
3635{
3636}
3637#endif /* CONFIG_MEMCG_KMEM */
3638
3639#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3640
3641#define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
3642/*
3643 * Because tail pages are not marked as "used", set it. We're under
3644 * zone->lru_lock, 'splitting on pmd' and compound_lock.
3645 * charge/uncharge will never happen and move_account() is done under
3646 * compound_lock(), so we don't have to take care of races.
3647 */
3648void mem_cgroup_split_huge_fixup(struct page *head)
 
3649{
3650	struct page_cgroup *head_pc = lookup_page_cgroup(head);
3651	struct page_cgroup *pc;
3652	struct mem_cgroup *memcg;
3653	int i;
3654
3655	if (mem_cgroup_disabled())
3656		return;
3657
3658	memcg = head_pc->mem_cgroup;
3659	for (i = 1; i < HPAGE_PMD_NR; i++) {
3660		pc = head_pc + i;
3661		pc->mem_cgroup = memcg;
3662		smp_wmb();/* see __commit_charge() */
3663		pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
3664	}
3665	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
3666		       HPAGE_PMD_NR);
3667}
3668#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
3669
3670/**
3671 * mem_cgroup_move_account - move account of the page
3672 * @page: the page
3673 * @nr_pages: number of regular pages (>1 for huge pages)
3674 * @pc:	page_cgroup of the page.
3675 * @from: mem_cgroup which the page is moved from.
3676 * @to:	mem_cgroup which the page is moved to. @from != @to.
3677 *
3678 * The caller must confirm following.
3679 * - page is not on LRU (isolate_page() is useful.)
3680 * - compound_lock is held when nr_pages > 1
3681 *
3682 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
3683 * from old cgroup.
3684 */
3685static int mem_cgroup_move_account(struct page *page,
3686				   unsigned int nr_pages,
3687				   struct page_cgroup *pc,
3688				   struct mem_cgroup *from,
3689				   struct mem_cgroup *to)
3690{
3691	unsigned long flags;
3692	int ret;
3693	bool anon = PageAnon(page);
3694
3695	VM_BUG_ON(from == to);
3696	VM_BUG_ON_PAGE(PageLRU(page), page);
3697	/*
3698	 * The page is isolated from LRU. So, collapse function
3699	 * will not handle this page. But page splitting can happen.
3700	 * Do this check under compound_page_lock(). The caller should
3701	 * hold it.
3702	 */
3703	ret = -EBUSY;
3704	if (nr_pages > 1 && !PageTransHuge(page))
3705		goto out;
3706
3707	lock_page_cgroup(pc);
3708
3709	ret = -EINVAL;
3710	if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
3711		goto unlock;
3712
3713	move_lock_mem_cgroup(from, &flags);
3714
3715	if (!anon && page_mapped(page)) {
3716		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
3717			       nr_pages);
3718		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
3719			       nr_pages);
3720	}
3721
3722	if (PageWriteback(page)) {
3723		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
3724			       nr_pages);
3725		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
3726			       nr_pages);
3727	}
3728
3729	mem_cgroup_charge_statistics(from, page, anon, -nr_pages);
3730
3731	/* caller should have done css_get */
3732	pc->mem_cgroup = to;
3733	mem_cgroup_charge_statistics(to, page, anon, nr_pages);
3734	move_unlock_mem_cgroup(from, &flags);
3735	ret = 0;
3736unlock:
3737	unlock_page_cgroup(pc);
3738	/*
3739	 * check events
3740	 */
3741	memcg_check_events(to, page);
3742	memcg_check_events(from, page);
3743out:
3744	return ret;
3745}
3746
3747/**
3748 * mem_cgroup_move_parent - moves page to the parent group
3749 * @page: the page to move
3750 * @pc: page_cgroup of the page
3751 * @child: page's cgroup
3752 *
3753 * move charges to its parent or the root cgroup if the group has no
3754 * parent (aka use_hierarchy==0).
3755 * Although this might fail (get_page_unless_zero, isolate_lru_page or
3756 * mem_cgroup_move_account fails) the failure is always temporary and
3757 * it signals a race with a page removal/uncharge or migration. In the
3758 * first case the page is on the way out and it will vanish from the LRU
3759 * on the next attempt and the call should be retried later.
3760 * Isolation from the LRU fails only if the page has been isolated from
3761 * the LRU since we looked at it and that usually means either global
3762 * reclaim or migration going on. The page will either get back to the
3763 * LRU or vanish.
3764 * Finally mem_cgroup_move_account fails only if the page got uncharged
3765 * (!PageCgroupUsed) or moved to a different group. The page will
3766 * disappear in the next attempt.
3767 */
3768static int mem_cgroup_move_parent(struct page *page,
3769				  struct page_cgroup *pc,
3770				  struct mem_cgroup *child)
3771{
3772	struct mem_cgroup *parent;
3773	unsigned int nr_pages;
3774	unsigned long uninitialized_var(flags);
3775	int ret;
3776
3777	VM_BUG_ON(mem_cgroup_is_root(child));
3778
3779	ret = -EBUSY;
3780	if (!get_page_unless_zero(page))
3781		goto out;
3782	if (isolate_lru_page(page))
3783		goto put;
3784
3785	nr_pages = hpage_nr_pages(page);
3786
3787	parent = parent_mem_cgroup(child);
3788	/*
3789	 * If no parent, move charges to root cgroup.
3790	 */
3791	if (!parent)
3792		parent = root_mem_cgroup;
3793
3794	if (nr_pages > 1) {
3795		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
3796		flags = compound_lock_irqsave(page);
3797	}
3798
3799	ret = mem_cgroup_move_account(page, nr_pages,
3800				pc, child, parent);
3801	if (!ret)
3802		__mem_cgroup_cancel_local_charge(child, nr_pages);
3803
3804	if (nr_pages > 1)
3805		compound_unlock_irqrestore(page, flags);
3806	putback_lru_page(page);
3807put:
3808	put_page(page);
3809out:
3810	return ret;
3811}
3812
3813int mem_cgroup_charge_anon(struct page *page,
3814			      struct mm_struct *mm, gfp_t gfp_mask)
3815{
3816	unsigned int nr_pages = 1;
3817	struct mem_cgroup *memcg;
3818	bool oom = true;
3819
3820	if (mem_cgroup_disabled())
3821		return 0;
3822
3823	VM_BUG_ON_PAGE(page_mapped(page), page);
3824	VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
3825	VM_BUG_ON(!mm);
3826
3827	if (PageTransHuge(page)) {
3828		nr_pages <<= compound_order(page);
3829		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
3830		/*
3831		 * Never OOM-kill a process for a huge page.  The
3832		 * fault handler will fall back to regular pages.
3833		 */
3834		oom = false;
3835	}
3836
3837	memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, nr_pages, oom);
3838	if (!memcg)
3839		return -ENOMEM;
3840	__mem_cgroup_commit_charge(memcg, page, nr_pages,
3841				   MEM_CGROUP_CHARGE_TYPE_ANON, false);
3842	return 0;
3843}
3844
3845/*
3846 * While swap-in, try_charge -> commit or cancel, the page is locked.
3847 * And when try_charge() successfully returns, one refcnt to memcg without
3848 * struct page_cgroup is acquired. This refcnt will be consumed by
3849 * "commit()" or removed by "cancel()"
3850 */
3851static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm,
3852					  struct page *page,
3853					  gfp_t mask,
3854					  struct mem_cgroup **memcgp)
3855{
3856	struct mem_cgroup *memcg = NULL;
3857	struct page_cgroup *pc;
3858	int ret;
 
3859
3860	pc = lookup_page_cgroup(page);
3861	/*
3862	 * Every swap fault against a single page tries to charge the
3863	 * page, bail as early as possible.  shmem_unuse() encounters
3864	 * already charged pages, too.  The USED bit is protected by
3865	 * the page lock, which serializes swap cache removal, which
3866	 * in turn serializes uncharging.
3867	 */
3868	if (PageCgroupUsed(pc))
3869		goto out;
3870	if (do_swap_account)
3871		memcg = try_get_mem_cgroup_from_page(page);
3872	if (!memcg)
3873		memcg = get_mem_cgroup_from_mm(mm);
3874	ret = mem_cgroup_try_charge(memcg, mask, 1, true);
3875	css_put(&memcg->css);
3876	if (ret == -EINTR)
3877		memcg = root_mem_cgroup;
3878	else if (ret)
3879		return ret;
3880out:
3881	*memcgp = memcg;
3882	return 0;
3883}
3884
3885int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page,
3886				 gfp_t gfp_mask, struct mem_cgroup **memcgp)
3887{
3888	if (mem_cgroup_disabled()) {
3889		*memcgp = NULL;
3890		return 0;
3891	}
3892	/*
3893	 * A racing thread's fault, or swapoff, may have already
3894	 * updated the pte, and even removed page from swap cache: in
3895	 * those cases unuse_pte()'s pte_same() test will fail; but
3896	 * there's also a KSM case which does need to charge the page.
3897	 */
3898	if (!PageSwapCache(page)) {
3899		struct mem_cgroup *memcg;
3900
3901		memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
3902		if (!memcg)
3903			return -ENOMEM;
3904		*memcgp = memcg;
3905		return 0;
3906	}
3907	return __mem_cgroup_try_charge_swapin(mm, page, gfp_mask, memcgp);
3908}
3909
3910void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
3911{
3912	if (mem_cgroup_disabled())
3913		return;
3914	if (!memcg)
3915		return;
3916	__mem_cgroup_cancel_charge(memcg, 1);
3917}
3918
3919static void
3920__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
3921					enum charge_type ctype)
3922{
3923	if (mem_cgroup_disabled())
3924		return;
3925	if (!memcg)
3926		return;
3927
3928	__mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
3929	/*
3930	 * Now swap is in memory. This means this page may be
3931	 * counted both as mem and swap, i.e. a double count.
3932	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
3933	 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
3934	 * may call delete_from_swap_cache() before we reach here.
3935	 */
3936	if (do_swap_account && PageSwapCache(page)) {
3937		swp_entry_t ent = {.val = page_private(page)};
3938		mem_cgroup_uncharge_swap(ent);
3939	}
3940}
 
3941
3942void mem_cgroup_commit_charge_swapin(struct page *page,
3943				     struct mem_cgroup *memcg)
3944{
3945	__mem_cgroup_commit_charge_swapin(page, memcg,
3946					  MEM_CGROUP_CHARGE_TYPE_ANON);
3947}
3948
3949int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
3950				gfp_t gfp_mask)
3951{
3952	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
3953	struct mem_cgroup *memcg;
3954	int ret;
3955
3956	if (mem_cgroup_disabled())
3957		return 0;
3958	if (PageCompound(page))
3959		return 0;
3960
3961	if (PageSwapCache(page)) { /* shmem */
3962		ret = __mem_cgroup_try_charge_swapin(mm, page,
3963						     gfp_mask, &memcg);
3964		if (ret)
3965			return ret;
3966		__mem_cgroup_commit_charge_swapin(page, memcg, type);
3967		return 0;
3968	}
3969
3970	memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
3971	if (!memcg)
3972		return -ENOMEM;
3973	__mem_cgroup_commit_charge(memcg, page, 1, type, false);
3974	return 0;
3975}
3976
3977static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
3978				   unsigned int nr_pages,
3979				   const enum charge_type ctype)
3980{
3981	struct memcg_batch_info *batch = NULL;
3982	bool uncharge_memsw = true;
3983
3984	/* If swapout, usage of swap doesn't decrease */
3985	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
3986		uncharge_memsw = false;
3987
3988	batch = &current->memcg_batch;
3989	/*
3990	 * Usually, we do css_get() when we remember a memcg pointer.
3991	 * But in this case, we keep res->usage until end of a series of
3992	 * uncharges. Then, it's ok to ignore memcg's refcnt.
3993	 */
3994	if (!batch->memcg)
3995		batch->memcg = memcg;
3996	/*
3997	 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
3998	 * In those cases, all pages freed continuously can be expected to be in
3999	 * the same cgroup and we have a chance to coalesce uncharges.
4000	 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
4001	 * because we want to do uncharge as soon as possible.
4002	 */
4003
4004	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
4005		goto direct_uncharge;
4006
4007	if (nr_pages > 1)
4008		goto direct_uncharge;
4009
4010	/*
4011	 * In the typical case, batch->memcg == mem. This means we can
4012	 * merge a series of uncharges into one uncharge of the res_counter.
4013	 * If not, we uncharge the res_counter one by one.
4014	 */
4015	if (batch->memcg != memcg)
4016		goto direct_uncharge;
4017	/* remember freed charge and uncharge it later */
4018	batch->nr_pages++;
4019	if (uncharge_memsw)
4020		batch->memsw_nr_pages++;
4021	return;
4022direct_uncharge:
4023	res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
4024	if (uncharge_memsw)
4025		res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
4026	if (unlikely(batch->memcg != memcg))
4027		memcg_oom_recover(memcg);
4028}
4029
4030/*
4031 * uncharge if !page_mapped(page)
4032 */
4033static struct mem_cgroup *
4034__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
4035			     bool end_migration)
4036{
4037	struct mem_cgroup *memcg = NULL;
4038	unsigned int nr_pages = 1;
4039	struct page_cgroup *pc;
4040	bool anon;
4041
4042	if (mem_cgroup_disabled())
4043		return NULL;
4044
4045	if (PageTransHuge(page)) {
4046		nr_pages <<= compound_order(page);
4047		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
4048	}
4049	/*
4050	 * Check if our page_cgroup is valid
4051	 */
4052	pc = lookup_page_cgroup(page);
4053	if (unlikely(!PageCgroupUsed(pc)))
4054		return NULL;
4055
4056	lock_page_cgroup(pc);
 
4057
4058	memcg = pc->mem_cgroup;
4059
4060	if (!PageCgroupUsed(pc))
4061		goto unlock_out;
4062
4063	anon = PageAnon(page);
 
4064
4065	switch (ctype) {
4066	case MEM_CGROUP_CHARGE_TYPE_ANON:
4067		/*
4068		 * Generally PageAnon tells if it's the anon statistics to be
4069		 * updated; but sometimes e.g. mem_cgroup_uncharge_page() is
4070		 * used before page reached the stage of being marked PageAnon.
4071		 */
4072		anon = true;
4073		/* fallthrough */
4074	case MEM_CGROUP_CHARGE_TYPE_DROP:
4075		/* See mem_cgroup_prepare_migration() */
4076		if (page_mapped(page))
4077			goto unlock_out;
4078		/*
4079		 * Pages under migration may not be uncharged.  But
4080		 * end_migration() /must/ be the one uncharging the
4081		 * unused post-migration page and so it has to call
4082		 * here with the migration bit still set.  See the
4083		 * res_counter handling below.
4084		 */
4085		if (!end_migration && PageCgroupMigration(pc))
4086			goto unlock_out;
4087		break;
4088	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
4089		if (!PageAnon(page)) {	/* Shared memory */
4090			if (page->mapping && !page_is_file_cache(page))
4091				goto unlock_out;
4092		} else if (page_mapped(page)) /* Anon */
4093				goto unlock_out;
4094		break;
4095	default:
4096		break;
4097	}
4098
4099	mem_cgroup_charge_statistics(memcg, page, anon, -nr_pages);
4100
4101	ClearPageCgroupUsed(pc);
4102	/*
4103	 * pc->mem_cgroup is not cleared here. It will be accessed when it's
4104	 * freed from LRU. This is safe because uncharged page is expected not
4105	 * to be reused (freed soon). Exception is SwapCache, it's handled by
4106	 * special functions.
4107	 */
4108
4109	unlock_page_cgroup(pc);
4110	/*
4111	 * even after unlock, we have memcg->res.usage here and this memcg
4112	 * will never be freed, so it's safe to call css_get().
4113	 */
4114	memcg_check_events(memcg, page);
4115	if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
4116		mem_cgroup_swap_statistics(memcg, true);
4117		css_get(&memcg->css);
4118	}
4119	/*
4120	 * Migration does not charge the res_counter for the
4121	 * replacement page, so leave it alone when phasing out the
4122	 * page that is unused after the migration.
4123	 */
4124	if (!end_migration && !mem_cgroup_is_root(memcg))
4125		mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
4126
4127	return memcg;
4128
4129unlock_out:
4130	unlock_page_cgroup(pc);
4131	return NULL;
4132}
4133
4134void mem_cgroup_uncharge_page(struct page *page)
4135{
4136	/* early check. */
4137	if (page_mapped(page))
4138		return;
4139	VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
4140	/*
4141	 * If the page is in swap cache, uncharge should be deferred
4142	 * to the swap path, which also properly accounts swap usage
4143	 * and handles memcg lifetime.
4144	 *
4145	 * Note that this check is not stable and reclaim may add the
4146	 * page to swap cache at any time after this.  However, if the
4147	 * page is not in swap cache by the time page->mapcount hits
4148	 * 0, there won't be any page table references to the swap
4149	 * slot, and reclaim will free it and not actually write the
4150	 * page to disk.
4151	 */
4152	if (PageSwapCache(page))
4153		return;
4154	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
4155}
4156
4157void mem_cgroup_uncharge_cache_page(struct page *page)
 
4158{
4159	VM_BUG_ON_PAGE(page_mapped(page), page);
4160	VM_BUG_ON_PAGE(page->mapping, page);
4161	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
4162}
4163
4164/*
4165 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
4166 * In those cases, pages are freed continuously and we can expect them to
4167 * be in the same memcg. Each of these callers itself limits the number of
4168 * pages freed at once, so uncharge_start/end() is called properly.
4169 * This may be called multiple (2 or more) times in one context.
4170 */
4171
4172void mem_cgroup_uncharge_start(void)
4173{
4174	current->memcg_batch.do_batch++;
4175	/* We can do nest. */
4176	if (current->memcg_batch.do_batch == 1) {
4177		current->memcg_batch.memcg = NULL;
4178		current->memcg_batch.nr_pages = 0;
4179		current->memcg_batch.memsw_nr_pages = 0;
4180	}
4181}
4182
4183void mem_cgroup_uncharge_end(void)
 
4184{
4185	struct memcg_batch_info *batch = &current->memcg_batch;
4186
4187	if (!batch->do_batch)
4188		return;
4189
4190	batch->do_batch--;
4191	if (batch->do_batch) /* If stacked, do nothing. */
4192		return;
4193
4194	if (!batch->memcg)
4195		return;
4196	/*
4197	 * This "batch->memcg" is valid without any css_get/put etc...
4198	 * because we hide charges behind us.
4199	 */
4200	if (batch->nr_pages)
4201		res_counter_uncharge(&batch->memcg->res,
4202				     batch->nr_pages * PAGE_SIZE);
4203	if (batch->memsw_nr_pages)
4204		res_counter_uncharge(&batch->memcg->memsw,
4205				     batch->memsw_nr_pages * PAGE_SIZE);
4206	memcg_oom_recover(batch->memcg);
4207	/* forget this pointer (for sanity check) */
4208	batch->memcg = NULL;
4209}
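/*
 * Illustrative sketch (not part of this file): the batched uncharge pattern
 * the comment above describes, as a truncate/unmap style caller would use it.
 * example_drop_pages() and its page array are hypothetical; the point is only
 * the start/uncharge.../end bracketing that lets mem_cgroup_do_uncharge()
 * coalesce res_counter operations.
 */
#if 0	/* example only, never compiled as part of memcontrol.c */
static void example_drop_pages(struct page **pages, int nr)
{
	int i;

	mem_cgroup_uncharge_start();
	for (i = 0; i < nr; i++)
		mem_cgroup_uncharge_cache_page(pages[i]);
	mem_cgroup_uncharge_end();	/* flushes the coalesced uncharge */
}
#endif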
4210
4211#ifdef CONFIG_SWAP
4212/*
4213 * called after __delete_from_swap_cache() and drop "page" account.
4214 * memcg information is recorded to swap_cgroup of "ent"
4215 */
4216void
4217mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
4218{
4219	struct mem_cgroup *memcg;
4220	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
4221
4222	if (!swapout) /* this was a swap cache but the swap is unused ! */
4223		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
4224
4225	memcg = __mem_cgroup_uncharge_common(page, ctype, false);
 
4226
4227	/*
4228	 * record memcg information,  if swapout && memcg != NULL,
4229	 * css_get() was called in uncharge().
4230	 */
4231	if (do_swap_account && swapout && memcg)
4232		swap_cgroup_record(ent, mem_cgroup_id(memcg));
4233}
4234#endif
4235
4236#ifdef CONFIG_MEMCG_SWAP
 
4237/*
4238 * called from swap_entry_free(). remove record in swap_cgroup and
4239 * uncharge "memsw" account.
4240 */
4241void mem_cgroup_uncharge_swap(swp_entry_t ent)
4242{
4243	struct mem_cgroup *memcg;
4244	unsigned short id;
4245
4246	if (!do_swap_account)
4247		return;
4248
4249	id = swap_cgroup_record(ent, 0);
4250	rcu_read_lock();
4251	memcg = mem_cgroup_lookup(id);
4252	if (memcg) {
4253		/*
4254		 * We uncharge this because swap is freed.
4255		 * This memcg can be an obsolete one. We avoid calling css_tryget().
4256		 */
4257		if (!mem_cgroup_is_root(memcg))
4258			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
4259		mem_cgroup_swap_statistics(memcg, false);
4260		css_put(&memcg->css);
4261	}
4262	rcu_read_unlock();
4263}
4264
 
4265/**
4266 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
4267 * @entry: swap entry to be moved
4268 * @from:  mem_cgroup which the entry is moved from
4269 * @to:  mem_cgroup which the entry is moved to
4270 *
4271 * It succeeds only when the swap_cgroup's record for this entry is the same
4272 * as the mem_cgroup's id of @from.
4273 *
4274 * Returns 0 on success, -EINVAL on failure.
4275 *
4276 * The caller must have charged to @to, IOW, called res_counter_charge() about
4277 * both res and memsw, and called css_get().
4278 */
4279static int mem_cgroup_move_swap_account(swp_entry_t entry,
4280				struct mem_cgroup *from, struct mem_cgroup *to)
4281{
4282	unsigned short old_id, new_id;
4283
4284	old_id = mem_cgroup_id(from);
4285	new_id = mem_cgroup_id(to);
4286
4287	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
4288		mem_cgroup_swap_statistics(from, false);
4289		mem_cgroup_swap_statistics(to, true);
4290		/*
4291		 * This function is only called from task migration context now.
4292		 * It postpones res_counter and refcount handling till the end
4293		 * of task migration(mem_cgroup_clear_mc()) for performance
4294		 * improvement. But we cannot postpone css_get(to)  because if
4295		 * the process that has been moved to @to does swap-in, the
4296		 * refcount of @to might be decreased to 0.
4297		 *
4298		 * We are in attach() phase, so the cgroup is guaranteed to be
4299		 * alive, so we can just call css_get().
4300		 */
4301		css_get(&to->css);
4302		return 0;
4303	}
4304	return -EINVAL;
4305}
4306#else
4307static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
4308				struct mem_cgroup *from, struct mem_cgroup *to)
4309{
4310	return -EINVAL;
4311}
4312#endif
4313
4314/*
4315 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
4316 * page belongs to.
4317 */
4318void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
4319				  struct mem_cgroup **memcgp)
4320{
4321	struct mem_cgroup *memcg = NULL;
4322	unsigned int nr_pages = 1;
4323	struct page_cgroup *pc;
4324	enum charge_type ctype;
4325
4326	*memcgp = NULL;
4327
4328	if (mem_cgroup_disabled())
4329		return;
4330
4331	if (PageTransHuge(page))
4332		nr_pages <<= compound_order(page);
4333
4334	pc = lookup_page_cgroup(page);
4335	lock_page_cgroup(pc);
4336	if (PageCgroupUsed(pc)) {
4337		memcg = pc->mem_cgroup;
4338		css_get(&memcg->css);
4339		/*
4340		 * At migrating an anonymous page, its mapcount goes down
4341		 * to 0 and uncharge() will be called. But, even if it's fully
4342		 * unmapped, migration may fail and this page has to be
4343		 * charged again. We set MIGRATION flag here and delay uncharge
4344		 * until end_migration() is called
4345		 *
4346		 * Corner Case Thinking
4347		 * A)
4348		 * When the old page was mapped as Anon and it's unmap-and-freed
4349		 * while migration was ongoing.
4350		 * If unmap finds the old page, uncharge() of it will be delayed
4351		 * until end_migration(). If unmap finds a new page, it's
4352		 * uncharged when it makes mapcount go from 1 to 0. If unmap code
4353		 * finds swap_migration_entry, the new page will not be mapped
4354		 * and end_migration() will find it (mapcount == 0).
4355		 *
4356		 * B)
4357		 * When the old page was mapped but migration fails, the kernel
4358		 * remaps it. A charge for it is kept by the MIGRATION flag even
4359		 * if mapcount goes down to 0. We can remap it successfully
4360		 * without charging it again.
4361		 *
4362		 * C)
4363		 * The "old" page is under lock_page() until the end of
4364		 * migration, so, the old page itself will not be swapped-out.
4365		 * If the new page is swapped out before end_migration, our
4366		 * hook to usual swap-out path will catch the event.
4367		 */
4368		if (PageAnon(page))
4369			SetPageCgroupMigration(pc);
4370	}
4371	unlock_page_cgroup(pc);
4372	/*
4373	 * If the page is not charged at this point,
4374	 * we return here.
4375	 */
4376	if (!memcg)
4377		return;
4378
4379	*memcgp = memcg;
4380	/*
4381	 * We charge new page before it's used/mapped. So, even if unlock_page()
4382	 * is called before end_migration, we can catch all events on this new
4383	 * page. In the case new page is migrated but not remapped, new page's
4384	 * mapcount will be finally 0 and we call uncharge in end_migration().
4385	 */
4386	if (PageAnon(page))
4387		ctype = MEM_CGROUP_CHARGE_TYPE_ANON;
4388	else
4389		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
4390	/*
4391	 * The page is committed to the memcg, but it's not actually
4392	 * charged to the res_counter since we plan on replacing the
4393	 * old one and only one page is going to be left afterwards.
4394	 */
4395	__mem_cgroup_commit_charge(memcg, newpage, nr_pages, ctype, false);
4396}
4397
4398/* remove redundant charge if migration failed*/
4399void mem_cgroup_end_migration(struct mem_cgroup *memcg,
4400	struct page *oldpage, struct page *newpage, bool migration_ok)
4401{
4402	struct page *used, *unused;
4403	struct page_cgroup *pc;
4404	bool anon;
4405
4406	if (!memcg)
4407		return;
4408
4409	if (!migration_ok) {
4410		used = oldpage;
4411		unused = newpage;
4412	} else {
4413		used = newpage;
4414		unused = oldpage;
4415	}
4416	anon = PageAnon(used);
4417	__mem_cgroup_uncharge_common(unused,
4418				     anon ? MEM_CGROUP_CHARGE_TYPE_ANON
4419				     : MEM_CGROUP_CHARGE_TYPE_CACHE,
4420				     true);
4421	css_put(&memcg->css);
4422	/*
4423	 * We disallowed uncharge of pages under migration because mapcount
4424	 * of the page goes down to zero, temporarily.
4425	 * Clear the flag and check whether the page should be charged.
4426	 */
4427	pc = lookup_page_cgroup(oldpage);
4428	lock_page_cgroup(pc);
4429	ClearPageCgroupMigration(pc);
4430	unlock_page_cgroup(pc);
4431
4432	/*
4433	 * If a page is a file cache, radix-tree replacement is atomic
4434	 * and we can skip this check. When it was an Anon page, its mapcount
4435	 * goes down to 0. But because we added the MIGRATION flag, it's not
4436	 * uncharged yet. There are several cases, but the page->mapcount check
4437	 * and the USED bit check in mem_cgroup_uncharge_page() will do enough
4438	 * checking. (see prepare_charge() also)
4439	 */
4440	if (anon)
4441		mem_cgroup_uncharge_page(used);
4442}
4443
4444/*
4445 * At replace page cache, newpage is not under any memcg but it's on
4446 * LRU. So, this function doesn't touch res_counter but handles LRU
4447 * in correct way. Both pages are locked so we cannot race with uncharge.
4448 */
4449void mem_cgroup_replace_page_cache(struct page *oldpage,
4450				  struct page *newpage)
4451{
4452	struct mem_cgroup *memcg = NULL;
4453	struct page_cgroup *pc;
4454	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
4455
4456	if (mem_cgroup_disabled())
4457		return;
4458
4459	pc = lookup_page_cgroup(oldpage);
4460	/* fix accounting on old pages */
4461	lock_page_cgroup(pc);
4462	if (PageCgroupUsed(pc)) {
4463		memcg = pc->mem_cgroup;
4464		mem_cgroup_charge_statistics(memcg, oldpage, false, -1);
4465		ClearPageCgroupUsed(pc);
4466	}
4467	unlock_page_cgroup(pc);
4468
4469	/*
4470	 * When called from shmem_replace_page(), in some cases the
4471	 * oldpage has already been charged, and in some cases not.
4472	 */
4473	if (!memcg)
4474		return;
4475	/*
4476	 * Even if newpage->mapping was NULL before starting replacement,
4477	 * the newpage may be on the LRU (or a pagevec for the LRU) already. We
4478	 * lock the LRU while we overwrite pc->mem_cgroup.
4479	 */
4480	__mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
4481}
4482
4483#ifdef CONFIG_DEBUG_VM
4484static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
4485{
4486	struct page_cgroup *pc;
4487
4488	pc = lookup_page_cgroup(page);
4489	/*
4490	 * Can be NULL while feeding pages into the page allocator for
4491	 * the first time, i.e. during boot or memory hotplug;
4492	 * or when mem_cgroup_disabled().
4493	 */
4494	if (likely(pc) && PageCgroupUsed(pc))
4495		return pc;
4496	return NULL;
4497}
4498
4499bool mem_cgroup_bad_page_check(struct page *page)
4500{
4501	if (mem_cgroup_disabled())
4502		return false;
4503
4504	return lookup_page_cgroup_used(page) != NULL;
4505}
4506
4507void mem_cgroup_print_bad_page(struct page *page)
 
4508{
4509	struct page_cgroup *pc;
4510
4511	pc = lookup_page_cgroup_used(page);
4512	if (pc) {
4513		pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
4514			 pc, pc->flags, pc->mem_cgroup);
4515	}
4516}
4517#endif
4518
4519static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
4520				unsigned long long val)
4521{
4522	int retry_count;
4523	u64 memswlimit, memlimit;
4524	int ret = 0;
4525	int children = mem_cgroup_count_children(memcg);
4526	u64 curusage, oldusage;
4527	int enlarge;
4528
4529	/*
4530	 * For keeping hierarchical_reclaim simple, how long we should retry
4531	 * depends on the caller. We set our retry count to be a function
4532	 * of the number of children which we should visit in this loop.
4533	 */
4534	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
4535
4536	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
4537
4538	enlarge = 0;
4539	while (retry_count) {
4540		if (signal_pending(current)) {
4541			ret = -EINTR;
4542			break;
4543		}
4544		/*
4545		 * Rather than hiding all of this in some function, do it in an
4546		 * open-coded manner so you can see what it really does.
4547		 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
4548		 */
4549		mutex_lock(&set_limit_mutex);
4550		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4551		if (memswlimit < val) {
 
4552			ret = -EINVAL;
4553			mutex_unlock(&set_limit_mutex);
4554			break;
4555		}
4556
4557		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4558		if (memlimit < val)
4559			enlarge = 1;
4560
4561		ret = res_counter_set_limit(&memcg->res, val);
4562		if (!ret) {
4563			if (memswlimit == val)
4564				memcg->memsw_is_minimum = true;
4565			else
4566				memcg->memsw_is_minimum = false;
4567		}
4568		mutex_unlock(&set_limit_mutex);
4569
4570		if (!ret)
4571			break;
4572
4573		mem_cgroup_reclaim(memcg, GFP_KERNEL,
4574				   MEM_CGROUP_RECLAIM_SHRINK);
4575		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
4576		/* Usage is reduced ? */
4577		if (curusage >= oldusage)
4578			retry_count--;
4579		else
4580			oldusage = curusage;
4581	}
4582	if (!ret && enlarge)
4583		memcg_oom_recover(memcg);
4584
4585	return ret;
4586}
4587
4588static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
4589					unsigned long long val)
4590{
4591	int retry_count;
4592	u64 memlimit, memswlimit, oldusage, curusage;
4593	int children = mem_cgroup_count_children(memcg);
4594	int ret = -EBUSY;
4595	int enlarge = 0;
4596
4597	/* see mem_cgroup_resize_res_limit */
4598	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
4599	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
4600	while (retry_count) {
4601		if (signal_pending(current)) {
4602			ret = -EINTR;
4603			break;
4604		}
4605		/*
4606		 * Rather than hiding all of this in some function, do it in an
4607		 * open-coded manner so you can see what it really does.
4608		 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
4609		 */
4610		mutex_lock(&set_limit_mutex);
4611		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4612		if (memlimit > val) {
4613			ret = -EINVAL;
4614			mutex_unlock(&set_limit_mutex);
4615			break;
4616		}
4617		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4618		if (memswlimit < val)
4619			enlarge = 1;
4620		ret = res_counter_set_limit(&memcg->memsw, val);
4621		if (!ret) {
4622			if (memlimit == val)
4623				memcg->memsw_is_minimum = true;
4624			else
4625				memcg->memsw_is_minimum = false;
4626		}
4627		mutex_unlock(&set_limit_mutex);
4628
4629		if (!ret)
4630			break;
4631
4632		mem_cgroup_reclaim(memcg, GFP_KERNEL,
4633				   MEM_CGROUP_RECLAIM_NOSWAP |
4634				   MEM_CGROUP_RECLAIM_SHRINK);
4635		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
4636		/* Usage is reduced ? */
4637		if (curusage >= oldusage)
4638			retry_count--;
4639		else
4640			oldusage = curusage;
4641	}
4642	if (!ret && enlarge)
4643		memcg_oom_recover(memcg);
 
4644	return ret;
4645}
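/*
 * Illustrative user-space sketch (not part of this file): how the
 * memcg->res.limit <= memcg->memsw.limit invariant enforced by the two
 * resize functions above shows up through the cgroup v1 files. The mount
 * point /sys/fs/cgroup/memory and the group name "demo" are assumptions;
 * memory.memsw.* files exist only when swap accounting is enabled. When
 * growing both limits, memory.memsw.limit_in_bytes must be raised first,
 * otherwise the write to memory.limit_in_bytes fails with EINVAL.
 */
#if 0	/* example only, never compiled as part of memcontrol.c */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);	/* kernel errors (e.g. EINVAL) surface here */
}

int main(void)
{
	const char *g = "/sys/fs/cgroup/memory/demo";
	char path[256];

	/* Raise the mem+swap limit first ... */
	snprintf(path, sizeof(path), "%s/memory.memsw.limit_in_bytes", g);
	if (write_str(path, "1G"))
		perror("memsw limit");

	/* ... then the plain memory limit can follow it up. */
	snprintf(path, sizeof(path), "%s/memory.limit_in_bytes", g);
	if (write_str(path, "512M"))
		perror("mem limit");
	return 0;
}
#endif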
4646
4647unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
4648					    gfp_t gfp_mask,
4649					    unsigned long *total_scanned)
4650{
4651	unsigned long nr_reclaimed = 0;
4652	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
4653	unsigned long reclaimed;
4654	int loop = 0;
4655	struct mem_cgroup_tree_per_zone *mctz;
4656	unsigned long long excess;
4657	unsigned long nr_scanned;
4658
4659	if (order > 0)
4660		return 0;
4661
4662	mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
4663	/*
4664	 * This loop can run a while, especially if mem_cgroups continuously
4665	 * keep exceeding their soft limit and putting the system under
4666	 * pressure
4667	 */
4668	do {
4669		if (next_mz)
4670			mz = next_mz;
4671		else
4672			mz = mem_cgroup_largest_soft_limit_node(mctz);
4673		if (!mz)
4674			break;
4675
4676		nr_scanned = 0;
4677		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
4678						    gfp_mask, &nr_scanned);
4679		nr_reclaimed += reclaimed;
4680		*total_scanned += nr_scanned;
4681		spin_lock(&mctz->lock);
4682
4683		/*
4684		 * If we failed to reclaim anything from this memory cgroup
4685		 * it is time to move on to the next cgroup
4686		 */
4687		next_mz = NULL;
4688		if (!reclaimed) {
4689			do {
4690				/*
4691				 * Loop until we find yet another one.
4692				 *
4693				 * By the time we get the soft_limit lock
4694				 * again, someone might have added the
4695				 * group back on the RB tree. Iterate to
4696				 * make sure we get a different mem.
4697				 * mem_cgroup_largest_soft_limit_node returns
4698				 * NULL if no other cgroup is present on
4699				 * the tree
4700				 */
4701				next_mz =
4702				__mem_cgroup_largest_soft_limit_node(mctz);
4703				if (next_mz == mz)
4704					css_put(&next_mz->memcg->css);
4705				else /* next_mz == NULL or other memcg */
4706					break;
4707			} while (1);
4708		}
4709		__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
4710		excess = res_counter_soft_limit_excess(&mz->memcg->res);
4711		/*
4712		 * One school of thought says that we should not add
4713		 * back the node to the tree if reclaim returns 0.
4714		 * But our reclaim could return 0, simply because due
4715		 * to priority we are exposing a smaller subset of
4716		 * memory to reclaim from. Consider this as a longer
4717		 * term TODO.
4718		 */
4719		/* If excess == 0, no tree ops */
4720		__mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
4721		spin_unlock(&mctz->lock);
4722		css_put(&mz->memcg->css);
4723		loop++;
4724		/*
4725		 * Could not reclaim anything and there are no more
4726		 * mem cgroups to try or we seem to be looping without
4727		 * reclaiming anything.
4728		 */
4729		if (!nr_reclaimed &&
4730			(next_mz == NULL ||
4731			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
4732			break;
4733	} while (!nr_reclaimed);
4734	if (next_mz)
4735		css_put(&next_mz->memcg->css);
4736	return nr_reclaimed;
4737}
4738
4739/**
4740 * mem_cgroup_force_empty_list - clears LRU of a group
4741 * @memcg: group to clear
4742 * @node: NUMA node
4743 * @zid: zone id
4744 * @lru: lru to clear
4745 *
4746 * Traverse a specified page_cgroup list and try to drop them all.  This doesn't
4747 * reclaim the pages themselves - pages are moved to the parent (or root)
4748 * group.
4749 */
4750static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
4751				int node, int zid, enum lru_list lru)
4752{
4753	struct lruvec *lruvec;
4754	unsigned long flags;
4755	struct list_head *list;
4756	struct page *busy;
4757	struct zone *zone;
4758
4759	zone = &NODE_DATA(node)->node_zones[zid];
4760	lruvec = mem_cgroup_zone_lruvec(zone, memcg);
4761	list = &lruvec->lists[lru];
4762
4763	busy = NULL;
4764	do {
4765		struct page_cgroup *pc;
4766		struct page *page;
4767
4768		spin_lock_irqsave(&zone->lru_lock, flags);
4769		if (list_empty(list)) {
4770			spin_unlock_irqrestore(&zone->lru_lock, flags);
4771			break;
4772		}
4773		page = list_entry(list->prev, struct page, lru);
4774		if (busy == page) {
4775			list_move(&page->lru, list);
4776			busy = NULL;
4777			spin_unlock_irqrestore(&zone->lru_lock, flags);
4778			continue;
4779		}
4780		spin_unlock_irqrestore(&zone->lru_lock, flags);
4781
4782		pc = lookup_page_cgroup(page);
4783
4784		if (mem_cgroup_move_parent(page, pc, memcg)) {
4785			/* found lock contention or "pc" is obsolete. */
4786			busy = page;
4787			cond_resched();
4788		} else
4789			busy = NULL;
4790	} while (!list_empty(list));
4791}
4792
4793/*
4794 * Make the mem_cgroup's charge 0 if there is no task, by moving
4795 * all the charges and pages to the parent.
4796 * This enables deleting this mem_cgroup.
4797 *
4798 * Caller is responsible for holding css reference on the memcg.
4799 */
4800static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
4801{
4802	int node, zid;
4803	u64 usage;
4804
4805	do {
4806		/* This is for making all *used* pages to be on LRU. */
4807		lru_add_drain_all();
4808		drain_all_stock_sync(memcg);
4809		mem_cgroup_start_move(memcg);
4810		for_each_node_state(node, N_MEMORY) {
4811			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
4812				enum lru_list lru;
4813				for_each_lru(lru) {
4814					mem_cgroup_force_empty_list(memcg,
4815							node, zid, lru);
4816				}
4817			}
4818		}
4819		mem_cgroup_end_move(memcg);
4820		memcg_oom_recover(memcg);
4821		cond_resched();
4822
4823		/*
4824		 * Kernel memory may not necessarily be trackable to a specific
4825		 * process, so such pages are not migrated, and therefore we
4826		 * can't expect their charge to drop to 0 here.
4827		 * Having res filled up with kmem only is enough.
4828		 *
4829		 * This is a safety check because mem_cgroup_force_empty_list
4830		 * could have raced with mem_cgroup_replace_page_cache callers
4831		 * so the lru seemed empty but the page could have been added
4832		 * right after the check. RES_USAGE should be safe as we always
4833		 * charge before adding to the LRU.
4834		 */
4835		usage = res_counter_read_u64(&memcg->res, RES_USAGE) -
4836			res_counter_read_u64(&memcg->kmem, RES_USAGE);
4837	} while (usage > 0);
4838}
4839
4840static inline bool memcg_has_children(struct mem_cgroup *memcg)
4841{
4842	lockdep_assert_held(&memcg_create_mutex);
4843	/*
4844	 * The lock does not prevent addition or deletion to the list
4845	 * of children, but it prevents a new child from being
4846	 * initialized based on this parent in css_online(), so it's
4847	 * enough to decide whether hierarchically inherited
4848	 * attributes can still be changed or not.
4849	 */
4850	return memcg->use_hierarchy &&
4851		!list_empty(&memcg->css.cgroup->children);
4852}
4853
4854/*
4855 * Reclaims as many pages from the given memcg as possible and moves
4856 * the rest to the parent.
4857 *
4858 * Caller is responsible for holding css reference for memcg.
4859 */
4860static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
4861{
4862	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
4863	struct cgroup *cgrp = memcg->css.cgroup;
4864
4865	/* returns EBUSY if there is a task or if we come here twice. */
4866	if (cgroup_has_tasks(cgrp) || !list_empty(&cgrp->children))
4867		return -EBUSY;
4868
4869	/* we call try-to-free pages to make this cgroup empty */
4870	lru_add_drain_all();
4871	/* try to free all pages in this cgroup */
4872	while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
4873		int progress;
4874
4875		if (signal_pending(current))
4876			return -EINTR;
4877
4878		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
4879						false);
4880		if (!progress) {
4881			nr_retries--;
4882			/* maybe some writeback is necessary */
4883			congestion_wait(BLK_RW_ASYNC, HZ/10);
4884		}
4885
4886	}
4887	lru_add_drain();
4888	mem_cgroup_reparent_charges(memcg);
4889
4890	return 0;
4891}
4892
4893static int mem_cgroup_force_empty_write(struct cgroup_subsys_state *css,
4894					unsigned int event)
 
4895{
4896	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4897
4898	if (mem_cgroup_is_root(memcg))
4899		return -EINVAL;
4900	return mem_cgroup_force_empty(memcg);
4901}
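/*
 * Illustrative user-space sketch (not part of this file): triggering
 * mem_cgroup_force_empty() above through memory.force_empty. The written
 * value is ignored (0 by convention); the write fails with EBUSY while the
 * group still has tasks or children, and with EINVAL on the root group.
 * The cgroup path is an assumption.
 */
#if 0	/* example only, never compiled as part of memcontrol.c */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/fs/cgroup/memory/demo/memory.force_empty", "w");

	if (!f) {
		perror("open memory.force_empty");
		return 1;
	}
	fputs("0", f);			/* the write itself is the trigger */
	if (fclose(f))
		perror("memory.force_empty");	/* e.g. EBUSY: tasks remain */
	return 0;
}
#endif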
4902
4903static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
4904				     struct cftype *cft)
4905{
4906	return mem_cgroup_from_css(css)->use_hierarchy;
4907}
4908
4909static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
4910				      struct cftype *cft, u64 val)
4911{
4912	int retval = 0;
4913	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4914	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(css_parent(&memcg->css));
4915
4916	mutex_lock(&memcg_create_mutex);
4917
4918	if (memcg->use_hierarchy == val)
4919		goto out;
4920
4921	/*
4922	 * If parent's use_hierarchy is set, we can't make any modifications
4923	 * in the child subtrees. If it is unset, then the change can
4924	 * occur, provided the current cgroup has no children.
4925	 *
4926	 * For the root cgroup, parent_memcg is NULL; we allow the value to be
4927	 * set if there are no children.
4928	 */
4929	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
4930				(val == 1 || val == 0)) {
4931		if (list_empty(&memcg->css.cgroup->children))
4932			memcg->use_hierarchy = val;
4933		else
4934			retval = -EBUSY;
4935	} else
4936		retval = -EINVAL;
4937
4938out:
4939	mutex_unlock(&memcg_create_mutex);
4940
4941	return retval;
4942}
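/*
 * Illustrative user-space sketch (not part of this file): toggling
 * memory.use_hierarchy, which lands in mem_cgroup_hierarchy_write() above.
 * It only succeeds while the group has no children (else EBUSY) and while
 * the parent itself is not hierarchical (else EINVAL). Paths are
 * assumptions.
 */
#if 0	/* example only, never compiled as part of memcontrol.c */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/fs/cgroup/memory/demo/memory.use_hierarchy",
		      O_WRONLY);

	if (fd < 0 || write(fd, "1", 1) != 1)
		perror("memory.use_hierarchy");
	if (fd >= 0)
		close(fd);
	return 0;
}
#endif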
4943
4944
4945static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
4946					       enum mem_cgroup_stat_index idx)
4947{
4948	struct mem_cgroup *iter;
4949	long val = 0;
4950
4951	/* Per-cpu values can be negative, use a signed accumulator */
4952	for_each_mem_cgroup_tree(iter, memcg)
4953		val += mem_cgroup_read_stat(iter, idx);
4954
4955	if (val < 0) /* race ? */
4956		val = 0;
4957	return val;
4958}
4959
4960static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
4961{
4962	u64 val;
4963
4964	if (!mem_cgroup_is_root(memcg)) {
4965		if (!swap)
4966			return res_counter_read_u64(&memcg->res, RES_USAGE);
4967		else
4968			return res_counter_read_u64(&memcg->memsw, RES_USAGE);
4969	}
4970
4971	/*
4972	 * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS
4973	 * as well as in MEM_CGROUP_STAT_RSS_HUGE.
4974	 */
4975	val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
4976	val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
4977
4978	if (swap)
4979		val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP);
4980
4981	return val << PAGE_SHIFT;
4982}
4983
4984static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
4985				   struct cftype *cft)
4986{
4987	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4988	u64 val;
4989	int name;
4990	enum res_type type;
4991
4992	type = MEMFILE_TYPE(cft->private);
4993	name = MEMFILE_ATTR(cft->private);
4994
4995	switch (type) {
4996	case _MEM:
4997		if (name == RES_USAGE)
4998			val = mem_cgroup_usage(memcg, false);
4999		else
5000			val = res_counter_read_u64(&memcg->res, name);
5001		break;
5002	case _MEMSWAP:
5003		if (name == RES_USAGE)
5004			val = mem_cgroup_usage(memcg, true);
5005		else
5006			val = res_counter_read_u64(&memcg->memsw, name);
5007		break;
5008	case _KMEM:
5009		val = res_counter_read_u64(&memcg->kmem, name);
5010		break;
5011	default:
5012		BUG();
5013	}
5014
5015	return val;
5016}
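/*
 * Illustrative user-space sketch (not part of this file): the files served
 * by mem_cgroup_read_u64() above are plain decimal counters. This reads a
 * few of them for one group; the path is an assumption, and memory.memsw.*
 * / memory.kmem.* variants only exist when the corresponding accounting is
 * available.
 */
#if 0	/* example only, never compiled as part of memcontrol.c */
#include <stdio.h>

int main(void)
{
	static const char * const files[] = {
		"memory.usage_in_bytes",
		"memory.limit_in_bytes",
		"memory.max_usage_in_bytes",
		"memory.failcnt",
	};
	char path[256];
	unsigned long long v;
	unsigned int i;
	FILE *f;

	for (i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
		snprintf(path, sizeof(path),
			 "/sys/fs/cgroup/memory/demo/%s", files[i]);
		f = fopen(path, "r");
		if (f && fscanf(f, "%llu", &v) == 1)
			printf("%s %llu\n", files[i], v);
		if (f)
			fclose(f);
	}
	return 0;
}
#endif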
5017
5018#ifdef CONFIG_MEMCG_KMEM
5019/* should be called with activate_kmem_mutex held */
5020static int __memcg_activate_kmem(struct mem_cgroup *memcg,
5021				 unsigned long long limit)
5022{
5023	int err = 0;
5024	int memcg_id;
5025
5026	if (memcg_kmem_is_active(memcg))
5027		return 0;
5028
5029	/*
5030	 * We are going to allocate memory for data shared by all memory
5031	 * cgroups so let's stop accounting here.
5032	 */
5033	memcg_stop_kmem_account();
5034
5035	/*
5036	 * For simplicity, we won't allow this to be disabled.  It also can't
5037	 * be changed if the cgroup has children already, or if tasks had
5038	 * already joined.
5039	 *
5040	 * If tasks join before we set the limit, a person looking at
5041	 * kmem.usage_in_bytes will have no way to determine when it took
5042	 * place, which makes the value quite meaningless.
5043	 *
5044	 * After it first became limited, changes in the value of the limit are
5045	 * of course permitted.
5046	 */
5047	mutex_lock(&memcg_create_mutex);
5048	if (cgroup_has_tasks(memcg->css.cgroup) || memcg_has_children(memcg))
5049		err = -EBUSY;
5050	mutex_unlock(&memcg_create_mutex);
5051	if (err)
5052		goto out;
5053
5054	memcg_id = ida_simple_get(&kmem_limited_groups,
5055				  0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
5056	if (memcg_id < 0) {
5057		err = memcg_id;
5058		goto out;
5059	}
5060
5061	/*
5062	 * Make sure we have enough space for this cgroup in each root cache's
5063	 * memcg_params.
5064	 */
5065	err = memcg_update_all_caches(memcg_id + 1);
5066	if (err)
5067		goto out_rmid;
5068
5069	memcg->kmemcg_id = memcg_id;
5070	INIT_LIST_HEAD(&memcg->memcg_slab_caches);
5071	mutex_init(&memcg->slab_caches_mutex);
5072
5073	/*
5074	 * We couldn't have accounted to this cgroup, because it hasn't got the
5075	 * active bit set yet, so this should succeed.
5076	 */
5077	err = res_counter_set_limit(&memcg->kmem, limit);
5078	VM_BUG_ON(err);
5079
5080	static_key_slow_inc(&memcg_kmem_enabled_key);
5081	/*
5082	 * Setting the active bit after enabling static branching will
5083	 * guarantee no one starts accounting before all call sites are
5084	 * patched.
5085	 */
5086	memcg_kmem_set_active(memcg);
5087out:
5088	memcg_resume_kmem_account();
5089	return err;
5090
5091out_rmid:
5092	ida_simple_remove(&kmem_limited_groups, memcg_id);
5093	goto out;
5094}
5095
5096static int memcg_activate_kmem(struct mem_cgroup *memcg,
5097			       unsigned long long limit)
5098{
5099	int ret;
5100
5101	mutex_lock(&activate_kmem_mutex);
5102	ret = __memcg_activate_kmem(memcg, limit);
5103	mutex_unlock(&activate_kmem_mutex);
5104	return ret;
5105}
5106
5107static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
5108				   unsigned long long val)
5109{
5110	int ret;
5111
5112	if (!memcg_kmem_is_active(memcg))
5113		ret = memcg_activate_kmem(memcg, val);
5114	else
5115		ret = res_counter_set_limit(&memcg->kmem, val);
5116	return ret;
5117}
5118
5119static int memcg_propagate_kmem(struct mem_cgroup *memcg)
5120{
5121	int ret = 0;
5122	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5123
 
5124	if (!parent)
5125		return 0;
5126
5127	mutex_lock(&activate_kmem_mutex);
5128	/*
5129	 * If the parent cgroup is not kmem-active now, it cannot be activated
5130	 * after this point, because it has at least one child already.
5131	 */
5132	if (memcg_kmem_is_active(parent))
5133		ret = __memcg_activate_kmem(memcg, RES_COUNTER_MAX);
5134	mutex_unlock(&activate_kmem_mutex);
5135	return ret;
5136}
5137#else
5138static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
5139				   unsigned long long val)
5140{
5141	return -EINVAL;
5142}
5143#endif /* CONFIG_MEMCG_KMEM */
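/*
 * Illustrative user-space sketch (not part of this file): kernel memory
 * accounting is activated the first time memory.kmem.limit_in_bytes is
 * written, which is what memcg_activate_kmem() above implements. The write
 * fails with EBUSY once the group already has tasks or children, so it has
 * to happen right after the group is created. Requires a CONFIG_MEMCG_KMEM
 * kernel; the path is an assumption.
 */
#if 0	/* example only, never compiled as part of memcontrol.c */
#include <stdio.h>

int main(void)
{
	const char *p = "/sys/fs/cgroup/memory/demo/memory.kmem.limit_in_bytes";
	FILE *f = fopen(p, "w");

	if (!f) {
		perror(p);
		return 1;
	}
	fputs("64M", f);	/* first write activates kmem accounting */
	if (fclose(f))
		perror(p);	/* EBUSY: tasks/children already exist */
	return 0;
}
#endif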
5144
5145/*
5146 * The user of this function is...
5147 * RES_LIMIT.
5148 */
5149static int mem_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
5150			    char *buffer)
5151{
5152	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5153	enum res_type type;
5154	int name;
5155	unsigned long long val;
5156	int ret;
5157
5158	type = MEMFILE_TYPE(cft->private);
5159	name = MEMFILE_ATTR(cft->private);
5160
5161	switch (name) {
5162	case RES_LIMIT:
5163		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
5164			ret = -EINVAL;
5165			break;
5166		}
5167		/* This function does all necessary parse...reuse it */
5168		ret = res_counter_memparse_write_strategy(buffer, &val);
5169		if (ret)
5170			break;
5171		if (type == _MEM)
5172			ret = mem_cgroup_resize_limit(memcg, val);
5173		else if (type == _MEMSWAP)
5174			ret = mem_cgroup_resize_memsw_limit(memcg, val);
5175		else if (type == _KMEM)
5176			ret = memcg_update_kmem_limit(memcg, val);
5177		else
5178			return -EINVAL;
5179		break;
5180	case RES_SOFT_LIMIT:
5181		ret = res_counter_memparse_write_strategy(buffer, &val);
5182		if (ret)
5183			break;
5184		/*
5185		 * For memsw, soft limits are hard to implement in terms
5186		 * of semantics, for now, we support soft limits for
5187		 * control without swap
5188		 */
5189		if (type == _MEM)
5190			ret = res_counter_set_soft_limit(&memcg->res, val);
5191		else
5192			ret = -EINVAL;
5193		break;
5194	default:
5195		ret = -EINVAL; /* should be BUG() ? */
5196		break;
5197	}
5198	return ret;
5199}
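/*
 * Illustrative user-space sketch (not part of this file): mem_cgroup_write()
 * above parses values with res_counter_memparse_write_strategy(), so the
 * usual memparse suffixes (K/M/G) work and "-1" means unlimited. Soft limits
 * are only accepted for plain memory (memory.soft_limit_in_bytes), not for
 * memsw, as the switch above shows. Paths are assumptions.
 */
#if 0	/* example only, never compiled as part of memcontrol.c */
#include <stdio.h>

static void set_memcg_file(const char *file, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/fs/cgroup/memory/demo/%s", file);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return;
	}
	fputs(val, f);
	if (fclose(f))
		perror(path);
}

int main(void)
{
	set_memcg_file("memory.soft_limit_in_bytes", "256M");	/* reclaim target */
	set_memcg_file("memory.limit_in_bytes", "-1");		/* hard limit: unlimited */
	return 0;
}
#endif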
5200
5201static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
5202		unsigned long long *mem_limit, unsigned long long *memsw_limit)
5203{
5204	unsigned long long min_limit, min_memsw_limit, tmp;
 
5205
5206	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
5207	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
5208	if (!memcg->use_hierarchy)
5209		goto out;
5210
5211	while (css_parent(&memcg->css)) {
5212		memcg = mem_cgroup_from_css(css_parent(&memcg->css));
5213		if (!memcg->use_hierarchy)
5214			break;
5215		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
5216		min_limit = min(min_limit, tmp);
5217		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
5218		min_memsw_limit = min(min_memsw_limit, tmp);
5219	}
5220out:
5221	*mem_limit = min_limit;
5222	*memsw_limit = min_memsw_limit;
5223}
5224
5225static int mem_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
5226{
5227	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5228	int name;
5229	enum res_type type;
5230
5231	type = MEMFILE_TYPE(event);
5232	name = MEMFILE_ATTR(event);
5233
5234	switch (name) {
5235	case RES_MAX_USAGE:
5236		if (type == _MEM)
5237			res_counter_reset_max(&memcg->res);
5238		else if (type == _MEMSWAP)
5239			res_counter_reset_max(&memcg->memsw);
5240		else if (type == _KMEM)
5241			res_counter_reset_max(&memcg->kmem);
5242		else
5243			return -EINVAL;
5244		break;
5245	case RES_FAILCNT:
5246		if (type == _MEM)
5247			res_counter_reset_failcnt(&memcg->res);
5248		else if (type == _MEMSWAP)
5249			res_counter_reset_failcnt(&memcg->memsw);
5250		else if (type == _KMEM)
5251			res_counter_reset_failcnt(&memcg->kmem);
5252		else
5253			return -EINVAL;
5254		break;
5255	}
5256
5257	return 0;
5258}
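/*
 * Illustrative user-space sketch (not part of this file): the reset handler
 * above treats memory.max_usage_in_bytes and memory.failcnt as triggers;
 * writing any value (0 by convention) clears the watermark or the failure
 * counter, for the plain, memsw and kmem variants alike. Paths are
 * assumptions.
 */
#if 0	/* example only, never compiled as part of memcontrol.c */
#include <stdio.h>

int main(void)
{
	static const char * const files[] = {
		"memory.max_usage_in_bytes",
		"memory.failcnt",
	};
	char path[256];
	unsigned int i;
	FILE *f;

	for (i = 0; i < 2; i++) {
		snprintf(path, sizeof(path),
			 "/sys/fs/cgroup/memory/demo/%s", files[i]);
		f = fopen(path, "w");
		if (f) {
			fputs("0", f);	/* value ignored, write is the reset */
			fclose(f);
		}
	}
	return 0;
}
#endif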
5259
5260static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
5261					struct cftype *cft)
5262{
5263	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
5264}
5265
5266#ifdef CONFIG_MMU
5267static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
5268					struct cftype *cft, u64 val)
5269{
5270	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5271
5272	if (val >= (1 << NR_MOVE_TYPE))
5273		return -EINVAL;
5274
5275	/*
5276	 * No kind of locking is needed in here, because ->can_attach() will
5277	 * check this value once in the beginning of the process, and then carry
5278	 * on with stale data. This means that changes to this value will only
5279	 * affect task migrations starting after the change.
5280	 */
5281	memcg->move_charge_at_immigrate = val;
5282	return 0;
5283}
5284#else
5285static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
5286					struct cftype *cft, u64 val)
5287{
5288	return -ENOSYS;
5289}
5290#endif
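/*
 * Illustrative user-space sketch (not part of this file): the value written
 * to memory.move_charge_at_immigrate is a bitmask checked against
 * NR_MOVE_TYPE above; bit 0 moves anonymous pages and bit 1 moves file pages
 * when a task migrates into the group, and it only affects migrations that
 * start after the write. The path is an assumption.
 */
#if 0	/* example only, never compiled as part of memcontrol.c */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/fs/cgroup/memory/demo/"
			"memory.move_charge_at_immigrate", "w");

	if (!f) {
		perror("move_charge_at_immigrate");
		return 1;
	}
	fputs("3", f);	/* 1 (anon) | 2 (file): move both kinds of charge */
	fclose(f);
	return 0;
}
#endif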
5291
5292#ifdef CONFIG_NUMA
5293static int memcg_numa_stat_show(struct seq_file *m, void *v)
5294{
5295	struct numa_stat {
5296		const char *name;
5297		unsigned int lru_mask;
5298	};
5299
5300	static const struct numa_stat stats[] = {
5301		{ "total", LRU_ALL },
5302		{ "file", LRU_ALL_FILE },
5303		{ "anon", LRU_ALL_ANON },
5304		{ "unevictable", BIT(LRU_UNEVICTABLE) },
5305	};
5306	const struct numa_stat *stat;
5307	int nid;
5308	unsigned long nr;
5309	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
 
5310
5311	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
5312		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
5313		seq_printf(m, "%s=%lu", stat->name, nr);
5314		for_each_node_state(nid, N_MEMORY) {
5315			nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
5316							  stat->lru_mask);
5317			seq_printf(m, " N%d=%lu", nid, nr);
5318		}
5319		seq_putc(m, '\n');
5320	}
5321
5322	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
5323		struct mem_cgroup *iter;
5324
5325		nr = 0;
5326		for_each_mem_cgroup_tree(iter, memcg)
5327			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
5328		seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
5329		for_each_node_state(nid, N_MEMORY) {
5330			nr = 0;
5331			for_each_mem_cgroup_tree(iter, memcg)
5332				nr += mem_cgroup_node_nr_lru_pages(
5333					iter, nid, stat->lru_mask);
5334			seq_printf(m, " N%d=%lu", nid, nr);
5335		}
5336		seq_putc(m, '\n');
5337	}
5338
5339	return 0;
5340}
5341#endif /* CONFIG_NUMA */
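/*
 * Illustrative user-space sketch (not part of this file): memory.numa_stat
 * is emitted by memcg_numa_stat_show() above as lines of the form
 * "<name>=<total> N0=<n0> N1=<n1> ...", followed by "hierarchical_<name>="
 * lines, with counts in pages. This just prints the per-line totals; the
 * path is an assumption.
 */
#if 0	/* example only, never compiled as part of memcontrol.c */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/fs/cgroup/memory/demo/memory.numa_stat", "r");
	char name[64];
	unsigned long total;

	if (!f) {
		perror("memory.numa_stat");
		return 1;
	}
	/* The first field of each line is "<name>=<total>". */
	while (fscanf(f, "%63[^=]=%lu", name, &total) == 2) {
		printf("%s: %lu pages\n", name, total);
		fscanf(f, "%*[^\n]");	/* skip the per-node breakdown */
		fgetc(f);		/* and the trailing newline */
	}
	fclose(f);
	return 0;
}
#endif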
5342
5343static inline void mem_cgroup_lru_names_not_uptodate(void)
5344{
5345	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
5346}
5347
5348static int memcg_stat_show(struct seq_file *m, void *v)
5349{
5350	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5351	struct mem_cgroup *mi;
5352	unsigned int i;
5353
5354	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
5355		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
5356			continue;
5357		seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
5358			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
5359	}
5360
5361	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
5362		seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
5363			   mem_cgroup_read_events(memcg, i));
5364
5365	for (i = 0; i < NR_LRU_LISTS; i++)
5366		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
5367			   mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
5368
5369	/* Hierarchical information */
5370	{
5371		unsigned long long limit, memsw_limit;
5372		memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
5373		seq_printf(m, "hierarchical_memory_limit %llu\n", limit);
5374		if (do_swap_account)
5375			seq_printf(m, "hierarchical_memsw_limit %llu\n",
5376				   memsw_limit);
5377	}
5378
5379	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
5380		long long val = 0;
5381
5382		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
5383			continue;
5384		for_each_mem_cgroup_tree(mi, memcg)
5385			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
5386		seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
5387	}
5388
5389	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
5390		unsigned long long val = 0;
 
5391
5392		for_each_mem_cgroup_tree(mi, memcg)
5393			val += mem_cgroup_read_events(mi, i);
5394		seq_printf(m, "total_%s %llu\n",
5395			   mem_cgroup_events_names[i], val);
5396	}
5397
5398	for (i = 0; i < NR_LRU_LISTS; i++) {
5399		unsigned long long val = 0;
5400
5401		for_each_mem_cgroup_tree(mi, memcg)
5402			val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
5403		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
5404	}
5405
5406#ifdef CONFIG_DEBUG_VM
5407	{
5408		int nid, zid;
5409		struct mem_cgroup_per_zone *mz;
5410		struct zone_reclaim_stat *rstat;
5411		unsigned long recent_rotated[2] = {0, 0};
5412		unsigned long recent_scanned[2] = {0, 0};
5413
5414		for_each_online_node(nid)
5415			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
5416				mz = mem_cgroup_zoneinfo(memcg, nid, zid);
5417				rstat = &mz->lruvec.reclaim_stat;
5418
5419				recent_rotated[0] += rstat->recent_rotated[0];
5420				recent_rotated[1] += rstat->recent_rotated[1];
5421				recent_scanned[0] += rstat->recent_scanned[0];
5422				recent_scanned[1] += rstat->recent_scanned[1];
5423			}
5424		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
5425		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
5426		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
5427		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
5428	}
5429#endif
5430
5431	return 0;
5432}
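/*
 * Summary of the memory.stat layout produced above: local counters and
 * LRU sizes in bytes, event counters as raw counts, the hierarchical
 * limits (hierarchical_memsw_limit only with swap accounting), the
 * total_* sums over the subtree, and, under CONFIG_DEBUG_VM, the
 * recent_rotated_*/recent_scanned_* reclaim statistics.
 */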
5433
5434static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
5435				      struct cftype *cft)
5436{
5437	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5438
5439	return mem_cgroup_swappiness(memcg);
5440}
5441
5442static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
5443				       struct cftype *cft, u64 val)
5444{
5445	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5446	struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));
5447
5448	if (val > 100 || !parent)
5449		return -EINVAL;
5450
5451	mutex_lock(&memcg_create_mutex);
5452
5453	/* If under hierarchy, only empty-root can set this value */
5454	if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
5455		mutex_unlock(&memcg_create_mutex);
5456		return -EINVAL;
5457	}
5458
5459	memcg->swappiness = val;
5460
5461	mutex_unlock(&memcg_create_mutex);
5462
5463	return 0;
5464}
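/*
 * Illustration (the value is an example): "echo 60 > memory.swappiness"
 * sets the per-memcg swappiness.  Values above 100 and writes to the root
 * memcg are rejected, as are writes once the group has children or sits
 * under a parent with use_hierarchy set.
 */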
5465
5466static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
5467{
5468	struct mem_cgroup_threshold_ary *t;
5469	u64 usage;
5470	int i;
5471
5472	rcu_read_lock();
5473	if (!swap)
5474		t = rcu_dereference(memcg->thresholds.primary);
5475	else
5476		t = rcu_dereference(memcg->memsw_thresholds.primary);
5477
5478	if (!t)
5479		goto unlock;
5480
5481	usage = mem_cgroup_usage(memcg, swap);
5482
5483	/*
5484	 * current_threshold points to threshold just below or equal to usage.
5485	 * If that is not the case, a threshold was crossed after the last
5486	 * call of __mem_cgroup_threshold().
5487	 */
5488	i = t->current_threshold;
5489
5490	/*
5491	 * Iterate backward over array of thresholds starting from
5492	 * current_threshold and check if a threshold is crossed.
5493	 * If none of thresholds below usage is crossed, we read
5494	 * only one element of the array here.
5495	 */
5496	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
5497		eventfd_signal(t->entries[i].eventfd, 1);
5498
5499	/* i = current_threshold + 1 */
5500	i++;
5501
5502	/*
5503	 * Iterate forward over array of thresholds starting from
5504	 * current_threshold+1 and check if a threshold is crossed.
5505	 * If none of thresholds above usage is crossed, we read
5506	 * only one element of the array here.
5507	 */
5508	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
5509		eventfd_signal(t->entries[i].eventfd, 1);
5510
5511	/* Update current_threshold */
5512	t->current_threshold = i - 1;
5513unlock:
5514	rcu_read_unlock();
5515}
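/*
 * Worked example (threshold values are illustrative): with registered
 * thresholds {4M, 8M, 16M}, kept ascending by compare_thresholds(), and
 * usage at 10M, current_threshold indexes the 8M entry - the largest
 * threshold <= usage.  If usage later drops to 3M, the backward walk
 * signals the 8M and 4M eventfds; if it instead rises to 20M, the
 * forward walk signals the 16M eventfd.
 */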
5516
5517static void mem_cgroup_threshold(struct mem_cgroup *memcg)
5518{
5519	while (memcg) {
5520		__mem_cgroup_threshold(memcg, false);
5521		if (do_swap_account)
5522			__mem_cgroup_threshold(memcg, true);
5523
5524		memcg = parent_mem_cgroup(memcg);
5525	}
5526}
5527
5528static int compare_thresholds(const void *a, const void *b)
5529{
5530	const struct mem_cgroup_threshold *_a = a;
5531	const struct mem_cgroup_threshold *_b = b;
5532
5533	if (_a->threshold > _b->threshold)
5534		return 1;
5535
5536	if (_a->threshold < _b->threshold)
5537		return -1;
5538
5539	return 0;
5540}
5541
5542static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
5543{
5544	struct mem_cgroup_eventfd_list *ev;
5545
 
 
5546	list_for_each_entry(ev, &memcg->oom_notify, list)
5547		eventfd_signal(ev->eventfd, 1);
 
 
5548	return 0;
5549}
5550
5551static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
5552{
5553	struct mem_cgroup *iter;
5554
5555	for_each_mem_cgroup_tree(iter, memcg)
5556		mem_cgroup_oom_notify_cb(iter);
5557}
5558
5559static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
5560	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
5561{
5562	struct mem_cgroup_thresholds *thresholds;
5563	struct mem_cgroup_threshold_ary *new;
5564	u64 threshold, usage;
 
5565	int i, size, ret;
5566
5567	ret = res_counter_memparse_write_strategy(args, &threshold);
5568	if (ret)
5569		return ret;
5570
5571	mutex_lock(&memcg->thresholds_lock);
5572
5573	if (type == _MEM)
5574		thresholds = &memcg->thresholds;
5575	else if (type == _MEMSWAP)
5576		thresholds = &memcg->memsw_thresholds;
5577	else
5578		BUG();
5579
5580	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
5581
5582	/* Check if a threshold crossed before adding a new one */
5583	if (thresholds->primary)
5584		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
5585
5586	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
5587
5588	/* Allocate memory for new array of thresholds */
5589	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
5590			GFP_KERNEL);
5591	if (!new) {
5592		ret = -ENOMEM;
5593		goto unlock;
5594	}
5595	new->size = size;
5596
5597	/* Copy thresholds (if any) to new array */
5598	if (thresholds->primary) {
5599		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
5600				sizeof(struct mem_cgroup_threshold));
5601	}
5602
5603	/* Add new threshold */
5604	new->entries[size - 1].eventfd = eventfd;
5605	new->entries[size - 1].threshold = threshold;
5606
5607	/* Sort thresholds. Registering of new threshold isn't time-critical */
5608	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
5609			compare_thresholds, NULL);
5610
5611	/* Find current threshold */
5612	new->current_threshold = -1;
5613	for (i = 0; i < size; i++) {
5614		if (new->entries[i].threshold <= usage) {
5615			/*
5616			 * new->current_threshold will not be used until
5617			 * rcu_assign_pointer(), so it's safe to increment
5618			 * it here.
5619			 */
5620			++new->current_threshold;
5621		} else
5622			break;
5623	}
5624
5625	/* Free old spare buffer and save old primary buffer as spare */
5626	kfree(thresholds->spare);
5627	thresholds->spare = thresholds->primary;
5628
5629	rcu_assign_pointer(thresholds->primary, new);
5630
5631	/* To be sure that nobody uses thresholds */
5632	synchronize_rcu();
5633
5634unlock:
5635	mutex_unlock(&memcg->thresholds_lock);
5636
5637	return ret;
5638}
5639
5640static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
5641	struct eventfd_ctx *eventfd, const char *args)
5642{
5643	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
5644}
5645
5646static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
5647	struct eventfd_ctx *eventfd, const char *args)
5648{
5649	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
5650}
5651
5652static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
5653	struct eventfd_ctx *eventfd, enum res_type type)
5654{
5655	struct mem_cgroup_thresholds *thresholds;
5656	struct mem_cgroup_threshold_ary *new;
5657	u64 usage;
5658	int i, j, size;
5659
5660	mutex_lock(&memcg->thresholds_lock);
5661	if (type == _MEM)
5662		thresholds = &memcg->thresholds;
5663	else if (type == _MEMSWAP)
5664		thresholds = &memcg->memsw_thresholds;
5665	else
5666		BUG();
5667
5668	if (!thresholds->primary)
5669		goto unlock;
5670
5671	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
5672
5673	/* Check if a threshold crossed before removing */
5674	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
5675
5676	/* Calculate new number of threshold */
5677	size = 0;
5678	for (i = 0; i < thresholds->primary->size; i++) {
5679		if (thresholds->primary->entries[i].eventfd != eventfd)
5680			size++;
 
 
5681	}
5682
5683	new = thresholds->spare;
5684
 
 
 
 
5685	/* Set thresholds array to NULL if we don't have thresholds */
5686	if (!size) {
5687		kfree(new);
5688		new = NULL;
5689		goto swap_buffers;
5690	}
5691
5692	new->size = size;
5693
5694	/* Copy thresholds and find current threshold */
5695	new->current_threshold = -1;
5696	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
5697		if (thresholds->primary->entries[i].eventfd == eventfd)
5698			continue;
5699
5700		new->entries[j] = thresholds->primary->entries[i];
5701		if (new->entries[j].threshold <= usage) {
5702			/*
5703			 * new->current_threshold will not be used
5704			 * until rcu_assign_pointer(), so it's safe to increment
5705			 * it here.
5706			 */
5707			++new->current_threshold;
5708		}
5709		j++;
5710	}
5711
5712swap_buffers:
5713	/* Swap primary and spare array */
5714	thresholds->spare = thresholds->primary;
5715	/* If all events are unregistered, free the spare array */
5716	if (!new) {
5717		kfree(thresholds->spare);
5718		thresholds->spare = NULL;
5719	}
5720
5721	rcu_assign_pointer(thresholds->primary, new);
5722
5723	/* To be sure that nobody uses thresholds */
5724	synchronize_rcu();
5725unlock:
5726	mutex_unlock(&memcg->thresholds_lock);
5727}
5728
5729static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
5730	struct eventfd_ctx *eventfd)
5731{
5732	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
5733}
5734
5735static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
5736	struct eventfd_ctx *eventfd)
5737{
5738	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
5739}
5740
5741static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
5742	struct eventfd_ctx *eventfd, const char *args)
5743{
5744	struct mem_cgroup_eventfd_list *event;
5745
5746	event = kmalloc(sizeof(*event),	GFP_KERNEL);
5747	if (!event)
5748		return -ENOMEM;
5749
5750	spin_lock(&memcg_oom_lock);
5751
5752	event->eventfd = eventfd;
5753	list_add(&event->list, &memcg->oom_notify);
5754
5755	/* already in OOM ? */
5756	if (atomic_read(&memcg->under_oom))
5757		eventfd_signal(eventfd, 1);
5758	spin_unlock(&memcg_oom_lock);
5759
5760	return 0;
5761}
5762
5763static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
5764	struct eventfd_ctx *eventfd)
5765{
5766	struct mem_cgroup_eventfd_list *ev, *tmp;
5767
5768	spin_lock(&memcg_oom_lock);
5769
5770	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
5771		if (ev->eventfd == eventfd) {
5772			list_del(&ev->list);
5773			kfree(ev);
5774		}
5775	}
5776
5777	spin_unlock(&memcg_oom_lock);
5778}
5779
5780static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
5781{
5782	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
5783
5784	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
5785	seq_printf(sf, "under_oom %d\n", (bool)atomic_read(&memcg->under_oom));
 
 
5786	return 0;
5787}
5788
5789static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
5790	struct cftype *cft, u64 val)
5791{
5792	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5793	struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));
5794
5795	/* cannot set to root cgroup and only 0 and 1 are allowed */
5796	if (!parent || !((val == 0) || (val == 1)))
5797		return -EINVAL;
5798
5799	mutex_lock(&memcg_create_mutex);
5800	/* oom-kill-disable is a flag for subhierarchy. */
5801	if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
5802		mutex_unlock(&memcg_create_mutex);
5803		return -EINVAL;
5804	}
5805	memcg->oom_kill_disable = val;
5806	if (!val)
5807		memcg_oom_recover(memcg);
5808	mutex_unlock(&memcg_create_mutex);
5809	return 0;
5810}
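/*
 * Illustration (example command): "echo 1 > memory.oom_control" disables
 * the OOM killer for this memcg, and reading the file reports
 * oom_kill_disable and under_oom.  As with swappiness, the write is
 * rejected once the group has children or a hierarchical parent.
 */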
5811
5812#ifdef CONFIG_MEMCG_KMEM
5813static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
5814{
5815	int ret;
 
5816
5817	memcg->kmemcg_id = -1;
5818	ret = memcg_propagate_kmem(memcg);
5819	if (ret)
5820		return ret;
5821
5822	return mem_cgroup_sockets_init(memcg, ss);
 
 
5823}
5824
5825static void memcg_destroy_kmem(struct mem_cgroup *memcg)
5826{
5827	mem_cgroup_sockets_destroy(memcg);
5828}
5829
5830static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
5831{
5832	if (!memcg_kmem_is_active(memcg))
5833		return;
5834
5835	/*
5836	 * kmem charges can outlive the cgroup. In the case of slab
5837	 * pages, for instance, a page may contain objects from various
5838	 * processes. Since we do not take a reference for every such
5839	 * allocation, we have to be careful when doing uncharge
5840	 * (see memcg_uncharge_kmem) and here during offlining.
5841	 *
5842	 * The idea is that only the _last_ uncharge which sees
5843	 * the dead memcg will drop the last reference. An additional
5844	 * reference is taken here before the group is marked dead,
5845	 * and it is paired with a css_put, either during uncharge or here.
5846	 *
5847	 * Although this might sound strange, as this path is called from
5848	 * css_offline() when the reference might have dropped down to 0
5849	 * and shouldn't be incremented anymore (css_tryget would fail),
5850	 * we do not have other options because of the kmem allocations'
5851	 * lifetime.
5852	 */
5853	css_get(&memcg->css);
5854
5855	memcg_kmem_mark_dead(memcg);
5856
5857	if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0)
5858		return;
5859
5860	if (memcg_kmem_test_and_clear_dead(memcg))
5861		css_put(&memcg->css);
5862}
5863#else
5864static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
5865{
5866	return 0;
5867}
5868
5869static void memcg_destroy_kmem(struct mem_cgroup *memcg)
5870{
5871}
5872
5873static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
5874{
5875}
5876#endif
 
5877
5878/*
5879 * DO NOT USE IN NEW FILES.
5880 *
5881 * "cgroup.event_control" implementation.
5882 *
5883 * This is way over-engineered.  It tries to support fully configurable
5884 * events for each user.  Such level of flexibility is completely
5885 * unnecessary especially in the light of the planned unified hierarchy.
5886 *
5887 * Please deprecate this and replace with something simpler if at all
5888 * possible.
5889 */
5890
5891/*
5892 * Unregister event and free resources.
5893 *
5894 * Gets called from workqueue.
5895 */
5896static void memcg_event_remove(struct work_struct *work)
5897{
5898	struct mem_cgroup_event *event =
5899		container_of(work, struct mem_cgroup_event, remove);
5900	struct mem_cgroup *memcg = event->memcg;
5901
5902	remove_wait_queue(event->wqh, &event->wait);
5903
5904	event->unregister_event(memcg, event->eventfd);
5905
5906	/* Notify userspace the event is going away. */
5907	eventfd_signal(event->eventfd, 1);
5908
5909	eventfd_ctx_put(event->eventfd);
5910	kfree(event);
5911	css_put(&memcg->css);
5912}
5913
5914/*
5915 * Gets called on POLLHUP on eventfd when user closes it.
5916 *
5917 * Called with wqh->lock held and interrupts disabled.
5918 */
5919static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
5920			    int sync, void *key)
5921{
5922	struct mem_cgroup_event *event =
5923		container_of(wait, struct mem_cgroup_event, wait);
5924	struct mem_cgroup *memcg = event->memcg;
5925	unsigned long flags = (unsigned long)key;
5926
5927	if (flags & POLLHUP) {
5928		/*
5929		 * If the event has been detached at cgroup removal, we
5930		 * can simply return knowing the other side will cleanup
5931		 * for us.
5932		 *
5933		 * We can't race against event freeing since the other
5934		 * side will require wqh->lock via remove_wait_queue(),
5935		 * which we hold.
5936		 */
5937		spin_lock(&memcg->event_list_lock);
5938		if (!list_empty(&event->list)) {
5939			list_del_init(&event->list);
5940			/*
5941			 * We are in atomic context, but cgroup_event_remove()
5942			 * may sleep, so we have to call it in workqueue.
5943			 */
5944			schedule_work(&event->remove);
5945		}
5946		spin_unlock(&memcg->event_list_lock);
5947	}
5948
5949	return 0;
5950}
5951
5952static void memcg_event_ptable_queue_proc(struct file *file,
5953		wait_queue_head_t *wqh, poll_table *pt)
5954{
5955	struct mem_cgroup_event *event =
5956		container_of(pt, struct mem_cgroup_event, pt);
5957
5958	event->wqh = wqh;
5959	add_wait_queue(wqh, &event->wait);
5960}
5961
5962/*
5963 * DO NOT USE IN NEW FILES.
5964 *
5965 * Parse input and register new cgroup event handler.
5966 *
5967 * Input must be in format '<event_fd> <control_fd> <args>'.
5968 * Interpretation of args is defined by control file implementation.
5969 */
5970static int memcg_write_event_control(struct cgroup_subsys_state *css,
5971				     struct cftype *cft, char *buffer)
5972{
 
5973	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5974	struct mem_cgroup_event *event;
5975	struct cgroup_subsys_state *cfile_css;
5976	unsigned int efd, cfd;
5977	struct fd efile;
5978	struct fd cfile;
 
5979	const char *name;
5980	char *endp;
5981	int ret;
5982
5983	efd = simple_strtoul(buffer, &endp, 10);
5984	if (*endp != ' ')
5985		return -EINVAL;
5986	buffer = endp + 1;
5987
5988	cfd = simple_strtoul(buffer, &endp, 10);
5989	if ((*endp != ' ') && (*endp != '\0'))
5990		return -EINVAL;
5991	buffer = endp + 1;
5992
5993	event = kzalloc(sizeof(*event), GFP_KERNEL);
5994	if (!event)
5995		return -ENOMEM;
5996
5997	event->memcg = memcg;
5998	INIT_LIST_HEAD(&event->list);
5999	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
6000	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
6001	INIT_WORK(&event->remove, memcg_event_remove);
6002
6003	efile = fdget(efd);
6004	if (!efile.file) {
6005		ret = -EBADF;
6006		goto out_kfree;
6007	}
6008
6009	event->eventfd = eventfd_ctx_fileget(efile.file);
6010	if (IS_ERR(event->eventfd)) {
6011		ret = PTR_ERR(event->eventfd);
6012		goto out_put_efile;
6013	}
6014
6015	cfile = fdget(cfd);
6016	if (!cfile.file) {
6017		ret = -EBADF;
6018		goto out_put_eventfd;
6019	}
6020
6021	/* the process needs read permission on the control file */
6022	/* AV: shouldn't we check that it's been opened for read instead? */
6023	ret = inode_permission(file_inode(cfile.file), MAY_READ);
6024	if (ret < 0)
6025		goto out_put_cfile;
6026
6027	/*
6028	 * Determine the event callbacks and set them in @event.  This used
6029	 * to be done via struct cftype but cgroup core no longer knows
6030	 * about these events.  The following is crude but the whole thing
6031	 * is for compatibility anyway.
6032	 *
6033	 * DO NOT ADD NEW FILES.
6034	 */
6035	name = cfile.file->f_dentry->d_name.name;
6036
6037	if (!strcmp(name, "memory.usage_in_bytes")) {
6038		event->register_event = mem_cgroup_usage_register_event;
6039		event->unregister_event = mem_cgroup_usage_unregister_event;
6040	} else if (!strcmp(name, "memory.oom_control")) {
6041		event->register_event = mem_cgroup_oom_register_event;
6042		event->unregister_event = mem_cgroup_oom_unregister_event;
6043	} else if (!strcmp(name, "memory.pressure_level")) {
6044		event->register_event = vmpressure_register_event;
6045		event->unregister_event = vmpressure_unregister_event;
6046	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
6047		event->register_event = memsw_cgroup_usage_register_event;
6048		event->unregister_event = memsw_cgroup_usage_unregister_event;
6049	} else {
6050		ret = -EINVAL;
6051		goto out_put_cfile;
6052	}
6053
6054	/*
6055	 * Verify @cfile should belong to @css.  Also, remaining events are
6056	 * automatically removed on cgroup destruction but the removal is
6057	 * asynchronous, so take an extra ref on @css.
6058	 */
6059	cfile_css = css_tryget_from_dir(cfile.file->f_dentry->d_parent,
6060					&memory_cgrp_subsys);
6061	ret = -EINVAL;
6062	if (IS_ERR(cfile_css))
6063		goto out_put_cfile;
6064	if (cfile_css != css) {
6065		css_put(cfile_css);
6066		goto out_put_cfile;
6067	}
6068
6069	ret = event->register_event(memcg, event->eventfd, buffer);
6070	if (ret)
6071		goto out_put_css;
6072
6073	efile.file->f_op->poll(efile.file, &event->pt);
6074
6075	spin_lock(&memcg->event_list_lock);
6076	list_add(&event->list, &memcg->event_list);
6077	spin_unlock(&memcg->event_list_lock);
6078
6079	fdput(cfile);
6080	fdput(efile);
6081
6082	return 0;
6083
6084out_put_css:
6085	css_put(css);
6086out_put_cfile:
6087	fdput(cfile);
6088out_put_eventfd:
6089	eventfd_ctx_put(event->eventfd);
6090out_put_efile:
6091	fdput(efile);
6092out_kfree:
6093	kfree(event);
6094
6095	return ret;
6096}
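/*
 * Illustrative userspace sequence for the legacy interface above (the
 * threshold value is an example): create an eventfd with eventfd(2),
 * open the control file, e.g. memory.usage_in_bytes, and write
 *
 *	"<event_fd> <control_fd> 50M"
 *
 * to cgroup.event_control; the eventfd is then signalled whenever usage
 * crosses the 50M threshold.  For memory.oom_control and
 * memory.pressure_level the third field is interpreted by their
 * respective register_event callbacks instead.
 */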
6097
6098static struct cftype mem_cgroup_files[] = {
6099	{
6100		.name = "usage_in_bytes",
6101		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
6102		.read_u64 = mem_cgroup_read_u64,
6103	},
6104	{
6105		.name = "max_usage_in_bytes",
6106		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
6107		.trigger = mem_cgroup_reset,
6108		.read_u64 = mem_cgroup_read_u64,
6109	},
6110	{
6111		.name = "limit_in_bytes",
6112		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
6113		.write_string = mem_cgroup_write,
6114		.read_u64 = mem_cgroup_read_u64,
6115	},
6116	{
6117		.name = "soft_limit_in_bytes",
6118		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
6119		.write_string = mem_cgroup_write,
6120		.read_u64 = mem_cgroup_read_u64,
6121	},
6122	{
6123		.name = "failcnt",
6124		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
6125		.trigger = mem_cgroup_reset,
6126		.read_u64 = mem_cgroup_read_u64,
6127	},
6128	{
6129		.name = "stat",
6130		.seq_show = memcg_stat_show,
6131	},
6132	{
6133		.name = "force_empty",
6134		.trigger = mem_cgroup_force_empty_write,
6135	},
6136	{
6137		.name = "use_hierarchy",
6138		.flags = CFTYPE_INSANE,
6139		.write_u64 = mem_cgroup_hierarchy_write,
6140		.read_u64 = mem_cgroup_hierarchy_read,
6141	},
6142	{
6143		.name = "cgroup.event_control",		/* XXX: for compat */
6144		.write_string = memcg_write_event_control,
6145		.flags = CFTYPE_NO_PREFIX,
6146		.mode = S_IWUGO,
6147	},
6148	{
6149		.name = "swappiness",
6150		.read_u64 = mem_cgroup_swappiness_read,
6151		.write_u64 = mem_cgroup_swappiness_write,
6152	},
6153	{
6154		.name = "move_charge_at_immigrate",
6155		.read_u64 = mem_cgroup_move_charge_read,
6156		.write_u64 = mem_cgroup_move_charge_write,
6157	},
6158	{
6159		.name = "oom_control",
6160		.seq_show = mem_cgroup_oom_control_read,
6161		.write_u64 = mem_cgroup_oom_control_write,
6162		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
6163	},
6164	{
6165		.name = "pressure_level",
 
6166	},
6167#ifdef CONFIG_NUMA
6168	{
6169		.name = "numa_stat",
6170		.seq_show = memcg_numa_stat_show,
6171	},
6172#endif
6173#ifdef CONFIG_MEMCG_KMEM
6174	{
6175		.name = "kmem.limit_in_bytes",
6176		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
6177		.write_string = mem_cgroup_write,
6178		.read_u64 = mem_cgroup_read_u64,
6179	},
6180	{
6181		.name = "kmem.usage_in_bytes",
6182		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
6183		.read_u64 = mem_cgroup_read_u64,
6184	},
6185	{
6186		.name = "kmem.failcnt",
6187		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
6188		.trigger = mem_cgroup_reset,
6189		.read_u64 = mem_cgroup_read_u64,
6190	},
6191	{
6192		.name = "kmem.max_usage_in_bytes",
6193		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
6194		.trigger = mem_cgroup_reset,
6195		.read_u64 = mem_cgroup_read_u64,
6196	},
6197#ifdef CONFIG_SLABINFO
6198	{
6199		.name = "kmem.slabinfo",
6200		.seq_show = mem_cgroup_slabinfo_read,
6201	},
6202#endif
6203#endif
6204	{ },	/* terminate */
6205};
6206
6207#ifdef CONFIG_MEMCG_SWAP
6208static struct cftype memsw_cgroup_files[] = {
6209	{
6210		.name = "memsw.usage_in_bytes",
6211		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
 
6212		.read_u64 = mem_cgroup_read_u64,
6213	},
6214	{
6215		.name = "memsw.max_usage_in_bytes",
6216		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
6217		.trigger = mem_cgroup_reset,
6218		.read_u64 = mem_cgroup_read_u64,
6219	},
6220	{
6221		.name = "memsw.limit_in_bytes",
6222		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
6223		.write_string = mem_cgroup_write,
6224		.read_u64 = mem_cgroup_read_u64,
6225	},
6226	{
6227		.name = "memsw.failcnt",
6228		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
6229		.trigger = mem_cgroup_reset,
6230		.read_u64 = mem_cgroup_read_u64,
6231	},
6232	{ },	/* terminate */
6233};
6234#endif
6235static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
6236{
6237	struct mem_cgroup_per_node *pn;
6238	struct mem_cgroup_per_zone *mz;
6239	int zone, tmp = node;
6240	/*
6241	 * This routine is called against possible nodes.
6242	 * But it's a BUG to call kmalloc() against an offline node.
6243	 *
6244	 * TODO: this routine can waste much memory for nodes which will
6245	 *       never be onlined. It's better to use memory hotplug callback
6246	 *       function.
6247	 */
6248	if (!node_state(node, N_NORMAL_MEMORY))
6249		tmp = -1;
6250	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
6251	if (!pn)
6252		return 1;
6253
6254	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
6255		mz = &pn->zoneinfo[zone];
6256		lruvec_init(&mz->lruvec);
6257		mz->usage_in_excess = 0;
6258		mz->on_tree = false;
6259		mz->memcg = memcg;
6260	}
6261	memcg->nodeinfo[node] = pn;
6262	return 0;
6263}
6264
6265static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
6266{
6267	kfree(memcg->nodeinfo[node]);
 
6268}
6269
6270static struct mem_cgroup *mem_cgroup_alloc(void)
 
6271{
 
 
6272	struct mem_cgroup *memcg;
6273	size_t size;
6274
6275	size = sizeof(struct mem_cgroup);
6276	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
6277
6278	memcg = kzalloc(size, GFP_KERNEL);
6279	if (!memcg)
6280		return NULL;
6281
6282	memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
6283	if (!memcg->stat)
6284		goto out_free;
6285	spin_lock_init(&memcg->pcp_counter_lock);
6286	return memcg;
6287
6288out_free:
6289	kfree(memcg);
6290	return NULL;
6291}
 
6292
6293/*
6294 * When a mem_cgroup is destroyed, references from swap_cgroup can remain.
6295 * (scanning all of them at force_empty is too costly...)
6296 *
6297 * Instead of clearing all references at force_empty, we remember
6298 * the number of references from swap_cgroup and free the mem_cgroup when
6299 * it goes down to 0.
6300 *
6301 * Removal of cgroup itself succeeds regardless of refs from swap.
6302 */
6303
6304static void __mem_cgroup_free(struct mem_cgroup *memcg)
6305{
6306	int node;
6307
6308	mem_cgroup_remove_from_trees(memcg);
 
 
6309
6310	for_each_node(node)
6311		free_mem_cgroup_per_zone_info(memcg, node);
 
 
 
 
6312
6313	free_percpu(memcg->stat);
 
6314
6315	/*
6316	 * We need to make sure that (at least for now), the jump label
6317	 * destruction code runs outside of the cgroup lock. This is because
6318	 * get_online_cpus(), which is called from the static_branch update,
6319	 * can't be called inside the cgroup_lock. cpusets are the ones
6320	 * enforcing this dependency, so if they ever change, we might as well.
6321	 *
6322	 * schedule_work() will guarantee this happens. Be careful if you need
6323	 * to move this code around, and make sure it is outside
6324	 * the cgroup_lock.
6325	 */
6326	disarm_static_keys(memcg);
6327	kfree(memcg);
6328}
6329
6330/*
6331 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
6332 */
6333struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
6334{
6335	if (!memcg->res.parent)
6336		return NULL;
6337	return mem_cgroup_from_res_counter(memcg->res.parent, res);
 
 
 
 
6338}
6339EXPORT_SYMBOL(parent_mem_cgroup);
6340
6341static void __init mem_cgroup_soft_limit_tree_init(void)
6342{
6343	struct mem_cgroup_tree_per_node *rtpn;
6344	struct mem_cgroup_tree_per_zone *rtpz;
6345	int tmp, node, zone;
6346
6347	for_each_node(node) {
6348		tmp = node;
6349		if (!node_state(node, N_NORMAL_MEMORY))
6350			tmp = -1;
6351		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
6352		BUG_ON(!rtpn);
6353
6354		soft_limit_tree.rb_tree_per_node[node] = rtpn;
6355
6356		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
6357			rtpz = &rtpn->rb_tree_per_zone[zone];
6358			rtpz->rb_root = RB_ROOT;
6359			spin_lock_init(&rtpz->lock);
6360		}
6361	}
6362}
6363
6364static struct cgroup_subsys_state * __ref
6365mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
6366{
 
6367	struct mem_cgroup *memcg;
 
 
6368	long error = -ENOMEM;
6369	int node;
6370
6371	memcg = mem_cgroup_alloc();
6372	if (!memcg)
6373		return ERR_PTR(error);
6374
6375	for_each_node(node)
6376		if (alloc_mem_cgroup_per_zone_info(memcg, node))
6377			goto free_out;
 
 
 
6378
6379	/* root ? */
6380	if (parent_css == NULL) {
6381		root_mem_cgroup = memcg;
6382		res_counter_init(&memcg->res, NULL);
6383		res_counter_init(&memcg->memsw, NULL);
6384		res_counter_init(&memcg->kmem, NULL);
6385	}
6386
6387	memcg->last_scanned_node = MAX_NUMNODES;
6388	INIT_LIST_HEAD(&memcg->oom_notify);
6389	memcg->move_charge_at_immigrate = 0;
6390	mutex_init(&memcg->thresholds_lock);
6391	spin_lock_init(&memcg->move_lock);
6392	vmpressure_init(&memcg->vmpressure);
6393	INIT_LIST_HEAD(&memcg->event_list);
6394	spin_lock_init(&memcg->event_list_lock);
6395
6396	return &memcg->css;
6397
6398free_out:
6399	__mem_cgroup_free(memcg);
6400	return ERR_PTR(error);
6401}
6402
6403static int
6404mem_cgroup_css_online(struct cgroup_subsys_state *css)
6405{
6406	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6407	struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css));
6408
6409	if (css->cgroup->id > MEM_CGROUP_ID_MAX)
6410		return -ENOSPC;
6411
6412	if (!parent)
6413		return 0;
6414
6415	mutex_lock(&memcg_create_mutex);
 
 
6416
6417	memcg->use_hierarchy = parent->use_hierarchy;
6418	memcg->oom_kill_disable = parent->oom_kill_disable;
6419	memcg->swappiness = mem_cgroup_swappiness(parent);
6420
6421	if (parent->use_hierarchy) {
6422		res_counter_init(&memcg->res, &parent->res);
6423		res_counter_init(&memcg->memsw, &parent->memsw);
6424		res_counter_init(&memcg->kmem, &parent->kmem);
6425
6426		/*
6427		 * No need to take a reference to the parent because cgroup
6428		 * core guarantees its existence.
6429		 */
6430	} else {
6431		res_counter_init(&memcg->res, NULL);
6432		res_counter_init(&memcg->memsw, NULL);
6433		res_counter_init(&memcg->kmem, NULL);
6434		/*
6435		 * Deeper hierarchy with use_hierarchy == false doesn't make
6436		 * much sense so let cgroup subsystem know about this
6437		 * unfortunate state in our controller.
6438		 */
6439		if (parent != root_mem_cgroup)
6440			memory_cgrp_subsys.broken_hierarchy = true;
6441	}
6442	mutex_unlock(&memcg_create_mutex);
6443
6444	return memcg_init_kmem(memcg, &memory_cgrp_subsys);
6445}
6446
6447/*
6448 * Announce all parents that a group from their hierarchy is gone.
6449 */
6450static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
6451{
6452	struct mem_cgroup *parent = memcg;
6453
6454	while ((parent = parent_mem_cgroup(parent)))
6455		mem_cgroup_iter_invalidate(parent);
6456
6457	/*
6458	 * If the root memcg is not hierarchical we have to check it
6459	 * explicitly.
6460	 */
6461	if (!root_mem_cgroup->use_hierarchy)
6462		mem_cgroup_iter_invalidate(root_mem_cgroup);
6463}
6464
6465static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
6466{
6467	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6468	struct mem_cgroup_event *event, *tmp;
6469	struct cgroup_subsys_state *iter;
6470
6471	/*
6472	 * Unregister events and notify userspace.
6473	 * Notify userspace about cgroup removing only after rmdir of cgroup
6474	 * directory to avoid race between userspace and kernelspace.
6475	 */
6476	spin_lock(&memcg->event_list_lock);
6477	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
6478		list_del_init(&event->list);
6479		schedule_work(&event->remove);
6480	}
6481	spin_unlock(&memcg->event_list_lock);
6482
6483	kmem_cgroup_css_offline(memcg);
 
6484
6485	mem_cgroup_invalidate_reclaim_iterators(memcg);
6486
6487	/*
6488	 * This requires that offlining is serialized.  Right now that is
6489	 * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
6490	 */
6491	css_for_each_descendant_post(iter, css)
6492		mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
6493
6494	mem_cgroup_destroy_all_caches(memcg);
6495	vmpressure_cleanup(&memcg->vmpressure);
6496}
6497
6498static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
6499{
6500	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6501	/*
6502	 * XXX: css_offline() would be where we should reparent all
6503	 * memory to prepare the cgroup for destruction.  However,
6504	 * memcg does not do css_tryget() and res_counter charging
6505	 * under the same RCU lock region, which means that charging
6506	 * could race with offlining.  Offlining only happens to
6507	 * cgroups with no tasks in them but charges can show up
6508	 * without any tasks from the swapin path when the target
6509	 * memcg is looked up from the swapout record and not from the
6510	 * current task as it usually is.  A race like this can leak
6511	 * charges and put pages with stale cgroup pointers into
6512	 * circulation:
6513	 *
6514	 * #0                        #1
6515	 *                           lookup_swap_cgroup_id()
6516	 *                           rcu_read_lock()
6517	 *                           mem_cgroup_lookup()
6518	 *                           css_tryget()
6519	 *                           rcu_read_unlock()
6520	 * disable css_tryget()
6521	 * call_rcu()
6522	 *   offline_css()
6523	 *     reparent_charges()
6524	 *                           res_counter_charge()
6525	 *                           css_put()
6526	 *                             css_free()
6527	 *                           pc->mem_cgroup = dead memcg
6528	 *                           add page to lru
6529	 *
6530	 * The bulk of the charges are still moved in offline_css() to
6531	 * avoid pinning a lot of pages in case a long-term reference
6532	 * like a swapout record is deferring the css_free() to long
6533	 * after offlining.  But this makes sure we catch any charges
6534	 * made after offlining:
6535	 */
6536	mem_cgroup_reparent_charges(memcg);
6537
6538	memcg_destroy_kmem(memcg);
6539	__mem_cgroup_free(memcg);
6540}
6541
6542#ifdef CONFIG_MMU
6543/* Handlers for move charge at task migration. */
6544#define PRECHARGE_COUNT_AT_ONCE	256
6545static int mem_cgroup_do_precharge(unsigned long count)
6546{
6547	int ret = 0;
6548	int batch_count = PRECHARGE_COUNT_AT_ONCE;
6549	struct mem_cgroup *memcg = mc.to;
6550
6551	if (mem_cgroup_is_root(memcg)) {
6552		mc.precharge += count;
6553		/* we don't need css_get for root */
6554		return ret;
6555	}
6556	/* try to charge at once */
6557	if (count > 1) {
6558		struct res_counter *dummy;
6559		/*
6560		 * "memcg" cannot be under rmdir() because we've already checked
6561		 * by cgroup_lock_live_cgroup() that it is not removed and we
6562		 * are still under the same cgroup_mutex. So we can postpone
6563		 * css_get().
6564		 */
6565		if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
6566			goto one_by_one;
6567		if (do_swap_account && res_counter_charge(&memcg->memsw,
6568						PAGE_SIZE * count, &dummy)) {
6569			res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
6570			goto one_by_one;
6571		}
6572		mc.precharge += count;
6573		return ret;
6574	}
6575one_by_one:
6576	/* fall back to one by one charge */
6577	while (count--) {
6578		if (signal_pending(current)) {
6579			ret = -EINTR;
6580			break;
6581		}
6582		if (!batch_count--) {
6583			batch_count = PRECHARGE_COUNT_AT_ONCE;
6584			cond_resched();
6585		}
6586		ret = mem_cgroup_try_charge(memcg, GFP_KERNEL, 1, false);
6587		if (ret)
6588			/* mem_cgroup_clear_mc() will do uncharge later */
6589			return ret;
6590		mc.precharge++;
 
6591	}
6592	return ret;
6593}
6594
6595/**
6596 * get_mctgt_type - get target type of moving charge
6597 * @vma: the vma the pte to be checked belongs to
6598 * @addr: the address corresponding to the pte to be checked
6599 * @ptent: the pte to be checked
6600 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
6601 *
6602 * Returns
6603 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
6604 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
6605 *     move charge. If @target is not NULL, the page is stored in target->page
6606 *     with an extra refcount taken (callers should handle it).
6607 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
6608 *     target for charge migration. If @target is not NULL, the entry is stored
6609 *     in target->ent.
6610 *
6611 * Called with pte lock held.
6612 */
6613union mc_target {
6614	struct page	*page;
6615	swp_entry_t	ent;
6616};
6617
6618enum mc_target_type {
6619	MC_TARGET_NONE = 0,
6620	MC_TARGET_PAGE,
6621	MC_TARGET_SWAP,
 
6622};
6623
6624static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
6625						unsigned long addr, pte_t ptent)
6626{
6627	struct page *page = vm_normal_page(vma, addr, ptent);
6628
6629	if (!page || !page_mapped(page))
6630		return NULL;
6631	if (PageAnon(page)) {
6632		/* we don't move shared anon */
6633		if (!move_anon())
6634			return NULL;
6635	} else if (!move_file())
6636		/* we ignore mapcount for file pages */
6637		return NULL;
6638	if (!get_page_unless_zero(page))
6639		return NULL;
6640
6641	return page;
6642}
6643
6644#ifdef CONFIG_SWAP
6645static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
6646			unsigned long addr, pte_t ptent, swp_entry_t *entry)
6647{
6648	struct page *page = NULL;
6649	swp_entry_t ent = pte_to_swp_entry(ptent);
6650
6651	if (!move_anon() || non_swap_entry(ent))
6652		return NULL;
 
6653	/*
6654	 * Because lookup_swap_cache() updates some statistics counter,
6655	 * we call find_get_page() with swapper_space directly.
6656	 */
6657	page = find_get_page(swap_address_space(ent), ent.val);
6658	if (do_swap_account)
6659		entry->val = ent.val;
6660
6661	return page;
6662}
6663#else
6664static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
6665			unsigned long addr, pte_t ptent, swp_entry_t *entry)
6666{
6667	return NULL;
6668}
6669#endif
6670
6671static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
6672			unsigned long addr, pte_t ptent, swp_entry_t *entry)
6673{
6674	struct page *page = NULL;
6675	struct address_space *mapping;
6676	pgoff_t pgoff;
6677
6678	if (!vma->vm_file) /* anonymous vma */
6679		return NULL;
6680	if (!move_file())
6681		return NULL;
6682
6683	mapping = vma->vm_file->f_mapping;
6684	if (pte_none(ptent))
6685		pgoff = linear_page_index(vma, addr);
6686	else /* pte_file(ptent) is true */
6687		pgoff = pte_to_pgoff(ptent);
6688
6689	/* The page is moved even if it's not RSS of this task (page-faulted). */
6690#ifdef CONFIG_SWAP
6691	/* shmem/tmpfs may report page out on swap: account for that too. */
6692	if (shmem_mapping(mapping)) {
6693		page = find_get_entry(mapping, pgoff);
6694		if (radix_tree_exceptional_entry(page)) {
6695			swp_entry_t swp = radix_to_swp_entry(page);
6696			if (do_swap_account)
6697				*entry = swp;
6698			page = find_get_page(swap_address_space(swp), swp.val);
6699		}
6700	} else
6701		page = find_get_page(mapping, pgoff);
6702#else
6703	page = find_get_page(mapping, pgoff);
6704#endif
6705	return page;
6706}
6707
6708static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
6709		unsigned long addr, pte_t ptent, union mc_target *target)
6710{
6711	struct page *page = NULL;
6712	struct page_cgroup *pc;
6713	enum mc_target_type ret = MC_TARGET_NONE;
6714	swp_entry_t ent = { .val = 0 };
6715
6716	if (pte_present(ptent))
6717		page = mc_handle_present_pte(vma, addr, ptent);
6718	else if (is_swap_pte(ptent))
6719		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
6720	else if (pte_none(ptent) || pte_file(ptent))
6721		page = mc_handle_file_pte(vma, addr, ptent, &ent);
6722
6723	if (!page && !ent.val)
6724		return ret;
6725	if (page) {
6726		pc = lookup_page_cgroup(page);
6727		/*
6728		 * Do only loose check w/o page_cgroup lock.
6729		 * mem_cgroup_move_account() checks the pc is valid or not under
6730		 * the lock.
6731		 */
6732		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
6733			ret = MC_TARGET_PAGE;
6734			if (target)
6735				target->page = page;
 
6736		}
6737		if (!ret || !target)
6738			put_page(page);
6739	}
6740	/* There is a swap entry and a page doesn't exist or isn't charged */
6741	if (ent.val && !ret &&
 
 
 
6742	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
6743		ret = MC_TARGET_SWAP;
6744		if (target)
6745			target->ent = ent;
6746	}
6747	return ret;
6748}
6749
6750#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6751/*
6752 * We don't consider swapping or file mapped pages because THP does not
6753 * support them for now.
6754 * Caller should make sure that pmd_trans_huge(pmd) is true.
6755 */
6756static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6757		unsigned long addr, pmd_t pmd, union mc_target *target)
6758{
6759	struct page *page = NULL;
6760	struct page_cgroup *pc;
6761	enum mc_target_type ret = MC_TARGET_NONE;
6762
6763	page = pmd_page(pmd);
6764	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
6765	if (!move_anon())
 
6766		return ret;
6767	pc = lookup_page_cgroup(page);
6768	if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
6769		ret = MC_TARGET_PAGE;
6770		if (target) {
6771			get_page(page);
6772			target->page = page;
 
 
 
 
6773		}
6774	}
6775	return ret;
6776}
6777#else
6778static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6779		unsigned long addr, pmd_t pmd, union mc_target *target)
6780{
6781	return MC_TARGET_NONE;
6782}
6783#endif
6784
6785static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
6786					unsigned long addr, unsigned long end,
6787					struct mm_walk *walk)
6788{
6789	struct vm_area_struct *vma = walk->private;
6790	pte_t *pte;
6791	spinlock_t *ptl;
6792
6793	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
6794		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
6795			mc.precharge += HPAGE_PMD_NR;
6796		spin_unlock(ptl);
6797		return 0;
6798	}
6799
6800	if (pmd_trans_unstable(pmd))
6801		return 0;
6802	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 
 
6803	for (; addr != end; pte++, addr += PAGE_SIZE)
6804		if (get_mctgt_type(vma, addr, *pte, NULL))
6805			mc.precharge++;	/* increment precharge temporarily */
6806	pte_unmap_unlock(pte - 1, ptl);
6807	cond_resched();
6808
6809	return 0;
6810}
6811
6812static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
6813{
6814	unsigned long precharge;
6815	struct vm_area_struct *vma;
6816
6817	down_read(&mm->mmap_sem);
6818	for (vma = mm->mmap; vma; vma = vma->vm_next) {
6819		struct mm_walk mem_cgroup_count_precharge_walk = {
6820			.pmd_entry = mem_cgroup_count_precharge_pte_range,
6821			.mm = mm,
6822			.private = vma,
6823		};
6824		if (is_vm_hugetlb_page(vma))
6825			continue;
6826		walk_page_range(vma->vm_start, vma->vm_end,
6827					&mem_cgroup_count_precharge_walk);
6828	}
6829	up_read(&mm->mmap_sem);
6830
6831	precharge = mc.precharge;
6832	mc.precharge = 0;
6833
6834	return precharge;
6835}
6836
6837static int mem_cgroup_precharge_mc(struct mm_struct *mm)
6838{
6839	unsigned long precharge = mem_cgroup_count_precharge(mm);
6840
6841	VM_BUG_ON(mc.moving_task);
6842	mc.moving_task = current;
6843	return mem_cgroup_do_precharge(precharge);
6844}
6845
6846/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6847static void __mem_cgroup_clear_mc(void)
6848{
6849	struct mem_cgroup *from = mc.from;
6850	struct mem_cgroup *to = mc.to;
6851	int i;
6852
6853	/* we must uncharge all the leftover precharges from mc.to */
6854	if (mc.precharge) {
6855		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
6856		mc.precharge = 0;
6857	}
6858	/*
6859	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6860	 * we must uncharge here.
6861	 */
6862	if (mc.moved_charge) {
6863		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
6864		mc.moved_charge = 0;
6865	}
6866	/* we must fixup refcnts and charges */
6867	if (mc.moved_swap) {
6868		/* uncharge swap account from the old cgroup */
6869		if (!mem_cgroup_is_root(mc.from))
6870			res_counter_uncharge(&mc.from->memsw,
6871						PAGE_SIZE * mc.moved_swap);
6872
6873		for (i = 0; i < mc.moved_swap; i++)
6874			css_put(&mc.from->css);
6875
6876		if (!mem_cgroup_is_root(mc.to)) {
6877			/*
6878			 * we charged both to->res and to->memsw, so we should
6879			 * uncharge to->res.
6880			 */
6881			res_counter_uncharge(&mc.to->res,
6882						PAGE_SIZE * mc.moved_swap);
6883		}
6884		/* we've already done css_get(mc.to) */
6885		mc.moved_swap = 0;
6886	}
6887	memcg_oom_recover(from);
6888	memcg_oom_recover(to);
6889	wake_up_all(&mc.waitq);
6890}
6891
6892static void mem_cgroup_clear_mc(void)
6893{
6894	struct mem_cgroup *from = mc.from;
6895
6896	/*
6897	 * we must clear moving_task before waking up waiters at the end of
6898	 * task migration.
6899	 */
6900	mc.moving_task = NULL;
6901	__mem_cgroup_clear_mc();
6902	spin_lock(&mc.lock);
6903	mc.from = NULL;
6904	mc.to = NULL;
 
6905	spin_unlock(&mc.lock);
6906	mem_cgroup_end_move(from);
 
6907}
6908
6909static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
6910				 struct cgroup_taskset *tset)
6911{
6912	struct task_struct *p = cgroup_taskset_first(tset);
6913	int ret = 0;
6914	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6915	unsigned long move_charge_at_immigrate;
6916
6917	/*
6918	 * We are now committed to this value, whatever it is. Changes in this
6919	 * tunable will only affect upcoming migrations, not the current one.
6920	 * So we need to save it, and keep it going.
6921	 */
6922	move_charge_at_immigrate  = memcg->move_charge_at_immigrate;
6923	if (move_charge_at_immigrate) {
6924		struct mm_struct *mm;
6925		struct mem_cgroup *from = mem_cgroup_from_task(p);
6926
6927		VM_BUG_ON(from == memcg);
6928
6929		mm = get_task_mm(p);
6930		if (!mm)
6931			return 0;
6932		/* We move charges only when we move an owner of the mm */
6933		if (mm->owner == p) {
6934			VM_BUG_ON(mc.from);
6935			VM_BUG_ON(mc.to);
6936			VM_BUG_ON(mc.precharge);
6937			VM_BUG_ON(mc.moved_charge);
6938			VM_BUG_ON(mc.moved_swap);
6939			mem_cgroup_start_move(from);
6940			spin_lock(&mc.lock);
6941			mc.from = from;
6942			mc.to = memcg;
6943			mc.immigrate_flags = move_charge_at_immigrate;
6944			spin_unlock(&mc.lock);
6945			/* We set mc.moving_task later */
6946
6947			ret = mem_cgroup_precharge_mc(mm);
6948			if (ret)
6949				mem_cgroup_clear_mc();
6950		}
 
 
 
6951		mmput(mm);
6952	}
6953	return ret;
6954}
6955
6956static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
6957				     struct cgroup_taskset *tset)
6958{
6959	mem_cgroup_clear_mc();
 
6960}
6961
6962static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6963				unsigned long addr, unsigned long end,
6964				struct mm_walk *walk)
6965{
6966	int ret = 0;
6967	struct vm_area_struct *vma = walk->private;
6968	pte_t *pte;
6969	spinlock_t *ptl;
6970	enum mc_target_type target_type;
6971	union mc_target target;
6972	struct page *page;
6973	struct page_cgroup *pc;
6974
6975	/*
6976	 * We don't take compound_lock() here but no race with splitting thp
6977	 * happens because:
6978	 *  - if pmd_trans_huge_lock() returns 1, the relevant thp is not
6979	 *    under splitting, which means there's no concurrent thp split,
6980	 *  - if another thread runs into split_huge_page() just after we
6981	 *    entered this if-block, the thread must wait for page table lock
6982	 *    to be unlocked in __split_huge_page_splitting(), where the main
6983	 *    part of thp split is not executed yet.
6984	 */
6985	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
6986		if (mc.precharge < HPAGE_PMD_NR) {
6987			spin_unlock(ptl);
6988			return 0;
6989		}
6990		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6991		if (target_type == MC_TARGET_PAGE) {
6992			page = target.page;
6993			if (!isolate_lru_page(page)) {
6994				pc = lookup_page_cgroup(page);
6995				if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
6996							pc, mc.from, mc.to)) {
6997					mc.precharge -= HPAGE_PMD_NR;
6998					mc.moved_charge += HPAGE_PMD_NR;
6999				}
7000				putback_lru_page(page);
7001			}
7002			put_page(page);
 
7003		}
7004		spin_unlock(ptl);
7005		return 0;
7006	}
7007
7008	if (pmd_trans_unstable(pmd))
7009		return 0;
7010retry:
7011	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 
 
7012	for (; addr != end; addr += PAGE_SIZE) {
7013		pte_t ptent = *(pte++);
 
7014		swp_entry_t ent;
7015
7016		if (!mc.precharge)
7017			break;
7018
7019		switch (get_mctgt_type(vma, addr, ptent, &target)) {
 
 
 
7020		case MC_TARGET_PAGE:
7021			page = target.page;
7022			if (isolate_lru_page(page))
7023				goto put;
7024			pc = lookup_page_cgroup(page);
7025			if (!mem_cgroup_move_account(page, 1, pc,
7026						     mc.from, mc.to)) {
 
7027				mc.precharge--;
7028				/* we uncharge from mc.from later. */
7029				mc.moved_charge++;
7030			}
7031			putback_lru_page(page);
7032put:			/* get_mctgt_type() gets the page */
7033			put_page(page);
 
 
7034			break;
7035		case MC_TARGET_SWAP:
7036			ent = target.ent;
7037			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
7038				mc.precharge--;
7039				/* we fixup refcnts and charges later. */
 
7040				mc.moved_swap++;
7041			}
7042			break;
7043		default:
7044			break;
7045		}
7046	}
7047	pte_unmap_unlock(pte - 1, ptl);
7048	cond_resched();
7049
7050	if (addr != end) {
7051		/*
7052		 * We have consumed all precharges we got in can_attach().
7053		 * We try charge one by one, but don't do any additional
7054		 * charges to mc.to if we have failed in charge once in attach()
7055		 * phase.
7056		 */
7057		ret = mem_cgroup_do_precharge(1);
7058		if (!ret)
7059			goto retry;
7060	}
7061
7062	return ret;
7063}
7064
7065static void mem_cgroup_move_charge(struct mm_struct *mm)
7066{
7067	struct vm_area_struct *vma;
 
7068
 
 
7069	lru_add_drain_all();
7070retry:
7071	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
7072		/*
7073		 * Someone who is holding the mmap_sem might be waiting in
7074		 * waitq. So we cancel all extra charges, wake up all waiters,
7075		 * and retry. Because we cancel precharges, we might not be able
7076		 * to move enough charges, but moving charge is a best-effort
7077		 * feature anyway, so it wouldn't be a big problem.
7078		 */
7079		__mem_cgroup_clear_mc();
7080		cond_resched();
7081		goto retry;
7082	}
7083	for (vma = mm->mmap; vma; vma = vma->vm_next) {
7084		int ret;
7085		struct mm_walk mem_cgroup_move_charge_walk = {
7086			.pmd_entry = mem_cgroup_move_charge_pte_range,
7087			.mm = mm,
7088			.private = vma,
7089		};
7090		if (is_vm_hugetlb_page(vma))
7091			continue;
7092		ret = walk_page_range(vma->vm_start, vma->vm_end,
7093						&mem_cgroup_move_charge_walk);
7094		if (ret)
7095			/*
7096			 * means we have consumed all precharges and failed in
7097			 * doing additional charge. Just abandon here.
7098			 */
7099			break;
7100	}
7101	up_read(&mm->mmap_sem);
7102}
7103
7104static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
7105				 struct cgroup_taskset *tset)
7106{
7107	struct task_struct *p = cgroup_taskset_first(tset);
7108	struct mm_struct *mm = get_task_mm(p);
 
 
 
7109
7110	if (mm) {
7111		if (mc.to)
7112			mem_cgroup_move_charge(mm);
7113		mmput(mm);
7114	}
7115	if (mc.to)
7116		mem_cgroup_clear_mc();
 
7117}
7118#else	/* !CONFIG_MMU */
7119static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
7120				 struct cgroup_taskset *tset)
7121{
 
 
 
7122	return 0;
7123}
7124static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
7125				     struct cgroup_taskset *tset)
7126{
 
 
 
 
7127}
7128static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
7129				 struct cgroup_taskset *tset)
7130{
7131}
7132#endif
7133
7134/*
7135 * Cgroup retains root cgroups across [un]mount cycles making it necessary
7136 * to verify sane_behavior flag on each mount attempt.
7137 */
7138static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
7139{
7140	/*
7141	 * use_hierarchy is forced with sane_behavior.  cgroup core
7142	 * guarantees that @root doesn't have any children, so turning it
7143	 * on for the root memcg is enough.
7144	 */
7145	if (cgroup_sane_behavior(root_css->cgroup))
7146		mem_cgroup_from_css(root_css)->use_hierarchy = true;
7147}
7148
7149struct cgroup_subsys memory_cgrp_subsys = {
7150	.css_alloc = mem_cgroup_css_alloc,
7151	.css_online = mem_cgroup_css_online,
7152	.css_offline = mem_cgroup_css_offline,
 
7153	.css_free = mem_cgroup_css_free,
 
 
7154	.can_attach = mem_cgroup_can_attach,
 
 
 
7155	.cancel_attach = mem_cgroup_cancel_attach,
7156	.attach = mem_cgroup_move_task,
7157	.bind = mem_cgroup_bind,
7158	.base_cftypes = mem_cgroup_files,
 
 
 
 
7159	.early_init = 0,
7160};
7161
7162#ifdef CONFIG_MEMCG_SWAP
7163static int __init enable_swap_account(char *s)
7164{
7165	if (!strcmp(s, "1"))
7166		really_do_swap_account = 1;
7167	else if (!strcmp(s, "0"))
7168		really_do_swap_account = 0;
7169	return 1;
7170}
7171__setup("swapaccount=", enable_swap_account);
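/*
 * Illustrative note (not taken from this file): the handler above is wired
 * to the "swapaccount=" boot parameter, so memory+swap accounting can be
 * toggled from the kernel command line, e.g.:
 *
 *	swapaccount=0	disable memory+swap accounting
 *	swapaccount=1	enable memory+swap accounting
 *
 * With CONFIG_MEMCG_SWAP_ENABLED the default is already 1.
 */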
7172
7173static void __init memsw_file_init(void)
7174{
7175	WARN_ON(cgroup_add_cftypes(&memory_cgrp_subsys, memsw_cgroup_files));
7176}
7177
7178static void __init enable_swap_cgroup(void)
7179{
7180	if (!mem_cgroup_disabled() && really_do_swap_account) {
7181		do_swap_account = 1;
7182		memsw_file_init();
7183	}
7184}
7185
7186#else
7187static void __init enable_swap_cgroup(void)
7188{
 
7189}
7190#endif
7191
7192/*
7193 * subsys_initcall() for memory controller.
7194 *
7195 * Some parts like hotcpu_notifier() have to be initialized from this context
7196 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
7197 * everything that doesn't depend on a specific mem_cgroup structure should
7198 * be initialized from here.
7199 */
7200static int __init mem_cgroup_init(void)
7201{
7202	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
7203	enable_swap_cgroup();
7204	mem_cgroup_soft_limit_tree_init();
7205	memcg_stock_init();
7206	return 0;
7207}
7208subsys_initcall(mem_cgroup_init);
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* memcontrol.c - Memory Controller
   3 *
   4 * Copyright IBM Corporation, 2007
   5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
   6 *
   7 * Copyright 2007 OpenVZ SWsoft Inc
   8 * Author: Pavel Emelianov <xemul@openvz.org>
   9 *
  10 * Memory thresholds
  11 * Copyright (C) 2009 Nokia Corporation
  12 * Author: Kirill A. Shutemov
  13 *
  14 * Kernel Memory Controller
  15 * Copyright (C) 2012 Parallels Inc. and Google Inc.
  16 * Authors: Glauber Costa and Suleiman Souhlal
  17 *
  18 * Native page reclaim
  19 * Charge lifetime sanitation
  20 * Lockless page tracking & accounting
  21 * Unified hierarchy configuration model
  22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
  23 *
  24 * Per memcg lru locking
  25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
 
  26 */
  27
  28#include <linux/page_counter.h>
  29#include <linux/memcontrol.h>
  30#include <linux/cgroup.h>
  31#include <linux/pagewalk.h>
  32#include <linux/sched/mm.h>
  33#include <linux/shmem_fs.h>
  34#include <linux/hugetlb.h>
  35#include <linux/pagemap.h>
  36#include <linux/pagevec.h>
  37#include <linux/vm_event_item.h>
  38#include <linux/smp.h>
  39#include <linux/page-flags.h>
  40#include <linux/backing-dev.h>
  41#include <linux/bit_spinlock.h>
  42#include <linux/rcupdate.h>
  43#include <linux/limits.h>
  44#include <linux/export.h>
  45#include <linux/mutex.h>
  46#include <linux/rbtree.h>
  47#include <linux/slab.h>
  48#include <linux/swap.h>
  49#include <linux/swapops.h>
  50#include <linux/spinlock.h>
  51#include <linux/eventfd.h>
  52#include <linux/poll.h>
  53#include <linux/sort.h>
  54#include <linux/fs.h>
  55#include <linux/seq_file.h>
  56#include <linux/vmpressure.h>
  57#include <linux/memremap.h>
  58#include <linux/mm_inline.h>
  59#include <linux/swap_cgroup.h>
  60#include <linux/cpu.h>
  61#include <linux/oom.h>
  62#include <linux/lockdep.h>
  63#include <linux/file.h>
  64#include <linux/resume_user_mode.h>
  65#include <linux/psi.h>
  66#include <linux/seq_buf.h>
  67#include <linux/sched/isolation.h>
  68#include <linux/kmemleak.h>
  69#include "internal.h"
  70#include <net/sock.h>
  71#include <net/ip.h>
 
  72#include "slab.h"
  73#include "swap.h"
  74
  75#include <linux/uaccess.h>
  76
  77#include <trace/events/vmscan.h>
  78
  79struct cgroup_subsys memory_cgrp_subsys __read_mostly;
  80EXPORT_SYMBOL(memory_cgrp_subsys);
  81
  82struct mem_cgroup *root_mem_cgroup __read_mostly;
 
  83
  84/* Active memory cgroup to use from an interrupt context */
  85DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
  86EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
  87
  88/* Socket memory accounting disabled? */
  89static bool cgroup_memory_nosocket __ro_after_init;
  90
  91/* Kernel memory accounting disabled? */
  92static bool cgroup_memory_nokmem __ro_after_init;
  93
  94/* BPF memory accounting disabled? */
  95static bool cgroup_memory_nobpf __ro_after_init;
  96
  97#ifdef CONFIG_CGROUP_WRITEBACK
  98static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
  99#endif
 
 
 
 100
 101/* Whether legacy memory+swap accounting is active */
 102static bool do_memsw_account(void)
 103{
 104	return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
 105}
 
 
 106
 107#define THRESHOLDS_EVENTS_TARGET 128
 108#define SOFTLIMIT_EVENTS_TARGET 1024
 109
 110/*
 111 * Cgroups above their limits are maintained in a RB-Tree, independent of
 112 * their hierarchy representation
 113 */
 114
 115struct mem_cgroup_tree_per_node {
 116	struct rb_root rb_root;
 117	struct rb_node *rb_rightmost;
 118	spinlock_t lock;
 119};
 120
 
 
 
 
 121struct mem_cgroup_tree {
 122	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
 123};
 124
 125static struct mem_cgroup_tree soft_limit_tree __read_mostly;
 126
 127/* for OOM */
 128struct mem_cgroup_eventfd_list {
 129	struct list_head list;
 130	struct eventfd_ctx *eventfd;
 131};
 132
 133/*
 134 * cgroup_event represents events which userspace wants to receive.
 135 */
 136struct mem_cgroup_event {
 137	/*
 138	 * memcg which the event belongs to.
 139	 */
 140	struct mem_cgroup *memcg;
 141	/*
 142	 * eventfd to signal userspace about the event.
 143	 */
 144	struct eventfd_ctx *eventfd;
 145	/*
 146	 * Each of these stored in a list by the cgroup.
 147	 */
 148	struct list_head list;
 149	/*
 150	 * register_event() callback will be used to add new userspace
 151	 * waiter for changes related to this event.  Use eventfd_signal()
 152	 * on eventfd to send notification to userspace.
 153	 */
 154	int (*register_event)(struct mem_cgroup *memcg,
 155			      struct eventfd_ctx *eventfd, const char *args);
 156	/*
 157	 * unregister_event() callback will be called when userspace closes
 158	 * the eventfd or on cgroup removal.  This callback must be set
 159	 * if you want to provide notification functionality.
 160	 */
 161	void (*unregister_event)(struct mem_cgroup *memcg,
 162				 struct eventfd_ctx *eventfd);
 163	/*
 164	 * All fields below needed to unregister event when
 165	 * userspace closes eventfd.
 166	 */
 167	poll_table pt;
 168	wait_queue_head_t *wqh;
 169	wait_queue_entry_t wait;
 170	struct work_struct remove;
 171};
 172
 173static void mem_cgroup_threshold(struct mem_cgroup *memcg);
 174static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
 175
 176/* Stuff for move charges at task migration. */
 177/*
 178 * Types of charges to be moved.
 
 179 */
 180#define MOVE_ANON	0x1U
 181#define MOVE_FILE	0x2U
 182#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
 
 
 183
 184/* "mc" and its members are protected by cgroup_mutex */
 185static struct move_charge_struct {
 186	spinlock_t	  lock; /* for from, to */
 187	struct mm_struct  *mm;
 188	struct mem_cgroup *from;
 189	struct mem_cgroup *to;
 190	unsigned long flags;
 191	unsigned long precharge;
 192	unsigned long moved_charge;
 193	unsigned long moved_swap;
 194	struct task_struct *moving_task;	/* a task moving charges */
 195	wait_queue_head_t waitq;		/* a waitq for other context */
 196} mc = {
 197	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
 198	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
 199};
 200
 201/*
 202 * Maximum loops in mem_cgroup_soft_reclaim(), used for soft
 203 * limit reclaim to prevent infinite loops, if they ever occur.
 204 */
 205#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
 206#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
 207
 208/* for encoding cft->private value on file */
 209enum res_type {
 210	_MEM,
 211	_MEMSWAP,
 
 212	_KMEM,
 213	_TCP,
 214};
 215
 216#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
 217#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
 218#define MEMFILE_ATTR(val)	((val) & 0xffff)
 
 
 219
 220/*
 221 * Iteration constructs for visiting all cgroups (under a tree).  If
 222 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 223 * be used for reference counting.
 224 */
 225#define for_each_mem_cgroup_tree(iter, root)		\
 226	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
 227	     iter != NULL;				\
 228	     iter = mem_cgroup_iter(root, iter, NULL))
 229
 230#define for_each_mem_cgroup(iter)			\
 231	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
 232	     iter != NULL;				\
 233	     iter = mem_cgroup_iter(NULL, iter, NULL))
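/*
 * Illustrative usage sketch (hypothetical helper, not part of this file):
 * a walk that is aborted early must drop the iterator's reference with
 * mem_cgroup_iter_break(), mirroring what mem_cgroup_scan_tasks() does
 * further below.
 *
 *	static bool memcg_tree_over_soft_limit(struct mem_cgroup *root)
 *	{
 *		struct mem_cgroup *iter;
 *
 *		for_each_mem_cgroup_tree(iter, root) {
 *			if (soft_limit_excess(iter)) {
 *				mem_cgroup_iter_break(root, iter);
 *				return true;
 *			}
 *		}
 *		return false;
 *	}
 */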
 
 
 234
 235static inline bool task_is_dying(void)
 236{
 237	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
 238		(current->flags & PF_EXITING);
 239}
 240
 241/* Some nice accessors for the vmpressure. */
 242struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
 243{
 244	if (!memcg)
 245		memcg = root_mem_cgroup;
 246	return &memcg->vmpressure;
 247}
 248
 249struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
 250{
 251	return container_of(vmpr, struct mem_cgroup, vmpressure);
 252}
 253
 254#define CURRENT_OBJCG_UPDATE_BIT 0
 255#define CURRENT_OBJCG_UPDATE_FLAG (1UL << CURRENT_OBJCG_UPDATE_BIT)
 256
 257#ifdef CONFIG_MEMCG_KMEM
 258static DEFINE_SPINLOCK(objcg_lock);
 259
 260bool mem_cgroup_kmem_disabled(void)
 261{
 262	return cgroup_memory_nokmem;
 263}
 264
 265static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
 266				      unsigned int nr_pages);
 
 
 
 267
 268static void obj_cgroup_release(struct percpu_ref *ref)
 269{
 270	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
 271	unsigned int nr_bytes;
 272	unsigned int nr_pages;
 273	unsigned long flags;
 274
 275	/*
 276	 * At this point all allocated objects are freed, and
 277	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
 278	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
 279	 *
 280	 * The following sequence can lead to it:
 281	 * 1) CPU0: objcg == stock->cached_objcg
 282	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
 283	 *          PAGE_SIZE bytes are charged
 284	 * 3) CPU1: a process from another memcg is allocating something,
 285	 *          the stock is flushed,
 286	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
 287	 * 4) CPU0: we release this object,
 288	 *          92 bytes are added to stock->nr_bytes
 289	 * 5) CPU0: the stock is flushed,
 290	 *          92 bytes are added to objcg->nr_charged_bytes
 291	 *
 292	 * In the result, nr_charged_bytes == PAGE_SIZE.
 293	 * This page will be uncharged in obj_cgroup_release().
 294	 */
 295	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
 296	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
 297	nr_pages = nr_bytes >> PAGE_SHIFT;
 298
 299	if (nr_pages)
 300		obj_cgroup_uncharge_pages(objcg, nr_pages);
 
 301
 302	spin_lock_irqsave(&objcg_lock, flags);
 303	list_del(&objcg->list);
 304	spin_unlock_irqrestore(&objcg_lock, flags);
 305
 306	percpu_ref_exit(ref);
 307	kfree_rcu(objcg, rcu);
 308}
 309
 310static struct obj_cgroup *obj_cgroup_alloc(void)
 311{
 312	struct obj_cgroup *objcg;
 313	int ret;
 
 
 
 314
 315	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
 316	if (!objcg)
 317		return NULL;
 318
 319	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
 320			      GFP_KERNEL);
 321	if (ret) {
 322		kfree(objcg);
 323		return NULL;
 
 
 
 324	}
 325	INIT_LIST_HEAD(&objcg->list);
 326	return objcg;
 327}
 
 328
 329static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
 330				  struct mem_cgroup *parent)
 331{
 332	struct obj_cgroup *objcg, *iter;
 333
 334	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
 
 
 
 335
 336	spin_lock_irq(&objcg_lock);
 
 
 337
 338	/* 1) Ready to reparent active objcg. */
 339	list_add(&objcg->list, &memcg->objcg_list);
 340	/* 2) Reparent active objcg and already reparented objcgs to parent. */
 341	list_for_each_entry(iter, &memcg->objcg_list, list)
 342		WRITE_ONCE(iter->memcg, parent);
 343	/* 3) Move already reparented objcgs to the parent's list */
 344	list_splice(&memcg->objcg_list, &parent->objcg_list);
 
 
 
 
 345
 346	spin_unlock_irq(&objcg_lock);
 347
 348	percpu_ref_kill(&objcg->refcnt);
 349}
 350
 351/*
 352 * A lot of the calls to the cache allocation functions are expected to be
 353 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
 354 * conditional to this static branch, we'll have to allow modules that does
 355 * kmem_cache_alloc and the such to see this symbol as well
 356 */
 357DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
 358EXPORT_SYMBOL(memcg_kmem_online_key);
 359
 360DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
 361EXPORT_SYMBOL(memcg_bpf_enabled_key);
 362#endif
 363
 364/**
 365 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
 366 * @folio: folio of interest
 367 *
 368 * If memcg is bound to the default hierarchy, css of the memcg associated
 369 * with @folio is returned.  The returned css remains associated with @folio
 370 * until it is released.
 371 *
 372 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 373 * is returned.
 374 */
 375struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
 376{
 377	struct mem_cgroup *memcg = folio_memcg(folio);
 
 
 378
 379	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
 380		memcg = root_mem_cgroup;
 381
 
 
 382	return &memcg->css;
 383}
 384
 385/**
 386 * page_cgroup_ino - return inode number of the memcg a page is charged to
 387 * @page: the page
 388 *
 389 * Look up the closest online ancestor of the memory cgroup @page is charged to
 390 * and return its inode number or 0 if @page is not charged to any cgroup. It
 391 * is safe to call this function without holding a reference to @page.
 392 *
 393 * Note, this function is inherently racy, because there is nothing to prevent
 394 * the cgroup inode from getting torn down and potentially reallocated a moment
 395 * after page_cgroup_ino() returns, so it only should be used by callers that
 396 * do not care (such as procfs interfaces).
 397 */
 398ino_t page_cgroup_ino(struct page *page)
 399{
 400	struct mem_cgroup *memcg;
 401	unsigned long ino = 0;
 402
 403	rcu_read_lock();
 404	/* page_folio() is racy here, but the entire function is racy anyway */
 405	memcg = folio_memcg_check(page_folio(page));
 
 
 406
 407	while (memcg && !(memcg->css.flags & CSS_ONLINE))
 408		memcg = parent_mem_cgroup(memcg);
 409	if (memcg)
 410		ino = cgroup_ino(memcg->css.cgroup);
 411	rcu_read_unlock();
 412	return ino;
 413}
 414
 415static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
 416					 struct mem_cgroup_tree_per_node *mctz,
 417					 unsigned long new_usage_in_excess)
 
 
 418{
 419	struct rb_node **p = &mctz->rb_root.rb_node;
 420	struct rb_node *parent = NULL;
 421	struct mem_cgroup_per_node *mz_node;
 422	bool rightmost = true;
 423
 424	if (mz->on_tree)
 425		return;
 426
 427	mz->usage_in_excess = new_usage_in_excess;
 428	if (!mz->usage_in_excess)
 429		return;
 430	while (*p) {
 431		parent = *p;
 432		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
 433					tree_node);
 434		if (mz->usage_in_excess < mz_node->usage_in_excess) {
 435			p = &(*p)->rb_left;
 436			rightmost = false;
 437		} else {
 
 
 
 438			p = &(*p)->rb_right;
 439		}
 440	}
 441
 442	if (rightmost)
 443		mctz->rb_rightmost = &mz->tree_node;
 444
 445	rb_link_node(&mz->tree_node, parent, p);
 446	rb_insert_color(&mz->tree_node, &mctz->rb_root);
 447	mz->on_tree = true;
 448}
 449
 450static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
 451					 struct mem_cgroup_tree_per_node *mctz)
 
 
 452{
 453	if (!mz->on_tree)
 454		return;
 455
 456	if (&mz->tree_node == mctz->rb_rightmost)
 457		mctz->rb_rightmost = rb_prev(&mz->tree_node);
 458
 459	rb_erase(&mz->tree_node, &mctz->rb_root);
 460	mz->on_tree = false;
 461}
 462
 463static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
 464				       struct mem_cgroup_tree_per_node *mctz)
 
 
 465{
 466	unsigned long flags;
 467
 468	spin_lock_irqsave(&mctz->lock, flags);
 469	__mem_cgroup_remove_exceeded(mz, mctz);
 470	spin_unlock_irqrestore(&mctz->lock, flags);
 471}
 472
 473static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
 474{
 475	unsigned long nr_pages = page_counter_read(&memcg->memory);
 476	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
 477	unsigned long excess = 0;
 478
 479	if (nr_pages > soft_limit)
 480		excess = nr_pages - soft_limit;
 481
 482	return excess;
 483}
 484
 485static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
 486{
 487	unsigned long excess;
 488	struct mem_cgroup_per_node *mz;
 489	struct mem_cgroup_tree_per_node *mctz;
 490
 491	if (lru_gen_enabled()) {
 492		if (soft_limit_excess(memcg))
 493			lru_gen_soft_reclaim(memcg, nid);
 494		return;
 495	}
 496
 497	mctz = soft_limit_tree.rb_tree_per_node[nid];
 498	if (!mctz)
 499		return;
 500	/*
 501	 * Necessary to update all ancestors when hierarchy is used,
 502	 * because their event counter is not touched.
 503	 */
 504	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
 505		mz = memcg->nodeinfo[nid];
 506		excess = soft_limit_excess(memcg);
 507		/*
 508		 * We have to update the tree if mz is on RB-tree or
 509		 * mem is over its softlimit.
 510		 */
 511		if (excess || mz->on_tree) {
 512			unsigned long flags;
 513
 514			spin_lock_irqsave(&mctz->lock, flags);
 515			/* if on-tree, remove it */
 516			if (mz->on_tree)
 517				__mem_cgroup_remove_exceeded(mz, mctz);
 518			/*
 519			 * Insert again. mz->usage_in_excess will be updated.
 520			 * If excess is 0, no tree ops.
 521			 */
 522			__mem_cgroup_insert_exceeded(mz, mctz, excess);
 523			spin_unlock_irqrestore(&mctz->lock, flags);
 524		}
 525	}
 526}
 527
 528static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
 529{
 530	struct mem_cgroup_tree_per_node *mctz;
 531	struct mem_cgroup_per_node *mz;
 532	int nid;
 533
 534	for_each_node(nid) {
 535		mz = memcg->nodeinfo[nid];
 536		mctz = soft_limit_tree.rb_tree_per_node[nid];
 537		if (mctz)
 538			mem_cgroup_remove_exceeded(mz, mctz);
 
 539	}
 540}
 541
 542static struct mem_cgroup_per_node *
 543__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
 544{
 545	struct mem_cgroup_per_node *mz;
 
 546
 547retry:
 548	mz = NULL;
 549	if (!mctz->rb_rightmost)
 
 550		goto done;		/* Nothing to reclaim from */
 551
 552	mz = rb_entry(mctz->rb_rightmost,
 553		      struct mem_cgroup_per_node, tree_node);
 554	/*
 555	 * Remove the node now but someone else can add it back,
 556	 * we will add it back at the end of reclaim to its correct
 557	 * position in the tree.
 558	 */
 559	__mem_cgroup_remove_exceeded(mz, mctz);
 560	if (!soft_limit_excess(mz->memcg) ||
 561	    !css_tryget(&mz->memcg->css))
 562		goto retry;
 563done:
 564	return mz;
 565}
 566
 567static struct mem_cgroup_per_node *
 568mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
 569{
 570	struct mem_cgroup_per_node *mz;
 571
 572	spin_lock_irq(&mctz->lock);
 573	mz = __mem_cgroup_largest_soft_limit_node(mctz);
 574	spin_unlock_irq(&mctz->lock);
 575	return mz;
 576}
 577
 578/* Subset of vm_event_item to report for memcg event stats */
 579static const unsigned int memcg_vm_event_stat[] = {
 580	PGPGIN,
 581	PGPGOUT,
 582	PGSCAN_KSWAPD,
 583	PGSCAN_DIRECT,
 584	PGSCAN_KHUGEPAGED,
 585	PGSTEAL_KSWAPD,
 586	PGSTEAL_DIRECT,
 587	PGSTEAL_KHUGEPAGED,
 588	PGFAULT,
 589	PGMAJFAULT,
 590	PGREFILL,
 591	PGACTIVATE,
 592	PGDEACTIVATE,
 593	PGLAZYFREE,
 594	PGLAZYFREED,
 595#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
 596	ZSWPIN,
 597	ZSWPOUT,
 598	ZSWPWB,
 599#endif
 600#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 601	THP_FAULT_ALLOC,
 602	THP_COLLAPSE_ALLOC,
 603	THP_SWPOUT,
 604	THP_SWPOUT_FALLBACK,
 605#endif
 606};
 607
 608#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
 609static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
 610
 611static void init_memcg_events(void)
 612{
 613	int i;
 614
 615	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
 616		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
 617}
 618
 619static inline int memcg_events_index(enum vm_event_item idx)
 620{
 621	return mem_cgroup_events_index[idx] - 1;
 622}
 623
 624struct memcg_vmstats_percpu {
 625	/* Stats updates since the last flush */
 626	unsigned int			stats_updates;
 627
 628	/* Cached pointers for fast iteration in memcg_rstat_updated() */
 629	struct memcg_vmstats_percpu	*parent;
 630	struct memcg_vmstats		*vmstats;
 631
 632	/* The above should fit a single cacheline for memcg_rstat_updated() */
 633
 634	/* Local (CPU and cgroup) page state & events */
 635	long			state[MEMCG_NR_STAT];
 636	unsigned long		events[NR_MEMCG_EVENTS];
 637
 638	/* Delta calculation for lockless upward propagation */
 639	long			state_prev[MEMCG_NR_STAT];
 640	unsigned long		events_prev[NR_MEMCG_EVENTS];
 641
 642	/* Cgroup1: threshold notifications & softlimit tree updates */
 643	unsigned long		nr_page_events;
 644	unsigned long		targets[MEM_CGROUP_NTARGETS];
 645} ____cacheline_aligned;
 646
 647struct memcg_vmstats {
 648	/* Aggregated (CPU and subtree) page state & events */
 649	long			state[MEMCG_NR_STAT];
 650	unsigned long		events[NR_MEMCG_EVENTS];
 651
 652	/* Non-hierarchical (CPU aggregated) page state & events */
 653	long			state_local[MEMCG_NR_STAT];
 654	unsigned long		events_local[NR_MEMCG_EVENTS];
 655
 656	/* Pending child counts during tree propagation */
 657	long			state_pending[MEMCG_NR_STAT];
 658	unsigned long		events_pending[NR_MEMCG_EVENTS];
 659
 660	/* Stats updates since the last flush */
 661	atomic64_t		stats_updates;
 662};
 663
 664/*
 665 * memcg and lruvec stats flushing
 666 *
 667 * Many codepaths leading to stats update or read are performance sensitive and
 668 * adding stats flushing in such codepaths is not desirable. So, to optimize the
 669 * flushing the kernel does:
 670 *
 671 * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
 672 *    rstat update tree grow unbounded.
 673 *
 674 * 2) Flush the stats synchronously on reader side only when there are more than
 675 *    (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization can
 676 *    leave the stats out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus)
 677 *    events, but only for 2 seconds due to (1).
 678 */
 679static void flush_memcg_stats_dwork(struct work_struct *w);
 680static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
 681static u64 flush_last_time;
 682
 683#define FLUSH_TIME (2UL*HZ)
 684
 685/*
 686 * Accessors to ensure that preemption is disabled on PREEMPT_RT because it can
 687 * not rely on this as part of an acquired spinlock_t lock. These functions are
 688 * never used in hardirq context on PREEMPT_RT and therefore disabling preemption
 689 * is sufficient.
 690 */
 691static void memcg_stats_lock(void)
 692{
 693	preempt_disable_nested();
 694	VM_WARN_ON_IRQS_ENABLED();
 
 
 
 
 695}
 696
 697static void __memcg_stats_lock(void)
 
 698{
 699	preempt_disable_nested();
 
 700}
 701
 702static void memcg_stats_unlock(void)
 
 703{
 704	preempt_enable_nested();
 705}
 706
 707
 708static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
 709{
 710	return atomic64_read(&vmstats->stats_updates) >
 711		MEMCG_CHARGE_BATCH * num_online_cpus();
 712}
 713
 714static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
 715{
 716	struct memcg_vmstats_percpu *statc;
 717	int cpu = smp_processor_id();
 718
 719	if (!val)
 720		return;
 721
 722	cgroup_rstat_updated(memcg->css.cgroup, cpu);
 723	statc = this_cpu_ptr(memcg->vmstats_percpu);
 724	for (; statc; statc = statc->parent) {
 725		statc->stats_updates += abs(val);
 726		if (statc->stats_updates < MEMCG_CHARGE_BATCH)
 727			continue;
 728
 729		/*
 730		 * If @memcg is already flush-able, increasing stats_updates is
 731		 * redundant. Avoid the overhead of the atomic update.
 732		 */
 733		if (!memcg_vmstats_needs_flush(statc->vmstats))
 734			atomic64_add(statc->stats_updates,
 735				     &statc->vmstats->stats_updates);
 736		statc->stats_updates = 0;
 737	}
 738}
 739
 740static void do_flush_stats(struct mem_cgroup *memcg)
 741{
 742	if (mem_cgroup_is_root(memcg))
 743		WRITE_ONCE(flush_last_time, jiffies_64);
 744
 745	cgroup_rstat_flush(memcg->css.cgroup);
 746}
 747
 748/*
 749 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
 750 * @memcg: root of the subtree to flush
 751 *
 752 * Flushing is serialized by the underlying global rstat lock. There is also a
 753 * minimum amount of work to be done even if there are no stat updates to flush.
 754 * Hence, we only flush the stats if the updates delta exceeds a threshold. This
 755 * avoids unnecessary work and contention on the underlying lock.
 756 */
 757void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
 758{
 759	if (mem_cgroup_disabled())
 760		return;
 761
 762	if (!memcg)
 763		memcg = root_mem_cgroup;
 764
 765	if (memcg_vmstats_needs_flush(memcg->vmstats))
 766		do_flush_stats(memcg);
 767}
 768
 769void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
 770{
 771	/* Only flush if the periodic flusher is one full cycle late */
 772	if (time_after64(jiffies_64, READ_ONCE(flush_last_time) + 2*FLUSH_TIME))
 773		mem_cgroup_flush_stats(memcg);
 774}
 775
 776static void flush_memcg_stats_dwork(struct work_struct *w)
 777{
 778	/*
 779	 * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
 780	 * in latency-sensitive paths is as cheap as possible.
 781	 */
 782	do_flush_stats(root_mem_cgroup);
 783	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
 784}
 785
 786unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
 787{
 788	long x = READ_ONCE(memcg->vmstats->state[idx]);
 789#ifdef CONFIG_SMP
 790	if (x < 0)
 791		x = 0;
 792#endif
 793	return x;
 794}
 795
 796static int memcg_page_state_unit(int item);
 797
 798/*
 799 * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
 800 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
 801 */
 802static int memcg_state_val_in_pages(int idx, int val)
 803{
 804	int unit = memcg_page_state_unit(idx);
 805
 806	if (!val || unit == PAGE_SIZE)
 807		return val;
 808	else
 809		return max(val * unit / PAGE_SIZE, 1UL);
 810}
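/*
 * Worked example (illustrative, assuming a 4KiB PAGE_SIZE): a +512 byte
 * update to a byte-counted item such as NR_SLAB_RECLAIMABLE_B becomes
 * max(512 / 4096, 1) = 1 page, so small but non-zero updates still count
 * toward the flush threshold instead of being lost to integer division.
 */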
 811
 812/**
 813 * __mod_memcg_state - update cgroup memory statistics
 814 * @memcg: the memory cgroup
 815 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 816 * @val: delta to add to the counter, can be negative
 817 */
 818void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
 819{
 820	if (mem_cgroup_disabled())
 821		return;
 822
 823	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
 824	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
 825}
 826
 827/* idx can be of type enum memcg_stat_item or node_stat_item. */
 828static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
 829{
 830	long x = READ_ONCE(memcg->vmstats->state_local[idx]);
 831
 832#ifdef CONFIG_SMP
 833	if (x < 0)
 834		x = 0;
 835#endif
 836	return x;
 837}
 838
 839void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 840			      int val)
 841{
 842	struct mem_cgroup_per_node *pn;
 843	struct mem_cgroup *memcg;
 844
 845	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 846	memcg = pn->memcg;
 847
 848	/*
 849	 * The caller from rmap relies on disabled preemption because they never
 850	 * update their counter from in-interrupt context. For these two
 851	 * counters we check that the update is never performed from an
 852	 * interrupt context while other callers need to have interrupts disabled.
 853	 */
 854	__memcg_stats_lock();
 855	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
 856		switch (idx) {
 857		case NR_ANON_MAPPED:
 858		case NR_FILE_MAPPED:
 859		case NR_ANON_THPS:
 860		case NR_SHMEM_PMDMAPPED:
 861		case NR_FILE_PMDMAPPED:
 862			WARN_ON_ONCE(!in_task());
 863			break;
 864		default:
 865			VM_WARN_ON_IRQS_ENABLED();
 866		}
 867	}
 868
 869	/* Update memcg */
 870	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
 871
 872	/* Update lruvec */
 873	__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
 874
 875	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
 876	memcg_stats_unlock();
 877}
 878
 879/**
 880 * __mod_lruvec_state - update lruvec memory statistics
 881 * @lruvec: the lruvec
 882 * @idx: the stat item
 883 * @val: delta to add to the counter, can be negative
 884 *
 885 * The lruvec is the intersection of the NUMA node and a cgroup. This
 886 * function updates all three counters that are affected by a
 887 * change of state at this level: per-node, per-cgroup, per-lruvec.
 888 */
 889void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 890			int val)
 891{
 892	/* Update node */
 893	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
 894
 895	/* Update memcg and lruvec */
 896	if (!mem_cgroup_disabled())
 897		__mod_memcg_lruvec_state(lruvec, idx, val);
 898}
 899
 900void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
 901			     int val)
 
 902{
 903	struct mem_cgroup *memcg;
 904	pg_data_t *pgdat = folio_pgdat(folio);
 905	struct lruvec *lruvec;
 906
 907	rcu_read_lock();
 908	memcg = folio_memcg(folio);
 909	/* Untracked pages have no memcg, no lruvec. Update only the node */
 910	if (!memcg) {
 911		rcu_read_unlock();
 912		__mod_node_page_state(pgdat, idx, val);
 913		return;
 914	}
 915
 916	lruvec = mem_cgroup_lruvec(memcg, pgdat);
 917	__mod_lruvec_state(lruvec, idx, val);
 918	rcu_read_unlock();
 919}
 920EXPORT_SYMBOL(__lruvec_stat_mod_folio);
 921
 922void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
 923{
 924	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
 925	struct mem_cgroup *memcg;
 926	struct lruvec *lruvec;
 927
 928	rcu_read_lock();
 929	memcg = mem_cgroup_from_slab_obj(p);
 930
 931	/*
 932	 * Untracked pages have no memcg, no lruvec. Update only the
 933	 * node. If we reparent the slab objects to the root memcg,
 934	 * when we free the slab object, we need to update the per-memcg
 935	 * vmstats to keep it correct for the root memcg.
 936	 */
 937	if (!memcg) {
 938		__mod_node_page_state(pgdat, idx, val);
 939	} else {
 940		lruvec = mem_cgroup_lruvec(memcg, pgdat);
 941		__mod_lruvec_state(lruvec, idx, val);
 942	}
 943	rcu_read_unlock();
 944}
 945
 946/**
 947 * __count_memcg_events - account VM events in a cgroup
 948 * @memcg: the memory cgroup
 949 * @idx: the event item
 950 * @count: the number of events that occurred
 951 */
 952void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
 953			  unsigned long count)
 954{
 955	int index = memcg_events_index(idx);
 956
 957	if (mem_cgroup_disabled() || index < 0)
 958		return;
 959
 960	memcg_stats_lock();
 961	__this_cpu_add(memcg->vmstats_percpu->events[index], count);
 962	memcg_rstat_updated(memcg, count);
 963	memcg_stats_unlock();
 964}
 965
 966static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
 
 967{
 968	int index = memcg_events_index(event);
 
 969
 970	if (index < 0)
 971		return 0;
 972	return READ_ONCE(memcg->vmstats->events[index]);
 973}
 974
 975static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
 976{
 977	int index = memcg_events_index(event);
 978
 979	if (index < 0)
 980		return 0;
 981
 982	return READ_ONCE(memcg->vmstats->events_local[index]);
 983}
 984
 985static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 986					 int nr_pages)
 987{
 988	/* pagein of a big page is an event. So, ignore page size */
 989	if (nr_pages > 0)
 990		__count_memcg_events(memcg, PGPGIN, 1);
 991	else {
 992		__count_memcg_events(memcg, PGPGOUT, 1);
 993		nr_pages = -nr_pages; /* for event */
 994	}
 995
 996	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
 997}
 998
 999static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
1000				       enum mem_cgroup_events_target target)
1001{
1002	unsigned long val, next;
1003
1004	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
1005	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
1006	/* from time_after() in jiffies.h */
1007	if ((long)(next - val) < 0) {
1008		switch (target) {
1009		case MEM_CGROUP_TARGET_THRESH:
1010			next = val + THRESHOLDS_EVENTS_TARGET;
1011			break;
1012		case MEM_CGROUP_TARGET_SOFTLIMIT:
1013			next = val + SOFTLIMIT_EVENTS_TARGET;
1014			break;
 
 
 
1015		default:
1016			break;
1017		}
1018		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
1019		return true;
1020	}
1021	return false;
1022}
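/*
 * Illustrative example: with THRESHOLDS_EVENTS_TARGET == 128 and
 * SOFTLIMIT_EVENTS_TARGET == 1024, a CPU that steadily charges and
 * uncharges pages re-arms the threshold check roughly every 128 page
 * events and the soft-limit tree update roughly every 1024 page events;
 * both targets are tracked per CPU in vmstats_percpu->targets[].
 */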
1023
1024/*
1025 * Check events in order.
1026 */
1028static void memcg_check_events(struct mem_cgroup *memcg, int nid)
1029{
1030	if (IS_ENABLED(CONFIG_PREEMPT_RT))
1031		return;
1032
1033	/* threshold event is triggered in finer grain than soft limit */
1034	if (unlikely(mem_cgroup_event_ratelimit(memcg,
1035						MEM_CGROUP_TARGET_THRESH))) {
1036		bool do_softlimit;
 
1037
1038		do_softlimit = mem_cgroup_event_ratelimit(memcg,
1039						MEM_CGROUP_TARGET_SOFTLIMIT);
1040		mem_cgroup_threshold(memcg);
1041		if (unlikely(do_softlimit))
1042			mem_cgroup_update_tree(memcg, nid);
1043	}
1044}
1045
1046struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
1047{
1048	/*
1049	 * mm_update_next_owner() may clear mm->owner to NULL
1050	 * if it races with swapoff, page migration, etc.
1051	 * So this can be called with p == NULL.
1052	 */
1053	if (unlikely(!p))
1054		return NULL;
1055
1056	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
1057}
1058EXPORT_SYMBOL(mem_cgroup_from_task);
1059
1060static __always_inline struct mem_cgroup *active_memcg(void)
1061{
1062	if (!in_task())
1063		return this_cpu_read(int_active_memcg);
1064	else
1065		return current->active_memcg;
1066}
1067
1068/**
1069 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
1070 * @mm: mm from which memcg should be extracted. It can be NULL.
1071 *
 1072 * Obtain a reference on mm->memcg and return it if successful. If mm
1073 * is NULL, then the memcg is chosen as follows:
1074 * 1) The active memcg, if set.
1075 * 2) current->mm->memcg, if available
1076 * 3) root memcg
1077 * If mem_cgroup is disabled, NULL is returned.
1078 */
1079struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
 
1080{
1081	struct mem_cgroup *memcg;
1082
1083	if (mem_cgroup_disabled())
1084		return NULL;
 
1085
1086	/*
1087	 * Page cache insertions can happen without an
1088	 * actual mm context, e.g. during disk probing
1089	 * on boot, loopback IO, acct() writes etc.
 
 
1090	 *
1091	 * No need to css_get on root memcg as the reference
1092	 * counting is disabled on the root level in the
1093	 * cgroup core. See CSS_NO_REF.
1094	 */
1095	if (unlikely(!mm)) {
1096		memcg = active_memcg();
1097		if (unlikely(memcg)) {
1098			/* remote memcg must hold a ref */
1099			css_get(&memcg->css);
1100			return memcg;
1101		}
1102		mm = current->mm;
1103		if (unlikely(!mm))
1104			return root_mem_cgroup;
 
1105	}
1106
1107	rcu_read_lock();
1108	do {
1109		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1110		if (unlikely(!memcg))
1111			memcg = root_mem_cgroup;
1112	} while (!css_tryget(&memcg->css));
1113	rcu_read_unlock();
1114	return memcg;
1115}
1116EXPORT_SYMBOL(get_mem_cgroup_from_mm);
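/*
 * Illustrative usage sketch (hypothetical caller): the returned memcg
 * carries a css reference that must be dropped when the caller is done,
 * and NULL is only returned when the controller is disabled.
 *
 *	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(current->mm);
 *
 *	if (memcg) {
 *		... inspect or charge against memcg ...
 *		css_put(&memcg->css);
 *	}
 */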
1117
1118/**
1119 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
1120 */
1121struct mem_cgroup *get_mem_cgroup_from_current(void)
1122{
1123	struct mem_cgroup *memcg;
1124
1125	if (mem_cgroup_disabled())
1126		return NULL;
1127
1128again:
1129	rcu_read_lock();
1130	memcg = mem_cgroup_from_task(current);
1131	if (!css_tryget(&memcg->css)) {
1132		rcu_read_unlock();
1133		goto again;
1134	}
1135	rcu_read_unlock();
1136	return memcg;
1137}
1138
1139/**
1140 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1141 * @root: hierarchy root
1142 * @prev: previously returned memcg, NULL on first invocation
1143 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1144 *
1145 * Returns references to children of the hierarchy below @root, or
1146 * @root itself, or %NULL after a full round-trip.
1147 *
1148 * Caller must pass the return value in @prev on subsequent
1149 * invocations for reference counting, or use mem_cgroup_iter_break()
1150 * to cancel a hierarchy walk before the round-trip is complete.
1151 *
1152 * Reclaimers can specify a node in @reclaim to divide up the memcgs
1153 * in the hierarchy among all concurrent reclaimers operating on the
1154 * same node.
1155 */
1156struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1157				   struct mem_cgroup *prev,
1158				   struct mem_cgroup_reclaim_cookie *reclaim)
1159{
1160	struct mem_cgroup_reclaim_iter *iter;
1161	struct cgroup_subsys_state *css = NULL;
1162	struct mem_cgroup *memcg = NULL;
1163	struct mem_cgroup *pos = NULL;
1164
1165	if (mem_cgroup_disabled())
1166		return NULL;
1167
1168	if (!root)
1169		root = root_mem_cgroup;
1170
1171	rcu_read_lock();
 
1172
1173	if (reclaim) {
1174		struct mem_cgroup_per_node *mz;
 
 
 
1175
1176		mz = root->nodeinfo[reclaim->pgdat->node_id];
1177		iter = &mz->iter;
1178
1179		/*
1180		 * On start, join the current reclaim iteration cycle.
1181		 * Exit when a concurrent walker completes it.
1182		 */
1183		if (!prev)
1184			reclaim->generation = iter->generation;
1185		else if (reclaim->generation != iter->generation)
1186			goto out_unlock;
1187
1188		while (1) {
1189			pos = READ_ONCE(iter->position);
1190			if (!pos || css_tryget(&pos->css))
1191				break;
1192			/*
1193			 * css reference reached zero, so iter->position will
1194			 * be cleared by ->css_released. However, we should not
1195			 * rely on this happening soon, because ->css_released
1196			 * is called from a work queue, and by busy-waiting we
1197			 * might block it. So we clear iter->position right
1198			 * away.
1199			 */
1200			(void)cmpxchg(&iter->position, pos, NULL);
1201		}
1202	} else if (prev) {
1203		pos = prev;
1204	}
1205
1206	if (pos)
1207		css = &pos->css;
1208
1209	for (;;) {
1210		css = css_next_descendant_pre(css, &root->css);
1211		if (!css) {
1212			/*
1213			 * Reclaimers share the hierarchy walk, and a
1214			 * new one might jump in right at the end of
1215			 * the hierarchy - make sure they see at least
1216			 * one group and restart from the beginning.
1217			 */
1218			if (!prev)
1219				continue;
1220			break;
1221		}
1222
1223		/*
1224		 * Verify the css and acquire a reference.  The root
1225		 * is provided by the caller, so we know it's alive
1226		 * and kicking, and don't take an extra reference.
1227		 */
1228		if (css == &root->css || css_tryget(css)) {
1229			memcg = mem_cgroup_from_css(css);
1230			break;
1231		}
1232	}
1233
1234	if (reclaim) {
1235		/*
1236		 * The position could have already been updated by a competing
1237		 * thread, so check that the value hasn't changed since we read
1238		 * it to avoid reclaiming from the same cgroup twice.
1239		 */
1240		(void)cmpxchg(&iter->position, pos, memcg);
1241
1242		if (pos)
1243			css_put(&pos->css);
1244
1245		if (!memcg)
1246			iter->generation++;
1247	}
1248
1249out_unlock:
1250	rcu_read_unlock();
 
1251	if (prev && prev != root)
1252		css_put(&prev->css);
1253
1254	return memcg;
1255}
1256
1257/**
1258 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1259 * @root: hierarchy root
1260 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1261 */
1262void mem_cgroup_iter_break(struct mem_cgroup *root,
1263			   struct mem_cgroup *prev)
1264{
1265	if (!root)
1266		root = root_mem_cgroup;
1267	if (prev && prev != root)
1268		css_put(&prev->css);
1269}
1270
1271static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1272					struct mem_cgroup *dead_memcg)
1273{
1274	struct mem_cgroup_reclaim_iter *iter;
1275	struct mem_cgroup_per_node *mz;
1276	int nid;
 
 
 
1277
1278	for_each_node(nid) {
1279		mz = from->nodeinfo[nid];
1280		iter = &mz->iter;
1281		cmpxchg(&iter->position, dead_memcg, NULL);
1282	}
1283}
1284
1285static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1286{
1287	struct mem_cgroup *memcg = dead_memcg;
1288	struct mem_cgroup *last;
1289
1290	do {
1291		__invalidate_reclaim_iterators(memcg, dead_memcg);
1292		last = memcg;
1293	} while ((memcg = parent_mem_cgroup(memcg)));
1294
1295	/*
1296	 * When cgroup1 non-hierarchy mode is used,
1297	 * parent_mem_cgroup() does not walk all the way up to the
1298	 * cgroup root (root_mem_cgroup). So we have to handle
1299	 * dead_memcg from cgroup root separately.
1300	 */
1301	if (!mem_cgroup_is_root(last))
1302		__invalidate_reclaim_iterators(root_mem_cgroup,
1303						dead_memcg);
 
 
 
1304}
 
1305
1306/**
1307 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1308 * @memcg: hierarchy root
1309 * @fn: function to call for each task
1310 * @arg: argument passed to @fn
1311 *
1312 * This function iterates over tasks attached to @memcg or to any of its
1313 * descendants and calls @fn for each task. If @fn returns a non-zero
1314 * value, the function breaks the iteration loop. Otherwise, it will iterate
 1315 * value, the function breaks the iteration loop. Otherwise, it will iterate
 1316 * over all tasks and complete the walk.
1317 * This function must not be called for the root memory cgroup.
 
 
1318 */
1319void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1320			   int (*fn)(struct task_struct *, void *), void *arg)
1321{
1322	struct mem_cgroup *iter;
1323	int ret = 0;
1324
1325	BUG_ON(mem_cgroup_is_root(memcg));
1326
1327	for_each_mem_cgroup_tree(iter, memcg) {
1328		struct css_task_iter it;
1329		struct task_struct *task;
1330
1331		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1332		while (!ret && (task = css_task_iter_next(&it)))
1333			ret = fn(task, arg);
1334		css_task_iter_end(&it);
1335		if (ret) {
1336			mem_cgroup_iter_break(memcg, iter);
1337			break;
1338		}
1339	}
1340}
1341
1342#ifdef CONFIG_DEBUG_VM
1343void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1344{
1345	struct mem_cgroup *memcg;
1346
1347	if (mem_cgroup_disabled())
1348		return;
1349
1350	memcg = folio_memcg(folio);
1351
1352	if (!memcg)
1353		VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1354	else
1355		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1356}
1357#endif
1358
1359/**
1360 * folio_lruvec_lock - Lock the lruvec for a folio.
1361 * @folio: Pointer to the folio.
1362 *
1363 * These functions are safe to use under any of the following conditions:
1364 * - folio locked
1365 * - folio_test_lru false
1366 * - folio_memcg_lock()
1367 * - folio frozen (refcount of 0)
1368 *
1369 * Return: The lruvec this folio is on with its lock held.
1370 */
1371struct lruvec *folio_lruvec_lock(struct folio *folio)
1372{
1373	struct lruvec *lruvec = folio_lruvec(folio);
1374
1375	spin_lock(&lruvec->lru_lock);
1376	lruvec_memcg_debug(lruvec, folio);
1377
1378	return lruvec;
1379}
1380
1381/**
1382 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1383 * @folio: Pointer to the folio.
1384 *
1385 * These functions are safe to use under any of the following conditions:
1386 * - folio locked
1387 * - folio_test_lru false
1388 * - folio_memcg_lock()
1389 * - folio frozen (refcount of 0)
1390 *
1391 * Return: The lruvec this folio is on with its lock held and interrupts
1392 * disabled.
1393 */
1394struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1395{
1396	struct lruvec *lruvec = folio_lruvec(folio);
 
 
 
1397
1398	spin_lock_irq(&lruvec->lru_lock);
1399	lruvec_memcg_debug(lruvec, folio);
 
 
1400
1401	return lruvec;
1402}
1403
1404/**
1405 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1406 * @folio: Pointer to the folio.
1407 * @flags: Pointer to irqsave flags.
1408 *
1409 * These functions are safe to use under any of the following conditions:
1410 * - folio locked
1411 * - folio_test_lru false
1412 * - folio_memcg_lock()
1413 * - folio frozen (refcount of 0)
1414 *
1415 * Return: The lruvec this folio is on with its lock held and interrupts
1416 * disabled.
1417 */
1418struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1419		unsigned long *flags)
1420{
1421	struct lruvec *lruvec = folio_lruvec(folio);
1422
1423	spin_lock_irqsave(&lruvec->lru_lock, *flags);
1424	lruvec_memcg_debug(lruvec, folio);
1425
1426	return lruvec;
1427}
1428
1429/**
1430 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1431 * @lruvec: mem_cgroup per zone lru vector
1432 * @lru: index of lru list the page is sitting on
1433 * @zid: zone id of the accounted pages
1434 * @nr_pages: positive when adding or negative when removing
1435 *
1436 * This function must be called under lru_lock, just before a page is added
1437 * to or just after a page is removed from an lru list.
1438 */
1439void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1440				int zid, int nr_pages)
1441{
1442	struct mem_cgroup_per_node *mz;
1443	unsigned long *lru_size;
1444	long size;
1445
1446	if (mem_cgroup_disabled())
1447		return;
1448
1449	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1450	lru_size = &mz->lru_zone_size[zid][lru];
1451
1452	if (nr_pages < 0)
1453		*lru_size += nr_pages;
 
 
 
1454
1455	size = *lru_size;
1456	if (WARN_ONCE(size < 0,
1457		"%s(%p, %d, %d): lru_size %ld\n",
1458		__func__, lruvec, lru, nr_pages, size)) {
1459		VM_BUG_ON(1);
1460		*lru_size = 0;
1461	}
1462
1463	if (nr_pages > 0)
1464		*lru_size += nr_pages;
1465}
1466
 
 
 
1467/**
1468 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1469 * @memcg: the memory cgroup
1470 *
 1471 * Returns the maximum amount of memory @memcg can be charged with, in
1472 * pages.
1473 */
1474static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1475{
1476	unsigned long margin = 0;
1477	unsigned long count;
1478	unsigned long limit;
1479
1480	count = page_counter_read(&memcg->memory);
1481	limit = READ_ONCE(memcg->memory.max);
1482	if (count < limit)
1483		margin = limit - count;
1484
1485	if (do_memsw_account()) {
1486		count = page_counter_read(&memcg->memsw);
1487		limit = READ_ONCE(memcg->memsw.max);
1488		if (count < limit)
1489			margin = min(margin, limit - count);
1490		else
1491			margin = 0;
1492	}
1493
1494	return margin;
1495}
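/*
 * Worked example (illustrative numbers): with memory.max == 100 pages and
 * usage == 70 pages, the memory margin is 30 pages; if memsw accounting is
 * active with memsw.max == 120 pages and memsw usage == 115 pages, the
 * result is min(30, 5) = 5 pages, i.e. the tighter of the two counters.
 */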
1496
1497/*
 1498 * A routine for checking whether "mem" is under move_account() or not.
 
 
 
 
1499 *
 1500 * Checking whether a cgroup is mc.from or mc.to or under the hierarchy of
 1501 * moving cgroups. This is for waiting at high memory pressure
1502 * caused by "move".
1503 */
1504static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1505{
1506	struct mem_cgroup *from;
1507	struct mem_cgroup *to;
1508	bool ret = false;
1509	/*
1510	 * Unlike task_move routines, we access mc.to, mc.from not under
1511	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1512	 */
1513	spin_lock(&mc.lock);
1514	from = mc.from;
1515	to = mc.to;
1516	if (!from)
1517		goto unlock;
1518
1519	ret = mem_cgroup_is_descendant(from, memcg) ||
1520		mem_cgroup_is_descendant(to, memcg);
1521unlock:
1522	spin_unlock(&mc.lock);
1523	return ret;
1524}
1525
1526static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1527{
1528	if (mc.moving_task && current != mc.moving_task) {
1529		if (mem_cgroup_under_move(memcg)) {
1530			DEFINE_WAIT(wait);
1531			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1532			/* moving charge context might have finished. */
1533			if (mc.moving_task)
1534				schedule();
1535			finish_wait(&mc.waitq, &wait);
1536			return true;
1537		}
1538	}
1539	return false;
1540}
1541
1542struct memory_stat {
1543	const char *name;
1544	unsigned int idx;
1545};
1546
1547static const struct memory_stat memory_stats[] = {
1548	{ "anon",			NR_ANON_MAPPED			},
1549	{ "file",			NR_FILE_PAGES			},
1550	{ "kernel",			MEMCG_KMEM			},
1551	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
1552	{ "pagetables",			NR_PAGETABLE			},
1553	{ "sec_pagetables",		NR_SECONDARY_PAGETABLE		},
1554	{ "percpu",			MEMCG_PERCPU_B			},
1555	{ "sock",			MEMCG_SOCK			},
1556	{ "vmalloc",			MEMCG_VMALLOC			},
1557	{ "shmem",			NR_SHMEM			},
1558#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
1559	{ "zswap",			MEMCG_ZSWAP_B			},
1560	{ "zswapped",			MEMCG_ZSWAPPED			},
1561#endif
1562	{ "file_mapped",		NR_FILE_MAPPED			},
1563	{ "file_dirty",			NR_FILE_DIRTY			},
1564	{ "file_writeback",		NR_WRITEBACK			},
1565#ifdef CONFIG_SWAP
1566	{ "swapcached",			NR_SWAPCACHE			},
1567#endif
1568#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1569	{ "anon_thp",			NR_ANON_THPS			},
1570	{ "file_thp",			NR_FILE_THPS			},
1571	{ "shmem_thp",			NR_SHMEM_THPS			},
1572#endif
1573	{ "inactive_anon",		NR_INACTIVE_ANON		},
1574	{ "active_anon",		NR_ACTIVE_ANON			},
1575	{ "inactive_file",		NR_INACTIVE_FILE		},
1576	{ "active_file",		NR_ACTIVE_FILE			},
1577	{ "unevictable",		NR_UNEVICTABLE			},
1578	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B		},
1579	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B		},
1580
1581	/* The memory events */
1582	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON		},
1583	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE		},
1584	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON	},
1585	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE	},
1586	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON		},
1587	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE		},
1588	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM		},
1589};
1590
1591/* The actual unit of the state item, not the same as the output unit */
1592static int memcg_page_state_unit(int item)
1593{
1594	switch (item) {
1595	case MEMCG_PERCPU_B:
1596	case MEMCG_ZSWAP_B:
1597	case NR_SLAB_RECLAIMABLE_B:
1598	case NR_SLAB_UNRECLAIMABLE_B:
1599		return 1;
1600	case NR_KERNEL_STACK_KB:
1601		return SZ_1K;
1602	default:
1603		return PAGE_SIZE;
1604	}
1605}
1606
1607/* Translate stat items to the correct unit for memory.stat output */
1608static int memcg_page_state_output_unit(int item)
1609{
1610	/*
1611	 * Workingset state is actually in pages, but we export it to userspace
1612	 * as a scalar count of events, so special case it here.
1613	 */
1614	switch (item) {
1615	case WORKINGSET_REFAULT_ANON:
1616	case WORKINGSET_REFAULT_FILE:
1617	case WORKINGSET_ACTIVATE_ANON:
1618	case WORKINGSET_ACTIVATE_FILE:
1619	case WORKINGSET_RESTORE_ANON:
1620	case WORKINGSET_RESTORE_FILE:
1621	case WORKINGSET_NODERECLAIM:
1622		return 1;
1623	default:
1624		return memcg_page_state_unit(item);
1625	}
 
1626}
1627
1628static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1629						    int item)
 
 
 
1630{
1631	return memcg_page_state(memcg, item) *
1632		memcg_page_state_output_unit(item);
 
 
 
 
1633}
1634
1635static inline unsigned long memcg_page_state_local_output(
1636		struct mem_cgroup *memcg, int item)
 
 
1637{
1638	return memcg_page_state_local(memcg, item) *
1639		memcg_page_state_output_unit(item);
1640}
1641
1642static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
 
1643{
1644	int i;
 
 
 
 
1645
1646	/*
1647	 * Provide statistics on the state of the memory subsystem as
1648	 * well as cumulative event counters that show past behavior.
1649	 *
1650	 * This list is ordered following a combination of these gradients:
1651	 * 1) generic big picture -> specifics and details
1652	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1653	 *
1654	 * Current memory state:
1655	 */
1656	mem_cgroup_flush_stats(memcg);
 
 
 
1657
1658	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1659		u64 size;
 
 
 
1660
1661		size = memcg_page_state_output(memcg, memory_stats[i].idx);
1662		seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
1663
1664		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1665			size += memcg_page_state_output(memcg,
1666							NR_SLAB_RECLAIMABLE_B);
1667			seq_buf_printf(s, "slab %llu\n", size);
 
1668		}
 
1669	}
1670
1671	/* Accumulated memory events */
1672	seq_buf_printf(s, "pgscan %lu\n",
1673		       memcg_events(memcg, PGSCAN_KSWAPD) +
1674		       memcg_events(memcg, PGSCAN_DIRECT) +
1675		       memcg_events(memcg, PGSCAN_KHUGEPAGED));
1676	seq_buf_printf(s, "pgsteal %lu\n",
1677		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1678		       memcg_events(memcg, PGSTEAL_DIRECT) +
1679		       memcg_events(memcg, PGSTEAL_KHUGEPAGED));
1680
1681	for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1682		if (memcg_vm_event_stat[i] == PGPGIN ||
1683		    memcg_vm_event_stat[i] == PGPGOUT)
1684			continue;
1685
1686		seq_buf_printf(s, "%s %lu\n",
1687			       vm_event_name(memcg_vm_event_stat[i]),
1688			       memcg_events(memcg, memcg_vm_event_stat[i]));
1689	}
1690
1691	/* The above should easily fit into one page */
1692	WARN_ON_ONCE(seq_buf_has_overflowed(s));
1693}
1694
1695static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);
1696
1697static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1698{
1699	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1700		memcg_stat_format(memcg, s);
1701	else
1702		memcg1_stat_format(memcg, s);
1703	WARN_ON_ONCE(seq_buf_has_overflowed(s));
1704}
 
1705
1706/**
1707 * mem_cgroup_print_oom_context: Print OOM information relevant to
1708 * memory controller.
1709 * @memcg: The memory cgroup that went over limit
1710 * @p: Task that is going to be killed
1711 *
1712 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1713 * enabled
1714 */
1715void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1716{
1717	rcu_read_lock();
1718
1719	if (memcg) {
1720		pr_cont(",oom_memcg=");
1721		pr_cont_cgroup_path(memcg->css.cgroup);
1722	} else
1723		pr_cont(",global_oom");
1724	if (p) {
1725		pr_cont(",task_memcg=");
1726		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1727	}
1728	rcu_read_unlock();
1729}
1730
1731/**
1732 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1733 * memory controller.
1734 * @memcg: The memory cgroup that went over limit
1735 */
1736void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1737{
1738	/* Use static buffer, for the caller is holding oom_lock. */
1739	static char buf[PAGE_SIZE];
1740	struct seq_buf s;
1741
1742	lockdep_assert_held(&oom_lock);
1743
1744	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1745		K((u64)page_counter_read(&memcg->memory)),
1746		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1747	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1748		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1749			K((u64)page_counter_read(&memcg->swap)),
1750			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1751	else {
1752		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1753			K((u64)page_counter_read(&memcg->memsw)),
1754			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1755		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1756			K((u64)page_counter_read(&memcg->kmem)),
1757			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1758	}
1759
1760	pr_info("Memory cgroup stats for ");
1761	pr_cont_cgroup_path(memcg->css.cgroup);
1762	pr_cont(":");
1763	seq_buf_init(&s, buf, sizeof(buf));
1764	memory_stat_format(memcg, &s);
1765	seq_buf_do_printk(&s, KERN_INFO);
1766}
1767
1768/*
1769 * Return the memory (and swap, if configured) limit for a memcg.
1770 */
1771unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1772{
1773	unsigned long max = READ_ONCE(memcg->memory.max);
1774
1775	if (do_memsw_account()) {
1776		if (mem_cgroup_swappiness(memcg)) {
1777			/* Calculate swap excess capacity from memsw limit */
1778			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1779
1780			max += min(swap, (unsigned long)total_swap_pages);
 
1781		}
1782	} else {
1783		if (mem_cgroup_swappiness(memcg))
1784			max += min(READ_ONCE(memcg->swap.max),
1785				   (unsigned long)total_swap_pages);
1786	}
1787	return max;
1788}
1789
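/* Current memory usage of the cgroup, in pages. */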
1790unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
 
1791{
1792	return page_counter_read(&memcg->memory);
1793}
1794
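/*
 * Invoke the OOM killer scoped to @memcg while holding oom_lock; the charge
 * margin is re-checked first, since another thread may already have freed
 * enough memory while we were waiting for the lock.
 */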
1795static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1796				     int order)
1797{
1798	struct oom_control oc = {
1799		.zonelist = NULL,
1800		.nodemask = NULL,
1801		.memcg = memcg,
1802		.gfp_mask = gfp_mask,
1803		.order = order,
1804	};
1805	bool ret = true;
1806
1807	if (mutex_lock_killable(&oom_lock))
1808		return true;
1809
1810	if (mem_cgroup_margin(memcg) >= (1 << order))
1811		goto unlock;
1812
1813	/*
1814	 * A few threads which were not waiting at mutex_lock_killable() can
1815	 * fail to bail out. Therefore, check again after holding oom_lock.
1816	 */
1817	ret = task_is_dying() || out_of_memory(&oc);
1818
1819unlock:
1820	mutex_unlock(&oom_lock);
1821	return ret;
1822}
 
1823
1824static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1825				   pg_data_t *pgdat,
1826				   gfp_t gfp_mask,
1827				   unsigned long *total_scanned)
1828{
1829	struct mem_cgroup *victim = NULL;
1830	int total = 0;
1831	int loop = 0;
1832	unsigned long excess;
1833	unsigned long nr_scanned;
1834	struct mem_cgroup_reclaim_cookie reclaim = {
1835		.pgdat = pgdat,
 
1836	};
1837
1838	excess = soft_limit_excess(root_memcg);
1839
1840	while (1) {
1841		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1842		if (!victim) {
1843			loop++;
1844			if (loop >= 2) {
1845				/*
1846				 * If we have not been able to reclaim
1847				 * anything, it might be because there are
1848				 * no reclaimable pages under this hierarchy
1849				 */
1850				if (!total)
1851					break;
1852				/*
1853				 * We want to do more targeted reclaim.
1854				 * excess >> 2 is not too excessive, so we don't
1855				 * reclaim too much, nor too little, so we don't keep
1856				 * coming back to reclaim from this cgroup
1857				 */
1858				if (total >= (excess >> 2) ||
1859					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1860					break;
1861			}
1862			continue;
1863		}
1864		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1865					pgdat, &nr_scanned);
1866		*total_scanned += nr_scanned;
1867		if (!soft_limit_excess(root_memcg))
1868			break;
1869	}
1870	mem_cgroup_iter_break(root_memcg, victim);
1871	return total;
1872}
1873
1874#ifdef CONFIG_LOCKDEP
1875static struct lockdep_map memcg_oom_lock_dep_map = {
1876	.name = "memcg_oom_lock",
1877};
1878#endif
1879
1880static DEFINE_SPINLOCK(memcg_oom_lock);
1881
1882/*
1883 * Check whether the OOM killer is already running under our hierarchy.
1884 * If someone else is running it, return false.
1885 */
1886static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1887{
1888	struct mem_cgroup *iter, *failed = NULL;
1889
1890	spin_lock(&memcg_oom_lock);
1891
1892	for_each_mem_cgroup_tree(iter, memcg) {
1893		if (iter->oom_lock) {
1894			/*
1895			 * this subtree of our hierarchy is already locked,
1896			 * so we cannot grant the lock.
1897			 */
1898			failed = iter;
1899			mem_cgroup_iter_break(memcg, iter);
1900			break;
1901		} else
1902			iter->oom_lock = true;
1903	}
1904
1905	if (failed) {
1906		/*
1907		 * OK, we failed to lock the whole subtree so we have
1908		 * to clean up what we set up, up to the failing subtree
1909		 */
1910		for_each_mem_cgroup_tree(iter, memcg) {
1911			if (iter == failed) {
1912				mem_cgroup_iter_break(memcg, iter);
1913				break;
1914			}
1915			iter->oom_lock = false;
1916		}
1917	} else
1918		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1919
1920	spin_unlock(&memcg_oom_lock);
1921
1922	return !failed;
1923}
1924
1925static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1926{
1927	struct mem_cgroup *iter;
1928
1929	spin_lock(&memcg_oom_lock);
1930	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1931	for_each_mem_cgroup_tree(iter, memcg)
1932		iter->oom_lock = false;
1933	spin_unlock(&memcg_oom_lock);
1934}
1935
1936static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1937{
1938	struct mem_cgroup *iter;
1939
1940	spin_lock(&memcg_oom_lock);
1941	for_each_mem_cgroup_tree(iter, memcg)
1942		iter->under_oom++;
1943	spin_unlock(&memcg_oom_lock);
1944}
1945
1946static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1947{
1948	struct mem_cgroup *iter;
1949
1950	/*
1951	 * Be careful about under_oom underflows because a child memcg
1952	 * could have been added after mem_cgroup_mark_under_oom.
 
1953	 */
1954	spin_lock(&memcg_oom_lock);
1955	for_each_mem_cgroup_tree(iter, memcg)
1956		if (iter->under_oom > 0)
1957			iter->under_oom--;
1958	spin_unlock(&memcg_oom_lock);
1959}
1960
1961static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1962
1963struct oom_wait_info {
1964	struct mem_cgroup *memcg;
1965	wait_queue_entry_t	wait;
1966};
1967
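/*
 * Wake an OOM waiter only if the memcg that recovered and the memcg the
 * waiter is blocked on belong to the same branch of the hierarchy, i.e.
 * one is a descendant of the other.
 */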
1968static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1969	unsigned mode, int sync, void *arg)
1970{
1971	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1972	struct mem_cgroup *oom_wait_memcg;
1973	struct oom_wait_info *oom_wait_info;
1974
1975	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1976	oom_wait_memcg = oom_wait_info->memcg;
1977
1978	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1979	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1980		return 0;
1981	return autoremove_wake_function(wait, mode, sync, arg);
1982}
1983
1984static void memcg_oom_recover(struct mem_cgroup *memcg)
1985{
1986	/*
1987	 * For the following lockless ->under_oom test, the only required
1988	 * guarantee is that it must see the state asserted by an OOM when
1989	 * this function is called as a result of userland actions
1990	 * triggered by the notification of the OOM.  This is trivially
1991	 * achieved by invoking mem_cgroup_mark_under_oom() before
1992	 * triggering notification.
1993	 */
1994	if (memcg && memcg->under_oom)
1995		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1996}
1997
1998/*
1999 * Returns true if it successfully killed one or more processes, though in some
2000 * corner cases it can return true even without killing any process.
2001 */
2002static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
2003{
2004	bool locked, ret;
2005
2006	if (order > PAGE_ALLOC_COSTLY_ORDER)
2007		return false;
2008
2009	memcg_memory_event(memcg, MEMCG_OOM);
2010
2011	/*
2012	 * We are in the middle of the charge context here, so we
2013	 * don't want to block when potentially sitting on a callstack
2014	 * that holds all kinds of filesystem and mm locks.
2015	 *
2016	 * cgroup1 allows disabling the OOM killer and waiting for outside
2017	 * handling until the charge can succeed; remember the context and put
2018	 * the task to sleep at the end of the page fault when all locks are
2019	 * released.
2020	 *
2021	 * On the other hand, in-kernel OOM killer allows for an async victim
2022	 * memory reclaim (oom_reaper) and that means that we are not solely
2023	 * relying on the oom victim to make a forward progress and we can
2024	 * invoke the oom killer here.
2025	 *
2026	 * Please note that mem_cgroup_out_of_memory might fail to find a
2027	 * victim and then we have to bail out from the charge path.
2028	 */
2029	if (READ_ONCE(memcg->oom_kill_disable)) {
2030		if (current->in_user_fault) {
2031			css_get(&memcg->css);
2032			current->memcg_in_oom = memcg;
2033			current->memcg_oom_gfp_mask = mask;
2034			current->memcg_oom_order = order;
2035		}
2036		return false;
2037	}
2038
2039	mem_cgroup_mark_under_oom(memcg);
2040
2041	locked = mem_cgroup_oom_trylock(memcg);
2042
2043	if (locked)
2044		mem_cgroup_oom_notify(memcg);
2045
2046	mem_cgroup_unmark_under_oom(memcg);
2047	ret = mem_cgroup_out_of_memory(memcg, mask, order);
2048
2049	if (locked)
2050		mem_cgroup_oom_unlock(memcg);
2051
2052	return ret;
2053}
2054
2055/**
2056 * mem_cgroup_oom_synchronize - complete memcg OOM handling
2057 * @handle: actually kill/wait or just clean up the OOM state
2058 *
2059 * This has to be called at the end of a page fault if the memcg OOM
2060 * handler was enabled.
2061 *
2062 * Memcg supports userspace OOM handling where failed allocations must
2063 * sleep on a waitqueue until the userspace task resolves the
2064 * situation.  Sleeping directly in the charge context with all kinds
2065 * of locks held is not a good idea, instead we remember an OOM state
2066 * in the task and mem_cgroup_oom_synchronize() has to be called at
2067 * the end of the page fault to complete the OOM handling.
2068 *
2069 * Returns %true if an ongoing memcg OOM situation was detected and
2070 * completed, %false otherwise.
2071 */
2072bool mem_cgroup_oom_synchronize(bool handle)
2073{
2074	struct mem_cgroup *memcg = current->memcg_in_oom;
2075	struct oom_wait_info owait;
2076	bool locked;
2077
2078	/* OOM is global, do not handle */
2079	if (!memcg)
2080		return false;
2081
2082	if (!handle)
2083		goto cleanup;
2084
2085	owait.memcg = memcg;
2086	owait.wait.flags = 0;
2087	owait.wait.func = memcg_oom_wake_function;
2088	owait.wait.private = current;
2089	INIT_LIST_HEAD(&owait.wait.entry);
2090
2091	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
2092	mem_cgroup_mark_under_oom(memcg);
2093
2094	locked = mem_cgroup_oom_trylock(memcg);
2095
2096	if (locked)
2097		mem_cgroup_oom_notify(memcg);
2098
2099	schedule();
2100	mem_cgroup_unmark_under_oom(memcg);
2101	finish_wait(&memcg_oom_waitq, &owait.wait);
2102
2103	if (locked)
2104		mem_cgroup_oom_unlock(memcg);
2105cleanup:
2106	current->memcg_in_oom = NULL;
2107	css_put(&memcg->css);
2108	return true;
2109}
2110
2111/**
2112 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2113 * @victim: task to be killed by the OOM killer
2114 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2115 *
2116 * Returns a pointer to a memory cgroup, which has to be cleaned up
2117 * by killing all belonging OOM-killable tasks.
2118 *
2119 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2120 */
2121struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2122					    struct mem_cgroup *oom_domain)
 
2123{
2124	struct mem_cgroup *oom_group = NULL;
2125	struct mem_cgroup *memcg;
 
2126
2127	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2128		return NULL;
2129
2130	if (!oom_domain)
2131		oom_domain = root_mem_cgroup;
2132
2133	rcu_read_lock();
2134
2135	memcg = mem_cgroup_from_task(victim);
2136	if (mem_cgroup_is_root(memcg))
2137		goto out;
2138
2139	/*
2140	 * If the victim task has been asynchronously moved to a different
2141	 * memory cgroup, we might end up killing tasks outside oom_domain.
2142	 * In this case it's better to ignore memory.group.oom.
 
2143	 */
2144	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2145		goto out;
2146
2147	/*
2148	 * Traverse the memory cgroup hierarchy from the victim task's
2149	 * cgroup up to the OOMing cgroup (or root) to find the
2150	 * highest-level memory cgroup with oom.group set.
2151	 */
2152	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2153		if (READ_ONCE(memcg->oom_group))
2154			oom_group = memcg;
2155
2156		if (memcg == oom_domain)
2157			break;
2158	}
2159
2160	if (oom_group)
2161		css_get(&oom_group->css);
2162out:
2163	rcu_read_unlock();
2164
2165	return oom_group;
2166}
2167
2168void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2169{
2170	pr_info("Tasks in ");
2171	pr_cont_cgroup_path(memcg->css.cgroup);
2172	pr_cont(" are going to be killed due to memory.oom.group set\n");
2173}
2174
2175/**
2176 * folio_memcg_lock - Bind a folio to its memcg.
2177 * @folio: The folio.
2178 *
2179 * This function prevents unlocked LRU folios from being moved to
2180 * another cgroup.
2181 *
2182 * It ensures lifetime of the bound memcg.  The caller is responsible
2183 * for the lifetime of the folio.
2184 */
2185void folio_memcg_lock(struct folio *folio)
2186{
2187	struct mem_cgroup *memcg;
2188	unsigned long flags;
2189
2190	/*
2191	 * The RCU lock is held throughout the transaction.  The fast
2192	 * path can get away without acquiring the memcg->move_lock
2193	 * because page moving starts with an RCU grace period.
2194	 */
2195	rcu_read_lock();
2196
2197	if (mem_cgroup_disabled())
2198		return;
2199again:
2200	memcg = folio_memcg(folio);
2201	if (unlikely(!memcg))
2202		return;
2203
2204#ifdef CONFIG_PROVE_LOCKING
2205	local_irq_save(flags);
2206	might_lock(&memcg->move_lock);
2207	local_irq_restore(flags);
2208#endif
2209
2210	if (atomic_read(&memcg->moving_account) <= 0)
2211		return;
2212
2213	spin_lock_irqsave(&memcg->move_lock, flags);
2214	if (memcg != folio_memcg(folio)) {
2215		spin_unlock_irqrestore(&memcg->move_lock, flags);
2216		goto again;
2217	}
2218
2219	/*
2220	 * When charge migration first begins, we can have multiple
2221	 * critical sections holding the fast-path RCU lock and one
2222	 * holding the slowpath move_lock. Track the task that has the
2223	 * move_lock for folio_memcg_unlock().
2224	 */
2225	memcg->move_lock_task = current;
2226	memcg->move_lock_flags = flags;
2227}
2228
2229static void __folio_memcg_unlock(struct mem_cgroup *memcg)
2230{
2231	if (memcg && memcg->move_lock_task == current) {
2232		unsigned long flags = memcg->move_lock_flags;
2233
2234		memcg->move_lock_task = NULL;
2235		memcg->move_lock_flags = 0;
2236
2237		spin_unlock_irqrestore(&memcg->move_lock, flags);
2238	}
2239
2240	rcu_read_unlock();
2241}
2242
2243/**
2244 * folio_memcg_unlock - Release the binding between a folio and its memcg.
2245 * @folio: The folio.
2246 *
2247 * This releases the binding created by folio_memcg_lock().  This does
2248 * not change the accounting of this folio to its memcg, but it does
2249 * permit others to change it.
2250 */
2251void folio_memcg_unlock(struct folio *folio)
2252{
2253	__folio_memcg_unlock(folio_memcg(folio));
2254}
2255
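/*
 * Per-CPU cache of pre-charged pages (and, with kernel memory accounting,
 * pre-charged bytes plus batched slab vmstat deltas), used to avoid hitting
 * the page counters on every charge and uncharge.
 */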
2256struct memcg_stock_pcp {
2257	local_lock_t stock_lock;
2258	struct mem_cgroup *cached; /* this can never be the root cgroup */
2259	unsigned int nr_pages;
2260
2261#ifdef CONFIG_MEMCG_KMEM
2262	struct obj_cgroup *cached_objcg;
2263	struct pglist_data *cached_pgdat;
2264	unsigned int nr_bytes;
2265	int nr_slab_reclaimable_b;
2266	int nr_slab_unreclaimable_b;
2267#endif
2268
2269	struct work_struct work;
2270	unsigned long flags;
2271#define FLUSHING_CACHED_CHARGE	0
2272};
2273static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
2274	.stock_lock = INIT_LOCAL_LOCK(stock_lock),
2275};
2276static DEFINE_MUTEX(percpu_charge_mutex);
2277
2278#ifdef CONFIG_MEMCG_KMEM
2279static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
2280static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2281				     struct mem_cgroup *root_memcg);
2282static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages);
2283
2284#else
2285static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2286{
2287	return NULL;
2288}
2289static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2290				     struct mem_cgroup *root_memcg)
2291{
2292	return false;
2293}
2294static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2295{
2296}
2297#endif
2298
2299/**
2300 * consume_stock: Try to consume stocked charge on this cpu.
2301 * @memcg: memcg to consume from.
2302 * @nr_pages: how many pages to charge.
2303 *
2304 * The charges will only happen if @memcg matches the current cpu's memcg
2305 * stock, and at least @nr_pages are available in that stock.  Failure to
2306 * service an allocation will refill the stock.
2307 *
2308 * returns true if successful, false otherwise.
2309 */
2310static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2311{
2312	struct memcg_stock_pcp *stock;
2313	unsigned long flags;
2314	bool ret = false;
2315
2316	if (nr_pages > MEMCG_CHARGE_BATCH)
2317		return ret;
2318
2319	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2320
2321	stock = this_cpu_ptr(&memcg_stock);
2322	if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) {
2323		stock->nr_pages -= nr_pages;
2324		ret = true;
2325	}
2326
2327	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2328
2329	return ret;
2330}
2331
2332/*
2333 * Returns stocks cached in percpu and reset cached information.
2334 */
2335static void drain_stock(struct memcg_stock_pcp *stock)
2336{
2337	struct mem_cgroup *old = READ_ONCE(stock->cached);
2338
2339	if (!old)
2340		return;
2341
2342	if (stock->nr_pages) {
2343		page_counter_uncharge(&old->memory, stock->nr_pages);
2344		if (do_memsw_account())
2345			page_counter_uncharge(&old->memsw, stock->nr_pages);
2346		stock->nr_pages = 0;
2347	}
2348
2349	css_put(&old->css);
2350	WRITE_ONCE(stock->cached, NULL);
2351}
2352
2353static void drain_local_stock(struct work_struct *dummy)
2354{
2355	struct memcg_stock_pcp *stock;
2356	struct obj_cgroup *old = NULL;
2357	unsigned long flags;
2358
2359	/*
2360	 * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2361	 * drain_stock races is that we always operate on local CPU stock
2362	 * here with IRQs disabled.
2363	 */
2364	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2365
2366	stock = this_cpu_ptr(&memcg_stock);
2367	old = drain_obj_stock(stock);
2368	drain_stock(stock);
2369	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2370
2371	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2372	if (old)
2373		obj_cgroup_put(old);
2374}
2375
2376/*
2377 * Cache charges(val) to local per_cpu area.
2378 * This will be consumed by consume_stock() function, later.
2379 */
2380static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2381{
2382	struct memcg_stock_pcp *stock;
2383
2384	stock = this_cpu_ptr(&memcg_stock);
2385	if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
2386		drain_stock(stock);
2387		css_get(&memcg->css);
2388		WRITE_ONCE(stock->cached, memcg);
2389	}
2390	stock->nr_pages += nr_pages;
2391
2392	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2393		drain_stock(stock);
2394}
2395
2396static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2397{
2398	unsigned long flags;
2399
2400	local_lock_irqsave(&memcg_stock.stock_lock, flags);
2401	__refill_stock(memcg, nr_pages);
2402	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2403}
2404
2405/*
2406 * Drains all per-CPU charge caches for the given root_memcg, i.e. the subtree
2407 * of the hierarchy under it.
 
2408 */
2409static void drain_all_stock(struct mem_cgroup *root_memcg)
2410{
2411	int cpu, curcpu;
2412
2413	/* If someone's already draining, avoid running more workers. */
2414	if (!mutex_trylock(&percpu_charge_mutex))
2415		return;
2416	/*
2417	 * Notify other cpus that system-wide "drain" is running
2418	 * We do not care about races with the cpu hotplug because cpu down
2419	 * as well as workers from this path always operate on the local
2420	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2421	 */
2422	migrate_disable();
2423	curcpu = smp_processor_id();
2424	for_each_online_cpu(cpu) {
2425		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2426		struct mem_cgroup *memcg;
2427		bool flush = false;
2428
2429		rcu_read_lock();
2430		memcg = READ_ONCE(stock->cached);
2431		if (memcg && stock->nr_pages &&
2432		    mem_cgroup_is_descendant(memcg, root_memcg))
2433			flush = true;
2434		else if (obj_stock_flush_required(stock, root_memcg))
2435			flush = true;
2436		rcu_read_unlock();
2437
2438		if (flush &&
2439		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2440			if (cpu == curcpu)
2441				drain_local_stock(&stock->work);
2442			else if (!cpu_is_isolated(cpu))
2443				schedule_work_on(cpu, &stock->work);
2444		}
2445	}
2446	migrate_enable();
2447	mutex_unlock(&percpu_charge_mutex);
2448}
2449
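/* CPU hotplug callback: return any charge cached on the dead CPU to the page counters. */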
2450static int memcg_hotplug_cpu_dead(unsigned int cpu)
 
2451{
2452	struct memcg_stock_pcp *stock;
2453
2454	stock = &per_cpu(memcg_stock, cpu);
2455	drain_stock(stock);
2456
2457	return 0;
2458}
2459
2460static unsigned long reclaim_high(struct mem_cgroup *memcg,
2461				  unsigned int nr_pages,
2462				  gfp_t gfp_mask)
2463{
2464	unsigned long nr_reclaimed = 0;
2465
2466	do {
2467		unsigned long pflags;
2468
2469		if (page_counter_read(&memcg->memory) <=
2470		    READ_ONCE(memcg->memory.high))
2471			continue;
2472
2473		memcg_memory_event(memcg, MEMCG_HIGH);
2474
2475		psi_memstall_enter(&pflags);
2476		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2477							gfp_mask,
2478							MEMCG_RECLAIM_MAY_SWAP);
2479		psi_memstall_leave(&pflags);
2480	} while ((memcg = parent_mem_cgroup(memcg)) &&
2481		 !mem_cgroup_is_root(memcg));
2482
2483	return nr_reclaimed;
2484}
2485
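/*
 * Deferred memory.high reclaim, scheduled from the charge path when the
 * breach happens outside of task context (see the !in_task() case in
 * try_charge_memcg()).
 */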
2486static void high_work_func(struct work_struct *work)
2487{
2488	struct mem_cgroup *memcg;
2489
2490	memcg = container_of(work, struct mem_cgroup, high_work);
2491	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2492}
2493
2494/*
2495 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2496 * enough to still cause a significant slowdown in most cases, while still
2497 * allowing diagnostics and tracing to proceed without becoming stuck.
2498 */
2499#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2500
2501/*
2502 * When calculating the delay, we use these on either side of the exponentiation to
2503 * maintain precision and scale to a reasonable number of jiffies (see the table
2504 * below).
2505 *
2506 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2507 *   overage ratio to a delay.
2508 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2509 *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2510 *   to produce a reasonable delay curve.
2511 *
2512 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2513 * reasonable delay curve compared to precision-adjusted overage, not
2514 * penalising heavily at first, but still making sure that growth beyond the
2515 * limit penalises misbehaving cgroups by slowing them down exponentially. For
2516 * example, with a high of 100 megabytes:
2517 *
2518 *  +-------+------------------------+
2519 *  | usage | time to allocate in ms |
2520 *  +-------+------------------------+
2521 *  | 100M  |                      0 |
2522 *  | 101M  |                      6 |
2523 *  | 102M  |                     25 |
2524 *  | 103M  |                     57 |
2525 *  | 104M  |                    102 |
2526 *  | 105M  |                    159 |
2527 *  | 106M  |                    230 |
2528 *  | 107M  |                    313 |
2529 *  | 108M  |                    409 |
2530 *  | 109M  |                    518 |
2531 *  | 110M  |                    639 |
2532 *  | 111M  |                    774 |
2533 *  | 112M  |                    921 |
2534 *  | 113M  |                   1081 |
2535 *  | 114M  |                   1254 |
2536 *  | 115M  |                   1439 |
2537 *  | 116M  |                   1638 |
2538 *  | 117M  |                   1849 |
2539 *  | 118M  |                   2000 |
2540 *  | 119M  |                   2000 |
2541 *  | 120M  |                   2000 |
2542 *  +-------+------------------------+
2543 */
2544#define MEMCG_DELAY_PRECISION_SHIFT 20
2545#define MEMCG_DELAY_SCALING_SHIFT 14
2546
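/*
 * Worked example for the table above (before the per-batch scaling applied
 * in calculate_high_delay()): with high = 100M and usage = 104M the overage
 * ratio is 0.04 * 2^20 ~= 41943, and 41943^2 * HZ >> (20 + 14) corresponds
 * to roughly 102ms worth of jiffies, matching the 104M row.
 */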
2547static u64 calculate_overage(unsigned long usage, unsigned long high)
2548{
2549	u64 overage;
2550
2551	if (usage <= high)
2552		return 0;
2553
2554	/*
2555	 * Prevent division by 0 in overage calculation by acting as if
2556	 * it was a threshold of 1 page
2557	 */
2558	high = max(high, 1UL);
2559
2560	overage = usage - high;
2561	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2562	return div64_u64(overage, high);
2563}
2564
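/* Largest memory.high overage ratio found while walking up the hierarchy. */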
2565static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2566{
2567	u64 overage, max_overage = 0;
2568
2569	do {
2570		overage = calculate_overage(page_counter_read(&memcg->memory),
2571					    READ_ONCE(memcg->memory.high));
2572		max_overage = max(overage, max_overage);
2573	} while ((memcg = parent_mem_cgroup(memcg)) &&
2574		 !mem_cgroup_is_root(memcg));
2575
2576	return max_overage;
2577}
2578
2579static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2580{
2581	u64 overage, max_overage = 0;
 
2582
2583	do {
2584		overage = calculate_overage(page_counter_read(&memcg->swap),
2585					    READ_ONCE(memcg->swap.high));
2586		if (overage)
2587			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2588		max_overage = max(overage, max_overage);
2589	} while ((memcg = parent_mem_cgroup(memcg)) &&
2590		 !mem_cgroup_is_root(memcg));
2591
2592	return max_overage;
2593}
2594
2595/*
2596 * Get the number of jiffies that we should penalise a mischievous cgroup which
2597 * is exceeding its memory.high by checking both it and its ancestors.
 
2598 */
2599static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2600					  unsigned int nr_pages,
2601					  u64 max_overage)
2602{
2603	unsigned long penalty_jiffies;
2604
2605	if (!max_overage)
2606		return 0;
2607
2608	/*
2609	 * We use overage compared to memory.high to calculate the number of
2610	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2611	 * fairly lenient on small overages, and increasingly harsh when the
2612	 * memcg in question makes it clear that it has no intention of stopping
2613	 * its crazy behaviour, so we exponentially increase the delay based on
2614	 * overage amount.
2615	 */
2616	penalty_jiffies = max_overage * max_overage * HZ;
2617	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2618	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2619
2620	/*
2621	 * Factor in the task's own contribution to the overage, such that four
2622	 * N-sized allocations are throttled approximately the same as one
2623	 * 4N-sized allocation.
2624	 *
2625	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2626	 * larger the current charge batch is than that.
2627	 */
2628	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2629}
2630
2631/*
2632 * Reclaims memory over the high limit. Called directly from
2633 * try_charge() (context permitting), as well as from the userland
2634 * return path where reclaim is always able to block.
 
2635 */
2636void mem_cgroup_handle_over_high(gfp_t gfp_mask)
2637{
2638	unsigned long penalty_jiffies;
2639	unsigned long pflags;
2640	unsigned long nr_reclaimed;
2641	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2642	int nr_retries = MAX_RECLAIM_RETRIES;
2643	struct mem_cgroup *memcg;
2644	bool in_retry = false;
2645
2646	if (likely(!nr_pages))
2647		return;
2648
2649	memcg = get_mem_cgroup_from_mm(current->mm);
2650	current->memcg_nr_pages_over_high = 0;
2651
2652retry_reclaim:
 
2653	/*
2654	 * Bail if the task is already exiting. Unlike memory.max,
2655	 * memory.high enforcement isn't as strict, and there is no
2656	 * OOM killer involved, which means the excess could already
2657	 * be much bigger (and still growing) than it could for
2658	 * memory.max; the dying task could get stuck in fruitless
2659	 * reclaim for a long time, which isn't desirable.
2660	 */
2661	if (task_is_dying())
2662		goto out;
2663
2664	/*
2665	 * The allocating task should reclaim at least the batch size, but for
2666	 * subsequent retries we only want to do what's necessary to prevent oom
2667	 * or breaching resource isolation.
2668	 *
2669	 * This is distinct from memory.max or page allocator behaviour because
2670	 * memory.high is currently batched, whereas memory.max and the page
2671	 * allocator run every time an allocation is made.
2672	 */
2673	nr_reclaimed = reclaim_high(memcg,
2674				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2675				    gfp_mask);
2676
 
2677	/*
2678	 * memory.high is breached and reclaim is unable to keep up. Throttle
2679	 * allocators proactively to slow down excessive growth.
2680	 */
2681	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2682					       mem_find_max_overage(memcg));
2683
2684	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2685						swap_find_max_overage(memcg));
2686
2687	/*
2688	 * Clamp the max delay per usermode return so as to still keep the
2689	 * application moving forwards and also permit diagnostics, albeit
2690	 * extremely slowly.
2691	 */
2692	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2693
2694	/*
2695	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2696	 * that it's not even worth doing, in an attempt to be nice to those who
2697	 * go only a small amount over their memory.high value and maybe haven't
2698	 * been aggressively reclaimed enough yet.
2699	 */
2700	if (penalty_jiffies <= HZ / 100)
2701		goto out;
2702
2703	/*
2704	 * If reclaim is making forward progress but we're still over
2705	 * memory.high, we want to encourage that rather than doing allocator
2706	 * throttling.
2707	 */
2708	if (nr_reclaimed || nr_retries--) {
2709		in_retry = true;
2710		goto retry_reclaim;
2711	}
2712
2713	/*
2714	 * Reclaim didn't manage to push usage below the limit, slow
2715	 * this allocating task down.
2716	 *
2717	 * If we exit early, we're guaranteed to die (since
2718	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2719	 * need to account for any ill-begotten jiffies to pay them off later.
2720	 */
2721	psi_memstall_enter(&pflags);
2722	schedule_timeout_killable(penalty_jiffies);
2723	psi_memstall_leave(&pflags);
2724
2725out:
2726	css_put(&memcg->css);
2727}
2728
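/*
 * Core charge path: try the per-cpu stock first, then the page counters,
 * falling back to direct reclaim, throttling and finally the OOM killer
 * before the charge is either forced or fails with -ENOMEM.
 */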
2729static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2730			unsigned int nr_pages)
 
2731{
2732	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2733	int nr_retries = MAX_RECLAIM_RETRIES;
2734	struct mem_cgroup *mem_over_limit;
2735	struct page_counter *counter;
2736	unsigned long nr_reclaimed;
2737	bool passed_oom = false;
2738	unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2739	bool drained = false;
2740	bool raised_max_event = false;
2741	unsigned long pflags;
2742
2743retry:
2744	if (consume_stock(memcg, nr_pages))
2745		return 0;
2746
2747	if (!do_memsw_account() ||
2748	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2749		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2750			goto done_restock;
2751		if (do_memsw_account())
2752			page_counter_uncharge(&memcg->memsw, batch);
2753		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2754	} else {
2755		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2756		reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2757	}
2758
2759	if (batch > nr_pages) {
2760		batch = nr_pages;
2761		goto retry;
2762	}
2763
2764	/*
2765	 * Prevent unbounded recursion when reclaim operations need to
2766	 * allocate memory. This might exceed the limits temporarily,
2767	 * but we prefer facilitating memory reclaim and getting back
2768	 * under the limit over triggering OOM kills in these cases.
2769	 */
2770	if (unlikely(current->flags & PF_MEMALLOC))
2771		goto force;
2772
2773	if (unlikely(task_in_memcg_oom(current)))
2774		goto nomem;
2775
2776	if (!gfpflags_allow_blocking(gfp_mask))
2777		goto nomem;
2778
2779	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2780	raised_max_event = true;
2781
2782	psi_memstall_enter(&pflags);
2783	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2784						    gfp_mask, reclaim_options);
2785	psi_memstall_leave(&pflags);
 
2786
2787	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2788		goto retry;
 
2789
2790	if (!drained) {
2791		drain_all_stock(mem_over_limit);
2792		drained = true;
2793		goto retry;
2794	}
2795
2796	if (gfp_mask & __GFP_NORETRY)
2797		goto nomem;
2798	/*
2799	 * Even though the limit is exceeded at this point, reclaim
2800	 * may have been able to free some pages.  Retry the charge
2801	 * before killing the task.
2802	 *
2803	 * Only for regular pages, though: huge pages are rather
2804	 * unlikely to succeed so close to the limit, and we fall back
2805	 * to regular pages anyway in case of failure.
2806	 */
2807	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2808		goto retry;
2809	/*
2810	 * During a task move, charges can be counted twice. So it's better
2811	 * to wait until the end of the task move if one is in progress.
2812	 */
2813	if (mem_cgroup_wait_acct_move(mem_over_limit))
2814		goto retry;
2815
2816	if (nr_retries--)
2817		goto retry;
2818
2819	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2820		goto nomem;
2821
2822	/* Avoid endless loop for tasks bypassed by the oom killer */
2823	if (passed_oom && task_is_dying())
2824		goto nomem;
2825
2826	/*
2827	 * Keep retrying as long as the memcg OOM killer is able to make
2828	 * forward progress, or bypass the charge if the OOM killer
2829	 * couldn't make any progress.
2830	 */
2831	if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2832			   get_order(nr_pages * PAGE_SIZE))) {
2833		passed_oom = true;
2834		nr_retries = MAX_RECLAIM_RETRIES;
2835		goto retry;
2836	}
2837nomem:
2838	/*
2839	 * Memcg doesn't have a dedicated reserve for atomic
2840	 * allocations. But like the global atomic pool, we need to
2841	 * put the burden of reclaim on regular allocation requests
2842	 * and let these go through as privileged allocations.
2843	 */
2844	if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2845		return -ENOMEM;
2846force:
2847	/*
2848	 * If the allocation has to be enforced, don't forget to raise
2849	 * a MEMCG_MAX event.
 
2850	 */
2851	if (!raised_max_event)
2852		memcg_memory_event(mem_over_limit, MEMCG_MAX);
2853
2854	/*
2855	 * The allocation either can't fail or will lead to more memory
2856	 * being freed very soon.  Allow memory usage to go over the limit
2857	 * temporarily by force charging it.
2858	 */
2859	page_counter_charge(&memcg->memory, nr_pages);
2860	if (do_memsw_account())
2861		page_counter_charge(&memcg->memsw, nr_pages);
2862
2863	return 0;
2864
2865done_restock:
2866	if (batch > nr_pages)
2867		refill_stock(memcg, batch - nr_pages);
2868
2869	/*
2870	 * If the hierarchy is above the normal consumption range, schedule
2871	 * reclaim on returning to userland.  We can perform reclaim here
2872	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2873	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2874	 * not recorded as it most likely matches current's and won't
2875	 * change in the meantime.  As high limit is checked again before
2876	 * reclaim, the cost of mismatch is negligible.
2877	 */
2878	do {
2879		bool mem_high, swap_high;
2880
2881		mem_high = page_counter_read(&memcg->memory) >
2882			READ_ONCE(memcg->memory.high);
2883		swap_high = page_counter_read(&memcg->swap) >
2884			READ_ONCE(memcg->swap.high);
2885
2886		/* Don't bother a random interrupted task */
2887		if (!in_task()) {
2888			if (mem_high) {
2889				schedule_work(&memcg->high_work);
2890				break;
2891			}
2892			continue;
2893		}
2894
2895		if (mem_high || swap_high) {
2896			/*
2897			 * The allocating tasks in this cgroup will need to do
2898			 * reclaim or be throttled to prevent further growth
2899			 * of the memory or swap footprints.
2900			 *
2901			 * Target some best-effort fairness between the tasks,
2902			 * and distribute reclaim work and delay penalties
2903			 * based on how much each task is actually allocating.
2904			 */
2905			current->memcg_nr_pages_over_high += batch;
2906			set_notify_resume(current);
2907			break;
2908		}
2909	} while ((memcg = parent_mem_cgroup(memcg)));
2910
2911	/*
2912	 * Reclaim is set up above to be called from the userland
2913	 * return path. But also attempt synchronous reclaim to avoid
2914	 * excessive overrun while the task is still inside the
2915	 * kernel. If this is successful, the return path will see it
2916	 * when it rechecks the overage and simply bail out.
2917	 */
2918	if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2919	    !(current->flags & PF_MEMALLOC) &&
2920	    gfpflags_allow_blocking(gfp_mask))
2921		mem_cgroup_handle_over_high(gfp_mask);
2922	return 0;
2923}
2924
2925static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2926			     unsigned int nr_pages)
2927{
2928	if (mem_cgroup_is_root(memcg))
2929		return 0;
 
2930
2931	return try_charge_memcg(memcg, gfp_mask, nr_pages);
2932}
2933
2934/**
2935 * mem_cgroup_cancel_charge() - cancel an uncommitted try_charge() call.
2936 * @memcg: memcg previously charged.
2937 * @nr_pages: number of pages previously charged.
2938 */
2939void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2940{
2941	if (mem_cgroup_is_root(memcg))
2942		return;
2943
2944	page_counter_uncharge(&memcg->memory, nr_pages);
2945	if (do_memsw_account())
2946		page_counter_uncharge(&memcg->memsw, nr_pages);
2947}
2948
2949static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2950{
2951	VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
2952	/*
2953	 * Any of the following ensures page's memcg stability:
2954	 *
2955	 * - the page lock
2956	 * - LRU isolation
2957	 * - folio_memcg_lock()
2958	 * - exclusive reference
2959	 * - mem_cgroup_trylock_pages()
2960	 */
2961	folio->memcg_data = (unsigned long)memcg;
2962}
2963
2964/**
2965 * mem_cgroup_commit_charge - commit a previously successful try_charge().
2966 * @folio: folio to commit the charge to.
2967 * @memcg: memcg previously charged.
2968 */
2969void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2970{
2971	css_get(&memcg->css);
2972	commit_charge(folio, memcg);
2973
2974	local_irq_disable();
2975	mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
2976	memcg_check_events(memcg, folio_nid(folio));
2977	local_irq_enable();
2978}
2979
2980#ifdef CONFIG_MEMCG_KMEM
2981/*
2982 * The allocated objcg pointers array is not accounted directly.
2983 * Moreover, it should not come from DMA buffer and is not readily
2984 * reclaimable. So those GFP bits should be masked off.
2985 */
2986#define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
2987				 __GFP_ACCOUNT | __GFP_NOFAIL)
2988
2989/*
2990 * mod_objcg_mlstate() may be called with irq enabled, so
2991 * mod_memcg_lruvec_state() should be used.
2992 */
2993static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2994				     struct pglist_data *pgdat,
2995				     enum node_stat_item idx, int nr)
2996{
2997	struct mem_cgroup *memcg;
2998	struct lruvec *lruvec;
2999
3000	rcu_read_lock();
3001	memcg = obj_cgroup_memcg(objcg);
3002	lruvec = mem_cgroup_lruvec(memcg, pgdat);
3003	mod_memcg_lruvec_state(lruvec, idx, nr);
3004	rcu_read_unlock();
3005}
3006
3007int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
3008				 gfp_t gfp, bool new_slab)
3009{
3010	unsigned int objects = objs_per_slab(s, slab);
3011	unsigned long memcg_data;
3012	void *vec;
3013
3014	gfp &= ~OBJCGS_CLEAR_MASK;
3015	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
3016			   slab_nid(slab));
3017	if (!vec)
3018		return -ENOMEM;
3019
3020	memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
3021	if (new_slab) {
3022		/*
3023		 * If the slab is brand new and nobody can yet access its
3024		 * memcg_data, no synchronization is required and memcg_data can
3025		 * be simply assigned.
3026		 */
3027		slab->memcg_data = memcg_data;
3028	} else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) {
3029		/*
3030		 * If the slab is already in use, somebody can allocate and
3031		 * assign obj_cgroups in parallel. In this case the existing
3032		 * objcg vector should be reused.
3033		 */
3034		kfree(vec);
3035		return 0;
3036	}
3037
3038	kmemleak_not_leak(vec);
3039	return 0;
3040}
3041
3042static __always_inline
3043struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
3044{
3045	/*
3046	 * Slab objects are accounted individually, not per-page.
3047	 * Memcg membership data for each individual object is saved in
3048	 * slab->memcg_data.
3049	 */
3050	if (folio_test_slab(folio)) {
3051		struct obj_cgroup **objcgs;
3052		struct slab *slab;
3053		unsigned int off;
3054
3055		slab = folio_slab(folio);
3056		objcgs = slab_objcgs(slab);
3057		if (!objcgs)
3058			return NULL;
3059
3060		off = obj_to_index(slab->slab_cache, slab, p);
3061		if (objcgs[off])
3062			return obj_cgroup_memcg(objcgs[off]);
3063
3064		return NULL;
3065	}
3066
3067	/*
3068	 * folio_memcg_check() is used here, because in theory we can encounter
3069	 * a folio where the slab flag has been cleared already, but
3070	 * slab->memcg_data has not been freed yet.
3071	 * folio_memcg_check() will guarantee that a proper memory
3072	 * cgroup pointer or NULL will be returned.
3073	 */
3074	return folio_memcg_check(folio);
3075}
3076
3077/*
3078 * Returns a pointer to the memory cgroup to which the kernel object is charged.
3079 *
3080 * A passed kernel object can be a slab object, vmalloc object or a generic
3081 * kernel page, so different mechanisms for getting the memory cgroup pointer
3082 * should be used.
3083 *
3084 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
3085 * cannot know for sure how the kernel object is implemented.
3086 * mem_cgroup_from_obj() can be safely used in such cases.
3087 *
3088 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
3089 * cgroup_mutex, etc.
3090 */
3091struct mem_cgroup *mem_cgroup_from_obj(void *p)
 
3092{
3093	struct folio *folio;
3094
3095	if (mem_cgroup_disabled())
3096		return NULL;
3097
3098	if (unlikely(is_vmalloc_addr(p)))
3099		folio = page_folio(vmalloc_to_page(p));
3100	else
3101		folio = virt_to_folio(p);
3102
3103	return mem_cgroup_from_obj_folio(folio, p);
 
3104}
3105
3106/*
3107 * Returns a pointer to the memory cgroup to which the kernel object is charged.
3108 * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects
3109 * allocated using vmalloc().
3110 *
3111 * A passed kernel object must be a slab object or a generic kernel page.
3112 *
3113 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
3114 * cgroup_mutex, etc.
3115 */
3116struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
 
3117{
3118	if (mem_cgroup_disabled())
3119		return NULL;
3120
3121	return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
3122}
3123
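/*
 * Walk from @memcg towards the root and return the first objcg a reference
 * could be taken on, or NULL if none is live (the loop stops before the
 * root memcg).
 */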
3124static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
3125{
3126	struct obj_cgroup *objcg = NULL;
3127
3128	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
3129		objcg = rcu_dereference(memcg->objcg);
3130		if (likely(objcg && obj_cgroup_tryget(objcg)))
3131			break;
3132		objcg = NULL;
3133	}
3134	return objcg;
3135}
 
3136
3137static struct obj_cgroup *current_objcg_update(void)
3138{
3139	struct mem_cgroup *memcg;
3140	struct obj_cgroup *old, *objcg = NULL;
3141
3142	do {
3143		/* Atomically drop the update bit. */
3144		old = xchg(&current->objcg, NULL);
3145		if (old) {
3146			old = (struct obj_cgroup *)
3147				((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG);
3148			if (old)
3149				obj_cgroup_put(old);
3150
3151			old = NULL;
3152		}
3153
3154		/* If new objcg is NULL, no reason for the second atomic update. */
3155		if (!current->mm || (current->flags & PF_KTHREAD))
3156			return NULL;
3157
3158		/*
3159		 * Release the objcg pointer from the previous iteration,
3160		 * if try_cmpxchg() below fails.
3161		 */
3162		if (unlikely(objcg)) {
3163			obj_cgroup_put(objcg);
3164			objcg = NULL;
3165		}
3166
3167		/*
3168		 * Obtain the new objcg pointer. The current task can be
3169		 * asynchronously moved to another memcg and the previous
3170		 * memcg can be offlined. So let's get the memcg pointer
3171		 * and try get a reference to objcg under a rcu read lock.
3172		 */
3173
3174		rcu_read_lock();
3175		memcg = mem_cgroup_from_task(current);
3176		objcg = __get_obj_cgroup_from_memcg(memcg);
3177		rcu_read_unlock();
3178
3179		/*
3180		 * Try to set up a new objcg pointer atomically. If it
3181		 * fails, it means the update flag was set concurrently, so
3182		 * the whole procedure should be repeated.
3183		 */
3184	} while (!try_cmpxchg(&current->objcg, &old, objcg));
3185
3186	return objcg;
3187}
3188
3189__always_inline struct obj_cgroup *current_obj_cgroup(void)
 
3190{
3191	struct mem_cgroup *memcg;
3192	struct obj_cgroup *objcg;
3193
3194	if (in_task()) {
3195		memcg = current->active_memcg;
3196		if (unlikely(memcg))
3197			goto from_memcg;
3198
3199		objcg = READ_ONCE(current->objcg);
3200		if (unlikely((unsigned long)objcg & CURRENT_OBJCG_UPDATE_FLAG))
3201			objcg = current_objcg_update();
3202		/*
3203		 * The objcg reference is held by the task, so it's safe
3204		 * for the current task to use the objcg.
3205		 */
3206		return objcg;
3207	}
3208
3209	memcg = this_cpu_read(int_active_memcg);
3210	if (unlikely(memcg))
3211		goto from_memcg;
3212
3213	return NULL;
3214
3215from_memcg:
3216	objcg = NULL;
3217	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
3218		/*
3219		 * Memcg pointer is protected by scope (see set_active_memcg())
3220		 * and is pinning the corresponding objcg, so objcg can't go
3221		 * away and can be used within the scope without any additional
3222		 * protection.
3223		 */
3224		objcg = rcu_dereference_check(memcg->objcg, 1);
3225		if (likely(objcg))
3226			break;
3227	}
3228
3229	return objcg;
3230}
3231
3232struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
3233{
3234	struct obj_cgroup *objcg;
 
3235
3236	if (!memcg_kmem_online())
3237		return NULL;
3238
3239	if (folio_memcg_kmem(folio)) {
3240		objcg = __folio_objcg(folio);
3241		obj_cgroup_get(objcg);
3242	} else {
3243		struct mem_cgroup *memcg;
3244
3245		rcu_read_lock();
3246		memcg = __folio_memcg(folio);
3247		if (memcg)
3248			objcg = __get_obj_cgroup_from_memcg(memcg);
3249		else
3250			objcg = NULL;
3251		rcu_read_unlock();
3252	}
3253	return objcg;
3254}
3255
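/*
 * Adjust the MEMCG_KMEM statistic; on cgroup1 the dedicated kmem page
 * counter is charged or uncharged as well.
 */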
3256static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
3257{
3258	mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
3259	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
3260		if (nr_pages > 0)
3261			page_counter_charge(&memcg->kmem, nr_pages);
3262		else
3263			page_counter_uncharge(&memcg->kmem, -nr_pages);
3264	}
3265}
 
3266
 
3267
 
3268/*
3269 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
3270 * @objcg: object cgroup to uncharge
3271 * @nr_pages: number of pages to uncharge
 
3272 */
3273static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
3274				      unsigned int nr_pages)
3275{
3276	struct mem_cgroup *memcg;
 
3277
3278	memcg = get_mem_cgroup_from_objcg(objcg);
 
3279
3280	memcg_account_kmem(memcg, -nr_pages);
3281	refill_stock(memcg, nr_pages);
3282
3283	css_put(&memcg->css);
3284}
 
3285
3286/*
3287 * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
3288 * @objcg: object cgroup to charge
3289 * @gfp: reclaim mode
3290 * @nr_pages: number of pages to charge
3291 *
3292 * Returns 0 on success, an error code on failure.
 
3293 */
3294static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3295				   unsigned int nr_pages)
3296{
3297	struct mem_cgroup *memcg;
3298	int ret;
3299
3300	memcg = get_mem_cgroup_from_objcg(objcg);
3301
3302	ret = try_charge_memcg(memcg, gfp, nr_pages);
3303	if (ret)
3304		goto out;
3305
3306	memcg_account_kmem(memcg, nr_pages);
3307out:
3308	css_put(&memcg->css);
3309
3310	return ret;
3311}
3312
3313/**
3314 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3315 * @page: page to charge
3316 * @gfp: reclaim mode
3317 * @order: allocation order
3318 *
3319 * Returns 0 on success, an error code on failure.
3320 */
3321int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3322{
3323	struct obj_cgroup *objcg;
3324	int ret = 0;
3325
3326	objcg = current_obj_cgroup();
3327	if (objcg) {
3328		ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
3329		if (!ret) {
3330			obj_cgroup_get(objcg);
3331			page->memcg_data = (unsigned long)objcg |
3332				MEMCG_DATA_KMEM;
3333			return 0;
3334		}
3335	}
3336	return ret;
3337}
3338
3339/**
3340 * __memcg_kmem_uncharge_page: uncharge a kmem page
3341 * @page: page to uncharge
3342 * @order: allocation order
3343 */
3344void __memcg_kmem_uncharge_page(struct page *page, int order)
3345{
3346	struct folio *folio = page_folio(page);
3347	struct obj_cgroup *objcg;
3348	unsigned int nr_pages = 1 << order;
3349
3350	if (!folio_memcg_kmem(folio))
3351		return;
3352
3353	objcg = __folio_objcg(folio);
3354	obj_cgroup_uncharge_pages(objcg, nr_pages);
3355	folio->memcg_data = 0;
3356	obj_cgroup_put(objcg);
3357}
3358
3359void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3360		     enum node_stat_item idx, int nr)
3361{
3362	struct memcg_stock_pcp *stock;
3363	struct obj_cgroup *old = NULL;
3364	unsigned long flags;
3365	int *bytes;
3366
3367	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3368	stock = this_cpu_ptr(&memcg_stock);
3369
3370	/*
3371	 * Save vmstat data in stock and skip vmstat array update unless
3372	 * accumulating over a page of vmstat data or when pgdat or idx
3373	 * changes.
 
3374	 */
3375	if (READ_ONCE(stock->cached_objcg) != objcg) {
3376		old = drain_obj_stock(stock);
3377		obj_cgroup_get(objcg);
3378		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3379				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3380		WRITE_ONCE(stock->cached_objcg, objcg);
3381		stock->cached_pgdat = pgdat;
3382	} else if (stock->cached_pgdat != pgdat) {
3383		/* Flush the existing cached vmstat data */
3384		struct pglist_data *oldpg = stock->cached_pgdat;
3385
3386		if (stock->nr_slab_reclaimable_b) {
3387			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3388					  stock->nr_slab_reclaimable_b);
3389			stock->nr_slab_reclaimable_b = 0;
3390		}
3391		if (stock->nr_slab_unreclaimable_b) {
3392			mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3393					  stock->nr_slab_unreclaimable_b);
3394			stock->nr_slab_unreclaimable_b = 0;
3395		}
3396		stock->cached_pgdat = pgdat;
3397	}
3398
3399	bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3400					       : &stock->nr_slab_unreclaimable_b;
3401	/*
3402	 * Even for a large object >= PAGE_SIZE, the vmstat data will still be
3403	 * cached locally at least once before pushing it out.
3404	 */
3405	if (!*bytes) {
3406		*bytes = nr;
3407		nr = 0;
3408	} else {
3409		*bytes += nr;
3410		if (abs(*bytes) > PAGE_SIZE) {
3411			nr = *bytes;
3412			*bytes = 0;
3413		} else {
3414			nr = 0;
3415		}
3416	}
3417	if (nr)
3418		mod_objcg_mlstate(objcg, pgdat, idx, nr);
3419
3420	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3421	if (old)
3422		obj_cgroup_put(old);
3423}
3424
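/* Try to satisfy an object-sized charge from the per-cpu byte stock. */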
3425static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
 
3426{
3427	struct memcg_stock_pcp *stock;
3428	unsigned long flags;
3429	bool ret = false;
3430
3431	local_lock_irqsave(&memcg_stock.stock_lock, flags);
3432
3433	stock = this_cpu_ptr(&memcg_stock);
3434	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
3435		stock->nr_bytes -= nr_bytes;
3436		ret = true;
3437	}
3438
3439	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3440
3441	return ret;
3442}
3443
3444static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
3445{
3446	struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
3447
3448	if (!old)
3449		return NULL;
3450
3451	if (stock->nr_bytes) {
3452		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3453		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3454
3455		if (nr_pages) {
3456			struct mem_cgroup *memcg;
3457
3458			memcg = get_mem_cgroup_from_objcg(old);
3459
3460			memcg_account_kmem(memcg, -nr_pages);
3461			__refill_stock(memcg, nr_pages);
3462
3463			css_put(&memcg->css);
3464		}
3465
3466		/*
3467		 * The leftover is flushed to the centralized per-memcg value.
3468		 * On the next attempt to refill obj stock it will be moved
3469		 * to a per-cpu stock (probably, on an other CPU), see
3470		 * refill_obj_stock().
3471		 *
3472		 * How often it's flushed is a trade-off between the memory
3473		 * limit enforcement accuracy and potential CPU contention,
3474		 * so it might be changed in the future.
3475		 */
3476		atomic_add(nr_bytes, &old->nr_charged_bytes);
3477		stock->nr_bytes = 0;
3478	}
3479
3480	/*
3481	 * Flush the vmstat data in the current stock.
3482	 */
3483	if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3484		if (stock->nr_slab_reclaimable_b) {
3485			mod_objcg_mlstate(old, stock->cached_pgdat,
3486					  NR_SLAB_RECLAIMABLE_B,
3487					  stock->nr_slab_reclaimable_b);
3488			stock->nr_slab_reclaimable_b = 0;
3489		}
3490		if (stock->nr_slab_unreclaimable_b) {
3491			mod_objcg_mlstate(old, stock->cached_pgdat,
3492					  NR_SLAB_UNRECLAIMABLE_B,
3493					  stock->nr_slab_unreclaimable_b);
3494			stock->nr_slab_unreclaimable_b = 0;
3495		}
3496		stock->cached_pgdat = NULL;
3497	}
3498
3499	WRITE_ONCE(stock->cached_objcg, NULL);
3500	/*
3501	 * The `old' object needs to be released by the caller via
3502	 * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
3503	 */
3504	return old;
 
 
3505}
3506
3507static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3508				     struct mem_cgroup *root_memcg)
3509{
3510	struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
3511	struct mem_cgroup *memcg;
3512
3513	if (objcg) {
3514		memcg = obj_cgroup_memcg(objcg);
3515		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3516			return true;
3517	}
3518
3519	return false;
3520}
3521
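/*
 * Return @nr_bytes of object charge to the local stock.  Once more than a
 * page worth of bytes is stocked and @allow_uncharge is set, whole pages
 * are uncharged back to the page counters.
 */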
3522static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3523			     bool allow_uncharge)
3524{
3525	struct memcg_stock_pcp *stock;
3526	struct obj_cgroup *old = NULL;
3527	unsigned long flags;
3528	unsigned int nr_pages = 0;
3529
3530	local_lock_irqsave(&memcg_stock.stock_lock, flags);
 
3531
3532	stock = this_cpu_ptr(&memcg_stock);
3533	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
3534		old = drain_obj_stock(stock);
3535		obj_cgroup_get(objcg);
3536		WRITE_ONCE(stock->cached_objcg, objcg);
3537		stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3538				? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3539		allow_uncharge = true;	/* Allow uncharge when objcg changes */
3540	}
3541	stock->nr_bytes += nr_bytes;
3542
3543	if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3544		nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3545		stock->nr_bytes &= (PAGE_SIZE - 1);
3546	}
3547
3548	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3549	if (old)
3550		obj_cgroup_put(old);
3551
3552	if (nr_pages)
3553		obj_cgroup_uncharge_pages(objcg, nr_pages);
3554}
3555
3556int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3557{
3558	unsigned int nr_pages, nr_bytes;
3559	int ret;
3560
3561	if (consume_obj_stock(objcg, size))
3562		return 0;
3563
3564	/*
3565	 * In theory, objcg->nr_charged_bytes can have enough
3566	 * pre-charged bytes to satisfy the allocation. However,
3567	 * flushing objcg->nr_charged_bytes requires two atomic
3568	 * operations, and objcg->nr_charged_bytes can't be big.
3569	 * The shared objcg->nr_charged_bytes can also become a
3570	 * performance bottleneck if all tasks of the same memcg are
3571	 * trying to update it. So it's better to ignore it and try to
3572	 * grab some new pages. The stock's nr_bytes will be flushed to
3573	 * objcg->nr_charged_bytes later on when objcg changes.
3574	 *
3575	 * The stock's nr_bytes may contain enough pre-charged bytes
3576	 * to allow charging one less page, but we can't rely
3577	 * on the pre-charged bytes not being changed outside of
3578	 * consume_obj_stock() or refill_obj_stock(). So ignore those
3579	 * pre-charged bytes as well when charging pages. To avoid a
3580	 * page uncharge right after a page charge, we set the
3581	 * allow_uncharge flag to false when calling refill_obj_stock()
3582	 * to temporarily allow the pre-charged bytes to exceed the page
3583	 * size limit. The maximum reachable value of the pre-charged
3584	 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3585	 * race.
3586	 */
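	/*
	 * Example with 4K pages: a 9000-byte charge yields nr_pages = 2 and
	 * nr_bytes = 808 below, so three pages are charged and the unused
	 * PAGE_SIZE - 808 bytes are parked in the per-cpu stock via
	 * refill_obj_stock().
	 */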
3587	nr_pages = size >> PAGE_SHIFT;
3588	nr_bytes = size & (PAGE_SIZE - 1);
3589
3590	if (nr_bytes)
3591		nr_pages += 1;
3592
3593	ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3594	if (!ret && nr_bytes)
3595		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3596
3597	return ret;
3598}
3599
3600void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3601{
3602	refill_obj_stock(objcg, size, true);
3603}
 
3604
3605#endif /* CONFIG_MEMCG_KMEM */
3606
3607/*
3608 * Because page_memcg(head) is not set on tails, set it now.
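 * For example, splitting an order-9 folio into order-0 pages copies
 * folio->memcg_data to the 511 tail pages and takes 511 extra css or
 * objcg references to match.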
3609 */
3610void split_page_memcg(struct page *head, int old_order, int new_order)
3611{
3612	struct folio *folio = page_folio(head);
3613	struct mem_cgroup *memcg = folio_memcg(folio);
3614	int i;
3615	unsigned int old_nr = 1 << old_order;
3616	unsigned int new_nr = 1 << new_order;
3617
3618	if (mem_cgroup_disabled() || !memcg)
3619		return;
3620
3621	for (i = new_nr; i < old_nr; i += new_nr)
3622		folio_page(folio, i)->memcg_data = folio->memcg_data;
3623
3624	if (folio_memcg_kmem(folio))
3625		obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
3626	else
3627		css_get_many(&memcg->css, old_nr / new_nr - 1);
3628}
3629
3630#ifdef CONFIG_SWAP
3631/**
3632 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3633 * @entry: swap entry to be moved
3634 * @from:  mem_cgroup which the entry is moved from
3635 * @to:  mem_cgroup which the entry is moved to
3636 *
3637 * It succeeds only when the swap_cgroup's record for this entry is the same
3638 * as the mem_cgroup's id of @from.
3639 *
3640 * Returns 0 on success, -EINVAL on failure.
3641 *
3642 * The caller must have charged to @to, IOW, called page_counter_charge()
3643 * for both memory and memsw, and called css_get().
3644 */
3645static int mem_cgroup_move_swap_account(swp_entry_t entry,
3646				struct mem_cgroup *from, struct mem_cgroup *to)
3647{
3648	unsigned short old_id, new_id;
3649
3650	old_id = mem_cgroup_id(from);
3651	new_id = mem_cgroup_id(to);
3652
3653	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3654		mod_memcg_state(from, MEMCG_SWAP, -1);
3655		mod_memcg_state(to, MEMCG_SWAP, 1);
3656		return 0;
3657	}
3658	return -EINVAL;
3659}
3660#else
3661static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3662				struct mem_cgroup *from, struct mem_cgroup *to)
3663{
3664	return -EINVAL;
3665}
3666#endif
3667
3668static DEFINE_MUTEX(memcg_max_mutex);
3669
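/*
 * Update memory.max (or memsw.max when @memsw is true).  Shrinking below
 * the current usage drains the per-cpu stocks and retries reclaim until
 * the usage fits under the new limit, giving up with -EBUSY or -EINTR.
 */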
3670static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3671				 unsigned long max, bool memsw)
3672{
3673	bool enlarge = false;
3674	bool drained = false;
3675	int ret;
3676	bool limits_invariant;
3677	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3678
3679	do {
 
3680		if (signal_pending(current)) {
3681			ret = -EINTR;
3682			break;
3683		}
3684
3685		mutex_lock(&memcg_max_mutex);
3686		/*
3687		 * Make sure that the new limit (memsw or memory limit) doesn't
3688		 * break our basic invariant rule memory.max <= memsw.max.
3689		 */
3690		limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3691					   max <= memcg->memsw.max;
3692		if (!limits_invariant) {
3693			mutex_unlock(&memcg_max_mutex);
3694			ret = -EINVAL;
 
3695			break;
3696		}
3697		if (max > counter->max)
3698			enlarge = true;
3699		ret = page_counter_set_max(counter, max);
3700		mutex_unlock(&memcg_max_mutex);
3701
3702		if (!ret)
3703			break;
3704
3705		if (!drained) {
3706			drain_all_stock(memcg);
3707			drained = true;
3708			continue;
3709		}
 
3710
3711		if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3712					memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP)) {
3713			ret = -EBUSY;
3714			break;
3715		}
3716	} while (true);
3717
3718	if (!ret && enlarge)
3719		memcg_oom_recover(memcg);
3720
3721	return ret;
3722}
3723
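/*
 * Soft limit reclaim: repeatedly pick the memcg with the largest
 * soft-limit excess on @pgdat from the per-node rb-tree and reclaim from
 * it.  Only used for order-0 global reclaim and a no-op when the
 * multi-gen LRU is enabled.
 */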
3724unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3725					    gfp_t gfp_mask,
3726					    unsigned long *total_scanned)
3727{
3728	unsigned long nr_reclaimed = 0;
3729	struct mem_cgroup_per_node *mz, *next_mz = NULL;
3730	unsigned long reclaimed;
3731	int loop = 0;
3732	struct mem_cgroup_tree_per_node *mctz;
3733	unsigned long excess;
3734
3735	if (lru_gen_enabled())
3736		return 0;
3737
3738	if (order > 0)
3739		return 0;
3740
3741	mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];
3742
3743	/*
3744	 * Do not even bother to check the largest node if the root
3745	 * is empty. Do it lockless to prevent lock bouncing. Races
3746	 * are acceptable as soft limit is best effort anyway.
3747	 */
3748	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3749		return 0;
3750
3751	/*
3752	 * This loop can run a while, especially if mem_cgroups continuously
3753	 * keep exceeding their soft limit and putting the system under
3754	 * pressure.
3755	 */
3756	do {
3757		if (next_mz)
3758			mz = next_mz;
3759		else
3760			mz = mem_cgroup_largest_soft_limit_node(mctz);
3761		if (!mz)
3762			break;
3763
3764		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3765						    gfp_mask, total_scanned);
 
3766		nr_reclaimed += reclaimed;
3767		spin_lock_irq(&mctz->lock);
 
3768
3769		/*
3770		 * If we failed to reclaim anything from this memory cgroup,
3771		 * it is time to move on to the next one.
3772		 */
3773		next_mz = NULL;
3774		if (!reclaimed)
3775			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3776
3777		excess = soft_limit_excess(mz->memcg);
3778		/*
3779		 * One school of thought says that we should not add
3780		 * back the node to the tree if reclaim returns 0.
3781		 * But our reclaim could return 0 simply because, due
3782		 * to the reclaim priority, we are exposing a smaller
3783		 * subset of memory to reclaim from. Consider this a longer
3784		 * term TODO.
3785		 */
3786		/* If excess == 0, no tree ops */
3787		__mem_cgroup_insert_exceeded(mz, mctz, excess);
3788		spin_unlock_irq(&mctz->lock);
3789		css_put(&mz->memcg->css);
3790		loop++;
3791		/*
3792		 * Could not reclaim anything and there are no more
3793		 * mem cgroups to try or we seem to be looping without
3794		 * reclaiming anything.
3795		 */
3796		if (!nr_reclaimed &&
3797			(next_mz == NULL ||
3798			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3799			break;
3800	} while (!nr_reclaimed);
3801	if (next_mz)
3802		css_put(&next_mz->memcg->css);
3803	return nr_reclaimed;
3804}
3805
3806/*
3807 * Reclaims as many pages from the given memcg as possible.
3808 *
3809 * Caller is responsible for holding a css reference on memcg.
3810 */
3811static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3812{
3813	int nr_retries = MAX_RECLAIM_RETRIES;
3814
3815	/* we call try-to-free pages to make this cgroup empty */
3816	lru_add_drain_all();
3817
3818	drain_all_stock(memcg);
3819
3820	/* try to free all pages in this cgroup */
3821	while (nr_retries && page_counter_read(&memcg->memory)) {
3822		if (signal_pending(current))
3823			return -EINTR;
3824
3825		if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3826						  MEMCG_RECLAIM_MAY_SWAP))
3827			nr_retries--;
3828	}
3829
3830	return 0;
3831}
3832
3833static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3834					    char *buf, size_t nbytes,
3835					    loff_t off)
3836{
3837	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3838
3839	if (mem_cgroup_is_root(memcg))
3840		return -EINVAL;
3841	return mem_cgroup_force_empty(memcg) ?: nbytes;
3842}
3843
3844static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3845				     struct cftype *cft)
3846{
3847	return 1;
3848}
3849
3850static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3851				      struct cftype *cft, u64 val)
3852{
3853	if (val == 1)
3854		return 0;
3855
3856	pr_warn_once("Non-hierarchical mode is deprecated. "
3857		     "Please report your usecase to linux-mm@kvack.org if you "
3858		     "depend on this functionality.\n");
3859
3860	return -EINVAL;
3861}
3862
3863static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3864{
3865	unsigned long val;
3866
3867	if (mem_cgroup_is_root(memcg)) {
3868		/*
3869		 * Approximate root's usage from global state. This isn't
3870		 * perfect, but the root usage was always an approximation.
3871		 */
3872		val = global_node_page_state(NR_FILE_PAGES) +
3873			global_node_page_state(NR_ANON_MAPPED);
3874		if (swap)
3875			val += total_swap_pages - get_nr_swap_pages();
3876	} else {
3877		if (!swap)
3878			val = page_counter_read(&memcg->memory);
3879		else
3880			val = page_counter_read(&memcg->memsw);
3881	}
3882	return val;
3883}
3884
3885enum {
3886	RES_USAGE,
3887	RES_LIMIT,
3888	RES_MAX_USAGE,
3889	RES_FAILCNT,
3890	RES_SOFT_LIMIT,
3891};
3892
3893static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3894			       struct cftype *cft)
3895{
3896	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3897	struct page_counter *counter;
 
 
3898
3899	switch (MEMFILE_TYPE(cft->private)) {
3900	case _MEM:
3901		counter = &memcg->memory;
3902		break;
3903	case _MEMSWAP:
3904		counter = &memcg->memsw;
3905		break;
3906	case _KMEM:
3907		counter = &memcg->kmem;
3908		break;
3909	case _TCP:
3910		counter = &memcg->tcpmem;
3911		break;
3912	default:
3913		BUG();
3914	}
3915
3916	switch (MEMFILE_ATTR(cft->private)) {
3917	case RES_USAGE:
3918		if (counter == &memcg->memory)
3919			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3920		if (counter == &memcg->memsw)
3921			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3922		return (u64)page_counter_read(counter) * PAGE_SIZE;
3923	case RES_LIMIT:
3924		return (u64)counter->max * PAGE_SIZE;
3925	case RES_MAX_USAGE:
3926		return (u64)counter->watermark * PAGE_SIZE;
3927	case RES_FAILCNT:
3928		return counter->failcnt;
3929	case RES_SOFT_LIMIT:
3930		return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE;
3931	default:
3932		BUG();
3933	}
3934}
3935
3936/*
3937 * This function doesn't do anything useful. Its only job is to provide a read
3938 * handler for a file so that cgroup_file_mode() will add read permissions.
3939 */
3940static int mem_cgroup_dummy_seq_show(__always_unused struct seq_file *m,
3941				     __always_unused void *v)
3942{
3943	return -EINVAL;
3944}
3945
3946#ifdef CONFIG_MEMCG_KMEM
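/*
 * Set up kernel memory accounting for a new cgroup: allocate its
 * obj_cgroup and enable kmem charging via the static branch.
 */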
3947static int memcg_online_kmem(struct mem_cgroup *memcg)
3948{
3949	struct obj_cgroup *objcg;
 
3950
3951	if (mem_cgroup_kmem_disabled())
3952		return 0;
3953
3954	if (unlikely(mem_cgroup_is_root(memcg)))
3955		return 0;
3956
3957	objcg = obj_cgroup_alloc();
3958	if (!objcg)
3959		return -ENOMEM;
3960
3961	objcg->memcg = memcg;
3962	rcu_assign_pointer(memcg->objcg, objcg);
3963	obj_cgroup_get(objcg);
3964	memcg->orig_objcg = objcg;
3965
3966	static_branch_enable(&memcg_kmem_online_key);
3967
3968	memcg->kmemcg_id = memcg->id.id;
3969
3970	return 0;
3971}
3972
3973static void memcg_offline_kmem(struct mem_cgroup *memcg)
3974{
3975	struct mem_cgroup *parent;
3976
3977	if (mem_cgroup_kmem_disabled())
3978		return;
3979
3980	if (unlikely(mem_cgroup_is_root(memcg)))
3981		return;
3982
3983	parent = parent_mem_cgroup(memcg);
3984	if (!parent)
3985		parent = root_mem_cgroup;
3986
3987	memcg_reparent_objcgs(memcg, parent);
3988
 
3989	/*
3990	 * After we have finished memcg_reparent_objcgs(), all list_lrus
3991	 * corresponding to this cgroup are guaranteed to remain empty.
3992	 * The ordering is imposed by list_lru_node->lock taken by
3993	 * memcg_reparent_list_lrus().
3994	 */
3995	memcg_reparent_list_lrus(memcg, parent);
3996}
3997#else
3998static int memcg_online_kmem(struct mem_cgroup *memcg)
3999{
4000	return 0;
4001}
4002static void memcg_offline_kmem(struct mem_cgroup *memcg)
4003{
 
4004}
4005#endif /* CONFIG_MEMCG_KMEM */
4006
4007static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
4008{
4009	int ret;
4010
4011	mutex_lock(&memcg_max_mutex);
4012
4013	ret = page_counter_set_max(&memcg->tcpmem, max);
4014	if (ret)
4015		goto out;
4016
4017	if (!memcg->tcpmem_active) {
4018		/*
4019		 * The active flag needs to be written after the static_key
4020		 * update. This is what guarantees that the socket activation
4021		 * function is the last one to run. See mem_cgroup_sk_alloc()
4022		 * for details, and note that we don't mark any socket as
4023		 * belonging to this memcg until that flag is up.
4024		 *
4025		 * We need to do this, because static_keys will span multiple
4026		 * sites, but we can't control their order. If we mark a socket
4027		 * as accounted, but the accounting functions are not patched in
4028		 * yet, we'll lose accounting.
4029		 *
4030		 * We never race with the readers in mem_cgroup_sk_alloc(),
4031		 * because when this value changes, the code to process it is not
4032		 * patched in yet.
4033		 */
4034		static_branch_inc(&memcg_sockets_enabled_key);
4035		memcg->tcpmem_active = true;
4036	}
4037out:
4038	mutex_unlock(&memcg_max_mutex);
4039	return ret;
4040}
4041
4042/*
4043 * Write handler for the RES_LIMIT and RES_SOFT_LIMIT control files.
4045 */
4046static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
4047				char *buf, size_t nbytes, loff_t off)
4048{
4049	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4050	unsigned long nr_pages;
 
 
4051	int ret;
4052
4053	buf = strstrip(buf);
4054	ret = page_counter_memparse(buf, "-1", &nr_pages);
4055	if (ret)
4056		return ret;
4057
4058	switch (MEMFILE_ATTR(of_cft(of)->private)) {
4059	case RES_LIMIT:
4060		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
4061			ret = -EINVAL;
4062			break;
4063		}
4064		switch (MEMFILE_TYPE(of_cft(of)->private)) {
4065		case _MEM:
4066			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
4067			break;
4068		case _MEMSWAP:
4069			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
4070			break;
4071		case _KMEM:
4072			pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
4073				     "Writing any value to this file has no effect. "
4074				     "Please report your usecase to linux-mm@kvack.org if you "
4075				     "depend on this functionality.\n");
4076			ret = 0;
4077			break;
4078		case _TCP:
4079			ret = memcg_update_tcp_max(memcg, nr_pages);
4080			break;
4081		}
4082		break;
4083	case RES_SOFT_LIMIT:
4084		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
4085			ret = -EOPNOTSUPP;
4086		} else {
4087			WRITE_ONCE(memcg->soft_limit, nr_pages);
4088			ret = 0;
4089		}
4090		break;
4091	}
4092	return ret ?: nbytes;
4093}
4094
4095static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
4096				size_t nbytes, loff_t off)
4097{
4098	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4099	struct page_counter *counter;
4100
4101	switch (MEMFILE_TYPE(of_cft(of)->private)) {
4102	case _MEM:
4103		counter = &memcg->memory;
4104		break;
4105	case _MEMSWAP:
4106		counter = &memcg->memsw;
4107		break;
4108	case _KMEM:
4109		counter = &memcg->kmem;
4110		break;
4111	case _TCP:
4112		counter = &memcg->tcpmem;
4113		break;
4114	default:
4115		BUG();
4116	}
4117
4118	switch (MEMFILE_ATTR(of_cft(of)->private)) {
4119	case RES_MAX_USAGE:
4120		page_counter_reset_watermark(counter);
4121		break;
4122	case RES_FAILCNT:
4123		counter->failcnt = 0;
4124		break;
4125	default:
4126		BUG();
4127	}
4128
4129	return nbytes;
4130}
4131
4132static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
4133					struct cftype *cft)
4134{
4135	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
4136}
4137
4138#ifdef CONFIG_MMU
4139static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
4140					struct cftype *cft, u64 val)
4141{
4142	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4143
4144	pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
4145		     "Please report your usecase to linux-mm@kvack.org if you "
4146		     "depend on this functionality.\n");
4147
4148	if (val & ~MOVE_MASK)
4149		return -EINVAL;
4150
4151	/*
4152	 * No kind of locking is needed in here, because ->can_attach() will
4153	 * check this value once at the beginning of the process, and then carry
4154	 * on with stale data. This means that changes to this value will only
4155	 * affect task migrations starting after the change.
4156	 */
4157	memcg->move_charge_at_immigrate = val;
4158	return 0;
4159}
4160#else
4161static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
4162					struct cftype *cft, u64 val)
4163{
4164	return -ENOSYS;
4165}
4166#endif
4167
4168#ifdef CONFIG_NUMA
4169
4170#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
4171#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
4172#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
4173
4174static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
4175				int nid, unsigned int lru_mask, bool tree)
4176{
4177	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4178	unsigned long nr = 0;
4179	enum lru_list lru;
4180
4181	VM_BUG_ON((unsigned)nid >= nr_node_ids);
4182
4183	for_each_lru(lru) {
4184		if (!(BIT(lru) & lru_mask))
4185			continue;
4186		if (tree)
4187			nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
4188		else
4189			nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
4190	}
4191	return nr;
4192}
4193
4194static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
4195					     unsigned int lru_mask,
4196					     bool tree)
4197{
4198	unsigned long nr = 0;
4199	enum lru_list lru;
4200
4201	for_each_lru(lru) {
4202		if (!(BIT(lru) & lru_mask))
4203			continue;
4204		if (tree)
4205			nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
4206		else
4207			nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
4208	}
4209	return nr;
4210}
4211
4212static int memcg_numa_stat_show(struct seq_file *m, void *v)
4213{
4214	struct numa_stat {
4215		const char *name;
4216		unsigned int lru_mask;
4217	};
4218
4219	static const struct numa_stat stats[] = {
4220		{ "total", LRU_ALL },
4221		{ "file", LRU_ALL_FILE },
4222		{ "anon", LRU_ALL_ANON },
4223		{ "unevictable", BIT(LRU_UNEVICTABLE) },
4224	};
4225	const struct numa_stat *stat;
4226	int nid;
4227	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4228
4229	mem_cgroup_flush_stats(memcg);
4230
4231	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4232		seq_printf(m, "%s=%lu", stat->name,
4233			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4234						   false));
4235		for_each_node_state(nid, N_MEMORY)
4236			seq_printf(m, " N%d=%lu", nid,
4237				   mem_cgroup_node_nr_lru_pages(memcg, nid,
4238							stat->lru_mask, false));
4239		seq_putc(m, '\n');
4240	}
4241
4242	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
 
4243
4244		seq_printf(m, "hierarchical_%s=%lu", stat->name,
4245			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4246						   true));
4247		for_each_node_state(nid, N_MEMORY)
4248			seq_printf(m, " N%d=%lu", nid,
4249				   mem_cgroup_node_nr_lru_pages(memcg, nid,
4250							stat->lru_mask, true));
4251		seq_putc(m, '\n');
4252	}
4253
4254	return 0;
4255}
4256#endif /* CONFIG_NUMA */
4257
4258static const unsigned int memcg1_stats[] = {
4259	NR_FILE_PAGES,
4260	NR_ANON_MAPPED,
4261#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4262	NR_ANON_THPS,
4263#endif
4264	NR_SHMEM,
4265	NR_FILE_MAPPED,
4266	NR_FILE_DIRTY,
4267	NR_WRITEBACK,
4268	WORKINGSET_REFAULT_ANON,
4269	WORKINGSET_REFAULT_FILE,
4270#ifdef CONFIG_SWAP
4271	MEMCG_SWAP,
4272	NR_SWAPCACHE,
4273#endif
4274};
4275
4276static const char *const memcg1_stat_names[] = {
4277	"cache",
4278	"rss",
4279#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4280	"rss_huge",
4281#endif
4282	"shmem",
4283	"mapped_file",
4284	"dirty",
4285	"writeback",
4286	"workingset_refault_anon",
4287	"workingset_refault_file",
4288#ifdef CONFIG_SWAP
4289	"swap",
4290	"swapcached",
4291#endif
4292};
4293
4294/* Universal VM events cgroup1 shows, original sort order */
4295static const unsigned int memcg1_events[] = {
4296	PGPGIN,
4297	PGPGOUT,
4298	PGFAULT,
4299	PGMAJFAULT,
4300};
4301
4302static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
4303{
4304	unsigned long memory, memsw;
4305	struct mem_cgroup *mi;
4306	unsigned int i;
4307
4308	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4309
4310	mem_cgroup_flush_stats(memcg);
 
 
4311
4312	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4313		unsigned long nr;
 
4314
4315		nr = memcg_page_state_local_output(memcg, memcg1_stats[i]);
4316		seq_buf_printf(s, "%s %lu\n", memcg1_stat_names[i], nr);
4317	}
4318
4319	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4320		seq_buf_printf(s, "%s %lu\n", vm_event_name(memcg1_events[i]),
4321			       memcg_events_local(memcg, memcg1_events[i]));
4322
4323	for (i = 0; i < NR_LRU_LISTS; i++)
4324		seq_buf_printf(s, "%s %lu\n", lru_list_name(i),
4325			       memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4326			       PAGE_SIZE);
 
4327
4328	/* Hierarchical information */
4329	memory = memsw = PAGE_COUNTER_MAX;
4330	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4331		memory = min(memory, READ_ONCE(mi->memory.max));
4332		memsw = min(memsw, READ_ONCE(mi->memsw.max));
4333	}
4334	seq_buf_printf(s, "hierarchical_memory_limit %llu\n",
4335		       (u64)memory * PAGE_SIZE);
4336	seq_buf_printf(s, "hierarchical_memsw_limit %llu\n",
4337		       (u64)memsw * PAGE_SIZE);
4338
4339	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4340		unsigned long nr;
4341
4342		nr = memcg_page_state_output(memcg, memcg1_stats[i]);
4343		seq_buf_printf(s, "total_%s %llu\n", memcg1_stat_names[i],
4344			       (u64)nr);
4345	}
4346
4347	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4348		seq_buf_printf(s, "total_%s %llu\n",
4349			       vm_event_name(memcg1_events[i]),
4350			       (u64)memcg_events(memcg, memcg1_events[i]));
4351
4352	for (i = 0; i < NR_LRU_LISTS; i++)
4353		seq_buf_printf(s, "total_%s %llu\n", lru_list_name(i),
4354			       (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4355			       PAGE_SIZE);
4356
4357#ifdef CONFIG_DEBUG_VM
4358	{
4359		pg_data_t *pgdat;
4360		struct mem_cgroup_per_node *mz;
4361		unsigned long anon_cost = 0;
4362		unsigned long file_cost = 0;
4363
4364		for_each_online_pgdat(pgdat) {
4365			mz = memcg->nodeinfo[pgdat->node_id];
4366
4367			anon_cost += mz->lruvec.anon_cost;
4368			file_cost += mz->lruvec.file_cost;
4369		}
4370		seq_buf_printf(s, "anon_cost %lu\n", anon_cost);
4371		seq_buf_printf(s, "file_cost %lu\n", file_cost);
4372	}
4373#endif
 
 
4374}
4375
4376static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4377				      struct cftype *cft)
4378{
4379	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4380
4381	return mem_cgroup_swappiness(memcg);
4382}
4383
4384static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4385				       struct cftype *cft, u64 val)
4386{
4387	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
4388
4389	if (val > 200)
4390		return -EINVAL;
4391
4392	if (!mem_cgroup_is_root(memcg))
4393		WRITE_ONCE(memcg->swappiness, val);
4394	else
4395		WRITE_ONCE(vm_swappiness, val);
4396
4397	return 0;
4398}
4399
4400static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4401{
4402	struct mem_cgroup_threshold_ary *t;
4403	unsigned long usage;
4404	int i;
4405
4406	rcu_read_lock();
4407	if (!swap)
4408		t = rcu_dereference(memcg->thresholds.primary);
4409	else
4410		t = rcu_dereference(memcg->memsw_thresholds.primary);
4411
4412	if (!t)
4413		goto unlock;
4414
4415	usage = mem_cgroup_usage(memcg, swap);
4416
4417	/*
4418	 * current_threshold points to the threshold just below or equal to usage.
4419	 * If that is no longer true, a threshold was crossed after the last
4420	 * call of __mem_cgroup_threshold().
4421	 */
4422	i = t->current_threshold;
4423
4424	/*
4425	 * Iterate backward over the array of thresholds starting from
4426	 * current_threshold and check if a threshold is crossed.
4427	 * If none of the thresholds below usage is crossed, we read
4428	 * only one element of the array here.
4429	 */
4430	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4431		eventfd_signal(t->entries[i].eventfd);
4432
4433	/* i = current_threshold + 1 */
4434	i++;
4435
4436	/*
4437	 * Iterate forward over the array of thresholds starting from
4438	 * current_threshold+1 and check if a threshold is crossed.
4439	 * If none of the thresholds above usage is crossed, we read
4440	 * only one element of the array here.
4441	 */
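	/*
	 * Example: with thresholds {4M, 8M, 16M} and usage having risen from
	 * 6M to 20M since the last check, the backward walk above signals
	 * nothing, the forward walk below signals the 8M and 16M entries,
	 * and current_threshold ends up on the 16M entry.
	 */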
4442	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4443		eventfd_signal(t->entries[i].eventfd);
4444
4445	/* Update current_threshold */
4446	t->current_threshold = i - 1;
4447unlock:
4448	rcu_read_unlock();
4449}
4450
4451static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4452{
4453	while (memcg) {
4454		__mem_cgroup_threshold(memcg, false);
4455		if (do_memsw_account())
4456			__mem_cgroup_threshold(memcg, true);
4457
4458		memcg = parent_mem_cgroup(memcg);
4459	}
4460}
4461
4462static int compare_thresholds(const void *a, const void *b)
4463{
4464	const struct mem_cgroup_threshold *_a = a;
4465	const struct mem_cgroup_threshold *_b = b;
4466
4467	if (_a->threshold > _b->threshold)
4468		return 1;
4469
4470	if (_a->threshold < _b->threshold)
4471		return -1;
4472
4473	return 0;
4474}
4475
4476static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4477{
4478	struct mem_cgroup_eventfd_list *ev;
4479
4480	spin_lock(&memcg_oom_lock);
4481
4482	list_for_each_entry(ev, &memcg->oom_notify, list)
4483		eventfd_signal(ev->eventfd);
4484
4485	spin_unlock(&memcg_oom_lock);
4486	return 0;
4487}
4488
4489static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4490{
4491	struct mem_cgroup *iter;
4492
4493	for_each_mem_cgroup_tree(iter, memcg)
4494		mem_cgroup_oom_notify_cb(iter);
4495}
4496
4497static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4498	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4499{
4500	struct mem_cgroup_thresholds *thresholds;
4501	struct mem_cgroup_threshold_ary *new;
4502	unsigned long threshold;
4503	unsigned long usage;
4504	int i, size, ret;
4505
4506	ret = page_counter_memparse(args, "-1", &threshold);
4507	if (ret)
4508		return ret;
4509
4510	mutex_lock(&memcg->thresholds_lock);
4511
4512	if (type == _MEM) {
4513		thresholds = &memcg->thresholds;
4514		usage = mem_cgroup_usage(memcg, false);
4515	} else if (type == _MEMSWAP) {
4516		thresholds = &memcg->memsw_thresholds;
4517		usage = mem_cgroup_usage(memcg, true);
4518	} else
4519		BUG();
4520
 
 
4521	/* Check if a threshold crossed before adding a new one */
4522	if (thresholds->primary)
4523		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4524
4525	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4526
4527	/* Allocate memory for new array of thresholds */
4528	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
 
4529	if (!new) {
4530		ret = -ENOMEM;
4531		goto unlock;
4532	}
4533	new->size = size;
4534
4535	/* Copy thresholds (if any) to new array */
4536	if (thresholds->primary)
4537		memcpy(new->entries, thresholds->primary->entries,
4538		       flex_array_size(new, entries, size - 1));
 
4539
4540	/* Add new threshold */
4541	new->entries[size - 1].eventfd = eventfd;
4542	new->entries[size - 1].threshold = threshold;
4543
4544	/* Sort thresholds. Registering a new threshold isn't time-critical */
4545	sort(new->entries, size, sizeof(*new->entries),
4546			compare_thresholds, NULL);
4547
4548	/* Find current threshold */
4549	new->current_threshold = -1;
4550	for (i = 0; i < size; i++) {
4551		if (new->entries[i].threshold <= usage) {
4552			/*
4553			 * new->current_threshold will not be used until
4554			 * rcu_assign_pointer(), so it's safe to increment
4555			 * it here.
4556			 */
4557			++new->current_threshold;
4558		} else
4559			break;
4560	}
4561
4562	/* Free old spare buffer and save old primary buffer as spare */
4563	kfree(thresholds->spare);
4564	thresholds->spare = thresholds->primary;
4565
4566	rcu_assign_pointer(thresholds->primary, new);
4567
4568	/* To be sure that nobody uses thresholds */
4569	synchronize_rcu();
4570
4571unlock:
4572	mutex_unlock(&memcg->thresholds_lock);
4573
4574	return ret;
4575}
4576
4577static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4578	struct eventfd_ctx *eventfd, const char *args)
4579{
4580	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4581}
4582
4583static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4584	struct eventfd_ctx *eventfd, const char *args)
4585{
4586	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4587}
4588
4589static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4590	struct eventfd_ctx *eventfd, enum res_type type)
4591{
4592	struct mem_cgroup_thresholds *thresholds;
4593	struct mem_cgroup_threshold_ary *new;
4594	unsigned long usage;
4595	int i, j, size, entries;
4596
4597	mutex_lock(&memcg->thresholds_lock);
4598
4599	if (type == _MEM) {
4600		thresholds = &memcg->thresholds;
4601		usage = mem_cgroup_usage(memcg, false);
4602	} else if (type == _MEMSWAP) {
4603		thresholds = &memcg->memsw_thresholds;
4604		usage = mem_cgroup_usage(memcg, true);
4605	} else
4606		BUG();
4607
4608	if (!thresholds->primary)
4609		goto unlock;
4610
 
 
4611	/* Check if a threshold crossed before removing */
4612	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4613
4614	/* Calculate the new number of thresholds */
4615	size = entries = 0;
4616	for (i = 0; i < thresholds->primary->size; i++) {
4617		if (thresholds->primary->entries[i].eventfd != eventfd)
4618			size++;
4619		else
4620			entries++;
4621	}
4622
4623	new = thresholds->spare;
4624
4625	/* If no items related to eventfd have been cleared, nothing to do */
4626	if (!entries)
4627		goto unlock;
4628
4629	/* Set thresholds array to NULL if we don't have thresholds */
4630	if (!size) {
4631		kfree(new);
4632		new = NULL;
4633		goto swap_buffers;
4634	}
4635
4636	new->size = size;
4637
4638	/* Copy thresholds and find current threshold */
4639	new->current_threshold = -1;
4640	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4641		if (thresholds->primary->entries[i].eventfd == eventfd)
4642			continue;
4643
4644		new->entries[j] = thresholds->primary->entries[i];
4645		if (new->entries[j].threshold <= usage) {
4646			/*
4647			 * new->current_threshold will not be used
4648			 * until rcu_assign_pointer(), so it's safe to increment
4649			 * it here.
4650			 */
4651			++new->current_threshold;
4652		}
4653		j++;
4654	}
4655
4656swap_buffers:
4657	/* Swap primary and spare array */
4658	thresholds->spare = thresholds->primary;
4659
4660	rcu_assign_pointer(thresholds->primary, new);
4661
4662	/* To be sure that nobody uses thresholds */
4663	synchronize_rcu();
4664
4665	/* If all events are unregistered, free the spare array */
4666	if (!new) {
4667		kfree(thresholds->spare);
4668		thresholds->spare = NULL;
4669	}
4670unlock:
4671	mutex_unlock(&memcg->thresholds_lock);
4672}
4673
4674static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4675	struct eventfd_ctx *eventfd)
4676{
4677	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4678}
4679
4680static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4681	struct eventfd_ctx *eventfd)
4682{
4683	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4684}
4685
4686static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4687	struct eventfd_ctx *eventfd, const char *args)
4688{
4689	struct mem_cgroup_eventfd_list *event;
4690
4691	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4692	if (!event)
4693		return -ENOMEM;
4694
4695	spin_lock(&memcg_oom_lock);
4696
4697	event->eventfd = eventfd;
4698	list_add(&event->list, &memcg->oom_notify);
4699
4700	/* already in OOM ? */
4701	if (memcg->under_oom)
4702		eventfd_signal(eventfd);
4703	spin_unlock(&memcg_oom_lock);
4704
4705	return 0;
4706}
4707
4708static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4709	struct eventfd_ctx *eventfd)
4710{
4711	struct mem_cgroup_eventfd_list *ev, *tmp;
4712
4713	spin_lock(&memcg_oom_lock);
4714
4715	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4716		if (ev->eventfd == eventfd) {
4717			list_del(&ev->list);
4718			kfree(ev);
4719		}
4720	}
4721
4722	spin_unlock(&memcg_oom_lock);
4723}
4724
4725static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4726{
4727	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4728
4729	seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable));
4730	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4731	seq_printf(sf, "oom_kill %lu\n",
4732		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4733	return 0;
4734}
4735
4736static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4737	struct cftype *cft, u64 val)
4738{
4739	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
4740
4741	/* cannot be set on the root cgroup and only 0 and 1 are allowed */
4742	if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
4743		return -EINVAL;
4744
4745	WRITE_ONCE(memcg->oom_kill_disable, val);
4746	if (!val)
4747		memcg_oom_recover(memcg);
4748
4749	return 0;
4750}
4751
4752#ifdef CONFIG_CGROUP_WRITEBACK
4753
4754#include <trace/events/writeback.h>
4755
4756static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4757{
4758	return wb_domain_init(&memcg->cgwb_domain, gfp);
4759}
4760
4761static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4762{
4763	wb_domain_exit(&memcg->cgwb_domain);
4764}
4765
4766static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4767{
4768	wb_domain_size_changed(&memcg->cgwb_domain);
4769}
4770
4771struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4772{
4773	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4774
4775	if (!memcg->css.parent)
4776		return NULL;
4777
4778	return &memcg->cgwb_domain;
4779}
4780
4781/**
4782 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4783 * @wb: bdi_writeback in question
4784 * @pfilepages: out parameter for number of file pages
4785 * @pheadroom: out parameter for number of allocatable pages according to memcg
4786 * @pdirty: out parameter for number of dirty pages
4787 * @pwriteback: out parameter for number of pages under writeback
4788 *
4789 * Determine the numbers of file, headroom, dirty, and writeback pages in
4790 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4791 * is a bit more involved.
4792 *
4793 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4794 * headroom is calculated as the lowest headroom of itself and the
4795 * ancestors.  Note that this doesn't consider the actual amount of
4796 * available memory in the system.  The caller should further cap
4797 * *@pheadroom accordingly.
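 *
 * For example, a memcg with max = 100 MiB, an unset high limit and 70 MiB
 * of usage contributes 30 MiB of headroom; if some ancestor has only
 * 10 MiB of headroom, *@pheadroom ends up as 10 MiB.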
4798 */
4799void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4800			 unsigned long *pheadroom, unsigned long *pdirty,
4801			 unsigned long *pwriteback)
4802{
4803	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4804	struct mem_cgroup *parent;
4805
4806	mem_cgroup_flush_stats_ratelimited(memcg);
4807
4808	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4809	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4810	*pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4811			memcg_page_state(memcg, NR_ACTIVE_FILE);
4812
4813	*pheadroom = PAGE_COUNTER_MAX;
4814	while ((parent = parent_mem_cgroup(memcg))) {
4815		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4816					    READ_ONCE(memcg->memory.high));
4817		unsigned long used = page_counter_read(&memcg->memory);
4818
4819		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4820		memcg = parent;
4821	}
4822}
4823
4824/*
4825 * Foreign dirty flushing
4826 *
4827 * There's an inherent mismatch between memcg and writeback.  The former
4828 * tracks ownership per-page while the latter per-inode.  This was a
4829 * deliberate design decision because honoring per-page ownership in the
4830 * writeback path is complicated, may lead to higher CPU and IO overheads
4831 * and deemed unnecessary given that write-sharing an inode across
4832 * different cgroups isn't a common use-case.
4833 *
4834 * Combined with inode majority-writer ownership switching, this works well
4835 * enough in most cases but there are some pathological cases.  For
4836 * example, let's say there are two cgroups A and B which keep writing to
4837 * different but confined parts of the same inode.  B owns the inode and
4838 * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4839 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4840 * triggering background writeback.  A will be slowed down without a way to
4841 * make writeback of the dirty pages happen.
4842 *
4843 * Conditions like the above can lead to a cgroup getting repeatedly and
4844 * severely throttled after making some progress after each
4845 * dirty_expire_interval while the underlying IO device is almost
4846 * completely idle.
4847 *
4848 * Solving this problem completely requires matching the ownership tracking
4849 * granularities between memcg and writeback in either direction.  However,
4850 * the more egregious behaviors can be avoided by simply remembering the
4851 * most recent foreign dirtying events and initiating remote flushes on
4852 * them when local writeback isn't enough to keep the memory clean enough.
4853 *
4854 * The following two functions implement such mechanism.  When a foreign
4855 * page - a page whose memcg and writeback ownerships don't match - is
4856 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4857 * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4858 * decides that the memcg needs to sleep due to high dirty ratio, it calls
4859 * mem_cgroup_flush_foreign() which queues writeback on the recorded
4860 * foreign bdi_writebacks which haven't expired.  Both the numbers of
4861 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4862 * limited to MEMCG_CGWB_FRN_CNT.
4863 *
4864 * The mechanism only remembers IDs and doesn't hold any object references.
4865 * As being wrong occasionally doesn't matter, updates and accesses to the
4866 * records are lockless and racy.
4867 */
4868void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
4869					     struct bdi_writeback *wb)
4870{
4871	struct mem_cgroup *memcg = folio_memcg(folio);
4872	struct memcg_cgwb_frn *frn;
4873	u64 now = get_jiffies_64();
4874	u64 oldest_at = now;
4875	int oldest = -1;
4876	int i;
4877
4878	trace_track_foreign_dirty(folio, wb);
4879
4880	/*
4881	 * Pick the slot to use.  If there is already a slot for @wb, keep
4882	 * using it.  If not, replace the oldest one which isn't being
4883	 * written out.
4884	 */
4885	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4886		frn = &memcg->cgwb_frn[i];
4887		if (frn->bdi_id == wb->bdi->id &&
4888		    frn->memcg_id == wb->memcg_css->id)
4889			break;
4890		if (time_before64(frn->at, oldest_at) &&
4891		    atomic_read(&frn->done.cnt) == 1) {
4892			oldest = i;
4893			oldest_at = frn->at;
4894		}
4895	}
4896
4897	if (i < MEMCG_CGWB_FRN_CNT) {
4898		/*
4899		 * Re-using an existing one.  Update timestamp lazily to
4900		 * avoid making the cacheline hot.  We want them to be
4901		 * reasonably up-to-date and significantly shorter than
4902		 * dirty_expire_interval as that's what expires the record.
4903		 * Use the shorter of 1s and dirty_expire_interval / 8.
4904		 */
4905		unsigned long update_intv =
4906			min_t(unsigned long, HZ,
4907			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4908
4909		if (time_before64(frn->at, now - update_intv))
4910			frn->at = now;
4911	} else if (oldest >= 0) {
4912		/* replace the oldest free one */
4913		frn = &memcg->cgwb_frn[oldest];
4914		frn->bdi_id = wb->bdi->id;
4915		frn->memcg_id = wb->memcg_css->id;
4916		frn->at = now;
4917	}
4918}
4919
4920/* issue foreign writeback flushes for recorded foreign dirtying events */
4921void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4922{
4923	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4924	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4925	u64 now = jiffies_64;
4926	int i;
4927
4928	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4929		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4930
4931		/*
4932		 * If the record is older than dirty_expire_interval,
4933		 * writeback on it has already started.  No need to kick it
4934		 * off again.  Also, don't start a new one if there's
4935		 * already one in flight.
4936		 */
4937		if (time_after64(frn->at, now - intv) &&
4938		    atomic_read(&frn->done.cnt) == 1) {
4939			frn->at = 0;
4940			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4941			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
4942					       WB_REASON_FOREIGN_FLUSH,
4943					       &frn->done);
4944		}
4945	}
4946}
4947
4948#else	/* CONFIG_CGROUP_WRITEBACK */
4949
4950static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4951{
4952	return 0;
4953}
4954
4955static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4956{
4957}
4958
4959static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4960{
4961}
4962
4963#endif	/* CONFIG_CGROUP_WRITEBACK */
4964
4965/*
4966 * DO NOT USE IN NEW FILES.
4967 *
4968 * "cgroup.event_control" implementation.
4969 *
4970 * This is way over-engineered.  It tries to support fully configurable
4971 * events for each user.  Such level of flexibility is completely
4972 * unnecessary especially in the light of the planned unified hierarchy.
4973 *
4974 * Please deprecate this and replace with something simpler if at all
4975 * possible.
4976 */
4977
4978/*
4979 * Unregister event and free resources.
4980 *
4981 * Gets called from workqueue.
4982 */
4983static void memcg_event_remove(struct work_struct *work)
4984{
4985	struct mem_cgroup_event *event =
4986		container_of(work, struct mem_cgroup_event, remove);
4987	struct mem_cgroup *memcg = event->memcg;
4988
4989	remove_wait_queue(event->wqh, &event->wait);
4990
4991	event->unregister_event(memcg, event->eventfd);
4992
4993	/* Notify userspace the event is going away. */
4994	eventfd_signal(event->eventfd);
4995
4996	eventfd_ctx_put(event->eventfd);
4997	kfree(event);
4998	css_put(&memcg->css);
4999}
5000
5001/*
5002 * Gets called on EPOLLHUP on eventfd when user closes it.
5003 *
5004 * Called with wqh->lock held and interrupts disabled.
5005 */
5006static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
5007			    int sync, void *key)
5008{
5009	struct mem_cgroup_event *event =
5010		container_of(wait, struct mem_cgroup_event, wait);
5011	struct mem_cgroup *memcg = event->memcg;
5012	__poll_t flags = key_to_poll(key);
5013
5014	if (flags & EPOLLHUP) {
5015		/*
5016		 * If the event has been detached at cgroup removal, we
5017		 * can simply return knowing the other side will cleanup
5018		 * for us.
5019		 *
5020		 * We can't race against event freeing since the other
5021		 * side will require wqh->lock via remove_wait_queue(),
5022		 * which we hold.
5023		 */
5024		spin_lock(&memcg->event_list_lock);
5025		if (!list_empty(&event->list)) {
5026			list_del_init(&event->list);
5027			/*
5028			 * We are in atomic context, but cgroup_event_remove()
5029			 * may sleep, so we have to call it in workqueue.
5030			 */
5031			schedule_work(&event->remove);
5032		}
5033		spin_unlock(&memcg->event_list_lock);
5034	}
5035
5036	return 0;
5037}
5038
5039static void memcg_event_ptable_queue_proc(struct file *file,
5040		wait_queue_head_t *wqh, poll_table *pt)
5041{
5042	struct mem_cgroup_event *event =
5043		container_of(pt, struct mem_cgroup_event, pt);
5044
5045	event->wqh = wqh;
5046	add_wait_queue(wqh, &event->wait);
5047}
5048
5049/*
5050 * DO NOT USE IN NEW FILES.
5051 *
5052 * Parse input and register new cgroup event handler.
5053 *
5054 * Input must be in format '<event_fd> <control_fd> <args>'.
5055 * Interpretation of args is defined by control file implementation.
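 *
 * For example (legacy v1 memory thresholds): userspace creates an eventfd,
 * opens memory.usage_in_bytes, and writes "<event_fd> <usage_fd> 8388608"
 * to cgroup.event_control to be notified when usage crosses 8M.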
5056 */
5057static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
5058					 char *buf, size_t nbytes, loff_t off)
5059{
5060	struct cgroup_subsys_state *css = of_css(of);
5061	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5062	struct mem_cgroup_event *event;
5063	struct cgroup_subsys_state *cfile_css;
5064	unsigned int efd, cfd;
5065	struct fd efile;
5066	struct fd cfile;
5067	struct dentry *cdentry;
5068	const char *name;
5069	char *endp;
5070	int ret;
5071
5072	if (IS_ENABLED(CONFIG_PREEMPT_RT))
5073		return -EOPNOTSUPP;
5074
5075	buf = strstrip(buf);
5076
5077	efd = simple_strtoul(buf, &endp, 10);
5078	if (*endp != ' ')
5079		return -EINVAL;
5080	buf = endp + 1;
5081
5082	cfd = simple_strtoul(buf, &endp, 10);
5083	if ((*endp != ' ') && (*endp != '\0'))
5084		return -EINVAL;
5085	buf = endp + 1;
5086
5087	event = kzalloc(sizeof(*event), GFP_KERNEL);
5088	if (!event)
5089		return -ENOMEM;
5090
5091	event->memcg = memcg;
5092	INIT_LIST_HEAD(&event->list);
5093	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
5094	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
5095	INIT_WORK(&event->remove, memcg_event_remove);
5096
5097	efile = fdget(efd);
5098	if (!efile.file) {
5099		ret = -EBADF;
5100		goto out_kfree;
5101	}
5102
5103	event->eventfd = eventfd_ctx_fileget(efile.file);
5104	if (IS_ERR(event->eventfd)) {
5105		ret = PTR_ERR(event->eventfd);
5106		goto out_put_efile;
5107	}
5108
5109	cfile = fdget(cfd);
5110	if (!cfile.file) {
5111		ret = -EBADF;
5112		goto out_put_eventfd;
5113	}
5114
5115	/* the process needs read permission on the control file */
5116	/* AV: shouldn't we check that it's been opened for read instead? */
5117	ret = file_permission(cfile.file, MAY_READ);
5118	if (ret < 0)
5119		goto out_put_cfile;
5120
5121	/*
5122	 * The control file must be a regular cgroup1 file. As a regular cgroup
5123	 * file can't be renamed, it's safe to access its name afterwards.
5124	 */
5125	cdentry = cfile.file->f_path.dentry;
5126	if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
5127		ret = -EINVAL;
5128		goto out_put_cfile;
5129	}
5130
5131	/*
5132	 * Determine the event callbacks and set them in @event.  This used
5133	 * to be done via struct cftype but cgroup core no longer knows
5134	 * about these events.  The following is crude but the whole thing
5135	 * is for compatibility anyway.
5136	 *
5137	 * DO NOT ADD NEW FILES.
5138	 */
5139	name = cdentry->d_name.name;
5140
5141	if (!strcmp(name, "memory.usage_in_bytes")) {
5142		event->register_event = mem_cgroup_usage_register_event;
5143		event->unregister_event = mem_cgroup_usage_unregister_event;
5144	} else if (!strcmp(name, "memory.oom_control")) {
5145		event->register_event = mem_cgroup_oom_register_event;
5146		event->unregister_event = mem_cgroup_oom_unregister_event;
5147	} else if (!strcmp(name, "memory.pressure_level")) {
5148		event->register_event = vmpressure_register_event;
5149		event->unregister_event = vmpressure_unregister_event;
5150	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
5151		event->register_event = memsw_cgroup_usage_register_event;
5152		event->unregister_event = memsw_cgroup_usage_unregister_event;
5153	} else {
5154		ret = -EINVAL;
5155		goto out_put_cfile;
5156	}
5157
5158	/*
5159	 * Verify @cfile should belong to @css.  Also, remaining events are
5160	 * automatically removed on cgroup destruction but the removal is
5161	 * asynchronous, so take an extra ref on @css.
5162	 */
5163	cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
5164					       &memory_cgrp_subsys);
5165	ret = -EINVAL;
5166	if (IS_ERR(cfile_css))
5167		goto out_put_cfile;
5168	if (cfile_css != css) {
5169		css_put(cfile_css);
5170		goto out_put_cfile;
5171	}
5172
5173	ret = event->register_event(memcg, event->eventfd, buf);
5174	if (ret)
5175		goto out_put_css;
5176
5177	vfs_poll(efile.file, &event->pt);
5178
5179	spin_lock_irq(&memcg->event_list_lock);
5180	list_add(&event->list, &memcg->event_list);
5181	spin_unlock_irq(&memcg->event_list_lock);
5182
5183	fdput(cfile);
5184	fdput(efile);
5185
5186	return nbytes;
5187
5188out_put_css:
5189	css_put(css);
5190out_put_cfile:
5191	fdput(cfile);
5192out_put_eventfd:
5193	eventfd_ctx_put(event->eventfd);
5194out_put_efile:
5195	fdput(efile);
5196out_kfree:
5197	kfree(event);
5198
5199	return ret;
5200}
5201
5202#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
5203static int mem_cgroup_slab_show(struct seq_file *m, void *p)
5204{
5205	/*
5206	 * Deprecated.
5207	 * Please, take a look at tools/cgroup/memcg_slabinfo.py .
5208	 */
5209	return 0;
5210}
5211#endif
5212
5213static int memory_stat_show(struct seq_file *m, void *v);
5214
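/*
 * Legacy (cgroup v1) control files.  For example, with the v1 hierarchy
 * mounted, "echo 512M > memory.limit_in_bytes" is handled by
 * mem_cgroup_write() and "cat memory.stat" by memory_stat_show().
 */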
5215static struct cftype mem_cgroup_legacy_files[] = {
5216	{
5217		.name = "usage_in_bytes",
5218		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
5219		.read_u64 = mem_cgroup_read_u64,
5220	},
5221	{
5222		.name = "max_usage_in_bytes",
5223		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
5224		.write = mem_cgroup_reset,
5225		.read_u64 = mem_cgroup_read_u64,
5226	},
5227	{
5228		.name = "limit_in_bytes",
5229		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
5230		.write = mem_cgroup_write,
5231		.read_u64 = mem_cgroup_read_u64,
5232	},
5233	{
5234		.name = "soft_limit_in_bytes",
5235		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
5236		.write = mem_cgroup_write,
5237		.read_u64 = mem_cgroup_read_u64,
5238	},
5239	{
5240		.name = "failcnt",
5241		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
5242		.write = mem_cgroup_reset,
5243		.read_u64 = mem_cgroup_read_u64,
5244	},
5245	{
5246		.name = "stat",
5247		.seq_show = memory_stat_show,
5248	},
5249	{
5250		.name = "force_empty",
5251		.write = mem_cgroup_force_empty_write,
5252	},
5253	{
5254		.name = "use_hierarchy",
 
5255		.write_u64 = mem_cgroup_hierarchy_write,
5256		.read_u64 = mem_cgroup_hierarchy_read,
5257	},
5258	{
5259		.name = "cgroup.event_control",		/* XXX: for compat */
5260		.write = memcg_write_event_control,
5261		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
 
5262	},
5263	{
5264		.name = "swappiness",
5265		.read_u64 = mem_cgroup_swappiness_read,
5266		.write_u64 = mem_cgroup_swappiness_write,
5267	},
5268	{
5269		.name = "move_charge_at_immigrate",
5270		.read_u64 = mem_cgroup_move_charge_read,
5271		.write_u64 = mem_cgroup_move_charge_write,
5272	},
5273	{
5274		.name = "oom_control",
5275		.seq_show = mem_cgroup_oom_control_read,
5276		.write_u64 = mem_cgroup_oom_control_write,
 
5277	},
5278	{
5279		.name = "pressure_level",
5280		.seq_show = mem_cgroup_dummy_seq_show,
5281	},
5282#ifdef CONFIG_NUMA
5283	{
5284		.name = "numa_stat",
5285		.seq_show = memcg_numa_stat_show,
5286	},
5287#endif
 
5288	{
5289		.name = "kmem.limit_in_bytes",
5290		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5291		.write = mem_cgroup_write,
5292		.read_u64 = mem_cgroup_read_u64,
5293	},
5294	{
5295		.name = "kmem.usage_in_bytes",
5296		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5297		.read_u64 = mem_cgroup_read_u64,
5298	},
5299	{
5300		.name = "kmem.failcnt",
5301		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5302		.write = mem_cgroup_reset,
5303		.read_u64 = mem_cgroup_read_u64,
5304	},
5305	{
5306		.name = "kmem.max_usage_in_bytes",
5307		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5308		.write = mem_cgroup_reset,
5309		.read_u64 = mem_cgroup_read_u64,
5310	},
5311#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_SLUB_DEBUG)
5312	{
5313		.name = "kmem.slabinfo",
5314		.seq_show = mem_cgroup_slab_show,
5315	},
5316#endif
5317	{
5318		.name = "kmem.tcp.limit_in_bytes",
5319		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5320		.write = mem_cgroup_write,
5321		.read_u64 = mem_cgroup_read_u64,
5322	},
5323	{
5324		.name = "kmem.tcp.usage_in_bytes",
5325		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5326		.read_u64 = mem_cgroup_read_u64,
5327	},
5328	{
5329		.name = "kmem.tcp.failcnt",
5330		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5331		.write = mem_cgroup_reset,
5332		.read_u64 = mem_cgroup_read_u64,
5333	},
5334	{
5335		.name = "kmem.tcp.max_usage_in_bytes",
5336		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5337		.write = mem_cgroup_reset,
5338		.read_u64 = mem_cgroup_read_u64,
5339	},
5340	{ },	/* terminate */
5341};
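
/*
 * Illustrative note on the legacy knobs above (cgroup v1 assumed):
 * move_charge_at_immigrate takes a bitmask - bit 0 (MOVE_ANON) moves
 * charges of anonymous pages, bit 1 (MOVE_FILE) moves file/shmem pages -
 * so e.g. "echo 3 > memory.move_charge_at_immigrate" makes both kinds of
 * charges follow tasks that later migrate into the group.
 */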
5342
5343/*
5344 * Private memory cgroup IDR
5345 *
5346 * Swap-out records and page cache shadow entries need to store memcg
5347 * references in constrained space, so we maintain an ID space that is
5348 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
5349 * memory-controlled cgroups to 64k.
5350 *
5351 * However, there usually are many references to the offline CSS after
5352 * the cgroup has been destroyed, such as page cache or reclaimable
5353 * slab objects, that don't need to hang on to the ID. We want to keep
5354 * those dead CSS from occupying IDs, or we might quickly exhaust the
5355 * relatively small ID space and prevent the creation of new cgroups
5356 * even when there are much fewer than 64k cgroups - possibly none.
5357 *
5358 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5359 * be freed and recycled when it's no longer needed, which is usually
5360 * when the CSS is offlined.
5361 *
5362 * The only exception to that are records of swapped out tmpfs/shmem
5363 * pages that need to be attributed to live ancestors on swapin. But
5364 * those references are manageable from userspace.
5365 */
5366
5367#define MEM_CGROUP_ID_MAX	((1UL << MEM_CGROUP_ID_SHIFT) - 1)
5368static DEFINE_IDR(mem_cgroup_idr);
5369
5370static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5371{
5372	if (memcg->id.id > 0) {
5373		idr_remove(&mem_cgroup_idr, memcg->id.id);
5374		memcg->id.id = 0;
5375	}
5376}
5377
5378static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5379						  unsigned int n)
5380{
5381	refcount_add(n, &memcg->id.ref);
5382}
5383
5384static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5385{
5386	if (refcount_sub_and_test(n, &memcg->id.ref)) {
5387		mem_cgroup_id_remove(memcg);
5388
5389		/* Memcg ID pins CSS */
5390		css_put(&memcg->css);
5391	}
5392}
5393
5394static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5395{
5396	mem_cgroup_id_put_many(memcg, 1);
5397}
5398
5399/**
5400 * mem_cgroup_from_id - look up a memcg from a memcg id
5401 * @id: the memcg id to look up
5402 *
5403 * Caller must hold rcu_read_lock().
5404 */
5405struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5406{
5407	WARN_ON_ONCE(!rcu_read_lock_held());
5408	return idr_find(&mem_cgroup_idr, id);
5409}
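
/*
 * Illustrative usage sketch (not a helper defined in this file): callers
 * that need to hold on to the looked-up memcg typically pair the lookup
 * with a tryget while still inside the RCU read-side section, e.g.:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */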
5410
5411#ifdef CONFIG_SHRINKER_DEBUG
5412struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
5413{
5414	struct cgroup *cgrp;
5415	struct cgroup_subsys_state *css;
5416	struct mem_cgroup *memcg;
 
5417
5418	cgrp = cgroup_get_from_id(ino);
5419	if (IS_ERR(cgrp))
5420		return ERR_CAST(cgrp);
5421
5422	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
5423	if (css)
5424		memcg = container_of(css, struct mem_cgroup, css);
5425	else
5426		memcg = ERR_PTR(-ENOENT);
5427
5428	cgroup_put(cgrp);
5429
5430	return memcg;
5431}
5432#endif
5433
5434static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5435{
5436	struct mem_cgroup_per_node *pn;
5437
5438	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
5439	if (!pn)
5440		return 1;
5441
5442	pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5443						   GFP_KERNEL_ACCOUNT);
5444	if (!pn->lruvec_stats_percpu) {
5445		kfree(pn);
5446		return 1;
5447	}
5448
5449	lruvec_init(&pn->lruvec);
5450	pn->memcg = memcg;
5451
5452	memcg->nodeinfo[node] = pn;
5453	return 0;
5454}
5455
5456static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5457{
5458	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5459
5460	if (!pn)
5461		return;
5462
5463	free_percpu(pn->lruvec_stats_percpu);
5464	kfree(pn);
5465}
 
5466
5467static void __mem_cgroup_free(struct mem_cgroup *memcg)
5468{
5469	int node;
5470
5471	if (memcg->orig_objcg)
5472		obj_cgroup_put(memcg->orig_objcg);
5473
5474	for_each_node(node)
5475		free_mem_cgroup_per_node_info(memcg, node);
5476	kfree(memcg->vmstats);
5477	free_percpu(memcg->vmstats_percpu);
5478	kfree(memcg);
5479}
5480
5481static void mem_cgroup_free(struct mem_cgroup *memcg)
5482{
5483	lru_gen_exit_memcg(memcg);
5484	memcg_wb_domain_exit(memcg);
5485	__mem_cgroup_free(memcg);
 
5486}
5487
5488static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
5489{
5490	struct memcg_vmstats_percpu *statc, *pstatc;
5491	struct mem_cgroup *memcg;
5492	int node, cpu;
5493	int __maybe_unused i;
5494	long error = -ENOMEM;
 
5495
5496	memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
5497	if (!memcg)
5498		return ERR_PTR(error);
5499
5500	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5501				 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
5502	if (memcg->id.id < 0) {
5503		error = memcg->id.id;
5504		goto fail;
5505	}
5506
5507	memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), GFP_KERNEL);
5508	if (!memcg->vmstats)
5509		goto fail;
5510
5511	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5512						 GFP_KERNEL_ACCOUNT);
5513	if (!memcg->vmstats_percpu)
5514		goto fail;
5515
5516	for_each_possible_cpu(cpu) {
5517		if (parent)
5518			pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
5519		statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5520		statc->parent = parent ? pstatc : NULL;
5521		statc->vmstats = memcg->vmstats;
5522	}
5523
5524	for_each_node(node)
5525		if (alloc_mem_cgroup_per_node_info(memcg, node))
5526			goto fail;
5527
5528	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5529		goto fail;
5530
5531	INIT_WORK(&memcg->high_work, high_work_func);
5532	INIT_LIST_HEAD(&memcg->oom_notify);
 
5533	mutex_init(&memcg->thresholds_lock);
5534	spin_lock_init(&memcg->move_lock);
5535	vmpressure_init(&memcg->vmpressure);
5536	INIT_LIST_HEAD(&memcg->event_list);
5537	spin_lock_init(&memcg->event_list_lock);
5538	memcg->socket_pressure = jiffies;
5539#ifdef CONFIG_MEMCG_KMEM
5540	memcg->kmemcg_id = -1;
5541	INIT_LIST_HEAD(&memcg->objcg_list);
5542#endif
5543#ifdef CONFIG_CGROUP_WRITEBACK
5544	INIT_LIST_HEAD(&memcg->cgwb_list);
5545	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5546		memcg->cgwb_frn[i].done =
5547			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5548#endif
5549#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5550	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5551	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5552	memcg->deferred_split_queue.split_queue_len = 0;
5553#endif
5554	lru_gen_init_memcg(memcg);
5555	return memcg;
5556fail:
5557	mem_cgroup_id_remove(memcg);
5558	__mem_cgroup_free(memcg);
5559	return ERR_PTR(error);
5560}
5561
5562static struct cgroup_subsys_state * __ref
5563mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5564{
5565	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5566	struct mem_cgroup *memcg, *old_memcg;
5567
5568	old_memcg = set_active_memcg(parent);
5569	memcg = mem_cgroup_alloc(parent);
5570	set_active_memcg(old_memcg);
5571	if (IS_ERR(memcg))
5572		return ERR_CAST(memcg);
5573
5574	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5575	WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
5576#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
5577	memcg->zswap_max = PAGE_COUNTER_MAX;
5578	WRITE_ONCE(memcg->zswap_writeback,
5579		!parent || READ_ONCE(parent->zswap_writeback));
5580#endif
5581	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5582	if (parent) {
5583		WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
5584		WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
5585
5586		page_counter_init(&memcg->memory, &parent->memory);
5587		page_counter_init(&memcg->swap, &parent->swap);
5588		page_counter_init(&memcg->kmem, &parent->kmem);
5589		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5590	} else {
5591		init_memcg_events();
5592		page_counter_init(&memcg->memory, NULL);
5593		page_counter_init(&memcg->swap, NULL);
5594		page_counter_init(&memcg->kmem, NULL);
5595		page_counter_init(&memcg->tcpmem, NULL);
5596
5597		root_mem_cgroup = memcg;
5598		return &memcg->css;
5599	}
5600
5601	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5602		static_branch_inc(&memcg_sockets_enabled_key);
5603
5604#if defined(CONFIG_MEMCG_KMEM)
5605	if (!cgroup_memory_nobpf)
5606		static_branch_inc(&memcg_bpf_enabled_key);
5607#endif
5608
5609	return &memcg->css;
5610}
5611
5612static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5613{
5614	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5615
5616	if (memcg_online_kmem(memcg))
5617		goto remove_id;
5618
5619	/*
5620	 * A memcg must be visible for expand_shrinker_info()
5621	 * by the time the maps are allocated. So, we allocate maps
5622	 * here, when for_each_mem_cgroup() can't skip it.
5623	 */
5624	if (alloc_shrinker_info(memcg))
5625		goto offline_kmem;
5626
5627	if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
5628		queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
5629				   FLUSH_TIME);
5630	lru_gen_online_memcg(memcg);
5631
5632	/* Online state pins memcg ID, memcg ID pins CSS */
5633	refcount_set(&memcg->id.ref, 1);
5634	css_get(css);
5635
5636	/*
5637	 * Ensure mem_cgroup_from_id() works once we're fully online.
5638	 *
5639	 * We could do this earlier and require callers to filter with
5640	 * css_tryget_online(). But right now there are no users that
5641	 * need earlier access, and the workingset code relies on the
5642	 * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
5643	 * publish it here at the end of onlining. This matches the
5644	 * regular ID destruction during offlining.
5645	 */
5646	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5647
5648	return 0;
5649offline_kmem:
5650	memcg_offline_kmem(memcg);
5651remove_id:
5652	mem_cgroup_id_remove(memcg);
5653	return -ENOMEM;
5654}
5655
5656static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5657{
5658	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5659	struct mem_cgroup_event *event, *tmp;
 
5660
5661	/*
5662	 * Unregister events and notify userspace.
5663	 * Notify userspace about cgroup removal only after rmdir of the cgroup
5664	 * directory, to avoid races between userspace and kernelspace.
5665	 */
5666	spin_lock_irq(&memcg->event_list_lock);
5667	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5668		list_del_init(&event->list);
5669		schedule_work(&event->remove);
5670	}
5671	spin_unlock_irq(&memcg->event_list_lock);
5672
5673	page_counter_set_min(&memcg->memory, 0);
5674	page_counter_set_low(&memcg->memory, 0);
5675
5676	zswap_memcg_offline_cleanup(memcg);
5677
5678	memcg_offline_kmem(memcg);
5679	reparent_shrinker_deferred(memcg);
5680	wb_memcg_offline(memcg);
5681	lru_gen_offline_memcg(memcg);
5682
5683	drain_all_stock(memcg);
5684
5685	mem_cgroup_id_put(memcg);
5686}
5687
5688static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5689{
5690	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5691
5692	invalidate_reclaim_iterators(memcg);
5693	lru_gen_release_memcg(memcg);
5694}
5695
5696static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5697{
5698	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5699	int __maybe_unused i;
5700
5701#ifdef CONFIG_CGROUP_WRITEBACK
5702	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5703		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5704#endif
5705	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5706		static_branch_dec(&memcg_sockets_enabled_key);
5707
5708	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5709		static_branch_dec(&memcg_sockets_enabled_key);
5710
5711#if defined(CONFIG_MEMCG_KMEM)
5712	if (!cgroup_memory_nobpf)
5713		static_branch_dec(&memcg_bpf_enabled_key);
5714#endif
5715
5716	vmpressure_cleanup(&memcg->vmpressure);
5717	cancel_work_sync(&memcg->high_work);
5718	mem_cgroup_remove_from_trees(memcg);
5719	free_shrinker_info(memcg);
5720	mem_cgroup_free(memcg);
5721}
5722
5723/**
5724 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5725 * @css: the target css
5726 *
5727 * Reset the states of the mem_cgroup associated with @css.  This is
5728 * invoked when the userland requests disabling on the default hierarchy
5729 * but the memcg is pinned through dependency.  The memcg should stop
5730 * applying policies and should revert to the vanilla state as it may be
5731 * made visible again.
5732 *
5733 * The current implementation only resets the essential configurations.
5734 * This needs to be expanded to cover all the visible parts.
5735 */
5736static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5737{
5738	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5739
5740	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5741	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5742	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5743	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5744	page_counter_set_min(&memcg->memory, 0);
5745	page_counter_set_low(&memcg->memory, 0);
5746	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5747	WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
5748	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5749	memcg_wb_domain_size_changed(memcg);
5750}
5751
5752static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5753{
5754	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5755	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5756	struct memcg_vmstats_percpu *statc;
5757	long delta, delta_cpu, v;
5758	int i, nid;
5759
5760	statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5761
5762	for (i = 0; i < MEMCG_NR_STAT; i++) {
5763		/*
5764		 * Collect the aggregated propagation counts of groups
5765		 * below us. We're in a per-cpu loop here and this is
5766		 * a global counter, so the first cycle will get them.
5767		 */
5768		delta = memcg->vmstats->state_pending[i];
5769		if (delta)
5770			memcg->vmstats->state_pending[i] = 0;
5771
5772		/* Add CPU changes on this level since the last flush */
5773		delta_cpu = 0;
5774		v = READ_ONCE(statc->state[i]);
5775		if (v != statc->state_prev[i]) {
5776			delta_cpu = v - statc->state_prev[i];
5777			delta += delta_cpu;
5778			statc->state_prev[i] = v;
5779		}
5780
5781		/* Aggregate counts on this level and propagate upwards */
5782		if (delta_cpu)
5783			memcg->vmstats->state_local[i] += delta_cpu;
5784
5785		if (delta) {
5786			memcg->vmstats->state[i] += delta;
5787			if (parent)
5788				parent->vmstats->state_pending[i] += delta;
5789		}
5790	}
5791
5792	for (i = 0; i < NR_MEMCG_EVENTS; i++) {
5793		delta = memcg->vmstats->events_pending[i];
5794		if (delta)
5795			memcg->vmstats->events_pending[i] = 0;
5796
5797		delta_cpu = 0;
5798		v = READ_ONCE(statc->events[i]);
5799		if (v != statc->events_prev[i]) {
5800			delta_cpu = v - statc->events_prev[i];
5801			delta += delta_cpu;
5802			statc->events_prev[i] = v;
5803		}
5804
5805		if (delta_cpu)
5806			memcg->vmstats->events_local[i] += delta_cpu;
5807
5808		if (delta) {
5809			memcg->vmstats->events[i] += delta;
5810			if (parent)
5811				parent->vmstats->events_pending[i] += delta;
5812		}
5813	}
5814
5815	for_each_node_state(nid, N_MEMORY) {
5816		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
5817		struct mem_cgroup_per_node *ppn = NULL;
5818		struct lruvec_stats_percpu *lstatc;
5819
5820		if (parent)
5821			ppn = parent->nodeinfo[nid];
5822
5823		lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
5824
5825		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
5826			delta = pn->lruvec_stats.state_pending[i];
5827			if (delta)
5828				pn->lruvec_stats.state_pending[i] = 0;
5829
5830			delta_cpu = 0;
5831			v = READ_ONCE(lstatc->state[i]);
5832			if (v != lstatc->state_prev[i]) {
5833				delta_cpu = v - lstatc->state_prev[i];
5834				delta += delta_cpu;
5835				lstatc->state_prev[i] = v;
5836			}
5837
5838			if (delta_cpu)
5839				pn->lruvec_stats.state_local[i] += delta_cpu;
5840
5841			if (delta) {
5842				pn->lruvec_stats.state[i] += delta;
5843				if (ppn)
5844					ppn->lruvec_stats.state_pending[i] += delta;
5845			}
5846		}
5847	}
5848	statc->stats_updates = 0;
5849	/* We are in a per-cpu loop here, only do the atomic write once */
5850	if (atomic64_read(&memcg->vmstats->stats_updates))
5851		atomic64_set(&memcg->vmstats->stats_updates, 0);
5852}
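
/*
 * Worked example of the flush above (numbers are illustrative): if a
 * child memcg accumulated +40 pages of NR_FILE_PAGES on one CPU and +10
 * on another since the last flush, flushing the child folds the total of
 * 50 into its own state[] and state_local[] and adds 50 to the parent's
 * state_pending[]. The parent's next flush then folds that pending 50
 * into its own state[] and forwards it one more level up, yielding
 * hierarchical totals without walking the subtree on every read.
 */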
5853
5854#ifdef CONFIG_MMU
5855/* Handlers for move charge at task migration. */
5856static int mem_cgroup_do_precharge(unsigned long count)
5857{
5858	int ret;
5859
5860	/* Try a single bulk charge without reclaim first, kswapd may wake */
5861	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5862	if (!ret) {
5863		mc.precharge += count;
5864		return ret;
5865	}
5866
5867	/* Try charges one by one with reclaim, but do not retry */
5868	while (count--) {
5869		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5870		if (ret)
5871			return ret;
5872		mc.precharge++;
5873		cond_resched();
5874	}
5875	return 0;
5876}
5877
5878union mc_target {
5879	struct folio	*folio;
5880	swp_entry_t	ent;
5881};
5882
5883enum mc_target_type {
5884	MC_TARGET_NONE = 0,
5885	MC_TARGET_PAGE,
5886	MC_TARGET_SWAP,
5887	MC_TARGET_DEVICE,
5888};
5889
5890static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5891						unsigned long addr, pte_t ptent)
5892{
5893	struct page *page = vm_normal_page(vma, addr, ptent);
5894
5895	if (!page)
5896		return NULL;
5897	if (PageAnon(page)) {
5898		if (!(mc.flags & MOVE_ANON))
5899			return NULL;
5900	} else {
5901		if (!(mc.flags & MOVE_FILE))
5902			return NULL;
5903	}
5904	get_page(page);
5905
5906	return page;
5907}
5908
5909#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5910static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5911			pte_t ptent, swp_entry_t *entry)
5912{
5913	struct page *page = NULL;
5914	swp_entry_t ent = pte_to_swp_entry(ptent);
5915
5916	if (!(mc.flags & MOVE_ANON))
5917		return NULL;
5918
5919	/*
5920	 * Handle device private pages that are not accessible by the CPU, but
5921	 * stored as special swap entries in the page table.
5922	 */
5923	if (is_device_private_entry(ent)) {
5924		page = pfn_swap_entry_to_page(ent);
5925		if (!get_page_unless_zero(page))
5926			return NULL;
5927		return page;
5928	}
5929
5930	if (non_swap_entry(ent))
5931		return NULL;
5932
5933	/*
5934	 * Because swap_cache_get_folio() updates some statistics counters,
5935	 * we call find_get_page() with swapper_space directly.
5936	 */
5937	page = find_get_page(swap_address_space(ent), swp_offset(ent));
5938	entry->val = ent.val;
 
5939
5940	return page;
5941}
5942#else
5943static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5944			pte_t ptent, swp_entry_t *entry)
5945{
5946	return NULL;
5947}
5948#endif
5949
5950static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5951			unsigned long addr, pte_t ptent)
5952{
5953	unsigned long index;
5954	struct folio *folio;
 
5955
5956	if (!vma->vm_file) /* anonymous vma */
5957		return NULL;
5958	if (!(mc.flags & MOVE_FILE))
5959		return NULL;
5960
5961	/* folio is moved even if it's not RSS of this task (page-faulted). */
5962	/* shmem/tmpfs may report page out on swap: account for that too. */
5963	index = linear_page_index(vma, addr);
5964	folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index);
5965	if (IS_ERR(folio))
5966		return NULL;
5967	return folio_file_page(folio, index);
5968}
5969
5970/**
5971 * mem_cgroup_move_account - move account of the folio
5972 * @folio: The folio.
5973 * @compound: charge the page as compound or small page
5974 * @from: mem_cgroup which the folio is moved from.
5975 * @to:	mem_cgroup which the folio is moved to. @from != @to.
5976 *
5977 * The folio must be locked and not on the LRU.
5978 *
5979 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
5980 * from old cgroup.
5981 */
5982static int mem_cgroup_move_account(struct folio *folio,
5983				   bool compound,
5984				   struct mem_cgroup *from,
5985				   struct mem_cgroup *to)
5986{
5987	struct lruvec *from_vec, *to_vec;
5988	struct pglist_data *pgdat;
5989	unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
5990	int nid, ret;
5991
5992	VM_BUG_ON(from == to);
5993	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5994	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
5995	VM_BUG_ON(compound && !folio_test_large(folio));
5996
5997	ret = -EINVAL;
5998	if (folio_memcg(folio) != from)
5999		goto out;
6000
6001	pgdat = folio_pgdat(folio);
6002	from_vec = mem_cgroup_lruvec(from, pgdat);
6003	to_vec = mem_cgroup_lruvec(to, pgdat);
6004
6005	folio_memcg_lock(folio);
6006
6007	if (folio_test_anon(folio)) {
6008		if (folio_mapped(folio)) {
6009			__mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
6010			__mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
6011			if (folio_test_pmd_mappable(folio)) {
6012				__mod_lruvec_state(from_vec, NR_ANON_THPS,
6013						   -nr_pages);
6014				__mod_lruvec_state(to_vec, NR_ANON_THPS,
6015						   nr_pages);
6016			}
6017		}
6018	} else {
6019		__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
6020		__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
6021
6022		if (folio_test_swapbacked(folio)) {
6023			__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
6024			__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
6025		}
6026
6027		if (folio_mapped(folio)) {
6028			__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
6029			__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
6030		}
6031
6032		if (folio_test_dirty(folio)) {
6033			struct address_space *mapping = folio_mapping(folio);
6034
6035			if (mapping_can_writeback(mapping)) {
6036				__mod_lruvec_state(from_vec, NR_FILE_DIRTY,
6037						   -nr_pages);
6038				__mod_lruvec_state(to_vec, NR_FILE_DIRTY,
6039						   nr_pages);
6040			}
6041		}
6042	}
6043
6044#ifdef CONFIG_SWAP
6045	if (folio_test_swapcache(folio)) {
6046		__mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages);
6047		__mod_lruvec_state(to_vec, NR_SWAPCACHE, nr_pages);
6048	}
6049#endif
6050	if (folio_test_writeback(folio)) {
6051		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
6052		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
6053	}
6054
6055	/*
6056	 * All state has been migrated, let's switch to the new memcg.
6057	 *
6058	 * It is safe to change page's memcg here because the page
6059	 * is referenced, charged, isolated, and locked: we can't race
6060	 * with (un)charging, migration, LRU putback, or anything else
6061	 * that would rely on a stable page's memory cgroup.
6062	 *
6063	 * Note that folio_memcg_lock is a memcg lock, not a page lock,
6064	 * to save space. As soon as we switch page's memory cgroup to a
6065	 * new memcg that isn't locked, the above state can change
6066	 * concurrently again. Make sure we're truly done with it.
6067	 */
6068	smp_mb();
6069
6070	css_get(&to->css);
6071	css_put(&from->css);
6072
6073	folio->memcg_data = (unsigned long)to;
6074
6075	__folio_memcg_unlock(from);
6076
6077	ret = 0;
6078	nid = folio_nid(folio);
6079
6080	local_irq_disable();
6081	mem_cgroup_charge_statistics(to, nr_pages);
6082	memcg_check_events(to, nid);
6083	mem_cgroup_charge_statistics(from, -nr_pages);
6084	memcg_check_events(from, nid);
6085	local_irq_enable();
6086out:
6087	return ret;
6088}
6089
6090/**
6091 * get_mctgt_type - get target type of moving charge
6092 * @vma: the vma the pte to be checked belongs
6093 * @addr: the address corresponding to the pte to be checked
6094 * @ptent: the pte to be checked
6095 * @target: the pointer where the target folio or swap entry will be stored (can be NULL)
6096 *
6097 * Context: Called with pte lock held.
6098 * Return:
6099 * * MC_TARGET_NONE - If the pte is not a target for move charge.
6100 * * MC_TARGET_PAGE - If the page corresponding to this pte is a target for
6101 *   move charge. If @target is not NULL, the folio is stored in target->folio
6102 *   with extra refcnt taken (Caller should release it).
6103 * * MC_TARGET_SWAP - If the swap entry corresponding to this pte is a
6104 *   target for charge migration.  If @target is not NULL, the entry is
6105 *   stored in target->ent.
6106 * * MC_TARGET_DEVICE - Like MC_TARGET_PAGE but page is device memory and
6107 *   thus not on the lru.  For now such page is charged like a regular page
6108 *   would be as it is just special memory taking the place of a regular page.
6109 *   See Documentation/vm/hmm.txt and include/linux/hmm.h
6110 */
6111static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
6112		unsigned long addr, pte_t ptent, union mc_target *target)
6113{
6114	struct page *page = NULL;
6115	struct folio *folio;
6116	enum mc_target_type ret = MC_TARGET_NONE;
6117	swp_entry_t ent = { .val = 0 };
6118
6119	if (pte_present(ptent))
6120		page = mc_handle_present_pte(vma, addr, ptent);
6121	else if (pte_none_mostly(ptent))
6122		/*
6123		 * PTE markers should be treated as a none pte here, separated
6124		 * from other swap handling below.
6125		 */
6126		page = mc_handle_file_pte(vma, addr, ptent);
6127	else if (is_swap_pte(ptent))
6128		page = mc_handle_swap_pte(vma, ptent, &ent);
6129
6130	if (page)
6131		folio = page_folio(page);
6132	if (target && page) {
6133		if (!folio_trylock(folio)) {
6134			folio_put(folio);
6135			return ret;
6136		}
6137		/*
6138		 * page_mapped() must be stable during the move. This
6139		 * pte is locked, so if it's present, the page cannot
6140		 * become unmapped. If it isn't, we have only partial
6141		 * control over the mapped state: the page lock will
6142		 * prevent new faults against pagecache and swapcache,
6143		 * so an unmapped page cannot become mapped. However,
6144		 * if the page is already mapped elsewhere, it can
6145		 * unmap, and there is nothing we can do about it.
6146		 * Alas, skip moving the page in this case.
6147		 */
6148		if (!pte_present(ptent) && page_mapped(page)) {
6149			folio_unlock(folio);
6150			folio_put(folio);
6151			return ret;
6152		}
6153	}
6154
6155	if (!page && !ent.val)
6156		return ret;
6157	if (page) {
6158		/*
6159		 * Do only loose check w/o serialization.
6160		 * mem_cgroup_move_account() checks the page is valid or
6161		 * not under LRU exclusion.
6162		 */
6163		if (folio_memcg(folio) == mc.from) {
6164			ret = MC_TARGET_PAGE;
6165			if (folio_is_device_private(folio) ||
6166			    folio_is_device_coherent(folio))
6167				ret = MC_TARGET_DEVICE;
6168			if (target)
6169				target->folio = folio;
6170		}
6171		if (!ret || !target) {
6172			if (target)
6173				folio_unlock(folio);
6174			folio_put(folio);
6175		}
6176	}
6177	/*
6178	 * There is a swap entry and a page doesn't exist or isn't charged.
6179	 * But we cannot move a tail-page in a THP.
6180	 */
6181	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
6182	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
6183		ret = MC_TARGET_SWAP;
6184		if (target)
6185			target->ent = ent;
6186	}
6187	return ret;
6188}
6189
6190#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6191/*
6192 * We don't consider PMD mapped swapping or file mapped pages because THP does
6193 * not support them for now.
6194 * Caller should make sure that pmd_trans_huge(pmd) is true.
6195 */
6196static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6197		unsigned long addr, pmd_t pmd, union mc_target *target)
6198{
6199	struct page *page = NULL;
6200	struct folio *folio;
6201	enum mc_target_type ret = MC_TARGET_NONE;
6202
6203	if (unlikely(is_swap_pmd(pmd))) {
6204		VM_BUG_ON(thp_migration_supported() &&
6205				  !is_pmd_migration_entry(pmd));
6206		return ret;
6207	}
6208	page = pmd_page(pmd);
6209	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
6210	folio = page_folio(page);
6211	if (!(mc.flags & MOVE_ANON))
6212		return ret;
6213	if (folio_memcg(folio) == mc.from) {
6214		ret = MC_TARGET_PAGE;
6215		if (target) {
6216			folio_get(folio);
6217			if (!folio_trylock(folio)) {
6218				folio_put(folio);
6219				return MC_TARGET_NONE;
6220			}
6221			target->folio = folio;
6222		}
6223	}
6224	return ret;
6225}
6226#else
6227static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6228		unsigned long addr, pmd_t pmd, union mc_target *target)
6229{
6230	return MC_TARGET_NONE;
6231}
6232#endif
6233
6234static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
6235					unsigned long addr, unsigned long end,
6236					struct mm_walk *walk)
6237{
6238	struct vm_area_struct *vma = walk->vma;
6239	pte_t *pte;
6240	spinlock_t *ptl;
6241
6242	ptl = pmd_trans_huge_lock(pmd, vma);
6243	if (ptl) {
6244		/*
6245		 * Note there cannot be MC_TARGET_DEVICE for now as we do not
6246		 * support transparent huge page with MEMORY_DEVICE_PRIVATE but
6247		 * this might change.
6248		 */
6249		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
6250			mc.precharge += HPAGE_PMD_NR;
6251		spin_unlock(ptl);
6252		return 0;
6253	}
6254
6255	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6256	if (!pte)
6257		return 0;
6258	for (; addr != end; pte++, addr += PAGE_SIZE)
6259		if (get_mctgt_type(vma, addr, ptep_get(pte), NULL))
6260			mc.precharge++;	/* increment precharge temporarily */
6261	pte_unmap_unlock(pte - 1, ptl);
6262	cond_resched();
6263
6264	return 0;
6265}
6266
6267static const struct mm_walk_ops precharge_walk_ops = {
6268	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
6269	.walk_lock	= PGWALK_RDLOCK,
6270};
6271
6272static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
6273{
6274	unsigned long precharge;
 
6275
6276	mmap_read_lock(mm);
6277	walk_page_range(mm, 0, ULONG_MAX, &precharge_walk_ops, NULL);
6278	mmap_read_unlock(mm);
6279
6280	precharge = mc.precharge;
6281	mc.precharge = 0;
6282
6283	return precharge;
6284}
6285
6286static int mem_cgroup_precharge_mc(struct mm_struct *mm)
6287{
6288	unsigned long precharge = mem_cgroup_count_precharge(mm);
6289
6290	VM_BUG_ON(mc.moving_task);
6291	mc.moving_task = current;
6292	return mem_cgroup_do_precharge(precharge);
6293}
6294
6295/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6296static void __mem_cgroup_clear_mc(void)
6297{
6298	struct mem_cgroup *from = mc.from;
6299	struct mem_cgroup *to = mc.to;
 
6300
6301	/* we must uncharge all the leftover precharges from mc.to */
6302	if (mc.precharge) {
6303		mem_cgroup_cancel_charge(mc.to, mc.precharge);
6304		mc.precharge = 0;
6305	}
6306	/*
6307	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6308	 * we must uncharge here.
6309	 */
6310	if (mc.moved_charge) {
6311		mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
6312		mc.moved_charge = 0;
6313	}
6314	/* we must fixup refcnts and charges */
6315	if (mc.moved_swap) {
6316		/* uncharge swap account from the old cgroup */
6317		if (!mem_cgroup_is_root(mc.from))
6318			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
 
6319
6320		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
6321
6322		/*
6323		 * we charged both to->memory and to->memsw, so we
6324		 * should uncharge to->memory.
6325		 */
6326		if (!mem_cgroup_is_root(mc.to))
6327			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
6328
6329		mc.moved_swap = 0;
6330	}
6331	memcg_oom_recover(from);
6332	memcg_oom_recover(to);
6333	wake_up_all(&mc.waitq);
6334}
6335
6336static void mem_cgroup_clear_mc(void)
6337{
6338	struct mm_struct *mm = mc.mm;
6339
6340	/*
6341	 * we must clear moving_task before waking up waiters at the end of
6342	 * task migration.
6343	 */
6344	mc.moving_task = NULL;
6345	__mem_cgroup_clear_mc();
6346	spin_lock(&mc.lock);
6347	mc.from = NULL;
6348	mc.to = NULL;
6349	mc.mm = NULL;
6350	spin_unlock(&mc.lock);
6351
6352	mmput(mm);
6353}
6354
6355static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6356{
6357	struct cgroup_subsys_state *css;
6358	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
6359	struct mem_cgroup *from;
6360	struct task_struct *leader, *p;
6361	struct mm_struct *mm;
6362	unsigned long move_flags;
6363	int ret = 0;
6364
6365	/* charge immigration isn't supported on the default hierarchy */
6366	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6367		return 0;
6368
6369	/*
6370	 * Multi-process migrations only happen on the default hierarchy
6371	 * where charge immigration is not used.  Perform charge
6372	 * immigration if @tset contains a leader and whine if there are
6373	 * multiple.
6374	 */
6375	p = NULL;
6376	cgroup_taskset_for_each_leader(leader, css, tset) {
6377		WARN_ON_ONCE(p);
6378		p = leader;
6379		memcg = mem_cgroup_from_css(css);
6380	}
6381	if (!p)
6382		return 0;
6383
6384	/*
6385	 * We are now committed to this value whatever it is. Changes in this
6386	 * tunable will only affect upcoming migrations, not the current one.
6387	 * So we need to save it, and keep it going.
6388	 */
6389	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
6390	if (!move_flags)
6391		return 0;
 
6392
6393	from = mem_cgroup_from_task(p);
6394
6395	VM_BUG_ON(from == memcg);
6396
6397	mm = get_task_mm(p);
6398	if (!mm)
6399		return 0;
6400	/* We move charges only when we move an owner of the mm */
6401	if (mm->owner == p) {
6402		VM_BUG_ON(mc.from);
6403		VM_BUG_ON(mc.to);
6404		VM_BUG_ON(mc.precharge);
6405		VM_BUG_ON(mc.moved_charge);
6406		VM_BUG_ON(mc.moved_swap);
6407
6408		spin_lock(&mc.lock);
6409		mc.mm = mm;
6410		mc.from = from;
6411		mc.to = memcg;
6412		mc.flags = move_flags;
6413		spin_unlock(&mc.lock);
6414		/* We set mc.moving_task later */
6415
6416		ret = mem_cgroup_precharge_mc(mm);
6417		if (ret)
6418			mem_cgroup_clear_mc();
6419	} else {
6420		mmput(mm);
6421	}
6422	return ret;
6423}
6424
6425static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6426{
6427	if (mc.to)
6428		mem_cgroup_clear_mc();
6429}
6430
6431static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6432				unsigned long addr, unsigned long end,
6433				struct mm_walk *walk)
6434{
6435	int ret = 0;
6436	struct vm_area_struct *vma = walk->vma;
6437	pte_t *pte;
6438	spinlock_t *ptl;
6439	enum mc_target_type target_type;
6440	union mc_target target;
6441	struct folio *folio;
 
6442
6443	ptl = pmd_trans_huge_lock(pmd, vma);
6444	if (ptl) {
6445		if (mc.precharge < HPAGE_PMD_NR) {
6446			spin_unlock(ptl);
6447			return 0;
6448		}
6449		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6450		if (target_type == MC_TARGET_PAGE) {
6451			folio = target.folio;
6452			if (folio_isolate_lru(folio)) {
6453				if (!mem_cgroup_move_account(folio, true,
6454							     mc.from, mc.to)) {
6455					mc.precharge -= HPAGE_PMD_NR;
6456					mc.moved_charge += HPAGE_PMD_NR;
6457				}
6458				folio_putback_lru(folio);
6459			}
6460			folio_unlock(folio);
6461			folio_put(folio);
6462		} else if (target_type == MC_TARGET_DEVICE) {
6463			folio = target.folio;
6464			if (!mem_cgroup_move_account(folio, true,
6465						     mc.from, mc.to)) {
6466				mc.precharge -= HPAGE_PMD_NR;
6467				mc.moved_charge += HPAGE_PMD_NR;
6468			}
6469			folio_unlock(folio);
6470			folio_put(folio);
6471		}
6472		spin_unlock(ptl);
6473		return 0;
6474	}
6475
6476retry:
6477	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6478	if (!pte)
6479		return 0;
6480	for (; addr != end; addr += PAGE_SIZE) {
6481		pte_t ptent = ptep_get(pte++);
6482		bool device = false;
6483		swp_entry_t ent;
6484
6485		if (!mc.precharge)
6486			break;
6487
6488		switch (get_mctgt_type(vma, addr, ptent, &target)) {
6489		case MC_TARGET_DEVICE:
6490			device = true;
6491			fallthrough;
6492		case MC_TARGET_PAGE:
6493			folio = target.folio;
6494			/*
6495			 * We can have a part of the split pmd here. Moving it
6496			 * can be done but it would be too convoluted so simply
6497			 * ignore such a partial THP and keep it in original
6498			 * memcg. There should be somebody mapping the head.
6499			 */
6500			if (folio_test_large(folio))
6501				goto put;
6502			if (!device && !folio_isolate_lru(folio))
6503				goto put;
6504			if (!mem_cgroup_move_account(folio, false,
6505						mc.from, mc.to)) {
6506				mc.precharge--;
6507				/* we uncharge from mc.from later. */
6508				mc.moved_charge++;
6509			}
6510			if (!device)
6511				folio_putback_lru(folio);
6512put:			/* get_mctgt_type() gets & locks the page */
6513			folio_unlock(folio);
6514			folio_put(folio);
6515			break;
6516		case MC_TARGET_SWAP:
6517			ent = target.ent;
6518			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6519				mc.precharge--;
6520				mem_cgroup_id_get_many(mc.to, 1);
6521				/* we fixup other refcnts and charges later. */
6522				mc.moved_swap++;
6523			}
6524			break;
6525		default:
6526			break;
6527		}
6528	}
6529	pte_unmap_unlock(pte - 1, ptl);
6530	cond_resched();
6531
6532	if (addr != end) {
6533		/*
6534		 * We have consumed all precharges we got in can_attach().
6535		 * We try charge one by one, but don't do any additional
6536		 * charges to mc.to if we have failed in charge once in attach()
6537		 * phase.
6538		 */
6539		ret = mem_cgroup_do_precharge(1);
6540		if (!ret)
6541			goto retry;
6542	}
6543
6544	return ret;
6545}
6546
6547static const struct mm_walk_ops charge_walk_ops = {
6548	.pmd_entry	= mem_cgroup_move_charge_pte_range,
6549	.walk_lock	= PGWALK_RDLOCK,
6550};
6551
6552static void mem_cgroup_move_charge(void)
6553{
6554	lru_add_drain_all();
6555	/*
6556	 * Signal folio_memcg_lock() to take the memcg's move_lock
6557	 * while we're moving its pages to another memcg. Then wait
6558	 * for already started RCU-only updates to finish.
6559	 */
6560	atomic_inc(&mc.from->moving_account);
6561	synchronize_rcu();
6562retry:
6563	if (unlikely(!mmap_read_trylock(mc.mm))) {
6564		/*
6565		 * Someone who is holding the mmap_lock might be waiting on the
6566		 * waitq. So we cancel all extra charges, wake up all waiters,
6567		 * and retry. Because we cancel precharges, we might not be able
6568		 * to move enough charges, but moving charge is a best-effort
6569		 * feature anyway, so it wouldn't be a big problem.
6570		 */
6571		__mem_cgroup_clear_mc();
6572		cond_resched();
6573		goto retry;
6574	}
6575	/*
6576	 * When we have consumed all precharges and failed in doing
6577	 * additional charge, the page walk just aborts.
6578	 */
6579	walk_page_range(mc.mm, 0, ULONG_MAX, &charge_walk_ops, NULL);
6580	mmap_read_unlock(mc.mm);
6581	atomic_dec(&mc.from->moving_account);
6582}
6583
6584static void mem_cgroup_move_task(void)
6585{
6586	if (mc.to) {
6587		mem_cgroup_move_charge();
6588		mem_cgroup_clear_mc();
6589	}
6590}
6591
6592#else	/* !CONFIG_MMU */
6593static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6594{
6595	return 0;
6596}
6597static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6598{
6599}
6600static void mem_cgroup_move_task(void)
6601{
6602}
6603#endif
6604
6605#ifdef CONFIG_MEMCG_KMEM
6606static void mem_cgroup_fork(struct task_struct *task)
6607{
6608	/*
6609	 * Set the update flag to cause task->objcg to be initialized lazily
6610	 * on the first allocation. It can be done without any synchronization
6611	 * because it's always performed on the current task, as does
6612	 * current_objcg_update().
6613	 */
6614	task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
6615}
6616
6617static void mem_cgroup_exit(struct task_struct *task)
6618{
6619	struct obj_cgroup *objcg = task->objcg;
6620
6621	objcg = (struct obj_cgroup *)
6622		((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG);
6623	if (objcg)
6624		obj_cgroup_put(objcg);
6625
6626	/*
6627	 * Some kernel allocations can happen after this point,
6628	 * but let's ignore them. It can be done without any synchronization
6629	 * because it's always performed on the current task, as does
6630	 * current_objcg_update().
6631	 */
6632	task->objcg = NULL;
6633}
6634#endif
6635
6636#ifdef CONFIG_LRU_GEN
6637static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset)
6638{
6639	struct task_struct *task;
6640	struct cgroup_subsys_state *css;
6641
6642	/* find the first leader if there is any */
6643	cgroup_taskset_for_each_leader(task, css, tset)
6644		break;
6645
6646	if (!task)
6647		return;
6648
6649	task_lock(task);
6650	if (task->mm && READ_ONCE(task->mm->owner) == task)
6651		lru_gen_migrate_mm(task->mm);
6652	task_unlock(task);
6653}
6654#else
6655static void mem_cgroup_lru_gen_attach(struct cgroup_taskset *tset) {}
6656#endif /* CONFIG_LRU_GEN */
6657
6658#ifdef CONFIG_MEMCG_KMEM
6659static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset)
6660{
6661	struct task_struct *task;
6662	struct cgroup_subsys_state *css;
6663
6664	cgroup_taskset_for_each(task, css, tset) {
6665		/* atomically set the update bit */
6666		set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
6667	}
6668}
6669#else
6670static void mem_cgroup_kmem_attach(struct cgroup_taskset *tset) {}
6671#endif /* CONFIG_MEMCG_KMEM */
6672
6673#if defined(CONFIG_LRU_GEN) || defined(CONFIG_MEMCG_KMEM)
6674static void mem_cgroup_attach(struct cgroup_taskset *tset)
6675{
6676	mem_cgroup_lru_gen_attach(tset);
6677	mem_cgroup_kmem_attach(tset);
6678}
6679#endif
6680
6681static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6682{
6683	if (value == PAGE_COUNTER_MAX)
6684		seq_puts(m, "max\n");
6685	else
6686		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6687
6688	return 0;
6689}
6690
6691static u64 memory_current_read(struct cgroup_subsys_state *css,
6692			       struct cftype *cft)
6693{
6694	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6695
6696	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6697}
6698
6699static u64 memory_peak_read(struct cgroup_subsys_state *css,
6700			    struct cftype *cft)
6701{
6702	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6703
6704	return (u64)memcg->memory.watermark * PAGE_SIZE;
6705}
6706
6707static int memory_min_show(struct seq_file *m, void *v)
6708{
6709	return seq_puts_memcg_tunable(m,
6710		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6711}
6712
6713static ssize_t memory_min_write(struct kernfs_open_file *of,
6714				char *buf, size_t nbytes, loff_t off)
6715{
6716	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6717	unsigned long min;
6718	int err;
6719
6720	buf = strstrip(buf);
6721	err = page_counter_memparse(buf, "max", &min);
6722	if (err)
6723		return err;
6724
6725	page_counter_set_min(&memcg->memory, min);
6726
6727	return nbytes;
6728}
6729
6730static int memory_low_show(struct seq_file *m, void *v)
6731{
6732	return seq_puts_memcg_tunable(m,
6733		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6734}
6735
6736static ssize_t memory_low_write(struct kernfs_open_file *of,
6737				char *buf, size_t nbytes, loff_t off)
6738{
6739	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6740	unsigned long low;
6741	int err;
6742
6743	buf = strstrip(buf);
6744	err = page_counter_memparse(buf, "max", &low);
6745	if (err)
6746		return err;
6747
6748	page_counter_set_low(&memcg->memory, low);
6749
6750	return nbytes;
6751}
6752
6753static int memory_high_show(struct seq_file *m, void *v)
6754{
6755	return seq_puts_memcg_tunable(m,
6756		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6757}
6758
6759static ssize_t memory_high_write(struct kernfs_open_file *of,
6760				 char *buf, size_t nbytes, loff_t off)
6761{
6762	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6763	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6764	bool drained = false;
6765	unsigned long high;
6766	int err;
6767
6768	buf = strstrip(buf);
6769	err = page_counter_memparse(buf, "max", &high);
6770	if (err)
6771		return err;
6772
6773	page_counter_set_high(&memcg->memory, high);
6774
6775	for (;;) {
6776		unsigned long nr_pages = page_counter_read(&memcg->memory);
6777		unsigned long reclaimed;
6778
6779		if (nr_pages <= high)
6780			break;
6781
6782		if (signal_pending(current))
6783			break;
6784
6785		if (!drained) {
6786			drain_all_stock(memcg);
6787			drained = true;
6788			continue;
6789		}
6790
6791		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6792					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP);
6793
6794		if (!reclaimed && !nr_retries--)
6795			break;
6796	}
6797
6798	memcg_wb_domain_size_changed(memcg);
6799	return nbytes;
6800}
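
/*
 * Usage sketch (illustrative): lowering the high boundary, e.g.
 * "echo 512M > memory.high", only returns after the loop above has tried
 * to reclaim the group down to the new value (or a signal arrived or the
 * retries ran out); "echo max > memory.high" removes the boundary again.
 */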
6801
6802static int memory_max_show(struct seq_file *m, void *v)
6803{
6804	return seq_puts_memcg_tunable(m,
6805		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6806}
6807
6808static ssize_t memory_max_write(struct kernfs_open_file *of,
6809				char *buf, size_t nbytes, loff_t off)
6810{
6811	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6812	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6813	bool drained = false;
6814	unsigned long max;
6815	int err;
6816
6817	buf = strstrip(buf);
6818	err = page_counter_memparse(buf, "max", &max);
6819	if (err)
6820		return err;
6821
6822	xchg(&memcg->memory.max, max);
6823
6824	for (;;) {
6825		unsigned long nr_pages = page_counter_read(&memcg->memory);
6826
6827		if (nr_pages <= max)
6828			break;
6829
6830		if (signal_pending(current))
6831			break;
6832
6833		if (!drained) {
6834			drain_all_stock(memcg);
6835			drained = true;
6836			continue;
6837		}
6838
6839		if (nr_reclaims) {
6840			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6841					GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP))
6842				nr_reclaims--;
6843			continue;
6844		}
6845
6846		memcg_memory_event(memcg, MEMCG_OOM);
6847		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6848			break;
6849	}
6850
6851	memcg_wb_domain_size_changed(memcg);
6852	return nbytes;
6853}
6854
6855/*
6856 * Note: don't forget to update the 'samples/cgroup/memcg_event_listener'
6857 * if any new events become available.
6858 */
6859static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6860{
6861	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6862	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6863	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6864	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6865	seq_printf(m, "oom_kill %lu\n",
6866		   atomic_long_read(&events[MEMCG_OOM_KILL]));
6867	seq_printf(m, "oom_group_kill %lu\n",
6868		   atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
6869}
6870
6871static int memory_events_show(struct seq_file *m, void *v)
6872{
6873	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6874
6875	__memory_events_show(m, memcg->memory_events);
6876	return 0;
6877}
6878
6879static int memory_events_local_show(struct seq_file *m, void *v)
6880{
6881	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6882
6883	__memory_events_show(m, memcg->memory_events_local);
6884	return 0;
6885}
6886
6887static int memory_stat_show(struct seq_file *m, void *v)
6888{
6889	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6890	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
6891	struct seq_buf s;
6892
6893	if (!buf)
6894		return -ENOMEM;
6895	seq_buf_init(&s, buf, PAGE_SIZE);
6896	memory_stat_format(memcg, &s);
6897	seq_puts(m, buf);
6898	kfree(buf);
6899	return 0;
6900}
6901
6902#ifdef CONFIG_NUMA
6903static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6904						     int item)
6905{
6906	return lruvec_page_state(lruvec, item) *
6907		memcg_page_state_output_unit(item);
6908}
6909
6910static int memory_numa_stat_show(struct seq_file *m, void *v)
6911{
6912	int i;
6913	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6914
6915	mem_cgroup_flush_stats(memcg);
6916
6917	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6918		int nid;
6919
6920		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6921			continue;
6922
6923		seq_printf(m, "%s", memory_stats[i].name);
6924		for_each_node_state(nid, N_MEMORY) {
6925			u64 size;
6926			struct lruvec *lruvec;
6927
6928			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6929			size = lruvec_page_state_output(lruvec,
6930							memory_stats[i].idx);
6931			seq_printf(m, " N%d=%llu", nid, size);
6932		}
6933		seq_putc(m, '\n');
6934	}
6935
6936	return 0;
6937}
6938#endif
6939
6940static int memory_oom_group_show(struct seq_file *m, void *v)
6941{
6942	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6943
6944	seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
6945
6946	return 0;
6947}
6948
6949static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6950				      char *buf, size_t nbytes, loff_t off)
6951{
6952	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6953	int ret, oom_group;
6954
6955	buf = strstrip(buf);
6956	if (!buf)
6957		return -EINVAL;
6958
6959	ret = kstrtoint(buf, 0, &oom_group);
6960	if (ret)
6961		return ret;
6962
6963	if (oom_group != 0 && oom_group != 1)
6964		return -EINVAL;
6965
6966	WRITE_ONCE(memcg->oom_group, oom_group);
6967
6968	return nbytes;
6969}
6970
6971static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
6972			      size_t nbytes, loff_t off)
6973{
6974	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6975	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6976	unsigned long nr_to_reclaim, nr_reclaimed = 0;
6977	unsigned int reclaim_options;
6978	int err;
6979
6980	buf = strstrip(buf);
6981	err = page_counter_memparse(buf, "", &nr_to_reclaim);
6982	if (err)
6983		return err;
6984
6985	reclaim_options	= MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
6986	while (nr_reclaimed < nr_to_reclaim) {
6987		/* Will converge on zero, but reclaim enforces a minimum */
6988		unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4;
6989		unsigned long reclaimed;
6990
6991		if (signal_pending(current))
6992			return -EINTR;
6993
6994		/*
6995		 * This is the final attempt; drain the percpu LRU caches in the
6996		 * hope of introducing more evictable pages for
6997		 * try_to_free_mem_cgroup_pages().
6998		 */
6999		if (!nr_retries)
7000			lru_add_drain_all();
7001
7002		reclaimed = try_to_free_mem_cgroup_pages(memcg,
7003					batch_size, GFP_KERNEL, reclaim_options);
7004
7005		if (!reclaimed && !nr_retries--)
7006			return -EAGAIN;
7007
7008		nr_reclaimed += reclaimed;
7009	}
7010
7011	return nbytes;
7012}
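
/*
 * Usage sketch (illustrative): proactive reclaim is requested by writing
 * a byte count to memory.reclaim, e.g. "echo 1G > memory.reclaim" from
 * within the cgroup's directory. The write only succeeds once at least
 * that much has been reclaimed; otherwise it fails with EAGAIN, or EINTR
 * if a signal is pending.
 */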
7013
7014static struct cftype memory_files[] = {
7015	{
7016		.name = "current",
7017		.flags = CFTYPE_NOT_ON_ROOT,
7018		.read_u64 = memory_current_read,
7019	},
7020	{
7021		.name = "peak",
7022		.flags = CFTYPE_NOT_ON_ROOT,
7023		.read_u64 = memory_peak_read,
7024	},
7025	{
7026		.name = "min",
7027		.flags = CFTYPE_NOT_ON_ROOT,
7028		.seq_show = memory_min_show,
7029		.write = memory_min_write,
7030	},
7031	{
7032		.name = "low",
7033		.flags = CFTYPE_NOT_ON_ROOT,
7034		.seq_show = memory_low_show,
7035		.write = memory_low_write,
7036	},
7037	{
7038		.name = "high",
7039		.flags = CFTYPE_NOT_ON_ROOT,
7040		.seq_show = memory_high_show,
7041		.write = memory_high_write,
7042	},
7043	{
7044		.name = "max",
7045		.flags = CFTYPE_NOT_ON_ROOT,
7046		.seq_show = memory_max_show,
7047		.write = memory_max_write,
7048	},
7049	{
7050		.name = "events",
7051		.flags = CFTYPE_NOT_ON_ROOT,
7052		.file_offset = offsetof(struct mem_cgroup, events_file),
7053		.seq_show = memory_events_show,
7054	},
7055	{
7056		.name = "events.local",
7057		.flags = CFTYPE_NOT_ON_ROOT,
7058		.file_offset = offsetof(struct mem_cgroup, events_local_file),
7059		.seq_show = memory_events_local_show,
7060	},
7061	{
7062		.name = "stat",
7063		.seq_show = memory_stat_show,
7064	},
7065#ifdef CONFIG_NUMA
7066	{
7067		.name = "numa_stat",
7068		.seq_show = memory_numa_stat_show,
7069	},
7070#endif
7071	{
7072		.name = "oom.group",
7073		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
7074		.seq_show = memory_oom_group_show,
7075		.write = memory_oom_group_write,
7076	},
7077	{
7078		.name = "reclaim",
7079		.flags = CFTYPE_NS_DELEGATABLE,
7080		.write = memory_reclaim,
7081	},
7082	{ }	/* terminate */
7083};
7084
7085struct cgroup_subsys memory_cgrp_subsys = {
7086	.css_alloc = mem_cgroup_css_alloc,
7087	.css_online = mem_cgroup_css_online,
7088	.css_offline = mem_cgroup_css_offline,
7089	.css_released = mem_cgroup_css_released,
7090	.css_free = mem_cgroup_css_free,
7091	.css_reset = mem_cgroup_css_reset,
7092	.css_rstat_flush = mem_cgroup_css_rstat_flush,
7093	.can_attach = mem_cgroup_can_attach,
7094#if defined(CONFIG_LRU_GEN) || defined(CONFIG_MEMCG_KMEM)
7095	.attach = mem_cgroup_attach,
7096#endif
7097	.cancel_attach = mem_cgroup_cancel_attach,
7098	.post_attach = mem_cgroup_move_task,
7099#ifdef CONFIG_MEMCG_KMEM
7100	.fork = mem_cgroup_fork,
7101	.exit = mem_cgroup_exit,
7102#endif
7103	.dfl_cftypes = memory_files,
7104	.legacy_cftypes = mem_cgroup_legacy_files,
7105	.early_init = 0,
7106};
7107
7108/*
7109 * This function calculates an individual cgroup's effective
7110 * protection which is derived from its own memory.min/low, its
7111 * parent's and siblings' settings, as well as the actual memory
7112 * distribution in the tree.
7113 *
7114 * The following rules apply to the effective protection values:
7115 *
7116 * 1. At the first level of reclaim, effective protection is equal to
7117 *    the declared protection in memory.min and memory.low.
7118 *
7119 * 2. To enable safe delegation of the protection configuration, at
7120 *    subsequent levels the effective protection is capped to the
7121 *    parent's effective protection.
7122 *
7123 * 3. To make complex and dynamic subtrees easier to configure, the
7124 *    user is allowed to overcommit the declared protection at a given
7125 *    level. If that is the case, the parent's effective protection is
7126 *    distributed to the children in proportion to how much protection
7127 *    they have declared and how much of it they are utilizing.
7128 *
7129 *    This makes distribution proportional, but also work-conserving:
7130 *    if one cgroup claims much more protection than it uses memory,
7131 *    the unused remainder is available to its siblings.
7132 *
7133 * 4. Conversely, when the declared protection is undercommitted at a
7134 *    given level, the distribution of the larger parental protection
7135 *    budget is NOT proportional. A cgroup's protection from a sibling
7136 *    is capped to its own memory.min/low setting.
7137 *
7138 * 5. However, to allow protecting recursive subtrees from each other
7139 *    without having to declare each individual cgroup's fixed share
7140 *    of the ancestor's claim to protection, any unutilized -
7141 *    "floating" - protection from up the tree is distributed in
7142 *    proportion to each cgroup's *usage*. This makes the protection
7143 *    neutral wrt sibling cgroups and lets them compete freely over
7144 *    the shared parental protection budget, but it protects the
7145 *    subtree as a whole from neighboring subtrees.
7146 *
7147 * Note that 4. and 5. are not in conflict: 4. is about protecting
7148 * against immediate siblings whereas 5. is about protecting against
7149 * neighboring subtrees.
7150 */
7151static unsigned long effective_protection(unsigned long usage,
7152					  unsigned long parent_usage,
7153					  unsigned long setting,
7154					  unsigned long parent_effective,
7155					  unsigned long siblings_protected)
7156{
7157	unsigned long protected;
7158	unsigned long ep;
7159
7160	protected = min(usage, setting);
7161	/*
7162	 * If all cgroups at this level combined claim and use more
7163	 * protection than what the parent affords them, distribute
7164	 * shares in proportion to utilization.
7165	 *
7166	 * We are using actual utilization rather than the statically
7167	 * claimed protection in order to be work-conserving: claimed
7168	 * but unused protection is available to siblings that would
7169	 * otherwise get a smaller chunk than what they claimed.
7170	 */
7171	if (siblings_protected > parent_effective)
7172		return protected * parent_effective / siblings_protected;
7173
7174	/*
7175	 * Ok, utilized protection of all children is within what the
7176	 * parent affords them, so we know whatever this child claims
7177	 * and utilizes is effectively protected.
7178	 *
7179	 * If there is unprotected usage beyond this value, reclaim
7180	 * will apply pressure in proportion to that amount.
7181	 *
7182	 * If there is unutilized protection, the cgroup will be fully
7183	 * shielded from reclaim, but we do return a smaller value for
7184	 * protection than what the group could enjoy in theory. This
7185	 * is okay. With the overcommit distribution above, effective
7186	 * protection is always dependent on how memory is actually
7187	 * consumed among the siblings anyway.
7188	 */
7189	ep = protected;
7190
7191	/*
7192	 * If the children aren't claiming (all of) the protection
7193	 * afforded to them by the parent, distribute the remainder in
7194	 * proportion to the (unprotected) memory of each cgroup. That
7195	 * way, cgroups that aren't explicitly prioritized wrt each
7196	 * other compete freely over the allowance, but they are
7197	 * collectively protected from neighboring trees.
7198	 *
7199	 * We're using unprotected memory for the weight so that if
7200	 * some cgroups DO claim explicit protection, we don't protect
7201	 * the same bytes twice.
7202	 *
7203	 * Check both usage and parent_usage against the respective
7204	 * protected values. One should imply the other, but they
7205	 * aren't read atomically - make sure the division is sane.
7206	 */
7207	if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
7208		return ep;
7209	if (parent_effective > siblings_protected &&
7210	    parent_usage > siblings_protected &&
7211	    usage > protected) {
7212		unsigned long unclaimed;
7213
7214		unclaimed = parent_effective - siblings_protected;
7215		unclaimed *= usage - protected;
7216		unclaimed /= parent_usage - siblings_protected;
7217
7218		ep += unclaimed;
7219	}
7220
7221	return ep;
7222}
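
/*
 * A worked example of the rules above, with illustrative numbers only:
 * assume the parent's effective low protection is 10G and its children
 * together use all of it.
 *
 * Overcommit (rule 3): children A and B both declare memory.low=8G;
 * A uses 6G, B uses 10G. Utilized protection is min(usage, setting),
 * so siblings_protected = 6G + 8G = 14G > 10G and the shares scale
 * down proportionally: A gets 6G * 10/14 ~= 4.3G, B gets
 * 8G * 10/14 ~= 5.7G.
 *
 * Floating protection (rule 5, with the memory_recursiveprot mount
 * option): A declares memory.low=2G and uses 6G, B declares nothing
 * and uses 4G. siblings_protected = 2G, so 8G of the parent's budget
 * is unclaimed and gets distributed by unprotected usage:
 * A gets 2G + 8G * 4/8 = 6G, B gets 0 + 8G * 4/8 = 4G, and the
 * subtree as a whole keeps the full 10G against neighboring trees.
 */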
7223
7224/**
7225 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
7226 * @root: the top ancestor of the sub-tree being checked
7227 * @memcg: the memory cgroup to check
7228 *
7229 * WARNING: This function is not stateless! It can only be used as part
7230 *          of a top-down tree iteration, not for isolated queries.
7231 */
7232void mem_cgroup_calculate_protection(struct mem_cgroup *root,
7233				     struct mem_cgroup *memcg)
7234{
7235	unsigned long usage, parent_usage;
7236	struct mem_cgroup *parent;
7237
7238	if (mem_cgroup_disabled())
7239		return;
7240
7241	if (!root)
7242		root = root_mem_cgroup;
7243
7244	/*
7245	 * Effective values of the reclaim targets are ignored so they
7246	 * can be stale. Have a look at mem_cgroup_protection for more
7247	 * details.
7248	 * TODO: calculation should be more robust so that we do not need
7249	 * that special casing.
7250	 */
7251	if (memcg == root)
7252		return;
7253
7254	usage = page_counter_read(&memcg->memory);
7255	if (!usage)
7256		return;
7257
7258	parent = parent_mem_cgroup(memcg);
7259
7260	if (parent == root) {
7261		memcg->memory.emin = READ_ONCE(memcg->memory.min);
7262		memcg->memory.elow = READ_ONCE(memcg->memory.low);
7263		return;
7264	}
7265
7266	parent_usage = page_counter_read(&parent->memory);
7267
7268	WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
7269			READ_ONCE(memcg->memory.min),
7270			READ_ONCE(parent->memory.emin),
7271			atomic_long_read(&parent->memory.children_min_usage)));
7272
7273	WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
7274			READ_ONCE(memcg->memory.low),
7275			READ_ONCE(parent->memory.elow),
7276			atomic_long_read(&parent->memory.children_low_usage)));
7277}
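
/*
 * Illustrative calling pattern (a sketch, not verbatim reclaim code;
 * mem_cgroup_below_min()/_low() are the helpers that consume the
 * effective values computed here):
 *
 *	memcg = mem_cgroup_iter(target, NULL, NULL);
 *	do {
 *		mem_cgroup_calculate_protection(target, memcg);
 *		if (mem_cgroup_below_min(target, memcg))
 *			continue;		// hard protection, skip
 *		... shrink @memcg's lruvecs, scaling pressure by elow ...
 *	} while ((memcg = mem_cgroup_iter(target, memcg, NULL)));
 *
 * Because emin/elow are derived from the parent's effective values,
 * skipping ancestors in the walk would leave them stale.
 */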
7278
7279static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
7280			gfp_t gfp)
7281{
7282	int ret;
7283
7284	ret = try_charge(memcg, gfp, folio_nr_pages(folio));
7285	if (ret)
7286		goto out;
7287
7288	mem_cgroup_commit_charge(folio, memcg);
7289out:
7290	return ret;
7291}
7292
7293int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
7294{
7295	struct mem_cgroup *memcg;
7296	int ret;
7297
7298	memcg = get_mem_cgroup_from_mm(mm);
7299	ret = charge_memcg(folio, memcg, gfp);
7300	css_put(&memcg->css);
7301
7302	return ret;
7303}
7304
7305/**
7306 * mem_cgroup_hugetlb_try_charge - try to charge the memcg for a hugetlb folio
7307 * @memcg: memcg to charge.
7308 * @gfp: reclaim mode.
7309 * @nr_pages: number of pages to charge.
7310 *
7311 * This function is called when allocating a huge page folio to determine if
7312 * the memcg has the capacity for it. It does not commit the charge yet,
7313 * as the hugetlb folio itself has not been obtained from the hugetlb pool.
7314 *
7315 * Once we have obtained the hugetlb folio, we can call
7316 * mem_cgroup_commit_charge() to commit the charge. If we fail to obtain the
7317 * folio, we should instead call mem_cgroup_cancel_charge() to undo the effect
7318 * of try_charge().
7319 *
7320 * Returns 0 on success. Otherwise, an error code is returned.
7321 */
7322int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
7323			long nr_pages)
7324{
7325	/*
7326	 * If hugetlb memcg charging is not enabled, do not fail hugetlb allocation,
7327	 * but do not attempt to commit charge later (or cancel on error) either.
7328	 */
7329	if (mem_cgroup_disabled() || !memcg ||
7330		!cgroup_subsys_on_dfl(memory_cgrp_subsys) ||
7331		!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING))
7332		return -EOPNOTSUPP;
7333
7334	if (try_charge(memcg, gfp, nr_pages))
7335		return -ENOMEM;
7336
7337	return 0;
7338}
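
/*
 * Illustrative call sequence for the hugetlb allocation path (a
 * simplified sketch; the real caller is in mm/hugetlb.c and the
 * helper names around it may differ):
 *
 *	memcg = get_mem_cgroup_from_current();
 *	ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages);
 *	if (ret == -ENOMEM)
 *		goto fail;			// over the memcg limit
 *	folio = ...take a folio from the hugetlb pool...;
 *	if (!ret && folio)
 *		mem_cgroup_commit_charge(folio, memcg);
 *	else if (!ret && !folio)
 *		mem_cgroup_cancel_charge(memcg, nr_pages);
 *	mem_cgroup_put(memcg);
 *
 * An -EOPNOTSUPP return means no charge was attempted, so neither
 * commit nor cancel should follow in that case.
 */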
7339
7340/**
7341 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
7342 * @folio: folio to charge.
7343 * @mm: mm context of the victim
7344 * @gfp: reclaim mode
7345 * @entry: swap entry for which the folio is allocated
7346 *
7347 * This function charges a folio allocated for swapin. Please call this before
7348 * adding the folio to the swapcache.
7349 *
7350 * Returns 0 on success. Otherwise, an error code is returned.
7351 */
7352int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
7353				  gfp_t gfp, swp_entry_t entry)
7354{
7355	struct mem_cgroup *memcg;
7356	unsigned short id;
7357	int ret;
7358
7359	if (mem_cgroup_disabled())
7360		return 0;
7361
7362	id = lookup_swap_cgroup_id(entry);
7363	rcu_read_lock();
7364	memcg = mem_cgroup_from_id(id);
7365	if (!memcg || !css_tryget_online(&memcg->css))
7366		memcg = get_mem_cgroup_from_mm(mm);
7367	rcu_read_unlock();
7368
7369	ret = charge_memcg(folio, memcg, gfp);
7370
7371	css_put(&memcg->css);
7372	return ret;
7373}
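
/*
 * Illustrative usage on the swapin path (a sketch; the real callers
 * are the swapin fault and readahead paths):
 *
 *	folio = ...allocate an order-0 folio for @entry...;
 *	if (mem_cgroup_swapin_charge_folio(folio, mm, gfp, entry))
 *		goto fail;
 *	...add folio to the swap cache for @entry...;
 *	mem_cgroup_swapin_uncharge_swap(entry);
 */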
7374
7375/*
7376 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
7377 * @entry: swap entry for which the page is charged
7378 *
7379 * Call this function after successfully adding the charged page to swapcache.
7380 *
7381 * Note: This function assumes that the page whose swap slot is being
7382 * uncharged is an order-0 page.
7383 */
7384void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
7385{
7386	/*
7387	 * Cgroup1's unified memory+swap counter has been charged with the
7388	 * new swapcache page, finish the transfer by uncharging the swap
7389	 * slot. The swap slot would also get uncharged when it dies, but
7390	 * it can stick around indefinitely and we'd count the page twice
7391	 * the entire time.
7392	 *
7393	 * Cgroup2 has separate resource counters for memory and swap,
7394	 * so this is a non-issue here. Memory and swap charge lifetimes
7395	 * correspond 1:1 to page and swap slot lifetimes: we charge the
7396	 * page to memory here, and uncharge swap when the slot is freed.
7397	 */
7398	if (!mem_cgroup_disabled() && do_memsw_account()) {
7399		/*
7400		 * The swap entry might not get freed for a long time,
7401		 * let's not wait for it.  The page already received a
7402		 * memory+swap charge, drop the swap entry duplicate.
7403		 */
7404		mem_cgroup_uncharge_swap(entry, 1);
7405	}
7406}
7407
7408struct uncharge_gather {
7409	struct mem_cgroup *memcg;
7410	unsigned long nr_memory;
7411	unsigned long pgpgout;
7412	unsigned long nr_kmem;
7413	int nid;
7414};
7415
7416static inline void uncharge_gather_clear(struct uncharge_gather *ug)
7417{
7418	memset(ug, 0, sizeof(*ug));
7419}
7420
7421static void uncharge_batch(const struct uncharge_gather *ug)
7422{
7423	unsigned long flags;
7424
7425	if (ug->nr_memory) {
7426		page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
7427		if (do_memsw_account())
7428			page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
7429		if (ug->nr_kmem)
7430			memcg_account_kmem(ug->memcg, -ug->nr_kmem);
7431		memcg_oom_recover(ug->memcg);
7432	}
7433
7434	local_irq_save(flags);
7435	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
7436	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
7437	memcg_check_events(ug->memcg, ug->nid);
7438	local_irq_restore(flags);
7439
7440	/* drop reference from uncharge_folio */
7441	css_put(&ug->memcg->css);
7442}
7443
7444static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
7445{
7446	long nr_pages;
7447	struct mem_cgroup *memcg;
7448	struct obj_cgroup *objcg;
7449
7450	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7451
7452	/*
7453	 * Nobody should be changing or seriously looking at
7454	 * folio memcg or objcg at this point, we have fully
7455	 * exclusive access to the folio.
7456	 */
7457	if (folio_memcg_kmem(folio)) {
7458		objcg = __folio_objcg(folio);
7459		/*
7460		 * This get matches the put at the end of the function and
7461		 * kmem pages do not hold memcg references anymore.
7462		 */
7463		memcg = get_mem_cgroup_from_objcg(objcg);
7464	} else {
7465		memcg = __folio_memcg(folio);
7466	}
7467
7468	if (!memcg)
7469		return;
7470
7471	if (ug->memcg != memcg) {
7472		if (ug->memcg) {
7473			uncharge_batch(ug);
7474			uncharge_gather_clear(ug);
7475		}
7476		ug->memcg = memcg;
7477		ug->nid = folio_nid(folio);
7478
7479		/* pairs with css_put in uncharge_batch */
7480		css_get(&memcg->css);
7481	}
7482
7483	nr_pages = folio_nr_pages(folio);
7484
7485	if (folio_memcg_kmem(folio)) {
7486		ug->nr_memory += nr_pages;
7487		ug->nr_kmem += nr_pages;
7488
7489		folio->memcg_data = 0;
7490		obj_cgroup_put(objcg);
7491	} else {
7492		/* LRU pages aren't accounted at the root level */
7493		if (!mem_cgroup_is_root(memcg))
7494			ug->nr_memory += nr_pages;
7495		ug->pgpgout++;
7496
7497		folio->memcg_data = 0;
7498	}
7499
7500	css_put(&memcg->css);
7501}
7502
7503void __mem_cgroup_uncharge(struct folio *folio)
7504{
7505	struct uncharge_gather ug;
7506
7507	/* Don't touch folio->lru of any random page, pre-check: */
7508	if (!folio_memcg(folio))
7509		return;
7510
7511	uncharge_gather_clear(&ug);
7512	uncharge_folio(folio, &ug);
7513	uncharge_batch(&ug);
7514}
7515
7516void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
7517{
7518	struct uncharge_gather ug;
7519	unsigned int i;
7520
7521	uncharge_gather_clear(&ug);
7522	for (i = 0; i < folios->nr; i++)
7523		uncharge_folio(folios->folios[i], &ug);
7524	if (ug.memcg)
7525		uncharge_batch(&ug);
7526}
7527
7528/**
7529 * mem_cgroup_replace_folio - Charge a folio's replacement.
7530 * @old: Currently circulating folio.
7531 * @new: Replacement folio.
7532 *
7533 * Charge @new as a replacement folio for @old. @old will
7534 * be uncharged upon free. This is only used by the page cache
7535 * (in replace_page_cache_folio()).
7536 *
7537 * Both folios must be locked, @new->mapping must be set up.
7538 */
7539void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
7540{
7541	struct mem_cgroup *memcg;
7542	long nr_pages = folio_nr_pages(new);
7543	unsigned long flags;
7544
7545	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7546	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7547	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7548	VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
7549
7550	if (mem_cgroup_disabled())
7551		return;
7552
7553	/* Page cache replacement: new folio already charged? */
7554	if (folio_memcg(new))
7555		return;
7556
7557	memcg = folio_memcg(old);
7558	VM_WARN_ON_ONCE_FOLIO(!memcg, old);
7559	if (!memcg)
7560		return;
7561
7562	/* Force-charge the new page. The old one will be freed soon */
7563	if (!mem_cgroup_is_root(memcg)) {
7564		page_counter_charge(&memcg->memory, nr_pages);
7565		if (do_memsw_account())
7566			page_counter_charge(&memcg->memsw, nr_pages);
7567	}
7568
7569	css_get(&memcg->css);
7570	commit_charge(new, memcg);
7571
7572	local_irq_save(flags);
7573	mem_cgroup_charge_statistics(memcg, nr_pages);
7574	memcg_check_events(memcg, folio_nid(new));
7575	local_irq_restore(flags);
7576}
7577
7578/**
7579 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
7580 * @old: Currently circulating folio.
7581 * @new: Replacement folio.
7582 *
7583 * Transfer the memcg data from the old folio to the new folio for migration.
7584 * The old folio's data info will be cleared. Note that the memory counters
7585 * will remain unchanged throughout the process.
7586 *
7587 * Both folios must be locked, @new->mapping must be set up.
7588 */
7589void mem_cgroup_migrate(struct folio *old, struct folio *new)
7590{
7591	struct mem_cgroup *memcg;
7592
7593	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7594	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7595	VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7596	VM_BUG_ON_FOLIO(folio_nr_pages(old) != folio_nr_pages(new), new);
7597
7598	if (mem_cgroup_disabled())
7599		return;
7600
7601	memcg = folio_memcg(old);
7602	/*
7603	 * Note that it is normal to see !memcg for a hugetlb folio.
7604	 * E.g. it could have been allocated when memory_hugetlb_accounting
7605	 * was not selected.
7606	 */
7607	VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
7608	if (!memcg)
7609		return;
7610
7611	/* Transfer the charge and the css ref */
7612	commit_charge(new, memcg);
7613	/*
7614	 * If the old folio is a large folio and is in the split queue, it needs
7615	 * to be removed from the split queue now, to avoid using an incorrect
7616	 * split queue in destroy_large_folio() after the memcg of the old folio
7617	 * is cleared.
7618	 *
7619	 * In addition, the old folio is about to be freed after migration, so
7620	 * removing from the split queue a bit earlier seems reasonable.
7621	 */
7622	if (folio_test_large(old) && folio_test_large_rmappable(old))
7623		folio_undo_large_rmappable(old);
7624	old->memcg_data = 0;
7625}
7626
7627DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
7628EXPORT_SYMBOL(memcg_sockets_enabled_key);
7629
7630void mem_cgroup_sk_alloc(struct sock *sk)
7631{
7632	struct mem_cgroup *memcg;
7633
7634	if (!mem_cgroup_sockets_enabled)
7635		return;
7636
7637	/* Do not associate the sock with unrelated interrupted task's memcg. */
7638	if (!in_task())
7639		return;
7640
7641	rcu_read_lock();
7642	memcg = mem_cgroup_from_task(current);
7643	if (mem_cgroup_is_root(memcg))
7644		goto out;
7645	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
7646		goto out;
7647	if (css_tryget(&memcg->css))
7648		sk->sk_memcg = memcg;
7649out:
7650	rcu_read_unlock();
7651}
7652
7653void mem_cgroup_sk_free(struct sock *sk)
7654{
7655	if (sk->sk_memcg)
7656		css_put(&sk->sk_memcg->css);
7657}
7658
7659/**
7660 * mem_cgroup_charge_skmem - charge socket memory
7661 * @memcg: memcg to charge
7662 * @nr_pages: number of pages to charge
7663 * @gfp_mask: reclaim mode
7664 *
7665 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7666 * @memcg's configured limit, %false if it doesn't.
7667 */
7668bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
7669			     gfp_t gfp_mask)
7670{
7671	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7672		struct page_counter *fail;
7673
7674		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7675			memcg->tcpmem_pressure = 0;
7676			return true;
7677		}
7678		memcg->tcpmem_pressure = 1;
7679		if (gfp_mask & __GFP_NOFAIL) {
7680			page_counter_charge(&memcg->tcpmem, nr_pages);
7681			return true;
7682		}
7683		return false;
7684	}
7685
7686	if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
7687		mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7688		return true;
7689	}
7690
7691	return false;
7692}
7693
7694/**
7695 * mem_cgroup_uncharge_skmem - uncharge socket memory
7696 * @memcg: memcg to uncharge
7697 * @nr_pages: number of pages to uncharge
7698 */
7699void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7700{
7701	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7702		page_counter_uncharge(&memcg->tcpmem, nr_pages);
7703		return;
7704	}
7705
7706	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7707
7708	refill_stock(memcg, nr_pages);
7709}
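
/*
 * Illustrative usage (a sketch; the real callers live in the
 * networking core around the socket memory accounting):
 *
 *	if (sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages, gfp))
 *		...treat as memcg pressure, suppress the allocation...;
 *	...
 *	if (sk->sk_memcg)
 *		mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
 *
 * On cgroup1 a __GFP_NOFAIL request is force-charged past the tcpmem
 * limit (see above); on cgroup2 the pages go through try_charge() and
 * are reported via the MEMCG_SOCK statistic.
 */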
7710
7711static int __init cgroup_memory(char *s)
7712{
7713	char *token;
7714
7715	while ((token = strsep(&s, ",")) != NULL) {
7716		if (!*token)
7717			continue;
7718		if (!strcmp(token, "nosocket"))
7719			cgroup_memory_nosocket = true;
7720		if (!strcmp(token, "nokmem"))
7721			cgroup_memory_nokmem = true;
7722		if (!strcmp(token, "nobpf"))
7723			cgroup_memory_nobpf = true;
7724	}
7725	return 1;
7726}
7727__setup("cgroup.memory=", cgroup_memory);
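
/*
 * Example boot command line:
 *
 *	cgroup.memory=nosocket,nokmem
 *
 * disables socket memory accounting and kernel memory accounting;
 * "nobpf" additionally disables BPF memory accounting. Unrecognized
 * tokens are silently ignored.
 */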
7728
7729/*
7730 * subsys_initcall() for memory controller.
7731 *
7732 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7733 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7734 * basically everything that doesn't depend on a specific mem_cgroup structure
7735 * should be initialized from here.
7736 */
7737static int __init mem_cgroup_init(void)
7738{
7739	int cpu, node;
7740
7741	/*
7742	 * Currently s32 type (can refer to struct batched_lruvec_stat) is
7743	 * used for per-memcg-per-cpu caching of per-node statistics. In order
7744	 * to work correctly, we must make sure that the overfill threshold can't
7745	 * exceed S32_MAX / PAGE_SIZE.
7746	 */
7747	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7748
7749	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7750				  memcg_hotplug_cpu_dead);
7751
7752	for_each_possible_cpu(cpu)
7753		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7754			  drain_local_stock);
7755
7756	for_each_node(node) {
7757		struct mem_cgroup_tree_per_node *rtpn;
7758
7759		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, node);
7760
7761		rtpn->rb_root = RB_ROOT;
7762		rtpn->rb_rightmost = NULL;
7763		spin_lock_init(&rtpn->lock);
7764		soft_limit_tree.rb_tree_per_node[node] = rtpn;
7765	}
7766
7767	return 0;
7768}
7769subsys_initcall(mem_cgroup_init);
7770
7771#ifdef CONFIG_SWAP
7772static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7773{
7774	while (!refcount_inc_not_zero(&memcg->id.ref)) {
7775		/*
7776		 * The root cgroup cannot be destroyed, so its refcount must
7777		 * always be >= 1.
7778		 */
7779		if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
7780			VM_BUG_ON(1);
7781			break;
7782		}
7783		memcg = parent_mem_cgroup(memcg);
7784		if (!memcg)
7785			memcg = root_mem_cgroup;
7786	}
7787	return memcg;
7788}
7789
7790/**
7791 * mem_cgroup_swapout - transfer a memsw charge to swap
7792 * @folio: folio whose memsw charge to transfer
7793 * @entry: swap entry to move the charge to
7794 *
7795 * Transfer the memsw charge of @folio to @entry.
7796 */
7797void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
7798{
7799	struct mem_cgroup *memcg, *swap_memcg;
7800	unsigned int nr_entries;
7801	unsigned short oldid;
7802
7803	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7804	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
7805
7806	if (mem_cgroup_disabled())
7807		return;
7808
7809	if (!do_memsw_account())
7810		return;
7811
7812	memcg = folio_memcg(folio);
7813
7814	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7815	if (!memcg)
7816		return;
7817
7818	/*
7819	 * In case the memcg owning these pages has been offlined and doesn't
7820	 * have an ID allocated to it anymore, charge the closest online
7821	 * ancestor for the swap instead and transfer the memory+swap charge.
7822	 */
7823	swap_memcg = mem_cgroup_id_get_online(memcg);
7824	nr_entries = folio_nr_pages(folio);
7825	/* Get references for the tail pages, too */
7826	if (nr_entries > 1)
7827		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7828	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7829				   nr_entries);
7830	VM_BUG_ON_FOLIO(oldid, folio);
7831	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7832
7833	folio->memcg_data = 0;
7834
7835	if (!mem_cgroup_is_root(memcg))
7836		page_counter_uncharge(&memcg->memory, nr_entries);
7837
7838	if (memcg != swap_memcg) {
7839		if (!mem_cgroup_is_root(swap_memcg))
7840			page_counter_charge(&swap_memcg->memsw, nr_entries);
7841		page_counter_uncharge(&memcg->memsw, nr_entries);
7842	}
7843
7844	/*
7845	 * Interrupts should be disabled here because the caller holds the
7846	 * i_pages lock which is taken with interrupts-off. It is
7847	 * important here to have the interrupts disabled because it is the
7848	 * only synchronisation we have for updating the per-CPU variables.
7849	 */
7850	memcg_stats_lock();
7851	mem_cgroup_charge_statistics(memcg, -nr_entries);
7852	memcg_stats_unlock();
7853	memcg_check_events(memcg, folio_nid(folio));
7854
7855	css_put(&memcg->css);
7856}
7857
7858/**
7859 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
7860 * @folio: folio being added to swap
7861 * @entry: swap entry to charge
7862 *
7863 * Try to charge @folio's memcg for the swap space at @entry.
7864 *
7865 * Returns 0 on success, -ENOMEM on failure.
7866 */
7867int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
7868{
7869	unsigned int nr_pages = folio_nr_pages(folio);
7870	struct page_counter *counter;
7871	struct mem_cgroup *memcg;
7872	unsigned short oldid;
7873
7874	if (do_memsw_account())
7875		return 0;
7876
7877	memcg = folio_memcg(folio);
7878
7879	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7880	if (!memcg)
7881		return 0;
7882
7883	if (!entry.val) {
7884		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7885		return 0;
7886	}
7887
7888	memcg = mem_cgroup_id_get_online(memcg);
7889
7890	if (!mem_cgroup_is_root(memcg) &&
7891	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7892		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7893		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7894		mem_cgroup_id_put(memcg);
7895		return -ENOMEM;
7896	}
7897
7898	/* Get references for the tail pages, too */
7899	if (nr_pages > 1)
7900		mem_cgroup_id_get_many(memcg, nr_pages - 1);
7901	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7902	VM_BUG_ON_FOLIO(oldid, folio);
7903	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7904
7905	return 0;
7906}
7907
7908/**
7909 * __mem_cgroup_uncharge_swap - uncharge swap space
7910 * @entry: swap entry to uncharge
7911 * @nr_pages: the amount of swap space to uncharge
7912 */
7913void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7914{
7915	struct mem_cgroup *memcg;
7916	unsigned short id;
7917
7918	id = swap_cgroup_record(entry, 0, nr_pages);
7919	rcu_read_lock();
7920	memcg = mem_cgroup_from_id(id);
7921	if (memcg) {
7922		if (!mem_cgroup_is_root(memcg)) {
7923			if (do_memsw_account())
7924				page_counter_uncharge(&memcg->memsw, nr_pages);
7925			else
7926				page_counter_uncharge(&memcg->swap, nr_pages);
7927		}
7928		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7929		mem_cgroup_id_put_many(memcg, nr_pages);
7930	}
7931	rcu_read_unlock();
7932}
7933
7934long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7935{
7936	long nr_swap_pages = get_nr_swap_pages();
7937
7938	if (mem_cgroup_disabled() || do_memsw_account())
7939		return nr_swap_pages;
7940	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
7941		nr_swap_pages = min_t(long, nr_swap_pages,
7942				      READ_ONCE(memcg->swap.max) -
7943				      page_counter_read(&memcg->swap));
7944	return nr_swap_pages;
7945}
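
/*
 * Example: with 4G of swap free globally, a cgroup whose parent has
 * memory.swap.max=1G with 256M of swap already charged, and which has
 * no swap limit of its own, gets min(4G, 1G - 256M) = 768M here: the
 * walk takes the tightest remaining headroom along the ancestry.
 */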
7946
7947bool mem_cgroup_swap_full(struct folio *folio)
7948{
7949	struct mem_cgroup *memcg;
7950
7951	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
7952
7953	if (vm_swap_full())
7954		return true;
7955	if (do_memsw_account())
7956		return false;
7957
7958	memcg = folio_memcg(folio);
7959	if (!memcg)
7960		return false;
7961
7962	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
7963		unsigned long usage = page_counter_read(&memcg->swap);
7964
7965		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7966		    usage * 2 >= READ_ONCE(memcg->swap.max))
7967			return true;
7968	}
7969
7970	return false;
7971}
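
/*
 * Example: a cgroup with memory.swap.max=1G is considered "swap full"
 * here once its swap usage reaches 512M, i.e. at half of the limit
 * (or half of memory.swap.high, whichever is hit first), mirroring
 * the global vm_swap_full() heuristic.
 */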
7972
7973static int __init setup_swap_account(char *s)
7974{
7975	bool res;
7976
7977	if (!kstrtobool(s, &res) && !res)
7978		pr_warn_once("The swapaccount=0 commandline option is deprecated "
7979			     "in favor of configuring swap control via cgroupfs. "
7980			     "Please report your usecase to linux-mm@kvack.org if you "
7981			     "depend on this functionality.\n");
7982	return 1;
7983}
7984__setup("swapaccount=", setup_swap_account);
7985
7986static u64 swap_current_read(struct cgroup_subsys_state *css,
7987			     struct cftype *cft)
7988{
7989	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7990
7991	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7992}
7993
7994static u64 swap_peak_read(struct cgroup_subsys_state *css,
7995			  struct cftype *cft)
7996{
7997	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7998
7999	return (u64)memcg->swap.watermark * PAGE_SIZE;
8000}
8001
8002static int swap_high_show(struct seq_file *m, void *v)
8003{
8004	return seq_puts_memcg_tunable(m,
8005		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
8006}
8007
8008static ssize_t swap_high_write(struct kernfs_open_file *of,
8009			       char *buf, size_t nbytes, loff_t off)
8010{
8011	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8012	unsigned long high;
8013	int err;
8014
8015	buf = strstrip(buf);
8016	err = page_counter_memparse(buf, "max", &high);
8017	if (err)
8018		return err;
8019
8020	page_counter_set_high(&memcg->swap, high);
8021
8022	return nbytes;
8023}
8024
8025static int swap_max_show(struct seq_file *m, void *v)
8026{
8027	return seq_puts_memcg_tunable(m,
8028		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
8029}
8030
8031static ssize_t swap_max_write(struct kernfs_open_file *of,
8032			      char *buf, size_t nbytes, loff_t off)
8033{
8034	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8035	unsigned long max;
8036	int err;
8037
8038	buf = strstrip(buf);
8039	err = page_counter_memparse(buf, "max", &max);
8040	if (err)
8041		return err;
8042
8043	xchg(&memcg->swap.max, max);
8044
8045	return nbytes;
8046}
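
/*
 * Usage example (cgroup v2): limits are written in bytes, with the
 * usual memparse() suffixes, or as the literal "max":
 *
 *	echo 512M > memory.swap.max
 *	echo max > memory.swap.max
 *
 * memory.swap.high is written the same way via swap_high_write().
 */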
8047
8048static int swap_events_show(struct seq_file *m, void *v)
8049{
8050	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
8051
8052	seq_printf(m, "high %lu\n",
8053		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
8054	seq_printf(m, "max %lu\n",
8055		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
8056	seq_printf(m, "fail %lu\n",
8057		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
8058
8059	return 0;
8060}
8061
8062static struct cftype swap_files[] = {
8063	{
8064		.name = "swap.current",
8065		.flags = CFTYPE_NOT_ON_ROOT,
8066		.read_u64 = swap_current_read,
8067	},
8068	{
8069		.name = "swap.high",
8070		.flags = CFTYPE_NOT_ON_ROOT,
8071		.seq_show = swap_high_show,
8072		.write = swap_high_write,
8073	},
8074	{
8075		.name = "swap.max",
8076		.flags = CFTYPE_NOT_ON_ROOT,
8077		.seq_show = swap_max_show,
8078		.write = swap_max_write,
8079	},
8080	{
8081		.name = "swap.peak",
8082		.flags = CFTYPE_NOT_ON_ROOT,
8083		.read_u64 = swap_peak_read,
8084	},
8085	{
8086		.name = "swap.events",
8087		.flags = CFTYPE_NOT_ON_ROOT,
8088		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
8089		.seq_show = swap_events_show,
8090	},
8091	{ }	/* terminate */
8092};
8093
8094static struct cftype memsw_files[] = {
8095	{
8096		.name = "memsw.usage_in_bytes",
8097		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
8098		.read_u64 = mem_cgroup_read_u64,
8099	},
8100	{
8101		.name = "memsw.max_usage_in_bytes",
8102		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
8103		.write = mem_cgroup_reset,
8104		.read_u64 = mem_cgroup_read_u64,
8105	},
8106	{
8107		.name = "memsw.limit_in_bytes",
8108		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
8109		.write = mem_cgroup_write,
8110		.read_u64 = mem_cgroup_read_u64,
8111	},
8112	{
8113		.name = "memsw.failcnt",
8114		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
8115		.write = mem_cgroup_reset,
8116		.read_u64 = mem_cgroup_read_u64,
8117	},
8118	{ },	/* terminate */
8119};
8120
8121#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
8122/**
8123 * obj_cgroup_may_zswap - check if this cgroup can zswap
8124 * @objcg: the object cgroup
8125 *
8126 * Check if the hierarchical zswap limit has been reached.
8127 *
8128 * This doesn't check for specific headroom, and it is not atomic
8129 * either. But with zswap, the size of the allocation is only known
8130 * once compression has occurred, and this optimistic pre-check avoids
8131 * spending cycles on compression when there is already no room left
8132 * or zswap is disabled altogether somewhere in the hierarchy.
8133 */
8134bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
8135{
8136	struct mem_cgroup *memcg, *original_memcg;
8137	bool ret = true;
8138
8139	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8140		return true;
8141
8142	original_memcg = get_mem_cgroup_from_objcg(objcg);
8143	for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
8144	     memcg = parent_mem_cgroup(memcg)) {
8145		unsigned long max = READ_ONCE(memcg->zswap_max);
8146		unsigned long pages;
8147
8148		if (max == PAGE_COUNTER_MAX)
8149			continue;
8150		if (max == 0) {
8151			ret = false;
8152			break;
8153		}
8154
8155		/*
8156		 * mem_cgroup_flush_stats() ignores small changes. Use
8157		 * do_flush_stats() directly to get accurate stats for charging.
8158		 */
8159		do_flush_stats(memcg);
8160		pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
8161		if (pages < max)
8162			continue;
8163		ret = false;
8164		break;
8165	}
8166	mem_cgroup_put(original_memcg);
8167	return ret;
8168}
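
/*
 * Example: with memory.zswap.max=64M set on an ancestor, zswap stores
 * for this cgroup are optimistically allowed while the hierarchy's
 * zswapped bytes (MEMCG_ZSWAP_B) stay below 64M; once the limit is
 * reached, or if the limit is 0 (zswap disabled for the subtree), new
 * stores are rejected and the pages go to the backing swap device.
 */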
8169
8170/**
8171 * obj_cgroup_charge_zswap - charge compression backend memory
8172 * @objcg: the object cgroup
8173 * @size: size of compressed object
8174 *
8175 * This forces the charge after obj_cgroup_may_zswap() allowed
8176 * compression and storage in zswap for this cgroup to go ahead.
8177 */
8178void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
8179{
8180	struct mem_cgroup *memcg;
8181
8182	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8183		return;
8184
8185	VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
8186
8187	/* PF_MEMALLOC context, charging must succeed */
8188	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
8189		VM_WARN_ON_ONCE(1);
8190
8191	rcu_read_lock();
8192	memcg = obj_cgroup_memcg(objcg);
8193	mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
8194	mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
8195	rcu_read_unlock();
8196}
8197
8198/**
8199 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
8200 * @objcg: the object cgroup
8201 * @size: size of compressed object
8202 *
8203 * Uncharges zswap memory on page in.
8204 */
8205void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
8206{
8207	struct mem_cgroup *memcg;
8208
8209	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
8210		return;
8211
8212	obj_cgroup_uncharge(objcg, size);
8213
8214	rcu_read_lock();
8215	memcg = obj_cgroup_memcg(objcg);
8216	mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
8217	mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
8218	rcu_read_unlock();
8219}
8220
8221bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
8222{
8223	/* if zswap is disabled, do not block pages going to the swapping device */
8224	return !is_zswap_enabled() || !memcg || READ_ONCE(memcg->zswap_writeback);
8225}
8226
8227static u64 zswap_current_read(struct cgroup_subsys_state *css,
8228			      struct cftype *cft)
8229{
8230	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
8231
8232	mem_cgroup_flush_stats(memcg);
8233	return memcg_page_state(memcg, MEMCG_ZSWAP_B);
8234}
8235
8236static int zswap_max_show(struct seq_file *m, void *v)
8237{
8238	return seq_puts_memcg_tunable(m,
8239		READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
8240}
8241
8242static ssize_t zswap_max_write(struct kernfs_open_file *of,
8243			       char *buf, size_t nbytes, loff_t off)
8244{
8245	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8246	unsigned long max;
8247	int err;
8248
8249	buf = strstrip(buf);
8250	err = page_counter_memparse(buf, "max", &max);
8251	if (err)
8252		return err;
8253
8254	xchg(&memcg->zswap_max, max);
8255
8256	return nbytes;
8257}
8258
8259static int zswap_writeback_show(struct seq_file *m, void *v)
8260{
8261	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
8262
8263	seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
8264	return 0;
8265}
8266
8267static ssize_t zswap_writeback_write(struct kernfs_open_file *of,
8268				char *buf, size_t nbytes, loff_t off)
8269{
8270	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
8271	int zswap_writeback;
8272	ssize_t parse_ret = kstrtoint(strstrip(buf), 0, &zswap_writeback);
8273
8274	if (parse_ret)
8275		return parse_ret;
8276
8277	if (zswap_writeback != 0 && zswap_writeback != 1)
8278		return -EINVAL;
8279
8280	WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
8281	return nbytes;
8282}
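
/*
 * Usage example (cgroup v2): only "0" and "1" are accepted:
 *
 *	echo 0 > memory.zswap.writeback
 *
 * With writeback disabled, pages are not written to the backing swap
 * device for this cgroup, neither by zswap writeback nor on zswap
 * store failure (see mem_cgroup_zswap_writeback_enabled() above).
 */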
8283
8284static struct cftype zswap_files[] = {
8285	{
8286		.name = "zswap.current",
8287		.flags = CFTYPE_NOT_ON_ROOT,
8288		.read_u64 = zswap_current_read,
8289	},
8290	{
8291		.name = "zswap.max",
8292		.flags = CFTYPE_NOT_ON_ROOT,
8293		.seq_show = zswap_max_show,
8294		.write = zswap_max_write,
8295	},
8296	{
8297		.name = "zswap.writeback",
8298		.seq_show = zswap_writeback_show,
8299		.write = zswap_writeback_write,
8300	},
8301	{ }	/* terminate */
8302};
8303#endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
8304
8305static int __init mem_cgroup_swap_init(void)
8306{
8307	if (mem_cgroup_disabled())
8308		return 0;
8309
8310	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
8311	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
8312#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
8313	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
8314#endif
8315	return 0;
8316}
8317subsys_initcall(mem_cgroup_swap_init);
8318
8319#endif /* CONFIG_SWAP */