Linux v4.17: mm/memcontrol.c
   1/* memcontrol.c - Memory Controller
   2 *
   3 * Copyright IBM Corporation, 2007
   4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
   5 *
   6 * Copyright 2007 OpenVZ SWsoft Inc
   7 * Author: Pavel Emelianov <xemul@openvz.org>
   8 *
   9 * Memory thresholds
  10 * Copyright (C) 2009 Nokia Corporation
  11 * Author: Kirill A. Shutemov
  12 *
  13 * Kernel Memory Controller
  14 * Copyright (C) 2012 Parallels Inc. and Google Inc.
  15 * Authors: Glauber Costa and Suleiman Souhlal
  16 *
  17 * Native page reclaim
  18 * Charge lifetime sanitation
  19 * Lockless page tracking & accounting
  20 * Unified hierarchy configuration model
  21 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
  22 *
  23 * This program is free software; you can redistribute it and/or modify
  24 * it under the terms of the GNU General Public License as published by
  25 * the Free Software Foundation; either version 2 of the License, or
  26 * (at your option) any later version.
  27 *
  28 * This program is distributed in the hope that it will be useful,
  29 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  31 * GNU General Public License for more details.
  32 */
  33
  34#include <linux/page_counter.h>
  35#include <linux/memcontrol.h>
  36#include <linux/cgroup.h>
  37#include <linux/mm.h>
  38#include <linux/sched/mm.h>
  39#include <linux/shmem_fs.h>
  40#include <linux/hugetlb.h>
  41#include <linux/pagemap.h>
  42#include <linux/smp.h>
  43#include <linux/page-flags.h>
  44#include <linux/backing-dev.h>
  45#include <linux/bit_spinlock.h>
  46#include <linux/rcupdate.h>
  47#include <linux/limits.h>
  48#include <linux/export.h>
  49#include <linux/mutex.h>
  50#include <linux/rbtree.h>
  51#include <linux/slab.h>
  52#include <linux/swap.h>
  53#include <linux/swapops.h>
  54#include <linux/spinlock.h>
  55#include <linux/eventfd.h>
  56#include <linux/poll.h>
  57#include <linux/sort.h>
  58#include <linux/fs.h>
  59#include <linux/seq_file.h>
  60#include <linux/vmpressure.h>
  61#include <linux/mm_inline.h>
  62#include <linux/swap_cgroup.h>
  63#include <linux/cpu.h>
  64#include <linux/oom.h>
  65#include <linux/lockdep.h>
  66#include <linux/file.h>
  67#include <linux/tracehook.h>
  68#include "internal.h"
  69#include <net/sock.h>
  70#include <net/ip.h>
  71#include "slab.h"
  72
  73#include <linux/uaccess.h>
  74
  75#include <trace/events/vmscan.h>
  76
  77struct cgroup_subsys memory_cgrp_subsys __read_mostly;
  78EXPORT_SYMBOL(memory_cgrp_subsys);
  79
  80struct mem_cgroup *root_mem_cgroup __read_mostly;
  81
  82#define MEM_CGROUP_RECLAIM_RETRIES	5
  83
  84/* Socket memory accounting disabled? */
  85static bool cgroup_memory_nosocket;
  86
  87/* Kernel memory accounting disabled? */
  88static bool cgroup_memory_nokmem;
  89
  90/* Whether the swap controller is active */
  91#ifdef CONFIG_MEMCG_SWAP
  92int do_swap_account __read_mostly;
  93#else
  94#define do_swap_account		0
  95#endif
  96
  97/* Whether legacy memory+swap accounting is active */
  98static bool do_memsw_account(void)
  99{
 100	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
 101}
 102
 103static const char *const mem_cgroup_lru_names[] = {
 104	"inactive_anon",
 105	"active_anon",
 106	"inactive_file",
 107	"active_file",
 108	"unevictable",
 109};
 110
 111#define THRESHOLDS_EVENTS_TARGET 128
 112#define SOFTLIMIT_EVENTS_TARGET 1024
 113#define NUMAINFO_EVENTS_TARGET	1024
 114
 115/*
 116 * Cgroups above their limits are maintained in a RB-Tree, independent of
 117 * their hierarchy representation
 118 */
 119
 120struct mem_cgroup_tree_per_node {
 121	struct rb_root rb_root;
 122	struct rb_node *rb_rightmost;
 123	spinlock_t lock;
 124};
 125
 126struct mem_cgroup_tree {
 127	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
 128};
 129
 130static struct mem_cgroup_tree soft_limit_tree __read_mostly;
 131
 132/* for OOM */
 133struct mem_cgroup_eventfd_list {
 134	struct list_head list;
 135	struct eventfd_ctx *eventfd;
 136};
 137
 138/*
 139 * cgroup_event represents events which userspace wants to receive.
 140 */
 141struct mem_cgroup_event {
 142	/*
 143	 * memcg which the event belongs to.
 144	 */
 145	struct mem_cgroup *memcg;
 146	/*
 147	 * eventfd to signal userspace about the event.
 148	 */
 149	struct eventfd_ctx *eventfd;
 150	/*
 151	 * Each of these is stored in a list by the cgroup.
 152	 */
 153	struct list_head list;
 154	/*
 155	 * register_event() callback will be used to add a new userspace
 156	 * waiter for changes related to this event.  Use eventfd_signal()
 157	 * on eventfd to send notification to userspace.
 158	 */
 159	int (*register_event)(struct mem_cgroup *memcg,
 160			      struct eventfd_ctx *eventfd, const char *args);
 161	/*
 162	 * unregister_event() callback will be called when userspace closes
 163	 * the eventfd or on cgroup removal.  This callback must be set
 164	 * if you want to provide notification functionality.
 165	 */
 166	void (*unregister_event)(struct mem_cgroup *memcg,
 167				 struct eventfd_ctx *eventfd);
 168	/*
 169	 * All fields below are needed to unregister the event when
 170	 * userspace closes the eventfd.
 171	 */
 172	poll_table pt;
 173	wait_queue_head_t *wqh;
 174	wait_queue_entry_t wait;
 175	struct work_struct remove;
 176};
 177
 178static void mem_cgroup_threshold(struct mem_cgroup *memcg);
 179static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
 180
 181/* Stuff for moving charges at task migration. */
 182/*
 183 * Types of charges to be moved.
 184 */
 185#define MOVE_ANON	0x1U
 186#define MOVE_FILE	0x2U
 187#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
 188
 189/* "mc" and its members are protected by cgroup_mutex */
 190static struct move_charge_struct {
 191	spinlock_t	  lock; /* for from, to */
 192	struct mm_struct  *mm;
 193	struct mem_cgroup *from;
 194	struct mem_cgroup *to;
 195	unsigned long flags;
 196	unsigned long precharge;
 197	unsigned long moved_charge;
 198	unsigned long moved_swap;
 199	struct task_struct *moving_task;	/* a task moving charges */
 200	wait_queue_head_t waitq;		/* a waitq for other context */
 201} mc = {
 202	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
 203	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
 204};
 205
 206/*
 207 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 208 * limit reclaim to prevent infinite loops, if they ever occur.
 209 */
 210#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
 211#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
 212
 213enum charge_type {
 214	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
 215	MEM_CGROUP_CHARGE_TYPE_ANON,
 216	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
 217	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
 218	NR_CHARGE_TYPE,
 219};
 220
 221/* for encoding cft->private value on file */
 222enum res_type {
 223	_MEM,
 224	_MEMSWAP,
 225	_OOM_TYPE,
 226	_KMEM,
 227	_TCP,
 228};
 229
 230#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
 231#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
 232#define MEMFILE_ATTR(val)	((val) & 0xffff)
 233/* Used for OOM notifier */
 234#define OOM_CONTROL		(0)
 235
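/*
 * Example (illustrative sketch, not part of memcontrol.c): cft->private
 * packs the resource type into the high 16 bits and an attribute into
 * the low 16 bits.  memory.oom_control, for instance, is registered
 * with MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), and a handler can
 * unpack it like this:
 */
static inline void memfile_unpack_example(unsigned long priv)
{
	WARN_ON(MEMFILE_TYPE(priv) != _OOM_TYPE);	/* high 16 bits */
	WARN_ON(MEMFILE_ATTR(priv) != OOM_CONTROL);	/* low 16 bits */
}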
 236/* Some nice accessors for the vmpressure. */
 237struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
 238{
 239	if (!memcg)
 240		memcg = root_mem_cgroup;
 241	return &memcg->vmpressure;
 242}
 243
 244struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
 245{
 246	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
 247}
 248
 249static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
 250{
 251	return (memcg == root_mem_cgroup);
 252}
 253
 254#ifndef CONFIG_SLOB
 255/*
 256 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 257 * The main reason for not using the cgroup id for this is that it
 258 *  works better in sparse environments, where we have a lot of memcgs
 259 *  but only a few of them kmem-limited. If, for instance, we had 200
 260 *  memcgs and none but the 200th were kmem-limited, we would need a
 261 *  200-entry array just for that one.
 262 *
 263 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 264 * will double each time we have to increase it.
 265 */
 266static DEFINE_IDA(memcg_cache_ida);
 267int memcg_nr_cache_ids;
 268
 269/* Protects memcg_nr_cache_ids */
 270static DECLARE_RWSEM(memcg_cache_ids_sem);
 271
 272void memcg_get_cache_ids(void)
 273{
 274	down_read(&memcg_cache_ids_sem);
 275}
 276
 277void memcg_put_cache_ids(void)
 278{
 279	up_read(&memcg_cache_ids_sem);
 280}
 281
 282/*
 283 * MIN_SIZE is larger than 1 because we would like to avoid going through
 284 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 285 * cgroups is a reasonable guess. In the future, it could be a parameter or
 286 * tunable, but that is strictly not necessary.
 287 *
 288 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 289 * this constant directly from cgroup, but it is understandable that this is
 290 * better kept as an internal representation in cgroup.c. In any case, the
 291 * cgrp_id space is not getting any smaller, and we don't necessarily have to
 292 * increase ours as well if it increases.
 293 */
 294#define MEMCG_CACHES_MIN_SIZE 4
 295#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
 296
 297/*
 298 * A lot of the calls to the cache allocation functions are expected to be
 299 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 300 * conditional on this static branch, we have to allow modules that do
 301 * kmem_cache_alloc and the like to see this symbol as well.
 302 */
 303DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
 304EXPORT_SYMBOL(memcg_kmem_enabled_key);
 305
 306struct workqueue_struct *memcg_kmem_cache_wq;
 307
 308#endif /* !CONFIG_SLOB */
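/*
 * Example (illustrative sketch, assuming CONFIG_MEMCG && !CONFIG_SLOB):
 * hot allocation paths test the key above through the memcg_kmem_enabled()
 * wrapper from <linux/memcontrol.h>, so the accounting branch costs
 * nothing until the first kmem-limited cgroup flips the key:
 */
static inline void kmem_alloc_path_example(void)
{
	if (memcg_kmem_enabled()) {
		/* slow path: per-memcg cache lookup and charging */
	}
}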
 309
 310/**
 311 * mem_cgroup_css_from_page - css of the memcg associated with a page
 312 * @page: page of interest
 313 *
 314 * If memcg is bound to the default hierarchy, css of the memcg associated
 315 * with @page is returned.  The returned css remains associated with @page
 316 * until it is released.
 317 *
 318 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 319 * is returned.
 320 */
 321struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
 322{
 323	struct mem_cgroup *memcg;
 324
 325	memcg = page->mem_cgroup;
 326
 327	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
 328		memcg = root_mem_cgroup;
 329
 330	return &memcg->css;
 331}
 332
 333/**
 334 * page_cgroup_ino - return inode number of the memcg a page is charged to
 335 * @page: the page
 336 *
 337 * Look up the closest online ancestor of the memory cgroup @page is charged to
 338 * and return its inode number or 0 if @page is not charged to any cgroup. It
 339 * is safe to call this function without holding a reference to @page.
 340 *
 341 * Note, this function is inherently racy, because there is nothing to prevent
 342 * the cgroup inode from getting torn down and potentially reallocated a moment
 343 * after page_cgroup_ino() returns, so it only should be used by callers that
 344 * do not care (such as procfs interfaces).
 345 */
 346ino_t page_cgroup_ino(struct page *page)
 347{
 348	struct mem_cgroup *memcg;
 349	unsigned long ino = 0;
 350
 351	rcu_read_lock();
 352	memcg = READ_ONCE(page->mem_cgroup);
 353	while (memcg && !(memcg->css.flags & CSS_ONLINE))
 354		memcg = parent_mem_cgroup(memcg);
 355	if (memcg)
 356		ino = cgroup_ino(memcg->css.cgroup);
 357	rcu_read_unlock();
 358	return ino;
 359}
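/*
 * Example (illustrative, not part of this file): /proc/kpagecgroup is
 * the in-tree consumer; it reports this inode number to userspace and
 * simply tolerates the staleness described above:
 */
static u64 kpagecgroup_entry_example(struct page *page)
{
	/* 0 means the page is not charged to any cgroup */
	return page ? page_cgroup_ino(page) : 0;
}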
 360
 361static struct mem_cgroup_per_node *
 362mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
 363{
 364	int nid = page_to_nid(page);
 365
 366	return memcg->nodeinfo[nid];
 367}
 368
 369static struct mem_cgroup_tree_per_node *
 370soft_limit_tree_node(int nid)
 371{
 372	return soft_limit_tree.rb_tree_per_node[nid];
 373}
 374
 375static struct mem_cgroup_tree_per_node *
 376soft_limit_tree_from_page(struct page *page)
 377{
 378	int nid = page_to_nid(page);
 379
 380	return soft_limit_tree.rb_tree_per_node[nid];
 381}
 382
 383static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
 384					 struct mem_cgroup_tree_per_node *mctz,
 385					 unsigned long new_usage_in_excess)
 386{
 387	struct rb_node **p = &mctz->rb_root.rb_node;
 388	struct rb_node *parent = NULL;
 389	struct mem_cgroup_per_node *mz_node;
 390	bool rightmost = true;
 391
 392	if (mz->on_tree)
 393		return;
 394
 395	mz->usage_in_excess = new_usage_in_excess;
 396	if (!mz->usage_in_excess)
 397		return;
 398	while (*p) {
 399		parent = *p;
 400		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
 401					tree_node);
 402		if (mz->usage_in_excess < mz_node->usage_in_excess) {
 403			p = &(*p)->rb_left;
 404			rightmost = false;
 405		}
 406
 407		/*
 408		 * We can't avoid mem cgroups that are over their soft
 409		 * limit by the same amount
 410		 */
 411		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
 412			p = &(*p)->rb_right;
 413	}
 414
 415	if (rightmost)
 416		mctz->rb_rightmost = &mz->tree_node;
 417
 418	rb_link_node(&mz->tree_node, parent, p);
 419	rb_insert_color(&mz->tree_node, &mctz->rb_root);
 420	mz->on_tree = true;
 421}
 422
 423static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
 424					 struct mem_cgroup_tree_per_node *mctz)
 425{
 426	if (!mz->on_tree)
 427		return;
 428
 429	if (&mz->tree_node == mctz->rb_rightmost)
 430		mctz->rb_rightmost = rb_prev(&mz->tree_node);
 431
 432	rb_erase(&mz->tree_node, &mctz->rb_root);
 433	mz->on_tree = false;
 434}
 435
 436static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
 437				       struct mem_cgroup_tree_per_node *mctz)
 438{
 439	unsigned long flags;
 440
 441	spin_lock_irqsave(&mctz->lock, flags);
 442	__mem_cgroup_remove_exceeded(mz, mctz);
 443	spin_unlock_irqrestore(&mctz->lock, flags);
 444}
 445
 446static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
 447{
 448	unsigned long nr_pages = page_counter_read(&memcg->memory);
 449	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
 450	unsigned long excess = 0;
 451
 452	if (nr_pages > soft_limit)
 453		excess = nr_pages - soft_limit;
 454
 455	return excess;
 456}
 457
 458static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
 459{
 460	unsigned long excess;
 461	struct mem_cgroup_per_node *mz;
 462	struct mem_cgroup_tree_per_node *mctz;
 463
 464	mctz = soft_limit_tree_from_page(page);
 465	if (!mctz)
 466		return;
 467	/*
 468	 * Necessary to update all ancestors when hierarchy is used,
 469	 * because their event counter is not touched.
 470	 */
 471	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
 472		mz = mem_cgroup_page_nodeinfo(memcg, page);
 473		excess = soft_limit_excess(memcg);
 474		/*
 475		 * We have to update the tree if mz is on RB-tree or
 476		 * mem is over its softlimit.
 477		 */
 478		if (excess || mz->on_tree) {
 479			unsigned long flags;
 480
 481			spin_lock_irqsave(&mctz->lock, flags);
 482			/* if on-tree, remove it */
 483			if (mz->on_tree)
 484				__mem_cgroup_remove_exceeded(mz, mctz);
 485			/*
 486			 * Insert again. mz->usage_in_excess will be updated.
 487			 * If excess is 0, no tree ops.
 488			 */
 489			__mem_cgroup_insert_exceeded(mz, mctz, excess);
 490			spin_unlock_irqrestore(&mctz->lock, flags);
 491		}
 492	}
 493}
 494
 495static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
 496{
 497	struct mem_cgroup_tree_per_node *mctz;
 498	struct mem_cgroup_per_node *mz;
 499	int nid;
 500
 501	for_each_node(nid) {
 502		mz = mem_cgroup_nodeinfo(memcg, nid);
 503		mctz = soft_limit_tree_node(nid);
 504		if (mctz)
 505			mem_cgroup_remove_exceeded(mz, mctz);
 506	}
 507}
 508
 509static struct mem_cgroup_per_node *
 510__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
 511{
 512	struct mem_cgroup_per_node *mz;
 513
 514retry:
 515	mz = NULL;
 516	if (!mctz->rb_rightmost)
 517		goto done;		/* Nothing to reclaim from */
 518
 519	mz = rb_entry(mctz->rb_rightmost,
 520		      struct mem_cgroup_per_node, tree_node);
 521	/*
 522	 * Remove the node now but someone else can add it back,
 523	 * we will add it back at the end of reclaim to its correct
 524	 * position in the tree.
 525	 */
 526	__mem_cgroup_remove_exceeded(mz, mctz);
 527	if (!soft_limit_excess(mz->memcg) ||
 528	    !css_tryget_online(&mz->memcg->css))
 529		goto retry;
 530done:
 531	return mz;
 532}
 533
 534static struct mem_cgroup_per_node *
 535mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
 536{
 537	struct mem_cgroup_per_node *mz;
 538
 539	spin_lock_irq(&mctz->lock);
 540	mz = __mem_cgroup_largest_soft_limit_node(mctz);
 541	spin_unlock_irq(&mctz->lock);
 542	return mz;
 543}
 544
 545static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
 546				      int event)
 547{
 548	return atomic_long_read(&memcg->events[event]);
 549}
 550
 551static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 552					 struct page *page,
 553					 bool compound, int nr_pages)
 554{
 555	/*
 556	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
 557	 * counted as CACHE even if it's on ANON LRU.
 558	 */
 559	if (PageAnon(page))
 560		__mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
 561	else {
 562		__mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
 563		if (PageSwapBacked(page))
 564			__mod_memcg_state(memcg, NR_SHMEM, nr_pages);
 565	}
 566
 567	if (compound) {
 568		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 569		__mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
 570	}
 571
 572	/* pagein of a big page is an event. So, ignore page size */
 573	if (nr_pages > 0)
 574		__count_memcg_events(memcg, PGPGIN, 1);
 575	else {
 576		__count_memcg_events(memcg, PGPGOUT, 1);
 577		nr_pages = -nr_pages; /* for event */
 578	}
 579
 580	__this_cpu_add(memcg->stat_cpu->nr_page_events, nr_pages);
 581}
 582
 583unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
 584					   int nid, unsigned int lru_mask)
 585{
 586	struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
 587	unsigned long nr = 0;
 588	enum lru_list lru;
 589
 590	VM_BUG_ON((unsigned)nid >= nr_node_ids);
 591
 592	for_each_lru(lru) {
 593		if (!(BIT(lru) & lru_mask))
 594			continue;
 595		nr += mem_cgroup_get_lru_size(lruvec, lru);
 596	}
 597	return nr;
 598}
 599
 600static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
 601			unsigned int lru_mask)
 602{
 603	unsigned long nr = 0;
 604	int nid;
 605
 606	for_each_node_state(nid, N_MEMORY)
 607		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
 608	return nr;
 609}
 610
 611static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
 612				       enum mem_cgroup_events_target target)
 613{
 614	unsigned long val, next;
 615
 616	val = __this_cpu_read(memcg->stat_cpu->nr_page_events);
 617	next = __this_cpu_read(memcg->stat_cpu->targets[target]);
 618	/* from time_after() in jiffies.h */
 619	if ((long)(next - val) < 0) {
 620		switch (target) {
 621		case MEM_CGROUP_TARGET_THRESH:
 622			next = val + THRESHOLDS_EVENTS_TARGET;
 623			break;
 624		case MEM_CGROUP_TARGET_SOFTLIMIT:
 625			next = val + SOFTLIMIT_EVENTS_TARGET;
 626			break;
 627		case MEM_CGROUP_TARGET_NUMAINFO:
 628			next = val + NUMAINFO_EVENTS_TARGET;
 629			break;
 630		default:
 631			break;
 632		}
 633		__this_cpu_write(memcg->stat_cpu->targets[target], next);
 634		return true;
 635	}
 636	return false;
 637}
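/*
 * Example (illustrative, not part of this file): the signed comparison
 * above is the same wrap-safe idiom as time_after() in jiffies.h, so it
 * keeps working when the per-cpu event counter wraps around ULONG_MAX:
 */
static inline bool target_passed_example(void)
{
	unsigned long next = 27;	/* target set just before the wrap */
	unsigned long val = 50;		/* counter after wrapping ULONG_MAX */

	return (long)(next - val) < 0;	/* true: the target was passed */
}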
 638
 639/*
 640 * Check events in order.
 641 *
 642 */
 643static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 644{
 645	/* threshold event is triggered in finer grain than soft limit */
 646	if (unlikely(mem_cgroup_event_ratelimit(memcg,
 647						MEM_CGROUP_TARGET_THRESH))) {
 648		bool do_softlimit;
 649		bool do_numainfo __maybe_unused;
 650
 651		do_softlimit = mem_cgroup_event_ratelimit(memcg,
 652						MEM_CGROUP_TARGET_SOFTLIMIT);
 653#if MAX_NUMNODES > 1
 654		do_numainfo = mem_cgroup_event_ratelimit(memcg,
 655						MEM_CGROUP_TARGET_NUMAINFO);
 656#endif
 657		mem_cgroup_threshold(memcg);
 658		if (unlikely(do_softlimit))
 659			mem_cgroup_update_tree(memcg, page);
 660#if MAX_NUMNODES > 1
 661		if (unlikely(do_numainfo))
 662			atomic_inc(&memcg->numainfo_events);
 663#endif
 664	}
 665}
 666
 667struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 668{
 669	/*
 670	 * mm_update_next_owner() may clear mm->owner to NULL
 671	 * if it races with swapoff, page migration, etc.
 672	 * So this can be called with p == NULL.
 673	 */
 674	if (unlikely(!p))
 675		return NULL;
 676
 677	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
 678}
 679EXPORT_SYMBOL(mem_cgroup_from_task);
 680
 681static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
 682{
 683	struct mem_cgroup *memcg = NULL;
 684
 685	rcu_read_lock();
 686	do {
 687		/*
 688		 * Page cache insertions can happen without an
 689		 * actual mm context, e.g. during disk probing
 690		 * on boot, loopback IO, acct() writes etc.
 691		 */
 692		if (unlikely(!mm))
 693			memcg = root_mem_cgroup;
 694		else {
 695			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
 696			if (unlikely(!memcg))
 697				memcg = root_mem_cgroup;
 698		}
 699	} while (!css_tryget_online(&memcg->css));
 700	rcu_read_unlock();
 701	return memcg;
 702}
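/*
 * Example (illustrative sketch, not part of this file): the returned
 * memcg carries a css reference (root_mem_cgroup if @mm is NULL or the
 * owner raced away), which the caller must drop with css_put():
 */
static void charge_target_example(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);

	/* ... charge against or inspect memcg ... */

	css_put(&memcg->css);
}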
 703
 704/**
 705 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 706 * @root: hierarchy root
 707 * @prev: previously returned memcg, NULL on first invocation
 708 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 709 *
 710 * Returns references to children of the hierarchy below @root, or
 711 * @root itself, or %NULL after a full round-trip.
 712 *
 713 * Caller must pass the return value in @prev on subsequent
 714 * invocations for reference counting, or use mem_cgroup_iter_break()
 715 * to cancel a hierarchy walk before the round-trip is complete.
 716 *
 717 * Reclaimers can specify a node and a priority level in @reclaim to
 718 * divide up the memcgs in the hierarchy among all concurrent
 719 * reclaimers operating on the same node and priority.
 720 */
 721struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 722				   struct mem_cgroup *prev,
 723				   struct mem_cgroup_reclaim_cookie *reclaim)
 724{
 725	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
 726	struct cgroup_subsys_state *css = NULL;
 727	struct mem_cgroup *memcg = NULL;
 728	struct mem_cgroup *pos = NULL;
 729
 730	if (mem_cgroup_disabled())
 731		return NULL;
 732
 733	if (!root)
 734		root = root_mem_cgroup;
 735
 736	if (prev && !reclaim)
 737		pos = prev;
 738
 739	if (!root->use_hierarchy && root != root_mem_cgroup) {
 740		if (prev)
 741			goto out;
 742		return root;
 743	}
 744
 745	rcu_read_lock();
 746
 747	if (reclaim) {
 748		struct mem_cgroup_per_node *mz;
 749
 750		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
 751		iter = &mz->iter[reclaim->priority];
 752
 753		if (prev && reclaim->generation != iter->generation)
 754			goto out_unlock;
 755
 756		while (1) {
 757			pos = READ_ONCE(iter->position);
 758			if (!pos || css_tryget(&pos->css))
 759				break;
 760			/*
 761			 * css reference reached zero, so iter->position will
 762			 * be cleared by ->css_released. However, we should not
 763			 * rely on this happening soon, because ->css_released
 764			 * is called from a work queue, and by busy-waiting we
 765			 * might block it. So we clear iter->position right
 766			 * away.
 767			 */
 768			(void)cmpxchg(&iter->position, pos, NULL);
 769		}
 770	}
 771
 772	if (pos)
 773		css = &pos->css;
 774
 775	for (;;) {
 776		css = css_next_descendant_pre(css, &root->css);
 777		if (!css) {
 778			/*
 779			 * Reclaimers share the hierarchy walk, and a
 780			 * new one might jump in right at the end of
 781			 * the hierarchy - make sure they see at least
 782			 * one group and restart from the beginning.
 783			 */
 784			if (!prev)
 785				continue;
 786			break;
 787		}
 788
 789		/*
 790		 * Verify the css and acquire a reference.  The root
 791		 * is provided by the caller, so we know it's alive
 792		 * and kicking, and don't take an extra reference.
 793		 */
 794		memcg = mem_cgroup_from_css(css);
 795
 796		if (css == &root->css)
 797			break;
 798
 799		if (css_tryget(css))
 800			break;
 801
 802		memcg = NULL;
 803	}
 804
 805	if (reclaim) {
 806		/*
 807		 * The position could have already been updated by a competing
 808		 * thread, so check that the value hasn't changed since we read
 809		 * it to avoid reclaiming from the same cgroup twice.
 810		 */
 811		(void)cmpxchg(&iter->position, pos, memcg);
 812
 813		if (pos)
 814			css_put(&pos->css);
 815
 816		if (!memcg)
 817			iter->generation++;
 818		else if (!prev)
 819			reclaim->generation = iter->generation;
 820	}
 821
 822out_unlock:
 823	rcu_read_unlock();
 824out:
 825	if (prev && prev != root)
 826		css_put(&prev->css);
 827
 828	return memcg;
 829}
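/*
 * Example (illustrative sketch, not part of this file): a shared
 * reclaim walk driven by the cookie, in the style of
 * mem_cgroup_soft_reclaim() further below.  The reclaim body and the
 * "done" condition are assumed to be supplied by the caller.
 */
static void shared_walk_example(struct mem_cgroup *root, pg_data_t *pgdat)
{
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
		.priority = 0,
	};
	struct mem_cgroup *memcg = NULL;
	bool done = false;

	while ((memcg = mem_cgroup_iter(root, memcg, &reclaim))) {
		/* ... reclaim from memcg, possibly setting done ... */
		if (done) {
			mem_cgroup_iter_break(root, memcg);
			break;
		}
	}
}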
 830
 831/**
 832 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 833 * @root: hierarchy root
 834 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 835 */
 836void mem_cgroup_iter_break(struct mem_cgroup *root,
 837			   struct mem_cgroup *prev)
 838{
 839	if (!root)
 840		root = root_mem_cgroup;
 841	if (prev && prev != root)
 842		css_put(&prev->css);
 843}
 844
 845static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
 846{
 847	struct mem_cgroup *memcg = dead_memcg;
 848	struct mem_cgroup_reclaim_iter *iter;
 849	struct mem_cgroup_per_node *mz;
 850	int nid;
 851	int i;
 852
 853	while ((memcg = parent_mem_cgroup(memcg))) {
 854		for_each_node(nid) {
 855			mz = mem_cgroup_nodeinfo(memcg, nid);
 856			for (i = 0; i <= DEF_PRIORITY; i++) {
 857				iter = &mz->iter[i];
 858				cmpxchg(&iter->position,
 859					dead_memcg, NULL);
 860			}
 861		}
 862	}
 863}
 864
 865/*
 866 * Iteration constructs for visiting all cgroups (under a tree).  If
 867 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 868 * be used for reference counting.
 869 */
 870#define for_each_mem_cgroup_tree(iter, root)		\
 871	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
 872	     iter != NULL;				\
 873	     iter = mem_cgroup_iter(root, iter, NULL))
 874
 875#define for_each_mem_cgroup(iter)			\
 876	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
 877	     iter != NULL;				\
 878	     iter = mem_cgroup_iter(NULL, iter, NULL))
 879
 880/**
 881 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 882 * @memcg: hierarchy root
 883 * @fn: function to call for each task
 884 * @arg: argument passed to @fn
 885 *
 886 * This function iterates over tasks attached to @memcg or to any of its
 887 * descendants and calls @fn for each task. If @fn returns a non-zero
 888 * value, the function breaks the iteration loop and returns the value.
 889 * Otherwise, it will iterate over all tasks and return 0.
 890 *
 891 * This function must not be called for the root memory cgroup.
 892 */
 893int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
 894			  int (*fn)(struct task_struct *, void *), void *arg)
 895{
 896	struct mem_cgroup *iter;
 897	int ret = 0;
 898
 899	BUG_ON(memcg == root_mem_cgroup);
 900
 901	for_each_mem_cgroup_tree(iter, memcg) {
 902		struct css_task_iter it;
 903		struct task_struct *task;
 904
 905		css_task_iter_start(&iter->css, 0, &it);
 906		while (!ret && (task = css_task_iter_next(&it)))
 907			ret = fn(task, arg);
 908		css_task_iter_end(&it);
 909		if (ret) {
 910			mem_cgroup_iter_break(memcg, iter);
 911			break;
 912		}
 913	}
 914	return ret;
 915}
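/*
 * Example (illustrative sketch, not part of this file): a counting
 * callback in the style of the OOM killer's per-task evaluation.
 * Remember that the walk must not start at root_mem_cgroup.
 */
static int count_task_example(struct task_struct *task, void *arg)
{
	unsigned int *count = arg;

	(*count)++;
	return 0;	/* non-zero would stop the walk */
}

static unsigned int count_hierarchy_tasks_example(struct mem_cgroup *memcg)
{
	unsigned int count = 0;

	mem_cgroup_scan_tasks(memcg, count_task_example, &count);
	return count;
}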
 916
 917/**
 918 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 919 * @page: the page
 920 * @pgdat: pgdat of the page
 921 *
 922 * This function is only safe when following the LRU page isolation
 923 * and putback protocol: the LRU lock must be held, and the page must
 924 * either be PageLRU() or the caller must have isolated/allocated it.
 925 */
 926struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
 927{
 928	struct mem_cgroup_per_node *mz;
 929	struct mem_cgroup *memcg;
 930	struct lruvec *lruvec;
 931
 932	if (mem_cgroup_disabled()) {
 933		lruvec = &pgdat->lruvec;
 934		goto out;
 935	}
 936
 937	memcg = page->mem_cgroup;
 938	/*
 939	 * Swapcache readahead pages are added to the LRU - and
 940	 * possibly migrated - before they are charged.
 941	 */
 942	if (!memcg)
 943		memcg = root_mem_cgroup;
 944
 945	mz = mem_cgroup_page_nodeinfo(memcg, page);
 946	lruvec = &mz->lruvec;
 947out:
 948	/*
 949	 * Since a node can be onlined after the mem_cgroup was created,
 950	 * we have to be prepared to initialize lruvec->pgdat here;
 951	 * and if offlined then reonlined, we need to reinitialize it.
 952	 */
 953	if (unlikely(lruvec->pgdat != pgdat))
 954		lruvec->pgdat = pgdat;
 955	return lruvec;
 956}
 957
 958/**
 959 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 960 * @lruvec: mem_cgroup per zone lru vector
 961 * @lru: index of lru list the page is sitting on
 962 * @zid: zone id of the accounted pages
 963 * @nr_pages: positive when adding or negative when removing
 964 *
 965 * This function must be called under lru_lock, just before a page is added
 966 * to or just after a page is removed from an lru list (that ordering being
 967 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 968 */
 969void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
 970				int zid, int nr_pages)
 971{
 972	struct mem_cgroup_per_node *mz;
 973	unsigned long *lru_size;
 974	long size;
 975
 976	if (mem_cgroup_disabled())
 977		return;
 978
 979	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 980	lru_size = &mz->lru_zone_size[zid][lru];
 981
 982	if (nr_pages < 0)
 983		*lru_size += nr_pages;
 984
 985	size = *lru_size;
 986	if (WARN_ONCE(size < 0,
 987		"%s(%p, %d, %d): lru_size %ld\n",
 988		__func__, lruvec, lru, nr_pages, size)) {
 989		VM_BUG_ON(1);
 990		*lru_size = 0;
 991	}
 992
 993	if (nr_pages > 0)
 994		*lru_size += nr_pages;
 995}
 996
 997bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
 998{
 999	struct mem_cgroup *task_memcg;
1000	struct task_struct *p;
1001	bool ret;
1002
1003	p = find_lock_task_mm(task);
1004	if (p) {
1005		task_memcg = get_mem_cgroup_from_mm(p->mm);
1006		task_unlock(p);
1007	} else {
1008		/*
1009		 * All threads may have already detached their mm's, but the oom
1010		 * killer still needs to detect if they have already been oom
1011		 * killed to prevent needlessly killing additional tasks.
1012		 */
1013		rcu_read_lock();
1014		task_memcg = mem_cgroup_from_task(task);
1015		css_get(&task_memcg->css);
1016		rcu_read_unlock();
1017	}
1018	ret = mem_cgroup_is_descendant(task_memcg, memcg);
1019	css_put(&task_memcg->css);
1020	return ret;
1021}
1022
1023/**
1024 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1025 * @memcg: the memory cgroup
1026 *
1027 * Returns the maximum amount of memory @memcg can be charged with, in
1028 * pages.
1029 */
1030static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1031{
1032	unsigned long margin = 0;
1033	unsigned long count;
1034	unsigned long limit;
1035
1036	count = page_counter_read(&memcg->memory);
1037	limit = READ_ONCE(memcg->memory.limit);
1038	if (count < limit)
1039		margin = limit - count;
1040
1041	if (do_memsw_account()) {
1042		count = page_counter_read(&memcg->memsw);
1043		limit = READ_ONCE(memcg->memsw.limit);
1044		if (count <= limit)
1045			margin = min(margin, limit - count);
1046		else
1047			margin = 0;
1048	}
1049
1050	return margin;
1051}
1052
1053/*
1054 * A routine for checking whether "mem" is under move_account() or not.
1055 *
1056 * Checking whether a cgroup is mc.from or mc.to or under the hierarchy
1057 * of moving cgroups.  This is for waiting at high memory pressure
1058 * caused by "move".
1059 */
1060static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1061{
1062	struct mem_cgroup *from;
1063	struct mem_cgroup *to;
1064	bool ret = false;
1065	/*
1066	 * Unlike task_move routines, we access mc.to and mc.from without
1067	 * mutual exclusion by cgroup_mutex. Here, we take the spinlock instead.
1068	 */
1069	spin_lock(&mc.lock);
1070	from = mc.from;
1071	to = mc.to;
1072	if (!from)
1073		goto unlock;
1074
1075	ret = mem_cgroup_is_descendant(from, memcg) ||
1076		mem_cgroup_is_descendant(to, memcg);
1077unlock:
1078	spin_unlock(&mc.lock);
1079	return ret;
1080}
1081
1082static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1083{
1084	if (mc.moving_task && current != mc.moving_task) {
1085		if (mem_cgroup_under_move(memcg)) {
1086			DEFINE_WAIT(wait);
1087			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1088			/* moving charge context might have finished. */
1089			if (mc.moving_task)
1090				schedule();
1091			finish_wait(&mc.waitq, &wait);
1092			return true;
1093		}
1094	}
1095	return false;
1096}
1097
1098static const unsigned int memcg1_stats[] = {
1099	MEMCG_CACHE,
1100	MEMCG_RSS,
1101	MEMCG_RSS_HUGE,
1102	NR_SHMEM,
1103	NR_FILE_MAPPED,
1104	NR_FILE_DIRTY,
1105	NR_WRITEBACK,
1106	MEMCG_SWAP,
1107};
1108
1109static const char *const memcg1_stat_names[] = {
1110	"cache",
1111	"rss",
1112	"rss_huge",
1113	"shmem",
1114	"mapped_file",
1115	"dirty",
1116	"writeback",
1117	"swap",
1118};
1119
1120#define K(x) ((x) << (PAGE_SHIFT-10))
1121/**
1122 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
1123 * @memcg: The memory cgroup that went over limit
1124 * @p: Task that is going to be killed
1125 *
1126 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1127 * enabled
1128 */
1129void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1130{
1131	struct mem_cgroup *iter;
1132	unsigned int i;
1133
1134	rcu_read_lock();
1135
1136	if (p) {
1137		pr_info("Task in ");
1138		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1139		pr_cont(" killed as a result of limit of ");
1140	} else {
1141		pr_info("Memory limit reached of cgroup ");
1142	}
1143
1144	pr_cont_cgroup_path(memcg->css.cgroup);
1145	pr_cont("\n");
1146
1147	rcu_read_unlock();
1148
1149	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1150		K((u64)page_counter_read(&memcg->memory)),
1151		K((u64)memcg->memory.limit), memcg->memory.failcnt);
1152	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1153		K((u64)page_counter_read(&memcg->memsw)),
1154		K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
1155	pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1156		K((u64)page_counter_read(&memcg->kmem)),
1157		K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
1158
1159	for_each_mem_cgroup_tree(iter, memcg) {
1160		pr_info("Memory cgroup stats for ");
1161		pr_cont_cgroup_path(iter->css.cgroup);
1162		pr_cont(":");
1163
1164		for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
1165			if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account)
1166				continue;
1167			pr_cont(" %s:%luKB", memcg1_stat_names[i],
1168				K(memcg_page_state(iter, memcg1_stats[i])));
1169		}
1170
1171		for (i = 0; i < NR_LRU_LISTS; i++)
1172			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1173				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1174
1175		pr_cont("\n");
1176	}
1177}
1178
1179/*
1180 * Return the memory (and swap, if configured) limit for a memcg.
1181 */
1182unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
1183{
1184	unsigned long limit;
1185
1186	limit = memcg->memory.limit;
1187	if (mem_cgroup_swappiness(memcg)) {
1188		unsigned long memsw_limit;
1189		unsigned long swap_limit;
1190
1191		memsw_limit = memcg->memsw.limit;
1192		swap_limit = memcg->swap.limit;
1193		swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
1194		limit = min(limit + swap_limit, memsw_limit);
1195	}
1196	return limit;
1197}
1198
1199static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1200				     int order)
1201{
1202	struct oom_control oc = {
1203		.zonelist = NULL,
1204		.nodemask = NULL,
1205		.memcg = memcg,
1206		.gfp_mask = gfp_mask,
1207		.order = order,
1208	};
1209	bool ret;
1210
1211	mutex_lock(&oom_lock);
1212	ret = out_of_memory(&oc);
1213	mutex_unlock(&oom_lock);
1214	return ret;
1215}
1216
1217#if MAX_NUMNODES > 1
1218
1219/**
1220 * test_mem_cgroup_node_reclaimable
1221 * @memcg: the target memcg
1222 * @nid: the node ID to be checked.
1223 * @noswap: specify true here if the user wants file-only information.
1224 *
1225 * This function returns whether the specified memcg contains any
1226 * reclaimable pages on a node. Returns true if there are any reclaimable
1227 * pages in the node.
1228 */
1229static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1230		int nid, bool noswap)
1231{
1232	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1233		return true;
1234	if (noswap || !total_swap_pages)
1235		return false;
1236	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1237		return true;
1238	return false;
1239
1240}
1241
1242/*
1243 * Always updating the nodemask is not very good - even if we have an empty
1244 * list or the wrong list here, we can start from some node and traverse all
1245 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1246 *
1247 */
1248static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1249{
1250	int nid;
1251	/*
1252	 * numainfo_events > 0 means there were at least NUMAINFO_EVENTS_TARGET
1253	 * pagein/pageout changes since the last update.
1254	 */
1255	if (!atomic_read(&memcg->numainfo_events))
1256		return;
1257	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1258		return;
1259
1260	/* make a nodemask where this memcg uses memory from */
1261	memcg->scan_nodes = node_states[N_MEMORY];
1262
1263	for_each_node_mask(nid, node_states[N_MEMORY]) {
1264
1265		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1266			node_clear(nid, memcg->scan_nodes);
1267	}
1268
1269	atomic_set(&memcg->numainfo_events, 0);
1270	atomic_set(&memcg->numainfo_updating, 0);
1271}
1272
1273/*
1274 * Selecting a node where we start reclaim from. Because what we need is
1275 * just reducing the usage counter, starting from anywhere is OK. Considering
1276 * memory reclaim from the current node, there are pros and cons.
1277 *
1278 * Freeing memory from the current node means freeing memory from a node
1279 * which we'll use or have used, so it may hurt the LRU. And if several
1280 * threads hit their limits, they will contend on one node. But freeing
1281 * from a remote node costs more for memory reclaim because of memory latency.
1282 *
1283 * Now, we use round-robin. Better algorithm is welcomed.
1284 */
1285int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1286{
1287	int node;
1288
1289	mem_cgroup_may_update_nodemask(memcg);
1290	node = memcg->last_scanned_node;
1291
1292	node = next_node_in(node, memcg->scan_nodes);
1293	/*
1294	 * mem_cgroup_may_update_nodemask might have seen no reclaimable pages
1295	 * last time it really checked all the LRUs due to rate limiting.
1296	 * Fallback to the current node in that case for simplicity.
1297	 */
1298	if (unlikely(node == MAX_NUMNODES))
1299		node = numa_node_id();
1300
1301	memcg->last_scanned_node = node;
1302	return node;
1303}
1304#else
1305int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1306{
1307	return 0;
1308}
1309#endif
1310
1311static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1312				   pg_data_t *pgdat,
1313				   gfp_t gfp_mask,
1314				   unsigned long *total_scanned)
1315{
1316	struct mem_cgroup *victim = NULL;
1317	int total = 0;
1318	int loop = 0;
1319	unsigned long excess;
1320	unsigned long nr_scanned;
1321	struct mem_cgroup_reclaim_cookie reclaim = {
1322		.pgdat = pgdat,
1323		.priority = 0,
1324	};
1325
1326	excess = soft_limit_excess(root_memcg);
1327
1328	while (1) {
1329		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1330		if (!victim) {
1331			loop++;
1332			if (loop >= 2) {
1333				/*
1334				 * If we have not been able to reclaim
1335				 * anything, it might be because there are
1336				 * no reclaimable pages under this hierarchy
1337				 */
1338				if (!total)
1339					break;
1340				/*
1341				 * We want to do more targeted reclaim.
1342				 * excess >> 2 is not too excessive, so we don't
1343				 * reclaim too much, nor so little that we keep
1344				 * coming back to reclaim from this cgroup.
1345				 */
1346				if (total >= (excess >> 2) ||
1347					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1348					break;
1349			}
1350			continue;
1351		}
1352		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1353					pgdat, &nr_scanned);
1354		*total_scanned += nr_scanned;
1355		if (!soft_limit_excess(root_memcg))
1356			break;
1357	}
1358	mem_cgroup_iter_break(root_memcg, victim);
1359	return total;
1360}
1361
1362#ifdef CONFIG_LOCKDEP
1363static struct lockdep_map memcg_oom_lock_dep_map = {
1364	.name = "memcg_oom_lock",
1365};
1366#endif
1367
1368static DEFINE_SPINLOCK(memcg_oom_lock);
1369
1370/*
1371 * Check whether the OOM killer is already running under our hierarchy.
1372 * If someone else is running, return false.
1373 */
1374static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1375{
1376	struct mem_cgroup *iter, *failed = NULL;
1377
1378	spin_lock(&memcg_oom_lock);
1379
1380	for_each_mem_cgroup_tree(iter, memcg) {
1381		if (iter->oom_lock) {
1382			/*
1383			 * this subtree of our hierarchy is already locked
1384			 * so we cannot grant the lock.
1385			 */
1386			failed = iter;
1387			mem_cgroup_iter_break(memcg, iter);
1388			break;
1389		} else
1390			iter->oom_lock = true;
1391	}
1392
1393	if (failed) {
1394		/*
1395		 * OK, we failed to lock the whole subtree so we have
1396		 * to clean up what we set up, up to the failing subtree.
1397		 */
1398		for_each_mem_cgroup_tree(iter, memcg) {
1399			if (iter == failed) {
1400				mem_cgroup_iter_break(memcg, iter);
1401				break;
1402			}
1403			iter->oom_lock = false;
1404		}
1405	} else
1406		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1407
1408	spin_unlock(&memcg_oom_lock);
1409
1410	return !failed;
1411}
1412
1413static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1414{
1415	struct mem_cgroup *iter;
1416
1417	spin_lock(&memcg_oom_lock);
1418	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
1419	for_each_mem_cgroup_tree(iter, memcg)
1420		iter->oom_lock = false;
1421	spin_unlock(&memcg_oom_lock);
1422}
1423
1424static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1425{
1426	struct mem_cgroup *iter;
1427
1428	spin_lock(&memcg_oom_lock);
1429	for_each_mem_cgroup_tree(iter, memcg)
1430		iter->under_oom++;
1431	spin_unlock(&memcg_oom_lock);
1432}
1433
1434static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1435{
1436	struct mem_cgroup *iter;
1437
1438	/*
1439	 * When a new child is created while the hierarchy is under oom,
1440	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
1441	 */
1442	spin_lock(&memcg_oom_lock);
1443	for_each_mem_cgroup_tree(iter, memcg)
1444		if (iter->under_oom > 0)
1445			iter->under_oom--;
1446	spin_unlock(&memcg_oom_lock);
1447}
1448
1449static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1450
1451struct oom_wait_info {
1452	struct mem_cgroup *memcg;
1453	wait_queue_entry_t	wait;
1454};
1455
1456static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1457	unsigned mode, int sync, void *arg)
1458{
1459	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1460	struct mem_cgroup *oom_wait_memcg;
1461	struct oom_wait_info *oom_wait_info;
1462
1463	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1464	oom_wait_memcg = oom_wait_info->memcg;
1465
1466	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1467	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1468		return 0;
1469	return autoremove_wake_function(wait, mode, sync, arg);
1470}
1471
1472static void memcg_oom_recover(struct mem_cgroup *memcg)
1473{
1474	/*
1475	 * For the following lockless ->under_oom test, the only required
1476	 * guarantee is that it must see the state asserted by an OOM when
1477	 * this function is called as a result of userland actions
1478	 * triggered by the notification of the OOM.  This is trivially
1479	 * achieved by invoking mem_cgroup_mark_under_oom() before
1480	 * triggering notification.
1481	 */
1482	if (memcg && memcg->under_oom)
1483		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1484}
1485
1486static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1487{
1488	if (!current->memcg_may_oom || order > PAGE_ALLOC_COSTLY_ORDER)
1489		return;
1490	/*
1491	 * We are in the middle of the charge context here, so we
1492	 * don't want to block when potentially sitting on a callstack
1493	 * that holds all kinds of filesystem and mm locks.
1494	 *
1495	 * Also, the caller may handle a failed allocation gracefully
1496	 * (like optional page cache readahead) and so an OOM killer
1497	 * invocation might not even be necessary.
1498	 *
1499	 * That's why we don't do anything here except remember the
1500	 * OOM context and then deal with it at the end of the page
1501	 * fault when the stack is unwound, the locks are released,
1502	 * and when we know whether the fault was overall successful.
1503	 */
1504	css_get(&memcg->css);
1505	current->memcg_in_oom = memcg;
1506	current->memcg_oom_gfp_mask = mask;
1507	current->memcg_oom_order = order;
1508}
1509
1510/**
1511 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1512 * @handle: actually kill/wait or just clean up the OOM state
1513 *
1514 * This has to be called at the end of a page fault if the memcg OOM
1515 * handler was enabled.
1516 *
1517 * Memcg supports userspace OOM handling where failed allocations must
1518 * sleep on a waitqueue until the userspace task resolves the
1519 * situation.  Sleeping directly in the charge context with all kinds
1520 * of locks held is not a good idea, instead we remember an OOM state
1521 * in the task and mem_cgroup_oom_synchronize() has to be called at
1522 * the end of the page fault to complete the OOM handling.
1523 *
1524 * Returns %true if an ongoing memcg OOM situation was detected and
1525 * completed, %false otherwise.
1526 */
1527bool mem_cgroup_oom_synchronize(bool handle)
1528{
1529	struct mem_cgroup *memcg = current->memcg_in_oom;
1530	struct oom_wait_info owait;
1531	bool locked;
1532
1533	/* OOM is global, do not handle */
1534	if (!memcg)
1535		return false;
1536
1537	if (!handle)
1538		goto cleanup;
1539
1540	owait.memcg = memcg;
1541	owait.wait.flags = 0;
1542	owait.wait.func = memcg_oom_wake_function;
1543	owait.wait.private = current;
1544	INIT_LIST_HEAD(&owait.wait.entry);
1545
1546	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1547	mem_cgroup_mark_under_oom(memcg);
1548
1549	locked = mem_cgroup_oom_trylock(memcg);
1550
1551	if (locked)
1552		mem_cgroup_oom_notify(memcg);
1553
1554	if (locked && !memcg->oom_kill_disable) {
1555		mem_cgroup_unmark_under_oom(memcg);
1556		finish_wait(&memcg_oom_waitq, &owait.wait);
1557		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1558					 current->memcg_oom_order);
1559	} else {
1560		schedule();
1561		mem_cgroup_unmark_under_oom(memcg);
1562		finish_wait(&memcg_oom_waitq, &owait.wait);
1563	}
1564
1565	if (locked) {
1566		mem_cgroup_oom_unlock(memcg);
1567		/*
1568		 * There is no guarantee that an OOM-lock contender
1569		 * sees the wakeups triggered by the OOM kill
1570		 * uncharges.  Wake any sleepers explicitly.
1571		 */
1572		memcg_oom_recover(memcg);
1573	}
1574cleanup:
1575	current->memcg_in_oom = NULL;
1576	css_put(&memcg->css);
1577	return true;
1578}
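/*
 * Example (illustrative, not part of this file): the caller is
 * pagefault_out_of_memory() in mm/oom_kill.c, which runs after the
 * fault has unwound and dropped its locks, so sleeping is safe:
 */
static void pagefault_oom_example(void)
{
	/* true: a memcg OOM was pending and has now been handled */
	if (mem_cgroup_oom_synchronize(true))
		return;

	/* ... otherwise fall through to global OOM handling ... */
}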
1579
1580/**
1581 * lock_page_memcg - lock a page->mem_cgroup binding
1582 * @page: the page
1583 *
1584 * This function protects unlocked LRU pages from being moved to
1585 * another cgroup.
1586 *
1587 * It ensures lifetime of the returned memcg. Caller is responsible
1588 * for the lifetime of the page; __unlock_page_memcg() is available
1589 * when @page might get freed inside the locked section.
1590 */
1591struct mem_cgroup *lock_page_memcg(struct page *page)
1592{
1593	struct mem_cgroup *memcg;
1594	unsigned long flags;
1595
1596	/*
1597	 * The RCU lock is held throughout the transaction.  The fast
1598	 * path can get away without acquiring the memcg->move_lock
1599	 * because page moving starts with an RCU grace period.
1600	 *
1601	 * The RCU lock also protects the memcg from being freed when
1602	 * the page state that is going to change is the only thing
1603	 * preventing the page itself from being freed. E.g. writeback
1604	 * doesn't hold a page reference and relies on PG_writeback to
1605	 * keep off truncation, migration and so forth.
1606	 */
1607	rcu_read_lock();
1608
1609	if (mem_cgroup_disabled())
1610		return NULL;
1611again:
1612	memcg = page->mem_cgroup;
1613	if (unlikely(!memcg))
1614		return NULL;
1615
1616	if (atomic_read(&memcg->moving_account) <= 0)
1617		return memcg;
1618
1619	spin_lock_irqsave(&memcg->move_lock, flags);
1620	if (memcg != page->mem_cgroup) {
1621		spin_unlock_irqrestore(&memcg->move_lock, flags);
1622		goto again;
1623	}
1624
1625	/*
1626	 * When charge migration first begins, we can have locked and
1627	 * unlocked page stat updates happening concurrently.  Track
1628	 * the task that holds the lock for unlock_page_memcg().
1629	 */
1630	memcg->move_lock_task = current;
1631	memcg->move_lock_flags = flags;
1632
1633	return memcg;
1634}
1635EXPORT_SYMBOL(lock_page_memcg);
1636
1637/**
1638 * __unlock_page_memcg - unlock and unpin a memcg
1639 * @memcg: the memcg
1640 *
1641 * Unlock and unpin a memcg returned by lock_page_memcg().
1642 */
1643void __unlock_page_memcg(struct mem_cgroup *memcg)
1644{
1645	if (memcg && memcg->move_lock_task == current) {
1646		unsigned long flags = memcg->move_lock_flags;
1647
1648		memcg->move_lock_task = NULL;
1649		memcg->move_lock_flags = 0;
1650
1651		spin_unlock_irqrestore(&memcg->move_lock, flags);
1652	}
1653
1654	rcu_read_unlock();
1655}
1656
1657/**
1658 * unlock_page_memcg - unlock a page->mem_cgroup binding
1659 * @page: the page
1660 */
1661void unlock_page_memcg(struct page *page)
1662{
1663	__unlock_page_memcg(page->mem_cgroup);
1664}
1665EXPORT_SYMBOL(unlock_page_memcg);
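/*
 * Example (illustrative sketch, not part of this file): a page-state
 * update made safe against concurrent charge moving, in the style of
 * page_add_file_rmap() in mm/rmap.c:
 */
static void mapped_stat_example(struct page *page)
{
	lock_page_memcg(page);
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, 1);
	unlock_page_memcg(page);
}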
1666
1667struct memcg_stock_pcp {
1668	struct mem_cgroup *cached; /* this is never the root cgroup */
1669	unsigned int nr_pages;
1670	struct work_struct work;
1671	unsigned long flags;
1672#define FLUSHING_CACHED_CHARGE	0
1673};
1674static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1675static DEFINE_MUTEX(percpu_charge_mutex);
1676
1677/**
1678 * consume_stock: Try to consume stocked charge on this cpu.
1679 * @memcg: memcg to consume from.
1680 * @nr_pages: how many pages to charge.
1681 *
1682 * The charges will only happen if @memcg matches the current cpu's memcg
1683 * stock, and at least @nr_pages are available in that stock.  Failure to
1684 * service an allocation will refill the stock.
1685 *
1686 * returns true if successful, false otherwise.
1687 */
1688static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1689{
1690	struct memcg_stock_pcp *stock;
1691	unsigned long flags;
1692	bool ret = false;
1693
1694	if (nr_pages > MEMCG_CHARGE_BATCH)
1695		return ret;
1696
1697	local_irq_save(flags);
1698
1699	stock = this_cpu_ptr(&memcg_stock);
1700	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
1701		stock->nr_pages -= nr_pages;
1702		ret = true;
1703	}
1704
1705	local_irq_restore(flags);
1706
1707	return ret;
1708}
1709
1710/*
1711 * Returns stocks cached in percpu and resets the cached information.
1712 */
1713static void drain_stock(struct memcg_stock_pcp *stock)
1714{
1715	struct mem_cgroup *old = stock->cached;
1716
1717	if (stock->nr_pages) {
1718		page_counter_uncharge(&old->memory, stock->nr_pages);
1719		if (do_memsw_account())
1720			page_counter_uncharge(&old->memsw, stock->nr_pages);
1721		css_put_many(&old->css, stock->nr_pages);
1722		stock->nr_pages = 0;
1723	}
1724	stock->cached = NULL;
1725}
1726
1727static void drain_local_stock(struct work_struct *dummy)
1728{
1729	struct memcg_stock_pcp *stock;
1730	unsigned long flags;
1731
1732	/*
1733	 * The only protection from memory hotplug vs. drain_stock races is
1734	 * that we always operate on local CPU stock here with IRQ disabled
1735	 */
1736	local_irq_save(flags);
1737
1738	stock = this_cpu_ptr(&memcg_stock);
1739	drain_stock(stock);
1740	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1741
1742	local_irq_restore(flags);
1743}
1744
1745/*
1746 * Cache charges (nr_pages) in the local per-CPU area.
1747 * They will be consumed by the consume_stock() function later.
1748 */
1749static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1750{
1751	struct memcg_stock_pcp *stock;
1752	unsigned long flags;
1753
1754	local_irq_save(flags);
1755
1756	stock = this_cpu_ptr(&memcg_stock);
1757	if (stock->cached != memcg) { /* reset if necessary */
1758		drain_stock(stock);
1759		stock->cached = memcg;
1760	}
1761	stock->nr_pages += nr_pages;
1762
1763	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
1764		drain_stock(stock);
1765
1766	local_irq_restore(flags);
1767}
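/*
 * Example (illustrative sketch, not part of this file): how try_charge()
 * below pairs these helpers - consume from the local stock first, and
 * park the overcharge of a batched page_counter charge back in it:
 */
static void stock_pairing_example(struct mem_cgroup *memcg,
				  unsigned int nr_pages)
{
	if (consume_stock(memcg, nr_pages))
		return;		/* served from the local per-cpu cache */

	/* ... charge MEMCG_CHARGE_BATCH pages to the page_counter ... */

	if (MEMCG_CHARGE_BATCH > nr_pages)
		refill_stock(memcg, MEMCG_CHARGE_BATCH - nr_pages);
}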
1768
1769/*
1770 * Drains all per-CPU charge caches for the given root_memcg, i.e. the
1771 * subtree of the hierarchy under it.
1772 */
1773static void drain_all_stock(struct mem_cgroup *root_memcg)
1774{
1775	int cpu, curcpu;
1776
1777	/* If someone's already draining, avoid adding more workers. */
1778	if (!mutex_trylock(&percpu_charge_mutex))
1779		return;
1780	/*
1781	 * Notify other cpus that a system-wide "drain" is running.
1782	 * We do not care about races with the cpu hotplug because cpu down
1783	 * as well as workers from this path always operate on the local
1784	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
1785	 */
1786	curcpu = get_cpu();
1787	for_each_online_cpu(cpu) {
1788		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1789		struct mem_cgroup *memcg;
1790
1791		memcg = stock->cached;
1792		if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css))
1793			continue;
1794		if (!mem_cgroup_is_descendant(memcg, root_memcg)) {
1795			css_put(&memcg->css);
1796			continue;
1797		}
1798		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1799			if (cpu == curcpu)
1800				drain_local_stock(&stock->work);
1801			else
1802				schedule_work_on(cpu, &stock->work);
1803		}
1804		css_put(&memcg->css);
1805	}
1806	put_cpu();
1807	mutex_unlock(&percpu_charge_mutex);
1808}
1809
1810static int memcg_hotplug_cpu_dead(unsigned int cpu)
1811{
1812	struct memcg_stock_pcp *stock;
1813	struct mem_cgroup *memcg;
1814
1815	stock = &per_cpu(memcg_stock, cpu);
1816	drain_stock(stock);
1817
1818	for_each_mem_cgroup(memcg) {
1819		int i;
1820
1821		for (i = 0; i < MEMCG_NR_STAT; i++) {
1822			int nid;
1823			long x;
1824
1825			x = this_cpu_xchg(memcg->stat_cpu->count[i], 0);
1826			if (x)
1827				atomic_long_add(x, &memcg->stat[i]);
1828
1829			if (i >= NR_VM_NODE_STAT_ITEMS)
1830				continue;
1831
1832			for_each_node(nid) {
1833				struct mem_cgroup_per_node *pn;
1834
1835				pn = mem_cgroup_nodeinfo(memcg, nid);
1836				x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
1837				if (x)
1838					atomic_long_add(x, &pn->lruvec_stat[i]);
1839			}
1840		}
1841
1842		for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
1843			long x;
1844
1845			x = this_cpu_xchg(memcg->stat_cpu->events[i], 0);
1846			if (x)
1847				atomic_long_add(x, &memcg->events[i]);
1848		}
1849	}
1850
1851	return 0;
1852}
1853
1854static void reclaim_high(struct mem_cgroup *memcg,
1855			 unsigned int nr_pages,
1856			 gfp_t gfp_mask)
1857{
1858	do {
1859		if (page_counter_read(&memcg->memory) <= memcg->high)
1860			continue;
1861		memcg_memory_event(memcg, MEMCG_HIGH);
1862		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
1863	} while ((memcg = parent_mem_cgroup(memcg)));
1864}
1865
1866static void high_work_func(struct work_struct *work)
1867{
1868	struct mem_cgroup *memcg;
1869
1870	memcg = container_of(work, struct mem_cgroup, high_work);
1871	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
1872}
1873
1874/*
1875 * Scheduled by try_charge() to be executed from the userland return path,
1876 * where it reclaims memory over the high limit.
1877 */
1878void mem_cgroup_handle_over_high(void)
1879{
1880	unsigned int nr_pages = current->memcg_nr_pages_over_high;
1881	struct mem_cgroup *memcg;
1882
1883	if (likely(!nr_pages))
1884		return;
1885
1886	memcg = get_mem_cgroup_from_mm(current->mm);
1887	reclaim_high(memcg, nr_pages, GFP_KERNEL);
1888	css_put(&memcg->css);
1889	current->memcg_nr_pages_over_high = 0;
1890}
1891
1892static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
1893		      unsigned int nr_pages)
1894{
1895	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
1896	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1897	struct mem_cgroup *mem_over_limit;
1898	struct page_counter *counter;
1899	unsigned long nr_reclaimed;
1900	bool may_swap = true;
1901	bool drained = false;
1902
1903	if (mem_cgroup_is_root(memcg))
1904		return 0;
1905retry:
1906	if (consume_stock(memcg, nr_pages))
1907		return 0;
1908
1909	if (!do_memsw_account() ||
1910	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
1911		if (page_counter_try_charge(&memcg->memory, batch, &counter))
1912			goto done_restock;
1913		if (do_memsw_account())
1914			page_counter_uncharge(&memcg->memsw, batch);
1915		mem_over_limit = mem_cgroup_from_counter(counter, memory);
1916	} else {
1917		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
1918		may_swap = false;
1919	}
1920
1921	if (batch > nr_pages) {
1922		batch = nr_pages;
1923		goto retry;
1924	}
1925
1926	/*
1927	 * Unlike in global OOM situations, memcg is not in a physical
1928	 * memory shortage.  Allow dying and OOM-killed tasks to
1929	 * bypass the last charges so that they can exit quickly and
1930	 * free their memory.
1931	 */
1932	if (unlikely(tsk_is_oom_victim(current) ||
1933		     fatal_signal_pending(current) ||
1934		     current->flags & PF_EXITING))
1935		goto force;
1936
1937	/*
1938	 * Prevent unbounded recursion when reclaim operations need to
1939	 * allocate memory. This might exceed the limits temporarily,
1940	 * but we prefer facilitating memory reclaim and getting back
1941	 * under the limit over triggering OOM kills in these cases.
1942	 */
1943	if (unlikely(current->flags & PF_MEMALLOC))
1944		goto force;
1945
1946	if (unlikely(task_in_memcg_oom(current)))
1947		goto nomem;
1948
1949	if (!gfpflags_allow_blocking(gfp_mask))
1950		goto nomem;
1951
1952	memcg_memory_event(mem_over_limit, MEMCG_MAX);
1953
1954	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
1955						    gfp_mask, may_swap);
1956
1957	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
1958		goto retry;
1959
1960	if (!drained) {
1961		drain_all_stock(mem_over_limit);
1962		drained = true;
1963		goto retry;
1964	}
1965
1966	if (gfp_mask & __GFP_NORETRY)
1967		goto nomem;
1968	/*
1969	 * Even though the limit is exceeded at this point, reclaim
1970	 * may have been able to free some pages.  Retry the charge
1971	 * before killing the task.
1972	 *
1973	 * Only for regular pages, though: huge pages are rather
1974	 * unlikely to succeed so close to the limit, and we fall back
1975	 * to regular pages anyway in case of failure.
1976	 */
1977	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
1978		goto retry;
1979	/*
1980	 * At task move, charge accounts can be doubly counted. So, it's
1981	 * better to wait until the end of task_move if something is going on.
1982	 */
1983	if (mem_cgroup_wait_acct_move(mem_over_limit))
1984		goto retry;
1985
1986	if (nr_retries--)
1987		goto retry;
1988
1989	if (gfp_mask & __GFP_NOFAIL)
1990		goto force;
1991
1992	if (fatal_signal_pending(current))
1993		goto force;
1994
1995	memcg_memory_event(mem_over_limit, MEMCG_OOM);
1996
1997	mem_cgroup_oom(mem_over_limit, gfp_mask,
1998		       get_order(nr_pages * PAGE_SIZE));
1999nomem:
2000	if (!(gfp_mask & __GFP_NOFAIL))
2001		return -ENOMEM;
2002force:
2003	/*
2004	 * The allocation either can't fail or will lead to more memory
2005	 * being freed very soon.  Allow memory usage to go over the limit
2006	 * temporarily by force charging it.
2007	 */
2008	page_counter_charge(&memcg->memory, nr_pages);
2009	if (do_memsw_account())
2010		page_counter_charge(&memcg->memsw, nr_pages);
2011	css_get_many(&memcg->css, nr_pages);
2012
2013	return 0;
2014
2015done_restock:
2016	css_get_many(&memcg->css, batch);
2017	if (batch > nr_pages)
2018		refill_stock(memcg, batch - nr_pages);
2019
2020	/*
2021	 * If the hierarchy is above the normal consumption range, schedule
2022	 * reclaim on returning to userland.  We can perform reclaim here
2023	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2024	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2025	 * not recorded as it most likely matches current's and won't
2026	 * change in the meantime.  As high limit is checked again before
2027	 * reclaim, the cost of mismatch is negligible.
2028	 */
2029	do {
2030		if (page_counter_read(&memcg->memory) > memcg->high) {
2031			/* Don't bother a random interrupted task */
2032			if (in_interrupt()) {
2033				schedule_work(&memcg->high_work);
2034				break;
2035			}
2036			current->memcg_nr_pages_over_high += batch;
2037			set_notify_resume(current);
2038			break;
2039		}
2040	} while ((memcg = parent_mem_cgroup(memcg)));
2041
2042	return 0;
2043}
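/*
 * A standalone sketch of try_charge()'s batch fallback: first try to charge
 * a whole batch against the limit; if that fails and the batch is larger
 * than the actual request, retry with just nr_pages.  The counter below is
 * a plain struct standing in for a page_counter, for illustration only; the
 * reclaim and OOM handling of the real function are left out.
 */
#include <stdbool.h>
#include <stdio.h>

struct counter { unsigned long usage, limit; };

static bool try_charge_counter(struct counter *c, unsigned long n)
{
        if (c->usage + n > c->limit)
                return false;
        c->usage += n;
        return true;
}

static int charge(struct counter *c, unsigned long nr_pages,
                  unsigned long batch)
{
        for (;;) {
                if (try_charge_counter(c, batch))
                        return 0;       /* done_restock: surplus -> stock */
                if (batch > nr_pages) { /* shrink to the real request... */
                        batch = nr_pages;
                        continue;       /* ...and retry */
                }
                return -1;              /* would need reclaim / OOM handling */
        }
}

int main(void)
{
        struct counter memory = { .usage = 95, .limit = 100 };

        /* A 32-page batch fails, but the single page itself still fits. */
        printf("ret=%d usage=%lu\n", charge(&memory, 1, 32), memory.usage);
        return 0;
}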
2044
2045static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2046{
2047	if (mem_cgroup_is_root(memcg))
2048		return;
2049
2050	page_counter_uncharge(&memcg->memory, nr_pages);
2051	if (do_memsw_account())
2052		page_counter_uncharge(&memcg->memsw, nr_pages);
2053
2054	css_put_many(&memcg->css, nr_pages);
2055}
2056
2057static void lock_page_lru(struct page *page, int *isolated)
2058{
2059	struct zone *zone = page_zone(page);
2060
2061	spin_lock_irq(zone_lru_lock(zone));
2062	if (PageLRU(page)) {
2063		struct lruvec *lruvec;
2064
2065		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
2066		ClearPageLRU(page);
2067		del_page_from_lru_list(page, lruvec, page_lru(page));
2068		*isolated = 1;
2069	} else
2070		*isolated = 0;
2071}
2072
2073static void unlock_page_lru(struct page *page, int isolated)
2074{
2075	struct zone *zone = page_zone(page);
2076
2077	if (isolated) {
2078		struct lruvec *lruvec;
2079
2080		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
2081		VM_BUG_ON_PAGE(PageLRU(page), page);
2082		SetPageLRU(page);
2083		add_page_to_lru_list(page, lruvec, page_lru(page));
2084	}
2085	spin_unlock_irq(zone_lru_lock(zone));
2086}
2087
2088static void commit_charge(struct page *page, struct mem_cgroup *memcg,
2089			  bool lrucare)
2090{
2091	int isolated;
2092
2093	VM_BUG_ON_PAGE(page->mem_cgroup, page);
2094
2095	/*
2096	 * In some cases, such as SwapCache and FUSE (splice_buf->radixtree), the page
2097	 * may already be on some other mem_cgroup's LRU.  Take care of it.
2098	 */
2099	if (lrucare)
2100		lock_page_lru(page, &isolated);
2101
2102	/*
2103	 * Nobody should be changing or seriously looking at
2104	 * page->mem_cgroup at this point:
2105	 *
2106	 * - the page is uncharged
2107	 *
2108	 * - the page is off-LRU
2109	 *
2110	 * - an anonymous fault has exclusive page access, except for
2111	 *   a locked page table
2112	 *
2113	 * - a page cache insertion, a swapin fault, or a migration
2114	 *   have the page locked
2115	 */
2116	page->mem_cgroup = memcg;
2117
2118	if (lrucare)
2119		unlock_page_lru(page, isolated);
2120}
2121
2122#ifndef CONFIG_SLOB
2123static int memcg_alloc_cache_id(void)
2124{
2125	int id, size;
2126	int err;
2127
2128	id = ida_simple_get(&memcg_cache_ida,
2129			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2130	if (id < 0)
2131		return id;
2132
2133	if (id < memcg_nr_cache_ids)
2134		return id;
2135
2136	/*
2137	 * There's no space for the new id in memcg_caches arrays,
2138	 * so we have to grow them.
2139	 */
2140	down_write(&memcg_cache_ids_sem);
2141
2142	size = 2 * (id + 1);
2143	if (size < MEMCG_CACHES_MIN_SIZE)
2144		size = MEMCG_CACHES_MIN_SIZE;
2145	else if (size > MEMCG_CACHES_MAX_SIZE)
2146		size = MEMCG_CACHES_MAX_SIZE;
2147
2148	err = memcg_update_all_caches(size);
2149	if (!err)
2150		err = memcg_update_all_list_lrus(size);
2151	if (!err)
2152		memcg_nr_cache_ids = size;
2153
2154	up_write(&memcg_cache_ids_sem);
2155
2156	if (err) {
2157		ida_simple_remove(&memcg_cache_ida, id);
2158		return err;
2159	}
2160	return id;
2161}
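/*
 * The array-growth rule above in isolation: double past the new id and
 * clamp.  The MIN/MAX values below are placeholders for this sketch, not
 * the kernel's actual MEMCG_CACHES_MIN_SIZE/MEMCG_CACHES_MAX_SIZE.
 */
#include <stdio.h>

#define CACHES_MIN_SIZE 4
#define CACHES_MAX_SIZE 65535

static int grow_to(int id)
{
        int size = 2 * (id + 1);

        if (size < CACHES_MIN_SIZE)
                size = CACHES_MIN_SIZE;
        else if (size > CACHES_MAX_SIZE)
                size = CACHES_MAX_SIZE;
        return size;
}

int main(void)
{
        /* id 0 -> 4 (clamped up), id 5 -> 12, id 70000 -> 65535 (clamped) */
        printf("%d %d %d\n", grow_to(0), grow_to(5), grow_to(70000));
        return 0;
}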
2162
2163static void memcg_free_cache_id(int id)
2164{
2165	ida_simple_remove(&memcg_cache_ida, id);
2166}
2167
2168struct memcg_kmem_cache_create_work {
2169	struct mem_cgroup *memcg;
2170	struct kmem_cache *cachep;
2171	struct work_struct work;
2172};
2173
2174static void memcg_kmem_cache_create_func(struct work_struct *w)
2175{
2176	struct memcg_kmem_cache_create_work *cw =
2177		container_of(w, struct memcg_kmem_cache_create_work, work);
2178	struct mem_cgroup *memcg = cw->memcg;
2179	struct kmem_cache *cachep = cw->cachep;
2180
2181	memcg_create_kmem_cache(memcg, cachep);
2182
2183	css_put(&memcg->css);
2184	kfree(cw);
2185}
2186
2187/*
2188 * Enqueue the creation of a per-memcg kmem_cache.
2189 */
2190static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2191					       struct kmem_cache *cachep)
2192{
2193	struct memcg_kmem_cache_create_work *cw;
2194
2195	cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
2196	if (!cw)
2197		return;
2198
2199	css_get(&memcg->css);
2200
2201	cw->memcg = memcg;
2202	cw->cachep = cachep;
2203	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
2204
2205	queue_work(memcg_kmem_cache_wq, &cw->work);
2206}
2207
2208static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2209					     struct kmem_cache *cachep)
2210{
2211	/*
2212	 * We need to stop accounting when we kmalloc, because if the
2213	 * corresponding kmalloc cache is not yet created, the first allocation
2214	 * in __memcg_schedule_kmem_cache_create will recurse.
2215	 *
2216	 * However, it is better to enclose the whole function. Depending on
2217	 * the debugging options enabled, INIT_WORK(), for instance, can
2218	 * trigger an allocation. This too, will make us recurse. Because at
2219	 * this point we can't allow ourselves back into memcg_kmem_get_cache,
2220	 * the safest choice is to do it like this, wrapping the whole function.
2221	 */
2222	current->memcg_kmem_skip_account = 1;
2223	__memcg_schedule_kmem_cache_create(memcg, cachep);
2224	current->memcg_kmem_skip_account = 0;
2225}
2226
2227static inline bool memcg_kmem_bypass(void)
2228{
2229	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
2230		return true;
2231	return false;
2232}
2233
2234/**
2235 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
2236 * @cachep: the original global kmem cache
2237 *
2238 * Return the kmem_cache we're supposed to use for a slab allocation.
2239 * We try to use the current memcg's version of the cache.
2240 *
2241 * If the cache does not exist yet, if we are the first user of it, we
2242 * create it asynchronously in a workqueue and let the current allocation
2243 * go through with the original cache.
2244 *
2245 * This function takes a reference to the cache it returns to assure it
2246 * won't get destroyed while we are working with it. Once the caller is
2247 * done with it, memcg_kmem_put_cache() must be called to release the
2248 * reference.
2249 */
2250struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
2251{
2252	struct mem_cgroup *memcg;
2253	struct kmem_cache *memcg_cachep;
2254	int kmemcg_id;
2255
2256	VM_BUG_ON(!is_root_cache(cachep));
2257
2258	if (memcg_kmem_bypass())
2259		return cachep;
2260
2261	if (current->memcg_kmem_skip_account)
2262		return cachep;
2263
2264	memcg = get_mem_cgroup_from_mm(current->mm);
2265	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2266	if (kmemcg_id < 0)
2267		goto out;
2268
2269	memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
2270	if (likely(memcg_cachep))
2271		return memcg_cachep;
2272
2273	/*
2274	 * If we are in a safe context (can wait, and not in interrupt
2275	 * context), we could be predictable and return right away.
2276	 * This would guarantee that the allocation being performed
2277	 * already belongs in the new cache.
2278	 *
2279	 * However, there are some clashes that can arise from locking.
2280	 * For instance, because we acquire the slab_mutex while doing
2281	 * memcg_create_kmem_cache, this means no further allocation
2282	 * could happen with the slab_mutex held. So it's better to
2283	 * defer everything.
2284	 */
2285	memcg_schedule_kmem_cache_create(memcg, cachep);
2286out:
2287	css_put(&memcg->css);
2288	return cachep;
2289}
2290
2291/**
2292 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
2293 * @cachep: the cache returned by memcg_kmem_get_cache
2294 */
2295void memcg_kmem_put_cache(struct kmem_cache *cachep)
2296{
2297	if (!is_root_cache(cachep))
2298		css_put(&cachep->memcg_params.memcg->css);
2299}
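/*
 * Roughly how a caller is expected to pair the two helpers above around an
 * allocation.  This is an illustrative sketch, not the actual slab-allocator
 * code: select the current memcg's clone of the cache, allocate from
 * whatever cache was returned, then drop the reference.
 */
static void *example_kmem_cache_alloc(struct kmem_cache *s, gfp_t gfp)
{
        void *obj;

        s = memcg_kmem_get_cache(s);    /* may return the root cache */
        obj = kmem_cache_alloc(s, gfp);
        memcg_kmem_put_cache(s);        /* drop the css reference, if any */

        return obj;
}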
2300
2301/**
2302 * memcg_kmem_charge_memcg: charge a kmem page
2303 * @page: page to charge
2304 * @gfp: reclaim mode
2305 * @order: allocation order
2306 * @memcg: memory cgroup to charge
2307 *
2308 * Returns 0 on success, an error code on failure.
2309 */
2310int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2311			    struct mem_cgroup *memcg)
2312{
2313	unsigned int nr_pages = 1 << order;
2314	struct page_counter *counter;
2315	int ret;
2316
2317	ret = try_charge(memcg, gfp, nr_pages);
2318	if (ret)
2319		return ret;
2320
2321	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2322	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2323		cancel_charge(memcg, nr_pages);
2324		return -ENOMEM;
2325	}
2326
2327	page->mem_cgroup = memcg;
2328
2329	return 0;
2330}
2331
2332/**
2333 * memcg_kmem_charge: charge a kmem page to the current memory cgroup
2334 * @page: page to charge
2335 * @gfp: reclaim mode
2336 * @order: allocation order
2337 *
2338 * Returns 0 on success, an error code on failure.
2339 */
2340int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
2341{
2342	struct mem_cgroup *memcg;
2343	int ret = 0;
2344
2345	if (memcg_kmem_bypass())
2346		return 0;
2347
2348	memcg = get_mem_cgroup_from_mm(current->mm);
2349	if (!mem_cgroup_is_root(memcg)) {
2350		ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
2351		if (!ret)
2352			__SetPageKmemcg(page);
2353	}
2354	css_put(&memcg->css);
2355	return ret;
2356}
2357/**
2358 * memcg_kmem_uncharge: uncharge a kmem page
2359 * @page: page to uncharge
2360 * @order: allocation order
2361 */
2362void memcg_kmem_uncharge(struct page *page, int order)
2363{
2364	struct mem_cgroup *memcg = page->mem_cgroup;
2365	unsigned int nr_pages = 1 << order;
2366
2367	if (!memcg)
2368		return;
2369
2370	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
2371
2372	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2373		page_counter_uncharge(&memcg->kmem, nr_pages);
2374
2375	page_counter_uncharge(&memcg->memory, nr_pages);
2376	if (do_memsw_account())
2377		page_counter_uncharge(&memcg->memsw, nr_pages);
2378
2379	page->mem_cgroup = NULL;
2380
2381	/* slab pages do not have PageKmemcg flag set */
2382	if (PageKmemcg(page))
2383		__ClearPageKmemcg(page);
2384
2385	css_put_many(&memcg->css, nr_pages);
2386}
2387#endif /* !CONFIG_SLOB */
2388
2389#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2390
2391/*
2392 * Because tail pages are not marked as "used", set them. We're under
2393 * zone_lru_lock and migration entries are set up in all page mappings.
2394 */
2395void mem_cgroup_split_huge_fixup(struct page *head)
2396{
2397	int i;
2398
2399	if (mem_cgroup_disabled())
2400		return;
2401
2402	for (i = 1; i < HPAGE_PMD_NR; i++)
2403		head[i].mem_cgroup = head->mem_cgroup;
2404
2405	__mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR);
2406}
2407#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2408
2409#ifdef CONFIG_MEMCG_SWAP
2410/**
2411 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2412 * @entry: swap entry to be moved
2413 * @from:  mem_cgroup which the entry is moved from
2414 * @to:  mem_cgroup which the entry is moved to
2415 *
2416 * It succeeds only when the swap_cgroup's record for this entry is the same
2417 * as the mem_cgroup's id of @from.
2418 *
2419 * Returns 0 on success, -EINVAL on failure.
2420 *
2421 * The caller must have charged to @to, IOW, called page_counter_charge() for
2422 * both res and memsw, and called css_get().
2423 */
2424static int mem_cgroup_move_swap_account(swp_entry_t entry,
2425				struct mem_cgroup *from, struct mem_cgroup *to)
2426{
2427	unsigned short old_id, new_id;
2428
2429	old_id = mem_cgroup_id(from);
2430	new_id = mem_cgroup_id(to);
2431
2432	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2433		mod_memcg_state(from, MEMCG_SWAP, -1);
2434		mod_memcg_state(to, MEMCG_SWAP, 1);
2435		return 0;
2436	}
2437	return -EINVAL;
2438}
2439#else
2440static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2441				struct mem_cgroup *from, struct mem_cgroup *to)
2442{
2443	return -EINVAL;
2444}
2445#endif
2446
2447static DEFINE_MUTEX(memcg_limit_mutex);
2448
2449static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2450				   unsigned long limit, bool memsw)
2451{
2452	bool enlarge = false;
2453	int ret;
2454	bool limits_invariant;
2455	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
2456
2457	do {
2458		if (signal_pending(current)) {
2459			ret = -EINTR;
2460			break;
2461		}
2462
2463		mutex_lock(&memcg_limit_mutex);
2464		/*
2465		 * Make sure that the new limit (memsw or memory limit) doesn't
2466		 * break our basic invariant rule memory.limit <= memsw.limit.
2467		 */
2468		limits_invariant = memsw ? limit >= memcg->memory.limit :
2469					   limit <= memcg->memsw.limit;
2470		if (!limits_invariant) {
2471			mutex_unlock(&memcg_limit_mutex);
2472			ret = -EINVAL;
2473			break;
2474		}
2475		if (limit > counter->limit)
2476			enlarge = true;
2477		ret = page_counter_limit(counter, limit);
2478		mutex_unlock(&memcg_limit_mutex);
2479
2480		if (!ret)
2481			break;
2482
2483		if (!try_to_free_mem_cgroup_pages(memcg, 1,
2484					GFP_KERNEL, !memsw)) {
2485			ret = -EBUSY;
2486			break;
2487		}
2488	} while (true);
2489
2490	if (!ret && enlarge)
2491		memcg_oom_recover(memcg);
2492
2493	return ret;
2494}
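/*
 * The invariant checked above, in isolation: memory.limit must never exceed
 * memsw.limit.  A standalone sketch with plain integers standing in for the
 * two page_counters; the example figures are arbitrary.
 */
#include <stdbool.h>
#include <stdio.h>

static bool new_limit_ok(unsigned long new_limit, bool memsw,
                         unsigned long cur_memory_limit,
                         unsigned long cur_memsw_limit)
{
        /* memsw limit must stay >= memory limit; memory limit <= memsw limit */
        return memsw ? new_limit >= cur_memory_limit
                     : new_limit <= cur_memsw_limit;
}

int main(void)
{
        /* current limits: memory.limit = 100 pages, memsw.limit = 150 pages */
        printf("%d\n", new_limit_ok(120, false, 100, 150)); /* 1: 120 <= 150 */
        printf("%d\n", new_limit_ok(80,  true,  100, 150)); /* 0: memsw < memory */
        return 0;
}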
2495
2496unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
2497					    gfp_t gfp_mask,
2498					    unsigned long *total_scanned)
2499{
2500	unsigned long nr_reclaimed = 0;
2501	struct mem_cgroup_per_node *mz, *next_mz = NULL;
2502	unsigned long reclaimed;
2503	int loop = 0;
2504	struct mem_cgroup_tree_per_node *mctz;
2505	unsigned long excess;
2506	unsigned long nr_scanned;
2507
2508	if (order > 0)
2509		return 0;
2510
2511	mctz = soft_limit_tree_node(pgdat->node_id);
2512
2513	/*
2514	 * Do not even bother to check the largest node if the root
2515	 * is empty. Do it lockless to prevent lock bouncing. Races
2516	 * are acceptable as soft limit is best effort anyway.
2517	 */
2518	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
2519		return 0;
2520
2521	/*
2522	 * This loop can run for a while, especially if mem_cgroups continuously
2523	 * keep exceeding their soft limit and putting the system under
2524	 * pressure.
2525	 */
2526	do {
2527		if (next_mz)
2528			mz = next_mz;
2529		else
2530			mz = mem_cgroup_largest_soft_limit_node(mctz);
2531		if (!mz)
2532			break;
2533
2534		nr_scanned = 0;
2535		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
2536						    gfp_mask, &nr_scanned);
2537		nr_reclaimed += reclaimed;
2538		*total_scanned += nr_scanned;
2539		spin_lock_irq(&mctz->lock);
2540		__mem_cgroup_remove_exceeded(mz, mctz);
2541
2542		/*
2543		 * If we failed to reclaim anything from this memory cgroup
2544		 * it is time to move on to the next cgroup
2545		 */
2546		next_mz = NULL;
2547		if (!reclaimed)
2548			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2549
2550		excess = soft_limit_excess(mz->memcg);
2551		/*
2552		 * One school of thought says that we should not add
2553		 * back the node to the tree if reclaim returns 0.
2554		 * But our reclaim could return 0, simply because due
2555		 * to priority we are exposing a smaller subset of
2556		 * memory to reclaim from. Consider this as a longer
2557		 * term TODO.
2558		 */
2559		/* If excess == 0, no tree ops */
2560		__mem_cgroup_insert_exceeded(mz, mctz, excess);
2561		spin_unlock_irq(&mctz->lock);
2562		css_put(&mz->memcg->css);
2563		loop++;
2564		/*
2565		 * Could not reclaim anything and there are no more
2566		 * mem cgroups to try or we seem to be looping without
2567		 * reclaiming anything.
2568		 */
2569		if (!nr_reclaimed &&
2570			(next_mz == NULL ||
2571			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2572			break;
2573	} while (!nr_reclaimed);
2574	if (next_mz)
2575		css_put(&next_mz->memcg->css);
2576	return nr_reclaimed;
2577}
2578
2579/*
2580 * Test whether @memcg has children, dead or alive.  Note that this
2581 * function doesn't care whether @memcg has use_hierarchy enabled and
2582 * returns %true if there are child csses according to the cgroup
2583 * hierarchy.  Testing use_hierarchy is the caller's responsibility.
2584 */
2585static inline bool memcg_has_children(struct mem_cgroup *memcg)
2586{
2587	bool ret;
2588
2589	rcu_read_lock();
2590	ret = css_next_child(NULL, &memcg->css);
2591	rcu_read_unlock();
2592	return ret;
2593}
2594
2595/*
2596 * Reclaims as many pages from the given memcg as possible.
2597 *
2598 * Caller is responsible for holding css reference for memcg.
2599 */
2600static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2601{
2602	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2603
2604	/* we call try-to-free pages to make this cgroup empty */
2605	lru_add_drain_all();
2606	/* try to free all pages in this cgroup */
2607	while (nr_retries && page_counter_read(&memcg->memory)) {
2608		int progress;
2609
2610		if (signal_pending(current))
2611			return -EINTR;
2612
2613		progress = try_to_free_mem_cgroup_pages(memcg, 1,
2614							GFP_KERNEL, true);
2615		if (!progress) {
2616			nr_retries--;
2617			/* maybe some writeback is necessary */
2618			congestion_wait(BLK_RW_ASYNC, HZ/10);
2619		}
2620
2621	}
2622
2623	return 0;
2624}
2625
2626static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2627					    char *buf, size_t nbytes,
2628					    loff_t off)
2629{
2630	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2631
2632	if (mem_cgroup_is_root(memcg))
2633		return -EINVAL;
2634	return mem_cgroup_force_empty(memcg) ?: nbytes;
2635}
2636
2637static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2638				     struct cftype *cft)
2639{
2640	return mem_cgroup_from_css(css)->use_hierarchy;
2641}
2642
2643static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2644				      struct cftype *cft, u64 val)
2645{
2646	int retval = 0;
2647	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2648	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
2649
2650	if (memcg->use_hierarchy == val)
2651		return 0;
2652
2653	/*
2654	 * If parent's use_hierarchy is set, we can't make any modifications
2655	 * in the child subtrees. If it is unset, then the change can
2656	 * occur, provided the current cgroup has no children.
2657	 *
2658	 * For the root cgroup, parent_memcg is NULL, so we allow the value to be
2659	 * set if there are no children.
2660	 */
2661	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
2662				(val == 1 || val == 0)) {
2663		if (!memcg_has_children(memcg))
2664			memcg->use_hierarchy = val;
2665		else
2666			retval = -EBUSY;
2667	} else
2668		retval = -EINVAL;
2669
2670	return retval;
2671}
2672
2673static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
2674{
2675	struct mem_cgroup *iter;
2676	int i;
2677
2678	memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT);
2679
2680	for_each_mem_cgroup_tree(iter, memcg) {
2681		for (i = 0; i < MEMCG_NR_STAT; i++)
2682			stat[i] += memcg_page_state(iter, i);
2683	}
2684}
2685
2686static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
2687{
2688	struct mem_cgroup *iter;
2689	int i;
2690
2691	memset(events, 0, sizeof(*events) * NR_VM_EVENT_ITEMS);
2692
2693	for_each_mem_cgroup_tree(iter, memcg) {
2694		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
2695			events[i] += memcg_sum_events(iter, i);
2696	}
2697}
2698
2699static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
2700{
2701	unsigned long val = 0;
2702
2703	if (mem_cgroup_is_root(memcg)) {
2704		struct mem_cgroup *iter;
2705
2706		for_each_mem_cgroup_tree(iter, memcg) {
2707			val += memcg_page_state(iter, MEMCG_CACHE);
2708			val += memcg_page_state(iter, MEMCG_RSS);
2709			if (swap)
2710				val += memcg_page_state(iter, MEMCG_SWAP);
2711		}
2712	} else {
2713		if (!swap)
2714			val = page_counter_read(&memcg->memory);
2715		else
2716			val = page_counter_read(&memcg->memsw);
2717	}
2718	return val;
2719}
2720
2721enum {
2722	RES_USAGE,
2723	RES_LIMIT,
2724	RES_MAX_USAGE,
2725	RES_FAILCNT,
2726	RES_SOFT_LIMIT,
2727};
2728
2729static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
2730			       struct cftype *cft)
2731{
2732	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2733	struct page_counter *counter;
2734
2735	switch (MEMFILE_TYPE(cft->private)) {
2736	case _MEM:
2737		counter = &memcg->memory;
2738		break;
2739	case _MEMSWAP:
2740		counter = &memcg->memsw;
2741		break;
2742	case _KMEM:
2743		counter = &memcg->kmem;
2744		break;
2745	case _TCP:
2746		counter = &memcg->tcpmem;
2747		break;
2748	default:
2749		BUG();
2750	}
2751
2752	switch (MEMFILE_ATTR(cft->private)) {
2753	case RES_USAGE:
2754		if (counter == &memcg->memory)
2755			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
2756		if (counter == &memcg->memsw)
2757			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
2758		return (u64)page_counter_read(counter) * PAGE_SIZE;
2759	case RES_LIMIT:
2760		return (u64)counter->limit * PAGE_SIZE;
2761	case RES_MAX_USAGE:
2762		return (u64)counter->watermark * PAGE_SIZE;
2763	case RES_FAILCNT:
2764		return counter->failcnt;
2765	case RES_SOFT_LIMIT:
2766		return (u64)memcg->soft_limit * PAGE_SIZE;
2767	default:
2768		BUG();
2769	}
2770}
2771
2772#ifndef CONFIG_SLOB
2773static int memcg_online_kmem(struct mem_cgroup *memcg)
2774{
2775	int memcg_id;
2776
2777	if (cgroup_memory_nokmem)
2778		return 0;
2779
2780	BUG_ON(memcg->kmemcg_id >= 0);
2781	BUG_ON(memcg->kmem_state);
2782
2783	memcg_id = memcg_alloc_cache_id();
2784	if (memcg_id < 0)
2785		return memcg_id;
2786
2787	static_branch_inc(&memcg_kmem_enabled_key);
2788	/*
2789	 * A memory cgroup is considered kmem-online as soon as it gets
2790	 * kmemcg_id. Setting the id after enabling static branching will
2791	 * guarantee no one starts accounting before all call sites are
2792	 * patched.
2793	 */
2794	memcg->kmemcg_id = memcg_id;
2795	memcg->kmem_state = KMEM_ONLINE;
2796	INIT_LIST_HEAD(&memcg->kmem_caches);
2797
2798	return 0;
2799}
2800
2801static void memcg_offline_kmem(struct mem_cgroup *memcg)
2802{
2803	struct cgroup_subsys_state *css;
2804	struct mem_cgroup *parent, *child;
2805	int kmemcg_id;
2806
2807	if (memcg->kmem_state != KMEM_ONLINE)
2808		return;
2809	/*
2810	 * Clear the online state before clearing memcg_caches array
2811	 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
2812	 * guarantees that no cache will be created for this cgroup
2813	 * after we are done (see memcg_create_kmem_cache()).
2814	 */
2815	memcg->kmem_state = KMEM_ALLOCATED;
2816
2817	memcg_deactivate_kmem_caches(memcg);
2818
2819	kmemcg_id = memcg->kmemcg_id;
2820	BUG_ON(kmemcg_id < 0);
2821
2822	parent = parent_mem_cgroup(memcg);
2823	if (!parent)
2824		parent = root_mem_cgroup;
2825
2826	/*
2827	 * Change kmemcg_id of this cgroup and all its descendants to the
2828	 * parent's id, and then move all entries from this cgroup's list_lrus
2829	 * to ones of the parent. After we have finished, all list_lrus
2830	 * corresponding to this cgroup are guaranteed to remain empty. The
2831	 * ordering is imposed by list_lru_node->lock taken by
2832	 * memcg_drain_all_list_lrus().
2833	 */
2834	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
2835	css_for_each_descendant_pre(css, &memcg->css) {
2836		child = mem_cgroup_from_css(css);
2837		BUG_ON(child->kmemcg_id != kmemcg_id);
2838		child->kmemcg_id = parent->kmemcg_id;
2839		if (!memcg->use_hierarchy)
2840			break;
2841	}
2842	rcu_read_unlock();
2843
2844	memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
2845
2846	memcg_free_cache_id(kmemcg_id);
2847}
2848
2849static void memcg_free_kmem(struct mem_cgroup *memcg)
2850{
2851	/* css_alloc() failed, offlining didn't happen */
2852	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
2853		memcg_offline_kmem(memcg);
2854
2855	if (memcg->kmem_state == KMEM_ALLOCATED) {
2856		memcg_destroy_kmem_caches(memcg);
2857		static_branch_dec(&memcg_kmem_enabled_key);
2858		WARN_ON(page_counter_read(&memcg->kmem));
2859	}
2860}
2861#else
2862static int memcg_online_kmem(struct mem_cgroup *memcg)
2863{
2864	return 0;
2865}
2866static void memcg_offline_kmem(struct mem_cgroup *memcg)
2867{
2868}
2869static void memcg_free_kmem(struct mem_cgroup *memcg)
2870{
2871}
2872#endif /* !CONFIG_SLOB */
2873
2874static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
2875				   unsigned long limit)
2876{
2877	int ret;
2878
2879	mutex_lock(&memcg_limit_mutex);
2880	ret = page_counter_limit(&memcg->kmem, limit);
2881	mutex_unlock(&memcg_limit_mutex);
2882	return ret;
2883}
2884
2885static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
2886{
2887	int ret;
2888
2889	mutex_lock(&memcg_limit_mutex);
2890
2891	ret = page_counter_limit(&memcg->tcpmem, limit);
2892	if (ret)
2893		goto out;
2894
2895	if (!memcg->tcpmem_active) {
2896		/*
2897		 * The active flag needs to be written after the static_key
2898		 * update. This is what guarantees that the socket activation
2899		 * function is the last one to run. See mem_cgroup_sk_alloc()
2900		 * for details, and note that we don't mark any socket as
2901		 * belonging to this memcg until that flag is up.
2902		 *
2903		 * We need to do this, because static_keys will span multiple
2904		 * sites, but we can't control their order. If we mark a socket
2905		 * as accounted, but the accounting functions are not patched in
2906		 * yet, we'll lose accounting.
2907		 *
2908		 * We never race with the readers in mem_cgroup_sk_alloc(),
2909		 * because when this value changes, the code to process it is not
2910		 * patched in yet.
2911		 */
2912		static_branch_inc(&memcg_sockets_enabled_key);
2913		memcg->tcpmem_active = true;
2914	}
2915out:
2916	mutex_unlock(&memcg_limit_mutex);
2917	return ret;
2918}
2919
2920/*
2921 * The user of this function is...
2922 * RES_LIMIT.
2923 */
2924static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
2925				char *buf, size_t nbytes, loff_t off)
2926{
2927	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2928	unsigned long nr_pages;
2929	int ret;
2930
2931	buf = strstrip(buf);
2932	ret = page_counter_memparse(buf, "-1", &nr_pages);
2933	if (ret)
2934		return ret;
2935
2936	switch (MEMFILE_ATTR(of_cft(of)->private)) {
2937	case RES_LIMIT:
2938		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
2939			ret = -EINVAL;
2940			break;
2941		}
2942		switch (MEMFILE_TYPE(of_cft(of)->private)) {
2943		case _MEM:
2944			ret = mem_cgroup_resize_limit(memcg, nr_pages, false);
2945			break;
2946		case _MEMSWAP:
2947			ret = mem_cgroup_resize_limit(memcg, nr_pages, true);
2948			break;
2949		case _KMEM:
2950			ret = memcg_update_kmem_limit(memcg, nr_pages);
2951			break;
2952		case _TCP:
2953			ret = memcg_update_tcp_limit(memcg, nr_pages);
2954			break;
2955		}
2956		break;
2957	case RES_SOFT_LIMIT:
2958		memcg->soft_limit = nr_pages;
2959		ret = 0;
2960		break;
2961	}
2962	return ret ?: nbytes;
2963}
2964
2965static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
2966				size_t nbytes, loff_t off)
2967{
2968	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2969	struct page_counter *counter;
2970
2971	switch (MEMFILE_TYPE(of_cft(of)->private)) {
2972	case _MEM:
2973		counter = &memcg->memory;
2974		break;
2975	case _MEMSWAP:
2976		counter = &memcg->memsw;
2977		break;
2978	case _KMEM:
2979		counter = &memcg->kmem;
2980		break;
2981	case _TCP:
2982		counter = &memcg->tcpmem;
2983		break;
2984	default:
2985		BUG();
2986	}
2987
2988	switch (MEMFILE_ATTR(of_cft(of)->private)) {
2989	case RES_MAX_USAGE:
2990		page_counter_reset_watermark(counter);
2991		break;
2992	case RES_FAILCNT:
2993		counter->failcnt = 0;
2994		break;
2995	default:
2996		BUG();
2997	}
2998
2999	return nbytes;
3000}
3001
3002static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3003					struct cftype *cft)
3004{
3005	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3006}
3007
3008#ifdef CONFIG_MMU
3009static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3010					struct cftype *cft, u64 val)
3011{
3012	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3013
3014	if (val & ~MOVE_MASK)
3015		return -EINVAL;
3016
3017	/*
3018	 * No kind of locking is needed in here, because ->can_attach() will
3019	 * check this value once in the beginning of the process, and then carry
3020	 * on with stale data. This means that changes to this value will only
3021	 * affect task migrations starting after the change.
3022	 */
3023	memcg->move_charge_at_immigrate = val;
3024	return 0;
3025}
3026#else
3027static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3028					struct cftype *cft, u64 val)
3029{
3030	return -ENOSYS;
3031}
3032#endif
3033
3034#ifdef CONFIG_NUMA
3035static int memcg_numa_stat_show(struct seq_file *m, void *v)
3036{
3037	struct numa_stat {
3038		const char *name;
3039		unsigned int lru_mask;
3040	};
3041
3042	static const struct numa_stat stats[] = {
3043		{ "total", LRU_ALL },
3044		{ "file", LRU_ALL_FILE },
3045		{ "anon", LRU_ALL_ANON },
3046		{ "unevictable", BIT(LRU_UNEVICTABLE) },
3047	};
3048	const struct numa_stat *stat;
3049	int nid;
3050	unsigned long nr;
3051	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3052
3053	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3054		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3055		seq_printf(m, "%s=%lu", stat->name, nr);
3056		for_each_node_state(nid, N_MEMORY) {
3057			nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3058							  stat->lru_mask);
3059			seq_printf(m, " N%d=%lu", nid, nr);
3060		}
3061		seq_putc(m, '\n');
3062	}
3063
3064	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3065		struct mem_cgroup *iter;
3066
3067		nr = 0;
3068		for_each_mem_cgroup_tree(iter, memcg)
3069			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3070		seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3071		for_each_node_state(nid, N_MEMORY) {
3072			nr = 0;
3073			for_each_mem_cgroup_tree(iter, memcg)
3074				nr += mem_cgroup_node_nr_lru_pages(
3075					iter, nid, stat->lru_mask);
3076			seq_printf(m, " N%d=%lu", nid, nr);
3077		}
3078		seq_putc(m, '\n');
3079	}
3080
3081	return 0;
3082}
3083#endif /* CONFIG_NUMA */
3084
3085/* Universal VM events cgroup1 shows, original sort order */
3086unsigned int memcg1_events[] = {
3087	PGPGIN,
3088	PGPGOUT,
3089	PGFAULT,
3090	PGMAJFAULT,
3091};
3092
3093static const char *const memcg1_event_names[] = {
3094	"pgpgin",
3095	"pgpgout",
3096	"pgfault",
3097	"pgmajfault",
3098};
3099
3100static int memcg_stat_show(struct seq_file *m, void *v)
3101{
3102	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3103	unsigned long memory, memsw;
3104	struct mem_cgroup *mi;
3105	unsigned int i;
3106
3107	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
3108	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3109
3110	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3111		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3112			continue;
3113		seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
3114			   memcg_page_state(memcg, memcg1_stats[i]) *
3115			   PAGE_SIZE);
3116	}
3117
3118	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3119		seq_printf(m, "%s %lu\n", memcg1_event_names[i],
3120			   memcg_sum_events(memcg, memcg1_events[i]));
3121
3122	for (i = 0; i < NR_LRU_LISTS; i++)
3123		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3124			   mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
3125
3126	/* Hierarchical information */
3127	memory = memsw = PAGE_COUNTER_MAX;
3128	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3129		memory = min(memory, mi->memory.limit);
3130		memsw = min(memsw, mi->memsw.limit);
3131	}
3132	seq_printf(m, "hierarchical_memory_limit %llu\n",
3133		   (u64)memory * PAGE_SIZE);
3134	if (do_memsw_account())
3135		seq_printf(m, "hierarchical_memsw_limit %llu\n",
3136			   (u64)memsw * PAGE_SIZE);
3137
3138	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3139		unsigned long long val = 0;
3140
3141		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3142			continue;
3143		for_each_mem_cgroup_tree(mi, memcg)
3144			val += memcg_page_state(mi, memcg1_stats[i]) *
3145			PAGE_SIZE;
3146		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], val);
3147	}
3148
3149	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) {
3150		unsigned long long val = 0;
3151
3152		for_each_mem_cgroup_tree(mi, memcg)
3153			val += memcg_sum_events(mi, memcg1_events[i]);
3154		seq_printf(m, "total_%s %llu\n", memcg1_event_names[i], val);
3155	}
3156
3157	for (i = 0; i < NR_LRU_LISTS; i++) {
3158		unsigned long long val = 0;
3159
3160		for_each_mem_cgroup_tree(mi, memcg)
3161			val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3162		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
3163	}
3164
3165#ifdef CONFIG_DEBUG_VM
3166	{
3167		pg_data_t *pgdat;
3168		struct mem_cgroup_per_node *mz;
3169		struct zone_reclaim_stat *rstat;
3170		unsigned long recent_rotated[2] = {0, 0};
3171		unsigned long recent_scanned[2] = {0, 0};
3172
3173		for_each_online_pgdat(pgdat) {
3174			mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
3175			rstat = &mz->lruvec.reclaim_stat;
3176
3177			recent_rotated[0] += rstat->recent_rotated[0];
3178			recent_rotated[1] += rstat->recent_rotated[1];
3179			recent_scanned[0] += rstat->recent_scanned[0];
3180			recent_scanned[1] += rstat->recent_scanned[1];
3181		}
3182		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3183		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3184		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3185		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
3186	}
3187#endif
3188
3189	return 0;
3190}
3191
3192static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3193				      struct cftype *cft)
3194{
3195	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3196
3197	return mem_cgroup_swappiness(memcg);
3198}
3199
3200static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3201				       struct cftype *cft, u64 val)
3202{
3203	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3204
3205	if (val > 100)
3206		return -EINVAL;
3207
3208	if (css->parent)
3209		memcg->swappiness = val;
3210	else
3211		vm_swappiness = val;
3212
3213	return 0;
3214}
3215
3216static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3217{
3218	struct mem_cgroup_threshold_ary *t;
3219	unsigned long usage;
3220	int i;
3221
3222	rcu_read_lock();
3223	if (!swap)
3224		t = rcu_dereference(memcg->thresholds.primary);
3225	else
3226		t = rcu_dereference(memcg->memsw_thresholds.primary);
3227
3228	if (!t)
3229		goto unlock;
3230
3231	usage = mem_cgroup_usage(memcg, swap);
3232
3233	/*
3234	 * current_threshold points to threshold just below or equal to usage.
3235	 * If it's not true, a threshold was crossed after last
3236	 * call of __mem_cgroup_threshold().
3237	 */
3238	i = t->current_threshold;
3239
3240	/*
3241	 * Iterate backward over array of thresholds starting from
3242	 * current_threshold and check if a threshold is crossed.
3243	 * If none of thresholds below usage is crossed, we read
3244	 * only one element of the array here.
3245	 */
3246	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3247		eventfd_signal(t->entries[i].eventfd, 1);
3248
3249	/* i = current_threshold + 1 */
3250	i++;
3251
3252	/*
3253	 * Iterate forward over array of thresholds starting from
3254	 * current_threshold+1 and check if a threshold is crossed.
3255	 * If none of thresholds above usage is crossed, we read
3256	 * only one element of the array here.
3257	 */
3258	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3259		eventfd_signal(t->entries[i].eventfd, 1);
3260
3261	/* Update current_threshold */
3262	t->current_threshold = i - 1;
3263unlock:
3264	rcu_read_unlock();
3265}
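/*
 * The same bidirectional scan as above on a plain sorted array, so the
 * bookkeeping is easy to follow: current points at the highest threshold
 * <= the previous usage; on a new usage every threshold crossed in either
 * direction "fires" (the kernel signals an eventfd there), and current is
 * repositioned.  A standalone userspace sketch; the values are arbitrary.
 */
#include <stdio.h>

static const unsigned long thresholds[] = { 4, 8, 16 };
static int size = 3;
static int current_idx = 1;             /* last usage was between 8 and 16 */

static void check_thresholds(unsigned long usage)
{
        int i = current_idx;

        /* walk down: thresholds that were <= old usage but are > new usage */
        for (; i >= 0 && thresholds[i] > usage; i--)
                printf("fire (down) %lu\n", thresholds[i]);

        /* walk up: thresholds that were > old usage but are <= new usage */
        for (i++; i < size && thresholds[i] <= usage; i++)
                printf("fire (up) %lu\n", thresholds[i]);

        current_idx = i - 1;            /* highest threshold <= usage */
}

int main(void)
{
        check_thresholds(3);    /* fires 8 then 4; current becomes -1 */
        check_thresholds(20);   /* fires 4, 8, 16; current becomes 2  */
        return 0;
}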
3266
3267static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3268{
3269	while (memcg) {
3270		__mem_cgroup_threshold(memcg, false);
3271		if (do_memsw_account())
3272			__mem_cgroup_threshold(memcg, true);
3273
3274		memcg = parent_mem_cgroup(memcg);
3275	}
3276}
3277
3278static int compare_thresholds(const void *a, const void *b)
3279{
3280	const struct mem_cgroup_threshold *_a = a;
3281	const struct mem_cgroup_threshold *_b = b;
3282
3283	if (_a->threshold > _b->threshold)
3284		return 1;
3285
3286	if (_a->threshold < _b->threshold)
3287		return -1;
3288
3289	return 0;
3290}
3291
3292static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
3293{
3294	struct mem_cgroup_eventfd_list *ev;
3295
3296	spin_lock(&memcg_oom_lock);
3297
3298	list_for_each_entry(ev, &memcg->oom_notify, list)
3299		eventfd_signal(ev->eventfd, 1);
3300
3301	spin_unlock(&memcg_oom_lock);
3302	return 0;
3303}
3304
3305static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
3306{
3307	struct mem_cgroup *iter;
3308
3309	for_each_mem_cgroup_tree(iter, memcg)
3310		mem_cgroup_oom_notify_cb(iter);
3311}
3312
3313static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3314	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
3315{
3316	struct mem_cgroup_thresholds *thresholds;
3317	struct mem_cgroup_threshold_ary *new;
3318	unsigned long threshold;
3319	unsigned long usage;
3320	int i, size, ret;
3321
3322	ret = page_counter_memparse(args, "-1", &threshold);
3323	if (ret)
3324		return ret;
3325
3326	mutex_lock(&memcg->thresholds_lock);
3327
3328	if (type == _MEM) {
3329		thresholds = &memcg->thresholds;
3330		usage = mem_cgroup_usage(memcg, false);
3331	} else if (type == _MEMSWAP) {
3332		thresholds = &memcg->memsw_thresholds;
3333		usage = mem_cgroup_usage(memcg, true);
3334	} else
3335		BUG();
3336
3337	/* Check if a threshold crossed before adding a new one */
3338	if (thresholds->primary)
3339		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
3340
3341	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3342
3343	/* Allocate memory for new array of thresholds */
3344	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3345			GFP_KERNEL);
3346	if (!new) {
3347		ret = -ENOMEM;
3348		goto unlock;
3349	}
3350	new->size = size;
3351
3352	/* Copy thresholds (if any) to new array */
3353	if (thresholds->primary) {
3354		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3355				sizeof(struct mem_cgroup_threshold));
3356	}
3357
3358	/* Add new threshold */
3359	new->entries[size - 1].eventfd = eventfd;
3360	new->entries[size - 1].threshold = threshold;
3361
3362	/* Sort thresholds. Registering a new threshold isn't time-critical */
3363	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3364			compare_thresholds, NULL);
3365
3366	/* Find current threshold */
3367	new->current_threshold = -1;
3368	for (i = 0; i < size; i++) {
3369		if (new->entries[i].threshold <= usage) {
3370			/*
3371			 * new->current_threshold will not be used until
3372			 * rcu_assign_pointer(), so it's safe to increment
3373			 * it here.
3374			 */
3375			++new->current_threshold;
3376		} else
3377			break;
3378	}
3379
3380	/* Free old spare buffer and save old primary buffer as spare */
3381	kfree(thresholds->spare);
3382	thresholds->spare = thresholds->primary;
3383
3384	rcu_assign_pointer(thresholds->primary, new);
3385
3386	/* To be sure that nobody uses thresholds */
3387	synchronize_rcu();
3388
3389unlock:
3390	mutex_unlock(&memcg->thresholds_lock);
3391
3392	return ret;
3393}
3394
3395static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3396	struct eventfd_ctx *eventfd, const char *args)
3397{
3398	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
3399}
3400
3401static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
3402	struct eventfd_ctx *eventfd, const char *args)
3403{
3404	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
3405}
3406
3407static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3408	struct eventfd_ctx *eventfd, enum res_type type)
3409{
3410	struct mem_cgroup_thresholds *thresholds;
3411	struct mem_cgroup_threshold_ary *new;
3412	unsigned long usage;
3413	int i, j, size;
3414
3415	mutex_lock(&memcg->thresholds_lock);
3416
3417	if (type == _MEM) {
3418		thresholds = &memcg->thresholds;
3419		usage = mem_cgroup_usage(memcg, false);
3420	} else if (type == _MEMSWAP) {
3421		thresholds = &memcg->memsw_thresholds;
3422		usage = mem_cgroup_usage(memcg, true);
3423	} else
3424		BUG();
3425
3426	if (!thresholds->primary)
3427		goto unlock;
3428
3429	/* Check if a threshold crossed before removing */
3430	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
3431
3432	/* Calculate the new number of thresholds */
3433	size = 0;
3434	for (i = 0; i < thresholds->primary->size; i++) {
3435		if (thresholds->primary->entries[i].eventfd != eventfd)
3436			size++;
3437	}
3438
3439	new = thresholds->spare;
3440
3441	/* Set thresholds array to NULL if we don't have thresholds */
3442	if (!size) {
3443		kfree(new);
3444		new = NULL;
3445		goto swap_buffers;
3446	}
3447
3448	new->size = size;
3449
3450	/* Copy thresholds and find current threshold */
3451	new->current_threshold = -1;
3452	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3453		if (thresholds->primary->entries[i].eventfd == eventfd)
3454			continue;
3455
3456		new->entries[j] = thresholds->primary->entries[i];
3457		if (new->entries[j].threshold <= usage) {
3458			/*
3459			 * new->current_threshold will not be used
3460			 * until rcu_assign_pointer(), so it's safe to increment
3461			 * it here.
3462			 */
3463			++new->current_threshold;
3464		}
3465		j++;
3466	}
3467
3468swap_buffers:
3469	/* Swap primary and spare array */
3470	thresholds->spare = thresholds->primary;
3471
3472	rcu_assign_pointer(thresholds->primary, new);
3473
3474	/* To be sure that nobody uses thresholds */
3475	synchronize_rcu();
3476
3477	/* If all events are unregistered, free the spare array */
3478	if (!new) {
3479		kfree(thresholds->spare);
3480		thresholds->spare = NULL;
3481	}
3482unlock:
3483	mutex_unlock(&memcg->thresholds_lock);
3484}
3485
3486static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3487	struct eventfd_ctx *eventfd)
3488{
3489	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
3490}
3491
3492static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3493	struct eventfd_ctx *eventfd)
3494{
3495	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
3496}
3497
3498static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
3499	struct eventfd_ctx *eventfd, const char *args)
3500{
3501	struct mem_cgroup_eventfd_list *event;
3502
3503	event = kmalloc(sizeof(*event),	GFP_KERNEL);
3504	if (!event)
3505		return -ENOMEM;
3506
3507	spin_lock(&memcg_oom_lock);
3508
3509	event->eventfd = eventfd;
3510	list_add(&event->list, &memcg->oom_notify);
3511
3512	/* already in OOM ? */
3513	if (memcg->under_oom)
3514		eventfd_signal(eventfd, 1);
3515	spin_unlock(&memcg_oom_lock);
3516
3517	return 0;
3518}
3519
3520static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
3521	struct eventfd_ctx *eventfd)
3522{
3523	struct mem_cgroup_eventfd_list *ev, *tmp;
3524
3525	spin_lock(&memcg_oom_lock);
3526
3527	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
3528		if (ev->eventfd == eventfd) {
3529			list_del(&ev->list);
3530			kfree(ev);
3531		}
3532	}
3533
3534	spin_unlock(&memcg_oom_lock);
3535}
3536
3537static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3538{
3539	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3540
3541	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
3542	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
3543	seq_printf(sf, "oom_kill %lu\n", memcg_sum_events(memcg, OOM_KILL));
3544	return 0;
3545}
3546
3547static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3548	struct cftype *cft, u64 val)
3549{
3550	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3551
3552	/* cannot set to root cgroup and only 0 and 1 are allowed */
3553	if (!css->parent || !((val == 0) || (val == 1)))
3554		return -EINVAL;
3555
3556	memcg->oom_kill_disable = val;
3557	if (!val)
3558		memcg_oom_recover(memcg);
3559
3560	return 0;
3561}
3562
3563#ifdef CONFIG_CGROUP_WRITEBACK
3564
3565struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
3566{
3567	return &memcg->cgwb_list;
3568}
3569
3570static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3571{
3572	return wb_domain_init(&memcg->cgwb_domain, gfp);
3573}
3574
3575static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3576{
3577	wb_domain_exit(&memcg->cgwb_domain);
3578}
3579
3580static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3581{
3582	wb_domain_size_changed(&memcg->cgwb_domain);
3583}
3584
3585struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3586{
3587	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3588
3589	if (!memcg->css.parent)
3590		return NULL;
3591
3592	return &memcg->cgwb_domain;
3593}
3594
3595/**
3596 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3597 * @wb: bdi_writeback in question
3598 * @pfilepages: out parameter for number of file pages
3599 * @pheadroom: out parameter for number of allocatable pages according to memcg
3600 * @pdirty: out parameter for number of dirty pages
3601 * @pwriteback: out parameter for number of pages under writeback
3602 *
3603 * Determine the numbers of file, headroom, dirty, and writeback pages in
3604 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
3605 * is a bit more involved.
3606 *
3607 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
3608 * headroom is calculated as the lowest headroom of itself and the
3609 * ancestors.  Note that this doesn't consider the actual amount of
3610 * available memory in the system.  The caller should further cap
3611 * *@pheadroom accordingly.
3612 */
3613void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3614			 unsigned long *pheadroom, unsigned long *pdirty,
3615			 unsigned long *pwriteback)
3616{
3617	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3618	struct mem_cgroup *parent;
3619
3620	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
3621
3622	/* this should eventually include NR_UNSTABLE_NFS */
3623	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
3624	*pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3625						     (1 << LRU_ACTIVE_FILE));
3626	*pheadroom = PAGE_COUNTER_MAX;
3627
3628	while ((parent = parent_mem_cgroup(memcg))) {
3629		unsigned long ceiling = min(memcg->memory.limit, memcg->high);
3630		unsigned long used = page_counter_read(&memcg->memory);
3631
3632		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3633		memcg = parent;
3634	}
3635}
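/*
 * The headroom walk above with concrete numbers: at each level take
 * min(limit, high) as the ceiling, subtract the usage (clamped at zero),
 * and keep the smallest value seen.  A standalone sketch; the figures are
 * in pages and chosen arbitrarily.
 */
#include <stdio.h>

struct level { unsigned long limit, high, used; };

static unsigned long headroom(const struct level *lv, int n)
{
        unsigned long room = ~0UL;      /* PAGE_COUNTER_MAX stand-in */
        int i;

        for (i = 0; i < n; i++) {
                unsigned long ceiling = lv[i].limit < lv[i].high ?
                                        lv[i].limit : lv[i].high;
                unsigned long used = lv[i].used < ceiling ? lv[i].used : ceiling;

                if (ceiling - used < room)
                        room = ceiling - used;
        }
        return room;
}

int main(void)
{
        /* child: limit=400 high=200 used=150; parent: limit=300 high=300 used=280 */
        struct level lv[] = {
                { .limit = 400, .high = 200, .used = 150 },
                { .limit = 300, .high = 300, .used = 280 },
        };

        /* the child allows 50 more pages but the parent only 20 -> 20 */
        printf("headroom = %lu\n", headroom(lv, 2));
        return 0;
}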
3636
3637#else	/* CONFIG_CGROUP_WRITEBACK */
3638
3639static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3640{
3641	return 0;
3642}
3643
3644static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3645{
3646}
3647
3648static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3649{
3650}
3651
3652#endif	/* CONFIG_CGROUP_WRITEBACK */
3653
3654/*
3655 * DO NOT USE IN NEW FILES.
3656 *
3657 * "cgroup.event_control" implementation.
3658 *
3659 * This is way over-engineered.  It tries to support fully configurable
3660 * events for each user.  Such a level of flexibility is completely
3661 * unnecessary, especially in light of the planned unified hierarchy.
3662 *
3663 * Please deprecate this and replace with something simpler if at all
3664 * possible.
3665 */
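/*
 * How userspace drives this legacy interface: create an eventfd, open the
 * control file whose events you want, write "<event_fd> <control_fd> <args>"
 * into cgroup.event_control, then block on the eventfd.  A minimal userspace
 * sketch; the /sys/fs/cgroup/memory/mygrp path and the 50M threshold are
 * example values and assume a cgroup v1 memory hierarchy is mounted there.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/eventfd.h>

int main(void)
{
        const char *grp = "/sys/fs/cgroup/memory/mygrp";
        char path[256], cmd[64];
        int efd, cfd, ctrl;
        uint64_t ticks;

        efd = eventfd(0, 0);
        snprintf(path, sizeof(path), "%s/memory.usage_in_bytes", grp);
        cfd = open(path, O_RDONLY);
        snprintf(path, sizeof(path), "%s/cgroup.event_control", grp);
        ctrl = open(path, O_WRONLY);
        if (efd < 0 || cfd < 0 || ctrl < 0)
                return 1;

        /* arm a usage threshold at 50M: "<event_fd> <control_fd> <args>" */
        snprintf(cmd, sizeof(cmd), "%d %d %s", efd, cfd, "50M");
        if (write(ctrl, cmd, strlen(cmd)) < 0)
                return 1;

        read(efd, &ticks, sizeof(ticks));  /* blocks until the threshold fires */
        printf("usage crossed the threshold %llu time(s)\n",
               (unsigned long long)ticks);
        return 0;
}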
3666
3667/*
3668 * Unregister event and free resources.
3669 *
3670 * Gets called from workqueue.
3671 */
3672static void memcg_event_remove(struct work_struct *work)
3673{
3674	struct mem_cgroup_event *event =
3675		container_of(work, struct mem_cgroup_event, remove);
3676	struct mem_cgroup *memcg = event->memcg;
3677
3678	remove_wait_queue(event->wqh, &event->wait);
3679
3680	event->unregister_event(memcg, event->eventfd);
3681
3682	/* Notify userspace the event is going away. */
3683	eventfd_signal(event->eventfd, 1);
3684
3685	eventfd_ctx_put(event->eventfd);
3686	kfree(event);
3687	css_put(&memcg->css);
3688}
3689
3690/*
3691 * Gets called on EPOLLHUP on eventfd when user closes it.
3692 *
3693 * Called with wqh->lock held and interrupts disabled.
3694 */
3695static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
3696			    int sync, void *key)
3697{
3698	struct mem_cgroup_event *event =
3699		container_of(wait, struct mem_cgroup_event, wait);
3700	struct mem_cgroup *memcg = event->memcg;
3701	__poll_t flags = key_to_poll(key);
3702
3703	if (flags & EPOLLHUP) {
3704		/*
3705		 * If the event has been detached at cgroup removal, we
3706		 * can simply return knowing the other side will cleanup
3707		 * for us.
3708		 *
3709		 * We can't race against event freeing since the other
3710		 * side will require wqh->lock via remove_wait_queue(),
3711		 * which we hold.
3712		 */
3713		spin_lock(&memcg->event_list_lock);
3714		if (!list_empty(&event->list)) {
3715			list_del_init(&event->list);
3716			/*
3717			 * We are in atomic context, but cgroup_event_remove()
3718			 * may sleep, so we have to call it in workqueue.
3719			 */
3720			schedule_work(&event->remove);
3721		}
3722		spin_unlock(&memcg->event_list_lock);
3723	}
3724
3725	return 0;
3726}
3727
3728static void memcg_event_ptable_queue_proc(struct file *file,
3729		wait_queue_head_t *wqh, poll_table *pt)
3730{
3731	struct mem_cgroup_event *event =
3732		container_of(pt, struct mem_cgroup_event, pt);
3733
3734	event->wqh = wqh;
3735	add_wait_queue(wqh, &event->wait);
3736}
3737
3738/*
3739 * DO NOT USE IN NEW FILES.
3740 *
3741 * Parse input and register new cgroup event handler.
3742 *
3743 * Input must be in format '<event_fd> <control_fd> <args>'.
3744 * Interpretation of args is defined by control file implementation.
3745 */
3746static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
3747					 char *buf, size_t nbytes, loff_t off)
3748{
3749	struct cgroup_subsys_state *css = of_css(of);
3750	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3751	struct mem_cgroup_event *event;
3752	struct cgroup_subsys_state *cfile_css;
3753	unsigned int efd, cfd;
3754	struct fd efile;
3755	struct fd cfile;
3756	const char *name;
3757	char *endp;
3758	int ret;
3759
3760	buf = strstrip(buf);
3761
3762	efd = simple_strtoul(buf, &endp, 10);
3763	if (*endp != ' ')
3764		return -EINVAL;
3765	buf = endp + 1;
3766
3767	cfd = simple_strtoul(buf, &endp, 10);
3768	if ((*endp != ' ') && (*endp != '\0'))
3769		return -EINVAL;
3770	buf = endp + 1;
3771
3772	event = kzalloc(sizeof(*event), GFP_KERNEL);
3773	if (!event)
3774		return -ENOMEM;
3775
3776	event->memcg = memcg;
3777	INIT_LIST_HEAD(&event->list);
3778	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
3779	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
3780	INIT_WORK(&event->remove, memcg_event_remove);
3781
3782	efile = fdget(efd);
3783	if (!efile.file) {
3784		ret = -EBADF;
3785		goto out_kfree;
3786	}
3787
3788	event->eventfd = eventfd_ctx_fileget(efile.file);
3789	if (IS_ERR(event->eventfd)) {
3790		ret = PTR_ERR(event->eventfd);
3791		goto out_put_efile;
3792	}
3793
3794	cfile = fdget(cfd);
3795	if (!cfile.file) {
3796		ret = -EBADF;
3797		goto out_put_eventfd;
3798	}
3799
3800	/* the process needs read permission on the control file */
3801	/* AV: shouldn't we check that it's been opened for read instead? */
3802	ret = inode_permission(file_inode(cfile.file), MAY_READ);
3803	if (ret < 0)
3804		goto out_put_cfile;
3805
3806	/*
3807	 * Determine the event callbacks and set them in @event.  This used
3808	 * to be done via struct cftype but cgroup core no longer knows
3809	 * about these events.  The following is crude but the whole thing
3810	 * is for compatibility anyway.
3811	 *
3812	 * DO NOT ADD NEW FILES.
3813	 */
3814	name = cfile.file->f_path.dentry->d_name.name;
3815
3816	if (!strcmp(name, "memory.usage_in_bytes")) {
3817		event->register_event = mem_cgroup_usage_register_event;
3818		event->unregister_event = mem_cgroup_usage_unregister_event;
3819	} else if (!strcmp(name, "memory.oom_control")) {
3820		event->register_event = mem_cgroup_oom_register_event;
3821		event->unregister_event = mem_cgroup_oom_unregister_event;
3822	} else if (!strcmp(name, "memory.pressure_level")) {
3823		event->register_event = vmpressure_register_event;
3824		event->unregister_event = vmpressure_unregister_event;
3825	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
3826		event->register_event = memsw_cgroup_usage_register_event;
3827		event->unregister_event = memsw_cgroup_usage_unregister_event;
3828	} else {
3829		ret = -EINVAL;
3830		goto out_put_cfile;
3831	}
3832
3833	/*
3834	 * Verify that @cfile belongs to @css.  Also, remaining events are
3835	 * automatically removed on cgroup destruction but the removal is
3836	 * asynchronous, so take an extra ref on @css.
3837	 */
3838	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
3839					       &memory_cgrp_subsys);
3840	ret = -EINVAL;
3841	if (IS_ERR(cfile_css))
3842		goto out_put_cfile;
3843	if (cfile_css != css) {
3844		css_put(cfile_css);
3845		goto out_put_cfile;
3846	}
3847
3848	ret = event->register_event(memcg, event->eventfd, buf);
3849	if (ret)
3850		goto out_put_css;
3851
3852	efile.file->f_op->poll(efile.file, &event->pt);
3853
3854	spin_lock(&memcg->event_list_lock);
3855	list_add(&event->list, &memcg->event_list);
3856	spin_unlock(&memcg->event_list_lock);
3857
3858	fdput(cfile);
3859	fdput(efile);
3860
3861	return nbytes;
3862
3863out_put_css:
3864	css_put(css);
3865out_put_cfile:
3866	fdput(cfile);
3867out_put_eventfd:
3868	eventfd_ctx_put(event->eventfd);
3869out_put_efile:
3870	fdput(efile);
3871out_kfree:
3872	kfree(event);
3873
3874	return ret;
3875}
3876
3877static struct cftype mem_cgroup_legacy_files[] = {
3878	{
3879		.name = "usage_in_bytes",
3880		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3881		.read_u64 = mem_cgroup_read_u64,
3882	},
3883	{
3884		.name = "max_usage_in_bytes",
3885		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
3886		.write = mem_cgroup_reset,
3887		.read_u64 = mem_cgroup_read_u64,
3888	},
3889	{
3890		.name = "limit_in_bytes",
3891		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
3892		.write = mem_cgroup_write,
3893		.read_u64 = mem_cgroup_read_u64,
3894	},
3895	{
3896		.name = "soft_limit_in_bytes",
3897		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3898		.write = mem_cgroup_write,
3899		.read_u64 = mem_cgroup_read_u64,
3900	},
3901	{
3902		.name = "failcnt",
3903		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
3904		.write = mem_cgroup_reset,
3905		.read_u64 = mem_cgroup_read_u64,
3906	},
3907	{
3908		.name = "stat",
3909		.seq_show = memcg_stat_show,
3910	},
3911	{
3912		.name = "force_empty",
3913		.write = mem_cgroup_force_empty_write,
3914	},
3915	{
3916		.name = "use_hierarchy",
3917		.write_u64 = mem_cgroup_hierarchy_write,
3918		.read_u64 = mem_cgroup_hierarchy_read,
3919	},
3920	{
3921		.name = "cgroup.event_control",		/* XXX: for compat */
3922		.write = memcg_write_event_control,
3923		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
3924	},
3925	{
3926		.name = "swappiness",
3927		.read_u64 = mem_cgroup_swappiness_read,
3928		.write_u64 = mem_cgroup_swappiness_write,
3929	},
3930	{
3931		.name = "move_charge_at_immigrate",
3932		.read_u64 = mem_cgroup_move_charge_read,
3933		.write_u64 = mem_cgroup_move_charge_write,
3934	},
3935	{
3936		.name = "oom_control",
3937		.seq_show = mem_cgroup_oom_control_read,
3938		.write_u64 = mem_cgroup_oom_control_write,
3939		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3940	},
3941	{
3942		.name = "pressure_level",
3943	},
3944#ifdef CONFIG_NUMA
3945	{
3946		.name = "numa_stat",
3947		.seq_show = memcg_numa_stat_show,
3948	},
3949#endif
3950	{
3951		.name = "kmem.limit_in_bytes",
3952		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
3953		.write = mem_cgroup_write,
3954		.read_u64 = mem_cgroup_read_u64,
3955	},
3956	{
3957		.name = "kmem.usage_in_bytes",
3958		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
3959		.read_u64 = mem_cgroup_read_u64,
3960	},
3961	{
3962		.name = "kmem.failcnt",
3963		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
3964		.write = mem_cgroup_reset,
3965		.read_u64 = mem_cgroup_read_u64,
3966	},
3967	{
3968		.name = "kmem.max_usage_in_bytes",
3969		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
3970		.write = mem_cgroup_reset,
3971		.read_u64 = mem_cgroup_read_u64,
3972	},
3973#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
3974	{
3975		.name = "kmem.slabinfo",
3976		.seq_start = memcg_slab_start,
3977		.seq_next = memcg_slab_next,
3978		.seq_stop = memcg_slab_stop,
3979		.seq_show = memcg_slab_show,
3980	},
3981#endif
3982	{
3983		.name = "kmem.tcp.limit_in_bytes",
3984		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
3985		.write = mem_cgroup_write,
3986		.read_u64 = mem_cgroup_read_u64,
3987	},
3988	{
3989		.name = "kmem.tcp.usage_in_bytes",
3990		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
3991		.read_u64 = mem_cgroup_read_u64,
3992	},
3993	{
3994		.name = "kmem.tcp.failcnt",
3995		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
3996		.write = mem_cgroup_reset,
3997		.read_u64 = mem_cgroup_read_u64,
3998	},
3999	{
4000		.name = "kmem.tcp.max_usage_in_bytes",
4001		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4002		.write = mem_cgroup_reset,
4003		.read_u64 = mem_cgroup_read_u64,
4004	},
4005	{ },	/* terminate */
4006};
4007
4008/*
4009 * Private memory cgroup IDR
4010 *
4011 * Swap-out records and page cache shadow entries need to store memcg
4012 * references in constrained space, so we maintain an ID space that is
4013 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
4014 * memory-controlled cgroups to 64k.
4015 *
4016 * However, there usually are many references to the offline CSS after
4017 * the cgroup has been destroyed, such as page cache or reclaimable
4018 * slab objects, that don't need to hang on to the ID. We want to keep
4019 * those dead CSS from occupying IDs, or we might quickly exhaust the
4020 * relatively small ID space and prevent the creation of new cgroups
4021 * even when there are far fewer than 64k cgroups - possibly none.
4022 *
4023 * Maintain a private 16-bit ID space for memcg, and allow the ID to
4024 * be freed and recycled when it's no longer needed, which is usually
4025 * when the CSS is offlined.
4026 *
4027 * The only exception to that are records of swapped out tmpfs/shmem
4028 * pages that need to be attributed to live ancestors on swapin. But
4029 * those references are manageable from userspace.
4030 */
4031
4032static DEFINE_IDR(mem_cgroup_idr);
4033
4034static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
4035{
4036	VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0);
4037	atomic_add(n, &memcg->id.ref);
4038}
4039
4040static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
4041{
4042	VM_BUG_ON(atomic_read(&memcg->id.ref) < n);
4043	if (atomic_sub_and_test(n, &memcg->id.ref)) {
4044		idr_remove(&mem_cgroup_idr, memcg->id.id);
4045		memcg->id.id = 0;
4046
4047		/* Memcg ID pins CSS */
4048		css_put(&memcg->css);
4049	}
4050}
4051
4052static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
4053{
4054	mem_cgroup_id_get_many(memcg, 1);
4055}
4056
4057static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
4058{
4059	mem_cgroup_id_put_many(memcg, 1);
4060}
4061
4062/**
4063 * mem_cgroup_from_id - look up a memcg from a memcg id
4064 * @id: the memcg id to look up
4065 *
4066 * Caller must hold rcu_read_lock().
4067 */
4068struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
4069{
4070	WARN_ON_ONCE(!rcu_read_lock_held());
4071	return idr_find(&mem_cgroup_idr, id);
4072}
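
/*
 * A typical lookup, as done from the swapin charge path later in this
 * file: the ID only pins the memcg structure under RCU, so callers that
 * need the css to stay online must still try to take a reference.
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */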
4073
4074static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4075{
4076	struct mem_cgroup_per_node *pn;
4077	int tmp = node;
4078	/*
4079	 * This routine is called against possible nodes.
4080	 * But it's a BUG to call kmalloc() against an offline node.
4081	 *
4082	 * TODO: this routine can waste much memory for nodes which will
4083	 *       never be onlined. It's better to use memory hotplug callback
4084	 *       function.
4085	 */
4086	if (!node_state(node, N_NORMAL_MEMORY))
4087		tmp = -1;
4088	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4089	if (!pn)
4090		return 1;
4091
4092	pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
4093	if (!pn->lruvec_stat_cpu) {
4094		kfree(pn);
4095		return 1;
4096	}
4097
4098	lruvec_init(&pn->lruvec);
4099	pn->usage_in_excess = 0;
4100	pn->on_tree = false;
4101	pn->memcg = memcg;
4102
4103	memcg->nodeinfo[node] = pn;
4104	return 0;
4105}
4106
4107static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4108{
4109	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
4110
4111	if (!pn)
4112		return;
4113
4114	free_percpu(pn->lruvec_stat_cpu);
 
4115	kfree(pn);
4116}
4117
4118static void __mem_cgroup_free(struct mem_cgroup *memcg)
4119{
4120	int node;
4121
4122	for_each_node(node)
4123		free_mem_cgroup_per_node_info(memcg, node);
4124	free_percpu(memcg->stat_cpu);
 
4125	kfree(memcg);
4126}
4127
4128static void mem_cgroup_free(struct mem_cgroup *memcg)
4129{
4130	memcg_wb_domain_exit(memcg);
4131	__mem_cgroup_free(memcg);
4132}
4133
4134static struct mem_cgroup *mem_cgroup_alloc(void)
4135{
4136	struct mem_cgroup *memcg;
4137	size_t size;
4138	int node;
 
4139
4140	size = sizeof(struct mem_cgroup);
4141	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
4142
4143	memcg = kzalloc(size, GFP_KERNEL);
4144	if (!memcg)
4145		return NULL;
4146
4147	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
4148				 1, MEM_CGROUP_ID_MAX,
4149				 GFP_KERNEL);
4150	if (memcg->id.id < 0)
4151		goto fail;
4152
4153	memcg->stat_cpu = alloc_percpu(struct mem_cgroup_stat_cpu);
4154	if (!memcg->stat_cpu)
4155		goto fail;
4156
4157	for_each_node(node)
4158		if (alloc_mem_cgroup_per_node_info(memcg, node))
4159			goto fail;
4160
4161	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4162		goto fail;
4163
4164	INIT_WORK(&memcg->high_work, high_work_func);
4165	memcg->last_scanned_node = MAX_NUMNODES;
4166	INIT_LIST_HEAD(&memcg->oom_notify);
4167	mutex_init(&memcg->thresholds_lock);
4168	spin_lock_init(&memcg->move_lock);
4169	vmpressure_init(&memcg->vmpressure);
4170	INIT_LIST_HEAD(&memcg->event_list);
4171	spin_lock_init(&memcg->event_list_lock);
4172	memcg->socket_pressure = jiffies;
4173#ifndef CONFIG_SLOB
4174	memcg->kmemcg_id = -1;
4175#endif
4176#ifdef CONFIG_CGROUP_WRITEBACK
4177	INIT_LIST_HEAD(&memcg->cgwb_list);
4178#endif
4179	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
4180	return memcg;
4181fail:
4182	if (memcg->id.id > 0)
4183		idr_remove(&mem_cgroup_idr, memcg->id.id);
4184	__mem_cgroup_free(memcg);
4185	return NULL;
4186}
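
/*
 * Note on the allocation size above: struct mem_cgroup ends with the
 * nodeinfo[] flexible array, so the per-node pointers are tacked onto
 * the same allocation.  For example, with nr_node_ids == 2 and 8-byte
 * pointers the size grows by 16 bytes for the nodeinfo slots, which
 * alloc_mem_cgroup_per_node_info() then fills in.
 */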
4187
4188static struct cgroup_subsys_state * __ref
4189mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4190{
4191	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4192	struct mem_cgroup *memcg;
4193	long error = -ENOMEM;
4194
4195	memcg = mem_cgroup_alloc();
4196	if (!memcg)
4197		return ERR_PTR(error);
4198
4199	memcg->high = PAGE_COUNTER_MAX;
4200	memcg->soft_limit = PAGE_COUNTER_MAX;
4201	if (parent) {
4202		memcg->swappiness = mem_cgroup_swappiness(parent);
4203		memcg->oom_kill_disable = parent->oom_kill_disable;
4204	}
4205	if (parent && parent->use_hierarchy) {
4206		memcg->use_hierarchy = true;
4207		page_counter_init(&memcg->memory, &parent->memory);
4208		page_counter_init(&memcg->swap, &parent->swap);
4209		page_counter_init(&memcg->memsw, &parent->memsw);
4210		page_counter_init(&memcg->kmem, &parent->kmem);
4211		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
4212	} else {
4213		page_counter_init(&memcg->memory, NULL);
4214		page_counter_init(&memcg->swap, NULL);
4215		page_counter_init(&memcg->memsw, NULL);
4216		page_counter_init(&memcg->kmem, NULL);
4217		page_counter_init(&memcg->tcpmem, NULL);
4218		/*
4219		 * Deeper hierarchy with use_hierarchy == false doesn't make
4220		 * much sense so let cgroup subsystem know about this
4221		 * unfortunate state in our controller.
4222		 */
4223		if (parent != root_mem_cgroup)
4224			memory_cgrp_subsys.broken_hierarchy = true;
4225	}
4226
4227	/* The following stuff does not apply to the root */
4228	if (!parent) {
4229		root_mem_cgroup = memcg;
4230		return &memcg->css;
4231	}
4232
4233	error = memcg_online_kmem(memcg);
4234	if (error)
4235		goto fail;
4236
4237	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4238		static_branch_inc(&memcg_sockets_enabled_key);
4239
4240	return &memcg->css;
4241fail:
4242	mem_cgroup_free(memcg);
4243	return ERR_PTR(-ENOMEM);
4244}
4245
4246static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
4247{
4248	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4249
4250	/* Online state pins memcg ID, memcg ID pins CSS */
4251	atomic_set(&memcg->id.ref, 1);
4252	css_get(css);
4253	return 0;
4254}
4255
4256static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4257{
4258	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4259	struct mem_cgroup_event *event, *tmp;
4260
4261	/*
4262	 * Unregister events and notify userspace.
4263	 * Notify userspace about cgroup removal only after rmdir of the cgroup
4264	 * directory, to avoid a race between userspace and kernelspace.
4265	 */
4266	spin_lock(&memcg->event_list_lock);
4267	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
4268		list_del_init(&event->list);
4269		schedule_work(&event->remove);
4270	}
4271	spin_unlock(&memcg->event_list_lock);
4272
4273	memcg->low = 0;
 
4274
4275	memcg_offline_kmem(memcg);
4276	wb_memcg_offline(memcg);
4277
4278	mem_cgroup_id_put(memcg);
4279}
4280
4281static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4282{
4283	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4284
4285	invalidate_reclaim_iterators(memcg);
4286}
4287
4288static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
4289{
4290	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
4291
4292	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4293		static_branch_dec(&memcg_sockets_enabled_key);
4294
4295	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
4296		static_branch_dec(&memcg_sockets_enabled_key);
4297
4298	vmpressure_cleanup(&memcg->vmpressure);
4299	cancel_work_sync(&memcg->high_work);
4300	mem_cgroup_remove_from_trees(memcg);
 
4301	memcg_free_kmem(memcg);
4302	mem_cgroup_free(memcg);
4303}
4304
4305/**
4306 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4307 * @css: the target css
4308 *
4309 * Reset the states of the mem_cgroup associated with @css.  This is
4310 * invoked when the userland requests disabling on the default hierarchy
4311 * but the memcg is pinned through dependency.  The memcg should stop
4312 * applying policies and should revert to the vanilla state as it may be
4313 * made visible again.
4314 *
4315 * The current implementation only resets the essential configurations.
4316 * This needs to be expanded to cover all the visible parts.
4317 */
4318static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4319{
4320	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4321
4322	page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX);
4323	page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX);
4324	page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX);
4325	page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX);
4326	page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX);
4327	memcg->low = 0;
 
4328	memcg->high = PAGE_COUNTER_MAX;
4329	memcg->soft_limit = PAGE_COUNTER_MAX;
4330	memcg_wb_domain_size_changed(memcg);
4331}
4332
4333#ifdef CONFIG_MMU
4334/* Handlers for move charge at task migration. */
4335static int mem_cgroup_do_precharge(unsigned long count)
4336{
4337	int ret;
4338
4339	/* Try a single bulk charge without reclaim first, kswapd may wake */
4340	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
4341	if (!ret) {
4342		mc.precharge += count;
4343		return ret;
4344	}
4345
4346	/* Try charges one by one with reclaim, but do not retry */
4347	while (count--) {
4348		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
4349		if (ret)
4350			return ret;
4351		mc.precharge++;
4352		cond_resched();
4353	}
4354	return 0;
4355}
4356
4357union mc_target {
4358	struct page	*page;
4359	swp_entry_t	ent;
4360};
4361
4362enum mc_target_type {
4363	MC_TARGET_NONE = 0,
4364	MC_TARGET_PAGE,
4365	MC_TARGET_SWAP,
4366	MC_TARGET_DEVICE,
4367};
4368
4369static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4370						unsigned long addr, pte_t ptent)
4371{
4372	struct page *page = _vm_normal_page(vma, addr, ptent, true);
4373
4374	if (!page || !page_mapped(page))
4375		return NULL;
4376	if (PageAnon(page)) {
4377		if (!(mc.flags & MOVE_ANON))
4378			return NULL;
4379	} else {
4380		if (!(mc.flags & MOVE_FILE))
4381			return NULL;
4382	}
4383	if (!get_page_unless_zero(page))
4384		return NULL;
4385
4386	return page;
4387}
4388
4389#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
4390static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4391			pte_t ptent, swp_entry_t *entry)
4392{
4393	struct page *page = NULL;
4394	swp_entry_t ent = pte_to_swp_entry(ptent);
4395
4396	if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
4397		return NULL;
4398
4399	/*
4400	 * Handle MEMORY_DEVICE_PRIVATE pages: these are ZONE_DEVICE pages that
4401	 * belong to a device and, because they are not accessible by the CPU,
4402	 * are stored as special swap entries in the CPU page table.
4403	 */
4404	if (is_device_private_entry(ent)) {
4405		page = device_private_entry_to_page(ent);
4406		/*
4407		 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has a
4408		 * refcount of 1 when free (unlike a normal page).
4409		 */
4410		if (!page_ref_add_unless(page, 1, 1))
4411			return NULL;
4412		return page;
4413	}
4414
4415	/*
4416	 * Because lookup_swap_cache() updates some statistics counter,
4417	 * we call find_get_page() with swapper_space directly.
4418	 */
4419	page = find_get_page(swap_address_space(ent), swp_offset(ent));
4420	if (do_memsw_account())
4421		entry->val = ent.val;
4422
4423	return page;
4424}
4425#else
4426static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4427			pte_t ptent, swp_entry_t *entry)
4428{
4429	return NULL;
4430}
4431#endif
4432
4433static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4434			unsigned long addr, pte_t ptent, swp_entry_t *entry)
4435{
4436	struct page *page = NULL;
4437	struct address_space *mapping;
4438	pgoff_t pgoff;
4439
4440	if (!vma->vm_file) /* anonymous vma */
4441		return NULL;
4442	if (!(mc.flags & MOVE_FILE))
4443		return NULL;
4444
4445	mapping = vma->vm_file->f_mapping;
4446	pgoff = linear_page_index(vma, addr);
4447
4448	/* page is moved even if it's not RSS of this task (page-faulted). */
4449#ifdef CONFIG_SWAP
4450	/* shmem/tmpfs may report page out on swap: account for that too. */
4451	if (shmem_mapping(mapping)) {
4452		page = find_get_entry(mapping, pgoff);
4453		if (radix_tree_exceptional_entry(page)) {
4454			swp_entry_t swp = radix_to_swp_entry(page);
4455			if (do_memsw_account())
4456				*entry = swp;
4457			page = find_get_page(swap_address_space(swp),
4458					     swp_offset(swp));
4459		}
4460	} else
4461		page = find_get_page(mapping, pgoff);
4462#else
4463	page = find_get_page(mapping, pgoff);
4464#endif
4465	return page;
4466}
4467
4468/**
4469 * mem_cgroup_move_account - move account of the page
4470 * @page: the page
4471 * @compound: charge the page as compound or small page
4472 * @from: mem_cgroup which the page is moved from.
4473 * @to:	mem_cgroup which the page is moved to. @from != @to.
4474 *
4475 * The caller must make sure the page is not on LRU (isolate_page() is useful.)
4476 *
4477 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
4478 * from old cgroup.
4479 */
4480static int mem_cgroup_move_account(struct page *page,
4481				   bool compound,
4482				   struct mem_cgroup *from,
4483				   struct mem_cgroup *to)
4484{
4485	unsigned long flags;
4486	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
4487	int ret;
4488	bool anon;
4489
4490	VM_BUG_ON(from == to);
4491	VM_BUG_ON_PAGE(PageLRU(page), page);
4492	VM_BUG_ON(compound && !PageTransHuge(page));
4493
4494	/*
4495	 * Prevent mem_cgroup_migrate() from looking at
4496	 * page->mem_cgroup of its source page while we change it.
4497	 */
4498	ret = -EBUSY;
4499	if (!trylock_page(page))
4500		goto out;
4501
4502	ret = -EINVAL;
4503	if (page->mem_cgroup != from)
4504		goto out_unlock;
4505
4506	anon = PageAnon(page);
4507
4508	spin_lock_irqsave(&from->move_lock, flags);
4509
4510	if (!anon && page_mapped(page)) {
4511		__mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages);
4512		__mod_memcg_state(to, NR_FILE_MAPPED, nr_pages);
4513	}
4514
4515	/*
4516	 * move_lock grabbed above and caller set from->moving_account, so
4517	 * mod_memcg_page_state will serialize updates to PageDirty.
4518	 * So mapping should be stable for dirty pages.
4519	 */
4520	if (!anon && PageDirty(page)) {
4521		struct address_space *mapping = page_mapping(page);
4522
4523		if (mapping_cap_account_dirty(mapping)) {
4524			__mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages);
4525			__mod_memcg_state(to, NR_FILE_DIRTY, nr_pages);
4526		}
4527	}
4528
4529	if (PageWriteback(page)) {
4530		__mod_memcg_state(from, NR_WRITEBACK, -nr_pages);
4531		__mod_memcg_state(to, NR_WRITEBACK, nr_pages);
4532	}
4533
4534	/*
4535	 * It is safe to change page->mem_cgroup here because the page
4536	 * is referenced, charged, and isolated - we can't race with
4537	 * uncharging, charging, migration, or LRU putback.
4538	 */
4539
4540	/* caller should have done css_get */
4541	page->mem_cgroup = to;
4542	spin_unlock_irqrestore(&from->move_lock, flags);
4543
4544	ret = 0;
4545
4546	local_irq_disable();
4547	mem_cgroup_charge_statistics(to, page, compound, nr_pages);
4548	memcg_check_events(to, page);
4549	mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
4550	memcg_check_events(from, page);
4551	local_irq_enable();
4552out_unlock:
4553	unlock_page(page);
4554out:
4555	return ret;
4556}
4557
4558/**
4559 * get_mctgt_type - get target type of moving charge
4560 * @vma: the vma the pte to be checked belongs
4561 * @addr: the address corresponding to the pte to be checked
4562 * @ptent: the pte to be checked
4563 * @target: the pointer the target page or swap ent will be stored(can be NULL)
4564 *
4565 * Returns
4566 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
4567 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4568 *     move charge. If @target is not NULL, the page is stored in target->page
4569 *     with an extra refcount taken (callers should handle it).
4570 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4571 *     target for charge migration. If @target is not NULL, the entry is stored
4572 *     in target->ent.
4573 *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is MEMORY_DEVICE_PUBLIC
4574 *     or MEMORY_DEVICE_PRIVATE (so a ZONE_DEVICE page and thus not on the LRU).
4575 *     For now such a page is charged like a regular page would be, as for all
4576 *     intents and purposes it is just special memory taking the place of a
4577 *     regular page.
4578 *
4579 *     See Documentation/vm/hmm.txt and include/linux/hmm.h
4580 *
4581 * Called with pte lock held.
4582 */
4583
4584static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
4585		unsigned long addr, pte_t ptent, union mc_target *target)
4586{
4587	struct page *page = NULL;
4588	enum mc_target_type ret = MC_TARGET_NONE;
4589	swp_entry_t ent = { .val = 0 };
4590
4591	if (pte_present(ptent))
4592		page = mc_handle_present_pte(vma, addr, ptent);
4593	else if (is_swap_pte(ptent))
4594		page = mc_handle_swap_pte(vma, ptent, &ent);
4595	else if (pte_none(ptent))
4596		page = mc_handle_file_pte(vma, addr, ptent, &ent);
4597
4598	if (!page && !ent.val)
4599		return ret;
4600	if (page) {
4601		/*
4602		 * Do only loose check w/o serialization.
4603		 * mem_cgroup_move_account() checks the page is valid or
4604		 * not under LRU exclusion.
4605		 */
4606		if (page->mem_cgroup == mc.from) {
4607			ret = MC_TARGET_PAGE;
4608			if (is_device_private_page(page) ||
4609			    is_device_public_page(page))
4610				ret = MC_TARGET_DEVICE;
4611			if (target)
4612				target->page = page;
4613		}
4614		if (!ret || !target)
4615			put_page(page);
4616	}
4617	/*
4618	 * There is a swap entry and a page doesn't exist or isn't charged.
4619	 * But we cannot move a tail-page in a THP.
4620	 */
4621	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
4622	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
4623		ret = MC_TARGET_SWAP;
4624		if (target)
4625			target->ent = ent;
4626	}
4627	return ret;
4628}
4629
4630#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4631/*
4632 * We don't consider PMD mapped swapping or file mapped pages because THP does
4633 * not support them for now.
4634 * Caller should make sure that pmd_trans_huge(pmd) is true.
4635 */
4636static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4637		unsigned long addr, pmd_t pmd, union mc_target *target)
4638{
4639	struct page *page = NULL;
4640	enum mc_target_type ret = MC_TARGET_NONE;
4641
4642	if (unlikely(is_swap_pmd(pmd))) {
4643		VM_BUG_ON(thp_migration_supported() &&
4644				  !is_pmd_migration_entry(pmd));
4645		return ret;
4646	}
4647	page = pmd_page(pmd);
4648	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
4649	if (!(mc.flags & MOVE_ANON))
4650		return ret;
4651	if (page->mem_cgroup == mc.from) {
4652		ret = MC_TARGET_PAGE;
4653		if (target) {
4654			get_page(page);
4655			target->page = page;
4656		}
4657	}
4658	return ret;
4659}
4660#else
4661static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4662		unsigned long addr, pmd_t pmd, union mc_target *target)
4663{
4664	return MC_TARGET_NONE;
4665}
4666#endif
4667
4668static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4669					unsigned long addr, unsigned long end,
4670					struct mm_walk *walk)
4671{
4672	struct vm_area_struct *vma = walk->vma;
4673	pte_t *pte;
4674	spinlock_t *ptl;
4675
4676	ptl = pmd_trans_huge_lock(pmd, vma);
4677	if (ptl) {
4678		/*
4679		 * Note there can not be MC_TARGET_DEVICE for now as we do not
4680		 * support transparent huge pages with MEMORY_DEVICE_PUBLIC or
4681		 * MEMORY_DEVICE_PRIVATE, but this might change.
4682		 */
4683		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
4684			mc.precharge += HPAGE_PMD_NR;
4685		spin_unlock(ptl);
4686		return 0;
4687	}
4688
4689	if (pmd_trans_unstable(pmd))
4690		return 0;
4691	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4692	for (; addr != end; pte++, addr += PAGE_SIZE)
4693		if (get_mctgt_type(vma, addr, *pte, NULL))
4694			mc.precharge++;	/* increment precharge temporarily */
4695	pte_unmap_unlock(pte - 1, ptl);
4696	cond_resched();
4697
4698	return 0;
4699}
4700
4701static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4702{
4703	unsigned long precharge;
4704
4705	struct mm_walk mem_cgroup_count_precharge_walk = {
4706		.pmd_entry = mem_cgroup_count_precharge_pte_range,
4707		.mm = mm,
4708	};
4709	down_read(&mm->mmap_sem);
4710	walk_page_range(0, mm->highest_vm_end,
4711			&mem_cgroup_count_precharge_walk);
4712	up_read(&mm->mmap_sem);
4713
4714	precharge = mc.precharge;
4715	mc.precharge = 0;
4716
4717	return precharge;
4718}
4719
4720static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4721{
4722	unsigned long precharge = mem_cgroup_count_precharge(mm);
4723
4724	VM_BUG_ON(mc.moving_task);
4725	mc.moving_task = current;
4726	return mem_cgroup_do_precharge(precharge);
4727}
4728
4729/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
4730static void __mem_cgroup_clear_mc(void)
4731{
4732	struct mem_cgroup *from = mc.from;
4733	struct mem_cgroup *to = mc.to;
4734
4735	/* we must uncharge all the leftover precharges from mc.to */
4736	if (mc.precharge) {
4737		cancel_charge(mc.to, mc.precharge);
4738		mc.precharge = 0;
4739	}
4740	/*
4741	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4742	 * we must uncharge here.
4743	 */
4744	if (mc.moved_charge) {
4745		cancel_charge(mc.from, mc.moved_charge);
4746		mc.moved_charge = 0;
4747	}
4748	/* we must fixup refcnts and charges */
4749	if (mc.moved_swap) {
4750		/* uncharge swap account from the old cgroup */
4751		if (!mem_cgroup_is_root(mc.from))
4752			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
4753
4754		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
4755
4756		/*
4757		 * we charged both to->memory and to->memsw, so we
4758		 * should uncharge to->memory.
4759		 */
4760		if (!mem_cgroup_is_root(mc.to))
4761			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
4762
4763		mem_cgroup_id_get_many(mc.to, mc.moved_swap);
4764		css_put_many(&mc.to->css, mc.moved_swap);
4765
4766		mc.moved_swap = 0;
4767	}
4768	memcg_oom_recover(from);
4769	memcg_oom_recover(to);
4770	wake_up_all(&mc.waitq);
4771}
4772
4773static void mem_cgroup_clear_mc(void)
4774{
4775	struct mm_struct *mm = mc.mm;
4776
4777	/*
4778	 * we must clear moving_task before waking up waiters at the end of
4779	 * task migration.
4780	 */
4781	mc.moving_task = NULL;
4782	__mem_cgroup_clear_mc();
4783	spin_lock(&mc.lock);
4784	mc.from = NULL;
4785	mc.to = NULL;
4786	mc.mm = NULL;
4787	spin_unlock(&mc.lock);
4788
4789	mmput(mm);
4790}
4791
4792static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4793{
4794	struct cgroup_subsys_state *css;
4795	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
4796	struct mem_cgroup *from;
4797	struct task_struct *leader, *p;
4798	struct mm_struct *mm;
4799	unsigned long move_flags;
4800	int ret = 0;
4801
4802	/* charge immigration isn't supported on the default hierarchy */
4803	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4804		return 0;
4805
4806	/*
4807	 * Multi-process migrations only happen on the default hierarchy
4808	 * where charge immigration is not used.  Perform charge
4809	 * immigration if @tset contains a leader and whine if there are
4810	 * multiple.
4811	 */
4812	p = NULL;
4813	cgroup_taskset_for_each_leader(leader, css, tset) {
4814		WARN_ON_ONCE(p);
4815		p = leader;
4816		memcg = mem_cgroup_from_css(css);
4817	}
4818	if (!p)
4819		return 0;
4820
4821	/*
4822	 * We are now committed to this value whatever it is. Changes in this
4823	 * tunable will only affect upcoming migrations, not the current one.
4824	 * So we need to save it, and keep it going.
4825	 */
4826	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4827	if (!move_flags)
4828		return 0;
4829
4830	from = mem_cgroup_from_task(p);
4831
4832	VM_BUG_ON(from == memcg);
4833
4834	mm = get_task_mm(p);
4835	if (!mm)
4836		return 0;
4837	/* We move charges only when we move the owner of the mm */
4838	if (mm->owner == p) {
4839		VM_BUG_ON(mc.from);
4840		VM_BUG_ON(mc.to);
4841		VM_BUG_ON(mc.precharge);
4842		VM_BUG_ON(mc.moved_charge);
4843		VM_BUG_ON(mc.moved_swap);
4844
4845		spin_lock(&mc.lock);
4846		mc.mm = mm;
4847		mc.from = from;
4848		mc.to = memcg;
4849		mc.flags = move_flags;
4850		spin_unlock(&mc.lock);
4851		/* We set mc.moving_task later */
4852
4853		ret = mem_cgroup_precharge_mc(mm);
4854		if (ret)
4855			mem_cgroup_clear_mc();
4856	} else {
4857		mmput(mm);
4858	}
4859	return ret;
4860}
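
/*
 * A minimal usage sketch for the legacy interface implemented above
 * (cgroup v1 only; the "<dest memcg dir>" path is illustrative):
 * enable charge moving in the destination group, then migrate the task:
 *
 *	echo 3 > <dest memcg dir>/memory.move_charge_at_immigrate
 *	echo <pid> > <dest memcg dir>/cgroup.procs
 *
 * Bit 0 of the value selects anonymous pages and bit 1 selects file
 * pages, matching the move_flags consulted in can_attach() above.
 */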
4861
4862static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4863{
4864	if (mc.to)
4865		mem_cgroup_clear_mc();
4866}
4867
4868static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4869				unsigned long addr, unsigned long end,
4870				struct mm_walk *walk)
4871{
4872	int ret = 0;
4873	struct vm_area_struct *vma = walk->vma;
4874	pte_t *pte;
4875	spinlock_t *ptl;
4876	enum mc_target_type target_type;
4877	union mc_target target;
4878	struct page *page;
4879
4880	ptl = pmd_trans_huge_lock(pmd, vma);
4881	if (ptl) {
4882		if (mc.precharge < HPAGE_PMD_NR) {
4883			spin_unlock(ptl);
4884			return 0;
4885		}
4886		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
4887		if (target_type == MC_TARGET_PAGE) {
4888			page = target.page;
4889			if (!isolate_lru_page(page)) {
4890				if (!mem_cgroup_move_account(page, true,
4891							     mc.from, mc.to)) {
4892					mc.precharge -= HPAGE_PMD_NR;
4893					mc.moved_charge += HPAGE_PMD_NR;
4894				}
4895				putback_lru_page(page);
4896			}
4897			put_page(page);
4898		} else if (target_type == MC_TARGET_DEVICE) {
4899			page = target.page;
4900			if (!mem_cgroup_move_account(page, true,
4901						     mc.from, mc.to)) {
4902				mc.precharge -= HPAGE_PMD_NR;
4903				mc.moved_charge += HPAGE_PMD_NR;
4904			}
4905			put_page(page);
4906		}
4907		spin_unlock(ptl);
4908		return 0;
4909	}
4910
4911	if (pmd_trans_unstable(pmd))
4912		return 0;
4913retry:
4914	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4915	for (; addr != end; addr += PAGE_SIZE) {
4916		pte_t ptent = *(pte++);
4917		bool device = false;
4918		swp_entry_t ent;
4919
4920		if (!mc.precharge)
4921			break;
4922
4923		switch (get_mctgt_type(vma, addr, ptent, &target)) {
4924		case MC_TARGET_DEVICE:
4925			device = true;
4926			/* fall through */
4927		case MC_TARGET_PAGE:
4928			page = target.page;
4929			/*
4930			 * We can have a part of the split pmd here. Moving it
4931			 * can be done but it would be too convoluted so simply
4932			 * ignore such a partial THP and keep it in original
4933			 * memcg. There should be somebody mapping the head.
4934			 */
4935			if (PageTransCompound(page))
4936				goto put;
4937			if (!device && isolate_lru_page(page))
4938				goto put;
4939			if (!mem_cgroup_move_account(page, false,
4940						mc.from, mc.to)) {
4941				mc.precharge--;
4942				/* we uncharge from mc.from later. */
4943				mc.moved_charge++;
4944			}
4945			if (!device)
4946				putback_lru_page(page);
4947put:			/* get_mctgt_type() gets the page */
4948			put_page(page);
4949			break;
4950		case MC_TARGET_SWAP:
4951			ent = target.ent;
4952			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
4953				mc.precharge--;
4954				/* we fixup refcnts and charges later. */
4955				mc.moved_swap++;
4956			}
4957			break;
4958		default:
4959			break;
4960		}
4961	}
4962	pte_unmap_unlock(pte - 1, ptl);
4963	cond_resched();
4964
4965	if (addr != end) {
4966		/*
4967		 * We have consumed all precharges we got in can_attach().
4968		 * We try to charge one by one, but don't do any additional
4969		 * charges to mc.to if we have already failed to charge once in
4970		 * the attach() phase.
4971		 */
4972		ret = mem_cgroup_do_precharge(1);
4973		if (!ret)
4974			goto retry;
4975	}
4976
4977	return ret;
4978}
4979
4980static void mem_cgroup_move_charge(void)
4981{
4982	struct mm_walk mem_cgroup_move_charge_walk = {
4983		.pmd_entry = mem_cgroup_move_charge_pte_range,
4984		.mm = mc.mm,
4985	};
4986
4987	lru_add_drain_all();
4988	/*
4989	 * Signal lock_page_memcg() to take the memcg's move_lock
4990	 * while we're moving its pages to another memcg. Then wait
4991	 * for already started RCU-only updates to finish.
4992	 */
4993	atomic_inc(&mc.from->moving_account);
4994	synchronize_rcu();
4995retry:
4996	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
4997		/*
4998		 * Someone who is holding the mmap_sem might be waiting in
4999		 * waitq. So we cancel all extra charges, wake up all waiters,
5000		 * and retry. Because we cancel precharges, we might not be able
5001		 * to move enough charges, but moving charge is a best-effort
5002		 * feature anyway, so it wouldn't be a big problem.
5003		 */
5004		__mem_cgroup_clear_mc();
5005		cond_resched();
5006		goto retry;
5007	}
5008	/*
5009	 * When we have consumed all precharges and failed in doing
5010	 * additional charge, the page walk just aborts.
5011	 */
5012	walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk);
 
5013
5014	up_read(&mc.mm->mmap_sem);
5015	atomic_dec(&mc.from->moving_account);
5016}
5017
5018static void mem_cgroup_move_task(void)
5019{
5020	if (mc.to) {
5021		mem_cgroup_move_charge();
5022		mem_cgroup_clear_mc();
5023	}
5024}
5025#else	/* !CONFIG_MMU */
5026static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5027{
5028	return 0;
5029}
5030static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5031{
5032}
5033static void mem_cgroup_move_task(void)
5034{
5035}
5036#endif
5037
5038/*
5039 * Cgroup retains root cgroups across [un]mount cycles making it necessary
5040 * to verify whether we're attached to the default hierarchy on each mount
5041 * attempt.
5042 */
5043static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
5044{
5045	/*
5046	 * use_hierarchy is forced on the default hierarchy.  cgroup core
5047	 * guarantees that @root doesn't have any children, so turning it
5048	 * on for the root memcg is enough.
5049	 */
5050	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5051		root_mem_cgroup->use_hierarchy = true;
5052	else
5053		root_mem_cgroup->use_hierarchy = false;
5054}
5055
5056static u64 memory_current_read(struct cgroup_subsys_state *css,
5057			       struct cftype *cft)
5058{
5059	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5060
5061	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
5062}
5063
5064static int memory_low_show(struct seq_file *m, void *v)
5065{
5066	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5067	unsigned long low = READ_ONCE(memcg->low);
 
5068
5069	if (low == PAGE_COUNTER_MAX)
5070		seq_puts(m, "max\n");
5071	else
5072		seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
5073
5074	return 0;
5075}
5076
5077static ssize_t memory_low_write(struct kernfs_open_file *of,
5078				char *buf, size_t nbytes, loff_t off)
5079{
5080	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5081	unsigned long low;
5082	int err;
5083
5084	buf = strstrip(buf);
5085	err = page_counter_memparse(buf, "max", &low);
5086	if (err)
5087		return err;
5088
5089	memcg->low = low;
5090
5091	return nbytes;
5092}
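
/*
 * Example (cgroup v2, path is illustrative): protect roughly 512M of
 * this group's memory from reclaim pressure caused by siblings, or drop
 * the protection again:
 *
 *	echo 512M > <memcg dir>/memory.low
 *	echo max > <memcg dir>/memory.low
 *
 * page_counter_memparse() accepts either "max" or a byte value with the
 * usual K/M/G suffixes.
 */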
5093
5094static int memory_high_show(struct seq_file *m, void *v)
5095{
5096	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5097	unsigned long high = READ_ONCE(memcg->high);
5098
5099	if (high == PAGE_COUNTER_MAX)
5100		seq_puts(m, "max\n");
5101	else
5102		seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
5103
5104	return 0;
5105}
5106
5107static ssize_t memory_high_write(struct kernfs_open_file *of,
5108				 char *buf, size_t nbytes, loff_t off)
5109{
5110	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5111	unsigned long nr_pages;
5112	unsigned long high;
5113	int err;
5114
5115	buf = strstrip(buf);
5116	err = page_counter_memparse(buf, "max", &high);
5117	if (err)
5118		return err;
5119
5120	memcg->high = high;
5121
5122	nr_pages = page_counter_read(&memcg->memory);
5123	if (nr_pages > high)
5124		try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
5125					     GFP_KERNEL, true);
5126
5127	memcg_wb_domain_size_changed(memcg);
5128	return nbytes;
5129}
5130
5131static int memory_max_show(struct seq_file *m, void *v)
5132{
5133	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5134	unsigned long max = READ_ONCE(memcg->memory.limit);
5135
5136	if (max == PAGE_COUNTER_MAX)
5137		seq_puts(m, "max\n");
5138	else
5139		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5140
5141	return 0;
5142}
5143
5144static ssize_t memory_max_write(struct kernfs_open_file *of,
5145				char *buf, size_t nbytes, loff_t off)
5146{
5147	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5148	unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
5149	bool drained = false;
5150	unsigned long max;
5151	int err;
5152
5153	buf = strstrip(buf);
5154	err = page_counter_memparse(buf, "max", &max);
5155	if (err)
5156		return err;
5157
5158	xchg(&memcg->memory.limit, max);
5159
5160	for (;;) {
5161		unsigned long nr_pages = page_counter_read(&memcg->memory);
5162
5163		if (nr_pages <= max)
5164			break;
5165
5166		if (signal_pending(current)) {
5167			err = -EINTR;
5168			break;
5169		}
5170
5171		if (!drained) {
5172			drain_all_stock(memcg);
5173			drained = true;
5174			continue;
5175		}
5176
5177		if (nr_reclaims) {
5178			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
5179							  GFP_KERNEL, true))
5180				nr_reclaims--;
5181			continue;
5182		}
5183
5184		memcg_memory_event(memcg, MEMCG_OOM);
5185		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
5186			break;
5187	}
5188
5189	memcg_wb_domain_size_changed(memcg);
5190	return nbytes;
5191}
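
/*
 * Example (cgroup v2, path is illustrative):
 *
 *	echo 200M > <memcg dir>/memory.max
 *
 * Unlike memory.high, lowering memory.max is enforced immediately by
 * the loop above: usage is reclaimed down to the new limit, and if
 * reclaim cannot get there an OOM kill is attempted inside the group
 * rather than leaving it over its limit.
 */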
5192
5193static int memory_events_show(struct seq_file *m, void *v)
5194{
5195	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5196
5197	seq_printf(m, "low %lu\n",
5198		   atomic_long_read(&memcg->memory_events[MEMCG_LOW]));
5199	seq_printf(m, "high %lu\n",
5200		   atomic_long_read(&memcg->memory_events[MEMCG_HIGH]));
5201	seq_printf(m, "max %lu\n",
5202		   atomic_long_read(&memcg->memory_events[MEMCG_MAX]));
5203	seq_printf(m, "oom %lu\n",
5204		   atomic_long_read(&memcg->memory_events[MEMCG_OOM]));
5205	seq_printf(m, "oom_kill %lu\n", memcg_sum_events(memcg, OOM_KILL));
5206
 
5207	return 0;
5208}
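
/*
 * Sample memory.events output, matching the seq_printf() calls above
 * (the counts here are hypothetical and workload dependent):
 *
 *	low 0
 *	high 742
 *	max 13
 *	oom 1
 *	oom_kill 1
 */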
5209
5210static int memory_stat_show(struct seq_file *m, void *v)
5211{
5212	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5213	unsigned long stat[MEMCG_NR_STAT];
5214	unsigned long events[NR_VM_EVENT_ITEMS];
5215	int i;
5216
5217	/*
5218	 * Provide statistics on the state of the memory subsystem as
5219	 * well as cumulative event counters that show past behavior.
5220	 *
5221	 * This list is ordered following a combination of these gradients:
5222	 * 1) generic big picture -> specifics and details
5223	 * 2) reflecting userspace activity -> reflecting kernel heuristics
5224	 *
5225	 * Current memory state:
5226	 */
5227
5228	tree_stat(memcg, stat);
5229	tree_events(memcg, events);
 
5230
5231	seq_printf(m, "anon %llu\n",
5232		   (u64)stat[MEMCG_RSS] * PAGE_SIZE);
5233	seq_printf(m, "file %llu\n",
5234		   (u64)stat[MEMCG_CACHE] * PAGE_SIZE);
5235	seq_printf(m, "kernel_stack %llu\n",
5236		   (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
5237	seq_printf(m, "slab %llu\n",
5238		   (u64)(stat[NR_SLAB_RECLAIMABLE] +
5239			 stat[NR_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
5240	seq_printf(m, "sock %llu\n",
5241		   (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
5242
5243	seq_printf(m, "shmem %llu\n",
5244		   (u64)stat[NR_SHMEM] * PAGE_SIZE);
5245	seq_printf(m, "file_mapped %llu\n",
5246		   (u64)stat[NR_FILE_MAPPED] * PAGE_SIZE);
5247	seq_printf(m, "file_dirty %llu\n",
5248		   (u64)stat[NR_FILE_DIRTY] * PAGE_SIZE);
5249	seq_printf(m, "file_writeback %llu\n",
5250		   (u64)stat[NR_WRITEBACK] * PAGE_SIZE);
5251
5252	for (i = 0; i < NR_LRU_LISTS; i++) {
5253		struct mem_cgroup *mi;
5254		unsigned long val = 0;
5255
5256		for_each_mem_cgroup_tree(mi, memcg)
5257			val += mem_cgroup_nr_lru_pages(mi, BIT(i));
5258		seq_printf(m, "%s %llu\n",
5259			   mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
5260	}
5261
5262	seq_printf(m, "slab_reclaimable %llu\n",
5263		   (u64)stat[NR_SLAB_RECLAIMABLE] * PAGE_SIZE);
5264	seq_printf(m, "slab_unreclaimable %llu\n",
5265		   (u64)stat[NR_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
5266
5267	/* Accumulated memory events */
5268
5269	seq_printf(m, "pgfault %lu\n", events[PGFAULT]);
5270	seq_printf(m, "pgmajfault %lu\n", events[PGMAJFAULT]);
5271
5272	seq_printf(m, "pgrefill %lu\n", events[PGREFILL]);
5273	seq_printf(m, "pgscan %lu\n", events[PGSCAN_KSWAPD] +
5274		   events[PGSCAN_DIRECT]);
5275	seq_printf(m, "pgsteal %lu\n", events[PGSTEAL_KSWAPD] +
5276		   events[PGSTEAL_DIRECT]);
5277	seq_printf(m, "pgactivate %lu\n", events[PGACTIVATE]);
5278	seq_printf(m, "pgdeactivate %lu\n", events[PGDEACTIVATE]);
5279	seq_printf(m, "pglazyfree %lu\n", events[PGLAZYFREE]);
5280	seq_printf(m, "pglazyfreed %lu\n", events[PGLAZYFREED]);
5281
5282	seq_printf(m, "workingset_refault %lu\n",
5283		   stat[WORKINGSET_REFAULT]);
5284	seq_printf(m, "workingset_activate %lu\n",
5285		   stat[WORKINGSET_ACTIVATE]);
5286	seq_printf(m, "workingset_nodereclaim %lu\n",
5287		   stat[WORKINGSET_NODERECLAIM]);
5288
5289	return 0;
5290}
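
/*
 * The first few lines of memory.stat therefore look like this (values
 * in bytes, taken from a hypothetical group):
 *
 *	anon 167772160
 *	file 335544320
 *	kernel_stack 2097152
 *	slab 27262976
 *	sock 0
 *	shmem 4096
 *	file_mapped 12582912
 *	...
 */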
5291
5292static struct cftype memory_files[] = {
5293	{
5294		.name = "current",
5295		.flags = CFTYPE_NOT_ON_ROOT,
5296		.read_u64 = memory_current_read,
5297	},
5298	{
5299		.name = "low",
5300		.flags = CFTYPE_NOT_ON_ROOT,
5301		.seq_show = memory_low_show,
5302		.write = memory_low_write,
5303	},
5304	{
5305		.name = "high",
5306		.flags = CFTYPE_NOT_ON_ROOT,
5307		.seq_show = memory_high_show,
5308		.write = memory_high_write,
5309	},
5310	{
5311		.name = "max",
5312		.flags = CFTYPE_NOT_ON_ROOT,
5313		.seq_show = memory_max_show,
5314		.write = memory_max_write,
5315	},
5316	{
5317		.name = "events",
5318		.flags = CFTYPE_NOT_ON_ROOT,
5319		.file_offset = offsetof(struct mem_cgroup, events_file),
5320		.seq_show = memory_events_show,
5321	},
5322	{
5323		.name = "stat",
5324		.flags = CFTYPE_NOT_ON_ROOT,
5325		.seq_show = memory_stat_show,
5326	},
5327	{ }	/* terminate */
5328};
5329
5330struct cgroup_subsys memory_cgrp_subsys = {
5331	.css_alloc = mem_cgroup_css_alloc,
5332	.css_online = mem_cgroup_css_online,
5333	.css_offline = mem_cgroup_css_offline,
5334	.css_released = mem_cgroup_css_released,
5335	.css_free = mem_cgroup_css_free,
5336	.css_reset = mem_cgroup_css_reset,
5337	.can_attach = mem_cgroup_can_attach,
5338	.cancel_attach = mem_cgroup_cancel_attach,
5339	.post_attach = mem_cgroup_move_task,
5340	.bind = mem_cgroup_bind,
5341	.dfl_cftypes = memory_files,
5342	.legacy_cftypes = mem_cgroup_legacy_files,
5343	.early_init = 0,
5344};
5345
5346/**
5347 * mem_cgroup_low - check if memory consumption is below the normal range
5348 * @root: the top ancestor of the sub-tree being checked
5349 * @memcg: the memory cgroup to check
5350 *
5351 * Returns %true if memory consumption of @memcg, and that of all
5352 * ancestors up to (but not including) @root, is below the normal range.
5353 *
5354 * @root is exclusive; it is never low when looked at directly and isn't
5355 * checked when traversing the hierarchy.
5356 *
5357 * Excluding @root enables using memory.low to prioritize memory usage
5358 * between cgroups within a subtree of the hierarchy that is limited by
5359 * memory.high or memory.max.
5360 *
5361 * For example, given cgroup A with children B and C:
5362 *
5363 *    A
5364 *   / \
5365 *  B   C
5366 *
5367 * and
5368 *
5369 *  1. A/memory.current > A/memory.high
5370 *  2. A/B/memory.current < A/B/memory.low
5371 *  3. A/C/memory.current >= A/C/memory.low
5372 *
5373 * As 'A' is high, i.e. triggers reclaim from 'A', and 'B' is low, we
5374 * should reclaim from 'C' until 'A' is no longer high or until we can
5375 * no longer reclaim from 'C'.  If 'A', i.e. @root, isn't excluded by
5376 * mem_cgroup_low when reclaiming from 'A', then 'B' won't be considered
5377 * low and we will reclaim indiscriminately from both 'B' and 'C'.
5378 */
5379bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
5380{
5381	if (mem_cgroup_disabled())
5382		return false;
5383
5384	if (!root)
5385		root = root_mem_cgroup;
5386	if (memcg == root)
5387		return false;
5388
5389	for (; memcg != root; memcg = parent_mem_cgroup(memcg)) {
5390		if (page_counter_read(&memcg->memory) >= memcg->low)
5391			return false;
5392	}
5393
5394	return true;
5395}
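
/*
 * A simplified sketch of how reclaim consults this check (the real
 * caller lives in mm/vmscan.c and also supports overriding the
 * protection when reclaim would otherwise make no progress):
 *
 *	memcg = mem_cgroup_iter(root, NULL, NULL);
 *	do {
 *		if (mem_cgroup_low(root, memcg))
 *			continue;	(skip the protected group)
 *		... scan this memcg's LRU lists ...
 *	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 */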
5396
5397/**
5398 * mem_cgroup_try_charge - try charging a page
5399 * @page: page to charge
5400 * @mm: mm context of the victim
5401 * @gfp_mask: reclaim mode
5402 * @memcgp: charged memcg return
5403 * @compound: charge the page as compound or small page
5404 *
5405 * Try to charge @page to the memcg that @mm belongs to, reclaiming
5406 * pages according to @gfp_mask if necessary.
5407 *
5408 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5409 * Otherwise, an error code is returned.
5410 *
5411 * After page->mapping has been set up, the caller must finalize the
5412 * charge with mem_cgroup_commit_charge().  Or abort the transaction
5413 * with mem_cgroup_cancel_charge() in case page instantiation fails.
5414 */
5415int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
5416			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
5417			  bool compound)
5418{
5419	struct mem_cgroup *memcg = NULL;
5420	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5421	int ret = 0;
5422
5423	if (mem_cgroup_disabled())
5424		goto out;
5425
5426	if (PageSwapCache(page)) {
5427		/*
5428		 * Every swap fault against a single page tries to charge the
5429		 * page, bail as early as possible.  shmem_unuse() encounters
5430		 * already charged pages, too.  The USED bit is protected by
5431		 * the page lock, which serializes swap cache removal, which
5432		 * in turn serializes uncharging.
5433		 */
5434		VM_BUG_ON_PAGE(!PageLocked(page), page);
5435		if (compound_head(page)->mem_cgroup)
5436			goto out;
5437
5438		if (do_swap_account) {
5439			swp_entry_t ent = { .val = page_private(page), };
5440			unsigned short id = lookup_swap_cgroup_id(ent);
5441
5442			rcu_read_lock();
5443			memcg = mem_cgroup_from_id(id);
5444			if (memcg && !css_tryget_online(&memcg->css))
5445				memcg = NULL;
5446			rcu_read_unlock();
5447		}
5448	}
5449
5450	if (!memcg)
5451		memcg = get_mem_cgroup_from_mm(mm);
5452
5453	ret = try_charge(memcg, gfp_mask, nr_pages);
5454
5455	css_put(&memcg->css);
5456out:
5457	*memcgp = memcg;
5458	return ret;
5459}
5460
5461/**
5462 * mem_cgroup_commit_charge - commit a page charge
5463 * @page: page to charge
5464 * @memcg: memcg to charge the page to
5465 * @lrucare: page might be on LRU already
5466 * @compound: charge the page as compound or small page
5467 *
5468 * Finalize a charge transaction started by mem_cgroup_try_charge(),
5469 * after page->mapping has been set up.  This must happen atomically
5470 * as part of the page instantiation, i.e. under the page table lock
5471 * for anonymous pages, under the page lock for page and swap cache.
5472 *
5473 * In addition, the page must not be on the LRU during the commit, to
5474 * prevent racing with task migration.  If it might be, use @lrucare.
5475 *
5476 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
5477 */
5478void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
5479			      bool lrucare, bool compound)
5480{
5481	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5482
5483	VM_BUG_ON_PAGE(!page->mapping, page);
5484	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5485
5486	if (mem_cgroup_disabled())
5487		return;
5488	/*
5489	 * Swap faults will attempt to charge the same page multiple
5490	 * times.  But reuse_swap_page() might have removed the page
5491	 * from swapcache already, so we can't check PageSwapCache().
5492	 */
5493	if (!memcg)
5494		return;
5495
5496	commit_charge(page, memcg, lrucare);
5497
5498	local_irq_disable();
5499	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
5500	memcg_check_events(memcg, page);
5501	local_irq_enable();
5502
5503	if (do_memsw_account() && PageSwapCache(page)) {
5504		swp_entry_t entry = { .val = page_private(page) };
5505		/*
5506		 * The swap entry might not get freed for a long time,
5507		 * let's not wait for it.  The page already received a
5508		 * memory+swap charge, drop the swap entry duplicate.
5509		 */
5510		mem_cgroup_uncharge_swap(entry, nr_pages);
5511	}
5512}
5513
5514/**
5515 * mem_cgroup_cancel_charge - cancel a page charge
5516 * @page: page to charge
5517 * @memcg: memcg to charge the page to
5518 * @compound: charge the page as compound or small page
5519 *
5520 * Cancel a charge transaction started by mem_cgroup_try_charge().
5521 */
5522void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
5523		bool compound)
5524{
5525	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5526
5527	if (mem_cgroup_disabled())
5528		return;
5529	/*
5530	 * Swap faults will attempt to charge the same page multiple
5531	 * times.  But reuse_swap_page() might have removed the page
5532	 * from swapcache already, so we can't check PageSwapCache().
5533	 */
5534	if (!memcg)
5535		return;
5536
5537	cancel_charge(memcg, nr_pages);
5538}
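
/*
 * Putting the three calls above together, a page-instantiation path
 * follows this shape (simplified; the surrounding error handling is
 * omitted):
 *
 *	if (mem_cgroup_try_charge(page, mm, gfp_mask, &memcg, false))
 *		goto fail;
 *	...
 *	set up page->mapping, insert into page tables or page cache
 *	...
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 *
 * and, if instantiation fails after the charge was taken:
 *
 *	mem_cgroup_cancel_charge(page, memcg, false);
 */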
5539
5540struct uncharge_gather {
5541	struct mem_cgroup *memcg;
5542	unsigned long pgpgout;
5543	unsigned long nr_anon;
5544	unsigned long nr_file;
5545	unsigned long nr_kmem;
5546	unsigned long nr_huge;
5547	unsigned long nr_shmem;
5548	struct page *dummy_page;
5549};
5550
5551static inline void uncharge_gather_clear(struct uncharge_gather *ug)
5552{
5553	memset(ug, 0, sizeof(*ug));
5554}
5555
5556static void uncharge_batch(const struct uncharge_gather *ug)
5557{
5558	unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem;
5559	unsigned long flags;
5560
5561	if (!mem_cgroup_is_root(ug->memcg)) {
5562		page_counter_uncharge(&ug->memcg->memory, nr_pages);
5563		if (do_memsw_account())
5564			page_counter_uncharge(&ug->memcg->memsw, nr_pages);
5565		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
5566			page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
5567		memcg_oom_recover(ug->memcg);
5568	}
5569
5570	local_irq_save(flags);
5571	__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
5572	__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
5573	__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
5574	__mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
5575	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
5576	__this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
5577	memcg_check_events(ug->memcg, ug->dummy_page);
5578	local_irq_restore(flags);
5579
5580	if (!mem_cgroup_is_root(ug->memcg))
5581		css_put_many(&ug->memcg->css, nr_pages);
5582}
5583
5584static void uncharge_page(struct page *page, struct uncharge_gather *ug)
5585{
5586	VM_BUG_ON_PAGE(PageLRU(page), page);
5587	VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) &&
5588			!PageHWPoison(page), page);
5589
5590	if (!page->mem_cgroup)
5591		return;
5592
5593	/*
5594	 * Nobody should be changing or seriously looking at
5595	 * page->mem_cgroup at this point, we have fully
5596	 * exclusive access to the page.
5597	 */
5598
5599	if (ug->memcg != page->mem_cgroup) {
5600		if (ug->memcg) {
5601			uncharge_batch(ug);
5602			uncharge_gather_clear(ug);
5603		}
5604		ug->memcg = page->mem_cgroup;
5605	}
5606
5607	if (!PageKmemcg(page)) {
5608		unsigned int nr_pages = 1;
5609
5610		if (PageTransHuge(page)) {
5611			nr_pages <<= compound_order(page);
5612			ug->nr_huge += nr_pages;
5613		}
5614		if (PageAnon(page))
5615			ug->nr_anon += nr_pages;
5616		else {
5617			ug->nr_file += nr_pages;
5618			if (PageSwapBacked(page))
5619				ug->nr_shmem += nr_pages;
5620		}
5621		ug->pgpgout++;
5622	} else {
5623		ug->nr_kmem += 1 << compound_order(page);
5624		__ClearPageKmemcg(page);
5625	}
5626
5627	ug->dummy_page = page;
5628	page->mem_cgroup = NULL;
5629}
5630
5631static void uncharge_list(struct list_head *page_list)
5632{
5633	struct uncharge_gather ug;
5634	struct list_head *next;
5635
5636	uncharge_gather_clear(&ug);
5637
5638	/*
5639	 * Note that the list can be a single page->lru; hence the
5640	 * do-while loop instead of a simple list_for_each_entry().
5641	 */
5642	next = page_list->next;
5643	do {
5644		struct page *page;
5645
5646		page = list_entry(next, struct page, lru);
5647		next = page->lru.next;
5648
5649		uncharge_page(page, &ug);
5650	} while (next != page_list);
5651
5652	if (ug.memcg)
5653		uncharge_batch(&ug);
5654}
5655
5656/**
5657 * mem_cgroup_uncharge - uncharge a page
5658 * @page: page to uncharge
5659 *
5660 * Uncharge a page previously charged with mem_cgroup_try_charge() and
5661 * mem_cgroup_commit_charge().
5662 */
5663void mem_cgroup_uncharge(struct page *page)
5664{
5665	struct uncharge_gather ug;
5666
5667	if (mem_cgroup_disabled())
5668		return;
5669
5670	/* Don't touch page->lru of any random page, pre-check: */
5671	if (!page->mem_cgroup)
5672		return;
5673
5674	uncharge_gather_clear(&ug);
5675	uncharge_page(page, &ug);
5676	uncharge_batch(&ug);
5677}
5678
5679/**
5680 * mem_cgroup_uncharge_list - uncharge a list of pages
5681 * @page_list: list of pages to uncharge
5682 *
5683 * Uncharge a list of pages previously charged with
5684 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
5685 */
5686void mem_cgroup_uncharge_list(struct list_head *page_list)
5687{
5688	if (mem_cgroup_disabled())
5689		return;
5690
5691	if (!list_empty(page_list))
5692		uncharge_list(page_list);
5693}
5694
5695/**
5696 * mem_cgroup_migrate - charge a page's replacement
5697 * @oldpage: currently circulating page
5698 * @newpage: replacement page
5699 *
5700 * Charge @newpage as a replacement page for @oldpage. @oldpage will
5701 * be uncharged upon free.
5702 *
5703 * Both pages must be locked, @newpage->mapping must be set up.
5704 */
5705void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
5706{
5707	struct mem_cgroup *memcg;
5708	unsigned int nr_pages;
5709	bool compound;
5710	unsigned long flags;
5711
5712	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
5713	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
5714	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
5715	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
5716		       newpage);
5717
5718	if (mem_cgroup_disabled())
5719		return;
5720
5721	/* Page cache replacement: new page already charged? */
5722	if (newpage->mem_cgroup)
5723		return;
5724
5725	/* Swapcache readahead pages can get replaced before being charged */
5726	memcg = oldpage->mem_cgroup;
5727	if (!memcg)
5728		return;
5729
5730	/* Force-charge the new page. The old one will be freed soon */
5731	compound = PageTransHuge(newpage);
5732	nr_pages = compound ? hpage_nr_pages(newpage) : 1;
5733
5734	page_counter_charge(&memcg->memory, nr_pages);
5735	if (do_memsw_account())
5736		page_counter_charge(&memcg->memsw, nr_pages);
5737	css_get_many(&memcg->css, nr_pages);
5738
5739	commit_charge(newpage, memcg, false);
5740
5741	local_irq_save(flags);
5742	mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
5743	memcg_check_events(memcg, newpage);
5744	local_irq_restore(flags);
5745}
5746
5747DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
5748EXPORT_SYMBOL(memcg_sockets_enabled_key);
5749
5750void mem_cgroup_sk_alloc(struct sock *sk)
5751{
5752	struct mem_cgroup *memcg;
5753
5754	if (!mem_cgroup_sockets_enabled)
5755		return;
5756
5757	/*
5758	 * Socket cloning can throw us here with sk_memcg already
5759	 * filled. It won't, however, necessarily happen from
5760	 * process context. So the test for root memcg given
5761	 * the current task's memcg won't help us in this case.
5762	 *
5763	 * Respecting the original socket's memcg is a better
5764	 * decision in this case.
5765	 */
5766	if (sk->sk_memcg) {
5767		css_get(&sk->sk_memcg->css);
5768		return;
5769	}
5770
5771	rcu_read_lock();
5772	memcg = mem_cgroup_from_task(current);
5773	if (memcg == root_mem_cgroup)
5774		goto out;
5775	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
5776		goto out;
5777	if (css_tryget_online(&memcg->css))
5778		sk->sk_memcg = memcg;
5779out:
5780	rcu_read_unlock();
5781}
5782
5783void mem_cgroup_sk_free(struct sock *sk)
5784{
5785	if (sk->sk_memcg)
5786		css_put(&sk->sk_memcg->css);
5787}
5788
5789/**
5790 * mem_cgroup_charge_skmem - charge socket memory
5791 * @memcg: memcg to charge
5792 * @nr_pages: number of pages to charge
5793 *
5794 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
5795 * @memcg's configured limit, %false if the charge had to be forced.
5796 */
5797bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5798{
5799	gfp_t gfp_mask = GFP_KERNEL;
5800
5801	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5802		struct page_counter *fail;
5803
5804		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
5805			memcg->tcpmem_pressure = 0;
5806			return true;
5807		}
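		/*
		 * The charge exceeded the limit: force it through anyway so
		 * the socket keeps working, and raise the pressure flag so
		 * the network stack throttles (hence the %false return).
		 */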
5808		page_counter_charge(&memcg->tcpmem, nr_pages);
5809		memcg->tcpmem_pressure = 1;
5810		return false;
5811	}
5812
5813	/* Don't block in the packet receive path */
5814	if (in_softirq())
5815		gfp_mask = GFP_NOWAIT;
5816
5817	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
5818
5819	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
5820		return true;
5821
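	/*
	 * The charge didn't fit: force it with __GFP_NOFAIL to keep the
	 * accounting consistent, and let the %false return tell the
	 * caller to enter memory pressure.
	 */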
5822	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
5823	return false;
5824}
5825
5826/**
5827 * mem_cgroup_uncharge_skmem - uncharge socket memory
5828 * @memcg: memcg to uncharge
5829 * @nr_pages: number of pages to uncharge
5830 */
5831void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5832{
5833	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5834		page_counter_uncharge(&memcg->tcpmem, nr_pages);
5835		return;
5836	}
5837
5838	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
5839
5840	refill_stock(memcg, nr_pages);
5841}
5842
5843static int __init cgroup_memory(char *s)
5844{
5845	char *token;
5846
5847	while ((token = strsep(&s, ",")) != NULL) {
5848		if (!*token)
5849			continue;
5850		if (!strcmp(token, "nosocket"))
5851			cgroup_memory_nosocket = true;
5852		if (!strcmp(token, "nokmem"))
5853			cgroup_memory_nokmem = true;
5854	}
5855	return 0;
5856}
5857__setup("cgroup.memory=", cgroup_memory);
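/*
 * Example (sketch): booting with "cgroup.memory=nosocket,nokmem" turns
 * off both socket memory and kernel memory accounting.
 */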
5858
5859/*
5860 * subsys_initcall() for memory controller.
5861 *
5862 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
5863 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
5864 * basically everything that doesn't depend on a specific mem_cgroup structure
5865 * should be initialized from here.
5866 */
5867static int __init mem_cgroup_init(void)
5868{
5869	int cpu, node;
5870
5871#ifndef CONFIG_SLOB
5872	/*
5873	 * Kmem cache creation is mostly done with the slab_mutex held,
5874	 * so use a workqueue with limited concurrency to avoid stalling
5875	 * all worker threads in case lots of cgroups are created and
5876	 * destroyed simultaneously.
5877	 */
5878	memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
5879	BUG_ON(!memcg_kmem_cache_wq);
5880#endif
5881
5882	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
5883				  memcg_hotplug_cpu_dead);
5884
5885	for_each_possible_cpu(cpu)
5886		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5887			  drain_local_stock);
5888
5889	for_each_node(node) {
5890		struct mem_cgroup_tree_per_node *rtpn;
5891
5892		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
5893				    node_online(node) ? node : NUMA_NO_NODE);
5894
5895		rtpn->rb_root = RB_ROOT;
5896		rtpn->rb_rightmost = NULL;
5897		spin_lock_init(&rtpn->lock);
5898		soft_limit_tree.rb_tree_per_node[node] = rtpn;
5899	}
5900
5901	return 0;
5902}
5903subsys_initcall(mem_cgroup_init);
5904
5905#ifdef CONFIG_MEMCG_SWAP
5906static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
5907{
5908	while (!atomic_inc_not_zero(&memcg->id.ref)) {
5909		/*
5910		 * The root cgroup cannot be destroyed, so its refcount must
5911		 * always be >= 1.
5912		 */
5913		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
5914			VM_BUG_ON(1);
5915			break;
5916		}
5917		memcg = parent_mem_cgroup(memcg);
5918		if (!memcg)
5919			memcg = root_mem_cgroup;
5920	}
5921	return memcg;
5922}
5923
5924/**
5925 * mem_cgroup_swapout - transfer a memsw charge to swap
5926 * @page: page whose memsw charge to transfer
5927 * @entry: swap entry to move the charge to
5928 *
5929 * Transfer the memsw charge of @page to @entry.
5930 */
5931void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5932{
5933	struct mem_cgroup *memcg, *swap_memcg;
5934	unsigned int nr_entries;
5935	unsigned short oldid;
5936
5937	VM_BUG_ON_PAGE(PageLRU(page), page);
5938	VM_BUG_ON_PAGE(page_count(page), page);
5939
5940	if (!do_memsw_account())
5941		return;
5942
5943	memcg = page->mem_cgroup;
5944
5945	/* Readahead page, never charged */
5946	if (!memcg)
5947		return;
5948
5949	/*
5950	 * In case the memcg owning these pages has been offlined and doesn't
5951	 * have an ID allocated to it anymore, charge the closest online
5952	 * ancestor for the swap instead and transfer the memory+swap charge.
5953	 */
5954	swap_memcg = mem_cgroup_id_get_online(memcg);
5955	nr_entries = hpage_nr_pages(page);
5956	/* Get references for the tail pages, too */
5957	if (nr_entries > 1)
5958		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
5959	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
5960				   nr_entries);
5961	VM_BUG_ON_PAGE(oldid, page);
5962	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
5963
5964	page->mem_cgroup = NULL;
5965
5966	if (!mem_cgroup_is_root(memcg))
5967		page_counter_uncharge(&memcg->memory, nr_entries);
5968
5969	if (memcg != swap_memcg) {
5970		if (!mem_cgroup_is_root(swap_memcg))
5971			page_counter_charge(&swap_memcg->memsw, nr_entries);
5972		page_counter_uncharge(&memcg->memsw, nr_entries);
5973	}
5974
5975	/*
5976	 * Interrupts should be disabled here because the caller holds the
5977	 * i_pages lock which is taken with interrupts-off. It is
5978	 * important here to have the interrupts disabled because it is the
5979	 * only synchronisation we have for updating the per-CPU variables.
5980	 */
5981	VM_BUG_ON(!irqs_disabled());
5982	mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page),
5983				     -nr_entries);
5984	memcg_check_events(memcg, page);
5985
5986	if (!mem_cgroup_is_root(memcg))
5987		css_put_many(&memcg->css, nr_entries);
5988}
5989
5990/**
5991 * mem_cgroup_try_charge_swap - try charging swap space for a page
5992 * @page: page being added to swap
5993 * @entry: swap entry to charge
5994 *
5995 * Try to charge @page's memcg for the swap space at @entry.
5996 *
5997 * Returns 0 on success, -ENOMEM on failure.
5998 */
5999int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
6000{
6001	unsigned int nr_pages = hpage_nr_pages(page);
6002	struct page_counter *counter;
6003	struct mem_cgroup *memcg;
6004	unsigned short oldid;
6005
6006	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
6007		return 0;
6008
6009	memcg = page->mem_cgroup;
6010
6011	/* Readahead page, never charged */
6012	if (!memcg)
6013		return 0;
6014
6015	memcg = mem_cgroup_id_get_online(memcg);
6016
6017	if (!mem_cgroup_is_root(memcg) &&
6018	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
6019		mem_cgroup_id_put(memcg);
6020		return -ENOMEM;
6021	}
6022
6023	/* Get references for the tail pages, too */
6024	if (nr_pages > 1)
6025		mem_cgroup_id_get_many(memcg, nr_pages - 1);
6026	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
6027	VM_BUG_ON_PAGE(oldid, page);
6028	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
6029
6030	return 0;
6031}
6032
6033/**
6034 * mem_cgroup_uncharge_swap - uncharge swap space
6035 * @entry: swap entry to uncharge
6036 * @nr_pages: the amount of swap space to uncharge
6037 */
6038void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
6039{
6040	struct mem_cgroup *memcg;
6041	unsigned short id;
6042
6043	if (!do_swap_account)
6044		return;
6045
6046	id = swap_cgroup_record(entry, 0, nr_pages);
6047	rcu_read_lock();
6048	memcg = mem_cgroup_from_id(id);
6049	if (memcg) {
6050		if (!mem_cgroup_is_root(memcg)) {
6051			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6052				page_counter_uncharge(&memcg->swap, nr_pages);
6053			else
6054				page_counter_uncharge(&memcg->memsw, nr_pages);
6055		}
6056		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
6057		mem_cgroup_id_put_many(memcg, nr_pages);
6058	}
6059	rcu_read_unlock();
6060}
6061
6062long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
6063{
6064	long nr_swap_pages = get_nr_swap_pages();
6065
6066	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
6067		return nr_swap_pages;
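	/* Clamp the global figure by the remaining swap budget at each
	 * ancestor level of the hierarchy. */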
6068	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
6069		nr_swap_pages = min_t(long, nr_swap_pages,
6070				      READ_ONCE(memcg->swap.limit) -
6071				      page_counter_read(&memcg->swap));
6072	return nr_swap_pages;
6073}
6074
6075bool mem_cgroup_swap_full(struct page *page)
6076{
6077	struct mem_cgroup *memcg;
6078
6079	VM_BUG_ON_PAGE(!PageLocked(page), page);
6080
6081	if (vm_swap_full())
6082		return true;
6083	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
6084		return false;
6085
6086	memcg = page->mem_cgroup;
6087	if (!memcg)
6088		return false;
6089
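	/* Treat swap as "full" once any level of the hierarchy has used
	 * at least half of its swap limit. */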
6090	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
6091		if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
6092			return true;
6093
6094	return false;
6095}
6096
6097/* for remembering the boot option */
6098#ifdef CONFIG_MEMCG_SWAP_ENABLED
6099static int really_do_swap_account __initdata = 1;
6100#else
6101static int really_do_swap_account __initdata;
6102#endif
6103
6104static int __init enable_swap_account(char *s)
6105{
6106	if (!strcmp(s, "1"))
6107		really_do_swap_account = 1;
6108	else if (!strcmp(s, "0"))
6109		really_do_swap_account = 0;
6110	return 1;
6111}
6112__setup("swapaccount=", enable_swap_account);
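/*
 * Example (sketch): booting with "swapaccount=0" disables swap
 * accounting even when CONFIG_MEMCG_SWAP_ENABLED makes it the default;
 * "swapaccount=1" enables it when the default is off.
 */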
6113
6114static u64 swap_current_read(struct cgroup_subsys_state *css,
6115			     struct cftype *cft)
6116{
6117	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6118
6119	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
6120}
6121
6122static int swap_max_show(struct seq_file *m, void *v)
6123{
6124	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
6125	unsigned long max = READ_ONCE(memcg->swap.limit);
6126
6127	if (max == PAGE_COUNTER_MAX)
6128		seq_puts(m, "max\n");
6129	else
6130		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
6131
6132	return 0;
6133}
6134
6135static ssize_t swap_max_write(struct kernfs_open_file *of,
6136			      char *buf, size_t nbytes, loff_t off)
6137{
6138	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6139	unsigned long max;
6140	int err;
6141
6142	buf = strstrip(buf);
6143	err = page_counter_memparse(buf, "max", &max);
6144	if (err)
6145		return err;
6146
6147	mutex_lock(&memcg_limit_mutex);
6148	err = page_counter_limit(&memcg->swap, max);
6149	mutex_unlock(&memcg_limit_mutex);
6150	if (err)
6151		return err;
6152
6153	return nbytes;
6154}
6155
6156static struct cftype swap_files[] = {
6157	{
6158		.name = "swap.current",
6159		.flags = CFTYPE_NOT_ON_ROOT,
6160		.read_u64 = swap_current_read,
6161	},
6162	{
6163		.name = "swap.max",
6164		.flags = CFTYPE_NOT_ON_ROOT,
6165		.seq_show = swap_max_show,
6166		.write = swap_max_write,
6167	},
6168	{ }	/* terminate */
6169};
6170
6171static struct cftype memsw_cgroup_files[] = {
6172	{
6173		.name = "memsw.usage_in_bytes",
6174		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
6175		.read_u64 = mem_cgroup_read_u64,
6176	},
6177	{
6178		.name = "memsw.max_usage_in_bytes",
6179		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
6180		.write = mem_cgroup_reset,
6181		.read_u64 = mem_cgroup_read_u64,
6182	},
6183	{
6184		.name = "memsw.limit_in_bytes",
6185		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
6186		.write = mem_cgroup_write,
6187		.read_u64 = mem_cgroup_read_u64,
6188	},
6189	{
6190		.name = "memsw.failcnt",
6191		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
6192		.write = mem_cgroup_reset,
6193		.read_u64 = mem_cgroup_read_u64,
6194	},
6195	{ },	/* terminate */
6196};
6197
6198static int __init mem_cgroup_swap_init(void)
6199{
6200	if (!mem_cgroup_disabled() && really_do_swap_account) {
6201		do_swap_account = 1;
6202		WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
6203					       swap_files));
6204		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
6205						  memsw_cgroup_files));
6206	}
6207	return 0;
6208}
6209subsys_initcall(mem_cgroup_swap_init);
6210
6211#endif /* CONFIG_MEMCG_SWAP */
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* memcontrol.c - Memory Controller
   3 *
   4 * Copyright IBM Corporation, 2007
   5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
   6 *
   7 * Copyright 2007 OpenVZ SWsoft Inc
   8 * Author: Pavel Emelianov <xemul@openvz.org>
   9 *
  10 * Memory thresholds
  11 * Copyright (C) 2009 Nokia Corporation
  12 * Author: Kirill A. Shutemov
  13 *
  14 * Kernel Memory Controller
  15 * Copyright (C) 2012 Parallels Inc. and Google Inc.
  16 * Authors: Glauber Costa and Suleiman Souhlal
  17 *
  18 * Native page reclaim
  19 * Charge lifetime sanitation
  20 * Lockless page tracking & accounting
  21 * Unified hierarchy configuration model
  22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
  23 */
  24
  25#include <linux/page_counter.h>
  26#include <linux/memcontrol.h>
  27#include <linux/cgroup.h>
  28#include <linux/pagewalk.h>
  29#include <linux/sched/mm.h>
  30#include <linux/shmem_fs.h>
  31#include <linux/hugetlb.h>
  32#include <linux/pagemap.h>
  33#include <linux/vm_event_item.h>
  34#include <linux/smp.h>
  35#include <linux/page-flags.h>
  36#include <linux/backing-dev.h>
  37#include <linux/bit_spinlock.h>
  38#include <linux/rcupdate.h>
  39#include <linux/limits.h>
  40#include <linux/export.h>
  41#include <linux/mutex.h>
  42#include <linux/rbtree.h>
  43#include <linux/slab.h>
  44#include <linux/swap.h>
  45#include <linux/swapops.h>
  46#include <linux/spinlock.h>
  47#include <linux/eventfd.h>
  48#include <linux/poll.h>
  49#include <linux/sort.h>
  50#include <linux/fs.h>
  51#include <linux/seq_file.h>
  52#include <linux/vmpressure.h>
  53#include <linux/mm_inline.h>
  54#include <linux/swap_cgroup.h>
  55#include <linux/cpu.h>
  56#include <linux/oom.h>
  57#include <linux/lockdep.h>
  58#include <linux/file.h>
  59#include <linux/tracehook.h>
  60#include <linux/psi.h>
  61#include <linux/seq_buf.h>
  62#include "internal.h"
  63#include <net/sock.h>
  64#include <net/ip.h>
  65#include "slab.h"
  66
  67#include <linux/uaccess.h>
  68
  69#include <trace/events/vmscan.h>
  70
  71struct cgroup_subsys memory_cgrp_subsys __read_mostly;
  72EXPORT_SYMBOL(memory_cgrp_subsys);
  73
  74struct mem_cgroup *root_mem_cgroup __read_mostly;
  75
  76#define MEM_CGROUP_RECLAIM_RETRIES	5
  77
  78/* Socket memory accounting disabled? */
  79static bool cgroup_memory_nosocket;
  80
  81/* Kernel memory accounting disabled? */
  82static bool cgroup_memory_nokmem;
  83
  84/* Whether the swap controller is active */
  85#ifdef CONFIG_MEMCG_SWAP
  86int do_swap_account __read_mostly;
  87#else
  88#define do_swap_account		0
  89#endif
  90
  91#ifdef CONFIG_CGROUP_WRITEBACK
  92static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
  93#endif
  94
  95/* Whether legacy memory+swap accounting is active */
  96static bool do_memsw_account(void)
  97{
  98	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
  99}
 100
 101static const char *const mem_cgroup_lru_names[] = {
 102	"inactive_anon",
 103	"active_anon",
 104	"inactive_file",
 105	"active_file",
 106	"unevictable",
 107};
 108
 109#define THRESHOLDS_EVENTS_TARGET 128
 110#define SOFTLIMIT_EVENTS_TARGET 1024
 111#define NUMAINFO_EVENTS_TARGET	1024
 112
 113/*
 114 * Cgroups above their limits are maintained in a RB-Tree, independent of
 115 * their hierarchy representation
 116 */
 117
 118struct mem_cgroup_tree_per_node {
 119	struct rb_root rb_root;
 120	struct rb_node *rb_rightmost;
 121	spinlock_t lock;
 122};
 123
 124struct mem_cgroup_tree {
 125	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
 126};
 127
 128static struct mem_cgroup_tree soft_limit_tree __read_mostly;
 129
 130/* for OOM */
 131struct mem_cgroup_eventfd_list {
 132	struct list_head list;
 133	struct eventfd_ctx *eventfd;
 134};
 135
 136/*
 137 * cgroup_event represents events which userspace wants to receive.
 138 */
 139struct mem_cgroup_event {
 140	/*
 141	 * memcg which the event belongs to.
 142	 */
 143	struct mem_cgroup *memcg;
 144	/*
 145	 * eventfd to signal userspace about the event.
 146	 */
 147	struct eventfd_ctx *eventfd;
 148	/*
 149	 * Each of these stored in a list by the cgroup.
 150	 */
 151	struct list_head list;
 152	/*
 153	 * register_event() callback will be used to add new userspace
 154	 * waiter for changes related to this event.  Use eventfd_signal()
 155	 * on eventfd to send notification to userspace.
 156	 */
 157	int (*register_event)(struct mem_cgroup *memcg,
 158			      struct eventfd_ctx *eventfd, const char *args);
 159	/*
 160	 * unregister_event() callback will be called when userspace closes
 161	 * the eventfd or on cgroup removing.  This callback must be set,
 162	 * if you want provide notification functionality.
 163	 */
 164	void (*unregister_event)(struct mem_cgroup *memcg,
 165				 struct eventfd_ctx *eventfd);
 166	/*
 167	 * All fields below needed to unregister event when
 168	 * userspace closes eventfd.
 169	 */
 170	poll_table pt;
 171	wait_queue_head_t *wqh;
 172	wait_queue_entry_t wait;
 173	struct work_struct remove;
 174};
 175
 176static void mem_cgroup_threshold(struct mem_cgroup *memcg);
 177static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
 178
 179/* Stuff for moving charges at task migration. */
 180/*
 181 * Types of charges to be moved.
 182 */
 183#define MOVE_ANON	0x1U
 184#define MOVE_FILE	0x2U
 185#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
 186
 187/* "mc" and its members are protected by cgroup_mutex */
 188static struct move_charge_struct {
 189	spinlock_t	  lock; /* for from, to */
 190	struct mm_struct  *mm;
 191	struct mem_cgroup *from;
 192	struct mem_cgroup *to;
 193	unsigned long flags;
 194	unsigned long precharge;
 195	unsigned long moved_charge;
 196	unsigned long moved_swap;
 197	struct task_struct *moving_task;	/* a task moving charges */
 198	wait_queue_head_t waitq;		/* a waitq for other context */
 199} mc = {
 200	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
 201	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
 202};
 203
 204/*
 205 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 206 * limit reclaim to prevent infinite loops, if they ever occur.
 207 */
 208#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
 209#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
 210
 211enum charge_type {
 212	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
 213	MEM_CGROUP_CHARGE_TYPE_ANON,
 214	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
 215	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
 216	NR_CHARGE_TYPE,
 217};
 218
 219/* for encoding cft->private value on file */
 220enum res_type {
 221	_MEM,
 222	_MEMSWAP,
 223	_OOM_TYPE,
 224	_KMEM,
 225	_TCP,
 226};
 227
 228#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
 229#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
 230#define MEMFILE_ATTR(val)	((val) & 0xffff)
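/*
 * Example (sketch): MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE) packs the
 * counter type into the high 16 bits and the attribute into the low
 * 16 bits; MEMFILE_TYPE() and MEMFILE_ATTR() undo the packing.
 */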
 231/* Used for the OOM notifier */
 232#define OOM_CONTROL		(0)
 233
 234/*
 235 * Iteration constructs for visiting all cgroups (under a tree).  If
 236 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 237 * be used for reference counting.
 238 */
 239#define for_each_mem_cgroup_tree(iter, root)		\
 240	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
 241	     iter != NULL;				\
 242	     iter = mem_cgroup_iter(root, iter, NULL))
 243
 244#define for_each_mem_cgroup(iter)			\
 245	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
 246	     iter != NULL;				\
 247	     iter = mem_cgroup_iter(NULL, iter, NULL))
 248
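/*
 * Returns true when the current task is dying (an OOM victim, has a
 * fatal signal pending, or is exiting), so that charge attempts can
 * be forced through rather than failed.
 */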
 249static inline bool should_force_charge(void)
 250{
 251	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
 252		(current->flags & PF_EXITING);
 253}
 254
 255/* Some nice accessors for the vmpressure. */
 256struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
 257{
 258	if (!memcg)
 259		memcg = root_mem_cgroup;
 260	return &memcg->vmpressure;
 261}
 262
 263struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
 264{
 265	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
 266}
 267
 268#ifdef CONFIG_MEMCG_KMEM
 269/*
 270 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 271 * The main reason for not using cgroup id for this:
 272 *  this works better in sparse environments, where we have a lot of memcgs,
 273 *  but only a few kmem-limited. Or also, if we have, for instance, 200
 274 *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
 275 *  200 entry array for that.
 276 *
 277 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 278 * will double each time we have to increase it.
 279 */
 280static DEFINE_IDA(memcg_cache_ida);
 281int memcg_nr_cache_ids;
 282
 283/* Protects memcg_nr_cache_ids */
 284static DECLARE_RWSEM(memcg_cache_ids_sem);
 285
 286void memcg_get_cache_ids(void)
 287{
 288	down_read(&memcg_cache_ids_sem);
 289}
 290
 291void memcg_put_cache_ids(void)
 292{
 293	up_read(&memcg_cache_ids_sem);
 294}
 295
 296/*
 297 * MIN_SIZE is different from 1, because we would like to avoid going through
 298 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 299 * cgroups is a reasonable guess. In the future, it could be a parameter or
 300 * tunable, but that is strictly not necessary.
 301 *
 302 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 303 * this constant directly from cgroup, but it is understandable that this is
 304 * better kept as an internal representation in cgroup.c. In any case, the
 305 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 306 * increase ours as well if it increases.
 307 */
 308#define MEMCG_CACHES_MIN_SIZE 4
 309#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
 310
 311/*
 312 * A lot of the calls to the cache allocation functions are expected to be
 313 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 314 * conditional on this static branch, we'll have to allow modules that do
 315 * kmem_cache_alloc and the like to see this symbol as well
 316 */
 317DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
 318EXPORT_SYMBOL(memcg_kmem_enabled_key);
 319
 320struct workqueue_struct *memcg_kmem_cache_wq;
 321#endif
 322
 323static int memcg_shrinker_map_size;
 324static DEFINE_MUTEX(memcg_shrinker_map_mutex);
 325
 326static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
 327{
 328	kvfree(container_of(head, struct memcg_shrinker_map, rcu));
 329}
 330
 331static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
 332					 int size, int old_size)
 333{
 334	struct memcg_shrinker_map *new, *old;
 335	int nid;
 336
 337	lockdep_assert_held(&memcg_shrinker_map_mutex);
 338
 339	for_each_node(nid) {
 340		old = rcu_dereference_protected(
 341			mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
 342		/* Not yet online memcg */
 343		if (!old)
 344			return 0;
 345
 346		new = kvmalloc(sizeof(*new) + size, GFP_KERNEL);
 347		if (!new)
 348			return -ENOMEM;
 349
 350		/* Set all old bits, clear all new bits */
 351		memset(new->map, (int)0xff, old_size);
 352		memset((void *)new->map + old_size, 0, size - old_size);
 353
 354		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
 355		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
 356	}
 357
 358	return 0;
 359}
 360
 361static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
 362{
 363	struct mem_cgroup_per_node *pn;
 364	struct memcg_shrinker_map *map;
 365	int nid;
 366
 367	if (mem_cgroup_is_root(memcg))
 368		return;
 369
 370	for_each_node(nid) {
 371		pn = mem_cgroup_nodeinfo(memcg, nid);
 372		map = rcu_dereference_protected(pn->shrinker_map, true);
 373		if (map)
 374			kvfree(map);
 375		rcu_assign_pointer(pn->shrinker_map, NULL);
 376	}
 377}
 378
 379static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
 380{
 381	struct memcg_shrinker_map *map;
 382	int nid, size, ret = 0;
 383
 384	if (mem_cgroup_is_root(memcg))
 385		return 0;
 386
 387	mutex_lock(&memcg_shrinker_map_mutex);
 388	size = memcg_shrinker_map_size;
 389	for_each_node(nid) {
 390		map = kvzalloc(sizeof(*map) + size, GFP_KERNEL);
 391		if (!map) {
 392			memcg_free_shrinker_maps(memcg);
 393			ret = -ENOMEM;
 394			break;
 395		}
 396		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
 397	}
 398	mutex_unlock(&memcg_shrinker_map_mutex);
 399
 400	return ret;
 401}
 402
 403int memcg_expand_shrinker_maps(int new_id)
 404{
 405	int size, old_size, ret = 0;
 406	struct mem_cgroup *memcg;
 407
 408	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
 409	old_size = memcg_shrinker_map_size;
 410	if (size <= old_size)
 411		return 0;
 412
 413	mutex_lock(&memcg_shrinker_map_mutex);
 414	if (!root_mem_cgroup)
 415		goto unlock;
 416
 417	for_each_mem_cgroup(memcg) {
 418		if (mem_cgroup_is_root(memcg))
 419			continue;
 420		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
 421		if (ret)
 422			goto unlock;
 423	}
 424unlock:
 425	if (!ret)
 426		memcg_shrinker_map_size = size;
 427	mutex_unlock(&memcg_shrinker_map_mutex);
 428	return ret;
 429}
 430
 431void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
 432{
 433	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
 434		struct memcg_shrinker_map *map;
 435
 436		rcu_read_lock();
 437		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
 438		/* Pairs with smp mb in shrink_slab() */
 439		smp_mb__before_atomic();
 440		set_bit(shrinker_id, map->map);
 441		rcu_read_unlock();
 442	}
 443}
 444
 445/**
 446 * mem_cgroup_css_from_page - css of the memcg associated with a page
 447 * @page: page of interest
 448 *
 449 * If memcg is bound to the default hierarchy, css of the memcg associated
 450 * with @page is returned.  The returned css remains associated with @page
 451 * until it is released.
 452 *
 453 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 454 * is returned.
 455 */
 456struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
 457{
 458	struct mem_cgroup *memcg;
 459
 460	memcg = page->mem_cgroup;
 461
 462	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
 463		memcg = root_mem_cgroup;
 464
 465	return &memcg->css;
 466}
 467
 468/**
 469 * page_cgroup_ino - return inode number of the memcg a page is charged to
 470 * @page: the page
 471 *
 472 * Look up the closest online ancestor of the memory cgroup @page is charged to
 473 * and return its inode number or 0 if @page is not charged to any cgroup. It
 474 * is safe to call this function without holding a reference to @page.
 475 *
 476 * Note, this function is inherently racy, because there is nothing to prevent
 477 * the cgroup inode from getting torn down and potentially reallocated a moment
 478 * after page_cgroup_ino() returns, so it only should be used by callers that
 479 * do not care (such as procfs interfaces).
 480 */
 481ino_t page_cgroup_ino(struct page *page)
 482{
 483	struct mem_cgroup *memcg;
 484	unsigned long ino = 0;
 485
 486	rcu_read_lock();
 487	if (PageSlab(page) && !PageTail(page))
 488		memcg = memcg_from_slab_page(page);
 489	else
 490		memcg = READ_ONCE(page->mem_cgroup);
 491	while (memcg && !(memcg->css.flags & CSS_ONLINE))
 492		memcg = parent_mem_cgroup(memcg);
 493	if (memcg)
 494		ino = cgroup_ino(memcg->css.cgroup);
 495	rcu_read_unlock();
 496	return ino;
 497}
 498
 499static struct mem_cgroup_per_node *
 500mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
 501{
 502	int nid = page_to_nid(page);
 503
 504	return memcg->nodeinfo[nid];
 505}
 506
 507static struct mem_cgroup_tree_per_node *
 508soft_limit_tree_node(int nid)
 509{
 510	return soft_limit_tree.rb_tree_per_node[nid];
 511}
 512
 513static struct mem_cgroup_tree_per_node *
 514soft_limit_tree_from_page(struct page *page)
 515{
 516	int nid = page_to_nid(page);
 517
 518	return soft_limit_tree.rb_tree_per_node[nid];
 519}
 520
 521static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
 522					 struct mem_cgroup_tree_per_node *mctz,
 523					 unsigned long new_usage_in_excess)
 524{
 525	struct rb_node **p = &mctz->rb_root.rb_node;
 526	struct rb_node *parent = NULL;
 527	struct mem_cgroup_per_node *mz_node;
 528	bool rightmost = true;
 529
 530	if (mz->on_tree)
 531		return;
 532
 533	mz->usage_in_excess = new_usage_in_excess;
 534	if (!mz->usage_in_excess)
 535		return;
 536	while (*p) {
 537		parent = *p;
 538		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
 539					tree_node);
 540		if (mz->usage_in_excess < mz_node->usage_in_excess) {
 541			p = &(*p)->rb_left;
 542			rightmost = false;
 543		}
 544
 545		/*
 546		 * We can't avoid mem cgroups that are over their soft
 547		 * limit by the same amount
 548		 */
 549		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
 550			p = &(*p)->rb_right;
 551	}
 552
 553	if (rightmost)
 554		mctz->rb_rightmost = &mz->tree_node;
 555
 556	rb_link_node(&mz->tree_node, parent, p);
 557	rb_insert_color(&mz->tree_node, &mctz->rb_root);
 558	mz->on_tree = true;
 559}
 560
 561static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
 562					 struct mem_cgroup_tree_per_node *mctz)
 563{
 564	if (!mz->on_tree)
 565		return;
 566
 567	if (&mz->tree_node == mctz->rb_rightmost)
 568		mctz->rb_rightmost = rb_prev(&mz->tree_node);
 569
 570	rb_erase(&mz->tree_node, &mctz->rb_root);
 571	mz->on_tree = false;
 572}
 573
 574static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
 575				       struct mem_cgroup_tree_per_node *mctz)
 576{
 577	unsigned long flags;
 578
 579	spin_lock_irqsave(&mctz->lock, flags);
 580	__mem_cgroup_remove_exceeded(mz, mctz);
 581	spin_unlock_irqrestore(&mctz->lock, flags);
 582}
 583
 584static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
 585{
 586	unsigned long nr_pages = page_counter_read(&memcg->memory);
 587	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
 588	unsigned long excess = 0;
 589
 590	if (nr_pages > soft_limit)
 591		excess = nr_pages - soft_limit;
 592
 593	return excess;
 594}
 595
 596static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
 597{
 598	unsigned long excess;
 599	struct mem_cgroup_per_node *mz;
 600	struct mem_cgroup_tree_per_node *mctz;
 601
 602	mctz = soft_limit_tree_from_page(page);
 603	if (!mctz)
 604		return;
 605	/*
 606	 * Necessary to update all ancestors when hierarchy is used,
 607	 * because their event counters are not touched.
 608	 */
 609	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
 610		mz = mem_cgroup_page_nodeinfo(memcg, page);
 611		excess = soft_limit_excess(memcg);
 612		/*
 613		 * We have to update the tree if mz is on RB-tree or
 614		 * mem is over its softlimit.
 615		 */
 616		if (excess || mz->on_tree) {
 617			unsigned long flags;
 618
 619			spin_lock_irqsave(&mctz->lock, flags);
 620			/* if on-tree, remove it */
 621			if (mz->on_tree)
 622				__mem_cgroup_remove_exceeded(mz, mctz);
 623			/*
 624			 * Insert again. mz->usage_in_excess will be updated.
 625			 * If excess is 0, no tree ops.
 626			 */
 627			__mem_cgroup_insert_exceeded(mz, mctz, excess);
 628			spin_unlock_irqrestore(&mctz->lock, flags);
 629		}
 630	}
 631}
 632
 633static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
 634{
 635	struct mem_cgroup_tree_per_node *mctz;
 636	struct mem_cgroup_per_node *mz;
 637	int nid;
 638
 639	for_each_node(nid) {
 640		mz = mem_cgroup_nodeinfo(memcg, nid);
 641		mctz = soft_limit_tree_node(nid);
 642		if (mctz)
 643			mem_cgroup_remove_exceeded(mz, mctz);
 644	}
 645}
 646
 647static struct mem_cgroup_per_node *
 648__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
 649{
 650	struct mem_cgroup_per_node *mz;
 651
 652retry:
 653	mz = NULL;
 654	if (!mctz->rb_rightmost)
 655		goto done;		/* Nothing to reclaim from */
 656
 657	mz = rb_entry(mctz->rb_rightmost,
 658		      struct mem_cgroup_per_node, tree_node);
 659	/*
 660	 * Remove the node now but someone else can add it back,
 661	 * we will add it back at the end of reclaim to its correct
 662	 * position in the tree.
 663	 */
 664	__mem_cgroup_remove_exceeded(mz, mctz);
 665	if (!soft_limit_excess(mz->memcg) ||
 666	    !css_tryget_online(&mz->memcg->css))
 667		goto retry;
 668done:
 669	return mz;
 670}
 671
 672static struct mem_cgroup_per_node *
 673mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
 674{
 675	struct mem_cgroup_per_node *mz;
 676
 677	spin_lock_irq(&mctz->lock);
 678	mz = __mem_cgroup_largest_soft_limit_node(mctz);
 679	spin_unlock_irq(&mctz->lock);
 680	return mz;
 681}
 682
 683/**
 684 * __mod_memcg_state - update cgroup memory statistics
 685 * @memcg: the memory cgroup
 686 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 687 * @val: delta to add to the counter, can be negative
 688 */
 689void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
 690{
 691	long x;
 692
 693	if (mem_cgroup_disabled())
 694		return;
 695
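	/*
	 * Accumulate the delta in a per-CPU counter and only fold it into
	 * the atomic hierarchical counters once it exceeds
	 * MEMCG_CHARGE_BATCH, keeping the common path cheap.
	 */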
 696	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
 697	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
 698		struct mem_cgroup *mi;
 699
 700		/*
 701		 * Batch local counters to keep them in sync with
 702		 * the hierarchical ones.
 703		 */
 704		__this_cpu_add(memcg->vmstats_local->stat[idx], x);
 705		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
 706			atomic_long_add(x, &mi->vmstats[idx]);
 707		x = 0;
 708	}
 709	__this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
 710}
 711
 712static struct mem_cgroup_per_node *
 713parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
 714{
 715	struct mem_cgroup *parent;
 716
 717	parent = parent_mem_cgroup(pn->memcg);
 718	if (!parent)
 719		return NULL;
 720	return mem_cgroup_nodeinfo(parent, nid);
 721}
 722
 723/**
 724 * __mod_lruvec_state - update lruvec memory statistics
 725 * @lruvec: the lruvec
 726 * @idx: the stat item
 727 * @val: delta to add to the counter, can be negative
 728 *
 729 * The lruvec is the intersection of the NUMA node and a cgroup. This
 730 * function updates all three counters that are affected by a
 731 * change of state at this level: per-node, per-cgroup, per-lruvec.
 732 */
 733void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 734			int val)
 735{
 736	pg_data_t *pgdat = lruvec_pgdat(lruvec);
 737	struct mem_cgroup_per_node *pn;
 738	struct mem_cgroup *memcg;
 739	long x;
 740
 741	/* Update node */
 742	__mod_node_page_state(pgdat, idx, val);
 743
 744	if (mem_cgroup_disabled())
 745		return;
 746
 747	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 748	memcg = pn->memcg;
 749
 750	/* Update memcg */
 751	__mod_memcg_state(memcg, idx, val);
 752
 753	/* Update lruvec */
 754	__this_cpu_add(pn->lruvec_stat_local->count[idx], val);
 755
 756	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
 757	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
 758		struct mem_cgroup_per_node *pi;
 759
 760		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
 761			atomic_long_add(x, &pi->lruvec_stat[idx]);
 762		x = 0;
 763	}
 764	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
 765}
 766
 767void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
 768{
 769	struct page *page = virt_to_head_page(p);
 770	pg_data_t *pgdat = page_pgdat(page);
 771	struct mem_cgroup *memcg;
 772	struct lruvec *lruvec;
 773
 774	rcu_read_lock();
 775	memcg = memcg_from_slab_page(page);
 776
 777	/* Untracked pages have no memcg, no lruvec. Update only the node */
 778	if (!memcg || memcg == root_mem_cgroup) {
 779		__mod_node_page_state(pgdat, idx, val);
 780	} else {
 781		lruvec = mem_cgroup_lruvec(pgdat, memcg);
 782		__mod_lruvec_state(lruvec, idx, val);
 783	}
 784	rcu_read_unlock();
 785}
 786
 787/**
 788 * __count_memcg_events - account VM events in a cgroup
 789 * @memcg: the memory cgroup
 790 * @idx: the event item
 791 * @count: the number of events that occurred
 792 */
 793void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
 794			  unsigned long count)
 795{
 796	unsigned long x;
 797
 798	if (mem_cgroup_disabled())
 799		return;
 800
 801	x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
 802	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
 803		struct mem_cgroup *mi;
 804
 805		/*
 806		 * Batch local counters to keep them in sync with
 807		 * the hierarchical ones.
 808		 */
 809		__this_cpu_add(memcg->vmstats_local->events[idx], x);
 810		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
 811			atomic_long_add(x, &mi->vmevents[idx]);
 812		x = 0;
 813	}
 814	__this_cpu_write(memcg->vmstats_percpu->events[idx], x);
 815}
 816
 817static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
 818{
 819	return atomic_long_read(&memcg->vmevents[event]);
 820}
 821
 822static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
 823{
 824	long x = 0;
 825	int cpu;
 826
 827	for_each_possible_cpu(cpu)
 828		x += per_cpu(memcg->vmstats_local->events[event], cpu);
 829	return x;
 830}
 831
 832static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 833					 struct page *page,
 834					 bool compound, int nr_pages)
 835{
 836	/*
 837	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
 838	 * counted as CACHE even if it's on ANON LRU.
 839	 */
 840	if (PageAnon(page))
 841		__mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
 842	else {
 843		__mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
 844		if (PageSwapBacked(page))
 845			__mod_memcg_state(memcg, NR_SHMEM, nr_pages);
 846	}
 847
 848	if (compound) {
 849		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 850		__mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
 851	}
 852
 853	/* pagein of a big page is an event. So, ignore page size */
 854	if (nr_pages > 0)
 855		__count_memcg_events(memcg, PGPGIN, 1);
 856	else {
 857		__count_memcg_events(memcg, PGPGOUT, 1);
 858		nr_pages = -nr_pages; /* for event */
 859	}
 860
 861	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
 862}
 863
 864static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
 865				       enum mem_cgroup_events_target target)
 866{
 867	unsigned long val, next;
 868
 869	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
 870	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
 871	/* from time_after() in jiffies.h */
 872	if ((long)(next - val) < 0) {
 873		switch (target) {
 874		case MEM_CGROUP_TARGET_THRESH:
 875			next = val + THRESHOLDS_EVENTS_TARGET;
 876			break;
 877		case MEM_CGROUP_TARGET_SOFTLIMIT:
 878			next = val + SOFTLIMIT_EVENTS_TARGET;
 879			break;
 880		case MEM_CGROUP_TARGET_NUMAINFO:
 881			next = val + NUMAINFO_EVENTS_TARGET;
 882			break;
 883		default:
 884			break;
 885		}
 886		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
 887		return true;
 888	}
 889	return false;
 890}
 891
 892/* Check events in order. */
 896static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 897{
 898	/* threshold event is triggered in finer grain than soft limit */
 899	if (unlikely(mem_cgroup_event_ratelimit(memcg,
 900						MEM_CGROUP_TARGET_THRESH))) {
 901		bool do_softlimit;
 902		bool do_numainfo __maybe_unused;
 903
 904		do_softlimit = mem_cgroup_event_ratelimit(memcg,
 905						MEM_CGROUP_TARGET_SOFTLIMIT);
 906#if MAX_NUMNODES > 1
 907		do_numainfo = mem_cgroup_event_ratelimit(memcg,
 908						MEM_CGROUP_TARGET_NUMAINFO);
 909#endif
 910		mem_cgroup_threshold(memcg);
 911		if (unlikely(do_softlimit))
 912			mem_cgroup_update_tree(memcg, page);
 913#if MAX_NUMNODES > 1
 914		if (unlikely(do_numainfo))
 915			atomic_inc(&memcg->numainfo_events);
 916#endif
 917	}
 918}
 919
 920struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 921{
 922	/*
 923	 * mm_update_next_owner() may clear mm->owner to NULL
 924	 * if it races with swapoff, page migration, etc.
 925	 * So this can be called with p == NULL.
 926	 */
 927	if (unlikely(!p))
 928		return NULL;
 929
 930	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
 931}
 932EXPORT_SYMBOL(mem_cgroup_from_task);
 933
 934/**
 935 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 936 * @mm: mm from which memcg should be extracted. It can be NULL.
 937 *
 938 * Obtain a reference on mm->memcg and return it if successful. Otherwise
 939 * root_mem_cgroup is returned. However, if memcg is disabled, NULL is
 940 * returned.
 941 */
 942struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
 943{
 944	struct mem_cgroup *memcg;
 945
 946	if (mem_cgroup_disabled())
 947		return NULL;
 948
 949	rcu_read_lock();
 950	do {
 951		/*
 952		 * Page cache insertions can happen without an
 953		 * actual mm context, e.g. during disk probing
 954		 * on boot, loopback IO, acct() writes etc.
 955		 */
 956		if (unlikely(!mm))
 957			memcg = root_mem_cgroup;
 958		else {
 959			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
 960			if (unlikely(!memcg))
 961				memcg = root_mem_cgroup;
 962		}
 963	} while (!css_tryget(&memcg->css));
 964	rcu_read_unlock();
 965	return memcg;
 966}
 967EXPORT_SYMBOL(get_mem_cgroup_from_mm);
 968
 969/**
 970 * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
 971 * @page: page from which memcg should be extracted.
 972 *
 973 * Obtain a reference on page->memcg and return it if successful. Otherwise
 974 * root_mem_cgroup is returned.
 975 */
 976struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
 977{
 978	struct mem_cgroup *memcg = page->mem_cgroup;
 979
 980	if (mem_cgroup_disabled())
 981		return NULL;
 982
 983	rcu_read_lock();
 984	if (!memcg || !css_tryget_online(&memcg->css))
 985		memcg = root_mem_cgroup;
 986	rcu_read_unlock();
 987	return memcg;
 988}
 989EXPORT_SYMBOL(get_mem_cgroup_from_page);
 990
 991/**
 992 * If current->active_memcg is non-NULL, do not fall back to current->mm->memcg.
 993 */
 994static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
 995{
 996	if (unlikely(current->active_memcg)) {
 997		struct mem_cgroup *memcg = root_mem_cgroup;
 998
 999		rcu_read_lock();
1000		if (css_tryget_online(&current->active_memcg->css))
1001			memcg = current->active_memcg;
1002		rcu_read_unlock();
1003		return memcg;
1004	}
1005	return get_mem_cgroup_from_mm(current->mm);
1006}
1007
1008/**
1009 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1010 * @root: hierarchy root
1011 * @prev: previously returned memcg, NULL on first invocation
1012 * @reclaim: cookie for shared reclaim walks, NULL for full walks
1013 *
1014 * Returns references to children of the hierarchy below @root, or
1015 * @root itself, or %NULL after a full round-trip.
1016 *
1017 * Caller must pass the return value in @prev on subsequent
1018 * invocations for reference counting, or use mem_cgroup_iter_break()
1019 * to cancel a hierarchy walk before the round-trip is complete.
1020 *
1021 * Reclaimers can specify a node and a priority level in @reclaim to
1022 * divide up the memcgs in the hierarchy among all concurrent
1023 * reclaimers operating on the same node and priority.
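 *
 * A sketch of a full-tree walk (essentially what the
 * for_each_mem_cgroup_tree() helper above expands to):
 *
 *	memcg = mem_cgroup_iter(root, NULL, NULL);
 *	while (memcg) {
 *		...work on memcg...
 *		memcg = mem_cgroup_iter(root, memcg, NULL);
 *	}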
1024 */
1025struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1026				   struct mem_cgroup *prev,
1027				   struct mem_cgroup_reclaim_cookie *reclaim)
1028{
1029	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
1030	struct cgroup_subsys_state *css = NULL;
1031	struct mem_cgroup *memcg = NULL;
1032	struct mem_cgroup *pos = NULL;
1033
1034	if (mem_cgroup_disabled())
1035		return NULL;
1036
1037	if (!root)
1038		root = root_mem_cgroup;
1039
1040	if (prev && !reclaim)
1041		pos = prev;
1042
1043	if (!root->use_hierarchy && root != root_mem_cgroup) {
1044		if (prev)
1045			goto out;
1046		return root;
1047	}
1048
1049	rcu_read_lock();
1050
1051	if (reclaim) {
1052		struct mem_cgroup_per_node *mz;
1053
1054		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
1055		iter = &mz->iter[reclaim->priority];
1056
1057		if (prev && reclaim->generation != iter->generation)
1058			goto out_unlock;
1059
1060		while (1) {
1061			pos = READ_ONCE(iter->position);
1062			if (!pos || css_tryget(&pos->css))
1063				break;
1064			/*
1065			 * css reference reached zero, so iter->position will
1066			 * be cleared by ->css_released. However, we should not
1067			 * rely on this happening soon, because ->css_released
1068			 * is called from a work queue, and by busy-waiting we
1069			 * might block it. So we clear iter->position right
1070			 * away.
1071			 */
1072			(void)cmpxchg(&iter->position, pos, NULL);
1073		}
1074	}
1075
1076	if (pos)
1077		css = &pos->css;
1078
1079	for (;;) {
1080		css = css_next_descendant_pre(css, &root->css);
1081		if (!css) {
1082			/*
1083			 * Reclaimers share the hierarchy walk, and a
1084			 * new one might jump in right at the end of
1085			 * the hierarchy - make sure they see at least
1086			 * one group and restart from the beginning.
1087			 */
1088			if (!prev)
1089				continue;
1090			break;
1091		}
1092
1093		/*
1094		 * Verify the css and acquire a reference.  The root
1095		 * is provided by the caller, so we know it's alive
1096		 * and kicking, and don't take an extra reference.
1097		 */
1098		memcg = mem_cgroup_from_css(css);
1099
1100		if (css == &root->css)
1101			break;
1102
1103		if (css_tryget(css))
1104			break;
1105
1106		memcg = NULL;
1107	}
1108
1109	if (reclaim) {
1110		/*
1111		 * The position could have already been updated by a competing
1112		 * thread, so check that the value hasn't changed since we read
1113		 * it to avoid reclaiming from the same cgroup twice.
1114		 */
1115		(void)cmpxchg(&iter->position, pos, memcg);
1116
1117		if (pos)
1118			css_put(&pos->css);
1119
1120		if (!memcg)
1121			iter->generation++;
1122		else if (!prev)
1123			reclaim->generation = iter->generation;
1124	}
1125
1126out_unlock:
1127	rcu_read_unlock();
1128out:
1129	if (prev && prev != root)
1130		css_put(&prev->css);
1131
1132	return memcg;
1133}
1134
1135/**
1136 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1137 * @root: hierarchy root
1138 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1139 */
1140void mem_cgroup_iter_break(struct mem_cgroup *root,
1141			   struct mem_cgroup *prev)
1142{
1143	if (!root)
1144		root = root_mem_cgroup;
1145	if (prev && prev != root)
1146		css_put(&prev->css);
1147}
1148
1149static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1150					struct mem_cgroup *dead_memcg)
1151{
1152	struct mem_cgroup_reclaim_iter *iter;
1153	struct mem_cgroup_per_node *mz;
1154	int nid;
1155	int i;
1156
1157	for_each_node(nid) {
1158		mz = mem_cgroup_nodeinfo(from, nid);
1159		for (i = 0; i <= DEF_PRIORITY; i++) {
1160			iter = &mz->iter[i];
1161			cmpxchg(&iter->position,
1162				dead_memcg, NULL);
1163		}
1164	}
1165}
1166
1167static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1168{
1169	struct mem_cgroup *memcg = dead_memcg;
1170	struct mem_cgroup *last;
1171
1172	do {
1173		__invalidate_reclaim_iterators(memcg, dead_memcg);
1174		last = memcg;
1175	} while ((memcg = parent_mem_cgroup(memcg)));
1176
1177	/*
1178	 * When cgroup1 non-hierarchy mode is used,
1179	 * parent_mem_cgroup() does not walk all the way up to the
1180	 * cgroup root (root_mem_cgroup). So we have to handle
1181	 * dead_memcg from cgroup root separately.
1182	 */
1183	if (last != root_mem_cgroup)
1184		__invalidate_reclaim_iterators(root_mem_cgroup,
1185						dead_memcg);
1186}
1187
1188/**
1189 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1190 * @memcg: hierarchy root
1191 * @fn: function to call for each task
1192 * @arg: argument passed to @fn
1193 *
1194 * This function iterates over tasks attached to @memcg or to any of its
1195 * descendants and calls @fn for each task. If @fn returns a non-zero
1196 * value, the function breaks the iteration loop and returns the value.
1197 * Otherwise, it will iterate over all tasks and return 0.
1198 *
1199 * This function must not be called for the root memory cgroup.
1200 */
1201int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1202			  int (*fn)(struct task_struct *, void *), void *arg)
1203{
1204	struct mem_cgroup *iter;
1205	int ret = 0;
1206
1207	BUG_ON(memcg == root_mem_cgroup);
1208
1209	for_each_mem_cgroup_tree(iter, memcg) {
1210		struct css_task_iter it;
1211		struct task_struct *task;
1212
1213		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1214		while (!ret && (task = css_task_iter_next(&it)))
1215			ret = fn(task, arg);
1216		css_task_iter_end(&it);
1217		if (ret) {
1218			mem_cgroup_iter_break(memcg, iter);
1219			break;
1220		}
1221	}
1222	return ret;
1223}
1224
1225/**
1226 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
1227 * @page: the page
1228 * @pgdat: pgdat of the page
1229 *
1230 * This function is only safe when following the LRU page isolation
1231 * and putback protocol: the LRU lock must be held, and the page must
1232 * either be PageLRU() or the caller must have isolated/allocated it.
1233 */
1234struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
1235{
1236	struct mem_cgroup_per_node *mz;
1237	struct mem_cgroup *memcg;
1238	struct lruvec *lruvec;
1239
1240	if (mem_cgroup_disabled()) {
1241		lruvec = &pgdat->lruvec;
1242		goto out;
1243	}
1244
1245	memcg = page->mem_cgroup;
1246	/*
1247	 * Swapcache readahead pages are added to the LRU - and
1248	 * possibly migrated - before they are charged.
1249	 */
1250	if (!memcg)
1251		memcg = root_mem_cgroup;
1252
1253	mz = mem_cgroup_page_nodeinfo(memcg, page);
1254	lruvec = &mz->lruvec;
1255out:
1256	/*
1257	 * Since a node can be onlined after the mem_cgroup was created,
1258	 * we have to be prepared to initialize lruvec->pgdat here;
1259	 * and if offlined then reonlined, we need to reinitialize it.
1260	 */
1261	if (unlikely(lruvec->pgdat != pgdat))
1262		lruvec->pgdat = pgdat;
1263	return lruvec;
1264}
1265
1266/**
1267 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1268 * @lruvec: mem_cgroup per zone lru vector
1269 * @lru: index of lru list the page is sitting on
1270 * @zid: zone id of the accounted pages
1271 * @nr_pages: positive when adding or negative when removing
1272 *
1273 * This function must be called under lru_lock, just before a page is added
1274 * to or just after a page is removed from an lru list (that ordering being
1275 * so as to allow it to check that lru_size 0 is consistent with list_empty).
1276 */
1277void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1278				int zid, int nr_pages)
1279{
1280	struct mem_cgroup_per_node *mz;
1281	unsigned long *lru_size;
1282	long size;
1283
1284	if (mem_cgroup_disabled())
1285		return;
1286
1287	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1288	lru_size = &mz->lru_zone_size[zid][lru];
1289
1290	if (nr_pages < 0)
1291		*lru_size += nr_pages;
1292
1293	size = *lru_size;
1294	if (WARN_ONCE(size < 0,
1295		"%s(%p, %d, %d): lru_size %ld\n",
1296		__func__, lruvec, lru, nr_pages, size)) {
1297		VM_BUG_ON(1);
1298		*lru_size = 0;
1299	}
1300
1301	if (nr_pages > 0)
1302		*lru_size += nr_pages;
1303}
1304
1305/**
1306 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1307 * @memcg: the memory cgroup
1308 *
1309 * Returns the maximum amount of memory @memcg can be charged with, in
1310 * pages.
1311 */
1312static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1313{
1314	unsigned long margin = 0;
1315	unsigned long count;
1316	unsigned long limit;
1317
1318	count = page_counter_read(&memcg->memory);
1319	limit = READ_ONCE(memcg->memory.max);
1320	if (count < limit)
1321		margin = limit - count;
1322
1323	if (do_memsw_account()) {
1324		count = page_counter_read(&memcg->memsw);
1325		limit = READ_ONCE(memcg->memsw.max);
1326		if (count <= limit)
1327			margin = min(margin, limit - count);
1328		else
1329			margin = 0;
1330	}
1331
1332	return margin;
1333}
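/*
 * Worked example with made-up numbers: memory.max = 1000 pages with
 * 900 pages charged gives a memory margin of 100 pages.  If legacy
 * memsw accounting is active with memsw.max = 950 and 920 pages
 * charged, the memsw margin of 30 pages is smaller, so
 * mem_cgroup_margin() returns 30.
 */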
1334
1335/*
1336 * A routine for checking whether @memcg is under move_account() or not.
1337 *
1338 * Checks whether a cgroup is mc.from, mc.to, or under the hierarchy of
1339 * the moving cgroups. This is for waiting at high memory pressure
1340 * caused by "move".
1341 */
1342static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1343{
1344	struct mem_cgroup *from;
1345	struct mem_cgroup *to;
1346	bool ret = false;
1347	/*
1348	 * Unlike task_move routines, we access mc.to, mc.from not under
1349	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1350	 */
1351	spin_lock(&mc.lock);
1352	from = mc.from;
1353	to = mc.to;
1354	if (!from)
1355		goto unlock;
1356
1357	ret = mem_cgroup_is_descendant(from, memcg) ||
1358		mem_cgroup_is_descendant(to, memcg);
1359unlock:
1360	spin_unlock(&mc.lock);
1361	return ret;
1362}
1363
1364static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1365{
1366	if (mc.moving_task && current != mc.moving_task) {
1367		if (mem_cgroup_under_move(memcg)) {
1368			DEFINE_WAIT(wait);
1369			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1370			/* moving charge context might have finished. */
1371			if (mc.moving_task)
1372				schedule();
1373			finish_wait(&mc.waitq, &wait);
1374			return true;
1375		}
1376	}
1377	return false;
1378}
1379
1380static char *memory_stat_format(struct mem_cgroup *memcg)
1381{
1382	struct seq_buf s;
1383	int i;
1384
1385	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
1386	if (!s.buffer)
1387		return NULL;
1388
1389	/*
1390	 * Provide statistics on the state of the memory subsystem as
1391	 * well as cumulative event counters that show past behavior.
1392	 *
1393	 * This list is ordered following a combination of these gradients:
1394	 * 1) generic big picture -> specifics and details
1395	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1396	 *
1397	 * Current memory state:
1398	 */
1399
1400	seq_buf_printf(&s, "anon %llu\n",
1401		       (u64)memcg_page_state(memcg, MEMCG_RSS) *
1402		       PAGE_SIZE);
1403	seq_buf_printf(&s, "file %llu\n",
1404		       (u64)memcg_page_state(memcg, MEMCG_CACHE) *
1405		       PAGE_SIZE);
1406	seq_buf_printf(&s, "kernel_stack %llu\n",
1407		       (u64)memcg_page_state(memcg, MEMCG_KERNEL_STACK_KB) *
1408		       1024);
1409	seq_buf_printf(&s, "slab %llu\n",
1410		       (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) +
1411			     memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE)) *
1412		       PAGE_SIZE);
1413	seq_buf_printf(&s, "sock %llu\n",
1414		       (u64)memcg_page_state(memcg, MEMCG_SOCK) *
1415		       PAGE_SIZE);
1416
1417	seq_buf_printf(&s, "shmem %llu\n",
1418		       (u64)memcg_page_state(memcg, NR_SHMEM) *
1419		       PAGE_SIZE);
1420	seq_buf_printf(&s, "file_mapped %llu\n",
1421		       (u64)memcg_page_state(memcg, NR_FILE_MAPPED) *
1422		       PAGE_SIZE);
1423	seq_buf_printf(&s, "file_dirty %llu\n",
1424		       (u64)memcg_page_state(memcg, NR_FILE_DIRTY) *
1425		       PAGE_SIZE);
1426	seq_buf_printf(&s, "file_writeback %llu\n",
1427		       (u64)memcg_page_state(memcg, NR_WRITEBACK) *
1428		       PAGE_SIZE);
1429
1430	/*
1431	 * TODO: We should eventually replace our own MEMCG_RSS_HUGE counter
1432	 * with the NR_ANON_THP vm counter, but right now it's a pain in the
1433	 * arse because it requires migrating the work out of rmap to a place
1434	 * where the page->mem_cgroup is set up and stable.
1435	 */
1436	seq_buf_printf(&s, "anon_thp %llu\n",
1437		       (u64)memcg_page_state(memcg, MEMCG_RSS_HUGE) *
1438		       PAGE_SIZE);
1439
1440	for (i = 0; i < NR_LRU_LISTS; i++)
1441		seq_buf_printf(&s, "%s %llu\n", mem_cgroup_lru_names[i],
1442			       (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
1443			       PAGE_SIZE);
1444
1445	seq_buf_printf(&s, "slab_reclaimable %llu\n",
1446		       (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) *
1447		       PAGE_SIZE);
1448	seq_buf_printf(&s, "slab_unreclaimable %llu\n",
1449		       (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE) *
1450		       PAGE_SIZE);
1451
1452	/* Accumulated memory events */
1453
1454	seq_buf_printf(&s, "pgfault %lu\n", memcg_events(memcg, PGFAULT));
1455	seq_buf_printf(&s, "pgmajfault %lu\n", memcg_events(memcg, PGMAJFAULT));
1456
1457	seq_buf_printf(&s, "workingset_refault %lu\n",
1458		       memcg_page_state(memcg, WORKINGSET_REFAULT));
1459	seq_buf_printf(&s, "workingset_activate %lu\n",
1460		       memcg_page_state(memcg, WORKINGSET_ACTIVATE));
1461	seq_buf_printf(&s, "workingset_nodereclaim %lu\n",
1462		       memcg_page_state(memcg, WORKINGSET_NODERECLAIM));
1463
1464	seq_buf_printf(&s, "pgrefill %lu\n", memcg_events(memcg, PGREFILL));
1465	seq_buf_printf(&s, "pgscan %lu\n",
1466		       memcg_events(memcg, PGSCAN_KSWAPD) +
1467		       memcg_events(memcg, PGSCAN_DIRECT));
1468	seq_buf_printf(&s, "pgsteal %lu\n",
1469		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1470		       memcg_events(memcg, PGSTEAL_DIRECT));
1471	seq_buf_printf(&s, "pgactivate %lu\n", memcg_events(memcg, PGACTIVATE));
1472	seq_buf_printf(&s, "pgdeactivate %lu\n", memcg_events(memcg, PGDEACTIVATE));
1473	seq_buf_printf(&s, "pglazyfree %lu\n", memcg_events(memcg, PGLAZYFREE));
1474	seq_buf_printf(&s, "pglazyfreed %lu\n", memcg_events(memcg, PGLAZYFREED));
1475
1476#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1477	seq_buf_printf(&s, "thp_fault_alloc %lu\n",
1478		       memcg_events(memcg, THP_FAULT_ALLOC));
1479	seq_buf_printf(&s, "thp_collapse_alloc %lu\n",
1480		       memcg_events(memcg, THP_COLLAPSE_ALLOC));
1481#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1482
1483	/* The above should easily fit into one page */
1484	WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1485
1486	return s.buffer;
1487}
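/*
 * The buffer built above backs the cgroup2 memory.stat file and is
 * also dumped to the kernel log by mem_cgroup_print_oom_meminfo().
 * Illustrative excerpt (values are made up; sizes are in bytes,
 * the trailing entries are event counts):
 *
 *	anon 209715200
 *	file 52428800
 *	kernel_stack 221184
 *	slab 4194304
 *	...
 *	pgfault 1353
 *	pgmajfault 12
 */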
1488
1489#define K(x) ((x) << (PAGE_SHIFT-10))
1490/**
1491 * mem_cgroup_print_oom_context: Print OOM information relevant to
1492 * memory controller.
1493 * @memcg: The memory cgroup that went over limit
1494 * @p: Task that is going to be killed
1495 *
1496 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1497 * enabled
1498 */
1499void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1500{
1501	rcu_read_lock();
1502
1503	if (memcg) {
1504		pr_cont(",oom_memcg=");
1505		pr_cont_cgroup_path(memcg->css.cgroup);
1506	} else
1507		pr_cont(",global_oom");
1508	if (p) {
1509		pr_cont(",task_memcg=");
1510		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1511	}
1512	rcu_read_unlock();
1513}
1514
1515/**
1516 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1517 * memory controller.
1518 * @memcg: The memory cgroup that went over limit
1519 */
1520void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1521{
1522	char *buf;
1523
1524	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1525		K((u64)page_counter_read(&memcg->memory)),
1526		K((u64)memcg->memory.max), memcg->memory.failcnt);
1527	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1528		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1529			K((u64)page_counter_read(&memcg->swap)),
1530			K((u64)memcg->swap.max), memcg->swap.failcnt);
1531	else {
1532		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1533			K((u64)page_counter_read(&memcg->memsw)),
1534			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1535		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1536			K((u64)page_counter_read(&memcg->kmem)),
1537			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1538	}
1539
1540	pr_info("Memory cgroup stats for ");
1541	pr_cont_cgroup_path(memcg->css.cgroup);
1542	pr_cont(":");
1543	buf = memory_stat_format(memcg);
1544	if (!buf)
1545		return;
1546	pr_info("%s", buf);
1547	kfree(buf);
1548}
1549
1550/*
1551 * Return the memory (and swap, if configured) limit for a memcg.
1552 */
1553unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1554{
1555	unsigned long max;
1556
1557	max = memcg->memory.max;
1558	if (mem_cgroup_swappiness(memcg)) {
1559		unsigned long memsw_max;
1560		unsigned long swap_max;
1561
1562		memsw_max = memcg->memsw.max;
1563		swap_max = memcg->swap.max;
1564		swap_max = min(swap_max, (unsigned long)total_swap_pages);
1565		max = min(max + swap_max, memsw_max);
1566	}
1567	return max;
1568}
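/*
 * Worked example with made-up numbers: memory.max = 1000 pages,
 * memsw.max = 1500 pages, swap.max unlimited and 300 pages of swap
 * configured in the system gives
 * min(1000 + min(ULONG_MAX, 300), 1500) = 1300 pages.  With
 * swappiness 0 the swap term is skipped and the result is simply
 * memory.max.
 */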
1569
1570unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1571{
1572	return page_counter_read(&memcg->memory);
1573}
1574
1575static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1576				     int order)
1577{
1578	struct oom_control oc = {
1579		.zonelist = NULL,
1580		.nodemask = NULL,
1581		.memcg = memcg,
1582		.gfp_mask = gfp_mask,
1583		.order = order,
1584	};
1585	bool ret;
1586
1587	if (mutex_lock_killable(&oom_lock))
1588		return true;
1589	/*
1590	 * A few threads which were not waiting at mutex_lock_killable() can
1591	 * fail to bail out. Therefore, check again after holding oom_lock.
1592	 */
1593	ret = should_force_charge() || out_of_memory(&oc);
1594	mutex_unlock(&oom_lock);
1595	return ret;
1596}
1597
1598#if MAX_NUMNODES > 1
1599
1600/**
1601 * test_mem_cgroup_node_reclaimable
1602 * @memcg: the target memcg
1603 * @nid: the node ID to be checked.
1604 * @noswap: specify true here if the user wants file only information.
1605 *
1606 * This function returns whether the specified memcg contains any
1607 * reclaimable pages on a node. Returns true if there are any reclaimable
1608 * pages in the node.
1609 */
1610static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1611		int nid, bool noswap)
1612{
1613	struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
1614
1615	if (lruvec_page_state(lruvec, NR_INACTIVE_FILE) ||
1616	    lruvec_page_state(lruvec, NR_ACTIVE_FILE))
1617		return true;
1618	if (noswap || !total_swap_pages)
1619		return false;
1620	if (lruvec_page_state(lruvec, NR_INACTIVE_ANON) ||
1621	    lruvec_page_state(lruvec, NR_ACTIVE_ANON))
1622		return true;
1623	return false;
1624
1625}
1626
1627/*
1628 * Always updating the nodemask is not very good - even if we have an empty
1629 * list or the wrong list here, we can start from some node and traverse all
1630 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1631 *
1632 */
1633static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1634{
1635	int nid;
1636	/*
1637	 * numainfo_events > 0 means there were at least NUMAINFO_EVENTS_TARGET
1638	 * pagein/pageout changes since the last update.
1639	 */
1640	if (!atomic_read(&memcg->numainfo_events))
1641		return;
1642	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1643		return;
1644
1645	/* make a nodemask where this memcg uses memory from */
1646	memcg->scan_nodes = node_states[N_MEMORY];
1647
1648	for_each_node_mask(nid, node_states[N_MEMORY]) {
1649
1650		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1651			node_clear(nid, memcg->scan_nodes);
1652	}
1653
1654	atomic_set(&memcg->numainfo_events, 0);
1655	atomic_set(&memcg->numainfo_updating, 0);
1656}
1657
1658/*
1659 * Selecting a node where we start reclaim from. Because what we need is just
1660 * reducing the usage counter, starting from anywhere is OK. Considering
1661 * memory reclaim from the current node, there are pros and cons.
1662 *
1663 * Freeing memory from the current node means freeing memory from a node which
1664 * we'll use or we've used, so it may degrade LRU ordering. And if several
1665 * threads hit their limits, they will contend on a node. But freeing from a
1666 * remote node means higher memory reclaim costs because of memory latency.
1667 *
1668 * Now, we use round-robin. Better algorithm is welcomed.
1669 */
1670int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1671{
1672	int node;
1673
1674	mem_cgroup_may_update_nodemask(memcg);
1675	node = memcg->last_scanned_node;
1676
1677	node = next_node_in(node, memcg->scan_nodes);
1678	/*
1679	 * mem_cgroup_may_update_nodemask might have seen no reclaimable pages
1680	 * last time it really checked all the LRUs due to rate limiting.
1681	 * Fallback to the current node in that case for simplicity.
1682	 */
1683	if (unlikely(node == MAX_NUMNODES))
1684		node = numa_node_id();
1685
1686	memcg->last_scanned_node = node;
1687	return node;
1688}
1689#else
1690int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1691{
1692	return 0;
1693}
1694#endif
1695
1696static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1697				   pg_data_t *pgdat,
1698				   gfp_t gfp_mask,
1699				   unsigned long *total_scanned)
1700{
1701	struct mem_cgroup *victim = NULL;
1702	int total = 0;
1703	int loop = 0;
1704	unsigned long excess;
1705	unsigned long nr_scanned;
1706	struct mem_cgroup_reclaim_cookie reclaim = {
1707		.pgdat = pgdat,
1708		.priority = 0,
1709	};
1710
1711	excess = soft_limit_excess(root_memcg);
1712
1713	while (1) {
1714		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1715		if (!victim) {
1716			loop++;
1717			if (loop >= 2) {
1718				/*
1719				 * If we have not been able to reclaim
1720				 * anything, it might be because there are
1721				 * no reclaimable pages under this hierarchy
1722				 */
1723				if (!total)
1724					break;
1725				/*
1726				 * We want to do more targeted reclaim.
1727				 * excess >> 2 is not too excessive so as to
1728				 * reclaim too much, nor so little that we keep
1729				 * coming back to reclaim from this cgroup
1730				 */
1731				if (total >= (excess >> 2) ||
1732					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1733					break;
1734			}
1735			continue;
1736		}
1737		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1738					pgdat, &nr_scanned);
1739		*total_scanned += nr_scanned;
1740		if (!soft_limit_excess(root_memcg))
1741			break;
1742	}
1743	mem_cgroup_iter_break(root_memcg, victim);
1744	return total;
1745}
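/*
 * Example of the bail-out heuristic above (made-up numbers): if the
 * root of the soft-limit-exceeding hierarchy is 400 pages over its
 * soft limit, the loop keeps picking victims until at least
 * excess >> 2 = 100 pages have been reclaimed or the loop count
 * passes MEM_CGROUP_MAX_RECLAIM_LOOPS, rather than insisting on
 * clawing back the whole overage in a single call.
 */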
1746
1747#ifdef CONFIG_LOCKDEP
1748static struct lockdep_map memcg_oom_lock_dep_map = {
1749	.name = "memcg_oom_lock",
1750};
1751#endif
1752
1753static DEFINE_SPINLOCK(memcg_oom_lock);
1754
1755/*
1756 * Check whether the OOM killer is already running under our hierarchy.
1757 * If someone else is running it, return false.
1758 */
1759static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1760{
1761	struct mem_cgroup *iter, *failed = NULL;
1762
1763	spin_lock(&memcg_oom_lock);
1764
1765	for_each_mem_cgroup_tree(iter, memcg) {
1766		if (iter->oom_lock) {
1767			/*
1768			 * this subtree of our hierarchy is already locked
1769			 * so we cannot give a lock.
1770			 */
1771			failed = iter;
1772			mem_cgroup_iter_break(memcg, iter);
1773			break;
1774		} else
1775			iter->oom_lock = true;
1776	}
1777
1778	if (failed) {
1779		/*
1780		 * OK, we failed to lock the whole subtree so we have
1781		 * to clean up what we set up to the failing subtree
1782		 */
1783		for_each_mem_cgroup_tree(iter, memcg) {
1784			if (iter == failed) {
1785				mem_cgroup_iter_break(memcg, iter);
1786				break;
1787			}
1788			iter->oom_lock = false;
1789		}
1790	} else
1791		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1792
1793	spin_unlock(&memcg_oom_lock);
1794
1795	return !failed;
1796}
1797
1798static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1799{
1800	struct mem_cgroup *iter;
1801
1802	spin_lock(&memcg_oom_lock);
1803	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
1804	for_each_mem_cgroup_tree(iter, memcg)
1805		iter->oom_lock = false;
1806	spin_unlock(&memcg_oom_lock);
1807}
1808
1809static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1810{
1811	struct mem_cgroup *iter;
1812
1813	spin_lock(&memcg_oom_lock);
1814	for_each_mem_cgroup_tree(iter, memcg)
1815		iter->under_oom++;
1816	spin_unlock(&memcg_oom_lock);
1817}
1818
1819static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1820{
1821	struct mem_cgroup *iter;
1822
1823	/*
1824	 * When a new child is created while the hierarchy is under oom,
1825	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
1826	 */
1827	spin_lock(&memcg_oom_lock);
1828	for_each_mem_cgroup_tree(iter, memcg)
1829		if (iter->under_oom > 0)
1830			iter->under_oom--;
1831	spin_unlock(&memcg_oom_lock);
1832}
1833
1834static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1835
1836struct oom_wait_info {
1837	struct mem_cgroup *memcg;
1838	wait_queue_entry_t	wait;
1839};
1840
1841static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1842	unsigned mode, int sync, void *arg)
1843{
1844	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1845	struct mem_cgroup *oom_wait_memcg;
1846	struct oom_wait_info *oom_wait_info;
1847
1848	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1849	oom_wait_memcg = oom_wait_info->memcg;
1850
1851	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1852	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1853		return 0;
1854	return autoremove_wake_function(wait, mode, sync, arg);
1855}
1856
1857static void memcg_oom_recover(struct mem_cgroup *memcg)
1858{
1859	/*
1860	 * For the following lockless ->under_oom test, the only required
1861	 * guarantee is that it must see the state asserted by an OOM when
1862	 * this function is called as a result of userland actions
1863	 * triggered by the notification of the OOM.  This is trivially
1864	 * achieved by invoking mem_cgroup_mark_under_oom() before
1865	 * triggering notification.
1866	 */
1867	if (memcg && memcg->under_oom)
1868		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1869}
1870
1871enum oom_status {
1872	OOM_SUCCESS,
1873	OOM_FAILED,
1874	OOM_ASYNC,
1875	OOM_SKIPPED
1876};
1877
1878static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1879{
1880	enum oom_status ret;
1881	bool locked;
1882
1883	if (order > PAGE_ALLOC_COSTLY_ORDER)
1884		return OOM_SKIPPED;
1885
1886	memcg_memory_event(memcg, MEMCG_OOM);
1887
1888	/*
1889	 * We are in the middle of the charge context here, so we
1890	 * don't want to block when potentially sitting on a callstack
1891	 * that holds all kinds of filesystem and mm locks.
1892	 *
1893	 * cgroup1 allows disabling the OOM killer and waiting for outside
1894	 * handling until the charge can succeed; remember the context and put
1895	 * the task to sleep at the end of the page fault when all locks are
1896	 * released.
1897	 *
1898	 * On the other hand, in-kernel OOM killer allows for an async victim
1899	 * memory reclaim (oom_reaper) and that means that we are not solely
1900	 * relying on the oom victim to make a forward progress and we can
1901	 * invoke the oom killer here.
1902	 *
1903	 * Please note that mem_cgroup_out_of_memory might fail to find a
1904	 * victim and then we have to bail out from the charge path.
1905	 */
1906	if (memcg->oom_kill_disable) {
1907		if (!current->in_user_fault)
1908			return OOM_SKIPPED;
1909		css_get(&memcg->css);
1910		current->memcg_in_oom = memcg;
1911		current->memcg_oom_gfp_mask = mask;
1912		current->memcg_oom_order = order;
1913
1914		return OOM_ASYNC;
1915	}
1916
1917	mem_cgroup_mark_under_oom(memcg);
1918
1919	locked = mem_cgroup_oom_trylock(memcg);
1920
1921	if (locked)
1922		mem_cgroup_oom_notify(memcg);
1923
1924	mem_cgroup_unmark_under_oom(memcg);
1925	if (mem_cgroup_out_of_memory(memcg, mask, order))
1926		ret = OOM_SUCCESS;
1927	else
1928		ret = OOM_FAILED;
1929
1930	if (locked)
1931		mem_cgroup_oom_unlock(memcg);
1932
1933	return ret;
1934}
1935
1936/**
1937 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1938 * @handle: actually kill/wait or just clean up the OOM state
1939 *
1940 * This has to be called at the end of a page fault if the memcg OOM
1941 * handler was enabled.
1942 *
1943 * Memcg supports userspace OOM handling where failed allocations must
1944 * sleep on a waitqueue until the userspace task resolves the
1945 * situation.  Sleeping directly in the charge context with all kinds
1946 * of locks held is not a good idea, instead we remember an OOM state
1947 * in the task and mem_cgroup_oom_synchronize() has to be called at
1948 * the end of the page fault to complete the OOM handling.
1949 *
1950 * Returns %true if an ongoing memcg OOM situation was detected and
1951 * completed, %false otherwise.
1952 */
1953bool mem_cgroup_oom_synchronize(bool handle)
1954{
1955	struct mem_cgroup *memcg = current->memcg_in_oom;
1956	struct oom_wait_info owait;
1957	bool locked;
1958
1959	/* OOM is global, do not handle */
1960	if (!memcg)
1961		return false;
1962
1963	if (!handle)
1964		goto cleanup;
1965
1966	owait.memcg = memcg;
1967	owait.wait.flags = 0;
1968	owait.wait.func = memcg_oom_wake_function;
1969	owait.wait.private = current;
1970	INIT_LIST_HEAD(&owait.wait.entry);
1971
1972	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1973	mem_cgroup_mark_under_oom(memcg);
1974
1975	locked = mem_cgroup_oom_trylock(memcg);
1976
1977	if (locked)
1978		mem_cgroup_oom_notify(memcg);
1979
1980	if (locked && !memcg->oom_kill_disable) {
1981		mem_cgroup_unmark_under_oom(memcg);
1982		finish_wait(&memcg_oom_waitq, &owait.wait);
1983		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1984					 current->memcg_oom_order);
1985	} else {
1986		schedule();
1987		mem_cgroup_unmark_under_oom(memcg);
1988		finish_wait(&memcg_oom_waitq, &owait.wait);
1989	}
1990
1991	if (locked) {
1992		mem_cgroup_oom_unlock(memcg);
1993		/*
1994		 * There is no guarantee that an OOM-lock contender
1995		 * sees the wakeups triggered by the OOM kill
1996		 * uncharges.  Wake any sleepers explicitly.
1997		 */
1998		memcg_oom_recover(memcg);
1999	}
2000cleanup:
2001	current->memcg_in_oom = NULL;
2002	css_put(&memcg->css);
2003	return true;
2004}
2005
2006/**
2007 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2008 * @victim: task to be killed by the OOM killer
2009 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2010 *
2011 * Returns a pointer to a memory cgroup, which has to be cleaned up
2012 * by killing all belonging OOM-killable tasks.
2013 *
2014 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2015 */
2016struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2017					    struct mem_cgroup *oom_domain)
2018{
2019	struct mem_cgroup *oom_group = NULL;
2020	struct mem_cgroup *memcg;
2021
2022	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2023		return NULL;
2024
2025	if (!oom_domain)
2026		oom_domain = root_mem_cgroup;
2027
2028	rcu_read_lock();
2029
2030	memcg = mem_cgroup_from_task(victim);
2031	if (memcg == root_mem_cgroup)
2032		goto out;
2033
2034	/*
2035	 * Traverse the memory cgroup hierarchy from the victim task's
2036	 * cgroup up to the OOMing cgroup (or root) to find the
2037	 * highest-level memory cgroup with oom.group set.
2038	 */
2039	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2040		if (memcg->oom_group)
2041			oom_group = memcg;
2042
2043		if (memcg == oom_domain)
2044			break;
2045	}
2046
2047	if (oom_group)
2048		css_get(&oom_group->css);
2049out:
2050	rcu_read_unlock();
2051
2052	return oom_group;
2053}
2054
2055void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2056{
2057	pr_info("Tasks in ");
2058	pr_cont_cgroup_path(memcg->css.cgroup);
2059	pr_cont(" are going to be killed due to memory.oom.group set\n");
2060}
2061
2062/**
2063 * lock_page_memcg - lock a page->mem_cgroup binding
2064 * @page: the page
2065 *
2066 * This function protects unlocked LRU pages from being moved to
2067 * another cgroup.
2068 *
2069 * It ensures lifetime of the returned memcg. Caller is responsible
2070 * for the lifetime of the page; __unlock_page_memcg() is available
2071 * when @page might get freed inside the locked section.
2072 */
2073struct mem_cgroup *lock_page_memcg(struct page *page)
2074{
2075	struct mem_cgroup *memcg;
2076	unsigned long flags;
2077
2078	/*
2079	 * The RCU lock is held throughout the transaction.  The fast
2080	 * path can get away without acquiring the memcg->move_lock
2081	 * because page moving starts with an RCU grace period.
2082	 *
2083	 * The RCU lock also protects the memcg from being freed when
2084	 * the page state that is going to change is the only thing
2085	 * preventing the page itself from being freed. E.g. writeback
2086	 * doesn't hold a page reference and relies on PG_writeback to
2087	 * keep off truncation, migration and so forth.
2088	 */
2089	rcu_read_lock();
2090
2091	if (mem_cgroup_disabled())
2092		return NULL;
2093again:
2094	memcg = page->mem_cgroup;
2095	if (unlikely(!memcg))
2096		return NULL;
2097
2098	if (atomic_read(&memcg->moving_account) <= 0)
2099		return memcg;
2100
2101	spin_lock_irqsave(&memcg->move_lock, flags);
2102	if (memcg != page->mem_cgroup) {
2103		spin_unlock_irqrestore(&memcg->move_lock, flags);
2104		goto again;
2105	}
2106
2107	/*
2108	 * When charge migration first begins, we can have locked and
2109	 * unlocked page stat updates happening concurrently.  Track
2110	 * the task who has the lock for unlock_page_memcg().
2111	 */
2112	memcg->move_lock_task = current;
2113	memcg->move_lock_flags = flags;
2114
2115	return memcg;
2116}
2117EXPORT_SYMBOL(lock_page_memcg);
2118
2119/**
2120 * __unlock_page_memcg - unlock and unpin a memcg
2121 * @memcg: the memcg
2122 *
2123 * Unlock and unpin a memcg returned by lock_page_memcg().
2124 */
2125void __unlock_page_memcg(struct mem_cgroup *memcg)
2126{
2127	if (memcg && memcg->move_lock_task == current) {
2128		unsigned long flags = memcg->move_lock_flags;
2129
2130		memcg->move_lock_task = NULL;
2131		memcg->move_lock_flags = 0;
2132
2133		spin_unlock_irqrestore(&memcg->move_lock, flags);
2134	}
2135
2136	rcu_read_unlock();
2137}
2138
2139/**
2140 * unlock_page_memcg - unlock a page->mem_cgroup binding
2141 * @page: the page
2142 */
2143void unlock_page_memcg(struct page *page)
2144{
2145	__unlock_page_memcg(page->mem_cgroup);
2146}
2147EXPORT_SYMBOL(unlock_page_memcg);
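/*
 * Typical usage sketch (illustrative only): stat updates that can race
 * with charge migration bracket the update with the page-memcg lock,
 * e.g.
 *
 *	lock_page_memcg(page);
 *	...update page state / memcg statistics for the page...
 *	unlock_page_memcg(page);
 *
 * The __unlock_page_memcg() variant is for callers that may drop their
 * reference to the page inside the locked section and therefore unlock
 * via the memcg pointer returned by lock_page_memcg().
 */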
2148
2149struct memcg_stock_pcp {
2150	struct mem_cgroup *cached; /* this is never the root cgroup */
2151	unsigned int nr_pages;
2152	struct work_struct work;
2153	unsigned long flags;
2154#define FLUSHING_CACHED_CHARGE	0
2155};
2156static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2157static DEFINE_MUTEX(percpu_charge_mutex);
2158
2159/**
2160 * consume_stock: Try to consume stocked charge on this cpu.
2161 * @memcg: memcg to consume from.
2162 * @nr_pages: how many pages to charge.
2163 *
2164 * The charges will only happen if @memcg matches the current cpu's memcg
2165 * stock, and at least @nr_pages are available in that stock.  Failure to
2166 * service an allocation will refill the stock.
2167 *
2168 * returns true if successful, false otherwise.
2169 */
2170static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2171{
2172	struct memcg_stock_pcp *stock;
2173	unsigned long flags;
2174	bool ret = false;
2175
2176	if (nr_pages > MEMCG_CHARGE_BATCH)
2177		return ret;
2178
2179	local_irq_save(flags);
2180
2181	stock = this_cpu_ptr(&memcg_stock);
2182	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2183		stock->nr_pages -= nr_pages;
2184		ret = true;
2185	}
2186
2187	local_irq_restore(flags);
2188
2189	return ret;
2190}
2191
2192/*
2193 * Return the stock cached in the percpu area to the counters and reset the cached information.
2194 */
2195static void drain_stock(struct memcg_stock_pcp *stock)
2196{
2197	struct mem_cgroup *old = stock->cached;
2198
2199	if (stock->nr_pages) {
2200		page_counter_uncharge(&old->memory, stock->nr_pages);
2201		if (do_memsw_account())
2202			page_counter_uncharge(&old->memsw, stock->nr_pages);
2203		css_put_many(&old->css, stock->nr_pages);
2204		stock->nr_pages = 0;
2205	}
2206	stock->cached = NULL;
2207}
2208
2209static void drain_local_stock(struct work_struct *dummy)
2210{
2211	struct memcg_stock_pcp *stock;
2212	unsigned long flags;
2213
2214	/*
2215	 * The only protection from memory hotplug vs. drain_stock races is
2216	 * that we always operate on local CPU stock here with IRQ disabled
2217	 */
2218	local_irq_save(flags);
2219
2220	stock = this_cpu_ptr(&memcg_stock);
2221	drain_stock(stock);
2222	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2223
2224	local_irq_restore(flags);
2225}
2226
2227/*
2228 * Cache @nr_pages charges to the local per-cpu area.
2229 * They will be consumed by consume_stock() later.
2230 */
2231static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2232{
2233	struct memcg_stock_pcp *stock;
2234	unsigned long flags;
2235
2236	local_irq_save(flags);
2237
2238	stock = this_cpu_ptr(&memcg_stock);
2239	if (stock->cached != memcg) { /* reset if necessary */
2240		drain_stock(stock);
2241		stock->cached = memcg;
2242	}
2243	stock->nr_pages += nr_pages;
2244
2245	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2246		drain_stock(stock);
2247
2248	local_irq_restore(flags);
2249}
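/*
 * Illustrative walk-through of the per-cpu stock (numbers made up,
 * assuming MEMCG_CHARGE_BATCH is 32): a single-page charge in
 * try_charge() charges a full batch of 32 pages to the page counters,
 * keeps 1 page and calls refill_stock(memcg, 31).  The next 31
 * single-page charges on this CPU for the same memcg are then
 * satisfied from consume_stock() without touching the shared page
 * counters at all.
 */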
2250
2251/*
2252 * Drains all per-CPU charge caches for given root_memcg resp. subtree
2253 * of the hierarchy under it.
2254 */
2255static void drain_all_stock(struct mem_cgroup *root_memcg)
2256{
2257	int cpu, curcpu;
2258
2259	/* If someone's already draining, avoid running more workers. */
2260	if (!mutex_trylock(&percpu_charge_mutex))
2261		return;
2262	/*
2263	 * Notify other cpus that system-wide "drain" is running
2264	 * We do not care about races with the cpu hotplug because cpu down
2265	 * as well as workers from this path always operate on the local
2266	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2267	 */
2268	curcpu = get_cpu();
2269	for_each_online_cpu(cpu) {
2270		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2271		struct mem_cgroup *memcg;
2272		bool flush = false;
2273
2274		rcu_read_lock();
2275		memcg = stock->cached;
2276		if (memcg && stock->nr_pages &&
2277		    mem_cgroup_is_descendant(memcg, root_memcg))
2278			flush = true;
2279		rcu_read_unlock();
2280
2281		if (flush &&
2282		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2283			if (cpu == curcpu)
2284				drain_local_stock(&stock->work);
2285			else
2286				schedule_work_on(cpu, &stock->work);
2287		}
2288	}
2289	put_cpu();
2290	mutex_unlock(&percpu_charge_mutex);
2291}
2292
2293static int memcg_hotplug_cpu_dead(unsigned int cpu)
2294{
2295	struct memcg_stock_pcp *stock;
2296	struct mem_cgroup *memcg, *mi;
2297
2298	stock = &per_cpu(memcg_stock, cpu);
2299	drain_stock(stock);
2300
2301	for_each_mem_cgroup(memcg) {
2302		int i;
2303
2304		for (i = 0; i < MEMCG_NR_STAT; i++) {
2305			int nid;
2306			long x;
2307
2308			x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
2309			if (x)
2310				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2311					atomic_long_add(x, &mi->vmstats[i]);
2312
2313			if (i >= NR_VM_NODE_STAT_ITEMS)
2314				continue;
2315
2316			for_each_node(nid) {
2317				struct mem_cgroup_per_node *pn;
2318
2319				pn = mem_cgroup_nodeinfo(memcg, nid);
2320				x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
2321				if (x)
2322					do {
2323						atomic_long_add(x, &pn->lruvec_stat[i]);
2324					} while ((pn = parent_nodeinfo(pn, nid)));
2325			}
2326		}
2327
2328		for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
2329			long x;
2330
2331			x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
2332			if (x)
2333				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2334					atomic_long_add(x, &mi->vmevents[i]);
2335		}
2336	}
2337
2338	return 0;
2339}
2340
2341static void reclaim_high(struct mem_cgroup *memcg,
2342			 unsigned int nr_pages,
2343			 gfp_t gfp_mask)
2344{
2345	do {
2346		if (page_counter_read(&memcg->memory) <= memcg->high)
2347			continue;
2348		memcg_memory_event(memcg, MEMCG_HIGH);
2349		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
2350	} while ((memcg = parent_mem_cgroup(memcg)));
2351}
2352
2353static void high_work_func(struct work_struct *work)
2354{
2355	struct mem_cgroup *memcg;
2356
2357	memcg = container_of(work, struct mem_cgroup, high_work);
2358	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2359}
2360
2361/*
2362 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2363 * enough to still cause a significant slowdown in most cases, while still
2364 * allowing diagnostics and tracing to proceed without becoming stuck.
2365 */
2366#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2367
2368/*
2369 * When calculating the delay, we use these on either side of the exponentiation
2370 * to maintain precision and scale to a reasonable number of jiffies (see the
2371 * table below).
2372 *
2373 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2374 *   overage ratio to a delay.
2375 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2376 *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2377 *   to produce a reasonable delay curve.
2378 *
2379 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2380 * reasonable delay curve compared to precision-adjusted overage, not
2381 * penalising heavily at first, but still making sure that growth beyond the
2382 * limit penalises misbehaving cgroups by slowing them down exponentially. For
2383 * example, with a high of 100 megabytes:
2384 *
2385 *  +-------+------------------------+
2386 *  | usage | time to allocate in ms |
2387 *  +-------+------------------------+
2388 *  | 100M  |                      0 |
2389 *  | 101M  |                      6 |
2390 *  | 102M  |                     25 |
2391 *  | 103M  |                     57 |
2392 *  | 104M  |                    102 |
2393 *  | 105M  |                    159 |
2394 *  | 106M  |                    230 |
2395 *  | 107M  |                    313 |
2396 *  | 108M  |                    409 |
2397 *  | 109M  |                    518 |
2398 *  | 110M  |                    639 |
2399 *  | 111M  |                    774 |
2400 *  | 112M  |                    921 |
2401 *  | 113M  |                   1081 |
2402 *  | 114M  |                   1254 |
2403 *  | 115M  |                   1439 |
2404 *  | 116M  |                   1638 |
2405 *  | 117M  |                   1849 |
2406 *  | 118M  |                   2000 |
2407 *  | 119M  |                   2000 |
2408 *  | 120M  |                   2000 |
2409 *  +-------+------------------------+
2410 */
2411#define MEMCG_DELAY_PRECISION_SHIFT 20
2412#define MEMCG_DELAY_SCALING_SHIFT 14
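/*
 * Worked example of the math above (made-up numbers, matching the 110M
 * row of the table): with memory.high = 100M and usage = 110M,
 * overage = (10M << MEMCG_DELAY_PRECISION_SHIFT) / 100M ~= 104858, and
 * overage * overage * HZ >> (20 + 14) ~= 0.64 * HZ, i.e. roughly the
 * 639ms shown in the table, before the per-batch scaling and the
 * MEMCG_MAX_HIGH_DELAY_JIFFIES clamp applied in
 * mem_cgroup_handle_over_high().
 */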
2413
2414/*
2415 * Scheduled by try_charge() to be executed from the userland return path
2416 * and reclaims memory over the high limit.
2417 */
2418void mem_cgroup_handle_over_high(void)
2419{
2420	unsigned long usage, high, clamped_high;
2421	unsigned long pflags;
2422	unsigned long penalty_jiffies, overage;
2423	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2424	struct mem_cgroup *memcg;
2425
2426	if (likely(!nr_pages))
2427		return;
2428
2429	memcg = get_mem_cgroup_from_mm(current->mm);
2430	reclaim_high(memcg, nr_pages, GFP_KERNEL);
2431	current->memcg_nr_pages_over_high = 0;
2432
2433	/*
2434	 * memory.high is breached and reclaim is unable to keep up. Throttle
2435	 * allocators proactively to slow down excessive growth.
2436	 *
2437	 * We use overage compared to memory.high to calculate the number of
2438	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2439	 * fairly lenient on small overages, and increasingly harsh when the
2440	 * memcg in question makes it clear that it has no intention of stopping
2441	 * its crazy behaviour, so we exponentially increase the delay based on
2442	 * overage amount.
2443	 */
2444
2445	usage = page_counter_read(&memcg->memory);
2446	high = READ_ONCE(memcg->high);
2447
2448	if (usage <= high)
2449		goto out;
2450
2451	/*
2452	 * Prevent division by 0 in overage calculation by acting as if it was a
2453	 * threshold of 1 page
2454	 */
2455	clamped_high = max(high, 1UL);
2456
2457	overage = div_u64((u64)(usage - high) << MEMCG_DELAY_PRECISION_SHIFT,
2458			  clamped_high);
2459
2460	penalty_jiffies = ((u64)overage * overage * HZ)
2461		>> (MEMCG_DELAY_PRECISION_SHIFT + MEMCG_DELAY_SCALING_SHIFT);
2462
2463	/*
2464	 * Factor in the task's own contribution to the overage, such that four
2465	 * N-sized allocations are throttled approximately the same as one
2466	 * 4N-sized allocation.
2467	 *
2468	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2469	 * larger the current charge batch is than that.
2470	 */
2471	penalty_jiffies = penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2472
2473	/*
2474	 * Clamp the max delay per usermode return so as to still keep the
2475	 * application moving forwards and also permit diagnostics, albeit
2476	 * extremely slowly.
2477	 */
2478	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2479
2480	/*
2481	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2482	 * that it's not even worth doing, in an attempt to be nice to those who
2483	 * go only a small amount over their memory.high value and maybe haven't
2484	 * been aggressively reclaimed enough yet.
2485	 */
2486	if (penalty_jiffies <= HZ / 100)
2487		goto out;
2488
2489	/*
2490	 * If we exit early, we're guaranteed to die (since
2491	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2492	 * need to account for any ill-begotten jiffies to pay them off later.
2493	 */
2494	psi_memstall_enter(&pflags);
2495	schedule_timeout_killable(penalty_jiffies);
2496	psi_memstall_leave(&pflags);
2497
2498out:
2499	css_put(&memcg->css);
2500}
2501
2502static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2503		      unsigned int nr_pages)
2504{
2505	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2506	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2507	struct mem_cgroup *mem_over_limit;
2508	struct page_counter *counter;
2509	unsigned long nr_reclaimed;
2510	bool may_swap = true;
2511	bool drained = false;
2512	enum oom_status oom_status;
2513
2514	if (mem_cgroup_is_root(memcg))
2515		return 0;
2516retry:
2517	if (consume_stock(memcg, nr_pages))
2518		return 0;
2519
2520	if (!do_memsw_account() ||
2521	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2522		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2523			goto done_restock;
2524		if (do_memsw_account())
2525			page_counter_uncharge(&memcg->memsw, batch);
2526		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2527	} else {
2528		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2529		may_swap = false;
2530	}
2531
2532	if (batch > nr_pages) {
2533		batch = nr_pages;
2534		goto retry;
2535	}
2536
2537	/*
2538	 * Memcg doesn't have a dedicated reserve for atomic
2539	 * allocations. But like the global atomic pool, we need to
2540	 * put the burden of reclaim on regular allocation requests
2541	 * and let these go through as privileged allocations.
2542	 */
2543	if (gfp_mask & __GFP_ATOMIC)
2544		goto force;
2545
2546	/*
2547	 * Unlike in global OOM situations, memcg is not in a physical
2548	 * memory shortage.  Allow dying and OOM-killed tasks to
2549	 * bypass the last charges so that they can exit quickly and
2550	 * free their memory.
2551	 */
2552	if (unlikely(should_force_charge()))
2553		goto force;
2554
2555	/*
2556	 * Prevent unbounded recursion when reclaim operations need to
2557	 * allocate memory. This might exceed the limits temporarily,
2558	 * but we prefer facilitating memory reclaim and getting back
2559	 * under the limit over triggering OOM kills in these cases.
2560	 */
2561	if (unlikely(current->flags & PF_MEMALLOC))
2562		goto force;
2563
2564	if (unlikely(task_in_memcg_oom(current)))
2565		goto nomem;
2566
2567	if (!gfpflags_allow_blocking(gfp_mask))
2568		goto nomem;
2569
2570	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2571
2572	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2573						    gfp_mask, may_swap);
2574
2575	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2576		goto retry;
2577
2578	if (!drained) {
2579		drain_all_stock(mem_over_limit);
2580		drained = true;
2581		goto retry;
2582	}
2583
2584	if (gfp_mask & __GFP_NORETRY)
2585		goto nomem;
2586	/*
2587	 * Even though the limit is exceeded at this point, reclaim
2588	 * may have been able to free some pages.  Retry the charge
2589	 * before killing the task.
2590	 *
2591	 * Only for regular pages, though: huge pages are rather
2592	 * unlikely to succeed so close to the limit, and we fall back
2593	 * to regular pages anyway in case of failure.
2594	 */
2595	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2596		goto retry;
2597	/*
2598	 * At task move, charge accounts can be doubly counted. So, it's
2599	 * better to wait until the end of task_move if something is going on.
2600	 */
2601	if (mem_cgroup_wait_acct_move(mem_over_limit))
2602		goto retry;
2603
2604	if (nr_retries--)
2605		goto retry;
2606
2607	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2608		goto nomem;
2609
2610	if (gfp_mask & __GFP_NOFAIL)
2611		goto force;
2612
2613	if (fatal_signal_pending(current))
2614		goto force;
2615
2616	/*
2617	 * keep retrying as long as the memcg oom killer is able to make
2618	 * a forward progress or bypass the charge if the oom killer
2619	 * couldn't make any progress.
2620	 */
2621	oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
2622		       get_order(nr_pages * PAGE_SIZE));
2623	switch (oom_status) {
2624	case OOM_SUCCESS:
2625		nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2626		goto retry;
2627	case OOM_FAILED:
2628		goto force;
2629	default:
2630		goto nomem;
2631	}
2632nomem:
2633	if (!(gfp_mask & __GFP_NOFAIL))
2634		return -ENOMEM;
2635force:
2636	/*
2637	 * The allocation either can't fail or will lead to more memory
2638	 * being freed very soon.  Allow memory usage to go over the limit
2639	 * temporarily by force charging it.
2640	 */
2641	page_counter_charge(&memcg->memory, nr_pages);
2642	if (do_memsw_account())
2643		page_counter_charge(&memcg->memsw, nr_pages);
2644	css_get_many(&memcg->css, nr_pages);
2645
2646	return 0;
2647
2648done_restock:
2649	css_get_many(&memcg->css, batch);
2650	if (batch > nr_pages)
2651		refill_stock(memcg, batch - nr_pages);
2652
2653	/*
2654	 * If the hierarchy is above the normal consumption range, schedule
2655	 * reclaim on returning to userland.  We can perform reclaim here
2656	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2657	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2658	 * not recorded as it most likely matches current's and won't
2659	 * change in the meantime.  As high limit is checked again before
2660	 * reclaim, the cost of mismatch is negligible.
2661	 */
2662	do {
2663		if (page_counter_read(&memcg->memory) > memcg->high) {
2664			/* Don't bother a random interrupted task */
2665			if (in_interrupt()) {
2666				schedule_work(&memcg->high_work);
2667				break;
2668			}
2669			current->memcg_nr_pages_over_high += batch;
2670			set_notify_resume(current);
2671			break;
2672		}
2673	} while ((memcg = parent_mem_cgroup(memcg)));
2674
2675	return 0;
2676}
2677
2678static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2679{
2680	if (mem_cgroup_is_root(memcg))
2681		return;
2682
2683	page_counter_uncharge(&memcg->memory, nr_pages);
2684	if (do_memsw_account())
2685		page_counter_uncharge(&memcg->memsw, nr_pages);
2686
2687	css_put_many(&memcg->css, nr_pages);
2688}
2689
2690static void lock_page_lru(struct page *page, int *isolated)
2691{
2692	pg_data_t *pgdat = page_pgdat(page);
2693
2694	spin_lock_irq(&pgdat->lru_lock);
2695	if (PageLRU(page)) {
2696		struct lruvec *lruvec;
2697
2698		lruvec = mem_cgroup_page_lruvec(page, pgdat);
2699		ClearPageLRU(page);
2700		del_page_from_lru_list(page, lruvec, page_lru(page));
2701		*isolated = 1;
2702	} else
2703		*isolated = 0;
2704}
2705
2706static void unlock_page_lru(struct page *page, int isolated)
2707{
2708	pg_data_t *pgdat = page_pgdat(page);
2709
2710	if (isolated) {
2711		struct lruvec *lruvec;
2712
2713		lruvec = mem_cgroup_page_lruvec(page, pgdat);
2714		VM_BUG_ON_PAGE(PageLRU(page), page);
2715		SetPageLRU(page);
2716		add_page_to_lru_list(page, lruvec, page_lru(page));
2717	}
2718	spin_unlock_irq(&pgdat->lru_lock);
2719}
2720
2721static void commit_charge(struct page *page, struct mem_cgroup *memcg,
2722			  bool lrucare)
2723{
2724	int isolated;
2725
2726	VM_BUG_ON_PAGE(page->mem_cgroup, page);
2727
2728	/*
2729	 * In some cases, e.g. SwapCache and FUSE (splice_buf->radixtree), the page
2730	 * may already be on some other mem_cgroup's LRU.  Take care of it.
2731	 */
2732	if (lrucare)
2733		lock_page_lru(page, &isolated);
2734
2735	/*
2736	 * Nobody should be changing or seriously looking at
2737	 * page->mem_cgroup at this point:
2738	 *
2739	 * - the page is uncharged
2740	 *
2741	 * - the page is off-LRU
2742	 *
2743	 * - an anonymous fault has exclusive page access, except for
2744	 *   a locked page table
2745	 *
2746	 * - a page cache insertion, a swapin fault, or a migration
2747	 *   have the page locked
2748	 */
2749	page->mem_cgroup = memcg;
2750
2751	if (lrucare)
2752		unlock_page_lru(page, isolated);
2753}
2754
2755#ifdef CONFIG_MEMCG_KMEM
2756static int memcg_alloc_cache_id(void)
2757{
2758	int id, size;
2759	int err;
2760
2761	id = ida_simple_get(&memcg_cache_ida,
2762			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2763	if (id < 0)
2764		return id;
2765
2766	if (id < memcg_nr_cache_ids)
2767		return id;
2768
2769	/*
2770	 * There's no space for the new id in memcg_caches arrays,
2771	 * so we have to grow them.
2772	 */
2773	down_write(&memcg_cache_ids_sem);
2774
2775	size = 2 * (id + 1);
2776	if (size < MEMCG_CACHES_MIN_SIZE)
2777		size = MEMCG_CACHES_MIN_SIZE;
2778	else if (size > MEMCG_CACHES_MAX_SIZE)
2779		size = MEMCG_CACHES_MAX_SIZE;
2780
2781	err = memcg_update_all_caches(size);
2782	if (!err)
2783		err = memcg_update_all_list_lrus(size);
2784	if (!err)
2785		memcg_nr_cache_ids = size;
2786
2787	up_write(&memcg_cache_ids_sem);
2788
2789	if (err) {
2790		ida_simple_remove(&memcg_cache_ida, id);
2791		return err;
2792	}
2793	return id;
2794}
2795
2796static void memcg_free_cache_id(int id)
2797{
2798	ida_simple_remove(&memcg_cache_ida, id);
2799}
2800
2801struct memcg_kmem_cache_create_work {
2802	struct mem_cgroup *memcg;
2803	struct kmem_cache *cachep;
2804	struct work_struct work;
2805};
2806
2807static void memcg_kmem_cache_create_func(struct work_struct *w)
2808{
2809	struct memcg_kmem_cache_create_work *cw =
2810		container_of(w, struct memcg_kmem_cache_create_work, work);
2811	struct mem_cgroup *memcg = cw->memcg;
2812	struct kmem_cache *cachep = cw->cachep;
2813
2814	memcg_create_kmem_cache(memcg, cachep);
2815
2816	css_put(&memcg->css);
2817	kfree(cw);
2818}
2819
2820/*
2821 * Enqueue the creation of a per-memcg kmem_cache.
2822 */
2823static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2824					       struct kmem_cache *cachep)
2825{
2826	struct memcg_kmem_cache_create_work *cw;
2827
2828	if (!css_tryget_online(&memcg->css))
2829		return;
2830
2831	cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
2832	if (!cw)
2833		return;
2834
2835	cw->memcg = memcg;
2836	cw->cachep = cachep;
2837	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
2838
2839	queue_work(memcg_kmem_cache_wq, &cw->work);
2840}
2841
2842static inline bool memcg_kmem_bypass(void)
2843{
2844	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
2845		return true;
2846	return false;
2847}
2848
2849/**
2850 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
2851 * @cachep: the original global kmem cache
2852 *
2853 * Return the kmem_cache we're supposed to use for a slab allocation.
2854 * We try to use the current memcg's version of the cache.
2855 *
2856 * If the cache does not exist yet, if we are the first user of it, we
2857 * create it asynchronously in a workqueue and let the current allocation
2858 * go through with the original cache.
2859 *
2860 * This function takes a reference to the cache it returns to assure it
2861 * won't get destroyed while we are working with it. Once the caller is
2862 * done with it, memcg_kmem_put_cache() must be called to release the
2863 * reference.
2864 */
2865struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
2866{
2867	struct mem_cgroup *memcg;
2868	struct kmem_cache *memcg_cachep;
2869	struct memcg_cache_array *arr;
2870	int kmemcg_id;
2871
2872	VM_BUG_ON(!is_root_cache(cachep));
2873
2874	if (memcg_kmem_bypass())
2875		return cachep;
2876
2877	rcu_read_lock();
2878
2879	if (unlikely(current->active_memcg))
2880		memcg = current->active_memcg;
2881	else
2882		memcg = mem_cgroup_from_task(current);
2883
2884	if (!memcg || memcg == root_mem_cgroup)
2885		goto out_unlock;
2886
2887	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2888	if (kmemcg_id < 0)
2889		goto out_unlock;
2890
2891	arr = rcu_dereference(cachep->memcg_params.memcg_caches);
2892
2893	/*
2894	 * Make sure we will access the up-to-date value. The code updating
2895	 * memcg_caches issues a write barrier to match the data dependency
2896	 * barrier inside READ_ONCE() (see memcg_create_kmem_cache()).
2897	 */
2898	memcg_cachep = READ_ONCE(arr->entries[kmemcg_id]);
2899
2900	/*
2901	 * If we are in a safe context (can wait, and not in interrupt
2902	 * context), we could be predictable and return right away.
2903	 * This would guarantee that the allocation being performed
2904	 * already belongs in the new cache.
2905	 *
2906	 * However, there are some clashes that can arrive from locking.
2907	 * For instance, because we acquire the slab_mutex while doing
2908	 * memcg_create_kmem_cache, this means no further allocation
2909	 * could happen with the slab_mutex held. So it's better to
2910	 * defer everything.
2911	 *
2912	 * If the memcg is dying or memcg_cache is about to be released,
2913	 * don't bother creating new kmem_caches. Because memcg_cachep
2914	 * is ZEROed as the first step of kmem offlining, we don't need
2915	 * percpu_ref_tryget_live() here. css_tryget_online() check in
2916	 * memcg_schedule_kmem_cache_create() will prevent us from
2917	 * creation of a new kmem_cache.
2918	 */
2919	if (unlikely(!memcg_cachep))
2920		memcg_schedule_kmem_cache_create(memcg, cachep);
2921	else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt))
2922		cachep = memcg_cachep;
2923out_unlock:
2924	rcu_read_unlock();
2925	return cachep;
2926}
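/*
 * Caller-side sketch (illustrative, heavily simplified from the slab
 * allocation hooks): accounted slab allocations bracket the actual
 * allocation with the get/put pair so the per-memcg cache cannot go
 * away underneath them:
 *
 *	struct kmem_cache *s = memcg_kmem_get_cache(cachep);
 *	...allocate the object from s...
 *	memcg_kmem_put_cache(s);
 *
 * Only the reference pairing is shown here; the real call sites live
 * in the slab allocators.
 */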
2927
2928/**
2929 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
2930 * @cachep: the cache returned by memcg_kmem_get_cache
2931 */
2932void memcg_kmem_put_cache(struct kmem_cache *cachep)
2933{
2934	if (!is_root_cache(cachep))
2935		percpu_ref_put(&cachep->memcg_params.refcnt);
2936}
2937
2938/**
2939 * __memcg_kmem_charge_memcg: charge a kmem page
2940 * @page: page to charge
2941 * @gfp: reclaim mode
2942 * @order: allocation order
2943 * @memcg: memory cgroup to charge
2944 *
2945 * Returns 0 on success, an error code on failure.
2946 */
2947int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2948			    struct mem_cgroup *memcg)
2949{
2950	unsigned int nr_pages = 1 << order;
2951	struct page_counter *counter;
2952	int ret;
2953
2954	ret = try_charge(memcg, gfp, nr_pages);
2955	if (ret)
2956		return ret;
2957
2958	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2959	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2960
2961		/*
2962		 * Enforce __GFP_NOFAIL allocation because callers are not
2963		 * prepared to see failures and likely do not have any failure
2964		 * handling code.
2965		 */
2966		if (gfp & __GFP_NOFAIL) {
2967			page_counter_charge(&memcg->kmem, nr_pages);
2968			return 0;
2969		}
2970		cancel_charge(memcg, nr_pages);
2971		return -ENOMEM;
2972	}
2973	return 0;
2974}
2975
2976/**
2977 * __memcg_kmem_charge: charge a kmem page to the current memory cgroup
2978 * @page: page to charge
2979 * @gfp: reclaim mode
2980 * @order: allocation order
2981 *
2982 * Returns 0 on success, an error code on failure.
2983 */
2984int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
2985{
2986	struct mem_cgroup *memcg;
2987	int ret = 0;
2988
2989	if (memcg_kmem_bypass())
2990		return 0;
2991
2992	memcg = get_mem_cgroup_from_current();
2993	if (!mem_cgroup_is_root(memcg)) {
2994		ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
2995		if (!ret) {
2996			page->mem_cgroup = memcg;
2997			__SetPageKmemcg(page);
2998		}
2999	}
3000	css_put(&memcg->css);
3001	return ret;
3002}
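/*
 * Usage note (illustrative): this is the opt-in entry point used by
 * the page allocator, e.g. an allocation like
 *
 *	page = alloc_pages(GFP_KERNEL_ACCOUNT, order);
 *
 * carries __GFP_ACCOUNT, so the allocator charges the new page here,
 * binds page->mem_cgroup and sets PageKmemcg so the pages can be
 * uncharged again in __memcg_kmem_uncharge() at free time.
 */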
3003
3004/**
3005 * __memcg_kmem_uncharge_memcg: uncharge a kmem page
3006 * @memcg: memcg to uncharge
3007 * @nr_pages: number of pages to uncharge
3008 */
3009void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg,
3010				 unsigned int nr_pages)
3011{
3012	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
3013		page_counter_uncharge(&memcg->kmem, nr_pages);
3014
3015	page_counter_uncharge(&memcg->memory, nr_pages);
3016	if (do_memsw_account())
3017		page_counter_uncharge(&memcg->memsw, nr_pages);
3018}
3019/**
3020 * __memcg_kmem_uncharge: uncharge a kmem page
3021 * @page: page to uncharge
3022 * @order: allocation order
3023 */
3024void __memcg_kmem_uncharge(struct page *page, int order)
3025{
3026	struct mem_cgroup *memcg = page->mem_cgroup;
3027	unsigned int nr_pages = 1 << order;
3028
3029	if (!memcg)
3030		return;
3031
3032	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
3033	__memcg_kmem_uncharge_memcg(memcg, nr_pages);
3034	page->mem_cgroup = NULL;
3035
3036	/* slab pages do not have PageKmemcg flag set */
3037	if (PageKmemcg(page))
3038		__ClearPageKmemcg(page);
3039
3040	css_put_many(&memcg->css, nr_pages);
3041}
3042#endif /* CONFIG_MEMCG_KMEM */
3043
3044#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3045
3046/*
3047 * Because tail pages are not marked as "used", set up their memcg here. We're
3048 * under pgdat->lru_lock and migration entries are set up in all page mappings.
3049 */
3050void mem_cgroup_split_huge_fixup(struct page *head)
3051{
3052	int i;
3053
3054	if (mem_cgroup_disabled())
3055		return;
3056
3057	for (i = 1; i < HPAGE_PMD_NR; i++)
3058		head[i].mem_cgroup = head->mem_cgroup;
3059
3060	__mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR);
3061}
3062#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
3063
3064#ifdef CONFIG_MEMCG_SWAP
3065/**
3066 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3067 * @entry: swap entry to be moved
3068 * @from:  mem_cgroup which the entry is moved from
3069 * @to:  mem_cgroup which the entry is moved to
3070 *
3071 * It succeeds only when the swap_cgroup's record for this entry is the same
3072 * as the mem_cgroup's id of @from.
3073 *
3074 * Returns 0 on success, -EINVAL on failure.
3075 *
3076 * The caller must have charged to @to, IOW, called page_counter_charge() about
3077 * both res and memsw, and called css_get().
3078 */
3079static int mem_cgroup_move_swap_account(swp_entry_t entry,
3080				struct mem_cgroup *from, struct mem_cgroup *to)
3081{
3082	unsigned short old_id, new_id;
3083
3084	old_id = mem_cgroup_id(from);
3085	new_id = mem_cgroup_id(to);
3086
3087	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3088		mod_memcg_state(from, MEMCG_SWAP, -1);
3089		mod_memcg_state(to, MEMCG_SWAP, 1);
3090		return 0;
3091	}
3092	return -EINVAL;
3093}
3094#else
3095static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3096				struct mem_cgroup *from, struct mem_cgroup *to)
3097{
3098	return -EINVAL;
3099}
3100#endif
3101
3102static DEFINE_MUTEX(memcg_max_mutex);
3103
3104static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3105				 unsigned long max, bool memsw)
3106{
3107	bool enlarge = false;
3108	bool drained = false;
3109	int ret;
3110	bool limits_invariant;
3111	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3112
3113	do {
3114		if (signal_pending(current)) {
3115			ret = -EINTR;
3116			break;
3117		}
3118
3119		mutex_lock(&memcg_max_mutex);
3120		/*
3121		 * Make sure that the new limit (memsw or memory limit) doesn't
3122		 * break our basic invariant rule memory.max <= memsw.max.
3123		 */
3124		limits_invariant = memsw ? max >= memcg->memory.max :
3125					   max <= memcg->memsw.max;
3126		if (!limits_invariant) {
3127			mutex_unlock(&memcg_max_mutex);
3128			ret = -EINVAL;
3129			break;
3130		}
3131		if (max > counter->max)
3132			enlarge = true;
3133		ret = page_counter_set_max(counter, max);
3134		mutex_unlock(&memcg_max_mutex);
3135
3136		if (!ret)
3137			break;
3138
3139		if (!drained) {
3140			drain_all_stock(memcg);
3141			drained = true;
3142			continue;
3143		}
3144
3145		if (!try_to_free_mem_cgroup_pages(memcg, 1,
3146					GFP_KERNEL, !memsw)) {
3147			ret = -EBUSY;
3148			break;
3149		}
3150	} while (true);
3151
3152	if (!ret && enlarge)
3153		memcg_oom_recover(memcg);
3154
3155	return ret;
3156}
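
/*
 * Illustrative usage (cgroup v1; mount point and cgroup name are
 * examples): shrinking a limit below current usage drives the
 * drain-then-reclaim loop above until the counter fits or -EBUSY:
 *
 *	# echo 512M > /sys/fs/cgroup/memory/foo/memory.limit_in_bytes
 */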
3157
3158unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3159					    gfp_t gfp_mask,
3160					    unsigned long *total_scanned)
3161{
3162	unsigned long nr_reclaimed = 0;
3163	struct mem_cgroup_per_node *mz, *next_mz = NULL;
3164	unsigned long reclaimed;
3165	int loop = 0;
3166	struct mem_cgroup_tree_per_node *mctz;
3167	unsigned long excess;
3168	unsigned long nr_scanned;
3169
3170	if (order > 0)
3171		return 0;
3172
3173	mctz = soft_limit_tree_node(pgdat->node_id);
3174
3175	/*
3176	 * Do not even bother to check the largest node if the root
3177	 * is empty. Do it lockless to prevent lock bouncing. Races
3178	 * are acceptable as soft limit is best effort anyway.
3179	 */
3180	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3181		return 0;
3182
3183	/*
3184	 * This loop can run for a while, especially if mem_cgroups
3185	 * continuously keep exceeding their soft limit and putting the
3186	 * system under pressure.
3187	 */
3188	do {
3189		if (next_mz)
3190			mz = next_mz;
3191		else
3192			mz = mem_cgroup_largest_soft_limit_node(mctz);
3193		if (!mz)
3194			break;
3195
3196		nr_scanned = 0;
3197		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3198						    gfp_mask, &nr_scanned);
3199		nr_reclaimed += reclaimed;
3200		*total_scanned += nr_scanned;
3201		spin_lock_irq(&mctz->lock);
3202		__mem_cgroup_remove_exceeded(mz, mctz);
3203
3204		/*
3205		 * If we failed to reclaim anything from this memory cgroup
3206		 * it is time to move on to the next cgroup
3207		 */
3208		next_mz = NULL;
3209		if (!reclaimed)
3210			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3211
3212		excess = soft_limit_excess(mz->memcg);
3213		/*
3214		 * One school of thought says that we should not add
3215		 * back the node to the tree if reclaim returns 0.
3216		 * But our reclaim could return 0 simply because, due
3217		 * to priority, we are exposing a smaller subset of
3218		 * memory to reclaim from. Consider this as a longer
3219		 * term TODO.
3220		 */
3221		/* If excess == 0, no tree ops */
3222		__mem_cgroup_insert_exceeded(mz, mctz, excess);
3223		spin_unlock_irq(&mctz->lock);
3224		css_put(&mz->memcg->css);
3225		loop++;
3226		/*
3227		 * Could not reclaim anything and there are no more
3228		 * mem cgroups to try or we seem to be looping without
3229		 * reclaiming anything.
3230		 */
3231		if (!nr_reclaimed &&
3232			(next_mz == NULL ||
3233			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3234			break;
3235	} while (!nr_reclaimed);
3236	if (next_mz)
3237		css_put(&next_mz->memcg->css);
3238	return nr_reclaimed;
3239}
3240
3241/*
3242 * Test whether @memcg has children, dead or alive.  Note that this
3243 * function doesn't care whether @memcg has use_hierarchy enabled and
3244 * returns %true if there are child csses according to the cgroup
3245 * hierarchy.  Testing use_hierarchy is the caller's responsibility.
3246 */
3247static inline bool memcg_has_children(struct mem_cgroup *memcg)
3248{
3249	bool ret;
3250
3251	rcu_read_lock();
3252	ret = css_next_child(NULL, &memcg->css);
3253	rcu_read_unlock();
3254	return ret;
3255}
3256
3257/*
3258 * Reclaims as many pages from the given memcg as possible.
3259 *
3260 * Caller is responsible for holding css reference for memcg.
3261 */
3262static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3263{
3264	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
3265
3266	/* we call try-to-free pages to make this cgroup empty */
3267	lru_add_drain_all();
3268
3269	drain_all_stock(memcg);
3270
3271	/* try to free all pages in this cgroup */
3272	while (nr_retries && page_counter_read(&memcg->memory)) {
3273		int progress;
3274
3275		if (signal_pending(current))
3276			return -EINTR;
3277
3278		progress = try_to_free_mem_cgroup_pages(memcg, 1,
3279							GFP_KERNEL, true);
3280		if (!progress) {
3281			nr_retries--;
3282			/* maybe some writeback is necessary */
3283			congestion_wait(BLK_RW_ASYNC, HZ/10);
3284		}
3285
3286	}
3287
3288	return 0;
3289}
3290
3291static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3292					    char *buf, size_t nbytes,
3293					    loff_t off)
3294{
3295	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3296
3297	if (mem_cgroup_is_root(memcg))
3298		return -EINVAL;
3299	return mem_cgroup_force_empty(memcg) ?: nbytes;
3300}
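
/*
 * Illustrative usage (cgroup v1): any write to memory.force_empty of a
 * non-root cgroup runs the reclaim loop above, e.g.
 *
 *	# echo 0 > /sys/fs/cgroup/memory/foo/memory.force_empty
 */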
3301
3302static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3303				     struct cftype *cft)
3304{
3305	return mem_cgroup_from_css(css)->use_hierarchy;
3306}
3307
3308static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3309				      struct cftype *cft, u64 val)
3310{
3311	int retval = 0;
3312	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3313	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
3314
3315	if (memcg->use_hierarchy == val)
3316		return 0;
3317
3318	/*
3319	 * If parent's use_hierarchy is set, we can't make any modifications
3320	 * in the child subtrees. If it is unset, then the change can
3321	 * occur, provided the current cgroup has no children.
3322	 *
3323	 * For the root cgroup, parent_memcg is NULL; we allow the value to be
3324	 * set if there are no children.
3325	 */
3326	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
3327				(val == 1 || val == 0)) {
3328		if (!memcg_has_children(memcg))
3329			memcg->use_hierarchy = val;
3330		else
3331			retval = -EBUSY;
3332	} else
3333		retval = -EINVAL;
3334
3335	return retval;
3336}
3337
3338static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3339{
3340	unsigned long val;
3341
3342	if (mem_cgroup_is_root(memcg)) {
3343		val = memcg_page_state(memcg, MEMCG_CACHE) +
3344			memcg_page_state(memcg, MEMCG_RSS);
3345		if (swap)
3346			val += memcg_page_state(memcg, MEMCG_SWAP);
3347	} else {
3348		if (!swap)
3349			val = page_counter_read(&memcg->memory);
3350		else
3351			val = page_counter_read(&memcg->memsw);
3352	}
3353	return val;
3354}
3355
3356enum {
3357	RES_USAGE,
3358	RES_LIMIT,
3359	RES_MAX_USAGE,
3360	RES_FAILCNT,
3361	RES_SOFT_LIMIT,
3362};
3363
3364static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3365			       struct cftype *cft)
3366{
3367	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3368	struct page_counter *counter;
3369
3370	switch (MEMFILE_TYPE(cft->private)) {
3371	case _MEM:
3372		counter = &memcg->memory;
3373		break;
3374	case _MEMSWAP:
3375		counter = &memcg->memsw;
3376		break;
3377	case _KMEM:
3378		counter = &memcg->kmem;
3379		break;
3380	case _TCP:
3381		counter = &memcg->tcpmem;
3382		break;
3383	default:
3384		BUG();
3385	}
3386
3387	switch (MEMFILE_ATTR(cft->private)) {
3388	case RES_USAGE:
3389		if (counter == &memcg->memory)
3390			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3391		if (counter == &memcg->memsw)
3392			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3393		return (u64)page_counter_read(counter) * PAGE_SIZE;
3394	case RES_LIMIT:
3395		return (u64)counter->max * PAGE_SIZE;
3396	case RES_MAX_USAGE:
3397		return (u64)counter->watermark * PAGE_SIZE;
3398	case RES_FAILCNT:
3399		return counter->failcnt;
3400	case RES_SOFT_LIMIT:
3401		return (u64)memcg->soft_limit * PAGE_SIZE;
3402	default:
3403		BUG();
3404	}
3405}
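
/*
 * cft->private packs a counter type and an attribute into one word via
 * MEMFILE_PRIVATE() (defined earlier in this file, roughly
 * type << 16 | attr), which MEMFILE_TYPE()/MEMFILE_ATTR() above unpack.
 * For example, "memory.max_usage_in_bytes" carries
 * MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE) and so reads
 * memcg->memory.watermark scaled to bytes.
 */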
3406
3407static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg, bool slab_only)
3408{
3409	unsigned long stat[MEMCG_NR_STAT];
3410	struct mem_cgroup *mi;
3411	int node, cpu, i;
3412	int min_idx, max_idx;
3413
3414	if (slab_only) {
3415		min_idx = NR_SLAB_RECLAIMABLE;
3416		max_idx = NR_SLAB_UNRECLAIMABLE;
3417	} else {
3418		min_idx = 0;
3419		max_idx = MEMCG_NR_STAT;
3420	}
3421
3422	for (i = min_idx; i < max_idx; i++)
3423		stat[i] = 0;
3424
3425	for_each_online_cpu(cpu)
3426		for (i = min_idx; i < max_idx; i++)
3427			stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
3428
3429	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3430		for (i = min_idx; i < max_idx; i++)
3431			atomic_long_add(stat[i], &mi->vmstats[i]);
3432
3433	if (!slab_only)
3434		max_idx = NR_VM_NODE_STAT_ITEMS;
3435
3436	for_each_node(node) {
3437		struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3438		struct mem_cgroup_per_node *pi;
3439
3440		for (i = min_idx; i < max_idx; i++)
3441			stat[i] = 0;
3442
3443		for_each_online_cpu(cpu)
3444			for (i = min_idx; i < max_idx; i++)
3445				stat[i] += per_cpu(
3446					pn->lruvec_stat_cpu->count[i], cpu);
3447
3448		for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
3449			for (i = min_idx; i < max_idx; i++)
3450				atomic_long_add(stat[i], &pi->lruvec_stat[i]);
3451	}
3452}
3453
3454static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
3455{
3456	unsigned long events[NR_VM_EVENT_ITEMS];
3457	struct mem_cgroup *mi;
3458	int cpu, i;
3459
3460	for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3461		events[i] = 0;
3462
3463	for_each_online_cpu(cpu)
3464		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3465			events[i] += per_cpu(memcg->vmstats_percpu->events[i],
3466					     cpu);
3467
3468	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3469		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3470			atomic_long_add(events[i], &mi->vmevents[i]);
3471}
3472
3473#ifdef CONFIG_MEMCG_KMEM
3474static int memcg_online_kmem(struct mem_cgroup *memcg)
3475{
3476	int memcg_id;
3477
3478	if (cgroup_memory_nokmem)
3479		return 0;
3480
3481	BUG_ON(memcg->kmemcg_id >= 0);
3482	BUG_ON(memcg->kmem_state);
3483
3484	memcg_id = memcg_alloc_cache_id();
3485	if (memcg_id < 0)
3486		return memcg_id;
3487
3488	static_branch_inc(&memcg_kmem_enabled_key);
3489	/*
3490	 * A memory cgroup is considered kmem-online as soon as it gets
3491	 * kmemcg_id. Setting the id after enabling static branching will
3492	 * guarantee no one starts accounting before all call sites are
3493	 * patched.
3494	 */
3495	memcg->kmemcg_id = memcg_id;
3496	memcg->kmem_state = KMEM_ONLINE;
3497	INIT_LIST_HEAD(&memcg->kmem_caches);
3498
3499	return 0;
3500}
3501
3502static void memcg_offline_kmem(struct mem_cgroup *memcg)
3503{
3504	struct cgroup_subsys_state *css;
3505	struct mem_cgroup *parent, *child;
3506	int kmemcg_id;
3507
3508	if (memcg->kmem_state != KMEM_ONLINE)
3509		return;
3510	/*
3511	 * Clear the online state before clearing memcg_caches array
3512	 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
3513	 * guarantees that no cache will be created for this cgroup
3514	 * after we are done (see memcg_create_kmem_cache()).
3515	 */
3516	memcg->kmem_state = KMEM_ALLOCATED;
3517
3518	parent = parent_mem_cgroup(memcg);
3519	if (!parent)
3520		parent = root_mem_cgroup;
3521
3522	/*
3523	 * Deactivate and reparent kmem_caches. Then flush percpu
3524	 * slab statistics to have precise values at the parent and
3525	 * all ancestor levels. It's required to keep slab stats
3526	 * accurate after the reparenting of kmem_caches.
3527	 */
3528	memcg_deactivate_kmem_caches(memcg, parent);
3529	memcg_flush_percpu_vmstats(memcg, true);
3530
3531	kmemcg_id = memcg->kmemcg_id;
3532	BUG_ON(kmemcg_id < 0);
3533
3534	/*
3535	 * Change kmemcg_id of this cgroup and all its descendants to the
3536	 * parent's id, and then move all entries from this cgroup's list_lrus
3537	 * to ones of the parent. After we have finished, all list_lrus
3538	 * corresponding to this cgroup are guaranteed to remain empty. The
3539	 * ordering is imposed by list_lru_node->lock taken by
3540	 * memcg_drain_all_list_lrus().
3541	 */
3542	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
3543	css_for_each_descendant_pre(css, &memcg->css) {
3544		child = mem_cgroup_from_css(css);
3545		BUG_ON(child->kmemcg_id != kmemcg_id);
3546		child->kmemcg_id = parent->kmemcg_id;
3547		if (!memcg->use_hierarchy)
3548			break;
3549	}
3550	rcu_read_unlock();
3551
3552	memcg_drain_all_list_lrus(kmemcg_id, parent);
3553
3554	memcg_free_cache_id(kmemcg_id);
3555}
3556
3557static void memcg_free_kmem(struct mem_cgroup *memcg)
3558{
3559	/* css_alloc() failed, offlining didn't happen */
3560	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
3561		memcg_offline_kmem(memcg);
3562
3563	if (memcg->kmem_state == KMEM_ALLOCATED) {
3564		WARN_ON(!list_empty(&memcg->kmem_caches));
3565		static_branch_dec(&memcg_kmem_enabled_key);
3566	}
3567}
3568#else
3569static int memcg_online_kmem(struct mem_cgroup *memcg)
3570{
3571	return 0;
3572}
3573static void memcg_offline_kmem(struct mem_cgroup *memcg)
3574{
3575}
3576static void memcg_free_kmem(struct mem_cgroup *memcg)
3577{
3578}
3579#endif /* CONFIG_MEMCG_KMEM */
3580
3581static int memcg_update_kmem_max(struct mem_cgroup *memcg,
3582				 unsigned long max)
3583{
3584	int ret;
3585
3586	mutex_lock(&memcg_max_mutex);
3587	ret = page_counter_set_max(&memcg->kmem, max);
3588	mutex_unlock(&memcg_max_mutex);
3589	return ret;
3590}
3591
3592static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3593{
3594	int ret;
3595
3596	mutex_lock(&memcg_max_mutex);
3597
3598	ret = page_counter_set_max(&memcg->tcpmem, max);
3599	if (ret)
3600		goto out;
3601
3602	if (!memcg->tcpmem_active) {
3603		/*
3604		 * The active flag needs to be written after the static_key
3605		 * update. This is what guarantees that the socket activation
3606		 * function is the last one to run. See mem_cgroup_sk_alloc()
3607		 * for details, and note that we don't mark any socket as
3608		 * belonging to this memcg until that flag is up.
3609		 *
3610		 * We need to do this, because static_keys will span multiple
3611		 * sites, but we can't control their order. If we mark a socket
3612		 * as accounted, but the accounting functions are not patched in
3613		 * yet, we'll lose accounting.
3614		 *
3615		 * We never race with the readers in mem_cgroup_sk_alloc(),
3616		 * because when this value change, the code to process it is not
3617		 * patched in yet.
3618		 */
3619		static_branch_inc(&memcg_sockets_enabled_key);
3620		memcg->tcpmem_active = true;
3621	}
3622out:
3623	mutex_unlock(&memcg_max_mutex);
3624	return ret;
3625}
3626
3627/*
3628 * The users of this function are the write handlers for
3629 * RES_LIMIT and RES_SOFT_LIMIT.
3630 */
3631static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3632				char *buf, size_t nbytes, loff_t off)
3633{
3634	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3635	unsigned long nr_pages;
3636	int ret;
3637
3638	buf = strstrip(buf);
3639	ret = page_counter_memparse(buf, "-1", &nr_pages);
3640	if (ret)
3641		return ret;
3642
3643	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3644	case RES_LIMIT:
3645		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3646			ret = -EINVAL;
3647			break;
3648		}
3649		switch (MEMFILE_TYPE(of_cft(of)->private)) {
3650		case _MEM:
3651			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3652			break;
3653		case _MEMSWAP:
3654			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3655			break;
3656		case _KMEM:
3657			pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
3658				     "Please report your usecase to linux-mm@kvack.org if you "
3659				     "depend on this functionality.\n");
3660			ret = memcg_update_kmem_max(memcg, nr_pages);
3661			break;
3662		case _TCP:
3663			ret = memcg_update_tcp_max(memcg, nr_pages);
3664			break;
3665		}
3666		break;
3667	case RES_SOFT_LIMIT:
3668		memcg->soft_limit = nr_pages;
3669		ret = 0;
3670		break;
3671	}
3672	return ret ?: nbytes;
3673}
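
/*
 * Example writes handled above (cgroup v1; values illustrative):
 *
 *	# echo 1G > memory.limit_in_bytes		(mem_cgroup_resize_max)
 *	# echo -1 > memory.limit_in_bytes		(unlimited)
 *	# echo 64M > memory.kmem.tcp.limit_in_bytes	(memcg_update_tcp_max)
 *
 * page_counter_memparse() accepts K/M/G suffixes, and the literal "-1"
 * maps to PAGE_COUNTER_MAX per the second argument passed above.
 */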
3674
3675static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3676				size_t nbytes, loff_t off)
3677{
3678	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3679	struct page_counter *counter;
3680
3681	switch (MEMFILE_TYPE(of_cft(of)->private)) {
3682	case _MEM:
3683		counter = &memcg->memory;
3684		break;
3685	case _MEMSWAP:
3686		counter = &memcg->memsw;
3687		break;
3688	case _KMEM:
3689		counter = &memcg->kmem;
3690		break;
3691	case _TCP:
3692		counter = &memcg->tcpmem;
3693		break;
3694	default:
3695		BUG();
3696	}
3697
3698	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3699	case RES_MAX_USAGE:
3700		page_counter_reset_watermark(counter);
3701		break;
3702	case RES_FAILCNT:
3703		counter->failcnt = 0;
3704		break;
3705	default:
3706		BUG();
3707	}
3708
3709	return nbytes;
3710}
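
/*
 * Illustrative: any write to a *.max_usage_in_bytes or *.failcnt file
 * lands here; the written value is ignored, the write itself resets the
 * watermark or failure count, e.g.
 *
 *	# echo 0 > memory.max_usage_in_bytes
 */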
3711
3712static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3713					struct cftype *cft)
3714{
3715	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3716}
3717
3718#ifdef CONFIG_MMU
3719static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3720					struct cftype *cft, u64 val)
3721{
3722	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3723
3724	if (val & ~MOVE_MASK)
3725		return -EINVAL;
3726
3727	/*
3728	 * No kind of locking is needed in here, because ->can_attach() will
3729	 * check this value once in the beginning of the process, and then carry
3730	 * on with stale data. This means that changes to this value will only
3731	 * affect task migrations starting after the change.
3732	 */
3733	memcg->move_charge_at_immigrate = val;
3734	return 0;
3735}
3736#else
3737static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3738					struct cftype *cft, u64 val)
3739{
3740	return -ENOSYS;
3741}
3742#endif
3743
3744#ifdef CONFIG_NUMA
3745
3746#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3747#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3748#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
3749
3750static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3751					   int nid, unsigned int lru_mask)
3752{
3753	struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
3754	unsigned long nr = 0;
3755	enum lru_list lru;
3756
3757	VM_BUG_ON((unsigned)nid >= nr_node_ids);
3758
3759	for_each_lru(lru) {
3760		if (!(BIT(lru) & lru_mask))
3761			continue;
3762		nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
3763	}
3764	return nr;
3765}
3766
3767static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3768					     unsigned int lru_mask)
3769{
3770	unsigned long nr = 0;
3771	enum lru_list lru;
3772
3773	for_each_lru(lru) {
3774		if (!(BIT(lru) & lru_mask))
3775			continue;
3776		nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
3777	}
3778	return nr;
3779}
3780
3781static int memcg_numa_stat_show(struct seq_file *m, void *v)
3782{
3783	struct numa_stat {
3784		const char *name;
3785		unsigned int lru_mask;
3786	};
3787
3788	static const struct numa_stat stats[] = {
3789		{ "total", LRU_ALL },
3790		{ "file", LRU_ALL_FILE },
3791		{ "anon", LRU_ALL_ANON },
3792		{ "unevictable", BIT(LRU_UNEVICTABLE) },
3793	};
3794	const struct numa_stat *stat;
3795	int nid;
3796	unsigned long nr;
3797	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3798
3799	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3800		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3801		seq_printf(m, "%s=%lu", stat->name, nr);
3802		for_each_node_state(nid, N_MEMORY) {
3803			nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3804							  stat->lru_mask);
3805			seq_printf(m, " N%d=%lu", nid, nr);
3806		}
3807		seq_putc(m, '\n');
3808	}
3809
3810	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3811		struct mem_cgroup *iter;
3812
3813		nr = 0;
3814		for_each_mem_cgroup_tree(iter, memcg)
3815			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3816		seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3817		for_each_node_state(nid, N_MEMORY) {
3818			nr = 0;
3819			for_each_mem_cgroup_tree(iter, memcg)
3820				nr += mem_cgroup_node_nr_lru_pages(
3821					iter, nid, stat->lru_mask);
3822			seq_printf(m, " N%d=%lu", nid, nr);
3823		}
3824		seq_putc(m, '\n');
3825	}
3826
3827	return 0;
3828}
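
/*
 * The memory.numa_stat output produced above looks like (illustrative
 * values, two-node machine):
 *
 *	total=1024 N0=512 N1=512
 *	file=768 N0=384 N1=384
 *	...
 *	hierarchical_total=2048 N0=1024 N1=1024
 *
 * one line per LRU mask: the local count first, then per-node
 * breakdowns, then the same rolled up over the subtree.
 */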
3829#endif /* CONFIG_NUMA */
3830
3831static const unsigned int memcg1_stats[] = {
3832	MEMCG_CACHE,
3833	MEMCG_RSS,
3834	MEMCG_RSS_HUGE,
3835	NR_SHMEM,
3836	NR_FILE_MAPPED,
3837	NR_FILE_DIRTY,
3838	NR_WRITEBACK,
3839	MEMCG_SWAP,
3840};
3841
3842static const char *const memcg1_stat_names[] = {
3843	"cache",
3844	"rss",
3845	"rss_huge",
3846	"shmem",
3847	"mapped_file",
3848	"dirty",
3849	"writeback",
3850	"swap",
3851};
3852
3853/* Universal VM events cgroup1 shows, original sort order */
3854static const unsigned int memcg1_events[] = {
3855	PGPGIN,
3856	PGPGOUT,
3857	PGFAULT,
3858	PGMAJFAULT,
3859};
3860
3861static const char *const memcg1_event_names[] = {
3862	"pgpgin",
3863	"pgpgout",
3864	"pgfault",
3865	"pgmajfault",
3866};
3867
3868static int memcg_stat_show(struct seq_file *m, void *v)
3869{
3870	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3871	unsigned long memory, memsw;
3872	struct mem_cgroup *mi;
3873	unsigned int i;
3874
3875	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
3876	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3877
3878	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3879		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3880			continue;
3881		seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
3882			   memcg_page_state_local(memcg, memcg1_stats[i]) *
3883			   PAGE_SIZE);
3884	}
3885
3886	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3887		seq_printf(m, "%s %lu\n", memcg1_event_names[i],
3888			   memcg_events_local(memcg, memcg1_events[i]));
3889
3890	for (i = 0; i < NR_LRU_LISTS; i++)
3891		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3892			   memcg_page_state_local(memcg, NR_LRU_BASE + i) *
3893			   PAGE_SIZE);
3894
3895	/* Hierarchical information */
3896	memory = memsw = PAGE_COUNTER_MAX;
3897	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3898		memory = min(memory, mi->memory.max);
3899		memsw = min(memsw, mi->memsw.max);
3900	}
3901	seq_printf(m, "hierarchical_memory_limit %llu\n",
3902		   (u64)memory * PAGE_SIZE);
3903	if (do_memsw_account())
3904		seq_printf(m, "hierarchical_memsw_limit %llu\n",
3905			   (u64)memsw * PAGE_SIZE);
3906
3907	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3908		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3909			continue;
3910		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
3911			   (u64)memcg_page_state(memcg, memcg1_stats[i]) *
3912			   PAGE_SIZE);
3913	}
3914
3915	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3916		seq_printf(m, "total_%s %llu\n", memcg1_event_names[i],
3917			   (u64)memcg_events(memcg, memcg1_events[i]));
3918
3919	for (i = 0; i < NR_LRU_LISTS; i++)
3920		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i],
3921			   (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
3922			   PAGE_SIZE);
3923
3924#ifdef CONFIG_DEBUG_VM
3925	{
3926		pg_data_t *pgdat;
3927		struct mem_cgroup_per_node *mz;
3928		struct zone_reclaim_stat *rstat;
3929		unsigned long recent_rotated[2] = {0, 0};
3930		unsigned long recent_scanned[2] = {0, 0};
3931
3932		for_each_online_pgdat(pgdat) {
3933			mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
3934			rstat = &mz->lruvec.reclaim_stat;
3935
3936			recent_rotated[0] += rstat->recent_rotated[0];
3937			recent_rotated[1] += rstat->recent_rotated[1];
3938			recent_scanned[0] += rstat->recent_scanned[0];
3939			recent_scanned[1] += rstat->recent_scanned[1];
3940		}
3941		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3942		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3943		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3944		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
3945	}
3946#endif
3947
3948	return 0;
3949}
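
/*
 * A truncated, illustrative memory.stat as produced above:
 *
 *	cache 1048576
 *	rss 8192
 *	pgpgin 57
 *	inactive_anon 0
 *	hierarchical_memory_limit 9223372036854771712
 *	total_cache 1048576
 *
 * Byte values are page counts multiplied by PAGE_SIZE; "total_*" lines
 * aggregate the whole subtree.
 */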
3950
3951static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3952				      struct cftype *cft)
3953{
3954	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3955
3956	return mem_cgroup_swappiness(memcg);
3957}
3958
3959static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3960				       struct cftype *cft, u64 val)
3961{
3962	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3963
3964	if (val > 100)
3965		return -EINVAL;
3966
3967	if (css->parent)
3968		memcg->swappiness = val;
3969	else
3970		vm_swappiness = val;
3971
3972	return 0;
3973}
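
/*
 * Example: echo 0 > memory.swappiness biases reclaim for this memcg away
 * from anonymous pages, while writing to the root cgroup's file adjusts
 * the global vm_swappiness instead, per the css->parent check above.
 */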
3974
3975static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3976{
3977	struct mem_cgroup_threshold_ary *t;
3978	unsigned long usage;
3979	int i;
3980
3981	rcu_read_lock();
3982	if (!swap)
3983		t = rcu_dereference(memcg->thresholds.primary);
3984	else
3985		t = rcu_dereference(memcg->memsw_thresholds.primary);
3986
3987	if (!t)
3988		goto unlock;
3989
3990	usage = mem_cgroup_usage(memcg, swap);
3991
3992	/*
3993	 * current_threshold points to the threshold just below or equal to
3994	 * usage. If that no longer holds, a threshold was crossed after the
3995	 * last call of __mem_cgroup_threshold().
3996	 */
3997	i = t->current_threshold;
3998
3999	/*
4000	 * Iterate backward over array of thresholds starting from
4001	 * current_threshold and check if a threshold is crossed.
4002	 * If none of thresholds below usage is crossed, we read
4003	 * only one element of the array here.
4004	 */
4005	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4006		eventfd_signal(t->entries[i].eventfd, 1);
4007
4008	/* i = current_threshold + 1 */
4009	i++;
4010
4011	/*
4012	 * Iterate forward over array of thresholds starting from
4013	 * current_threshold+1 and check if a threshold is crossed.
4014	 * If none of thresholds above usage is crossed, we read
4015	 * only one element of the array here.
4016	 */
4017	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4018		eventfd_signal(t->entries[i].eventfd, 1);
4019
4020	/* Update current_threshold */
4021	t->current_threshold = i - 1;
4022unlock:
4023	rcu_read_unlock();
4024}
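
/*
 * Worked example (illustrative): with sorted thresholds {4M, 8M, 16M}
 * and current_threshold at 8M, a usage sample of 5M makes the backward
 * scan signal the 8M eventfd (crossed downward) and leaves
 * current_threshold at 4M; a later sample of 20M makes the forward scan
 * signal the 8M and 16M eventfds and moves current_threshold to 16M.
 */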
4025
4026static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4027{
4028	while (memcg) {
4029		__mem_cgroup_threshold(memcg, false);
4030		if (do_memsw_account())
4031			__mem_cgroup_threshold(memcg, true);
4032
4033		memcg = parent_mem_cgroup(memcg);
4034	}
4035}
4036
4037static int compare_thresholds(const void *a, const void *b)
4038{
4039	const struct mem_cgroup_threshold *_a = a;
4040	const struct mem_cgroup_threshold *_b = b;
4041
4042	if (_a->threshold > _b->threshold)
4043		return 1;
4044
4045	if (_a->threshold < _b->threshold)
4046		return -1;
4047
4048	return 0;
4049}
4050
4051static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4052{
4053	struct mem_cgroup_eventfd_list *ev;
4054
4055	spin_lock(&memcg_oom_lock);
4056
4057	list_for_each_entry(ev, &memcg->oom_notify, list)
4058		eventfd_signal(ev->eventfd, 1);
4059
4060	spin_unlock(&memcg_oom_lock);
4061	return 0;
4062}
4063
4064static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4065{
4066	struct mem_cgroup *iter;
4067
4068	for_each_mem_cgroup_tree(iter, memcg)
4069		mem_cgroup_oom_notify_cb(iter);
4070}
4071
4072static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4073	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4074{
4075	struct mem_cgroup_thresholds *thresholds;
4076	struct mem_cgroup_threshold_ary *new;
4077	unsigned long threshold;
4078	unsigned long usage;
4079	int i, size, ret;
4080
4081	ret = page_counter_memparse(args, "-1", &threshold);
4082	if (ret)
4083		return ret;
4084
4085	mutex_lock(&memcg->thresholds_lock);
4086
4087	if (type == _MEM) {
4088		thresholds = &memcg->thresholds;
4089		usage = mem_cgroup_usage(memcg, false);
4090	} else if (type == _MEMSWAP) {
4091		thresholds = &memcg->memsw_thresholds;
4092		usage = mem_cgroup_usage(memcg, true);
4093	} else
4094		BUG();
4095
4096	/* Check if a threshold crossed before adding a new one */
4097	if (thresholds->primary)
4098		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4099
4100	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4101
4102	/* Allocate memory for new array of thresholds */
4103	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4104	if (!new) {
4105		ret = -ENOMEM;
4106		goto unlock;
4107	}
4108	new->size = size;
4109
4110	/* Copy thresholds (if any) to new array */
4111	if (thresholds->primary) {
4112		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
4113				sizeof(struct mem_cgroup_threshold));
4114	}
4115
4116	/* Add new threshold */
4117	new->entries[size - 1].eventfd = eventfd;
4118	new->entries[size - 1].threshold = threshold;
4119
4120	/* Sort thresholds. Registering a new threshold isn't time-critical */
4121	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
4122			compare_thresholds, NULL);
4123
4124	/* Find current threshold */
4125	new->current_threshold = -1;
4126	for (i = 0; i < size; i++) {
4127		if (new->entries[i].threshold <= usage) {
4128			/*
4129			 * new->current_threshold will not be used until
4130			 * rcu_assign_pointer(), so it's safe to increment
4131			 * it here.
4132			 */
4133			++new->current_threshold;
4134		} else
4135			break;
4136	}
4137
4138	/* Free old spare buffer and save old primary buffer as spare */
4139	kfree(thresholds->spare);
4140	thresholds->spare = thresholds->primary;
4141
4142	rcu_assign_pointer(thresholds->primary, new);
4143
4144	/* To be sure that nobody uses thresholds */
4145	synchronize_rcu();
4146
4147unlock:
4148	mutex_unlock(&memcg->thresholds_lock);
4149
4150	return ret;
4151}
4152
4153static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4154	struct eventfd_ctx *eventfd, const char *args)
4155{
4156	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4157}
4158
4159static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4160	struct eventfd_ctx *eventfd, const char *args)
4161{
4162	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4163}
4164
4165static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4166	struct eventfd_ctx *eventfd, enum res_type type)
4167{
4168	struct mem_cgroup_thresholds *thresholds;
4169	struct mem_cgroup_threshold_ary *new;
4170	unsigned long usage;
4171	int i, j, size;
4172
4173	mutex_lock(&memcg->thresholds_lock);
4174
4175	if (type == _MEM) {
4176		thresholds = &memcg->thresholds;
4177		usage = mem_cgroup_usage(memcg, false);
4178	} else if (type == _MEMSWAP) {
4179		thresholds = &memcg->memsw_thresholds;
4180		usage = mem_cgroup_usage(memcg, true);
4181	} else
4182		BUG();
4183
4184	if (!thresholds->primary)
4185		goto unlock;
4186
4187	/* Check if a threshold crossed before removing */
4188	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4189
4190	/* Calculate the new number of thresholds */
4191	size = 0;
4192	for (i = 0; i < thresholds->primary->size; i++) {
4193		if (thresholds->primary->entries[i].eventfd != eventfd)
4194			size++;
4195	}
4196
4197	new = thresholds->spare;
4198
4199	/* Set thresholds array to NULL if we don't have thresholds */
4200	if (!size) {
4201		kfree(new);
4202		new = NULL;
4203		goto swap_buffers;
4204	}
4205
4206	new->size = size;
4207
4208	/* Copy thresholds and find current threshold */
4209	new->current_threshold = -1;
4210	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4211		if (thresholds->primary->entries[i].eventfd == eventfd)
4212			continue;
4213
4214		new->entries[j] = thresholds->primary->entries[i];
4215		if (new->entries[j].threshold <= usage) {
4216			/*
4217			 * new->current_threshold will not be used
4218			 * until rcu_assign_pointer(), so it's safe to increment
4219			 * it here.
4220			 */
4221			++new->current_threshold;
4222		}
4223		j++;
4224	}
4225
4226swap_buffers:
4227	/* Swap primary and spare array */
4228	thresholds->spare = thresholds->primary;
4229
4230	rcu_assign_pointer(thresholds->primary, new);
4231
4232	/* To be sure that nobody uses thresholds */
4233	synchronize_rcu();
4234
4235	/* If all events are unregistered, free the spare array */
4236	if (!new) {
4237		kfree(thresholds->spare);
4238		thresholds->spare = NULL;
4239	}
4240unlock:
4241	mutex_unlock(&memcg->thresholds_lock);
4242}
4243
4244static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4245	struct eventfd_ctx *eventfd)
4246{
4247	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4248}
4249
4250static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4251	struct eventfd_ctx *eventfd)
4252{
4253	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4254}
4255
4256static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4257	struct eventfd_ctx *eventfd, const char *args)
4258{
4259	struct mem_cgroup_eventfd_list *event;
4260
4261	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4262	if (!event)
4263		return -ENOMEM;
4264
4265	spin_lock(&memcg_oom_lock);
4266
4267	event->eventfd = eventfd;
4268	list_add(&event->list, &memcg->oom_notify);
4269
4270	/* already in OOM ? */
4271	if (memcg->under_oom)
4272		eventfd_signal(eventfd, 1);
4273	spin_unlock(&memcg_oom_lock);
4274
4275	return 0;
4276}
4277
4278static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4279	struct eventfd_ctx *eventfd)
4280{
4281	struct mem_cgroup_eventfd_list *ev, *tmp;
4282
4283	spin_lock(&memcg_oom_lock);
4284
4285	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4286		if (ev->eventfd == eventfd) {
4287			list_del(&ev->list);
4288			kfree(ev);
4289		}
4290	}
4291
4292	spin_unlock(&memcg_oom_lock);
4293}
4294
4295static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4296{
4297	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4298
4299	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4300	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4301	seq_printf(sf, "oom_kill %lu\n",
4302		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4303	return 0;
4304}
4305
4306static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4307	struct cftype *cft, u64 val)
4308{
4309	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4310
4311	/* cannot be set on the root cgroup, and only 0 and 1 are allowed */
4312	if (!css->parent || !((val == 0) || (val == 1)))
4313		return -EINVAL;
4314
4315	memcg->oom_kill_disable = val;
4316	if (!val)
4317		memcg_oom_recover(memcg);
4318
4319	return 0;
4320}
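
/*
 * Example: echo 1 > memory.oom_control disables the OOM killer for this
 * cgroup; tasks that hit the limit then wait for memory to be freed or
 * the limit to be raised instead of being killed. Re-enabling with
 * echo 0 calls memcg_oom_recover() above to wake any such waiters.
 */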
4321
4322#ifdef CONFIG_CGROUP_WRITEBACK
4323
4324#include <trace/events/writeback.h>
4325
4326static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4327{
4328	return wb_domain_init(&memcg->cgwb_domain, gfp);
4329}
4330
4331static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4332{
4333	wb_domain_exit(&memcg->cgwb_domain);
4334}
4335
4336static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4337{
4338	wb_domain_size_changed(&memcg->cgwb_domain);
4339}
4340
4341struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4342{
4343	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4344
4345	if (!memcg->css.parent)
4346		return NULL;
4347
4348	return &memcg->cgwb_domain;
4349}
4350
4351/*
4352 * idx can be of type enum memcg_stat_item or node_stat_item.
4353 * Keep in sync with memcg_exact_page().
4354 */
4355static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
4356{
4357	long x = atomic_long_read(&memcg->vmstats[idx]);
4358	int cpu;
4359
4360	for_each_online_cpu(cpu)
4361		x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
4362	if (x < 0)
4363		x = 0;
4364	return x;
4365}
4366
4367/**
4368 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4369 * @wb: bdi_writeback in question
4370 * @pfilepages: out parameter for number of file pages
4371 * @pheadroom: out parameter for number of allocatable pages according to memcg
4372 * @pdirty: out parameter for number of dirty pages
4373 * @pwriteback: out parameter for number of pages under writeback
4374 *
4375 * Determine the numbers of file, headroom, dirty, and writeback pages in
4376 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4377 * is a bit more involved.
4378 *
4379 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4380 * headroom is calculated as the lowest headroom of itself and the
4381 * ancestors.  Note that this doesn't consider the actual amount of
4382 * available memory in the system.  The caller should further cap
4383 * *@pheadroom accordingly.
4384 */
4385void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4386			 unsigned long *pheadroom, unsigned long *pdirty,
4387			 unsigned long *pwriteback)
4388{
4389	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4390	struct mem_cgroup *parent;
4391
4392	*pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
4393
4394	/* this should eventually include NR_UNSTABLE_NFS */
4395	*pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
4396	*pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) +
4397			memcg_exact_page_state(memcg, NR_ACTIVE_FILE);
4398	*pheadroom = PAGE_COUNTER_MAX;
4399
4400	while ((parent = parent_mem_cgroup(memcg))) {
4401		unsigned long ceiling = min(memcg->memory.max, memcg->high);
4402		unsigned long used = page_counter_read(&memcg->memory);
4403
4404		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4405		memcg = parent;
4406	}
4407}
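
/*
 * Headroom example (illustrative numbers): a memcg with max = 200MB,
 * high = 150MB and 100MB used contributes min(200M, 150M) - 100M = 50MB;
 * if an ancestor has only 20MB of headroom, the loop above leaves
 * *pheadroom at 20MB, the minimum along the hierarchy.
 */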
4408
4409/*
4410 * Foreign dirty flushing
4411 *
4412 * There's an inherent mismatch between memcg and writeback.  The former
4413 * tracks ownership per-page while the latter per-inode.  This was a
4414 * deliberate design decision because honoring per-page ownership in the
4415 * writeback path is complicated, may lead to higher CPU and IO overheads
4416 * and deemed unnecessary given that write-sharing an inode across
4417 * different cgroups isn't a common use-case.
4418 *
4419 * Combined with inode majority-writer ownership switching, this works well
4420 * enough in most cases but there are some pathological cases.  For
4421 * example, let's say there are two cgroups A and B which keep writing to
4422 * different but confined parts of the same inode.  B owns the inode and
4423 * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4424 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4425 * triggering background writeback.  A will be slowed down without a way to
4426 * make writeback of the dirty pages happen.
4427 *
4428 * Conditions like the above can lead to a cgroup getting repeatedly and
4429 * severely throttled after making some progress after each
4430 * dirty_expire_interval while the underlying IO device is almost
4431 * completely idle.
4432 *
4433 * Solving this problem completely requires matching the ownership tracking
4434 * granularities between memcg and writeback in either direction.  However,
4435 * the more egregious behaviors can be avoided by simply remembering the
4436 * most recent foreign dirtying events and initiating remote flushes on
4437 * them when local writeback isn't enough to keep the memory clean enough.
4438 *
4439 * The following two functions implement such mechanism.  When a foreign
4440 * page - a page whose memcg and writeback ownerships don't match - is
4441 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4442 * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4443 * decides that the memcg needs to sleep due to high dirty ratio, it calls
4444 * mem_cgroup_flush_foreign() which queues writeback on the recorded
4445 * foreign bdi_writebacks which haven't expired.  Both the numbers of
4446 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4447 * limited to MEMCG_CGWB_FRN_CNT.
4448 *
4449 * The mechanism only remembers IDs and doesn't hold any object references.
4450 * As being wrong occasionally doesn't matter, updates and accesses to the
4451 * records are lockless and racy.
4452 */
4453void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
4454					     struct bdi_writeback *wb)
4455{
4456	struct mem_cgroup *memcg = page->mem_cgroup;
4457	struct memcg_cgwb_frn *frn;
4458	u64 now = get_jiffies_64();
4459	u64 oldest_at = now;
4460	int oldest = -1;
4461	int i;
4462
4463	trace_track_foreign_dirty(page, wb);
4464
4465	/*
4466	 * Pick the slot to use.  If there is already a slot for @wb, keep
4467	 * using it.  If not replace the oldest one which isn't being
4468	 * written out.
4469	 */
4470	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4471		frn = &memcg->cgwb_frn[i];
4472		if (frn->bdi_id == wb->bdi->id &&
4473		    frn->memcg_id == wb->memcg_css->id)
4474			break;
4475		if (time_before64(frn->at, oldest_at) &&
4476		    atomic_read(&frn->done.cnt) == 1) {
4477			oldest = i;
4478			oldest_at = frn->at;
4479		}
4480	}
4481
4482	if (i < MEMCG_CGWB_FRN_CNT) {
4483		/*
4484		 * Re-using an existing one.  Update timestamp lazily to
4485		 * avoid making the cacheline hot.  We want them to be
4486		 * reasonably up-to-date and significantly shorter than
4487		 * dirty_expire_interval as that's what expires the record.
4488		 * Use the shorter of 1s and dirty_expire_interval / 8.
4489		 */
4490		unsigned long update_intv =
4491			min_t(unsigned long, HZ,
4492			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4493
4494		if (time_before64(frn->at, now - update_intv))
4495			frn->at = now;
4496	} else if (oldest >= 0) {
4497		/* replace the oldest free one */
4498		frn = &memcg->cgwb_frn[oldest];
4499		frn->bdi_id = wb->bdi->id;
4500		frn->memcg_id = wb->memcg_css->id;
4501		frn->at = now;
4502	}
4503}
4504
4505/* issue foreign writeback flushes for recorded foreign dirtying events */
4506void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4507{
4508	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4509	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4510	u64 now = jiffies_64;
4511	int i;
4512
4513	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4514		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4515
4516		/*
4517		 * If the record is older than dirty_expire_interval,
4518		 * writeback on it has already started.  No need to kick it
4519		 * off again.  Also, don't start a new one if there's
4520		 * already one in flight.
4521		 */
4522		if (time_after64(frn->at, now - intv) &&
4523		    atomic_read(&frn->done.cnt) == 1) {
4524			frn->at = 0;
4525			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4526			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0,
4527					       WB_REASON_FOREIGN_FLUSH,
4528					       &frn->done);
4529		}
4530	}
4531}
4532
4533#else	/* CONFIG_CGROUP_WRITEBACK */
4534
4535static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4536{
4537	return 0;
4538}
4539
4540static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4541{
4542}
4543
4544static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4545{
4546}
4547
4548#endif	/* CONFIG_CGROUP_WRITEBACK */
4549
4550/*
4551 * DO NOT USE IN NEW FILES.
4552 *
4553 * "cgroup.event_control" implementation.
4554 *
4555 * This is way over-engineered.  It tries to support fully configurable
4556 * events for each user.  Such level of flexibility is completely
4557 * unnecessary especially in the light of the planned unified hierarchy.
4558 *
4559 * Please deprecate this and replace with something simpler if at all
4560 * possible.
4561 */
4562
4563/*
4564 * Unregister event and free resources.
4565 *
4566 * Gets called from workqueue.
4567 */
4568static void memcg_event_remove(struct work_struct *work)
4569{
4570	struct mem_cgroup_event *event =
4571		container_of(work, struct mem_cgroup_event, remove);
4572	struct mem_cgroup *memcg = event->memcg;
4573
4574	remove_wait_queue(event->wqh, &event->wait);
4575
4576	event->unregister_event(memcg, event->eventfd);
4577
4578	/* Notify userspace the event is going away. */
4579	eventfd_signal(event->eventfd, 1);
4580
4581	eventfd_ctx_put(event->eventfd);
4582	kfree(event);
4583	css_put(&memcg->css);
4584}
4585
4586/*
4587 * Gets called on EPOLLHUP on eventfd when user closes it.
4588 *
4589 * Called with wqh->lock held and interrupts disabled.
4590 */
4591static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4592			    int sync, void *key)
4593{
4594	struct mem_cgroup_event *event =
4595		container_of(wait, struct mem_cgroup_event, wait);
4596	struct mem_cgroup *memcg = event->memcg;
4597	__poll_t flags = key_to_poll(key);
4598
4599	if (flags & EPOLLHUP) {
4600		/*
4601		 * If the event has been detached at cgroup removal, we
4602		 * can simply return knowing the other side will cleanup
4603		 * for us.
4604		 *
4605		 * We can't race against event freeing since the other
4606		 * side will require wqh->lock via remove_wait_queue(),
4607		 * which we hold.
4608		 */
4609		spin_lock(&memcg->event_list_lock);
4610		if (!list_empty(&event->list)) {
4611			list_del_init(&event->list);
4612			/*
4613			 * We are in atomic context, but cgroup_event_remove()
4614			 * may sleep, so we have to call it in workqueue.
4615			 */
4616			schedule_work(&event->remove);
4617		}
4618		spin_unlock(&memcg->event_list_lock);
4619	}
4620
4621	return 0;
4622}
4623
4624static void memcg_event_ptable_queue_proc(struct file *file,
4625		wait_queue_head_t *wqh, poll_table *pt)
4626{
4627	struct mem_cgroup_event *event =
4628		container_of(pt, struct mem_cgroup_event, pt);
4629
4630	event->wqh = wqh;
4631	add_wait_queue(wqh, &event->wait);
4632}
4633
4634/*
4635 * DO NOT USE IN NEW FILES.
4636 *
4637 * Parse input and register new cgroup event handler.
4638 *
4639 * Input must be in format '<event_fd> <control_fd> <args>'.
4640 * Interpretation of args is defined by control file implementation.
4641 */
4642static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4643					 char *buf, size_t nbytes, loff_t off)
4644{
4645	struct cgroup_subsys_state *css = of_css(of);
4646	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4647	struct mem_cgroup_event *event;
4648	struct cgroup_subsys_state *cfile_css;
4649	unsigned int efd, cfd;
4650	struct fd efile;
4651	struct fd cfile;
4652	const char *name;
4653	char *endp;
4654	int ret;
4655
4656	buf = strstrip(buf);
4657
4658	efd = simple_strtoul(buf, &endp, 10);
4659	if (*endp != ' ')
4660		return -EINVAL;
4661	buf = endp + 1;
4662
4663	cfd = simple_strtoul(buf, &endp, 10);
4664	if ((*endp != ' ') && (*endp != '\0'))
4665		return -EINVAL;
4666	buf = endp + 1;
4667
4668	event = kzalloc(sizeof(*event), GFP_KERNEL);
4669	if (!event)
4670		return -ENOMEM;
4671
4672	event->memcg = memcg;
4673	INIT_LIST_HEAD(&event->list);
4674	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4675	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4676	INIT_WORK(&event->remove, memcg_event_remove);
4677
4678	efile = fdget(efd);
4679	if (!efile.file) {
4680		ret = -EBADF;
4681		goto out_kfree;
4682	}
4683
4684	event->eventfd = eventfd_ctx_fileget(efile.file);
4685	if (IS_ERR(event->eventfd)) {
4686		ret = PTR_ERR(event->eventfd);
4687		goto out_put_efile;
4688	}
4689
4690	cfile = fdget(cfd);
4691	if (!cfile.file) {
4692		ret = -EBADF;
4693		goto out_put_eventfd;
4694	}
4695
4696	/* the process needs read permission on the control file */
4697	/* AV: shouldn't we check that it's been opened for read instead? */
4698	ret = inode_permission(file_inode(cfile.file), MAY_READ);
4699	if (ret < 0)
4700		goto out_put_cfile;
4701
4702	/*
4703	 * Determine the event callbacks and set them in @event.  This used
4704	 * to be done via struct cftype but cgroup core no longer knows
4705	 * about these events.  The following is crude but the whole thing
4706	 * is for compatibility anyway.
4707	 *
4708	 * DO NOT ADD NEW FILES.
4709	 */
4710	name = cfile.file->f_path.dentry->d_name.name;
4711
4712	if (!strcmp(name, "memory.usage_in_bytes")) {
4713		event->register_event = mem_cgroup_usage_register_event;
4714		event->unregister_event = mem_cgroup_usage_unregister_event;
4715	} else if (!strcmp(name, "memory.oom_control")) {
4716		event->register_event = mem_cgroup_oom_register_event;
4717		event->unregister_event = mem_cgroup_oom_unregister_event;
4718	} else if (!strcmp(name, "memory.pressure_level")) {
4719		event->register_event = vmpressure_register_event;
4720		event->unregister_event = vmpressure_unregister_event;
4721	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4722		event->register_event = memsw_cgroup_usage_register_event;
4723		event->unregister_event = memsw_cgroup_usage_unregister_event;
4724	} else {
4725		ret = -EINVAL;
4726		goto out_put_cfile;
4727	}
4728
4729	/*
4730	 * Verify @cfile should belong to @css.  Also, remaining events are
4731	 * automatically removed on cgroup destruction but the removal is
4732	 * asynchronous, so take an extra ref on @css.
4733	 */
4734	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
4735					       &memory_cgrp_subsys);
4736	ret = -EINVAL;
4737	if (IS_ERR(cfile_css))
4738		goto out_put_cfile;
4739	if (cfile_css != css) {
4740		css_put(cfile_css);
4741		goto out_put_cfile;
4742	}
4743
4744	ret = event->register_event(memcg, event->eventfd, buf);
4745	if (ret)
4746		goto out_put_css;
4747
4748	vfs_poll(efile.file, &event->pt);
4749
4750	spin_lock(&memcg->event_list_lock);
4751	list_add(&event->list, &memcg->event_list);
4752	spin_unlock(&memcg->event_list_lock);
4753
4754	fdput(cfile);
4755	fdput(efile);
4756
4757	return nbytes;
4758
4759out_put_css:
4760	css_put(css);
4761out_put_cfile:
4762	fdput(cfile);
4763out_put_eventfd:
4764	eventfd_ctx_put(event->eventfd);
4765out_put_efile:
4766	fdput(efile);
4767out_kfree:
4768	kfree(event);
4769
4770	return ret;
4771}
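
/*
 * Illustrative registration from userspace (fd numbers are examples):
 * with efd = eventfd(0, 0) and cfd = open("memory.usage_in_bytes",
 * O_RDONLY), writing
 *
 *	"<efd> <cfd> 8M"
 *
 * to cgroup.event_control arms a usage threshold at 8MB; the eventfd
 * becomes readable whenever that threshold is crossed (see
 * __mem_cgroup_threshold() above).
 */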
4772
4773static struct cftype mem_cgroup_legacy_files[] = {
4774	{
4775		.name = "usage_in_bytes",
4776		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4777		.read_u64 = mem_cgroup_read_u64,
4778	},
4779	{
4780		.name = "max_usage_in_bytes",
4781		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4782		.write = mem_cgroup_reset,
4783		.read_u64 = mem_cgroup_read_u64,
4784	},
4785	{
4786		.name = "limit_in_bytes",
4787		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4788		.write = mem_cgroup_write,
4789		.read_u64 = mem_cgroup_read_u64,
4790	},
4791	{
4792		.name = "soft_limit_in_bytes",
4793		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4794		.write = mem_cgroup_write,
4795		.read_u64 = mem_cgroup_read_u64,
4796	},
4797	{
4798		.name = "failcnt",
4799		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4800		.write = mem_cgroup_reset,
4801		.read_u64 = mem_cgroup_read_u64,
4802	},
4803	{
4804		.name = "stat",
4805		.seq_show = memcg_stat_show,
4806	},
4807	{
4808		.name = "force_empty",
4809		.write = mem_cgroup_force_empty_write,
4810	},
4811	{
4812		.name = "use_hierarchy",
4813		.write_u64 = mem_cgroup_hierarchy_write,
4814		.read_u64 = mem_cgroup_hierarchy_read,
4815	},
4816	{
4817		.name = "cgroup.event_control",		/* XXX: for compat */
4818		.write = memcg_write_event_control,
4819		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
4820	},
4821	{
4822		.name = "swappiness",
4823		.read_u64 = mem_cgroup_swappiness_read,
4824		.write_u64 = mem_cgroup_swappiness_write,
4825	},
4826	{
4827		.name = "move_charge_at_immigrate",
4828		.read_u64 = mem_cgroup_move_charge_read,
4829		.write_u64 = mem_cgroup_move_charge_write,
4830	},
4831	{
4832		.name = "oom_control",
4833		.seq_show = mem_cgroup_oom_control_read,
4834		.write_u64 = mem_cgroup_oom_control_write,
4835		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4836	},
4837	{
4838		.name = "pressure_level",
4839	},
4840#ifdef CONFIG_NUMA
4841	{
4842		.name = "numa_stat",
4843		.seq_show = memcg_numa_stat_show,
4844	},
4845#endif
4846	{
4847		.name = "kmem.limit_in_bytes",
4848		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
4849		.write = mem_cgroup_write,
4850		.read_u64 = mem_cgroup_read_u64,
4851	},
4852	{
4853		.name = "kmem.usage_in_bytes",
4854		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
4855		.read_u64 = mem_cgroup_read_u64,
4856	},
4857	{
4858		.name = "kmem.failcnt",
4859		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
4860		.write = mem_cgroup_reset,
4861		.read_u64 = mem_cgroup_read_u64,
4862	},
4863	{
4864		.name = "kmem.max_usage_in_bytes",
4865		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4866		.write = mem_cgroup_reset,
4867		.read_u64 = mem_cgroup_read_u64,
4868	},
4869#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
4870	{
4871		.name = "kmem.slabinfo",
4872		.seq_start = memcg_slab_start,
4873		.seq_next = memcg_slab_next,
4874		.seq_stop = memcg_slab_stop,
4875		.seq_show = memcg_slab_show,
4876	},
4877#endif
4878	{
4879		.name = "kmem.tcp.limit_in_bytes",
4880		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4881		.write = mem_cgroup_write,
4882		.read_u64 = mem_cgroup_read_u64,
4883	},
4884	{
4885		.name = "kmem.tcp.usage_in_bytes",
4886		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4887		.read_u64 = mem_cgroup_read_u64,
4888	},
4889	{
4890		.name = "kmem.tcp.failcnt",
4891		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4892		.write = mem_cgroup_reset,
4893		.read_u64 = mem_cgroup_read_u64,
4894	},
4895	{
4896		.name = "kmem.tcp.max_usage_in_bytes",
4897		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4898		.write = mem_cgroup_reset,
4899		.read_u64 = mem_cgroup_read_u64,
4900	},
4901	{ },	/* terminate */
4902};
4903
4904/*
4905 * Private memory cgroup IDR
4906 *
4907 * Swap-out records and page cache shadow entries need to store memcg
4908 * references in constrained space, so we maintain an ID space that is
4909 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
4910 * memory-controlled cgroups to 64k.
4911 *
4912 * However, there usually are many references to the offline CSS after
4913 * the cgroup has been destroyed, such as page cache or reclaimable
4914 * slab objects, that don't need to hang on to the ID. We want to keep
4915 * those dead CSS from occupying IDs, or we might quickly exhaust the
4916 * relatively small ID space and prevent the creation of new cgroups
4917 * even when there are much fewer than 64k cgroups - possibly none.
4918 *
4919 * Maintain a private 16-bit ID space for memcg, and allow the ID to
4920 * be freed and recycled when it's no longer needed, which is usually
4921 * when the CSS is offlined.
4922 *
4923 * The only exception to that are records of swapped out tmpfs/shmem
4924 * pages that need to be attributed to live ancestors on swapin. But
4925 * those references are manageable from userspace.
4926 */
4927
4928static DEFINE_IDR(mem_cgroup_idr);
4929
4930static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
4931{
4932	if (memcg->id.id > 0) {
4933		idr_remove(&mem_cgroup_idr, memcg->id.id);
4934		memcg->id.id = 0;
4935	}
4936}
4937
4938static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
4939{
4940	refcount_add(n, &memcg->id.ref);
4941}
4942
4943static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
4944{
4945	if (refcount_sub_and_test(n, &memcg->id.ref)) {
4946		mem_cgroup_id_remove(memcg);
4947
4948		/* Memcg ID pins CSS */
4949		css_put(&memcg->css);
4950	}
4951}
4952
4953static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
4954{
4955	mem_cgroup_id_put_many(memcg, 1);
4956}
4957
4958/**
4959 * mem_cgroup_from_id - look up a memcg from a memcg id
4960 * @id: the memcg id to look up
4961 *
4962 * Caller must hold rcu_read_lock().
4963 */
4964struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
4965{
4966	WARN_ON_ONCE(!rcu_read_lock_held());
4967	return idr_find(&mem_cgroup_idr, id);
4968}
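
/*
 * Illustrative sketch (an editorial example, not part of the kernel
 * source): callers look the ID up under RCU and take a CSS reference
 * before using the result, e.g. when resolving a swap record:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 *
 * This is the same pattern mem_cgroup_try_charge() uses further down
 * when charging swapped-in pages.
 */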
4969
4970static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4971{
4972	struct mem_cgroup_per_node *pn;
4973	int tmp = node;
4974	/*
4975	 * This routine is called against possible nodes.
4976	 * But it's a BUG to call kmalloc() against an offline node.
4977	 *
4978	 * TODO: this routine can waste a lot of memory for nodes which will
4979	 *       never be onlined. It would be better to use a memory hotplug
4980	 *       callback function.
4981	 */
4982	if (!node_state(node, N_NORMAL_MEMORY))
4983		tmp = -1;
4984	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4985	if (!pn)
4986		return 1;
4987
4988	pn->lruvec_stat_local = alloc_percpu(struct lruvec_stat);
4989	if (!pn->lruvec_stat_local) {
4990		kfree(pn);
4991		return 1;
4992	}
4993
4994	pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
4995	if (!pn->lruvec_stat_cpu) {
4996		free_percpu(pn->lruvec_stat_local);
4997		kfree(pn);
4998		return 1;
4999	}
5000
5001	lruvec_init(&pn->lruvec);
5002	pn->usage_in_excess = 0;
5003	pn->on_tree = false;
5004	pn->memcg = memcg;
5005
5006	memcg->nodeinfo[node] = pn;
5007	return 0;
5008}
5009
5010static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5011{
5012	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5013
5014	if (!pn)
5015		return;
5016
5017	free_percpu(pn->lruvec_stat_cpu);
5018	free_percpu(pn->lruvec_stat_local);
5019	kfree(pn);
5020}
5021
5022static void __mem_cgroup_free(struct mem_cgroup *memcg)
5023{
5024	int node;
5025
5026	for_each_node(node)
5027		free_mem_cgroup_per_node_info(memcg, node);
5028	free_percpu(memcg->vmstats_percpu);
5029	free_percpu(memcg->vmstats_local);
5030	kfree(memcg);
5031}
5032
5033static void mem_cgroup_free(struct mem_cgroup *memcg)
5034{
5035	memcg_wb_domain_exit(memcg);
5036	/*
5037	 * Flush percpu vmstats and vmevents to guarantee correct values
5038	 * at the parent's and all ancestors' levels.
5039	 */
5040	memcg_flush_percpu_vmstats(memcg, false);
5041	memcg_flush_percpu_vmevents(memcg);
5042	__mem_cgroup_free(memcg);
5043}
5044
5045static struct mem_cgroup *mem_cgroup_alloc(void)
5046{
5047	struct mem_cgroup *memcg;
5048	unsigned int size;
5049	int node;
5050	int __maybe_unused i;
5051
5052	size = sizeof(struct mem_cgroup);
5053	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
5054
5055	memcg = kzalloc(size, GFP_KERNEL);
5056	if (!memcg)
5057		return NULL;
5058
5059	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5060				 1, MEM_CGROUP_ID_MAX,
5061				 GFP_KERNEL);
5062	if (memcg->id.id < 0)
5063		goto fail;
5064
5065	memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu);
5066	if (!memcg->vmstats_local)
5067		goto fail;
5068
5069	memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu);
5070	if (!memcg->vmstats_percpu)
5071		goto fail;
5072
5073	for_each_node(node)
5074		if (alloc_mem_cgroup_per_node_info(memcg, node))
5075			goto fail;
5076
5077	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5078		goto fail;
5079
5080	INIT_WORK(&memcg->high_work, high_work_func);
5081	memcg->last_scanned_node = MAX_NUMNODES;
5082	INIT_LIST_HEAD(&memcg->oom_notify);
5083	mutex_init(&memcg->thresholds_lock);
5084	spin_lock_init(&memcg->move_lock);
5085	vmpressure_init(&memcg->vmpressure);
5086	INIT_LIST_HEAD(&memcg->event_list);
5087	spin_lock_init(&memcg->event_list_lock);
5088	memcg->socket_pressure = jiffies;
5089#ifdef CONFIG_MEMCG_KMEM
5090	memcg->kmemcg_id = -1;
5091#endif
5092#ifdef CONFIG_CGROUP_WRITEBACK
5093	INIT_LIST_HEAD(&memcg->cgwb_list);
5094	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5095		memcg->cgwb_frn[i].done =
5096			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5097#endif
5098#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5099	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5100	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5101	memcg->deferred_split_queue.split_queue_len = 0;
5102#endif
5103	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5104	return memcg;
5105fail:
5106	mem_cgroup_id_remove(memcg);
5107	__mem_cgroup_free(memcg);
5108	return NULL;
5109}
5110
5111static struct cgroup_subsys_state * __ref
5112mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5113{
5114	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5115	struct mem_cgroup *memcg;
5116	long error = -ENOMEM;
5117
5118	memcg = mem_cgroup_alloc();
5119	if (!memcg)
5120		return ERR_PTR(error);
5121
5122	memcg->high = PAGE_COUNTER_MAX;
5123	memcg->soft_limit = PAGE_COUNTER_MAX;
5124	if (parent) {
5125		memcg->swappiness = mem_cgroup_swappiness(parent);
5126		memcg->oom_kill_disable = parent->oom_kill_disable;
5127	}
5128	if (parent && parent->use_hierarchy) {
5129		memcg->use_hierarchy = true;
5130		page_counter_init(&memcg->memory, &parent->memory);
5131		page_counter_init(&memcg->swap, &parent->swap);
5132		page_counter_init(&memcg->memsw, &parent->memsw);
5133		page_counter_init(&memcg->kmem, &parent->kmem);
5134		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5135	} else {
5136		page_counter_init(&memcg->memory, NULL);
5137		page_counter_init(&memcg->swap, NULL);
5138		page_counter_init(&memcg->memsw, NULL);
5139		page_counter_init(&memcg->kmem, NULL);
5140		page_counter_init(&memcg->tcpmem, NULL);
5141		/*
5142		 * Deeper hierarchy with use_hierarchy == false doesn't make
5143		 * much sense so let the cgroup subsystem know about this
5144		 * unfortunate state in our controller.
5145		 */
5146		if (parent != root_mem_cgroup)
5147			memory_cgrp_subsys.broken_hierarchy = true;
5148	}
5149
5150	/* The following stuff does not apply to the root */
5151	if (!parent) {
5152#ifdef CONFIG_MEMCG_KMEM
5153		INIT_LIST_HEAD(&memcg->kmem_caches);
5154#endif
5155		root_mem_cgroup = memcg;
5156		return &memcg->css;
5157	}
5158
5159	error = memcg_online_kmem(memcg);
5160	if (error)
5161		goto fail;
5162
5163	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5164		static_branch_inc(&memcg_sockets_enabled_key);
5165
5166	return &memcg->css;
5167fail:
5168	mem_cgroup_id_remove(memcg);
5169	mem_cgroup_free(memcg);
5170	return ERR_PTR(-ENOMEM);
5171}
5172
5173static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5174{
5175	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5176
5177	/*
5178	 * A memcg must be visible for memcg_expand_shrinker_maps()
5179	 * by the time the maps are allocated. So, we allocate maps
5180	 * here, when for_each_mem_cgroup() can't skip it.
5181	 */
5182	if (memcg_alloc_shrinker_maps(memcg)) {
5183		mem_cgroup_id_remove(memcg);
5184		return -ENOMEM;
5185	}
5186
5187	/* Online state pins memcg ID, memcg ID pins CSS */
5188	refcount_set(&memcg->id.ref, 1);
5189	css_get(css);
5190	return 0;
5191}
5192
5193static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5194{
5195	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5196	struct mem_cgroup_event *event, *tmp;
5197
5198	/*
5199	 * Unregister events and notify userspace.
5200	 * Notify userspace about cgroup removing only after rmdir of cgroup
5201	 * directory to avoid race between userspace and kernelspace.
5202	 */
5203	spin_lock(&memcg->event_list_lock);
5204	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5205		list_del_init(&event->list);
5206		schedule_work(&event->remove);
5207	}
5208	spin_unlock(&memcg->event_list_lock);
5209
5210	page_counter_set_min(&memcg->memory, 0);
5211	page_counter_set_low(&memcg->memory, 0);
5212
5213	memcg_offline_kmem(memcg);
5214	wb_memcg_offline(memcg);
5215
5216	drain_all_stock(memcg);
5217
5218	mem_cgroup_id_put(memcg);
5219}
5220
5221static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5222{
5223	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5224
5225	invalidate_reclaim_iterators(memcg);
5226}
5227
5228static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5229{
5230	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5231	int __maybe_unused i;
5232
5233#ifdef CONFIG_CGROUP_WRITEBACK
5234	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5235		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5236#endif
5237	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5238		static_branch_dec(&memcg_sockets_enabled_key);
5239
5240	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5241		static_branch_dec(&memcg_sockets_enabled_key);
5242
5243	vmpressure_cleanup(&memcg->vmpressure);
5244	cancel_work_sync(&memcg->high_work);
5245	mem_cgroup_remove_from_trees(memcg);
5246	memcg_free_shrinker_maps(memcg);
5247	memcg_free_kmem(memcg);
5248	mem_cgroup_free(memcg);
5249}
5250
5251/**
5252 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5253 * @css: the target css
5254 *
5255 * Reset the states of the mem_cgroup associated with @css.  This is
5256 * invoked when the userland requests disabling on the default hierarchy
5257 * but the memcg is pinned through dependency.  The memcg should stop
5258 * applying policies and should revert to the vanilla state as it may be
5259 * made visible again.
5260 *
5261 * The current implementation only resets the essential configurations.
5262 * This needs to be expanded to cover all the visible parts.
5263 */
5264static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5265{
5266	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5267
5268	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5269	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5270	page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX);
5271	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5272	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5273	page_counter_set_min(&memcg->memory, 0);
5274	page_counter_set_low(&memcg->memory, 0);
5275	memcg->high = PAGE_COUNTER_MAX;
5276	memcg->soft_limit = PAGE_COUNTER_MAX;
5277	memcg_wb_domain_size_changed(memcg);
5278}
5279
5280#ifdef CONFIG_MMU
5281/* Handlers for move charge at task migration. */
5282static int mem_cgroup_do_precharge(unsigned long count)
5283{
5284	int ret;
5285
5286	/* Try a single bulk charge without reclaim first, kswapd may wake */
5287	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5288	if (!ret) {
5289		mc.precharge += count;
5290		return ret;
5291	}
5292
5293	/* Try charges one by one with reclaim, but do not retry */
5294	while (count--) {
5295		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5296		if (ret)
5297			return ret;
5298		mc.precharge++;
5299		cond_resched();
5300	}
5301	return 0;
5302}
5303
5304union mc_target {
5305	struct page	*page;
5306	swp_entry_t	ent;
5307};
5308
5309enum mc_target_type {
5310	MC_TARGET_NONE = 0,
5311	MC_TARGET_PAGE,
5312	MC_TARGET_SWAP,
5313	MC_TARGET_DEVICE,
5314};
5315
5316static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5317						unsigned long addr, pte_t ptent)
5318{
5319	struct page *page = vm_normal_page(vma, addr, ptent);
5320
5321	if (!page || !page_mapped(page))
5322		return NULL;
5323	if (PageAnon(page)) {
5324		if (!(mc.flags & MOVE_ANON))
5325			return NULL;
5326	} else {
5327		if (!(mc.flags & MOVE_FILE))
5328			return NULL;
5329	}
5330	if (!get_page_unless_zero(page))
5331		return NULL;
5332
5333	return page;
5334}
5335
5336#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5337static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5338			pte_t ptent, swp_entry_t *entry)
5339{
5340	struct page *page = NULL;
5341	swp_entry_t ent = pte_to_swp_entry(ptent);
5342
5343	if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
5344		return NULL;
5345
5346	/*
5347	 * Handle MEMORY_DEVICE_PRIVATE pages, which are ZONE_DEVICE pages
5348	 * belonging to a device; because they are not accessible by the CPU,
5349	 * they are stored as special swap entries in the CPU page table.
5350	 */
5351	if (is_device_private_entry(ent)) {
5352		page = device_private_entry_to_page(ent);
5353		/*
5354		 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has
5355		 * a refcount of 1 when free (unlike a normal page)
5356		 */
5357		if (!page_ref_add_unless(page, 1, 1))
5358			return NULL;
5359		return page;
5360	}
5361
5362	/*
5363	 * Because lookup_swap_cache() updates some statistics counter,
5364	 * we call find_get_page() with swapper_space directly.
5365	 */
5366	page = find_get_page(swap_address_space(ent), swp_offset(ent));
5367	if (do_memsw_account())
5368		entry->val = ent.val;
5369
5370	return page;
5371}
5372#else
5373static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5374			pte_t ptent, swp_entry_t *entry)
5375{
5376	return NULL;
5377}
5378#endif
5379
5380static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5381			unsigned long addr, pte_t ptent, swp_entry_t *entry)
5382{
5383	struct page *page = NULL;
5384	struct address_space *mapping;
5385	pgoff_t pgoff;
5386
5387	if (!vma->vm_file) /* anonymous vma */
5388		return NULL;
5389	if (!(mc.flags & MOVE_FILE))
5390		return NULL;
5391
5392	mapping = vma->vm_file->f_mapping;
5393	pgoff = linear_page_index(vma, addr);
5394
5395	/* the page is moved even if it's not RSS of this task (page-faulted). */
5396#ifdef CONFIG_SWAP
5397	/* shmem/tmpfs may report page out on swap: account for that too. */
5398	if (shmem_mapping(mapping)) {
5399		page = find_get_entry(mapping, pgoff);
5400		if (xa_is_value(page)) {
5401			swp_entry_t swp = radix_to_swp_entry(page);
5402			if (do_memsw_account())
5403				*entry = swp;
5404			page = find_get_page(swap_address_space(swp),
5405					     swp_offset(swp));
5406		}
5407	} else
5408		page = find_get_page(mapping, pgoff);
5409#else
5410	page = find_get_page(mapping, pgoff);
5411#endif
5412	return page;
5413}
5414
5415/**
5416 * mem_cgroup_move_account - move account of the page
5417 * @page: the page
5418 * @compound: charge the page as compound or small page
5419 * @from: mem_cgroup which the page is moved from.
5420 * @to:	mem_cgroup which the page is moved to. @from != @to.
5421 *
5422 * The caller must make sure the page is not on the LRU (isolate_lru_page() is useful.)
5423 *
5424 * This function doesn't do "charge" to the new cgroup and doesn't do
5425 * "uncharge" from the old cgroup.
5426 */
5427static int mem_cgroup_move_account(struct page *page,
5428				   bool compound,
5429				   struct mem_cgroup *from,
5430				   struct mem_cgroup *to)
5431{
5432	struct lruvec *from_vec, *to_vec;
5433	struct pglist_data *pgdat;
5434	unsigned long flags;
5435	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5436	int ret;
5437	bool anon;
5438
5439	VM_BUG_ON(from == to);
5440	VM_BUG_ON_PAGE(PageLRU(page), page);
5441	VM_BUG_ON(compound && !PageTransHuge(page));
5442
5443	/*
5444	 * Prevent mem_cgroup_migrate() from looking at
5445	 * page->mem_cgroup of its source page while we change it.
5446	 */
5447	ret = -EBUSY;
5448	if (!trylock_page(page))
5449		goto out;
5450
5451	ret = -EINVAL;
5452	if (page->mem_cgroup != from)
5453		goto out_unlock;
5454
5455	anon = PageAnon(page);
5456
5457	pgdat = page_pgdat(page);
5458	from_vec = mem_cgroup_lruvec(pgdat, from);
5459	to_vec = mem_cgroup_lruvec(pgdat, to);
5460
5461	spin_lock_irqsave(&from->move_lock, flags);
5462
5463	if (!anon && page_mapped(page)) {
5464		__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5465		__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5466	}
5467
5468	/*
5469	 * move_lock is grabbed above and the caller has set from->moving_account,
5470	 * so mod_memcg_page_state will serialize updates to PageDirty.
5471	 * The mapping should therefore be stable for dirty pages.
5472	 */
5473	if (!anon && PageDirty(page)) {
5474		struct address_space *mapping = page_mapping(page);
5475
5476		if (mapping_cap_account_dirty(mapping)) {
5477			__mod_lruvec_state(from_vec, NR_FILE_DIRTY, -nr_pages);
5478			__mod_lruvec_state(to_vec, NR_FILE_DIRTY, nr_pages);
5479		}
5480	}
5481
5482	if (PageWriteback(page)) {
5483		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5484		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5485	}
5486
5487#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5488	if (compound && !list_empty(page_deferred_list(page))) {
5489		spin_lock(&from->deferred_split_queue.split_queue_lock);
5490		list_del_init(page_deferred_list(page));
5491		from->deferred_split_queue.split_queue_len--;
5492		spin_unlock(&from->deferred_split_queue.split_queue_lock);
5493	}
5494#endif
5495	/*
5496	 * It is safe to change page->mem_cgroup here because the page
5497	 * is referenced, charged, and isolated - we can't race with
5498	 * uncharging, charging, migration, or LRU putback.
5499	 */
5500
5501	/* caller should have done css_get */
5502	page->mem_cgroup = to;
5503
5504#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5505	if (compound && list_empty(page_deferred_list(page))) {
5506		spin_lock(&to->deferred_split_queue.split_queue_lock);
5507		list_add_tail(page_deferred_list(page),
5508			      &to->deferred_split_queue.split_queue);
5509		to->deferred_split_queue.split_queue_len++;
5510		spin_unlock(&to->deferred_split_queue.split_queue_lock);
5511	}
5512#endif
5513
5514	spin_unlock_irqrestore(&from->move_lock, flags);
5515
5516	ret = 0;
5517
5518	local_irq_disable();
5519	mem_cgroup_charge_statistics(to, page, compound, nr_pages);
5520	memcg_check_events(to, page);
5521	mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
5522	memcg_check_events(from, page);
5523	local_irq_enable();
5524out_unlock:
5525	unlock_page(page);
5526out:
5527	return ret;
5528}
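
/*
 * Illustrative sketch (an editorial example, not part of the kernel
 * source): the calling convention described above - isolate the page
 * from the LRU first, move the charge, then put it back - as used by
 * the charge-moving page walk below:
 *
 *	if (!isolate_lru_page(page)) {
 *		if (!mem_cgroup_move_account(page, false, mc.from, mc.to)) {
 *			mc.precharge--;
 *			mc.moved_charge++;
 *		}
 *		putback_lru_page(page);
 *	}
 *	put_page(page);	 (drops the reference taken by get_mctgt_type())
 */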
5529
5530/**
5531 * get_mctgt_type - get target type of moving charge
5532 * @vma: the vma the pte to be checked belongs
5533 * @addr: the address corresponding to the pte to be checked
5534 * @ptent: the pte to be checked
5535 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
5536 *
5537 * Returns
5538 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
5539 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5540 *     move charge. If @target is not NULL, the page is stored in target->page
5541 *     with an extra refcount taken (callers should handle it).
5542 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5543 *     target for charge migration. If @target is not NULL, the entry is stored
5544 *     in target->ent.
5545 *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is MEMORY_DEVICE_PRIVATE
5546 *     (so a ZONE_DEVICE page and thus not on the LRU).
5547 *     For now such a page is charged like a regular page would be, as for all
5548 *     intents and purposes it is just special memory taking the place of a
5549 *     regular page.
5550 *
5551 *     See Documentation/vm/hmm.txt and include/linux/hmm.h
5552 *
5553 * Called with pte lock held.
5554 */
5555
5556static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5557		unsigned long addr, pte_t ptent, union mc_target *target)
5558{
5559	struct page *page = NULL;
5560	enum mc_target_type ret = MC_TARGET_NONE;
5561	swp_entry_t ent = { .val = 0 };
5562
5563	if (pte_present(ptent))
5564		page = mc_handle_present_pte(vma, addr, ptent);
5565	else if (is_swap_pte(ptent))
5566		page = mc_handle_swap_pte(vma, ptent, &ent);
5567	else if (pte_none(ptent))
5568		page = mc_handle_file_pte(vma, addr, ptent, &ent);
5569
5570	if (!page && !ent.val)
5571		return ret;
5572	if (page) {
5573		/*
5574		 * Do only loose check w/o serialization.
5575		 * mem_cgroup_move_account() checks the page is valid or
5576		 * not under LRU exclusion.
5577		 */
5578		if (page->mem_cgroup == mc.from) {
5579			ret = MC_TARGET_PAGE;
5580			if (is_device_private_page(page))
5581				ret = MC_TARGET_DEVICE;
5582			if (target)
5583				target->page = page;
5584		}
5585		if (!ret || !target)
5586			put_page(page);
5587	}
5588	/*
5589	 * There is a swap entry and a page doesn't exist or isn't charged.
5590	 * But we cannot move a tail page of a THP.
5591	 */
5592	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5593	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5594		ret = MC_TARGET_SWAP;
5595		if (target)
5596			target->ent = ent;
5597	}
5598	return ret;
5599}
5600
5601#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5602/*
5603 * We don't consider PMD-mapped swapping or file-mapped pages because THP does
5604 * not support them for now.
5605 * Caller should make sure that pmd_trans_huge(pmd) is true.
5606 */
5607static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5608		unsigned long addr, pmd_t pmd, union mc_target *target)
5609{
5610	struct page *page = NULL;
5611	enum mc_target_type ret = MC_TARGET_NONE;
5612
5613	if (unlikely(is_swap_pmd(pmd))) {
5614		VM_BUG_ON(thp_migration_supported() &&
5615				  !is_pmd_migration_entry(pmd));
5616		return ret;
5617	}
5618	page = pmd_page(pmd);
5619	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5620	if (!(mc.flags & MOVE_ANON))
5621		return ret;
5622	if (page->mem_cgroup == mc.from) {
5623		ret = MC_TARGET_PAGE;
5624		if (target) {
5625			get_page(page);
5626			target->page = page;
5627		}
5628	}
5629	return ret;
5630}
5631#else
5632static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5633		unsigned long addr, pmd_t pmd, union mc_target *target)
5634{
5635	return MC_TARGET_NONE;
5636}
5637#endif
5638
5639static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5640					unsigned long addr, unsigned long end,
5641					struct mm_walk *walk)
5642{
5643	struct vm_area_struct *vma = walk->vma;
5644	pte_t *pte;
5645	spinlock_t *ptl;
5646
5647	ptl = pmd_trans_huge_lock(pmd, vma);
5648	if (ptl) {
5649		/*
5650		 * Note there cannot be MC_TARGET_DEVICE for now, as we do not
5651		 * support transparent huge pages with MEMORY_DEVICE_PRIVATE, but
5652		 * this might change.
5653		 */
5654		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5655			mc.precharge += HPAGE_PMD_NR;
5656		spin_unlock(ptl);
5657		return 0;
5658	}
5659
5660	if (pmd_trans_unstable(pmd))
5661		return 0;
5662	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5663	for (; addr != end; pte++, addr += PAGE_SIZE)
5664		if (get_mctgt_type(vma, addr, *pte, NULL))
5665			mc.precharge++;	/* increment precharge temporarily */
5666	pte_unmap_unlock(pte - 1, ptl);
5667	cond_resched();
5668
5669	return 0;
5670}
5671
5672static const struct mm_walk_ops precharge_walk_ops = {
5673	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
5674};
5675
5676static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5677{
5678	unsigned long precharge;
5679
5680	down_read(&mm->mmap_sem);
5681	walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
5682	up_read(&mm->mmap_sem);
5683
5684	precharge = mc.precharge;
5685	mc.precharge = 0;
5686
5687	return precharge;
5688}
5689
5690static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5691{
5692	unsigned long precharge = mem_cgroup_count_precharge(mm);
5693
5694	VM_BUG_ON(mc.moving_task);
5695	mc.moving_task = current;
5696	return mem_cgroup_do_precharge(precharge);
5697}
5698
5699/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5700static void __mem_cgroup_clear_mc(void)
5701{
5702	struct mem_cgroup *from = mc.from;
5703	struct mem_cgroup *to = mc.to;
5704
5705	/* we must uncharge all the leftover precharges from mc.to */
5706	if (mc.precharge) {
5707		cancel_charge(mc.to, mc.precharge);
5708		mc.precharge = 0;
5709	}
5710	/*
5711	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5712	 * we must uncharge here.
5713	 */
5714	if (mc.moved_charge) {
5715		cancel_charge(mc.from, mc.moved_charge);
5716		mc.moved_charge = 0;
5717	}
5718	/* we must fixup refcnts and charges */
5719	if (mc.moved_swap) {
5720		/* uncharge swap account from the old cgroup */
5721		if (!mem_cgroup_is_root(mc.from))
5722			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5723
5724		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5725
5726		/*
5727		 * we charged both to->memory and to->memsw, so we
5728		 * should uncharge to->memory.
5729		 */
5730		if (!mem_cgroup_is_root(mc.to))
5731			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
5732
5733		mem_cgroup_id_get_many(mc.to, mc.moved_swap);
5734		css_put_many(&mc.to->css, mc.moved_swap);
5735
5736		mc.moved_swap = 0;
5737	}
5738	memcg_oom_recover(from);
5739	memcg_oom_recover(to);
5740	wake_up_all(&mc.waitq);
5741}
5742
5743static void mem_cgroup_clear_mc(void)
5744{
5745	struct mm_struct *mm = mc.mm;
5746
5747	/*
5748	 * we must clear moving_task before waking up waiters at the end of
5749	 * task migration.
5750	 */
5751	mc.moving_task = NULL;
5752	__mem_cgroup_clear_mc();
5753	spin_lock(&mc.lock);
5754	mc.from = NULL;
5755	mc.to = NULL;
5756	mc.mm = NULL;
5757	spin_unlock(&mc.lock);
5758
5759	mmput(mm);
5760}
5761
5762static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5763{
5764	struct cgroup_subsys_state *css;
5765	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
5766	struct mem_cgroup *from;
5767	struct task_struct *leader, *p;
5768	struct mm_struct *mm;
5769	unsigned long move_flags;
5770	int ret = 0;
5771
5772	/* charge immigration isn't supported on the default hierarchy */
5773	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5774		return 0;
5775
5776	/*
5777	 * Multi-process migrations only happen on the default hierarchy
5778	 * where charge immigration is not used.  Perform charge
5779	 * immigration if @tset contains a leader and whine if there are
5780	 * multiple.
5781	 */
5782	p = NULL;
5783	cgroup_taskset_for_each_leader(leader, css, tset) {
5784		WARN_ON_ONCE(p);
5785		p = leader;
5786		memcg = mem_cgroup_from_css(css);
5787	}
5788	if (!p)
5789		return 0;
5790
5791	/*
5792	 * We are now committed to this value whatever it is. Changes in this
5793	 * tunable will only affect upcoming migrations, not the current one.
5794	 * So we need to save it, and keep it going.
5795	 */
5796	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
5797	if (!move_flags)
5798		return 0;
5799
5800	from = mem_cgroup_from_task(p);
5801
5802	VM_BUG_ON(from == memcg);
5803
5804	mm = get_task_mm(p);
5805	if (!mm)
5806		return 0;
5807	/* We move charges only when we move an owner of the mm */
5808	if (mm->owner == p) {
5809		VM_BUG_ON(mc.from);
5810		VM_BUG_ON(mc.to);
5811		VM_BUG_ON(mc.precharge);
5812		VM_BUG_ON(mc.moved_charge);
5813		VM_BUG_ON(mc.moved_swap);
5814
5815		spin_lock(&mc.lock);
5816		mc.mm = mm;
5817		mc.from = from;
5818		mc.to = memcg;
5819		mc.flags = move_flags;
5820		spin_unlock(&mc.lock);
5821		/* We set mc.moving_task later */
5822
5823		ret = mem_cgroup_precharge_mc(mm);
5824		if (ret)
5825			mem_cgroup_clear_mc();
5826	} else {
5827		mmput(mm);
5828	}
5829	return ret;
5830}
5831
5832static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5833{
5834	if (mc.to)
5835		mem_cgroup_clear_mc();
5836}
5837
5838static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5839				unsigned long addr, unsigned long end,
5840				struct mm_walk *walk)
5841{
5842	int ret = 0;
5843	struct vm_area_struct *vma = walk->vma;
5844	pte_t *pte;
5845	spinlock_t *ptl;
5846	enum mc_target_type target_type;
5847	union mc_target target;
5848	struct page *page;
5849
5850	ptl = pmd_trans_huge_lock(pmd, vma);
5851	if (ptl) {
5852		if (mc.precharge < HPAGE_PMD_NR) {
5853			spin_unlock(ptl);
5854			return 0;
5855		}
5856		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
5857		if (target_type == MC_TARGET_PAGE) {
5858			page = target.page;
5859			if (!isolate_lru_page(page)) {
5860				if (!mem_cgroup_move_account(page, true,
5861							     mc.from, mc.to)) {
5862					mc.precharge -= HPAGE_PMD_NR;
5863					mc.moved_charge += HPAGE_PMD_NR;
5864				}
5865				putback_lru_page(page);
5866			}
5867			put_page(page);
5868		} else if (target_type == MC_TARGET_DEVICE) {
5869			page = target.page;
5870			if (!mem_cgroup_move_account(page, true,
5871						     mc.from, mc.to)) {
5872				mc.precharge -= HPAGE_PMD_NR;
5873				mc.moved_charge += HPAGE_PMD_NR;
5874			}
5875			put_page(page);
5876		}
5877		spin_unlock(ptl);
5878		return 0;
5879	}
5880
5881	if (pmd_trans_unstable(pmd))
5882		return 0;
5883retry:
5884	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5885	for (; addr != end; addr += PAGE_SIZE) {
5886		pte_t ptent = *(pte++);
5887		bool device = false;
5888		swp_entry_t ent;
5889
5890		if (!mc.precharge)
5891			break;
5892
5893		switch (get_mctgt_type(vma, addr, ptent, &target)) {
5894		case MC_TARGET_DEVICE:
5895			device = true;
5896			/* fall through */
5897		case MC_TARGET_PAGE:
5898			page = target.page;
5899			/*
5900			 * We can have a part of the split pmd here. Moving it
5901			 * can be done but it would be too convoluted so simply
5902			 * ignore such a partial THP and keep it in the original
5903			 * memcg. There should be somebody mapping the head.
5904			 */
5905			if (PageTransCompound(page))
5906				goto put;
5907			if (!device && isolate_lru_page(page))
5908				goto put;
5909			if (!mem_cgroup_move_account(page, false,
5910						mc.from, mc.to)) {
5911				mc.precharge--;
5912				/* we uncharge from mc.from later. */
5913				mc.moved_charge++;
5914			}
5915			if (!device)
5916				putback_lru_page(page);
5917put:			/* get_mctgt_type() gets the page */
5918			put_page(page);
5919			break;
5920		case MC_TARGET_SWAP:
5921			ent = target.ent;
5922			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
5923				mc.precharge--;
5924				/* we fixup refcnts and charges later. */
5925				mc.moved_swap++;
5926			}
5927			break;
5928		default:
5929			break;
5930		}
5931	}
5932	pte_unmap_unlock(pte - 1, ptl);
5933	cond_resched();
5934
5935	if (addr != end) {
5936		/*
5937		 * We have consumed all precharges we got in can_attach().
5938		 * We try charge one by one, but don't do any additional
5939		 * charges to mc.to if we have failed in charge once in attach()
5940		 * phase.
5941		 */
5942		ret = mem_cgroup_do_precharge(1);
5943		if (!ret)
5944			goto retry;
5945	}
5946
5947	return ret;
5948}
5949
5950static const struct mm_walk_ops charge_walk_ops = {
5951	.pmd_entry	= mem_cgroup_move_charge_pte_range,
5952};
5953
5954static void mem_cgroup_move_charge(void)
5955{
5956	lru_add_drain_all();
5957	/*
5958	 * Signal lock_page_memcg() to take the memcg's move_lock
5959	 * while we're moving its pages to another memcg. Then wait
5960	 * for already started RCU-only updates to finish.
5961	 */
5962	atomic_inc(&mc.from->moving_account);
5963	synchronize_rcu();
5964retry:
5965	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
5966		/*
5967		 * Someone who is holding the mmap_sem might be waiting in
5968		 * the waitq. So we cancel all extra charges, wake up all waiters,
5969		 * and retry. Because we cancel precharges, we might not be able
5970		 * to move enough charges, but moving charge is a best-effort
5971		 * feature anyway, so it wouldn't be a big problem.
5972		 */
5973		__mem_cgroup_clear_mc();
5974		cond_resched();
5975		goto retry;
5976	}
5977	/*
5978	 * When we have consumed all precharges and failed in doing
5979	 * additional charge, the page walk just aborts.
5980	 */
5981	walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
5982			NULL);
5983
5984	up_read(&mc.mm->mmap_sem);
5985	atomic_dec(&mc.from->moving_account);
5986}
5987
5988static void mem_cgroup_move_task(void)
5989{
5990	if (mc.to) {
5991		mem_cgroup_move_charge();
5992		mem_cgroup_clear_mc();
5993	}
5994}
5995#else	/* !CONFIG_MMU */
5996static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5997{
5998	return 0;
5999}
6000static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6001{
6002}
6003static void mem_cgroup_move_task(void)
6004{
6005}
6006#endif
6007
6008/*
6009 * Cgroup retains root cgroups across [un]mount cycles, making it necessary
6010 * to verify whether we're attached to the default hierarchy on each mount
6011 * attempt.
6012 */
6013static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
6014{
6015	/*
6016	 * use_hierarchy is forced on the default hierarchy.  cgroup core
6017	 * guarantees that @root doesn't have any children, so turning it
6018	 * on for the root memcg is enough.
6019	 */
6020	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6021		root_mem_cgroup->use_hierarchy = true;
6022	else
6023		root_mem_cgroup->use_hierarchy = false;
6024}
6025
6026static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6027{
6028	if (value == PAGE_COUNTER_MAX)
6029		seq_puts(m, "max\n");
6030	else
6031		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6032
6033	return 0;
6034}
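
/*
 * Illustrative sketch (editorial note, not part of the kernel source):
 * a limit of PAGE_COUNTER_MAX reads back as the literal string "max";
 * any other value is reported in bytes, e.g. a 25600-page limit on a
 * 4K-page system prints "104857600\n".
 */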
6035
6036static u64 memory_current_read(struct cgroup_subsys_state *css,
6037			       struct cftype *cft)
6038{
6039	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6040
6041	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6042}
6043
6044static int memory_min_show(struct seq_file *m, void *v)
6045{
6046	return seq_puts_memcg_tunable(m,
6047		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6048}
6049
6050static ssize_t memory_min_write(struct kernfs_open_file *of,
6051				char *buf, size_t nbytes, loff_t off)
6052{
6053	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6054	unsigned long min;
6055	int err;
6056
6057	buf = strstrip(buf);
6058	err = page_counter_memparse(buf, "max", &min);
6059	if (err)
6060		return err;
6061
6062	page_counter_set_min(&memcg->memory, min);
6063
6064	return nbytes;
6065}
6066
6067static int memory_low_show(struct seq_file *m, void *v)
6068{
6069	return seq_puts_memcg_tunable(m,
6070		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6071}
6072
6073static ssize_t memory_low_write(struct kernfs_open_file *of,
6074				char *buf, size_t nbytes, loff_t off)
6075{
6076	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6077	unsigned long low;
6078	int err;
6079
6080	buf = strstrip(buf);
6081	err = page_counter_memparse(buf, "max", &low);
6082	if (err)
6083		return err;
6084
6085	page_counter_set_low(&memcg->memory, low);
6086
6087	return nbytes;
6088}
6089
6090static int memory_high_show(struct seq_file *m, void *v)
6091{
6092	return seq_puts_memcg_tunable(m, READ_ONCE(mem_cgroup_from_seq(m)->high));
6093}
6094
6095static ssize_t memory_high_write(struct kernfs_open_file *of,
6096				 char *buf, size_t nbytes, loff_t off)
6097{
6098	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6099	unsigned long nr_pages;
6100	unsigned long high;
6101	int err;
6102
6103	buf = strstrip(buf);
6104	err = page_counter_memparse(buf, "max", &high);
6105	if (err)
6106		return err;
6107
6108	memcg->high = high;
6109
6110	nr_pages = page_counter_read(&memcg->memory);
6111	if (nr_pages > high)
6112		try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6113					     GFP_KERNEL, true);
6114
6115	memcg_wb_domain_size_changed(memcg);
6116	return nbytes;
6117}
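
/*
 * Illustrative sketch (an editorial example, not part of the kernel
 * source; the cgroup path is hypothetical): the file accepts
 * human-readable sizes or "max" from userspace, e.g.:
 *
 *	echo 100M > /sys/fs/cgroup/foo/memory.high
 *
 * and the one-shot reclaim pass above immediately tries to bring
 * usage back under the new high boundary.
 */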
6118
6119static int memory_max_show(struct seq_file *m, void *v)
6120{
6121	return seq_puts_memcg_tunable(m,
6122		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6123}
6124
6125static ssize_t memory_max_write(struct kernfs_open_file *of,
6126				char *buf, size_t nbytes, loff_t off)
6127{
6128	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6129	unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
6130	bool drained = false;
6131	unsigned long max;
6132	int err;
6133
6134	buf = strstrip(buf);
6135	err = page_counter_memparse(buf, "max", &max);
6136	if (err)
6137		return err;
6138
6139	xchg(&memcg->memory.max, max);
6140
6141	for (;;) {
6142		unsigned long nr_pages = page_counter_read(&memcg->memory);
6143
6144		if (nr_pages <= max)
6145			break;
6146
6147		if (signal_pending(current)) {
6148			err = -EINTR;
6149			break;
6150		}
6151
6152		if (!drained) {
6153			drain_all_stock(memcg);
6154			drained = true;
6155			continue;
6156		}
6157
6158		if (nr_reclaims) {
6159			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6160							  GFP_KERNEL, true))
6161				nr_reclaims--;
6162			continue;
6163		}
6164
6165		memcg_memory_event(memcg, MEMCG_OOM);
6166		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6167			break;
6168	}
6169
6170	memcg_wb_domain_size_changed(memcg);
6171	return nbytes;
6172}
6173
6174static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6175{
6176	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6177	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6178	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6179	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6180	seq_printf(m, "oom_kill %lu\n",
6181		   atomic_long_read(&events[MEMCG_OOM_KILL]));
6182}
6183
6184static int memory_events_show(struct seq_file *m, void *v)
6185{
6186	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6187
6188	__memory_events_show(m, memcg->memory_events);
6189	return 0;
6190}
6191
6192static int memory_events_local_show(struct seq_file *m, void *v)
6193{
6194	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6195
6196	__memory_events_show(m, memcg->memory_events_local);
6197	return 0;
6198}
6199
6200static int memory_stat_show(struct seq_file *m, void *v)
6201{
6202	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6203	char *buf;
6204
6205	buf = memory_stat_format(memcg);
6206	if (!buf)
6207		return -ENOMEM;
6208	seq_puts(m, buf);
6209	kfree(buf);
6210	return 0;
6211}
6212
6213static int memory_oom_group_show(struct seq_file *m, void *v)
6214{
6215	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6216
6217	seq_printf(m, "%d\n", memcg->oom_group);
6218
6219	return 0;
6220}
6221
6222static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6223				      char *buf, size_t nbytes, loff_t off)
6224{
6225	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6226	int ret, oom_group;
6227
6228	buf = strstrip(buf);
6229	if (!buf)
6230		return -EINVAL;
6231
6232	ret = kstrtoint(buf, 0, &oom_group);
6233	if (ret)
6234		return ret;
6235
6236	if (oom_group != 0 && oom_group != 1)
6237		return -EINVAL;
6238
6239	memcg->oom_group = oom_group;
6240
6241	return nbytes;
6242}
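
/*
 * Illustrative sketch (an editorial example, not part of the kernel
 * source; the cgroup path is hypothetical): only "0" and "1" are
 * accepted, so group OOM handling is toggled with e.g.:
 *
 *	echo 1 > /sys/fs/cgroup/foo/memory.oom.group
 */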
6243
6244static struct cftype memory_files[] = {
6245	{
6246		.name = "current",
6247		.flags = CFTYPE_NOT_ON_ROOT,
6248		.read_u64 = memory_current_read,
6249	},
6250	{
6251		.name = "min",
6252		.flags = CFTYPE_NOT_ON_ROOT,
6253		.seq_show = memory_min_show,
6254		.write = memory_min_write,
6255	},
6256	{
6257		.name = "low",
6258		.flags = CFTYPE_NOT_ON_ROOT,
6259		.seq_show = memory_low_show,
6260		.write = memory_low_write,
6261	},
6262	{
6263		.name = "high",
6264		.flags = CFTYPE_NOT_ON_ROOT,
6265		.seq_show = memory_high_show,
6266		.write = memory_high_write,
6267	},
6268	{
6269		.name = "max",
6270		.flags = CFTYPE_NOT_ON_ROOT,
6271		.seq_show = memory_max_show,
6272		.write = memory_max_write,
6273	},
6274	{
6275		.name = "events",
6276		.flags = CFTYPE_NOT_ON_ROOT,
6277		.file_offset = offsetof(struct mem_cgroup, events_file),
6278		.seq_show = memory_events_show,
6279	},
6280	{
6281		.name = "events.local",
6282		.flags = CFTYPE_NOT_ON_ROOT,
6283		.file_offset = offsetof(struct mem_cgroup, events_local_file),
6284		.seq_show = memory_events_local_show,
6285	},
6286	{
6287		.name = "stat",
6288		.flags = CFTYPE_NOT_ON_ROOT,
6289		.seq_show = memory_stat_show,
6290	},
6291	{
6292		.name = "oom.group",
6293		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6294		.seq_show = memory_oom_group_show,
6295		.write = memory_oom_group_write,
6296	},
6297	{ }	/* terminate */
6298};
6299
6300struct cgroup_subsys memory_cgrp_subsys = {
6301	.css_alloc = mem_cgroup_css_alloc,
6302	.css_online = mem_cgroup_css_online,
6303	.css_offline = mem_cgroup_css_offline,
6304	.css_released = mem_cgroup_css_released,
6305	.css_free = mem_cgroup_css_free,
6306	.css_reset = mem_cgroup_css_reset,
6307	.can_attach = mem_cgroup_can_attach,
6308	.cancel_attach = mem_cgroup_cancel_attach,
6309	.post_attach = mem_cgroup_move_task,
6310	.bind = mem_cgroup_bind,
6311	.dfl_cftypes = memory_files,
6312	.legacy_cftypes = mem_cgroup_legacy_files,
6313	.early_init = 0,
6314};
6315
6316/**
6317 * mem_cgroup_protected - check if memory consumption is in the normal range
6318 * @root: the top ancestor of the sub-tree being checked
6319 * @memcg: the memory cgroup to check
6320 *
6321 * WARNING: This function is not stateless! It can only be used as part
6322 *          of a top-down tree iteration, not for isolated queries.
6323 *
6324 * Returns one of the following:
6325 *   MEMCG_PROT_NONE: cgroup memory is not protected
6326 *   MEMCG_PROT_LOW: cgroup memory is protected as long there is
6327 *     an unprotected supply of reclaimable memory from other cgroups.
6328 *   MEMCG_PROT_MIN: cgroup memory is protected
6329 *
6330 * @root is exclusive; it is never protected when looked at directly
6331 *
6332 * To provide a proper hierarchical behavior, effective memory.min/low values
6333 * are used. Below is the description of how effective memory.low is calculated.
6334 * The effective memory.min value is calculated in the same way.
6335 *
6336 * Effective memory.low is always less than or equal to the original memory.low.
6337 * If there is no memory.low overcommitment (which is always true for
6338 * top-level memory cgroups), these two values are equal.
6339 * Otherwise, it's a part of parent's effective memory.low,
6340 * calculated as the cgroup's memory.low usage divided by the sum of the
6341 * siblings' memory.low usages, where memory.low usage is the size of actually
6342 * protected memory.
6343 *
6344 *                                             low_usage
6345 * elow = min( memory.low, parent->elow * ------------------ ),
6346 *                                        siblings_low_usage
6347 *
6348 *             | memory.current, if memory.current < memory.low
6349 * low_usage = |
6350 *	       | 0, otherwise.
6351 *
6352 *
6353 * Such definition of the effective memory.low provides the expected
6354 * hierarchical behavior: parent's memory.low value is limiting
6355 * children, unprotected memory is reclaimed first, and cgroups
6356 * that are not using their guarantee do not affect the actual memory
6357 * distribution.
6358 *
6359 * For example, if there are memcgs A, A/B, A/C, A/D and A/E:
6360 *
6361 *     A      A/memory.low = 2G, A/memory.current = 6G
6362 *    //\\
6363 *   BC  DE   B/memory.low = 3G  B/memory.current = 2G
6364 *            C/memory.low = 1G  C/memory.current = 2G
6365 *            D/memory.low = 0   D/memory.current = 2G
6366 *            E/memory.low = 10G E/memory.current = 0
6367 *
6368 * and the memory pressure is applied, the following memory distribution
6369 * is expected (approximately):
6370 *
6371 *     A/memory.current = 2G
6372 *
6373 *     B/memory.current = 1.3G
6374 *     C/memory.current = 0.6G
6375 *     D/memory.current = 0
6376 *     E/memory.current = 0
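 *
 * Working the formula through this example: low_usage is
 * min(memory.current, memory.low), so B contributes 2G, C 1G, and
 * D and E 0, giving siblings_low_usage = 3G. With A's elow = 2G:
 *
 *     B: elow = min(3G, 2G * 2G/3G) ~= 1.3G
 *     C: elow = min(1G, 2G * 1G/3G) ~= 0.6G
 *
 * which is exactly the distribution shown above.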
6377 *
6378 * These calculations require constant tracking of the actual low usages
6379 * (see propagate_protected_usage()), as well as recursive calculation of
6380 * effective memory.low values. But as we call the mem_cgroup_protected()
6381 * path for each memory cgroup top-down during reclaim,
6382 * it's possible to optimize this part and save the calculated elow
6383 * for the next usage. This part is intentionally racy, but it's ok,
6384 * as memory.low is a best-effort mechanism.
6385 */
6386enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
6387						struct mem_cgroup *memcg)
6388{
6389	struct mem_cgroup *parent;
6390	unsigned long emin, parent_emin;
6391	unsigned long elow, parent_elow;
6392	unsigned long usage;
6393
6394	if (mem_cgroup_disabled())
6395		return MEMCG_PROT_NONE;
6396
6397	if (!root)
6398		root = root_mem_cgroup;
6399	if (memcg == root)
6400		return MEMCG_PROT_NONE;
6401
6402	usage = page_counter_read(&memcg->memory);
6403	if (!usage)
6404		return MEMCG_PROT_NONE;
6405
6406	emin = memcg->memory.min;
6407	elow = memcg->memory.low;
6408
6409	parent = parent_mem_cgroup(memcg);
6410	/* No parent means a non-hierarchical mode on v1 memcg */
6411	if (!parent)
6412		return MEMCG_PROT_NONE;
6413
6414	if (parent == root)
6415		goto exit;
6416
6417	parent_emin = READ_ONCE(parent->memory.emin);
6418	emin = min(emin, parent_emin);
6419	if (emin && parent_emin) {
6420		unsigned long min_usage, siblings_min_usage;
6421
6422		min_usage = min(usage, memcg->memory.min);
6423		siblings_min_usage = atomic_long_read(
6424			&parent->memory.children_min_usage);
6425
6426		if (min_usage && siblings_min_usage)
6427			emin = min(emin, parent_emin * min_usage /
6428				   siblings_min_usage);
6429	}
6430
6431	parent_elow = READ_ONCE(parent->memory.elow);
6432	elow = min(elow, parent_elow);
6433	if (elow && parent_elow) {
6434		unsigned long low_usage, siblings_low_usage;
6435
6436		low_usage = min(usage, memcg->memory.low);
6437		siblings_low_usage = atomic_long_read(
6438			&parent->memory.children_low_usage);
6439
6440		if (low_usage && siblings_low_usage)
6441			elow = min(elow, parent_elow * low_usage /
6442				   siblings_low_usage);
6443	}
6444
6445exit:
6446	memcg->memory.emin = emin;
6447	memcg->memory.elow = elow;
6448
6449	if (usage <= emin)
6450		return MEMCG_PROT_MIN;
6451	else if (usage <= elow)
6452		return MEMCG_PROT_LOW;
6453	else
6454		return MEMCG_PROT_NONE;
6455}
6456
6457/**
6458 * mem_cgroup_try_charge - try charging a page
6459 * @page: page to charge
6460 * @mm: mm context of the victim
6461 * @gfp_mask: reclaim mode
6462 * @memcgp: charged memcg return
6463 * @compound: charge the page as compound or small page
6464 *
6465 * Try to charge @page to the memcg that @mm belongs to, reclaiming
6466 * pages according to @gfp_mask if necessary.
6467 *
6468 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
6469 * Otherwise, an error code is returned.
6470 *
6471 * After page->mapping has been set up, the caller must finalize the
6472 * charge with mem_cgroup_commit_charge().  Or abort the transaction
6473 * with mem_cgroup_cancel_charge() in case page instantiation fails.
6474 */
6475int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
6476			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
6477			  bool compound)
6478{
6479	struct mem_cgroup *memcg = NULL;
6480	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
6481	int ret = 0;
6482
6483	if (mem_cgroup_disabled())
6484		goto out;
6485
6486	if (PageSwapCache(page)) {
6487		/*
6488		 * Every swap fault against a single page tries to charge the
6489		 * page, bail as early as possible.  shmem_unuse() encounters
6490		 * already charged pages, too.  The USED bit is protected by
6491		 * the page lock, which serializes swap cache removal, which
6492		 * in turn serializes uncharging.
6493		 */
6494		VM_BUG_ON_PAGE(!PageLocked(page), page);
6495		if (compound_head(page)->mem_cgroup)
6496			goto out;
6497
6498		if (do_swap_account) {
6499			swp_entry_t ent = { .val = page_private(page), };
6500			unsigned short id = lookup_swap_cgroup_id(ent);
6501
6502			rcu_read_lock();
6503			memcg = mem_cgroup_from_id(id);
6504			if (memcg && !css_tryget_online(&memcg->css))
6505				memcg = NULL;
6506			rcu_read_unlock();
6507		}
6508	}
6509
6510	if (!memcg)
6511		memcg = get_mem_cgroup_from_mm(mm);
6512
6513	ret = try_charge(memcg, gfp_mask, nr_pages);
6514
6515	css_put(&memcg->css);
6516out:
6517	*memcgp = memcg;
6518	return ret;
6519}
6520
6521int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
6522			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
6523			  bool compound)
6524{
6525	struct mem_cgroup *memcg;
6526	int ret;
6527
6528	ret = mem_cgroup_try_charge(page, mm, gfp_mask, memcgp, compound);
6529	memcg = *memcgp;
6530	mem_cgroup_throttle_swaprate(memcg, page_to_nid(page), gfp_mask);
6531	return ret;
6532}
6533
6534/**
6535 * mem_cgroup_commit_charge - commit a page charge
6536 * @page: page to charge
6537 * @memcg: memcg to charge the page to
6538 * @lrucare: page might be on LRU already
6539 * @compound: charge the page as compound or small page
6540 *
6541 * Finalize a charge transaction started by mem_cgroup_try_charge(),
6542 * after page->mapping has been set up.  This must happen atomically
6543 * as part of the page instantiation, i.e. under the page table lock
6544 * for anonymous pages, under the page lock for page and swap cache.
6545 *
6546 * In addition, the page must not be on the LRU during the commit, to
6547 * prevent racing with task migration.  If it might be, use @lrucare.
6548 *
6549 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
6550 */
6551void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
6552			      bool lrucare, bool compound)
6553{
6554	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
6555
6556	VM_BUG_ON_PAGE(!page->mapping, page);
6557	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
6558
6559	if (mem_cgroup_disabled())
6560		return;
6561	/*
6562	 * Swap faults will attempt to charge the same page multiple
6563	 * times.  But reuse_swap_page() might have removed the page
6564	 * from swapcache already, so we can't check PageSwapCache().
6565	 */
6566	if (!memcg)
6567		return;
6568
6569	commit_charge(page, memcg, lrucare);
6570
6571	local_irq_disable();
6572	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
6573	memcg_check_events(memcg, page);
6574	local_irq_enable();
6575
6576	if (do_memsw_account() && PageSwapCache(page)) {
6577		swp_entry_t entry = { .val = page_private(page) };
6578		/*
6579		 * The swap entry might not get freed for a long time,
6580		 * let's not wait for it.  The page already received a
6581		 * memory+swap charge, drop the swap entry duplicate.
6582		 */
6583		mem_cgroup_uncharge_swap(entry, nr_pages);
6584	}
6585}
6586
6587/**
6588 * mem_cgroup_cancel_charge - cancel a page charge
6589 * @page: page to charge
6590 * @memcg: memcg to charge the page to
6591 * @compound: charge the page as compound or small page
6592 *
6593 * Cancel a charge transaction started by mem_cgroup_try_charge().
6594 */
6595void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
6596		bool compound)
6597{
6598	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
6599
6600	if (mem_cgroup_disabled())
6601		return;
6602	/*
6603	 * Swap faults will attempt to charge the same page multiple
6604	 * times.  But reuse_swap_page() might have removed the page
6605	 * from swapcache already, so we can't check PageSwapCache().
6606	 */
6607	if (!memcg)
6608		return;
6609
6610	cancel_charge(memcg, nr_pages);
6611}
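
/*
 * Illustrative sketch (an editorial example, not part of the kernel
 * source; the instantiation_failed condition and error handling are
 * schematic placeholders): the try/commit/cancel transaction described
 * in the comments above, wrapped around page instantiation:
 *
 *	ret = mem_cgroup_try_charge(page, mm, gfp_mask, &memcg, false);
 *	if (ret)
 *		return ret;
 *	... set up page->mapping, install the page ...
 *	if (instantiation_failed) {
 *		mem_cgroup_cancel_charge(page, memcg, false);
 *		return -ENOMEM;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 */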
6612
6613struct uncharge_gather {
6614	struct mem_cgroup *memcg;
6615	unsigned long pgpgout;
6616	unsigned long nr_anon;
6617	unsigned long nr_file;
6618	unsigned long nr_kmem;
6619	unsigned long nr_huge;
6620	unsigned long nr_shmem;
6621	struct page *dummy_page;
6622};
6623
6624static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6625{
6626	memset(ug, 0, sizeof(*ug));
6627}
6628
6629static void uncharge_batch(const struct uncharge_gather *ug)
6630{
6631	unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem;
6632	unsigned long flags;
6633
6634	if (!mem_cgroup_is_root(ug->memcg)) {
6635		page_counter_uncharge(&ug->memcg->memory, nr_pages);
6636		if (do_memsw_account())
6637			page_counter_uncharge(&ug->memcg->memsw, nr_pages);
6638		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
6639			page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
6640		memcg_oom_recover(ug->memcg);
6641	}
6642
6643	local_irq_save(flags);
6644	__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
6645	__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
6646	__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
6647	__mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
6648	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6649	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, nr_pages);
6650	memcg_check_events(ug->memcg, ug->dummy_page);
6651	local_irq_restore(flags);
6652
6653	if (!mem_cgroup_is_root(ug->memcg))
6654		css_put_many(&ug->memcg->css, nr_pages);
6655}
6656
6657static void uncharge_page(struct page *page, struct uncharge_gather *ug)
6658{
6659	VM_BUG_ON_PAGE(PageLRU(page), page);
6660	VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) &&
6661			!PageHWPoison(page), page);
6662
6663	if (!page->mem_cgroup)
6664		return;
6665
6666	/*
6667	 * Nobody should be changing or seriously looking at
6668	 * page->mem_cgroup at this point; we have fully
6669	 * exclusive access to the page.
6670	 */
6671
6672	if (ug->memcg != page->mem_cgroup) {
6673		if (ug->memcg) {
6674			uncharge_batch(ug);
6675			uncharge_gather_clear(ug);
6676		}
6677		ug->memcg = page->mem_cgroup;
6678	}
6679
6680	if (!PageKmemcg(page)) {
6681		unsigned int nr_pages = 1;
6682
6683		if (PageTransHuge(page)) {
6684			nr_pages = compound_nr(page);
6685			ug->nr_huge += nr_pages;
6686		}
6687		if (PageAnon(page))
6688			ug->nr_anon += nr_pages;
6689		else {
6690			ug->nr_file += nr_pages;
6691			if (PageSwapBacked(page))
6692				ug->nr_shmem += nr_pages;
6693		}
6694		ug->pgpgout++;
6695	} else {
6696		ug->nr_kmem += compound_nr(page);
6697		__ClearPageKmemcg(page);
6698	}
6699
6700	ug->dummy_page = page;
6701	page->mem_cgroup = NULL;
6702}
6703
6704static void uncharge_list(struct list_head *page_list)
6705{
6706	struct uncharge_gather ug;
6707	struct list_head *next;
6708
6709	uncharge_gather_clear(&ug);
6710
6711	/*
6712	 * Note that the list can be a single page->lru; hence the
6713	 * do-while loop instead of a simple list_for_each_entry().
6714	 */
6715	next = page_list->next;
6716	do {
6717		struct page *page;
6718
6719		page = list_entry(next, struct page, lru);
6720		next = page->lru.next;
6721
6722		uncharge_page(page, &ug);
6723	} while (next != page_list);
6724
6725	if (ug.memcg)
6726		uncharge_batch(&ug);
6727}
6728
6729/**
6730 * mem_cgroup_uncharge - uncharge a page
6731 * @page: page to uncharge
6732 *
6733 * Uncharge a page previously charged with mem_cgroup_try_charge() and
6734 * mem_cgroup_commit_charge().
6735 */
6736void mem_cgroup_uncharge(struct page *page)
6737{
6738	struct uncharge_gather ug;
6739
6740	if (mem_cgroup_disabled())
6741		return;
6742
6743	/* Don't touch page->lru of any random page, pre-check: */
6744	if (!page->mem_cgroup)
6745		return;
6746
6747	uncharge_gather_clear(&ug);
6748	uncharge_page(page, &ug);
6749	uncharge_batch(&ug);
6750}
6751
6752/**
6753 * mem_cgroup_uncharge_list - uncharge a list of pages
6754 * @page_list: list of pages to uncharge
6755 *
6756 * Uncharge a list of pages previously charged with
6757 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
6758 */
6759void mem_cgroup_uncharge_list(struct list_head *page_list)
6760{
6761	if (mem_cgroup_disabled())
6762		return;
6763
6764	if (!list_empty(page_list))
6765		uncharge_list(page_list);
6766}
6767
6768/**
6769 * mem_cgroup_migrate - charge a page's replacement
6770 * @oldpage: currently circulating page
6771 * @newpage: replacement page
6772 *
6773 * Charge @newpage as a replacement page for @oldpage. @oldpage will
6774 * be uncharged upon free.
6775 *
6776 * Both pages must be locked, @newpage->mapping must be set up.
6777 */
6778void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
6779{
6780	struct mem_cgroup *memcg;
6781	unsigned int nr_pages;
6782	bool compound;
6783	unsigned long flags;
6784
6785	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
6786	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
6787	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
6788	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
6789		       newpage);
6790
6791	if (mem_cgroup_disabled())
6792		return;
6793
6794	/* Page cache replacement: new page already charged? */
6795	if (newpage->mem_cgroup)
6796		return;
6797
6798	/* Swapcache readahead pages can get replaced before being charged */
6799	memcg = oldpage->mem_cgroup;
6800	if (!memcg)
6801		return;
6802
6803	/* Force-charge the new page. The old one will be freed soon */
6804	compound = PageTransHuge(newpage);
6805	nr_pages = compound ? hpage_nr_pages(newpage) : 1;
6806
6807	page_counter_charge(&memcg->memory, nr_pages);
6808	if (do_memsw_account())
6809		page_counter_charge(&memcg->memsw, nr_pages);
6810	css_get_many(&memcg->css, nr_pages);
6811
6812	commit_charge(newpage, memcg, false);
6813
6814	local_irq_save(flags);
6815	mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
6816	memcg_check_events(memcg, newpage);
6817	local_irq_restore(flags);
6818}
6819
6820DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
6821EXPORT_SYMBOL(memcg_sockets_enabled_key);
6822
6823void mem_cgroup_sk_alloc(struct sock *sk)
6824{
6825	struct mem_cgroup *memcg;
6826
6827	if (!mem_cgroup_sockets_enabled)
6828		return;
6829
6830	/*
6831	 * Socket cloning can throw us here with sk_memcg already
6832	 * filled. It won't, however, necessarily happen from
6833	 * process context, so testing the current task's memcg
6834	 * against the root memcg won't help us in this case.
6835	 *
6836	 * Respecting the original socket's memcg is a better
6837	 * decision in this case.
6838	 */
6839	if (sk->sk_memcg) {
6840		css_get(&sk->sk_memcg->css);
6841		return;
6842	}
6843
6844	rcu_read_lock();
6845	memcg = mem_cgroup_from_task(current);
6846	if (memcg == root_mem_cgroup)
6847		goto out;
6848	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
6849		goto out;
6850	if (css_tryget_online(&memcg->css))
6851		sk->sk_memcg = memcg;
6852out:
6853	rcu_read_unlock();
6854}
6855
6856void mem_cgroup_sk_free(struct sock *sk)
6857{
6858	if (sk->sk_memcg)
6859		css_put(&sk->sk_memcg->css);
6860}
6861
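/*
 * Illustrative sketch of the socket lifetime pairing: the example_*
 * helpers are hypothetical stand-ins for the real allocation and
 * teardown paths in net/core/sock.c.  mem_cgroup_sk_alloc() also runs
 * for clones created in softirq context, which is why it prefers an
 * already populated sk->sk_memcg over the current task's memcg.
 */
#if 0	/* example only, never compiled */
static struct sock *example_socket_create(void)
{
	struct sock *sk = example_alloc_sock();	/* hypothetical */

	mem_cgroup_sk_alloc(sk);
	return sk;
}

static void example_socket_destroy(struct sock *sk)
{
	mem_cgroup_sk_free(sk);
	example_free_sock(sk);			/* hypothetical */
}
#endif
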
6862/**
6863 * mem_cgroup_charge_skmem - charge socket memory
6864 * @memcg: memcg to charge
6865 * @nr_pages: number of pages to charge
6866 *
6867 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
6868 * @memcg's configured limit, %false if the charge had to be forced.
6869 */
6870bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
6871{
6872	gfp_t gfp_mask = GFP_KERNEL;
6873
6874	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6875		struct page_counter *fail;
6876
6877		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
6878			memcg->tcpmem_pressure = 0;
6879			return true;
6880		}
6881		page_counter_charge(&memcg->tcpmem, nr_pages);
6882		memcg->tcpmem_pressure = 1;
6883		return false;
6884	}
6885
6886	/* Don't block in the packet receive path */
6887	if (in_softirq())
6888		gfp_mask = GFP_NOWAIT;
6889
6890	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
6891
6892	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
6893		return true;
6894
6895	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
6896	return false;
6897}
6898
6899/**
6900 * mem_cgroup_uncharge_skmem - uncharge socket memory
6901 * @memcg: memcg to uncharge
6902 * @nr_pages: number of pages to uncharge
6903 */
6904void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
6905{
6906	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6907		page_counter_uncharge(&memcg->tcpmem, nr_pages);
6908		return;
6909	}
6910
6911	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
6912
6913	refill_stock(memcg, nr_pages);
6914}
6915
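/*
 * Illustrative sketch, modeled loosely on __sk_mem_raise_allocated()
 * in net/core/sock.c: a %false return from mem_cgroup_charge_skmem()
 * means the charge was forced, so the caller backs it out again and
 * treats the socket as being under memory pressure.
 * example_account_sk_pages() itself is hypothetical.
 */
#if 0	/* example only, never compiled */
static bool example_account_sk_pages(struct sock *sk, unsigned int nr_pages)
{
	if (!mem_cgroup_sockets_enabled || !sk->sk_memcg)
		return true;

	if (mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages))
		return true;

	/* Forced charge: undo it and report the pressure. */
	mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
	return false;
}
#endif
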
6916static int __init cgroup_memory(char *s)
6917{
6918	char *token;
6919
6920	while ((token = strsep(&s, ",")) != NULL) {
6921		if (!*token)
6922			continue;
6923		if (!strcmp(token, "nosocket"))
6924			cgroup_memory_nosocket = true;
6925		if (!strcmp(token, "nokmem"))
6926			cgroup_memory_nokmem = true;
6927	}
6928	return 0;
6929}
6930__setup("cgroup.memory=", cgroup_memory);
6931
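/*
 * Example (kernel command line):
 *
 *	cgroup.memory=nosocket,nokmem
 *
 * disables both socket memory accounting and kernel memory accounting
 * at boot; either token can also be given on its own.
 */
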
6932/*
6933 * subsys_initcall() for memory controller.
6934 *
6935 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
6936 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
6937 * basically everything that doesn't depend on a specific mem_cgroup structure
6938 * should be initialized from here.
6939 */
6940static int __init mem_cgroup_init(void)
6941{
6942	int cpu, node;
6943
6944#ifdef CONFIG_MEMCG_KMEM
6945	/*
6946	 * Kmem cache creation is mostly done with the slab_mutex held,
6947	 * so use a workqueue with limited concurrency to avoid stalling
6948	 * all worker threads in case lots of cgroups are created and
6949	 * destroyed simultaneously.
6950	 */
6951	memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
6952	BUG_ON(!memcg_kmem_cache_wq);
6953#endif
6954
6955	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
6956				  memcg_hotplug_cpu_dead);
6957
6958	for_each_possible_cpu(cpu)
6959		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
6960			  drain_local_stock);
6961
6962	for_each_node(node) {
6963		struct mem_cgroup_tree_per_node *rtpn;
6964
6965		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
6966				    node_online(node) ? node : NUMA_NO_NODE);
6967
6968		rtpn->rb_root = RB_ROOT;
6969		rtpn->rb_rightmost = NULL;
6970		spin_lock_init(&rtpn->lock);
6971		soft_limit_tree.rb_tree_per_node[node] = rtpn;
6972	}
6973
6974	return 0;
6975}
6976subsys_initcall(mem_cgroup_init);
6977
6978#ifdef CONFIG_MEMCG_SWAP
6979static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
6980{
6981	while (!refcount_inc_not_zero(&memcg->id.ref)) {
6982		/*
6983		 * The root cgroup cannot be destroyed, so its refcount must
6984		 * always be >= 1.
6985		 */
6986		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
6987			VM_BUG_ON(1);
6988			break;
6989		}
6990		memcg = parent_mem_cgroup(memcg);
6991		if (!memcg)
6992			memcg = root_mem_cgroup;
6993	}
6994	return memcg;
6995}
6996
6997/**
6998 * mem_cgroup_swapout - transfer a memsw charge to swap
6999 * @page: page whose memsw charge to transfer
7000 * @entry: swap entry to move the charge to
7001 *
7002 * Transfer the memsw charge of @page to @entry.
7003 */
7004void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
7005{
7006	struct mem_cgroup *memcg, *swap_memcg;
7007	unsigned int nr_entries;
7008	unsigned short oldid;
7009
7010	VM_BUG_ON_PAGE(PageLRU(page), page);
7011	VM_BUG_ON_PAGE(page_count(page), page);
7012
7013	if (!do_memsw_account())
7014		return;
7015
7016	memcg = page->mem_cgroup;
7017
7018	/* Readahead page, never charged */
7019	if (!memcg)
7020		return;
7021
7022	/*
7023	 * In case the memcg owning these pages has been offlined and doesn't
7024	 * have an ID allocated to it anymore, charge the closest online
7025	 * ancestor for the swap instead and transfer the memory+swap charge.
7026	 */
7027	swap_memcg = mem_cgroup_id_get_online(memcg);
7028	nr_entries = hpage_nr_pages(page);
7029	/* Get references for the tail pages, too */
7030	if (nr_entries > 1)
7031		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7032	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7033				   nr_entries);
7034	VM_BUG_ON_PAGE(oldid, page);
7035	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7036
7037	page->mem_cgroup = NULL;
7038
7039	if (!mem_cgroup_is_root(memcg))
7040		page_counter_uncharge(&memcg->memory, nr_entries);
7041
7042	if (memcg != swap_memcg) {
7043		if (!mem_cgroup_is_root(swap_memcg))
7044			page_counter_charge(&swap_memcg->memsw, nr_entries);
7045		page_counter_uncharge(&memcg->memsw, nr_entries);
7046	}
7047
7048	/*
7049	 * Interrupts should be disabled here because the caller holds the
7050	 * i_pages lock which is taken with interrupts-off. It is
7051	 * important here to have the interrupts disabled because it is the
7052	 * only synchronisation we have for updating the per-CPU variables.
7053	 */
7054	VM_BUG_ON(!irqs_disabled());
7055	mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page),
7056				     -nr_entries);
7057	memcg_check_events(memcg, page);
7058
7059	if (!mem_cgroup_is_root(memcg))
7060		css_put_many(&memcg->css, nr_entries);
7061}
7062
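/*
 * Illustrative sketch of the reclaim-side caller, modeled loosely on
 * __remove_mapping() in mm/vmscan.c: the page is locked, off the LRU,
 * and the mapping's i_pages lock is held with interrupts disabled when
 * the memsw charge is handed over to the swap entry.
 * example_free_swapcache_page() is a hypothetical wrapper.
 */
#if 0	/* example only, never compiled */
static void example_free_swapcache_page(struct page *page)
{
	swp_entry_t swap = { .val = page_private(page) };

	mem_cgroup_swapout(page, swap);
	__delete_from_swap_cache(page);
}
#endif
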
7063/**
7064 * mem_cgroup_try_charge_swap - try charging swap space for a page
7065 * @page: page being added to swap
7066 * @entry: swap entry to charge
7067 *
7068 * Try to charge @page's memcg for the swap space at @entry.
7069 *
7070 * Returns 0 on success, -ENOMEM on failure.
7071 */
7072int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
7073{
7074	unsigned int nr_pages = hpage_nr_pages(page);
7075	struct page_counter *counter;
7076	struct mem_cgroup *memcg;
7077	unsigned short oldid;
7078
7079	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
7080		return 0;
7081
7082	memcg = page->mem_cgroup;
7083
7084	/* Readahead page, never charged */
7085	if (!memcg)
7086		return 0;
7087
7088	if (!entry.val) {
7089		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7090		return 0;
7091	}
7092
7093	memcg = mem_cgroup_id_get_online(memcg);
7094
7095	if (!mem_cgroup_is_root(memcg) &&
7096	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7097		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7098		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7099		mem_cgroup_id_put(memcg);
7100		return -ENOMEM;
7101	}
7102
7103	/* Get references for the tail pages, too */
7104	if (nr_pages > 1)
7105		mem_cgroup_id_get_many(memcg, nr_pages - 1);
7106	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7107	VM_BUG_ON_PAGE(oldid, page);
7108	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7109
7110	return 0;
7111}
7112
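/*
 * Illustrative sketch, modeled loosely on get_swap_page() in
 * mm/swap_slots.c: if the memcg swap limit is hit, the freshly
 * allocated slot is given back and the allocation fails, keeping the
 * page in memory.  example_alloc_swap_slot() is hypothetical.
 */
#if 0	/* example only, never compiled */
static swp_entry_t example_get_swap_slot(struct page *page)
{
	swp_entry_t entry = example_alloc_swap_slot(page); /* hypothetical */

	if (!entry.val)
		return entry;

	if (mem_cgroup_try_charge_swap(page, entry)) {
		put_swap_page(page, entry);
		entry.val = 0;
	}
	return entry;
}
#endif
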
7113/**
7114 * mem_cgroup_uncharge_swap - uncharge swap space
7115 * @entry: swap entry to uncharge
7116 * @nr_pages: the amount of swap space to uncharge
7117 */
7118void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7119{
7120	struct mem_cgroup *memcg;
7121	unsigned short id;
7122
7123	if (!do_swap_account)
7124		return;
7125
7126	id = swap_cgroup_record(entry, 0, nr_pages);
7127	rcu_read_lock();
7128	memcg = mem_cgroup_from_id(id);
7129	if (memcg) {
7130		if (!mem_cgroup_is_root(memcg)) {
7131			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7132				page_counter_uncharge(&memcg->swap, nr_pages);
7133			else
7134				page_counter_uncharge(&memcg->memsw, nr_pages);
7135		}
7136		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7137		mem_cgroup_id_put_many(memcg, nr_pages);
7138	}
7139	rcu_read_unlock();
7140}
7141
7142long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7143{
7144	long nr_swap_pages = get_nr_swap_pages();
7145
7146	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7147		return nr_swap_pages;
7148	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7149		nr_swap_pages = min_t(long, nr_swap_pages,
7150				      READ_ONCE(memcg->swap.max) -
7151				      page_counter_read(&memcg->swap));
7152	return nr_swap_pages;
7153}
7154
7155bool mem_cgroup_swap_full(struct page *page)
7156{
7157	struct mem_cgroup *memcg;
7158
7159	VM_BUG_ON_PAGE(!PageLocked(page), page);
7160
7161	if (vm_swap_full())
7162		return true;
7163	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7164		return false;
7165
7166	memcg = page->mem_cgroup;
7167	if (!memcg)
7168		return false;
7169
7170	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7171		if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.max)
7172			return true;
7173
7174	return false;
7175}
7176
7177/* Remember the boot option */
7178#ifdef CONFIG_MEMCG_SWAP_ENABLED
7179static int really_do_swap_account __initdata = 1;
7180#else
7181static int really_do_swap_account __initdata;
7182#endif
7183
7184static int __init enable_swap_account(char *s)
7185{
7186	if (!strcmp(s, "1"))
7187		really_do_swap_account = 1;
7188	else if (!strcmp(s, "0"))
7189		really_do_swap_account = 0;
7190	return 1;
7191}
7192__setup("swapaccount=", enable_swap_account);
7193
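/*
 * Example (kernel command line):
 *
 *	swapaccount=0
 *
 * disables swap accounting at boot even if CONFIG_MEMCG_SWAP_ENABLED
 * made it the default, while swapaccount=1 turns it on.
 */
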
7194static u64 swap_current_read(struct cgroup_subsys_state *css,
7195			     struct cftype *cft)
7196{
7197	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7198
7199	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7200}
7201
7202static int swap_max_show(struct seq_file *m, void *v)
7203{
7204	return seq_puts_memcg_tunable(m,
7205		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7206}
7207
7208static ssize_t swap_max_write(struct kernfs_open_file *of,
7209			      char *buf, size_t nbytes, loff_t off)
7210{
7211	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7212	unsigned long max;
7213	int err;
7214
7215	buf = strstrip(buf);
7216	err = page_counter_memparse(buf, "max", &max);
7217	if (err)
7218		return err;
7219
7220	xchg(&memcg->swap.max, max);
7221
7222	return nbytes;
7223}
7224
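/*
 * Example (cgroup2 interface, from user space; <group> is a
 * placeholder for an actual cgroup directory):
 *
 *	echo 512M > /sys/fs/cgroup/<group>/memory.swap.max
 *	echo max  > /sys/fs/cgroup/<group>/memory.swap.max
 *
 * page_counter_memparse() accepts a byte count with an optional
 * K/M/G suffix, or the literal string "max" for no limit.
 */
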
7225static int swap_events_show(struct seq_file *m, void *v)
7226{
7227	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7228
7229	seq_printf(m, "max %lu\n",
7230		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7231	seq_printf(m, "fail %lu\n",
7232		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7233
7234	return 0;
7235}
7236
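/*
 * Example memory.swap.events output (counts are arbitrary):
 *
 *	max 4
 *	fail 1
 *
 * "max" counts hits on the swap limit, "fail" counts swap allocations
 * that failed, including those rejected because of the limit.
 */
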
7237static struct cftype swap_files[] = {
7238	{
7239		.name = "swap.current",
7240		.flags = CFTYPE_NOT_ON_ROOT,
7241		.read_u64 = swap_current_read,
7242	},
7243	{
7244		.name = "swap.max",
7245		.flags = CFTYPE_NOT_ON_ROOT,
7246		.seq_show = swap_max_show,
7247		.write = swap_max_write,
7248	},
7249	{
7250		.name = "swap.events",
7251		.flags = CFTYPE_NOT_ON_ROOT,
7252		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
7253		.seq_show = swap_events_show,
7254	},
7255	{ }	/* terminate */
7256};
7257
7258static struct cftype memsw_cgroup_files[] = {
7259	{
7260		.name = "memsw.usage_in_bytes",
7261		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7262		.read_u64 = mem_cgroup_read_u64,
7263	},
7264	{
7265		.name = "memsw.max_usage_in_bytes",
7266		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7267		.write = mem_cgroup_reset,
7268		.read_u64 = mem_cgroup_read_u64,
7269	},
7270	{
7271		.name = "memsw.limit_in_bytes",
7272		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7273		.write = mem_cgroup_write,
7274		.read_u64 = mem_cgroup_read_u64,
7275	},
7276	{
7277		.name = "memsw.failcnt",
7278		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7279		.write = mem_cgroup_reset,
7280		.read_u64 = mem_cgroup_read_u64,
7281	},
7282	{ },	/* terminate */
7283};
7284
7285static int __init mem_cgroup_swap_init(void)
7286{
7287	if (!mem_cgroup_disabled() && really_do_swap_account) {
7288		do_swap_account = 1;
7289		WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
7290					       swap_files));
7291		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
7292						  memsw_cgroup_files));
7293	}
7294	return 0;
7295}
7296subsys_initcall(mem_cgroup_swap_init);
7297
7298#endif /* CONFIG_MEMCG_SWAP */