Linux v4.6: mm/memcontrol.c
   1/* memcontrol.c - Memory Controller
   2 *
   3 * Copyright IBM Corporation, 2007
   4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
   5 *
   6 * Copyright 2007 OpenVZ SWsoft Inc
   7 * Author: Pavel Emelianov <xemul@openvz.org>
   8 *
   9 * Memory thresholds
  10 * Copyright (C) 2009 Nokia Corporation
  11 * Author: Kirill A. Shutemov
  12 *
  13 * Kernel Memory Controller
  14 * Copyright (C) 2012 Parallels Inc. and Google Inc.
  15 * Authors: Glauber Costa and Suleiman Souhlal
  16 *
  17 * Native page reclaim
  18 * Charge lifetime sanitation
  19 * Lockless page tracking & accounting
  20 * Unified hierarchy configuration model
  21 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
  22 *
  23 * This program is free software; you can redistribute it and/or modify
  24 * it under the terms of the GNU General Public License as published by
  25 * the Free Software Foundation; either version 2 of the License, or
  26 * (at your option) any later version.
  27 *
  28 * This program is distributed in the hope that it will be useful,
  29 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  31 * GNU General Public License for more details.
  32 */
  33
  34#include <linux/page_counter.h>
  35#include <linux/memcontrol.h>
  36#include <linux/cgroup.h>
  37#include <linux/mm.h>
  38#include <linux/hugetlb.h>
  39#include <linux/pagemap.h>
  40#include <linux/smp.h>
  41#include <linux/page-flags.h>
  42#include <linux/backing-dev.h>
  43#include <linux/bit_spinlock.h>
  44#include <linux/rcupdate.h>
  45#include <linux/limits.h>
  46#include <linux/export.h>
  47#include <linux/mutex.h>
  48#include <linux/rbtree.h>
  49#include <linux/slab.h>
  50#include <linux/swap.h>
  51#include <linux/swapops.h>
  52#include <linux/spinlock.h>
  53#include <linux/eventfd.h>
  54#include <linux/poll.h>
  55#include <linux/sort.h>
  56#include <linux/fs.h>
  57#include <linux/seq_file.h>
  58#include <linux/vmpressure.h>
  59#include <linux/mm_inline.h>
  60#include <linux/swap_cgroup.h>
  61#include <linux/cpu.h>
  62#include <linux/oom.h>
  63#include <linux/lockdep.h>
  64#include <linux/file.h>
  65#include <linux/tracehook.h>
  66#include "internal.h"
  67#include <net/sock.h>
  68#include <net/ip.h>
  69#include "slab.h"
  70
  71#include <asm/uaccess.h>
  72
  73#include <trace/events/vmscan.h>
  74
  75struct cgroup_subsys memory_cgrp_subsys __read_mostly;
  76EXPORT_SYMBOL(memory_cgrp_subsys);
  77
  78struct mem_cgroup *root_mem_cgroup __read_mostly;
  79
  80#define MEM_CGROUP_RECLAIM_RETRIES	5
  81
  82/* Socket memory accounting disabled? */
  83static bool cgroup_memory_nosocket;
  84
  85/* Kernel memory accounting disabled? */
  86static bool cgroup_memory_nokmem;
  87
  88/* Whether the swap controller is active */
  89#ifdef CONFIG_MEMCG_SWAP
  90int do_swap_account __read_mostly;
  91#else
  92#define do_swap_account		0
  93#endif
  94
  95/* Whether legacy memory+swap accounting is active */
  96static bool do_memsw_account(void)
  97{
  98	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
  99}
 100
 101static const char * const mem_cgroup_stat_names[] = {
 102	"cache",
 103	"rss",
 104	"rss_huge",
 105	"mapped_file",
 106	"dirty",
 107	"writeback",
 108	"swap",
 109};
 110
 111static const char * const mem_cgroup_events_names[] = {
 112	"pgpgin",
 113	"pgpgout",
 114	"pgfault",
 115	"pgmajfault",
 116};
 117
 118static const char * const mem_cgroup_lru_names[] = {
 119	"inactive_anon",
 120	"active_anon",
 121	"inactive_file",
 122	"active_file",
 123	"unevictable",
 124};
 125
 126#define THRESHOLDS_EVENTS_TARGET 128
 127#define SOFTLIMIT_EVENTS_TARGET 1024
 128#define NUMAINFO_EVENTS_TARGET	1024
 129
 130/*
 131 * Cgroups above their limits are maintained in a RB-Tree, independent of
 132 * their hierarchy representation
 133 */
 134
 135struct mem_cgroup_tree_per_zone {
 136	struct rb_root rb_root;
 137	spinlock_t lock;
 138};
 139
 140struct mem_cgroup_tree_per_node {
 141	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
 142};
 143
 144struct mem_cgroup_tree {
 145	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
 146};
 147
 148static struct mem_cgroup_tree soft_limit_tree __read_mostly;
 149
 150/* for OOM */
 151struct mem_cgroup_eventfd_list {
 152	struct list_head list;
 153	struct eventfd_ctx *eventfd;
 154};
 155
 156/*
  157 * cgroup_event represents events which userspace wants to receive.
 158 */
 159struct mem_cgroup_event {
 160	/*
 161	 * memcg which the event belongs to.
 162	 */
 163	struct mem_cgroup *memcg;
 164	/*
 165	 * eventfd to signal userspace about the event.
 166	 */
 167	struct eventfd_ctx *eventfd;
 168	/*
 169	 * Each of these stored in a list by the cgroup.
 170	 */
 171	struct list_head list;
 172	/*
 173	 * register_event() callback will be used to add new userspace
 174	 * waiter for changes related to this event.  Use eventfd_signal()
 175	 * on eventfd to send notification to userspace.
 176	 */
 177	int (*register_event)(struct mem_cgroup *memcg,
 178			      struct eventfd_ctx *eventfd, const char *args);
 179	/*
 180	 * unregister_event() callback will be called when userspace closes
  181	 * the eventfd or on cgroup removal.  This callback must be set
  182	 * if you want to provide notification functionality.
 183	 */
 184	void (*unregister_event)(struct mem_cgroup *memcg,
 185				 struct eventfd_ctx *eventfd);
 186	/*
 187	 * All fields below needed to unregister event when
 188	 * userspace closes eventfd.
 189	 */
 190	poll_table pt;
 191	wait_queue_head_t *wqh;
 192	wait_queue_t wait;
 193	struct work_struct remove;
 194};
 195
 196static void mem_cgroup_threshold(struct mem_cgroup *memcg);
 197static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
 198
  199/* Stuff for moving charges at task migration. */
 200/*
 201 * Types of charges to be moved.
 202 */
 203#define MOVE_ANON	0x1U
 204#define MOVE_FILE	0x2U
 205#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
 206
 207/* "mc" and its members are protected by cgroup_mutex */
 208static struct move_charge_struct {
 209	spinlock_t	  lock; /* for from, to */
 210	struct mm_struct  *mm;
 211	struct mem_cgroup *from;
 212	struct mem_cgroup *to;
 213	unsigned long flags;
 214	unsigned long precharge;
 215	unsigned long moved_charge;
 216	unsigned long moved_swap;
 217	struct task_struct *moving_task;	/* a task moving charges */
 218	wait_queue_head_t waitq;		/* a waitq for other context */
 219} mc = {
 220	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
 221	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
 222};
 223
 224/*
 225 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 226 * limit reclaim to prevent infinite loops, if they ever occur.
 227 */
 228#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
 229#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
 230
 231enum charge_type {
 232	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
 233	MEM_CGROUP_CHARGE_TYPE_ANON,
 234	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
 235	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
 236	NR_CHARGE_TYPE,
 237};
 238
 239/* for encoding cft->private value on file */
 240enum res_type {
 241	_MEM,
 242	_MEMSWAP,
 243	_OOM_TYPE,
 244	_KMEM,
 245	_TCP,
 246};
 247
 248#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
 249#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
 250#define MEMFILE_ATTR(val)	((val) & 0xffff)
  251/* Used for OOM notifier */
 252#define OOM_CONTROL		(0)
 253
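/*
 * Editor's illustration (not part of the original file): how a cftype's
 * ->private value round-trips through the MEMFILE_* helpers above.  The
 * resource type lives in the upper 16 bits, the attribute in the lower 16:
 *
 *	int priv = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL);
 *
 *	MEMFILE_TYPE(priv);	// == _OOM_TYPE
 *	MEMFILE_ATTR(priv);	// == OOM_CONTROL
 */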
 254/* Some nice accessors for the vmpressure. */
 255struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
 256{
 257	if (!memcg)
 258		memcg = root_mem_cgroup;
 259	return &memcg->vmpressure;
 260}
 261
 262struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
 263{
 264	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
 265}
 266
 267static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
 268{
 269	return (memcg == root_mem_cgroup);
 270}
 271
 272#ifndef CONFIG_SLOB
 273/*
 274 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 275 * The main reason for not using cgroup id for this:
 276 *  this works better in sparse environments, where we have a lot of memcgs,
 277 *  but only a few kmem-limited. Or also, if we have, for instance, 200
 278 *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
 279 *  200 entry array for that.
 280 *
 281 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 282 * will double each time we have to increase it.
 283 */
 284static DEFINE_IDA(memcg_cache_ida);
 285int memcg_nr_cache_ids;
 286
 287/* Protects memcg_nr_cache_ids */
 288static DECLARE_RWSEM(memcg_cache_ids_sem);
 289
 290void memcg_get_cache_ids(void)
 291{
 292	down_read(&memcg_cache_ids_sem);
 293}
 294
 295void memcg_put_cache_ids(void)
 296{
 297	up_read(&memcg_cache_ids_sem);
 298}
 299
 300/*
  301 * MIN_SIZE is not simply 1, because we would like to avoid going through
 302 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 303 * cgroups is a reasonable guess. In the future, it could be a parameter or
 304 * tunable, but that is strictly not necessary.
 305 *
 306 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 307 * this constant directly from cgroup, but it is understandable that this is
 308 * better kept as an internal representation in cgroup.c. In any case, the
 309 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 310 * increase ours as well if it increases.
 311 */
 312#define MEMCG_CACHES_MIN_SIZE 4
 313#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
 314
 315/*
 316 * A lot of the calls to the cache allocation functions are expected to be
 317 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
  318 * conditional on this static branch, we have to allow modules that do
  319 * kmem_cache_alloc and the like to see this symbol as well
 320 */
 321DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
 322EXPORT_SYMBOL(memcg_kmem_enabled_key);
 323
 324#endif /* !CONFIG_SLOB */
 325
 326static struct mem_cgroup_per_zone *
 327mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
 328{
 329	int nid = zone_to_nid(zone);
 330	int zid = zone_idx(zone);
 331
 332	return &memcg->nodeinfo[nid]->zoneinfo[zid];
 333}
 334
 335/**
 336 * mem_cgroup_css_from_page - css of the memcg associated with a page
 337 * @page: page of interest
 338 *
 339 * If memcg is bound to the default hierarchy, css of the memcg associated
 340 * with @page is returned.  The returned css remains associated with @page
 341 * until it is released.
 342 *
 343 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 344 * is returned.
 345 */
 346struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
 347{
 348	struct mem_cgroup *memcg;
 349
 350	memcg = page->mem_cgroup;
 351
 352	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
 353		memcg = root_mem_cgroup;
 354
 355	return &memcg->css;
 356}
 357
 358/**
 359 * page_cgroup_ino - return inode number of the memcg a page is charged to
 360 * @page: the page
 361 *
 362 * Look up the closest online ancestor of the memory cgroup @page is charged to
 363 * and return its inode number or 0 if @page is not charged to any cgroup. It
 364 * is safe to call this function without holding a reference to @page.
 365 *
 366 * Note, this function is inherently racy, because there is nothing to prevent
 367 * the cgroup inode from getting torn down and potentially reallocated a moment
 368 * after page_cgroup_ino() returns, so it only should be used by callers that
 369 * do not care (such as procfs interfaces).
 370 */
 371ino_t page_cgroup_ino(struct page *page)
 372{
 373	struct mem_cgroup *memcg;
 374	unsigned long ino = 0;
 375
 376	rcu_read_lock();
 377	memcg = READ_ONCE(page->mem_cgroup);
 378	while (memcg && !(memcg->css.flags & CSS_ONLINE))
 379		memcg = parent_mem_cgroup(memcg);
 380	if (memcg)
 381		ino = cgroup_ino(memcg->css.cgroup);
 382	rcu_read_unlock();
 383	return ino;
 384}
 385
 386static struct mem_cgroup_per_zone *
 387mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
 388{
 389	int nid = page_to_nid(page);
 390	int zid = page_zonenum(page);
 391
 392	return &memcg->nodeinfo[nid]->zoneinfo[zid];
 393}
 394
 395static struct mem_cgroup_tree_per_zone *
 396soft_limit_tree_node_zone(int nid, int zid)
 397{
 398	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
 399}
 400
 401static struct mem_cgroup_tree_per_zone *
 402soft_limit_tree_from_page(struct page *page)
 403{
 404	int nid = page_to_nid(page);
 405	int zid = page_zonenum(page);
 406
 407	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
 408}
 409
 410static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz,
 411					 struct mem_cgroup_tree_per_zone *mctz,
 412					 unsigned long new_usage_in_excess)
 413{
 414	struct rb_node **p = &mctz->rb_root.rb_node;
 415	struct rb_node *parent = NULL;
 416	struct mem_cgroup_per_zone *mz_node;
 417
 418	if (mz->on_tree)
 419		return;
 420
 421	mz->usage_in_excess = new_usage_in_excess;
 422	if (!mz->usage_in_excess)
 423		return;
 424	while (*p) {
 425		parent = *p;
 426		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
 427					tree_node);
 428		if (mz->usage_in_excess < mz_node->usage_in_excess)
 429			p = &(*p)->rb_left;
 430		/*
 431		 * We can't avoid mem cgroups that are over their soft
 432		 * limit by the same amount
 433		 */
 434		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
 435			p = &(*p)->rb_right;
 436	}
 437	rb_link_node(&mz->tree_node, parent, p);
 438	rb_insert_color(&mz->tree_node, &mctz->rb_root);
 439	mz->on_tree = true;
 440}
 441
 442static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
 443					 struct mem_cgroup_tree_per_zone *mctz)
 444{
 445	if (!mz->on_tree)
 446		return;
 447	rb_erase(&mz->tree_node, &mctz->rb_root);
 448	mz->on_tree = false;
 449}
 450
 451static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
 452				       struct mem_cgroup_tree_per_zone *mctz)
 453{
 454	unsigned long flags;
 455
 456	spin_lock_irqsave(&mctz->lock, flags);
 457	__mem_cgroup_remove_exceeded(mz, mctz);
 458	spin_unlock_irqrestore(&mctz->lock, flags);
 459}
 460
 461static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
 462{
 463	unsigned long nr_pages = page_counter_read(&memcg->memory);
 464	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
 465	unsigned long excess = 0;
 466
 467	if (nr_pages > soft_limit)
 468		excess = nr_pages - soft_limit;
 469
 470	return excess;
 471}
 472
 473static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
 474{
 475	unsigned long excess;
 476	struct mem_cgroup_per_zone *mz;
 477	struct mem_cgroup_tree_per_zone *mctz;
 478
 479	mctz = soft_limit_tree_from_page(page);
 480	/*
  481	 * Necessary to update all ancestors when hierarchy is used,
 482	 * because their event counter is not touched.
 483	 */
 484	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
 485		mz = mem_cgroup_page_zoneinfo(memcg, page);
 486		excess = soft_limit_excess(memcg);
 487		/*
 488		 * We have to update the tree if mz is on RB-tree or
 489		 * mem is over its softlimit.
 490		 */
 491		if (excess || mz->on_tree) {
 492			unsigned long flags;
 493
 494			spin_lock_irqsave(&mctz->lock, flags);
 495			/* if on-tree, remove it */
 496			if (mz->on_tree)
 497				__mem_cgroup_remove_exceeded(mz, mctz);
 498			/*
 499			 * Insert again. mz->usage_in_excess will be updated.
 500			 * If excess is 0, no tree ops.
 501			 */
 502			__mem_cgroup_insert_exceeded(mz, mctz, excess);
 503			spin_unlock_irqrestore(&mctz->lock, flags);
 504		}
 505	}
 506}
 507
 508static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
 509{
 510	struct mem_cgroup_tree_per_zone *mctz;
 511	struct mem_cgroup_per_zone *mz;
 512	int nid, zid;
 513
 514	for_each_node(nid) {
 515		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
 516			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
 517			mctz = soft_limit_tree_node_zone(nid, zid);
 518			mem_cgroup_remove_exceeded(mz, mctz);
 519		}
 520	}
 521}
 522
 523static struct mem_cgroup_per_zone *
 524__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 525{
 526	struct rb_node *rightmost = NULL;
 527	struct mem_cgroup_per_zone *mz;
 528
 529retry:
 530	mz = NULL;
 531	rightmost = rb_last(&mctz->rb_root);
 532	if (!rightmost)
 533		goto done;		/* Nothing to reclaim from */
 534
 535	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
 536	/*
 537	 * Remove the node now but someone else can add it back,
  538	 * we will add it back at the end of reclaim to its correct
 539	 * position in the tree.
 540	 */
 541	__mem_cgroup_remove_exceeded(mz, mctz);
 542	if (!soft_limit_excess(mz->memcg) ||
 543	    !css_tryget_online(&mz->memcg->css))
 544		goto retry;
 545done:
 546	return mz;
 547}
 548
 549static struct mem_cgroup_per_zone *
 550mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 551{
 552	struct mem_cgroup_per_zone *mz;
 553
 554	spin_lock_irq(&mctz->lock);
 555	mz = __mem_cgroup_largest_soft_limit_node(mctz);
 556	spin_unlock_irq(&mctz->lock);
 557	return mz;
 558}
 559
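/*
 * Editor's sketch (not in the original file): how the soft limit tree is
 * consumed.  mem_cgroup_update_tree() keys each per-zone node by
 * soft_limit_excess(), so the rightmost node is the worst offender and
 * soft limit reclaim picks it first, roughly:
 *
 *	struct mem_cgroup_per_zone *mz;
 *
 *	mz = mem_cgroup_largest_soft_limit_node(mctz);
 *	if (mz) {
 *		// reclaim pages charged to mz->memcg here ...
 *		css_put(&mz->memcg->css);	// ref taken by the lookup
 *	}
 */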
 560/*
 561 * Return page count for single (non recursive) @memcg.
 562 *
 563 * Implementation Note: reading percpu statistics for memcg.
 564 *
  565 * Both vmstat[] and percpu_counter use thresholds and periodic
  566 * synchronization to implement a "quick" read. There is a trade-off between
  567 * reading cost and precision of the value, so a periodic synchronization
  568 * of the memcg counters could be implemented here as well.
  569 *
  570 * But this _read() function currently backs the user interface. Users
  571 * account memory per memory cgroup and always expect an exact value,
  572 * and even with a quick-and-fuzzy read we would still have to visit all
  573 * online cpus and sum them up. So, for now, the extra synchronization is
  574 * not implemented (it is only done for cpu hotplug).
  575 *
  576 * If there are kernel-internal users that can make use of an inexact
  577 * value, and reading all cpu values becomes a performance bottleneck in
  578 * some common workload, thresholds and synchronization as in vmstat[]
  579 * should be implemented.
 580 */
 581static unsigned long
 582mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
 583{
 584	long val = 0;
 585	int cpu;
 586
 587	/* Per-cpu values can be negative, use a signed accumulator */
 588	for_each_possible_cpu(cpu)
 589		val += per_cpu(memcg->stat->count[idx], cpu);
 590	/*
 591	 * Summing races with updates, so val may be negative.  Avoid exposing
 592	 * transient negative values.
 593	 */
 594	if (val < 0)
 595		val = 0;
 596	return val;
 597}
 598
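/*
 * Editor's illustration (not in the original file): reading a single
 * counter, e.g. the page cache footprint that memory.stat reports as
 * "cache":
 *
 *	unsigned long cache_pages;
 *
 *	cache_pages = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_CACHE);
 *	// the value is in pages; multiply by PAGE_SIZE for bytes
 */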
 599static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
 600					    enum mem_cgroup_events_index idx)
 601{
 602	unsigned long val = 0;
 603	int cpu;
 604
 605	for_each_possible_cpu(cpu)
 606		val += per_cpu(memcg->stat->events[idx], cpu);
 607	return val;
 608}
 609
 610static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 611					 struct page *page,
 612					 bool compound, int nr_pages)
 613{
 614	/*
 615	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
 616	 * counted as CACHE even if it's on ANON LRU.
 617	 */
 618	if (PageAnon(page))
 619		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
 620				nr_pages);
 621	else
 622		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
 623				nr_pages);
 624
 625	if (compound) {
 626		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 627		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
 628				nr_pages);
 629	}
 630
 631	/* pagein of a big page is an event. So, ignore page size */
 632	if (nr_pages > 0)
 633		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
 634	else {
 635		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
 636		nr_pages = -nr_pages; /* for event */
 637	}
 638
 639	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 640}
 641
 642unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
 643					   int nid, unsigned int lru_mask)
 644{
 645	unsigned long nr = 0;
 646	int zid;
 647
 648	VM_BUG_ON((unsigned)nid >= nr_node_ids);
 649
 650	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
 651		struct mem_cgroup_per_zone *mz;
 652		enum lru_list lru;
 653
 654		for_each_lru(lru) {
 655			if (!(BIT(lru) & lru_mask))
 656				continue;
 657			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
 658			nr += mz->lru_size[lru];
 659		}
 660	}
 661	return nr;
 662}
 663
 664static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
 665			unsigned int lru_mask)
 666{
 667	unsigned long nr = 0;
 668	int nid;
 669
 670	for_each_node_state(nid, N_MEMORY)
 671		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
 672	return nr;
 673}
 674
 675static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
 676				       enum mem_cgroup_events_target target)
 677{
 678	unsigned long val, next;
 679
 680	val = __this_cpu_read(memcg->stat->nr_page_events);
 681	next = __this_cpu_read(memcg->stat->targets[target]);
 682	/* from time_after() in jiffies.h */
 683	if ((long)next - (long)val < 0) {
 684		switch (target) {
 685		case MEM_CGROUP_TARGET_THRESH:
 686			next = val + THRESHOLDS_EVENTS_TARGET;
 687			break;
 688		case MEM_CGROUP_TARGET_SOFTLIMIT:
 689			next = val + SOFTLIMIT_EVENTS_TARGET;
 690			break;
 691		case MEM_CGROUP_TARGET_NUMAINFO:
 692			next = val + NUMAINFO_EVENTS_TARGET;
 693			break;
 694		default:
 695			break;
 696		}
 697		__this_cpu_write(memcg->stat->targets[target], next);
 698		return true;
 699	}
 700	return false;
 701}
 702
 703/*
 704 * Check events in order.
 705 *
 706 */
 707static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 708{
 709	/* threshold event is triggered in finer grain than soft limit */
 710	if (unlikely(mem_cgroup_event_ratelimit(memcg,
 711						MEM_CGROUP_TARGET_THRESH))) {
 712		bool do_softlimit;
 713		bool do_numainfo __maybe_unused;
 714
 715		do_softlimit = mem_cgroup_event_ratelimit(memcg,
 716						MEM_CGROUP_TARGET_SOFTLIMIT);
 717#if MAX_NUMNODES > 1
 718		do_numainfo = mem_cgroup_event_ratelimit(memcg,
 719						MEM_CGROUP_TARGET_NUMAINFO);
 720#endif
 721		mem_cgroup_threshold(memcg);
 722		if (unlikely(do_softlimit))
 723			mem_cgroup_update_tree(memcg, page);
 724#if MAX_NUMNODES > 1
 725		if (unlikely(do_numainfo))
 726			atomic_inc(&memcg->numainfo_events);
 727#endif
 728	}
 729}
 730
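/*
 * Editor's worked example (not in the original file): nr_page_events
 * advances by the number of pages charged or uncharged on this CPU.  With
 * THRESHOLDS_EVENTS_TARGET == 128 and SOFTLIMIT_EVENTS_TARGET == 1024,
 * the usage thresholds are re-checked roughly every 128 page events,
 * while the soft limit tree is only updated roughly every 1024:
 *
 *	val == 130, next == 128  -> threshold check fires, next becomes 258
 *	val == 130, next == 1024 -> soft limit update does not fire yet
 */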
 731struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 732{
 733	/*
 734	 * mm_update_next_owner() may clear mm->owner to NULL
 735	 * if it races with swapoff, page migration, etc.
 736	 * So this can be called with p == NULL.
 737	 */
 738	if (unlikely(!p))
 739		return NULL;
 740
 741	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
 742}
 743EXPORT_SYMBOL(mem_cgroup_from_task);
 744
 745static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
 746{
 747	struct mem_cgroup *memcg = NULL;
 748
 749	rcu_read_lock();
 750	do {
 751		/*
  752		 * Page cache insertions can happen without an
 753		 * actual mm context, e.g. during disk probing
 754		 * on boot, loopback IO, acct() writes etc.
 755		 */
 756		if (unlikely(!mm))
 757			memcg = root_mem_cgroup;
 758		else {
 759			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
 760			if (unlikely(!memcg))
 761				memcg = root_mem_cgroup;
 762		}
 763	} while (!css_tryget_online(&memcg->css));
 764	rcu_read_unlock();
 765	return memcg;
 766}
 767
 768/**
 769 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 770 * @root: hierarchy root
 771 * @prev: previously returned memcg, NULL on first invocation
 772 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 773 *
 774 * Returns references to children of the hierarchy below @root, or
 775 * @root itself, or %NULL after a full round-trip.
 776 *
 777 * Caller must pass the return value in @prev on subsequent
 778 * invocations for reference counting, or use mem_cgroup_iter_break()
 779 * to cancel a hierarchy walk before the round-trip is complete.
 780 *
 781 * Reclaimers can specify a zone and a priority level in @reclaim to
 782 * divide up the memcgs in the hierarchy among all concurrent
 783 * reclaimers operating on the same zone and priority.
 784 */
 785struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 786				   struct mem_cgroup *prev,
 787				   struct mem_cgroup_reclaim_cookie *reclaim)
 788{
 789	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
 790	struct cgroup_subsys_state *css = NULL;
 791	struct mem_cgroup *memcg = NULL;
 792	struct mem_cgroup *pos = NULL;
 793
 794	if (mem_cgroup_disabled())
 795		return NULL;
 796
 797	if (!root)
 798		root = root_mem_cgroup;
 799
 800	if (prev && !reclaim)
 801		pos = prev;
 802
 803	if (!root->use_hierarchy && root != root_mem_cgroup) {
 804		if (prev)
 805			goto out;
 806		return root;
 807	}
 808
 809	rcu_read_lock();
 810
 811	if (reclaim) {
 812		struct mem_cgroup_per_zone *mz;
 813
 814		mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
 815		iter = &mz->iter[reclaim->priority];
 816
 817		if (prev && reclaim->generation != iter->generation)
 818			goto out_unlock;
 819
 820		while (1) {
 821			pos = READ_ONCE(iter->position);
 822			if (!pos || css_tryget(&pos->css))
 823				break;
 824			/*
 825			 * css reference reached zero, so iter->position will
 826			 * be cleared by ->css_released. However, we should not
 827			 * rely on this happening soon, because ->css_released
 828			 * is called from a work queue, and by busy-waiting we
 829			 * might block it. So we clear iter->position right
 830			 * away.
 831			 */
 832			(void)cmpxchg(&iter->position, pos, NULL);
 833		}
 834	}
 835
 836	if (pos)
 837		css = &pos->css;
 838
 839	for (;;) {
 840		css = css_next_descendant_pre(css, &root->css);
 841		if (!css) {
 842			/*
 843			 * Reclaimers share the hierarchy walk, and a
 844			 * new one might jump in right at the end of
 845			 * the hierarchy - make sure they see at least
 846			 * one group and restart from the beginning.
 847			 */
 848			if (!prev)
 849				continue;
 850			break;
 851		}
 852
 853		/*
 854		 * Verify the css and acquire a reference.  The root
 855		 * is provided by the caller, so we know it's alive
 856		 * and kicking, and don't take an extra reference.
 857		 */
 858		memcg = mem_cgroup_from_css(css);
 859
 860		if (css == &root->css)
 861			break;
 862
 863		if (css_tryget(css))
 864			break;
 865
 866		memcg = NULL;
 867	}
 868
 869	if (reclaim) {
 870		/*
 871		 * The position could have already been updated by a competing
 872		 * thread, so check that the value hasn't changed since we read
 873		 * it to avoid reclaiming from the same cgroup twice.
 874		 */
 875		(void)cmpxchg(&iter->position, pos, memcg);
 876
 877		if (pos)
 878			css_put(&pos->css);
 879
 880		if (!memcg)
 881			iter->generation++;
 882		else if (!prev)
 883			reclaim->generation = iter->generation;
 884	}
 885
 886out_unlock:
 887	rcu_read_unlock();
 888out:
 889	if (prev && prev != root)
 890		css_put(&prev->css);
 891
 892	return memcg;
 893}
 894
 895/**
 896 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 897 * @root: hierarchy root
 898 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 899 */
 900void mem_cgroup_iter_break(struct mem_cgroup *root,
 901			   struct mem_cgroup *prev)
 902{
 903	if (!root)
 904		root = root_mem_cgroup;
 905	if (prev && prev != root)
 906		css_put(&prev->css);
 907}
 908
 909static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
 910{
 911	struct mem_cgroup *memcg = dead_memcg;
 912	struct mem_cgroup_reclaim_iter *iter;
 913	struct mem_cgroup_per_zone *mz;
 914	int nid, zid;
 915	int i;
 916
 917	while ((memcg = parent_mem_cgroup(memcg))) {
 918		for_each_node(nid) {
 919			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
 920				mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
 921				for (i = 0; i <= DEF_PRIORITY; i++) {
 922					iter = &mz->iter[i];
 923					cmpxchg(&iter->position,
 924						dead_memcg, NULL);
 925				}
 926			}
 927		}
 928	}
 929}
 930
 931/*
 932 * Iteration constructs for visiting all cgroups (under a tree).  If
 933 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 934 * be used for reference counting.
 935 */
 936#define for_each_mem_cgroup_tree(iter, root)		\
 937	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
 938	     iter != NULL;				\
 939	     iter = mem_cgroup_iter(root, iter, NULL))
 940
 941#define for_each_mem_cgroup(iter)			\
 942	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
 943	     iter != NULL;				\
 944	     iter = mem_cgroup_iter(NULL, iter, NULL))
 945
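/*
 * Editor's sketch (not in the original file): a walk that bails out early
 * must hand the last iterator back via mem_cgroup_iter_break() so that the
 * css reference taken by mem_cgroup_iter() is dropped:
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (should_stop(iter)) {	// hypothetical predicate
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */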
 946/**
 947 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
 948 * @zone: zone of the wanted lruvec
 949 * @memcg: memcg of the wanted lruvec
 950 *
 951 * Returns the lru list vector holding pages for the given @zone and
 952 * @mem.  This can be the global zone lruvec, if the memory controller
 953 * is disabled.
 954 */
 955struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
 956				      struct mem_cgroup *memcg)
 957{
 958	struct mem_cgroup_per_zone *mz;
 959	struct lruvec *lruvec;
 960
 961	if (mem_cgroup_disabled()) {
 962		lruvec = &zone->lruvec;
 963		goto out;
 964	}
 965
 966	mz = mem_cgroup_zone_zoneinfo(memcg, zone);
 967	lruvec = &mz->lruvec;
 968out:
 969	/*
 970	 * Since a node can be onlined after the mem_cgroup was created,
 971	 * we have to be prepared to initialize lruvec->zone here;
 972	 * and if offlined then reonlined, we need to reinitialize it.
 973	 */
 974	if (unlikely(lruvec->zone != zone))
 975		lruvec->zone = zone;
 976	return lruvec;
 977}
 978
 979/**
 980 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 981 * @page: the page
 982 * @zone: zone of the page
 983 *
 984 * This function is only safe when following the LRU page isolation
 985 * and putback protocol: the LRU lock must be held, and the page must
 986 * either be PageLRU() or the caller must have isolated/allocated it.
 987 */
 988struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 989{
 990	struct mem_cgroup_per_zone *mz;
 991	struct mem_cgroup *memcg;
 992	struct lruvec *lruvec;
 993
 994	if (mem_cgroup_disabled()) {
 995		lruvec = &zone->lruvec;
 996		goto out;
 997	}
 998
 999	memcg = page->mem_cgroup;
1000	/*
1001	 * Swapcache readahead pages are added to the LRU - and
1002	 * possibly migrated - before they are charged.
1003	 */
1004	if (!memcg)
1005		memcg = root_mem_cgroup;
1006
1007	mz = mem_cgroup_page_zoneinfo(memcg, page);
1008	lruvec = &mz->lruvec;
1009out:
1010	/*
1011	 * Since a node can be onlined after the mem_cgroup was created,
1012	 * we have to be prepared to initialize lruvec->zone here;
1013	 * and if offlined then reonlined, we need to reinitialize it.
1014	 */
1015	if (unlikely(lruvec->zone != zone))
1016		lruvec->zone = zone;
1017	return lruvec;
1018}
1019
1020/**
1021 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1022 * @lruvec: mem_cgroup per zone lru vector
1023 * @lru: index of lru list the page is sitting on
1024 * @nr_pages: positive when adding or negative when removing
1025 *
1026 * This function must be called when a page is added to or removed from an
1027 * lru list.
1028 */
1029void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1030				int nr_pages)
1031{
1032	struct mem_cgroup_per_zone *mz;
1033	unsigned long *lru_size;
1034
1035	if (mem_cgroup_disabled())
1036		return;
1037
1038	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1039	lru_size = mz->lru_size + lru;
1040	*lru_size += nr_pages;
1041	VM_BUG_ON((long)(*lru_size) < 0);
1042}
1043
1044bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
1045{
1046	struct mem_cgroup *task_memcg;
1047	struct task_struct *p;
1048	bool ret;
1049
1050	p = find_lock_task_mm(task);
1051	if (p) {
1052		task_memcg = get_mem_cgroup_from_mm(p->mm);
1053		task_unlock(p);
1054	} else {
1055		/*
1056		 * All threads may have already detached their mm's, but the oom
1057		 * killer still needs to detect if they have already been oom
1058		 * killed to prevent needlessly killing additional tasks.
1059		 */
1060		rcu_read_lock();
1061		task_memcg = mem_cgroup_from_task(task);
1062		css_get(&task_memcg->css);
1063		rcu_read_unlock();
1064	}
1065	ret = mem_cgroup_is_descendant(task_memcg, memcg);
1066	css_put(&task_memcg->css);
1067	return ret;
1068}
1069
1070/**
1071 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1072 * @memcg: the memory cgroup
1073 *
1074 * Returns the maximum amount of memory @mem can be charged with, in
1075 * pages.
1076 */
1077static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1078{
1079	unsigned long margin = 0;
1080	unsigned long count;
1081	unsigned long limit;
1082
1083	count = page_counter_read(&memcg->memory);
1084	limit = READ_ONCE(memcg->memory.limit);
1085	if (count < limit)
1086		margin = limit - count;
1087
1088	if (do_memsw_account()) {
1089		count = page_counter_read(&memcg->memsw);
1090		limit = READ_ONCE(memcg->memsw.limit);
1091		if (count <= limit)
1092			margin = min(margin, limit - count);
1093	}
1094
1095	return margin;
1096}
1097
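/*
 * Editor's worked example (not in the original file): with memory usage at
 * 300 pages and memory.limit at 512, the memory margin is 212 pages.  If
 * legacy memory+swap accounting is active with memsw usage at 500 pages
 * and memsw.limit at 600, the result is min(212, 100) == 100 pages, i.e.
 * the tighter of the two counters bounds how much can still be charged.
 */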
1098/*
 1099 * A routine for checking whether "mem" is under move_account() or not.
 1100 *
 1101 * Checks whether a cgroup is mc.from or mc.to or under the hierarchy of
 1102 * moving cgroups. This is used for waiting at high memory pressure
1103 * caused by "move".
1104 */
1105static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1106{
1107	struct mem_cgroup *from;
1108	struct mem_cgroup *to;
1109	bool ret = false;
1110	/*
1111	 * Unlike task_move routines, we access mc.to, mc.from not under
1112	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1113	 */
1114	spin_lock(&mc.lock);
1115	from = mc.from;
1116	to = mc.to;
1117	if (!from)
1118		goto unlock;
1119
1120	ret = mem_cgroup_is_descendant(from, memcg) ||
1121		mem_cgroup_is_descendant(to, memcg);
1122unlock:
1123	spin_unlock(&mc.lock);
1124	return ret;
1125}
1126
1127static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1128{
1129	if (mc.moving_task && current != mc.moving_task) {
1130		if (mem_cgroup_under_move(memcg)) {
1131			DEFINE_WAIT(wait);
1132			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1133			/* moving charge context might have finished. */
1134			if (mc.moving_task)
1135				schedule();
1136			finish_wait(&mc.waitq, &wait);
1137			return true;
1138		}
1139	}
1140	return false;
1141}
1142
1143#define K(x) ((x) << (PAGE_SHIFT-10))
1144/**
1145 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
1146 * @memcg: The memory cgroup that went over limit
1147 * @p: Task that is going to be killed
1148 *
1149 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1150 * enabled
1151 */
1152void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1153{
1154	struct mem_cgroup *iter;
1155	unsigned int i;
1156
1157	rcu_read_lock();
1158
1159	if (p) {
1160		pr_info("Task in ");
1161		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1162		pr_cont(" killed as a result of limit of ");
1163	} else {
1164		pr_info("Memory limit reached of cgroup ");
1165	}
1166
1167	pr_cont_cgroup_path(memcg->css.cgroup);
1168	pr_cont("\n");
1169
1170	rcu_read_unlock();
1171
1172	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1173		K((u64)page_counter_read(&memcg->memory)),
1174		K((u64)memcg->memory.limit), memcg->memory.failcnt);
1175	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1176		K((u64)page_counter_read(&memcg->memsw)),
1177		K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
1178	pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1179		K((u64)page_counter_read(&memcg->kmem)),
1180		K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
1181
1182	for_each_mem_cgroup_tree(iter, memcg) {
1183		pr_info("Memory cgroup stats for ");
1184		pr_cont_cgroup_path(iter->css.cgroup);
1185		pr_cont(":");
1186
1187		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1188			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1189				continue;
1190			pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
1191				K(mem_cgroup_read_stat(iter, i)));
1192		}
1193
1194		for (i = 0; i < NR_LRU_LISTS; i++)
1195			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1196				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1197
1198		pr_cont("\n");
1199	}
1200}
1201
1202/*
 1203 * This function returns the number of memcgs under the hierarchy tree.
 1204 * Returns 1 (self count) if there are no children.
1205 */
1206static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1207{
1208	int num = 0;
1209	struct mem_cgroup *iter;
1210
1211	for_each_mem_cgroup_tree(iter, memcg)
1212		num++;
1213	return num;
1214}
1215
1216/*
1217 * Return the memory (and swap, if configured) limit for a memcg.
1218 */
1219static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
1220{
1221	unsigned long limit;
1222
1223	limit = memcg->memory.limit;
1224	if (mem_cgroup_swappiness(memcg)) {
1225		unsigned long memsw_limit;
1226		unsigned long swap_limit;
1227
1228		memsw_limit = memcg->memsw.limit;
1229		swap_limit = memcg->swap.limit;
1230		swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
1231		limit = min(limit + swap_limit, memsw_limit);
1232	}
1233	return limit;
1234}
1235
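/*
 * Editor's worked example (not in the original file): with memory.limit =
 * 1000 pages, swap.limit = 400, total_swap_pages = 250 and memsw.limit =
 * 1100, a non-zero swappiness gives min(1000 + min(400, 250), 1100) = 1100
 * pages, which mem_cgroup_out_of_memory() below uses as its totalpages base.
 */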
1236static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1237				     int order)
1238{
1239	struct oom_control oc = {
1240		.zonelist = NULL,
1241		.nodemask = NULL,
1242		.gfp_mask = gfp_mask,
1243		.order = order,
1244	};
1245	struct mem_cgroup *iter;
1246	unsigned long chosen_points = 0;
1247	unsigned long totalpages;
1248	unsigned int points = 0;
1249	struct task_struct *chosen = NULL;
1250
1251	mutex_lock(&oom_lock);
1252
1253	/*
1254	 * If current has a pending SIGKILL or is exiting, then automatically
1255	 * select it.  The goal is to allow it to allocate so that it may
1256	 * quickly exit and free its memory.
1257	 */
1258	if (fatal_signal_pending(current) || task_will_free_mem(current)) {
1259		mark_oom_victim(current);
1260		goto unlock;
1261	}
1262
1263	check_panic_on_oom(&oc, CONSTRAINT_MEMCG, memcg);
1264	totalpages = mem_cgroup_get_limit(memcg) ? : 1;
1265	for_each_mem_cgroup_tree(iter, memcg) {
1266		struct css_task_iter it;
1267		struct task_struct *task;
1268
1269		css_task_iter_start(&iter->css, &it);
1270		while ((task = css_task_iter_next(&it))) {
1271			switch (oom_scan_process_thread(&oc, task, totalpages)) {
1272			case OOM_SCAN_SELECT:
1273				if (chosen)
1274					put_task_struct(chosen);
1275				chosen = task;
1276				chosen_points = ULONG_MAX;
1277				get_task_struct(chosen);
1278				/* fall through */
1279			case OOM_SCAN_CONTINUE:
1280				continue;
1281			case OOM_SCAN_ABORT:
1282				css_task_iter_end(&it);
1283				mem_cgroup_iter_break(memcg, iter);
1284				if (chosen)
1285					put_task_struct(chosen);
1286				goto unlock;
1287			case OOM_SCAN_OK:
1288				break;
1289			};
1290			points = oom_badness(task, memcg, NULL, totalpages);
1291			if (!points || points < chosen_points)
1292				continue;
1293			/* Prefer thread group leaders for display purposes */
1294			if (points == chosen_points &&
1295			    thread_group_leader(chosen))
1296				continue;
1297
1298			if (chosen)
1299				put_task_struct(chosen);
1300			chosen = task;
1301			chosen_points = points;
1302			get_task_struct(chosen);
1303		}
1304		css_task_iter_end(&it);
1305	}
1306
1307	if (chosen) {
1308		points = chosen_points * 1000 / totalpages;
1309		oom_kill_process(&oc, chosen, points, totalpages, memcg,
1310				 "Memory cgroup out of memory");
1311	}
1312unlock:
1313	mutex_unlock(&oom_lock);
1314	return chosen;
1315}
1316
1317#if MAX_NUMNODES > 1
1318
1319/**
 1320 * test_mem_cgroup_node_reclaimable - check whether a node has reclaimable pages
1321 * @memcg: the target memcg
1322 * @nid: the node ID to be checked.
 1323 * @noswap: specify true here if the caller wants file-only information.
1324 *
1325 * This function returns whether the specified memcg contains any
1326 * reclaimable pages on a node. Returns true if there are any reclaimable
1327 * pages in the node.
1328 */
1329static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1330		int nid, bool noswap)
1331{
1332	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1333		return true;
1334	if (noswap || !total_swap_pages)
1335		return false;
1336	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1337		return true;
1338	return false;
1339
1340}
1341
1342/*
1343 * Always updating the nodemask is not very good - even if we have an empty
1344 * list or the wrong list here, we can start from some node and traverse all
1345 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1346 *
1347 */
1348static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1349{
1350	int nid;
1351	/*
1352	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1353	 * pagein/pageout changes since the last update.
1354	 */
1355	if (!atomic_read(&memcg->numainfo_events))
1356		return;
1357	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1358		return;
1359
1360	/* make a nodemask where this memcg uses memory from */
1361	memcg->scan_nodes = node_states[N_MEMORY];
1362
1363	for_each_node_mask(nid, node_states[N_MEMORY]) {
1364
1365		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1366			node_clear(nid, memcg->scan_nodes);
1367	}
1368
1369	atomic_set(&memcg->numainfo_events, 0);
1370	atomic_set(&memcg->numainfo_updating, 0);
1371}
1372
1373/*
 1374 * Select a node to start reclaim from. Because all we need is to reduce
 1375 * the usage counter, starting from anywhere is OK. Reclaiming from the
 1376 * current node has both pros and cons:
1377 *
1378 * Freeing memory from current node means freeing memory from a node which
1379 * we'll use or we've used. So, it may make LRU bad. And if several threads
1380 * hit limits, it will see a contention on a node. But freeing from remote
1381 * node means more costs for memory reclaim because of memory latency.
1382 *
1383 * Now, we use round-robin. Better algorithm is welcomed.
1384 */
1385int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1386{
1387	int node;
1388
1389	mem_cgroup_may_update_nodemask(memcg);
1390	node = memcg->last_scanned_node;
1391
1392	node = next_node(node, memcg->scan_nodes);
1393	if (node == MAX_NUMNODES)
1394		node = first_node(memcg->scan_nodes);
1395	/*
1396	 * We call this when we hit limit, not when pages are added to LRU.
1397	 * No LRU may hold pages because all pages are UNEVICTABLE or
 1398	 * the memcg is too small and no pages are on the LRU. In that case,
 1399	 * we use the current node.
1400	 */
1401	if (unlikely(node == MAX_NUMNODES))
1402		node = numa_node_id();
1403
1404	memcg->last_scanned_node = node;
1405	return node;
1406}
1407#else
1408int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1409{
1410	return 0;
1411}
1412#endif
1413
1414static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1415				   struct zone *zone,
1416				   gfp_t gfp_mask,
1417				   unsigned long *total_scanned)
1418{
1419	struct mem_cgroup *victim = NULL;
1420	int total = 0;
1421	int loop = 0;
1422	unsigned long excess;
1423	unsigned long nr_scanned;
1424	struct mem_cgroup_reclaim_cookie reclaim = {
1425		.zone = zone,
1426		.priority = 0,
1427	};
1428
1429	excess = soft_limit_excess(root_memcg);
1430
1431	while (1) {
1432		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1433		if (!victim) {
1434			loop++;
1435			if (loop >= 2) {
1436				/*
1437				 * If we have not been able to reclaim
 1438				 * anything, it might be because there are
1439				 * no reclaimable pages under this hierarchy
1440				 */
1441				if (!total)
1442					break;
1443				/*
1444				 * We want to do more targeted reclaim.
 1445				 * excess >> 2 is not too excessive, so that we do not
 1446				 * reclaim too much, nor so little that we keep
1447				 * coming back to reclaim from this cgroup
1448				 */
1449				if (total >= (excess >> 2) ||
1450					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1451					break;
1452			}
1453			continue;
1454		}
1455		total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
1456						     zone, &nr_scanned);
1457		*total_scanned += nr_scanned;
1458		if (!soft_limit_excess(root_memcg))
1459			break;
1460	}
1461	mem_cgroup_iter_break(root_memcg, victim);
1462	return total;
1463}
1464
1465#ifdef CONFIG_LOCKDEP
1466static struct lockdep_map memcg_oom_lock_dep_map = {
1467	.name = "memcg_oom_lock",
1468};
1469#endif
1470
1471static DEFINE_SPINLOCK(memcg_oom_lock);
1472
1473/*
 1474 * Check whether the OOM killer is already running under our hierarchy.
 1475 * If someone else is already running it, return false.
1476 */
1477static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1478{
1479	struct mem_cgroup *iter, *failed = NULL;
1480
1481	spin_lock(&memcg_oom_lock);
1482
1483	for_each_mem_cgroup_tree(iter, memcg) {
1484		if (iter->oom_lock) {
1485			/*
1486			 * this subtree of our hierarchy is already locked
1487			 * so we cannot give a lock.
1488			 */
1489			failed = iter;
1490			mem_cgroup_iter_break(memcg, iter);
1491			break;
1492		} else
1493			iter->oom_lock = true;
1494	}
1495
1496	if (failed) {
1497		/*
1498		 * OK, we failed to lock the whole subtree so we have
1499		 * to clean up what we set up to the failing subtree
1500		 */
1501		for_each_mem_cgroup_tree(iter, memcg) {
1502			if (iter == failed) {
1503				mem_cgroup_iter_break(memcg, iter);
1504				break;
1505			}
1506			iter->oom_lock = false;
1507		}
1508	} else
1509		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1510
1511	spin_unlock(&memcg_oom_lock);
1512
1513	return !failed;
1514}
1515
1516static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1517{
1518	struct mem_cgroup *iter;
1519
1520	spin_lock(&memcg_oom_lock);
1521	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
1522	for_each_mem_cgroup_tree(iter, memcg)
1523		iter->oom_lock = false;
1524	spin_unlock(&memcg_oom_lock);
1525}
1526
1527static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1528{
1529	struct mem_cgroup *iter;
1530
1531	spin_lock(&memcg_oom_lock);
1532	for_each_mem_cgroup_tree(iter, memcg)
1533		iter->under_oom++;
1534	spin_unlock(&memcg_oom_lock);
1535}
1536
1537static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1538{
1539	struct mem_cgroup *iter;
1540
1541	/*
1542	 * When a new child is created while the hierarchy is under oom,
1543	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
1544	 */
1545	spin_lock(&memcg_oom_lock);
1546	for_each_mem_cgroup_tree(iter, memcg)
1547		if (iter->under_oom > 0)
1548			iter->under_oom--;
1549	spin_unlock(&memcg_oom_lock);
1550}
1551
1552static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1553
1554struct oom_wait_info {
1555	struct mem_cgroup *memcg;
1556	wait_queue_t	wait;
1557};
1558
1559static int memcg_oom_wake_function(wait_queue_t *wait,
1560	unsigned mode, int sync, void *arg)
1561{
1562	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1563	struct mem_cgroup *oom_wait_memcg;
1564	struct oom_wait_info *oom_wait_info;
1565
1566	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1567	oom_wait_memcg = oom_wait_info->memcg;
1568
1569	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1570	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1571		return 0;
1572	return autoremove_wake_function(wait, mode, sync, arg);
1573}
1574
1575static void memcg_oom_recover(struct mem_cgroup *memcg)
1576{
1577	/*
1578	 * For the following lockless ->under_oom test, the only required
1579	 * guarantee is that it must see the state asserted by an OOM when
1580	 * this function is called as a result of userland actions
1581	 * triggered by the notification of the OOM.  This is trivially
1582	 * achieved by invoking mem_cgroup_mark_under_oom() before
1583	 * triggering notification.
1584	 */
1585	if (memcg && memcg->under_oom)
1586		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1587}
1588
1589static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1590{
1591	if (!current->memcg_may_oom)
1592		return;
1593	/*
1594	 * We are in the middle of the charge context here, so we
1595	 * don't want to block when potentially sitting on a callstack
1596	 * that holds all kinds of filesystem and mm locks.
1597	 *
1598	 * Also, the caller may handle a failed allocation gracefully
1599	 * (like optional page cache readahead) and so an OOM killer
1600	 * invocation might not even be necessary.
1601	 *
1602	 * That's why we don't do anything here except remember the
1603	 * OOM context and then deal with it at the end of the page
1604	 * fault when the stack is unwound, the locks are released,
1605	 * and when we know whether the fault was overall successful.
1606	 */
1607	css_get(&memcg->css);
1608	current->memcg_in_oom = memcg;
1609	current->memcg_oom_gfp_mask = mask;
1610	current->memcg_oom_order = order;
1611}
1612
1613/**
1614 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1615 * @handle: actually kill/wait or just clean up the OOM state
1616 *
1617 * This has to be called at the end of a page fault if the memcg OOM
1618 * handler was enabled.
1619 *
1620 * Memcg supports userspace OOM handling where failed allocations must
1621 * sleep on a waitqueue until the userspace task resolves the
1622 * situation.  Sleeping directly in the charge context with all kinds
1623 * of locks held is not a good idea, instead we remember an OOM state
1624 * in the task and mem_cgroup_oom_synchronize() has to be called at
1625 * the end of the page fault to complete the OOM handling.
1626 *
1627 * Returns %true if an ongoing memcg OOM situation was detected and
1628 * completed, %false otherwise.
1629 */
1630bool mem_cgroup_oom_synchronize(bool handle)
1631{
1632	struct mem_cgroup *memcg = current->memcg_in_oom;
1633	struct oom_wait_info owait;
1634	bool locked;
1635
1636	/* OOM is global, do not handle */
1637	if (!memcg)
1638		return false;
1639
1640	if (!handle || oom_killer_disabled)
1641		goto cleanup;
1642
1643	owait.memcg = memcg;
1644	owait.wait.flags = 0;
1645	owait.wait.func = memcg_oom_wake_function;
1646	owait.wait.private = current;
1647	INIT_LIST_HEAD(&owait.wait.task_list);
1648
1649	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1650	mem_cgroup_mark_under_oom(memcg);
1651
1652	locked = mem_cgroup_oom_trylock(memcg);
1653
1654	if (locked)
1655		mem_cgroup_oom_notify(memcg);
1656
1657	if (locked && !memcg->oom_kill_disable) {
1658		mem_cgroup_unmark_under_oom(memcg);
1659		finish_wait(&memcg_oom_waitq, &owait.wait);
1660		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1661					 current->memcg_oom_order);
1662	} else {
1663		schedule();
1664		mem_cgroup_unmark_under_oom(memcg);
1665		finish_wait(&memcg_oom_waitq, &owait.wait);
1666	}
1667
1668	if (locked) {
1669		mem_cgroup_oom_unlock(memcg);
1670		/*
1671		 * There is no guarantee that an OOM-lock contender
1672		 * sees the wakeups triggered by the OOM kill
 1673		 * uncharges.  Wake any sleepers explicitly.
1674		 */
1675		memcg_oom_recover(memcg);
1676	}
1677cleanup:
1678	current->memcg_in_oom = NULL;
1679	css_put(&memcg->css);
1680	return true;
1681}
1682
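/*
 * Editor's sketch (not in the original file): the intended pairing is
 * mem_cgroup_oom() recording the OOM context during the failed charge,
 * and the page fault exit path completing it, along the lines of:
 *
 *	if (mem_cgroup_oom_synchronize(true))
 *		return;		// memcg OOM handled, no global OOM needed
 */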
1683/**
1684 * lock_page_memcg - lock a page->mem_cgroup binding
1685 * @page: the page
1686 *
1687 * This function protects unlocked LRU pages from being moved to
1688 * another cgroup and stabilizes their page->mem_cgroup binding.
1689 */
1690void lock_page_memcg(struct page *page)
1691{
1692	struct mem_cgroup *memcg;
1693	unsigned long flags;
1694
1695	/*
1696	 * The RCU lock is held throughout the transaction.  The fast
1697	 * path can get away without acquiring the memcg->move_lock
1698	 * because page moving starts with an RCU grace period.
1699	 */
1700	rcu_read_lock();
1701
1702	if (mem_cgroup_disabled())
1703		return;
1704again:
1705	memcg = page->mem_cgroup;
1706	if (unlikely(!memcg))
1707		return;
1708
1709	if (atomic_read(&memcg->moving_account) <= 0)
1710		return;
1711
1712	spin_lock_irqsave(&memcg->move_lock, flags);
1713	if (memcg != page->mem_cgroup) {
1714		spin_unlock_irqrestore(&memcg->move_lock, flags);
1715		goto again;
1716	}
1717
1718	/*
1719	 * When charge migration first begins, we can have locked and
1720	 * unlocked page stat updates happening concurrently.  Track
1721	 * the task who has the lock for unlock_page_memcg().
1722	 */
1723	memcg->move_lock_task = current;
1724	memcg->move_lock_flags = flags;
1725
1726	return;
1727}
1728EXPORT_SYMBOL(lock_page_memcg);
1729
1730/**
1731 * unlock_page_memcg - unlock a page->mem_cgroup binding
1732 * @page: the page
1733 */
1734void unlock_page_memcg(struct page *page)
1735{
1736	struct mem_cgroup *memcg = page->mem_cgroup;
1737
1738	if (memcg && memcg->move_lock_task == current) {
1739		unsigned long flags = memcg->move_lock_flags;
1740
1741		memcg->move_lock_task = NULL;
1742		memcg->move_lock_flags = 0;
1743
1744		spin_unlock_irqrestore(&memcg->move_lock, flags);
1745	}
1746
1747	rcu_read_unlock();
1748}
1749EXPORT_SYMBOL(unlock_page_memcg);
1750
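/*
 * Editor's sketch (not in the original file): the intended usage pattern
 * around memcg-tracked page state transitions, e.g. when dirtying a page:
 *
 *	lock_page_memcg(page);
 *	// page->mem_cgroup is stable here; flip the page flag and update
 *	// the matching memcg statistic together
 *	unlock_page_memcg(page);
 */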
1751/*
1752 * size of first charge trial. "32" comes from vmscan.c's magic value.
 1753 * TODO: it may be necessary to use bigger numbers on big iron.
1754 */
1755#define CHARGE_BATCH	32U
1756struct memcg_stock_pcp {
1757	struct mem_cgroup *cached; /* this never be root cgroup */
1758	unsigned int nr_pages;
1759	struct work_struct work;
1760	unsigned long flags;
1761#define FLUSHING_CACHED_CHARGE	0
1762};
1763static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1764static DEFINE_MUTEX(percpu_charge_mutex);
1765
1766/**
1767 * consume_stock: Try to consume stocked charge on this cpu.
1768 * @memcg: memcg to consume from.
1769 * @nr_pages: how many pages to charge.
1770 *
1771 * The charges will only happen if @memcg matches the current cpu's memcg
1772 * stock, and at least @nr_pages are available in that stock.  Failure to
1773 * service an allocation will refill the stock.
1774 *
1775 * returns true if successful, false otherwise.
1776 */
1777static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1778{
1779	struct memcg_stock_pcp *stock;
1780	bool ret = false;
1781
1782	if (nr_pages > CHARGE_BATCH)
1783		return ret;
1784
1785	stock = &get_cpu_var(memcg_stock);
1786	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
1787		stock->nr_pages -= nr_pages;
1788		ret = true;
1789	}
1790	put_cpu_var(memcg_stock);
1791	return ret;
1792}
1793
1794/*
1795 * Returns stocks cached in percpu and reset cached information.
1796 */
1797static void drain_stock(struct memcg_stock_pcp *stock)
1798{
1799	struct mem_cgroup *old = stock->cached;
1800
1801	if (stock->nr_pages) {
1802		page_counter_uncharge(&old->memory, stock->nr_pages);
1803		if (do_memsw_account())
1804			page_counter_uncharge(&old->memsw, stock->nr_pages);
1805		css_put_many(&old->css, stock->nr_pages);
1806		stock->nr_pages = 0;
1807	}
1808	stock->cached = NULL;
1809}
1810
1811/*
1812 * This must be called with preemption disabled, or by a thread that is
1813 * pinned to the local cpu.
1814 */
1815static void drain_local_stock(struct work_struct *dummy)
1816{
1817	struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
1818	drain_stock(stock);
1819	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1820}
1821
1822/*
1823 * Cache @nr_pages charges in the local per-cpu area.
1824 * They will be consumed by consume_stock() later.
1825 */
1826static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1827{
1828	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
1829
1830	if (stock->cached != memcg) { /* reset if necessary */
1831		drain_stock(stock);
1832		stock->cached = memcg;
1833	}
1834	stock->nr_pages += nr_pages;
1835	put_cpu_var(memcg_stock);
1836}
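/*
 * Worked example (added commentary): with CHARGE_BATCH == 32, a task
 * faulting in pages one at a time touches the page counters only once
 * per 32 pages.  The first try_charge() charges a full batch of 32 to
 * the counters, hands 1 page to the caller and stashes the remaining
 * 31 in this cpu's stock via refill_stock(); the next 31 single-page
 * charges are then served locklessly by consume_stock().
 */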
1837
1838/*
1839 * Drain all per-CPU charge caches for the given root_memcg and the
1840 * subtree of the hierarchy under it.
1841 */
1842static void drain_all_stock(struct mem_cgroup *root_memcg)
1843{
1844	int cpu, curcpu;
1845
1846	/* If someone's already draining, avoid adding more workers. */
1847	if (!mutex_trylock(&percpu_charge_mutex))
1848		return;
1849	/* Notify other cpus that system-wide "drain" is running */
1850	get_online_cpus();
1851	curcpu = get_cpu();
1852	for_each_online_cpu(cpu) {
1853		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1854		struct mem_cgroup *memcg;
1855
1856		memcg = stock->cached;
1857		if (!memcg || !stock->nr_pages)
1858			continue;
1859		if (!mem_cgroup_is_descendant(memcg, root_memcg))
1860			continue;
1861		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1862			if (cpu == curcpu)
1863				drain_local_stock(&stock->work);
1864			else
1865				schedule_work_on(cpu, &stock->work);
1866		}
1867	}
1868	put_cpu();
1869	put_online_cpus();
1870	mutex_unlock(&percpu_charge_mutex);
1871}
1872
1873static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
1874					unsigned long action,
1875					void *hcpu)
1876{
1877	int cpu = (unsigned long)hcpu;
1878	struct memcg_stock_pcp *stock;
1879
1880	if (action == CPU_ONLINE)
1881		return NOTIFY_OK;
1882
1883	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
1884		return NOTIFY_OK;
1885
1886	stock = &per_cpu(memcg_stock, cpu);
1887	drain_stock(stock);
1888	return NOTIFY_OK;
1889}
1890
1891static void reclaim_high(struct mem_cgroup *memcg,
1892			 unsigned int nr_pages,
1893			 gfp_t gfp_mask)
1894{
1895	do {
1896		if (page_counter_read(&memcg->memory) <= memcg->high)
1897			continue;
1898		mem_cgroup_events(memcg, MEMCG_HIGH, 1);
1899		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
1900	} while ((memcg = parent_mem_cgroup(memcg)));
1901}
1902
1903static void high_work_func(struct work_struct *work)
1904{
1905	struct mem_cgroup *memcg;
1906
1907	memcg = container_of(work, struct mem_cgroup, high_work);
1908	reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
1909}
1910
1911/*
1912 * Scheduled by try_charge() to be executed from the userland return path
1913 * and reclaims memory over the high limit.
1914 */
1915void mem_cgroup_handle_over_high(void)
1916{
1917	unsigned int nr_pages = current->memcg_nr_pages_over_high;
1918	struct mem_cgroup *memcg;
1919
1920	if (likely(!nr_pages))
1921		return;
1922
1923	memcg = get_mem_cgroup_from_mm(current->mm);
1924	reclaim_high(memcg, nr_pages, GFP_KERNEL);
1925	css_put(&memcg->css);
1926	current->memcg_nr_pages_over_high = 0;
1927}
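/*
 * Flow sketch (added commentary): try_charge() notices that usage has
 * gone above memcg->high in its done_restock path, accumulates the
 * excess in current->memcg_nr_pages_over_high and calls
 * set_notify_resume().  On the way back to userspace the resume
 * notifier ends up calling mem_cgroup_handle_over_high(), which does
 * the actual reclaim in a context where GFP_KERNEL is always safe.
 */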
1928
1929static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
1930		      unsigned int nr_pages)
1931{
1932	unsigned int batch = max(CHARGE_BATCH, nr_pages);
1933	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1934	struct mem_cgroup *mem_over_limit;
1935	struct page_counter *counter;
1936	unsigned long nr_reclaimed;
1937	bool may_swap = true;
1938	bool drained = false;
1939
1940	if (mem_cgroup_is_root(memcg))
1941		return 0;
1942retry:
1943	if (consume_stock(memcg, nr_pages))
1944		return 0;
1945
1946	if (!do_memsw_account() ||
1947	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
1948		if (page_counter_try_charge(&memcg->memory, batch, &counter))
1949			goto done_restock;
1950		if (do_memsw_account())
1951			page_counter_uncharge(&memcg->memsw, batch);
1952		mem_over_limit = mem_cgroup_from_counter(counter, memory);
1953	} else {
1954		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
1955		may_swap = false;
1956	}
1957
1958	if (batch > nr_pages) {
1959		batch = nr_pages;
1960		goto retry;
1961	}
1962
1963	/*
1964	 * Unlike in global OOM situations, memcg is not in a physical
1965	 * memory shortage.  Allow dying and OOM-killed tasks to
1966	 * bypass the last charges so that they can exit quickly and
1967	 * free their memory.
1968	 */
1969	if (unlikely(test_thread_flag(TIF_MEMDIE) ||
1970		     fatal_signal_pending(current) ||
1971		     current->flags & PF_EXITING))
1972		goto force;
1973
1974	if (unlikely(task_in_memcg_oom(current)))
1975		goto nomem;
1976
1977	if (!gfpflags_allow_blocking(gfp_mask))
1978		goto nomem;
1979
1980	mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);
1981
1982	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
1983						    gfp_mask, may_swap);
1984
1985	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
1986		goto retry;
1987
1988	if (!drained) {
1989		drain_all_stock(mem_over_limit);
1990		drained = true;
1991		goto retry;
1992	}
1993
1994	if (gfp_mask & __GFP_NORETRY)
1995		goto nomem;
1996	/*
1997	 * Even though the limit is exceeded at this point, reclaim
1998	 * may have been able to free some pages.  Retry the charge
1999	 * before killing the task.
2000	 *
2001	 * Only for regular pages, though: huge pages are rather
2002	 * unlikely to succeed so close to the limit, and we fall back
2003	 * to regular pages anyway in case of failure.
2004	 */
2005	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2006		goto retry;
2007	/*
2008	 * During a task move, charges can be double-counted, so it's better
2009	 * to wait until the end of the move if one is in progress.
2010	 */
2011	if (mem_cgroup_wait_acct_move(mem_over_limit))
2012		goto retry;
2013
2014	if (nr_retries--)
2015		goto retry;
2016
2017	if (gfp_mask & __GFP_NOFAIL)
2018		goto force;
2019
2020	if (fatal_signal_pending(current))
2021		goto force;
2022
2023	mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);
2024
2025	mem_cgroup_oom(mem_over_limit, gfp_mask,
2026		       get_order(nr_pages * PAGE_SIZE));
2027nomem:
2028	if (!(gfp_mask & __GFP_NOFAIL))
2029		return -ENOMEM;
2030force:
2031	/*
2032	 * The allocation either can't fail or will lead to more memory
2033	 * being freed very soon.  Allow memory usage go over the limit
2034	 * temporarily by force charging it.
2035	 */
2036	page_counter_charge(&memcg->memory, nr_pages);
2037	if (do_memsw_account())
2038		page_counter_charge(&memcg->memsw, nr_pages);
2039	css_get_many(&memcg->css, nr_pages);
2040
2041	return 0;
2042
2043done_restock:
2044	css_get_many(&memcg->css, batch);
2045	if (batch > nr_pages)
2046		refill_stock(memcg, batch - nr_pages);
2047
2048	/*
2049	 * If the hierarchy is above the normal consumption range, schedule
2050	 * reclaim on returning to userland.  We can perform reclaim here
2051	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2052	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2053	 * not recorded as it most likely matches current's and won't
2054	 * change in the meantime.  As high limit is checked again before
2055	 * reclaim, the cost of mismatch is negligible.
2056	 */
2057	do {
2058		if (page_counter_read(&memcg->memory) > memcg->high) {
2059			/* Don't bother a random interrupted task */
2060			if (in_interrupt()) {
2061				schedule_work(&memcg->high_work);
2062				break;
2063			}
2064			current->memcg_nr_pages_over_high += batch;
2065			set_notify_resume(current);
2066			break;
2067		}
2068	} while ((memcg = parent_mem_cgroup(memcg)));
2069
2070	return 0;
2071}
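/*
 * Worked example (added commentary): a GFP_KERNEL charge of one page
 * against a memcg sitting at its limit takes roughly this path:
 * consume_stock() fails, page_counter_try_charge() fails, direct
 * reclaim runs via try_to_free_mem_cgroup_pages(), the per-cpu stocks
 * are drained once, and the charge is retried up to
 * MEM_CGROUP_RECLAIM_RETRIES (5) times before the memcg OOM handling
 * in mem_cgroup_oom() is invoked as a last resort.
 */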
2072
2073static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2074{
2075	if (mem_cgroup_is_root(memcg))
2076		return;
2077
2078	page_counter_uncharge(&memcg->memory, nr_pages);
2079	if (do_memsw_account())
2080		page_counter_uncharge(&memcg->memsw, nr_pages);
2081
2082	css_put_many(&memcg->css, nr_pages);
2083}
2084
2085static void lock_page_lru(struct page *page, int *isolated)
2086{
2087	struct zone *zone = page_zone(page);
2088
2089	spin_lock_irq(&zone->lru_lock);
2090	if (PageLRU(page)) {
2091		struct lruvec *lruvec;
2092
2093		lruvec = mem_cgroup_page_lruvec(page, zone);
2094		ClearPageLRU(page);
2095		del_page_from_lru_list(page, lruvec, page_lru(page));
2096		*isolated = 1;
2097	} else
2098		*isolated = 0;
2099}
2100
2101static void unlock_page_lru(struct page *page, int isolated)
2102{
2103	struct zone *zone = page_zone(page);
2104
2105	if (isolated) {
2106		struct lruvec *lruvec;
2107
2108		lruvec = mem_cgroup_page_lruvec(page, zone);
2109		VM_BUG_ON_PAGE(PageLRU(page), page);
2110		SetPageLRU(page);
2111		add_page_to_lru_list(page, lruvec, page_lru(page));
2112	}
2113	spin_unlock_irq(&zone->lru_lock);
2114}
2115
2116static void commit_charge(struct page *page, struct mem_cgroup *memcg,
2117			  bool lrucare)
2118{
2119	int isolated;
2120
2121	VM_BUG_ON_PAGE(page->mem_cgroup, page);
2122
2123	/*
2124	 * In some cases (SwapCache, FUSE's splice_buf->radixtree), the page
2125	 * may already be on some other mem_cgroup's LRU.  Take care of it.
2126	 */
2127	if (lrucare)
2128		lock_page_lru(page, &isolated);
2129
2130	/*
2131	 * Nobody should be changing or seriously looking at
2132	 * page->mem_cgroup at this point:
2133	 *
2134	 * - the page is uncharged
2135	 *
2136	 * - the page is off-LRU
2137	 *
2138	 * - an anonymous fault has exclusive page access, except for
2139	 *   a locked page table
2140	 *
2141	 * - a page cache insertion, a swapin fault, or a migration
2142	 *   have the page locked
2143	 */
2144	page->mem_cgroup = memcg;
2145
2146	if (lrucare)
2147		unlock_page_lru(page, isolated);
2148}
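/*
 * Illustrative call sequence (added commentary): the public charge API
 * built on try_charge()/commit_charge() is used by fault handlers
 * roughly as
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
 *		goto oom;
 *	... insert the page into the page tables or page cache ...
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 *
 * with mem_cgroup_cancel_charge() on the error path.  The signatures
 * shown follow the wrappers defined later in this file.
 */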
2149
2150#ifndef CONFIG_SLOB
2151static int memcg_alloc_cache_id(void)
2152{
2153	int id, size;
2154	int err;
2155
2156	id = ida_simple_get(&memcg_cache_ida,
2157			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2158	if (id < 0)
2159		return id;
2160
2161	if (id < memcg_nr_cache_ids)
2162		return id;
2163
2164	/*
2165	 * There's no space for the new id in memcg_caches arrays,
2166	 * so we have to grow them.
2167	 */
2168	down_write(&memcg_cache_ids_sem);
2169
2170	size = 2 * (id + 1);
2171	if (size < MEMCG_CACHES_MIN_SIZE)
2172		size = MEMCG_CACHES_MIN_SIZE;
2173	else if (size > MEMCG_CACHES_MAX_SIZE)
2174		size = MEMCG_CACHES_MAX_SIZE;
2175
2176	err = memcg_update_all_caches(size);
2177	if (!err)
2178		err = memcg_update_all_list_lrus(size);
2179	if (!err)
2180		memcg_nr_cache_ids = size;
2181
2182	up_write(&memcg_cache_ids_sem);
2183
2184	if (err) {
2185		ida_simple_remove(&memcg_cache_ida, id);
2186		return err;
2187	}
2188	return id;
2189}
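/*
 * Worked example (added commentary): if the ida hands out id 4 while
 * memcg_nr_cache_ids is still 4, the arrays are grown to
 * size = 2 * (4 + 1) = 10 entries (clamped between
 * MEMCG_CACHES_MIN_SIZE and MEMCG_CACHES_MAX_SIZE), so roughly half of
 * the slots stay free for future cgroups before the next resize.
 */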
2190
2191static void memcg_free_cache_id(int id)
2192{
2193	ida_simple_remove(&memcg_cache_ida, id);
2194}
2195
2196struct memcg_kmem_cache_create_work {
2197	struct mem_cgroup *memcg;
2198	struct kmem_cache *cachep;
2199	struct work_struct work;
2200};
2201
2202static void memcg_kmem_cache_create_func(struct work_struct *w)
2203{
2204	struct memcg_kmem_cache_create_work *cw =
2205		container_of(w, struct memcg_kmem_cache_create_work, work);
2206	struct mem_cgroup *memcg = cw->memcg;
2207	struct kmem_cache *cachep = cw->cachep;
2208
2209	memcg_create_kmem_cache(memcg, cachep);
2210
2211	css_put(&memcg->css);
2212	kfree(cw);
2213}
2214
2215/*
2216 * Enqueue the creation of a per-memcg kmem_cache.
2217 */
2218static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2219					       struct kmem_cache *cachep)
2220{
2221	struct memcg_kmem_cache_create_work *cw;
2222
2223	cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
2224	if (!cw)
2225		return;
2226
2227	css_get(&memcg->css);
2228
2229	cw->memcg = memcg;
2230	cw->cachep = cachep;
2231	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
2232
2233	schedule_work(&cw->work);
2234}
2235
2236static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2237					     struct kmem_cache *cachep)
2238{
2239	/*
2240	 * We need to stop accounting when we kmalloc, because if the
2241	 * corresponding kmalloc cache is not yet created, the first allocation
2242	 * in __memcg_schedule_kmem_cache_create will recurse.
2243	 *
2244	 * However, it is better to enclose the whole function. Depending on
2245	 * the debugging options enabled, INIT_WORK(), for instance, can
2246	 * trigger an allocation. This too, will make us recurse. Because at
2247	 * this point we can't allow ourselves back into memcg_kmem_get_cache,
2248	 * the safest choice is to do it like this, wrapping the whole function.
2249	 */
2250	current->memcg_kmem_skip_account = 1;
2251	__memcg_schedule_kmem_cache_create(memcg, cachep);
2252	current->memcg_kmem_skip_account = 0;
2253}
2254
2255/*
2256 * Return the kmem_cache we're supposed to use for a slab allocation.
2257 * We try to use the current memcg's version of the cache.
2258 *
2259 * If the cache does not exist yet, i.e. we are the first user of it,
2260 * we either create it immediately, if possible, or create it asynchronously
2261 * in a workqueue.
2262 * In the latter case, we will let the current allocation go through with
2263 * the original cache.
2264 *
2265 * Can't be called in interrupt context or from kernel threads.
2266 * This function needs to be called with rcu_read_lock() held.
2267 */
2268struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
2269{
2270	struct mem_cgroup *memcg;
2271	struct kmem_cache *memcg_cachep;
2272	int kmemcg_id;
2273
2274	VM_BUG_ON(!is_root_cache(cachep));
2275
2276	if (cachep->flags & SLAB_ACCOUNT)
2277		gfp |= __GFP_ACCOUNT;
2278
2279	if (!(gfp & __GFP_ACCOUNT))
2280		return cachep;
2281
2282	if (current->memcg_kmem_skip_account)
2283		return cachep;
2284
2285	memcg = get_mem_cgroup_from_mm(current->mm);
2286	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2287	if (kmemcg_id < 0)
2288		goto out;
2289
2290	memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
2291	if (likely(memcg_cachep))
2292		return memcg_cachep;
2293
2294	/*
2295	 * If we are in a safe context (can wait, and not in interrupt
2296 * context), we could be predictable and return right away.
2297	 * This would guarantee that the allocation being performed
2298	 * already belongs in the new cache.
2299	 *
2300	 * However, there are some clashes that can arrive from locking.
2301	 * For instance, because we acquire the slab_mutex while doing
2302	 * memcg_create_kmem_cache, this means no further allocation
2303	 * could happen with the slab_mutex held. So it's better to
2304	 * defer everything.
2305	 */
2306	memcg_schedule_kmem_cache_create(memcg, cachep);
2307out:
2308	css_put(&memcg->css);
2309	return cachep;
2310}
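/*
 * Illustrative path (added commentary): an allocation such as
 *
 *	kmem_cache_alloc(cachep, GFP_KERNEL | __GFP_ACCOUNT);
 *
 * reaches this function through the slab allocator's pre-allocation
 * hook and is transparently redirected to the current memcg's copy of
 * @cachep if one exists; otherwise creation is scheduled and this one
 * allocation falls back to the root cache, as described above.
 */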
2311
2312void __memcg_kmem_put_cache(struct kmem_cache *cachep)
2313{
2314	if (!is_root_cache(cachep))
2315		css_put(&cachep->memcg_params.memcg->css);
2316}
2317
2318int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2319			      struct mem_cgroup *memcg)
2320{
2321	unsigned int nr_pages = 1 << order;
2322	struct page_counter *counter;
2323	int ret;
2324
2325	ret = try_charge(memcg, gfp, nr_pages);
2326	if (ret)
2327		return ret;
2328
2329	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2330	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2331		cancel_charge(memcg, nr_pages);
2332		return -ENOMEM;
2333	}
2334
2335	page->mem_cgroup = memcg;
2336
2337	return 0;
2338}
2339
2340int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
2341{
2342	struct mem_cgroup *memcg;
2343	int ret = 0;
2344
2345	memcg = get_mem_cgroup_from_mm(current->mm);
2346	if (!mem_cgroup_is_root(memcg))
2347		ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
2348	css_put(&memcg->css);
2349	return ret;
2350}
2351
2352void __memcg_kmem_uncharge(struct page *page, int order)
2353{
2354	struct mem_cgroup *memcg = page->mem_cgroup;
2355	unsigned int nr_pages = 1 << order;
2356
2357	if (!memcg)
2358		return;
2359
2360	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
2361
2362	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2363		page_counter_uncharge(&memcg->kmem, nr_pages);
2364
2365	page_counter_uncharge(&memcg->memory, nr_pages);
2366	if (do_memsw_account())
2367		page_counter_uncharge(&memcg->memsw, nr_pages);
2368
2369	page->mem_cgroup = NULL;
2370	css_put_many(&memcg->css, nr_pages);
2371}
2372#endif /* !CONFIG_SLOB */
2373
2374#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2375
2376/*
2377 * Tail pages are not marked as "used", so mark them here.  We're under
2378 * zone->lru_lock, and migration entries are set up in all page mappings.
2379 */
2380void mem_cgroup_split_huge_fixup(struct page *head)
2381{
2382	int i;
2383
2384	if (mem_cgroup_disabled())
2385		return;
2386
2387	for (i = 1; i < HPAGE_PMD_NR; i++)
2388		head[i].mem_cgroup = head->mem_cgroup;
2389
2390	__this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
2391		       HPAGE_PMD_NR);
2392}
2393#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2394
2395#ifdef CONFIG_MEMCG_SWAP
2396static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
2397					 bool charge)
2398{
2399	int val = (charge) ? 1 : -1;
2400	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
2401}
2402
2403/**
2404 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2405 * @entry: swap entry to be moved
2406 * @from:  mem_cgroup which the entry is moved from
2407 * @to:  mem_cgroup which the entry is moved to
2408 *
2409 * It succeeds only when the swap_cgroup's record for this entry is the same
2410 * as the mem_cgroup's id of @from.
2411 *
2412 * Returns 0 on success, -EINVAL on failure.
2413 *
2414 * The caller must have charged to @to, IOW, called page_counter_charge() for
2415 * both res and memsw, and called css_get().
2416 */
2417static int mem_cgroup_move_swap_account(swp_entry_t entry,
2418				struct mem_cgroup *from, struct mem_cgroup *to)
2419{
2420	unsigned short old_id, new_id;
2421
2422	old_id = mem_cgroup_id(from);
2423	new_id = mem_cgroup_id(to);
2424
2425	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2426		mem_cgroup_swap_statistics(from, false);
2427		mem_cgroup_swap_statistics(to, true);
2428		return 0;
2429	}
2430	return -EINVAL;
2431}
2432#else
2433static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2434				struct mem_cgroup *from, struct mem_cgroup *to)
2435{
2436	return -EINVAL;
2437}
2438#endif
2439
2440static DEFINE_MUTEX(memcg_limit_mutex);
2441
2442static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2443				   unsigned long limit)
2444{
2445	unsigned long curusage;
2446	unsigned long oldusage;
2447	bool enlarge = false;
2448	int retry_count;
2449	int ret;
2450
2451	/*
2452	 * To keep hierarchical reclaim simple, how long we should retry
2453	 * depends on the caller. We set our retry count to be a function
2454	 * of the number of children we should visit in this loop.
2455	 */
2456	retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2457		      mem_cgroup_count_children(memcg);
2458
2459	oldusage = page_counter_read(&memcg->memory);
2460
2461	do {
2462		if (signal_pending(current)) {
2463			ret = -EINTR;
2464			break;
2465		}
2466
2467		mutex_lock(&memcg_limit_mutex);
2468		if (limit > memcg->memsw.limit) {
2469			mutex_unlock(&memcg_limit_mutex);
2470			ret = -EINVAL;
2471			break;
2472		}
2473		if (limit > memcg->memory.limit)
2474			enlarge = true;
2475		ret = page_counter_limit(&memcg->memory, limit);
2476		mutex_unlock(&memcg_limit_mutex);
2477
2478		if (!ret)
2479			break;
2480
2481		try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
2482
2483		curusage = page_counter_read(&memcg->memory);
2484		/* Usage is reduced ? */
2485		if (curusage >= oldusage)
2486			retry_count--;
2487		else
2488			oldusage = curusage;
2489	} while (retry_count);
2490
2491	if (!ret && enlarge)
2492		memcg_oom_recover(memcg);
2493
2494	return ret;
2495}
2496
2497static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2498					 unsigned long limit)
2499{
2500	unsigned long curusage;
2501	unsigned long oldusage;
2502	bool enlarge = false;
2503	int retry_count;
2504	int ret;
2505
2506	/* see mem_cgroup_resize_limit */
2507	retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2508		      mem_cgroup_count_children(memcg);
2509
2510	oldusage = page_counter_read(&memcg->memsw);
2511
2512	do {
2513		if (signal_pending(current)) {
2514			ret = -EINTR;
2515			break;
2516		}
2517
2518		mutex_lock(&memcg_limit_mutex);
2519		if (limit < memcg->memory.limit) {
2520			mutex_unlock(&memcg_limit_mutex);
2521			ret = -EINVAL;
2522			break;
2523		}
2524		if (limit > memcg->memsw.limit)
2525			enlarge = true;
2526		ret = page_counter_limit(&memcg->memsw, limit);
2527		mutex_unlock(&memcg_limit_mutex);
2528
2529		if (!ret)
2530			break;
2531
2532		try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
2533
2534		curusage = page_counter_read(&memcg->memsw);
2535		/* Usage is reduced ? */
2536		if (curusage >= oldusage)
2537			retry_count--;
2538		else
2539			oldusage = curusage;
2540	} while (retry_count);
2541
2542	if (!ret && enlarge)
2543		memcg_oom_recover(memcg);
2544
2545	return ret;
2546}
2547
2548unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2549					    gfp_t gfp_mask,
2550					    unsigned long *total_scanned)
2551{
2552	unsigned long nr_reclaimed = 0;
2553	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2554	unsigned long reclaimed;
2555	int loop = 0;
2556	struct mem_cgroup_tree_per_zone *mctz;
2557	unsigned long excess;
2558	unsigned long nr_scanned;
2559
2560	if (order > 0)
2561		return 0;
2562
2563	mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
2564	/*
2565	 * This loop can run for a while, especially if mem_cgroups continuously
2566	 * keep exceeding their soft limit and putting the system under
2567	 * pressure.
2568	 */
2569	do {
2570		if (next_mz)
2571			mz = next_mz;
2572		else
2573			mz = mem_cgroup_largest_soft_limit_node(mctz);
2574		if (!mz)
2575			break;
2576
2577		nr_scanned = 0;
2578		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
2579						    gfp_mask, &nr_scanned);
2580		nr_reclaimed += reclaimed;
2581		*total_scanned += nr_scanned;
2582		spin_lock_irq(&mctz->lock);
2583		__mem_cgroup_remove_exceeded(mz, mctz);
2584
2585		/*
2586		 * If we failed to reclaim anything from this memory cgroup
2587		 * it is time to move on to the next cgroup
2588		 */
2589		next_mz = NULL;
2590		if (!reclaimed)
2591			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2592
2593		excess = soft_limit_excess(mz->memcg);
2594		/*
2595		 * One school of thought says that we should not add
2596		 * back the node to the tree if reclaim returns 0.
2597		 * But our reclaim could return 0 simply because, due
2598		 * to priority, we are exposing a smaller subset of
2599		 * memory to reclaim from. Consider this a longer-term
2600		 * TODO.
2601		 */
2602		/* If excess == 0, no tree ops */
2603		__mem_cgroup_insert_exceeded(mz, mctz, excess);
2604		spin_unlock_irq(&mctz->lock);
2605		css_put(&mz->memcg->css);
2606		loop++;
2607		/*
2608		 * Could not reclaim anything and there are no more
2609		 * mem cgroups to try or we seem to be looping without
2610		 * reclaiming anything.
2611		 */
2612		if (!nr_reclaimed &&
2613			(next_mz == NULL ||
2614			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2615			break;
2616	} while (!nr_reclaimed);
2617	if (next_mz)
2618		css_put(&next_mz->memcg->css);
2619	return nr_reclaimed;
2620}
2621
2622/*
2623 * Test whether @memcg has children, dead or alive.  Note that this
2624 * function doesn't care whether @memcg has use_hierarchy enabled and
2625 * returns %true if there are child csses according to the cgroup
2626 * hierarchy.  Testing use_hierarchy is the caller's responsibility.
2627 */
2628static inline bool memcg_has_children(struct mem_cgroup *memcg)
2629{
2630	bool ret;
2631
2632	rcu_read_lock();
2633	ret = css_next_child(NULL, &memcg->css);
2634	rcu_read_unlock();
2635	return ret;
2636}
2637
2638/*
2639 * Reclaims as many pages from the given memcg as possible and moves
2640 * the rest to the parent.
2641 *
2642 * Caller is responsible for holding css reference for memcg.
2643 */
2644static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2645{
2646	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2647
2648	/* we call try-to-free pages to make this cgroup empty */
2649	lru_add_drain_all();
2650	/* try to free all pages in this cgroup */
2651	while (nr_retries && page_counter_read(&memcg->memory)) {
2652		int progress;
2653
2654		if (signal_pending(current))
2655			return -EINTR;
2656
2657		progress = try_to_free_mem_cgroup_pages(memcg, 1,
2658							GFP_KERNEL, true);
2659		if (!progress) {
2660			nr_retries--;
2661			/* maybe some writeback is necessary */
2662			congestion_wait(BLK_RW_ASYNC, HZ/10);
2663		}
2664
2665	}
2666
2667	return 0;
2668}
2669
2670static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2671					    char *buf, size_t nbytes,
2672					    loff_t off)
2673{
2674	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2675
2676	if (mem_cgroup_is_root(memcg))
2677		return -EINVAL;
2678	return mem_cgroup_force_empty(memcg) ?: nbytes;
2679}
2680
2681static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2682				     struct cftype *cft)
2683{
2684	return mem_cgroup_from_css(css)->use_hierarchy;
2685}
2686
2687static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2688				      struct cftype *cft, u64 val)
2689{
2690	int retval = 0;
2691	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2692	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
2693
2694	if (memcg->use_hierarchy == val)
2695		return 0;
2696
2697	/*
2698	 * If parent's use_hierarchy is set, we can't make any modifications
2699	 * in the child subtrees. If it is unset, then the change can
2700	 * occur, provided the current cgroup has no children.
2701	 *
2702	 * For the root cgroup, parent_memcg is NULL, so we allow the value to
2703	 * be set if there are no children.
2704	 */
2705	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
2706				(val == 1 || val == 0)) {
2707		if (!memcg_has_children(memcg))
2708			memcg->use_hierarchy = val;
2709		else
2710			retval = -EBUSY;
2711	} else
2712		retval = -EINVAL;
2713
2714	return retval;
2715}
2716
2717static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
2718{
2719	struct mem_cgroup *iter;
2720	int i;
2721
2722	memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT);
2723
2724	for_each_mem_cgroup_tree(iter, memcg) {
2725		for (i = 0; i < MEMCG_NR_STAT; i++)
2726			stat[i] += mem_cgroup_read_stat(iter, i);
2727	}
2728}
2729
2730static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
2731{
2732	struct mem_cgroup *iter;
2733	int i;
2734
2735	memset(events, 0, sizeof(*events) * MEMCG_NR_EVENTS);
2736
2737	for_each_mem_cgroup_tree(iter, memcg) {
2738		for (i = 0; i < MEMCG_NR_EVENTS; i++)
2739			events[i] += mem_cgroup_read_events(iter, i);
2740	}
2741}
2742
2743static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
2744{
2745	unsigned long val = 0;
2746
2747	if (mem_cgroup_is_root(memcg)) {
2748		struct mem_cgroup *iter;
2749
2750		for_each_mem_cgroup_tree(iter, memcg) {
2751			val += mem_cgroup_read_stat(iter,
2752					MEM_CGROUP_STAT_CACHE);
2753			val += mem_cgroup_read_stat(iter,
2754					MEM_CGROUP_STAT_RSS);
2755			if (swap)
2756				val += mem_cgroup_read_stat(iter,
2757						MEM_CGROUP_STAT_SWAP);
2758		}
2759	} else {
2760		if (!swap)
2761			val = page_counter_read(&memcg->memory);
2762		else
2763			val = page_counter_read(&memcg->memsw);
2764	}
2765	return val;
2766}
2767
2768enum {
2769	RES_USAGE,
2770	RES_LIMIT,
2771	RES_MAX_USAGE,
2772	RES_FAILCNT,
2773	RES_SOFT_LIMIT,
2774};
2775
2776static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
2777			       struct cftype *cft)
2778{
2779	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2780	struct page_counter *counter;
2781
2782	switch (MEMFILE_TYPE(cft->private)) {
2783	case _MEM:
2784		counter = &memcg->memory;
2785		break;
2786	case _MEMSWAP:
2787		counter = &memcg->memsw;
2788		break;
2789	case _KMEM:
2790		counter = &memcg->kmem;
2791		break;
2792	case _TCP:
2793		counter = &memcg->tcpmem;
2794		break;
2795	default:
2796		BUG();
2797	}
2798
2799	switch (MEMFILE_ATTR(cft->private)) {
2800	case RES_USAGE:
2801		if (counter == &memcg->memory)
2802			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
2803		if (counter == &memcg->memsw)
2804			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
2805		return (u64)page_counter_read(counter) * PAGE_SIZE;
2806	case RES_LIMIT:
2807		return (u64)counter->limit * PAGE_SIZE;
2808	case RES_MAX_USAGE:
2809		return (u64)counter->watermark * PAGE_SIZE;
2810	case RES_FAILCNT:
2811		return counter->failcnt;
2812	case RES_SOFT_LIMIT:
2813		return (u64)memcg->soft_limit * PAGE_SIZE;
2814	default:
2815		BUG();
2816	}
2817}
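/*
 * Example (added commentary): reading memory.max_usage_in_bytes is
 * encoded as MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), so the switches
 * above select counter = &memcg->memory and return
 * counter->watermark * PAGE_SIZE; memory.memsw.usage_in_bytes likewise
 * selects &memcg->memsw and reports mem_cgroup_usage(memcg, true).
 */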
2818
2819#ifndef CONFIG_SLOB
2820static int memcg_online_kmem(struct mem_cgroup *memcg)
2821{
2822	int memcg_id;
2823
2824	if (cgroup_memory_nokmem)
2825		return 0;
2826
2827	BUG_ON(memcg->kmemcg_id >= 0);
2828	BUG_ON(memcg->kmem_state);
2829
2830	memcg_id = memcg_alloc_cache_id();
2831	if (memcg_id < 0)
2832		return memcg_id;
2833
2834	static_branch_inc(&memcg_kmem_enabled_key);
2835	/*
2836	 * A memory cgroup is considered kmem-online as soon as it gets
2837	 * kmemcg_id. Setting the id after enabling static branching will
2838	 * guarantee no one starts accounting before all call sites are
2839	 * patched.
2840	 */
2841	memcg->kmemcg_id = memcg_id;
2842	memcg->kmem_state = KMEM_ONLINE;
2843
2844	return 0;
2845}
2846
2847static void memcg_offline_kmem(struct mem_cgroup *memcg)
2848{
2849	struct cgroup_subsys_state *css;
2850	struct mem_cgroup *parent, *child;
2851	int kmemcg_id;
2852
2853	if (memcg->kmem_state != KMEM_ONLINE)
2854		return;
2855	/*
2856	 * Clear the online state before clearing memcg_caches array
2857	 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
2858	 * guarantees that no cache will be created for this cgroup
2859	 * after we are done (see memcg_create_kmem_cache()).
2860	 */
2861	memcg->kmem_state = KMEM_ALLOCATED;
2862
2863	memcg_deactivate_kmem_caches(memcg);
2864
2865	kmemcg_id = memcg->kmemcg_id;
2866	BUG_ON(kmemcg_id < 0);
2867
2868	parent = parent_mem_cgroup(memcg);
2869	if (!parent)
2870		parent = root_mem_cgroup;
2871
2872	/*
2873	 * Change kmemcg_id of this cgroup and all its descendants to the
2874	 * parent's id, and then move all entries from this cgroup's list_lrus
2875	 * to ones of the parent. After we have finished, all list_lrus
2876	 * corresponding to this cgroup are guaranteed to remain empty. The
2877	 * ordering is imposed by list_lru_node->lock taken by
2878	 * memcg_drain_all_list_lrus().
2879	 */
2880	css_for_each_descendant_pre(css, &memcg->css) {
2881		child = mem_cgroup_from_css(css);
2882		BUG_ON(child->kmemcg_id != kmemcg_id);
2883		child->kmemcg_id = parent->kmemcg_id;
2884		if (!memcg->use_hierarchy)
2885			break;
2886	}
2887	memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
2888
2889	memcg_free_cache_id(kmemcg_id);
2890}
2891
2892static void memcg_free_kmem(struct mem_cgroup *memcg)
2893{
2894	/* css_alloc() failed, offlining didn't happen */
2895	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
2896		memcg_offline_kmem(memcg);
2897
2898	if (memcg->kmem_state == KMEM_ALLOCATED) {
2899		memcg_destroy_kmem_caches(memcg);
2900		static_branch_dec(&memcg_kmem_enabled_key);
2901		WARN_ON(page_counter_read(&memcg->kmem));
2902	}
2903}
2904#else
2905static int memcg_online_kmem(struct mem_cgroup *memcg)
2906{
2907	return 0;
2908}
2909static void memcg_offline_kmem(struct mem_cgroup *memcg)
2910{
2911}
2912static void memcg_free_kmem(struct mem_cgroup *memcg)
2913{
2914}
2915#endif /* !CONFIG_SLOB */
2916
2917static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
2918				   unsigned long limit)
2919{
2920	int ret;
2921
2922	mutex_lock(&memcg_limit_mutex);
2923	ret = page_counter_limit(&memcg->kmem, limit);
2924	mutex_unlock(&memcg_limit_mutex);
2925	return ret;
2926}
2927
2928static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
2929{
2930	int ret;
2931
2932	mutex_lock(&memcg_limit_mutex);
2933
2934	ret = page_counter_limit(&memcg->tcpmem, limit);
2935	if (ret)
2936		goto out;
2937
2938	if (!memcg->tcpmem_active) {
2939		/*
2940		 * The active flag needs to be written after the static_key
2941		 * update. This is what guarantees that the socket activation
2942		 * function is the last one to run. See sock_update_memcg() for
2943		 * details, and note that we don't mark any socket as belonging
2944		 * to this memcg until that flag is up.
2945		 *
2946		 * We need to do this, because static_keys will span multiple
2947		 * sites, but we can't control their order. If we mark a socket
2948		 * as accounted, but the accounting functions are not patched in
2949		 * yet, we'll lose accounting.
2950		 *
2951		 * We never race with the readers in sock_update_memcg(),
2952		 * because when this value changes, the code to process it is not
2953		 * patched in yet.
2954		 */
2955		static_branch_inc(&memcg_sockets_enabled_key);
2956		memcg->tcpmem_active = true;
2957	}
2958out:
2959	mutex_unlock(&memcg_limit_mutex);
2960	return ret;
2961}
2962
2963/*
2964 * The user of this function is...
2965 * RES_LIMIT.
2966 */
2967static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
2968				char *buf, size_t nbytes, loff_t off)
2969{
2970	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2971	unsigned long nr_pages;
2972	int ret;
2973
2974	buf = strstrip(buf);
2975	ret = page_counter_memparse(buf, "-1", &nr_pages);
2976	if (ret)
2977		return ret;
2978
2979	switch (MEMFILE_ATTR(of_cft(of)->private)) {
2980	case RES_LIMIT:
2981		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
2982			ret = -EINVAL;
2983			break;
2984		}
2985		switch (MEMFILE_TYPE(of_cft(of)->private)) {
2986		case _MEM:
2987			ret = mem_cgroup_resize_limit(memcg, nr_pages);
2988			break;
2989		case _MEMSWAP:
2990			ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
2991			break;
2992		case _KMEM:
2993			ret = memcg_update_kmem_limit(memcg, nr_pages);
2994			break;
2995		case _TCP:
2996			ret = memcg_update_tcp_limit(memcg, nr_pages);
2997			break;
2998		}
2999		break;
3000	case RES_SOFT_LIMIT:
3001		memcg->soft_limit = nr_pages;
3002		ret = 0;
3003		break;
3004	}
3005	return ret ?: nbytes;
3006}
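/*
 * Worked example (added commentary): writing "512M" to
 * memory.limit_in_bytes lands here with nr_pages == 131072 (assuming a
 * 4K PAGE_SIZE) and is routed to mem_cgroup_resize_limit(); writing
 * "-1" parses to PAGE_COUNTER_MAX, i.e. "unlimited".
 */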
3007
3008static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3009				size_t nbytes, loff_t off)
3010{
3011	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3012	struct page_counter *counter;
3013
3014	switch (MEMFILE_TYPE(of_cft(of)->private)) {
3015	case _MEM:
3016		counter = &memcg->memory;
3017		break;
3018	case _MEMSWAP:
3019		counter = &memcg->memsw;
3020		break;
3021	case _KMEM:
3022		counter = &memcg->kmem;
3023		break;
3024	case _TCP:
3025		counter = &memcg->tcpmem;
3026		break;
3027	default:
3028		BUG();
3029	}
3030
3031	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3032	case RES_MAX_USAGE:
3033		page_counter_reset_watermark(counter);
3034		break;
3035	case RES_FAILCNT:
3036		counter->failcnt = 0;
3037		break;
3038	default:
3039		BUG();
3040	}
3041
3042	return nbytes;
3043}
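/*
 * Example (added commentary): writing any value to
 * memory.max_usage_in_bytes resets the high-water mark to the current
 * usage, and writing to memory.failcnt clears the failure counter; the
 * written value itself is ignored.
 */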
3044
3045static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3046					struct cftype *cft)
3047{
3048	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3049}
3050
3051#ifdef CONFIG_MMU
3052static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3053					struct cftype *cft, u64 val)
3054{
3055	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3056
3057	if (val & ~MOVE_MASK)
3058		return -EINVAL;
3059
3060	/*
3061	 * No kind of locking is needed in here, because ->can_attach() will
3062	 * check this value once in the beginning of the process, and then carry
3063	 * on with stale data. This means that changes to this value will only
3064	 * affect task migrations starting after the change.
3065	 */
3066	memcg->move_charge_at_immigrate = val;
3067	return 0;
3068}
3069#else
3070static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3071					struct cftype *cft, u64 val)
3072{
3073	return -ENOSYS;
3074}
3075#endif
3076
3077#ifdef CONFIG_NUMA
3078static int memcg_numa_stat_show(struct seq_file *m, void *v)
3079{
3080	struct numa_stat {
3081		const char *name;
3082		unsigned int lru_mask;
3083	};
3084
3085	static const struct numa_stat stats[] = {
3086		{ "total", LRU_ALL },
3087		{ "file", LRU_ALL_FILE },
3088		{ "anon", LRU_ALL_ANON },
3089		{ "unevictable", BIT(LRU_UNEVICTABLE) },
3090	};
3091	const struct numa_stat *stat;
3092	int nid;
3093	unsigned long nr;
3094	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3095
3096	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3097		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3098		seq_printf(m, "%s=%lu", stat->name, nr);
3099		for_each_node_state(nid, N_MEMORY) {
3100			nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3101							  stat->lru_mask);
3102			seq_printf(m, " N%d=%lu", nid, nr);
3103		}
3104		seq_putc(m, '\n');
3105	}
3106
3107	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3108		struct mem_cgroup *iter;
3109
3110		nr = 0;
3111		for_each_mem_cgroup_tree(iter, memcg)
3112			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3113		seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3114		for_each_node_state(nid, N_MEMORY) {
3115			nr = 0;
3116			for_each_mem_cgroup_tree(iter, memcg)
3117				nr += mem_cgroup_node_nr_lru_pages(
3118					iter, nid, stat->lru_mask);
3119			seq_printf(m, " N%d=%lu", nid, nr);
3120		}
3121		seq_putc(m, '\n');
3122	}
3123
3124	return 0;
3125}
3126#endif /* CONFIG_NUMA */
3127
3128static int memcg_stat_show(struct seq_file *m, void *v)
3129{
3130	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3131	unsigned long memory, memsw;
3132	struct mem_cgroup *mi;
3133	unsigned int i;
3134
3135	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
3136		     MEM_CGROUP_STAT_NSTATS);
3137	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
3138		     MEM_CGROUP_EVENTS_NSTATS);
3139	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3140
3141	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3142		if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
3143			continue;
3144		seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
3145			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
3146	}
3147
3148	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
3149		seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
3150			   mem_cgroup_read_events(memcg, i));
3151
3152	for (i = 0; i < NR_LRU_LISTS; i++)
3153		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3154			   mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
3155
3156	/* Hierarchical information */
3157	memory = memsw = PAGE_COUNTER_MAX;
3158	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3159		memory = min(memory, mi->memory.limit);
3160		memsw = min(memsw, mi->memsw.limit);
3161	}
3162	seq_printf(m, "hierarchical_memory_limit %llu\n",
3163		   (u64)memory * PAGE_SIZE);
3164	if (do_memsw_account())
3165		seq_printf(m, "hierarchical_memsw_limit %llu\n",
3166			   (u64)memsw * PAGE_SIZE);
3167
3168	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3169		unsigned long long val = 0;
3170
3171		if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
3172			continue;
3173		for_each_mem_cgroup_tree(mi, memcg)
3174			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
3175		seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
3176	}
3177
3178	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
3179		unsigned long long val = 0;
3180
3181		for_each_mem_cgroup_tree(mi, memcg)
3182			val += mem_cgroup_read_events(mi, i);
3183		seq_printf(m, "total_%s %llu\n",
3184			   mem_cgroup_events_names[i], val);
3185	}
3186
3187	for (i = 0; i < NR_LRU_LISTS; i++) {
3188		unsigned long long val = 0;
3189
3190		for_each_mem_cgroup_tree(mi, memcg)
3191			val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3192		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
3193	}
3194
3195#ifdef CONFIG_DEBUG_VM
3196	{
3197		int nid, zid;
3198		struct mem_cgroup_per_zone *mz;
3199		struct zone_reclaim_stat *rstat;
3200		unsigned long recent_rotated[2] = {0, 0};
3201		unsigned long recent_scanned[2] = {0, 0};
3202
3203		for_each_online_node(nid)
3204			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
3205				mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
3206				rstat = &mz->lruvec.reclaim_stat;
3207
3208				recent_rotated[0] += rstat->recent_rotated[0];
3209				recent_rotated[1] += rstat->recent_rotated[1];
3210				recent_scanned[0] += rstat->recent_scanned[0];
3211				recent_scanned[1] += rstat->recent_scanned[1];
3212			}
3213		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3214		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3215		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3216		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
3217	}
3218#endif
3219
3220	return 0;
3221}
3222
3223static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3224				      struct cftype *cft)
3225{
3226	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3227
3228	return mem_cgroup_swappiness(memcg);
3229}
3230
3231static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3232				       struct cftype *cft, u64 val)
3233{
3234	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3235
3236	if (val > 100)
3237		return -EINVAL;
3238
3239	if (css->parent)
3240		memcg->swappiness = val;
3241	else
3242		vm_swappiness = val;
3243
3244	return 0;
3245}
3246
3247static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3248{
3249	struct mem_cgroup_threshold_ary *t;
3250	unsigned long usage;
3251	int i;
3252
3253	rcu_read_lock();
3254	if (!swap)
3255		t = rcu_dereference(memcg->thresholds.primary);
3256	else
3257		t = rcu_dereference(memcg->memsw_thresholds.primary);
3258
3259	if (!t)
3260		goto unlock;
3261
3262	usage = mem_cgroup_usage(memcg, swap);
3263
3264	/*
3265	 * current_threshold points to the threshold just below or equal to usage.
3266	 * If that is not the case, a threshold was crossed after the last
3267	 * call to __mem_cgroup_threshold().
3268	 */
3269	i = t->current_threshold;
3270
3271	/*
3272	 * Iterate backward over array of thresholds starting from
3273	 * current_threshold and check if a threshold is crossed.
3274	 * If none of thresholds below usage is crossed, we read
3275	 * only one element of the array here.
3276	 */
3277	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3278		eventfd_signal(t->entries[i].eventfd, 1);
3279
3280	/* i = current_threshold + 1 */
3281	i++;
3282
3283	/*
3284	 * Iterate forward over array of thresholds starting from
3285	 * current_threshold+1 and check if a threshold is crossed.
3286	 * If none of thresholds above usage is crossed, we read
3287	 * only one element of the array here.
3288	 */
3289	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3290		eventfd_signal(t->entries[i].eventfd, 1);
3291
3292	/* Update current_threshold */
3293	t->current_threshold = i - 1;
3294unlock:
3295	rcu_read_unlock();
3296}
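/*
 * Worked example (added commentary): with thresholds registered at
 * 100M, 200M and 300M and current_threshold pointing at the 100M entry,
 * a usage jump to 250M makes the forward scan signal the 200M eventfd
 * and leaves current_threshold at the 200M entry; a later drop to 150M
 * makes the backward scan signal 200M again and moves current_threshold
 * back to the 100M entry.
 */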
3297
3298static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3299{
3300	while (memcg) {
3301		__mem_cgroup_threshold(memcg, false);
3302		if (do_memsw_account())
3303			__mem_cgroup_threshold(memcg, true);
3304
3305		memcg = parent_mem_cgroup(memcg);
3306	}
3307}
3308
3309static int compare_thresholds(const void *a, const void *b)
3310{
3311	const struct mem_cgroup_threshold *_a = a;
3312	const struct mem_cgroup_threshold *_b = b;
3313
3314	if (_a->threshold > _b->threshold)
3315		return 1;
3316
3317	if (_a->threshold < _b->threshold)
3318		return -1;
3319
3320	return 0;
3321}
3322
3323static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
3324{
3325	struct mem_cgroup_eventfd_list *ev;
3326
3327	spin_lock(&memcg_oom_lock);
3328
3329	list_for_each_entry(ev, &memcg->oom_notify, list)
3330		eventfd_signal(ev->eventfd, 1);
3331
3332	spin_unlock(&memcg_oom_lock);
3333	return 0;
3334}
3335
3336static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
3337{
3338	struct mem_cgroup *iter;
3339
3340	for_each_mem_cgroup_tree(iter, memcg)
3341		mem_cgroup_oom_notify_cb(iter);
3342}
3343
3344static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3345	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
3346{
3347	struct mem_cgroup_thresholds *thresholds;
3348	struct mem_cgroup_threshold_ary *new;
3349	unsigned long threshold;
3350	unsigned long usage;
3351	int i, size, ret;
3352
3353	ret = page_counter_memparse(args, "-1", &threshold);
3354	if (ret)
3355		return ret;
3356
3357	mutex_lock(&memcg->thresholds_lock);
3358
3359	if (type == _MEM) {
3360		thresholds = &memcg->thresholds;
3361		usage = mem_cgroup_usage(memcg, false);
3362	} else if (type == _MEMSWAP) {
3363		thresholds = &memcg->memsw_thresholds;
3364		usage = mem_cgroup_usage(memcg, true);
3365	} else
3366		BUG();
3367
3368	/* Check if a threshold crossed before adding a new one */
3369	if (thresholds->primary)
3370		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
3371
3372	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3373
3374	/* Allocate memory for new array of thresholds */
3375	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3376			GFP_KERNEL);
3377	if (!new) {
3378		ret = -ENOMEM;
3379		goto unlock;
3380	}
3381	new->size = size;
3382
3383	/* Copy thresholds (if any) to new array */
3384	if (thresholds->primary) {
3385		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3386				sizeof(struct mem_cgroup_threshold));
3387	}
3388
3389	/* Add new threshold */
3390	new->entries[size - 1].eventfd = eventfd;
3391	new->entries[size - 1].threshold = threshold;
3392
3393	/* Sort thresholds. Registering of new threshold isn't time-critical */
3394	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3395			compare_thresholds, NULL);
3396
3397	/* Find current threshold */
3398	new->current_threshold = -1;
3399	for (i = 0; i < size; i++) {
3400		if (new->entries[i].threshold <= usage) {
3401			/*
3402			 * new->current_threshold will not be used until
3403			 * rcu_assign_pointer(), so it's safe to increment
3404			 * it here.
3405			 */
3406			++new->current_threshold;
3407		} else
3408			break;
3409	}
3410
3411	/* Free old spare buffer and save old primary buffer as spare */
3412	kfree(thresholds->spare);
3413	thresholds->spare = thresholds->primary;
3414
3415	rcu_assign_pointer(thresholds->primary, new);
3416
3417	/* To be sure that nobody uses thresholds */
3418	synchronize_rcu();
3419
3420unlock:
3421	mutex_unlock(&memcg->thresholds_lock);
3422
3423	return ret;
3424}
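/*
 * Example (added commentary): with existing thresholds {100M, 300M}
 * and usage at 250M, registering a new threshold at 200M rebuilds the
 * primary array as {100M, 200M, 300M} and sets current_threshold to
 * index 1, the highest entry not exceeding the current usage; the old
 * array is kept as the spare buffer for the next update.
 */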
3425
3426static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3427	struct eventfd_ctx *eventfd, const char *args)
3428{
3429	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
3430}
3431
3432static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
3433	struct eventfd_ctx *eventfd, const char *args)
3434{
3435	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
3436}
3437
3438static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3439	struct eventfd_ctx *eventfd, enum res_type type)
3440{
3441	struct mem_cgroup_thresholds *thresholds;
3442	struct mem_cgroup_threshold_ary *new;
3443	unsigned long usage;
3444	int i, j, size;
3445
3446	mutex_lock(&memcg->thresholds_lock);
3447
3448	if (type == _MEM) {
3449		thresholds = &memcg->thresholds;
3450		usage = mem_cgroup_usage(memcg, false);
3451	} else if (type == _MEMSWAP) {
3452		thresholds = &memcg->memsw_thresholds;
3453		usage = mem_cgroup_usage(memcg, true);
3454	} else
3455		BUG();
3456
3457	if (!thresholds->primary)
3458		goto unlock;
3459
3460	/* Check if a threshold crossed before removing */
3461	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
3462
3463	/* Calculate new number of threshold */
3464	size = 0;
3465	for (i = 0; i < thresholds->primary->size; i++) {
3466		if (thresholds->primary->entries[i].eventfd != eventfd)
3467			size++;
3468	}
3469
3470	new = thresholds->spare;
3471
3472	/* Set thresholds array to NULL if we don't have thresholds */
3473	if (!size) {
3474		kfree(new);
3475		new = NULL;
3476		goto swap_buffers;
3477	}
3478
3479	new->size = size;
3480
3481	/* Copy thresholds and find current threshold */
3482	new->current_threshold = -1;
3483	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3484		if (thresholds->primary->entries[i].eventfd == eventfd)
3485			continue;
3486
3487		new->entries[j] = thresholds->primary->entries[i];
3488		if (new->entries[j].threshold <= usage) {
3489			/*
3490			 * new->current_threshold will not be used
3491			 * until rcu_assign_pointer(), so it's safe to increment
3492			 * it here.
3493			 */
3494			++new->current_threshold;
3495		}
3496		j++;
3497	}
3498
3499swap_buffers:
3500	/* Swap primary and spare array */
3501	thresholds->spare = thresholds->primary;
3502
3503	rcu_assign_pointer(thresholds->primary, new);
3504
3505	/* To be sure that nobody uses thresholds */
3506	synchronize_rcu();
3507
3508	/* If all events are unregistered, free the spare array */
3509	if (!new) {
3510		kfree(thresholds->spare);
3511		thresholds->spare = NULL;
3512	}
3513unlock:
3514	mutex_unlock(&memcg->thresholds_lock);
3515}
3516
3517static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3518	struct eventfd_ctx *eventfd)
3519{
3520	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
3521}
3522
3523static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3524	struct eventfd_ctx *eventfd)
3525{
3526	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
3527}
3528
3529static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
3530	struct eventfd_ctx *eventfd, const char *args)
3531{
3532	struct mem_cgroup_eventfd_list *event;
3533
3534	event = kmalloc(sizeof(*event),	GFP_KERNEL);
3535	if (!event)
3536		return -ENOMEM;
3537
3538	spin_lock(&memcg_oom_lock);
3539
3540	event->eventfd = eventfd;
3541	list_add(&event->list, &memcg->oom_notify);
3542
3543	/* already in OOM ? */
3544	if (memcg->under_oom)
3545		eventfd_signal(eventfd, 1);
3546	spin_unlock(&memcg_oom_lock);
3547
3548	return 0;
3549}
3550
3551static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
3552	struct eventfd_ctx *eventfd)
3553{
3554	struct mem_cgroup_eventfd_list *ev, *tmp;
3555
3556	spin_lock(&memcg_oom_lock);
3557
3558	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
3559		if (ev->eventfd == eventfd) {
3560			list_del(&ev->list);
3561			kfree(ev);
3562		}
3563	}
3564
3565	spin_unlock(&memcg_oom_lock);
3566}
3567
3568static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3569{
3570	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3571
3572	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
3573	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
3574	return 0;
3575}
3576
3577static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3578	struct cftype *cft, u64 val)
3579{
3580	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3581
3582	/* cannot set to root cgroup and only 0 and 1 are allowed */
3583	if (!css->parent || !((val == 0) || (val == 1)))
3584		return -EINVAL;
3585
3586	memcg->oom_kill_disable = val;
3587	if (!val)
3588		memcg_oom_recover(memcg);
3589
3590	return 0;
3591}
3592
3593#ifdef CONFIG_CGROUP_WRITEBACK
3594
3595struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
3596{
3597	return &memcg->cgwb_list;
3598}
3599
3600static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3601{
3602	return wb_domain_init(&memcg->cgwb_domain, gfp);
3603}
3604
3605static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3606{
3607	wb_domain_exit(&memcg->cgwb_domain);
3608}
3609
3610static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3611{
3612	wb_domain_size_changed(&memcg->cgwb_domain);
3613}
3614
3615struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3616{
3617	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3618
3619	if (!memcg->css.parent)
3620		return NULL;
3621
3622	return &memcg->cgwb_domain;
3623}
3624
3625/**
3626 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3627 * @wb: bdi_writeback in question
3628 * @pfilepages: out parameter for number of file pages
3629 * @pheadroom: out parameter for number of allocatable pages according to memcg
3630 * @pdirty: out parameter for number of dirty pages
3631 * @pwriteback: out parameter for number of pages under writeback
3632 *
3633 * Determine the numbers of file, headroom, dirty, and writeback pages in
3634 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
3635 * is a bit more involved.
3636 *
3637 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
3638 * headroom is calculated as the lowest headroom of itself and the
3639 * ancestors.  Note that this doesn't consider the actual amount of
3640 * available memory in the system.  The caller should further cap
3641 * *@pheadroom accordingly.
3642 */
3643void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3644			 unsigned long *pheadroom, unsigned long *pdirty,
3645			 unsigned long *pwriteback)
3646{
3647	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3648	struct mem_cgroup *parent;
3649
3650	*pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
3651
3652	/* this should eventually include NR_UNSTABLE_NFS */
3653	*pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
3654	*pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3655						     (1 << LRU_ACTIVE_FILE));
3656	*pheadroom = PAGE_COUNTER_MAX;
3657
3658	while ((parent = parent_mem_cgroup(memcg))) {
3659		unsigned long ceiling = min(memcg->memory.limit, memcg->high);
3660		unsigned long used = page_counter_read(&memcg->memory);
3661
3662		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3663		memcg = parent;
3664	}
3665}
3666
3667#else	/* CONFIG_CGROUP_WRITEBACK */
3668
3669static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3670{
3671	return 0;
3672}
3673
3674static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3675{
3676}
3677
3678static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3679{
3680}
3681
3682#endif	/* CONFIG_CGROUP_WRITEBACK */
3683
3684/*
3685 * DO NOT USE IN NEW FILES.
3686 *
3687 * "cgroup.event_control" implementation.
3688 *
3689 * This is way over-engineered.  It tries to support fully configurable
3690 * events for each user.  Such a level of flexibility is completely
3691 * unnecessary, especially in light of the planned unified hierarchy.
3692 *
3693 * Please deprecate this and replace with something simpler if at all
3694 * possible.
3695 */
3696
3697/*
3698 * Unregister event and free resources.
3699 *
3700 * Gets called from workqueue.
3701 */
3702static void memcg_event_remove(struct work_struct *work)
3703{
3704	struct mem_cgroup_event *event =
3705		container_of(work, struct mem_cgroup_event, remove);
3706	struct mem_cgroup *memcg = event->memcg;
3707
3708	remove_wait_queue(event->wqh, &event->wait);
3709
3710	event->unregister_event(memcg, event->eventfd);
3711
3712	/* Notify userspace the event is going away. */
3713	eventfd_signal(event->eventfd, 1);
3714
3715	eventfd_ctx_put(event->eventfd);
3716	kfree(event);
3717	css_put(&memcg->css);
3718}
3719
3720/*
3721 * Gets called on POLLHUP on eventfd when user closes it.
3722 *
3723 * Called with wqh->lock held and interrupts disabled.
3724 */
3725static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
3726			    int sync, void *key)
3727{
3728	struct mem_cgroup_event *event =
3729		container_of(wait, struct mem_cgroup_event, wait);
3730	struct mem_cgroup *memcg = event->memcg;
3731	unsigned long flags = (unsigned long)key;
3732
3733	if (flags & POLLHUP) {
3734		/*
3735		 * If the event has been detached at cgroup removal, we
3736		 * can simply return knowing the other side will clean up
3737		 * for us.
3738		 *
3739		 * We can't race against event freeing since the other
3740		 * side will require wqh->lock via remove_wait_queue(),
3741		 * which we hold.
3742		 */
3743		spin_lock(&memcg->event_list_lock);
3744		if (!list_empty(&event->list)) {
3745			list_del_init(&event->list);
3746			/*
3747			 * We are in atomic context, but cgroup_event_remove()
3748			 * may sleep, so we have to call it in workqueue.
3749			 */
3750			schedule_work(&event->remove);
3751		}
3752		spin_unlock(&memcg->event_list_lock);
3753	}
3754
3755	return 0;
3756}
3757
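/*
 * Callback for init_poll_funcptr(): remember the wait queue head and add our
 * wait entry so that memcg_event_wake() is called on eventfd activity.
 */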
3758static void memcg_event_ptable_queue_proc(struct file *file,
3759		wait_queue_head_t *wqh, poll_table *pt)
3760{
3761	struct mem_cgroup_event *event =
3762		container_of(pt, struct mem_cgroup_event, pt);
3763
3764	event->wqh = wqh;
3765	add_wait_queue(wqh, &event->wait);
3766}
3767
3768/*
3769 * DO NOT USE IN NEW FILES.
3770 *
3771 * Parse input and register new cgroup event handler.
3772 *
3773 * Input must be in format '<event_fd> <control_fd> <args>'.
3774 * Interpretation of args is defined by control file implementation.
3775 */
3776static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
3777					 char *buf, size_t nbytes, loff_t off)
3778{
3779	struct cgroup_subsys_state *css = of_css(of);
3780	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3781	struct mem_cgroup_event *event;
3782	struct cgroup_subsys_state *cfile_css;
3783	unsigned int efd, cfd;
3784	struct fd efile;
3785	struct fd cfile;
3786	const char *name;
3787	char *endp;
3788	int ret;
3789
3790	buf = strstrip(buf);
3791
3792	efd = simple_strtoul(buf, &endp, 10);
3793	if (*endp != ' ')
3794		return -EINVAL;
3795	buf = endp + 1;
3796
3797	cfd = simple_strtoul(buf, &endp, 10);
3798	if ((*endp != ' ') && (*endp != '\0'))
3799		return -EINVAL;
3800	buf = endp + 1;
3801
3802	event = kzalloc(sizeof(*event), GFP_KERNEL);
3803	if (!event)
3804		return -ENOMEM;
3805
3806	event->memcg = memcg;
3807	INIT_LIST_HEAD(&event->list);
3808	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
3809	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
3810	INIT_WORK(&event->remove, memcg_event_remove);
3811
3812	efile = fdget(efd);
3813	if (!efile.file) {
3814		ret = -EBADF;
3815		goto out_kfree;
3816	}
3817
3818	event->eventfd = eventfd_ctx_fileget(efile.file);
3819	if (IS_ERR(event->eventfd)) {
3820		ret = PTR_ERR(event->eventfd);
3821		goto out_put_efile;
3822	}
3823
3824	cfile = fdget(cfd);
3825	if (!cfile.file) {
3826		ret = -EBADF;
3827		goto out_put_eventfd;
3828	}
3829
3830	/* the process needs read permission on the control file */
3831	/* AV: shouldn't we check that it's been opened for read instead? */
3832	ret = inode_permission(file_inode(cfile.file), MAY_READ);
3833	if (ret < 0)
3834		goto out_put_cfile;
3835
3836	/*
3837	 * Determine the event callbacks and set them in @event.  This used
3838	 * to be done via struct cftype but cgroup core no longer knows
3839	 * about these events.  The following is crude but the whole thing
3840	 * is for compatibility anyway.
3841	 *
3842	 * DO NOT ADD NEW FILES.
3843	 */
3844	name = cfile.file->f_path.dentry->d_name.name;
3845
3846	if (!strcmp(name, "memory.usage_in_bytes")) {
3847		event->register_event = mem_cgroup_usage_register_event;
3848		event->unregister_event = mem_cgroup_usage_unregister_event;
3849	} else if (!strcmp(name, "memory.oom_control")) {
3850		event->register_event = mem_cgroup_oom_register_event;
3851		event->unregister_event = mem_cgroup_oom_unregister_event;
3852	} else if (!strcmp(name, "memory.pressure_level")) {
3853		event->register_event = vmpressure_register_event;
3854		event->unregister_event = vmpressure_unregister_event;
3855	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
3856		event->register_event = memsw_cgroup_usage_register_event;
3857		event->unregister_event = memsw_cgroup_usage_unregister_event;
3858	} else {
3859		ret = -EINVAL;
3860		goto out_put_cfile;
3861	}
3862
3863	/*
3864	 * Verify that @cfile belongs to @css.  Also, remaining events are
3865	 * automatically removed on cgroup destruction but the removal is
3866	 * asynchronous, so take an extra ref on @css.
3867	 */
3868	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
3869					       &memory_cgrp_subsys);
3870	ret = -EINVAL;
3871	if (IS_ERR(cfile_css))
3872		goto out_put_cfile;
3873	if (cfile_css != css) {
3874		css_put(cfile_css);
3875		goto out_put_cfile;
3876	}
3877
3878	ret = event->register_event(memcg, event->eventfd, buf);
3879	if (ret)
3880		goto out_put_css;
3881
3882	efile.file->f_op->poll(efile.file, &event->pt);
3883
3884	spin_lock(&memcg->event_list_lock);
3885	list_add(&event->list, &memcg->event_list);
3886	spin_unlock(&memcg->event_list_lock);
3887
3888	fdput(cfile);
3889	fdput(efile);
3890
3891	return nbytes;
3892
3893out_put_css:
3894	css_put(css);
3895out_put_cfile:
3896	fdput(cfile);
3897out_put_eventfd:
3898	eventfd_ctx_put(event->eventfd);
3899out_put_efile:
3900	fdput(efile);
3901out_kfree:
3902	kfree(event);
3903
3904	return ret;
3905}
3906
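/* Control files exposed on the legacy (cgroup v1) hierarchy. */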
3907static struct cftype mem_cgroup_legacy_files[] = {
3908	{
3909		.name = "usage_in_bytes",
3910		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3911		.read_u64 = mem_cgroup_read_u64,
3912	},
3913	{
3914		.name = "max_usage_in_bytes",
3915		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
3916		.write = mem_cgroup_reset,
3917		.read_u64 = mem_cgroup_read_u64,
3918	},
3919	{
3920		.name = "limit_in_bytes",
3921		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
3922		.write = mem_cgroup_write,
3923		.read_u64 = mem_cgroup_read_u64,
3924	},
3925	{
3926		.name = "soft_limit_in_bytes",
3927		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3928		.write = mem_cgroup_write,
3929		.read_u64 = mem_cgroup_read_u64,
3930	},
3931	{
3932		.name = "failcnt",
3933		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
3934		.write = mem_cgroup_reset,
3935		.read_u64 = mem_cgroup_read_u64,
3936	},
3937	{
3938		.name = "stat",
3939		.seq_show = memcg_stat_show,
3940	},
3941	{
3942		.name = "force_empty",
3943		.write = mem_cgroup_force_empty_write,
3944	},
3945	{
3946		.name = "use_hierarchy",
3947		.write_u64 = mem_cgroup_hierarchy_write,
3948		.read_u64 = mem_cgroup_hierarchy_read,
3949	},
3950	{
3951		.name = "cgroup.event_control",		/* XXX: for compat */
3952		.write = memcg_write_event_control,
3953		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
3954	},
3955	{
3956		.name = "swappiness",
3957		.read_u64 = mem_cgroup_swappiness_read,
3958		.write_u64 = mem_cgroup_swappiness_write,
3959	},
3960	{
3961		.name = "move_charge_at_immigrate",
3962		.read_u64 = mem_cgroup_move_charge_read,
3963		.write_u64 = mem_cgroup_move_charge_write,
3964	},
3965	{
3966		.name = "oom_control",
3967		.seq_show = mem_cgroup_oom_control_read,
3968		.write_u64 = mem_cgroup_oom_control_write,
3969		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3970	},
3971	{
3972		.name = "pressure_level",
3973	},
3974#ifdef CONFIG_NUMA
3975	{
3976		.name = "numa_stat",
3977		.seq_show = memcg_numa_stat_show,
3978	},
3979#endif
3980	{
3981		.name = "kmem.limit_in_bytes",
3982		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
3983		.write = mem_cgroup_write,
3984		.read_u64 = mem_cgroup_read_u64,
3985	},
3986	{
3987		.name = "kmem.usage_in_bytes",
3988		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
3989		.read_u64 = mem_cgroup_read_u64,
3990	},
3991	{
3992		.name = "kmem.failcnt",
3993		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
3994		.write = mem_cgroup_reset,
3995		.read_u64 = mem_cgroup_read_u64,
3996	},
3997	{
3998		.name = "kmem.max_usage_in_bytes",
3999		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4000		.write = mem_cgroup_reset,
4001		.read_u64 = mem_cgroup_read_u64,
4002	},
4003#ifdef CONFIG_SLABINFO
4004	{
4005		.name = "kmem.slabinfo",
4006		.seq_start = slab_start,
4007		.seq_next = slab_next,
4008		.seq_stop = slab_stop,
4009		.seq_show = memcg_slab_show,
4010	},
4011#endif
4012	{
4013		.name = "kmem.tcp.limit_in_bytes",
4014		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4015		.write = mem_cgroup_write,
4016		.read_u64 = mem_cgroup_read_u64,
4017	},
4018	{
4019		.name = "kmem.tcp.usage_in_bytes",
4020		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4021		.read_u64 = mem_cgroup_read_u64,
4022	},
4023	{
4024		.name = "kmem.tcp.failcnt",
4025		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4026		.write = mem_cgroup_reset,
4027		.read_u64 = mem_cgroup_read_u64,
4028	},
4029	{
4030		.name = "kmem.tcp.max_usage_in_bytes",
4031		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4032		.write = mem_cgroup_reset,
4033		.read_u64 = mem_cgroup_read_u64,
4034	},
4035	{ },	/* terminate */
4036};
4037
4038static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4039{
4040	struct mem_cgroup_per_node *pn;
4041	struct mem_cgroup_per_zone *mz;
4042	int zone, tmp = node;
4043	/*
4044	 * This routine is called against possible nodes.
4045	 * But it's a BUG to call kmalloc() against an offline node.
4046	 *
4047	 * TODO: this routine can waste much memory for nodes which will
4048	 *       never be onlined. It's better to use memory hotplug callback
4049	 *       function.
4050	 */
4051	if (!node_state(node, N_NORMAL_MEMORY))
4052		tmp = -1;
4053	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4054	if (!pn)
4055		return 1;
4056
4057	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4058		mz = &pn->zoneinfo[zone];
4059		lruvec_init(&mz->lruvec);
4060		mz->usage_in_excess = 0;
4061		mz->on_tree = false;
4062		mz->memcg = memcg;
4063	}
4064	memcg->nodeinfo[node] = pn;
4065	return 0;
4066}
4067
4068static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4069{
4070	kfree(memcg->nodeinfo[node]);
4071}
4072
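/*
 * Release everything set up by mem_cgroup_alloc(): the writeback domain,
 * the per-node info, the per-cpu statistics and the struct itself.
 */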
4073static void mem_cgroup_free(struct mem_cgroup *memcg)
4074{
4075	int node;
4076
4077	memcg_wb_domain_exit(memcg);
4078	for_each_node(node)
4079		free_mem_cgroup_per_zone_info(memcg, node);
4080	free_percpu(memcg->stat);
4081	kfree(memcg);
4082}
4083
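/*
 * Allocate and initialize a struct mem_cgroup: per-cpu statistics, per-node
 * info, the writeback domain, and the various locks and lists.  Returns NULL
 * on failure.
 */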
4084static struct mem_cgroup *mem_cgroup_alloc(void)
4085{
4086	struct mem_cgroup *memcg;
4087	size_t size;
4088	int node;
4089
4090	size = sizeof(struct mem_cgroup);
4091	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
4092
4093	memcg = kzalloc(size, GFP_KERNEL);
4094	if (!memcg)
4095		return NULL;
4096
4097	memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4098	if (!memcg->stat)
4099		goto fail;
4100
4101	for_each_node(node)
4102		if (alloc_mem_cgroup_per_zone_info(memcg, node))
4103			goto fail;
4104
4105	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4106		goto fail;
4107
4108	INIT_WORK(&memcg->high_work, high_work_func);
4109	memcg->last_scanned_node = MAX_NUMNODES;
4110	INIT_LIST_HEAD(&memcg->oom_notify);
4111	mutex_init(&memcg->thresholds_lock);
4112	spin_lock_init(&memcg->move_lock);
4113	vmpressure_init(&memcg->vmpressure);
4114	INIT_LIST_HEAD(&memcg->event_list);
4115	spin_lock_init(&memcg->event_list_lock);
4116	memcg->socket_pressure = jiffies;
4117#ifndef CONFIG_SLOB
4118	memcg->kmemcg_id = -1;
4119#endif
4120#ifdef CONFIG_CGROUP_WRITEBACK
4121	INIT_LIST_HEAD(&memcg->cgwb_list);
4122#endif
4123	return memcg;
4124fail:
4125	mem_cgroup_free(memcg);
4126	return NULL;
4127}
4128
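/*
 * cgroup callback: allocate a new memcg css.  Page counters are chained to
 * the parent when use_hierarchy is set; the root_mem_cgroup pointer is
 * recorded when the top-level css is created.
 */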
4129static struct cgroup_subsys_state * __ref
4130mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4131{
4132	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4133	struct mem_cgroup *memcg;
4134	long error = -ENOMEM;
4135
4136	memcg = mem_cgroup_alloc();
4137	if (!memcg)
4138		return ERR_PTR(error);
4139
4140	memcg->high = PAGE_COUNTER_MAX;
4141	memcg->soft_limit = PAGE_COUNTER_MAX;
4142	if (parent) {
4143		memcg->swappiness = mem_cgroup_swappiness(parent);
4144		memcg->oom_kill_disable = parent->oom_kill_disable;
4145	}
4146	if (parent && parent->use_hierarchy) {
4147		memcg->use_hierarchy = true;
4148		page_counter_init(&memcg->memory, &parent->memory);
4149		page_counter_init(&memcg->swap, &parent->swap);
4150		page_counter_init(&memcg->memsw, &parent->memsw);
4151		page_counter_init(&memcg->kmem, &parent->kmem);
4152		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
4153	} else {
4154		page_counter_init(&memcg->memory, NULL);
4155		page_counter_init(&memcg->swap, NULL);
4156		page_counter_init(&memcg->memsw, NULL);
4157		page_counter_init(&memcg->kmem, NULL);
4158		page_counter_init(&memcg->tcpmem, NULL);
4159		/*
4160		 * Deeper hierarchy with use_hierarchy == false doesn't make
4161		 * much sense so let cgroup subsystem know about this
4162		 * unfortunate state in our controller.
4163		 */
4164		if (parent != root_mem_cgroup)
4165			memory_cgrp_subsys.broken_hierarchy = true;
4166	}
4167
4168	/* The following stuff does not apply to the root */
4169	if (!parent) {
4170		root_mem_cgroup = memcg;
4171		return &memcg->css;
4172	}
4173
4174	error = memcg_online_kmem(memcg);
4175	if (error)
4176		goto fail;
4177
4178	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4179		static_branch_inc(&memcg_sockets_enabled_key);
4180
4181	return &memcg->css;
4182fail:
4183	mem_cgroup_free(memcg);
4184	return NULL;
4185}
4186
4187static int
4188mem_cgroup_css_online(struct cgroup_subsys_state *css)
4189{
4190	if (css->id > MEM_CGROUP_ID_MAX)
4191		return -ENOSPC;
4192
4193	return 0;
4194}
4195
4196static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4197{
4198	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4199	struct mem_cgroup_event *event, *tmp;
4200
4201	/*
4202	 * Unregister events and notify userspace.
4203	 * Notify userspace about cgroup removal only after rmdir of the cgroup
4204	 * directory, to avoid a race between userspace and kernel space.
4205	 */
4206	spin_lock(&memcg->event_list_lock);
4207	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
4208		list_del_init(&event->list);
4209		schedule_work(&event->remove);
4210	}
4211	spin_unlock(&memcg->event_list_lock);
4212
4213	memcg_offline_kmem(memcg);
4214	wb_memcg_offline(memcg);
4215}
4216
4217static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4218{
4219	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4220
4221	invalidate_reclaim_iterators(memcg);
4222}
4223
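/*
 * cgroup callback: final teardown of a memcg.  Drops the socket accounting
 * static branches where applicable, cancels pending high-limit work and
 * frees all remaining state.
 */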
4224static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
4225{
4226	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4227
4228	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4229		static_branch_dec(&memcg_sockets_enabled_key);
4230
4231	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
4232		static_branch_dec(&memcg_sockets_enabled_key);
4233
4234	vmpressure_cleanup(&memcg->vmpressure);
4235	cancel_work_sync(&memcg->high_work);
4236	mem_cgroup_remove_from_trees(memcg);
4237	memcg_free_kmem(memcg);
4238	mem_cgroup_free(memcg);
4239}
4240
4241/**
4242 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4243 * @css: the target css
4244 *
4245 * Reset the states of the mem_cgroup associated with @css.  This is
4246 * invoked when the userland requests disabling on the default hierarchy
4247 * but the memcg is pinned through dependency.  The memcg should stop
4248 * applying policies and should revert to the vanilla state as it may be
4249 * made visible again.
4250 *
4251 * The current implementation only resets the essential configurations.
4252 * This needs to be expanded to cover all the visible parts.
4253 */
4254static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4255{
4256	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4257
4258	page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX);
4259	page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX);
4260	page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX);
4261	page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX);
4262	page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX);
4263	memcg->low = 0;
4264	memcg->high = PAGE_COUNTER_MAX;
4265	memcg->soft_limit = PAGE_COUNTER_MAX;
4266	memcg_wb_domain_size_changed(memcg);
4267}
4268
4269#ifdef CONFIG_MMU
4270/* Handlers for move charge at task migration. */
4271static int mem_cgroup_do_precharge(unsigned long count)
4272{
4273	int ret;
4274
4275	/* Try a single bulk charge without reclaim first, kswapd may wake */
4276	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
4277	if (!ret) {
4278		mc.precharge += count;
4279		return ret;
4280	}
4281
4282	/* Try charges one by one with reclaim */
4283	while (count--) {
4284		ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
4285		if (ret)
4286			return ret;
4287		mc.precharge++;
4288		cond_resched();
4289	}
4290	return 0;
4291}
4292
4293/**
4294 * get_mctgt_type - get target type of moving charge
4295 * @vma: the vma to which the pte to be checked belongs
4296 * @addr: the address corresponding to the pte to be checked
4297 * @ptent: the pte to be checked
4298 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
4299 *
4300 * Returns
4301 *   0 (MC_TARGET_NONE): if the pte is not a target for move charge.
4302 *   1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4303 *     move charge.  If @target is not NULL, the page is stored in target->page
4304 *     with an extra refcount taken (callers should handle it).
4305 *   2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4306 *     target for charge migration.  If @target is not NULL, the entry is stored
4307 *     in target->ent.
4308 *
4309 * Called with pte lock held.
4310 */
4311union mc_target {
4312	struct page	*page;
4313	swp_entry_t	ent;
4314};
4315
4316enum mc_target_type {
4317	MC_TARGET_NONE = 0,
4318	MC_TARGET_PAGE,
4319	MC_TARGET_SWAP,
4320};
4321
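/*
 * For a present pte, return the mapped page with an extra reference if it is
 * eligible for charge moving according to mc.flags (MOVE_ANON/MOVE_FILE),
 * or NULL otherwise.
 */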
4322static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4323						unsigned long addr, pte_t ptent)
4324{
4325	struct page *page = vm_normal_page(vma, addr, ptent);
4326
4327	if (!page || !page_mapped(page))
4328		return NULL;
4329	if (PageAnon(page)) {
4330		if (!(mc.flags & MOVE_ANON))
4331			return NULL;
4332	} else {
4333		if (!(mc.flags & MOVE_FILE))
4334			return NULL;
4335	}
4336	if (!get_page_unless_zero(page))
4337		return NULL;
4338
4339	return page;
4340}
4341
4342#ifdef CONFIG_SWAP
4343static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4344			unsigned long addr, pte_t ptent, swp_entry_t *entry)
4345{
4346	struct page *page = NULL;
4347	swp_entry_t ent = pte_to_swp_entry(ptent);
4348
4349	if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
4350		return NULL;
4351	/*
4352	 * Because lookup_swap_cache() updates some statistics counters,
4353	 * we call find_get_page() with swapper_space directly.
4354	 */
4355	page = find_get_page(swap_address_space(ent), ent.val);
4356	if (do_memsw_account())
4357		entry->val = ent.val;
4358
4359	return page;
4360}
4361#else
4362static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4363			unsigned long addr, pte_t ptent, swp_entry_t *entry)
4364{
4365	return NULL;
4366}
4367#endif
4368
4369static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4370			unsigned long addr, pte_t ptent, swp_entry_t *entry)
4371{
4372	struct page *page = NULL;
4373	struct address_space *mapping;
4374	pgoff_t pgoff;
4375
4376	if (!vma->vm_file) /* anonymous vma */
4377		return NULL;
4378	if (!(mc.flags & MOVE_FILE))
4379		return NULL;
4380
4381	mapping = vma->vm_file->f_mapping;
4382	pgoff = linear_page_index(vma, addr);
4383
4384	/* the page is moved even if it's not RSS of this task (page-faulted). */
4385#ifdef CONFIG_SWAP
4386	/* shmem/tmpfs may report page out on swap: account for that too. */
4387	if (shmem_mapping(mapping)) {
4388		page = find_get_entry(mapping, pgoff);
4389		if (radix_tree_exceptional_entry(page)) {
4390			swp_entry_t swp = radix_to_swp_entry(page);
4391			if (do_memsw_account())
4392				*entry = swp;
4393			page = find_get_page(swap_address_space(swp), swp.val);
4394		}
4395	} else
4396		page = find_get_page(mapping, pgoff);
4397#else
4398	page = find_get_page(mapping, pgoff);
4399#endif
4400	return page;
4401}
4402
4403/**
4404 * mem_cgroup_move_account - move account of the page
4405 * @page: the page
4406 * @compound: whether the page is moved as a compound (huge) page
4407 * @from: mem_cgroup which the page is moved from.
4408 * @to:	mem_cgroup which the page is moved to. @from != @to.
4409 *
4410 * The caller must make sure the page is not on LRU (isolate_page() is useful.)
4411 *
4412 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
4413 * from old cgroup.
4414 */
4415static int mem_cgroup_move_account(struct page *page,
4416				   bool compound,
4417				   struct mem_cgroup *from,
4418				   struct mem_cgroup *to)
4419{
4420	unsigned long flags;
4421	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
4422	int ret;
4423	bool anon;
4424
4425	VM_BUG_ON(from == to);
4426	VM_BUG_ON_PAGE(PageLRU(page), page);
4427	VM_BUG_ON(compound && !PageTransHuge(page));
4428
4429	/*
4430	 * Prevent mem_cgroup_migrate() from looking at
4431	 * page->mem_cgroup of its source page while we change it.
4432	 */
4433	ret = -EBUSY;
4434	if (!trylock_page(page))
4435		goto out;
4436
4437	ret = -EINVAL;
4438	if (page->mem_cgroup != from)
4439		goto out_unlock;
4440
4441	anon = PageAnon(page);
4442
4443	spin_lock_irqsave(&from->move_lock, flags);
4444
4445	if (!anon && page_mapped(page)) {
4446		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4447			       nr_pages);
4448		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4449			       nr_pages);
4450	}
4451
4452	/*
4453	 * move_lock grabbed above and caller set from->moving_account, so
4454	 * mem_cgroup_update_page_stat() will serialize updates to PageDirty.
4455	 * So mapping should be stable for dirty pages.
4456	 */
4457	if (!anon && PageDirty(page)) {
4458		struct address_space *mapping = page_mapping(page);
4459
4460		if (mapping_cap_account_dirty(mapping)) {
4461			__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
4462				       nr_pages);
4463			__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
4464				       nr_pages);
4465		}
4466	}
4467
4468	if (PageWriteback(page)) {
4469		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4470			       nr_pages);
4471		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4472			       nr_pages);
4473	}
4474
4475	/*
4476	 * It is safe to change page->mem_cgroup here because the page
4477	 * is referenced, charged, and isolated - we can't race with
4478	 * uncharging, charging, migration, or LRU putback.
4479	 */
4480
4481	/* caller should have done css_get */
4482	page->mem_cgroup = to;
4483	spin_unlock_irqrestore(&from->move_lock, flags);
4484
4485	ret = 0;
4486
4487	local_irq_disable();
4488	mem_cgroup_charge_statistics(to, page, compound, nr_pages);
4489	memcg_check_events(to, page);
4490	mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
4491	memcg_check_events(from, page);
4492	local_irq_enable();
4493out_unlock:
4494	unlock_page(page);
4495out:
4496	return ret;
4497}
4498
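/* See the comment above union mc_target for the meaning of the return values. */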
4499static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
4500		unsigned long addr, pte_t ptent, union mc_target *target)
4501{
4502	struct page *page = NULL;
4503	enum mc_target_type ret = MC_TARGET_NONE;
4504	swp_entry_t ent = { .val = 0 };
4505
4506	if (pte_present(ptent))
4507		page = mc_handle_present_pte(vma, addr, ptent);
4508	else if (is_swap_pte(ptent))
4509		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
4510	else if (pte_none(ptent))
4511		page = mc_handle_file_pte(vma, addr, ptent, &ent);
4512
4513	if (!page && !ent.val)
4514		return ret;
4515	if (page) {
4516		/*
4517		 * Do only a loose check without serialization.
4518		 * mem_cgroup_move_account() checks whether the page is
4519		 * valid under LRU exclusion.
4520		 */
4521		if (page->mem_cgroup == mc.from) {
4522			ret = MC_TARGET_PAGE;
4523			if (target)
4524				target->page = page;
4525		}
4526		if (!ret || !target)
4527			put_page(page);
4528	}
4529	/* There is a swap entry and a page doesn't exist or isn't charged */
4530	if (ent.val && !ret &&
4531	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
4532		ret = MC_TARGET_SWAP;
4533		if (target)
4534			target->ent = ent;
4535	}
4536	return ret;
4537}
4538
4539#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4540/*
4541 * We don't consider swapping or file mapped pages because THP does not
4542 * support them for now.
4543 * Caller should make sure that pmd_trans_huge(pmd) is true.
4544 */
4545static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4546		unsigned long addr, pmd_t pmd, union mc_target *target)
4547{
4548	struct page *page = NULL;
4549	enum mc_target_type ret = MC_TARGET_NONE;
4550
4551	page = pmd_page(pmd);
4552	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
4553	if (!(mc.flags & MOVE_ANON))
4554		return ret;
4555	if (page->mem_cgroup == mc.from) {
4556		ret = MC_TARGET_PAGE;
4557		if (target) {
4558			get_page(page);
4559			target->page = page;
4560		}
4561	}
4562	return ret;
4563}
4564#else
4565static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4566		unsigned long addr, pmd_t pmd, union mc_target *target)
4567{
4568	return MC_TARGET_NONE;
4569}
4570#endif
4571
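/*
 * Page table walk callback: count how many charges would need to be moved in
 * this pmd range and accumulate the result in mc.precharge.
 */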
4572static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4573					unsigned long addr, unsigned long end,
4574					struct mm_walk *walk)
4575{
4576	struct vm_area_struct *vma = walk->vma;
4577	pte_t *pte;
4578	spinlock_t *ptl;
4579
4580	ptl = pmd_trans_huge_lock(pmd, vma);
4581	if (ptl) {
4582		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
4583			mc.precharge += HPAGE_PMD_NR;
4584		spin_unlock(ptl);
4585		return 0;
4586	}
4587
4588	if (pmd_trans_unstable(pmd))
4589		return 0;
4590	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4591	for (; addr != end; pte++, addr += PAGE_SIZE)
4592		if (get_mctgt_type(vma, addr, *pte, NULL))
4593			mc.precharge++;	/* increment precharge temporarily */
4594	pte_unmap_unlock(pte - 1, ptl);
4595	cond_resched();
4596
4597	return 0;
4598}
4599
4600static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4601{
4602	unsigned long precharge;
4603
4604	struct mm_walk mem_cgroup_count_precharge_walk = {
4605		.pmd_entry = mem_cgroup_count_precharge_pte_range,
4606		.mm = mm,
4607	};
4608	down_read(&mm->mmap_sem);
4609	walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
4610	up_read(&mm->mmap_sem);
4611
4612	precharge = mc.precharge;
4613	mc.precharge = 0;
4614
4615	return precharge;
4616}
4617
4618static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4619{
4620	unsigned long precharge = mem_cgroup_count_precharge(mm);
4621
4622	VM_BUG_ON(mc.moving_task);
4623	mc.moving_task = current;
4624	return mem_cgroup_do_precharge(precharge);
4625}
4626
4627/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
4628static void __mem_cgroup_clear_mc(void)
4629{
4630	struct mem_cgroup *from = mc.from;
4631	struct mem_cgroup *to = mc.to;
4632
4633	/* we must uncharge all the leftover precharges from mc.to */
4634	if (mc.precharge) {
4635		cancel_charge(mc.to, mc.precharge);
4636		mc.precharge = 0;
4637	}
4638	/*
4639	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4640	 * we must uncharge here.
4641	 */
4642	if (mc.moved_charge) {
4643		cancel_charge(mc.from, mc.moved_charge);
4644		mc.moved_charge = 0;
4645	}
4646	/* we must fixup refcnts and charges */
4647	if (mc.moved_swap) {
4648		/* uncharge swap account from the old cgroup */
4649		if (!mem_cgroup_is_root(mc.from))
4650			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
4651
4652		/*
4653		 * we charged both to->memory and to->memsw, so we
4654		 * should uncharge to->memory.
4655		 */
4656		if (!mem_cgroup_is_root(mc.to))
4657			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
4658
4659		css_put_many(&mc.from->css, mc.moved_swap);
4660
4661		/* we've already done css_get(mc.to) */
4662		mc.moved_swap = 0;
4663	}
4664	memcg_oom_recover(from);
4665	memcg_oom_recover(to);
4666	wake_up_all(&mc.waitq);
4667}
4668
4669static void mem_cgroup_clear_mc(void)
4670{
4671	struct mm_struct *mm = mc.mm;
4672
4673	/*
4674	 * we must clear moving_task before waking up waiters at the end of
4675	 * task migration.
4676	 */
4677	mc.moving_task = NULL;
4678	__mem_cgroup_clear_mc();
4679	spin_lock(&mc.lock);
4680	mc.from = NULL;
4681	mc.to = NULL;
4682	mc.mm = NULL;
4683	spin_unlock(&mc.lock);
4684
4685	mmput(mm);
4686}
4687
4688static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4689{
4690	struct cgroup_subsys_state *css;
4691	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
4692	struct mem_cgroup *from;
4693	struct task_struct *leader, *p;
4694	struct mm_struct *mm;
4695	unsigned long move_flags;
4696	int ret = 0;
4697
4698	/* charge immigration isn't supported on the default hierarchy */
4699	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4700		return 0;
4701
4702	/*
4703	 * Multi-process migrations only happen on the default hierarchy
4704	 * where charge immigration is not used.  Perform charge
4705	 * immigration if @tset contains a leader and whine if there are
4706	 * multiple.
4707	 */
4708	p = NULL;
4709	cgroup_taskset_for_each_leader(leader, css, tset) {
4710		WARN_ON_ONCE(p);
4711		p = leader;
4712		memcg = mem_cgroup_from_css(css);
4713	}
4714	if (!p)
4715		return 0;
4716
4717	/*
4718	 * We are now committed to this value whatever it is. Changes in this
4719	 * tunable will only affect upcoming migrations, not the current one.
4720	 * So we need to save it, and keep it going.
4721	 */
4722	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4723	if (!move_flags)
4724		return 0;
4725
4726	from = mem_cgroup_from_task(p);
4727
4728	VM_BUG_ON(from == memcg);
4729
4730	mm = get_task_mm(p);
4731	if (!mm)
4732		return 0;
4733	/* We move charges only when we move an owner of the mm */
4734	if (mm->owner == p) {
4735		VM_BUG_ON(mc.from);
4736		VM_BUG_ON(mc.to);
4737		VM_BUG_ON(mc.precharge);
4738		VM_BUG_ON(mc.moved_charge);
4739		VM_BUG_ON(mc.moved_swap);
4740
4741		spin_lock(&mc.lock);
4742		mc.mm = mm;
4743		mc.from = from;
4744		mc.to = memcg;
4745		mc.flags = move_flags;
4746		spin_unlock(&mc.lock);
4747		/* We set mc.moving_task later */
4748
4749		ret = mem_cgroup_precharge_mc(mm);
4750		if (ret)
4751			mem_cgroup_clear_mc();
4752	} else {
4753		mmput(mm);
4754	}
4755	return ret;
4756}
4757
4758static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4759{
4760	if (mc.to)
4761		mem_cgroup_clear_mc();
4762}
4763
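/*
 * Page table walk callback: move charges for the pages (or the THP) mapped in
 * this pmd range from mc.from to mc.to, consuming precharges and recording
 * moved swap entries.
 */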
4764static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4765				unsigned long addr, unsigned long end,
4766				struct mm_walk *walk)
4767{
4768	int ret = 0;
4769	struct vm_area_struct *vma = walk->vma;
4770	pte_t *pte;
4771	spinlock_t *ptl;
4772	enum mc_target_type target_type;
4773	union mc_target target;
4774	struct page *page;
4775
4776	ptl = pmd_trans_huge_lock(pmd, vma);
4777	if (ptl) {
4778		if (mc.precharge < HPAGE_PMD_NR) {
4779			spin_unlock(ptl);
4780			return 0;
4781		}
4782		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
4783		if (target_type == MC_TARGET_PAGE) {
4784			page = target.page;
4785			if (!isolate_lru_page(page)) {
4786				if (!mem_cgroup_move_account(page, true,
4787							     mc.from, mc.to)) {
4788					mc.precharge -= HPAGE_PMD_NR;
4789					mc.moved_charge += HPAGE_PMD_NR;
4790				}
4791				putback_lru_page(page);
4792			}
4793			put_page(page);
4794		}
4795		spin_unlock(ptl);
4796		return 0;
4797	}
4798
4799	if (pmd_trans_unstable(pmd))
4800		return 0;
4801retry:
4802	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4803	for (; addr != end; addr += PAGE_SIZE) {
4804		pte_t ptent = *(pte++);
4805		swp_entry_t ent;
4806
4807		if (!mc.precharge)
4808			break;
4809
4810		switch (get_mctgt_type(vma, addr, ptent, &target)) {
4811		case MC_TARGET_PAGE:
4812			page = target.page;
4813			/*
4814			 * We can have a part of the split pmd here. Moving it
4815			 * can be done but it would be too convoluted so simply
4816			 * ignore such a partial THP and keep it in the original
4817			 * memcg. There should be somebody mapping the head.
4818			 */
4819			if (PageTransCompound(page))
4820				goto put;
4821			if (isolate_lru_page(page))
4822				goto put;
4823			if (!mem_cgroup_move_account(page, false,
4824						mc.from, mc.to)) {
4825				mc.precharge--;
4826				/* we uncharge from mc.from later. */
4827				mc.moved_charge++;
4828			}
4829			putback_lru_page(page);
4830put:			/* get_mctgt_type() gets the page */
4831			put_page(page);
4832			break;
4833		case MC_TARGET_SWAP:
4834			ent = target.ent;
4835			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
4836				mc.precharge--;
4837				/* we fixup refcnts and charges later. */
4838				mc.moved_swap++;
4839			}
4840			break;
4841		default:
4842			break;
4843		}
4844	}
4845	pte_unmap_unlock(pte - 1, ptl);
4846	cond_resched();
4847
4848	if (addr != end) {
4849		/*
4850		 * We have consumed all precharges we got in can_attach().
4851		 * We try to charge one by one, but don't do any additional
4852		 * charges to mc.to if we have already failed to charge once
4853		 * in the attach() phase.
4854		 */
4855		ret = mem_cgroup_do_precharge(1);
4856		if (!ret)
4857			goto retry;
4858	}
4859
4860	return ret;
4861}
4862
4863static void mem_cgroup_move_charge(void)
4864{
4865	struct mm_walk mem_cgroup_move_charge_walk = {
4866		.pmd_entry = mem_cgroup_move_charge_pte_range,
4867		.mm = mc.mm,
4868	};
4869
4870	lru_add_drain_all();
4871	/*
4872	 * Signal lock_page_memcg() to take the memcg's move_lock
4873	 * while we're moving its pages to another memcg. Then wait
4874	 * for already started RCU-only updates to finish.
4875	 */
4876	atomic_inc(&mc.from->moving_account);
4877	synchronize_rcu();
4878retry:
4879	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
4880		/*
4881		 * Someone who is holding the mmap_sem might be waiting in
4882		 * waitq. So we cancel all extra charges, wake up all waiters,
4883		 * and retry. Because we cancel precharges, we might not be able
4884		 * to move enough charges, but moving charge is a best-effort
4885		 * feature anyway, so it wouldn't be a big problem.
4886		 */
4887		__mem_cgroup_clear_mc();
4888		cond_resched();
4889		goto retry;
4890	}
4891	/*
4892	 * When we have consumed all precharges and failed in doing
4893	 * additional charge, the page walk just aborts.
4894	 */
4895	walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
4896	up_read(&mc.mm->mmap_sem);
4897	atomic_dec(&mc.from->moving_account);
4898}
4899
4900static void mem_cgroup_move_task(void)
4901{
4902	if (mc.to) {
4903		mem_cgroup_move_charge();
4904		mem_cgroup_clear_mc();
4905	}
4906}
4907#else	/* !CONFIG_MMU */
4908static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4909{
4910	return 0;
4911}
4912static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4913{
4914}
4915static void mem_cgroup_move_task(void)
4916{
4917}
4918#endif
4919
4920/*
4921 * Cgroup retains root cgroups across [un]mount cycles making it necessary
4922 * to verify whether we're attached to the default hierarchy on each mount
4923 * attempt.
4924 */
4925static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
4926{
4927	/*
4928	 * use_hierarchy is forced on the default hierarchy.  cgroup core
4929	 * guarantees that @root doesn't have any children, so turning it
4930	 * on for the root memcg is enough.
4931	 */
4932	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4933		root_mem_cgroup->use_hierarchy = true;
4934	else
4935		root_mem_cgroup->use_hierarchy = false;
4936}
4937
4938static u64 memory_current_read(struct cgroup_subsys_state *css,
4939			       struct cftype *cft)
4940{
4941	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4942
4943	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
4944}
4945
4946static int memory_low_show(struct seq_file *m, void *v)
4947{
4948	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
4949	unsigned long low = READ_ONCE(memcg->low);
4950
4951	if (low == PAGE_COUNTER_MAX)
4952		seq_puts(m, "max\n");
4953	else
4954		seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
4955
4956	return 0;
4957}
4958
4959static ssize_t memory_low_write(struct kernfs_open_file *of,
4960				char *buf, size_t nbytes, loff_t off)
4961{
4962	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4963	unsigned long low;
4964	int err;
4965
4966	buf = strstrip(buf);
4967	err = page_counter_memparse(buf, "max", &low);
4968	if (err)
4969		return err;
4970
4971	memcg->low = low;
4972
4973	return nbytes;
4974}
4975
4976static int memory_high_show(struct seq_file *m, void *v)
4977{
4978	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
4979	unsigned long high = READ_ONCE(memcg->high);
4980
4981	if (high == PAGE_COUNTER_MAX)
4982		seq_puts(m, "max\n");
4983	else
4984		seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
4985
4986	return 0;
4987}
4988
4989static ssize_t memory_high_write(struct kernfs_open_file *of,
4990				 char *buf, size_t nbytes, loff_t off)
4991{
4992	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4993	unsigned long nr_pages;
4994	unsigned long high;
4995	int err;
4996
4997	buf = strstrip(buf);
4998	err = page_counter_memparse(buf, "max", &high);
4999	if (err)
5000		return err;
5001
5002	memcg->high = high;
5003
5004	nr_pages = page_counter_read(&memcg->memory);
5005	if (nr_pages > high)
5006		try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
5007					     GFP_KERNEL, true);
5008
5009	memcg_wb_domain_size_changed(memcg);
5010	return nbytes;
5011}
5012
5013static int memory_max_show(struct seq_file *m, void *v)
5014{
5015	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5016	unsigned long max = READ_ONCE(memcg->memory.limit);
5017
5018	if (max == PAGE_COUNTER_MAX)
5019		seq_puts(m, "max\n");
5020	else
5021		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5022
5023	return 0;
5024}
5025
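/*
 * Writing memory.max: set the new hard limit, then try to push usage below it
 * by draining per-cpu stocks and reclaiming; as a last resort invoke the
 * OOM killer.
 */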
5026static ssize_t memory_max_write(struct kernfs_open_file *of,
5027				char *buf, size_t nbytes, loff_t off)
5028{
5029	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5030	unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
5031	bool drained = false;
5032	unsigned long max;
5033	int err;
5034
5035	buf = strstrip(buf);
5036	err = page_counter_memparse(buf, "max", &max);
5037	if (err)
5038		return err;
5039
5040	xchg(&memcg->memory.limit, max);
5041
5042	for (;;) {
5043		unsigned long nr_pages = page_counter_read(&memcg->memory);
5044
5045		if (nr_pages <= max)
5046			break;
5047
5048		if (signal_pending(current)) {
5049			err = -EINTR;
5050			break;
5051		}
5052
5053		if (!drained) {
5054			drain_all_stock(memcg);
5055			drained = true;
5056			continue;
5057		}
5058
5059		if (nr_reclaims) {
5060			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
5061							  GFP_KERNEL, true))
5062				nr_reclaims--;
5063			continue;
5064		}
5065
5066		mem_cgroup_events(memcg, MEMCG_OOM, 1);
5067		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
5068			break;
5069	}
5070
5071	memcg_wb_domain_size_changed(memcg);
5072	return nbytes;
5073}
5074
5075static int memory_events_show(struct seq_file *m, void *v)
5076{
5077	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5078
5079	seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
5080	seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
5081	seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
5082	seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
5083
5084	return 0;
5085}
5086
5087static int memory_stat_show(struct seq_file *m, void *v)
5088{
5089	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5090	unsigned long stat[MEMCG_NR_STAT];
5091	unsigned long events[MEMCG_NR_EVENTS];
5092	int i;
5093
5094	/*
5095	 * Provide statistics on the state of the memory subsystem as
5096	 * well as cumulative event counters that show past behavior.
5097	 *
5098	 * This list is ordered following a combination of these gradients:
5099	 * 1) generic big picture -> specifics and details
5100	 * 2) reflecting userspace activity -> reflecting kernel heuristics
5101	 *
5102	 * Current memory state:
5103	 */
5104
5105	tree_stat(memcg, stat);
5106	tree_events(memcg, events);
5107
5108	seq_printf(m, "anon %llu\n",
5109		   (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
5110	seq_printf(m, "file %llu\n",
5111		   (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
5112	seq_printf(m, "kernel_stack %llu\n",
5113		   (u64)stat[MEMCG_KERNEL_STACK] * PAGE_SIZE);
5114	seq_printf(m, "slab %llu\n",
5115		   (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
5116			 stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
5117	seq_printf(m, "sock %llu\n",
5118		   (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
5119
5120	seq_printf(m, "file_mapped %llu\n",
5121		   (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE);
5122	seq_printf(m, "file_dirty %llu\n",
5123		   (u64)stat[MEM_CGROUP_STAT_DIRTY] * PAGE_SIZE);
5124	seq_printf(m, "file_writeback %llu\n",
5125		   (u64)stat[MEM_CGROUP_STAT_WRITEBACK] * PAGE_SIZE);
5126
5127	for (i = 0; i < NR_LRU_LISTS; i++) {
5128		struct mem_cgroup *mi;
5129		unsigned long val = 0;
5130
5131		for_each_mem_cgroup_tree(mi, memcg)
5132			val += mem_cgroup_nr_lru_pages(mi, BIT(i));
5133		seq_printf(m, "%s %llu\n",
5134			   mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
5135	}
5136
5137	seq_printf(m, "slab_reclaimable %llu\n",
5138		   (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE);
5139	seq_printf(m, "slab_unreclaimable %llu\n",
5140		   (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
5141
5142	/* Accumulated memory events */
5143
5144	seq_printf(m, "pgfault %lu\n",
5145		   events[MEM_CGROUP_EVENTS_PGFAULT]);
5146	seq_printf(m, "pgmajfault %lu\n",
5147		   events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
5148
5149	return 0;
5150}
5151
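/* Control files exposed on the default (cgroup v2) hierarchy. */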
5152static struct cftype memory_files[] = {
5153	{
5154		.name = "current",
5155		.flags = CFTYPE_NOT_ON_ROOT,
5156		.read_u64 = memory_current_read,
5157	},
5158	{
5159		.name = "low",
5160		.flags = CFTYPE_NOT_ON_ROOT,
5161		.seq_show = memory_low_show,
5162		.write = memory_low_write,
5163	},
5164	{
5165		.name = "high",
5166		.flags = CFTYPE_NOT_ON_ROOT,
5167		.seq_show = memory_high_show,
5168		.write = memory_high_write,
5169	},
5170	{
5171		.name = "max",
5172		.flags = CFTYPE_NOT_ON_ROOT,
5173		.seq_show = memory_max_show,
5174		.write = memory_max_write,
5175	},
5176	{
5177		.name = "events",
5178		.flags = CFTYPE_NOT_ON_ROOT,
5179		.file_offset = offsetof(struct mem_cgroup, events_file),
5180		.seq_show = memory_events_show,
5181	},
5182	{
5183		.name = "stat",
5184		.flags = CFTYPE_NOT_ON_ROOT,
5185		.seq_show = memory_stat_show,
5186	},
5187	{ }	/* terminate */
5188};
5189
5190struct cgroup_subsys memory_cgrp_subsys = {
5191	.css_alloc = mem_cgroup_css_alloc,
5192	.css_online = mem_cgroup_css_online,
5193	.css_offline = mem_cgroup_css_offline,
5194	.css_released = mem_cgroup_css_released,
5195	.css_free = mem_cgroup_css_free,
5196	.css_reset = mem_cgroup_css_reset,
5197	.can_attach = mem_cgroup_can_attach,
5198	.cancel_attach = mem_cgroup_cancel_attach,
5199	.post_attach = mem_cgroup_move_task,
5200	.bind = mem_cgroup_bind,
5201	.dfl_cftypes = memory_files,
5202	.legacy_cftypes = mem_cgroup_legacy_files,
5203	.early_init = 0,
5204};
5205
5206/**
5207 * mem_cgroup_low - check if memory consumption is below the normal range
5208 * @root: the highest ancestor to consider
5209 * @memcg: the memory cgroup to check
5210 *
5211 * Returns %true if memory consumption of @memcg, and that of all
5212 * configurable ancestors up to @root, is below the normal range.
5213 */
5214bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
5215{
5216	if (mem_cgroup_disabled())
5217		return false;
5218
5219	/*
5220	 * The toplevel group doesn't have a configurable range, so
5221	 * it's never low when looked at directly, and it is not
5222	 * considered an ancestor when assessing the hierarchy.
5223	 */
5224
5225	if (memcg == root_mem_cgroup)
5226		return false;
5227
5228	if (page_counter_read(&memcg->memory) >= memcg->low)
5229		return false;
5230
5231	while (memcg != root) {
5232		memcg = parent_mem_cgroup(memcg);
5233
5234		if (memcg == root_mem_cgroup)
5235			break;
5236
5237		if (page_counter_read(&memcg->memory) >= memcg->low)
5238			return false;
5239	}
5240	return true;
5241}
5242
5243/**
5244 * mem_cgroup_try_charge - try charging a page
5245 * @page: page to charge
5246 * @mm: mm context of the victim
5247 * @gfp_mask: reclaim mode
5248 * @memcgp: charged memcg return
5249 *
5250 * Try to charge @page to the memcg that @mm belongs to, reclaiming
5251 * pages according to @gfp_mask if necessary.
5252 *
5253 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5254 * Otherwise, an error code is returned.
5255 *
5256 * After page->mapping has been set up, the caller must finalize the
5257 * charge with mem_cgroup_commit_charge().  Or abort the transaction
5258 * with mem_cgroup_cancel_charge() in case page instantiation fails.
5259 */
5260int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
5261			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
5262			  bool compound)
5263{
5264	struct mem_cgroup *memcg = NULL;
5265	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5266	int ret = 0;
5267
5268	if (mem_cgroup_disabled())
5269		goto out;
5270
5271	if (PageSwapCache(page)) {
5272		/*
5273		 * Every swap fault against a single page tries to charge the
5274		 * page, bail as early as possible.  shmem_unuse() encounters
5275		 * already charged pages, too.  The USED bit is protected by
5276		 * the page lock, which serializes swap cache removal, which
5277		 * in turn serializes uncharging.
5278		 */
5279		VM_BUG_ON_PAGE(!PageLocked(page), page);
5280		if (page->mem_cgroup)
5281			goto out;
5282
5283		if (do_swap_account) {
5284			swp_entry_t ent = { .val = page_private(page), };
5285			unsigned short id = lookup_swap_cgroup_id(ent);
5286
5287			rcu_read_lock();
5288			memcg = mem_cgroup_from_id(id);
5289			if (memcg && !css_tryget_online(&memcg->css))
5290				memcg = NULL;
5291			rcu_read_unlock();
5292		}
5293	}
5294
5295	if (!memcg)
5296		memcg = get_mem_cgroup_from_mm(mm);
5297
5298	ret = try_charge(memcg, gfp_mask, nr_pages);
5299
5300	css_put(&memcg->css);
5301out:
5302	*memcgp = memcg;
5303	return ret;
5304}
5305
5306/**
5307 * mem_cgroup_commit_charge - commit a page charge
5308 * @page: page to charge
5309 * @memcg: memcg to charge the page to
5310 * @lrucare: page might be on LRU already
5311 *
5312 * Finalize a charge transaction started by mem_cgroup_try_charge(),
5313 * after page->mapping has been set up.  This must happen atomically
5314 * as part of the page instantiation, i.e. under the page table lock
5315 * for anonymous pages, under the page lock for page and swap cache.
5316 *
5317 * In addition, the page must not be on the LRU during the commit, to
5318 * prevent racing with task migration.  If it might be, use @lrucare.
5319 *
5320 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
5321 */
5322void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
5323			      bool lrucare, bool compound)
5324{
5325	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5326
5327	VM_BUG_ON_PAGE(!page->mapping, page);
5328	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5329
5330	if (mem_cgroup_disabled())
5331		return;
5332	/*
5333	 * Swap faults will attempt to charge the same page multiple
5334	 * times.  But reuse_swap_page() might have removed the page
5335	 * from swapcache already, so we can't check PageSwapCache().
5336	 */
5337	if (!memcg)
5338		return;
5339
5340	commit_charge(page, memcg, lrucare);
5341
5342	local_irq_disable();
5343	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
5344	memcg_check_events(memcg, page);
5345	local_irq_enable();
5346
5347	if (do_memsw_account() && PageSwapCache(page)) {
5348		swp_entry_t entry = { .val = page_private(page) };
5349		/*
5350		 * The swap entry might not get freed for a long time,
5351		 * let's not wait for it.  The page already received a
5352		 * memory+swap charge, drop the swap entry duplicate.
5353		 */
5354		mem_cgroup_uncharge_swap(entry);
5355	}
5356}
5357
5358/**
5359 * mem_cgroup_cancel_charge - cancel a page charge
5360 * @page: page to charge
5361 * @memcg: memcg to charge the page to
5362 *
5363 * Cancel a charge transaction started by mem_cgroup_try_charge().
5364 */
5365void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
5366		bool compound)
5367{
5368	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5369
5370	if (mem_cgroup_disabled())
5371		return;
5372	/*
5373	 * Swap faults will attempt to charge the same page multiple
5374	 * times.  But reuse_swap_page() might have removed the page
5375	 * from swapcache already, so we can't check PageSwapCache().
5376	 */
5377	if (!memcg)
5378		return;
5379
5380	cancel_charge(memcg, nr_pages);
5381}
5382
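/*
 * Flush one batch of uncharges that all belong to the same memcg: release the
 * page counter charges, update statistics and event counters, and drop the
 * css references taken at charge time.
 */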
5383static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
5384			   unsigned long nr_anon, unsigned long nr_file,
5385			   unsigned long nr_huge, struct page *dummy_page)
5386{
5387	unsigned long nr_pages = nr_anon + nr_file;
5388	unsigned long flags;
5389
5390	if (!mem_cgroup_is_root(memcg)) {
5391		page_counter_uncharge(&memcg->memory, nr_pages);
5392		if (do_memsw_account())
5393			page_counter_uncharge(&memcg->memsw, nr_pages);
5394		memcg_oom_recover(memcg);
5395	}
5396
5397	local_irq_save(flags);
5398	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
5399	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
5400	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
5401	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
5402	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
5403	memcg_check_events(memcg, dummy_page);
5404	local_irq_restore(flags);
5405
5406	if (!mem_cgroup_is_root(memcg))
5407		css_put_many(&memcg->css, nr_pages);
5408}
5409
5410static void uncharge_list(struct list_head *page_list)
5411{
5412	struct mem_cgroup *memcg = NULL;
5413	unsigned long nr_anon = 0;
5414	unsigned long nr_file = 0;
5415	unsigned long nr_huge = 0;
5416	unsigned long pgpgout = 0;
5417	struct list_head *next;
5418	struct page *page;
5419
5420	/*
5421	 * Note that the list can be a single page->lru; hence the
5422	 * do-while loop instead of a simple list_for_each_entry().
5423	 */
5424	next = page_list->next;
5425	do {
5426		unsigned int nr_pages = 1;
5427
5428		page = list_entry(next, struct page, lru);
5429		next = page->lru.next;
5430
5431		VM_BUG_ON_PAGE(PageLRU(page), page);
5432		VM_BUG_ON_PAGE(page_count(page), page);
5433
5434		if (!page->mem_cgroup)
5435			continue;
5436
5437		/*
5438		 * Nobody should be changing or seriously looking at
5439		 * page->mem_cgroup at this point, we have fully
5440		 * exclusive access to the page.
5441		 */
5442
5443		if (memcg != page->mem_cgroup) {
5444			if (memcg) {
5445				uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5446					       nr_huge, page);
5447				pgpgout = nr_anon = nr_file = nr_huge = 0;
5448			}
5449			memcg = page->mem_cgroup;
5450		}
5451
5452		if (PageTransHuge(page)) {
5453			nr_pages <<= compound_order(page);
5454			VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5455			nr_huge += nr_pages;
5456		}
5457
5458		if (PageAnon(page))
5459			nr_anon += nr_pages;
5460		else
5461			nr_file += nr_pages;
5462
5463		page->mem_cgroup = NULL;
5464
5465		pgpgout++;
5466	} while (next != page_list);
5467
5468	if (memcg)
5469		uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5470			       nr_huge, page);
5471}
5472
5473/**
5474 * mem_cgroup_uncharge - uncharge a page
5475 * @page: page to uncharge
5476 *
5477 * Uncharge a page previously charged with mem_cgroup_try_charge() and
5478 * mem_cgroup_commit_charge().
5479 */
5480void mem_cgroup_uncharge(struct page *page)
5481{
5482	if (mem_cgroup_disabled())
5483		return;
5484
5485	/* Don't touch page->lru of any random page, pre-check: */
5486	if (!page->mem_cgroup)
5487		return;
5488
5489	INIT_LIST_HEAD(&page->lru);
5490	uncharge_list(&page->lru);
5491}
5492
5493/**
5494 * mem_cgroup_uncharge_list - uncharge a list of pages
5495 * @page_list: list of pages to uncharge
5496 *
5497 * Uncharge a list of pages previously charged with
5498 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
5499 */
5500void mem_cgroup_uncharge_list(struct list_head *page_list)
5501{
5502	if (mem_cgroup_disabled())
5503		return;
5504
5505	if (!list_empty(page_list))
5506		uncharge_list(page_list);
5507}
5508
5509/**
5510 * mem_cgroup_migrate - charge a page's replacement
5511 * @oldpage: currently circulating page
5512 * @newpage: replacement page
5513 *
5514 * Charge @newpage as a replacement page for @oldpage. @oldpage will
5515 * be uncharged upon free.
5516 *
5517 * Both pages must be locked, @newpage->mapping must be set up.
5518 */
5519void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
5520{
5521	struct mem_cgroup *memcg;
5522	unsigned int nr_pages;
5523	bool compound;
5524
5525	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
5526	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
5527	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
5528	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
5529		       newpage);
5530
5531	if (mem_cgroup_disabled())
5532		return;
5533
5534	/* Page cache replacement: new page already charged? */
5535	if (newpage->mem_cgroup)
5536		return;
5537
5538	/* Swapcache readahead pages can get replaced before being charged */
5539	memcg = oldpage->mem_cgroup;
5540	if (!memcg)
5541		return;
5542
5543	/* Force-charge the new page. The old one will be freed soon */
5544	compound = PageTransHuge(newpage);
5545	nr_pages = compound ? hpage_nr_pages(newpage) : 1;
5546
5547	page_counter_charge(&memcg->memory, nr_pages);
5548	if (do_memsw_account())
5549		page_counter_charge(&memcg->memsw, nr_pages);
5550	css_get_many(&memcg->css, nr_pages);
5551
5552	commit_charge(newpage, memcg, false);
5553
5554	local_irq_disable();
5555	mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
5556	memcg_check_events(memcg, newpage);
5557	local_irq_enable();
5558}
5559
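/*
 * Static key that lets the networking code skip the memcg socket
 * accounting paths entirely while no cgroup is using them.
 */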
5560DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
5561EXPORT_SYMBOL(memcg_sockets_enabled_key);
5562
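/*
 * Bind @sk to a memory cgroup so its buffer pages can be accounted:
 * keep the original memcg on clone, otherwise use the current task's
 * memcg unless it is the root or socket accounting is inactive on the
 * legacy hierarchy. Takes a css reference that sock_release_memcg()
 * drops.
 */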
5563void sock_update_memcg(struct sock *sk)
5564{
5565	struct mem_cgroup *memcg;
5566
5567	/* Socket cloning can throw us here with sk->sk_memcg already
5568	 * filled. It won't, however, necessarily happen from
5569	 * process context. So the test for root memcg given
5570	 * the current task's memcg won't help us in this case.
5571	 *
5572	 * Respecting the original socket's memcg is a better
5573	 * decision in this case.
5574	 */
5575	if (sk->sk_memcg) {
5576		BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
5577		css_get(&sk->sk_memcg->css);
5578		return;
5579	}
5580
5581	rcu_read_lock();
5582	memcg = mem_cgroup_from_task(current);
5583	if (memcg == root_mem_cgroup)
5584		goto out;
5585	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
5586		goto out;
5587	if (css_tryget_online(&memcg->css))
5588		sk->sk_memcg = memcg;
5589out:
5590	rcu_read_unlock();
5591}
5592EXPORT_SYMBOL(sock_update_memcg);
5593
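/* Drop the css reference taken by sock_update_memcg(). */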
5594void sock_release_memcg(struct sock *sk)
5595{
5596	WARN_ON(!sk->sk_memcg);
5597	css_put(&sk->sk_memcg->css);
5598}
5599
5600/**
5601 * mem_cgroup_charge_skmem - charge socket memory
5602 * @memcg: memcg to charge
5603 * @nr_pages: number of pages to charge
5604 *
5605 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
5606 * @memcg's configured limit, %false if the charge had to be forced.
5607 */
5608bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5609{
5610	gfp_t gfp_mask = GFP_KERNEL;
5611
5612	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5613		struct page_counter *fail;
5614
5615		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
5616			memcg->tcpmem_pressure = 0;
5617			return true;
5618		}
5619		page_counter_charge(&memcg->tcpmem, nr_pages);
5620		memcg->tcpmem_pressure = 1;
5621		return false;
5622	}
5623
5624	/* Don't block in the packet receive path */
5625	if (in_softirq())
5626		gfp_mask = GFP_NOWAIT;
5627
5628	this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages);
5629
5630	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
5631		return true;
5632
5633	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
5634	return false;
5635}
5636
5637/**
5638 * mem_cgroup_uncharge_skmem - uncharge socket memory
5639 * @memcg: memcg to uncharge
5640 * @nr_pages: number of pages to uncharge
5641 */
5642void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5643{
5644	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5645		page_counter_uncharge(&memcg->tcpmem, nr_pages);
5646		return;
5647	}
5648
5649	this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages);
5650
5651	page_counter_uncharge(&memcg->memory, nr_pages);
5652	css_put_many(&memcg->css, nr_pages);
5653}
5654
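/*
 * Parse the "cgroup.memory=" boot parameter. Supported tokens are
 * "nosocket" (disable socket memory accounting) and "nokmem"
 * (disable kernel memory accounting).
 */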
5655static int __init cgroup_memory(char *s)
5656{
5657	char *token;
5658
5659	while ((token = strsep(&s, ",")) != NULL) {
5660		if (!*token)
5661			continue;
5662		if (!strcmp(token, "nosocket"))
5663			cgroup_memory_nosocket = true;
5664		if (!strcmp(token, "nokmem"))
5665			cgroup_memory_nokmem = true;
5666	}
5667	return 0;
5668}
5669__setup("cgroup.memory=", cgroup_memory);
5670
5671/*
5672 * subsys_initcall() for memory controller.
5673 *
5674 * Some parts like hotcpu_notifier() have to be initialized from this context
5675 * because of lock dependencies (cgroup_lock -> cpu hotplug), but basically
5676 * everything that doesn't depend on a specific mem_cgroup structure should
5677 * be initialized from here.
5678 */
5679static int __init mem_cgroup_init(void)
5680{
5681	int cpu, node;
5682
5683	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
5684
5685	for_each_possible_cpu(cpu)
5686		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5687			  drain_local_stock);
5688
5689	for_each_node(node) {
5690		struct mem_cgroup_tree_per_node *rtpn;
5691		int zone;
5692
5693		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
5694				    node_online(node) ? node : NUMA_NO_NODE);
5695
5696		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
5697			struct mem_cgroup_tree_per_zone *rtpz;
5698
5699			rtpz = &rtpn->rb_tree_per_zone[zone];
5700			rtpz->rb_root = RB_ROOT;
5701			spin_lock_init(&rtpz->lock);
5702		}
5703		soft_limit_tree.rb_tree_per_node[node] = rtpn;
5704	}
5705
5706	return 0;
5707}
5708subsys_initcall(mem_cgroup_init);
5709
5710#ifdef CONFIG_MEMCG_SWAP
5711/**
5712 * mem_cgroup_swapout - transfer a memsw charge to swap
5713 * @page: page whose memsw charge to transfer
5714 * @entry: swap entry to move the charge to
5715 *
5716 * Transfer the memsw charge of @page to @entry.
5717 */
5718void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5719{
5720	struct mem_cgroup *memcg;
5721	unsigned short oldid;
5722
5723	VM_BUG_ON_PAGE(PageLRU(page), page);
5724	VM_BUG_ON_PAGE(page_count(page), page);
5725
5726	if (!do_memsw_account())
5727		return;
5728
5729	memcg = page->mem_cgroup;
5730
5731	/* Readahead page, never charged */
5732	if (!memcg)
5733		return;
5734
5735	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5736	VM_BUG_ON_PAGE(oldid, page);
5737	mem_cgroup_swap_statistics(memcg, true);
5738
5739	page->mem_cgroup = NULL;
5740
5741	if (!mem_cgroup_is_root(memcg))
5742		page_counter_uncharge(&memcg->memory, 1);
5743
5744	/*
5745	 * Interrupts should be disabled here because the caller holds the
5746	 * mapping->tree_lock, which is taken with interrupts disabled. It is
5747	 * important to have interrupts disabled here because that is the
5748	 * only synchronisation we have for updating the per-CPU variables.
5749	 */
5750	VM_BUG_ON(!irqs_disabled());
5751	mem_cgroup_charge_statistics(memcg, page, false, -1);
5752	memcg_check_events(memcg, page);
5753}
5754
5755/**
5756 * mem_cgroup_try_charge_swap - try charging a swap entry
5757 * @page: page being added to swap
5758 * @entry: swap entry to charge
5759 *
5760 * Try to charge @entry to the memcg that @page belongs to.
5761 *
5762 * Returns 0 on success, -ENOMEM on failure.
5763 */
5764int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
5765{
5766	struct mem_cgroup *memcg;
5767	struct page_counter *counter;
5768	unsigned short oldid;
5769
5770	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
5771		return 0;
5772
5773	memcg = page->mem_cgroup;
5774
5775	/* Readahead page, never charged */
5776	if (!memcg)
5777		return 0;
5778
5779	if (!mem_cgroup_is_root(memcg) &&
5780	    !page_counter_try_charge(&memcg->swap, 1, &counter))
5781		return -ENOMEM;
5782
5783	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5784	VM_BUG_ON_PAGE(oldid, page);
5785	mem_cgroup_swap_statistics(memcg, true);
5786
5787	css_get(&memcg->css);
5788	return 0;
5789}
5790
5791/**
5792 * mem_cgroup_uncharge_swap - uncharge a swap entry
5793 * @entry: swap entry to uncharge
5794 *
5795 * Drop the swap charge associated with @entry.
5796 */
5797void mem_cgroup_uncharge_swap(swp_entry_t entry)
5798{
5799	struct mem_cgroup *memcg;
5800	unsigned short id;
5801
5802	if (!do_swap_account)
5803		return;
5804
5805	id = swap_cgroup_record(entry, 0);
5806	rcu_read_lock();
5807	memcg = mem_cgroup_from_id(id);
5808	if (memcg) {
5809		if (!mem_cgroup_is_root(memcg)) {
5810			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5811				page_counter_uncharge(&memcg->swap, 1);
5812			else
5813				page_counter_uncharge(&memcg->memsw, 1);
5814		}
5815		mem_cgroup_swap_statistics(memcg, false);
5816		css_put(&memcg->css);
5817	}
5818	rcu_read_unlock();
5819}
5820
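/*
 * Return how many swap pages @memcg's hierarchy may still use: the
 * global number of free swap pages, clamped by the remaining room
 * under each ancestor's swap limit (cgroup2 with swap accounting only).
 */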
5821long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5822{
5823	long nr_swap_pages = get_nr_swap_pages();
5824
5825	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5826		return nr_swap_pages;
5827	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
5828		nr_swap_pages = min_t(long, nr_swap_pages,
5829				      READ_ONCE(memcg->swap.limit) -
5830				      page_counter_read(&memcg->swap));
5831	return nr_swap_pages;
5832}
5833
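/*
 * Return true if swap should be considered nearly full for @page:
 * either swap is globally more than half used, or the page's memcg
 * (or one of its ancestors) has used at least half of its swap limit.
 */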
5834bool mem_cgroup_swap_full(struct page *page)
5835{
5836	struct mem_cgroup *memcg;
5837
5838	VM_BUG_ON_PAGE(!PageLocked(page), page);
5839
5840	if (vm_swap_full())
5841		return true;
5842	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5843		return false;
5844
5845	memcg = page->mem_cgroup;
5846	if (!memcg)
5847		return false;
5848
5849	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
5850		if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
5851			return true;
5852
5853	return false;
5854}
5855
5856/* For remembering the boot option */
5857#ifdef CONFIG_MEMCG_SWAP_ENABLED
5858static int really_do_swap_account __initdata = 1;
5859#else
5860static int really_do_swap_account __initdata;
5861#endif
5862
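/*
 * Parse the "swapaccount=" boot parameter: "1" enables and "0"
 * disables memory+swap accounting.
 */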
5863static int __init enable_swap_account(char *s)
5864{
5865	if (!strcmp(s, "1"))
5866		really_do_swap_account = 1;
5867	else if (!strcmp(s, "0"))
5868		really_do_swap_account = 0;
5869	return 1;
5870}
5871__setup("swapaccount=", enable_swap_account);
5872
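/* cgroup2 "memory.swap.current": current swap usage in bytes. */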
5873static u64 swap_current_read(struct cgroup_subsys_state *css,
5874			     struct cftype *cft)
5875{
5876	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5877
5878	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
5879}
5880
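/* cgroup2 "memory.swap.max": show the swap limit, or "max" if unlimited. */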
5881static int swap_max_show(struct seq_file *m, void *v)
5882{
5883	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5884	unsigned long max = READ_ONCE(memcg->swap.limit);
5885
5886	if (max == PAGE_COUNTER_MAX)
5887		seq_puts(m, "max\n");
5888	else
5889		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5890
5891	return 0;
5892}
5893
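/* cgroup2 "memory.swap.max": parse and apply a new swap limit. */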
5894static ssize_t swap_max_write(struct kernfs_open_file *of,
5895			      char *buf, size_t nbytes, loff_t off)
5896{
5897	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5898	unsigned long max;
5899	int err;
5900
5901	buf = strstrip(buf);
5902	err = page_counter_memparse(buf, "max", &max);
5903	if (err)
5904		return err;
5905
5906	mutex_lock(&memcg_limit_mutex);
5907	err = page_counter_limit(&memcg->swap, max);
5908	mutex_unlock(&memcg_limit_mutex);
5909	if (err)
5910		return err;
5911
5912	return nbytes;
5913}
5914
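/* Swap control files for the unified (cgroup2) hierarchy. */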
5915static struct cftype swap_files[] = {
5916	{
5917		.name = "swap.current",
5918		.flags = CFTYPE_NOT_ON_ROOT,
5919		.read_u64 = swap_current_read,
5920	},
5921	{
5922		.name = "swap.max",
5923		.flags = CFTYPE_NOT_ON_ROOT,
5924		.seq_show = swap_max_show,
5925		.write = swap_max_write,
5926	},
5927	{ }	/* terminate */
5928};
5929
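/* Legacy (cgroup1) memory+swap control files. */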
5930static struct cftype memsw_cgroup_files[] = {
5931	{
5932		.name = "memsw.usage_in_bytes",
5933		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
5934		.read_u64 = mem_cgroup_read_u64,
5935	},
5936	{
5937		.name = "memsw.max_usage_in_bytes",
5938		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
5939		.write = mem_cgroup_reset,
5940		.read_u64 = mem_cgroup_read_u64,
5941	},
5942	{
5943		.name = "memsw.limit_in_bytes",
5944		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
5945		.write = mem_cgroup_write,
5946		.read_u64 = mem_cgroup_read_u64,
5947	},
5948	{
5949		.name = "memsw.failcnt",
5950		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
5951		.write = mem_cgroup_reset,
5952		.read_u64 = mem_cgroup_read_u64,
5953	},
5954	{ },	/* terminate */
5955};
5956
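/*
 * Enable swap accounting and register the swap control files, provided
 * the memory controller is active and "swapaccount=0" was not passed.
 */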
5957static int __init mem_cgroup_swap_init(void)
5958{
5959	if (!mem_cgroup_disabled() && really_do_swap_account) {
5960		do_swap_account = 1;
5961		WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
5962					       swap_files));
5963		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
5964						  memsw_cgroup_files));
5965	}
5966	return 0;
5967}
5968subsys_initcall(mem_cgroup_swap_init);
5969
5970#endif /* CONFIG_MEMCG_SWAP */
v3.5.6
   1/* memcontrol.c - Memory Controller
   2 *
   3 * Copyright IBM Corporation, 2007
   4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
   5 *
   6 * Copyright 2007 OpenVZ SWsoft Inc
   7 * Author: Pavel Emelianov <xemul@openvz.org>
   8 *
   9 * Memory thresholds
  10 * Copyright (C) 2009 Nokia Corporation
  11 * Author: Kirill A. Shutemov
  12 *
 
 
 
 
 
 
 
 
 
 
  13 * This program is free software; you can redistribute it and/or modify
  14 * it under the terms of the GNU General Public License as published by
  15 * the Free Software Foundation; either version 2 of the License, or
  16 * (at your option) any later version.
  17 *
  18 * This program is distributed in the hope that it will be useful,
  19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  21 * GNU General Public License for more details.
  22 */
  23
  24#include <linux/res_counter.h>
  25#include <linux/memcontrol.h>
  26#include <linux/cgroup.h>
  27#include <linux/mm.h>
  28#include <linux/hugetlb.h>
  29#include <linux/pagemap.h>
  30#include <linux/smp.h>
  31#include <linux/page-flags.h>
  32#include <linux/backing-dev.h>
  33#include <linux/bit_spinlock.h>
  34#include <linux/rcupdate.h>
  35#include <linux/limits.h>
  36#include <linux/export.h>
  37#include <linux/mutex.h>
  38#include <linux/rbtree.h>
  39#include <linux/slab.h>
  40#include <linux/swap.h>
  41#include <linux/swapops.h>
  42#include <linux/spinlock.h>
  43#include <linux/eventfd.h>
 
  44#include <linux/sort.h>
  45#include <linux/fs.h>
  46#include <linux/seq_file.h>
  47#include <linux/vmalloc.h>
  48#include <linux/mm_inline.h>
  49#include <linux/page_cgroup.h>
  50#include <linux/cpu.h>
  51#include <linux/oom.h>
 
 
 
  52#include "internal.h"
  53#include <net/sock.h>
  54#include <net/tcp_memcontrol.h>
 
  55
  56#include <asm/uaccess.h>
  57
  58#include <trace/events/vmscan.h>
  59
  60struct cgroup_subsys mem_cgroup_subsys __read_mostly;
 
 
 
 
  61#define MEM_CGROUP_RECLAIM_RETRIES	5
  62static struct mem_cgroup *root_mem_cgroup __read_mostly;
  63
  64#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
  65/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
  66int do_swap_account __read_mostly;
  67
  68/* for remember boot option*/
  69#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
  70static int really_do_swap_account __initdata = 1;
  71#else
  72static int really_do_swap_account __initdata = 0;
  73#endif
  74
 
 
 
  75#else
  76#define do_swap_account		0
  77#endif
  78
  79
  80/*
  81 * Statistics for memory cgroup.
  82 */
  83enum mem_cgroup_stat_index {
  84	/*
  85	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
  86	 */
  87	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
  88	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as anon rss */
  89	MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
  90	MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
  91	MEM_CGROUP_STAT_NSTATS,
  92};
  93
  94static const char * const mem_cgroup_stat_names[] = {
  95	"cache",
  96	"rss",
 
  97	"mapped_file",
 
 
  98	"swap",
  99};
 100
 101enum mem_cgroup_events_index {
 102	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
 103	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
 104	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
 105	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
 106	MEM_CGROUP_EVENTS_NSTATS,
 107};
 108
 109static const char * const mem_cgroup_events_names[] = {
 110	"pgpgin",
 111	"pgpgout",
 112	"pgfault",
 113	"pgmajfault",
 114};
 115
 116/*
 117 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 118 * it will be incremated by the number of pages. This counter is used for
 119 * for trigger some periodic events. This is straightforward and better
 120 * than using jiffies etc. to handle periodic memcg event.
 121 */
 122enum mem_cgroup_events_target {
 123	MEM_CGROUP_TARGET_THRESH,
 124	MEM_CGROUP_TARGET_SOFTLIMIT,
 125	MEM_CGROUP_TARGET_NUMAINFO,
 126	MEM_CGROUP_NTARGETS,
 127};
 
 128#define THRESHOLDS_EVENTS_TARGET 128
 129#define SOFTLIMIT_EVENTS_TARGET 1024
 130#define NUMAINFO_EVENTS_TARGET	1024
 131
 132struct mem_cgroup_stat_cpu {
 133	long count[MEM_CGROUP_STAT_NSTATS];
 134	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
 135	unsigned long nr_page_events;
 136	unsigned long targets[MEM_CGROUP_NTARGETS];
 137};
 138
 139struct mem_cgroup_reclaim_iter {
 140	/* css_id of the last scanned hierarchy member */
 141	int position;
 142	/* scan generation, increased every round-trip */
 143	unsigned int generation;
 144};
 145
 146/*
 147 * per-zone information in memory controller.
 148 */
 149struct mem_cgroup_per_zone {
 150	struct lruvec		lruvec;
 151	unsigned long		lru_size[NR_LRU_LISTS];
 152
 153	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
 154
 155	struct rb_node		tree_node;	/* RB tree node */
 156	unsigned long long	usage_in_excess;/* Set to the value by which */
 157						/* the soft limit is exceeded*/
 158	bool			on_tree;
 159	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
 160						/* use container_of	   */
 161};
 162
 163struct mem_cgroup_per_node {
 164	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
 165};
 166
 167struct mem_cgroup_lru_info {
 168	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
 169};
 170
 171/*
 172 * Cgroups above their limits are maintained in a RB-Tree, independent of
 173 * their hierarchy representation
 174 */
 175
 176struct mem_cgroup_tree_per_zone {
 177	struct rb_root rb_root;
 178	spinlock_t lock;
 179};
 180
 181struct mem_cgroup_tree_per_node {
 182	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
 183};
 184
 185struct mem_cgroup_tree {
 186	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
 187};
 188
 189static struct mem_cgroup_tree soft_limit_tree __read_mostly;
 190
 191struct mem_cgroup_threshold {
 192	struct eventfd_ctx *eventfd;
 193	u64 threshold;
 194};
 195
 196/* For threshold */
 197struct mem_cgroup_threshold_ary {
 198	/* An array index points to threshold just below or equal to usage. */
 199	int current_threshold;
 200	/* Size of entries[] */
 201	unsigned int size;
 202	/* Array of thresholds */
 203	struct mem_cgroup_threshold entries[0];
 204};
 205
 206struct mem_cgroup_thresholds {
 207	/* Primary thresholds array */
 208	struct mem_cgroup_threshold_ary *primary;
 209	/*
 210	 * Spare threshold array.
 211	 * This is needed to make mem_cgroup_unregister_event() "never fail".
 212	 * It must be able to store at least primary->size - 1 entries.
 213	 */
 214	struct mem_cgroup_threshold_ary *spare;
 215};
 216
 217/* for OOM */
 218struct mem_cgroup_eventfd_list {
 219	struct list_head list;
 220	struct eventfd_ctx *eventfd;
 221};
 222
 223static void mem_cgroup_threshold(struct mem_cgroup *memcg);
 224static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
 225
 226/*
 227 * The memory controller data structure. The memory controller controls both
 228 * page cache and RSS per cgroup. We would eventually like to provide
 229 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 230 * to help the administrator determine what knobs to tune.
 231 *
 232 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 233 * we hit the water mark. May be even add a low water mark, such that
 234 * no reclaim occurs from a cgroup at it's low water mark, this is
 235 * a feature that will be implemented much later in the future.
 236 */
 237struct mem_cgroup {
 238	struct cgroup_subsys_state css;
 239	/*
 240	 * the counter to account for memory usage
 241	 */
 242	struct res_counter res;
 243
 244	union {
 245		/*
 246		 * the counter to account for mem+swap usage.
 247		 */
 248		struct res_counter memsw;
 249
 250		/*
 251		 * rcu_freeing is used only when freeing struct mem_cgroup,
 252		 * so put it into a union to avoid wasting more memory.
 253		 * It must be disjoint from the css field.  It could be
 254		 * in a union with the res field, but res plays a much
 255		 * larger part in mem_cgroup life than memsw, and might
 256		 * be of interest, even at time of free, when debugging.
 257		 * So share rcu_head with the less interesting memsw.
 258		 */
 259		struct rcu_head rcu_freeing;
 260		/*
 261		 * We also need some space for a worker in deferred freeing.
 262		 * By the time we call it, rcu_freeing is no longer in use.
 263		 */
 264		struct work_struct work_freeing;
 265	};
 266
 267	/*
 268	 * Per cgroup active and inactive list, similar to the
 269	 * per zone LRU lists.
 270	 */
 271	struct mem_cgroup_lru_info info;
 272	int last_scanned_node;
 273#if MAX_NUMNODES > 1
 274	nodemask_t	scan_nodes;
 275	atomic_t	numainfo_events;
 276	atomic_t	numainfo_updating;
 277#endif
 278	/*
 279	 * Should the accounting and control be hierarchical, per subtree?
 280	 */
 281	bool use_hierarchy;
 282
 283	bool		oom_lock;
 284	atomic_t	under_oom;
 285
 286	atomic_t	refcnt;
 287
 288	int	swappiness;
 289	/* OOM-Killer disable */
 290	int		oom_kill_disable;
 291
 292	/* set when res.limit == memsw.limit */
 293	bool		memsw_is_minimum;
 294
 295	/* protect arrays of thresholds */
 296	struct mutex thresholds_lock;
 297
 298	/* thresholds for memory usage. RCU-protected */
 299	struct mem_cgroup_thresholds thresholds;
 300
 301	/* thresholds for mem+swap usage. RCU-protected */
 302	struct mem_cgroup_thresholds memsw_thresholds;
 303
 304	/* For oom notifier event fd */
 305	struct list_head oom_notify;
 306
 307	/*
 308	 * Should we move charges of a task when a task is moved into this
 309	 * mem_cgroup ? And what type of charges should we move ?
 310	 */
 311	unsigned long 	move_charge_at_immigrate;
 312	/*
 313	 * set > 0 if pages under this cgroup are moving to other cgroup.
 314	 */
 315	atomic_t	moving_account;
 316	/* taken only while moving_account > 0 */
 317	spinlock_t	move_lock;
 318	/*
 319	 * percpu counter.
 320	 */
 321	struct mem_cgroup_stat_cpu __percpu *stat;
 322	/*
 323	 * used when a cpu is offlined or other synchronizations
 324	 * See mem_cgroup_read_stat().
 325	 */
 326	struct mem_cgroup_stat_cpu nocpu_base;
 327	spinlock_t pcp_counter_lock;
 
 
 328
 329#ifdef CONFIG_INET
 330	struct tcp_memcontrol tcp_mem;
 331#endif
 332};
 333
 334/* Stuffs for move charges at task migration. */
 335/*
 336 * Types of charges to be moved. "move_charge_at_immitgrate" is treated as a
 337 * left-shifted bitmap of these types.
 338 */
 339enum move_type {
 340	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
 341	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
 342	NR_MOVE_TYPE,
 343};
 344
 345/* "mc" and its members are protected by cgroup_mutex */
 346static struct move_charge_struct {
 347	spinlock_t	  lock; /* for from, to */
 
 348	struct mem_cgroup *from;
 349	struct mem_cgroup *to;
 
 350	unsigned long precharge;
 351	unsigned long moved_charge;
 352	unsigned long moved_swap;
 353	struct task_struct *moving_task;	/* a task moving charges */
 354	wait_queue_head_t waitq;		/* a waitq for other context */
 355} mc = {
 356	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
 357	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
 358};
 359
 360static bool move_anon(void)
 361{
 362	return test_bit(MOVE_CHARGE_TYPE_ANON,
 363					&mc.to->move_charge_at_immigrate);
 364}
 365
 366static bool move_file(void)
 367{
 368	return test_bit(MOVE_CHARGE_TYPE_FILE,
 369					&mc.to->move_charge_at_immigrate);
 370}
 371
 372/*
 373 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 374 * limit reclaim to prevent infinite loops, if they ever occur.
 375 */
 376#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
 377#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
 378
 379enum charge_type {
 380	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
 381	MEM_CGROUP_CHARGE_TYPE_MAPPED,
 382	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
 383	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
 384	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
 385	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
 386	NR_CHARGE_TYPE,
 387};
 388
 389/* for encoding cft->private value on file */
 390#define _MEM			(0)
 391#define _MEMSWAP		(1)
 392#define _OOM_TYPE		(2)
 
 
 
 
 
 393#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
 394#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
 395#define MEMFILE_ATTR(val)	((val) & 0xffff)
 396/* Used for OOM nofiier */
 397#define OOM_CONTROL		(0)
 398
 399/*
 400 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 401 */
 402#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
 403#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
 404#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
 405#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
 406
 407static void mem_cgroup_get(struct mem_cgroup *memcg);
 408static void mem_cgroup_put(struct mem_cgroup *memcg);
 
 
 409
 410/* Writing them here to avoid exposing memcg's inner layout */
 411#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
 412#include <net/sock.h>
 413#include <net/ip.h>
 414
 415static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
 416void sock_update_memcg(struct sock *sk)
 417{
 418	if (mem_cgroup_sockets_enabled) {
 419		struct mem_cgroup *memcg;
 420		struct cg_proto *cg_proto;
 421
 422		BUG_ON(!sk->sk_prot->proto_cgroup);
 
 
 
 
 
 
 
 
 
 
 
 
 
 423
 424		/* Socket cloning can throw us here with sk_cgrp already
 425		 * filled. It won't however, necessarily happen from
 426		 * process context. So the test for root memcg given
 427		 * the current task's memcg won't help us in this case.
 428		 *
 429		 * Respecting the original socket's memcg is a better
 430		 * decision in this case.
 431		 */
 432		if (sk->sk_cgrp) {
 433			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
 434			mem_cgroup_get(sk->sk_cgrp->memcg);
 435			return;
 436		}
 437
 438		rcu_read_lock();
 439		memcg = mem_cgroup_from_task(current);
 440		cg_proto = sk->sk_prot->proto_cgroup(memcg);
 441		if (!mem_cgroup_is_root(memcg) && memcg_proto_active(cg_proto)) {
 442			mem_cgroup_get(memcg);
 443			sk->sk_cgrp = cg_proto;
 444		}
 445		rcu_read_unlock();
 446	}
 447}
 448EXPORT_SYMBOL(sock_update_memcg);
 449
 450void sock_release_memcg(struct sock *sk)
 451{
 452	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
 453		struct mem_cgroup *memcg;
 454		WARN_ON(!sk->sk_cgrp->memcg);
 455		memcg = sk->sk_cgrp->memcg;
 456		mem_cgroup_put(memcg);
 457	}
 458}
 459
 460#ifdef CONFIG_INET
 461struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 462{
 463	if (!memcg || mem_cgroup_is_root(memcg))
 464		return NULL;
 465
 466	return &memcg->tcp_mem.cg_proto;
 467}
 468EXPORT_SYMBOL(tcp_proto_cgroup);
 469#endif /* CONFIG_INET */
 470#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
 471
 472#if defined(CONFIG_INET) && defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM)
 473static void disarm_sock_keys(struct mem_cgroup *memcg)
 
 
 
 
 
 
 
 
 
 
 474{
 475	if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto))
 476		return;
 477	static_key_slow_dec(&memcg_socket_limit_enabled);
 478}
 479#else
 480static void disarm_sock_keys(struct mem_cgroup *memcg)
 481{
 482}
 483#endif
 484
 485static void drain_all_stock_async(struct mem_cgroup *memcg);
 
 486
 487static struct mem_cgroup_per_zone *
 488mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
 489{
 490	return &memcg->info.nodeinfo[nid]->zoneinfo[zid];
 491}
 492
 493struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
 
 
 
 
 
 
 
 
 
 
 
 
 
 494{
 495	return &memcg->css;
 
 
 
 
 
 
 
 
 
 
 496}
 497
 498static struct mem_cgroup_per_zone *
 499page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
 500{
 501	int nid = page_to_nid(page);
 502	int zid = page_zonenum(page);
 503
 504	return mem_cgroup_zoneinfo(memcg, nid, zid);
 505}
 506
 507static struct mem_cgroup_tree_per_zone *
 508soft_limit_tree_node_zone(int nid, int zid)
 509{
 510	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
 511}
 512
 513static struct mem_cgroup_tree_per_zone *
 514soft_limit_tree_from_page(struct page *page)
 515{
 516	int nid = page_to_nid(page);
 517	int zid = page_zonenum(page);
 518
 519	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
 520}
 521
 522static void
 523__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
 524				struct mem_cgroup_per_zone *mz,
 525				struct mem_cgroup_tree_per_zone *mctz,
 526				unsigned long long new_usage_in_excess)
 527{
 528	struct rb_node **p = &mctz->rb_root.rb_node;
 529	struct rb_node *parent = NULL;
 530	struct mem_cgroup_per_zone *mz_node;
 531
 532	if (mz->on_tree)
 533		return;
 534
 535	mz->usage_in_excess = new_usage_in_excess;
 536	if (!mz->usage_in_excess)
 537		return;
 538	while (*p) {
 539		parent = *p;
 540		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
 541					tree_node);
 542		if (mz->usage_in_excess < mz_node->usage_in_excess)
 543			p = &(*p)->rb_left;
 544		/*
 545		 * We can't avoid mem cgroups that are over their soft
 546		 * limit by the same amount
 547		 */
 548		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
 549			p = &(*p)->rb_right;
 550	}
 551	rb_link_node(&mz->tree_node, parent, p);
 552	rb_insert_color(&mz->tree_node, &mctz->rb_root);
 553	mz->on_tree = true;
 554}
 555
 556static void
 557__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
 558				struct mem_cgroup_per_zone *mz,
 559				struct mem_cgroup_tree_per_zone *mctz)
 560{
 561	if (!mz->on_tree)
 562		return;
 563	rb_erase(&mz->tree_node, &mctz->rb_root);
 564	mz->on_tree = false;
 565}
 566
 567static void
 568mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
 569				struct mem_cgroup_per_zone *mz,
 570				struct mem_cgroup_tree_per_zone *mctz)
 571{
 572	spin_lock(&mctz->lock);
 573	__mem_cgroup_remove_exceeded(memcg, mz, mctz);
 574	spin_unlock(&mctz->lock);
 575}
 576
 
 
 
 
 
 
 
 
 
 
 
 577
 578static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
 579{
 580	unsigned long long excess;
 581	struct mem_cgroup_per_zone *mz;
 582	struct mem_cgroup_tree_per_zone *mctz;
 583	int nid = page_to_nid(page);
 584	int zid = page_zonenum(page);
 585	mctz = soft_limit_tree_from_page(page);
 586
 587	/*
 588	 * Necessary to update all ancestors when hierarchy is used.
 589	 * because their event counter is not touched.
 590	 */
 591	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
 592		mz = mem_cgroup_zoneinfo(memcg, nid, zid);
 593		excess = res_counter_soft_limit_excess(&memcg->res);
 594		/*
 595		 * We have to update the tree if mz is on RB-tree or
 596		 * mem is over its softlimit.
 597		 */
 598		if (excess || mz->on_tree) {
 599			spin_lock(&mctz->lock);
 
 
 600			/* if on-tree, remove it */
 601			if (mz->on_tree)
 602				__mem_cgroup_remove_exceeded(memcg, mz, mctz);
 603			/*
 604			 * Insert again. mz->usage_in_excess will be updated.
 605			 * If excess is 0, no tree ops.
 606			 */
 607			__mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
 608			spin_unlock(&mctz->lock);
 609		}
 610	}
 611}
 612
 613static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
 614{
 615	int node, zone;
 616	struct mem_cgroup_per_zone *mz;
 617	struct mem_cgroup_tree_per_zone *mctz;
 618
 619	for_each_node(node) {
 620		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
 621			mz = mem_cgroup_zoneinfo(memcg, node, zone);
 622			mctz = soft_limit_tree_node_zone(node, zone);
 623			mem_cgroup_remove_exceeded(memcg, mz, mctz);
 624		}
 625	}
 626}
 627
 628static struct mem_cgroup_per_zone *
 629__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 630{
 631	struct rb_node *rightmost = NULL;
 632	struct mem_cgroup_per_zone *mz;
 633
 634retry:
 635	mz = NULL;
 636	rightmost = rb_last(&mctz->rb_root);
 637	if (!rightmost)
 638		goto done;		/* Nothing to reclaim from */
 639
 640	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
 641	/*
 642	 * Remove the node now but someone else can add it back,
 643	 * we will to add it back at the end of reclaim to its correct
 644	 * position in the tree.
 645	 */
 646	__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
 647	if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
 648		!css_tryget(&mz->memcg->css))
 649		goto retry;
 650done:
 651	return mz;
 652}
 653
 654static struct mem_cgroup_per_zone *
 655mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 656{
 657	struct mem_cgroup_per_zone *mz;
 658
 659	spin_lock(&mctz->lock);
 660	mz = __mem_cgroup_largest_soft_limit_node(mctz);
 661	spin_unlock(&mctz->lock);
 662	return mz;
 663}
 664
 665/*
 
 
 666 * Implementation Note: reading percpu statistics for memcg.
 667 *
 668 * Both of vmstat[] and percpu_counter has threshold and do periodic
 669 * synchronization to implement "quick" read. There are trade-off between
 670 * reading cost and precision of value. Then, we may have a chance to implement
 671 * a periodic synchronizion of counter in memcg's counter.
 672 *
 673 * But this _read() function is used for user interface now. The user accounts
 674 * memory usage by memory cgroup and he _always_ requires exact value because
 675 * he accounts memory. Even if we provide quick-and-fuzzy read, we always
 676 * have to visit all online cpus and make sum. So, for now, unnecessary
 677 * synchronization is not implemented. (just implemented for cpu hotplug)
 678 *
 679 * If there are kernel internal actions which can make use of some not-exact
 680 * value, and reading all cpu value can be performance bottleneck in some
 681 * common workload, threashold and synchonization as vmstat[] should be
 682 * implemented.
 683 */
 684static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
 685				 enum mem_cgroup_stat_index idx)
 686{
 687	long val = 0;
 688	int cpu;
 689
 690	get_online_cpus();
 691	for_each_online_cpu(cpu)
 692		val += per_cpu(memcg->stat->count[idx], cpu);
 693#ifdef CONFIG_HOTPLUG_CPU
 694	spin_lock(&memcg->pcp_counter_lock);
 695	val += memcg->nocpu_base.count[idx];
 696	spin_unlock(&memcg->pcp_counter_lock);
 697#endif
 698	put_online_cpus();
 699	return val;
 700}
 701
 702static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
 703					 bool charge)
 704{
 705	int val = (charge) ? 1 : -1;
 706	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
 707}
 708
 709static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
 710					    enum mem_cgroup_events_index idx)
 711{
 712	unsigned long val = 0;
 713	int cpu;
 714
 715	for_each_online_cpu(cpu)
 716		val += per_cpu(memcg->stat->events[idx], cpu);
 717#ifdef CONFIG_HOTPLUG_CPU
 718	spin_lock(&memcg->pcp_counter_lock);
 719	val += memcg->nocpu_base.events[idx];
 720	spin_unlock(&memcg->pcp_counter_lock);
 721#endif
 722	return val;
 723}
 724
 725static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 726					 bool anon, int nr_pages)
 
 727{
 728	preempt_disable();
 729
 730	/*
 731	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
 732	 * counted as CACHE even if it's on ANON LRU.
 733	 */
 734	if (anon)
 735		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
 736				nr_pages);
 737	else
 738		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
 739				nr_pages);
 740
 
 
 
 
 
 
 741	/* pagein of a big page is an event. So, ignore page size */
 742	if (nr_pages > 0)
 743		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
 744	else {
 745		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
 746		nr_pages = -nr_pages; /* for event */
 747	}
 748
 749	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 750
 751	preempt_enable();
 752}
 753
 754unsigned long
 755mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 756{
 757	struct mem_cgroup_per_zone *mz;
 758
 759	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
 760	return mz->lru_size[lru];
 761}
 762
 763static unsigned long
 764mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
 765			unsigned int lru_mask)
 766{
 767	struct mem_cgroup_per_zone *mz;
 768	enum lru_list lru;
 769	unsigned long ret = 0;
 770
 771	mz = mem_cgroup_zoneinfo(memcg, nid, zid);
 
 
 772
 773	for_each_lru(lru) {
 774		if (BIT(lru) & lru_mask)
 775			ret += mz->lru_size[lru];
 
 
 
 776	}
 777	return ret;
 778}
 779
 780static unsigned long
 781mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
 782			int nid, unsigned int lru_mask)
 783{
 784	u64 total = 0;
 785	int zid;
 786
 787	for (zid = 0; zid < MAX_NR_ZONES; zid++)
 788		total += mem_cgroup_zone_nr_lru_pages(memcg,
 789						nid, zid, lru_mask);
 790
 791	return total;
 792}
 793
 794static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
 795			unsigned int lru_mask)
 796{
 
 797	int nid;
 798	u64 total = 0;
 799
 800	for_each_node_state(nid, N_HIGH_MEMORY)
 801		total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
 802	return total;
 803}
 804
 805static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
 806				       enum mem_cgroup_events_target target)
 807{
 808	unsigned long val, next;
 809
 810	val = __this_cpu_read(memcg->stat->nr_page_events);
 811	next = __this_cpu_read(memcg->stat->targets[target]);
 812	/* from time_after() in jiffies.h */
 813	if ((long)next - (long)val < 0) {
 814		switch (target) {
 815		case MEM_CGROUP_TARGET_THRESH:
 816			next = val + THRESHOLDS_EVENTS_TARGET;
 817			break;
 818		case MEM_CGROUP_TARGET_SOFTLIMIT:
 819			next = val + SOFTLIMIT_EVENTS_TARGET;
 820			break;
 821		case MEM_CGROUP_TARGET_NUMAINFO:
 822			next = val + NUMAINFO_EVENTS_TARGET;
 823			break;
 824		default:
 825			break;
 826		}
 827		__this_cpu_write(memcg->stat->targets[target], next);
 828		return true;
 829	}
 830	return false;
 831}
 832
 833/*
 834 * Check events in order.
 835 *
 836 */
 837static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 838{
 839	preempt_disable();
 840	/* threshold event is triggered in finer grain than soft limit */
 841	if (unlikely(mem_cgroup_event_ratelimit(memcg,
 842						MEM_CGROUP_TARGET_THRESH))) {
 843		bool do_softlimit;
 844		bool do_numainfo __maybe_unused;
 845
 846		do_softlimit = mem_cgroup_event_ratelimit(memcg,
 847						MEM_CGROUP_TARGET_SOFTLIMIT);
 848#if MAX_NUMNODES > 1
 849		do_numainfo = mem_cgroup_event_ratelimit(memcg,
 850						MEM_CGROUP_TARGET_NUMAINFO);
 851#endif
 852		preempt_enable();
 853
 854		mem_cgroup_threshold(memcg);
 855		if (unlikely(do_softlimit))
 856			mem_cgroup_update_tree(memcg, page);
 857#if MAX_NUMNODES > 1
 858		if (unlikely(do_numainfo))
 859			atomic_inc(&memcg->numainfo_events);
 860#endif
 861	} else
 862		preempt_enable();
 863}
 864
 865struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
 866{
 867	return container_of(cgroup_subsys_state(cont,
 868				mem_cgroup_subsys_id), struct mem_cgroup,
 869				css);
 870}
 871
 872struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 873{
 874	/*
 875	 * mm_update_next_owner() may clear mm->owner to NULL
 876	 * if it races with swapoff, page migration, etc.
 877	 * So this can be called with p == NULL.
 878	 */
 879	if (unlikely(!p))
 880		return NULL;
 881
 882	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
 883				struct mem_cgroup, css);
 884}
 
 885
 886struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
 887{
 888	struct mem_cgroup *memcg = NULL;
 889
 890	if (!mm)
 891		return NULL;
 892	/*
 893	 * Because we have no locks, mm->owner's may be being moved to other
 894	 * cgroup. We use css_tryget() here even if this looks
 895	 * pessimistic (rather than adding locks here).
 896	 */
 897	rcu_read_lock();
 898	do {
 899		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
 900		if (unlikely(!memcg))
 901			break;
 902	} while (!css_tryget(&memcg->css));
 
 
 
 
 
 
 
 
 
 903	rcu_read_unlock();
 904	return memcg;
 905}
 906
 907/**
 908 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 909 * @root: hierarchy root
 910 * @prev: previously returned memcg, NULL on first invocation
 911 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 912 *
 913 * Returns references to children of the hierarchy below @root, or
 914 * @root itself, or %NULL after a full round-trip.
 915 *
 916 * Caller must pass the return value in @prev on subsequent
 917 * invocations for reference counting, or use mem_cgroup_iter_break()
 918 * to cancel a hierarchy walk before the round-trip is complete.
 919 *
 920 * Reclaimers can specify a zone and a priority level in @reclaim to
 921 * divide up the memcgs in the hierarchy among all concurrent
 922 * reclaimers operating on the same zone and priority.
 923 */
 924struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 925				   struct mem_cgroup *prev,
 926				   struct mem_cgroup_reclaim_cookie *reclaim)
 927{
 
 
 928	struct mem_cgroup *memcg = NULL;
 929	int id = 0;
 930
 931	if (mem_cgroup_disabled())
 932		return NULL;
 933
 934	if (!root)
 935		root = root_mem_cgroup;
 936
 937	if (prev && !reclaim)
 938		id = css_id(&prev->css);
 939
 940	if (prev && prev != root)
 941		css_put(&prev->css);
 942
 943	if (!root->use_hierarchy && root != root_mem_cgroup) {
 944		if (prev)
 945			return NULL;
 946		return root;
 947	}
 948
 949	while (!memcg) {
 950		struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
 951		struct cgroup_subsys_state *css;
 952
 953		if (reclaim) {
 954			int nid = zone_to_nid(reclaim->zone);
 955			int zid = zone_idx(reclaim->zone);
 956			struct mem_cgroup_per_zone *mz;
 957
 958			mz = mem_cgroup_zoneinfo(root, nid, zid);
 959			iter = &mz->reclaim_iter[reclaim->priority];
 960			if (prev && reclaim->generation != iter->generation)
 961				return NULL;
 962			id = iter->position;
 
 
 
 
 
 
 
 
 
 
 963		}
 
 964
 965		rcu_read_lock();
 966		css = css_get_next(&mem_cgroup_subsys, id + 1, &root->css, &id);
 967		if (css) {
 968			if (css == &root->css || css_tryget(css))
 969				memcg = container_of(css,
 970						     struct mem_cgroup, css);
 971		} else
 972			id = 0;
 973		rcu_read_unlock();
 974
 975		if (reclaim) {
 976			iter->position = id;
 977			if (!css)
 978				iter->generation++;
 979			else if (!prev && memcg)
 980				reclaim->generation = iter->generation;
 
 
 
 
 
 
 981		}
 982
 983		if (prev && !css)
 984			return NULL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 985	}
 
 
 
 
 
 
 
 986	return memcg;
 987}
 988
 989/**
 990 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 991 * @root: hierarchy root
 992 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 993 */
 994void mem_cgroup_iter_break(struct mem_cgroup *root,
 995			   struct mem_cgroup *prev)
 996{
 997	if (!root)
 998		root = root_mem_cgroup;
 999	if (prev && prev != root)
1000		css_put(&prev->css);
1001}
1002
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1003/*
1004 * Iteration constructs for visiting all cgroups (under a tree).  If
1005 * loops are exited prematurely (break), mem_cgroup_iter_break() must
1006 * be used for reference counting.
1007 */
1008#define for_each_mem_cgroup_tree(iter, root)		\
1009	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
1010	     iter != NULL;				\
1011	     iter = mem_cgroup_iter(root, iter, NULL))
1012
1013#define for_each_mem_cgroup(iter)			\
1014	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
1015	     iter != NULL;				\
1016	     iter = mem_cgroup_iter(NULL, iter, NULL))
1017
1018static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
1019{
1020	return (memcg == root_mem_cgroup);
1021}
1022
1023void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
1024{
1025	struct mem_cgroup *memcg;
1026
1027	if (!mm)
1028		return;
1029
1030	rcu_read_lock();
1031	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1032	if (unlikely(!memcg))
1033		goto out;
1034
1035	switch (idx) {
1036	case PGFAULT:
1037		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
1038		break;
1039	case PGMAJFAULT:
1040		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
1041		break;
1042	default:
1043		BUG();
1044	}
1045out:
1046	rcu_read_unlock();
1047}
1048EXPORT_SYMBOL(mem_cgroup_count_vm_event);
1049
1050/**
1051 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
1052 * @zone: zone of the wanted lruvec
1053 * @memcg: memcg of the wanted lruvec
1054 *
1055 * Returns the lru list vector holding pages for the given @zone and
1056 * @mem.  This can be the global zone lruvec, if the memory controller
1057 * is disabled.
1058 */
1059struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
1060				      struct mem_cgroup *memcg)
1061{
1062	struct mem_cgroup_per_zone *mz;
 
1063
1064	if (mem_cgroup_disabled())
1065		return &zone->lruvec;
 
 
1066
1067	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
1068	return &mz->lruvec;
 
 
 
 
 
 
 
 
 
1069}
1070
1071/*
1072 * Following LRU functions are allowed to be used without PCG_LOCK.
1073 * Operations are called by routine of global LRU independently from memcg.
1074 * What we have to take care of here is validness of pc->mem_cgroup.
1075 *
1076 * Changes to pc->mem_cgroup happens when
1077 * 1. charge
1078 * 2. moving account
1079 * In typical case, "charge" is done before add-to-lru. Exception is SwapCache.
1080 * It is added to LRU before charge.
1081 * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
1082 * When moving account, the page is not on LRU. It's isolated.
1083 */
1084
1085/**
1086 * mem_cgroup_page_lruvec - return lruvec for adding an lru page
1087 * @page: the page
1088 * @zone: zone of the page
 
 
 
 
1089 */
1090struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
1091{
1092	struct mem_cgroup_per_zone *mz;
1093	struct mem_cgroup *memcg;
1094	struct page_cgroup *pc;
1095
1096	if (mem_cgroup_disabled())
1097		return &zone->lruvec;
1098
1099	pc = lookup_page_cgroup(page);
1100	memcg = pc->mem_cgroup;
1101
 
1102	/*
1103	 * Surreptitiously switch any uncharged offlist page to root:
1104	 * an uncharged page off lru does nothing to secure
1105	 * its former mem_cgroup from sudden removal.
1106	 *
1107	 * Our caller holds lru_lock, and PageCgroupUsed is updated
1108	 * under page_cgroup lock: between them, they make all uses
1109	 * of pc->mem_cgroup safe.
1110	 */
1111	if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
1112		pc->mem_cgroup = memcg = root_mem_cgroup;
1113
1114	mz = page_cgroup_zoneinfo(memcg, page);
1115	return &mz->lruvec;
 
 
 
 
 
 
 
 
 
1116}
1117
1118/**
1119 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1120 * @lruvec: mem_cgroup per zone lru vector
1121 * @lru: index of lru list the page is sitting on
1122 * @nr_pages: positive when adding or negative when removing
1123 *
1124 * This function must be called when a page is added to or removed from an
1125 * lru list.
1126 */
1127void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1128				int nr_pages)
1129{
1130	struct mem_cgroup_per_zone *mz;
1131	unsigned long *lru_size;
1132
1133	if (mem_cgroup_disabled())
1134		return;
1135
1136	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1137	lru_size = mz->lru_size + lru;
1138	*lru_size += nr_pages;
1139	VM_BUG_ON((long)(*lru_size) < 0);
1140}
1141
1142/*
1143 * Checks whether given mem is same or in the root_mem_cgroup's
1144 * hierarchy subtree
1145 */
1146bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1147				  struct mem_cgroup *memcg)
1148{
1149	if (root_memcg == memcg)
1150		return true;
1151	if (!root_memcg->use_hierarchy || !memcg)
1152		return false;
1153	return css_is_ancestor(&memcg->css, &root_memcg->css);
1154}
1155
1156static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1157				       struct mem_cgroup *memcg)
1158{
 
 
1159	bool ret;
1160
1161	rcu_read_lock();
1162	ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
1163	rcu_read_unlock();
1164	return ret;
1165}
1166
1167int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
1168{
1169	int ret;
1170	struct mem_cgroup *curr = NULL;
1171	struct task_struct *p;
1172
1173	p = find_lock_task_mm(task);
1174	if (p) {
1175		curr = try_get_mem_cgroup_from_mm(p->mm);
1176		task_unlock(p);
1177	} else {
1178		/*
1179		 * All threads may have already detached their mm's, but the oom
1180		 * killer still needs to detect if they have already been oom
1181		 * killed to prevent needlessly killing additional tasks.
1182		 */
1183		task_lock(task);
1184		curr = mem_cgroup_from_task(task);
1185		if (curr)
1186			css_get(&curr->css);
1187		task_unlock(task);
1188	}
1189	if (!curr)
1190		return 0;
1191	/*
1192	 * We should check use_hierarchy of "memcg" not "curr". Because checking
1193	 * use_hierarchy of "curr" here make this function true if hierarchy is
1194	 * enabled in "curr" and "curr" is a child of "memcg" in *cgroup*
1195	 * hierarchy(even if use_hierarchy is disabled in "memcg").
1196	 */
1197	ret = mem_cgroup_same_or_subtree(memcg, curr);
1198	css_put(&curr->css);
1199	return ret;
1200}
1201
1202int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
1203{
1204	unsigned long inactive_ratio;
1205	unsigned long inactive;
1206	unsigned long active;
1207	unsigned long gb;
1208
1209	inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
1210	active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
1211
1212	gb = (inactive + active) >> (30 - PAGE_SHIFT);
1213	if (gb)
1214		inactive_ratio = int_sqrt(10 * gb);
1215	else
1216		inactive_ratio = 1;
1217
1218	return inactive * inactive_ratio < active;
1219}
1220
1221int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
1222{
1223	unsigned long active;
1224	unsigned long inactive;
1225
1226	inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_FILE);
1227	active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_FILE);
1228
1229	return (active > inactive);
1230}
1231
1232#define mem_cgroup_from_res_counter(counter, member)	\
1233	container_of(counter, struct mem_cgroup, member)
1234
1235/**
1236 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1237 * @memcg: the memory cgroup
1238 *
1239 * Returns the maximum amount of memory @mem can be charged with, in
1240 * pages.
1241 */
1242static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1243{
1244	unsigned long long margin;
1245
1246	margin = res_counter_margin(&memcg->res);
1247	if (do_swap_account)
1248		margin = min(margin, res_counter_margin(&memcg->memsw));
1249	return margin >> PAGE_SHIFT;
1250}
1251
1252int mem_cgroup_swappiness(struct mem_cgroup *memcg)
1253{
1254	struct cgroup *cgrp = memcg->css.cgroup;
1255
1256	/* root ? */
1257	if (cgrp->parent == NULL)
1258		return vm_swappiness;
 
1259
1260	return memcg->swappiness;
1261}
1262
1263/*
1264 * memcg->moving_account is used for checking possibility that some thread is
1265 * calling move_account(). When a thread on CPU-A starts moving pages under
1266 * a memcg, other threads should check memcg->moving_account under
1267 * rcu_read_lock(), like this:
1268 *
1269 *         CPU-A                                    CPU-B
1270 *                                              rcu_read_lock()
1271 *         memcg->moving_account+1              if (memcg->mocing_account)
1272 *                                                   take heavy locks.
1273 *         synchronize_rcu()                    update something.
1274 *                                              rcu_read_unlock()
1275 *         start move here.
1276 */
1277
1278/* for quick checking without looking up memcg */
1279atomic_t memcg_moving __read_mostly;
1280
1281static void mem_cgroup_start_move(struct mem_cgroup *memcg)
1282{
1283	atomic_inc(&memcg_moving);
1284	atomic_inc(&memcg->moving_account);
1285	synchronize_rcu();
1286}
1287
1288static void mem_cgroup_end_move(struct mem_cgroup *memcg)
1289{
1290	/*
1291	 * Now, mem_cgroup_clear_mc() may call this function with NULL.
1292	 * We check NULL in callee rather than caller.
1293	 */
1294	if (memcg) {
1295		atomic_dec(&memcg_moving);
1296		atomic_dec(&memcg->moving_account);
1297	}
1298}
1299
1300/*
1301 * 2 routines for checking "mem" is under move_account() or not.
1302 *
1303 * mem_cgroup_stolen() -  checking whether a cgroup is mc.from or not. This
1304 *			  is used for avoiding races in accounting.  If true,
1305 *			  pc->mem_cgroup may be overwritten.
1306 *
1307 * mem_cgroup_under_move() - checking a cgroup is mc.from or mc.to or
1308 *			  under hierarchy of moving cgroups. This is for
1309 *			  waiting at hith-memory prressure caused by "move".
1310 */
1311
1312static bool mem_cgroup_stolen(struct mem_cgroup *memcg)
1313{
1314	VM_BUG_ON(!rcu_read_lock_held());
1315	return atomic_read(&memcg->moving_account) > 0;
1316}
1317
1318static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1319{
1320	struct mem_cgroup *from;
1321	struct mem_cgroup *to;
1322	bool ret = false;
1323	/*
1324	 * Unlike task_move routines, we access mc.to, mc.from not under
1325	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1326	 */
1327	spin_lock(&mc.lock);
1328	from = mc.from;
1329	to = mc.to;
1330	if (!from)
1331		goto unlock;
1332
1333	ret = mem_cgroup_same_or_subtree(memcg, from)
1334		|| mem_cgroup_same_or_subtree(memcg, to);
1335unlock:
1336	spin_unlock(&mc.lock);
1337	return ret;
1338}
1339
1340static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1341{
1342	if (mc.moving_task && current != mc.moving_task) {
1343		if (mem_cgroup_under_move(memcg)) {
1344			DEFINE_WAIT(wait);
1345			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1346			/* moving charge context might have finished. */
1347			if (mc.moving_task)
1348				schedule();
1349			finish_wait(&mc.waitq, &wait);
1350			return true;
1351		}
1352	}
1353	return false;
1354}
1355
1356/*
1357 * Take this lock when
1358 * - a code tries to modify page's memcg while it's USED.
1359 * - a code tries to modify page state accounting in a memcg.
1360 * see mem_cgroup_stolen(), too.
1361 */
1362static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
1363				  unsigned long *flags)
1364{
1365	spin_lock_irqsave(&memcg->move_lock, *flags);
1366}
1367
1368static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
1369				unsigned long *flags)
1370{
1371	spin_unlock_irqrestore(&memcg->move_lock, *flags);
1372}
1373
1374/**
1375 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
1376 * @memcg: The memory cgroup that went over limit
1377 * @p: Task that is going to be killed
1378 *
1379 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1380 * enabled
1381 */
1382void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1383{
1384	struct cgroup *task_cgrp;
1385	struct cgroup *mem_cgrp;
1386	/*
1387	 * Need a buffer in BSS, can't rely on allocations. The code relies
1388	 * on the assumption that OOM is serialized for memory controller.
1389	 * If this assumption is broken, revisit this code.
1390	 */
1391	static char memcg_name[PATH_MAX];
1392	int ret;
1393
1394	if (!memcg || !p)
1395		return;
1396
1397	rcu_read_lock();
1398
1399	mem_cgrp = memcg->css.cgroup;
1400	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
1401
1402	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
1403	if (ret < 0) {
1404		/*
1405		 * Unfortunately, we are unable to convert to a useful name
1406		 * But we'll still print out the usage information
1407		 */
1408		rcu_read_unlock();
1409		goto done;
1410	}
1411	rcu_read_unlock();
1412
1413	printk(KERN_INFO "Task in %s killed", memcg_name);
 
1414
1415	rcu_read_lock();
1416	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
1417	if (ret < 0) {
1418		rcu_read_unlock();
1419		goto done;
1420	}
1421	rcu_read_unlock();
1422
1423	/*
1424	 * Continues from above, so we don't need an KERN_ level
1425	 */
1426	printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
1427done:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1428
1429	printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
1430		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1431		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1432		res_counter_read_u64(&memcg->res, RES_FAILCNT));
1433	printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
1434		"failcnt %llu\n",
1435		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1436		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1437		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1438}
1439
1440/*
1441 * This function returns the number of memcg under hierarchy tree. Returns
1442 * 1(self count) if no children.
1443 */
1444static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1445{
1446	int num = 0;
1447	struct mem_cgroup *iter;
1448
1449	for_each_mem_cgroup_tree(iter, memcg)
1450		num++;
1451	return num;
1452}
1453
1454/*
1455 * Return the memory (and swap, if configured) limit for a memcg.
1456 */
1457u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
1458{
1459	u64 limit;
1460	u64 memsw;
1461
1462	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1463	limit += total_swap_pages << PAGE_SHIFT;
1464
1465	memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1466	/*
1467	 * If memsw is finite and limits the amount of swap space available
1468	 * to this memcg, return that limit.
1469	 */
1470	return min(limit, memsw);
1471}
1472
1473static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
1474					gfp_t gfp_mask,
1475					unsigned long flags)
1476{
1477	unsigned long total = 0;
1478	bool noswap = false;
1479	int loop;
1480
1481	if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
1482		noswap = true;
1483	if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
1484		noswap = true;
 
1485
1486	for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
1487		if (loop)
1488			drain_all_stock_async(memcg);
1489		total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
1490		/*
1491		 * Allow limit shrinkers, which are triggered directly
1492		 * by userspace, to catch signals and stop reclaim
1493		 * after minimal progress, regardless of the margin.
1494		 */
1495		if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
1496			break;
1497		if (mem_cgroup_margin(memcg))
1498			break;
1499		/*
1500		 * If nothing was reclaimed after two attempts, there
1501		 * may be no reclaimable pages in this hierarchy.
1502		 */
1503		if (loop && !total)
1504			break;
1505	}
1506	return total;
1507}
1508
1509/**
1510 * test_mem_cgroup_node_reclaimable
1511 * @memcg: the target memcg
1512 * @nid: the node ID to be checked.
1513 * @noswap: specify true here if the user wants file-only information.
1514 *
1515 * This function returns whether the specified memcg contains any
1516 * reclaimable pages on a node. Returns true if there are any reclaimable
1517 * pages in the node.
1518 */
1519static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1520		int nid, bool noswap)
1521{
1522	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1523		return true;
1524	if (noswap || !total_swap_pages)
1525		return false;
1526	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1527		return true;
1528	return false;
1529
1530}
1531#if MAX_NUMNODES > 1
1532
1533/*
1534 * Always updating the nodemask is not very good - even if we have an empty
1535 * list or the wrong list here, we can start from some node and traverse all
1536 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1537 *
1538 */
1539static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1540{
1541	int nid;
1542	/*
1543	 * numainfo_events > 0 means there were at least NUMAINFO_EVENTS_TARGET
1544	 * pagein/pageout changes since the last update.
1545	 */
1546	if (!atomic_read(&memcg->numainfo_events))
1547		return;
1548	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1549		return;
1550
1551	/* make a nodemask where this memcg uses memory from */
1552	memcg->scan_nodes = node_states[N_HIGH_MEMORY];
1553
1554	for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {
1555
1556		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1557			node_clear(nid, memcg->scan_nodes);
1558	}
1559
1560	atomic_set(&memcg->numainfo_events, 0);
1561	atomic_set(&memcg->numainfo_updating, 0);
1562}
1563
1564/*
1565 * Select a node to start reclaim from. Because all we need is to reduce
1566 * the usage counter, starting from any node is OK. Reclaiming from the
1567 * current node has both pros and cons.
1568 *
1569 * Freeing memory from the current node means freeing memory from a node
1570 * which we will use or have used, so it may hurt that node's LRU. And if
1571 * several threads hit their limits, they will contend on that node. But
1572 * freeing from a remote node costs more because of memory latency.
1573 *
1574 * For now, we use round-robin. A better algorithm is welcome.
1575 */
1576int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1577{
1578	int node;
1579
1580	mem_cgroup_may_update_nodemask(memcg);
1581	node = memcg->last_scanned_node;
1582
1583	node = next_node(node, memcg->scan_nodes);
1584	if (node == MAX_NUMNODES)
1585		node = first_node(memcg->scan_nodes);
1586	/*
1587	 * We call this when we hit the limit, not when pages are added to the
1588	 * LRU. No LRU may hold pages because all pages are UNEVICTABLE, or the
1589	 * memcg is too small and none of its pages are on an LRU. In that
1590	 * case, we use the current node.
1591	 */
1592	if (unlikely(node == MAX_NUMNODES))
1593		node = numa_node_id();
1594
1595	memcg->last_scanned_node = node;
1596	return node;
1597}
1598
1599/*
1600 * Check whether any node contains reclaimable pages.
1601 * For a quick scan, we make use of scan_nodes. This allows us to skip
1602 * unused nodes. But scan_nodes is lazily updated and may not contain
1603 * fresh enough information, so we need to double-check.
1604 */
1605static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1606{
1607	int nid;
1608
1609	/*
1610	 * Quick check: make use of scan_nodes so that we can
1611	 * skip unused nodes.
1612	 */
1613	if (!nodes_empty(memcg->scan_nodes)) {
1614		for (nid = first_node(memcg->scan_nodes);
1615		     nid < MAX_NUMNODES;
1616		     nid = next_node(nid, memcg->scan_nodes)) {
1617
1618			if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1619				return true;
1620		}
1621	}
1622	/*
1623	 * Check rest of nodes.
1624	 */
1625	for_each_node_state(nid, N_HIGH_MEMORY) {
1626		if (node_isset(nid, memcg->scan_nodes))
1627			continue;
1628		if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1629			return true;
1630	}
1631	return false;
1632}
1633
1634#else
1635int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1636{
1637	return 0;
1638}
1639
1640static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1641{
1642	return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
1643}
1644#endif
1645
1646static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1647				   struct zone *zone,
1648				   gfp_t gfp_mask,
1649				   unsigned long *total_scanned)
1650{
1651	struct mem_cgroup *victim = NULL;
1652	int total = 0;
1653	int loop = 0;
1654	unsigned long excess;
1655	unsigned long nr_scanned;
1656	struct mem_cgroup_reclaim_cookie reclaim = {
1657		.zone = zone,
1658		.priority = 0,
1659	};
1660
1661	excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
1662
1663	while (1) {
1664		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1665		if (!victim) {
1666			loop++;
1667			if (loop >= 2) {
1668				/*
1669				 * If we have not been able to reclaim
1670				 * anything, it might be because there are
1671				 * no reclaimable pages under this hierarchy.
1672				 */
1673				if (!total)
1674					break;
1675				/*
1676				 * We want to do more targeted reclaim.
1677				 * excess >> 2 is not so large that we
1678				 * reclaim too much, nor so small that we keep
1679				 * coming back to reclaim from this cgroup.
1680				 */
1681				if (total >= (excess >> 2) ||
1682					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1683					break;
1684			}
1685			continue;
1686		}
1687		if (!mem_cgroup_reclaimable(victim, false))
1688			continue;
1689		total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
1690						     zone, &nr_scanned);
1691		*total_scanned += nr_scanned;
1692		if (!res_counter_soft_limit_excess(&root_memcg->res))
1693			break;
1694	}
1695	mem_cgroup_iter_break(root_memcg, victim);
1696	return total;
1697}
1698
1699/*
1700 * Check whether the OOM killer is already running under our hierarchy.
1701 * If someone is running, return false.
1702 * Has to be called with memcg_oom_lock held.
1703 */
1704static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
1705{
1706	struct mem_cgroup *iter, *failed = NULL;
1707
1708	for_each_mem_cgroup_tree(iter, memcg) {
1709		if (iter->oom_lock) {
1710			/*
1711			 * This subtree of our hierarchy is already locked,
1712			 * so we cannot take the lock.
1713			 */
1714			failed = iter;
1715			mem_cgroup_iter_break(memcg, iter);
1716			break;
1717		} else
1718			iter->oom_lock = true;
1719	}
1720
1721	if (!failed)
1722		return true;
1723
1724	/*
1725	 * OK, we failed to lock the whole subtree, so we have to clean up
1726	 * what we set up, up to the failing subtree.
1727	 */
1728	for_each_mem_cgroup_tree(iter, memcg) {
1729		if (iter == failed) {
1730			mem_cgroup_iter_break(memcg, iter);
1731			break;
1732		}
1733		iter->oom_lock = false;
1734	}
1735	return false;
1736}
1737
1738/*
1739 * Has to be called with memcg_oom_lock held.
1740 */
1741static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1742{
1743	struct mem_cgroup *iter;
1744
1745	for_each_mem_cgroup_tree(iter, memcg)
1746		iter->oom_lock = false;
1747	return 0;
1748}
1749
1750static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1751{
1752	struct mem_cgroup *iter;
1753
 
1754	for_each_mem_cgroup_tree(iter, memcg)
1755		atomic_inc(&iter->under_oom);
 
1756}
1757
1758static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1759{
1760	struct mem_cgroup *iter;
1761
1762	/*
1763	 * When a new child is created while the hierarchy is under oom,
1764	 * mem_cgroup_oom_lock() may not be called. We have to use
1765	 * atomic_add_unless() here.
1766	 */
 
1767	for_each_mem_cgroup_tree(iter, memcg)
1768		atomic_add_unless(&iter->under_oom, -1, 0);
1769}
1770
1771static DEFINE_SPINLOCK(memcg_oom_lock);
1772static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1773
1774struct oom_wait_info {
1775	struct mem_cgroup *memcg;
1776	wait_queue_t	wait;
1777};
1778
1779static int memcg_oom_wake_function(wait_queue_t *wait,
1780	unsigned mode, int sync, void *arg)
1781{
1782	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1783	struct mem_cgroup *oom_wait_memcg;
1784	struct oom_wait_info *oom_wait_info;
1785
1786	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1787	oom_wait_memcg = oom_wait_info->memcg;
1788
1789	/*
1790	 * Both oom_wait_info->memcg and wake_memcg are stable under us,
1791	 * so we can use css_is_ancestor() without worrying about RCU.
1792	 */
1793	if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
1794		&& !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
1795		return 0;
1796	return autoremove_wake_function(wait, mode, sync, arg);
1797}
1798
1799static void memcg_wakeup_oom(struct mem_cgroup *memcg)
1800{
1801	/* for filtering, pass "memcg" as argument. */
1802	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1803}
1804
1805static void memcg_oom_recover(struct mem_cgroup *memcg)
1806{
1807	if (memcg && atomic_read(&memcg->under_oom))
1808		memcg_wakeup_oom(memcg);
1809}
1810
1811/*
1812 * Try to call the OOM killer. Returns false if we should exit the memory-reclaim loop.
1813 */
1814static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
1815				  int order)
1816{
 
1817	struct oom_wait_info owait;
1818	bool locked, need_to_kill;
1819
1820	owait.memcg = memcg;
1821	owait.wait.flags = 0;
1822	owait.wait.func = memcg_oom_wake_function;
1823	owait.wait.private = current;
1824	INIT_LIST_HEAD(&owait.wait.task_list);
1825	need_to_kill = true;
 
1826	mem_cgroup_mark_under_oom(memcg);
1827
1828	/* At first, try to OOM-lock the hierarchy under memcg. */
1829	spin_lock(&memcg_oom_lock);
1830	locked = mem_cgroup_oom_lock(memcg);
1831	/*
1832	 * Even if signal_pending(), we can't quit the charge() loop without
1833	 * accounting, so UNINTERRUPTIBLE would be appropriate. But SIGKILL
1834	 * under OOM is always welcome, so use TASK_KILLABLE here.
1835	 */
1836	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1837	if (!locked || memcg->oom_kill_disable)
1838		need_to_kill = false;
1839	if (locked)
1840		mem_cgroup_oom_notify(memcg);
1841	spin_unlock(&memcg_oom_lock);
1842
1843	if (need_to_kill) {
 
1844		finish_wait(&memcg_oom_waitq, &owait.wait);
1845		mem_cgroup_out_of_memory(memcg, mask, order);
 
1846	} else {
1847		schedule();
 
1848		finish_wait(&memcg_oom_waitq, &owait.wait);
1849	}
1850	spin_lock(&memcg_oom_lock);
1851	if (locked)
1852		mem_cgroup_oom_unlock(memcg);
1853	memcg_wakeup_oom(memcg);
1854	spin_unlock(&memcg_oom_lock);
1855
1856	mem_cgroup_unmark_under_oom(memcg);
1857
1858	if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
1859		return false;
1860	/* Give chance to dying process */
1861	schedule_timeout_uninterruptible(1);
 
1862	return true;
1863}
1864
1865/*
1866 * Currently used to update mapped file statistics, but the routine can be
1867 * generalized to update other statistics as well.
1868 *
1869 * Notes: Race condition
1870 *
1871 * We usually use page_cgroup_lock() for accessing page_cgroup members, but
1872 * it tends to be costly. Under some conditions, however, we don't need
1873 * to do so _always_.
1874 *
1875 * Considering "charge", lock_page_cgroup() is not required because all
1876 * file-stat operations happen after a page is attached to the radix-tree.
1877 * There is no race with "charge".
1878 *
1879 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
1880 * at "uncharge" intentionally. So, we always see a valid pc->mem_cgroup even
1881 * if there is a race with "uncharge". The statistics themselves are properly
1882 * handled by flags.
1883 *
1884 * Considering "move", this is the only case where we see a race. To make the
1885 * race window small, we check mm->moving_account to detect the possibility of
1886 * a race, and if there is one, we take the lock.
1887 */
1888
1889void __mem_cgroup_begin_update_page_stat(struct page *page,
1890				bool *locked, unsigned long *flags)
1891{
1892	struct mem_cgroup *memcg;
1893	struct page_cgroup *pc;
1894
1895	pc = lookup_page_cgroup(page);
 
1896again:
1897	memcg = pc->mem_cgroup;
1898	if (unlikely(!memcg || !PageCgroupUsed(pc)))
1899		return;
1900	/*
1901	 * If this memory cgroup is not under account moving, we don't
1902	 * need to take move_lock_mem_cgroup(). Because we already hold
1903	 * rcu_read_lock(), any calls to move_account will be delayed until
1904	 * rcu_read_unlock() if mem_cgroup_stolen() == true.
1905	 */
1906	if (!mem_cgroup_stolen(memcg))
1907		return;
1908
1909	move_lock_mem_cgroup(memcg, flags);
1910	if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
1911		move_unlock_mem_cgroup(memcg, flags);
1912		goto again;
1913	}
1914	*locked = true;
1915}
1916
1917void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
1918{
1919	struct page_cgroup *pc = lookup_page_cgroup(page);
1920
1921	/*
1922	 * It's guaranteed that pc->mem_cgroup never changes while the
1923	 * lock is held, because any routine that modifies pc->mem_cgroup
1924	 * should take move_lock_mem_cgroup().
1925	 */
1926	move_unlock_mem_cgroup(pc->mem_cgroup, flags);
1927}
 
1928
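/*
 * A rough usage sketch for the begin/end pair above, assuming a caller
 * bumping a per-memcg page statistic (the wrapper names normally used by
 * callers live in memcontrol.h; the flow shown here is an approximation):
 *
 *	bool locked = false;
 *	unsigned long flags;
 *
 *	__mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	mem_cgroup_update_page_stat(page, MEMCG_NR_FILE_MAPPED, 1);
 *	if (locked)
 *		__mem_cgroup_end_update_page_stat(page, &flags);
 */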
1929void mem_cgroup_update_page_stat(struct page *page,
1930				 enum mem_cgroup_page_stat_item idx, int val)
1931{
1932	struct mem_cgroup *memcg;
1933	struct page_cgroup *pc = lookup_page_cgroup(page);
1934	unsigned long uninitialized_var(flags);
1935
1936	if (mem_cgroup_disabled())
1937		return;
1938
1939	memcg = pc->mem_cgroup;
1940	if (unlikely(!memcg || !PageCgroupUsed(pc)))
1941		return;
1942
1943	switch (idx) {
1944	case MEMCG_NR_FILE_MAPPED:
1945		idx = MEM_CGROUP_STAT_FILE_MAPPED;
1946		break;
1947	default:
1948		BUG();
1949	}
1950
1951	this_cpu_add(memcg->stat->count[idx], val);
1952}
 
1953
1954/*
1955 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
1956 * TODO: it may be necessary to use bigger numbers on big iron.
1957 */
1958#define CHARGE_BATCH	32U
1959struct memcg_stock_pcp {
1960	struct mem_cgroup *cached; /* this is never the root cgroup */
1961	unsigned int nr_pages;
1962	struct work_struct work;
1963	unsigned long flags;
1964#define FLUSHING_CACHED_CHARGE	0
1965};
1966static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1967static DEFINE_MUTEX(percpu_charge_mutex);
1968
1969/*
1970 * Try to consume stocked charge on this cpu. On success, one page's worth of
1971 * charge is consumed from the local stock and true is returned. If the stock
1972 * is empty or holds charges for a cgroup other than the current target,
1973 * false is returned and the stock will be refilled later.
1974 */
1975static bool consume_stock(struct mem_cgroup *memcg)
1976{
1977	struct memcg_stock_pcp *stock;
1978	bool ret = true;
1979
1980	stock = &get_cpu_var(memcg_stock);
1981	if (memcg == stock->cached && stock->nr_pages)
1982		stock->nr_pages--;
1983	else /* need to call res_counter_charge */
1984		ret = false;
1985	put_cpu_var(memcg_stock);
1986	return ret;
1987}
1988
1989/*
1990 * Return stocks cached in percpu back to res_counter and reset the cached information.
1991 */
1992static void drain_stock(struct memcg_stock_pcp *stock)
1993{
1994	struct mem_cgroup *old = stock->cached;
1995
1996	if (stock->nr_pages) {
1997		unsigned long bytes = stock->nr_pages * PAGE_SIZE;
1998
1999		res_counter_uncharge(&old->res, bytes);
2000		if (do_swap_account)
2001			res_counter_uncharge(&old->memsw, bytes);
2002		stock->nr_pages = 0;
2003	}
2004	stock->cached = NULL;
2005}
2006
2007/*
2008 * This must be called with preemption disabled, or by
2009 * a thread which is pinned to the local cpu.
2010 */
2011static void drain_local_stock(struct work_struct *dummy)
2012{
2013	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
2014	drain_stock(stock);
2015	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2016}
2017
2018/*
2019 * Cache charges(val) from res_counter into the local per-cpu area.
2020 * They will be consumed by the consume_stock() function later.
2021 */
2022static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2023{
2024	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2025
2026	if (stock->cached != memcg) { /* reset if necessary */
2027		drain_stock(stock);
2028		stock->cached = memcg;
2029	}
2030	stock->nr_pages += nr_pages;
2031	put_cpu_var(memcg_stock);
2032}
2033
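/*
 * Sketch of how the per-cpu stock is meant to be used on the charge path
 * (an approximation; see __mem_cgroup_try_charge() below for the real flow):
 *
 *	if (nr_pages == 1 && consume_stock(memcg))
 *		return 0;			(fast path, no res_counter op)
 *	(slow path: charge a whole batch, then stash the surplus locally)
 *	if (mem_cgroup_do_charge(memcg, gfp_mask, CHARGE_BATCH, false) == CHARGE_OK)
 *		refill_stock(memcg, CHARGE_BATCH - nr_pages);
 */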
2034/*
2035 * Drains all per-CPU charge caches for the given root_memcg and the whole
2036 * subtree of the hierarchy under it. The sync flag says whether we should
2037 * block until the work is done.
2038 */
2039static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
2040{
2041	int cpu, curcpu;
2042
2043	/* Notify other cpus that system-wide "drain" is running */
2044	get_online_cpus();
2045	curcpu = get_cpu();
2046	for_each_online_cpu(cpu) {
2047		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2048		struct mem_cgroup *memcg;
2049
2050		memcg = stock->cached;
2051		if (!memcg || !stock->nr_pages)
2052			continue;
2053		if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
2054			continue;
2055		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2056			if (cpu == curcpu)
2057				drain_local_stock(&stock->work);
2058			else
2059				schedule_work_on(cpu, &stock->work);
2060		}
2061	}
2062	put_cpu();
2063
2064	if (!sync)
2065		goto out;
2066
2067	for_each_online_cpu(cpu) {
2068		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2069		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
2070			flush_work(&stock->work);
2071	}
2072out:
2073 	put_online_cpus();
2074}
2075
2076/*
2077 * Tries to drain stocked charges on other cpus. This function is asynchronous
2078 * and just schedules a work item per cpu to drain locally on each cpu. The
2079 * caller can expect that some charges will be returned to the res_counter
2080 * later, but cannot wait for that.
2081 */
2082static void drain_all_stock_async(struct mem_cgroup *root_memcg)
2083{
2084	/*
2085	 * If someone calls draining, avoid adding more kworker runs.
2086	 */
2087	if (!mutex_trylock(&percpu_charge_mutex))
2088		return;
2089	drain_all_stock(root_memcg, false);
2090	mutex_unlock(&percpu_charge_mutex);
2091}
2092
2093/* This is a synchronous drain interface. */
2094static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
2095{
2096	/* called when force_empty is called */
2097	mutex_lock(&percpu_charge_mutex);
2098	drain_all_stock(root_memcg, true);
2099	mutex_unlock(&percpu_charge_mutex);
2100}
2101
2102/*
2103 * This function drains the percpu counter values from a DEAD cpu and
2104 * moves them to the local cpu. Note that this function can be preempted.
2105 */
2106static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
2107{
2108	int i;
2109
2110	spin_lock(&memcg->pcp_counter_lock);
2111	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
2112		long x = per_cpu(memcg->stat->count[i], cpu);
2113
2114		per_cpu(memcg->stat->count[i], cpu) = 0;
2115		memcg->nocpu_base.count[i] += x;
2116	}
2117	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
2118		unsigned long x = per_cpu(memcg->stat->events[i], cpu);
2119
2120		per_cpu(memcg->stat->events[i], cpu) = 0;
2121		memcg->nocpu_base.events[i] += x;
2122	}
2123	spin_unlock(&memcg->pcp_counter_lock);
2124}
2125
2126static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
2127					unsigned long action,
2128					void *hcpu)
2129{
2130	int cpu = (unsigned long)hcpu;
2131	struct memcg_stock_pcp *stock;
2132	struct mem_cgroup *iter;
2133
2134	if (action == CPU_ONLINE)
2135		return NOTIFY_OK;
2136
2137	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
2138		return NOTIFY_OK;
2139
2140	for_each_mem_cgroup(iter)
2141		mem_cgroup_drain_pcp_counter(iter, cpu);
2142
2143	stock = &per_cpu(memcg_stock, cpu);
2144	drain_stock(stock);
2145	return NOTIFY_OK;
2146}
2147
2148
2149/* See __mem_cgroup_try_charge() for details */
2150enum {
2151	CHARGE_OK,		/* success */
2152	CHARGE_RETRY,		/* need to retry but retry is not bad */
2153	CHARGE_NOMEM,		/* we can't do more. return -ENOMEM */
2154	CHARGE_WOULDBLOCK,	/* __GFP_WAIT wasn't set and not enough res. */
2155	CHARGE_OOM_DIE,		/* the current task is killed because of OOM */
2156};
2157
2158static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2159				unsigned int nr_pages, bool oom_check)
2160{
2161	unsigned long csize = nr_pages * PAGE_SIZE;
 
2162	struct mem_cgroup *mem_over_limit;
2163	struct res_counter *fail_res;
2164	unsigned long flags = 0;
2165	int ret;
2166
2167	ret = res_counter_charge(&memcg->res, csize, &fail_res);
2168
2169	if (likely(!ret)) {
2170		if (!do_swap_account)
2171			return CHARGE_OK;
2172		ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
2173		if (likely(!ret))
2174			return CHARGE_OK;
2175
2176		res_counter_uncharge(&memcg->res, csize);
2177		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
2178		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
2179	} else
2180		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
2181	/*
2182	 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
2183	 * of regular pages (CHARGE_BATCH), or a single regular page (1).
2184	 *
2185	 * Never reclaim on behalf of optional batching, retry with a
2186	 * single page instead.
2187	 */
2188	if (nr_pages == CHARGE_BATCH)
2189		return CHARGE_RETRY;
2190
2191	if (!(gfp_mask & __GFP_WAIT))
2192		return CHARGE_WOULDBLOCK;
2193
2194	ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
2195	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2196		return CHARGE_RETRY;
2197	/*
2198	 * Even though the limit is exceeded at this point, reclaim
2199	 * may have been able to free some pages.  Retry the charge
2200	 * before killing the task.
2201	 *
2202	 * Only for regular pages, though: huge pages are rather
2203	 * unlikely to succeed so close to the limit, and we fall back
2204	 * to regular pages anyway in case of failure.
2205	 */
2206	if (nr_pages == 1 && ret)
2207		return CHARGE_RETRY;
2208
2209	/*
2210	 * At task move, charge accounts can be doubly counted. So, it's
2211	 * better to wait until the end of task_move if something is going on.
2212	 */
2213	if (mem_cgroup_wait_acct_move(mem_over_limit))
2214		return CHARGE_RETRY;
2215
2216	/* If we don't need to call the oom-killer at all, return immediately */
2217	if (!oom_check)
2218		return CHARGE_NOMEM;
2219	/* check OOM */
2220	if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask, get_order(csize)))
2221		return CHARGE_OOM_DIE;
2222
2223	return CHARGE_RETRY;
2224}
2225
2226/*
2227 * __mem_cgroup_try_charge() does
2228 * 1. detect memcg to be charged against from passed *mm and *ptr,
2229 * 2. update res_counter
2230 * 3. call memory reclaim if necessary.
2231 *
2232 * In a special case, if the task is dying (fatal_signal_pending() or
2233 * TIF_MEMDIE set), this function returns -EINTR while writing root_mem_cgroup
2234 * to *ptr. There are two reasons for this. 1: dying threads should quit as
2235 * soon as possible without any hazards. 2: all pages should have a valid
2236 * pc->mem_cgroup. If mm is NULL and the caller doesn't pass a valid memcg
2237 * pointer, that is treated as a charge to root_mem_cgroup.
2238 *
2239 * So __mem_cgroup_try_charge() will return
2240 *  0       ...  on success, filling *ptr with a valid memcg pointer.
2241 *  -ENOMEM ...  charge failure because of resource limits.
2242 *  -EINTR  ...  if the thread is dying. *ptr is filled with root_mem_cgroup.
2243 *
2244 * Unlike the exported interface, an "oom" parameter is added. If oom==true,
2245 * the oom-killer can be invoked.
2246 */
2247static int __mem_cgroup_try_charge(struct mm_struct *mm,
2248				   gfp_t gfp_mask,
2249				   unsigned int nr_pages,
2250				   struct mem_cgroup **ptr,
2251				   bool oom)
2252{
2253	unsigned int batch = max(CHARGE_BATCH, nr_pages);
2254	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2255	struct mem_cgroup *memcg = NULL;
2256	int ret;
2257
2258	/*
2259	 * Unlike the global VM's OOM kill, we're not in a system-wide memory
2260	 * shortage here. So, allow dying processes to proceed, in addition to
2261	 * MEMDIE processes.
2262	 */
2263	if (unlikely(test_thread_flag(TIF_MEMDIE)
2264		     || fatal_signal_pending(current)))
2265		goto bypass;
2266
2267	/*
2268	 * We always charge the cgroup the mm_struct belongs to.
2269	 * The mm_struct's mem_cgroup changes on task migration if the
2270	 * thread group leader migrates. It's possible that mm is not
2271	 * set, if so charge the init_mm (happens for pagecache usage).
2272	 */
2273	if (!*ptr && !mm)
2274		*ptr = root_mem_cgroup;
2275again:
2276	if (*ptr) { /* css should be a valid one */
2277		memcg = *ptr;
2278		VM_BUG_ON(css_is_removed(&memcg->css));
2279		if (mem_cgroup_is_root(memcg))
2280			goto done;
2281		if (nr_pages == 1 && consume_stock(memcg))
2282			goto done;
2283		css_get(&memcg->css);
2284	} else {
2285		struct task_struct *p;
2286
2287		rcu_read_lock();
2288		p = rcu_dereference(mm->owner);
2289		/*
2290		 * Because we don't have task_lock(), "p" can exit.
2291		 * In that case, "memcg" can point to root, or p can be NULL due to
2292		 * a race with swapoff. Then, we have a small risk of mis-accounting.
2293		 * But this kind of mis-accounting due to races always happens because
2294		 * we don't hold cgroup_mutex(). Avoiding it would be overkill, so we
2295		 * allow that small race here.
2296		 * (*) swapoff et al. will charge against the mm_struct, not against
2297		 * the task_struct. So, mm->owner can be NULL.
2298		 */
2299		memcg = mem_cgroup_from_task(p);
2300		if (!memcg)
2301			memcg = root_mem_cgroup;
2302		if (mem_cgroup_is_root(memcg)) {
2303			rcu_read_unlock();
2304			goto done;
2305		}
2306		if (nr_pages == 1 && consume_stock(memcg)) {
2307			/*
2308			 * It seems dangerous to access memcg without css_get().
2309			 * But considering how consume_stock works, it's not
2310			 * necessary. If consume_stock succeeds, some charges
2311			 * from this memcg are cached on this cpu. So, we
2312			 * don't need to call css_get()/css_tryget() before
2313			 * calling consume_stock().
2314			 */
2315			rcu_read_unlock();
2316			goto done;
2317		}
2318		/* after here, we may be blocked. we need to get refcnt */
2319		if (!css_tryget(&memcg->css)) {
2320			rcu_read_unlock();
2321			goto again;
2322		}
2323		rcu_read_unlock();
2324	}
2325
2326	do {
2327		bool oom_check;
2328
2329		/* If killed, bypass charge */
2330		if (fatal_signal_pending(current)) {
2331			css_put(&memcg->css);
2332			goto bypass;
2333		}
2334
2335		oom_check = false;
2336		if (oom && !nr_oom_retries) {
2337			oom_check = true;
2338			nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2339		}
2340
2341		ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, oom_check);
2342		switch (ret) {
2343		case CHARGE_OK:
2344			break;
2345		case CHARGE_RETRY: /* not in OOM situation but retry */
2346			batch = nr_pages;
2347			css_put(&memcg->css);
2348			memcg = NULL;
2349			goto again;
2350		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
2351			css_put(&memcg->css);
2352			goto nomem;
2353		case CHARGE_NOMEM: /* OOM routine works */
2354			if (!oom) {
2355				css_put(&memcg->css);
2356				goto nomem;
2357			}
2358			/* If oom, we never return -ENOMEM */
2359			nr_oom_retries--;
2360			break;
2361		case CHARGE_OOM_DIE: /* Killed by OOM Killer */
2362			css_put(&memcg->css);
2363			goto bypass;
2364		}
2365	} while (ret != CHARGE_OK);
2366
2367	if (batch > nr_pages)
2368		refill_stock(memcg, batch - nr_pages);
2369	css_put(&memcg->css);
2370done:
2371	*ptr = memcg;
2372	return 0;
2373nomem:
2374	*ptr = NULL;
2375	return -ENOMEM;
2376bypass:
2377	*ptr = root_mem_cgroup;
2378	return -EINTR;
2379}
2380
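/*
 * Sketch of the charge protocol described above, assuming a single page
 * and a caller like mem_cgroup_charge_common() below (an approximation,
 * not an additional code path in this file):
 *
 *	struct mem_cgroup *memcg = NULL;
 *	int ret;
 *
 *	ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &memcg, true);
 *	if (ret == -ENOMEM)
 *		return ret;
 *	(ret is 0 or -EINTR here; memcg is valid, root_mem_cgroup on -EINTR)
 *	__mem_cgroup_commit_charge(memcg, page, 1, ctype, false);
 */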
2381/*
2382 * Sometimes we have to undo a charge we got by try_charge().
2383 * This function is for that: it uncharges and puts the css refcount
2384 * obtained by try_charge().
2385 */
2386static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
2387				       unsigned int nr_pages)
2388{
2389	if (!mem_cgroup_is_root(memcg)) {
2390		unsigned long bytes = nr_pages * PAGE_SIZE;
2391
2392		res_counter_uncharge(&memcg->res, bytes);
2393		if (do_swap_account)
2394			res_counter_uncharge(&memcg->memsw, bytes);
2395	}
2396}
2397
2398/*
2399 * Cancel charges in this cgroup only; this doesn't propagate to the parent cgroup.
2400 * This is useful when moving usage to parent cgroup.
2401 */
2402static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
2403					unsigned int nr_pages)
2404{
2405	unsigned long bytes = nr_pages * PAGE_SIZE;
2406
2407	if (mem_cgroup_is_root(memcg))
2408		return;
2409
2410	res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
2411	if (do_swap_account)
2412		res_counter_uncharge_until(&memcg->memsw,
2413						memcg->memsw.parent, bytes);
 
2414}
2415
2416/*
2417 * A helper function to get a mem_cgroup from an ID. Must be called under
2418 * rcu_read_lock(). The caller must check css_is_removed() or similar if
2419 * that is a concern. (Dropping a refcnt from swap can be done against a
2420 * removed memcg.)
2421 */
2422static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2423{
2424	struct cgroup_subsys_state *css;
2425
2426	/* ID 0 is unused ID */
2427	if (!id)
2428		return NULL;
2429	css = css_lookup(&mem_cgroup_subsys, id);
2430	if (!css)
2431		return NULL;
2432	return container_of(css, struct mem_cgroup, css);
2433}
2434
2435struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
2436{
2437	struct mem_cgroup *memcg = NULL;
2438	struct page_cgroup *pc;
2439	unsigned short id;
2440	swp_entry_t ent;
2441
2442	VM_BUG_ON(!PageLocked(page));
 
2443
2444	pc = lookup_page_cgroup(page);
2445	lock_page_cgroup(pc);
2446	if (PageCgroupUsed(pc)) {
2447		memcg = pc->mem_cgroup;
2448		if (memcg && !css_tryget(&memcg->css))
2449			memcg = NULL;
2450	} else if (PageSwapCache(page)) {
2451		ent.val = page_private(page);
2452		id = lookup_swap_cgroup_id(ent);
2453		rcu_read_lock();
2454		memcg = mem_cgroup_lookup(id);
2455		if (memcg && !css_tryget(&memcg->css))
2456			memcg = NULL;
2457		rcu_read_unlock();
2458	}
2459	unlock_page_cgroup(pc);
2460	return memcg;
2461}
2462
2463static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
2464				       struct page *page,
2465				       unsigned int nr_pages,
2466				       enum charge_type ctype,
2467				       bool lrucare)
2468{
2469	struct page_cgroup *pc = lookup_page_cgroup(page);
2470	struct zone *uninitialized_var(zone);
2471	struct lruvec *lruvec;
2472	bool was_on_lru = false;
2473	bool anon;
2474
2475	lock_page_cgroup(pc);
2476	if (unlikely(PageCgroupUsed(pc))) {
2477		unlock_page_cgroup(pc);
2478		__mem_cgroup_cancel_charge(memcg, nr_pages);
2479		return;
2480	}
2481	/*
2482	 * We don't need page_cgroup_lock for tail pages, because they are not
2483	 * accessed by any other context at this point.
2484	 */
2485
2486	/*
2487	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
2488	 * may already be on some other mem_cgroup's LRU.  Take care of it.
2489	 */
2490	if (lrucare) {
2491		zone = page_zone(page);
2492		spin_lock_irq(&zone->lru_lock);
2493		if (PageLRU(page)) {
2494			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
2495			ClearPageLRU(page);
2496			del_page_from_lru_list(page, lruvec, page_lru(page));
2497			was_on_lru = true;
2498		}
2499	}
2500
2501	pc->mem_cgroup = memcg;
2502	/*
2503	 * We access a page_cgroup asynchronously without lock_page_cgroup().
2504	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
2505	 * is accessed after testing the USED bit. To make pc->mem_cgroup visible
2506	 * before the USED bit, we need a memory barrier here.
2507	 * See mem_cgroup_add_lru_list(), etc.
2508 	 */
2509	smp_wmb();
2510	SetPageCgroupUsed(pc);
2511
2512	if (lrucare) {
2513		if (was_on_lru) {
2514			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
2515			VM_BUG_ON(PageLRU(page));
2516			SetPageLRU(page);
2517			add_page_to_lru_list(page, lruvec, page_lru(page));
2518		}
2519		spin_unlock_irq(&zone->lru_lock);
2520	}
2521
2522	if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
2523		anon = true;
2524	else
2525		anon = false;
2526
2527	mem_cgroup_charge_statistics(memcg, anon, nr_pages);
2528	unlock_page_cgroup(pc);
2529
2530	/*
2531	 * "charge_statistics" updated the event counter, so check it.
2532	 * Insert the ancestor (and the ancestor's ancestors) into the softlimit
2533	 * RB-tree if they exceed the softlimit.
2534	 */
2535	memcg_check_events(memcg, page);
2536}
2537
2538#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2539
2540#define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
2541/*
2542 * Because tail pages are not marked as "used", set that flag here. We're under
2543 * zone->lru_lock, 'splitting on pmd' and compound_lock;
2544 * charge/uncharge will never happen and move_account() is done under
2545 * compound_lock(), so we don't have to take care of races.
2546 */
2547void mem_cgroup_split_huge_fixup(struct page *head)
2548{
2549	struct page_cgroup *head_pc = lookup_page_cgroup(head);
2550	struct page_cgroup *pc;
2551	int i;
2552
2553	if (mem_cgroup_disabled())
2554		return;
2555	for (i = 1; i < HPAGE_PMD_NR; i++) {
2556		pc = head_pc + i;
2557		pc->mem_cgroup = head_pc->mem_cgroup;
2558		smp_wmb();/* see __commit_charge() */
2559		pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
2560	}
2561}
2562#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2563
2564/**
2565 * mem_cgroup_move_account - move account of the page
2566 * @page: the page
2567 * @nr_pages: number of regular pages (>1 for huge pages)
2568 * @pc:	page_cgroup of the page.
2569 * @from: mem_cgroup which the page is moved from.
2570 * @to:	mem_cgroup which the page is moved to. @from != @to.
2571 *
2572 * The caller must confirm the following:
2573 * - page is not on LRU (isolate_page() is useful.)
2574 * - compound_lock is held when nr_pages > 1
2575 *
2576 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
2577 * from old cgroup.
2578 */
2579static int mem_cgroup_move_account(struct page *page,
2580				   unsigned int nr_pages,
2581				   struct page_cgroup *pc,
2582				   struct mem_cgroup *from,
2583				   struct mem_cgroup *to)
2584{
2585	unsigned long flags;
2586	int ret;
2587	bool anon = PageAnon(page);
2588
2589	VM_BUG_ON(from == to);
2590	VM_BUG_ON(PageLRU(page));
2591	/*
2592	 * The page is isolated from LRU. So, collapse function
2593	 * will not handle this page. But page splitting can happen.
2594	 * Do this check under compound_page_lock(). The caller should
2595	 * hold it.
2596	 */
2597	ret = -EBUSY;
2598	if (nr_pages > 1 && !PageTransHuge(page))
2599		goto out;
2600
2601	lock_page_cgroup(pc);
2602
2603	ret = -EINVAL;
2604	if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
2605		goto unlock;
2606
2607	move_lock_mem_cgroup(from, &flags);
2608
2609	if (!anon && page_mapped(page)) {
2610		/* Update mapped_file data for mem_cgroup */
2611		preempt_disable();
2612		__this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2613		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2614		preempt_enable();
2615	}
2616	mem_cgroup_charge_statistics(from, anon, -nr_pages);
2617
2618	/* caller should have done css_get */
2619	pc->mem_cgroup = to;
2620	mem_cgroup_charge_statistics(to, anon, nr_pages);
2621	/*
2622	 * We charge against "to", which may not have any tasks. Then, "to"
2623	 * can be under rmdir(). But in the current implementation, the callers
2624	 * of this function are just force_empty() and move charge, so it's
2625	 * guaranteed that "to" is never removed. So, we don't check the rmdir
2626	 * status here.
2627	 */
2628	move_unlock_mem_cgroup(from, &flags);
2629	ret = 0;
2630unlock:
2631	unlock_page_cgroup(pc);
2632	/*
2633	 * check events
2634	 */
2635	memcg_check_events(to, page);
2636	memcg_check_events(from, page);
2637out:
2638	return ret;
2639}
2640
2641/*
2642 * move charges to its parent.
2643 */
2644
2645static int mem_cgroup_move_parent(struct page *page,
2646				  struct page_cgroup *pc,
2647				  struct mem_cgroup *child,
2648				  gfp_t gfp_mask)
2649{
2650	struct mem_cgroup *parent;
2651	unsigned int nr_pages;
2652	unsigned long uninitialized_var(flags);
2653	int ret;
2654
2655	/* Is ROOT ? */
2656	if (mem_cgroup_is_root(child))
2657		return -EINVAL;
 
2658
2659	ret = -EBUSY;
2660	if (!get_page_unless_zero(page))
2661		goto out;
2662	if (isolate_lru_page(page))
2663		goto put;
2664
2665	nr_pages = hpage_nr_pages(page);
2666
2667	parent = parent_mem_cgroup(child);
2668	/*
2669	 * If no parent, move charges to root cgroup.
 
2670	 */
2671	if (!parent)
2672		parent = root_mem_cgroup;
2673
2674	if (nr_pages > 1)
2675		flags = compound_lock_irqsave(page);
2676
2677	ret = mem_cgroup_move_account(page, nr_pages,
2678				pc, child, parent);
2679	if (!ret)
2680		__mem_cgroup_cancel_local_charge(child, nr_pages);
2681
2682	if (nr_pages > 1)
2683		compound_unlock_irqrestore(page, flags);
2684	putback_lru_page(page);
2685put:
2686	put_page(page);
2687out:
2688	return ret;
2689}
2690
2691/*
2692 * Charge the memory controller for page usage.
2693 * Return
2694 * 0 if the charge was successful
2695 * < 0 if the cgroup is over its limit
2696 */
2697static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
2698				gfp_t gfp_mask, enum charge_type ctype)
2699{
2700	struct mem_cgroup *memcg = NULL;
2701	unsigned int nr_pages = 1;
2702	bool oom = true;
2703	int ret;
2704
2705	if (PageTransHuge(page)) {
2706		nr_pages <<= compound_order(page);
2707		VM_BUG_ON(!PageTransHuge(page));
2708		/*
2709		 * Never OOM-kill a process for a huge page.  The
2710		 * fault handler will fall back to regular pages.
2711		 */
2712		oom = false;
2713	}
2714
2715	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
2716	if (ret == -ENOMEM)
2717		return ret;
2718	__mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false);
2719	return 0;
2720}
2721
2722int mem_cgroup_newpage_charge(struct page *page,
2723			      struct mm_struct *mm, gfp_t gfp_mask)
2724{
2725	if (mem_cgroup_disabled())
2726		return 0;
2727	VM_BUG_ON(page_mapped(page));
2728	VM_BUG_ON(page->mapping && !PageAnon(page));
2729	VM_BUG_ON(!mm);
2730	return mem_cgroup_charge_common(page, mm, gfp_mask,
2731					MEM_CGROUP_CHARGE_TYPE_MAPPED);
2732}
2733
2734static void
2735__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2736					enum charge_type ctype);
2737
2738int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
2739				gfp_t gfp_mask)
2740{
2741	struct mem_cgroup *memcg = NULL;
2742	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
2743	int ret;
 
2744
2745	if (mem_cgroup_disabled())
2746		return 0;
2747	if (PageCompound(page))
2748		return 0;
2749
2750	if (unlikely(!mm))
2751		mm = &init_mm;
2752	if (!page_is_file_cache(page))
2753		type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
2754
2755	if (!PageSwapCache(page))
2756		ret = mem_cgroup_charge_common(page, mm, gfp_mask, type);
2757	else { /* page is swapcache/shmem */
2758		ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &memcg);
2759		if (!ret)
2760			__mem_cgroup_commit_charge_swapin(page, memcg, type);
2761	}
2762	return ret;
2763}
2764
2765/*
2766 * During swap-in (try_charge -> commit or cancel), the page is locked.
2767 * And when try_charge() returns successfully, one refcount on the memcg,
2768 * not tied to a struct page_cgroup, is acquired. This refcount will be
2769 * consumed by "commit()" or released by "cancel()".
2770 */
2771int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
2772				 struct page *page,
2773				 gfp_t mask, struct mem_cgroup **memcgp)
2774{
2775	struct mem_cgroup *memcg;
2776	int ret;
2777
2778	*memcgp = NULL;
2779
2780	if (mem_cgroup_disabled())
2781		return 0;
2782
2783	if (!do_swap_account)
2784		goto charge_cur_mm;
2785	/*
2786	 * A racing thread's fault, or swapoff, may have already updated
2787	 * the pte, and even removed page from swap cache: in those cases
2788	 * do_swap_page()'s pte_same() test will fail; but there's also a
2789	 * KSM case which does need to charge the page.
2790	 */
2791	if (!PageSwapCache(page))
2792		goto charge_cur_mm;
2793	memcg = try_get_mem_cgroup_from_page(page);
2794	if (!memcg)
2795		goto charge_cur_mm;
2796	*memcgp = memcg;
2797	ret = __mem_cgroup_try_charge(NULL, mask, 1, memcgp, true);
2798	css_put(&memcg->css);
2799	if (ret == -EINTR)
2800		ret = 0;
2801	return ret;
2802charge_cur_mm:
2803	if (unlikely(!mm))
2804		mm = &init_mm;
2805	ret = __mem_cgroup_try_charge(mm, mask, 1, memcgp, true);
2806	if (ret == -EINTR)
2807		ret = 0;
2808	return ret;
2809}
2810
2811static void
2812__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
2813					enum charge_type ctype)
2814{
2815	if (mem_cgroup_disabled())
2816		return;
2817	if (!memcg)
2818		return;
2819	cgroup_exclude_rmdir(&memcg->css);
2820
2821	__mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
2822	/*
2823	 * Now the swap is in memory. This means this page may be
2824	 * counted both as mem and swap, i.e. double-counted.
2825	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
2826	 * under lock_page(). But in do_swap_page() (memory.c), reuse_swap_page()
2827	 * may call delete_from_swap_cache() before we reach here.
2828	 */
2829	if (do_swap_account && PageSwapCache(page)) {
2830		swp_entry_t ent = {.val = page_private(page)};
2831		mem_cgroup_uncharge_swap(ent);
2832	}
2833	/*
2834	 * At swapin, we may charge against a cgroup which has no tasks.
2835	 * So, rmdir()->pre_destroy() can be called while we do this charge.
2836	 * In that case, we need to call pre_destroy() again. Check it here.
2837	 */
2838	cgroup_release_and_wakeup_rmdir(&memcg->css);
2839}
2840
2841void mem_cgroup_commit_charge_swapin(struct page *page,
2842				     struct mem_cgroup *memcg)
2843{
2844	__mem_cgroup_commit_charge_swapin(page, memcg,
2845					  MEM_CGROUP_CHARGE_TYPE_MAPPED);
2846}
2847
2848void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
2849{
2850	if (mem_cgroup_disabled())
2851		return;
2852	if (!memcg)
2853		return;
2854	__mem_cgroup_cancel_charge(memcg, 1);
2855}
2856
2857static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
2858				   unsigned int nr_pages,
2859				   const enum charge_type ctype)
2860{
2861	struct memcg_batch_info *batch = NULL;
2862	bool uncharge_memsw = true;
2863
2864	/* If swapout, usage of swap doesn't decrease */
2865	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2866		uncharge_memsw = false;
2867
2868	batch = &current->memcg_batch;
2869	/*
2870	 * Usually, we do css_get() when we remember a memcg pointer.
2871	 * But in this case, we keep res->usage until the end of a series of
2872	 * uncharges. Then, it's OK to ignore memcg's refcnt.
2873	 */
2874	if (!batch->memcg)
2875		batch->memcg = memcg;
2876	/*
2877	 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
2878	 * In those cases, all pages freed continuously can be expected to be in
2879	 * the same cgroup and we have a chance to coalesce uncharges.
2880	 * But we do uncharge one by one if the task is killed by OOM (TIF_MEMDIE)
2881	 * because we want to uncharge as soon as possible.
2882	 */
2883
2884	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
2885		goto direct_uncharge;
2886
2887	if (nr_pages > 1)
2888		goto direct_uncharge;
2889
2890	/*
2891	 * In the typical case, batch->memcg == memcg. This means we can
2892	 * merge a series of uncharges into one uncharge of the res_counter.
2893	 * If not, we uncharge the res_counter one by one.
2894	 */
2895	if (batch->memcg != memcg)
2896		goto direct_uncharge;
2897	/* remember freed charge and uncharge it later */
2898	batch->nr_pages++;
2899	if (uncharge_memsw)
2900		batch->memsw_nr_pages++;
2901	return;
2902direct_uncharge:
2903	res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
2904	if (uncharge_memsw)
2905		res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
2906	if (unlikely(batch->memcg != memcg))
2907		memcg_oom_recover(memcg);
2908}
2909
2910/*
2911 * uncharge if !page_mapped(page)
2912 */
2913static struct mem_cgroup *
2914__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
2915{
2916	struct mem_cgroup *memcg = NULL;
2917	unsigned int nr_pages = 1;
2918	struct page_cgroup *pc;
2919	bool anon;
2920
2921	if (mem_cgroup_disabled())
2922		return NULL;
2923
2924	if (PageSwapCache(page))
2925		return NULL;
2926
2927	if (PageTransHuge(page)) {
2928		nr_pages <<= compound_order(page);
2929		VM_BUG_ON(!PageTransHuge(page));
2930	}
2931	/*
2932	 * Check if our page_cgroup is valid
2933	 */
2934	pc = lookup_page_cgroup(page);
2935	if (unlikely(!PageCgroupUsed(pc)))
2936		return NULL;
2937
2938	lock_page_cgroup(pc);
 
2939
2940	memcg = pc->mem_cgroup;
2941
2942	if (!PageCgroupUsed(pc))
2943		goto unlock_out;
2944
2945	anon = PageAnon(page);
2946
2947	switch (ctype) {
2948	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2949		/*
2950		 * Generally PageAnon tells if it's the anon statistics to be
2951		 * updated; but sometimes e.g. mem_cgroup_uncharge_page() is
2952		 * used before the page reaches the stage of being marked PageAnon.
2953		 */
2954		anon = true;
2955		/* fallthrough */
2956	case MEM_CGROUP_CHARGE_TYPE_DROP:
2957		/* See mem_cgroup_prepare_migration() */
2958		if (page_mapped(page) || PageCgroupMigration(pc))
2959			goto unlock_out;
2960		break;
2961	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
2962		if (!PageAnon(page)) {	/* Shared memory */
2963			if (page->mapping && !page_is_file_cache(page))
2964				goto unlock_out;
2965		} else if (page_mapped(page)) /* Anon */
2966				goto unlock_out;
2967		break;
2968	default:
2969		break;
2970	}
2971
2972	mem_cgroup_charge_statistics(memcg, anon, -nr_pages);
2973
2974	ClearPageCgroupUsed(pc);
2975	/*
2976	 * pc->mem_cgroup is not cleared here. It will be accessed when the page is
2977	 * freed from the LRU. This is safe because an uncharged page is expected
2978	 * not to be reused (it will be freed soon). The exception is SwapCache,
2979	 * which is handled by special functions.
2980	 */
2981
2982	unlock_page_cgroup(pc);
2983	/*
2984	 * even after unlock, we have memcg->res.usage here and this memcg
2985	 * will never be freed.
2986	 */
2987	memcg_check_events(memcg, page);
2988	if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
2989		mem_cgroup_swap_statistics(memcg, true);
2990		mem_cgroup_get(memcg);
2991	}
2992	if (!mem_cgroup_is_root(memcg))
2993		mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
2994
2995	return memcg;
2996
2997unlock_out:
2998	unlock_page_cgroup(pc);
2999	return NULL;
3000}
3001
3002void mem_cgroup_uncharge_page(struct page *page)
3003{
3004	/* early check. */
3005	if (page_mapped(page))
3006		return;
3007	VM_BUG_ON(page->mapping && !PageAnon(page));
3008	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
3009}
3010
3011void mem_cgroup_uncharge_cache_page(struct page *page)
 
3012{
3013	VM_BUG_ON(page_mapped(page));
3014	VM_BUG_ON(page->mapping);
3015	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
3016}
3017
3018/*
3019 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
3020 * In those cases, pages are freed continuously and we can expect the pages
3021 * to be in the same memcg. Each of those callers itself limits the number of
3022 * pages freed at once, so uncharge_start/end() is called properly.
3023 * This may be called multiple (nested) times in one context.
3024 */
3025
3026void mem_cgroup_uncharge_start(void)
3027{
3028	current->memcg_batch.do_batch++;
3029	/* We can do nest. */
3030	if (current->memcg_batch.do_batch == 1) {
3031		current->memcg_batch.memcg = NULL;
3032		current->memcg_batch.nr_pages = 0;
3033		current->memcg_batch.memsw_nr_pages = 0;
3034	}
3035}
3036
3037void mem_cgroup_uncharge_end(void)
3038{
3039	struct memcg_batch_info *batch = &current->memcg_batch;
 
3040
3041	if (!batch->do_batch)
3042		return;
3043
3044	batch->do_batch--;
3045	if (batch->do_batch) /* If stacked, do nothing. */
3046		return;
 
3047
3048	if (!batch->memcg)
3049		return;
3050	/*
3051	 * This "batch->memcg" is valid without any css_get/put etc.,
3052	 * because we hide the charges behind us.
3053	 */
3054	if (batch->nr_pages)
3055		res_counter_uncharge(&batch->memcg->res,
3056				     batch->nr_pages * PAGE_SIZE);
3057	if (batch->memsw_nr_pages)
3058		res_counter_uncharge(&batch->memcg->memsw,
3059				     batch->memsw_nr_pages * PAGE_SIZE);
3060	memcg_oom_recover(batch->memcg);
3061	/* forget this pointer (for sanity check) */
3062	batch->memcg = NULL;
3063}
3064
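/*
 * Sketch of the batched uncharge usage described above, assuming a
 * truncate/unmap style loop (the real callers live elsewhere in mm/):
 *
 *	mem_cgroup_uncharge_start();
 *	for each page being freed:
 *		mem_cgroup_uncharge_cache_page(page);
 *	mem_cgroup_uncharge_end();	(one res_counter operation per batch)
 */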
3065#ifdef CONFIG_SWAP
3066/*
3067 * Called after __delete_from_swap_cache() to drop the "page" account.
3068 * The memcg information is recorded in the swap_cgroup of "ent".
3069 */
3070void
3071mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
3072{
3073	struct mem_cgroup *memcg;
3074	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
3075
3076	if (!swapout) /* this was a swap cache but the swap is unused ! */
3077		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
3078
3079	memcg = __mem_cgroup_uncharge_common(page, ctype);
3080
3081	/*
3082	 * Record memcg information. If swapout && memcg != NULL,
3083	 * mem_cgroup_get() was called in uncharge().
3084	 */
3085	if (do_swap_account && swapout && memcg)
3086		swap_cgroup_record(ent, css_id(&memcg->css));
3087}
3088#endif
3089
3090#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3091/*
3092 * called from swap_entry_free(). remove record in swap_cgroup and
3093 * uncharge "memsw" account.
3094 */
3095void mem_cgroup_uncharge_swap(swp_entry_t ent)
3096{
3097	struct mem_cgroup *memcg;
3098	unsigned short id;
3099
3100	if (!do_swap_account)
3101		return;
3102
3103	id = swap_cgroup_record(ent, 0);
3104	rcu_read_lock();
3105	memcg = mem_cgroup_lookup(id);
3106	if (memcg) {
3107		/*
3108		 * We uncharge this because the swap entry is freed.
3109		 * This memcg can be an obsolete one. We avoid calling css_tryget().
3110		 */
3111		if (!mem_cgroup_is_root(memcg))
3112			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
3113		mem_cgroup_swap_statistics(memcg, false);
3114		mem_cgroup_put(memcg);
3115	}
3116	rcu_read_unlock();
3117}
3118
3119/**
3120 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3121 * @entry: swap entry to be moved
3122 * @from:  mem_cgroup which the entry is moved from
3123 * @to:  mem_cgroup which the entry is moved to
3124 *
3125 * It succeeds only when the swap_cgroup's record for this entry is the same
3126 * as the mem_cgroup's id of @from.
3127 *
3128 * Returns 0 on success, -EINVAL on failure.
3129 *
3130 * The caller must have charged to @to, IOW, called res_counter_charge() about
3131 * both res and memsw, and called css_get().
3132 */
3133static int mem_cgroup_move_swap_account(swp_entry_t entry,
3134				struct mem_cgroup *from, struct mem_cgroup *to)
3135{
3136	unsigned short old_id, new_id;
3137
3138	old_id = css_id(&from->css);
3139	new_id = css_id(&to->css);
3140
3141	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3142		mem_cgroup_swap_statistics(from, false);
3143		mem_cgroup_swap_statistics(to, true);
3144		/*
3145		 * This function is only called from task migration context now.
3146		 * It postpones res_counter and refcount handling till the end
3147		 * of task migration(mem_cgroup_clear_mc()) for performance
3148		 * improvement. But we cannot postpone mem_cgroup_get(to)
3149		 * because if the process that has been moved to @to does
3150		 * swap-in, the refcount of @to might be decreased to 0.
3151		 */
3152		mem_cgroup_get(to);
3153		return 0;
3154	}
3155	return -EINVAL;
3156}
3157#else
3158static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3159				struct mem_cgroup *from, struct mem_cgroup *to)
3160{
3161	return -EINVAL;
3162}
3163#endif
3164
3165/*
3166 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
3167 * page belongs to.
3168 */
3169int mem_cgroup_prepare_migration(struct page *page,
3170	struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask)
3171{
3172	struct mem_cgroup *memcg = NULL;
3173	struct page_cgroup *pc;
3174	enum charge_type ctype;
3175	int ret = 0;
3176
3177	*memcgp = NULL;
3178
3179	VM_BUG_ON(PageTransHuge(page));
3180	if (mem_cgroup_disabled())
3181		return 0;
3182
3183	pc = lookup_page_cgroup(page);
3184	lock_page_cgroup(pc);
3185	if (PageCgroupUsed(pc)) {
3186		memcg = pc->mem_cgroup;
3187		css_get(&memcg->css);
3188		/*
3189		 * When migrating an anonymous page, its mapcount goes down
3190		 * to 0 and uncharge() will be called. But, even if it's fully
3191		 * unmapped, migration may fail and this page would have to be
3192		 * charged again. We set the MIGRATION flag here and delay the
3193		 * uncharge until end_migration() is called.
3194		 *
3195		 * Corner Case Thinking
3196		 * A)
3197		 * When the old page was mapped as Anon and it's unmap-and-freed
3198		 * while migration was ongoing.
3199		 * If unmap finds the old page, uncharge() of it will be delayed
3200		 * until end_migration(). If unmap finds a new page, it's
3201		 * uncharged when its mapcount drops from 1 to 0. If the unmap code
3202		 * finds a swap_migration_entry, the new page will not be mapped
3203		 * and end_migration() will find it (mapcount == 0).
3204		 *
3205		 * B)
3206		 * When the old page was mapped but migration fails, the kernel
3207		 * remaps it. A charge for it is kept by the MIGRATION flag even
3208		 * if the mapcount goes down to 0. We can do the remap successfully
3209		 * without charging it again.
3210		 *
3211		 * C)
3212		 * The "old" page is under lock_page() until the end of
3213		 * migration, so the old page itself will not be swapped out.
3214		 * If the new page is swapped out before end_migration, our
3215		 * hook into the usual swap-out path will catch the event.
3216		 */
3217		if (PageAnon(page))
3218			SetPageCgroupMigration(pc);
3219	}
3220	unlock_page_cgroup(pc);
3221	/*
3222	 * If the page is not charged at this point,
3223	 * we return here.
3224	 */
3225	if (!memcg)
3226		return 0;
3227
3228	*memcgp = memcg;
3229	ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, memcgp, false);
3230	css_put(&memcg->css);/* drop extra refcnt */
3231	if (ret) {
3232		if (PageAnon(page)) {
3233			lock_page_cgroup(pc);
3234			ClearPageCgroupMigration(pc);
3235			unlock_page_cgroup(pc);
3236			/*
3237			 * The old page may be fully unmapped while we kept it.
3238			 */
3239			mem_cgroup_uncharge_page(page);
3240		}
3241		/* we'll need to revisit this error code (we have -EINTR) */
3242		return -ENOMEM;
3243	}
3244	/*
3245	 * We charge new page before it's used/mapped. So, even if unlock_page()
3246	 * is called before end_migration, we can catch all events on this new
3247	 * page. In the case new page is migrated but not remapped, new page's
3248	 * mapcount will be finally 0 and we call uncharge in end_migration().
3249	 */
3250	if (PageAnon(page))
3251		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
3252	else if (page_is_file_cache(page))
3253		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
3254	else
3255		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
3256	__mem_cgroup_commit_charge(memcg, newpage, 1, ctype, false);
3257	return ret;
3258}
3259
3260/* remove redundant charge if migration failed*/
3261void mem_cgroup_end_migration(struct mem_cgroup *memcg,
3262	struct page *oldpage, struct page *newpage, bool migration_ok)
3263{
3264	struct page *used, *unused;
3265	struct page_cgroup *pc;
3266	bool anon;
3267
3268	if (!memcg)
3269		return;
3270	/* blocks rmdir() */
3271	cgroup_exclude_rmdir(&memcg->css);
3272	if (!migration_ok) {
3273		used = oldpage;
3274		unused = newpage;
3275	} else {
3276		used = newpage;
3277		unused = oldpage;
3278	}
3279	/*
3280	 * We disallowed uncharging pages under migration because the page's
3281	 * mapcount temporarily goes down to zero.
3282	 * Clear the flag and check whether the page should stay charged.
3283	 */
3284	pc = lookup_page_cgroup(oldpage);
3285	lock_page_cgroup(pc);
3286	ClearPageCgroupMigration(pc);
3287	unlock_page_cgroup(pc);
3288	anon = PageAnon(used);
3289	__mem_cgroup_uncharge_common(unused,
3290		anon ? MEM_CGROUP_CHARGE_TYPE_MAPPED
3291		     : MEM_CGROUP_CHARGE_TYPE_CACHE);
3292
3293	/*
3294	 * If the page is file cache, the radix-tree replacement is atomic
3295	 * and we can skip this check. When it was an Anon page, its mapcount
3296	 * went down to 0, but because we added the MIGRATION flag it is not
3297	 * uncharged yet. There are several cases, but the page->mapcount check
3298	 * and the USED bit check in mem_cgroup_uncharge_page() are enough.
3299	 * (See prepare_charge() as well.)
3300	 */
3301	if (anon)
3302		mem_cgroup_uncharge_page(used);
3303	/*
3304	 * During migration we may charge against a cgroup which has no
3305	 * tasks, so rmdir()->pre_destroy() can be called while we hold this
3306	 * charge.
3307	 * In that case, pre_destroy() needs to be called again; check it here.
3308	 */
3309	cgroup_release_and_wakeup_rmdir(&memcg->css);
3310}
3311
3312/*
3313 * When replacing page cache, newpage is not under any memcg but is on the
3314 * LRU. So this function doesn't touch the res_counter but handles the LRU
3315 * correctly. Both pages are locked, so we cannot race with uncharge.
3316 */
3317void mem_cgroup_replace_page_cache(struct page *oldpage,
3318				  struct page *newpage)
3319{
3320	struct mem_cgroup *memcg = NULL;
3321	struct page_cgroup *pc;
3322	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
3323
3324	if (mem_cgroup_disabled())
3325		return;
3326
3327	pc = lookup_page_cgroup(oldpage);
3328	/* fix accounting on old pages */
3329	lock_page_cgroup(pc);
3330	if (PageCgroupUsed(pc)) {
3331		memcg = pc->mem_cgroup;
3332		mem_cgroup_charge_statistics(memcg, false, -1);
3333		ClearPageCgroupUsed(pc);
3334	}
3335	unlock_page_cgroup(pc);
3336
3337	/*
3338	 * When called from shmem_replace_page(), in some cases the
3339	 * oldpage has already been charged, and in some cases not.
3340	 */
3341	if (!memcg)
3342		return;
3343
3344	if (PageSwapBacked(oldpage))
3345		type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
3346
3347	/*
3348	 * Even if newpage->mapping was NULL before starting the replacement,
3349	 * the newpage may already be on the LRU (or on a pagevec headed for
3350	 * the LRU). We hold the LRU lock while we overwrite pc->mem_cgroup.
3351	 */
3352	__mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
3353}
3354
3355#ifdef CONFIG_DEBUG_VM
3356static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
3357{
3358	struct page_cgroup *pc;
3359
3360	pc = lookup_page_cgroup(page);
3361	/*
3362	 * Can be NULL while feeding pages into the page allocator for
3363	 * the first time, i.e. during boot or memory hotplug;
3364	 * or when mem_cgroup_disabled().
3365	 */
3366	if (likely(pc) && PageCgroupUsed(pc))
3367		return pc;
3368	return NULL;
3369}
3370
3371bool mem_cgroup_bad_page_check(struct page *page)
3372{
3373	if (mem_cgroup_disabled())
3374		return false;
3375
3376	return lookup_page_cgroup_used(page) != NULL;
3377}
3378
3379void mem_cgroup_print_bad_page(struct page *page)
3380{
3381	struct page_cgroup *pc;
3382
3383	pc = lookup_page_cgroup_used(page);
3384	if (pc) {
3385		printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
3386		       pc, pc->flags, pc->mem_cgroup);
3387	}
3388}
3389#endif
3390
3391static DEFINE_MUTEX(set_limit_mutex);
3392
3393static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
3394				unsigned long long val)
3395{
3396	int retry_count;
3397	u64 memswlimit, memlimit;
3398	int ret = 0;
3399	int children = mem_cgroup_count_children(memcg);
3400	u64 curusage, oldusage;
3401	int enlarge;
3402
3403	/*
3404	 * To keep hierarchical_reclaim simple, how long we should retry
3405	 * depends on the caller. We set our retry count to be a function
3406	 * of the number of children we should visit in this loop.
3407	 */
3408	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
3409
3410	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3411
3412	enlarge = 0;
3413	while (retry_count) {
3414		if (signal_pending(current)) {
3415			ret = -EINTR;
3416			break;
3417		}
3418		/*
3419		 * Rather than hiding all of this in some helper, do it
3420		 * open-coded so you can see what it really does.
3421		 * We have to guarantee memcg->res.limit < memcg->memsw.limit.
3422		 */
3423		mutex_lock(&set_limit_mutex);
3424		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3425		if (memswlimit < val) {
3426			ret = -EINVAL;
3427			mutex_unlock(&set_limit_mutex);
3428			break;
3429		}
3430
3431		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3432		if (memlimit < val)
3433			enlarge = 1;
3434
3435		ret = res_counter_set_limit(&memcg->res, val);
3436		if (!ret) {
3437			if (memswlimit == val)
3438				memcg->memsw_is_minimum = true;
3439			else
3440				memcg->memsw_is_minimum = false;
3441		}
3442		mutex_unlock(&set_limit_mutex);
3443
3444		if (!ret)
3445			break;
3446
3447		mem_cgroup_reclaim(memcg, GFP_KERNEL,
3448				   MEM_CGROUP_RECLAIM_SHRINK);
3449		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3450		/* Usage is reduced ? */
3451  		if (curusage >= oldusage)
3452			retry_count--;
3453		else
3454			oldusage = curusage;
3455	}
3456	if (!ret && enlarge)
3457		memcg_oom_recover(memcg);
3458
3459	return ret;
3460}
3461
3462static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
3463					unsigned long long val)
3464{
3465	int retry_count;
3466	u64 memlimit, memswlimit, oldusage, curusage;
3467	int children = mem_cgroup_count_children(memcg);
3468	int ret = -EBUSY;
3469	int enlarge = 0;
3470
3471	/* see mem_cgroup_resize_limit() */
3472	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
3473	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3474	while (retry_count) {
3475		if (signal_pending(current)) {
3476			ret = -EINTR;
3477			break;
3478		}
3479		/*
3480		 * Rather than hiding all of this in some helper, do it
3481		 * open-coded so you can see what it really does.
3482		 * We have to guarantee memcg->res.limit < memcg->memsw.limit.
3483		 */
3484		mutex_lock(&set_limit_mutex);
3485		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3486		if (memlimit > val) {
3487			ret = -EINVAL;
3488			mutex_unlock(&set_limit_mutex);
3489			break;
3490		}
3491		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3492		if (memswlimit < val)
3493			enlarge = 1;
3494		ret = res_counter_set_limit(&memcg->memsw, val);
3495		if (!ret) {
3496			if (memlimit == val)
3497				memcg->memsw_is_minimum = true;
3498			else
3499				memcg->memsw_is_minimum = false;
3500		}
3501		mutex_unlock(&set_limit_mutex);
3502
3503		if (!ret)
3504			break;
3505
3506		mem_cgroup_reclaim(memcg, GFP_KERNEL,
3507				   MEM_CGROUP_RECLAIM_NOSWAP |
3508				   MEM_CGROUP_RECLAIM_SHRINK);
3509		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3510		/* Usage is reduced ? */
3511		if (curusage >= oldusage)
3512			retry_count--;
3513		else
3514			oldusage = curusage;
3515	}
3516	if (!ret && enlarge)
3517		memcg_oom_recover(memcg);
3518	return ret;
3519}
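/*
 * Illustrative user-space sketch (not part of this file): the two resize
 * paths above are driven by writes to memory.limit_in_bytes and, when swap
 * accounting is enabled, memory.memsw.limit_in_bytes.  Because
 * memcg->res.limit may never exceed memcg->memsw.limit, the memsw limit has
 * to be raised first when growing both.  The mount point below is only an
 * example; headers and error handling are omitted.
 *
 *	int fd;
 *
 *	fd = open("/sys/fs/cgroup/memory/grp/memory.memsw.limit_in_bytes",
 *		  O_WRONLY);
 *	write(fd, "1G", 2);		(raise the memory+swap limit first)
 *	close(fd);
 *
 *	fd = open("/sys/fs/cgroup/memory/grp/memory.limit_in_bytes", O_WRONLY);
 *	write(fd, "512M", 4);		(then raise the memory limit)
 *	close(fd);
 */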
3520
3521unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3522					    gfp_t gfp_mask,
3523					    unsigned long *total_scanned)
3524{
3525	unsigned long nr_reclaimed = 0;
3526	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
3527	unsigned long reclaimed;
3528	int loop = 0;
3529	struct mem_cgroup_tree_per_zone *mctz;
3530	unsigned long long excess;
3531	unsigned long nr_scanned;
3532
3533	if (order > 0)
3534		return 0;
3535
3536	mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
3537	/*
3538	 * This loop can run for a while, especially if mem_cgroups continuously
3539	 * keep exceeding their soft limit and putting the system under
3540	 * pressure.
3541	 */
3542	do {
3543		if (next_mz)
3544			mz = next_mz;
3545		else
3546			mz = mem_cgroup_largest_soft_limit_node(mctz);
3547		if (!mz)
3548			break;
3549
3550		nr_scanned = 0;
3551		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
3552						    gfp_mask, &nr_scanned);
3553		nr_reclaimed += reclaimed;
3554		*total_scanned += nr_scanned;
3555		spin_lock(&mctz->lock);
3556
3557		/*
3558		 * If we failed to reclaim anything from this memory cgroup
3559		 * it is time to move on to the next cgroup
3560		 */
3561		next_mz = NULL;
3562		if (!reclaimed) {
3563			do {
3564				/*
3565				 * Loop until we find yet another one.
3566				 *
3567				 * By the time we get the soft_limit lock
3568				 * again, someone might have added the
3569				 * group back on the RB tree. Iterate to
3570				 * make sure we get a different memcg.
3571				 * mem_cgroup_largest_soft_limit_node returns
3572				 * NULL if no other cgroup is present on
3573				 * the tree.
3574				 */
3575				next_mz =
3576				__mem_cgroup_largest_soft_limit_node(mctz);
3577				if (next_mz == mz)
3578					css_put(&next_mz->memcg->css);
3579				else /* next_mz == NULL or other memcg */
3580					break;
3581			} while (1);
3582		}
3583		__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
3584		excess = res_counter_soft_limit_excess(&mz->memcg->res);
3585		/*
3586		 * One school of thought says that we should not add
3587		 * the node back to the tree if reclaim returns 0.
3588		 * But our reclaim could return 0 simply because, due
3589		 * to priority, we are exposing a smaller subset of
3590		 * memory to reclaim from. Consider this a longer-term
3591		 * TODO.
3592		 */
3593		/* If excess == 0, no tree ops */
3594		__mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
3595		spin_unlock(&mctz->lock);
3596		css_put(&mz->memcg->css);
3597		loop++;
3598		/*
3599		 * Could not reclaim anything and there are no more
3600		 * mem cgroups to try or we seem to be looping without
3601		 * reclaiming anything.
3602		 */
3603		if (!nr_reclaimed &&
3604			(next_mz == NULL ||
3605			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3606			break;
3607	} while (!nr_reclaimed);
3608	if (next_mz)
3609		css_put(&next_mz->memcg->css);
3610	return nr_reclaimed;
3611}
3612
3613/*
3614 * This routine traverses the page_cgroups on the given list and drops them all.
3615 * *And* it doesn't reclaim the pages themselves, it just removes the page_cgroups.
3616 */
3617static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
3618				int node, int zid, enum lru_list lru)
3619{
3620	struct mem_cgroup_per_zone *mz;
3621	unsigned long flags, loop;
3622	struct list_head *list;
3623	struct page *busy;
3624	struct zone *zone;
3625	int ret = 0;
3626
3627	zone = &NODE_DATA(node)->node_zones[zid];
3628	mz = mem_cgroup_zoneinfo(memcg, node, zid);
3629	list = &mz->lruvec.lists[lru];
3630
3631	loop = mz->lru_size[lru];
3632	/* give some margin against EBUSY etc...*/
3633	loop += 256;
3634	busy = NULL;
3635	while (loop--) {
3636		struct page_cgroup *pc;
3637		struct page *page;
3638
3639		ret = 0;
3640		spin_lock_irqsave(&zone->lru_lock, flags);
3641		if (list_empty(list)) {
3642			spin_unlock_irqrestore(&zone->lru_lock, flags);
3643			break;
3644		}
3645		page = list_entry(list->prev, struct page, lru);
3646		if (busy == page) {
3647			list_move(&page->lru, list);
3648			busy = NULL;
3649			spin_unlock_irqrestore(&zone->lru_lock, flags);
3650			continue;
3651		}
3652		spin_unlock_irqrestore(&zone->lru_lock, flags);
3653
3654		pc = lookup_page_cgroup(page);
3655
3656		ret = mem_cgroup_move_parent(page, pc, memcg, GFP_KERNEL);
3657		if (ret == -ENOMEM || ret == -EINTR)
3658			break;
3659
3660		if (ret == -EBUSY || ret == -EINVAL) {
3661			/* found lock contention or "pc" is obsolete. */
3662			busy = page;
3663			cond_resched();
3664		} else
3665			busy = NULL;
3666	}
3667
3668	if (!ret && !list_empty(list))
3669		return -EBUSY;
3670	return ret;
3671}
3672
3673/*
3674 * Reduce the mem_cgroup's charge to 0 if there is no task in it.
3675 * This makes it possible to delete the mem_cgroup.
3676 */
3677static int mem_cgroup_force_empty(struct mem_cgroup *memcg, bool free_all)
3678{
3679	int ret;
3680	int node, zid, shrink;
3681	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
3682	struct cgroup *cgrp = memcg->css.cgroup;
3683
3684	css_get(&memcg->css);
3685
3686	shrink = 0;
3687	/* should free all ? */
3688	if (free_all)
3689		goto try_to_free;
3690move_account:
3691	do {
3692		ret = -EBUSY;
3693		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
3694			goto out;
3695		ret = -EINTR;
3696		if (signal_pending(current))
3697			goto out;
3698		/* This is for making all *used* pages to be on LRU. */
3699		lru_add_drain_all();
3700		drain_all_stock_sync(memcg);
3701		ret = 0;
3702		mem_cgroup_start_move(memcg);
3703		for_each_node_state(node, N_HIGH_MEMORY) {
3704			for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
3705				enum lru_list lru;
3706				for_each_lru(lru) {
3707					ret = mem_cgroup_force_empty_list(memcg,
3708							node, zid, lru);
3709					if (ret)
3710						break;
3711				}
3712			}
3713			if (ret)
3714				break;
3715		}
3716		mem_cgroup_end_move(memcg);
3717		memcg_oom_recover(memcg);
3718		/* it seems parent cgroup doesn't have enough mem */
3719		if (ret == -ENOMEM)
3720			goto try_to_free;
3721		cond_resched();
3722	/* "ret" should also be checked to ensure all lists are empty. */
3723	} while (res_counter_read_u64(&memcg->res, RES_USAGE) > 0 || ret);
3724out:
3725	css_put(&memcg->css);
3726	return ret;
3727
3728try_to_free:
3729	/* returns EBUSY if there is a task or if we come here twice. */
3730	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
3731		ret = -EBUSY;
3732		goto out;
3733	}
3734	/* call try-to-free pages to make this cgroup empty */
3735	lru_add_drain_all();
3736	/* try to free all pages in this cgroup */
3737	shrink = 1;
3738	while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
3739		int progress;
3740
3741		if (signal_pending(current)) {
3742			ret = -EINTR;
3743			goto out;
3744		}
3745		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
3746						false);
3747		if (!progress) {
3748			nr_retries--;
3749			/* maybe some writeback is necessary */
3750			congestion_wait(BLK_RW_ASYNC, HZ/10);
3751		}
3752
3753	}
3754	lru_add_drain();
3755	/* try move_account...there may be some *locked* pages. */
3756	goto move_account;
3757}
3758
3759static int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
3760{
3761	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
3762}
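/*
 * Illustrative usage note (not part of this file): this handler backs the
 * memory.force_empty control file, so e.g.
 *
 *	echo 0 > /sys/fs/cgroup/memory/grp/memory.force_empty
 *
 * asks the kernel to reclaim the group's pages or move their charges to the
 * parent as far as possible (the path is only an example of a typical cgroup
 * mount point).
 */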
3763
3764
3765static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
3766{
3767	return mem_cgroup_from_cont(cont)->use_hierarchy;
3768}
3769
3770static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
3771					u64 val)
3772{
3773	int retval = 0;
3774	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3775	struct cgroup *parent = cont->parent;
3776	struct mem_cgroup *parent_memcg = NULL;
3777
3778	if (parent)
3779		parent_memcg = mem_cgroup_from_cont(parent);
3780
3781	cgroup_lock();
3782	/*
3783	 * If parent's use_hierarchy is set, we can't make any modifications
3784	 * in the child subtrees. If it is unset, then the change can
3785	 * occur, provided the current cgroup has no children.
3786	 *
3787	 * For the root cgroup, parent_memcg is NULL; we allow the value to be
3788	 * set if there are no children.
3789	 */
3790	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
3791				(val == 1 || val == 0)) {
3792		if (list_empty(&cont->children))
3793			memcg->use_hierarchy = val;
3794		else
3795			retval = -EBUSY;
3796	} else
3797		retval = -EINVAL;
3798	cgroup_unlock();
3799
3800	return retval;
3801}
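/*
 * Illustrative example (not part of this file) of the rules enforced above
 * for writes to memory.use_hierarchy: with a parent group whose
 * use_hierarchy is 0 and which has no children, writing 1 succeeds.  Once a
 * child group exists, further writes fail with -EBUSY, and writes anywhere
 * inside a subtree whose parent already has use_hierarchy set fail with
 * -EINVAL, so hierarchical and non-hierarchical accounting cannot be mixed.
 */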
3802
3803
3804static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
3805					       enum mem_cgroup_stat_index idx)
3806{
3807	struct mem_cgroup *iter;
3808	long val = 0;
3809
3810	/* Per-cpu values can be negative, use a signed accumulator */
3811	for_each_mem_cgroup_tree(iter, memcg)
3812		val += mem_cgroup_read_stat(iter, idx);
3813
3814	if (val < 0) /* race ? */
3815		val = 0;
3816	return val;
3817}
3818
3819static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3820{
3821	u64 val;
3822
3823	if (!mem_cgroup_is_root(memcg)) {
3824		if (!swap)
3825			return res_counter_read_u64(&memcg->res, RES_USAGE);
3826		else
3827			return res_counter_read_u64(&memcg->memsw, RES_USAGE);
3828	}
3829
3830	val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
3831	val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
3832
3833	if (swap)
3834		val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);
3835
3836	return val << PAGE_SHIFT;
3837}
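/*
 * Illustrative example (not part of this file): for a non-root memcg the
 * usage above comes straight from the res_counter.  For the root memcg it is
 * synthesized from the hierarchical statistics; with hypothetical totals of
 * 300 cache pages and 200 rss pages (plus 50 swapped-out pages when
 * swap == true), the reported value would be
 *
 *	(300 + 200) << PAGE_SHIFT = 500 * 4096 bytes	(memory)
 *	(300 + 200 + 50) << PAGE_SHIFT			(memory+swap)
 *
 * assuming 4 KiB pages.
 */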
3838
3839static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
3840			       struct file *file, char __user *buf,
3841			       size_t nbytes, loff_t *ppos)
3842{
3843	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3844	char str[64];
3845	u64 val;
3846	int type, name, len;
3847
3848	type = MEMFILE_TYPE(cft->private);
3849	name = MEMFILE_ATTR(cft->private);
3850
3851	if (!do_swap_account && type == _MEMSWAP)
3852		return -EOPNOTSUPP;
3853
3854	switch (type) {
3855	case _MEM:
3856		if (name == RES_USAGE)
3857			val = mem_cgroup_usage(memcg, false);
3858		else
3859			val = res_counter_read_u64(&memcg->res, name);
3860		break;
3861	case _MEMSWAP:
3862		if (name == RES_USAGE)
3863			val = mem_cgroup_usage(memcg, true);
3864		else
3865			val = res_counter_read_u64(&memcg->memsw, name);
3866		break;
3867	default:
3868		BUG();
3869	}
3870
3871	len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val);
3872	return simple_read_from_buffer(buf, nbytes, ppos, str, len);
3873}
3874/*
3875 * The user of this function is...
3876 * RES_LIMIT.
3877 */
3878static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
3879			    const char *buffer)
3880{
3881	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3882	int type, name;
3883	unsigned long long val;
3884	int ret;
3885
3886	type = MEMFILE_TYPE(cft->private);
3887	name = MEMFILE_ATTR(cft->private);
3888
3889	if (!do_swap_account && type == _MEMSWAP)
3890		return -EOPNOTSUPP;
3891
3892	switch (name) {
3893	case RES_LIMIT:
3894		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3895			ret = -EINVAL;
3896			break;
3897		}
3898		/* res_counter_memparse_write_strategy() does all the parsing; reuse it */
3899		ret = res_counter_memparse_write_strategy(buffer, &val);
3900		if (ret)
3901			break;
3902		if (type == _MEM)
3903			ret = mem_cgroup_resize_limit(memcg, val);
3904		else
3905			ret = mem_cgroup_resize_memsw_limit(memcg, val);
3906		break;
3907	case RES_SOFT_LIMIT:
3908		ret = res_counter_memparse_write_strategy(buffer, &val);
3909		if (ret)
3910			break;
3911		/*
3912		 * For memsw, soft limits are hard to implement in terms
3913		 * of semantics; for now, we only support soft limits for
3914		 * memory control without swap.
3915		 */
3916		if (type == _MEM)
3917			ret = res_counter_set_soft_limit(&memcg->res, val);
3918		else
3919			ret = -EINVAL;
3920		break;
3921	default:
3922		ret = -EINVAL; /* should be BUG() ? */
3923		break;
3924	}
3925	return ret;
3926}
3927
3928static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
3929		unsigned long long *mem_limit, unsigned long long *memsw_limit)
3930{
3931	struct cgroup *cgroup;
3932	unsigned long long min_limit, min_memsw_limit, tmp;
3933
3934	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3935	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3936	cgroup = memcg->css.cgroup;
3937	if (!memcg->use_hierarchy)
3938		goto out;
3939
3940	while (cgroup->parent) {
3941		cgroup = cgroup->parent;
3942		memcg = mem_cgroup_from_cont(cgroup);
3943		if (!memcg->use_hierarchy)
3944			break;
3945		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
3946		min_limit = min(min_limit, tmp);
3947		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3948		min_memsw_limit = min(min_memsw_limit, tmp);
3949	}
3950out:
3951	*mem_limit = min_limit;
3952	*memsw_limit = min_memsw_limit;
3953}
3954
3955static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
3956{
3957	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3958	int type, name;
3959
3960	type = MEMFILE_TYPE(event);
3961	name = MEMFILE_ATTR(event);
3962
3963	if (!do_swap_account && type == _MEMSWAP)
3964		return -EOPNOTSUPP;
3965
3966	switch (name) {
3967	case RES_MAX_USAGE:
3968		if (type == _MEM)
3969			res_counter_reset_max(&memcg->res);
3970		else
3971			res_counter_reset_max(&memcg->memsw);
3972		break;
3973	case RES_FAILCNT:
3974		if (type == _MEM)
3975			res_counter_reset_failcnt(&memcg->res);
3976		else
3977			res_counter_reset_failcnt(&memcg->memsw);
3978		break;
3979	}
3980
3981	return 0;
3982}
3983
3984static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
3985					struct cftype *cft)
3986{
3987	return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
3988}
3989
3990#ifdef CONFIG_MMU
3991static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3992					struct cftype *cft, u64 val)
3993{
3994	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3995
3996	if (val >= (1 << NR_MOVE_TYPE))
3997		return -EINVAL;
3998	/*
3999	 * We check this value several times in both can_attach() and
4000	 * attach(), so we need the cgroup lock to prevent this value from
4001	 * becoming inconsistent.
4002	 */
4003	cgroup_lock();
4004	memcg->move_charge_at_immigrate = val;
4005	cgroup_unlock();
4006
4007	return 0;
4008}
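/*
 * Illustrative note (not part of this file): the value written to
 * memory.move_charge_at_immigrate is a bitmask of move types; per
 * Documentation/cgroups/memory.txt, bit 0 selects anonymous pages (and their
 * swap) and bit 1 selects file pages, so writing 3 enables both, while any
 * value >= (1 << NR_MOVE_TYPE) is rejected with -EINVAL as above.
 */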
4009#else
4010static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
4011					struct cftype *cft, u64 val)
4012{
4013	return -ENOSYS;
4014}
4015#endif
4016
4017#ifdef CONFIG_NUMA
4018static int mem_control_numa_stat_show(struct cgroup *cont, struct cftype *cft,
4019				      struct seq_file *m)
4020{
4021	int nid;
4022	unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
4023	unsigned long node_nr;
4024	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
4025
4026	total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
4027	seq_printf(m, "total=%lu", total_nr);
4028	for_each_node_state(nid, N_HIGH_MEMORY) {
4029		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL);
4030		seq_printf(m, " N%d=%lu", nid, node_nr);
4031	}
4032	seq_putc(m, '\n');
4033
4034	file_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE);
4035	seq_printf(m, "file=%lu", file_nr);
4036	for_each_node_state(nid, N_HIGH_MEMORY) {
4037		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
4038				LRU_ALL_FILE);
4039		seq_printf(m, " N%d=%lu", nid, node_nr);
4040	}
4041	seq_putc(m, '\n');
4042
4043	anon_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_ANON);
4044	seq_printf(m, "anon=%lu", anon_nr);
4045	for_each_node_state(nid, N_HIGH_MEMORY) {
4046		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
4047				LRU_ALL_ANON);
4048		seq_printf(m, " N%d=%lu", nid, node_nr);
4049	}
4050	seq_putc(m, '\n');
4051
4052	unevictable_nr = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
4053	seq_printf(m, "unevictable=%lu", unevictable_nr);
4054	for_each_node_state(nid, N_HIGH_MEMORY) {
4055		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
4056				BIT(LRU_UNEVICTABLE));
4057		seq_printf(m, " N%d=%lu", nid, node_nr);
4058	}
4059	seq_putc(m, '\n');
4060	return 0;
4061}
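/*
 * Illustrative output of memory.numa_stat as produced above (a two-node
 * system, counts in pages; the numbers are made up):
 *
 *	total=240 N0=180 N1=60
 *	file=120 N0=90 N1=30
 *	anon=116 N0=88 N1=28
 *	unevictable=4 N0=2 N1=2
 */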
4062#endif /* CONFIG_NUMA */
4063
4064static const char * const mem_cgroup_lru_names[] = {
4065	"inactive_anon",
4066	"active_anon",
4067	"inactive_file",
4068	"active_file",
4069	"unevictable",
4070};
4071
4072static inline void mem_cgroup_lru_names_not_uptodate(void)
4073{
4074	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
4075}
4076
4077static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
4078				 struct seq_file *m)
4079{
4080	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
4081	struct mem_cgroup *mi;
4082	unsigned int i;
4083	unsigned int i;
4084	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
4085		if (i == MEM_CGROUP_STAT_SWAPOUT && !do_swap_account)
4086			continue;
4087		seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
4088			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
4089	}
4090
4091	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
4092		seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
4093			   mem_cgroup_read_events(memcg, i));
4094
4095	for (i = 0; i < NR_LRU_LISTS; i++)
4096		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
4097			   mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
4098
4099	/* Hierarchical information */
4100	{
4101		unsigned long long limit, memsw_limit;
4102		memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
4103		seq_printf(m, "hierarchical_memory_limit %llu\n", limit);
4104		if (do_swap_account)
4105			seq_printf(m, "hierarchical_memsw_limit %llu\n",
4106				   memsw_limit);
4107	}
4108
4109	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
4110		long long val = 0;
4111
4112		if (i == MEM_CGROUP_STAT_SWAPOUT && !do_swap_account)
4113			continue;
4114		for_each_mem_cgroup_tree(mi, memcg)
4115			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
4116		seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
4117	}
4118
4119	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
4120		unsigned long long val = 0;
4121
4122		for_each_mem_cgroup_tree(mi, memcg)
4123			val += mem_cgroup_read_events(mi, i);
4124		seq_printf(m, "total_%s %llu\n",
4125			   mem_cgroup_events_names[i], val);
4126	}
4127
4128	for (i = 0; i < NR_LRU_LISTS; i++) {
4129		unsigned long long val = 0;
4130
4131		for_each_mem_cgroup_tree(mi, memcg)
4132			val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
4133		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
4134	}
4135
4136#ifdef CONFIG_DEBUG_VM
4137	{
4138		int nid, zid;
4139		struct mem_cgroup_per_zone *mz;
4140		struct zone_reclaim_stat *rstat;
4141		unsigned long recent_rotated[2] = {0, 0};
4142		unsigned long recent_scanned[2] = {0, 0};
4143
4144		for_each_online_node(nid)
4145			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
4146				mz = mem_cgroup_zoneinfo(memcg, nid, zid);
4147				rstat = &mz->lruvec.reclaim_stat;
4148
4149				recent_rotated[0] += rstat->recent_rotated[0];
4150				recent_rotated[1] += rstat->recent_rotated[1];
4151				recent_scanned[0] += rstat->recent_scanned[0];
4152				recent_scanned[1] += rstat->recent_scanned[1];
4153			}
4154		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
4155		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
4156		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
4157		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
4158	}
4159#endif
4160
4161	return 0;
4162}
4163
4164static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
4165{
4166	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4167
4168	return mem_cgroup_swappiness(memcg);
4169}
4170
4171static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
4172				       u64 val)
4173{
4174	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4175	struct mem_cgroup *parent;
4176
4177	if (val > 100)
4178		return -EINVAL;
4179
4180	if (cgrp->parent == NULL)
4181		return -EINVAL;
4182
4183	parent = mem_cgroup_from_cont(cgrp->parent);
4184
4185	cgroup_lock();
4186
4187	/* If under hierarchy, only empty-root can set this value */
4188	if ((parent->use_hierarchy) ||
4189	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
4190		cgroup_unlock();
4191		return -EINVAL;
4192	}
4193
4194	memcg->swappiness = val;
4195
4196	cgroup_unlock();
4197
4198	return 0;
4199}
4200
4201static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4202{
4203	struct mem_cgroup_threshold_ary *t;
4204	u64 usage;
4205	int i;
4206
4207	rcu_read_lock();
4208	if (!swap)
4209		t = rcu_dereference(memcg->thresholds.primary);
4210	else
4211		t = rcu_dereference(memcg->memsw_thresholds.primary);
4212
4213	if (!t)
4214		goto unlock;
4215
4216	usage = mem_cgroup_usage(memcg, swap);
4217
4218	/*
4219	 * current_threshold points to the threshold just below or equal to usage.
4220	 * If that is no longer true, a threshold was crossed after the last
4221	 * call of __mem_cgroup_threshold().
4222	 */
4223	i = t->current_threshold;
4224
4225	/*
4226	 * Iterate backward over array of thresholds starting from
4227	 * current_threshold and check if a threshold is crossed.
4228	 * If none of thresholds below usage is crossed, we read
4229	 * only one element of the array here.
4230	 */
4231	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4232		eventfd_signal(t->entries[i].eventfd, 1);
4233
4234	/* i = current_threshold + 1 */
4235	i++;
4236
4237	/*
4238	 * Iterate forward over array of thresholds starting from
4239	 * current_threshold+1 and check if a threshold is crossed.
4240	 * If none of thresholds above usage is crossed, we read
4241	 * only one element of the array here.
4242	 */
4243	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4244		eventfd_signal(t->entries[i].eventfd, 1);
4245
4246	/* Update current_threshold */
4247	t->current_threshold = i - 1;
4248unlock:
4249	rcu_read_unlock();
4250}
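/*
 * Illustrative walk-through (not part of this file) of the scan above:
 * with registered thresholds of 4M, 8M and 16M and a previous usage of 10M,
 * current_threshold points at the 8M entry.  If usage rises to 20M, the
 * backward scan signals nothing (8M <= 20M), the forward scan signals the
 * 16M eventfd, and current_threshold is left pointing at the 16M entry.
 * If usage later falls to 3M, the backward scan signals the 16M, 8M and 4M
 * eventfds and current_threshold ends up at -1.
 */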
4251
4252static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4253{
4254	while (memcg) {
4255		__mem_cgroup_threshold(memcg, false);
4256		if (do_swap_account)
4257			__mem_cgroup_threshold(memcg, true);
4258
4259		memcg = parent_mem_cgroup(memcg);
4260	}
4261}
4262
4263static int compare_thresholds(const void *a, const void *b)
4264{
4265	const struct mem_cgroup_threshold *_a = a;
4266	const struct mem_cgroup_threshold *_b = b;
4267
4268	return _a->threshold - _b->threshold;
4269}
4270
4271static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4272{
4273	struct mem_cgroup_eventfd_list *ev;
4274
4275	list_for_each_entry(ev, &memcg->oom_notify, list)
4276		eventfd_signal(ev->eventfd, 1);
4277	return 0;
4278}
4279
4280static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4281{
4282	struct mem_cgroup *iter;
4283
4284	for_each_mem_cgroup_tree(iter, memcg)
4285		mem_cgroup_oom_notify_cb(iter);
4286}
4287
4288static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
4289	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4290{
4291	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4292	struct mem_cgroup_thresholds *thresholds;
4293	struct mem_cgroup_threshold_ary *new;
4294	int type = MEMFILE_TYPE(cft->private);
4295	u64 threshold, usage;
4296	int i, size, ret;
4297
4298	ret = res_counter_memparse_write_strategy(args, &threshold);
4299	if (ret)
4300		return ret;
4301
4302	mutex_lock(&memcg->thresholds_lock);
4303
4304	if (type == _MEM)
4305		thresholds = &memcg->thresholds;
4306	else if (type == _MEMSWAP)
4307		thresholds = &memcg->memsw_thresholds;
4308	else
4309		BUG();
4310
4311	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4312
4313	/* Check if a threshold crossed before adding a new one */
4314	if (thresholds->primary)
4315		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4316
4317	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4318
4319	/* Allocate memory for new array of thresholds */
4320	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
4321			GFP_KERNEL);
4322	if (!new) {
4323		ret = -ENOMEM;
4324		goto unlock;
4325	}
4326	new->size = size;
4327
4328	/* Copy thresholds (if any) to new array */
4329	if (thresholds->primary) {
4330		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
4331				sizeof(struct mem_cgroup_threshold));
4332	}
4333
4334	/* Add new threshold */
4335	new->entries[size - 1].eventfd = eventfd;
4336	new->entries[size - 1].threshold = threshold;
4337
4338	/* Sort thresholds. Registering of new threshold isn't time-critical */
4339	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
4340			compare_thresholds, NULL);
4341
4342	/* Find current threshold */
4343	new->current_threshold = -1;
4344	for (i = 0; i < size; i++) {
4345		if (new->entries[i].threshold <= usage) {
4346			/*
4347			 * new->current_threshold will not be used until
4348			 * rcu_assign_pointer(), so it's safe to increment
4349			 * it here.
4350			 */
4351			++new->current_threshold;
4352		} else
4353			break;
4354	}
4355
4356	/* Free old spare buffer and save old primary buffer as spare */
4357	kfree(thresholds->spare);
4358	thresholds->spare = thresholds->primary;
4359
4360	rcu_assign_pointer(thresholds->primary, new);
4361
4362	/* To be sure that nobody uses thresholds */
4363	synchronize_rcu();
4364
4365unlock:
4366	mutex_unlock(&memcg->thresholds_lock);
4367
4368	return ret;
4369}
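/*
 * Illustrative user-space sketch (not part of this file) of how the handler
 * above gets invoked: a threshold is registered by writing
 * "<event_fd> <fd of memory.usage_in_bytes> <threshold>" to
 * cgroup.event_control.  The mount point is only an example; headers and
 * error handling are omitted.
 *
 *	char buf[64];
 *	uint64_t count;
 *	int efd = eventfd(0, 0);
 *	int ufd = open("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes",
 *		       O_RDONLY);
 *	int cfd = open("/sys/fs/cgroup/memory/grp/cgroup.event_control",
 *		       O_WRONLY);
 *
 *	snprintf(buf, sizeof(buf), "%d %d %llu", efd, ufd, 64ULL << 20);
 *	write(cfd, buf, strlen(buf));
 *
 *	read(efd, &count, sizeof(count));
 *
 * The final read() blocks until the 64M threshold is crossed.
 */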
4370
4371static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
4372	struct cftype *cft, struct eventfd_ctx *eventfd)
4373{
4374	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4375	struct mem_cgroup_thresholds *thresholds;
4376	struct mem_cgroup_threshold_ary *new;
4377	int type = MEMFILE_TYPE(cft->private);
4378	u64 usage;
4379	int i, j, size;
4380
4381	mutex_lock(&memcg->thresholds_lock);
4382	if (type == _MEM)
4383		thresholds = &memcg->thresholds;
4384	else if (type == _MEMSWAP)
4385		thresholds = &memcg->memsw_thresholds;
4386	else
4387		BUG();
4388
4389	if (!thresholds->primary)
4390		goto unlock;
4391
4392	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4393
4394	/* Check if a threshold crossed before removing */
4395	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4396
4397	/* Calculate new number of threshold */
4398	size = 0;
4399	for (i = 0; i < thresholds->primary->size; i++) {
4400		if (thresholds->primary->entries[i].eventfd != eventfd)
4401			size++;
4402	}
4403
4404	new = thresholds->spare;
4405
4406	/* Set thresholds array to NULL if we don't have thresholds */
4407	if (!size) {
4408		kfree(new);
4409		new = NULL;
4410		goto swap_buffers;
4411	}
4412
4413	new->size = size;
4414
4415	/* Copy thresholds and find current threshold */
4416	new->current_threshold = -1;
4417	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4418		if (thresholds->primary->entries[i].eventfd == eventfd)
4419			continue;
4420
4421		new->entries[j] = thresholds->primary->entries[i];
4422		if (new->entries[j].threshold <= usage) {
4423			/*
4424			 * new->current_threshold will not be used
4425			 * until rcu_assign_pointer(), so it's safe to increment
4426			 * it here.
4427			 */
4428			++new->current_threshold;
4429		}
4430		j++;
4431	}
4432
4433swap_buffers:
4434	/* Swap primary and spare array */
4435	thresholds->spare = thresholds->primary;
4436	/* If all events are unregistered, free the spare array */
4437	if (!new) {
4438		kfree(thresholds->spare);
4439		thresholds->spare = NULL;
4440	}
4441
4442	rcu_assign_pointer(thresholds->primary, new);
4443
4444	/* To be sure that nobody uses thresholds */
4445	synchronize_rcu();
4446unlock:
4447	mutex_unlock(&memcg->thresholds_lock);
4448}
4449
4450static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
4451	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4452{
4453	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4454	struct mem_cgroup_eventfd_list *event;
4455	int type = MEMFILE_TYPE(cft->private);
4456
4457	BUG_ON(type != _OOM_TYPE);
4458	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4459	if (!event)
4460		return -ENOMEM;
4461
4462	spin_lock(&memcg_oom_lock);
4463
4464	event->eventfd = eventfd;
4465	list_add(&event->list, &memcg->oom_notify);
4466
4467	/* already in OOM ? */
4468	if (atomic_read(&memcg->under_oom))
4469		eventfd_signal(eventfd, 1);
4470	spin_unlock(&memcg_oom_lock);
4471
4472	return 0;
4473}
4474
4475static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
4476	struct cftype *cft, struct eventfd_ctx *eventfd)
4477{
4478	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4479	struct mem_cgroup_eventfd_list *ev, *tmp;
4480	int type = MEMFILE_TYPE(cft->private);
4481
4482	BUG_ON(type != _OOM_TYPE);
4483
4484	spin_lock(&memcg_oom_lock);
4485
4486	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4487		if (ev->eventfd == eventfd) {
4488			list_del(&ev->list);
4489			kfree(ev);
4490		}
4491	}
4492
4493	spin_unlock(&memcg_oom_lock);
4494}
4495
4496static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
4497	struct cftype *cft,  struct cgroup_map_cb *cb)
4498{
4499	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4500
4501	cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable);
4502
4503	if (atomic_read(&memcg->under_oom))
4504		cb->fill(cb, "under_oom", 1);
4505	else
4506		cb->fill(cb, "under_oom", 0);
4507	return 0;
4508}
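/*
 * Illustrative output (not part of this file): reading memory.oom_control,
 * which is backed by the function above, yields e.g.
 *
 *	oom_kill_disable 0
 *	under_oom 0
 *
 * and writing 1 to the same file (handled below) disables the OOM killer
 * for the group.
 */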
4509
4510static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
4511	struct cftype *cft, u64 val)
4512{
4513	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4514	struct mem_cgroup *parent;
4515
4516	/* cannot set to root cgroup and only 0 and 1 are allowed */
4517	if (!cgrp->parent || !((val == 0) || (val == 1)))
4518		return -EINVAL;
4519
4520	parent = mem_cgroup_from_cont(cgrp->parent);
4521
4522	cgroup_lock();
4523	/* oom-kill-disable is a flag for subhierarchy. */
4524	if ((parent->use_hierarchy) ||
4525	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
4526		cgroup_unlock();
4527		return -EINVAL;
4528	}
4529	memcg->oom_kill_disable = val;
4530	if (!val)
4531		memcg_oom_recover(memcg);
4532	cgroup_unlock();
4533	return 0;
4534}
4535
4536#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
4537static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
4538{
4539	return mem_cgroup_sockets_init(memcg, ss);
4540};
4541
4542static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
4543{
4544	mem_cgroup_sockets_destroy(memcg);
4545}
4546#else
4547static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
4548{
4549	return 0;
4550}
4551
4552static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
4553{
4554}
4555#endif
4556
4557static struct cftype mem_cgroup_files[] = {
4558	{
4559		.name = "usage_in_bytes",
4560		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4561		.read = mem_cgroup_read,
4562		.register_event = mem_cgroup_usage_register_event,
4563		.unregister_event = mem_cgroup_usage_unregister_event,
4564	},
4565	{
4566		.name = "max_usage_in_bytes",
4567		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4568		.trigger = mem_cgroup_reset,
4569		.read = mem_cgroup_read,
4570	},
4571	{
4572		.name = "limit_in_bytes",
4573		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4574		.write_string = mem_cgroup_write,
4575		.read = mem_cgroup_read,
4576	},
4577	{
4578		.name = "soft_limit_in_bytes",
4579		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4580		.write_string = mem_cgroup_write,
4581		.read = mem_cgroup_read,
4582	},
4583	{
4584		.name = "failcnt",
4585		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4586		.trigger = mem_cgroup_reset,
4587		.read = mem_cgroup_read,
4588	},
4589	{
4590		.name = "stat",
4591		.read_seq_string = mem_control_stat_show,
4592	},
4593	{
4594		.name = "force_empty",
4595		.trigger = mem_cgroup_force_empty_write,
4596	},
4597	{
4598		.name = "use_hierarchy",
4599		.write_u64 = mem_cgroup_hierarchy_write,
4600		.read_u64 = mem_cgroup_hierarchy_read,
4601	},
4602	{
4603		.name = "swappiness",
4604		.read_u64 = mem_cgroup_swappiness_read,
4605		.write_u64 = mem_cgroup_swappiness_write,
4606	},
4607	{
4608		.name = "move_charge_at_immigrate",
4609		.read_u64 = mem_cgroup_move_charge_read,
4610		.write_u64 = mem_cgroup_move_charge_write,
4611	},
4612	{
4613		.name = "oom_control",
4614		.read_map = mem_cgroup_oom_control_read,
4615		.write_u64 = mem_cgroup_oom_control_write,
4616		.register_event = mem_cgroup_oom_register_event,
4617		.unregister_event = mem_cgroup_oom_unregister_event,
4618		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4619	},
4620#ifdef CONFIG_NUMA
4621	{
4622		.name = "numa_stat",
4623		.read_seq_string = mem_control_numa_stat_show,
4624	},
4625#endif
4626#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4627	{
4628		.name = "memsw.usage_in_bytes",
4629		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
4630		.read = mem_cgroup_read,
4631		.register_event = mem_cgroup_usage_register_event,
4632		.unregister_event = mem_cgroup_usage_unregister_event,
4633	},
4634	{
4635		.name = "memsw.max_usage_in_bytes",
4636		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
4637		.trigger = mem_cgroup_reset,
4638		.read = mem_cgroup_read,
4639	},
4640	{
4641		.name = "memsw.limit_in_bytes",
4642		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
4643		.write_string = mem_cgroup_write,
4644		.read = mem_cgroup_read,
4645	},
4646	{
4647		.name = "memsw.failcnt",
4648		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
4649		.trigger = mem_cgroup_reset,
4650		.read = mem_cgroup_read,
4651	},
4652#endif
4653	{ },	/* terminate */
4654};
4655
4656static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4657{
4658	struct mem_cgroup_per_node *pn;
4659	struct mem_cgroup_per_zone *mz;
4660	int zone, tmp = node;
4661	/*
4662	 * This routine is called against possible nodes,
4663	 * but it's a BUG to call kmalloc() against an offline node.
4664	 *
4665	 * TODO: this routine can waste a lot of memory for nodes which will
4666	 *       never be onlined. It would be better to use a memory hotplug
4667	 *       callback function.
4668	 */
4669	if (!node_state(node, N_NORMAL_MEMORY))
4670		tmp = -1;
4671	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4672	if (!pn)
4673		return 1;
4674
4675	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4676		mz = &pn->zoneinfo[zone];
4677		lruvec_init(&mz->lruvec, &NODE_DATA(node)->node_zones[zone]);
4678		mz->usage_in_excess = 0;
4679		mz->on_tree = false;
4680		mz->memcg = memcg;
4681	}
4682	memcg->info.nodeinfo[node] = pn;
4683	return 0;
4684}
4685
4686static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4687{
4688	kfree(memcg->info.nodeinfo[node]);
4689}
4690
4691static struct mem_cgroup *mem_cgroup_alloc(void)
4692{
4693	struct mem_cgroup *memcg;
4694	int size = sizeof(struct mem_cgroup);
4695
4696	/* Can be very big if MAX_NUMNODES is very big */
4697	if (size < PAGE_SIZE)
4698		memcg = kzalloc(size, GFP_KERNEL);
4699	else
4700		memcg = vzalloc(size);
4701
4702	if (!memcg)
4703		return NULL;
4704
4705	memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4706	if (!memcg->stat)
4707		goto out_free;
4708	spin_lock_init(&memcg->pcp_counter_lock);
4709	return memcg;
4710
4711out_free:
4712	if (size < PAGE_SIZE)
4713		kfree(memcg);
4714	else
4715		vfree(memcg);
4716	return NULL;
4717}
4718
4719/*
4720 * Helpers for freeing a kmalloc()ed/vzalloc()ed mem_cgroup by RCU,
4721 * but in process context.  The work_freeing structure is overlaid
4722 * on the rcu_freeing structure, which itself is overlaid on memsw.
4723 */
4724static void free_work(struct work_struct *work)
4725{
4726	struct mem_cgroup *memcg;
4727	int size = sizeof(struct mem_cgroup);
4728
4729	memcg = container_of(work, struct mem_cgroup, work_freeing);
4730	/*
4731	 * We need to make sure that (at least for now), the jump label
4732	 * destruction code runs outside of the cgroup lock. This is because
4733	 * get_online_cpus(), which is called from the static_branch update,
4734	 * can't be called inside the cgroup_lock. cpusets are the ones
4735	 * enforcing this dependency, so if they ever change, we might as well.
4736	 *
4737	 * schedule_work() will guarantee this happens. Be careful if you need
4738	 * to move this code around, and make sure it is outside
4739	 * the cgroup_lock.
4740	 */
4741	disarm_sock_keys(memcg);
4742	if (size < PAGE_SIZE)
4743		kfree(memcg);
4744	else
4745		vfree(memcg);
4746}
4747
4748static void free_rcu(struct rcu_head *rcu_head)
4749{
4750	struct mem_cgroup *memcg;
4751
4752	memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
4753	INIT_WORK(&memcg->work_freeing, free_work);
4754	schedule_work(&memcg->work_freeing);
4755}
4756
4757/*
4758 * When destroying a mem_cgroup, references from swap_cgroup can remain.
4759 * (Scanning them all at force_empty is too costly...)
4760 *
4761 * Instead of clearing all references at force_empty, we remember
4762 * the number of references from swap_cgroup and free the mem_cgroup when
4763 * it goes down to 0.
4764 *
4765 * Removal of the cgroup itself succeeds regardless of refs from swap.
4766 */
4767
4768static void __mem_cgroup_free(struct mem_cgroup *memcg)
4769{
4770	int node;
4771
4772	mem_cgroup_remove_from_trees(memcg);
4773	free_css_id(&mem_cgroup_subsys, &memcg->css);
4774
4775	for_each_node(node)
4776		free_mem_cgroup_per_zone_info(memcg, node);
4777
4778	free_percpu(memcg->stat);
4779	call_rcu(&memcg->rcu_freeing, free_rcu);
4780}
4781
4782static void mem_cgroup_get(struct mem_cgroup *memcg)
4783{
4784	atomic_inc(&memcg->refcnt);
4785}
4786
4787static void __mem_cgroup_put(struct mem_cgroup *memcg, int count)
4788{
4789	if (atomic_sub_and_test(count, &memcg->refcnt)) {
4790		struct mem_cgroup *parent = parent_mem_cgroup(memcg);
4791		__mem_cgroup_free(memcg);
4792		if (parent)
4793			mem_cgroup_put(parent);
4794	}
4795}
4796
4797static void mem_cgroup_put(struct mem_cgroup *memcg)
4798{
4799	__mem_cgroup_put(memcg, 1);
4800}
4801
4802/*
4803 * Returns the parent mem_cgroup in the memcg hierarchy, with hierarchy enabled.
4804 */
4805struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
4806{
4807	if (!memcg->res.parent)
4808		return NULL;
4809	return mem_cgroup_from_res_counter(memcg->res.parent, res);
4810}
4811EXPORT_SYMBOL(parent_mem_cgroup);
4812
4813#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4814static void __init enable_swap_cgroup(void)
4815{
4816	if (!mem_cgroup_disabled() && really_do_swap_account)
4817		do_swap_account = 1;
4818}
4819#else
4820static void __init enable_swap_cgroup(void)
4821{
4822}
4823#endif
4824
4825static int mem_cgroup_soft_limit_tree_init(void)
4826{
4827	struct mem_cgroup_tree_per_node *rtpn;
4828	struct mem_cgroup_tree_per_zone *rtpz;
4829	int tmp, node, zone;
4830
4831	for_each_node(node) {
4832		tmp = node;
4833		if (!node_state(node, N_NORMAL_MEMORY))
4834			tmp = -1;
4835		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
4836		if (!rtpn)
4837			goto err_cleanup;
4838
4839		soft_limit_tree.rb_tree_per_node[node] = rtpn;
4840
4841		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4842			rtpz = &rtpn->rb_tree_per_zone[zone];
4843			rtpz->rb_root = RB_ROOT;
4844			spin_lock_init(&rtpz->lock);
4845		}
4846	}
4847	return 0;
4848
4849err_cleanup:
4850	for_each_node(node) {
4851		if (!soft_limit_tree.rb_tree_per_node[node])
4852			break;
4853		kfree(soft_limit_tree.rb_tree_per_node[node]);
4854		soft_limit_tree.rb_tree_per_node[node] = NULL;
4855	}
4856	return 1;
4857
4858}
4859
4860static struct cgroup_subsys_state * __ref
4861mem_cgroup_create(struct cgroup *cont)
4862{
4863	struct mem_cgroup *memcg, *parent;
4864	long error = -ENOMEM;
4865	int node;
4866
4867	memcg = mem_cgroup_alloc();
4868	if (!memcg)
4869		return ERR_PTR(error);
4870
4871	for_each_node(node)
4872		if (alloc_mem_cgroup_per_zone_info(memcg, node))
4873			goto free_out;
4874
4875	/* root ? */
4876	if (cont->parent == NULL) {
4877		int cpu;
4878		enable_swap_cgroup();
4879		parent = NULL;
4880		if (mem_cgroup_soft_limit_tree_init())
4881			goto free_out;
4882		root_mem_cgroup = memcg;
4883		for_each_possible_cpu(cpu) {
4884			struct memcg_stock_pcp *stock =
4885						&per_cpu(memcg_stock, cpu);
4886			INIT_WORK(&stock->work, drain_local_stock);
4887		}
4888		hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
4889	} else {
4890		parent = mem_cgroup_from_cont(cont->parent);
4891		memcg->use_hierarchy = parent->use_hierarchy;
4892		memcg->oom_kill_disable = parent->oom_kill_disable;
4893	}
4894
4895	if (parent && parent->use_hierarchy) {
4896		res_counter_init(&memcg->res, &parent->res);
4897		res_counter_init(&memcg->memsw, &parent->memsw);
4898		/*
4899		 * We increment refcnt of the parent to ensure that we can
4900		 * safely access it on res_counter_charge/uncharge.
4901		 * This refcnt will be decremented when freeing this
4902		 * mem_cgroup(see mem_cgroup_put).
4903		 */
4904		mem_cgroup_get(parent);
4905	} else {
4906		res_counter_init(&memcg->res, NULL);
4907		res_counter_init(&memcg->memsw, NULL);
4908	}
4909	memcg->last_scanned_node = MAX_NUMNODES;
4910	INIT_LIST_HEAD(&memcg->oom_notify);
4911
4912	if (parent)
4913		memcg->swappiness = mem_cgroup_swappiness(parent);
4914	atomic_set(&memcg->refcnt, 1);
4915	memcg->move_charge_at_immigrate = 0;
4916	mutex_init(&memcg->thresholds_lock);
4917	spin_lock_init(&memcg->move_lock);
4918
4919	error = memcg_init_kmem(memcg, &mem_cgroup_subsys);
4920	if (error) {
4921		/*
4922		 * We call put now because our (and parent's) refcnts
4923		 * are already in place. mem_cgroup_put() will internally
4924		 * call __mem_cgroup_free, so return directly
4925		 */
4926		mem_cgroup_put(memcg);
4927		return ERR_PTR(error);
4928	}
4929	return &memcg->css;
4930free_out:
4931	__mem_cgroup_free(memcg);
4932	return ERR_PTR(error);
4933}
4934
4935static int mem_cgroup_pre_destroy(struct cgroup *cont)
4936{
4937	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
4938
4939	return mem_cgroup_force_empty(memcg, false);
4940}
4941
4942static void mem_cgroup_destroy(struct cgroup *cont)
4943{
4944	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
4945
4946	kmem_cgroup_destroy(memcg);
4947
4948	mem_cgroup_put(memcg);
4949}
4950
4951#ifdef CONFIG_MMU
4952/* Handlers for move charge at task migration. */
4953#define PRECHARGE_COUNT_AT_ONCE	256
4954static int mem_cgroup_do_precharge(unsigned long count)
4955{
4956	int ret = 0;
4957	int batch_count = PRECHARGE_COUNT_AT_ONCE;
4958	struct mem_cgroup *memcg = mc.to;
4959
4960	if (mem_cgroup_is_root(memcg)) {
4961		mc.precharge += count;
4962		/* we don't need css_get for root */
4963		return ret;
4964	}
4965	/* try to charge at once */
4966	if (count > 1) {
4967		struct res_counter *dummy;
4968		/*
4969		 * "memcg" cannot be under rmdir() because we've already checked
4970		 * by cgroup_lock_live_cgroup() that it is not removed and we
4971		 * are still under the same cgroup_mutex. So we can postpone
4972		 * css_get().
4973		 */
4974		if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
4975			goto one_by_one;
4976		if (do_swap_account && res_counter_charge(&memcg->memsw,
4977						PAGE_SIZE * count, &dummy)) {
4978			res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
4979			goto one_by_one;
4980		}
4981		mc.precharge += count;
4982		return ret;
4983	}
4984one_by_one:
4985	/* fall back to one by one charge */
4986	while (count--) {
4987		if (signal_pending(current)) {
4988			ret = -EINTR;
4989			break;
4990		}
4991		if (!batch_count--) {
4992			batch_count = PRECHARGE_COUNT_AT_ONCE;
4993			cond_resched();
4994		}
4995		ret = __mem_cgroup_try_charge(NULL,
4996					GFP_KERNEL, 1, &memcg, false);
4997		if (ret)
4998			/* mem_cgroup_clear_mc() will do uncharge later */
4999			return ret;
5000		mc.precharge++;
5001	}
5002	return ret;
5003}
5004
5005/**
5006 * get_mctgt_type - get target type of moving charge
5007 * @vma: the vma the pte to be checked belongs to
5008 * @addr: the address corresponding to the pte to be checked
5009 * @ptent: the pte to be checked
5010 * @target: the pointer where the target page or swap entry is stored (can be NULL)
5011 *
5012 * Returns
5013 *   0 (MC_TARGET_NONE): if the pte is not a target for move charge.
5014 *   1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5015 *     move charge. If @target is not NULL, the page is stored in target->page
5016 *     with an extra refcount taken (callers should handle it).
5017 *   2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5018 *     target for charge migration. If @target is not NULL, the entry is stored
5019 *     in target->ent.
5020 *
5021 * Called with pte lock held.
5022 */
5023union mc_target {
5024	struct page	*page;
5025	swp_entry_t	ent;
5026};
5027
5028enum mc_target_type {
5029	MC_TARGET_NONE = 0,
5030	MC_TARGET_PAGE,
5031	MC_TARGET_SWAP,
5032};
5033
5034static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5035						unsigned long addr, pte_t ptent)
5036{
5037	struct page *page = vm_normal_page(vma, addr, ptent);
5038
5039	if (!page || !page_mapped(page))
5040		return NULL;
5041	if (PageAnon(page)) {
5042		/* we don't move shared anon */
5043		if (!move_anon())
5044			return NULL;
5045	} else if (!move_file())
5046		/* we ignore mapcount for file pages */
5047		return NULL;
5048	if (!get_page_unless_zero(page))
5049		return NULL;
5050
5051	return page;
5052}
5053
5054#ifdef CONFIG_SWAP
5055static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5056			unsigned long addr, pte_t ptent, swp_entry_t *entry)
5057{
5058	struct page *page = NULL;
5059	swp_entry_t ent = pte_to_swp_entry(ptent);
5060
5061	if (!move_anon() || non_swap_entry(ent))
5062		return NULL;
5063	/*
5064	 * Because lookup_swap_cache() updates some statistics counter,
5065	 * we call find_get_page() with swapper_space directly.
5066	 */
5067	page = find_get_page(&swapper_space, ent.val);
5068	if (do_swap_account)
5069		entry->val = ent.val;
5070
5071	return page;
5072}
5073#else
5074static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5075			unsigned long addr, pte_t ptent, swp_entry_t *entry)
5076{
5077	return NULL;
5078}
5079#endif
5080
5081static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5082			unsigned long addr, pte_t ptent, swp_entry_t *entry)
5083{
5084	struct page *page = NULL;
5085	struct address_space *mapping;
5086	pgoff_t pgoff;
5087
5088	if (!vma->vm_file) /* anonymous vma */
5089		return NULL;
5090	if (!move_file())
5091		return NULL;
5092
5093	mapping = vma->vm_file->f_mapping;
5094	if (pte_none(ptent))
5095		pgoff = linear_page_index(vma, addr);
5096	else /* pte_file(ptent) is true */
5097		pgoff = pte_to_pgoff(ptent);
5098
5099	/* the page is moved even if it's not RSS of this task (not yet page-faulted). */
5100	page = find_get_page(mapping, pgoff);
5101
5102#ifdef CONFIG_SWAP
5103	/* shmem/tmpfs may report page out on swap: account for that too. */
5104	if (radix_tree_exceptional_entry(page)) {
5105		swp_entry_t swap = radix_to_swp_entry(page);
5106		if (do_swap_account)
5107			*entry = swap;
5108		page = find_get_page(&swapper_space, swap.val);
5109	}
5110#endif
5111	return page;
5112}
5113
5114static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5115		unsigned long addr, pte_t ptent, union mc_target *target)
5116{
5117	struct page *page = NULL;
5118	struct page_cgroup *pc;
5119	enum mc_target_type ret = MC_TARGET_NONE;
5120	swp_entry_t ent = { .val = 0 };
5121
5122	if (pte_present(ptent))
5123		page = mc_handle_present_pte(vma, addr, ptent);
5124	else if (is_swap_pte(ptent))
5125		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
5126	else if (pte_none(ptent) || pte_file(ptent))
5127		page = mc_handle_file_pte(vma, addr, ptent, &ent);
5128
5129	if (!page && !ent.val)
5130		return ret;
5131	if (page) {
5132		pc = lookup_page_cgroup(page);
5133		/*
5134		 * Do only a loose check here, without taking the page_cgroup lock;
5135		 * mem_cgroup_move_account() re-checks whether the pc is valid under
5136		 * the lock.
5137		 */
5138		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
5139			ret = MC_TARGET_PAGE;
5140			if (target)
5141				target->page = page;
5142		}
5143		if (!ret || !target)
5144			put_page(page);
5145	}
5146	/* There is a swap entry and the page either doesn't exist or isn't charged */
5147	if (ent.val && !ret &&
5148			css_id(&mc.from->css) == lookup_swap_cgroup_id(ent)) {
5149		ret = MC_TARGET_SWAP;
5150		if (target)
5151			target->ent = ent;
5152	}
5153	return ret;
5154}
5155
5156#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5157/*
5158 * We don't consider swapped-out or file-mapped pages because THP does not
5159 * support them for now.
5160 * The caller must make sure that pmd_trans_huge(pmd) is true.
5161 */
5162static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5163		unsigned long addr, pmd_t pmd, union mc_target *target)
5164{
5165	struct page *page = NULL;
5166	struct page_cgroup *pc;
5167	enum mc_target_type ret = MC_TARGET_NONE;
5168
5169	page = pmd_page(pmd);
5170	VM_BUG_ON(!page || !PageHead(page));
5171	if (!move_anon())
5172		return ret;
5173	pc = lookup_page_cgroup(page);
5174	if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
5175		ret = MC_TARGET_PAGE;
5176		if (target) {
5177			get_page(page);
5178			target->page = page;
5179		}
5180	}
5181	return ret;
5182}
5183#else
5184static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5185		unsigned long addr, pmd_t pmd, union mc_target *target)
5186{
5187	return MC_TARGET_NONE;
5188}
5189#endif
5190
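/*
 * First pass of charge moving: walk the page tables and count how many
 * charges would have to move, so that mem_cgroup_precharge_mc() can
 * charge that amount to mc.to up front.  The actual moving happens later
 * in mem_cgroup_move_charge_pte_range().
 */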
5191static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5192					unsigned long addr, unsigned long end,
5193					struct mm_walk *walk)
5194{
5195	struct vm_area_struct *vma = walk->private;
5196	pte_t *pte;
5197	spinlock_t *ptl;
5198
5199	if (pmd_trans_huge_lock(pmd, vma) == 1) {
5200		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5201			mc.precharge += HPAGE_PMD_NR;
5202		spin_unlock(&vma->vm_mm->page_table_lock);
5203		return 0;
5204	}
5205
5206	if (pmd_trans_unstable(pmd))
5207		return 0;
5208	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5209	for (; addr != end; pte++, addr += PAGE_SIZE)
5210		if (get_mctgt_type(vma, addr, *pte, NULL))
5211			mc.precharge++;	/* increment precharge temporarily */
5212	pte_unmap_unlock(pte - 1, ptl);
5213	cond_resched();
5214
5215	return 0;
5216}
5217
5218static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5219{
5220	unsigned long precharge;
5221	struct vm_area_struct *vma;
5222
5223	down_read(&mm->mmap_sem);
5224	for (vma = mm->mmap; vma; vma = vma->vm_next) {
5225		struct mm_walk mem_cgroup_count_precharge_walk = {
5226			.pmd_entry = mem_cgroup_count_precharge_pte_range,
5227			.mm = mm,
5228			.private = vma,
5229		};
5230		if (is_vm_hugetlb_page(vma))
5231			continue;
5232		walk_page_range(vma->vm_start, vma->vm_end,
5233					&mem_cgroup_count_precharge_walk);
5234	}
5235	up_read(&mm->mmap_sem);
5236
5237	precharge = mc.precharge;
5238	mc.precharge = 0;
5239
5240	return precharge;
5241}
5242
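/*
 * Take the whole move's worth of charge on mc.to up front (based on the
 * count from the first walk) and record the task that is performing the
 * move.
 */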
5243static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5244{
5245	unsigned long precharge = mem_cgroup_count_precharge(mm);
5246
5247	VM_BUG_ON(mc.moving_task);
5248	mc.moving_task = current;
5249	return mem_cgroup_do_precharge(precharge);
5250}
5251
5252/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5253static void __mem_cgroup_clear_mc(void)
5254{
5255	struct mem_cgroup *from = mc.from;
5256	struct mem_cgroup *to = mc.to;
5257
5258	/* we must uncharge all the leftover precharges from mc.to */
5259	if (mc.precharge) {
5260		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
5261		mc.precharge = 0;
5262	}
5263	/*
5264	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5265	 * we must uncharge here.
5266	 */
5267	if (mc.moved_charge) {
5268		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
5269		mc.moved_charge = 0;
5270	}
5271	/* we must fixup refcnts and charges */
5272	if (mc.moved_swap) {
5273		/* uncharge swap account from the old cgroup */
5274		if (!mem_cgroup_is_root(mc.from))
5275			res_counter_uncharge(&mc.from->memsw,
5276						PAGE_SIZE * mc.moved_swap);
5277		__mem_cgroup_put(mc.from, mc.moved_swap);
5278
5279		if (!mem_cgroup_is_root(mc.to)) {
5280			/*
5281			 * we charged both to->res and to->memsw, so we should
5282			 * uncharge to->res.
5283			 */
5284			res_counter_uncharge(&mc.to->res,
5285						PAGE_SIZE * mc.moved_swap);
5286		}
5287		/* we've already done mem_cgroup_get(mc.to) */
5288		mc.moved_swap = 0;
5289	}
5290	memcg_oom_recover(from);
5291	memcg_oom_recover(to);
5292	wake_up_all(&mc.waitq);
5293}
5294
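/*
 * Unlike __mem_cgroup_clear_mc(), which only drops the outstanding
 * precharges and fixes up the counters, this also clears mc.from/mc.to
 * and tells the source memcg that the move has finished
 * (mem_cgroup_end_move()).
 */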
5295static void mem_cgroup_clear_mc(void)
5296{
5297	struct mem_cgroup *from = mc.from;
5298
5299	/*
5300	 * we must clear moving_task before waking up waiters at the end of
5301	 * task migration.
5302	 */
5303	mc.moving_task = NULL;
5304	__mem_cgroup_clear_mc();
5305	spin_lock(&mc.lock);
5306	mc.from = NULL;
5307	mc.to = NULL;
5308	spin_unlock(&mc.lock);
5309	mem_cgroup_end_move(from);
5310}
5311
5312static int mem_cgroup_can_attach(struct cgroup *cgroup,
5313				 struct cgroup_taskset *tset)
5314{
5315	struct task_struct *p = cgroup_taskset_first(tset);
5316	int ret = 0;
5317	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);
5318
5319	if (memcg->move_charge_at_immigrate) {
5320		struct mm_struct *mm;
5321		struct mem_cgroup *from = mem_cgroup_from_task(p);
5322
5323		VM_BUG_ON(from == memcg);
5324
5325		mm = get_task_mm(p);
5326		if (!mm)
5327			return 0;
5328		/* We move charges only when we move the owner of the mm */
5329		if (mm->owner == p) {
5330			VM_BUG_ON(mc.from);
5331			VM_BUG_ON(mc.to);
5332			VM_BUG_ON(mc.precharge);
5333			VM_BUG_ON(mc.moved_charge);
5334			VM_BUG_ON(mc.moved_swap);
5335			mem_cgroup_start_move(from);
5336			spin_lock(&mc.lock);
5337			mc.from = from;
5338			mc.to = memcg;
5339			spin_unlock(&mc.lock);
5340			/* We set mc.moving_task later */
5341
5342			ret = mem_cgroup_precharge_mc(mm);
5343			if (ret)
5344				mem_cgroup_clear_mc();
5345		}
5346		mmput(mm);
5347	}
5348	return ret;
5349}
5350
5351static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
5352				     struct cgroup_taskset *tset)
5353{
5354	mem_cgroup_clear_mc();
5355}
5356
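/*
 * Second pass of charge moving: for every pte (or trans-huge pmd) that
 * get_mctgt_type() reports as a target, move the charge from mc.from to
 * mc.to, consuming one unit of mc.precharge per base page.  If the
 * precharge runs out mid-walk, the pte lock is dropped, one more page is
 * charged via mem_cgroup_do_precharge(1) and the walk resumes.
 */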
5357static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5358				unsigned long addr, unsigned long end,
5359				struct mm_walk *walk)
5360{
5361	int ret = 0;
5362	struct vm_area_struct *vma = walk->private;
5363	pte_t *pte;
5364	spinlock_t *ptl;
5365	enum mc_target_type target_type;
5366	union mc_target target;
5367	struct page *page;
5368	struct page_cgroup *pc;
5369
5370	/*
5371	 * We don't take compound_lock() here but no race with splitting thp
5372	 * happens because:
5373	 *  - if pmd_trans_huge_lock() returns 1, the relevant thp is not
5374	 *    under splitting, which means there's no concurrent thp split,
5375	 *  - if another thread runs into split_huge_page() just after we
5376	 *    entered this if-block, the thread must wait for page table lock
5377	 *    to be unlocked in __split_huge_page_splitting(), where the main
5378	 *    part of thp split is not executed yet.
5379	 */
5380	if (pmd_trans_huge_lock(pmd, vma) == 1) {
5381		if (mc.precharge < HPAGE_PMD_NR) {
5382			spin_unlock(&vma->vm_mm->page_table_lock);
5383			return 0;
5384		}
5385		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
5386		if (target_type == MC_TARGET_PAGE) {
5387			page = target.page;
5388			if (!isolate_lru_page(page)) {
5389				pc = lookup_page_cgroup(page);
5390				if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
5391							pc, mc.from, mc.to)) {
5392					mc.precharge -= HPAGE_PMD_NR;
5393					mc.moved_charge += HPAGE_PMD_NR;
5394				}
5395				putback_lru_page(page);
5396			}
5397			put_page(page);
5398		}
5399		spin_unlock(&vma->vm_mm->page_table_lock);
5400		return 0;
5401	}
5402
5403	if (pmd_trans_unstable(pmd))
5404		return 0;
5405retry:
5406	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5407	for (; addr != end; addr += PAGE_SIZE) {
5408		pte_t ptent = *(pte++);
5409		swp_entry_t ent;
5410
5411		if (!mc.precharge)
5412			break;
5413
5414		switch (get_mctgt_type(vma, addr, ptent, &target)) {
5415		case MC_TARGET_PAGE:
5416			page = target.page;
5417			if (isolate_lru_page(page))
5418				goto put;
5419			pc = lookup_page_cgroup(page);
5420			if (!mem_cgroup_move_account(page, 1, pc,
5421						     mc.from, mc.to)) {
5422				mc.precharge--;
5423				/* we uncharge from mc.from later. */
5424				mc.moved_charge++;
5425			}
5426			putback_lru_page(page);
5427put:			/* drop the reference that get_mctgt_type() took */
5428			put_page(page);
5429			break;
5430		case MC_TARGET_SWAP:
5431			ent = target.ent;
5432			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
5433				mc.precharge--;
5434				/* we fixup refcnts and charges later. */
5435				mc.moved_swap++;
5436			}
5437			break;
5438		default:
5439			break;
5440		}
5441	}
5442	pte_unmap_unlock(pte - 1, ptl);
5443	cond_resched();
5444
5445	if (addr != end) {
5446		/*
5447		 * We have consumed all the precharges we obtained in can_attach().
5448		 * Try charging one page at a time from here on, but don't do any
5449		 * additional charges to mc.to once a charge has failed during the
5450		 * attach phase.
5451		 */
5452		ret = mem_cgroup_do_precharge(1);
5453		if (!ret)
5454			goto retry;
5455	}
5456
5457	return ret;
5458}
5459
5460static void mem_cgroup_move_charge(struct mm_struct *mm)
5461{
5462	struct vm_area_struct *vma;
5463
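	/*
	 * Drain the per-CPU LRU-add pagevecs first; pages still sitting in
	 * them are not on an LRU list yet, so isolate_lru_page() in the walk
	 * below would fail to isolate (and hence to move) them.
	 */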
5464	lru_add_drain_all();
5465retry:
5466	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
5467		/*
5468		 * Someone who is holding the mmap_sem might be waiting on our
5469		 * waitq. So we cancel all extra charges, wake up all waiters,
5470		 * and retry. Because we cancel the precharges, we might not be
5471		 * able to move enough charges, but moving charge is a best-effort
5472		 * feature anyway, so it isn't a big problem.
5473		 */
5474		__mem_cgroup_clear_mc();
5475		cond_resched();
5476		goto retry;
5477	}
5478	for (vma = mm->mmap; vma; vma = vma->vm_next) {
5479		int ret;
5480		struct mm_walk mem_cgroup_move_charge_walk = {
5481			.pmd_entry = mem_cgroup_move_charge_pte_range,
5482			.mm = mm,
5483			.private = vma,
5484		};
5485		if (is_vm_hugetlb_page(vma))
5486			continue;
5487		ret = walk_page_range(vma->vm_start, vma->vm_end,
5488						&mem_cgroup_move_charge_walk);
5489		if (ret)
5490			/*
5491			 * A non-zero return means we have consumed all precharges
5492			 * and failed to do an additional charge. Just abandon here.
5493			 */
5494			break;
5495	}
5496	up_read(&mm->mmap_sem);
5497}
5498
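/*
 * The cgroup "attach" callback: runs after the task has actually been
 * moved to the new cgroup and, if can_attach() set up a move (mc.to is
 * non-NULL), walks the mm and moves the charges using the precharges
 * taken earlier.
 */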
5499static void mem_cgroup_move_task(struct cgroup *cont,
5500				 struct cgroup_taskset *tset)
5501{
5502	struct task_struct *p = cgroup_taskset_first(tset);
5503	struct mm_struct *mm = get_task_mm(p);
5504
5505	if (mm) {
5506		if (mc.to)
5507			mem_cgroup_move_charge(mm);
5508		mmput(mm);
5509	}
5510	if (mc.to)
5511		mem_cgroup_clear_mc();
5512}
5513#else	/* !CONFIG_MMU */
5514static int mem_cgroup_can_attach(struct cgroup *cgroup,
5515				 struct cgroup_taskset *tset)
5516{
5517	return 0;
5518}
5519static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
5520				     struct cgroup_taskset *tset)
5521{
5522}
5523static void mem_cgroup_move_task(struct cgroup *cont,
5524				 struct cgroup_taskset *tset)
5525{
5526}
5527#endif
5528
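/*
 * cgroup subsystem registration for the memory controller: create,
 * pre_destroy and destroy manage mem_cgroup lifetime, while the
 * can_attach/cancel_attach/attach callbacks above implement the
 * move_charge_at_immigrate behaviour on task migration.
 */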
5529struct cgroup_subsys mem_cgroup_subsys = {
5530	.name = "memory",
5531	.subsys_id = mem_cgroup_subsys_id,
5532	.create = mem_cgroup_create,
5533	.pre_destroy = mem_cgroup_pre_destroy,
5534	.destroy = mem_cgroup_destroy,
5535	.can_attach = mem_cgroup_can_attach,
5536	.cancel_attach = mem_cgroup_cancel_attach,
5537	.attach = mem_cgroup_move_task,
5538	.base_cftypes = mem_cgroup_files,
5539	.early_init = 0,
5540	.use_id = 1,
5541	.__DEPRECATED_clear_css_refs = true,
5542};
5543
5544#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
5545static int __init enable_swap_account(char *s)
5546{
5547	/* consider enabled if no parameter or 1 is given */
5548	if (!strcmp(s, "1"))
5549		really_do_swap_account = 1;
5550	else if (!strcmp(s, "0"))
5551		really_do_swap_account = 0;
5552	return 1;
5553}
5554__setup("swapaccount=", enable_swap_account);
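/*
 * Example usage on the kernel command line:
 *	swapaccount=0	- do not account swap usage in memcg
 *	swapaccount=1	- account swap usage explicitly
 */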
5555
5556#endif