v5.9
   1/* SPDX-License-Identifier: GPL-2.0-or-later */
   2/* memcontrol.h - Memory Controller
   3 *
   4 * Copyright IBM Corporation, 2007
   5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
   6 *
   7 * Copyright 2007 OpenVZ SWsoft Inc
   8 * Author: Pavel Emelianov <xemul@openvz.org>
   9 */
  10
  11#ifndef _LINUX_MEMCONTROL_H
  12#define _LINUX_MEMCONTROL_H
  13#include <linux/cgroup.h>
  14#include <linux/vm_event_item.h>
  15#include <linux/hardirq.h>
  16#include <linux/jump_label.h>
  17#include <linux/page_counter.h>
  18#include <linux/vmpressure.h>
  19#include <linux/eventfd.h>
  20#include <linux/mm.h>
  21#include <linux/vmstat.h>
  22#include <linux/writeback.h>
  23#include <linux/page-flags.h>
  24
  25struct mem_cgroup;
  26struct obj_cgroup;
  27struct page;
  28struct mm_struct;
  29struct kmem_cache;
  30
  31/* Cgroup-specific page state, on top of universal node page state */
  32enum memcg_stat_item {
  33	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
  34	MEMCG_SOCK,
  35	MEMCG_PERCPU_B,
  36	MEMCG_NR_STAT,
  37};
  38
  39enum memcg_memory_event {
  40	MEMCG_LOW,
  41	MEMCG_HIGH,
  42	MEMCG_MAX,
  43	MEMCG_OOM,
  44	MEMCG_OOM_KILL,
  45	MEMCG_SWAP_HIGH,
  46	MEMCG_SWAP_MAX,
  47	MEMCG_SWAP_FAIL,
  48	MEMCG_NR_MEMORY_EVENTS,
  49};
  50
  51struct mem_cgroup_reclaim_cookie {
  52	pg_data_t *pgdat;
  53	unsigned int generation;
  54};
  55
  56#ifdef CONFIG_MEMCG
  57
  58#define MEM_CGROUP_ID_SHIFT	16
  59#define MEM_CGROUP_ID_MAX	USHRT_MAX
  60
  61struct mem_cgroup_id {
  62	int id;
  63	refcount_t ref;
  64};
  65
  66/*
   67 * The per-memcg event counter is incremented on every pagein/pageout. With
   68 * THP it is incremented by the number of pages. The counter is used to
   69 * trigger periodic per-memcg events; this is simpler and cheaper than
   70 * using jiffies etc. for the same purpose.
  71 */
  72enum mem_cgroup_events_target {
  73	MEM_CGROUP_TARGET_THRESH,
  74	MEM_CGROUP_TARGET_SOFTLIMIT,
  75	MEM_CGROUP_NTARGETS,
  76};
  77
  78struct memcg_vmstats_percpu {
  79	long stat[MEMCG_NR_STAT];
  80	unsigned long events[NR_VM_EVENT_ITEMS];
  81	unsigned long nr_page_events;
  82	unsigned long targets[MEM_CGROUP_NTARGETS];
  83};
  84
  85struct mem_cgroup_reclaim_iter {
  86	struct mem_cgroup *position;
  87	/* scan generation, increased every round-trip */
  88	unsigned int generation;
  89};
  90
  91struct lruvec_stat {
  92	long count[NR_VM_NODE_STAT_ITEMS];
  93};
  94
  95/*
  96 * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
  97 * which have elements charged to this memcg.
  98 */
  99struct memcg_shrinker_map {
 100	struct rcu_head rcu;
 101	unsigned long map[];
 102};
 103
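/*
 * Illustrative sketch, not part of the upstream header: a memcg-aware
 * shrinker is expected to raise its bit in this map (via
 * memcg_set_shrinker_bit(), declared further down under CONFIG_MEMCG)
 * when it gains the first object charged to a given memcg on a given
 * node, so that reclaim only walks shrinkers that have work to do.
 * The shrinker and helper names here are hypothetical; list_lru does
 * this for real.
 *
 *   static struct shrinker example_shrinker;     (SHRINKER_MEMCG_AWARE)
 *
 *   static void example_add_object(struct mem_cgroup *memcg, int nid)
 *   {
 *           ... queue the object on a per-memcg, per-node list ...
 *           memcg_set_shrinker_bit(memcg, nid, example_shrinker.id);
 *   }
 */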
 104/*
 105 * per-node information in memory controller.
 106 */
 107struct mem_cgroup_per_node {
 108	struct lruvec		lruvec;
 109
 110	/* Legacy local VM stats */
 111	struct lruvec_stat __percpu *lruvec_stat_local;
 112
 113	/* Subtree VM stats (batched updates) */
 114	struct lruvec_stat __percpu *lruvec_stat_cpu;
 115	atomic_long_t		lruvec_stat[NR_VM_NODE_STAT_ITEMS];
 116
 117	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
 118
 119	struct mem_cgroup_reclaim_iter	iter;
 120
 121	struct memcg_shrinker_map __rcu	*shrinker_map;
 122
 123	struct rb_node		tree_node;	/* RB tree node */
 124	unsigned long		usage_in_excess;/* Set to the value by which */
 125						/* the soft limit is exceeded*/
 126	bool			on_tree;
 127	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
 128						/* use container_of	   */
 129};
 130
 131struct mem_cgroup_threshold {
 132	struct eventfd_ctx *eventfd;
 133	unsigned long threshold;
 134};
 135
 136/* For threshold */
 137struct mem_cgroup_threshold_ary {
 138	/* An array index points to threshold just below or equal to usage. */
 139	int current_threshold;
 140	/* Size of entries[] */
 141	unsigned int size;
 142	/* Array of thresholds */
 143	struct mem_cgroup_threshold entries[];
 144};
 145
 146struct mem_cgroup_thresholds {
 147	/* Primary thresholds array */
 148	struct mem_cgroup_threshold_ary *primary;
 149	/*
 150	 * Spare threshold array.
 151	 * This is needed to make mem_cgroup_unregister_event() "never fail".
 152	 * It must be able to store at least primary->size - 1 entries.
 153	 */
 154	struct mem_cgroup_threshold_ary *spare;
 155};
 156
 157enum memcg_kmem_state {
 158	KMEM_NONE,
 159	KMEM_ALLOCATED,
 160	KMEM_ONLINE,
 161};
 162
 163#if defined(CONFIG_SMP)
 164struct memcg_padding {
 165	char x[0];
 166} ____cacheline_internodealigned_in_smp;
 167#define MEMCG_PADDING(name)      struct memcg_padding name;
 168#else
 169#define MEMCG_PADDING(name)
 170#endif
 171
 172/*
 173 * Remember four most recent foreign writebacks with dirty pages in this
 174 * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
 175 * one in a given round, we're likely to catch it later if it keeps
 176 * foreign-dirtying, so a fairly low count should be enough.
 177 *
 178 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 179 */
 180#define MEMCG_CGWB_FRN_CNT	4
 181
 182struct memcg_cgwb_frn {
 183	u64 bdi_id;			/* bdi->id of the foreign inode */
 184	int memcg_id;			/* memcg->css.id of foreign inode */
 185	u64 at;				/* jiffies_64 at the time of dirtying */
 186	struct wb_completion done;	/* tracks in-flight foreign writebacks */
 187};
 188
 189/*
 190 * Bucket for arbitrarily byte-sized objects charged to a memory
 191 * cgroup. The bucket can be reparented in one piece when the cgroup
 192 * is destroyed, without having to round up the individual references
 193 * of all live memory objects in the wild.
 194 */
 195struct obj_cgroup {
 196	struct percpu_ref refcnt;
 197	struct mem_cgroup *memcg;
 198	atomic_t nr_charged_bytes;
 199	union {
 200		struct list_head list;
 201		struct rcu_head rcu;
 202	};
 203};
 204
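/*
 * Illustrative sketch, not part of the upstream header: how a byte-sized
 * allocation might be charged through an obj_cgroup.  The slab allocator
 * does this internally; the explicit calls below
 * (get_obj_cgroup_from_current(), obj_cgroup_charge(),
 * obj_cgroup_uncharge(), obj_cgroup_put(), all declared later in this
 * header under CONFIG_MEMCG_KMEM) are only shown to make the lifecycle
 * visible.
 *
 *   struct obj_cgroup *objcg = get_obj_cgroup_from_current();
 *
 *   if (objcg && obj_cgroup_charge(objcg, GFP_KERNEL, size)) {
 *           obj_cgroup_put(objcg);
 *           return -ENOMEM;               over the memcg limit
 *   }
 *   ... use the memory, keep a reference to objcg with the object ...
 *   if (objcg) {
 *           obj_cgroup_uncharge(objcg, size);
 *           obj_cgroup_put(objcg);        on free
 *   }
 */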
 205/*
 206 * The memory controller data structure. The memory controller controls both
 207 * page cache and RSS per cgroup. We would eventually like to provide
 208 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 209 * to help the administrator determine what knobs to tune.
 210 */
 211struct mem_cgroup {
 212	struct cgroup_subsys_state css;
 213
 214	/* Private memcg ID. Used to ID objects that outlive the cgroup */
 215	struct mem_cgroup_id id;
 216
 217	/* Accounted resources */
 218	struct page_counter memory;
 219	struct page_counter swap;
 220
 221	/* Legacy consumer-oriented counters */
 222	struct page_counter memsw;
 223	struct page_counter kmem;
 224	struct page_counter tcpmem;
 225
 226	/* Range enforcement for interrupt charges */
 227	struct work_struct high_work;
 228
 229	unsigned long soft_limit;
 230
 231	/* vmpressure notifications */
 232	struct vmpressure vmpressure;
 233
 234	/*
 235	 * Should the accounting and control be hierarchical, per subtree?
 236	 */
 237	bool use_hierarchy;
 238
 239	/*
  240	 * Should the OOM killer kill all tasks in this cgroup once it has killed one?
 241	 */
 242	bool oom_group;
 243
 244	/* protected by memcg_oom_lock */
 245	bool		oom_lock;
 246	int		under_oom;
 247
 248	int	swappiness;
 249	/* OOM-Killer disable */
 250	int		oom_kill_disable;
 251
 252	/* memory.events and memory.events.local */
 253	struct cgroup_file events_file;
 254	struct cgroup_file events_local_file;
 255
 256	/* handle for "memory.swap.events" */
 257	struct cgroup_file swap_events_file;
 258
 259	/* protect arrays of thresholds */
 260	struct mutex thresholds_lock;
 261
 262	/* thresholds for memory usage. RCU-protected */
 263	struct mem_cgroup_thresholds thresholds;
 264
 265	/* thresholds for mem+swap usage. RCU-protected */
 266	struct mem_cgroup_thresholds memsw_thresholds;
 267
 268	/* For oom notifier event fd */
 269	struct list_head oom_notify;
 270
 271	/*
 272	 * Should we move charges of a task when a task is moved into this
 273	 * mem_cgroup ? And what type of charges should we move ?
 274	 */
 275	unsigned long move_charge_at_immigrate;
 276	/* taken only while moving_account > 0 */
 277	spinlock_t		move_lock;
 278	unsigned long		move_lock_flags;
 279
 280	MEMCG_PADDING(_pad1_);
 281
 282	/*
 283	 * set > 0 if pages under this cgroup are moving to other cgroup.
 284	 */
 285	atomic_t		moving_account;
 286	struct task_struct	*move_lock_task;
 287
 288	/* Legacy local VM stats and events */
 289	struct memcg_vmstats_percpu __percpu *vmstats_local;
 290
 291	/* Subtree VM stats and events (batched updates) */
 292	struct memcg_vmstats_percpu __percpu *vmstats_percpu;
 293
 294	MEMCG_PADDING(_pad2_);
 295
 296	atomic_long_t		vmstats[MEMCG_NR_STAT];
 297	atomic_long_t		vmevents[NR_VM_EVENT_ITEMS];
 298
 299	/* memory.events */
 300	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
 301	atomic_long_t		memory_events_local[MEMCG_NR_MEMORY_EVENTS];
 302
 303	unsigned long		socket_pressure;
 304
 305	/* Legacy tcp memory accounting */
 306	bool			tcpmem_active;
 307	int			tcpmem_pressure;
 308
 309#ifdef CONFIG_MEMCG_KMEM
  310	/* Index in the kmem_cache->memcg_params.memcg_caches array */
 311	int kmemcg_id;
 312	enum memcg_kmem_state kmem_state;
 313	struct obj_cgroup __rcu *objcg;
 314	struct list_head objcg_list; /* list of inherited objcgs */
 315#endif
 316
 317#ifdef CONFIG_CGROUP_WRITEBACK
 318	struct list_head cgwb_list;
 319	struct wb_domain cgwb_domain;
 320	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
 321#endif
 322
 323	/* List of events which userspace want to receive */
 324	struct list_head event_list;
 325	spinlock_t event_list_lock;
 326
 327#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 328	struct deferred_split deferred_split_queue;
 329#endif
 330
 331	struct mem_cgroup_per_node *nodeinfo[0];
 332	/* WARNING: nodeinfo must be the last member here */
 333};
 334
 335/*
  336 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
  337 * TODO: a larger batch may be needed on very big machines.
 338 */
 339#define MEMCG_CHARGE_BATCH 32U
 340
 341extern struct mem_cgroup *root_mem_cgroup;
 342
 343static __always_inline bool memcg_stat_item_in_bytes(int idx)
 344{
 345	if (idx == MEMCG_PERCPU_B)
 346		return true;
 347	return vmstat_item_in_bytes(idx);
 348}
 349
 350static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
 351{
 352	return (memcg == root_mem_cgroup);
 353}
 354
 355static inline bool mem_cgroup_disabled(void)
 356{
 357	return !cgroup_subsys_enabled(memory_cgrp_subsys);
 358}
 359
 360static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
 361						  struct mem_cgroup *memcg,
 362						  bool in_low_reclaim)
 363{
 364	if (mem_cgroup_disabled())
 365		return 0;
 366
 367	/*
 368	 * There is no reclaim protection applied to a targeted reclaim.
 369	 * We are special casing this specific case here because
 370	 * mem_cgroup_protected calculation is not robust enough to keep
 371	 * the protection invariant for calculated effective values for
 372	 * parallel reclaimers with different reclaim target. This is
 373	 * especially a problem for tail memcgs (as they have pages on LRU)
 374	 * which would want to have effective values 0 for targeted reclaim
 375	 * but a different value for external reclaim.
 376	 *
 377	 * Example
 378	 * Let's have global and A's reclaim in parallel:
 379	 *  |
 380	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
 381	 *  |\
 382	 *  | C (low = 1G, usage = 2.5G)
 383	 *  B (low = 1G, usage = 0.5G)
 384	 *
 385	 * For the global reclaim
 386	 * A.elow = A.low
 387	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
 388	 * C.elow = min(C.usage, C.low)
 389	 *
 390	 * With the effective values resetting we have A reclaim
 391	 * A.elow = 0
 392	 * B.elow = B.low
 393	 * C.elow = C.low
 394	 *
 395	 * If the global reclaim races with A's reclaim then
 396	 * B.elow = C.elow = 0 because children_low_usage > A.elow)
 397	 * is possible and reclaiming B would be violating the protection.
 398	 *
 399	 */
 400	if (root == memcg)
 401		return 0;
 402
 403	if (in_low_reclaim)
 404		return READ_ONCE(memcg->memory.emin);
 405
 406	return max(READ_ONCE(memcg->memory.emin),
 407		   READ_ONCE(memcg->memory.elow));
 408}
 409
 410void mem_cgroup_calculate_protection(struct mem_cgroup *root,
 411				     struct mem_cgroup *memcg);
 412
 413static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
 414{
 415	/*
 416	 * The root memcg doesn't account charges, and doesn't support
 417	 * protection.
 418	 */
 419	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
 420
 421}
 422
 423static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
 424{
 425	if (!mem_cgroup_supports_protection(memcg))
 426		return false;
 427
 428	return READ_ONCE(memcg->memory.elow) >=
 429		page_counter_read(&memcg->memory);
 430}
 431
 432static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
 433{
 434	if (!mem_cgroup_supports_protection(memcg))
 435		return false;
 436
 437	return READ_ONCE(memcg->memory.emin) >=
 438		page_counter_read(&memcg->memory);
 439}
 440
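/*
 * Illustrative sketch, not part of the upstream header: how a reclaimer
 * is expected to combine the protection helpers above.  This loosely
 * mirrors shrink_node_memcgs() in mm/vmscan.c; "sc" stands for the
 * reclaimer's private scan control and is not defined here.
 *
 *   mem_cgroup_calculate_protection(target_memcg, memcg);
 *
 *   if (mem_cgroup_below_min(memcg)) {
 *           continue;                     hard protection, never reclaim
 *   } else if (mem_cgroup_below_low(memcg)) {
 *           if (!sc->memcg_low_reclaim) {
 *                   sc->memcg_low_skipped = 1;
 *                   continue;             best-effort protection, skip for now
 *           }
 *           memcg_memory_event(memcg, MEMCG_LOW);
 *   }
 */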
 441int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
 442
 443void mem_cgroup_uncharge(struct page *page);
 444void mem_cgroup_uncharge_list(struct list_head *page_list);
 445
 446void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
 447
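/*
 * Illustrative sketch, not part of the upstream header: the basic
 * charge/uncharge contract for a new user page.  The page is charged
 * against the faulting mm before it becomes reachable, and the charge is
 * dropped again when the page is freed (mem_cgroup_uncharge() is
 * normally called from the page freeing path).  example_map_new_page()
 * is a hypothetical helper.
 *
 *   static int example_map_new_page(struct vm_area_struct *vma,
 *                                   struct page *page)
 *   {
 *           if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
 *                   return -ENOMEM;       reclaim failed, over memory.max
 *           ... insert the page into the page tables ...
 *           return 0;
 *   }
 */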
 448static struct mem_cgroup_per_node *
 449mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
 450{
 451	return memcg->nodeinfo[nid];
 452}
 453
 454/**
 455 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
  456 * @memcg: memcg of the wanted lruvec, NULL means the root memcg
  457 * @pgdat: node whose lruvec is wanted
  458 *
  459 * Returns the lru list vector holding pages for a given @memcg and @pgdat
  460 * combination. This can be the node lruvec if the memory controller is disabled.
 461 */
 462static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
 463					       struct pglist_data *pgdat)
 464{
 465	struct mem_cgroup_per_node *mz;
 466	struct lruvec *lruvec;
 467
 468	if (mem_cgroup_disabled()) {
 469		lruvec = &pgdat->__lruvec;
 470		goto out;
 471	}
 472
 473	if (!memcg)
 474		memcg = root_mem_cgroup;
 475
 476	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
 477	lruvec = &mz->lruvec;
 478out:
 479	/*
 480	 * Since a node can be onlined after the mem_cgroup was created,
 481	 * we have to be prepared to initialize lruvec->pgdat here;
 482	 * and if offlined then reonlined, we need to reinitialize it.
 483	 */
 484	if (unlikely(lruvec->pgdat != pgdat))
 485		lruvec->pgdat = pgdat;
 486	return lruvec;
 487}
 488
 489struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
 490
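/*
 * Illustrative sketch, not part of the upstream header: resolving a page
 * to its lruvec and updating a node stat through the memcg-aware helper
 * defined further down.  When the controller is disabled this degrades
 * to the plain node lruvec.  Whatever locking the LRU lists themselves
 * need is omitted here.
 *
 *   struct lruvec *lruvec;
 *
 *   lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
 *   mod_lruvec_state(lruvec, NR_ISOLATED_ANON, nr_pages);
 */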
 491struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 492
 493struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
 494
 495struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);
 496
 497static inline
 498struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
 499	return css ? container_of(css, struct mem_cgroup, css) : NULL;
 500}
 501
 502static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
 503{
 504	return percpu_ref_tryget(&objcg->refcnt);
 505}
 506
 507static inline void obj_cgroup_get(struct obj_cgroup *objcg)
 508{
 509	percpu_ref_get(&objcg->refcnt);
 510}
 511
 512static inline void obj_cgroup_put(struct obj_cgroup *objcg)
 513{
 514	percpu_ref_put(&objcg->refcnt);
 515}
 516
 517/*
 518 * After the initialization objcg->memcg is always pointing at
 519 * a valid memcg, but can be atomically swapped to the parent memcg.
 520 *
 521 * The caller must ensure that the returned memcg won't be released:
 522 * e.g. acquire the rcu_read_lock or css_set_lock.
 523 */
 524static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
 525{
 526	return READ_ONCE(objcg->memcg);
 527}
 528
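/*
 * Illustrative sketch, not part of the upstream header: pinning the memcg
 * behind an objcg.  Because objcg->memcg can be reparented at any time
 * (see the comment above), the lookup is done under rcu_read_lock() and
 * a css reference is taken before the lock is dropped.  This mirrors
 * what mm/memcontrol.c does internally.
 *
 *   struct mem_cgroup *memcg;
 *
 *   rcu_read_lock();
 *   memcg = obj_cgroup_memcg(objcg);
 *   if (!css_tryget(&memcg->css))
 *           memcg = NULL;                 cgroup is going away, caller bails out
 *   rcu_read_unlock();
 *   ...
 *   mem_cgroup_put(memcg);                NULL-safe, drops the css reference
 */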
 529static inline void mem_cgroup_put(struct mem_cgroup *memcg)
 530{
 531	if (memcg)
 532		css_put(&memcg->css);
 533}
 534
 535#define mem_cgroup_from_counter(counter, member)	\
 536	container_of(counter, struct mem_cgroup, member)
 537
 538struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
 539				   struct mem_cgroup *,
 540				   struct mem_cgroup_reclaim_cookie *);
 541void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
 542int mem_cgroup_scan_tasks(struct mem_cgroup *,
 543			  int (*)(struct task_struct *, void *), void *);
 544
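/*
 * Illustrative sketch, not part of the upstream header: the canonical
 * mem_cgroup_iter() loop.  The iterator returns a referenced memcg on
 * each step; passing the previous position back in drops that reference,
 * and mem_cgroup_iter_break() must be used when leaving the walk early.
 * should_stop() is a hypothetical predicate.
 *
 *   struct mem_cgroup *memcg;
 *
 *   for (memcg = mem_cgroup_iter(root, NULL, NULL);
 *        memcg;
 *        memcg = mem_cgroup_iter(root, memcg, NULL)) {
 *           if (should_stop(memcg)) {
 *                   mem_cgroup_iter_break(root, memcg);
 *                   break;
 *           }
 *           ... work on memcg ...
 *   }
 */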
 545static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
 546{
 547	if (mem_cgroup_disabled())
 548		return 0;
 549
 550	return memcg->id.id;
 551}
 552struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
 553
 554static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
 555{
 556	return mem_cgroup_from_css(seq_css(m));
 557}
 558
 559static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
 560{
 561	struct mem_cgroup_per_node *mz;
 562
 563	if (mem_cgroup_disabled())
 564		return NULL;
 565
 566	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 567	return mz->memcg;
 568}
 569
 570/**
 571 * parent_mem_cgroup - find the accounting parent of a memcg
 572 * @memcg: memcg whose parent to find
 573 *
 574 * Returns the parent memcg, or NULL if this is the root or the memory
 575 * controller is in legacy no-hierarchy mode.
 576 */
 577static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
 578{
 579	if (!memcg->memory.parent)
 580		return NULL;
 581	return mem_cgroup_from_counter(memcg->memory.parent, memory);
 582}
 583
 584static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
 585			      struct mem_cgroup *root)
 586{
 587	if (root == memcg)
 588		return true;
 589	if (!root->use_hierarchy)
 590		return false;
 591	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
 592}
 593
 594static inline bool mm_match_cgroup(struct mm_struct *mm,
 595				   struct mem_cgroup *memcg)
 596{
 597	struct mem_cgroup *task_memcg;
 598	bool match = false;
 599
 600	rcu_read_lock();
 601	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
 602	if (task_memcg)
 603		match = mem_cgroup_is_descendant(task_memcg, memcg);
 604	rcu_read_unlock();
 605	return match;
 606}
 607
 608struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
 609ino_t page_cgroup_ino(struct page *page);
 610
 611static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
 612{
 613	if (mem_cgroup_disabled())
 614		return true;
 615	return !!(memcg->css.flags & CSS_ONLINE);
 616}
 617
 618/*
 619 * For memory reclaim.
 620 */
 621int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
 622
 623void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
 624		int zid, int nr_pages);
 625
 626static inline
 627unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
 628		enum lru_list lru, int zone_idx)
 629{
 630	struct mem_cgroup_per_node *mz;
 631
 632	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 633	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
 634}
 635
 636void mem_cgroup_handle_over_high(void);
 637
 638unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
 639
 640unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
 641
 642void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
 643				struct task_struct *p);
 644
 645void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
 646
 647static inline void mem_cgroup_enter_user_fault(void)
 648{
 649	WARN_ON(current->in_user_fault);
 650	current->in_user_fault = 1;
 651}
 652
 653static inline void mem_cgroup_exit_user_fault(void)
 654{
 655	WARN_ON(!current->in_user_fault);
 656	current->in_user_fault = 0;
 657}
 658
 659static inline bool task_in_memcg_oom(struct task_struct *p)
 660{
 661	return p->memcg_in_oom;
 662}
 663
 664bool mem_cgroup_oom_synchronize(bool wait);
 665struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
 666					    struct mem_cgroup *oom_domain);
 667void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
 668
 669#ifdef CONFIG_MEMCG_SWAP
 670extern bool cgroup_memory_noswap;
 671#endif
 672
 673struct mem_cgroup *lock_page_memcg(struct page *page);
 674void __unlock_page_memcg(struct mem_cgroup *memcg);
 675void unlock_page_memcg(struct page *page);
 676
 677/*
 678 * idx can be of type enum memcg_stat_item or node_stat_item.
 679 * Keep in sync with memcg_exact_page_state().
 680 */
 681static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
 682{
 683	long x = atomic_long_read(&memcg->vmstats[idx]);
 684#ifdef CONFIG_SMP
 685	if (x < 0)
 686		x = 0;
 687#endif
 688	return x;
 689}
 690
 691/*
 692 * idx can be of type enum memcg_stat_item or node_stat_item.
 693 * Keep in sync with memcg_exact_page_state().
 694 */
 695static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
 696						   int idx)
 697{
 698	long x = 0;
 699	int cpu;
 700
 701	for_each_possible_cpu(cpu)
 702		x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
 703#ifdef CONFIG_SMP
 704	if (x < 0)
 705		x = 0;
 706#endif
 707	return x;
 708}
 709
 710void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
 711
 712/* idx can be of type enum memcg_stat_item or node_stat_item */
 713static inline void mod_memcg_state(struct mem_cgroup *memcg,
 714				   int idx, int val)
 715{
 716	unsigned long flags;
 717
 718	local_irq_save(flags);
 719	__mod_memcg_state(memcg, idx, val);
 720	local_irq_restore(flags);
 721}
 722
 723/**
 724 * mod_memcg_page_state - update page state statistics
 725 * @page: the page
 726 * @idx: page state item to account
 727 * @val: number of pages (positive or negative)
 728 *
 729 * The @page must be locked or the caller must use lock_page_memcg()
 730 * to prevent double accounting when the page is concurrently being
 731 * moved to another memcg:
 732 *
 733 *   lock_page(page) or lock_page_memcg(page)
 734 *   if (TestClearPageState(page))
 735 *     mod_memcg_page_state(page, state, -1);
 736 *   unlock_page(page) or unlock_page_memcg(page)
 737 *
 738 * Kernel pages are an exception to this, since they'll never move.
 739 */
 740static inline void __mod_memcg_page_state(struct page *page,
 741					  int idx, int val)
 742{
 743	if (page->mem_cgroup)
 744		__mod_memcg_state(page->mem_cgroup, idx, val);
 745}
 746
 747static inline void mod_memcg_page_state(struct page *page,
 748					int idx, int val)
 749{
 750	if (page->mem_cgroup)
 751		mod_memcg_state(page->mem_cgroup, idx, val);
 752}
 753
 754static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
 755					      enum node_stat_item idx)
 756{
 757	struct mem_cgroup_per_node *pn;
 758	long x;
 759
 760	if (mem_cgroup_disabled())
 761		return node_page_state(lruvec_pgdat(lruvec), idx);
 762
 763	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 764	x = atomic_long_read(&pn->lruvec_stat[idx]);
 765#ifdef CONFIG_SMP
 766	if (x < 0)
 767		x = 0;
 768#endif
 769	return x;
 770}
 771
 772static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
 773						    enum node_stat_item idx)
 774{
 775	struct mem_cgroup_per_node *pn;
 776	long x = 0;
 777	int cpu;
 778
 779	if (mem_cgroup_disabled())
 780		return node_page_state(lruvec_pgdat(lruvec), idx);
 781
 782	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 783	for_each_possible_cpu(cpu)
 784		x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
 785#ifdef CONFIG_SMP
 786	if (x < 0)
 787		x = 0;
 788#endif
 789	return x;
 790}
 791
 792void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 793			      int val);
 794void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 795			int val);
 796void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
 797
 798void mod_memcg_obj_state(void *p, int idx, int val);
 799
 800static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
 801					 int val)
 802{
 803	unsigned long flags;
 804
 805	local_irq_save(flags);
 806	__mod_lruvec_slab_state(p, idx, val);
 807	local_irq_restore(flags);
 808}
 809
 810static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
 811					  enum node_stat_item idx, int val)
 812{
 813	unsigned long flags;
 814
 815	local_irq_save(flags);
 816	__mod_memcg_lruvec_state(lruvec, idx, val);
 817	local_irq_restore(flags);
 818}
 819
 820static inline void mod_lruvec_state(struct lruvec *lruvec,
 821				    enum node_stat_item idx, int val)
 822{
 823	unsigned long flags;
 824
 825	local_irq_save(flags);
 826	__mod_lruvec_state(lruvec, idx, val);
 827	local_irq_restore(flags);
 828}
 829
 830static inline void __mod_lruvec_page_state(struct page *page,
 831					   enum node_stat_item idx, int val)
 832{
 833	struct page *head = compound_head(page); /* rmap on tail pages */
 834	pg_data_t *pgdat = page_pgdat(page);
 835	struct lruvec *lruvec;
 836
 837	/* Untracked pages have no memcg, no lruvec. Update only the node */
 838	if (!head->mem_cgroup) {
 839		__mod_node_page_state(pgdat, idx, val);
 840		return;
 841	}
 842
 843	lruvec = mem_cgroup_lruvec(head->mem_cgroup, pgdat);
 844	__mod_lruvec_state(lruvec, idx, val);
 845}
 846
 847static inline void mod_lruvec_page_state(struct page *page,
 848					 enum node_stat_item idx, int val)
 849{
 850	unsigned long flags;
 851
 852	local_irq_save(flags);
 853	__mod_lruvec_page_state(page, idx, val);
 854	local_irq_restore(flags);
 855}
 856
 857unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
 858						gfp_t gfp_mask,
 859						unsigned long *total_scanned);
 860
 861void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
 862			  unsigned long count);
 863
 864static inline void count_memcg_events(struct mem_cgroup *memcg,
 865				      enum vm_event_item idx,
 866				      unsigned long count)
 867{
 868	unsigned long flags;
 869
 870	local_irq_save(flags);
 871	__count_memcg_events(memcg, idx, count);
 872	local_irq_restore(flags);
 873}
 874
 875static inline void count_memcg_page_event(struct page *page,
 876					  enum vm_event_item idx)
 877{
 878	if (page->mem_cgroup)
 879		count_memcg_events(page->mem_cgroup, idx, 1);
 880}
 881
 882static inline void count_memcg_event_mm(struct mm_struct *mm,
 883					enum vm_event_item idx)
 884{
 885	struct mem_cgroup *memcg;
 886
 887	if (mem_cgroup_disabled())
 888		return;
 889
 890	rcu_read_lock();
 891	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
 892	if (likely(memcg))
 893		count_memcg_events(memcg, idx, 1);
 894	rcu_read_unlock();
 895}
 896
 897static inline void memcg_memory_event(struct mem_cgroup *memcg,
 898				      enum memcg_memory_event event)
 899{
 900	atomic_long_inc(&memcg->memory_events_local[event]);
 901	cgroup_file_notify(&memcg->events_local_file);
 902
 903	do {
 904		atomic_long_inc(&memcg->memory_events[event]);
 905		cgroup_file_notify(&memcg->events_file);
 906
 907		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
 908			break;
 909		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
 910			break;
 911	} while ((memcg = parent_mem_cgroup(memcg)) &&
 912		 !mem_cgroup_is_root(memcg));
 913}
 914
 915static inline void memcg_memory_event_mm(struct mm_struct *mm,
 916					 enum memcg_memory_event event)
 917{
 918	struct mem_cgroup *memcg;
 919
 920	if (mem_cgroup_disabled())
 921		return;
 922
 923	rcu_read_lock();
 924	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
 925	if (likely(memcg))
 926		memcg_memory_event(memcg, event);
 927	rcu_read_unlock();
 928}
 929
 930#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 931void mem_cgroup_split_huge_fixup(struct page *head);
 932#endif
 933
 934#else /* CONFIG_MEMCG */
 935
 936#define MEM_CGROUP_ID_SHIFT	0
 937#define MEM_CGROUP_ID_MAX	0
 938
 939struct mem_cgroup;
 940
 941static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
 942{
 943	return true;
 944}
 945
 946static inline bool mem_cgroup_disabled(void)
 947{
 948	return true;
 949}
 950
 951static inline void memcg_memory_event(struct mem_cgroup *memcg,
 952				      enum memcg_memory_event event)
 953{
 954}
 955
 956static inline void memcg_memory_event_mm(struct mm_struct *mm,
 957					 enum memcg_memory_event event)
 958{
 959}
 960
 961static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
 962						  struct mem_cgroup *memcg,
 963						  bool in_low_reclaim)
 964{
 965	return 0;
 966}
 967
 968static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
 969						   struct mem_cgroup *memcg)
 970{
 971}
 972
 973static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
 974{
 975	return false;
 976}
 977
 978static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
 979{
 980	return false;
 981}
 982
 983static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
 984				    gfp_t gfp_mask)
 985{
 986	return 0;
 987}
 988
 989static inline void mem_cgroup_uncharge(struct page *page)
 990{
 991}
 992
 993static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
 994{
 995}
 996
 997static inline void mem_cgroup_migrate(struct page *old, struct page *new)
 998{
 999}
1000
1001static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
1002					       struct pglist_data *pgdat)
1003{
1004	return &pgdat->__lruvec;
1005}
1006
1007static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
1008						    struct pglist_data *pgdat)
1009{
1010	return &pgdat->__lruvec;
1011}
1012
1013static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
1014{
1015	return NULL;
1016}
1017
1018static inline bool mm_match_cgroup(struct mm_struct *mm,
1019		struct mem_cgroup *memcg)
1020{
1021	return true;
1022}
1023
1024static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1025{
1026	return NULL;
1027}
1028
1029static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
1030{
1031	return NULL;
1032}
1033
1034static inline void mem_cgroup_put(struct mem_cgroup *memcg)
1035{
1036}
1037
1038static inline struct mem_cgroup *
1039mem_cgroup_iter(struct mem_cgroup *root,
1040		struct mem_cgroup *prev,
1041		struct mem_cgroup_reclaim_cookie *reclaim)
1042{
1043	return NULL;
1044}
1045
1046static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
1047					 struct mem_cgroup *prev)
1048{
1049}
1050
1051static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1052		int (*fn)(struct task_struct *, void *), void *arg)
1053{
1054	return 0;
1055}
1056
1057static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
1058{
1059	return 0;
1060}
1061
1062static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
1063{
1064	WARN_ON_ONCE(id);
1065	/* XXX: This should always return root_mem_cgroup */
1066	return NULL;
1067}
1068
1069static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
1070{
1071	return NULL;
1072}
1073
1074static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
1075{
1076	return NULL;
1077}
1078
1079static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
1080{
1081	return true;
1082}
1083
1084static inline
1085unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
1086		enum lru_list lru, int zone_idx)
1087{
1088	return 0;
1089}
1090
1091static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1092{
1093	return 0;
1094}
1095
1096static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1097{
1098	return 0;
1099}
1100
1101static inline void
1102mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1103{
1104}
1105
1106static inline void
1107mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1108{
1109}
1110
1111static inline struct mem_cgroup *lock_page_memcg(struct page *page)
1112{
1113	return NULL;
1114}
1115
1116static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
1117{
1118}
1119
1120static inline void unlock_page_memcg(struct page *page)
1121{
1122}
1123
1124static inline void mem_cgroup_handle_over_high(void)
1125{
1126}
1127
1128static inline void mem_cgroup_enter_user_fault(void)
1129{
1130}
1131
1132static inline void mem_cgroup_exit_user_fault(void)
1133{
1134}
1135
1136static inline bool task_in_memcg_oom(struct task_struct *p)
1137{
1138	return false;
1139}
1140
1141static inline bool mem_cgroup_oom_synchronize(bool wait)
1142{
1143	return false;
1144}
1145
1146static inline struct mem_cgroup *mem_cgroup_get_oom_group(
1147	struct task_struct *victim, struct mem_cgroup *oom_domain)
1148{
1149	return NULL;
1150}
1151
1152static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1153{
1154}
1155
1156static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
1157{
1158	return 0;
1159}
1160
1161static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
1162						   int idx)
1163{
1164	return 0;
1165}
1166
1167static inline void __mod_memcg_state(struct mem_cgroup *memcg,
1168				     int idx,
1169				     int nr)
1170{
1171}
1172
1173static inline void mod_memcg_state(struct mem_cgroup *memcg,
1174				   int idx,
1175				   int nr)
1176{
1177}
1178
1179static inline void __mod_memcg_page_state(struct page *page,
1180					  int idx,
1181					  int nr)
1182{
1183}
1184
1185static inline void mod_memcg_page_state(struct page *page,
1186					int idx,
1187					int nr)
1188{
1189}
1190
1191static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
1192					      enum node_stat_item idx)
1193{
1194	return node_page_state(lruvec_pgdat(lruvec), idx);
1195}
1196
1197static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
1198						    enum node_stat_item idx)
1199{
1200	return node_page_state(lruvec_pgdat(lruvec), idx);
1201}
1202
1203static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
1204					    enum node_stat_item idx, int val)
1205{
1206}
1207
1208static inline void __mod_lruvec_state(struct lruvec *lruvec,
1209				      enum node_stat_item idx, int val)
1210{
1211	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
1212}
1213
1214static inline void mod_lruvec_state(struct lruvec *lruvec,
1215				    enum node_stat_item idx, int val)
1216{
1217	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
1218}
1219
1220static inline void __mod_lruvec_page_state(struct page *page,
1221					   enum node_stat_item idx, int val)
1222{
1223	__mod_node_page_state(page_pgdat(page), idx, val);
1224}
1225
1226static inline void mod_lruvec_page_state(struct page *page,
1227					 enum node_stat_item idx, int val)
1228{
1229	mod_node_page_state(page_pgdat(page), idx, val);
1230}
1231
1232static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
1233					   int val)
1234{
1235	struct page *page = virt_to_head_page(p);
1236
1237	__mod_node_page_state(page_pgdat(page), idx, val);
1238}
1239
1240static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
1241					 int val)
1242{
1243	struct page *page = virt_to_head_page(p);
1244
1245	mod_node_page_state(page_pgdat(page), idx, val);
1246}
1247
1248static inline void mod_memcg_obj_state(void *p, int idx, int val)
1249{
1250}
1251
1252static inline
1253unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
1254					    gfp_t gfp_mask,
1255					    unsigned long *total_scanned)
1256{
1257	return 0;
1258}
1259
1260static inline void mem_cgroup_split_huge_fixup(struct page *head)
1261{
1262}
1263
1264static inline void count_memcg_events(struct mem_cgroup *memcg,
1265				      enum vm_event_item idx,
1266				      unsigned long count)
1267{
1268}
1269
1270static inline void __count_memcg_events(struct mem_cgroup *memcg,
1271					enum vm_event_item idx,
1272					unsigned long count)
1273{
1274}
1275
1276static inline void count_memcg_page_event(struct page *page,
1277					  int idx)
1278{
1279}
1280
1281static inline
1282void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
1283{
1284}
1285#endif /* CONFIG_MEMCG */
1286
1287/* idx can be of type enum memcg_stat_item or node_stat_item */
1288static inline void __inc_memcg_state(struct mem_cgroup *memcg,
1289				     int idx)
1290{
1291	__mod_memcg_state(memcg, idx, 1);
1292}
1293
1294/* idx can be of type enum memcg_stat_item or node_stat_item */
1295static inline void __dec_memcg_state(struct mem_cgroup *memcg,
1296				     int idx)
1297{
1298	__mod_memcg_state(memcg, idx, -1);
1299}
1300
1301/* idx can be of type enum memcg_stat_item or node_stat_item */
1302static inline void __inc_memcg_page_state(struct page *page,
1303					  int idx)
1304{
1305	__mod_memcg_page_state(page, idx, 1);
1306}
1307
1308/* idx can be of type enum memcg_stat_item or node_stat_item */
1309static inline void __dec_memcg_page_state(struct page *page,
1310					  int idx)
1311{
1312	__mod_memcg_page_state(page, idx, -1);
1313}
1314
1315static inline void __inc_lruvec_state(struct lruvec *lruvec,
1316				      enum node_stat_item idx)
1317{
1318	__mod_lruvec_state(lruvec, idx, 1);
1319}
1320
1321static inline void __dec_lruvec_state(struct lruvec *lruvec,
1322				      enum node_stat_item idx)
1323{
1324	__mod_lruvec_state(lruvec, idx, -1);
1325}
1326
1327static inline void __inc_lruvec_page_state(struct page *page,
1328					   enum node_stat_item idx)
1329{
1330	__mod_lruvec_page_state(page, idx, 1);
1331}
1332
1333static inline void __dec_lruvec_page_state(struct page *page,
1334					   enum node_stat_item idx)
1335{
1336	__mod_lruvec_page_state(page, idx, -1);
1337}
1338
1339static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
1340{
1341	__mod_lruvec_slab_state(p, idx, 1);
1342}
1343
1344static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
1345{
1346	__mod_lruvec_slab_state(p, idx, -1);
1347}
1348
1349/* idx can be of type enum memcg_stat_item or node_stat_item */
1350static inline void inc_memcg_state(struct mem_cgroup *memcg,
1351				   int idx)
1352{
1353	mod_memcg_state(memcg, idx, 1);
1354}
1355
1356/* idx can be of type enum memcg_stat_item or node_stat_item */
1357static inline void dec_memcg_state(struct mem_cgroup *memcg,
1358				   int idx)
1359{
1360	mod_memcg_state(memcg, idx, -1);
1361}
1362
1363/* idx can be of type enum memcg_stat_item or node_stat_item */
1364static inline void inc_memcg_page_state(struct page *page,
1365					int idx)
1366{
1367	mod_memcg_page_state(page, idx, 1);
1368}
1369
1370/* idx can be of type enum memcg_stat_item or node_stat_item */
1371static inline void dec_memcg_page_state(struct page *page,
1372					int idx)
1373{
1374	mod_memcg_page_state(page, idx, -1);
1375}
1376
1377static inline void inc_lruvec_state(struct lruvec *lruvec,
1378				    enum node_stat_item idx)
1379{
1380	mod_lruvec_state(lruvec, idx, 1);
1381}
1382
1383static inline void dec_lruvec_state(struct lruvec *lruvec,
1384				    enum node_stat_item idx)
1385{
1386	mod_lruvec_state(lruvec, idx, -1);
1387}
1388
1389static inline void inc_lruvec_page_state(struct page *page,
1390					 enum node_stat_item idx)
1391{
1392	mod_lruvec_page_state(page, idx, 1);
1393}
1394
1395static inline void dec_lruvec_page_state(struct page *page,
1396					 enum node_stat_item idx)
1397{
1398	mod_lruvec_page_state(page, idx, -1);
1399}
1400
1401static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
1402{
1403	struct mem_cgroup *memcg;
1404
1405	memcg = lruvec_memcg(lruvec);
1406	if (!memcg)
1407		return NULL;
1408	memcg = parent_mem_cgroup(memcg);
1409	if (!memcg)
1410		return NULL;
1411	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
1412}
1413
1414#ifdef CONFIG_CGROUP_WRITEBACK
1415
1416struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
1417void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
1418			 unsigned long *pheadroom, unsigned long *pdirty,
1419			 unsigned long *pwriteback);
1420
1421void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
1422					     struct bdi_writeback *wb);
1423
1424static inline void mem_cgroup_track_foreign_dirty(struct page *page,
1425						  struct bdi_writeback *wb)
1426{
1427	if (mem_cgroup_disabled())
1428		return;
1429
1430	if (unlikely(&page->mem_cgroup->css != wb->memcg_css))
1431		mem_cgroup_track_foreign_dirty_slowpath(page, wb);
1432}
1433
1434void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
1435
1436#else	/* CONFIG_CGROUP_WRITEBACK */
1437
1438static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
1439{
1440	return NULL;
1441}
1442
1443static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
1444				       unsigned long *pfilepages,
1445				       unsigned long *pheadroom,
1446				       unsigned long *pdirty,
1447				       unsigned long *pwriteback)
1448{
1449}
1450
1451static inline void mem_cgroup_track_foreign_dirty(struct page *page,
1452						  struct bdi_writeback *wb)
1453{
1454}
1455
1456static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
1457{
1458}
1459
1460#endif	/* CONFIG_CGROUP_WRITEBACK */
1461
1462struct sock;
1463bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1464void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1465#ifdef CONFIG_MEMCG
1466extern struct static_key_false memcg_sockets_enabled_key;
1467#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
1468void mem_cgroup_sk_alloc(struct sock *sk);
1469void mem_cgroup_sk_free(struct sock *sk);
1470static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1471{
1472	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
1473		return true;
1474	do {
1475		if (time_before(jiffies, memcg->socket_pressure))
1476			return true;
1477	} while ((memcg = parent_mem_cgroup(memcg)));
1478	return false;
1479}
1480
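/*
 * Illustrative sketch, not part of the upstream header: how the
 * networking core consults these hooks.  A socket gets its memcg pointer
 * from mem_cgroup_sk_alloc(); buffer pages are then charged and
 * uncharged with mem_cgroup_charge_skmem()/mem_cgroup_uncharge_skmem(),
 * and allocations back off while the memcg reports socket pressure.
 * Loosely modeled on net/core/sock.c.
 *
 *   if (mem_cgroup_sockets_enabled && sk->sk_memcg) {
 *           if (!mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages))
 *                   goto suppress_allocation;
 *           if (mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *                   ... shrink the send/receive buffers ...
 *   }
 *   ...
 *   if (mem_cgroup_sockets_enabled && sk->sk_memcg)
 *           mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
 */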
1481extern int memcg_expand_shrinker_maps(int new_id);
1482
1483extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
1484				   int nid, int shrinker_id);
1485#else
1486#define mem_cgroup_sockets_enabled 0
1487static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
1488static inline void mem_cgroup_sk_free(struct sock *sk) { };
1489static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1490{
1491	return false;
1492}
1493
1494static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
1495					  int nid, int shrinker_id)
1496{
1497}
1498#endif
1499
1500#ifdef CONFIG_MEMCG_KMEM
1501int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
1502			unsigned int nr_pages);
1503void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
1504int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
1505void __memcg_kmem_uncharge_page(struct page *page, int order);
1506
1507struct obj_cgroup *get_obj_cgroup_from_current(void);
1508
1509int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
1510void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
1511
1512extern struct static_key_false memcg_kmem_enabled_key;
1513
1514extern int memcg_nr_cache_ids;
1515void memcg_get_cache_ids(void);
1516void memcg_put_cache_ids(void);
1517
1518/*
1519 * Helper macro to loop through all memcg-specific caches. Callers must still
1520 * check if the cache is valid (it is either valid or NULL).
 1521 * The slab_mutex must be held when looping through those caches.
1522 */
1523#define for_each_memcg_cache_index(_idx)	\
1524	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
1525
1526static inline bool memcg_kmem_enabled(void)
1527{
1528	return static_branch_likely(&memcg_kmem_enabled_key);
1529}
1530
1531static inline bool memcg_kmem_bypass(void)
1532{
1533	if (in_interrupt())
1534		return true;
1535
1536	/* Allow remote memcg charging in kthread contexts. */
1537	if ((!current->mm || (current->flags & PF_KTHREAD)) &&
1538	     !current->active_memcg)
1539		return true;
1540	return false;
1541}
1542
1543static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1544					 int order)
1545{
1546	if (memcg_kmem_enabled())
1547		return __memcg_kmem_charge_page(page, gfp, order);
1548	return 0;
1549}
1550
1551static inline void memcg_kmem_uncharge_page(struct page *page, int order)
1552{
1553	if (memcg_kmem_enabled())
1554		__memcg_kmem_uncharge_page(page, order);
1555}
1556
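/*
 * Illustrative sketch, not part of the upstream header: explicitly
 * charging a kernel page to the current task's memcg and uncharging it
 * again before it is freed.  In practice most callers simply pass
 * __GFP_ACCOUNT to the page or slab allocator, which routes through
 * these same wrappers.
 *
 *   struct page *page = alloc_page(GFP_KERNEL);
 *
 *   if (page && memcg_kmem_charge_page(page, GFP_KERNEL, 0)) {
 *           __free_page(page);            charge failed, back the allocation out
 *           page = NULL;
 *   }
 *   ...
 *   memcg_kmem_uncharge_page(page, 0);    before __free_page(page)
 */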
1557static inline int memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
1558				    unsigned int nr_pages)
1559{
1560	if (memcg_kmem_enabled())
1561		return __memcg_kmem_charge(memcg, gfp, nr_pages);
1562	return 0;
1563}
1564
1565static inline void memcg_kmem_uncharge(struct mem_cgroup *memcg,
1566				       unsigned int nr_pages)
1567{
1568	if (memcg_kmem_enabled())
1569		__memcg_kmem_uncharge(memcg, nr_pages);
1570}
1571
1572/*
1573 * helper for accessing a memcg's index. It will be used as an index in the
1574 * child cache array in kmem_cache, and also to derive its name. This function
1575 * will return -1 when this is not a kmem-limited memcg.
1576 */
1577static inline int memcg_cache_id(struct mem_cgroup *memcg)
1578{
1579	return memcg ? memcg->kmemcg_id : -1;
1580}
1581
1582struct mem_cgroup *mem_cgroup_from_obj(void *p);
1583
1584#else
1585
1586static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1587					 int order)
1588{
1589	return 0;
1590}
1591
1592static inline void memcg_kmem_uncharge_page(struct page *page, int order)
1593{
1594}
1595
1596static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1597					   int order)
1598{
1599	return 0;
1600}
1601
1602static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
1603{
1604}
1605
1606#define for_each_memcg_cache_index(_idx)	\
1607	for (; NULL; )
1608
1609static inline bool memcg_kmem_enabled(void)
1610{
1611	return false;
1612}
1613
1614static inline int memcg_cache_id(struct mem_cgroup *memcg)
1615{
1616	return -1;
1617}
1618
1619static inline void memcg_get_cache_ids(void)
1620{
1621}
1622
1623static inline void memcg_put_cache_ids(void)
1624{
1625}
1626
1627static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
1628{
 1629	return NULL;
1630}
1631
1632#endif /* CONFIG_MEMCG_KMEM */
1633
1634#endif /* _LINUX_MEMCONTROL_H */