v3.15
/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

struct hugetlb_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the per-hstate counters to account for hugepages from hugetlb.
	 */
	struct res_counter hugepage[HUGE_MAX_HSTATE];
};

#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
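
/*
 * Editor's sketch (not part of the original source): MEMFILE_PRIVATE()
 * packs an hstate index into the high 16 bits of cft->private and a
 * resource attribute into the low 16 bits, so a single integer can route
 * a cgroup file callback. For an hstate index of 1 and an attribute
 * value of 2:
 *
 *	int priv = MEMFILE_PRIVATE(1, 2);	// -> 0x00010002
 *	MEMFILE_IDX(priv);			// -> 1 (hstate index)
 *	MEMFILE_ATTR(priv);			// -> 2 (resource attr)
 */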

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(css_parent(&h_cg->css));
}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	int idx;

	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
		if ((res_counter_read_u64(&h_cg->hugepage[idx], RES_USAGE)) > 0)
			return true;
	}
	return false;
}

static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;
	int idx;

	h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (parent_h_cgroup) {
		for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
			res_counter_init(&h_cgroup->hugepage[idx],
					 &parent_h_cgroup->hugepage[idx]);
	} else {
		root_h_cgroup = h_cgroup;
		for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
			res_counter_init(&h_cgroup->hugepage[idx], NULL);
	}
	return &h_cgroup->css;
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = hugetlb_cgroup_from_css(css);
	kfree(h_cgroup);
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot get moved from the
 * active list or uncharged from the cgroup, so there is no need to take
 * a page reference or test whether the page is active here. This
 * function cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	int csize;
	struct res_counter *counter;
	struct res_counter *fail_res;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);

	page_hcg = hugetlb_cgroup_from_page(page);
	/*
	 * We can have pages on the active list that belong to no cgroup,
	 * i.e., hugepages with fewer than 3 pages. We can safely ignore
	 * those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	csize = PAGE_SIZE << compound_order(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		res_counter_charge_nofail(&parent->hugepage[idx],
					  csize, &fail_res);
	}
	counter = &h_cg->hugepage[idx];
	res_counter_uncharge_until(counter, counter->parent, csize);

	set_hugetlb_cgroup(page, parent);
out:
	return;
}
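
/*
 * Editor's note (illustration, not in the original source): the charge
 * size above is the compound page size in bytes. For a 2MB hugepage on
 * x86-64 (PAGE_SIZE == 4096, compound_order == 9):
 *
 *	csize = PAGE_SIZE << compound_order(page);	// 4096 << 9 == 2MB
 */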

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;
	int idx = 0;

	do {
		for_each_hstate(h) {
			spin_lock(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(idx, h_cg, page);

			spin_unlock(&hugetlb_lock);
			idx++;
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}

int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	int ret = 0;
	struct res_counter *fail_res;
	struct hugetlb_cgroup *h_cg = NULL;
	unsigned long csize = nr_pages * PAGE_SIZE;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has fewer
	 * than 3 pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	ret = res_counter_charge(&h_cg->hugepage[idx], csize, &fail_res);
	css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}
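
/*
 * Editor's sketch of the expected caller pattern (hypothetical caller,
 * not part of this file; the real callers live in mm/hugetlb.c): charge
 * first, then bind the charge to a page under hugetlb_lock, or undo the
 * charge if no page could be obtained.
 *
 *	struct hugetlb_cgroup *h_cg;
 *
 *	if (hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg))
 *		return ERR_PTR(-ENOSPC);	// over the cgroup limit
 *	page = dequeue_huge_page_vma(...);	// may fail
 *	if (page)
 *		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
 *					     h_cg, page);
 *	else
 *		hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h),
 *					       h_cg);
 */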

/* Should be called with hugetlb_lock held */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	set_hugetlb_cgroup(page, h_cg);
	return;
}

/*
 * Should be called with hugetlb_lock held
 */
void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
				  struct page *page)
{
	struct hugetlb_cgroup *h_cg;
	unsigned long csize = nr_pages * PAGE_SIZE;

	if (hugetlb_cgroup_disabled())
		return;
	VM_BUG_ON(!spin_is_locked(&hugetlb_lock));
	h_cg = hugetlb_cgroup_from_page(page);
	if (unlikely(!h_cg))
		return;
	set_hugetlb_cgroup(page, NULL);
	res_counter_uncharge(&h_cg->hugepage[idx], csize);
	return;
}

void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	unsigned long csize = nr_pages * PAGE_SIZE;

	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	res_counter_uncharge(&h_cg->hugepage[idx], csize);
	return;
}

static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	int idx, name;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	idx = MEMFILE_IDX(cft->private);
	name = MEMFILE_ATTR(cft->private);

	return res_counter_read_u64(&h_cg->hugepage[idx], name);
}

static int hugetlb_cgroup_write(struct cgroup_subsys_state *css,
				struct cftype *cft, char *buffer)
{
	int idx, name, ret;
	unsigned long long val;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	idx = MEMFILE_IDX(cft->private);
	name = MEMFILE_ATTR(cft->private);

	switch (name) {
	case RES_LIMIT:
		if (hugetlb_cgroup_is_root(h_cg)) {
			/* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		/* This function does all the necessary parsing; reuse it */
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		ret = res_counter_set_limit(&h_cg->hugepage[idx], val);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int hugetlb_cgroup_reset(struct cgroup_subsys_state *css,
				unsigned int event)
{
	int idx, name, ret = 0;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	idx = MEMFILE_IDX(event);
	name = MEMFILE_ATTR(event);

	switch (name) {
	case RES_MAX_USAGE:
		res_counter_reset_max(&h_cg->hugepage[idx]);
		break;
	case RES_FAILCNT:
		res_counter_reset_failcnt(&h_cg->hugepage[idx]);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= (1UL << 30))
		snprintf(buf, size, "%luGB", hsize >> 30);
	else if (hsize >= (1UL << 20))
		snprintf(buf, size, "%luMB", hsize >> 20);
	else
		snprintf(buf, size, "%luKB", hsize >> 10);
	return buf;
}
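
/*
 * Editor's illustration (not in the original source): mem_fmt() renders a
 * huge page size as the label used in the cgroup file names, e.g.:
 *
 *	mem_fmt(buf, 32, 2UL << 20);	// -> "2MB"
 *	mem_fmt(buf, 32, 1UL << 30);	// -> "1GB"
 *	mem_fmt(buf, 32, 64UL << 10);	// -> "64KB"
 */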

static void __init __hugetlb_cgroup_file_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, 32, huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write_string = hugetlb_cgroup_write;

	/* Add the usage file */
	cft = &h->cgroup_files[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX usage file */
	cft = &h->cgroup_files[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->trigger = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->trigger = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files[4];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_cftypes(&hugetlb_cgrp_subsys, h->cgroup_files));

	return;
}

void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages. This is because we use
		 * page[2].lru.next for storing cgroup details.
		 */
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}
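
/*
 * Editor's illustration (not in the original source): with 2MB hugepages
 * configured, the loop above ends up creating these control files in
 * each hugetlb cgroup directory:
 *
 *	hugetlb.2MB.limit_in_bytes
 *	hugetlb.2MB.usage_in_bytes
 *	hugetlb.2MB.max_usage_in_bytes
 *	hugetlb.2MB.failcnt
 */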

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
	struct hugetlb_cgroup *h_cg;
	struct hstate *h = page_hstate(oldhpage);

	if (hugetlb_cgroup_disabled())
		return;

	VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
	spin_lock(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(oldhpage);
	set_hugetlb_cgroup(oldhpage, NULL);

	/* move the h_cg details to new cgroup */
	set_hugetlb_cgroup(newhpage, h_cg);
	list_move(&newhpage->lru, &h->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
	return;
}

struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc	= hugetlb_cgroup_css_alloc,
	.css_offline	= hugetlb_cgroup_css_offline,
	.css_free	= hugetlb_cgroup_css_free,
};
v6.2
/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * Cgroup v2
 * Copyright (C) 2019 Red Hat, Inc.
 * Author: Giuseppe Scrivano <gscrivan@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline struct page_counter *
__hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx,
				     bool rsvd)
{
	if (rsvd)
		return &h_cg->rsvd_hugepage[idx];
	return &h_cg->hugepage[idx];
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx)
{
	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, false);
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup_rsvd(struct hugetlb_cgroup *h_cg, int idx)
{
	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, true);
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(h_cg->css.parent);
}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (page_counter_read(
		    hugetlb_cgroup_counter_from_cgroup(h_cg, hstate_index(h))))
			return true;
	}
	return false;
}

static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
				struct hugetlb_cgroup *parent_h_cgroup)
{
	int idx;

	for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
		struct page_counter *fault_parent = NULL;
		struct page_counter *rsvd_parent = NULL;
		unsigned long limit;
		int ret;

		if (parent_h_cgroup) {
			fault_parent = hugetlb_cgroup_counter_from_cgroup(
				parent_h_cgroup, idx);
			rsvd_parent = hugetlb_cgroup_counter_from_cgroup_rsvd(
				parent_h_cgroup, idx);
		}
		page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup,
								     idx),
				  fault_parent);
		page_counter_init(
			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
			rsvd_parent);

		limit = round_down(PAGE_COUNTER_MAX,
				   pages_per_huge_page(&hstates[idx]));

		ret = page_counter_set_max(
			hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx),
			limit);
		VM_BUG_ON(ret);
		ret = page_counter_set_max(
			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
			limit);
		VM_BUG_ON(ret);
	}
}
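
/*
 * Editor's note (illustration, not in the original source): the default
 * limit is PAGE_COUNTER_MAX rounded down to a whole number of hugepages,
 * so the counter is never left with a fractional hugepage of headroom.
 * E.g. for 2MB hugepages (512 base pages each):
 *
 *	limit = round_down(PAGE_COUNTER_MAX, 512);	// largest multiple
 *							// of 512 <= max
 */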

static void hugetlb_cgroup_free(struct hugetlb_cgroup *h_cgroup)
{
	int node;

	for_each_node(node)
		kfree(h_cgroup->nodeinfo[node]);
	kfree(h_cgroup);
}

static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;
	int node;

	h_cgroup = kzalloc(struct_size(h_cgroup, nodeinfo, nr_node_ids),
			   GFP_KERNEL);
	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (!parent_h_cgroup)
		root_h_cgroup = h_cgroup;

	/*
	 * TODO: this routine can waste a lot of memory for nodes which will
	 * never be onlined. It would be better to use a memory hotplug
	 * callback function.
	 */
	for_each_node(node) {
		/* Set node_to_alloc to NUMA_NO_NODE for offline nodes. */
		int node_to_alloc =
			node_state(node, N_NORMAL_MEMORY) ? node : NUMA_NO_NODE;
		h_cgroup->nodeinfo[node] =
			kzalloc_node(sizeof(struct hugetlb_cgroup_per_node),
				     GFP_KERNEL, node_to_alloc);
		if (!h_cgroup->nodeinfo[node])
			goto fail_alloc_nodeinfo;
	}

	hugetlb_cgroup_init(h_cgroup, parent_h_cgroup);
	return &h_cgroup->css;

fail_alloc_nodeinfo:
	hugetlb_cgroup_free(h_cgroup);
	return ERR_PTR(-ENOMEM);
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	hugetlb_cgroup_free(hugetlb_cgroup_from_css(css));
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot get moved from the
 * active list or uncharged from the cgroup, so there is no need to take
 * a page reference or test whether the page is active here. This
 * function cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	unsigned int nr_pages;
	struct page_counter *counter;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);
	struct folio *folio = page_folio(page);

	page_hcg = hugetlb_cgroup_from_folio(folio);
	/*
	 * We can have pages on the active list that belong to no cgroup,
	 * i.e., hugepages with fewer than 3 pages. We can safely ignore
	 * those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	nr_pages = compound_nr(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		page_counter_charge(&parent->hugepage[idx], nr_pages);
	}
	counter = &h_cg->hugepage[idx];
	/* Take the pages off the local counter */
	page_counter_cancel(counter, nr_pages);

	set_hugetlb_cgroup(folio, parent);
out:
	return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;

	do {
		for_each_hstate(h) {
			spin_lock_irq(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(hstate_index(h), h_cg, page);

			spin_unlock_irq(&hugetlb_lock);
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}

static inline void hugetlb_event(struct hugetlb_cgroup *hugetlb, int idx,
				 enum hugetlb_memory_event event)
{
	atomic_long_inc(&hugetlb->events_local[idx][event]);
	cgroup_file_notify(&hugetlb->events_local_file[idx]);

	do {
		atomic_long_inc(&hugetlb->events[idx][event]);
		cgroup_file_notify(&hugetlb->events_file[idx]);
	} while ((hugetlb = parent_hugetlb_cgroup(hugetlb)) &&
		 !hugetlb_cgroup_is_root(hugetlb));
}
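
/*
 * Editor's note (illustration, not in the original source): events_local
 * is bumped only in the cgroup where the event happened, while events is
 * bumped there and in every ancestor up to (but not including) the root.
 * With a hierarchy root -> A -> B and a HUGETLB_MAX event in B:
 *
 *	B->events_local[idx][HUGETLB_MAX]	+1
 *	B->events[idx][HUGETLB_MAX]		+1
 *	A->events[idx][HUGETLB_MAX]		+1	// root is skipped
 */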

static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					  struct hugetlb_cgroup **ptr,
					  bool rsvd)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = NULL;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has fewer
	 * than 3 pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	if (!page_counter_try_charge(
		    __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
		    nr_pages, &counter)) {
		ret = -ENOMEM;
		hugetlb_event(h_cg, idx, HUGETLB_MAX);
		css_put(&h_cg->css);
		goto done;
	}
	/*
	 * Reservations take a reference to the css because they do not get
	 * reparented.
	 */
	if (!rsvd)
		css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}

int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false);
}

int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
				      struct hugetlb_cgroup **ptr)
{
	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true);
}

/* Should be called with hugetlb_lock held */
static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					   struct hugetlb_cgroup *h_cg,
					   struct folio *folio, bool rsvd)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	__set_hugetlb_cgroup(folio, h_cg, rsvd);
	if (!rsvd) {
		unsigned long usage =
			h_cg->nodeinfo[folio_nid(folio)]->usage[idx];
		/*
		 * This write is not atomic due to fetching usage and writing
		 * to it, but that's fine because we call this with
		 * hugetlb_lock held anyway.
		 */
		WRITE_ONCE(h_cg->nodeinfo[folio_nid(folio)]->usage[idx],
			   usage + nr_pages);
	}
}

void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	struct folio *folio = page_folio(page);

	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, false);
}

void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
				       struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	struct folio *folio = page_folio(page);

	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, true);
}

/*
 * Should be called with hugetlb_lock held
 */
static void __hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
					    struct folio *folio, bool rsvd)
{
	struct hugetlb_cgroup *h_cg;

	if (hugetlb_cgroup_disabled())
		return;
	lockdep_assert_held(&hugetlb_lock);
	h_cg = __hugetlb_cgroup_from_folio(folio, rsvd);
	if (unlikely(!h_cg))
		return;
	__set_hugetlb_cgroup(folio, NULL, rsvd);

	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
								   rsvd),
			      nr_pages);

	if (rsvd)
		css_put(&h_cg->css);
	else {
		unsigned long usage =
			h_cg->nodeinfo[folio_nid(folio)]->usage[idx];
		/*
		 * This write is not atomic due to fetching usage and writing
		 * to it, but that's fine because we call this with
		 * hugetlb_lock held anyway.
		 */
		WRITE_ONCE(h_cg->nodeinfo[folio_nid(folio)]->usage[idx],
			   usage - nr_pages);
	}
}

void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
				   struct folio *folio)
{
	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, false);
}

void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
					struct folio *folio)
{
	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, true);
}

static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					     struct hugetlb_cgroup *h_cg,
					     bool rsvd)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
								   rsvd),
			      nr_pages);

	if (rsvd)
		css_put(&h_cg->css);
}

void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, false);
}

void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
					 struct hugetlb_cgroup *h_cg)
{
	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, true);
}

void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,
				     unsigned long end)
{
	if (hugetlb_cgroup_disabled() || !resv || !resv->reservation_counter ||
	    !resv->css)
		return;

	page_counter_uncharge(resv->reservation_counter,
			      (end - start) * resv->pages_per_hpage);
	css_put(resv->css);
}

void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
					 struct file_region *rg,
					 unsigned long nr_pages,
					 bool region_del)
{
	if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)
		return;

	if (rg->reservation_counter && resv->pages_per_hpage &&
	    !resv->reservation_counter) {
		page_counter_uncharge(rg->reservation_counter,
				      nr_pages * resv->pages_per_hpage);
		/*
		 * Only do css_put(rg->css) when we delete the entire region
		 * because one file_region must hold exactly one css reference.
		 */
		if (region_del)
			css_put(rg->css);
	}
}

enum {
	RES_USAGE,
	RES_RSVD_USAGE,
	RES_LIMIT,
	RES_RSVD_LIMIT,
	RES_MAX_USAGE,
	RES_RSVD_MAX_USAGE,
	RES_FAILCNT,
	RES_RSVD_FAILCNT,
};

static int hugetlb_cgroup_read_numa_stat(struct seq_file *seq, void *dummy)
{
	int nid;
	struct cftype *cft = seq_cft(seq);
	int idx = MEMFILE_IDX(cft->private);
	bool legacy = MEMFILE_ATTR(cft->private);
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));
	struct cgroup_subsys_state *css;
	unsigned long usage;

	if (legacy) {
		/* Add up usage across all nodes for the non-hierarchical total. */
		usage = 0;
		for_each_node_state(nid, N_MEMORY)
			usage += READ_ONCE(h_cg->nodeinfo[nid]->usage[idx]);
		seq_printf(seq, "total=%lu", usage * PAGE_SIZE);

		/* Simply print the per-node usage for the non-hierarchical total. */
		for_each_node_state(nid, N_MEMORY)
			seq_printf(seq, " N%d=%lu", nid,
				   READ_ONCE(h_cg->nodeinfo[nid]->usage[idx]) *
					   PAGE_SIZE);
		seq_putc(seq, '\n');
	}

	/*
	 * The hierarchical total is pretty much the value recorded by the
	 * counter, so use that.
	 */
	seq_printf(seq, "%stotal=%lu", legacy ? "hierarchical_" : "",
		   page_counter_read(&h_cg->hugepage[idx]) * PAGE_SIZE);

	/*
	 * For each node, traverse the css tree to obtain the hierarchical
	 * node usage.
	 */
	for_each_node_state(nid, N_MEMORY) {
		usage = 0;
		rcu_read_lock();
		css_for_each_descendant_pre(css, &h_cg->css) {
			usage += READ_ONCE(hugetlb_cgroup_from_css(css)
						   ->nodeinfo[nid]
						   ->usage[idx]);
		}
		rcu_read_unlock();
		seq_printf(seq, " N%d=%lu", nid, usage * PAGE_SIZE);
	}

	seq_putc(seq, '\n');

	return 0;
}
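
/*
 * Editor's illustration (not in the original source): on cgroup v2 the
 * file prints one hierarchical line, while the legacy (v1) file prefixes
 * it with a non-hierarchical line. Example output for a two-node system
 * (byte values are made up):
 *
 *	v2  hugetlb.2MB.numa_stat:
 *		total=6291456 N0=4194304 N1=2097152
 *	v1  hugetlb.2MB.numa_stat:
 *		total=6291456 N0=4194304 N1=2097152
 *		hierarchical_total=8388608 N0=6291456 N1=2097152
 */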

static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	struct page_counter *counter;
	struct page_counter *rsvd_counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];
	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(cft->private)];

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_RSVD_USAGE:
		return (u64)page_counter_read(rsvd_counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->max * PAGE_SIZE;
	case RES_RSVD_LIMIT:
		return (u64)rsvd_counter->max * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_RSVD_MAX_USAGE:
		return (u64)rsvd_counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	case RES_RSVD_FAILCNT:
		return rsvd_counter->failcnt;
	default:
		BUG();
	}
}

static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
{
	int idx;
	u64 val;
	struct cftype *cft = seq_cft(seq);
	unsigned long limit;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);
	counter = &h_cg->hugepage[idx];

	limit = round_down(PAGE_COUNTER_MAX,
			   pages_per_huge_page(&hstates[idx]));

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_RSVD_USAGE:
		counter = &h_cg->rsvd_hugepage[idx];
		fallthrough;
	case RES_USAGE:
		val = (u64)page_counter_read(counter);
		seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	case RES_RSVD_LIMIT:
		counter = &h_cg->rsvd_hugepage[idx];
		fallthrough;
	case RES_LIMIT:
		val = (u64)counter->max;
		if (val == limit)
			seq_puts(seq, "max\n");
		else
			seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	default:
		BUG();
	}

	return 0;
}

static DEFINE_MUTEX(hugetlb_limit_mutex);

static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off,
				    const char *max)
{
	int ret, idx;
	unsigned long nr_pages;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));
	bool rsvd = false;

	if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
		return -EINVAL;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, max, &nr_pages);
	if (ret)
		return ret;

	idx = MEMFILE_IDX(of_cft(of)->private);
	nr_pages = round_down(nr_pages, pages_per_huge_page(&hstates[idx]));

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_RSVD_LIMIT:
		rsvd = true;
		fallthrough;
	case RES_LIMIT:
		mutex_lock(&hugetlb_limit_mutex);
		ret = page_counter_set_max(
			__hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
			nr_pages);
		mutex_unlock(&hugetlb_limit_mutex);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

static ssize_t hugetlb_cgroup_write_legacy(struct kernfs_open_file *of,
					   char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "-1");
}

static ssize_t hugetlb_cgroup_write_dfl(struct kernfs_open_file *of,
					char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "max");
}
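
/*
 * Editor's illustration (not in the original source): the only difference
 * between the two wrappers is the token that means "no limit" ("max" on
 * cgroup v2, "-1" on v1), and any byte value written is rounded down to a
 * whole number of hugepages. From a shell, assuming 2MB hugepages:
 *
 *	# echo max > hugetlb.2MB.max		(v2: remove the limit)
 *	# echo 100M > hugetlb.2MB.max		(v2: stays 100M, already a
 *						 multiple of 2MB)
 *	# echo 3M > hugetlb.2MB.max		(v2: rounded down to 2M)
 *	# echo -1 > hugetlb.2MB.limit_in_bytes	(v1: remove the limit)
 */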

static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret = 0;
	struct page_counter *counter, *rsvd_counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];
	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(of_cft(of)->private)];

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_RSVD_MAX_USAGE:
		page_counter_reset_watermark(rsvd_counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	case RES_RSVD_FAILCNT:
		rsvd_counter->failcnt = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= SZ_1G)
		snprintf(buf, size, "%luGB", hsize / SZ_1G);
	else if (hsize >= SZ_1M)
		snprintf(buf, size, "%luMB", hsize / SZ_1M);
	else
		snprintf(buf, size, "%luKB", hsize / SZ_1K);
	return buf;
}

static int __hugetlb_events_show(struct seq_file *seq, bool local)
{
	int idx;
	long max;
	struct cftype *cft = seq_cft(seq);
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);

	if (local)
		max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]);
	else
		max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]);

	seq_printf(seq, "max %lu\n", max);

	return 0;
}

static int hugetlb_events_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, false);
}

static int hugetlb_events_local_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, true);
}

static void __init __hugetlb_cgroup_file_dfl_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, sizeof(buf), huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_dfl[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the reservation limit file */
	cft = &h->cgroup_files_dfl[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current usage file */
	cft = &h->cgroup_files_dfl[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current reservation usage file */
	cft = &h->cgroup_files_dfl[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events file */
	cft = &h->cgroup_files_dfl[4];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events.local file */
	cft = &h->cgroup_files_dfl[5];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events.local", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_local_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup,
				    events_local_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the numa stat file */
	cft = &h->cgroup_files_dfl[6];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.numa_stat", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_cgroup_read_numa_stat;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_dfl[7];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_dfl_cftypes(&hugetlb_cgrp_subsys,
				       h->cgroup_files_dfl));
}
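
/*
 * Editor's illustration (not in the original source): with 2MB hugepages,
 * the function above creates the cgroup v2 interface files
 *
 *	hugetlb.2MB.max			hugetlb.2MB.rsvd.max
 *	hugetlb.2MB.current		hugetlb.2MB.rsvd.current
 *	hugetlb.2MB.events		hugetlb.2MB.events.local
 *	hugetlb.2MB.numa_stat
 *
 * while the legacy init below keeps the older *_in_bytes/failcnt names
 * for cgroup v1.
 */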

static void __init __hugetlb_cgroup_file_legacy_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, sizeof(buf), huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_legacy[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the reservation limit file */
	cft = &h->cgroup_files_legacy[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the usage file */
	cft = &h->cgroup_files_legacy[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the reservation usage file */
	cft = &h->cgroup_files_legacy[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX usage file */
	cft = &h->cgroup_files_legacy[4];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX reservation usage file */
	cft = &h->cgroup_files_legacy[5];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files_legacy[6];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the reservation failcnt file */
	cft = &h->cgroup_files_legacy[7];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the numa stat file */
	cft = &h->cgroup_files_legacy[8];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.numa_stat", buf);
	cft->private = MEMFILE_PRIVATE(idx, 1);
	cft->seq_show = hugetlb_cgroup_read_numa_stat;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_legacy[9];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
					  h->cgroup_files_legacy));
}

static void __init __hugetlb_cgroup_file_init(int idx)
{
	__hugetlb_cgroup_file_dfl_init(idx);
	__hugetlb_cgroup_file_legacy_init(idx);
}

void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages. This is because we use
		 * page[2].private for storing cgroup details.
		 */
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct folio *old_folio, struct folio *new_folio)
{
	struct hugetlb_cgroup *h_cg;
	struct hugetlb_cgroup *h_cg_rsvd;
	struct hstate *h = folio_hstate(old_folio);

	if (hugetlb_cgroup_disabled())
		return;

	spin_lock_irq(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_folio(old_folio);
	h_cg_rsvd = hugetlb_cgroup_from_folio_rsvd(old_folio);
	set_hugetlb_cgroup(old_folio, NULL);
	set_hugetlb_cgroup_rsvd(old_folio, NULL);

	/* move the h_cg details to new cgroup */
	set_hugetlb_cgroup(new_folio, h_cg);
	set_hugetlb_cgroup_rsvd(new_folio, h_cg_rsvd);
	list_move(&new_folio->lru, &h->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
	return;
}

static struct cftype hugetlb_files[] = {
	{} /* terminate */
};

struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc	= hugetlb_cgroup_css_alloc,
	.css_offline	= hugetlb_cgroup_css_offline,
	.css_free	= hugetlb_cgroup_css_free,
	.dfl_cftypes	= hugetlb_files,
	.legacy_cftypes	= hugetlb_files,
};