/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

struct hugetlb_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for hugepages from hugetlb.
	 */
	struct page_counter hugepage[HUGE_MAX_HSTATE];
};

#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
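
/*
 * Worked example (values illustrative): with idx == 1 and
 * val == RES_LIMIT (1, from the enum below), MEMFILE_PRIVATE(1, RES_LIMIT)
 * packs to 0x10001; MEMFILE_IDX(0x10001) recovers 1 and
 * MEMFILE_ATTR(0x10001) recovers RES_LIMIT.
 */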

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(h_cg->css.parent);
}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	int idx;

	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
		if (page_counter_read(&h_cg->hugepage[idx]))
			return true;
	}
	return false;
}

static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
				struct hugetlb_cgroup *parent_h_cgroup)
{
	int idx;

	for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
		struct page_counter *counter = &h_cgroup->hugepage[idx];
		struct page_counter *parent = NULL;
		unsigned long limit;
		int ret;

		if (parent_h_cgroup)
			parent = &parent_h_cgroup->hugepage[idx];
		page_counter_init(counter, parent);

		limit = round_down(PAGE_COUNTER_MAX,
				   1 << huge_page_order(&hstates[idx]));
		ret = page_counter_limit(counter, limit);
		VM_BUG_ON(ret);
	}
}
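
/*
 * For example, assuming 4KB base pages, a 2MB hstate has
 * huge_page_order() == 9, so the default limit becomes
 * round_down(PAGE_COUNTER_MAX, 512): the largest multiple of a whole
 * huge page that the counter can represent.
 */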

static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (!parent_h_cgroup)
		root_h_cgroup = h_cgroup;

	hugetlb_cgroup_init(h_cgroup, parent_h_cgroup);
	return &h_cgroup->css;
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = hugetlb_cgroup_from_css(css);
	kfree(h_cgroup);
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot be moved off the
 * active list or uncharged from the cgroup, so there is no need to take
 * a page reference or test whether the page is active here. This
 * function cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	unsigned int nr_pages;
	struct page_counter *counter;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);

	page_hcg = hugetlb_cgroup_from_page(page);
	/*
	 * We can have pages on the active list that belong to no cgroup,
	 * i.e. hugepages made up of fewer than 3 base pages. We can
	 * safely ignore those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	nr_pages = 1 << compound_order(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		page_counter_charge(&parent->hugepage[idx], nr_pages);
	}
	counter = &h_cg->hugepage[idx];
	/* Take the pages off the local counter */
	page_counter_cancel(counter, nr_pages);

	set_hugetlb_cgroup(page, parent);
out:
	return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;

	do {
		for_each_hstate(h) {
			spin_lock(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(hstate_index(h), h_cg, page);

			spin_unlock(&hugetlb_lock);
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}

int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = NULL;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has fewer
	 * than 3 pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget_online(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	if (!page_counter_try_charge(&h_cg->hugepage[idx], nr_pages, &counter))
		ret = -ENOMEM;
	css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}
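
/*
 * Sketch of the expected call pattern (values assumed: a 2MB hstate at
 * index 0 with 4KB base pages, so nr_pages == 512):
 *
 *	struct hugetlb_cgroup *h_cg;
 *
 *	if (!hugetlb_cgroup_charge_cgroup(0, 512, &h_cg)) {
 *		spin_lock(&hugetlb_lock);
 *		hugetlb_cgroup_commit_charge(0, 512, h_cg, page);
 *		spin_unlock(&hugetlb_lock);
 *	}
 *
 * On failure of a later step, hugetlb_cgroup_uncharge_cgroup() undoes a
 * charge that was never committed to a page.
 */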

/* Should be called with hugetlb_lock held */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	set_hugetlb_cgroup(page, h_cg);
	return;
}

/*
 * Should be called with hugetlb_lock held
 */
void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
				  struct page *page)
{
	struct hugetlb_cgroup *h_cg;

	if (hugetlb_cgroup_disabled())
		return;
	lockdep_assert_held(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(page);
	if (unlikely(!h_cg))
		return;
	set_hugetlb_cgroup(page, NULL);
	page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
	return;
}

void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
	return;
}

enum {
	RES_USAGE,
	RES_LIMIT,
	RES_MAX_USAGE,
	RES_FAILCNT,
};

static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->limit * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	default:
		BUG();
	}
}

static DEFINE_MUTEX(hugetlb_limit_mutex);

static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret, idx;
	unsigned long nr_pages;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
		return -EINVAL;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, "-1", &nr_pages);
	if (ret)
		return ret;

	idx = MEMFILE_IDX(of_cft(of)->private);
	nr_pages = round_down(nr_pages, 1 << huge_page_order(&hstates[idx]));

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_LIMIT:
		mutex_lock(&hugetlb_limit_mutex);
		ret = page_counter_limit(&h_cg->hugepage[idx], nr_pages);
		mutex_unlock(&hugetlb_limit_mutex);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}
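
/*
 * For example, assuming a 2MB hstate, writing any value to the
 * corresponding control files resets the counters:
 *
 *	echo 0 > hugetlb.2MB.max_usage_in_bytes		(reset watermark)
 *	echo 0 > hugetlb.2MB.failcnt			(clear failure count)
 */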

static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= (1UL << 30))
		snprintf(buf, size, "%luGB", hsize >> 30);
	else if (hsize >= (1UL << 20))
		snprintf(buf, size, "%luMB", hsize >> 20);
	else
		snprintf(buf, size, "%luKB", hsize >> 10);
	return buf;
}
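
/*
 * For example, mem_fmt(buf, 32, 2UL << 20) yields "2MB" and
 * mem_fmt(buf, 32, 1UL << 30) yields "1GB".
 */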

static void __init __hugetlb_cgroup_file_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, 32, huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write;

	/* Add the usage file */
	cft = &h->cgroup_files[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX usage file */
	cft = &h->cgroup_files[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files[4];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
					  h->cgroup_files));
}

void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages. This is because we use
		 * page[2].private for storing cgroup details.
		 */
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
	struct hugetlb_cgroup *h_cg;
	struct hstate *h = page_hstate(oldhpage);

	if (hugetlb_cgroup_disabled())
		return;

	VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
	spin_lock(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(oldhpage);
	set_hugetlb_cgroup(oldhpage, NULL);

	/* move the h_cg details to new cgroup */
	set_hugetlb_cgroup(newhpage, h_cg);
	list_move(&newhpage->lru, &h->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
	return;
}

struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc	= hugetlb_cgroup_css_alloc,
	.css_offline	= hugetlb_cgroup_css_offline,
	.css_free	= hugetlb_cgroup_css_free,
};

/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * Cgroup v2
 * Copyright (C) 2019 Red Hat, Inc.
 * Author: Giuseppe Scrivano <gscrivan@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
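
/*
 * Worked example (values illustrative): with idx == 1 and
 * val == RES_LIMIT (2, from the enum below), MEMFILE_PRIVATE(1, RES_LIMIT)
 * packs to 0x10002; MEMFILE_IDX(0x10002) recovers 1 and
 * MEMFILE_ATTR(0x10002) recovers RES_LIMIT.
 */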

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline struct page_counter *
__hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx,
				     bool rsvd)
{
	if (rsvd)
		return &h_cg->rsvd_hugepage[idx];
	return &h_cg->hugepage[idx];
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx)
{
	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, false);
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup_rsvd(struct hugetlb_cgroup *h_cg, int idx)
{
	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, true);
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(h_cg->css.parent);
}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (page_counter_read(
			    hugetlb_cgroup_counter_from_cgroup(h_cg, hstate_index(h))))
			return true;
	}
	return false;
}

static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
				struct hugetlb_cgroup *parent_h_cgroup)
{
	int idx;

	for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
		struct page_counter *fault_parent = NULL;
		struct page_counter *rsvd_parent = NULL;
		unsigned long limit;
		int ret;

		if (parent_h_cgroup) {
			fault_parent = hugetlb_cgroup_counter_from_cgroup(
				parent_h_cgroup, idx);
			rsvd_parent = hugetlb_cgroup_counter_from_cgroup_rsvd(
				parent_h_cgroup, idx);
		}
		page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup,
								     idx),
				  fault_parent);
		page_counter_init(
			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
			rsvd_parent);

		limit = round_down(PAGE_COUNTER_MAX,
				   pages_per_huge_page(&hstates[idx]));

		ret = page_counter_set_max(
			hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx),
			limit);
		VM_BUG_ON(ret);
		ret = page_counter_set_max(
			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
			limit);
		VM_BUG_ON(ret);
	}
}
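
/*
 * For example, assuming 4KB base pages, a 1GB hstate has
 * pages_per_huge_page() == 262144, so both the fault and the reservation
 * counter default to round_down(PAGE_COUNTER_MAX, 262144).
 */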

static void hugetlb_cgroup_free(struct hugetlb_cgroup *h_cgroup)
{
	int node;

	for_each_node(node)
		kfree(h_cgroup->nodeinfo[node]);
	kfree(h_cgroup);
}

static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;
	int node;

	h_cgroup = kzalloc(struct_size(h_cgroup, nodeinfo, nr_node_ids),
			   GFP_KERNEL);

	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (!parent_h_cgroup)
		root_h_cgroup = h_cgroup;

	/*
	 * TODO: this routine can waste a lot of memory on nodes that will
	 * never be onlined. It would be better to allocate from a memory
	 * hotplug callback instead.
	 */
	for_each_node(node) {
		/* Set node_to_alloc to NUMA_NO_NODE for offline nodes. */
		int node_to_alloc =
			node_state(node, N_NORMAL_MEMORY) ? node : NUMA_NO_NODE;
		h_cgroup->nodeinfo[node] =
			kzalloc_node(sizeof(struct hugetlb_cgroup_per_node),
				     GFP_KERNEL, node_to_alloc);
		if (!h_cgroup->nodeinfo[node])
			goto fail_alloc_nodeinfo;
	}

	hugetlb_cgroup_init(h_cgroup, parent_h_cgroup);
	return &h_cgroup->css;

fail_alloc_nodeinfo:
	hugetlb_cgroup_free(h_cgroup);
	return ERR_PTR(-ENOMEM);
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	hugetlb_cgroup_free(hugetlb_cgroup_from_css(css));
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot be moved off the
 * active list or uncharged from the cgroup, so there is no need to take
 * a page reference or test whether the page is active here. This
 * function cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	unsigned int nr_pages;
	struct page_counter *counter;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);
	struct folio *folio = page_folio(page);

	page_hcg = hugetlb_cgroup_from_folio(folio);
	/*
	 * We can have pages on the active list that belong to no cgroup,
	 * i.e. hugepages made up of fewer than 3 base pages. We can
	 * safely ignore those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	nr_pages = compound_nr(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		page_counter_charge(&parent->hugepage[idx], nr_pages);
	}
	counter = &h_cg->hugepage[idx];
	/* Take the pages off the local counter */
	page_counter_cancel(counter, nr_pages);

	set_hugetlb_cgroup(folio, parent);
out:
	return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;

	do {
		for_each_hstate(h) {
			spin_lock_irq(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(hstate_index(h), h_cg, page);

			spin_unlock_irq(&hugetlb_lock);
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}

static inline void hugetlb_event(struct hugetlb_cgroup *hugetlb, int idx,
				 enum hugetlb_memory_event event)
{
	atomic_long_inc(&hugetlb->events_local[idx][event]);
	cgroup_file_notify(&hugetlb->events_local_file[idx]);

	do {
		atomic_long_inc(&hugetlb->events[idx][event]);
		cgroup_file_notify(&hugetlb->events_file[idx]);
	} while ((hugetlb = parent_hugetlb_cgroup(hugetlb)) &&
		 !hugetlb_cgroup_is_root(hugetlb));
}
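
/*
 * So for a hierarchy root/a/b with the limit hit in b, events_local is
 * bumped only in b, while events is bumped in b and a; the root cgroup
 * keeps no event counts.
 */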

static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					  struct hugetlb_cgroup **ptr,
					  bool rsvd)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = NULL;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has fewer
	 * than 3 pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	if (!page_counter_try_charge(
		    __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
		    nr_pages, &counter)) {
		ret = -ENOMEM;
		hugetlb_event(h_cg, idx, HUGETLB_MAX);
		css_put(&h_cg->css);
		goto done;
	}
	/*
	 * Reservations take a reference to the css because they do not get
	 * reparented.
	 */
	if (!rsvd)
		css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}

int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false);
}

int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
				      struct hugetlb_cgroup **ptr)
{
	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true);
}
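
/*
 * Sketch of a reservation charge/uncharge pair (values assumed: a 2MB
 * hstate at index 0 with 4KB base pages, so nr_pages == 512):
 *
 *	struct hugetlb_cgroup *h_cg;
 *
 *	if (!hugetlb_cgroup_charge_cgroup_rsvd(0, 512, &h_cg))
 *		hugetlb_cgroup_uncharge_cgroup_rsvd(0, 512, h_cg);
 *
 * The rsvd charge pins h_cg's css; the matching rsvd uncharge drops it.
 */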

/* Should be called with hugetlb_lock held */
static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					   struct hugetlb_cgroup *h_cg,
					   struct folio *folio, bool rsvd)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	__set_hugetlb_cgroup(folio, h_cg, rsvd);
	if (!rsvd) {
		unsigned long usage =
			h_cg->nodeinfo[folio_nid(folio)]->usage[idx];
		/*
		 * This write is not atomic due to fetching usage and writing
		 * to it, but that's fine because we call this with
		 * hugetlb_lock held anyway.
		 */
		WRITE_ONCE(h_cg->nodeinfo[folio_nid(folio)]->usage[idx],
			   usage + nr_pages);
	}
}

void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	struct folio *folio = page_folio(page);

	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, false);
}

void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
				       struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	struct folio *folio = page_folio(page);

	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, true);
}

/*
 * Should be called with hugetlb_lock held
 */
static void __hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
					    struct folio *folio, bool rsvd)
{
	struct hugetlb_cgroup *h_cg;

	if (hugetlb_cgroup_disabled())
		return;
	lockdep_assert_held(&hugetlb_lock);
	h_cg = __hugetlb_cgroup_from_folio(folio, rsvd);
	if (unlikely(!h_cg))
		return;
	__set_hugetlb_cgroup(folio, NULL, rsvd);

	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
								   rsvd),
			      nr_pages);

	if (rsvd)
		css_put(&h_cg->css);
	else {
		unsigned long usage =
			h_cg->nodeinfo[folio_nid(folio)]->usage[idx];
		/*
		 * This write is not atomic due to fetching usage and writing
		 * to it, but that's fine because we call this with
		 * hugetlb_lock held anyway.
		 */
		WRITE_ONCE(h_cg->nodeinfo[folio_nid(folio)]->usage[idx],
			   usage - nr_pages);
	}
}

void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
				   struct folio *folio)
{
	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, false);
}

void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
					struct folio *folio)
{
	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, true);
}

static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					     struct hugetlb_cgroup *h_cg,
					     bool rsvd)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
								   rsvd),
			      nr_pages);

	if (rsvd)
		css_put(&h_cg->css);
}

void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, false);
}

void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
					 struct hugetlb_cgroup *h_cg)
{
	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, true);
}

void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,
				     unsigned long end)
{
	if (hugetlb_cgroup_disabled() || !resv || !resv->reservation_counter ||
	    !resv->css)
		return;

	page_counter_uncharge(resv->reservation_counter,
			      (end - start) * resv->pages_per_hpage);
	css_put(resv->css);
}

void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
					 struct file_region *rg,
					 unsigned long nr_pages,
					 bool region_del)
{
	if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)
		return;

	if (rg->reservation_counter && resv->pages_per_hpage &&
	    !resv->reservation_counter) {
		page_counter_uncharge(rg->reservation_counter,
				      nr_pages * resv->pages_per_hpage);
		/*
		 * Only do css_put(rg->css) when we delete the entire region
		 * because one file_region must hold exactly one css reference.
		 */
		if (region_del)
			css_put(rg->css);
	}
}

enum {
	RES_USAGE,
	RES_RSVD_USAGE,
	RES_LIMIT,
	RES_RSVD_LIMIT,
	RES_MAX_USAGE,
	RES_RSVD_MAX_USAGE,
	RES_FAILCNT,
	RES_RSVD_FAILCNT,
};

static int hugetlb_cgroup_read_numa_stat(struct seq_file *seq, void *dummy)
{
	int nid;
	struct cftype *cft = seq_cft(seq);
	int idx = MEMFILE_IDX(cft->private);
	bool legacy = MEMFILE_ATTR(cft->private);
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));
	struct cgroup_subsys_state *css;
	unsigned long usage;

	if (legacy) {
		/* Add up usage across all nodes for the non-hierarchical total. */
		usage = 0;
		for_each_node_state(nid, N_MEMORY)
			usage += READ_ONCE(h_cg->nodeinfo[nid]->usage[idx]);
		seq_printf(seq, "total=%lu", usage * PAGE_SIZE);

		/* Simply print the per-node usage for the non-hierarchical total. */
		for_each_node_state(nid, N_MEMORY)
			seq_printf(seq, " N%d=%lu", nid,
				   READ_ONCE(h_cg->nodeinfo[nid]->usage[idx]) *
					   PAGE_SIZE);
		seq_putc(seq, '\n');
	}

	/*
	 * The hierarchical total is pretty much the value recorded by the
	 * counter, so use that.
	 */
	seq_printf(seq, "%stotal=%lu", legacy ? "hierarchical_" : "",
		   page_counter_read(&h_cg->hugepage[idx]) * PAGE_SIZE);

	/*
	 * For each node, traverse the css tree to obtain the
	 * hierarchical node usage.
	 */
	for_each_node_state(nid, N_MEMORY) {
		usage = 0;
		rcu_read_lock();
		css_for_each_descendant_pre(css, &h_cg->css) {
			usage += READ_ONCE(hugetlb_cgroup_from_css(css)
						   ->nodeinfo[nid]
						   ->usage[idx]);
		}
		rcu_read_unlock();
		seq_printf(seq, " N%d=%lu", nid, usage * PAGE_SIZE);
	}

	seq_putc(seq, '\n');

	return 0;
}
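
/*
 * Sample output (values illustrative). On the default hierarchy:
 *
 *	total=2097152 N0=2097152 N1=0
 *
 * and on the legacy hierarchy, where the non-hierarchical breakdown
 * comes first:
 *
 *	total=2097152 N0=2097152 N1=0
 *	hierarchical_total=4194304 N0=2097152 N1=2097152
 */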

static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	struct page_counter *counter;
	struct page_counter *rsvd_counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];
	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(cft->private)];

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_RSVD_USAGE:
		return (u64)page_counter_read(rsvd_counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->max * PAGE_SIZE;
	case RES_RSVD_LIMIT:
		return (u64)rsvd_counter->max * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_RSVD_MAX_USAGE:
		return (u64)rsvd_counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	case RES_RSVD_FAILCNT:
		return rsvd_counter->failcnt;
	default:
		BUG();
	}
}

static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
{
	int idx;
	u64 val;
	struct cftype *cft = seq_cft(seq);
	unsigned long limit;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);
	counter = &h_cg->hugepage[idx];

	limit = round_down(PAGE_COUNTER_MAX,
			   pages_per_huge_page(&hstates[idx]));

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_RSVD_USAGE:
		counter = &h_cg->rsvd_hugepage[idx];
		fallthrough;
	case RES_USAGE:
		val = (u64)page_counter_read(counter);
		seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	case RES_RSVD_LIMIT:
		counter = &h_cg->rsvd_hugepage[idx];
		fallthrough;
	case RES_LIMIT:
		val = (u64)counter->max;
		if (val == limit)
			seq_puts(seq, "max\n");
		else
			seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	default:
		BUG();
	}

	return 0;
}
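
/*
 * Sample reads through this handler (values assumed, 2MB hstate):
 *
 *	hugetlb.2MB.max		-> "max" while still at the default limit
 *	hugetlb.2MB.max		-> "4194304" after a two-page limit is set
 *	hugetlb.2MB.current	-> "2097152" with one 2MB page charged
 */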

static DEFINE_MUTEX(hugetlb_limit_mutex);

static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off,
				    const char *max)
{
	int ret, idx;
	unsigned long nr_pages;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));
	bool rsvd = false;

	if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
		return -EINVAL;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, max, &nr_pages);
	if (ret)
		return ret;

	idx = MEMFILE_IDX(of_cft(of)->private);
	nr_pages = round_down(nr_pages, pages_per_huge_page(&hstates[idx]));

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_RSVD_LIMIT:
		rsvd = true;
		fallthrough;
	case RES_LIMIT:
		mutex_lock(&hugetlb_limit_mutex);
		ret = page_counter_set_max(
			__hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
			nr_pages);
		mutex_unlock(&hugetlb_limit_mutex);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

static ssize_t hugetlb_cgroup_write_legacy(struct kernfs_open_file *of,
					   char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "-1");
}

static ssize_t hugetlb_cgroup_write_dfl(struct kernfs_open_file *of,
					char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "max");
}

static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret = 0;
	struct page_counter *counter, *rsvd_counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];
	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(of_cft(of)->private)];

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_RSVD_MAX_USAGE:
		page_counter_reset_watermark(rsvd_counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	case RES_RSVD_FAILCNT:
		rsvd_counter->failcnt = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}
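
/*
 * For example, assuming a 2MB hstate on the legacy hierarchy:
 *
 *	echo 0 > hugetlb.2MB.max_usage_in_bytes		(reset watermark)
 *	echo 0 > hugetlb.2MB.rsvd.failcnt		(clear rsvd failcnt)
 */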

static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= SZ_1G)
		snprintf(buf, size, "%luGB", hsize / SZ_1G);
	else if (hsize >= SZ_1M)
		snprintf(buf, size, "%luMB", hsize / SZ_1M);
	else
		snprintf(buf, size, "%luKB", hsize / SZ_1K);
	return buf;
}
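
/*
 * For example, mem_fmt(buf, sizeof(buf), SZ_2M) yields "2MB" and
 * mem_fmt(buf, sizeof(buf), SZ_64K) yields "64KB".
 */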

static int __hugetlb_events_show(struct seq_file *seq, bool local)
{
	int idx;
	long max;
	struct cftype *cft = seq_cft(seq);
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);

	if (local)
		max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]);
	else
		max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]);

	seq_printf(seq, "max %lu\n", max);

	return 0;
}

static int hugetlb_events_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, false);
}

static int hugetlb_events_local_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, true);
}

static void __init __hugetlb_cgroup_file_dfl_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, sizeof(buf), huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_dfl[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the reservation limit file */
	cft = &h->cgroup_files_dfl[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current usage file */
	cft = &h->cgroup_files_dfl[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current reservation usage file */
	cft = &h->cgroup_files_dfl[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events file */
	cft = &h->cgroup_files_dfl[4];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events.local file */
	cft = &h->cgroup_files_dfl[5];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events.local", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_local_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup,
				    events_local_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the numa stat file */
	cft = &h->cgroup_files_dfl[6];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.numa_stat", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_cgroup_read_numa_stat;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_dfl[7];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_dfl_cftypes(&hugetlb_cgrp_subsys,
				       h->cgroup_files_dfl));
}

static void __init __hugetlb_cgroup_file_legacy_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, sizeof(buf), huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_legacy[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the reservation limit file */
	cft = &h->cgroup_files_legacy[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the usage file */
	cft = &h->cgroup_files_legacy[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the reservation usage file */
	cft = &h->cgroup_files_legacy[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX usage file */
	cft = &h->cgroup_files_legacy[4];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX reservation usage file */
	cft = &h->cgroup_files_legacy[5];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files_legacy[6];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the reservation failcnt file */
	cft = &h->cgroup_files_legacy[7];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the numa stat file */
	cft = &h->cgroup_files_legacy[8];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.numa_stat", buf);
	cft->private = MEMFILE_PRIVATE(idx, 1);
	cft->seq_show = hugetlb_cgroup_read_numa_stat;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_legacy[9];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
					  h->cgroup_files_legacy));
}

static void __init __hugetlb_cgroup_file_init(int idx)
{
	__hugetlb_cgroup_file_dfl_init(idx);
	__hugetlb_cgroup_file_legacy_init(idx);
}

void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages. This is because we use
		 * page[2].private for storing cgroup details.
		 */
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct folio *old_folio, struct folio *new_folio)
{
	struct hugetlb_cgroup *h_cg;
	struct hugetlb_cgroup *h_cg_rsvd;
	struct hstate *h = folio_hstate(old_folio);

	if (hugetlb_cgroup_disabled())
		return;

	spin_lock_irq(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_folio(old_folio);
	h_cg_rsvd = hugetlb_cgroup_from_folio_rsvd(old_folio);
	set_hugetlb_cgroup(old_folio, NULL);
	set_hugetlb_cgroup_rsvd(old_folio, NULL);

	/* move the h_cg details to new cgroup */
	set_hugetlb_cgroup(new_folio, h_cg);
	set_hugetlb_cgroup_rsvd(new_folio, h_cg_rsvd);
	list_move(&new_folio->lru, &h->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
	return;
}

static struct cftype hugetlb_files[] = {
	{} /* terminate */
};

struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc	= hugetlb_cgroup_css_alloc,
	.css_offline	= hugetlb_cgroup_css_offline,
	.css_free	= hugetlb_cgroup_css_free,
	.dfl_cftypes	= hugetlb_files,
	.legacy_cftypes	= hugetlb_files,
};