/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

struct hugetlb_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for hugepages from hugetlb.
	 */
	struct page_counter hugepage[HUGE_MAX_HSTATE];
};

#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
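
/*
 * Illustrative note (not in the original source): MEMFILE_PRIVATE()
 * packs the hstate index into the upper 16 bits of cft->private and
 * the RES_* attribute into the lower 16 bits. For example, assuming
 * the 2MB hstate sits at index 0, MEMFILE_PRIVATE(0, RES_LIMIT) is
 * (0 << 16) | 1 == 0x1, from which MEMFILE_IDX() recovers 0 and
 * MEMFILE_ATTR() recovers RES_LIMIT.
 */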

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(h_cg->css.parent);
}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	int idx;

	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
		if (page_counter_read(&h_cg->hugepage[idx]))
			return true;
	}
	return false;
}

static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;
	int idx;

	h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (parent_h_cgroup) {
		for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
			page_counter_init(&h_cgroup->hugepage[idx],
					  &parent_h_cgroup->hugepage[idx]);
	} else {
		root_h_cgroup = h_cgroup;
		for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
			page_counter_init(&h_cgroup->hugepage[idx], NULL);
	}
	return &h_cgroup->css;
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = hugetlb_cgroup_from_css(css);
	kfree(h_cgroup);
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot get moved from the
 * active list or uncharged from the cgroup, so there is no need to
 * take a page reference and test for page active here. This function
 * cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	unsigned int nr_pages;
	struct page_counter *counter;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);

	page_hcg = hugetlb_cgroup_from_page(page);
	/*
	 * We can have pages on the active list without any cgroup,
	 * i.e., hugepages with fewer than 3 base pages. We can safely
	 * ignore those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	nr_pages = 1 << compound_order(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		page_counter_charge(&parent->hugepage[idx], nr_pages);
	}
	counter = &h_cg->hugepage[idx];
	/* Take the pages off the local counter */
	page_counter_cancel(counter, nr_pages);

	set_hugetlb_cgroup(page, parent);
out:
	return;
}
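
/*
 * Illustrative note (not in the original source): page_counter_cancel()
 * only drops the child's local count (512 pages for a 2MB hugepage,
 * assuming a 4K base page); ancestor counters are left alone because
 * they already account for the page, which now simply belongs to the
 * parent cgroup.
 */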

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;
	int idx;

	do {
		idx = 0;
		for_each_hstate(h) {
			spin_lock(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(idx, h_cg, page);

			spin_unlock(&hugetlb_lock);
			idx++;
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}

int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = NULL;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has fewer
	 * than 3 pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget_online(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	if (!page_counter_try_charge(&h_cg->hugepage[idx], nr_pages, &counter))
		ret = -ENOMEM;
	css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}
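
/*
 * Illustrative sketch (not in the original source) of the charge
 * protocol as driven by a caller such as alloc_huge_page() in
 * mm/hugetlb.c; the local variable names here are hypothetical:
 *
 *	struct hugetlb_cgroup *h_cg;
 *	int idx = hstate_index(h);
 *	long nr = pages_per_huge_page(h);
 *
 *	ret = hugetlb_cgroup_charge_cgroup(idx, nr, &h_cg);
 *	if (ret)
 *		return ERR_PTR(-ENOSPC);
 *	...allocate the hugepage; if that fails:
 *		hugetlb_cgroup_uncharge_cgroup(idx, nr, h_cg);
 *	...on success, with hugetlb_lock held:
 *		hugetlb_cgroup_commit_charge(idx, nr, h_cg, page);
 */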

/* Should be called with hugetlb_lock held */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	set_hugetlb_cgroup(page, h_cg);
}

/*
 * Should be called with hugetlb_lock held
 */
void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
				  struct page *page)
{
	struct hugetlb_cgroup *h_cg;

	if (hugetlb_cgroup_disabled())
		return;
	lockdep_assert_held(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(page);
	if (unlikely(!h_cg))
		return;
	set_hugetlb_cgroup(page, NULL);
	page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
}

void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
}

enum {
	RES_USAGE,
	RES_LIMIT,
	RES_MAX_USAGE,
	RES_FAILCNT,
};

static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->limit * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	default:
		BUG();
	}
}

static DEFINE_MUTEX(hugetlb_limit_mutex);

static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret, idx;
	unsigned long nr_pages;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
		return -EINVAL;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, "-1", &nr_pages);
	if (ret)
		return ret;

	idx = MEMFILE_IDX(of_cft(of)->private);

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_LIMIT:
		mutex_lock(&hugetlb_limit_mutex);
		ret = page_counter_limit(&h_cg->hugepage[idx], nr_pages);
		mutex_unlock(&hugetlb_limit_mutex);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}
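
/*
 * Illustrative usage (not in the original source): with the legacy
 * hierarchy mounted at its conventional location, a limit is set by
 * writing a byte value, e.g.
 *
 *	echo 1G > /sys/fs/cgroup/hugetlb/<group>/hugetlb.2MB.limit_in_bytes
 *
 * page_counter_memparse() accepts suffixed values and "-1" for
 * unlimited; the value is converted to pages before being applied.
 */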

static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= (1UL << 30))
		snprintf(buf, size, "%luGB", hsize >> 30);
	else if (hsize >= (1UL << 20))
		snprintf(buf, size, "%luMB", hsize >> 20);
	else
		snprintf(buf, size, "%luKB", hsize >> 10);
	return buf;
}
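
/*
 * Illustrative note (not in the original source): mem_fmt() turns a
 * huge page size in bytes into the string used in the control file
 * names, e.g. 2097152 -> "2MB" and 1073741824 -> "1GB", giving files
 * such as hugetlb.2MB.limit_in_bytes.
 */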

static void __init __hugetlb_cgroup_file_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, 32, huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write;

	/* Add the usage file */
	cft = &h->cgroup_files[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX usage file */
	cft = &h->cgroup_files[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files[4];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
					  h->cgroup_files));
}
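
/*
 * Illustrative note (not in the original source): for the 2MB hstate
 * this registers four legacy control files:
 * hugetlb.2MB.limit_in_bytes, hugetlb.2MB.usage_in_bytes,
 * hugetlb.2MB.max_usage_in_bytes and hugetlb.2MB.failcnt.
 */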

void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages. This is because we use
		 * page[2].private for storing cgroup details.
		 */
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
	struct hugetlb_cgroup *h_cg;
	struct hstate *h = page_hstate(oldhpage);

	if (hugetlb_cgroup_disabled())
		return;

	VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
	spin_lock(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(oldhpage);
	set_hugetlb_cgroup(oldhpage, NULL);

	/* move the h_cg details to new cgroup */
	set_hugetlb_cgroup(newhpage, h_cg);
	list_move(&newhpage->lru, &h->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
}

struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc	= hugetlb_cgroup_css_alloc,
	.css_offline	= hugetlb_cgroup_css_offline,
	.css_free	= hugetlb_cgroup_css_free,
};

/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * Cgroup v2
 * Copyright (C) 2019 Red Hat, Inc.
 * Author: Giuseppe Scrivano <gscrivan@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline struct page_counter *
__hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx,
				     bool rsvd)
{
	if (rsvd)
		return &h_cg->rsvd_hugepage[idx];
	return &h_cg->hugepage[idx];
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx)
{
	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, false);
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup_rsvd(struct hugetlb_cgroup *h_cg, int idx)
{
	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, true);
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(h_cg->css.parent);
}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (page_counter_read(
			    hugetlb_cgroup_counter_from_cgroup(h_cg, hstate_index(h))))
			return true;
	}
	return false;
}

static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
				struct hugetlb_cgroup *parent_h_cgroup)
{
	int idx;

	for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
		struct page_counter *fault_parent = NULL;
		struct page_counter *rsvd_parent = NULL;
		unsigned long limit;
		int ret;

		if (parent_h_cgroup) {
			fault_parent = hugetlb_cgroup_counter_from_cgroup(
				parent_h_cgroup, idx);
			rsvd_parent = hugetlb_cgroup_counter_from_cgroup_rsvd(
				parent_h_cgroup, idx);
		}
		page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup,
								     idx),
				  fault_parent);
		page_counter_init(
			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
			rsvd_parent);

		limit = round_down(PAGE_COUNTER_MAX,
				   pages_per_huge_page(&hstates[idx]));

		ret = page_counter_set_max(
			hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx),
			limit);
		VM_BUG_ON(ret);
		ret = page_counter_set_max(
			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
			limit);
		VM_BUG_ON(ret);
	}
}
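
/*
 * Illustrative note (not in the original source): the round_down()
 * above keeps each counter's ceiling a whole number of hugepages.
 * For 2MB pages (512 base pages, assuming a 4K base page),
 * PAGE_COUNTER_MAX is rounded down to the nearest multiple of 512,
 * so a default limit can never cut a hugepage in half.
 */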

static void hugetlb_cgroup_free(struct hugetlb_cgroup *h_cgroup)
{
	int node;

	for_each_node(node)
		kfree(h_cgroup->nodeinfo[node]);
	kfree(h_cgroup);
}

static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;
	int node;

	h_cgroup = kzalloc(struct_size(h_cgroup, nodeinfo, nr_node_ids),
			   GFP_KERNEL);

	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (!parent_h_cgroup)
		root_h_cgroup = h_cgroup;

	/*
	 * TODO: this routine can waste a lot of memory for nodes which will
	 * never be onlined. It's better to use a memory hotplug callback
	 * function.
	 */
	for_each_node(node) {
		/* Set node_to_alloc to NUMA_NO_NODE for offline nodes. */
		int node_to_alloc =
			node_state(node, N_NORMAL_MEMORY) ? node : NUMA_NO_NODE;
		h_cgroup->nodeinfo[node] =
			kzalloc_node(sizeof(struct hugetlb_cgroup_per_node),
				     GFP_KERNEL, node_to_alloc);
		if (!h_cgroup->nodeinfo[node])
			goto fail_alloc_nodeinfo;
	}

	hugetlb_cgroup_init(h_cgroup, parent_h_cgroup);
	return &h_cgroup->css;

fail_alloc_nodeinfo:
	hugetlb_cgroup_free(h_cgroup);
	return ERR_PTR(-ENOMEM);
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	hugetlb_cgroup_free(hugetlb_cgroup_from_css(css));
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot get moved from the
 * active list or uncharged from the cgroup, so there is no need to
 * take a page reference and test for page active here. This function
 * cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	unsigned int nr_pages;
	struct page_counter *counter;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);
	struct folio *folio = page_folio(page);

	page_hcg = hugetlb_cgroup_from_folio(folio);
	/*
	 * We can have pages on the active list without any cgroup,
	 * i.e., hugepages with fewer than 3 base pages. We can safely
	 * ignore those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	nr_pages = compound_nr(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		page_counter_charge(&parent->hugepage[idx], nr_pages);
	}
	counter = &h_cg->hugepage[idx];
	/* Take the pages off the local counter */
	page_counter_cancel(counter, nr_pages);

	set_hugetlb_cgroup(folio, parent);
out:
	return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;

	do {
		for_each_hstate(h) {
			spin_lock_irq(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(hstate_index(h), h_cg, page);

			spin_unlock_irq(&hugetlb_lock);
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}

static inline void hugetlb_event(struct hugetlb_cgroup *hugetlb, int idx,
				 enum hugetlb_memory_event event)
{
	atomic_long_inc(&hugetlb->events_local[idx][event]);
	cgroup_file_notify(&hugetlb->events_local_file[idx]);

	do {
		atomic_long_inc(&hugetlb->events[idx][event]);
		cgroup_file_notify(&hugetlb->events_file[idx]);
	} while ((hugetlb = parent_hugetlb_cgroup(hugetlb)) &&
		 !hugetlb_cgroup_is_root(hugetlb));
}
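
/*
 * Illustrative note (not in the original source): events_local counts
 * only events that occurred in this cgroup, while the events counter
 * is also bumped in every ancestor up to (but not including) the
 * root, mirroring the local vs. hierarchical split of memory.events
 * in the memory controller.
 */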

static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					  struct hugetlb_cgroup **ptr,
					  bool rsvd)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = NULL;

	if (hugetlb_cgroup_disabled())
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	if (!page_counter_try_charge(
		    __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
		    nr_pages, &counter)) {
		ret = -ENOMEM;
		hugetlb_event(h_cg, idx, HUGETLB_MAX);
		css_put(&h_cg->css);
		goto done;
	}
	/*
	 * Reservations take a reference to the css because they do not get
	 * reparented.
	 */
	if (!rsvd)
		css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}

int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false);
}

int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
				      struct hugetlb_cgroup **ptr)
{
	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true);
}

/* Should be called with hugetlb_lock held */
static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					   struct hugetlb_cgroup *h_cg,
					   struct folio *folio, bool rsvd)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	__set_hugetlb_cgroup(folio, h_cg, rsvd);
	if (!rsvd) {
		unsigned long usage =
			h_cg->nodeinfo[folio_nid(folio)]->usage[idx];
		/*
		 * This write is not atomic due to fetching usage and writing
		 * to it, but that's fine because we call this with
		 * hugetlb_lock held anyway.
		 */
		WRITE_ONCE(h_cg->nodeinfo[folio_nid(folio)]->usage[idx],
			   usage + nr_pages);
	}
}

void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct folio *folio)
{
	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, false);
}

void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
				       struct hugetlb_cgroup *h_cg,
				       struct folio *folio)
{
	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, true);
}

/*
 * Should be called with hugetlb_lock held
 */
static void __hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
					    struct folio *folio, bool rsvd)
{
	struct hugetlb_cgroup *h_cg;

	if (hugetlb_cgroup_disabled())
		return;
	lockdep_assert_held(&hugetlb_lock);
	h_cg = __hugetlb_cgroup_from_folio(folio, rsvd);
	if (unlikely(!h_cg))
		return;
	__set_hugetlb_cgroup(folio, NULL, rsvd);

	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
								   rsvd),
			      nr_pages);

	if (rsvd)
		css_put(&h_cg->css);
	else {
		unsigned long usage =
			h_cg->nodeinfo[folio_nid(folio)]->usage[idx];
		/*
		 * This write is not atomic due to fetching usage and writing
		 * to it, but that's fine because we call this with
		 * hugetlb_lock held anyway.
		 */
		WRITE_ONCE(h_cg->nodeinfo[folio_nid(folio)]->usage[idx],
			   usage - nr_pages);
	}
}

void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
				   struct folio *folio)
{
	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, false);
}

void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
					struct folio *folio)
{
	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, true);
}

static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					     struct hugetlb_cgroup *h_cg,
					     bool rsvd)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
								   rsvd),
			      nr_pages);

	if (rsvd)
		css_put(&h_cg->css);
}

void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, false);
}

void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
					 struct hugetlb_cgroup *h_cg)
{
	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, true);
}

void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,
				     unsigned long end)
{
	if (hugetlb_cgroup_disabled() || !resv || !resv->reservation_counter ||
	    !resv->css)
		return;

	page_counter_uncharge(resv->reservation_counter,
			      (end - start) * resv->pages_per_hpage);
	css_put(resv->css);
}

void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
					 struct file_region *rg,
					 unsigned long nr_pages,
					 bool region_del)
{
	if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)
		return;

	if (rg->reservation_counter && resv->pages_per_hpage &&
	    !resv->reservation_counter) {
		page_counter_uncharge(rg->reservation_counter,
				      nr_pages * resv->pages_per_hpage);
		/*
		 * Only do css_put(rg->css) when we delete the entire region
		 * because one file_region must hold exactly one css reference.
		 */
		if (region_del)
			css_put(rg->css);
	}
}

enum {
	RES_USAGE,
	RES_RSVD_USAGE,
	RES_LIMIT,
	RES_RSVD_LIMIT,
	RES_MAX_USAGE,
	RES_RSVD_MAX_USAGE,
	RES_FAILCNT,
	RES_RSVD_FAILCNT,
};

static int hugetlb_cgroup_read_numa_stat(struct seq_file *seq, void *dummy)
{
	int nid;
	struct cftype *cft = seq_cft(seq);
	int idx = MEMFILE_IDX(cft->private);
	bool legacy = MEMFILE_ATTR(cft->private);
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));
	struct cgroup_subsys_state *css;
	unsigned long usage;

	if (legacy) {
		/* Add up usage across all nodes for the non-hierarchical total. */
		usage = 0;
		for_each_node_state(nid, N_MEMORY)
			usage += READ_ONCE(h_cg->nodeinfo[nid]->usage[idx]);
		seq_printf(seq, "total=%lu", usage * PAGE_SIZE);

		/* Simply print the per-node usage for the non-hierarchical total. */
		for_each_node_state(nid, N_MEMORY)
			seq_printf(seq, " N%d=%lu", nid,
				   READ_ONCE(h_cg->nodeinfo[nid]->usage[idx]) *
					   PAGE_SIZE);
		seq_putc(seq, '\n');
	}

	/*
	 * The hierarchical total is pretty much the value recorded by the
	 * counter, so use that.
	 */
	seq_printf(seq, "%stotal=%lu", legacy ? "hierarchical_" : "",
		   page_counter_read(&h_cg->hugepage[idx]) * PAGE_SIZE);

	/*
	 * For each node, traverse the css tree to obtain the hierarchical
	 * node usage.
	 */
	for_each_node_state(nid, N_MEMORY) {
		usage = 0;
		rcu_read_lock();
		css_for_each_descendant_pre(css, &h_cg->css) {
			usage += READ_ONCE(hugetlb_cgroup_from_css(css)
						   ->nodeinfo[nid]
						   ->usage[idx]);
		}
		rcu_read_unlock();
		seq_printf(seq, " N%d=%lu", nid, usage * PAGE_SIZE);
	}

	seq_putc(seq, '\n');

	return 0;
}
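
/*
 * Illustrative output (not in the original source): on cgroup v2 the
 * hugetlb.2MB.numa_stat file prints a single hierarchical line, e.g.
 *
 *	total=4194304 N0=4194304 N1=0
 *
 * while the legacy file first prints a non-hierarchical "total=..."
 * line and then labels the hierarchical one "hierarchical_total=...".
 */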

static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	struct page_counter *counter;
	struct page_counter *rsvd_counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];
	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(cft->private)];

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_RSVD_USAGE:
		return (u64)page_counter_read(rsvd_counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->max * PAGE_SIZE;
	case RES_RSVD_LIMIT:
		return (u64)rsvd_counter->max * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_RSVD_MAX_USAGE:
		return (u64)rsvd_counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	case RES_RSVD_FAILCNT:
		return rsvd_counter->failcnt;
	default:
		BUG();
	}
}

static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
{
	int idx;
	u64 val;
	struct cftype *cft = seq_cft(seq);
	unsigned long limit;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);
	counter = &h_cg->hugepage[idx];

	limit = round_down(PAGE_COUNTER_MAX,
			   pages_per_huge_page(&hstates[idx]));

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_RSVD_USAGE:
		counter = &h_cg->rsvd_hugepage[idx];
		fallthrough;
	case RES_USAGE:
		val = (u64)page_counter_read(counter);
		seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	case RES_RSVD_LIMIT:
		counter = &h_cg->rsvd_hugepage[idx];
		fallthrough;
	case RES_LIMIT:
		val = (u64)counter->max;
		if (val == limit)
			seq_puts(seq, "max\n");
		else
			seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	default:
		BUG();
	}

	return 0;
}
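
/*
 * Illustrative note (not in the original source): on cgroup v2 a
 * limit still at the rounded-down PAGE_COUNTER_MAX set by
 * hugetlb_cgroup_init() reads back as the literal string "max", so
 * `cat hugetlb.2MB.max` shows "max" until a finite byte limit is
 * written; the usage files always print a byte count.
 */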

static DEFINE_MUTEX(hugetlb_limit_mutex);

static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off,
				    const char *max)
{
	int ret, idx;
	unsigned long nr_pages;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));
	bool rsvd = false;

	if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
		return -EINVAL;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, max, &nr_pages);
	if (ret)
		return ret;

	idx = MEMFILE_IDX(of_cft(of)->private);
	nr_pages = round_down(nr_pages, pages_per_huge_page(&hstates[idx]));

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_RSVD_LIMIT:
		rsvd = true;
		fallthrough;
	case RES_LIMIT:
		mutex_lock(&hugetlb_limit_mutex);
		ret = page_counter_set_max(
			__hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
			nr_pages);
		mutex_unlock(&hugetlb_limit_mutex);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

static ssize_t hugetlb_cgroup_write_legacy(struct kernfs_open_file *of,
					   char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "-1");
}

static ssize_t hugetlb_cgroup_write_dfl(struct kernfs_open_file *of,
					char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "max");
}
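
/*
 * Illustrative usage (not in the original source): the two wrappers
 * only differ in the spelling of "unlimited" accepted by
 * page_counter_memparse(), e.g.
 *
 *	echo max > hugetlb.1GB.max		(cgroup v2)
 *	echo -1 > hugetlb.1GB.limit_in_bytes	(legacy)
 *
 * Finite values are rounded down to a whole number of hugepages
 * before page_counter_set_max() is called.
 */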

static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret = 0;
	struct page_counter *counter, *rsvd_counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];
	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(of_cft(of)->private)];

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_RSVD_MAX_USAGE:
		page_counter_reset_watermark(rsvd_counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	case RES_RSVD_FAILCNT:
		rsvd_counter->failcnt = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= SZ_1G)
		snprintf(buf, size, "%luGB", hsize / SZ_1G);
	else if (hsize >= SZ_1M)
		snprintf(buf, size, "%luMB", hsize / SZ_1M);
	else
		snprintf(buf, size, "%luKB", hsize / SZ_1K);
	return buf;
}

static int __hugetlb_events_show(struct seq_file *seq, bool local)
{
	int idx;
	long max;
	struct cftype *cft = seq_cft(seq);
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);

	if (local)
		max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]);
	else
		max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]);

	seq_printf(seq, "max %lu\n", max);

	return 0;
}

static int hugetlb_events_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, false);
}

static int hugetlb_events_local_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, true);
}

static void __init __hugetlb_cgroup_file_dfl_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, sizeof(buf), huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_dfl[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the reservation limit file */
	cft = &h->cgroup_files_dfl[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current usage file */
	cft = &h->cgroup_files_dfl[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current reservation usage file */
	cft = &h->cgroup_files_dfl[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events file */
	cft = &h->cgroup_files_dfl[4];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events.local file */
	cft = &h->cgroup_files_dfl[5];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events.local", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_local_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup,
				    events_local_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the numa stat file */
	cft = &h->cgroup_files_dfl[6];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.numa_stat", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_cgroup_read_numa_stat;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_dfl[7];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_dfl_cftypes(&hugetlb_cgrp_subsys,
				       h->cgroup_files_dfl));
}
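
/*
 * Illustrative note (not in the original source): for the 2MB hstate
 * the cgroup v2 hierarchy thus exposes hugetlb.2MB.max,
 * hugetlb.2MB.rsvd.max, hugetlb.2MB.current, hugetlb.2MB.rsvd.current,
 * hugetlb.2MB.events, hugetlb.2MB.events.local and
 * hugetlb.2MB.numa_stat, all hidden on the root cgroup.
 */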

static void __init __hugetlb_cgroup_file_legacy_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, sizeof(buf), huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_legacy[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the reservation limit file */
	cft = &h->cgroup_files_legacy[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the usage file */
	cft = &h->cgroup_files_legacy[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the reservation usage file */
	cft = &h->cgroup_files_legacy[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX usage file */
	cft = &h->cgroup_files_legacy[4];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX reservation usage file */
	cft = &h->cgroup_files_legacy[5];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files_legacy[6];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the reservation failcnt file */
	cft = &h->cgroup_files_legacy[7];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the numa stat file */
	cft = &h->cgroup_files_legacy[8];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.numa_stat", buf);
	cft->private = MEMFILE_PRIVATE(idx, 1);
	cft->seq_show = hugetlb_cgroup_read_numa_stat;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_legacy[9];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
					  h->cgroup_files_legacy));
}

static void __init __hugetlb_cgroup_file_init(int idx)
{
	__hugetlb_cgroup_file_dfl_init(idx);
	__hugetlb_cgroup_file_legacy_init(idx);
}

void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h)
		__hugetlb_cgroup_file_init(hstate_index(h));
}

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct folio *old_folio, struct folio *new_folio)
{
	struct hugetlb_cgroup *h_cg;
	struct hugetlb_cgroup *h_cg_rsvd;
	struct hstate *h = folio_hstate(old_folio);

	if (hugetlb_cgroup_disabled())
		return;

	spin_lock_irq(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_folio(old_folio);
	h_cg_rsvd = hugetlb_cgroup_from_folio_rsvd(old_folio);
	set_hugetlb_cgroup(old_folio, NULL);
	set_hugetlb_cgroup_rsvd(old_folio, NULL);

	/* move the h_cg details to new cgroup */
	set_hugetlb_cgroup(new_folio, h_cg);
	set_hugetlb_cgroup_rsvd(new_folio, h_cg_rsvd);
	list_move(&new_folio->lru, &h->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
}

static struct cftype hugetlb_files[] = {
	{} /* terminate */
};

struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc	= hugetlb_cgroup_css_alloc,
	.css_offline	= hugetlb_cgroup_css_offline,
	.css_free	= hugetlb_cgroup_css_free,
	.dfl_cftypes	= hugetlb_files,
	.legacy_cftypes	= hugetlb_files,
};