/*
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#ifndef _LINUX_HUGETLB_CGROUP_H
#define _LINUX_HUGETLB_CGROUP_H

#include <linux/mmdebug.h>

struct hugetlb_cgroup;
struct resv_map;
struct file_region;

/*
 * Minimum page order trackable by hugetlb cgroup.
 * At least 4 pages are necessary for all the tracking information.
 * The second tail page (hpage[2]) is the fault usage cgroup.
 * The third tail page (hpage[3]) is the reservation usage cgroup.
 */
#define HUGETLB_CGROUP_MIN_ORDER 2

#ifdef CONFIG_CGROUP_HUGETLB
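/*
 * Per-hstate memory events reported through the hugetlb cgroup event
 * files: HUGETLB_MAX counts charge attempts that were rejected because
 * the cgroup's hugetlb limit was reached.
 */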
enum hugetlb_memory_event {
	HUGETLB_MAX,
	HUGETLB_NR_MEMORY_EVENTS,
};

struct hugetlb_cgroup {
	struct cgroup_subsys_state css;

	/*
	 * the counter to account for hugepages from hugetlb.
	 */
	struct page_counter hugepage[HUGE_MAX_HSTATE];

	/*
	 * the counter to account for hugepage reservations from hugetlb.
	 */
	struct page_counter rsvd_hugepage[HUGE_MAX_HSTATE];

	atomic_long_t events[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
	atomic_long_t events_local[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];

	/* Handle for "hugetlb.events" */
	struct cgroup_file events_file[HUGE_MAX_HSTATE];

	/* Handle for "hugetlb.events.local" */
	struct cgroup_file events_local_file[HUGE_MAX_HSTATE];
};

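/*
 * Fetch the hugetlb_cgroup pointer stashed in a tail page of the huge
 * page: hpage[2] carries the fault-usage cgroup, hpage[3] the
 * reservation-usage cgroup. Returns NULL when the huge page is too
 * small (order < HUGETLB_CGROUP_MIN_ORDER) to carry this information.
 */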
static inline struct hugetlb_cgroup *
__hugetlb_cgroup_from_page(struct page *page, bool rsvd)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);

	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
		return NULL;
	if (rsvd)
		return (struct hugetlb_cgroup *)page[3].private;
	else
		return (struct hugetlb_cgroup *)page[2].private;
}

static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
{
	return __hugetlb_cgroup_from_page(page, false);
}

static inline struct hugetlb_cgroup *
hugetlb_cgroup_from_page_rsvd(struct page *page)
{
	return __hugetlb_cgroup_from_page(page, true);
}

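/*
 * Store the hugetlb_cgroup pointer in the matching tail page, or return
 * -1 if the huge page is too small to be tracked.
 */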
static inline int __set_hugetlb_cgroup(struct page *page,
				       struct hugetlb_cgroup *h_cg, bool rsvd)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);

	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
		return -1;
	if (rsvd)
		page[3].private = (unsigned long)h_cg;
	else
		page[2].private = (unsigned long)h_cg;
	return 0;
}

static inline int set_hugetlb_cgroup(struct page *page,
				     struct hugetlb_cgroup *h_cg)
{
	return __set_hugetlb_cgroup(page, h_cg, false);
}

static inline int set_hugetlb_cgroup_rsvd(struct page *page,
					  struct hugetlb_cgroup *h_cg)
{
	return __set_hugetlb_cgroup(page, h_cg, true);
}

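/*
 * True when the hugetlb controller is not enabled; callers check this
 * to skip the accounting paths entirely.
 */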
static inline bool hugetlb_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
}

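/*
 * Charging is done in two steps so the counter can be charged before a
 * specific huge page has been picked: hugetlb_cgroup_charge_cgroup()
 * charges nr_pages of hstate idx to the current task's cgroup and returns
 * that cgroup in *ptr, and hugetlb_cgroup_commit_charge() later binds the
 * charge to the allocated page. A charge that never gets committed is
 * backed out with hugetlb_cgroup_uncharge_cgroup(); a committed charge is
 * dropped with hugetlb_cgroup_uncharge_page() when the page is freed. The
 * _rsvd variants do the same against the reservation counters.
 *
 * Illustrative caller sequence (simplified; dequeue_or_alloc_huge_page()
 * is a placeholder, the real flow lives in mm/hugetlb.c):
 *
 *	struct hugetlb_cgroup *h_cg;
 *
 *	if (hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg))
 *		return ERR_PTR(-ENOSPC);
 *	page = dequeue_or_alloc_huge_page(h, vma, addr);
 *	if (!page) {
 *		hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
 *		return ERR_PTR(-ENOSPC);
 *	}
 *	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
 */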
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					struct hugetlb_cgroup **ptr);
extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
					     struct hugetlb_cgroup **ptr);
extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					 struct hugetlb_cgroup *h_cg,
					 struct page *page);
extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
					      struct hugetlb_cgroup *h_cg,
					      struct page *page);
extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
					 struct page *page);
extern void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
					      struct page *page);

extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					   struct hugetlb_cgroup *h_cg);
extern void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
						struct hugetlb_cgroup *h_cg);
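/*
 * Reservation charges tracked in a resv_map (rather than on a page) are
 * released with the helpers below when a reserved range or an individual
 * file region is dropped.
 */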
extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
					    unsigned long start,
					    unsigned long end);

extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
						struct file_region *rg,
						unsigned long nr_pages);

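/*
 * hugetlb_cgroup_file_init() registers the per-hstate cgroup control
 * files at boot; hugetlb_cgroup_migrate() transfers the cgroup
 * information from the old huge page to its replacement on migration.
 */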
extern void hugetlb_cgroup_file_init(void) __init;
extern void hugetlb_cgroup_migrate(struct page *oldhpage,
				   struct page *newhpage);

#else
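/*
 * CONFIG_CGROUP_HUGETLB is not set: provide no-op stubs so callers do
 * not need #ifdefs around the accounting calls.
 */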
static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
						       struct file_region *rg,
						       unsigned long nr_pages)
{
}

static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct hugetlb_cgroup *
hugetlb_cgroup_from_page_resv(struct page *page)
{
	return NULL;
}

static inline struct hugetlb_cgroup *
hugetlb_cgroup_from_page_rsvd(struct page *page)
{
	return NULL;
}

static inline int set_hugetlb_cgroup(struct page *page,
				     struct hugetlb_cgroup *h_cg)
{
	return 0;
}

static inline int set_hugetlb_cgroup_rsvd(struct page *page,
					  struct hugetlb_cgroup *h_cg)
{
	return 0;
}

static inline bool hugetlb_cgroup_disabled(void)
{
	return true;
}

static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					       struct hugetlb_cgroup **ptr)
{
	return 0;
}

static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx,
						    unsigned long nr_pages,
						    struct hugetlb_cgroup **ptr)
{
	return 0;
}

static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
						struct hugetlb_cgroup *h_cg,
						struct page *page)
{
}

static inline void
hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
}

static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
						struct page *page)
{
}

static inline void hugetlb_cgroup_uncharge_page_rsvd(int idx,
						     unsigned long nr_pages,
						     struct page *page)
{
}

static inline void hugetlb_cgroup_uncharge_cgroup(int idx,
						  unsigned long nr_pages,
						  struct hugetlb_cgroup *h_cg)
{
}

static inline void
hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
}

static inline void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
						    unsigned long start,
						    unsigned long end)
{
}

static inline void hugetlb_cgroup_file_init(void)
{
}

static inline void hugetlb_cgroup_migrate(struct page *oldhpage,
					  struct page *newhpage)
{
}

#endif /* CONFIG_CGROUP_HUGETLB */
#endif /* _LINUX_HUGETLB_CGROUP_H */