// SPDX-License-Identifier: GPL-2.0
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/kernel-page-flags.h>
#include <linux/uaccess.h>
#include "internal.h"

#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
#define KPMBITS (KPMSIZE * BITS_PER_BYTE)
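
/*
 * Each /proc/kpage* file behaves as a flat array of u64 records, one per
 * page frame: userspace seeks to pfn * KPMSIZE and reads in multiples of
 * KPMSIZE bytes.  Documentation/admin-guide/mm/pagemap.rst carries the
 * userspace-facing description of these interfaces.
 */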

static inline unsigned long get_max_dump_pfn(void)
{
#ifdef CONFIG_SPARSEMEM
	/*
	 * The memmap of early sections is completely populated and marked
	 * online even if max_pfn does not fall on a section boundary -
	 * pfn_to_online_page() will succeed on all pages. Allow inspecting
	 * these memmaps.
	 */
	return round_up(max_pfn, PAGES_PER_SECTION);
#else
	return max_pfn;
#endif
}

/* /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page count.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 pcount;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires identifying
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (!ppage || PageSlab(ppage) || page_has_type(ppage))
			pcount = 0;
		else
			pcount = page_mapcount(ppage);

		if (put_user(pcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

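	/* Advance the file position by the number of bytes actually copied. */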
	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecount_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecount_read,
};
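
/*
 * Illustrative userspace access pattern (a sketch, not part of this file):
 * the map count of a single frame can be read by seeking to pfn * KPMSIZE
 * and reading one u64.  "pfn" below is assumed to come from the caller,
 * e.g. decoded from /proc/pid/pagemap.
 *
 *	int fd = open("/proc/kpagecount", O_RDONLY);
 *	uint64_t pcount;
 *
 *	if (fd >= 0 && pread(fd, &pcount, sizeof(pcount),
 *			     (off_t)pfn * sizeof(pcount)) == sizeof(pcount))
 *		printf("pfn %lu mapped %llu times\n", pfn,
 *		       (unsigned long long)pcount);
 *
 * Reading requires root, as the entries are created with mode S_IRUSR.
 */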

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */

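/*
 * kpf_copy_bit() extracts kernel flag bit @kbit from @kflags and places
 * it at user-visible bit position @ubit; e.g. kpf_copy_bit(k, KPF_LOCKED,
 * PG_locked) reports PG_locked as bit KPF_LOCKED of the exported word.
 */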
static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}

u64 stable_page_flags(struct page *page)
{
	u64 k;
	u64 u;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;

	k = page->flags;
	u = 0;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 *
	 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
	 * simple test in page_mapped() is not enough.
	 */
	if (!PageSlab(page) && page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (PageAnon(page))
		u |= 1 << KPF_ANON;
	if (PageKsm(page))
		u |= 1 << KPF_KSM;

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (PageHead(page))
		u |= 1 << KPF_COMPOUND_HEAD;
	if (PageTail(page))
		u |= 1 << KPF_COMPOUND_TAIL;
	if (PageHuge(page))
		u |= 1 << KPF_HUGE;
	/*
	 * PageTransCompound can be true for non-huge compound pages (slab
	 * pages or pages allocated by drivers with __GFP_COMP) because it
	 * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
	 * to make sure a given page is a thp, not a non-huge compound page.
	 */
	else if (PageTransCompound(page)) {
		struct page *head = compound_head(page);

		if (PageLRU(head) || PageAnon(head))
			u |= 1 << KPF_THP;
		else if (is_huge_zero_page(head)) {
			u |= 1 << KPF_ZERO_PAGE;
			u |= 1 << KPF_THP;
		}
	} else if (is_zero_pfn(page_to_pfn(page)))
		u |= 1 << KPF_ZERO_PAGE;

	/*
	 * Caveats on high order pages: page->_refcount will only be set
	 * to -1 on the head page; SLUB/SLQB do the same for PG_slab;
	 * SLOB won't set PG_slab at all on compound pages.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;
	else if (page_count(page) == 0 && is_free_buddy_page(page))
		u |= 1 << KPF_BUDDY;

	if (PageOffline(page))
		u |= 1 << KPF_OFFLINE;
	if (PageTable(page))
		u |= 1 << KPF_PGTABLE;

	if (page_is_idle(page))
		u |= 1 << KPF_IDLE;

	u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);

	u |= kpf_copy_bit(k, KPF_SLAB, PG_slab);
	if (PageTail(page) && PageSlab(compound_head(page)))
		u |= 1 << KPF_SLAB;

	u |= kpf_copy_bit(k, KPF_ERROR, PG_error);
	u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK, PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU, PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED, PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim);

	if (PageSwapCache(page))
		u |= 1 << KPF_SWAPCACHE;
	u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED, PG_reserved);
	u |= kpf_copy_bit(k, KPF_MAPPEDTODISK, PG_mappedtodisk);
	u |= kpf_copy_bit(k, KPF_PRIVATE, PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2, PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE, PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH, PG_arch_1);
#ifdef CONFIG_ARCH_USES_PG_ARCH_X
	u |= kpf_copy_bit(k, KPF_ARCH_2, PG_arch_2);
	u |= kpf_copy_bit(k, KPF_ARCH_3, PG_arch_3);
#endif

	return u;
}
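
/*
 * A hedged userspace sketch for decoding one exported word: KPF_* bit
 * positions come from <linux/kernel-page-flags.h>, and "flags" below is
 * assumed to be a u64 read from /proc/kpageflags at offset pfn * KPMSIZE.
 *
 *	int is_thp = (flags >> KPF_THP) & 1;
 *	int is_hole = (flags >> KPF_NOPAGE) & 1;
 */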

static ssize_t kpageflags_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires identifying
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (put_user(stable_page_flags(ppage), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpageflags_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpageflags_read,
};
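
/*
 * The in-tree page-types utility (tools/vm/page-types.c; tools/mm/ in
 * newer trees) is the usual consumer of this interface.
 */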

#ifdef CONFIG_MEMCG
static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 ino;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires identifying
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (ppage)
			ino = page_cgroup_ino(ppage);
		else
			ino = 0;

		if (put_user(ino, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecgroup_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecgroup_read,
};
#endif /* CONFIG_MEMCG */

static int __init proc_page_init(void)
{
	proc_create("kpagecount", S_IRUSR, NULL, &kpagecount_proc_ops);
	proc_create("kpageflags", S_IRUSR, NULL, &kpageflags_proc_ops);
#ifdef CONFIG_MEMCG
	proc_create("kpagecgroup", S_IRUSR, NULL, &kpagecgroup_proc_ops);
#endif
	return 0;
}
fs_initcall(proc_page_init);