// SPDX-License-Identifier: GPL-2.0
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/kernel-page-flags.h>
#include <linux/uaccess.h>
#include "internal.h"

#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
#define KPMBITS (KPMSIZE * BITS_PER_BYTE)

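/*
 * Worked example of the offset <-> PFN mapping used by every handler
 * below: entries are KPMSIZE (8) bytes wide, so file offset 4096
 * addresses PFN 4096 / KPMSIZE = 512. Offsets or lengths that are not
 * multiples of KPMSIZE fail the KPMMASK alignment check with -EINVAL.
 */
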
/* /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page count.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
                               size_t count, loff_t *ppos)
{
        u64 __user *out = (u64 __user *)buf;
        struct page *ppage;
        unsigned long src = *ppos;
        unsigned long pfn;
        ssize_t ret = 0;
        u64 pcount;

        pfn = src / KPMSIZE;
        count = min_t(size_t, count, (max_pfn * KPMSIZE) - src);
        if (src & KPMMASK || count & KPMMASK)
                return -EINVAL;

        while (count > 0) {
                /*
                 * TODO: ZONE_DEVICE support requires identifying
                 * memmaps that were actually initialized.
                 */
                ppage = pfn_to_online_page(pfn);

                if (!ppage || PageSlab(ppage) || page_has_type(ppage))
                        pcount = 0;
                else
                        pcount = page_mapcount(ppage);

                if (put_user(pcount, out)) {
                        ret = -EFAULT;
                        break;
                }

                pfn++;
                out++;
                count -= KPMSIZE;

                cond_resched();
        }

        *ppos += (char __user *)out - buf;
        if (!ret)
                ret = (char __user *)out - buf;
        return ret;
}

static const struct file_operations proc_kpagecount_operations = {
        .llseek = mem_lseek,
        .read = kpagecount_read,
};

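/*
 * Illustrative userspace sketch (not part of this file; the pfn value
 * and error handling are assumed): fetching the map count of a single
 * PFN through the interface above. pread() at pfn * 8 follows directly
 * from the offset mapping in kpagecount_read().
 *
 *	int fd = open("/proc/kpagecount", O_RDONLY);
 *	u64 pcount;
 *
 *	if (fd >= 0 &&
 *	    pread(fd, &pcount, sizeof(pcount), pfn * sizeof(u64)) ==
 *	    sizeof(pcount))
 *		printf("pfn %lu: mapped %llu times\n", pfn,
 *		       (unsigned long long)pcount);
 */
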
/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */

static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
        return ((kflags >> kbit) & 1) << ubit;
}

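/*
 * For example, kpf_copy_bit(k, KPF_DIRTY, PG_dirty) extracts the
 * kernel-internal PG_dirty bit from k and re-emits it at the exported
 * KPF_DIRTY position, so userspace sees a stable bit layout even when
 * the internal PG_* numbering changes between releases.
 */
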
u64 stable_page_flags(struct page *page)
{
        u64 k;
        u64 u;

        /*
         * pseudo flag: KPF_NOPAGE
         * it differentiates a memory hole from a page with no flags
         */
        if (!page)
                return 1 << KPF_NOPAGE;

        k = page->flags;
        u = 0;

        /*
         * pseudo flags for the well known (anonymous) memory mapped pages
         *
         * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
         * simple test in page_mapped() is not enough.
         */
        if (!PageSlab(page) && page_mapped(page))
                u |= 1 << KPF_MMAP;
        if (PageAnon(page))
                u |= 1 << KPF_ANON;
        if (PageKsm(page))
                u |= 1 << KPF_KSM;

        /*
         * compound pages: export both head/tail info
         * they together define a compound page's start/end pos and order
         */
        if (PageHead(page))
                u |= 1 << KPF_COMPOUND_HEAD;
        if (PageTail(page))
                u |= 1 << KPF_COMPOUND_TAIL;
        if (PageHuge(page))
                u |= 1 << KPF_HUGE;
        /*
         * PageTransCompound can be true for non-huge compound pages (slab
         * pages or pages allocated by drivers with __GFP_COMP) because it
         * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
         * to make sure a given page is a thp, not a non-huge compound page.
         */
        else if (PageTransCompound(page)) {
                struct page *head = compound_head(page);

                if (PageLRU(head) || PageAnon(head))
                        u |= 1 << KPF_THP;
                else if (is_huge_zero_page(head)) {
                        u |= 1 << KPF_ZERO_PAGE;
                        u |= 1 << KPF_THP;
                }
        } else if (is_zero_pfn(page_to_pfn(page)))
                u |= 1 << KPF_ZERO_PAGE;

        /*
         * Caveats on high order pages: page->_refcount will only be set
         * -1 on the head page; SLUB/SLQB do the same for PG_slab;
         * SLOB won't set PG_slab at all on compound pages.
         */
        if (PageBuddy(page))
                u |= 1 << KPF_BUDDY;
        else if (page_count(page) == 0 && is_free_buddy_page(page))
                u |= 1 << KPF_BUDDY;

        if (PageOffline(page))
                u |= 1 << KPF_OFFLINE;
        if (PageTable(page))
                u |= 1 << KPF_PGTABLE;

        if (page_is_idle(page))
                u |= 1 << KPF_IDLE;

        u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);

        u |= kpf_copy_bit(k, KPF_SLAB, PG_slab);
        if (PageTail(page) && PageSlab(compound_head(page)))
                u |= 1 << KPF_SLAB;

        u |= kpf_copy_bit(k, KPF_ERROR, PG_error);
        u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);
        u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate);
        u |= kpf_copy_bit(k, KPF_WRITEBACK, PG_writeback);

        u |= kpf_copy_bit(k, KPF_LRU, PG_lru);
        u |= kpf_copy_bit(k, KPF_REFERENCED, PG_referenced);
        u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active);
        u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim);

        if (PageSwapCache(page))
                u |= 1 << KPF_SWAPCACHE;
        u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked);

        u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable);
        u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
        u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
        u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
#endif

        u |= kpf_copy_bit(k, KPF_RESERVED, PG_reserved);
        u |= kpf_copy_bit(k, KPF_MAPPEDTODISK, PG_mappedtodisk);
        u |= kpf_copy_bit(k, KPF_PRIVATE, PG_private);
        u |= kpf_copy_bit(k, KPF_PRIVATE_2, PG_private_2);
        u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE, PG_owner_priv_1);
        u |= kpf_copy_bit(k, KPF_ARCH, PG_arch_1);

        return u;
}

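/*
 * Illustrative userspace decode of one /proc/kpageflags entry (a
 * sketch; fd and pfn are assumed, and the KPF_* constants come from
 * the uapi header <linux/kernel-page-flags.h>):
 *
 *	u64 flags;
 *
 *	if (pread(fd, &flags, sizeof(flags), pfn * sizeof(u64)) ==
 *	    sizeof(flags)) {
 *		if (flags & (1ULL << KPF_NOPAGE))
 *			puts("memory hole");
 *		else if (flags & (1ULL << KPF_BUDDY))
 *			puts("free buddy page");
 *	}
 */
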
static ssize_t kpageflags_read(struct file *file, char __user *buf,
                               size_t count, loff_t *ppos)
{
        u64 __user *out = (u64 __user *)buf;
        struct page *ppage;
        unsigned long src = *ppos;
        unsigned long pfn;
        ssize_t ret = 0;

        pfn = src / KPMSIZE;
        count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
        if (src & KPMMASK || count & KPMMASK)
                return -EINVAL;

        while (count > 0) {
                /*
                 * TODO: ZONE_DEVICE support requires identifying
                 * memmaps that were actually initialized.
                 */
                ppage = pfn_to_online_page(pfn);

                if (put_user(stable_page_flags(ppage), out)) {
                        ret = -EFAULT;
                        break;
                }

                pfn++;
                out++;
                count -= KPMSIZE;

                cond_resched();
        }

        *ppos += (char __user *)out - buf;
        if (!ret)
                ret = (char __user *)out - buf;
        return ret;
}

static const struct file_operations proc_kpageflags_operations = {
        .llseek = mem_lseek,
        .read = kpageflags_read,
};

#ifdef CONFIG_MEMCG
static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
{
        u64 __user *out = (u64 __user *)buf;
        struct page *ppage;
        unsigned long src = *ppos;
        unsigned long pfn;
        ssize_t ret = 0;
        u64 ino;

        pfn = src / KPMSIZE;
        count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
        if (src & KPMMASK || count & KPMMASK)
                return -EINVAL;

        while (count > 0) {
                /*
                 * TODO: ZONE_DEVICE support requires identifying
                 * memmaps that were actually initialized.
                 */
                ppage = pfn_to_online_page(pfn);

                if (ppage)
                        ino = page_cgroup_ino(ppage);
                else
                        ino = 0;

                if (put_user(ino, out)) {
                        ret = -EFAULT;
                        break;
                }

                pfn++;
                out++;
                count -= KPMSIZE;

                cond_resched();
        }

        *ppos += (char __user *)out - buf;
        if (!ret)
                ret = (char __user *)out - buf;
        return ret;
}

static const struct file_operations proc_kpagecgroup_operations = {
        .llseek = mem_lseek,
        .read = kpagecgroup_read,
};
#endif /* CONFIG_MEMCG */
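
/*
 * Usage note (illustrative): the u64 read from /proc/kpagecgroup is
 * the inode number of the memory cgroup the page is charged to, so it
 * can be matched against st_ino of a cgroupfs directory. The path
 * below is a placeholder:
 *
 *	struct stat st;
 *
 *	if (!stat("/sys/fs/cgroup/memory/mygroup", &st) &&
 *	    st.st_ino == ino)
 *		puts("page charged to mygroup");
 */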

static int __init proc_page_init(void)
{
        proc_create("kpagecount", S_IRUSR, NULL, &proc_kpagecount_operations);
        proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations);
#ifdef CONFIG_MEMCG
        proc_create("kpagecgroup", S_IRUSR, NULL, &proc_kpagecgroup_operations);
#endif
        return 0;
}
fs_initcall(proc_page_init);
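
/*
 * End-to-end userspace sketch (illustrative; error handling elided,
 * and the virtual address addr is assumed): translate an address to a
 * PFN via /proc/self/pagemap, then index the files created above. Per
 * Documentation/admin-guide/mm/pagemap.rst, bit 63 of a pagemap entry
 * means "page present" and bits 0-54 hold the PFN; the PFN field
 * reads back as zero without CAP_SYS_ADMIN on recent kernels.
 *
 *	int pagemap = open("/proc/self/pagemap", O_RDONLY);
 *	int kpagecount = open("/proc/kpagecount", O_RDONLY);
 *	u64 entry, pcount;
 *	unsigned long vpn = (unsigned long)addr / getpagesize();
 *
 *	pread(pagemap, &entry, 8, vpn * 8);
 *	if (entry & (1ULL << 63)) {
 *		u64 pfn = entry & ((1ULL << 55) - 1);
 *
 *		pread(kpagecount, &pcount, 8, pfn * 8);
 *		printf("pfn %llu count %llu\n",
 *		       (unsigned long long)pfn,
 *		       (unsigned long long)pcount);
 *	}
 */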