fs/proc/page.c (v6.13.7)
// SPDX-License-Identifier: GPL-2.0
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/kernel-page-flags.h>
#include <linux/uaccess.h>
#include "internal.h"

#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
#define KPMBITS (KPMSIZE * BITS_PER_BYTE)

static inline unsigned long get_max_dump_pfn(void)
{
#ifdef CONFIG_SPARSEMEM
	/*
	 * The memmap of early sections is completely populated and marked
	 * online even if max_pfn does not fall on a section boundary -
	 * pfn_to_online_page() will succeed on all pages. Allow inspecting
	 * these memmaps.
	 */
	return round_up(max_pfn, PAGES_PER_SECTION);
#else
	return max_pfn;
#endif
}

/* /proc/kpagecount - an array exposing page mapcounts
 *
 * Each entry is a u64 representing the corresponding
 * physical page mapcount.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		struct page *page;
		u64 mapcount = 0;

		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		page = pfn_to_online_page(pfn);
		if (page)
			mapcount = folio_precise_page_mapcount(page_folio(page),
							       page);

		if (put_user(mapcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecount_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecount_read,
};
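
Usage sketch (not part of the kernel source): userspace indexes /proc/kpagecount by PFN, reading one u64 per page at offset pfn * sizeof(u64), mirroring the KPMSIZE arithmetic above. A minimal, hypothetical example, assuming root since the file is mode 0400; error handling is trimmed:

/* Hypothetical userspace example; not part of fs/proc/page.c. */
#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	unsigned long pfn;
	uint64_t mapcount;
	int fd;

	if (argc < 2)
		return 1;
	pfn = strtoul(argv[1], NULL, 0);
	fd = open("/proc/kpagecount", O_RDONLY);
	/* one 8-byte entry per PFN; pread() at pfn * entry size */
	if (fd < 0 || pread(fd, &mapcount, sizeof(mapcount),
			    pfn * sizeof(mapcount)) != sizeof(mapcount))
		return 1;
	printf("pfn %lu: mapcount %" PRIu64 "\n", pfn, mapcount);
	close(fd);
	return 0;
}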

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */

static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}

u64 stable_page_flags(const struct page *page)
{
	const struct folio *folio;
	unsigned long k;
	unsigned long mapping;
	bool is_anon;
	u64 u = 0;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;
	folio = page_folio(page);

	k = folio->flags;
	mapping = (unsigned long)folio->mapping;
	is_anon = mapping & PAGE_MAPPING_ANON;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 */
	if (page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (is_anon) {
		u |= 1 << KPF_ANON;
		if (mapping & PAGE_MAPPING_KSM)
			u |= 1 << KPF_KSM;
	}

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (page == &folio->page)
		u |= kpf_copy_bit(k, KPF_COMPOUND_HEAD, PG_head);
	else
		u |= 1 << KPF_COMPOUND_TAIL;
	if (folio_test_hugetlb(folio))
		u |= 1 << KPF_HUGE;
	else if (folio_test_large(folio) &&
	         folio_test_large_rmappable(folio)) {
		/* Note: we indicate any THPs here, not just PMD-sized ones */
		u |= 1 << KPF_THP;
	} else if (is_huge_zero_folio(folio)) {
		u |= 1 << KPF_ZERO_PAGE;
		u |= 1 << KPF_THP;
	} else if (is_zero_folio(folio)) {
		u |= 1 << KPF_ZERO_PAGE;
	}

	/*
	 * Caveats on high order pages: PG_buddy and PG_slab will only be set
	 * on the head page.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;
	else if (page_count(page) == 0 && is_free_buddy_page(page))
		u |= 1 << KPF_BUDDY;

	if (PageOffline(page))
		u |= 1 << KPF_OFFLINE;
	if (PageTable(page))
		u |= 1 << KPF_PGTABLE;
	if (folio_test_slab(folio))
		u |= 1 << KPF_SLAB;

#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	u |= kpf_copy_bit(k, KPF_IDLE,          PG_idle);
#else
	if (folio_test_idle(folio))
		u |= 1 << KPF_IDLE;
#endif

	u |= kpf_copy_bit(k, KPF_LOCKED,	PG_locked);
	u |= kpf_copy_bit(k, KPF_DIRTY,		PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE,	PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK,	PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU,		PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED,	PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE,	PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM,	PG_reclaim);

#define SWAPCACHE ((1 << PG_swapbacked) | (1 << PG_swapcache))
	if ((k & SWAPCACHE) == SWAPCACHE)
		u |= 1 << KPF_SWAPCACHE;
	u |= kpf_copy_bit(k, KPF_SWAPBACKED,	PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE,	PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED,	PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	if (u & (1 << KPF_HUGE))
		u |= kpf_copy_bit(k, KPF_HWPOISON,	PG_hwpoison);
	else
		u |= kpf_copy_bit(page->flags, KPF_HWPOISON,	PG_hwpoison);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED,	PG_reserved);
	u |= kpf_copy_bit(k, KPF_OWNER_2,	PG_owner_2);
	u |= kpf_copy_bit(k, KPF_PRIVATE,	PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2,	PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE,	PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH,		PG_arch_1);
#ifdef CONFIG_ARCH_USES_PG_ARCH_2
	u |= kpf_copy_bit(k, KPF_ARCH_2,	PG_arch_2);
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_3
	u |= kpf_copy_bit(k, KPF_ARCH_3,	PG_arch_3);
#endif

	return u;
};

static ssize_t kpageflags_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		struct page *page = pfn_to_online_page(pfn);

		if (put_user(stable_page_flags(page), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpageflags_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpageflags_read,
};
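
Decoding sketch (not part of the kernel source): the KPF_* bit numbers used above are exported to userspace in the uapi header <linux/kernel-page-flags.h>, so a consumer tests bits in the u64 it reads back. A hypothetical example, assuming root:

/* Hypothetical userspace example; not part of fs/proc/page.c. */
#include <fcntl.h>
#include <inttypes.h>
#include <linux/kernel-page-flags.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	unsigned long pfn;
	uint64_t f;
	int fd;

	if (argc < 2)
		return 1;
	pfn = strtoul(argv[1], NULL, 0);
	fd = open("/proc/kpageflags", O_RDONLY);
	if (fd < 0 || pread(fd, &f, sizeof(f), pfn * sizeof(f)) != sizeof(f))
		return 1;
	if (f & (1ULL << KPF_NOPAGE))		/* memory hole */
		printf("pfn %lu: no page\n", pfn);
	else
		printf("pfn %lu: %#" PRIx64 "%s%s%s\n", pfn, f,
		       f & (1ULL << KPF_HUGE)  ? " huge"  : "",
		       f & (1ULL << KPF_THP)   ? " thp"   : "",
		       f & (1ULL << KPF_BUDDY) ? " buddy" : "");
	close(fd);
	return 0;
}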

#ifdef CONFIG_MEMCG
static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 ino;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (ppage)
			ino = page_cgroup_ino(ppage);
		else
			ino = 0;

		if (put_user(ino, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecgroup_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecgroup_read,
};
#endif /* CONFIG_MEMCG */
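
Usage sketch (not part of the kernel source): each kpagecgroup entry is the inode number of the memory cgroup a page is charged to (0 for uncharged pages and holes), matching st_ino of the cgroup's directory under /sys/fs/cgroup. A hypothetical scanner that also shows the EOF convention: read() returns 0 once max_dump_pfn entries have been consumed.

/* Hypothetical userspace example; not part of fs/proc/page.c. */
#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	uint64_t want, buf[512];
	unsigned long matches = 0;
	ssize_t n, i;
	int fd;

	if (argc < 2)
		return 1;
	want = strtoull(argv[1], NULL, 0);	/* memcg inode to look for */
	fd = open("/proc/kpagecgroup", O_RDONLY);
	if (fd < 0)
		return 1;
	/* read() returns 0 at max_dump_pfn * KPMSIZE, i.e. end of array */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		for (i = 0; i < n / (ssize_t)sizeof(buf[0]); i++)
			matches += (buf[i] == want);
	printf("%lu pages charged to memcg inode %" PRIu64 "\n",
	       matches, want);
	close(fd);
	return 0;
}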

static int __init proc_page_init(void)
{
	proc_create("kpagecount", S_IRUSR, NULL, &kpagecount_proc_ops);
	proc_create("kpageflags", S_IRUSR, NULL, &kpageflags_proc_ops);
#ifdef CONFIG_MEMCG
	proc_create("kpagecgroup", S_IRUSR, NULL, &kpagecgroup_proc_ops);
#endif
	return 0;
}
fs_initcall(proc_page_init);
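
Because all three files are indexed by PFN, they are typically paired with /proc/<pid>/pagemap, which maps a virtual page to its PFN (bit 63 = present, bits 0-54 = PFN; see Documentation/admin-guide/mm/pagemap.rst). A hedged sketch, assuming root so the PFN field is not zeroed out:

/* Hypothetical userspace example; not part of fs/proc/page.c. */
#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <unistd.h>

/* Return the kpageflags word for the page backing @addr, or 0. */
static uint64_t kpf_of(const void *addr)
{
	long psz = sysconf(_SC_PAGESIZE);
	uint64_t ent = 0, flags = 0;
	int pm = open("/proc/self/pagemap", O_RDONLY);
	int kf = open("/proc/kpageflags", O_RDONLY);

	if (pm >= 0 && kf >= 0 &&
	    pread(pm, &ent, sizeof(ent),
		  (uintptr_t)addr / psz * sizeof(ent)) == sizeof(ent) &&
	    (ent & (1ULL << 63)))	/* bit 63: page present */
		pread(kf, &flags, sizeof(flags),
		      (ent & ((1ULL << 55) - 1)) * sizeof(flags));
	if (pm >= 0)
		close(pm);
	if (kf >= 0)
		close(kf);
	return flags;
}

int main(void)
{
	int x = 42;

	printf("kpageflags of &x: %#" PRIx64 "\n", kpf_of(&x));
	return 0;
}
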
fs/proc/page.c (v6.9.4)
// SPDX-License-Identifier: GPL-2.0
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/kernel-page-flags.h>
#include <linux/uaccess.h>
#include "internal.h"

#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
#define KPMBITS (KPMSIZE * BITS_PER_BYTE)

static inline unsigned long get_max_dump_pfn(void)
{
#ifdef CONFIG_SPARSEMEM
	/*
	 * The memmap of early sections is completely populated and marked
	 * online even if max_pfn does not fall on a section boundary -
	 * pfn_to_online_page() will succeed on all pages. Allow inspecting
	 * these memmaps.
	 */
	return round_up(max_pfn, PAGES_PER_SECTION);
#else
	return max_pfn;
#endif
}

/* /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page count.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 pcount;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (!ppage)
			pcount = 0;
		else
			pcount = page_mapcount(ppage);

		if (put_user(pcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecount_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecount_read,
};
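
One caveat worth a sketch: in this version kpagecount reports page_mapcount(), and a value of 0 can mean either "page not mapped" or "no page at this PFN at all", so consumers disambiguate by cross-checking KPF_NOPAGE in /proc/kpageflags. Hypothetical example, assuming root:

/* Hypothetical userspace example; not part of fs/proc/page.c. */
#include <fcntl.h>
#include <inttypes.h>
#include <linux/kernel-page-flags.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	unsigned long pfn;
	uint64_t cnt = 0, kpf = 0;
	int cfd, ffd;

	if (argc < 2)
		return 1;
	pfn = strtoul(argv[1], NULL, 0);
	cfd = open("/proc/kpagecount", O_RDONLY);
	ffd = open("/proc/kpageflags", O_RDONLY);
	if (cfd < 0 || ffd < 0)
		return 1;
	pread(cfd, &cnt, sizeof(cnt), pfn * sizeof(cnt));
	pread(ffd, &kpf, sizeof(kpf), pfn * sizeof(kpf));
	if (kpf & (1ULL << KPF_NOPAGE))
		printf("pfn %lu: memory hole\n", pfn);
	else
		printf("pfn %lu: mapcount %" PRIu64 "\n", pfn, cnt);
	return 0;
}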

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */

static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}

u64 stable_page_flags(struct page *page)
{
	u64 k;
	u64 u;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;

	k = page->flags;
	u = 0;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 */
	if (page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (PageAnon(page))
		u |= 1 << KPF_ANON;
	if (PageKsm(page))
		u |= 1 << KPF_KSM;

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (PageHead(page))
		u |= 1 << KPF_COMPOUND_HEAD;
	if (PageTail(page))
		u |= 1 << KPF_COMPOUND_TAIL;
	if (PageHuge(page))
		u |= 1 << KPF_HUGE;
	/*
	 * PageTransCompound can be true for non-huge compound pages (slab
	 * pages or pages allocated by drivers with __GFP_COMP) because it
	 * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
	 * to make sure a given page is a thp, not a non-huge compound page.
	 */
	else if (PageTransCompound(page)) {
		struct page *head = compound_head(page);

		if (PageLRU(head) || PageAnon(head))
			u |= 1 << KPF_THP;
		else if (is_huge_zero_page(head)) {
			u |= 1 << KPF_ZERO_PAGE;
			u |= 1 << KPF_THP;
		}
	} else if (is_zero_pfn(page_to_pfn(page)))
		u |= 1 << KPF_ZERO_PAGE;


	/*
	 * Caveats on high order pages: PG_buddy and PG_slab will only be set
	 * on the head page.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;
	else if (page_count(page) == 0 && is_free_buddy_page(page))
		u |= 1 << KPF_BUDDY;

	if (PageOffline(page))
		u |= 1 << KPF_OFFLINE;
	if (PageTable(page))
		u |= 1 << KPF_PGTABLE;

	if (page_is_idle(page))
		u |= 1 << KPF_IDLE;

	u |= kpf_copy_bit(k, KPF_LOCKED,	PG_locked);

	u |= kpf_copy_bit(k, KPF_SLAB,		PG_slab);
	if (PageTail(page) && PageSlab(page))
		u |= 1 << KPF_SLAB;

	u |= kpf_copy_bit(k, KPF_ERROR,		PG_error);
	u |= kpf_copy_bit(k, KPF_DIRTY,		PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE,	PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK,	PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU,		PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED,	PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE,	PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM,	PG_reclaim);

	if (PageSwapCache(page))
		u |= 1 << KPF_SWAPCACHE;
	u |= kpf_copy_bit(k, KPF_SWAPBACKED,	PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE,	PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED,	PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	u |= kpf_copy_bit(k, KPF_HWPOISON,	PG_hwpoison);
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	u |= kpf_copy_bit(k, KPF_UNCACHED,	PG_uncached);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED,	PG_reserved);
	u |= kpf_copy_bit(k, KPF_MAPPEDTODISK,	PG_mappedtodisk);
	u |= kpf_copy_bit(k, KPF_PRIVATE,	PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2,	PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE,	PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH,		PG_arch_1);
#ifdef CONFIG_ARCH_USES_PG_ARCH_X
	u |= kpf_copy_bit(k, KPF_ARCH_2,	PG_arch_2);
	u |= kpf_copy_bit(k, KPF_ARCH_3,	PG_arch_3);
#endif

	return u;
};

static ssize_t kpageflags_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (put_user(stable_page_flags(ppage), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpageflags_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpageflags_read,
};

#ifdef CONFIG_MEMCG
static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 ino;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (ppage)
			ino = page_cgroup_ino(ppage);
		else
			ino = 0;

		if (put_user(ino, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct proc_ops kpagecgroup_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecgroup_read,
};
#endif /* CONFIG_MEMCG */

static int __init proc_page_init(void)
{
	proc_create("kpagecount", S_IRUSR, NULL, &kpagecount_proc_ops);
	proc_create("kpageflags", S_IRUSR, NULL, &kpageflags_proc_ops);
#ifdef CONFIG_MEMCG
	proc_create("kpagecgroup", S_IRUSR, NULL, &kpagecgroup_proc_ops);
#endif
	return 0;
}
fs_initcall(proc_page_init);