v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2019 SiFive
  4 */
  5
  6#include <linux/efi.h>
  7#include <linux/init.h>
  8#include <linux/debugfs.h>
  9#include <linux/seq_file.h>
 10#include <linux/ptdump.h>
 11
 12#include <asm/ptdump.h>
 13#include <linux/pgtable.h>
 14#include <asm/kasan.h>
 15
 16#define pt_dump_seq_printf(m, fmt, args...)	\
 17({						\
 18	if (m)					\
 19		seq_printf(m, fmt, ##args);	\
 20})
 21
 22#define pt_dump_seq_puts(m, fmt)	\
 23({					\
 24	if (m)				\
 25		seq_printf(m, fmt);	\
 26})
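/*
 * The NULL check in these macros lets ptdump_check_wx() reuse note_page()
 * without a seq_file, i.e. walk the page tables for the W+X check while
 * producing no output.
 */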
 27
 28/*
 29 * The page dumper groups page table entries of the same type into a single
 30 * description. It uses pg_state to track the range information while
 31 * iterating over the pte entries. When the continuity is broken it then
 32 * dumps out a description of the range.
 33 */
 34struct pg_state {
 35	struct ptdump_state ptdump;
 36	struct seq_file *seq;
 37	const struct addr_marker *marker;
 38	unsigned long start_address;
 39	unsigned long start_pa;
 40	unsigned long last_pa;
 41	int level;
 42	u64 current_prot;
 43	bool check_wx;
 44	unsigned long wx_pages;
 45};
 46
 47/* Address marker */
 48struct addr_marker {
 49	unsigned long start_address;
 50	const char *name;
 51};
 52
 53/* Private information for debugfs */
 54struct ptd_mm_info {
 55	struct mm_struct		*mm;
 56	const struct addr_marker	*markers;
 57	unsigned long base_addr;
 58	unsigned long end;
 59};
 60
 61enum address_markers_idx {
 62#ifdef CONFIG_KASAN
 63	KASAN_SHADOW_START_NR,
 64	KASAN_SHADOW_END_NR,
 65#endif
 66	FIXMAP_START_NR,
 67	FIXMAP_END_NR,
 68	PCI_IO_START_NR,
 69	PCI_IO_END_NR,
 70#ifdef CONFIG_SPARSEMEM_VMEMMAP
 71	VMEMMAP_START_NR,
 72	VMEMMAP_END_NR,
 73#endif
 74	VMALLOC_START_NR,
 75	VMALLOC_END_NR,
 76	PAGE_OFFSET_NR,
 77#ifdef CONFIG_64BIT
 78	MODULES_MAPPING_NR,
 79	KERNEL_MAPPING_NR,
 80#endif
 81	END_OF_SPACE_NR
 82};
 83
 84static struct addr_marker address_markers[] = {
 85#ifdef CONFIG_KASAN
 86	{0, "Kasan shadow start"},
 87	{0, "Kasan shadow end"},
 88#endif
 89	{0, "Fixmap start"},
 90	{0, "Fixmap end"},
 91	{0, "PCI I/O start"},
 92	{0, "PCI I/O end"},
 93#ifdef CONFIG_SPARSEMEM_VMEMMAP
 94	{0, "vmemmap start"},
 95	{0, "vmemmap end"},
 96#endif
 97	{0, "vmalloc() area"},
 98	{0, "vmalloc() end"},
 99	{0, "Linear mapping"},
100#ifdef CONFIG_64BIT
101	{0, "Modules/BPF mapping"},
102	{0, "Kernel mapping"},
103#endif
104	{-1, NULL},
105};
106
107static struct ptd_mm_info kernel_ptd_info = {
108	.mm		= &init_mm,
109	.markers	= address_markers,
110	.base_addr	= 0,
111	.end		= ULONG_MAX,
112};
113
114#ifdef CONFIG_EFI
115static struct addr_marker efi_addr_markers[] = {
116		{ 0,		"UEFI runtime start" },
117		{ SZ_1G,	"UEFI runtime end" },
118		{ -1,		NULL }
119};
120
121static struct ptd_mm_info efi_ptd_info = {
122	.mm		= &efi_mm,
123	.markers	= efi_addr_markers,
124	.base_addr	= 0,
125	.end		= SZ_2G,
126};
127#endif
128
129/* Page Table Entry */
130struct prot_bits {
131	u64 mask;
132	u64 val;
133	const char *set;
134	const char *clear;
135};
136
137static const struct prot_bits pte_bits[] = {
138	{
139		.mask = _PAGE_SOFT,
140		.val = _PAGE_SOFT,
141		.set = "RSW",
142		.clear = "   ",
143	}, {
144		.mask = _PAGE_DIRTY,
145		.val = _PAGE_DIRTY,
146		.set = "D",
147		.clear = ".",
148	}, {
149		.mask = _PAGE_ACCESSED,
150		.val = _PAGE_ACCESSED,
151		.set = "A",
152		.clear = ".",
153	}, {
154		.mask = _PAGE_GLOBAL,
155		.val = _PAGE_GLOBAL,
156		.set = "G",
157		.clear = ".",
158	}, {
159		.mask = _PAGE_USER,
160		.val = _PAGE_USER,
161		.set = "U",
162		.clear = ".",
163	}, {
164		.mask = _PAGE_EXEC,
165		.val = _PAGE_EXEC,
166		.set = "X",
167		.clear = ".",
168	}, {
169		.mask = _PAGE_WRITE,
170		.val = _PAGE_WRITE,
171		.set = "W",
172		.clear = ".",
173	}, {
174		.mask = _PAGE_READ,
175		.val = _PAGE_READ,
176		.set = "R",
177		.clear = ".",
178	}, {
179		.mask = _PAGE_PRESENT,
180		.val = _PAGE_PRESENT,
181		.set = "V",
182		.clear = ".",
183	}
184};
185
186/* Page Level */
187struct pg_level {
188	const char *name;
189	u64 mask;
190};
191
192static struct pg_level pg_level[] = {
193	{ /* pgd */
194		.name = "PGD",
195	}, { /* p4d */
196		.name = (CONFIG_PGTABLE_LEVELS > 4) ? "P4D" : "PGD",
197	}, { /* pud */
198		.name = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
199	}, { /* pmd */
200		.name = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
201	}, { /* pte */
202		.name = "PTE",
203	},
204};
205
206static void dump_prot(struct pg_state *st)
207{
208	unsigned int i;
209
210	for (i = 0; i < ARRAY_SIZE(pte_bits); i++) {
211		const char *s;
212
213		if ((st->current_prot & pte_bits[i].mask) == pte_bits[i].val)
214			s = pte_bits[i].set;
215		else
216			s = pte_bits[i].clear;
217
218		if (s)
219			pt_dump_seq_printf(st->seq, " %s", s);
220	}
221}
222
223#ifdef CONFIG_64BIT
224#define ADDR_FORMAT	"0x%016lx"
225#else
226#define ADDR_FORMAT	"0x%08lx"
227#endif
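/*
 * Print one range line: virtual start-end, starting physical address, and the
 * range size scaled down to the largest unit (K, M, G, ...) that divides it
 * exactly, followed by the page-table level name.
 */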
228static void dump_addr(struct pg_state *st, unsigned long addr)
229{
230	static const char units[] = "KMGTPE";
231	const char *unit = units;
232	unsigned long delta;
233
234	pt_dump_seq_printf(st->seq, ADDR_FORMAT "-" ADDR_FORMAT "   ",
235			   st->start_address, addr);
236
237	pt_dump_seq_printf(st->seq, " " ADDR_FORMAT " ", st->start_pa);
238	delta = (addr - st->start_address) >> 10;
239
240	while (!(delta & 1023) && unit[1]) {
241		delta >>= 10;
242		unit++;
243	}
244
245	pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
246			   pg_level[st->level].name);
247}
248
249static void note_prot_wx(struct pg_state *st, unsigned long addr)
250{
251	if (!st->check_wx)
252		return;
253
254	if ((st->current_prot & (_PAGE_WRITE | _PAGE_EXEC)) !=
255	    (_PAGE_WRITE | _PAGE_EXEC))
256		return;
257
258	WARN_ONCE(1, "riscv/mm: Found insecure W+X mapping at address %p/%pS\n",
259		  (void *)st->start_address, (void *)st->start_address);
260
261	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
262}
263
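/*
 * ptdump callback: level == -1 in pg_state means no range is open yet; a
 * change in protection bits, level, or marker region flushes the range
 * accumulated so far and starts a new one.
 */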
264static void note_page(struct ptdump_state *pt_st, unsigned long addr,
265		      int level, u64 val)
266{
267	struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
268	u64 pa = PFN_PHYS(pte_pfn(__pte(val)));
269	u64 prot = 0;
270
271	if (level >= 0)
272		prot = val & pg_level[level].mask;
273
274	if (st->level == -1) {
275		st->level = level;
276		st->current_prot = prot;
277		st->start_address = addr;
278		st->start_pa = pa;
279		st->last_pa = pa;
280		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
281	} else if (prot != st->current_prot ||
282		   level != st->level || addr >= st->marker[1].start_address) {
283		if (st->current_prot) {
284			note_prot_wx(st, addr);
285			dump_addr(st, addr);
286			dump_prot(st);
287			pt_dump_seq_puts(st->seq, "\n");
288		}
289
290		while (addr >= st->marker[1].start_address) {
291			st->marker++;
292			pt_dump_seq_printf(st->seq, "---[ %s ]---\n",
293					   st->marker->name);
294		}
295
296		st->start_address = addr;
297		st->start_pa = pa;
298		st->last_pa = pa;
299		st->current_prot = prot;
300		st->level = level;
301	} else {
302		st->last_pa = pa;
303	}
304}
305
306static void ptdump_walk(struct seq_file *s, struct ptd_mm_info *pinfo)
307{
308	struct pg_state st = {
309		.seq = s,
310		.marker = pinfo->markers,
311		.level = -1,
312		.ptdump = {
313			.note_page = note_page,
314			.range = (struct ptdump_range[]) {
315				{pinfo->base_addr, pinfo->end},
316				{0, 0}
317			}
318		}
319	};
320
321	ptdump_walk_pgd(&st.ptdump, pinfo->mm, NULL);
322}
323
324void ptdump_check_wx(void)
325{
326	struct pg_state st = {
327		.seq = NULL,
328		.marker = (struct addr_marker[]) {
329			{0, NULL},
330			{-1, NULL},
331		},
332		.level = -1,
333		.check_wx = true,
334		.ptdump = {
335			.note_page = note_page,
336			.range = (struct ptdump_range[]) {
337				{KERN_VIRT_START, ULONG_MAX},
338				{0, 0}
339			}
340		}
341	};
342
343	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
344
345	if (st.wx_pages)
346		pr_warn("Checked W+X mappings: failed, %lu W+X pages found\n",
347			st.wx_pages);
348	else
349		pr_info("Checked W+X mappings: passed, no W+X pages found\n");
350}
351
352static int ptdump_show(struct seq_file *m, void *v)
353{
354	ptdump_walk(m, m->private);
355
356	return 0;
357}
358
359DEFINE_SHOW_ATTRIBUTE(ptdump);
360
361static int __init ptdump_init(void)
362{
363	unsigned int i, j;
364
365#ifdef CONFIG_KASAN
366	address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
367	address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
368#endif
369	address_markers[FIXMAP_START_NR].start_address = FIXADDR_START;
370	address_markers[FIXMAP_END_NR].start_address = FIXADDR_TOP;
371	address_markers[PCI_IO_START_NR].start_address = PCI_IO_START;
372	address_markers[PCI_IO_END_NR].start_address = PCI_IO_END;
373#ifdef CONFIG_SPARSEMEM_VMEMMAP
374	address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
375	address_markers[VMEMMAP_END_NR].start_address = VMEMMAP_END;
376#endif
377	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
378	address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
379	address_markers[PAGE_OFFSET_NR].start_address = PAGE_OFFSET;
380#ifdef CONFIG_64BIT
381	address_markers[MODULES_MAPPING_NR].start_address = MODULES_VADDR;
382	address_markers[KERNEL_MAPPING_NR].start_address = kernel_map.virt_addr;
383#endif
384
385	kernel_ptd_info.base_addr = KERN_VIRT_START;
386
387	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
388		for (j = 0; j < ARRAY_SIZE(pte_bits); j++)
389			pg_level[i].mask |= pte_bits[j].mask;
390
391	debugfs_create_file("kernel_page_tables", 0400, NULL, &kernel_ptd_info,
392			    &ptdump_fops);
393#ifdef CONFIG_EFI
394	if (efi_enabled(EFI_RUNTIME_SERVICES))
395		debugfs_create_file("efi_page_tables", 0400, NULL, &efi_ptd_info,
396				    &ptdump_fops);
397#endif
398
399	return 0;
400}
401
402device_initcall(ptdump_init);
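dump_prot() above turns each pte_bits[] entry into one output column, set or cleared depending on whether the flag is present in the collapsed range. A minimal userspace sketch of the same table-driven decode, assuming only the architectural RISC-V PTE flag positions (V, R, W, X, U, G, A and D in bits 0 through 7); the value 0xcb is a made-up example, not taken from the kernel file:

#include <stdio.h>
#include <stdint.h>

static const struct {
	uint64_t mask;
	const char *set;
	const char *clear;
} bits[] = {
	{ 1 << 7, "D", "." },	/* dirty */
	{ 1 << 6, "A", "." },	/* accessed */
	{ 1 << 5, "G", "." },	/* global */
	{ 1 << 4, "U", "." },	/* user */
	{ 1 << 3, "X", "." },	/* execute */
	{ 1 << 2, "W", "." },	/* write */
	{ 1 << 1, "R", "." },	/* read */
	{ 1 << 0, "V", "." },	/* valid */
};

int main(void)
{
	uint64_t pte = 0xcb;	/* hypothetical raw PTE: D, A, X, R and V set */
	size_t i;

	for (i = 0; i < sizeof(bits) / sizeof(bits[0]); i++)
		printf(" %s", (pte & bits[i].mask) ? bits[i].set : bits[i].clear);
	printf("\n");		/* prints " D A . . X . R V" */
	return 0;
}

Compiled and run on its own, this prints " D A . . X . R V", mirroring the per-flag column layout the dumper emits for each collapsed range.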
v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2019 SiFive
  4 */
  5
  6#include <linux/efi.h>
  7#include <linux/init.h>
  8#include <linux/debugfs.h>
  9#include <linux/seq_file.h>
 10#include <linux/ptdump.h>
 11
 12#include <asm/ptdump.h>
 13#include <linux/pgtable.h>
 14#include <asm/kasan.h>
 15
 16#define pt_dump_seq_printf(m, fmt, args...)	\
 17({						\
 18	if (m)					\
 19		seq_printf(m, fmt, ##args);	\
 20})
 21
 22#define pt_dump_seq_puts(m, fmt)	\
 23({					\
 24	if (m)				\
 25		seq_printf(m, fmt);	\
 26})
 27
 28/*
 29 * The page dumper groups page table entries of the same type into a single
 30 * description. It uses pg_state to track the range information while
 31 * iterating over the pte entries. When the continuity is broken it then
 32 * dumps out a description of the range.
 33 */
 34struct pg_state {
 35	struct ptdump_state ptdump;
 36	struct seq_file *seq;
 37	const struct addr_marker *marker;
 38	unsigned long start_address;
 39	unsigned long start_pa;
 40	unsigned long last_pa;
 41	int level;
 42	u64 current_prot;
 43	bool check_wx;
 44	unsigned long wx_pages;
 45};
 46
 47/* Address marker */
 48struct addr_marker {
 49	unsigned long start_address;
 50	const char *name;
 51};
 52
 53/* Private information for debugfs */
 54struct ptd_mm_info {
 55	struct mm_struct		*mm;
 56	const struct addr_marker	*markers;
 57	unsigned long base_addr;
 58	unsigned long end;
 59};
 60
 61enum address_markers_idx {
 62	FIXMAP_START_NR,
 63	FIXMAP_END_NR,
 64	PCI_IO_START_NR,
 65	PCI_IO_END_NR,
 66#ifdef CONFIG_SPARSEMEM_VMEMMAP
 67	VMEMMAP_START_NR,
 68	VMEMMAP_END_NR,
 69#endif
 70	VMALLOC_START_NR,
 71	VMALLOC_END_NR,
 72	PAGE_OFFSET_NR,
 73#ifdef CONFIG_KASAN
 74	KASAN_SHADOW_START_NR,
 75	KASAN_SHADOW_END_NR,
 76#endif
 77#ifdef CONFIG_64BIT
 78	MODULES_MAPPING_NR,
 79	KERNEL_MAPPING_NR,
 80#endif
 81	END_OF_SPACE_NR
 82};
 83
 84static struct addr_marker address_markers[] = {
 85	{0, "Fixmap start"},
 86	{0, "Fixmap end"},
 87	{0, "PCI I/O start"},
 88	{0, "PCI I/O end"},
 89#ifdef CONFIG_SPARSEMEM_VMEMMAP
 90	{0, "vmemmap start"},
 91	{0, "vmemmap end"},
 92#endif
 93	{0, "vmalloc() area"},
 94	{0, "vmalloc() end"},
 95	{0, "Linear mapping"},
 96#ifdef CONFIG_KASAN
 97	{0, "Kasan shadow start"},
 98	{0, "Kasan shadow end"},
 99#endif
100#ifdef CONFIG_64BIT
101	{0, "Modules/BPF mapping"},
102	{0, "Kernel mapping"},
103#endif
104	{-1, NULL},
105};
106
107static struct ptd_mm_info kernel_ptd_info = {
108	.mm		= &init_mm,
109	.markers	= address_markers,
110	.base_addr	= 0,
111	.end		= ULONG_MAX,
112};
113
114#ifdef CONFIG_EFI
115static struct addr_marker efi_addr_markers[] = {
116		{ 0,		"UEFI runtime start" },
117		{ SZ_1G,	"UEFI runtime end" },
118		{ -1,		NULL }
119};
120
121static struct ptd_mm_info efi_ptd_info = {
122	.mm		= &efi_mm,
123	.markers	= efi_addr_markers,
124	.base_addr	= 0,
125	.end		= SZ_2G,
126};
127#endif
128
129/* Page Table Entry */
130struct prot_bits {
131	u64 mask;
132	const char *set;
133	const char *clear;
134};
135
136static const struct prot_bits pte_bits[] = {
137	{
138#ifdef CONFIG_64BIT
139		.mask = _PAGE_NAPOT,
140		.set = "N",
141		.clear = ".",
142	}, {
143		.mask = _PAGE_MTMASK_SVPBMT,
144		.set = "MT(%s)",
145		.clear = "  ..  ",
146	}, {
147#endif
148		.mask = _PAGE_SOFT,
149		.set = "RSW(%d)",
150		.clear = "  ..  ",
151	}, {
152		.mask = _PAGE_DIRTY,
153		.set = "D",
154		.clear = ".",
155	}, {
156		.mask = _PAGE_ACCESSED,
157		.set = "A",
158		.clear = ".",
159	}, {
160		.mask = _PAGE_GLOBAL,
161		.set = "G",
162		.clear = ".",
163	}, {
164		.mask = _PAGE_USER,
165		.set = "U",
166		.clear = ".",
167	}, {
168		.mask = _PAGE_EXEC,
169		.set = "X",
170		.clear = ".",
171	}, {
172		.mask = _PAGE_WRITE,
173		.set = "W",
174		.clear = ".",
175	}, {
176		.mask = _PAGE_READ,
177		.set = "R",
178		.clear = ".",
179	}, {
180		.mask = _PAGE_PRESENT,
181		.set = "V",
182		.clear = ".",
183	}
184};
185
186/* Page Level */
187struct pg_level {
188	const char *name;
189	u64 mask;
190};
191
192static struct pg_level pg_level[] = {
193	{ /* pgd */
194		.name = "PGD",
195	}, { /* p4d */
196		.name = (CONFIG_PGTABLE_LEVELS > 4) ? "P4D" : "PGD",
197	}, { /* pud */
198		.name = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
199	}, { /* pmd */
200		.name = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
201	}, { /* pte */
202		.name = "PTE",
203	},
204};
205
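/*
 * Unlike the single-letter flags, RSW and the Svpbmt memory type are
 * multi-bit fields: RSW occupies PTE bits [9:8], hence the "val >> 8" in
 * dump_prot() below, and the Svpbmt field in bits [62:61] is printed as
 * "NC" or "IO" (a zero field means the default PMA attribute, so only the
 * clear placeholder is shown).
 */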
206static void dump_prot(struct pg_state *st)
207{
208	unsigned int i;
209
210	for (i = 0; i < ARRAY_SIZE(pte_bits); i++) {
211		char s[7];
212		unsigned long val;
213
214		val = st->current_prot & pte_bits[i].mask;
215		if (val) {
216			if (pte_bits[i].mask == _PAGE_SOFT)
217				sprintf(s, pte_bits[i].set, val >> 8);
218#ifdef CONFIG_64BIT
219			else if (pte_bits[i].mask == _PAGE_MTMASK_SVPBMT) {
220				if (val == _PAGE_NOCACHE_SVPBMT)
221					sprintf(s, pte_bits[i].set, "NC");
222				else if (val == _PAGE_IO_SVPBMT)
223					sprintf(s, pte_bits[i].set, "IO");
224				else
225					sprintf(s, pte_bits[i].set, "??");
226			}
227#endif
228			else
229				sprintf(s, "%s", pte_bits[i].set);
230		} else {
231			sprintf(s, "%s", pte_bits[i].clear);
232		}
233
234		pt_dump_seq_printf(st->seq, " %s", s);
235	}
236}
237
238#ifdef CONFIG_64BIT
239#define ADDR_FORMAT	"0x%016lx"
240#else
241#define ADDR_FORMAT	"0x%08lx"
242#endif
243static void dump_addr(struct pg_state *st, unsigned long addr)
244{
245	static const char units[] = "KMGTPE";
246	const char *unit = units;
247	unsigned long delta;
248
249	pt_dump_seq_printf(st->seq, ADDR_FORMAT "-" ADDR_FORMAT "   ",
250			   st->start_address, addr);
251
252	pt_dump_seq_printf(st->seq, " " ADDR_FORMAT " ", st->start_pa);
253	delta = (addr - st->start_address) >> 10;
254
255	while (!(delta & 1023) && unit[1]) {
256		delta >>= 10;
257		unit++;
258	}
259
260	pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
261			   pg_level[st->level].name);
262}
263
264static void note_prot_wx(struct pg_state *st, unsigned long addr)
265{
266	if (!st->check_wx)
267		return;
268
269	if ((st->current_prot & (_PAGE_WRITE | _PAGE_EXEC)) !=
270	    (_PAGE_WRITE | _PAGE_EXEC))
271		return;
272
273	WARN_ONCE(1, "riscv/mm: Found insecure W+X mapping at address %p/%pS\n",
274		  (void *)st->start_address, (void *)st->start_address);
275
276	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
277}
278
279static void note_page(struct ptdump_state *pt_st, unsigned long addr,
280		      int level, u64 val)
281{
282	struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
283	u64 pa = PFN_PHYS(pte_pfn(__pte(val)));
284	u64 prot = 0;
285
286	if (level >= 0)
287		prot = val & pg_level[level].mask;
288
289	if (st->level == -1) {
290		st->level = level;
291		st->current_prot = prot;
292		st->start_address = addr;
293		st->start_pa = pa;
294		st->last_pa = pa;
295		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
296	} else if (prot != st->current_prot ||
297		   level != st->level || addr >= st->marker[1].start_address) {
298		if (st->current_prot) {
299			note_prot_wx(st, addr);
300			dump_addr(st, addr);
301			dump_prot(st);
302			pt_dump_seq_puts(st->seq, "\n");
303		}
304
305		while (addr >= st->marker[1].start_address) {
306			st->marker++;
307			pt_dump_seq_printf(st->seq, "---[ %s ]---\n",
308					   st->marker->name);
309		}
310
311		st->start_address = addr;
312		st->start_pa = pa;
313		st->last_pa = pa;
314		st->current_prot = prot;
315		st->level = level;
316	} else {
317		st->last_pa = pa;
318	}
319}
320
321static void ptdump_walk(struct seq_file *s, struct ptd_mm_info *pinfo)
322{
323	struct pg_state st = {
324		.seq = s,
325		.marker = pinfo->markers,
326		.level = -1,
327		.ptdump = {
328			.note_page = note_page,
329			.range = (struct ptdump_range[]) {
330				{pinfo->base_addr, pinfo->end},
331				{0, 0}
332			}
333		}
334	};
335
336	ptdump_walk_pgd(&st.ptdump, pinfo->mm, NULL);
337}
338
339void ptdump_check_wx(void)
340{
341	struct pg_state st = {
342		.seq = NULL,
343		.marker = (struct addr_marker[]) {
344			{0, NULL},
345			{-1, NULL},
346		},
347		.level = -1,
348		.check_wx = true,
349		.ptdump = {
350			.note_page = note_page,
351			.range = (struct ptdump_range[]) {
352				{KERN_VIRT_START, ULONG_MAX},
353				{0, 0}
354			}
355		}
356	};
357
358	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
359
360	if (st.wx_pages)
361		pr_warn("Checked W+X mappings: failed, %lu W+X pages found\n",
362			st.wx_pages);
363	else
364		pr_info("Checked W+X mappings: passed, no W+X pages found\n");
365}
366
367static int ptdump_show(struct seq_file *m, void *v)
368{
369	ptdump_walk(m, m->private);
370
371	return 0;
372}
373
374DEFINE_SHOW_ATTRIBUTE(ptdump);
375
376static int __init ptdump_init(void)
377{
378	unsigned int i, j;
379
380	address_markers[FIXMAP_START_NR].start_address = FIXADDR_START;
381	address_markers[FIXMAP_END_NR].start_address = FIXADDR_TOP;
382	address_markers[PCI_IO_START_NR].start_address = PCI_IO_START;
383	address_markers[PCI_IO_END_NR].start_address = PCI_IO_END;
384#ifdef CONFIG_SPARSEMEM_VMEMMAP
385	address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
386	address_markers[VMEMMAP_END_NR].start_address = VMEMMAP_END;
387#endif
388	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
389	address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
390	address_markers[PAGE_OFFSET_NR].start_address = PAGE_OFFSET;
391#ifdef CONFIG_KASAN
392	address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
393	address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
394#endif
395#ifdef CONFIG_64BIT
396	address_markers[MODULES_MAPPING_NR].start_address = MODULES_VADDR;
397	address_markers[KERNEL_MAPPING_NR].start_address = kernel_map.virt_addr;
398#endif
399
400	kernel_ptd_info.base_addr = KERN_VIRT_START;
401
402	pg_level[1].name = pgtable_l5_enabled ? "P4D" : "PGD";
403	pg_level[2].name = pgtable_l4_enabled ? "PUD" : "PGD";
404
405	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
406		for (j = 0; j < ARRAY_SIZE(pte_bits); j++)
407			pg_level[i].mask |= pte_bits[j].mask;
408
409	debugfs_create_file("kernel_page_tables", 0400, NULL, &kernel_ptd_info,
410			    &ptdump_fops);
411#ifdef CONFIG_EFI
412	if (efi_enabled(EFI_RUNTIME_SERVICES))
413		debugfs_create_file("efi_page_tables", 0400, NULL, &efi_ptd_info,
414				    &ptdump_fops);
415#endif
416
417	return 0;
418}
419
420device_initcall(ptdump_init);
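Both versions expose the dump through debugfs as kernel_page_tables (plus efi_page_tables when EFI runtime services are enabled), readable only by root (mode 0400). A minimal userspace reader, assuming debugfs is mounted at the conventional /sys/kernel/debug:

#include <stdio.h>

int main(void)
{
	/* Conventional debugfs mount point; adjust if debugfs is mounted elsewhere. */
	FILE *f = fopen("/sys/kernel/debug/kernel_page_tables", "r");
	char line[256];

	if (!f) {
		perror("kernel_page_tables");
		return 1;
	}

	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);

	fclose(f);
	return 0;
}

Each line of output covers one collapsed range: virtual start and end, starting physical address, size with unit, level name, and the flag columns produced by dump_prot().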