// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * Derived from x86 implementation:
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/seq_file.h>

#include <asm/domain.h>
#include <asm/fixmap.h>
#include <asm/memory.h>
#include <asm/ptdump.h>

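/*
 * Markers that become the "---[ ... ]---" section headings in the output.
 * The vmalloc entry is left at 0 here and is filled in by ptdump_initialize(),
 * since VMALLOC_START is only known at runtime; note that its index shifts
 * when the KASAN shadow markers are present.
 */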
static struct addr_marker address_markers[] = {
#ifdef CONFIG_KASAN
	{ KASAN_SHADOW_START, "Kasan shadow start"},
	{ KASAN_SHADOW_END, "Kasan shadow end"},
#endif
	{ MODULES_VADDR, "Modules" },
	{ PAGE_OFFSET, "Kernel Mapping" },
	{ 0, "vmalloc() Area" },
	{ VMALLOC_END, "vmalloc() End" },
	{ FIXADDR_START, "Fixmap Area" },
	{ VECTORS_BASE, "Vectors" },
	{ VECTORS_BASE + PAGE_SIZE * 2, "Vectors End" },
	{ -1, NULL },
};

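/*
 * Only emit output when a seq_file was supplied.  ptdump_check_wx() runs
 * the same walker with a NULL seq_file, so these helpers let it stay silent.
 */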
#define pt_dump_seq_printf(m, fmt, args...) \
({ \
	if (m) \
		seq_printf(m, fmt, ##args); \
})

#define pt_dump_seq_puts(m, fmt) \
({ \
	if (m) \
		seq_printf(m, fmt); \
})

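/*
 * Walker state carried between note_page() calls: the start address,
 * protection bits, level and domain of the contiguous range currently being
 * collapsed, plus the W+X accounting used when check_wx is set.
 */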
struct pg_state {
	struct seq_file *seq;
	const struct addr_marker *marker;
	unsigned long start_address;
	unsigned level;
	u64 current_prot;
	bool check_wx;
	unsigned long wx_pages;
	const char *current_domain;
};

struct prot_bits {
	u64 mask;
	u64 val;
	const char *set;
	const char *clear;
	bool ro_bit;
	bool nx_bit;
};

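/*
 * Attribute decode tables.  An entry matches when (prot & mask) == val;
 * the "set" string is printed on a match and "clear" (when non-NULL)
 * otherwise.  ro_bit/nx_bit mark the entries consulted by the W+X check.
 */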
static const struct prot_bits pte_bits[] = {
	{
		.mask = L_PTE_USER,
		.val = L_PTE_USER,
		.set = "USR",
		.clear = " ",
	}, {
		.mask = L_PTE_RDONLY,
		.val = L_PTE_RDONLY,
		.set = "ro",
		.clear = "RW",
		.ro_bit = true,
	}, {
		.mask = L_PTE_XN,
		.val = L_PTE_XN,
		.set = "NX",
		.clear = "x ",
		.nx_bit = true,
	}, {
		.mask = L_PTE_SHARED,
		.val = L_PTE_SHARED,
		.set = "SHD",
		.clear = " ",
	}, {
		.mask = L_PTE_MT_MASK,
		.val = L_PTE_MT_UNCACHED,
		.set = "SO/UNCACHED",
	}, {
		.mask = L_PTE_MT_MASK,
		.val = L_PTE_MT_BUFFERABLE,
		.set = "MEM/BUFFERABLE/WC",
	}, {
		.mask = L_PTE_MT_MASK,
		.val = L_PTE_MT_WRITETHROUGH,
		.set = "MEM/CACHED/WT",
	}, {
		.mask = L_PTE_MT_MASK,
		.val = L_PTE_MT_WRITEBACK,
		.set = "MEM/CACHED/WBRA",
#ifndef CONFIG_ARM_LPAE
	}, {
		.mask = L_PTE_MT_MASK,
		.val = L_PTE_MT_MINICACHE,
		.set = "MEM/MINICACHE",
#endif
	}, {
		.mask = L_PTE_MT_MASK,
		.val = L_PTE_MT_WRITEALLOC,
		.set = "MEM/CACHED/WBWA",
	}, {
		.mask = L_PTE_MT_MASK,
		.val = L_PTE_MT_DEV_SHARED,
		.set = "DEV/SHARED",
#ifndef CONFIG_ARM_LPAE
	}, {
		.mask = L_PTE_MT_MASK,
		.val = L_PTE_MT_DEV_NONSHARED,
		.set = "DEV/NONSHARED",
#endif
	}, {
		.mask = L_PTE_MT_MASK,
		.val = L_PTE_MT_DEV_WC,
		.set = "DEV/WC",
	}, {
		.mask = L_PTE_MT_MASK,
		.val = L_PTE_MT_DEV_CACHED,
		.set = "DEV/CACHED",
	},
};

static const struct prot_bits section_bits[] = {
#ifdef CONFIG_ARM_LPAE
	{
		.mask = PMD_SECT_USER,
		.val = PMD_SECT_USER,
		.set = "USR",
	}, {
		.mask = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
		.val = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
		.set = "ro",
		.clear = "RW",
		.ro_bit = true,
#elif __LINUX_ARM_ARCH__ >= 6
	{
		.mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val = PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.set = " ro",
		.ro_bit = true,
	}, {
		.mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val = PMD_SECT_AP_WRITE,
		.set = " RW",
	}, {
		.mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val = PMD_SECT_AP_READ,
		.set = "USR ro",
	}, {
		.mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.set = "USR RW",
#else /* ARMv4/ARMv5 */
	/* These are approximate */
	{
		.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val = 0,
		.set = " ro",
		.ro_bit = true,
	}, {
		.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val = PMD_SECT_AP_WRITE,
		.set = " RW",
	}, {
		.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val = PMD_SECT_AP_READ,
		.set = "USR ro",
	}, {
		.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.set = "USR RW",
#endif
	}, {
		.mask = PMD_SECT_XN,
		.val = PMD_SECT_XN,
		.set = "NX",
		.clear = "x ",
		.nx_bit = true,
	}, {
		.mask = PMD_SECT_S,
		.val = PMD_SECT_S,
		.set = "SHD",
		.clear = " ",
	},
};

struct pg_level {
	const struct prot_bits *bits;
	size_t num;
	u64 mask;
	const struct prot_bits *ro_bit;
	const struct prot_bits *nx_bit;
};

static struct pg_level pg_level[] = {
	{
	}, { /* pgd */
	}, { /* p4d */
	}, { /* pud */
	}, { /* pmd */
		.bits = section_bits,
		.num = ARRAY_SIZE(section_bits),
	}, { /* pte */
		.bits = pte_bits,
		.num = ARRAY_SIZE(pte_bits),
	},
};

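/* Print the decoded attribute strings for the current protection value. */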
static void dump_prot(struct pg_state *st, const struct prot_bits *bits, size_t num)
{
	unsigned i;

	for (i = 0; i < num; i++, bits++) {
		const char *s;

		if ((st->current_prot & bits->mask) == bits->val)
			s = bits->set;
		else
			s = bits->clear;

		if (s)
			pt_dump_seq_printf(st->seq, " %s", s);
	}
}

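/*
 * When a W+X check is running, warn once and count the pages of any range
 * that is neither read-only nor execute-never.
 */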
static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
	if (!st->check_wx)
		return;
	if ((st->current_prot & pg_level[st->level].ro_bit->mask) ==
	    pg_level[st->level].ro_bit->val)
		return;
	if ((st->current_prot & pg_level[st->level].nx_bit->mask) ==
	    pg_level[st->level].nx_bit->val)
		return;

	WARN_ONCE(1, "arm/mm: Found insecure W+X mapping at address %pS\n",
		  (void *)st->start_address);

	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}

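/*
 * Called for every entry the walkers visit.  Consecutive entries with the
 * same protection, level and domain are merged into one range; a line is
 * printed (and a new range started) whenever any of them change or a
 * marker boundary is crossed.
 */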
static void note_page(struct pg_state *st, unsigned long addr,
		      unsigned int level, u64 val, const char *domain)
{
	static const char units[] = "KMGTPE";
	u64 prot = val & pg_level[level].mask;

	if (!st->level) {
		st->level = level;
		st->current_prot = prot;
		st->current_domain = domain;
		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	} else if (prot != st->current_prot || level != st->level ||
		   domain != st->current_domain ||
		   addr >= st->marker[1].start_address) {
		const char *unit = units;
		unsigned long delta;

		if (st->current_prot) {
			note_prot_wx(st, addr);
			pt_dump_seq_printf(st->seq, "0x%08lx-0x%08lx ",
					   st->start_address, addr);

			delta = (addr - st->start_address) >> 10;
			while (!(delta & 1023) && unit[1]) {
				delta >>= 10;
				unit++;
			}
			pt_dump_seq_printf(st->seq, "%9lu%c", delta, *unit);
			if (st->current_domain)
				pt_dump_seq_printf(st->seq, " %s",
						   st->current_domain);
			if (pg_level[st->level].bits)
				dump_prot(st, pg_level[st->level].bits, pg_level[st->level].num);
			pt_dump_seq_printf(st->seq, "\n");
		}

		if (addr >= st->marker[1].start_address) {
			st->marker++;
			pt_dump_seq_printf(st->seq, "---[ %s ]---\n",
					   st->marker->name);
		}
		st->start_address = addr;
		st->current_prot = prot;
		st->current_domain = domain;
		st->level = level;
	}
}

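/*
 * Recursive descent over the page-table levels (pgd = 1 ... pte = 5).
 * Absent entries are still passed to note_page() at the level where the
 * walk stopped, so unmapped ranges show up in the dump as well.
 */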
static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start,
		     const char *domain)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		addr = start + i * PAGE_SIZE;
		note_page(st, addr, 5, pte_val(*pte), domain);
	}
}

static const char *get_domain_name(pmd_t *pmd)
{
#ifndef CONFIG_ARM_LPAE
	switch (pmd_val(*pmd) & PMD_DOMAIN_MASK) {
	case PMD_DOMAIN(DOMAIN_KERNEL):
		return "KERNEL ";
	case PMD_DOMAIN(DOMAIN_USER):
		return "USER ";
	case PMD_DOMAIN(DOMAIN_IO):
		return "IO ";
	case PMD_DOMAIN(DOMAIN_VECTORS):
		return "VECTORS";
	default:
		return "unknown";
	}
#endif
	return NULL;
}

static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	unsigned long addr;
	unsigned i;
	const char *domain;

	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
		addr = start + i * PMD_SIZE;
		domain = get_domain_name(pmd);
		if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
			note_page(st, addr, 4, pmd_val(*pmd), domain);
		else
			walk_pte(st, pmd, addr, domain);

		if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) {
			addr += SECTION_SIZE;
			pmd++;
			domain = get_domain_name(pmd);
			note_page(st, addr, 4, pmd_val(*pmd), domain);
		}
	}
}

static void walk_pud(struct pg_state *st, p4d_t *p4d, unsigned long start)
{
	pud_t *pud = pud_offset(p4d, 0);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		addr = start + i * PUD_SIZE;
		if (!pud_none(*pud)) {
			walk_pmd(st, pud, addr);
		} else {
			note_page(st, addr, 3, pud_val(*pud), NULL);
		}
	}
}

static void walk_p4d(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
	p4d_t *p4d = p4d_offset(pgd, 0);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
		addr = start + i * P4D_SIZE;
		if (!p4d_none(*p4d)) {
			walk_pud(st, p4d, addr);
		} else {
			note_page(st, addr, 2, p4d_val(*p4d), NULL);
		}
	}
}

static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
		     unsigned long start)
{
	pgd_t *pgd = pgd_offset(mm, 0UL);
	unsigned i;
	unsigned long addr;

	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
		addr = start + i * PGDIR_SIZE;
		if (!pgd_none(*pgd)) {
			walk_p4d(st, pgd, addr);
		} else {
			note_page(st, addr, 1, pgd_val(*pgd), NULL);
		}
	}
}

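/*
 * Walk and dump the page tables described by @info into @m.  The function
 * is not static: it is meant to be driven from outside this file, e.g. by
 * the debugfs file registered via ptdump_debugfs_register() below.
 */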
void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
{
	struct pg_state st = {
		.seq = m,
		.marker = info->markers,
		.check_wx = false,
	};

	walk_pgd(&st, info->mm, info->base_addr);
	note_page(&st, 0, 0, 0, NULL);
}

static void __init ptdump_initialize(void)
{
	unsigned i, j;

	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
		if (pg_level[i].bits)
			for (j = 0; j < pg_level[i].num; j++) {
				pg_level[i].mask |= pg_level[i].bits[j].mask;
				if (pg_level[i].bits[j].ro_bit)
					pg_level[i].ro_bit = &pg_level[i].bits[j];
				if (pg_level[i].bits[j].nx_bit)
					pg_level[i].nx_bit = &pg_level[i].bits[j];
			}
#ifdef CONFIG_KASAN
	address_markers[4].start_address = VMALLOC_START;
#else
	address_markers[2].start_address = VMALLOC_START;
#endif
}

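/* Descriptor for the kernel page tables: init_mm, dumped from address 0. */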
static struct ptdump_info kernel_ptdump_info = {
	.mm = &init_mm,
	.markers = address_markers,
	.base_addr = 0,
};

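/*
 * Walk init_mm without producing any output and report whether any
 * writable + executable mappings were found.
 */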
void ptdump_check_wx(void)
{
	struct pg_state st = {
		.seq = NULL,
		.marker = (struct addr_marker[]) {
			{ 0, NULL},
			{ -1, NULL},
		},
		.check_wx = true,
	};

	walk_pgd(&st, &init_mm, 0);
	note_page(&st, 0, 0, 0, NULL);
	if (st.wx_pages)
		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
			st.wx_pages);
	else
		pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}

static int __init ptdump_init(void)
{
	ptdump_initialize();
	ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables");
	return 0;
}
__initcall(ptdump_init);