// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * Derived from x86 implementation:
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/seq_file.h>

#include <asm/domain.h>
#include <asm/fixmap.h>
#include <asm/memory.h>
#include <asm/ptdump.h>

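/*
 * Fixed markers for the boundaries of the kernel virtual address space.
 * note_page() emits a "---[ name ]---" banner whenever the walk crosses
 * one of these addresses.  The start of the vmalloc area is not a
 * compile-time constant on ARM, so it is filled in at init time by
 * ptdump_initialize().
 */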
static struct addr_marker address_markers[] = {
        { MODULES_VADDR,        "Modules" },
        { PAGE_OFFSET,          "Kernel Mapping" },
        { 0,                    "vmalloc() Area" },
        { VMALLOC_END,          "vmalloc() End" },
        { FIXADDR_START,        "Fixmap Area" },
        { VECTORS_BASE,         "Vectors" },
        { VECTORS_BASE + PAGE_SIZE * 2, "Vectors End" },
        { -1,                   NULL },
};

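/*
 * Printing is optional so that the same walkers can back the W+X check:
 * that path runs with a NULL seq_file, and these wrappers then simply
 * discard the output.
 */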
#define pt_dump_seq_printf(m, fmt, args...) \
({ \
        if (m) \
                seq_printf(m, fmt, ##args); \
})

#define pt_dump_seq_puts(m, fmt) \
({ \
        if (m) \
                seq_printf(m, fmt); \
})

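/*
 * Walker state carried across note_page() calls, allowing contiguous
 * ranges with identical level, protection bits and domain to be folded
 * into a single output line.
 */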
struct pg_state {
        struct seq_file *seq;
        const struct addr_marker *marker;
        unsigned long start_address;
        unsigned level;
        u64 current_prot;
        bool check_wx;
        unsigned long wx_pages;
        const char *current_domain;
};

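/*
 * Describes how to decode one field of a descriptor: when the masked
 * protection bits equal val, the "set" string is printed, otherwise the
 * "clear" string (nothing is printed when the relevant string is NULL).
 * ro_bit and nx_bit mark the entries the W+X check has to consult.
 */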
struct prot_bits {
        u64 mask;
        u64 val;
        const char *set;
        const char *clear;
        bool ro_bit;
        bool nx_bit;
};

static const struct prot_bits pte_bits[] = {
        {
                .mask = L_PTE_USER,
                .val = L_PTE_USER,
                .set = "USR",
                .clear = "   ",
        }, {
                .mask = L_PTE_RDONLY,
                .val = L_PTE_RDONLY,
                .set = "ro",
                .clear = "RW",
                .ro_bit = true,
        }, {
                .mask = L_PTE_XN,
                .val = L_PTE_XN,
                .set = "NX",
                .clear = "x ",
                .nx_bit = true,
        }, {
                .mask = L_PTE_SHARED,
                .val = L_PTE_SHARED,
                .set = "SHD",
                .clear = "   ",
        }, {
                .mask = L_PTE_MT_MASK,
                .val = L_PTE_MT_UNCACHED,
                .set = "SO/UNCACHED",
        }, {
                .mask = L_PTE_MT_MASK,
                .val = L_PTE_MT_BUFFERABLE,
                .set = "MEM/BUFFERABLE/WC",
        }, {
                .mask = L_PTE_MT_MASK,
                .val = L_PTE_MT_WRITETHROUGH,
                .set = "MEM/CACHED/WT",
        }, {
                .mask = L_PTE_MT_MASK,
                .val = L_PTE_MT_WRITEBACK,
                .set = "MEM/CACHED/WBRA",
#ifndef CONFIG_ARM_LPAE
        }, {
                .mask = L_PTE_MT_MASK,
                .val = L_PTE_MT_MINICACHE,
                .set = "MEM/MINICACHE",
#endif
        }, {
                .mask = L_PTE_MT_MASK,
                .val = L_PTE_MT_WRITEALLOC,
                .set = "MEM/CACHED/WBWA",
        }, {
                .mask = L_PTE_MT_MASK,
                .val = L_PTE_MT_DEV_SHARED,
                .set = "DEV/SHARED",
#ifndef CONFIG_ARM_LPAE
        }, {
                .mask = L_PTE_MT_MASK,
                .val = L_PTE_MT_DEV_NONSHARED,
                .set = "DEV/NONSHARED",
#endif
        }, {
                .mask = L_PTE_MT_MASK,
                .val = L_PTE_MT_DEV_WC,
                .set = "DEV/WC",
        }, {
                .mask = L_PTE_MT_MASK,
                .val = L_PTE_MT_DEV_CACHED,
                .set = "DEV/CACHED",
        },
};

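/*
 * Section (PMD-level) permissions are encoded differently on LPAE,
 * ARMv6+ and older cores, so the decode table is chosen to match the
 * access-permission scheme the kernel was built for.
 */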
static const struct prot_bits section_bits[] = {
#ifdef CONFIG_ARM_LPAE
        {
                .mask = PMD_SECT_USER,
                .val = PMD_SECT_USER,
                .set = "USR",
        }, {
                .mask = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
                .val = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
                .set = "ro",
                .clear = "RW",
                .ro_bit = true,
#elif __LINUX_ARM_ARCH__ >= 6
        {
                .mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val = PMD_SECT_APX | PMD_SECT_AP_WRITE,
                .set = "    ro",
                .ro_bit = true,
        }, {
                .mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val = PMD_SECT_AP_WRITE,
                .set = "    RW",
        }, {
                .mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val = PMD_SECT_AP_READ,
                .set = "USR ro",
        }, {
                .mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .set = "USR RW",
#else /* ARMv4/ARMv5 */
        /* These are approximate */
        {
                .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val = 0,
                .set = "    ro",
                .ro_bit = true,
        }, {
                .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val = PMD_SECT_AP_WRITE,
                .set = "    RW",
        }, {
                .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val = PMD_SECT_AP_READ,
                .set = "USR ro",
        }, {
                .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .set = "USR RW",
#endif
        }, {
                .mask = PMD_SECT_XN,
                .val = PMD_SECT_XN,
                .set = "NX",
                .clear = "x ",
                .nx_bit = true,
        }, {
                .mask = PMD_SECT_S,
                .val = PMD_SECT_S,
                .set = "SHD",
                .clear = "   ",
        },
};

struct pg_level {
        const struct prot_bits *bits;
        size_t num;
        u64 mask;
        const struct prot_bits *ro_bit;
        const struct prot_bits *nx_bit;
};

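/*
 * Indexed by the level argument passed to note_page(): 1 = pgd,
 * 2 = p4d, 3 = pud, 4 = pmd (sections), 5 = pte.  mask, ro_bit and
 * nx_bit are derived from the bits tables by ptdump_initialize().
 */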
static struct pg_level pg_level[] = {
        {
        }, { /* pgd */
        }, { /* p4d */
        }, { /* pud */
        }, { /* pmd */
                .bits = section_bits,
                .num = ARRAY_SIZE(section_bits),
        }, { /* pte */
                .bits = pte_bits,
                .num = ARRAY_SIZE(pte_bits),
        },
};

static void dump_prot(struct pg_state *st, const struct prot_bits *bits, size_t num)
{
        unsigned i;

        for (i = 0; i < num; i++, bits++) {
                const char *s;

                if ((st->current_prot & bits->mask) == bits->val)
                        s = bits->set;
                else
                        s = bits->clear;

                if (s)
                        pt_dump_seq_printf(st->seq, " %s", s);
        }
}

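/*
 * Count a range as W+X when it is neither read-only nor non-executable,
 * i.e. both writable and executable.
 */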
static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
        if (!st->check_wx)
                return;
        if ((st->current_prot & pg_level[st->level].ro_bit->mask) ==
            pg_level[st->level].ro_bit->val)
                return;
        if ((st->current_prot & pg_level[st->level].nx_bit->mask) ==
            pg_level[st->level].nx_bit->val)
                return;

        WARN_ONCE(1, "arm/mm: Found insecure W+X mapping at address %pS\n",
                  (void *)st->start_address);

        st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}

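/*
 * Called for every entry the walkers visit.  Output is deferred until
 * the protection bits, level or domain change (or a marker boundary is
 * crossed); the accumulated range is then printed on one line with a
 * human-readable size and its decoded attributes.
 */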
static void note_page(struct pg_state *st, unsigned long addr,
                      unsigned int level, u64 val, const char *domain)
{
        static const char units[] = "KMGTPE";
        u64 prot = val & pg_level[level].mask;

        if (!st->level) {
                st->level = level;
                st->current_prot = prot;
                st->current_domain = domain;
                pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
        } else if (prot != st->current_prot || level != st->level ||
                   domain != st->current_domain ||
                   addr >= st->marker[1].start_address) {
                const char *unit = units;
                unsigned long delta;

                if (st->current_prot) {
                        note_prot_wx(st, addr);
                        pt_dump_seq_printf(st->seq, "0x%08lx-0x%08lx   ",
                                           st->start_address, addr);

                        delta = (addr - st->start_address) >> 10;
                        while (!(delta & 1023) && unit[1]) {
                                delta >>= 10;
                                unit++;
                        }
                        pt_dump_seq_printf(st->seq, "%9lu%c", delta, *unit);
                        if (st->current_domain)
                                pt_dump_seq_printf(st->seq, " %s",
                                                   st->current_domain);
                        if (pg_level[st->level].bits)
                                dump_prot(st, pg_level[st->level].bits, pg_level[st->level].num);
                        pt_dump_seq_printf(st->seq, "\n");
                }

                if (addr >= st->marker[1].start_address) {
                        st->marker++;
                        pt_dump_seq_printf(st->seq, "---[ %s ]---\n",
                                           st->marker->name);
                }
                st->start_address = addr;
                st->current_prot = prot;
                st->current_domain = domain;
                st->level = level;
        }
}

static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start,
                     const char *domain)
{
        pte_t *pte = pte_offset_kernel(pmd, 0);
        unsigned long addr;
        unsigned i;

        for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
                addr = start + i * PAGE_SIZE;
                note_page(st, addr, 5, pte_val(*pte), domain);
        }
}

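/* Domains exist only in the short-descriptor (non-LPAE) format. */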
static const char *get_domain_name(pmd_t *pmd)
{
#ifndef CONFIG_ARM_LPAE
        switch (pmd_val(*pmd) & PMD_DOMAIN_MASK) {
        case PMD_DOMAIN(DOMAIN_KERNEL):
                return "KERNEL ";
        case PMD_DOMAIN(DOMAIN_USER):
                return "USER   ";
        case PMD_DOMAIN(DOMAIN_IO):
                return "IO     ";
        case PMD_DOMAIN(DOMAIN_VECTORS):
                return "VECTORS";
        default:
                return "unknown";
        }
#endif
        return NULL;
}

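/*
 * With the short-descriptor format, SECTION_SIZE (1MiB) is half of
 * PMD_SIZE (2MiB): each PMD entry covers a pair of sections, so the
 * second section of a pair has to be reported separately.
 */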
static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
        pmd_t *pmd = pmd_offset(pud, 0);
        unsigned long addr;
        unsigned i;
        const char *domain;

        for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
                addr = start + i * PMD_SIZE;
                domain = get_domain_name(pmd);
                if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
                        note_page(st, addr, 4, pmd_val(*pmd), domain);
                else
                        walk_pte(st, pmd, addr, domain);

                if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) {
                        addr += SECTION_SIZE;
                        pmd++;
                        domain = get_domain_name(pmd);
                        note_page(st, addr, 4, pmd_val(*pmd), domain);
                }
        }
}

static void walk_pud(struct pg_state *st, p4d_t *p4d, unsigned long start)
{
        pud_t *pud = pud_offset(p4d, 0);
        unsigned long addr;
        unsigned i;

        for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
                addr = start + i * PUD_SIZE;
                if (!pud_none(*pud)) {
                        walk_pmd(st, pud, addr);
                } else {
                        note_page(st, addr, 3, pud_val(*pud), NULL);
                }
        }
}

static void walk_p4d(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
        p4d_t *p4d = p4d_offset(pgd, 0);
        unsigned long addr;
        unsigned i;

        for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
                addr = start + i * P4D_SIZE;
                if (!p4d_none(*p4d)) {
                        walk_pud(st, p4d, addr);
                } else {
                        note_page(st, addr, 2, p4d_val(*p4d), NULL);
                }
        }
}

static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
                     unsigned long start)
{
        pgd_t *pgd = pgd_offset(mm, 0UL);
        unsigned i;
        unsigned long addr;

        for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
                addr = start + i * PGDIR_SIZE;
                if (!pgd_none(*pgd)) {
                        walk_p4d(st, pgd, addr);
                } else {
                        note_page(st, addr, 1, pgd_val(*pgd), NULL);
                }
        }
}

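/*
 * Entry point used by the debugfs file: dump the tables described by
 * info.  The final note_page() call flushes the last pending range.
 */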
void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
{
        struct pg_state st = {
                .seq = m,
                .marker = info->markers,
                .check_wx = false,
        };

        walk_pgd(&st, info->mm, info->base_addr);
        note_page(&st, 0, 0, 0, NULL);
}

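/*
 * Precompute, for each level, the union of the decode masks together
 * with direct pointers to the ro/nx entries, and fill in the runtime
 * VMALLOC_START marker.
 */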
static void ptdump_initialize(void)
{
        unsigned i, j;

        for (i = 0; i < ARRAY_SIZE(pg_level); i++)
                if (pg_level[i].bits)
                        for (j = 0; j < pg_level[i].num; j++) {
                                pg_level[i].mask |= pg_level[i].bits[j].mask;
                                if (pg_level[i].bits[j].ro_bit)
                                        pg_level[i].ro_bit = &pg_level[i].bits[j];
                                if (pg_level[i].bits[j].nx_bit)
                                        pg_level[i].nx_bit = &pg_level[i].bits[j];
                        }

        address_markers[2].start_address = VMALLOC_START;
}

static struct ptdump_info kernel_ptdump_info = {
        .mm = &init_mm,
        .markers = address_markers,
        .base_addr = 0,
};

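/*
 * Walk init_mm with printing disabled and only the W+X accounting
 * active; the synthetic marker pair satisfies note_page()'s boundary
 * checks without producing any banner output.
 */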
void ptdump_check_wx(void)
{
        struct pg_state st = {
                .seq = NULL,
                .marker = (struct addr_marker[]) {
                        { 0, NULL},
                        { -1, NULL},
                },
                .check_wx = true,
        };

        walk_pgd(&st, &init_mm, 0);
        note_page(&st, 0, 0, 0, NULL);
        if (st.wx_pages)
                pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
                        st.wx_pages);
        else
                pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}

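/*
 * With CONFIG_ARM_PTDUMP_DEBUGFS enabled, the dump is read through
 * debugfs, e.g. (assuming debugfs is mounted at the usual location):
 *
 *   # mount -t debugfs none /sys/kernel/debug   (if not already mounted)
 *   # cat /sys/kernel/debug/kernel_page_tables
 */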
static int __init ptdump_init(void)
{
        ptdump_initialize();
        ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables");
        return 0;
}
__initcall(ptdump_init);