// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016, Rashmica Gupta, IBM Corp.
 *
 * This traverses the kernel page tables and dumps the
 * information about the used sections of memory to
 * /sys/kernel/debug/kernel_page_tables.
 *
 * Derived from the arm64 implementation:
 * Copyright (c) 2014, The Linux Foundation, Laura Abbott.
 * (C) Copyright 2008 Intel Corporation, Arjan van de Ven.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/ptdump.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/fixmap.h>
#include <linux/const.h>
#include <linux/kasan.h>
#include <asm/page.h>
#include <asm/hugetlb.h>

#include <mm/mmu_decl.h>

#include "ptdump.h"

/*
 * To visualise what is happening:
 *
 *  - PTRS_PER_P** = how many entries there are in the corresponding P**
 *  - P**_SHIFT = how many bits of the address we use to index into the
 *    corresponding P**
 *  - P**_SIZE is how much memory we can access through the table - not the
 *    size of the table itself.
 *  P** = {PGD, PUD, PMD, PTE}
 *
 * Each entry of the PGD points to a PUD. Each entry of a PUD points to a
 * PMD. Each entry of a PMD points to a PTE. And every PTE entry points to
 * a page.
 *
 * In the case where there are only 3 levels, the PUD is folded into the
 * PGD: every PUD has only one entry, which points to the PMD.
 *
 * The page dumper groups page table entries of the same type into a single
 * description. It uses pg_state to track the range information while
 * iterating over the PTE entries. When the continuity is broken it then
 * dumps out a description of the range - i.e. PTEs that are virtually
 * contiguous with the same PTE flags are chunked together. This is to make
 * it clear how different areas of the kernel virtual memory are used.
 */
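/*
 * Illustrative example only (the addresses, size and flag strings below
 * are made up; the real flag names come from the platform's pg_level
 * table): a run of virtually contiguous PTEs with identical flags is
 * collapsed into a single line in the format produced by dump_addr() and
 * dump_flag_info() further down, e.g.
 *
 *   ---[ Start of kernel VM ]---
 *   0xc000000000000000-0xc000000000ffffff  0x0000000000000000       16M  rw
 *
 * i.e. "virtual start-end  physical start  size  flags".
 */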
struct pg_state {
	struct ptdump_state ptdump;
	struct seq_file *seq;
	const struct addr_marker *marker;
	unsigned long start_address;
	unsigned long start_pa;
	int level;
	u64 current_flags;
	bool check_wx;
	unsigned long wx_pages;
};

struct addr_marker {
	unsigned long start_address;
	const char *name;
};

static struct addr_marker address_markers[] = {
	{ 0, "Start of kernel VM" },
#ifdef MODULES_VADDR
	{ 0, "modules start" },
	{ 0, "modules end" },
#endif
	{ 0, "vmalloc() Area" },
	{ 0, "vmalloc() End" },
#ifdef CONFIG_PPC64
	{ 0, "isa I/O start" },
	{ 0, "isa I/O end" },
	{ 0, "phb I/O start" },
	{ 0, "phb I/O end" },
	{ 0, "I/O remap start" },
	{ 0, "I/O remap end" },
	{ 0, "vmemmap start" },
#else
	{ 0, "Early I/O remap start" },
	{ 0, "Early I/O remap end" },
#ifdef CONFIG_HIGHMEM
	{ 0, "Highmem PTEs start" },
	{ 0, "Highmem PTEs end" },
#endif
	{ 0, "Fixmap start" },
	{ 0, "Fixmap end" },
#endif
#ifdef CONFIG_KASAN
	{ 0, "kasan shadow mem start" },
	{ 0, "kasan shadow mem end" },
#endif
	{ -1, NULL },
};

static struct ptdump_range ptdump_range[] __ro_after_init = {
	{TASK_SIZE_MAX, ~0UL},
	{0, 0}
};

#define pt_dump_seq_printf(m, fmt, args...)	\
({						\
	if (m)					\
		seq_printf(m, fmt, ##args);	\
})

#define pt_dump_seq_putc(m, c)	\
({				\
	if (m)			\
		seq_putc(m, c);	\
})

void pt_dump_size(struct seq_file *m, unsigned long size)
{
	static const char units[] = " KMGTPE";
	const char *unit = units;

	/* Work out the appropriate unit to use */
	while (!(size & 1023) && unit[1]) {
		size >>= 10;
		unit++;
	}
	pt_dump_seq_printf(m, "%9lu%c ", size, *unit);
}
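/*
 * Worked examples for pt_dump_size() above (not from the original source,
 * just arithmetic): the loop only scales the size while it is an exact
 * multiple of 1024, so 0x200000 (2 MiB) is shifted down twice and printed
 * as "2M", whereas 1536 fails the first "size & 1023" test and is printed
 * unscaled as "1536" with the blank leading unit.
 */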
static void dump_flag_info(struct pg_state *st, const struct flag_info *flag,
			   u64 pte, int num)
{
	unsigned int i;

	for (i = 0; i < num; i++, flag++) {
		const char *s = NULL;
		u64 val;

		/* flag not defined so don't check it */
		if (flag->mask == 0)
			continue;
		/* Some 'flags' are actually values */
		if (flag->is_val) {
			val = pte & flag->val;
			if (flag->shift)
				val = val >> flag->shift;
			pt_dump_seq_printf(st->seq, "  %s:%llx", flag->set, val);
		} else {
			if ((pte & flag->mask) == flag->val)
				s = flag->set;
			else
				s = flag->clear;
			if (s)
				pt_dump_seq_printf(st->seq, "  %s", s);
		}
		st->current_flags &= ~flag->mask;
	}
	if (st->current_flags != 0)
		pt_dump_seq_printf(st->seq, "  unknown flags:%llx", st->current_flags);
}

static void dump_addr(struct pg_state *st, unsigned long addr)
{
#ifdef CONFIG_PPC64
#define REG	"0x%016lx"
#else
#define REG	"0x%08lx"
#endif

	pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1);
	pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
	pt_dump_size(st->seq, addr - st->start_address);
}

static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
	pte_t pte = __pte(st->current_flags);

	if (!st->check_wx)
		return;

	if (!pte_write(pte) || !pte_exec(pte))
		return;

	WARN_ONCE(IS_ENABLED(CONFIG_DEBUG_WX),
		  "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n",
		  (void *)st->start_address, (void *)st->start_address);

	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}

static void note_page_update_state(struct pg_state *st, unsigned long addr,
				   int level, u64 val)
{
	u64 flag = level >= 0 ? val & pg_level[level].mask : 0;
	u64 pa = val & PTE_RPN_MASK;

	st->level = level;
	st->current_flags = flag;
	st->start_address = addr;
	st->start_pa = pa;

	while (addr >= st->marker[1].start_address) {
		st->marker++;
		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	}
}

static void note_page(struct ptdump_state *pt_st, unsigned long addr,
		      int level, u64 val)
{
	u64 flag = level >= 0 ? val & pg_level[level].mask : 0;
	struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);

	/* At first no level is set */
	if (st->level == -1) {
		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
		note_page_update_state(st, addr, level, val);
	/*
	 * Dump the section of virtual memory when:
	 *   - the PTE flags from one entry to the next differ.
	 *   - we change levels in the tree.
	 *   - the address is in a different section of memory and is thus
	 *     used for a different purpose, regardless of the flags.
	 */
	} else if (flag != st->current_flags || level != st->level ||
		   addr >= st->marker[1].start_address) {

		/* Check the PTE flags */
		if (st->current_flags) {
			note_prot_wx(st, addr);
			dump_addr(st, addr);

			/* Dump all the flags */
			if (pg_level[st->level].flag)
				dump_flag_info(st, pg_level[st->level].flag,
					       st->current_flags,
					       pg_level[st->level].num);

			pt_dump_seq_putc(st->seq, '\n');
		}

		/*
		 * Address indicates we have passed the end of the
		 * current section of virtual memory
		 */
		note_page_update_state(st, addr, level, val);
	}
}
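/*
 * Hypothetical trace (invented addresses and values, 4K pages) of how
 * note_page() chunks entries as the generic walker reports each leaf,
 * where lvl is whatever leaf level the walker passes in:
 *
 *   note_page(pt_st, 0xc000000000000000, lvl, valA)  - first entry, state set
 *   note_page(pt_st, 0xc000000000001000, lvl, valA)  - same flags/level: no output
 *   note_page(pt_st, 0xc000000000002000, lvl, valB)  - flags differ: the range
 *                    0xc000000000000000-0xc000000000001fff is dumped as one line
 */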
static void populate_markers(void)
{
	int i = 0;

#ifdef CONFIG_PPC64
	address_markers[i++].start_address = PAGE_OFFSET;
#else
	address_markers[i++].start_address = TASK_SIZE;
#endif
#ifdef MODULES_VADDR
	address_markers[i++].start_address = MODULES_VADDR;
	address_markers[i++].start_address = MODULES_END;
#endif
	address_markers[i++].start_address = VMALLOC_START;
	address_markers[i++].start_address = VMALLOC_END;
#ifdef CONFIG_PPC64
	address_markers[i++].start_address = ISA_IO_BASE;
	address_markers[i++].start_address = ISA_IO_END;
	address_markers[i++].start_address = PHB_IO_BASE;
	address_markers[i++].start_address = PHB_IO_END;
	address_markers[i++].start_address = IOREMAP_BASE;
	address_markers[i++].start_address = IOREMAP_END;
	/* What is the ifdef about? */
#ifdef CONFIG_PPC_BOOK3S_64
	address_markers[i++].start_address = H_VMEMMAP_START;
#else
	address_markers[i++].start_address = VMEMMAP_BASE;
#endif
#else /* !CONFIG_PPC64 */
	address_markers[i++].start_address = ioremap_bot;
	address_markers[i++].start_address = IOREMAP_TOP;
#ifdef CONFIG_HIGHMEM
	address_markers[i++].start_address = PKMAP_BASE;
	address_markers[i++].start_address = PKMAP_ADDR(LAST_PKMAP);
#endif
	address_markers[i++].start_address = FIXADDR_START;
	address_markers[i++].start_address = FIXADDR_TOP;
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_KASAN
	address_markers[i++].start_address = KASAN_SHADOW_START;
	address_markers[i++].start_address = KASAN_SHADOW_END;
#endif
}

static int ptdump_show(struct seq_file *m, void *v)
{
	struct pg_state st = {
		.seq = m,
		.marker = address_markers,
		.level = -1,
		.ptdump = {
			.note_page = note_page,
			.range = ptdump_range,
		}
	};

	/* Traverse kernel page tables */
	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(ptdump);

static void __init build_pgtable_complete_mask(void)
{
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
		if (pg_level[i].flag)
			for (j = 0; j < pg_level[i].num; j++)
				pg_level[i].mask |= pg_level[i].flag[j].mask;
}

bool ptdump_check_wx(void)
{
	struct pg_state st = {
		.seq = NULL,
		.marker = (struct addr_marker[]) {
			{ 0, NULL },
			{ -1, NULL },
		},
		.level = -1,
		.check_wx = true,
		.ptdump = {
			.note_page = note_page,
			.range = ptdump_range,
		}
	};

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
	    !mmu_has_feature(MMU_FTR_KERNEL_RO))
		return true;

	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);

	if (st.wx_pages) {
		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
			st.wx_pages);
		return false;
	} else {
		pr_info("Checked W+X mappings: passed, no W+X pages found\n");
		return true;
	}
}

static int __init ptdump_init(void)
{
#ifdef CONFIG_PPC64
	if (!radix_enabled())
		ptdump_range[0].start = KERN_VIRT_START;
	else
		ptdump_range[0].start = PAGE_OFFSET;

	ptdump_range[0].end = PAGE_OFFSET + (PGDIR_SIZE * PTRS_PER_PGD);
#endif

	populate_markers();
	build_pgtable_complete_mask();

	if (IS_ENABLED(CONFIG_PTDUMP_DEBUGFS))
		debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
				    &ptdump_fops);
	return 0;
}
device_initcall(ptdump_init);
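/*
 * Usage sketch, assuming CONFIG_PTDUMP_DEBUGFS=y and debugfs mounted at
 * /sys/kernel/debug: reading /sys/kernel/debug/kernel_page_tables (root
 * only, mode 0400) invokes ptdump_show() above, which walks init_mm's
 * page tables and prints one line per contiguous same-flags range.
 */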