arch/s390/mm/dump_pagetables.c
v6.13.7
// SPDX-License-Identifier: GPL-2.0
#include <linux/set_memory.h>
#include <linux/ptdump.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sort.h>
#include <linux/mm.h>
#include <linux/kfence.h>
#include <linux/kasan.h>
#include <asm/kasan.h>
#include <asm/abs_lowcore.h>
#include <asm/nospec-branch.h>
#include <asm/sections.h>
#include <asm/maccess.h>

static unsigned long max_addr;

struct addr_marker {
	int is_start;
	unsigned long start_address;
	unsigned long size;
	const char *name;
};

static struct addr_marker *markers;
static unsigned int markers_cnt;
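/*
 * Note (explanatory, not part of the original source): each area
 * registered via add_marker() below contributes two entries to this
 * dynamically allocated array - one marker with is_start = 1 at the
 * area's start address and one with is_start = 0 at its end. The size
 * field lets ptdump_cmp() order markers that share an address, so
 * nested areas produce properly nested Start/End banners.
 */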

struct pg_state {
	struct ptdump_state ptdump;
	struct seq_file *seq;
	int level;
	unsigned int current_prot;
	bool check_wx;
	unsigned long wx_pages;
	unsigned long start_address;
	const struct addr_marker *marker;
};

#define pt_dump_seq_printf(m, fmt, args...)	\
({						\
	struct seq_file *__m = (m);		\
						\
	if (__m)				\
		seq_printf(__m, fmt, ##args);	\
})

#define pt_dump_seq_puts(m, fmt)		\
({						\
	struct seq_file *__m = (m);		\
						\
	if (__m)				\
		seq_printf(__m, fmt);		\
})
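/*
 * Note (explanatory, not part of the original source): both macros
 * above deliberately tolerate a NULL seq_file. ptdump_check_wx()
 * below runs the walker with .seq = NULL, so all output is suppressed
 * and only the W+X accounting in note_prot_wx() takes effect.
 */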

static void print_prot(struct seq_file *m, unsigned int pr, int level)
{
	static const char * const level_name[] =
		{ "ASCE", "PGD", "PUD", "PMD", "PTE" };

	pt_dump_seq_printf(m, "%s ", level_name[level]);
	if (pr & _PAGE_INVALID) {
		pt_dump_seq_printf(m, "I\n");
		return;
	}
	pt_dump_seq_puts(m, (pr & _PAGE_PROTECT) ? "RO " : "RW ");
	pt_dump_seq_puts(m, (pr & _PAGE_NOEXEC) ? "NX\n" : "X\n");
}
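/*
 * Sample annotations produced by print_prot() (illustrative):
 *
 *   PTE I       - invalid entry
 *   PTE RO NX   - read-only, non-executable
 *   PMD RW X    - writable and executable (what note_prot_wx() flags)
 */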

static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
	if (!st->check_wx)
		return;
	if (st->current_prot & _PAGE_INVALID)
		return;
	if (st->current_prot & _PAGE_PROTECT)
		return;
	if (st->current_prot & _PAGE_NOEXEC)
		return;
	/*
	 * The first lowcore page is W+X if spectre mitigations are using
	 * trampolines or the BEAR enhancements facility is not installed,
	 * in which case we have two lpswe instructions in lowcore that need
	 * to be executable.
	 */
	if (addr == PAGE_SIZE && (nospec_uses_trampoline() || !static_key_enabled(&cpu_has_bear)))
		return;
	WARN_ONCE(IS_ENABLED(CONFIG_DEBUG_WX),
		  "s390/mm: Found insecure W+X mapping at address %pS\n",
		  (void *)st->start_address);
	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}

static void note_page_update_state(struct pg_state *st, unsigned long addr, unsigned int prot, int level)
{
	struct seq_file *m = st->seq;

	while (addr >= st->marker[1].start_address) {
		st->marker++;
		pt_dump_seq_printf(m, "---[ %s %s ]---\n", st->marker->name,
				   st->marker->is_start ? "Start" : "End");
	}
	st->start_address = addr;
	st->current_prot = prot;
	st->level = level;
}

static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, u64 val)
{
	int width = sizeof(unsigned long) * 2;
	static const char units[] = "KMGTPE";
	const char *unit = units;
	unsigned long delta;
	struct pg_state *st;
	struct seq_file *m;
	unsigned int prot;

	st = container_of(pt_st, struct pg_state, ptdump);
	m = st->seq;
	prot = val & (_PAGE_PROTECT | _PAGE_NOEXEC);
	if (level == 4 && (val & _PAGE_INVALID))
		prot = _PAGE_INVALID;
	/* For pmd_none() & friends val gets passed as zero. */
	if (level != 4 && !val)
		prot = _PAGE_INVALID;
	/* Final flush from generic code. */
	if (level == -1)
		addr = max_addr;
	if (st->level == -1) {
		pt_dump_seq_puts(m, "---[ Kernel Virtual Address Space ]---\n");
		note_page_update_state(st, addr, prot, level);
	} else if (prot != st->current_prot || level != st->level ||
		   addr >= st->marker[1].start_address) {
		note_prot_wx(st, addr);
		pt_dump_seq_printf(m, "0x%0*lx-0x%0*lx ",
				   width, st->start_address,
				   width, addr);
		delta = (addr - st->start_address) >> 10;
		while (!(delta & 0x3ff) && unit[1]) {
			delta >>= 10;
			unit++;
		}
		pt_dump_seq_printf(m, "%9lu%c ", delta, *unit);
		print_prot(m, st->current_prot, st->level);
		note_page_update_state(st, addr, prot, level);
	}
}
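/*
 * Example of the resulting dump format (addresses, sizes and
 * protections are purely illustrative):
 *
 * ---[ Kernel Virtual Address Space ]---
 * 0x0000000000000000-0x0000000000001000         4K PTE RW X
 * 0x0000000000001000-0x0000000000100000      1020K PTE RW NX
 * ---[ Identity Mapping Start ]---
 * ...
 */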

bool ptdump_check_wx(void)
{
	struct pg_state st = {
		.ptdump = {
			.note_page = note_page,
			.range = (struct ptdump_range[]) {
				{.start = 0, .end = max_addr},
				{.start = 0, .end = 0},
			}
		},
		.seq = NULL,
		.level = -1,
		.current_prot = 0,
		.check_wx = true,
		.wx_pages = 0,
		.start_address = 0,
		.marker = (struct addr_marker[]) {
			{ .start_address =  0, .name = NULL},
			{ .start_address = -1, .name = NULL},
		},
	};

	if (!MACHINE_HAS_NX)
		return true;
	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
	if (st.wx_pages) {
		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n", st.wx_pages);

		return false;
	} else {
		pr_info("Checked W+X mappings: passed, no %sW+X pages found\n",
			(nospec_uses_trampoline() || !static_key_enabled(&cpu_has_bear)) ?
			"unexpected " : "");

		return true;
	}
}

#ifdef CONFIG_PTDUMP_DEBUGFS
static int ptdump_show(struct seq_file *m, void *v)
{
	struct pg_state st = {
		.ptdump = {
			.note_page = note_page,
			.range = (struct ptdump_range[]) {
				{.start = 0, .end = max_addr},
				{.start = 0, .end = 0},
			}
		},
		.seq = m,
		.level = -1,
		.current_prot = 0,
		.check_wx = false,
		.wx_pages = 0,
		.start_address = 0,
		.marker = markers,
	};

	get_online_mems();
	mutex_lock(&cpa_mutex);
	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
	mutex_unlock(&cpa_mutex);
	put_online_mems();
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ptdump);
#endif /* CONFIG_PTDUMP_DEBUGFS */
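/*
 * Usage sketch: with CONFIG_PTDUMP_DEBUGFS enabled, pt_dump_init()
 * below creates the dump file at the debugfs root, so the page tables
 * can be inspected with e.g.:
 *
 *   cat /sys/kernel/debug/kernel_page_tables
 *
 * (assuming debugfs is mounted at its conventional location).
 */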

static int ptdump_cmp(const void *a, const void *b)
{
	const struct addr_marker *ama = a;
	const struct addr_marker *amb = b;

	if (ama->start_address > amb->start_address)
		return 1;
	if (ama->start_address < amb->start_address)
		return -1;
	/*
	 * If the start addresses of two markers are identical sort markers in an
	 * order that considers areas contained within other areas correctly.
	 */
	if (ama->is_start && amb->is_start) {
		if (ama->size > amb->size)
			return -1;
		if (ama->size < amb->size)
			return 1;
		return 0;
	}
	if (!ama->is_start && !amb->is_start) {
		if (ama->size > amb->size)
			return 1;
		if (ama->size < amb->size)
			return -1;
		return 0;
	}
	if (ama->is_start)
		return 1;
	if (amb->is_start)
		return -1;
	return 0;
}
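/*
 * Illustration with hypothetical areas: for an outer area
 * [0x100, 0x500) containing an inner area [0x100, 0x200), both start
 * markers share address 0x100 and the larger (outer) area sorts
 * first, yielding "outer Start, inner Start, inner End, outer End"
 * once the end markers at 0x200 and 0x500 are placed. If one area
 * ends exactly where another begins, the end marker sorts before the
 * start marker.
 */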

static int add_marker(unsigned long start, unsigned long end, const char *name)
{
	size_t oldsize, newsize;

	oldsize = markers_cnt * sizeof(*markers);
	newsize = oldsize + 2 * sizeof(*markers);
	if (!oldsize)
		markers = kvmalloc(newsize, GFP_KERNEL);
	else
		markers = kvrealloc(markers, newsize, GFP_KERNEL);
	if (!markers)
		goto error;
	markers[markers_cnt].is_start = 1;
	markers[markers_cnt].start_address = start;
	markers[markers_cnt].size = end - start;
	markers[markers_cnt].name = name;
	markers_cnt++;
	markers[markers_cnt].is_start = 0;
	markers[markers_cnt].start_address = end;
	markers[markers_cnt].size = end - start;
	markers[markers_cnt].name = name;
	markers_cnt++;
	return 0;
error:
	markers_cnt = 0;
	return -ENOMEM;
}

static int pt_dump_init(void)
{
#ifdef CONFIG_KFENCE
	unsigned long kfence_start = (unsigned long)__kfence_pool;
#endif
	unsigned long lowcore = (unsigned long)get_lowcore();
	int rc;

	/*
	 * Figure out the maximum virtual address being accessible with the
	 * kernel ASCE. We need this to keep the page table walker functions
	 * from accessing non-existent entries.
	 */
	max_addr = (get_lowcore()->kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
	max_addr = 1UL << (max_addr * 11 + 31);
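	/*
	 * Worked example (illustrative): a region-second-table ASCE has
	 * designation-type bits of 2, giving 1UL << (2 * 11 + 31) =
	 * 1UL << 53, i.e. an 8 PB address space; a region-third-table
	 * ASCE (type 1) yields 1UL << 42 = 4 TB.
	 */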
	/* start + end markers - must be added first */
	rc = add_marker(0, -1UL, NULL);
	rc |= add_marker((unsigned long)_stext, (unsigned long)_end, "Kernel Image");
	rc |= add_marker(lowcore, lowcore + sizeof(struct lowcore), "Lowcore");
	rc |= add_marker(__identity_base, __identity_base + ident_map_size, "Identity Mapping");
	rc |= add_marker((unsigned long)__samode31, (unsigned long)__eamode31, "Amode31 Area");
	rc |= add_marker(MODULES_VADDR, MODULES_END, "Modules Area");
	rc |= add_marker(__abs_lowcore, __abs_lowcore + ABS_LOWCORE_MAP_SIZE, "Lowcore Area");
	rc |= add_marker(__memcpy_real_area, __memcpy_real_area + MEMCPY_REAL_SIZE, "Real Memory Copy Area");
	rc |= add_marker((unsigned long)vmemmap, (unsigned long)vmemmap + vmemmap_size, "vmemmap Area");
	rc |= add_marker(VMALLOC_START, VMALLOC_END, "vmalloc Area");
#ifdef CONFIG_KFENCE
	rc |= add_marker(kfence_start, kfence_start + KFENCE_POOL_SIZE, "KFence Pool");
#endif
#ifdef CONFIG_KMSAN
	rc |= add_marker(KMSAN_VMALLOC_SHADOW_START, KMSAN_VMALLOC_SHADOW_END, "Kmsan vmalloc Shadow");
	rc |= add_marker(KMSAN_VMALLOC_ORIGIN_START, KMSAN_VMALLOC_ORIGIN_END, "Kmsan vmalloc Origins");
	rc |= add_marker(KMSAN_MODULES_SHADOW_START, KMSAN_MODULES_SHADOW_END, "Kmsan Modules Shadow");
	rc |= add_marker(KMSAN_MODULES_ORIGIN_START, KMSAN_MODULES_ORIGIN_END, "Kmsan Modules Origins");
#endif
#ifdef CONFIG_KASAN
	rc |= add_marker(KASAN_SHADOW_START, KASAN_SHADOW_END, "Kasan Shadow");
#endif
	if (rc)
		goto error;
	sort(&markers[1], markers_cnt - 1, sizeof(*markers), ptdump_cmp, NULL);
#ifdef CONFIG_PTDUMP_DEBUGFS
	debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
#endif /* CONFIG_PTDUMP_DEBUGFS */
	return 0;
error:
	kvfree(markers);
	return -ENOMEM;
}
device_initcall(pt_dump_init);
v5.9
// SPDX-License-Identifier: GPL-2.0
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kasan.h>
#include <asm/kasan.h>
#include <asm/sections.h>

static unsigned long max_addr;

struct addr_marker {
	unsigned long start_address;
	const char *name;
};

enum address_markers_idx {
	IDENTITY_NR = 0,
	KERNEL_START_NR,
	KERNEL_END_NR,
#ifdef CONFIG_KASAN
	KASAN_SHADOW_START_NR,
	KASAN_SHADOW_END_NR,
#endif
	VMEMMAP_NR,
	VMALLOC_NR,
	MODULES_NR,
};

static struct addr_marker address_markers[] = {
	[IDENTITY_NR]		= {0, "Identity Mapping"},
	[KERNEL_START_NR]	= {(unsigned long)_stext, "Kernel Image Start"},
	[KERNEL_END_NR]		= {(unsigned long)_end, "Kernel Image End"},
#ifdef CONFIG_KASAN
	[KASAN_SHADOW_START_NR]	= {KASAN_SHADOW_START, "Kasan Shadow Start"},
	[KASAN_SHADOW_END_NR]	= {KASAN_SHADOW_END, "Kasan Shadow End"},
#endif
	[VMEMMAP_NR]		= {0, "vmemmap Area"},
	[VMALLOC_NR]		= {0, "vmalloc Area"},
	[MODULES_NR]		= {0, "Modules Area"},
	{ -1, NULL }
};

struct pg_state {
	int level;
	unsigned int current_prot;
	unsigned long start_address;
	unsigned long current_address;
	const struct addr_marker *marker;
};

static void print_prot(struct seq_file *m, unsigned int pr, int level)
{
	static const char * const level_name[] =
		{ "ASCE", "PGD", "PUD", "PMD", "PTE" };

	seq_printf(m, "%s ", level_name[level]);
	if (pr & _PAGE_INVALID) {
		seq_printf(m, "I\n");
		return;
	}
	seq_puts(m, (pr & _PAGE_PROTECT) ? "RO " : "RW ");
	seq_puts(m, (pr & _PAGE_NOEXEC) ? "NX\n" : "X\n");
}

static void note_page(struct seq_file *m, struct pg_state *st,
		     unsigned int new_prot, int level)
{
	static const char units[] = "KMGTPE";
	int width = sizeof(unsigned long) * 2;
	const char *unit = units;
	unsigned int prot, cur;
	unsigned long delta;

	/*
	 * If we have a "break" in the series, we need to flush the state
	 * that we have now. "break" is either changing perms, levels or
	 * address space marker.
	 */
	prot = new_prot;
	cur = st->current_prot;

	if (!st->level) {
		/* First entry */
		st->current_prot = new_prot;
		st->level = level;
		st->marker = address_markers;
		seq_printf(m, "---[ %s ]---\n", st->marker->name);
	} else if (prot != cur || level != st->level ||
		   st->current_address >= st->marker[1].start_address) {
		/* Print the actual finished series */
		seq_printf(m, "0x%0*lx-0x%0*lx ",
			   width, st->start_address,
			   width, st->current_address);
		delta = (st->current_address - st->start_address) >> 10;
		while (!(delta & 0x3ff) && unit[1]) {
			delta >>= 10;
			unit++;
		}
		seq_printf(m, "%9lu%c ", delta, *unit);
		print_prot(m, st->current_prot, st->level);
		while (st->current_address >= st->marker[1].start_address) {
			st->marker++;
			seq_printf(m, "---[ %s ]---\n", st->marker->name);
		}
		st->start_address = st->current_address;
		st->current_prot = new_prot;
		st->level = level;
	}
}
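/*
 * Example output in this version (addresses illustrative); markers
 * are fixed single-address points here rather than Start/End pairs:
 *
 * ---[ Identity Mapping ]---
 * 0x0000000000000000-0x0000000000100000         1M PTE RW X
 * ---[ Kernel Image Start ]---
 * ...
 */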

#ifdef CONFIG_KASAN
static void note_kasan_early_shadow_page(struct seq_file *m,
						struct pg_state *st)
{
	unsigned int prot;

	prot = pte_val(*kasan_early_shadow_pte) &
		(_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
	note_page(m, st, prot, 4);
}
#endif
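/*
 * Note (explanatory, not part of the original source): with KASAN the
 * early shadow region is backed by a single shared page table
 * hierarchy mapped over a huge range. The walkers below compare each
 * upper-level entry against those shared tables and, on a match, emit
 * one note_page() call for the whole subtree instead of descending
 * into identical entries over and over.
 */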

/*
 * The actual page table walker functions. In order to keep the
 * implementation of print_prot() short, we only check and pass
 * _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region,
 * segment or page table entry is invalid or read-only.
 * After all it's just a hint that the current level being walked
 * contains an invalid or read-only entry.
 */
static void walk_pte_level(struct seq_file *m, struct pg_state *st,
			   pmd_t *pmd, unsigned long addr)
{
	unsigned int prot;
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
		st->current_address = addr;
		pte = pte_offset_kernel(pmd, addr);
		prot = pte_val(*pte) &
			(_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
		note_page(m, st, prot, 4);
		addr += PAGE_SIZE;
	}
}

static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
			   pud_t *pud, unsigned long addr)
{
	unsigned int prot;
	pmd_t *pmd;
	int i;

#ifdef CONFIG_KASAN
	if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_early_shadow_pmd)) {
		note_kasan_early_shadow_page(m, st);
		return;
	}
#endif

	pmd = pmd_offset(pud, addr);
	for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++, pmd++) {
		st->current_address = addr;
		if (!pmd_none(*pmd)) {
			if (pmd_large(*pmd)) {
				prot = pmd_val(*pmd) &
					(_SEGMENT_ENTRY_PROTECT |
					 _SEGMENT_ENTRY_NOEXEC);
				note_page(m, st, prot, 3);
			} else
				walk_pte_level(m, st, pmd, addr);
		} else
			note_page(m, st, _PAGE_INVALID, 3);
		addr += PMD_SIZE;
	}
}

static void walk_pud_level(struct seq_file *m, struct pg_state *st,
			   p4d_t *p4d, unsigned long addr)
{
	unsigned int prot;
	pud_t *pud;
	int i;

#ifdef CONFIG_KASAN
	if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_early_shadow_pud)) {
		note_kasan_early_shadow_page(m, st);
		return;
	}
#endif

	pud = pud_offset(p4d, addr);
	for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++, pud++) {
		st->current_address = addr;
		if (!pud_none(*pud))
			if (pud_large(*pud)) {
				prot = pud_val(*pud) &
					(_REGION_ENTRY_PROTECT |
					 _REGION_ENTRY_NOEXEC);
				note_page(m, st, prot, 2);
			} else
				walk_pmd_level(m, st, pud, addr);
		else
			note_page(m, st, _PAGE_INVALID, 2);
		addr += PUD_SIZE;
	}
}

static void walk_p4d_level(struct seq_file *m, struct pg_state *st,
			   pgd_t *pgd, unsigned long addr)
{
	p4d_t *p4d;
	int i;

#ifdef CONFIG_KASAN
	if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_early_shadow_p4d)) {
		note_kasan_early_shadow_page(m, st);
		return;
	}
#endif

	p4d = p4d_offset(pgd, addr);
	for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++, p4d++) {
		st->current_address = addr;
		if (!p4d_none(*p4d))
			walk_pud_level(m, st, p4d, addr);
		else
			note_page(m, st, _PAGE_INVALID, 2);
		addr += P4D_SIZE;
	}
}

static void walk_pgd_level(struct seq_file *m)
{
	unsigned long addr = 0;
	struct pg_state st;
	pgd_t *pgd;
	int i;

	memset(&st, 0, sizeof(st));
	for (i = 0; i < PTRS_PER_PGD && addr < max_addr; i++) {
		st.current_address = addr;
		pgd = pgd_offset_k(addr);
		if (!pgd_none(*pgd))
			walk_p4d_level(m, &st, pgd, addr);
		else
			note_page(m, &st, _PAGE_INVALID, 1);
		addr += PGDIR_SIZE;
		cond_resched();
	}
	/* Flush out the last page */
	st.current_address = max_addr;
	note_page(m, &st, 0, 0);
}

static int ptdump_show(struct seq_file *m, void *v)
{
	walk_pgd_level(m);
	return 0;
}

static int ptdump_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, ptdump_show, NULL);
}

static const struct file_operations ptdump_fops = {
	.open		= ptdump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
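/*
 * Note (explanatory, not part of the original source): the
 * open/read/release boilerplate above is essentially what the
 * DEFINE_SHOW_ATTRIBUTE(ptdump) helper used by the v6.13.7 version
 * expands to; the debugfs file created in pt_dump_init() below is
 * read the same way.
 */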

static int pt_dump_init(void)
{
	/*
	 * Figure out the maximum virtual address being accessible with the
	 * kernel ASCE. We need this to keep the page table walker functions
	 * from accessing non-existent entries.
	 */
	max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
	max_addr = 1UL << (max_addr * 11 + 31);
	address_markers[MODULES_NR].start_address = MODULES_VADDR;
	address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
	address_markers[VMALLOC_NR].start_address = VMALLOC_START;
	debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
	return 0;
}
device_initcall(pt_dump_init);