v5.9
// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
const char *migrate_reason_names[MR_TYPES] = {
	"compaction",
	"memory_failure",
	"memory_hotplug",
	"syscall_or_cpuset",
	"mempolicy_mbind",
	"numa_misplaced",
	"cma",
};

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};
const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

void __dump_page(struct page *page, const char *reason)
{
	struct page *head = compound_head(page);
	struct address_space *mapping;
	bool page_poisoned = PagePoisoned(page);
	bool compound = PageCompound(page);
	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	bool page_cma = is_migrate_cma_page(page);
	int mapcount;
	char *type = "";

	/*
	 * If struct page is poisoned don't access Page*() functions as that
	 * leads to recursive loop. Page*() check for poisoned pages, and calls
	 * dump_page() when detected.
	 */
	if (page_poisoned) {
		pr_warn("page:%px is uninitialized and poisoned", page);
		goto hex_only;
	}

	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
		/*
		 * Corrupt page, so we cannot call page_mapping. Instead, do a
		 * safe subset of the steps that page_mapping() does. Caution:
		 * this will be misleading for tail pages, PageSwapCache pages,
		 * and potentially other situations. (See the page_mapping()
		 * implementation for what's missing here.)
		 */
		unsigned long tmp = (unsigned long)page->mapping;

		if (tmp & PAGE_MAPPING_ANON)
			mapping = NULL;
		else
			mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
		head = page;
		compound = false;
	} else {
		mapping = page_mapping(page);
	}

	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * page->_mapcount space in struct page is used by sl[aou]b pages to
	 * encode own info.
	 */
	mapcount = PageSlab(head) ? 0 : page_mapcount(page);

	pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			page, page_ref_count(head), mapcount, mapping,
			page_to_pgoff(page), page_to_pfn(page));
	if (compound) {
		if (hpage_pincount_available(page)) {
			pr_warn("head:%p order:%u compound_mapcount:%d compound_pincount:%d\n",
					head, compound_order(head),
					head_mapcount(head),
					head_pincount(head));
		} else {
			pr_warn("head:%p order:%u compound_mapcount:%d\n",
					head, compound_order(head),
					head_mapcount(head));
		}
	}
	if (PageKsm(page))
		type = "ksm ";
	else if (PageAnon(page))
		type = "anon ";
	else if (mapping) {
		struct inode *host;
		const struct address_space_operations *a_ops;
		struct hlist_node *dentry_first;
		struct dentry *dentry_ptr;
		struct dentry dentry;

		/*
		 * mapping can be invalid pointer and we don't want to crash
		 * accessing it, so probe everything depending on it carefully
		 */
		if (get_kernel_nofault(host, &mapping->host) ||
		    get_kernel_nofault(a_ops, &mapping->a_ops)) {
			pr_warn("failed to read mapping contents, not a valid kernel address?\n");
			goto out_mapping;
		}

		if (!host) {
			pr_warn("aops:%ps\n", a_ops);
			goto out_mapping;
		}

		if (get_kernel_nofault(dentry_first, &host->i_dentry.first)) {
			pr_warn("aops:%ps with invalid host inode %px\n",
					a_ops, host);
			goto out_mapping;
		}

		if (!dentry_first) {
			pr_warn("aops:%ps ino:%lx\n", a_ops, host->i_ino);
			goto out_mapping;
		}

		dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
		if (get_kernel_nofault(dentry, dentry_ptr)) {
			pr_warn("aops:%ps with invalid dentry %px\n", a_ops,
					dentry_ptr);
		} else {
			/*
			 * if dentry is corrupted, the %pd handler may still
			 * crash, but it's unlikely that we reach here with a
			 * corrupted struct page
			 */
			pr_warn("aops:%ps ino:%lx dentry name:\"%pd\"\n",
					a_ops, host->i_ino, &dentry);
		}
	}
out_mapping:
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_warn("%sflags: %#lx(%pGp)%s\n", type, head->flags, &head->flags,
		page_cma ? " CMA" : "");
hex_only:
	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (head != page)
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), head,
			sizeof(struct page), false);

	if (reason)
		pr_warn("page dumped because: %s\n", reason);
#ifdef CONFIG_MEMCG
	if (!page_poisoned && page->mem_cgroup)
		pr_warn("page->mem_cgroup:%px\n", page->mem_cgroup);
#endif
}

void dump_page(struct page *page, const char *reason)
{
	__dump_page(page, reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
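
/*
 * Illustrative usage (not from the upstream file): callers typically
 * invoke dump_page() when they detect an inconsistent page, e.g.
 *
 *	if (WARN_ON(!page_count(page)))
 *		dump_page(page, "zero refcount");
 *
 * The reason string is printed after the decoded state and the raw hex
 * dump, via the "page dumped because:" line above.
 */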

#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px\n"
		"next %px prev %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
		vma->vm_prev, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}

static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);
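
/*
 * Illustrative boot-parameter usage, derived from the parsing above:
 *
 *	vm_debug	enable all options this knob controls
 *	vm_debug=-	disable them all
 *	vm_debug=p	enable only struct page init poisoning
 *
 * Unknown option characters are reported and skipped.
 */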

void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
EXPORT_SYMBOL_GPL(page_init_poison);
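
/*
 * Note (not in the upstream file): PAGE_POISON_PATTERN fills the struct
 * page with all-ones, which is what PagePoisoned() (checked at the top of
 * __dump_page()) looks for in page->flags to recognize a struct page that
 * was never initialized by memmap init.
 */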
#endif		/* CONFIG_DEBUG_VM */
v6.9.4
// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b

const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};
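
/*
 * Illustrative sketch (not part of the kernel source) of the EM()/EMe()
 * "X-macro" pattern used above. MIGRATE_REASON is a single list of
 * EM(enum, string) entries, so redefining the two macros turns the same
 * list into either enum values or, as here, a string table:
 *
 *	#define LIST	EM(A, "a") EM(B, "b") EMe(C, "c")
 *	#define EM(a, b)	b,
 *	#define EMe(a, b)	b
 *	const char *names[] = { LIST };	 expands to { "a", "b", "c" }
 *
 * LIST, A/B/C and names[] are invented here for illustration only.
 */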

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags pagetype_names[] = {
	__def_pagetype_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

static void __dump_folio(struct folio *folio, struct page *page,
		unsigned long pfn, unsigned long idx)
{
	struct address_space *mapping = folio_mapping(folio);
	int mapcount = 0;
	char *type = "";

	/*
	 * page->_mapcount space in struct page is used by slab pages to
	 * encode own info, and we must avoid calling page_folio() again.
	 */
	if (!folio_test_slab(folio)) {
		mapcount = atomic_read(&page->_mapcount) + 1;
		if (folio_test_large(folio))
			mapcount += folio_entire_mapcount(folio);
	}

	pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			folio_ref_count(folio), mapcount, mapping,
			folio->index + idx, pfn);
	if (folio_test_large(folio)) {
		pr_warn("head: order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
				folio_order(folio),
				folio_entire_mapcount(folio),
				folio_nr_pages_mapped(folio),
				atomic_read(&folio->_pincount));
	}

#ifdef CONFIG_MEMCG
	if (folio->memcg_data)
		pr_warn("memcg:%lx\n", folio->memcg_data);
#endif
	if (folio_test_ksm(folio))
		type = "ksm ";
	else if (folio_test_anon(folio))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
		is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
	pr_warn("page_type: %pGt\n", &folio->page.page_type);

	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (folio_test_large(folio))
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), folio,
			2 * sizeof(struct page), false);
}

static void __dump_page(const struct page *page)
{
	struct folio *foliop, folio;
	struct page precise;
	unsigned long pfn = page_to_pfn(page);
	unsigned long idx, nr_pages = 1;
	int loops = 5;
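
	/*
	 * Note on the approach (comment added for clarity, not upstream):
	 * the page and the first two struct pages of its folio are copied
	 * to the stack so that the folio helpers below operate on a stable
	 * snapshot. If the snapshot is internally inconsistent
	 * (idx > nr_pages), the copy raced with the folio being split or
	 * freed; retry a bounded number of times, then fall back to dumping
	 * the page as a standalone page.
	 */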
again:
	memcpy(&precise, page, sizeof(*page));
	foliop = page_folio(&precise);
	if (foliop == (struct folio *)&precise) {
		idx = 0;
		if (!folio_test_large(foliop))
			goto dump;
		foliop = (struct folio *)page;
	} else {
		idx = folio_page_idx(foliop, page);
	}

	if (idx < MAX_FOLIO_NR_PAGES) {
		memcpy(&folio, foliop, 2 * sizeof(struct page));
		nr_pages = folio_nr_pages(&folio);
		foliop = &folio;
	}

	if (idx > nr_pages) {
		if (loops-- > 0)
			goto again;
		pr_warn("page does not match folio\n");
		precise.compound_head &= ~1UL;
		foliop = (struct folio *)&precise;
		idx = 0;
	}

dump:
	__dump_folio(foliop, &precise, pfn, idx);
}

void dump_page(const struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
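
/*
 * Note (not in the upstream file): dump_page_owner() only prints the
 * allocation and last-free stacks if the kernel was built with
 * CONFIG_PAGE_OWNER and booted with page_owner=on; otherwise it is a
 * no-op.
 */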

#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
EXPORT_SYMBOL(dump_mm);

static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);

void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}

void vma_iter_dump_tree(const struct vma_iterator *vmi)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	mas_dump(&vmi->mas);
	mt_dump(vmi->mas.tree, mt_dump_hex);
#endif	/* CONFIG_DEBUG_VM_MAPLE_TREE */
}

#endif		/* CONFIG_DEBUG_VM */