mm/debug.c at v5.14.15:
// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"

const char *migrate_reason_names[MR_TYPES] = {
	"compaction",
	"memory_failure",
	"memory_hotplug",
	"syscall_or_cpuset",
	"mempolicy_mbind",
	"numa_misplaced",
	"contig_range",
	"longterm_pin",
};
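/*
 * Usage sketch (illustrative, not part of this file): the array is
 * indexed by enum migrate_reason, so a caller can turn a numeric
 * reason into a readable string, e.g.:
 *
 *	pr_info("migrate reason: %s\n", migrate_reason_names[MR_COMPACTION]);
 */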

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

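/*
 * Usage sketch (assumption): these {mask, name} tables, each terminated
 * by {0, NULL}, back vsprintf()'s %pGp/%pGg/%pGv extensions, so a flags
 * word can be decoded symbolically in a printk, e.g.:
 *
 *	pr_warn("flags: %#lx(%pGv)\n", vma->vm_flags, &vma->vm_flags);
 */
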
static void __dump_page(struct page *page)
{
	struct page *head = compound_head(page);
	struct address_space *mapping;
	bool compound = PageCompound(page);
	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	bool page_cma = is_migrate_cma_page(page);
	int mapcount;
	char *type = "";

	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
		/*
		 * Corrupt page, so we cannot call page_mapping. Instead, do a
		 * safe subset of the steps that page_mapping() does. Caution:
		 * this will be misleading for tail pages, PageSwapCache pages,
		 * and potentially other situations. (See the page_mapping()
		 * implementation for what's missing here.)
		 */
		unsigned long tmp = (unsigned long)page->mapping;

		if (tmp & PAGE_MAPPING_ANON)
			mapping = NULL;
		else
			mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
		head = page;
		compound = false;
	} else {
		mapping = page_mapping(page);
	}

	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * page->_mapcount space in struct page is used by sl[aou]b pages to
	 * encode own info.
	 */
	mapcount = PageSlab(head) ? 0 : page_mapcount(page);

	pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			page, page_ref_count(head), mapcount, mapping,
			page_to_pgoff(page), page_to_pfn(page));
	if (compound) {
		if (hpage_pincount_available(page)) {
			pr_warn("head:%p order:%u compound_mapcount:%d compound_pincount:%d\n",
					head, compound_order(head),
					head_compound_mapcount(head),
					head_compound_pincount(head));
		} else {
			pr_warn("head:%p order:%u compound_mapcount:%d\n",
					head, compound_order(head),
					head_compound_mapcount(head));
		}
	}

#ifdef CONFIG_MEMCG
	if (head->memcg_data)
		pr_warn("memcg:%lx\n", head->memcg_data);
#endif
	if (PageKsm(page))
		type = "ksm ";
	else if (PageAnon(page))
		type = "anon ";
	else if (mapping) {
		struct inode *host;
		const struct address_space_operations *a_ops;
		struct hlist_node *dentry_first;
		struct dentry *dentry_ptr;
		struct dentry dentry;
		unsigned long ino;

		/*
		 * mapping can be invalid pointer and we don't want to crash
		 * accessing it, so probe everything depending on it carefully
		 */
		if (get_kernel_nofault(host, &mapping->host) ||
		    get_kernel_nofault(a_ops, &mapping->a_ops)) {
			pr_warn("failed to read mapping contents, not a valid kernel address?\n");
			goto out_mapping;
		}

		if (!host) {
			pr_warn("aops:%ps\n", a_ops);
			goto out_mapping;
		}

		if (get_kernel_nofault(dentry_first, &host->i_dentry.first) ||
		    get_kernel_nofault(ino, &host->i_ino)) {
			pr_warn("aops:%ps with invalid host inode %px\n",
					a_ops, host);
			goto out_mapping;
		}

		if (!dentry_first) {
			pr_warn("aops:%ps ino:%lx\n", a_ops, ino);
			goto out_mapping;
		}

		dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
		if (get_kernel_nofault(dentry, dentry_ptr)) {
			pr_warn("aops:%ps ino:%lx with invalid dentry %px\n",
					a_ops, ino, dentry_ptr);
		} else {
			/*
			 * if dentry is corrupted, the %pd handler may still
			 * crash, but it's unlikely that we reach here with a
			 * corrupted struct page
			 */
			pr_warn("aops:%ps ino:%lx dentry name:\"%pd\"\n",
					a_ops, ino, &dentry);
		}
	}
out_mapping:
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_warn("%sflags: %#lx(%pGp)%s\n", type, head->flags, &head->flags,
		page_cma ? " CMA" : "");
	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (head != page)
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), head,
			sizeof(struct page), false);
}

void dump_page(struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
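/*
 * Call-site sketch (illustrative, not from this file): MM code calls
 * dump_page() when a consistency check trips, e.g.:
 *
 *	if (unlikely(page_ref_count(page) < 0))
 *		dump_page(page, "negative page refcount");
 */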

#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px\n"
		"next %px prev %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
		vma->vm_prev, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);
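/*
 * Call-site sketch (assumption): dump_vma() backs the VM_BUG_ON_VMA()
 * assertion in <linux/mmdebug.h> under CONFIG_DEBUG_VM, e.g.:
 *
 *	VM_BUG_ON_VMA(vma->vm_start >= vma->vm_end, vma);
 */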

void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}

static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);
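/*
 * Command-line sketch (behaviour follows from the parser above):
 * "vm_debug" alone keeps every option enabled, "vm_debug=-" disables
 * them all, and "vm_debug=P" enables only struct page init poisoning:
 *
 *	linux ... vm_debug=P
 */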

void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
EXPORT_SYMBOL_GPL(page_init_poison);
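/*
 * Effect sketch (assumption): with poisoning enabled, the whole struct
 * page is filled with PAGE_POISON_PATTERN (all bits set), which is the
 * state PagePoisoned() detects in dump_page() above:
 *
 *	page_init_poison(page, sizeof(struct page));
 *	VM_BUG_ON(!PagePoisoned(page));
 */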
#endif		/* CONFIG_DEBUG_VM */
mm/debug.c at v6.8:
// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b

const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};
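/*
 * Expansion sketch (assuming the usual shape of MIGRATE_REASON in
 * <trace/events/migrate.h>, i.e. EM(MR_COMPACTION, "compaction") ...
 * EMe(MR_LONGTERM_PIN, "longterm_pin")): with EM(a, b) expanding to
 * "b," and EMe(a, b) to "b", the single MIGRATE_REASON token above
 * becomes the comma-separated list of reason strings, kept in sync
 * with enum migrate_reason without repeating the names by hand.
 */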

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags pagetype_names[] = {
	__def_pagetype_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

static void __dump_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	struct page *head = &folio->page;
	struct address_space *mapping;
	bool compound = PageCompound(page);
	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	bool page_cma = is_migrate_cma_page(page);
	int mapcount;
	char *type = "";

	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
		/*
		 * Corrupt page, so we cannot call page_mapping. Instead, do a
		 * safe subset of the steps that page_mapping() does. Caution:
		 * this will be misleading for tail pages, PageSwapCache pages,
		 * and potentially other situations. (See the page_mapping()
		 * implementation for what's missing here.)
		 */
		unsigned long tmp = (unsigned long)page->mapping;

		if (tmp & PAGE_MAPPING_ANON)
			mapping = NULL;
		else
			mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
		head = page;
		folio = (struct folio *)page;
		compound = false;
	} else {
		mapping = page_mapping(page);
	}

	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * page->_mapcount space in struct page is used by sl[aou]b pages to
	 * encode own info.
	 */
	mapcount = PageSlab(head) ? 0 : page_mapcount(page);

	pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			page, page_ref_count(head), mapcount, mapping,
			page_to_pgoff(page), page_to_pfn(page));
	if (compound) {
		pr_warn("head:%p order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
				head, compound_order(head),
				folio_entire_mapcount(folio),
				folio_nr_pages_mapped(folio),
				atomic_read(&folio->_pincount));
	}

#ifdef CONFIG_MEMCG
	if (head->memcg_data)
		pr_warn("memcg:%lx\n", head->memcg_data);
#endif
	if (PageKsm(page))
		type = "ksm ";
	else if (PageAnon(page))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_warn("%sflags: %pGp%s\n", type, &head->flags,
		page_cma ? " CMA" : "");
	pr_warn("page_type: %pGt\n", &head->page_type);

	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (head != page)
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), head,
			sizeof(struct page), false);
}

void dump_page(struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
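/*
 * Call-site sketch (assumption): the common entry point is the
 * VM_BUG_ON_PAGE() assertion in <linux/mmdebug.h>, which dumps the
 * page before BUG()ing:
 *
 *	VM_BUG_ON_PAGE(!PageLocked(page), page);
 */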

#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
EXPORT_SYMBOL(dump_mm);
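/*
 * Call-site sketch (assumption): dump_mm() backs the VM_BUG_ON_MM()
 * assertion in <linux/mmdebug.h>, e.g.:
 *
 *	VM_BUG_ON_MM(mm->map_count < 0, mm);
 */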

static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);

void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}

void vma_iter_dump_tree(const struct vma_iterator *vmi)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	mas_dump(&vmi->mas);
	mt_dump(vmi->mas.tree, mt_dump_hex);
#endif	/* CONFIG_DEBUG_VM_MAPLE_TREE */
}
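/*
 * Usage sketch (assumption): with CONFIG_DEBUG_VM_MAPLE_TREE enabled, a
 * caller holding mmap_lock can dump the maple tree behind a VMA iterator:
 *
 *	VMA_ITERATOR(vmi, mm, 0);
 *	vma_iter_dump_tree(&vmi);
 */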

#endif		/* CONFIG_DEBUG_VM */