// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b) b,
#define EMe(a, b) b

const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};
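
/*
 * Illustrative sketch (not part of the source): each EM(type, string)
 * entry in the MIGRATE_REASON table contributes one name and EMe()
 * terminates the list, so the preprocessor expands the array to
 * roughly:
 *
 *	const char *migrate_reason_names[MR_TYPES] = {
 *		"compaction",
 *		"memory_failure",
 *		...
 *	};
 *
 * Sharing one table with the migrate tracepoints keeps these names and
 * the trace output from drifting apart.
 */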

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags pagetype_names[] = {
	__def_pagetype_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};
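
/*
 * Illustrative note: these tables back the printk() %pG format
 * specifiers implemented in lib/vsprintf.c: %pGp (page flags), %pGt
 * (page type), %pGg (gfp flags) and %pGv (vma flags). A hypothetical
 * caller decodes a flags word into symbolic names like so:
 *
 *	pr_warn("flags: %pGv\n", &vma->vm_flags);
 */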

static void __dump_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	struct page *head = &folio->page;
	struct address_space *mapping;
	bool compound = PageCompound(page);
	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	bool page_cma = is_migrate_cma_page(page);
	int mapcount;
	char *type = "";

	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
		/*
		 * Corrupt page, so we cannot call page_mapping. Instead, do a
		 * safe subset of the steps that page_mapping() does. Caution:
		 * this will be misleading for tail pages, PageSwapCache pages,
		 * and potentially other situations. (See the page_mapping()
		 * implementation for what's missing here.)
		 */
		unsigned long tmp = (unsigned long)page->mapping;

		if (tmp & PAGE_MAPPING_ANON)
			mapping = NULL;
		else
			mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
		head = page;
		folio = (struct folio *)page;
		compound = false;
	} else {
		mapping = page_mapping(page);
	}

	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * page->_mapcount space in struct page is used by sl[aou]b pages to
	 * encode their own info.
	 */
	mapcount = PageSlab(head) ? 0 : page_mapcount(page);

	pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			page, page_ref_count(head), mapcount, mapping,
			page_to_pgoff(page), page_to_pfn(page));
	if (compound) {
		pr_warn("head:%p order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
				head, compound_order(head),
				folio_entire_mapcount(folio),
				folio_nr_pages_mapped(folio),
				atomic_read(&folio->_pincount));
	}

#ifdef CONFIG_MEMCG
	if (head->memcg_data)
		pr_warn("memcg:%lx\n", head->memcg_data);
#endif
	if (PageKsm(page))
		type = "ksm ";
	else if (PageAnon(page))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_warn("%sflags: %pGp%s\n", type, &head->flags,
		page_cma ? " CMA" : "");
	pr_warn("page_type: %pGt\n", &head->page_type);

	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (head != page)
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
				sizeof(unsigned long), head,
				sizeof(struct page), false);
}

void dump_page(struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned\n", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
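
/*
 * Illustrative sketch (hypothetical caller): dump_page() is normally
 * reached via VM_BUG_ON_PAGE() from <linux/mmdebug.h>, but ad-hoc
 * debugging code can also call it directly:
 *
 *	if (WARN_ON(!page_ref_count(page)))
 *		dump_page(page, "zero refcount");
 */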

#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);
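
/*
 * Illustrative sketch (hypothetical caller): VM_BUG_ON_VMA() from
 * <linux/mmdebug.h> calls dump_vma() before BUG()ing, e.g.:
 *
 *	VM_BUG_ON_VMA(vma->vm_start >= vma->vm_end, vma);
 */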

void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
EXPORT_SYMBOL(dump_mm);
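
/*
 * Illustrative sketch: the usual consumer is VM_BUG_ON_MM() from
 * <linux/mmdebug.h>, which dumps the whole mm_struct before BUG()ing:
 *
 *	VM_BUG_ON_MM(atomic_read(&mm->mm_users) < 0, mm);
 */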

static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);
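
/*
 * Illustrative usage (kernel command line):
 *
 *	vm_debug	enable every option handled here (no argument
 *			means "enable all")
 *	vm_debug=-	disable all options
 *	vm_debug=P	enable page structure init time poisoning only
 *
 * Option letters are matched case-insensitively; unknown letters are
 * reported and skipped.
 */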

void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
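
/*
 * Illustrative sketch (hypothetical caller): page_init_poison() lets
 * memory hotplug code poison a freshly added range of struct pages so
 * that a later use of an uninitialized entry trips PagePoisoned():
 *
 *	page_init_poison(pfn_to_page(start_pfn),
 *			 sizeof(struct page) * nr_pages);
 */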

void vma_iter_dump_tree(const struct vma_iterator *vmi)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	mas_dump(&vmi->mas);
	mt_dump(vmi->mas.tree, mt_dump_hex);
#endif	/* CONFIG_DEBUG_VM_MAPLE_TREE */
}
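
/*
 * Illustrative note: with CONFIG_DEBUG_VM_MAPLE_TREE enabled,
 * vma_iter_dump_tree() gives VMA validation code (e.g. validate_mm()
 * in mm/mmap.c) a way to dump both the iterator state and the backing
 * maple tree when an inconsistency is detected; without it, the
 * function compiles to an empty stub.
 */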

#endif	/* CONFIG_DEBUG_VM */