// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b) b,
#define EMe(a, b) b

const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};

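/*
 * Flag-name tables consumed by the printk %pG format extensions in
 * lib/vsprintf.c: %pGp (page flags), %pGt (page type), %pGg (gfp flags)
 * and %pGv (vma flags).
 */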
const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags pagetype_names[] = {
	__def_pagetype_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

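/*
 * Dump the state of one page and the folio it belongs to.  Called from
 * __dump_page() with @page pointing at a private snapshot of the struct
 * page, so the fields printed here are at least self-consistent even if
 * the real page keeps changing under us.
 */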
static void __dump_folio(struct folio *folio, struct page *page,
		unsigned long pfn, unsigned long idx)
{
	struct address_space *mapping = folio_mapping(folio);
	int mapcount = 0;
	char *type = "";

	/*
	 * page->_mapcount space in struct page is used by slab pages to
	 * encode own info, and we must avoid calling page_folio() again.
	 */
	if (!folio_test_slab(folio)) {
		mapcount = atomic_read(&page->_mapcount) + 1;
		if (folio_test_large(folio))
			mapcount += folio_entire_mapcount(folio);
	}

	pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			folio_ref_count(folio), mapcount, mapping,
			folio->index + idx, pfn);
	if (folio_test_large(folio)) {
		pr_warn("head: order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
				folio_order(folio),
				folio_entire_mapcount(folio),
				folio_nr_pages_mapped(folio),
				atomic_read(&folio->_pincount));
	}

#ifdef CONFIG_MEMCG
	if (folio->memcg_data)
		pr_warn("memcg:%lx\n", folio->memcg_data);
#endif
	if (folio_test_ksm(folio))
		type = "ksm ";
	else if (folio_test_anon(folio))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
		is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
	pr_warn("page_type: %pGt\n", &folio->page.page_type);

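	/*
	 * Hex-dump the raw struct page, and for large folios also the first
	 * two struct pages of the folio, which carry the extra folio fields
	 * (order, mapcounts, pincount, ...).
	 */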
	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (folio_test_large(folio))
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
				sizeof(unsigned long), folio,
				2 * sizeof(struct page), false);
}

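/*
 * Snapshot-based page dumper.  The struct page (and, where plausible, the
 * first two struct pages of its folio) is copied to the stack before being
 * decoded, so the values printed stay mutually consistent even if the real
 * page is concurrently freed, split or reallocated; at worst the output is
 * stale.  If the copied page and folio stop agreeing with each other, the
 * copy is retried a few times before giving up.
 */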
static void __dump_page(const struct page *page)
{
	struct folio *foliop, folio;
	struct page precise;
	unsigned long pfn = page_to_pfn(page);
	unsigned long idx, nr_pages = 1;
	int loops = 5;

again:
	memcpy(&precise, page, sizeof(*page));
	foliop = page_folio(&precise);
	if (foliop == (struct folio *)&precise) {
		idx = 0;
		if (!folio_test_large(foliop))
			goto dump;
		foliop = (struct folio *)page;
	} else {
		idx = folio_page_idx(foliop, page);
	}

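	/*
	 * If the offset of the page within its folio is plausible, work on a
	 * private copy of the folio's first two struct pages so the values
	 * printed below are mutually consistent.
	 */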
	if (idx < MAX_FOLIO_NR_PAGES) {
		memcpy(&folio, foliop, 2 * sizeof(struct page));
		nr_pages = folio_nr_pages(&folio);
		foliop = &folio;
	}

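	/*
	 * The snapshot of the page disagrees with the snapshot of the folio,
	 * most likely because the folio was split or freed between the two
	 * copies.  Retry a few times; if it never settles, dump the page as
	 * if it were a standalone head page.
	 */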
	if (idx > nr_pages) {
		if (loops-- > 0)
			goto again;
		pr_warn("page does not match folio\n");
		precise.compound_head &= ~1UL;
		foliop = (struct folio *)&precise;
		idx = 0;
	}

dump:
	__dump_folio(foliop, &precise, pfn, idx);
}

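/**
 * dump_page - emit diagnostic information about a struct page
 * @page: the page to dump
 * @reason: human-readable explanation of why the page is being dumped,
 *          may be NULL
 *
 * Prints the page's refcount, mapcount, mapping, flags and raw contents
 * to the kernel log, followed by page_owner information when available.
 * Safe to call on a poisoned (uninitialized) page.
 */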
void dump_page(const struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);

#ifdef CONFIG_DEBUG_VM

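/* Dump the interesting fields of a VMA; only built with CONFIG_DEBUG_VM. */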
void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

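/*
 * Dump an mm_struct.  Pointers are printed with %px (unhashed) because this
 * is a debugging aid and the raw values are what the developer needs.
 */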
void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
		);
}
EXPORT_SYMBOL(dump_mm);

static bool page_init_poisoning __read_mostly = true;

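/*
 * Parse the "vm_debug" kernel command line option.  "vm_debug" alone (or
 * with an empty value) enables every option we control, "vm_debug=-"
 * disables them all, and individual letters select options: currently
 * only 'p'/'P' for struct page init poisoning.
 */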
static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);

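/*
 * Fill a range of struct pages with PAGE_POISON_PATTERN so that use of a
 * page before it is properly initialised can be detected.  Controlled by
 * the "vm_debug" option above.
 */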
void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}

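/*
 * Dump the maple tree backing a VMA iterator, when the maple tree debug
 * code is built in.
 */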
void vma_iter_dump_tree(const struct vma_iterator *vmi)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	mas_dump(&vmi->mas);
	mt_dump(vmi->mas.tree, mt_dump_hex);
#endif	/* CONFIG_DEBUG_VM_MAPLE_TREE */
}

#endif /* CONFIG_DEBUG_VM */