v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/debugfs.h>
  3#include <linux/mm.h>
  4#include <linux/slab.h>
  5#include <linux/uaccess.h>
  6#include <linux/bootmem.h>
  7#include <linux/stacktrace.h>
  8#include <linux/page_owner.h>
  9#include <linux/jump_label.h>
 10#include <linux/migrate.h>
 11#include <linux/stackdepot.h>
 12#include <linux/seq_file.h>
 13
 14#include "internal.h"
 15
 16/*
 17 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 18 * to use off stack temporal storage
 19 */
 20#define PAGE_OWNER_STACK_DEPTH (16)
 21
 22struct page_owner {
 23	unsigned short order;
 24	short last_migrate_reason;
 25	gfp_t gfp_mask;
 26	depot_stack_handle_t handle;
 27};
 28
 29static bool page_owner_disabled = true;
 30DEFINE_STATIC_KEY_FALSE(page_owner_inited);
 31
 32static depot_stack_handle_t dummy_handle;
 33static depot_stack_handle_t failure_handle;
 34static depot_stack_handle_t early_handle;
 35
 36static void init_early_allocated_pages(void);
 37
 38static int __init early_page_owner_param(char *buf)
 39{
 40	if (!buf)
 41		return -EINVAL;
 42
 43	if (strcmp(buf, "on") == 0)
 44		page_owner_disabled = false;
 45
 46	return 0;
 47}
 48early_param("page_owner", early_page_owner_param);
 49
 50static bool need_page_owner(void)
 51{
 52	if (page_owner_disabled)
 53		return false;
 54
 55	return true;
 56}
 57
 58static __always_inline depot_stack_handle_t create_dummy_stack(void)
 59{
 60	unsigned long entries[4];
 61	struct stack_trace dummy;
 62
 63	dummy.nr_entries = 0;
 64	dummy.max_entries = ARRAY_SIZE(entries);
 65	dummy.entries = &entries[0];
 66	dummy.skip = 0;
 67
 68	save_stack_trace(&dummy);
 69	return depot_save_stack(&dummy, GFP_KERNEL);
 70}
 71
 72static noinline void register_dummy_stack(void)
 73{
 74	dummy_handle = create_dummy_stack();
 75}
 76
 77static noinline void register_failure_stack(void)
 78{
 79	failure_handle = create_dummy_stack();
 80}
 81
 82static noinline void register_early_stack(void)
 83{
 84	early_handle = create_dummy_stack();
 85}
 86
 87static void init_page_owner(void)
 88{
 89	if (page_owner_disabled)
 90		return;
 91
 92	register_dummy_stack();
 93	register_failure_stack();
 94	register_early_stack();
 95	static_branch_enable(&page_owner_inited);
 96	init_early_allocated_pages();
 97}
 98
 99struct page_ext_operations page_owner_ops = {
100	.size = sizeof(struct page_owner),
101	.need = need_page_owner,
102	.init = init_page_owner,
103};
104
105static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
106{
107	return (void *)page_ext + page_owner_ops.offset;
108}
109
110void __reset_page_owner(struct page *page, unsigned int order)
111{
112	int i;
113	struct page_ext *page_ext;
114
115	for (i = 0; i < (1 << order); i++) {
116		page_ext = lookup_page_ext(page + i);
117		if (unlikely(!page_ext))
118			continue;
119		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
120	}
121}
122
123static inline bool check_recursive_alloc(struct stack_trace *trace,
124					unsigned long ip)
125{
126	int i;
127
128	if (!trace->nr_entries)
129		return false;
130
131	for (i = 0; i < trace->nr_entries; i++) {
132		if (trace->entries[i] == ip)
133			return true;
134	}
135
136	return false;
137}
138
139static noinline depot_stack_handle_t save_stack(gfp_t flags)
140{
141	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
142	struct stack_trace trace = {
143		.nr_entries = 0,
144		.entries = entries,
145		.max_entries = PAGE_OWNER_STACK_DEPTH,
146		.skip = 2
147	};
148	depot_stack_handle_t handle;
149
150	save_stack_trace(&trace);
151	if (trace.nr_entries != 0 &&
152	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
153		trace.nr_entries--;
154
155	/*
156	 * We need to check recursion here because our request to stackdepot
157	 * could trigger memory allocation to save new entry. New memory
158	 * allocation would reach here and call depot_save_stack() again
159	 * if we don't catch it. There is still not enough memory in stackdepot
160	 * so it would try to allocate memory again and loop forever.
161	 */
162	if (check_recursive_alloc(&trace, _RET_IP_))
163		return dummy_handle;
164
165	handle = depot_save_stack(&trace, flags);
166	if (!handle)
167		handle = failure_handle;
168
169	return handle;
170}
171
172static inline void __set_page_owner_handle(struct page_ext *page_ext,
173	depot_stack_handle_t handle, unsigned int order, gfp_t gfp_mask)
174{
175	struct page_owner *page_owner;
176
177	page_owner = get_page_owner(page_ext);
178	page_owner->handle = handle;
179	page_owner->order = order;
180	page_owner->gfp_mask = gfp_mask;
181	page_owner->last_migrate_reason = -1;
182
183	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
184}
185
186noinline void __set_page_owner(struct page *page, unsigned int order,
187					gfp_t gfp_mask)
188{
189	struct page_ext *page_ext = lookup_page_ext(page);
190	depot_stack_handle_t handle;
191
192	if (unlikely(!page_ext))
193		return;
194
195	handle = save_stack(gfp_mask);
196	__set_page_owner_handle(page_ext, handle, order, gfp_mask);
197}
198
199void __set_page_owner_migrate_reason(struct page *page, int reason)
200{
201	struct page_ext *page_ext = lookup_page_ext(page);
202	struct page_owner *page_owner;
203
204	if (unlikely(!page_ext))
205		return;
206
207	page_owner = get_page_owner(page_ext);
208	page_owner->last_migrate_reason = reason;
209}
210
211void __split_page_owner(struct page *page, unsigned int order)
212{
213	int i;
214	struct page_ext *page_ext = lookup_page_ext(page);
215	struct page_owner *page_owner;
216
217	if (unlikely(!page_ext))
218		return;
219
220	page_owner = get_page_owner(page_ext);
221	page_owner->order = 0;
222	for (i = 1; i < (1 << order); i++)
223		__copy_page_owner(page, page + i);
224}
225
226void __copy_page_owner(struct page *oldpage, struct page *newpage)
227{
228	struct page_ext *old_ext = lookup_page_ext(oldpage);
229	struct page_ext *new_ext = lookup_page_ext(newpage);
230	struct page_owner *old_page_owner, *new_page_owner;
231
232	if (unlikely(!old_ext || !new_ext))
233		return;
234
235	old_page_owner = get_page_owner(old_ext);
236	new_page_owner = get_page_owner(new_ext);
237	new_page_owner->order = old_page_owner->order;
238	new_page_owner->gfp_mask = old_page_owner->gfp_mask;
239	new_page_owner->last_migrate_reason =
240		old_page_owner->last_migrate_reason;
241	new_page_owner->handle = old_page_owner->handle;
242
243	/*
244	 * We don't clear the bit on the oldpage as it's going to be freed
245	 * after migration. Until then, the info can be useful in case of
246	 * a bug, and the overall stats will be off a bit only temporarily.
247	 * Also, migrate_misplaced_transhuge_page() can still fail the
248	 * migration and then we want the oldpage to retain the info. But
249	 * in that case we also don't need to explicitly clear the info from
250	 * the new page, which will be freed.
251	 */
252	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
253}
254
255void pagetypeinfo_showmixedcount_print(struct seq_file *m,
256				       pg_data_t *pgdat, struct zone *zone)
257{
258	struct page *page;
259	struct page_ext *page_ext;
260	struct page_owner *page_owner;
261	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
262	unsigned long end_pfn = pfn + zone->spanned_pages;
263	unsigned long count[MIGRATE_TYPES] = { 0, };
264	int pageblock_mt, page_mt;
265	int i;
266
267	/* Scan block by block. First and last block may be incomplete */
268	pfn = zone->zone_start_pfn;
269
270	/*
271	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
272	 * a zone boundary, it will be double counted between zones. This does
273	 * not matter as the mixed block count will still be correct
274	 */
275	for (; pfn < end_pfn; ) {
276		if (!pfn_valid(pfn)) {
277			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
278			continue;
279		}
280
281		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
282		block_end_pfn = min(block_end_pfn, end_pfn);
283
284		page = pfn_to_page(pfn);
285		pageblock_mt = get_pageblock_migratetype(page);
286
287		for (; pfn < block_end_pfn; pfn++) {
288			if (!pfn_valid_within(pfn))
289				continue;
290
291			page = pfn_to_page(pfn);
292
293			if (page_zone(page) != zone)
294				continue;
295
296			if (PageBuddy(page)) {
297				unsigned long freepage_order;
298
299				freepage_order = page_order_unsafe(page);
300				if (freepage_order < MAX_ORDER)
301					pfn += (1UL << freepage_order) - 1;
302				continue;
303			}
304
305			if (PageReserved(page))
306				continue;
307
308			page_ext = lookup_page_ext(page);
309			if (unlikely(!page_ext))
310				continue;
311
312			if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
313				continue;
314
315			page_owner = get_page_owner(page_ext);
316			page_mt = gfpflags_to_migratetype(
317					page_owner->gfp_mask);
318			if (pageblock_mt != page_mt) {
319				if (is_migrate_cma(pageblock_mt))
320					count[MIGRATE_MOVABLE]++;
321				else
322					count[pageblock_mt]++;
323
324				pfn = block_end_pfn;
325				break;
326			}
327			pfn += (1UL << page_owner->order) - 1;
328		}
329	}
330
331	/* Print counts */
332	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
333	for (i = 0; i < MIGRATE_TYPES; i++)
334		seq_printf(m, "%12lu ", count[i]);
335	seq_putc(m, '\n');
336}
337
338static ssize_t
339print_page_owner(char __user *buf, size_t count, unsigned long pfn,
340		struct page *page, struct page_owner *page_owner,
341		depot_stack_handle_t handle)
342{
343	int ret;
344	int pageblock_mt, page_mt;
345	char *kbuf;
346	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
347	struct stack_trace trace = {
348		.nr_entries = 0,
349		.entries = entries,
350		.max_entries = PAGE_OWNER_STACK_DEPTH,
351		.skip = 0
352	};
353
354	kbuf = kmalloc(count, GFP_KERNEL);
355	if (!kbuf)
356		return -ENOMEM;
357
358	ret = snprintf(kbuf, count,
359			"Page allocated via order %u, mask %#x(%pGg)\n",
360			page_owner->order, page_owner->gfp_mask,
361			&page_owner->gfp_mask);
362
363	if (ret >= count)
364		goto err;
365
366	/* Print information relevant to grouping pages by mobility */
367	pageblock_mt = get_pageblock_migratetype(page);
368	page_mt  = gfpflags_to_migratetype(page_owner->gfp_mask);
369	ret += snprintf(kbuf + ret, count - ret,
370			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
371			pfn,
372			migratetype_names[page_mt],
373			pfn >> pageblock_order,
374			migratetype_names[pageblock_mt],
375			page->flags, &page->flags);
376
377	if (ret >= count)
378		goto err;
379
380	depot_fetch_stack(handle, &trace);
381	ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
382	if (ret >= count)
383		goto err;
384
385	if (page_owner->last_migrate_reason != -1) {
386		ret += snprintf(kbuf + ret, count - ret,
387			"Page has been migrated, last migrate reason: %s\n",
388			migrate_reason_names[page_owner->last_migrate_reason]);
389		if (ret >= count)
390			goto err;
391	}
392
393	ret += snprintf(kbuf + ret, count - ret, "\n");
394	if (ret >= count)
395		goto err;
396
397	if (copy_to_user(buf, kbuf, ret))
398		ret = -EFAULT;
399
400	kfree(kbuf);
401	return ret;
402
403err:
404	kfree(kbuf);
405	return -ENOMEM;
406}
407
408void __dump_page_owner(struct page *page)
409{
410	struct page_ext *page_ext = lookup_page_ext(page);
411	struct page_owner *page_owner;
412	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
413	struct stack_trace trace = {
414		.nr_entries = 0,
415		.entries = entries,
416		.max_entries = PAGE_OWNER_STACK_DEPTH,
417		.skip = 0
418	};
419	depot_stack_handle_t handle;
420	gfp_t gfp_mask;
421	int mt;
422
423	if (unlikely(!page_ext)) {
424		pr_alert("There is not page extension available.\n");
425		return;
426	}
427
428	page_owner = get_page_owner(page_ext);
429	gfp_mask = page_owner->gfp_mask;
430	mt = gfpflags_to_migratetype(gfp_mask);
431
432	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
433		pr_alert("page_owner info is not active (free page?)\n");
434		return;
435	}
436
437	handle = READ_ONCE(page_owner->handle);
438	if (!handle) {
439		pr_alert("page_owner info is not active (free page?)\n");
440		return;
441	}
442
443	depot_fetch_stack(handle, &trace);
444	pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
445		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
446	print_stack_trace(&trace, 0);
447
448	if (page_owner->last_migrate_reason != -1)
449		pr_alert("page has been migrated, last migrate reason: %s\n",
450			migrate_reason_names[page_owner->last_migrate_reason]);
451}
452
453static ssize_t
454read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
455{
456	unsigned long pfn;
457	struct page *page;
458	struct page_ext *page_ext;
459	struct page_owner *page_owner;
460	depot_stack_handle_t handle;
461
462	if (!static_branch_unlikely(&page_owner_inited))
463		return -EINVAL;
464
465	page = NULL;
466	pfn = min_low_pfn + *ppos;
467
468	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
469	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
470		pfn++;
471
472	drain_all_pages(NULL);
473
474	/* Find an allocated page */
475	for (; pfn < max_pfn; pfn++) {
476		/*
477		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
478		 * validate the area as existing, skip it if not
479		 */
480		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
481			pfn += MAX_ORDER_NR_PAGES - 1;
482			continue;
483		}
484
485		/* Check for holes within a MAX_ORDER area */
486		if (!pfn_valid_within(pfn))
487			continue;
488
489		page = pfn_to_page(pfn);
490		if (PageBuddy(page)) {
491			unsigned long freepage_order = page_order_unsafe(page);
492
493			if (freepage_order < MAX_ORDER)
494				pfn += (1UL << freepage_order) - 1;
495			continue;
496		}
497
498		page_ext = lookup_page_ext(page);
499		if (unlikely(!page_ext))
500			continue;
501
502		/*
503		 * Some pages could be missed by concurrent allocation or free,
504		 * because we don't hold the zone lock.
505		 */
506		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
507			continue;
508
509		page_owner = get_page_owner(page_ext);
510
511		/*
512		 * Access to page_ext->handle isn't synchronous so we should
513		 * be careful to access it.
514		 */
515		handle = READ_ONCE(page_owner->handle);
516		if (!handle)
517			continue;
518
519		/* Record the next PFN to read in the file offset */
520		*ppos = (pfn - min_low_pfn) + 1;
521
522		return print_page_owner(buf, count, pfn, page,
523				page_owner, handle);
524	}
525
526	return 0;
527}
528
529static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
530{
531	unsigned long pfn = zone->zone_start_pfn;
532	unsigned long end_pfn = zone_end_pfn(zone);
533	unsigned long count = 0;
534
535	/*
536	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
537	 * a zone boundary, it will be double counted between zones. This does
538	 * not matter as the mixed block count will still be correct
539	 */
540	for (; pfn < end_pfn; ) {
541		unsigned long block_end_pfn;
542
543		if (!pfn_valid(pfn)) {
544			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
545			continue;
546		}
547
548		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
549		block_end_pfn = min(block_end_pfn, end_pfn);
550
551		for (; pfn < block_end_pfn; pfn++) {
552			struct page *page;
553			struct page_ext *page_ext;
554
555			if (!pfn_valid_within(pfn))
556				continue;
557
558			page = pfn_to_page(pfn);
559
560			if (page_zone(page) != zone)
561				continue;
562
563			/*
564			 * To avoid having to grab zone->lock, be a little
565			 * careful when reading buddy page order. The only
566			 * danger is that we skip too much and potentially miss
567			 * some early allocated pages, which is better than
568			 * heavy lock contention.
569			 */
570			if (PageBuddy(page)) {
571				unsigned long order = page_order_unsafe(page);
572
573				if (order > 0 && order < MAX_ORDER)
574					pfn += (1UL << order) - 1;
575				continue;
576			}
577
578			if (PageReserved(page))
579				continue;
580
581			page_ext = lookup_page_ext(page);
582			if (unlikely(!page_ext))
583				continue;
584
585			/* Maybe overlapping zone */
586			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
587				continue;
588
589			/* Found early allocated page */
590			__set_page_owner_handle(page_ext, early_handle, 0, 0);
591			count++;
592		}
593		cond_resched();
594	}
595
596	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
597		pgdat->node_id, zone->name, count);
598}
599
600static void init_zones_in_node(pg_data_t *pgdat)
601{
602	struct zone *zone;
603	struct zone *node_zones = pgdat->node_zones;
604
605	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
606		if (!populated_zone(zone))
607			continue;
608
609		init_pages_in_zone(pgdat, zone);
610	}
611}
612
613static void init_early_allocated_pages(void)
614{
615	pg_data_t *pgdat;
616
617	for_each_online_pgdat(pgdat)
618		init_zones_in_node(pgdat);
619}
620
621static const struct file_operations proc_page_owner_operations = {
622	.read		= read_page_owner,
623};
624
625static int __init pageowner_init(void)
626{
627	struct dentry *dentry;
628
629	if (!static_branch_unlikely(&page_owner_inited)) {
630		pr_info("page_owner is disabled\n");
631		return 0;
632	}
633
634	dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
635			NULL, &proc_page_owner_operations);
636
637	return PTR_ERR_OR_ZERO(dentry);
638}
639late_initcall(pageowner_init)
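
The __set_page_owner()/__reset_page_owner() entry points above are never called directly by the allocator; mm/page_alloc.c goes through static-key-guarded inline wrappers declared in include/linux/page_owner.h, so the tracking costs nothing when the kernel boots without page_owner=on. A minimal sketch of that wrapper pattern (paraphrased here for context, not copied verbatim from the header):

	static inline void set_page_owner(struct page *page,
					  unsigned int order, gfp_t gfp_mask)
	{
		/* page_owner_inited is the static key flipped in init_page_owner() */
		if (static_branch_unlikely(&page_owner_inited))
			__set_page_owner(page, order, gfp_mask);
	}

	static inline void reset_page_owner(struct page *page, unsigned int order)
	{
		if (static_branch_unlikely(&page_owner_inited))
			__reset_page_owner(page, order);
	}
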
v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/debugfs.h>
  3#include <linux/mm.h>
  4#include <linux/slab.h>
  5#include <linux/uaccess.h>
  6#include <linux/memblock.h>
  7#include <linux/stacktrace.h>
  8#include <linux/page_owner.h>
  9#include <linux/jump_label.h>
 10#include <linux/migrate.h>
 11#include <linux/stackdepot.h>
 12#include <linux/seq_file.h>
 13#include <linux/memcontrol.h>
 14#include <linux/sched/clock.h>
 15
 16#include "internal.h"
 17
 18/*
 19 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 20 * to use off stack temporal storage
 21 */
 22#define PAGE_OWNER_STACK_DEPTH (16)
 23
 24struct page_owner {
 25	unsigned short order;
 26	short last_migrate_reason;
 27	gfp_t gfp_mask;
 28	depot_stack_handle_t handle;
 29	depot_stack_handle_t free_handle;
 30	u64 ts_nsec;
 31	u64 free_ts_nsec;
 32	char comm[TASK_COMM_LEN];
 33	pid_t pid;
 34	pid_t tgid;
 35};
 36
 37static bool page_owner_enabled __initdata;
 38DEFINE_STATIC_KEY_FALSE(page_owner_inited);
 39
 40static depot_stack_handle_t dummy_handle;
 41static depot_stack_handle_t failure_handle;
 42static depot_stack_handle_t early_handle;
 43
 44static void init_early_allocated_pages(void);
 45
 46static int __init early_page_owner_param(char *buf)
 47{
 48	int ret = kstrtobool(buf, &page_owner_enabled);
 49
 50	if (page_owner_enabled)
 51		stack_depot_want_early_init();
 52
 53	return ret;
 54}
 55early_param("page_owner", early_page_owner_param);
 56
 57static __init bool need_page_owner(void)
 58{
 59	return page_owner_enabled;
 60}
 61
 62static __always_inline depot_stack_handle_t create_dummy_stack(void)
 63{
 64	unsigned long entries[4];
 65	unsigned int nr_entries;
 66
 67	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 68	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
 69}
 70
 71static noinline void register_dummy_stack(void)
 72{
 73	dummy_handle = create_dummy_stack();
 74}
 75
 76static noinline void register_failure_stack(void)
 77{
 78	failure_handle = create_dummy_stack();
 79}
 80
 81static noinline void register_early_stack(void)
 82{
 83	early_handle = create_dummy_stack();
 84}
 85
 86static __init void init_page_owner(void)
 87{
 88	if (!page_owner_enabled)
 89		return;
 90
 91	register_dummy_stack();
 92	register_failure_stack();
 93	register_early_stack();
 94	static_branch_enable(&page_owner_inited);
 95	init_early_allocated_pages();
 96}
 97
 98struct page_ext_operations page_owner_ops = {
 99	.size = sizeof(struct page_owner),
100	.need = need_page_owner,
101	.init = init_page_owner,
102};
103
104static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
105{
106	return (void *)page_ext + page_owner_ops.offset;
107}
108
109static noinline depot_stack_handle_t save_stack(gfp_t flags)
110{
111	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
112	depot_stack_handle_t handle;
113	unsigned int nr_entries;
114
115	/*
116	 * Avoid recursion.
117	 *
118	 * Sometimes page metadata allocation tracking requires more
119	 * memory to be allocated:
120	 * - when new stack trace is saved to stack depot
121	 * - when backtrace itself is calculated (ia64)
122	 */
123	if (current->in_page_owner)
124		return dummy_handle;
125	current->in_page_owner = 1;
126
127	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
128	handle = stack_depot_save(entries, nr_entries, flags);
129	if (!handle)
130		handle = failure_handle;
131
132	current->in_page_owner = 0;
133	return handle;
134}
135
136void __reset_page_owner(struct page *page, unsigned short order)
137{
138	int i;
139	struct page_ext *page_ext;
140	depot_stack_handle_t handle;
141	struct page_owner *page_owner;
142	u64 free_ts_nsec = local_clock();
143
144	page_ext = page_ext_get(page);
145	if (unlikely(!page_ext))
146		return;
147
148	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
149	for (i = 0; i < (1 << order); i++) {
150		__clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
151		page_owner = get_page_owner(page_ext);
152		page_owner->free_handle = handle;
153		page_owner->free_ts_nsec = free_ts_nsec;
154		page_ext = page_ext_next(page_ext);
155	}
156	page_ext_put(page_ext);
157}
158
159static inline void __set_page_owner_handle(struct page_ext *page_ext,
160					depot_stack_handle_t handle,
161					unsigned short order, gfp_t gfp_mask)
162{
163	struct page_owner *page_owner;
164	int i;
165
166	for (i = 0; i < (1 << order); i++) {
167		page_owner = get_page_owner(page_ext);
168		page_owner->handle = handle;
169		page_owner->order = order;
170		page_owner->gfp_mask = gfp_mask;
171		page_owner->last_migrate_reason = -1;
172		page_owner->pid = current->pid;
173		page_owner->tgid = current->tgid;
174		page_owner->ts_nsec = local_clock();
175		strscpy(page_owner->comm, current->comm,
176			sizeof(page_owner->comm));
177		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
178		__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
179
180		page_ext = page_ext_next(page_ext);
181	}
182}
183
184noinline void __set_page_owner(struct page *page, unsigned short order,
185					gfp_t gfp_mask)
186{
187	struct page_ext *page_ext;
188	depot_stack_handle_t handle;
189
190	handle = save_stack(gfp_mask);
191
192	page_ext = page_ext_get(page);
193	if (unlikely(!page_ext))
194		return;
195	__set_page_owner_handle(page_ext, handle, order, gfp_mask);
196	page_ext_put(page_ext);
197}
198
199void __set_page_owner_migrate_reason(struct page *page, int reason)
200{
201	struct page_ext *page_ext = page_ext_get(page);
202	struct page_owner *page_owner;
203
204	if (unlikely(!page_ext))
205		return;
206
207	page_owner = get_page_owner(page_ext);
208	page_owner->last_migrate_reason = reason;
209	page_ext_put(page_ext);
210}
211
212void __split_page_owner(struct page *page, unsigned int nr)
213{
214	int i;
215	struct page_ext *page_ext = page_ext_get(page);
216	struct page_owner *page_owner;
217
218	if (unlikely(!page_ext))
219		return;
220
221	for (i = 0; i < nr; i++) {
222		page_owner = get_page_owner(page_ext);
223		page_owner->order = 0;
224		page_ext = page_ext_next(page_ext);
225	}
226	page_ext_put(page_ext);
227}
228
229void __folio_copy_owner(struct folio *newfolio, struct folio *old)
230{
231	struct page_ext *old_ext;
232	struct page_ext *new_ext;
233	struct page_owner *old_page_owner, *new_page_owner;
234
235	old_ext = page_ext_get(&old->page);
236	if (unlikely(!old_ext))
237		return;
238
239	new_ext = page_ext_get(&newfolio->page);
240	if (unlikely(!new_ext)) {
241		page_ext_put(old_ext);
242		return;
243	}
244
245	old_page_owner = get_page_owner(old_ext);
246	new_page_owner = get_page_owner(new_ext);
247	new_page_owner->order = old_page_owner->order;
248	new_page_owner->gfp_mask = old_page_owner->gfp_mask;
249	new_page_owner->last_migrate_reason =
250		old_page_owner->last_migrate_reason;
251	new_page_owner->handle = old_page_owner->handle;
252	new_page_owner->pid = old_page_owner->pid;
253	new_page_owner->tgid = old_page_owner->tgid;
254	new_page_owner->ts_nsec = old_page_owner->ts_nsec;
255	new_page_owner->free_ts_nsec = old_page_owner->ts_nsec;
256	strcpy(new_page_owner->comm, old_page_owner->comm);
257
258	/*
259	 * We don't clear the bit on the old folio as it's going to be freed
260	 * after migration. Until then, the info can be useful in case of
261	 * a bug, and the overall stats will be off a bit only temporarily.
262	 * Also, migrate_misplaced_transhuge_page() can still fail the
263	 * migration and then we want the old folio to retain the info. But
264	 * in that case we also don't need to explicitly clear the info from
265	 * the new page, which will be freed.
266	 */
267	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
268	__set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
269	page_ext_put(new_ext);
270	page_ext_put(old_ext);
271}
272
273void pagetypeinfo_showmixedcount_print(struct seq_file *m,
274				       pg_data_t *pgdat, struct zone *zone)
275{
276	struct page *page;
277	struct page_ext *page_ext;
278	struct page_owner *page_owner;
279	unsigned long pfn, block_end_pfn;
280	unsigned long end_pfn = zone_end_pfn(zone);
281	unsigned long count[MIGRATE_TYPES] = { 0, };
282	int pageblock_mt, page_mt;
283	int i;
284
285	/* Scan block by block. First and last block may be incomplete */
286	pfn = zone->zone_start_pfn;
287
288	/*
289	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
290	 * a zone boundary, it will be double counted between zones. This does
291	 * not matter as the mixed block count will still be correct
292	 */
293	for (; pfn < end_pfn; ) {
294		page = pfn_to_online_page(pfn);
295		if (!page) {
296			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
297			continue;
298		}
299
300		block_end_pfn = pageblock_end_pfn(pfn);
301		block_end_pfn = min(block_end_pfn, end_pfn);
302
303		pageblock_mt = get_pageblock_migratetype(page);
304
305		for (; pfn < block_end_pfn; pfn++) {
306			/* The pageblock is online, no need to recheck. */
307			page = pfn_to_page(pfn);
308
309			if (page_zone(page) != zone)
310				continue;
311
312			if (PageBuddy(page)) {
313				unsigned long freepage_order;
314
315				freepage_order = buddy_order_unsafe(page);
316				if (freepage_order < MAX_ORDER)
317					pfn += (1UL << freepage_order) - 1;
318				continue;
319			}
320
321			if (PageReserved(page))
322				continue;
323
324			page_ext = page_ext_get(page);
325			if (unlikely(!page_ext))
326				continue;
327
328			if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
329				goto ext_put_continue;
330
331			page_owner = get_page_owner(page_ext);
332			page_mt = gfp_migratetype(page_owner->gfp_mask);
333			if (pageblock_mt != page_mt) {
334				if (is_migrate_cma(pageblock_mt))
335					count[MIGRATE_MOVABLE]++;
336				else
337					count[pageblock_mt]++;
338
339				pfn = block_end_pfn;
340				page_ext_put(page_ext);
341				break;
342			}
343			pfn += (1UL << page_owner->order) - 1;
344ext_put_continue:
345			page_ext_put(page_ext);
346		}
347	}
348
349	/* Print counts */
350	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
351	for (i = 0; i < MIGRATE_TYPES; i++)
352		seq_printf(m, "%12lu ", count[i]);
353	seq_putc(m, '\n');
354}
355
356/*
357 * Looking for memcg information and print it out
358 */
359static inline int print_page_owner_memcg(char *kbuf, size_t count, int ret,
360					 struct page *page)
361{
362#ifdef CONFIG_MEMCG
363	unsigned long memcg_data;
364	struct mem_cgroup *memcg;
365	bool online;
366	char name[80];
367
368	rcu_read_lock();
369	memcg_data = READ_ONCE(page->memcg_data);
370	if (!memcg_data)
371		goto out_unlock;
372
373	if (memcg_data & MEMCG_DATA_OBJCGS)
374		ret += scnprintf(kbuf + ret, count - ret,
375				"Slab cache page\n");
376
377	memcg = page_memcg_check(page);
378	if (!memcg)
379		goto out_unlock;
380
381	online = (memcg->css.flags & CSS_ONLINE);
382	cgroup_name(memcg->css.cgroup, name, sizeof(name));
383	ret += scnprintf(kbuf + ret, count - ret,
384			"Charged %sto %smemcg %s\n",
385			PageMemcgKmem(page) ? "(via objcg) " : "",
386			online ? "" : "offline ",
387			name);
388out_unlock:
389	rcu_read_unlock();
390#endif /* CONFIG_MEMCG */
391
392	return ret;
393}
394
395static ssize_t
396print_page_owner(char __user *buf, size_t count, unsigned long pfn,
397		struct page *page, struct page_owner *page_owner,
398		depot_stack_handle_t handle)
399{
400	int ret, pageblock_mt, page_mt;
401	char *kbuf;
402
403	count = min_t(size_t, count, PAGE_SIZE);
404	kbuf = kmalloc(count, GFP_KERNEL);
405	if (!kbuf)
406		return -ENOMEM;
407
408	ret = scnprintf(kbuf, count,
409			"Page allocated via order %u, mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu ns, free_ts %llu ns\n",
410			page_owner->order, page_owner->gfp_mask,
411			&page_owner->gfp_mask, page_owner->pid,
412			page_owner->tgid, page_owner->comm,
413			page_owner->ts_nsec, page_owner->free_ts_nsec);
414
415	/* Print information relevant to grouping pages by mobility */
416	pageblock_mt = get_pageblock_migratetype(page);
417	page_mt  = gfp_migratetype(page_owner->gfp_mask);
418	ret += scnprintf(kbuf + ret, count - ret,
419			"PFN %lu type %s Block %lu type %s Flags %pGp\n",
420			pfn,
421			migratetype_names[page_mt],
422			pfn >> pageblock_order,
423			migratetype_names[pageblock_mt],
424			&page->flags);
425
426	ret += stack_depot_snprint(handle, kbuf + ret, count - ret, 0);
427	if (ret >= count)
428		goto err;
429
430	if (page_owner->last_migrate_reason != -1) {
431		ret += scnprintf(kbuf + ret, count - ret,
432			"Page has been migrated, last migrate reason: %s\n",
433			migrate_reason_names[page_owner->last_migrate_reason]);
434	}
435
436	ret = print_page_owner_memcg(kbuf, count, ret, page);
437
438	ret += snprintf(kbuf + ret, count - ret, "\n");
439	if (ret >= count)
440		goto err;
441
442	if (copy_to_user(buf, kbuf, ret))
443		ret = -EFAULT;
444
445	kfree(kbuf);
446	return ret;
447
448err:
449	kfree(kbuf);
450	return -ENOMEM;
451}
452
453void __dump_page_owner(const struct page *page)
454{
455	struct page_ext *page_ext = page_ext_get((void *)page);
456	struct page_owner *page_owner;
457	depot_stack_handle_t handle;
458	gfp_t gfp_mask;
459	int mt;
460
461	if (unlikely(!page_ext)) {
462		pr_alert("There is not page extension available.\n");
463		return;
464	}
465
466	page_owner = get_page_owner(page_ext);
467	gfp_mask = page_owner->gfp_mask;
468	mt = gfp_migratetype(gfp_mask);
469
470	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
471		pr_alert("page_owner info is not present (never set?)\n");
472		page_ext_put(page_ext);
473		return;
474	}
475
476	if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
477		pr_alert("page_owner tracks the page as allocated\n");
478	else
479		pr_alert("page_owner tracks the page as freed\n");
480
481	pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu, free_ts %llu\n",
482		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
483		 page_owner->pid, page_owner->tgid, page_owner->comm,
484		 page_owner->ts_nsec, page_owner->free_ts_nsec);
485
486	handle = READ_ONCE(page_owner->handle);
487	if (!handle)
488		pr_alert("page_owner allocation stack trace missing\n");
489	else
490		stack_depot_print(handle);
491
492	handle = READ_ONCE(page_owner->free_handle);
493	if (!handle) {
494		pr_alert("page_owner free stack trace missing\n");
495	} else {
496		pr_alert("page last free stack trace:\n");
497		stack_depot_print(handle);
498	}
499
500	if (page_owner->last_migrate_reason != -1)
501		pr_alert("page has been migrated, last migrate reason: %s\n",
502			migrate_reason_names[page_owner->last_migrate_reason]);
503	page_ext_put(page_ext);
504}
505
506static ssize_t
507read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
508{
509	unsigned long pfn;
510	struct page *page;
511	struct page_ext *page_ext;
512	struct page_owner *page_owner;
513	depot_stack_handle_t handle;
514
515	if (!static_branch_unlikely(&page_owner_inited))
516		return -EINVAL;
517
518	page = NULL;
519	if (*ppos == 0)
520		pfn = min_low_pfn;
521	else
522		pfn = *ppos;
523	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
524	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
525		pfn++;
526
527	/* Find an allocated page */
528	for (; pfn < max_pfn; pfn++) {
529		/*
530		 * This temporary page_owner is required so
531		 * that we can avoid the context switches while holding
532		 * the rcu lock and copying the page owner information to
533		 * user through copy_to_user() or GFP_KERNEL allocations.
534		 */
535		struct page_owner page_owner_tmp;
536
537		/*
538		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
539		 * validate the area as existing, skip it if not
540		 */
541		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
542			pfn += MAX_ORDER_NR_PAGES - 1;
543			continue;
544		}
545
546		page = pfn_to_page(pfn);
547		if (PageBuddy(page)) {
548			unsigned long freepage_order = buddy_order_unsafe(page);
549
550			if (freepage_order < MAX_ORDER)
551				pfn += (1UL << freepage_order) - 1;
552			continue;
553		}
554
555		page_ext = page_ext_get(page);
556		if (unlikely(!page_ext))
557			continue;
558
559		/*
560		 * Some pages could be missed by concurrent allocation or free,
561		 * because we don't hold the zone lock.
562		 */
563		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
564			goto ext_put_continue;
565
566		/*
567		 * Although we do have the info about past allocation of free
568		 * pages, it's not relevant for current memory usage.
569		 */
570		if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
571			goto ext_put_continue;
572
573		page_owner = get_page_owner(page_ext);
574
575		/*
576		 * Don't print "tail" pages of high-order allocations as that
577		 * would inflate the stats.
578		 */
579		if (!IS_ALIGNED(pfn, 1 << page_owner->order))
580			goto ext_put_continue;
581
582		/*
583		 * Access to page_ext->handle isn't synchronous so we should
584		 * be careful to access it.
585		 */
586		handle = READ_ONCE(page_owner->handle);
587		if (!handle)
588			goto ext_put_continue;
589
590		/* Record the next PFN to read in the file offset */
591		*ppos = pfn + 1;
592
593		page_owner_tmp = *page_owner;
594		page_ext_put(page_ext);
595		return print_page_owner(buf, count, pfn, page,
596				&page_owner_tmp, handle);
597ext_put_continue:
598		page_ext_put(page_ext);
599	}
600
601	return 0;
602}
603
604static loff_t lseek_page_owner(struct file *file, loff_t offset, int orig)
605{
606	switch (orig) {
607	case SEEK_SET:
608		file->f_pos = offset;
609		break;
610	case SEEK_CUR:
611		file->f_pos += offset;
612		break;
613	default:
614		return -EINVAL;
615	}
616	return file->f_pos;
617}
618
619static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
620{
621	unsigned long pfn = zone->zone_start_pfn;
622	unsigned long end_pfn = zone_end_pfn(zone);
623	unsigned long count = 0;
624
625	/*
626	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
627	 * a zone boundary, it will be double counted between zones. This does
628	 * not matter as the mixed block count will still be correct
629	 */
630	for (; pfn < end_pfn; ) {
631		unsigned long block_end_pfn;
632
633		if (!pfn_valid(pfn)) {
634			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
635			continue;
636		}
637
638		block_end_pfn = pageblock_end_pfn(pfn);
639		block_end_pfn = min(block_end_pfn, end_pfn);
640
641		for (; pfn < block_end_pfn; pfn++) {
642			struct page *page = pfn_to_page(pfn);
643			struct page_ext *page_ext;
644
645			if (page_zone(page) != zone)
646				continue;
647
648			/*
649			 * To avoid having to grab zone->lock, be a little
650			 * careful when reading buddy page order. The only
651			 * danger is that we skip too much and potentially miss
652			 * some early allocated pages, which is better than
653			 * heavy lock contention.
654			 */
655			if (PageBuddy(page)) {
656				unsigned long order = buddy_order_unsafe(page);
657
658				if (order > 0 && order < MAX_ORDER)
659					pfn += (1UL << order) - 1;
660				continue;
661			}
662
663			if (PageReserved(page))
664				continue;
665
666			page_ext = page_ext_get(page);
667			if (unlikely(!page_ext))
668				continue;
669
670			/* Maybe overlapping zone */
671			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
672				goto ext_put_continue;
673
674			/* Found early allocated page */
675			__set_page_owner_handle(page_ext, early_handle,
676						0, 0);
677			count++;
678ext_put_continue:
679			page_ext_put(page_ext);
680		}
681		cond_resched();
682	}
683
684	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
685		pgdat->node_id, zone->name, count);
686}
687
688static void init_zones_in_node(pg_data_t *pgdat)
689{
690	struct zone *zone;
691	struct zone *node_zones = pgdat->node_zones;
692
693	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
694		if (!populated_zone(zone))
695			continue;
696
697		init_pages_in_zone(pgdat, zone);
698	}
699}
700
701static void init_early_allocated_pages(void)
702{
703	pg_data_t *pgdat;
704
705	for_each_online_pgdat(pgdat)
706		init_zones_in_node(pgdat);
707}
708
709static const struct file_operations proc_page_owner_operations = {
710	.read		= read_page_owner,
711	.llseek		= lseek_page_owner,
712};
713
714static int __init pageowner_init(void)
715{
716	if (!static_branch_unlikely(&page_owner_inited)) {
717		pr_info("page_owner is disabled\n");
718		return 0;
719	}
720
721	debugfs_create_file("page_owner", 0400, NULL, NULL,
722			    &proc_page_owner_operations);
723
724	return 0;
725}
726late_initcall(pageowner_init)
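
Once pageowner_init() has created the debugfs file, read_page_owner() hands user space one allocated page's record per read() call and stores the next PFN in the file offset, so a consumer simply loops until read() returns 0. A small user-space sketch of that loop (an illustration only; it assumes debugfs is mounted at /sys/kernel/debug and the kernel was built with CONFIG_PAGE_OWNER and booted with page_owner=on):

	/* Dump every record from /sys/kernel/debug/page_owner to stdout. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd = open("/sys/kernel/debug/page_owner", O_RDONLY);

		if (fd < 0) {
			perror("open");	/* needs root and an enabled page_owner */
			return 1;
		}
		/* Each read() returns one page's text record; 0 means no more pages. */
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);
		close(fd);
		return 0;
	}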