v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/debugfs.h>
  3#include <linux/mm.h>
  4#include <linux/slab.h>
  5#include <linux/uaccess.h>
  6#include <linux/memblock.h>
  7#include <linux/stacktrace.h>
  8#include <linux/page_owner.h>
  9#include <linux/jump_label.h>
 10#include <linux/migrate.h>
 11#include <linux/stackdepot.h>
 12#include <linux/seq_file.h>
 13
 14#include "internal.h"
 15
 16/*
 17 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 18 * to use off stack temporal storage
 19 */
 20#define PAGE_OWNER_STACK_DEPTH (16)
 21
 22struct page_owner {
 23	unsigned short order;
 24	short last_migrate_reason;
 25	gfp_t gfp_mask;
 26	depot_stack_handle_t handle;
 27	depot_stack_handle_t free_handle;
 28};
 29
 30static bool page_owner_enabled = false;
 31DEFINE_STATIC_KEY_FALSE(page_owner_inited);
 32
 33static depot_stack_handle_t dummy_handle;
 34static depot_stack_handle_t failure_handle;
 35static depot_stack_handle_t early_handle;
 36
 37static void init_early_allocated_pages(void);
 38
 39static int __init early_page_owner_param(char *buf)
 40{
 41	if (!buf)
 42		return -EINVAL;
 43
 44	if (strcmp(buf, "on") == 0)
 45		page_owner_enabled = true;
 46
 47	return 0;
 48}
 49early_param("page_owner", early_page_owner_param);
 50
 51static bool need_page_owner(void)
 52{
 53	return page_owner_enabled;
 54}
 55
 56static __always_inline depot_stack_handle_t create_dummy_stack(void)
 57{
 58	unsigned long entries[4];
 59	unsigned int nr_entries;
 60
 61	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 62	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
 63}
 64
 65static noinline void register_dummy_stack(void)
 66{
 67	dummy_handle = create_dummy_stack();
 68}
 69
 70static noinline void register_failure_stack(void)
 71{
 72	failure_handle = create_dummy_stack();
 73}
 74
 75static noinline void register_early_stack(void)
 76{
 77	early_handle = create_dummy_stack();
 78}
 79
 80static void init_page_owner(void)
 81{
 82	if (!page_owner_enabled)
 83		return;
 84
 85	register_dummy_stack();
 86	register_failure_stack();
 87	register_early_stack();
 88	static_branch_enable(&page_owner_inited);
 89	init_early_allocated_pages();
 90}
 91
 92struct page_ext_operations page_owner_ops = {
 93	.size = sizeof(struct page_owner),
 94	.need = need_page_owner,
 95	.init = init_page_owner,
 96};
 97
 98static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
 99{
100	return (void *)page_ext + page_owner_ops.offset;
101}
102
103static inline bool check_recursive_alloc(unsigned long *entries,
104					 unsigned int nr_entries,
105					 unsigned long ip)
106{
107	unsigned int i;
108
109	for (i = 0; i < nr_entries; i++) {
110		if (entries[i] == ip)
111			return true;
112	}
113	return false;
114}
115
116static noinline depot_stack_handle_t save_stack(gfp_t flags)
117{
118	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
119	depot_stack_handle_t handle;
120	unsigned int nr_entries;
121
122	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
123
124	/*
125	 * We need to check recursion here because our request to
126	 * stackdepot could trigger memory allocation to save new
127	 * entry. New memory allocation would reach here and call
128	 * stack_depot_save_entries() again if we don't catch it. There is
129	 * still not enough memory in stackdepot so it would try to
130	 * allocate memory again and loop forever.
131	 */
132	if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
133		return dummy_handle;
134
135	handle = stack_depot_save(entries, nr_entries, flags);
136	if (!handle)
137		handle = failure_handle;
138
139	return handle;
140}
141
142void __reset_page_owner(struct page *page, unsigned int order)
143{
144	int i;
145	struct page_ext *page_ext;
146	depot_stack_handle_t handle = 0;
147	struct page_owner *page_owner;
148
149	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
150
151	page_ext = lookup_page_ext(page);
152	if (unlikely(!page_ext))
153		return;
154	for (i = 0; i < (1 << order); i++) {
155		__clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
156		page_owner = get_page_owner(page_ext);
157		page_owner->free_handle = handle;
158		page_ext = page_ext_next(page_ext);
159	}
160}
161
162static inline void __set_page_owner_handle(struct page *page,
163	struct page_ext *page_ext, depot_stack_handle_t handle,
164	unsigned int order, gfp_t gfp_mask)
165{
166	struct page_owner *page_owner;
167	int i;
168
169	for (i = 0; i < (1 << order); i++) {
170		page_owner = get_page_owner(page_ext);
171		page_owner->handle = handle;
172		page_owner->order = order;
173		page_owner->gfp_mask = gfp_mask;
174		page_owner->last_migrate_reason = -1;
175		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
176		__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
177
178		page_ext = page_ext_next(page_ext);
179	}
180}
181
182noinline void __set_page_owner(struct page *page, unsigned int order,
183					gfp_t gfp_mask)
184{
185	struct page_ext *page_ext = lookup_page_ext(page);
186	depot_stack_handle_t handle;
187
188	if (unlikely(!page_ext))
189		return;
190
191	handle = save_stack(gfp_mask);
192	__set_page_owner_handle(page, page_ext, handle, order, gfp_mask);
193}
194
195void __set_page_owner_migrate_reason(struct page *page, int reason)
196{
197	struct page_ext *page_ext = lookup_page_ext(page);
198	struct page_owner *page_owner;
199
200	if (unlikely(!page_ext))
201		return;
202
203	page_owner = get_page_owner(page_ext);
204	page_owner->last_migrate_reason = reason;
205}
206
207void __split_page_owner(struct page *page, unsigned int order)
208{
209	int i;
210	struct page_ext *page_ext = lookup_page_ext(page);
211	struct page_owner *page_owner;
212
213	if (unlikely(!page_ext))
214		return;
215
216	for (i = 0; i < (1 << order); i++) {
217		page_owner = get_page_owner(page_ext);
218		page_owner->order = 0;
219		page_ext = page_ext_next(page_ext);
220	}
221}
222
223void __copy_page_owner(struct page *oldpage, struct page *newpage)
224{
225	struct page_ext *old_ext = lookup_page_ext(oldpage);
226	struct page_ext *new_ext = lookup_page_ext(newpage);
227	struct page_owner *old_page_owner, *new_page_owner;
228
229	if (unlikely(!old_ext || !new_ext))
230		return;
231
232	old_page_owner = get_page_owner(old_ext);
233	new_page_owner = get_page_owner(new_ext);
234	new_page_owner->order = old_page_owner->order;
235	new_page_owner->gfp_mask = old_page_owner->gfp_mask;
236	new_page_owner->last_migrate_reason =
237		old_page_owner->last_migrate_reason;
238	new_page_owner->handle = old_page_owner->handle;
239
240	/*
241	 * We don't clear the bit on the oldpage as it's going to be freed
242	 * after migration. Until then, the info can be useful in case of
243	 * a bug, and the overall stats will be off a bit only temporarily.
244	 * Also, migrate_misplaced_transhuge_page() can still fail the
245	 * migration and then we want the oldpage to retain the info. But
246	 * in that case we also don't need to explicitly clear the info from
247	 * the new page, which will be freed.
248	 */
249	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
250	__set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
251}
252
253void pagetypeinfo_showmixedcount_print(struct seq_file *m,
254				       pg_data_t *pgdat, struct zone *zone)
255{
256	struct page *page;
257	struct page_ext *page_ext;
258	struct page_owner *page_owner;
259	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
260	unsigned long end_pfn = pfn + zone->spanned_pages;
261	unsigned long count[MIGRATE_TYPES] = { 0, };
262	int pageblock_mt, page_mt;
263	int i;
264
265	/* Scan block by block. First and last block may be incomplete */
266	pfn = zone->zone_start_pfn;
267
268	/*
269	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
270	 * a zone boundary, it will be double counted between zones. This does
271	 * not matter as the mixed block count will still be correct
272	 */
273	for (; pfn < end_pfn; ) {
274		page = pfn_to_online_page(pfn);
275		if (!page) {
276			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
277			continue;
278		}
279
280		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
281		block_end_pfn = min(block_end_pfn, end_pfn);
282
283		pageblock_mt = get_pageblock_migratetype(page);
284
285		for (; pfn < block_end_pfn; pfn++) {
286			if (!pfn_valid_within(pfn))
287				continue;
288
289			/* The pageblock is online, no need to recheck. */
290			page = pfn_to_page(pfn);
291
292			if (page_zone(page) != zone)
293				continue;
294
295			if (PageBuddy(page)) {
296				unsigned long freepage_order;
297
298				freepage_order = page_order_unsafe(page);
299				if (freepage_order < MAX_ORDER)
300					pfn += (1UL << freepage_order) - 1;
301				continue;
302			}
303
304			if (PageReserved(page))
305				continue;
306
307			page_ext = lookup_page_ext(page);
308			if (unlikely(!page_ext))
309				continue;
310
311			if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
312				continue;
313
314			page_owner = get_page_owner(page_ext);
315			page_mt = gfpflags_to_migratetype(
316					page_owner->gfp_mask);
317			if (pageblock_mt != page_mt) {
318				if (is_migrate_cma(pageblock_mt))
319					count[MIGRATE_MOVABLE]++;
320				else
321					count[pageblock_mt]++;
322
323				pfn = block_end_pfn;
324				break;
325			}
326			pfn += (1UL << page_owner->order) - 1;
327		}
328	}
329
330	/* Print counts */
331	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
332	for (i = 0; i < MIGRATE_TYPES; i++)
333		seq_printf(m, "%12lu ", count[i]);
334	seq_putc(m, '\n');
335}
336
337static ssize_t
338print_page_owner(char __user *buf, size_t count, unsigned long pfn,
339		struct page *page, struct page_owner *page_owner,
340		depot_stack_handle_t handle)
341{
342	int ret, pageblock_mt, page_mt;
343	unsigned long *entries;
344	unsigned int nr_entries;
345	char *kbuf;
346
347	count = min_t(size_t, count, PAGE_SIZE);
348	kbuf = kmalloc(count, GFP_KERNEL);
349	if (!kbuf)
350		return -ENOMEM;
351
352	ret = snprintf(kbuf, count,
353			"Page allocated via order %u, mask %#x(%pGg)\n",
354			page_owner->order, page_owner->gfp_mask,
355			&page_owner->gfp_mask);
356
357	if (ret >= count)
358		goto err;
359
360	/* Print information relevant to grouping pages by mobility */
361	pageblock_mt = get_pageblock_migratetype(page);
362	page_mt  = gfpflags_to_migratetype(page_owner->gfp_mask);
363	ret += snprintf(kbuf + ret, count - ret,
364			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
365			pfn,
366			migratetype_names[page_mt],
367			pfn >> pageblock_order,
368			migratetype_names[pageblock_mt],
369			page->flags, &page->flags);
370
371	if (ret >= count)
372		goto err;
373
374	nr_entries = stack_depot_fetch(handle, &entries);
375	ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
376	if (ret >= count)
377		goto err;
378
379	if (page_owner->last_migrate_reason != -1) {
380		ret += snprintf(kbuf + ret, count - ret,
381			"Page has been migrated, last migrate reason: %s\n",
382			migrate_reason_names[page_owner->last_migrate_reason]);
383		if (ret >= count)
384			goto err;
385	}
386
387	ret += snprintf(kbuf + ret, count - ret, "\n");
388	if (ret >= count)
389		goto err;
390
391	if (copy_to_user(buf, kbuf, ret))
392		ret = -EFAULT;
393
394	kfree(kbuf);
395	return ret;
396
397err:
398	kfree(kbuf);
399	return -ENOMEM;
400}
401
402void __dump_page_owner(struct page *page)
403{
404	struct page_ext *page_ext = lookup_page_ext(page);
405	struct page_owner *page_owner;
406	depot_stack_handle_t handle;
407	unsigned long *entries;
408	unsigned int nr_entries;
409	gfp_t gfp_mask;
410	int mt;
411
412	if (unlikely(!page_ext)) {
413		pr_alert("There is not page extension available.\n");
414		return;
415	}
416
417	page_owner = get_page_owner(page_ext);
418	gfp_mask = page_owner->gfp_mask;
419	mt = gfpflags_to_migratetype(gfp_mask);
420
421	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
422		pr_alert("page_owner info is not present (never set?)\n");
423		return;
424	}
425
426	if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
427		pr_alert("page_owner tracks the page as allocated\n");
428	else
429		pr_alert("page_owner tracks the page as freed\n");
430
431	pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
432		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
433
434	handle = READ_ONCE(page_owner->handle);
435	if (!handle) {
436		pr_alert("page_owner allocation stack trace missing\n");
437	} else {
438		nr_entries = stack_depot_fetch(handle, &entries);
439		stack_trace_print(entries, nr_entries, 0);
440	}
441
442	handle = READ_ONCE(page_owner->free_handle);
443	if (!handle) {
444		pr_alert("page_owner free stack trace missing\n");
445	} else {
446		nr_entries = stack_depot_fetch(handle, &entries);
447		pr_alert("page last free stack trace:\n");
448		stack_trace_print(entries, nr_entries, 0);
449	}
450
451	if (page_owner->last_migrate_reason != -1)
452		pr_alert("page has been migrated, last migrate reason: %s\n",
453			migrate_reason_names[page_owner->last_migrate_reason]);
454}
455
456static ssize_t
457read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
458{
459	unsigned long pfn;
460	struct page *page;
461	struct page_ext *page_ext;
462	struct page_owner *page_owner;
463	depot_stack_handle_t handle;
464
465	if (!static_branch_unlikely(&page_owner_inited))
466		return -EINVAL;
467
468	page = NULL;
469	pfn = min_low_pfn + *ppos;
470
471	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
472	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
473		pfn++;
474
475	drain_all_pages(NULL);
476
477	/* Find an allocated page */
478	for (; pfn < max_pfn; pfn++) {
479		/*
480		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
481		 * validate the area as existing, skip it if not
482		 */
483		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
484			pfn += MAX_ORDER_NR_PAGES - 1;
485			continue;
486		}
487
488		/* Check for holes within a MAX_ORDER area */
489		if (!pfn_valid_within(pfn))
490			continue;
491
492		page = pfn_to_page(pfn);
493		if (PageBuddy(page)) {
494			unsigned long freepage_order = page_order_unsafe(page);
495
496			if (freepage_order < MAX_ORDER)
497				pfn += (1UL << freepage_order) - 1;
498			continue;
499		}
500
501		page_ext = lookup_page_ext(page);
502		if (unlikely(!page_ext))
503			continue;
504
505		/*
506		 * Some pages could be missed by concurrent allocation or free,
507		 * because we don't hold the zone lock.
508		 */
509		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
510			continue;
511
512		/*
513		 * Although we do have the info about past allocation of free
514		 * pages, it's not relevant for current memory usage.
515		 */
516		if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
517			continue;
518
519		page_owner = get_page_owner(page_ext);
520
521		/*
522		 * Don't print "tail" pages of high-order allocations as that
523		 * would inflate the stats.
524		 */
525		if (!IS_ALIGNED(pfn, 1 << page_owner->order))
526			continue;
527
528		/*
529		 * Access to page_ext->handle isn't synchronous so we should
530		 * be careful to access it.
531		 */
532		handle = READ_ONCE(page_owner->handle);
533		if (!handle)
534			continue;
535
536		/* Record the next PFN to read in the file offset */
537		*ppos = (pfn - min_low_pfn) + 1;
538
539		return print_page_owner(buf, count, pfn, page,
540				page_owner, handle);
541	}
542
543	return 0;
544}
545
546static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
547{
548	unsigned long pfn = zone->zone_start_pfn;
549	unsigned long end_pfn = zone_end_pfn(zone);
550	unsigned long count = 0;
551
552	/*
553	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
554	 * a zone boundary, it will be double counted between zones. This does
555	 * not matter as the mixed block count will still be correct
556	 */
557	for (; pfn < end_pfn; ) {
558		unsigned long block_end_pfn;
559
560		if (!pfn_valid(pfn)) {
561			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
562			continue;
563		}
564
565		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
566		block_end_pfn = min(block_end_pfn, end_pfn);
567
568		for (; pfn < block_end_pfn; pfn++) {
569			struct page *page;
570			struct page_ext *page_ext;
571
572			if (!pfn_valid_within(pfn))
573				continue;
574
575			page = pfn_to_page(pfn);
576
577			if (page_zone(page) != zone)
578				continue;
579
580			/*
581			 * To avoid having to grab zone->lock, be a little
582			 * careful when reading buddy page order. The only
583			 * danger is that we skip too much and potentially miss
584			 * some early allocated pages, which is better than
585			 * heavy lock contention.
586			 */
587			if (PageBuddy(page)) {
588				unsigned long order = page_order_unsafe(page);
589
590				if (order > 0 && order < MAX_ORDER)
591					pfn += (1UL << order) - 1;
592				continue;
593			}
594
595			if (PageReserved(page))
596				continue;
597
598			page_ext = lookup_page_ext(page);
599			if (unlikely(!page_ext))
600				continue;
601
602			/* Maybe overlapping zone */
603			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
604				continue;
605
606			/* Found early allocated page */
607			__set_page_owner_handle(page, page_ext, early_handle,
608						0, 0);
609			count++;
610		}
611		cond_resched();
612	}
613
614	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
615		pgdat->node_id, zone->name, count);
616}
617
618static void init_zones_in_node(pg_data_t *pgdat)
619{
620	struct zone *zone;
621	struct zone *node_zones = pgdat->node_zones;
622
623	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
624		if (!populated_zone(zone))
625			continue;
626
627		init_pages_in_zone(pgdat, zone);
628	}
629}
630
631static void init_early_allocated_pages(void)
632{
633	pg_data_t *pgdat;
634
635	for_each_online_pgdat(pgdat)
636		init_zones_in_node(pgdat);
637}
638
639static const struct file_operations proc_page_owner_operations = {
640	.read		= read_page_owner,
641};
642
643static int __init pageowner_init(void)
644{
645	if (!static_branch_unlikely(&page_owner_inited)) {
646		pr_info("page_owner is disabled\n");
647		return 0;
648	}
649
650	debugfs_create_file("page_owner", 0400, NULL, NULL,
651			    &proc_page_owner_operations);
652
653	return 0;
654}
655late_initcall(pageowner_init)
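
The file above defines only the slow paths; the page allocator reaches them through static-key-guarded wrappers so the hooks cost nothing when page_owner is disabled. Below is a minimal sketch of that wrapper pattern, approximating include/linux/page_owner.h (simplified here; note that the order argument is unsigned int in v5.4 but unsigned short in v6.8):

	/* Sketch: how the allocator-facing wrappers gate the __ functions. */
	extern struct static_key_false page_owner_inited;

	static inline void set_page_owner(struct page *page,
					  unsigned int order, gfp_t gfp_mask)
	{
		if (static_branch_unlikely(&page_owner_inited))
			__set_page_owner(page, order, gfp_mask);
	}

	static inline void reset_page_owner(struct page *page, unsigned int order)
	{
		if (static_branch_unlikely(&page_owner_inited))
			__reset_page_owner(page, order);
	}

The allocation path calls set_page_owner() once a page has been prepared, and the free path calls reset_page_owner() before the page returns to the buddy allocator, which is why __set_page_owner() records an allocation stack while __reset_page_owner() records a free stack in free_handle.
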
v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/debugfs.h>
  3#include <linux/mm.h>
  4#include <linux/slab.h>
  5#include <linux/uaccess.h>
  6#include <linux/memblock.h>
  7#include <linux/stacktrace.h>
  8#include <linux/page_owner.h>
  9#include <linux/jump_label.h>
 10#include <linux/migrate.h>
 11#include <linux/stackdepot.h>
 12#include <linux/seq_file.h>
 13#include <linux/memcontrol.h>
 14#include <linux/sched/clock.h>
 15
 16#include "internal.h"
 17
 18/*
 19 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 20 * to use off stack temporal storage
 21 */
 22#define PAGE_OWNER_STACK_DEPTH (16)
 23
 24struct page_owner {
 25	unsigned short order;
 26	short last_migrate_reason;
 27	gfp_t gfp_mask;
 28	depot_stack_handle_t handle;
 29	depot_stack_handle_t free_handle;
 30	u64 ts_nsec;
 31	u64 free_ts_nsec;
 32	char comm[TASK_COMM_LEN];
 33	pid_t pid;
 34	pid_t tgid;
 35	pid_t free_pid;
 36	pid_t free_tgid;
 37};
 38
 39static bool page_owner_enabled __initdata;
 40DEFINE_STATIC_KEY_FALSE(page_owner_inited);
 41
 42static depot_stack_handle_t dummy_handle;
 43static depot_stack_handle_t failure_handle;
 44static depot_stack_handle_t early_handle;
 45
 46static void init_early_allocated_pages(void);
 47
 48static int __init early_page_owner_param(char *buf)
 49{
 50	int ret = kstrtobool(buf, &page_owner_enabled);
 51
 52	if (page_owner_enabled)
 53		stack_depot_request_early_init();
 54
 55	return ret;
 56}
 57early_param("page_owner", early_page_owner_param);
 58
 59static __init bool need_page_owner(void)
 60{
 61	return page_owner_enabled;
 62}
 63
 64static __always_inline depot_stack_handle_t create_dummy_stack(void)
 65{
 66	unsigned long entries[4];
 67	unsigned int nr_entries;
 68
 69	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 70	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
 71}
 72
 73static noinline void register_dummy_stack(void)
 74{
 75	dummy_handle = create_dummy_stack();
 76}
 77
 78static noinline void register_failure_stack(void)
 79{
 80	failure_handle = create_dummy_stack();
 81}
 82
 83static noinline void register_early_stack(void)
 84{
 85	early_handle = create_dummy_stack();
 86}
 87
 88static __init void init_page_owner(void)
 89{
 90	if (!page_owner_enabled)
 91		return;
 92
 93	register_dummy_stack();
 94	register_failure_stack();
 95	register_early_stack();
 96	static_branch_enable(&page_owner_inited);
 97	init_early_allocated_pages();
 98}
 99
100struct page_ext_operations page_owner_ops = {
101	.size = sizeof(struct page_owner),
102	.need = need_page_owner,
103	.init = init_page_owner,
104	.need_shared_flags = true,
105};
106
107static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
108{
109	return page_ext_data(page_ext, &page_owner_ops);
110}
111
112static noinline depot_stack_handle_t save_stack(gfp_t flags)
113{
114	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
115	depot_stack_handle_t handle;
116	unsigned int nr_entries;
117
118	/*
119	 * Avoid recursion.
120	 *
121	 * Sometimes page metadata allocation tracking requires more
122	 * memory to be allocated:
123	 * - when new stack trace is saved to stack depot
124	 */
125	if (current->in_page_owner)
126		return dummy_handle;
127	current->in_page_owner = 1;
128
129	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
130	handle = stack_depot_save(entries, nr_entries, flags);
131	if (!handle)
132		handle = failure_handle;
133
134	current->in_page_owner = 0;
135	return handle;
136}
137
138void __reset_page_owner(struct page *page, unsigned short order)
139{
140	int i;
141	struct page_ext *page_ext;
142	depot_stack_handle_t handle;
143	struct page_owner *page_owner;
144	u64 free_ts_nsec = local_clock();
145
146	page_ext = page_ext_get(page);
147	if (unlikely(!page_ext))
148		return;
149
150	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
151	for (i = 0; i < (1 << order); i++) {
152		__clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
153		page_owner = get_page_owner(page_ext);
154		page_owner->free_handle = handle;
155		page_owner->free_ts_nsec = free_ts_nsec;
156		page_owner->free_pid = current->pid;
157		page_owner->free_tgid = current->tgid;
158		page_ext = page_ext_next(page_ext);
159	}
160	page_ext_put(page_ext);
161}
162
163static inline void __set_page_owner_handle(struct page_ext *page_ext,
164					depot_stack_handle_t handle,
165					unsigned short order, gfp_t gfp_mask)
166{
167	struct page_owner *page_owner;
168	int i;
169	u64 ts_nsec = local_clock();
170
171	for (i = 0; i < (1 << order); i++) {
172		page_owner = get_page_owner(page_ext);
173		page_owner->handle = handle;
174		page_owner->order = order;
175		page_owner->gfp_mask = gfp_mask;
176		page_owner->last_migrate_reason = -1;
177		page_owner->pid = current->pid;
178		page_owner->tgid = current->tgid;
179		page_owner->ts_nsec = ts_nsec;
180		strscpy(page_owner->comm, current->comm,
181			sizeof(page_owner->comm));
182		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
183		__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
184
185		page_ext = page_ext_next(page_ext);
186	}
187}
188
189noinline void __set_page_owner(struct page *page, unsigned short order,
190					gfp_t gfp_mask)
191{
192	struct page_ext *page_ext;
193	depot_stack_handle_t handle;
194
195	handle = save_stack(gfp_mask);
196
197	page_ext = page_ext_get(page);
198	if (unlikely(!page_ext))
199		return;
200	__set_page_owner_handle(page_ext, handle, order, gfp_mask);
201	page_ext_put(page_ext);
202}
203
204void __set_page_owner_migrate_reason(struct page *page, int reason)
205{
206	struct page_ext *page_ext = page_ext_get(page);
207	struct page_owner *page_owner;
208
209	if (unlikely(!page_ext))
210		return;
211
212	page_owner = get_page_owner(page_ext);
213	page_owner->last_migrate_reason = reason;
214	page_ext_put(page_ext);
215}
216
217void __split_page_owner(struct page *page, unsigned int nr)
218{
219	int i;
220	struct page_ext *page_ext = page_ext_get(page);
221	struct page_owner *page_owner;
222
223	if (unlikely(!page_ext))
224		return;
225
226	for (i = 0; i < nr; i++) {
227		page_owner = get_page_owner(page_ext);
228		page_owner->order = 0;
229		page_ext = page_ext_next(page_ext);
230	}
231	page_ext_put(page_ext);
232}
233
234void __folio_copy_owner(struct folio *newfolio, struct folio *old)
235{
236	struct page_ext *old_ext;
237	struct page_ext *new_ext;
238	struct page_owner *old_page_owner, *new_page_owner;
239
240	old_ext = page_ext_get(&old->page);
241	if (unlikely(!old_ext))
242		return;
243
244	new_ext = page_ext_get(&newfolio->page);
245	if (unlikely(!new_ext)) {
246		page_ext_put(old_ext);
247		return;
248	}
249
250	old_page_owner = get_page_owner(old_ext);
251	new_page_owner = get_page_owner(new_ext);
252	new_page_owner->order = old_page_owner->order;
253	new_page_owner->gfp_mask = old_page_owner->gfp_mask;
254	new_page_owner->last_migrate_reason =
255		old_page_owner->last_migrate_reason;
256	new_page_owner->handle = old_page_owner->handle;
257	new_page_owner->pid = old_page_owner->pid;
258	new_page_owner->tgid = old_page_owner->tgid;
259	new_page_owner->free_pid = old_page_owner->free_pid;
260	new_page_owner->free_tgid = old_page_owner->free_tgid;
261	new_page_owner->ts_nsec = old_page_owner->ts_nsec;
262	new_page_owner->free_ts_nsec = old_page_owner->ts_nsec;
263	strcpy(new_page_owner->comm, old_page_owner->comm);
264
265	/*
266	 * We don't clear the bit on the old folio as it's going to be freed
267	 * after migration. Until then, the info can be useful in case of
268	 * a bug, and the overall stats will be off a bit only temporarily.
269	 * Also, migrate_misplaced_transhuge_page() can still fail the
270	 * migration and then we want the old folio to retain the info. But
271	 * in that case we also don't need to explicitly clear the info from
272	 * the new page, which will be freed.
273	 */
274	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
275	__set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
276	page_ext_put(new_ext);
277	page_ext_put(old_ext);
278}
279
280void pagetypeinfo_showmixedcount_print(struct seq_file *m,
281				       pg_data_t *pgdat, struct zone *zone)
282{
283	struct page *page;
284	struct page_ext *page_ext;
285	struct page_owner *page_owner;
286	unsigned long pfn, block_end_pfn;
287	unsigned long end_pfn = zone_end_pfn(zone);
288	unsigned long count[MIGRATE_TYPES] = { 0, };
289	int pageblock_mt, page_mt;
290	int i;
291
292	/* Scan block by block. First and last block may be incomplete */
293	pfn = zone->zone_start_pfn;
294
295	/*
296	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
297	 * a zone boundary, it will be double counted between zones. This does
298	 * not matter as the mixed block count will still be correct
299	 */
300	for (; pfn < end_pfn; ) {
301		page = pfn_to_online_page(pfn);
302		if (!page) {
303			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
304			continue;
305		}
306
307		block_end_pfn = pageblock_end_pfn(pfn);
308		block_end_pfn = min(block_end_pfn, end_pfn);
309
310		pageblock_mt = get_pageblock_migratetype(page);
311
312		for (; pfn < block_end_pfn; pfn++) {
313			/* The pageblock is online, no need to recheck. */
314			page = pfn_to_page(pfn);
315
316			if (page_zone(page) != zone)
317				continue;
318
319			if (PageBuddy(page)) {
320				unsigned long freepage_order;
321
322				freepage_order = buddy_order_unsafe(page);
323				if (freepage_order <= MAX_PAGE_ORDER)
324					pfn += (1UL << freepage_order) - 1;
325				continue;
326			}
327
328			if (PageReserved(page))
329				continue;
330
331			page_ext = page_ext_get(page);
332			if (unlikely(!page_ext))
333				continue;
334
335			if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
336				goto ext_put_continue;
337
338			page_owner = get_page_owner(page_ext);
339			page_mt = gfp_migratetype(page_owner->gfp_mask);
340			if (pageblock_mt != page_mt) {
341				if (is_migrate_cma(pageblock_mt))
342					count[MIGRATE_MOVABLE]++;
343				else
344					count[pageblock_mt]++;
345
346				pfn = block_end_pfn;
347				page_ext_put(page_ext);
348				break;
349			}
350			pfn += (1UL << page_owner->order) - 1;
351ext_put_continue:
352			page_ext_put(page_ext);
353		}
354	}
355
356	/* Print counts */
357	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
358	for (i = 0; i < MIGRATE_TYPES; i++)
359		seq_printf(m, "%12lu ", count[i]);
360	seq_putc(m, '\n');
361}
362
363/*
364 * Looking for memcg information and print it out
365 */
366static inline int print_page_owner_memcg(char *kbuf, size_t count, int ret,
367					 struct page *page)
368{
369#ifdef CONFIG_MEMCG
370	unsigned long memcg_data;
371	struct mem_cgroup *memcg;
372	bool online;
373	char name[80];
374
375	rcu_read_lock();
376	memcg_data = READ_ONCE(page->memcg_data);
377	if (!memcg_data)
378		goto out_unlock;
379
380	if (memcg_data & MEMCG_DATA_OBJCGS)
381		ret += scnprintf(kbuf + ret, count - ret,
382				"Slab cache page\n");
383
384	memcg = page_memcg_check(page);
385	if (!memcg)
386		goto out_unlock;
387
388	online = (memcg->css.flags & CSS_ONLINE);
389	cgroup_name(memcg->css.cgroup, name, sizeof(name));
390	ret += scnprintf(kbuf + ret, count - ret,
391			"Charged %sto %smemcg %s\n",
392			PageMemcgKmem(page) ? "(via objcg) " : "",
393			online ? "" : "offline ",
394			name);
395out_unlock:
396	rcu_read_unlock();
397#endif /* CONFIG_MEMCG */
398
399	return ret;
400}
401
402static ssize_t
403print_page_owner(char __user *buf, size_t count, unsigned long pfn,
404		struct page *page, struct page_owner *page_owner,
405		depot_stack_handle_t handle)
406{
407	int ret, pageblock_mt, page_mt;
408	char *kbuf;
409
410	count = min_t(size_t, count, PAGE_SIZE);
411	kbuf = kmalloc(count, GFP_KERNEL);
412	if (!kbuf)
413		return -ENOMEM;
414
415	ret = scnprintf(kbuf, count,
416			"Page allocated via order %u, mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu ns\n",
417			page_owner->order, page_owner->gfp_mask,
418			&page_owner->gfp_mask, page_owner->pid,
419			page_owner->tgid, page_owner->comm,
420			page_owner->ts_nsec);
421
422	/* Print information relevant to grouping pages by mobility */
423	pageblock_mt = get_pageblock_migratetype(page);
424	page_mt  = gfp_migratetype(page_owner->gfp_mask);
425	ret += scnprintf(kbuf + ret, count - ret,
426			"PFN 0x%lx type %s Block %lu type %s Flags %pGp\n",
427			pfn,
428			migratetype_names[page_mt],
429			pfn >> pageblock_order,
430			migratetype_names[pageblock_mt],
431			&page->flags);
432
433	ret += stack_depot_snprint(handle, kbuf + ret, count - ret, 0);
434	if (ret >= count)
435		goto err;
436
437	if (page_owner->last_migrate_reason != -1) {
438		ret += scnprintf(kbuf + ret, count - ret,
439			"Page has been migrated, last migrate reason: %s\n",
440			migrate_reason_names[page_owner->last_migrate_reason]);
441	}
442
443	ret = print_page_owner_memcg(kbuf, count, ret, page);
444
445	ret += snprintf(kbuf + ret, count - ret, "\n");
446	if (ret >= count)
447		goto err;
448
449	if (copy_to_user(buf, kbuf, ret))
450		ret = -EFAULT;
451
452	kfree(kbuf);
453	return ret;
454
455err:
456	kfree(kbuf);
457	return -ENOMEM;
458}
459
460void __dump_page_owner(const struct page *page)
461{
462	struct page_ext *page_ext = page_ext_get((void *)page);
463	struct page_owner *page_owner;
464	depot_stack_handle_t handle;
465	gfp_t gfp_mask;
466	int mt;
467
468	if (unlikely(!page_ext)) {
469		pr_alert("There is not page extension available.\n");
470		return;
471	}
472
473	page_owner = get_page_owner(page_ext);
474	gfp_mask = page_owner->gfp_mask;
475	mt = gfp_migratetype(gfp_mask);
476
477	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
478		pr_alert("page_owner info is not present (never set?)\n");
479		page_ext_put(page_ext);
480		return;
481	}
482
483	if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
484		pr_alert("page_owner tracks the page as allocated\n");
485	else
486		pr_alert("page_owner tracks the page as freed\n");
487
488	pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu, free_ts %llu\n",
489		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
490		 page_owner->pid, page_owner->tgid, page_owner->comm,
491		 page_owner->ts_nsec, page_owner->free_ts_nsec);
492
493	handle = READ_ONCE(page_owner->handle);
494	if (!handle)
495		pr_alert("page_owner allocation stack trace missing\n");
496	else
497		stack_depot_print(handle);
498
499	handle = READ_ONCE(page_owner->free_handle);
500	if (!handle) {
501		pr_alert("page_owner free stack trace missing\n");
502	} else {
503		pr_alert("page last free pid %d tgid %d stack trace:\n",
504			  page_owner->free_pid, page_owner->free_tgid);
505		stack_depot_print(handle);
506	}
507
508	if (page_owner->last_migrate_reason != -1)
509		pr_alert("page has been migrated, last migrate reason: %s\n",
510			migrate_reason_names[page_owner->last_migrate_reason]);
511	page_ext_put(page_ext);
512}
513
514static ssize_t
515read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
516{
517	unsigned long pfn;
518	struct page *page;
519	struct page_ext *page_ext;
520	struct page_owner *page_owner;
521	depot_stack_handle_t handle;
522
523	if (!static_branch_unlikely(&page_owner_inited))
524		return -EINVAL;
525
526	page = NULL;
527	if (*ppos == 0)
528		pfn = min_low_pfn;
529	else
530		pfn = *ppos;
531	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
532	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
533		pfn++;
534
535	/* Find an allocated page */
536	for (; pfn < max_pfn; pfn++) {
537		/*
538		 * This temporary page_owner is required so
539		 * that we can avoid the context switches while holding
540		 * the rcu lock and copying the page owner information to
541		 * user through copy_to_user() or GFP_KERNEL allocations.
542		 */
543		struct page_owner page_owner_tmp;
544
545		/*
546		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
547		 * validate the area as existing, skip it if not
548		 */
549		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
550			pfn += MAX_ORDER_NR_PAGES - 1;
551			continue;
552		}
553
554		page = pfn_to_page(pfn);
555		if (PageBuddy(page)) {
556			unsigned long freepage_order = buddy_order_unsafe(page);
557
558			if (freepage_order <= MAX_PAGE_ORDER)
559				pfn += (1UL << freepage_order) - 1;
560			continue;
561		}
562
563		page_ext = page_ext_get(page);
564		if (unlikely(!page_ext))
565			continue;
566
567		/*
568		 * Some pages could be missed by concurrent allocation or free,
569		 * because we don't hold the zone lock.
570		 */
571		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
572			goto ext_put_continue;
573
574		/*
575		 * Although we do have the info about past allocation of free
576		 * pages, it's not relevant for current memory usage.
577		 */
578		if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
579			goto ext_put_continue;
580
581		page_owner = get_page_owner(page_ext);
582
583		/*
584		 * Don't print "tail" pages of high-order allocations as that
585		 * would inflate the stats.
586		 */
587		if (!IS_ALIGNED(pfn, 1 << page_owner->order))
588			goto ext_put_continue;
589
590		/*
591		 * Access to page_ext->handle isn't synchronous so we should
592		 * be careful to access it.
593		 */
594		handle = READ_ONCE(page_owner->handle);
595		if (!handle)
596			goto ext_put_continue;
597
598		/* Record the next PFN to read in the file offset */
599		*ppos = pfn + 1;
600
601		page_owner_tmp = *page_owner;
602		page_ext_put(page_ext);
603		return print_page_owner(buf, count, pfn, page,
604				&page_owner_tmp, handle);
605ext_put_continue:
606		page_ext_put(page_ext);
607	}
608
609	return 0;
610}
611
612static loff_t lseek_page_owner(struct file *file, loff_t offset, int orig)
613{
614	switch (orig) {
615	case SEEK_SET:
616		file->f_pos = offset;
617		break;
618	case SEEK_CUR:
619		file->f_pos += offset;
620		break;
621	default:
622		return -EINVAL;
623	}
624	return file->f_pos;
625}
626
627static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
628{
629	unsigned long pfn = zone->zone_start_pfn;
630	unsigned long end_pfn = zone_end_pfn(zone);
631	unsigned long count = 0;
632
633	/*
634	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
635	 * a zone boundary, it will be double counted between zones. This does
636	 * not matter as the mixed block count will still be correct
637	 */
638	for (; pfn < end_pfn; ) {
639		unsigned long block_end_pfn;
640
641		if (!pfn_valid(pfn)) {
642			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
643			continue;
644		}
645
646		block_end_pfn = pageblock_end_pfn(pfn);
647		block_end_pfn = min(block_end_pfn, end_pfn);
648
649		for (; pfn < block_end_pfn; pfn++) {
650			struct page *page = pfn_to_page(pfn);
651			struct page_ext *page_ext;
652
653			if (page_zone(page) != zone)
654				continue;
655
656			/*
657			 * To avoid having to grab zone->lock, be a little
658			 * careful when reading buddy page order. The only
659			 * danger is that we skip too much and potentially miss
660			 * some early allocated pages, which is better than
661			 * heavy lock contention.
662			 */
663			if (PageBuddy(page)) {
664				unsigned long order = buddy_order_unsafe(page);
665
666				if (order > 0 && order <= MAX_PAGE_ORDER)
667					pfn += (1UL << order) - 1;
668				continue;
669			}
670
671			if (PageReserved(page))
672				continue;
673
674			page_ext = page_ext_get(page);
675			if (unlikely(!page_ext))
676				continue;
677
678			/* Maybe overlapping zone */
679			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
680				goto ext_put_continue;
681
682			/* Found early allocated page */
683			__set_page_owner_handle(page_ext, early_handle,
684						0, 0);
685			count++;
686ext_put_continue:
687			page_ext_put(page_ext);
688		}
689		cond_resched();
690	}
691
692	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
693		pgdat->node_id, zone->name, count);
694}
695
696static void init_zones_in_node(pg_data_t *pgdat)
697{
698	struct zone *zone;
699	struct zone *node_zones = pgdat->node_zones;
700
701	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
702		if (!populated_zone(zone))
703			continue;
704
705		init_pages_in_zone(pgdat, zone);
706	}
707}
708
709static void init_early_allocated_pages(void)
710{
711	pg_data_t *pgdat;
712
713	for_each_online_pgdat(pgdat)
714		init_zones_in_node(pgdat);
715}
716
717static const struct file_operations proc_page_owner_operations = {
718	.read		= read_page_owner,
719	.llseek		= lseek_page_owner,
720};
721
722static int __init pageowner_init(void)
723{
724	if (!static_branch_unlikely(&page_owner_inited)) {
725		pr_info("page_owner is disabled\n");
726		return 0;
727	}
728
729	debugfs_create_file("page_owner", 0400, NULL, NULL,
730			    &proc_page_owner_operations);
731
732	return 0;
733}
734late_initcall(pageowner_init)
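
For reference, the typical way to exercise this code, per the kernel's page_owner documentation (Documentation/mm/page_owner.rst in recent trees): build with CONFIG_PAGE_OWNER=y, boot with the page_owner=on parameter parsed by early_page_owner_param(), and then, with debugfs mounted, read back the per-page records that read_page_owner() emits:

	cat /sys/kernel/debug/page_owner > page_owner_full.txt

The resulting dump can be aggregated by allocation stack with the page_owner_sort helper shipped in the kernel's tools directory.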