   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/pagewalk.h>
   3#include <linux/mm_inline.h>
   4#include <linux/hugetlb.h>
   5#include <linux/huge_mm.h>
   6#include <linux/mount.h>
   7#include <linux/seq_file.h>
   8#include <linux/highmem.h>
   9#include <linux/ptrace.h>
  10#include <linux/slab.h>
  11#include <linux/pagemap.h>
  12#include <linux/mempolicy.h>
  13#include <linux/rmap.h>
  14#include <linux/swap.h>
  15#include <linux/sched/mm.h>
  16#include <linux/swapops.h>
  17#include <linux/mmu_notifier.h>
  18#include <linux/page_idle.h>
  19#include <linux/shmem_fs.h>
  20#include <linux/uaccess.h>
  21#include <linux/pkeys.h>
  22
  23#include <asm/elf.h>
  24#include <asm/tlb.h>
  25#include <asm/tlbflush.h>
  26#include "internal.h"
  27
  28#define SEQ_PUT_DEC(str, val) \
  29		seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
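/*
 * Editor's note (illustration, not part of the original source): with 4 KiB
 * pages (PAGE_SHIFT == 12), SEQ_PUT_DEC("VmPeak:\t", hiwater_vm) becomes
 * seq_put_decimal_ull_width(m, "VmPeak:\t", hiwater_vm << 2, 8), i.e. the
 * page count is converted to kB and printed in a field at least 8 wide.
 */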
  30void task_mem(struct seq_file *m, struct mm_struct *mm)
  31{
  32	unsigned long text, lib, swap, anon, file, shmem;
  33	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
  34
  35	anon = get_mm_counter(mm, MM_ANONPAGES);
  36	file = get_mm_counter(mm, MM_FILEPAGES);
  37	shmem = get_mm_counter(mm, MM_SHMEMPAGES);
  38
  39	/*
  40	 * Note: to minimize their overhead, mm maintains hiwater_vm and
  41	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
  42	 * collector of these hiwater stats must therefore get total_vm
  43	 * and rss too, which will usually be the higher.  Barriers? not
  44	 * worth the effort, such snapshots can always be inconsistent.
  45	 */
  46	hiwater_vm = total_vm = mm->total_vm;
  47	if (hiwater_vm < mm->hiwater_vm)
  48		hiwater_vm = mm->hiwater_vm;
  49	hiwater_rss = total_rss = anon + file + shmem;
  50	if (hiwater_rss < mm->hiwater_rss)
  51		hiwater_rss = mm->hiwater_rss;
  52
  53	/* split executable areas between text and lib */
  54	text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
  55	text = min(text, mm->exec_vm << PAGE_SHIFT);
  56	lib = (mm->exec_vm << PAGE_SHIFT) - text;
  57
  58	swap = get_mm_counter(mm, MM_SWAPENTS);
  59	SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
  60	SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
  61	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
  62	SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
  63	SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
  64	SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
  65	SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
  66	SEQ_PUT_DEC(" kB\nRssFile:\t", file);
  67	SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
  68	SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
  69	SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
  70	seq_put_decimal_ull_width(m,
  71		    " kB\nVmExe:\t", text >> 10, 8);
  72	seq_put_decimal_ull_width(m,
  73		    " kB\nVmLib:\t", lib >> 10, 8);
  74	seq_put_decimal_ull_width(m,
  75		    " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
  76	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
  77	seq_puts(m, " kB\n");
  78	hugetlb_report_usage(m, mm);
  79}
  80#undef SEQ_PUT_DEC
  81
  82unsigned long task_vsize(struct mm_struct *mm)
  83{
  84	return PAGE_SIZE * mm->total_vm;
  85}
  86
  87unsigned long task_statm(struct mm_struct *mm,
  88			 unsigned long *shared, unsigned long *text,
  89			 unsigned long *data, unsigned long *resident)
  90{
  91	*shared = get_mm_counter(mm, MM_FILEPAGES) +
  92			get_mm_counter(mm, MM_SHMEMPAGES);
  93	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
  94								>> PAGE_SHIFT;
  95	*data = mm->data_vm + mm->stack_vm;
  96	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
  97	return mm->total_vm;
  98}
  99
 100#ifdef CONFIG_NUMA
 101/*
 102 * Save get_task_policy() for show_numa_map().
 103 */
 104static void hold_task_mempolicy(struct proc_maps_private *priv)
 105{
 106	struct task_struct *task = priv->task;
 107
 108	task_lock(task);
 109	priv->task_mempolicy = get_task_policy(task);
 110	mpol_get(priv->task_mempolicy);
 111	task_unlock(task);
 112}
 113static void release_task_mempolicy(struct proc_maps_private *priv)
 114{
 115	mpol_put(priv->task_mempolicy);
 116}
 117#else
 118static void hold_task_mempolicy(struct proc_maps_private *priv)
 119{
 120}
 121static void release_task_mempolicy(struct proc_maps_private *priv)
 122{
 123}
 124#endif
 125
 126static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
 127						loff_t *ppos)
 128{
 129	struct vm_area_struct *vma = vma_next(&priv->iter);
 130
 131	if (vma) {
 132		*ppos = vma->vm_start;
 133	} else {
 134		*ppos = -2UL;
 135		vma = get_gate_vma(priv->mm);
 136	}
 137
 138	return vma;
 139}
 140
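/*
 * Editor's note: *ppos doubles as the iteration cursor for the seq_file
 * walk below.  It normally holds the next VMA's start address;
 * proc_get_vma() sets it to -2UL when only the gate VMA is left to report,
 * m_next() then turns -2UL into -1UL, and m_start() treats -1UL as "done".
 */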
 141static void *m_start(struct seq_file *m, loff_t *ppos)
 142{
 143	struct proc_maps_private *priv = m->private;
 144	unsigned long last_addr = *ppos;
 145	struct mm_struct *mm;
 146
 147	/* See m_next(). Zero at the start or after lseek. */
 148	if (last_addr == -1UL)
 149		return NULL;
 150
 151	priv->task = get_proc_task(priv->inode);
 152	if (!priv->task)
 153		return ERR_PTR(-ESRCH);
 154
 155	mm = priv->mm;
 156	if (!mm || !mmget_not_zero(mm)) {
 157		put_task_struct(priv->task);
 158		priv->task = NULL;
 159		return NULL;
 160	}
 161
 162	if (mmap_read_lock_killable(mm)) {
 163		mmput(mm);
 164		put_task_struct(priv->task);
 165		priv->task = NULL;
 166		return ERR_PTR(-EINTR);
 167	}
 168
 169	vma_iter_init(&priv->iter, mm, last_addr);
 170	hold_task_mempolicy(priv);
 171	if (last_addr == -2UL)
 172		return get_gate_vma(mm);
 173
 174	return proc_get_vma(priv, ppos);
 175}
 176
 177static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
 178{
 179	if (*ppos == -2UL) {
 180		*ppos = -1UL;
 181		return NULL;
 182	}
 183	return proc_get_vma(m->private, ppos);
 184}
 185
 186static void m_stop(struct seq_file *m, void *v)
 187{
 188	struct proc_maps_private *priv = m->private;
 189	struct mm_struct *mm = priv->mm;
 190
 191	if (!priv->task)
 192		return;
 193
 194	release_task_mempolicy(priv);
 195	mmap_read_unlock(mm);
 196	mmput(mm);
 197	put_task_struct(priv->task);
 198	priv->task = NULL;
 199}
 200
 201static int proc_maps_open(struct inode *inode, struct file *file,
 202			const struct seq_operations *ops, int psize)
 203{
 204	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
 205
 206	if (!priv)
 207		return -ENOMEM;
 208
 209	priv->inode = inode;
 210	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
 211	if (IS_ERR(priv->mm)) {
 212		int err = PTR_ERR(priv->mm);
 213
 214		seq_release_private(inode, file);
 215		return err;
 216	}
 217
 218	return 0;
 219}
 220
 221static int proc_map_release(struct inode *inode, struct file *file)
 222{
 223	struct seq_file *seq = file->private_data;
 224	struct proc_maps_private *priv = seq->private;
 225
 226	if (priv->mm)
 227		mmdrop(priv->mm);
 228
 229	return seq_release_private(inode, file);
 230}
 231
 232static int do_maps_open(struct inode *inode, struct file *file,
 233			const struct seq_operations *ops)
 234{
 235	return proc_maps_open(inode, file, ops,
 236				sizeof(struct proc_maps_private));
 237}
 238
 239/*
 240 * Indicate if the VMA is a stack for the given task; for
 241 * /proc/PID/maps that is the stack of the main task.
 242 */
 243static int is_stack(struct vm_area_struct *vma)
 244{
 245	/*
 246	 * We make no effort to guess what a given thread considers to be
 247	 * its "stack".  It's not even well-defined for programs written
  248	 * in languages like Go.
 249	 */
 250	return vma->vm_start <= vma->vm_mm->start_stack &&
 251		vma->vm_end >= vma->vm_mm->start_stack;
 252}
 253
 254static void show_vma_header_prefix(struct seq_file *m,
 255				   unsigned long start, unsigned long end,
 256				   vm_flags_t flags, unsigned long long pgoff,
 257				   dev_t dev, unsigned long ino)
 258{
 259	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 260	seq_put_hex_ll(m, NULL, start, 8);
 261	seq_put_hex_ll(m, "-", end, 8);
 262	seq_putc(m, ' ');
 263	seq_putc(m, flags & VM_READ ? 'r' : '-');
 264	seq_putc(m, flags & VM_WRITE ? 'w' : '-');
 265	seq_putc(m, flags & VM_EXEC ? 'x' : '-');
 266	seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
 267	seq_put_hex_ll(m, " ", pgoff, 8);
 268	seq_put_hex_ll(m, " ", MAJOR(dev), 2);
 269	seq_put_hex_ll(m, ":", MINOR(dev), 2);
 270	seq_put_decimal_ull(m, " ", ino);
 271	seq_putc(m, ' ');
 272}
 273
 274static void
 275show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 276{
 277	struct anon_vma_name *anon_name = NULL;
 278	struct mm_struct *mm = vma->vm_mm;
 279	struct file *file = vma->vm_file;
 280	vm_flags_t flags = vma->vm_flags;
 281	unsigned long ino = 0;
 282	unsigned long long pgoff = 0;
 283	unsigned long start, end;
 284	dev_t dev = 0;
 285	const char *name = NULL;
 286
 287	if (file) {
 288		struct inode *inode = file_inode(vma->vm_file);
 289		dev = inode->i_sb->s_dev;
 290		ino = inode->i_ino;
 291		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
 292	}
 293
 294	start = vma->vm_start;
 295	end = vma->vm_end;
 296	show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
 297	if (mm)
 298		anon_name = anon_vma_name(vma);
 299
 300	/*
 301	 * Print the dentry name for named mappings, and a
 302	 * special [heap] marker for the heap:
 303	 */
 304	if (file) {
 305		seq_pad(m, ' ');
 306		/*
 307		 * If user named this anon shared memory via
 308		 * prctl(PR_SET_VMA ..., use the provided name.
 309		 */
 310		if (anon_name)
 311			seq_printf(m, "[anon_shmem:%s]", anon_name->name);
 312		else
 313			seq_file_path(m, file, "\n");
 314		goto done;
 315	}
 316
 317	if (vma->vm_ops && vma->vm_ops->name) {
 318		name = vma->vm_ops->name(vma);
 319		if (name)
 320			goto done;
 321	}
 322
 323	name = arch_vma_name(vma);
 324	if (!name) {
 325		if (!mm) {
 326			name = "[vdso]";
 327			goto done;
 328		}
 329
 330		if (vma->vm_start <= mm->brk &&
 331		    vma->vm_end >= mm->start_brk) {
 332			name = "[heap]";
 333			goto done;
 334		}
 335
 336		if (is_stack(vma)) {
 337			name = "[stack]";
 338			goto done;
 339		}
 340
 341		if (anon_name) {
 342			seq_pad(m, ' ');
 343			seq_printf(m, "[anon:%s]", anon_name->name);
 344		}
 345	}
 346
 347done:
 348	if (name) {
 349		seq_pad(m, ' ');
 350		seq_puts(m, name);
 351	}
 352	seq_putc(m, '\n');
 353}
 354
 355static int show_map(struct seq_file *m, void *v)
 356{
 357	show_map_vma(m, v);
 358	return 0;
 359}
 360
 361static const struct seq_operations proc_pid_maps_op = {
 362	.start	= m_start,
 363	.next	= m_next,
 364	.stop	= m_stop,
 365	.show	= show_map
 366};
 367
 368static int pid_maps_open(struct inode *inode, struct file *file)
 369{
 370	return do_maps_open(inode, file, &proc_pid_maps_op);
 371}
 372
 373const struct file_operations proc_pid_maps_operations = {
 374	.open		= pid_maps_open,
 375	.read		= seq_read,
 376	.llseek		= seq_lseek,
 377	.release	= proc_map_release,
 378};
 379
 380/*
 381 * Proportional Set Size (PSS): my share of RSS.
 382 *
 383 * PSS of a process is the count of pages it has in memory, where each
 384 * page is divided by the number of processes sharing it.  So if a
 385 * process has 1000 pages all to itself, and 1000 shared with one other
 386 * process, its PSS will be 1500.
 387 *
 388 * To keep (accumulated) division errors low, we adopt a 64bit
 389 * fixed-point pss counter, so (pss >> PSS_SHIFT) is the real
 390 * byte count.
 391 *
 392 * A shift of 12 before division means (assuming 4K page size):
 393 * 	- 1M 3-user-pages add up to 8KB errors;
 394 * 	- supports mapcount up to 2^24, or 16M;
 395 * 	- supports PSS up to 2^52 bytes, or 4PB.
 396 */
 397#define PSS_SHIFT 12
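/*
 * Editor's illustration of the fixed-point scheme above (assuming
 * PAGE_SIZE == 4096): a page shared by 3 processes contributes
 *     (4096 << PSS_SHIFT) / 3 = 16777216 / 3 = 5592405
 * to the accumulated counter, and converting back gives
 *     5592405 >> PSS_SHIFT = 1365 bytes,
 * one third of the page with less than a byte of rounding error.
 */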
 398
 399#ifdef CONFIG_PROC_PAGE_MONITOR
 400struct mem_size_stats {
 401	unsigned long resident;
 402	unsigned long shared_clean;
 403	unsigned long shared_dirty;
 404	unsigned long private_clean;
 405	unsigned long private_dirty;
 406	unsigned long referenced;
 407	unsigned long anonymous;
 408	unsigned long lazyfree;
 409	unsigned long anonymous_thp;
 410	unsigned long shmem_thp;
 411	unsigned long file_thp;
 412	unsigned long swap;
 413	unsigned long shared_hugetlb;
 414	unsigned long private_hugetlb;
 415	u64 pss;
 416	u64 pss_anon;
 417	u64 pss_file;
 418	u64 pss_shmem;
 419	u64 pss_dirty;
 420	u64 pss_locked;
 421	u64 swap_pss;
 422};
 423
 424static void smaps_page_accumulate(struct mem_size_stats *mss,
 425		struct page *page, unsigned long size, unsigned long pss,
 426		bool dirty, bool locked, bool private)
 427{
 428	mss->pss += pss;
 429
 430	if (PageAnon(page))
 431		mss->pss_anon += pss;
 432	else if (PageSwapBacked(page))
 433		mss->pss_shmem += pss;
 434	else
 435		mss->pss_file += pss;
 436
 437	if (locked)
 438		mss->pss_locked += pss;
 439
 440	if (dirty || PageDirty(page)) {
 441		mss->pss_dirty += pss;
 442		if (private)
 443			mss->private_dirty += size;
 444		else
 445			mss->shared_dirty += size;
 446	} else {
 447		if (private)
 448			mss->private_clean += size;
 449		else
 450			mss->shared_clean += size;
 451	}
 452}
 453
 454static void smaps_account(struct mem_size_stats *mss, struct page *page,
 455		bool compound, bool young, bool dirty, bool locked,
 456		bool migration)
 457{
 458	int i, nr = compound ? compound_nr(page) : 1;
 459	unsigned long size = nr * PAGE_SIZE;
 460
 461	/*
 462	 * First accumulate quantities that depend only on |size| and the type
 463	 * of the compound page.
 464	 */
 465	if (PageAnon(page)) {
 466		mss->anonymous += size;
 467		if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
 468			mss->lazyfree += size;
 469	}
 470
 471	mss->resident += size;
 472	/* Accumulate the size in pages that have been accessed. */
 473	if (young || page_is_young(page) || PageReferenced(page))
 474		mss->referenced += size;
 475
 476	/*
 477	 * Then accumulate quantities that may depend on sharing, or that may
 478	 * differ page-by-page.
 479	 *
 480	 * page_count(page) == 1 guarantees the page is mapped exactly once.
 481	 * If any subpage of the compound page is mapped via PTE, it would
 482	 * elevate page_count().
 483	 *
 484	 * The page_mapcount() is called to get a snapshot of the mapcount.
 485	 * Without holding the page lock this snapshot can be slightly wrong as
 486	 * we cannot always read the mapcount atomically.  It is not safe to
 487	 * call page_mapcount() even with PTL held if the page is not mapped,
 488	 * especially for migration entries.  Treat regular migration entries
 489	 * as mapcount == 1.
 490	 */
 491	if ((page_count(page) == 1) || migration) {
 492		smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
 493			locked, true);
 494		return;
 495	}
 496	for (i = 0; i < nr; i++, page++) {
 497		int mapcount = page_mapcount(page);
 498		unsigned long pss = PAGE_SIZE << PSS_SHIFT;
 499		if (mapcount >= 2)
 500			pss /= mapcount;
 501		smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked,
 502				      mapcount < 2);
 503	}
 504}
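/*
 * Editor's illustration of the proportional split above: for a PMD-mapped
 * THP (512 subpages of 4 KiB) where every subpage is mapped by two
 * processes, each subpage contributes (PAGE_SIZE << PSS_SHIFT) / 2, so the
 * compound page adds 1 MiB to this process's Pss while adding the full
 * 2 MiB to its Rss.
 */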
 505
 506#ifdef CONFIG_SHMEM
 507static int smaps_pte_hole(unsigned long addr, unsigned long end,
 508			  __always_unused int depth, struct mm_walk *walk)
 509{
 510	struct mem_size_stats *mss = walk->private;
 511	struct vm_area_struct *vma = walk->vma;
 512
 513	mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping,
 514					      linear_page_index(vma, addr),
 515					      linear_page_index(vma, end));
 516
 517	return 0;
 518}
 519#else
 520#define smaps_pte_hole		NULL
 521#endif /* CONFIG_SHMEM */
 522
 523static void smaps_pte_hole_lookup(unsigned long addr, struct mm_walk *walk)
 524{
 525#ifdef CONFIG_SHMEM
 526	if (walk->ops->pte_hole) {
 527		/* depth is not used */
 528		smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk);
 529	}
 530#endif
 531}
 532
 533static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 534		struct mm_walk *walk)
 535{
 536	struct mem_size_stats *mss = walk->private;
 537	struct vm_area_struct *vma = walk->vma;
 538	bool locked = !!(vma->vm_flags & VM_LOCKED);
 539	struct page *page = NULL;
 540	bool migration = false, young = false, dirty = false;
 541
 542	if (pte_present(*pte)) {
 543		page = vm_normal_page(vma, addr, *pte);
 544		young = pte_young(*pte);
 545		dirty = pte_dirty(*pte);
 546	} else if (is_swap_pte(*pte)) {
 547		swp_entry_t swpent = pte_to_swp_entry(*pte);
 548
 549		if (!non_swap_entry(swpent)) {
 550			int mapcount;
 551
 552			mss->swap += PAGE_SIZE;
 553			mapcount = swp_swapcount(swpent);
 554			if (mapcount >= 2) {
 555				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
 556
 557				do_div(pss_delta, mapcount);
 558				mss->swap_pss += pss_delta;
 559			} else {
 560				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
 561			}
 562		} else if (is_pfn_swap_entry(swpent)) {
 563			if (is_migration_entry(swpent))
 564				migration = true;
 565			page = pfn_swap_entry_to_page(swpent);
 566		}
 567	} else {
 568		smaps_pte_hole_lookup(addr, walk);
 569		return;
 570	}
 571
 572	if (!page)
 573		return;
 574
 575	smaps_account(mss, page, false, young, dirty, locked, migration);
 576}
 577
 578#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 579static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 580		struct mm_walk *walk)
 581{
 582	struct mem_size_stats *mss = walk->private;
 583	struct vm_area_struct *vma = walk->vma;
 584	bool locked = !!(vma->vm_flags & VM_LOCKED);
 585	struct page *page = NULL;
 586	bool migration = false;
 587
 588	if (pmd_present(*pmd)) {
 589		/* FOLL_DUMP will return -EFAULT on huge zero page */
 590		page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
 591	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
 592		swp_entry_t entry = pmd_to_swp_entry(*pmd);
 593
 594		if (is_migration_entry(entry)) {
 595			migration = true;
 596			page = pfn_swap_entry_to_page(entry);
 597		}
 598	}
 599	if (IS_ERR_OR_NULL(page))
 600		return;
 601	if (PageAnon(page))
 602		mss->anonymous_thp += HPAGE_PMD_SIZE;
 603	else if (PageSwapBacked(page))
 604		mss->shmem_thp += HPAGE_PMD_SIZE;
 605	else if (is_zone_device_page(page))
 606		/* pass */;
 607	else
 608		mss->file_thp += HPAGE_PMD_SIZE;
 609
 610	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
 611		      locked, migration);
 612}
 613#else
 614static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 615		struct mm_walk *walk)
 616{
 617}
 618#endif
 619
 620static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 621			   struct mm_walk *walk)
 622{
 623	struct vm_area_struct *vma = walk->vma;
 624	pte_t *pte;
 625	spinlock_t *ptl;
 626
 627	ptl = pmd_trans_huge_lock(pmd, vma);
 628	if (ptl) {
 629		smaps_pmd_entry(pmd, addr, walk);
 630		spin_unlock(ptl);
 631		goto out;
 632	}
 633
 634	if (pmd_trans_unstable(pmd))
 635		goto out;
 636	/*
 637	 * The mmap_lock held all the way back in m_start() is what
 638	 * keeps khugepaged out of here and from collapsing things
 639	 * in here.
 640	 */
 641	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 642	for (; addr != end; pte++, addr += PAGE_SIZE)
 643		smaps_pte_entry(pte, addr, walk);
 644	pte_unmap_unlock(pte - 1, ptl);
 645out:
 646	cond_resched();
 647	return 0;
 648}
 649
 650static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
 651{
 652	/*
 653	 * Don't forget to update Documentation/ on changes.
 654	 */
 655	static const char mnemonics[BITS_PER_LONG][2] = {
 656		/*
 657		 * In case we meet a flag we don't know about.
 658		 */
 659		[0 ... (BITS_PER_LONG-1)] = "??",
 660
 661		[ilog2(VM_READ)]	= "rd",
 662		[ilog2(VM_WRITE)]	= "wr",
 663		[ilog2(VM_EXEC)]	= "ex",
 664		[ilog2(VM_SHARED)]	= "sh",
 665		[ilog2(VM_MAYREAD)]	= "mr",
 666		[ilog2(VM_MAYWRITE)]	= "mw",
 667		[ilog2(VM_MAYEXEC)]	= "me",
 668		[ilog2(VM_MAYSHARE)]	= "ms",
 669		[ilog2(VM_GROWSDOWN)]	= "gd",
 670		[ilog2(VM_PFNMAP)]	= "pf",
 671		[ilog2(VM_LOCKED)]	= "lo",
 672		[ilog2(VM_IO)]		= "io",
 673		[ilog2(VM_SEQ_READ)]	= "sr",
 674		[ilog2(VM_RAND_READ)]	= "rr",
 675		[ilog2(VM_DONTCOPY)]	= "dc",
 676		[ilog2(VM_DONTEXPAND)]	= "de",
 677		[ilog2(VM_LOCKONFAULT)]	= "lf",
 678		[ilog2(VM_ACCOUNT)]	= "ac",
 679		[ilog2(VM_NORESERVE)]	= "nr",
 680		[ilog2(VM_HUGETLB)]	= "ht",
 681		[ilog2(VM_SYNC)]	= "sf",
 682		[ilog2(VM_ARCH_1)]	= "ar",
 683		[ilog2(VM_WIPEONFORK)]	= "wf",
 684		[ilog2(VM_DONTDUMP)]	= "dd",
 685#ifdef CONFIG_ARM64_BTI
 686		[ilog2(VM_ARM64_BTI)]	= "bt",
 687#endif
 688#ifdef CONFIG_MEM_SOFT_DIRTY
 689		[ilog2(VM_SOFTDIRTY)]	= "sd",
 690#endif
 691		[ilog2(VM_MIXEDMAP)]	= "mm",
 692		[ilog2(VM_HUGEPAGE)]	= "hg",
 693		[ilog2(VM_NOHUGEPAGE)]	= "nh",
 694		[ilog2(VM_MERGEABLE)]	= "mg",
 695		[ilog2(VM_UFFD_MISSING)]= "um",
 696		[ilog2(VM_UFFD_WP)]	= "uw",
 697#ifdef CONFIG_ARM64_MTE
 698		[ilog2(VM_MTE)]		= "mt",
 699		[ilog2(VM_MTE_ALLOWED)]	= "",
 700#endif
 701#ifdef CONFIG_ARCH_HAS_PKEYS
 702		/* These come out via ProtectionKey: */
 703		[ilog2(VM_PKEY_BIT0)]	= "",
 704		[ilog2(VM_PKEY_BIT1)]	= "",
 705		[ilog2(VM_PKEY_BIT2)]	= "",
 706		[ilog2(VM_PKEY_BIT3)]	= "",
 707#if VM_PKEY_BIT4
 708		[ilog2(VM_PKEY_BIT4)]	= "",
 709#endif
 710#endif /* CONFIG_ARCH_HAS_PKEYS */
 711#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
 712		[ilog2(VM_UFFD_MINOR)]	= "ui",
 713#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
 714	};
 715	size_t i;
 716
 717	seq_puts(m, "VmFlags: ");
 718	for (i = 0; i < BITS_PER_LONG; i++) {
 719		if (!mnemonics[i][0])
 720			continue;
 721		if (vma->vm_flags & (1UL << i)) {
 722			seq_putc(m, mnemonics[i][0]);
 723			seq_putc(m, mnemonics[i][1]);
 724			seq_putc(m, ' ');
 725		}
 726	}
 727	seq_putc(m, '\n');
 728}
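/*
 * Editor's note: the loop above prints the two-letter mnemonic of every set
 * flag, so a typical private anonymous read-write mapping shows up roughly
 * as
 *     VmFlags: rd wr mr mw me ac
 * (the exact set depends on the VMA and the kernel configuration).
 */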
 729
 730#ifdef CONFIG_HUGETLB_PAGE
 731static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
 732				 unsigned long addr, unsigned long end,
 733				 struct mm_walk *walk)
 734{
 735	struct mem_size_stats *mss = walk->private;
 736	struct vm_area_struct *vma = walk->vma;
 737	struct page *page = NULL;
 738
 739	if (pte_present(*pte)) {
 740		page = vm_normal_page(vma, addr, *pte);
 741	} else if (is_swap_pte(*pte)) {
 742		swp_entry_t swpent = pte_to_swp_entry(*pte);
 743
 744		if (is_pfn_swap_entry(swpent))
 745			page = pfn_swap_entry_to_page(swpent);
 746	}
 747	if (page) {
 748		if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte))
 749			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
 750		else
 751			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
 752	}
 753	return 0;
 754}
 755#else
 756#define smaps_hugetlb_range	NULL
 757#endif /* HUGETLB_PAGE */
 758
 759static const struct mm_walk_ops smaps_walk_ops = {
 760	.pmd_entry		= smaps_pte_range,
 761	.hugetlb_entry		= smaps_hugetlb_range,
 762};
 763
 764static const struct mm_walk_ops smaps_shmem_walk_ops = {
 765	.pmd_entry		= smaps_pte_range,
 766	.hugetlb_entry		= smaps_hugetlb_range,
 767	.pte_hole		= smaps_pte_hole,
 768};
 769
 770/*
 771 * Gather mem stats from @vma with the indicated beginning
 772 * address @start, and keep them in @mss.
 773 *
 774 * Use vm_start of @vma as the beginning address if @start is 0.
 775 */
 776static void smap_gather_stats(struct vm_area_struct *vma,
 777		struct mem_size_stats *mss, unsigned long start)
 778{
 779	const struct mm_walk_ops *ops = &smaps_walk_ops;
 780
 781	/* Invalid start */
 782	if (start >= vma->vm_end)
 783		return;
 784
 785#ifdef CONFIG_SHMEM
 786	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
 787		/*
 788		 * For shared or readonly shmem mappings we know that all
 789		 * swapped out pages belong to the shmem object, and we can
 790		 * obtain the swap value much more efficiently. For private
 791		 * writable mappings, we might have COW pages that are
 792		 * not affected by the parent swapped out pages of the shmem
 793		 * object, so we have to distinguish them during the page walk.
 794		 * Unless we know that the shmem object (or the part mapped by
 795		 * our VMA) has no swapped out pages at all.
 796		 */
 797		unsigned long shmem_swapped = shmem_swap_usage(vma);
 798
 799		if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
 800					!(vma->vm_flags & VM_WRITE))) {
 801			mss->swap += shmem_swapped;
 802		} else {
 803			ops = &smaps_shmem_walk_ops;
 804		}
 805	}
 806#endif
 807	/* mmap_lock is held in m_start */
 808	if (!start)
 809		walk_page_vma(vma, ops, mss);
 810	else
 811		walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
 812}
 813
 814#define SEQ_PUT_DEC(str, val) \
 815		seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
 816
 817/* Show the contents common for smaps and smaps_rollup */
 818static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
 819	bool rollup_mode)
 820{
 821	SEQ_PUT_DEC("Rss:            ", mss->resident);
 822	SEQ_PUT_DEC(" kB\nPss:            ", mss->pss >> PSS_SHIFT);
 823	SEQ_PUT_DEC(" kB\nPss_Dirty:      ", mss->pss_dirty >> PSS_SHIFT);
 824	if (rollup_mode) {
 825		/*
 826		 * These are meaningful only for smaps_rollup, otherwise two of
 827		 * them are zero, and the other one is the same as Pss.
 828		 */
 829		SEQ_PUT_DEC(" kB\nPss_Anon:       ",
 830			mss->pss_anon >> PSS_SHIFT);
 831		SEQ_PUT_DEC(" kB\nPss_File:       ",
 832			mss->pss_file >> PSS_SHIFT);
 833		SEQ_PUT_DEC(" kB\nPss_Shmem:      ",
 834			mss->pss_shmem >> PSS_SHIFT);
 835	}
 836	SEQ_PUT_DEC(" kB\nShared_Clean:   ", mss->shared_clean);
 837	SEQ_PUT_DEC(" kB\nShared_Dirty:   ", mss->shared_dirty);
 838	SEQ_PUT_DEC(" kB\nPrivate_Clean:  ", mss->private_clean);
 839	SEQ_PUT_DEC(" kB\nPrivate_Dirty:  ", mss->private_dirty);
 840	SEQ_PUT_DEC(" kB\nReferenced:     ", mss->referenced);
 841	SEQ_PUT_DEC(" kB\nAnonymous:      ", mss->anonymous);
 842	SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
 843	SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
 844	SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
 845	SEQ_PUT_DEC(" kB\nFilePmdMapped:  ", mss->file_thp);
 846	SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
 847	seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
 848				  mss->private_hugetlb >> 10, 7);
 849	SEQ_PUT_DEC(" kB\nSwap:           ", mss->swap);
 850	SEQ_PUT_DEC(" kB\nSwapPss:        ",
 851					mss->swap_pss >> PSS_SHIFT);
 852	SEQ_PUT_DEC(" kB\nLocked:         ",
 853					mss->pss_locked >> PSS_SHIFT);
 854	seq_puts(m, " kB\n");
 855}
 856
 857static int show_smap(struct seq_file *m, void *v)
 858{
 859	struct vm_area_struct *vma = v;
 860	struct mem_size_stats mss;
 861
 862	memset(&mss, 0, sizeof(mss));
 863
 864	smap_gather_stats(vma, &mss, 0);
 865
 866	show_map_vma(m, vma);
 867
 868	SEQ_PUT_DEC("Size:           ", vma->vm_end - vma->vm_start);
 869	SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
 870	SEQ_PUT_DEC(" kB\nMMUPageSize:    ", vma_mmu_pagesize(vma));
 871	seq_puts(m, " kB\n");
 872
 873	__show_smap(m, &mss, false);
 874
 875	seq_printf(m, "THPeligible:    %d\n",
 876		   hugepage_vma_check(vma, vma->vm_flags, true, false, true));
 877
 878	if (arch_pkeys_enabled())
 879		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
 880	show_smap_vma_flags(m, vma);
 881
 882	return 0;
 883}
 884
 885static int show_smaps_rollup(struct seq_file *m, void *v)
 886{
 887	struct proc_maps_private *priv = m->private;
 888	struct mem_size_stats mss;
 889	struct mm_struct *mm = priv->mm;
 890	struct vm_area_struct *vma;
 891	unsigned long vma_start = 0, last_vma_end = 0;
 892	int ret = 0;
 893	MA_STATE(mas, &mm->mm_mt, 0, 0);
 894
 895	priv->task = get_proc_task(priv->inode);
 896	if (!priv->task)
 897		return -ESRCH;
 898
 899	if (!mm || !mmget_not_zero(mm)) {
 900		ret = -ESRCH;
 901		goto out_put_task;
 902	}
 903
 904	memset(&mss, 0, sizeof(mss));
 905
 906	ret = mmap_read_lock_killable(mm);
 907	if (ret)
 908		goto out_put_mm;
 909
 910	hold_task_mempolicy(priv);
 911	vma = mas_find(&mas, ULONG_MAX);
 912
 913	if (unlikely(!vma))
 914		goto empty_set;
 915
 916	vma_start = vma->vm_start;
 917	do {
 918		smap_gather_stats(vma, &mss, 0);
 919		last_vma_end = vma->vm_end;
 920
 921		/*
 922		 * Release mmap_lock temporarily if someone else wants to
 923		 * access it for a write request.
 924		 */
 925		if (mmap_lock_is_contended(mm)) {
 926			mas_pause(&mas);
 927			mmap_read_unlock(mm);
 928			ret = mmap_read_lock_killable(mm);
 929			if (ret) {
 930				release_task_mempolicy(priv);
 931				goto out_put_mm;
 932			}
 933
 934			/*
 935			 * After dropping the lock, there are four cases to
 936			 * consider. See the following example for explanation.
 937			 *
 938			 *   +------+------+-----------+
 939			 *   | VMA1 | VMA2 | VMA3      |
 940			 *   +------+------+-----------+
 941			 *   |      |      |           |
 942			 *  4k     8k     16k         400k
 943			 *
 944			 * Suppose we drop the lock after reading VMA2 due to
 945			 * contention, then we get:
 946			 *
 947			 *	last_vma_end = 16k
 948			 *
 949			 * 1) VMA2 is freed, but VMA3 exists:
 950			 *
 951			 *    find_vma(mm, 16k - 1) will return VMA3.
 952			 *    In this case, just continue from VMA3.
 953			 *
 954			 * 2) VMA2 still exists:
 955			 *
 956			 *    find_vma(mm, 16k - 1) will return VMA2.
 957			 *    Iterate the loop like the original one.
 958			 *
 959			 * 3) No more VMAs can be found:
 960			 *
 961			 *    find_vma(mm, 16k - 1) will return NULL.
 962			 *    No more things to do, just break.
 963			 *
 964			 * 4) (last_vma_end - 1) is the middle of a vma (VMA'):
 965			 *
 966			 *    find_vma(mm, 16k - 1) will return VMA' whose range
 967			 *    contains last_vma_end.
 968			 *    Iterate VMA' from last_vma_end.
 969			 */
 970			vma = mas_find(&mas, ULONG_MAX);
 971			/* Case 3 above */
 972			if (!vma)
 973				break;
 974
 975			/* Case 1 above */
 976			if (vma->vm_start >= last_vma_end)
 977				continue;
 978
 979			/* Case 4 above */
 980			if (vma->vm_end > last_vma_end)
 981				smap_gather_stats(vma, &mss, last_vma_end);
 982		}
 983		/* Case 2 above */
 984	} while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
 985
 986empty_set:
 987	show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
 988	seq_pad(m, ' ');
 989	seq_puts(m, "[rollup]\n");
 990
 991	__show_smap(m, &mss, true);
 992
 993	release_task_mempolicy(priv);
 994	mmap_read_unlock(mm);
 995
 996out_put_mm:
 997	mmput(mm);
 998out_put_task:
 999	put_task_struct(priv->task);
1000	priv->task = NULL;
1001
1002	return ret;
1003}
1004#undef SEQ_PUT_DEC
1005
1006static const struct seq_operations proc_pid_smaps_op = {
1007	.start	= m_start,
1008	.next	= m_next,
1009	.stop	= m_stop,
1010	.show	= show_smap
1011};
1012
1013static int pid_smaps_open(struct inode *inode, struct file *file)
1014{
1015	return do_maps_open(inode, file, &proc_pid_smaps_op);
1016}
1017
1018static int smaps_rollup_open(struct inode *inode, struct file *file)
1019{
1020	int ret;
1021	struct proc_maps_private *priv;
1022
1023	priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
1024	if (!priv)
1025		return -ENOMEM;
1026
1027	ret = single_open(file, show_smaps_rollup, priv);
1028	if (ret)
1029		goto out_free;
1030
1031	priv->inode = inode;
1032	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
1033	if (IS_ERR(priv->mm)) {
1034		ret = PTR_ERR(priv->mm);
1035
1036		single_release(inode, file);
1037		goto out_free;
1038	}
1039
1040	return 0;
1041
1042out_free:
1043	kfree(priv);
1044	return ret;
1045}
1046
1047static int smaps_rollup_release(struct inode *inode, struct file *file)
1048{
1049	struct seq_file *seq = file->private_data;
1050	struct proc_maps_private *priv = seq->private;
1051
1052	if (priv->mm)
1053		mmdrop(priv->mm);
1054
1055	kfree(priv);
1056	return single_release(inode, file);
1057}
1058
1059const struct file_operations proc_pid_smaps_operations = {
1060	.open		= pid_smaps_open,
1061	.read		= seq_read,
1062	.llseek		= seq_lseek,
1063	.release	= proc_map_release,
1064};
1065
1066const struct file_operations proc_pid_smaps_rollup_operations = {
1067	.open		= smaps_rollup_open,
1068	.read		= seq_read,
1069	.llseek		= seq_lseek,
1070	.release	= smaps_rollup_release,
1071};
1072
1073enum clear_refs_types {
1074	CLEAR_REFS_ALL = 1,
1075	CLEAR_REFS_ANON,
1076	CLEAR_REFS_MAPPED,
1077	CLEAR_REFS_SOFT_DIRTY,
1078	CLEAR_REFS_MM_HIWATER_RSS,
1079	CLEAR_REFS_LAST,
1080};
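/*
 * Editor's note: these values correspond to what userspace writes into
 * /proc/PID/clear_refs (handled by clear_refs_write() below).  Illustrative
 * usage:
 *
 *     echo 1 > /proc/$pid/clear_refs   # clear referenced bits on all pages
 *     echo 4 > /proc/$pid/clear_refs   # clear soft-dirty bits (new tracking pass)
 *     echo 5 > /proc/$pid/clear_refs   # reset peak RSS (VmHWM) to current RSS
 */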
1081
1082struct clear_refs_private {
1083	enum clear_refs_types type;
1084};
1085
1086#ifdef CONFIG_MEM_SOFT_DIRTY
1087
1088static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1089{
1090	struct page *page;
1091
1092	if (!pte_write(pte))
1093		return false;
1094	if (!is_cow_mapping(vma->vm_flags))
1095		return false;
1096	if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
1097		return false;
1098	page = vm_normal_page(vma, addr, pte);
1099	if (!page)
1100		return false;
1101	return page_maybe_dma_pinned(page);
1102}
1103
1104static inline void clear_soft_dirty(struct vm_area_struct *vma,
1105		unsigned long addr, pte_t *pte)
1106{
1107	/*
1108	 * The soft-dirty tracker uses #PF-s to catch writes
1109	 * to pages, so write-protect the pte as well. See the
1110	 * Documentation/admin-guide/mm/soft-dirty.rst for full description
1111	 * of how soft-dirty works.
1112	 */
1113	pte_t ptent = *pte;
1114
1115	if (pte_present(ptent)) {
1116		pte_t old_pte;
1117
1118		if (pte_is_pinned(vma, addr, ptent))
1119			return;
1120		old_pte = ptep_modify_prot_start(vma, addr, pte);
1121		ptent = pte_wrprotect(old_pte);
1122		ptent = pte_clear_soft_dirty(ptent);
1123		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
1124	} else if (is_swap_pte(ptent)) {
1125		ptent = pte_swp_clear_soft_dirty(ptent);
1126		set_pte_at(vma->vm_mm, addr, pte, ptent);
1127	}
1128}
1129#else
1130static inline void clear_soft_dirty(struct vm_area_struct *vma,
1131		unsigned long addr, pte_t *pte)
1132{
1133}
1134#endif
1135
1136#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1137static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1138		unsigned long addr, pmd_t *pmdp)
1139{
1140	pmd_t old, pmd = *pmdp;
1141
1142	if (pmd_present(pmd)) {
1143		/* See comment in change_huge_pmd() */
1144		old = pmdp_invalidate(vma, addr, pmdp);
1145		if (pmd_dirty(old))
1146			pmd = pmd_mkdirty(pmd);
1147		if (pmd_young(old))
1148			pmd = pmd_mkyoung(pmd);
1149
1150		pmd = pmd_wrprotect(pmd);
1151		pmd = pmd_clear_soft_dirty(pmd);
1152
1153		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1154	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
1155		pmd = pmd_swp_clear_soft_dirty(pmd);
1156		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1157	}
1158}
1159#else
1160static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1161		unsigned long addr, pmd_t *pmdp)
1162{
1163}
1164#endif
1165
1166static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
1167				unsigned long end, struct mm_walk *walk)
1168{
1169	struct clear_refs_private *cp = walk->private;
1170	struct vm_area_struct *vma = walk->vma;
1171	pte_t *pte, ptent;
1172	spinlock_t *ptl;
1173	struct page *page;
1174
1175	ptl = pmd_trans_huge_lock(pmd, vma);
1176	if (ptl) {
1177		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1178			clear_soft_dirty_pmd(vma, addr, pmd);
1179			goto out;
1180		}
1181
1182		if (!pmd_present(*pmd))
1183			goto out;
1184
1185		page = pmd_page(*pmd);
1186
1187		/* Clear accessed and referenced bits. */
1188		pmdp_test_and_clear_young(vma, addr, pmd);
1189		test_and_clear_page_young(page);
1190		ClearPageReferenced(page);
1191out:
1192		spin_unlock(ptl);
1193		return 0;
1194	}
1195
1196	if (pmd_trans_unstable(pmd))
1197		return 0;
1198
1199	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1200	for (; addr != end; pte++, addr += PAGE_SIZE) {
1201		ptent = *pte;
1202
1203		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1204			clear_soft_dirty(vma, addr, pte);
1205			continue;
1206		}
1207
1208		if (!pte_present(ptent))
1209			continue;
1210
1211		page = vm_normal_page(vma, addr, ptent);
1212		if (!page)
1213			continue;
1214
1215		/* Clear accessed and referenced bits. */
1216		ptep_test_and_clear_young(vma, addr, pte);
1217		test_and_clear_page_young(page);
1218		ClearPageReferenced(page);
1219	}
1220	pte_unmap_unlock(pte - 1, ptl);
1221	cond_resched();
1222	return 0;
1223}
1224
1225static int clear_refs_test_walk(unsigned long start, unsigned long end,
1226				struct mm_walk *walk)
1227{
1228	struct clear_refs_private *cp = walk->private;
1229	struct vm_area_struct *vma = walk->vma;
1230
1231	if (vma->vm_flags & VM_PFNMAP)
1232		return 1;
1233
1234	/*
1235	 * Writing 1 to /proc/pid/clear_refs affects all pages.
1236	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
1237	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
1238	 * Writing 4 to /proc/pid/clear_refs affects all pages.
1239	 */
1240	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
1241		return 1;
1242	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
1243		return 1;
1244	return 0;
1245}
1246
1247static const struct mm_walk_ops clear_refs_walk_ops = {
1248	.pmd_entry		= clear_refs_pte_range,
1249	.test_walk		= clear_refs_test_walk,
1250};
1251
1252static ssize_t clear_refs_write(struct file *file, const char __user *buf,
1253				size_t count, loff_t *ppos)
1254{
1255	struct task_struct *task;
1256	char buffer[PROC_NUMBUF];
1257	struct mm_struct *mm;
1258	struct vm_area_struct *vma;
1259	enum clear_refs_types type;
1260	int itype;
1261	int rv;
1262
1263	memset(buffer, 0, sizeof(buffer));
1264	if (count > sizeof(buffer) - 1)
1265		count = sizeof(buffer) - 1;
1266	if (copy_from_user(buffer, buf, count))
1267		return -EFAULT;
1268	rv = kstrtoint(strstrip(buffer), 10, &itype);
1269	if (rv < 0)
1270		return rv;
1271	type = (enum clear_refs_types)itype;
1272	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
1273		return -EINVAL;
1274
1275	task = get_proc_task(file_inode(file));
1276	if (!task)
1277		return -ESRCH;
1278	mm = get_task_mm(task);
1279	if (mm) {
1280		MA_STATE(mas, &mm->mm_mt, 0, 0);
1281		struct mmu_notifier_range range;
1282		struct clear_refs_private cp = {
1283			.type = type,
1284		};
1285
1286		if (mmap_write_lock_killable(mm)) {
1287			count = -EINTR;
1288			goto out_mm;
1289		}
1290		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
1291			/*
1292			 * Writing 5 to /proc/pid/clear_refs resets the peak
1293			 * resident set size to this mm's current rss value.
1294			 */
1295			reset_mm_hiwater_rss(mm);
1296			goto out_unlock;
1297		}
1298
1299		if (type == CLEAR_REFS_SOFT_DIRTY) {
1300			mas_for_each(&mas, vma, ULONG_MAX) {
1301				if (!(vma->vm_flags & VM_SOFTDIRTY))
1302					continue;
1303				vma->vm_flags &= ~VM_SOFTDIRTY;
1304				vma_set_page_prot(vma);
1305			}
1306
1307			inc_tlb_flush_pending(mm);
1308			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
1309						0, NULL, mm, 0, -1UL);
1310			mmu_notifier_invalidate_range_start(&range);
1311		}
1312		walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
1313		if (type == CLEAR_REFS_SOFT_DIRTY) {
1314			mmu_notifier_invalidate_range_end(&range);
1315			flush_tlb_mm(mm);
1316			dec_tlb_flush_pending(mm);
1317		}
1318out_unlock:
1319		mmap_write_unlock(mm);
1320out_mm:
1321		mmput(mm);
1322	}
1323	put_task_struct(task);
1324
1325	return count;
1326}
1327
1328const struct file_operations proc_clear_refs_operations = {
1329	.write		= clear_refs_write,
1330	.llseek		= noop_llseek,
1331};
1332
1333typedef struct {
1334	u64 pme;
1335} pagemap_entry_t;
1336
1337struct pagemapread {
1338	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
1339	pagemap_entry_t *buffer;
1340	bool show_pfn;
1341};
1342
1343#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
1344#define PAGEMAP_WALK_MASK	(PMD_MASK)
1345
1346#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
1347#define PM_PFRAME_BITS		55
1348#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1349#define PM_SOFT_DIRTY		BIT_ULL(55)
1350#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
1351#define PM_UFFD_WP		BIT_ULL(57)
1352#define PM_FILE			BIT_ULL(61)
1353#define PM_SWAP			BIT_ULL(62)
1354#define PM_PRESENT		BIT_ULL(63)
1355
1356#define PM_END_OF_BUFFER    1
1357
1358static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
1359{
1360	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
1361}
1362
1363static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
1364			  struct pagemapread *pm)
1365{
1366	pm->buffer[pm->pos++] = *pme;
1367	if (pm->pos >= pm->len)
1368		return PM_END_OF_BUFFER;
1369	return 0;
1370}
1371
1372static int pagemap_pte_hole(unsigned long start, unsigned long end,
1373			    __always_unused int depth, struct mm_walk *walk)
1374{
1375	struct pagemapread *pm = walk->private;
1376	unsigned long addr = start;
1377	int err = 0;
1378
1379	while (addr < end) {
1380		struct vm_area_struct *vma = find_vma(walk->mm, addr);
1381		pagemap_entry_t pme = make_pme(0, 0);
1382		/* End of address space hole, which we mark as non-present. */
1383		unsigned long hole_end;
1384
1385		if (vma)
1386			hole_end = min(end, vma->vm_start);
1387		else
1388			hole_end = end;
1389
1390		for (; addr < hole_end; addr += PAGE_SIZE) {
1391			err = add_to_pagemap(addr, &pme, pm);
1392			if (err)
1393				goto out;
1394		}
1395
1396		if (!vma)
1397			break;
1398
1399		/* Addresses in the VMA. */
1400		if (vma->vm_flags & VM_SOFTDIRTY)
1401			pme = make_pme(0, PM_SOFT_DIRTY);
1402		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
1403			err = add_to_pagemap(addr, &pme, pm);
1404			if (err)
1405				goto out;
1406		}
1407	}
1408out:
1409	return err;
1410}
1411
1412static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
1413		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1414{
1415	u64 frame = 0, flags = 0;
1416	struct page *page = NULL;
1417	bool migration = false;
1418
1419	if (pte_present(pte)) {
1420		if (pm->show_pfn)
1421			frame = pte_pfn(pte);
1422		flags |= PM_PRESENT;
1423		page = vm_normal_page(vma, addr, pte);
1424		if (pte_soft_dirty(pte))
1425			flags |= PM_SOFT_DIRTY;
1426		if (pte_uffd_wp(pte))
1427			flags |= PM_UFFD_WP;
1428	} else if (is_swap_pte(pte)) {
1429		swp_entry_t entry;
1430		if (pte_swp_soft_dirty(pte))
1431			flags |= PM_SOFT_DIRTY;
1432		if (pte_swp_uffd_wp(pte))
1433			flags |= PM_UFFD_WP;
1434		entry = pte_to_swp_entry(pte);
1435		if (pm->show_pfn) {
1436			pgoff_t offset;
1437			/*
1438			 * For PFN swap offsets, keeping the offset field
1439			 * to be PFN only to be compatible with old smaps.
1440			 */
1441			if (is_pfn_swap_entry(entry))
1442				offset = swp_offset_pfn(entry);
1443			else
1444				offset = swp_offset(entry);
1445			frame = swp_type(entry) |
1446			    (offset << MAX_SWAPFILES_SHIFT);
1447		}
1448		flags |= PM_SWAP;
1449		migration = is_migration_entry(entry);
1450		if (is_pfn_swap_entry(entry))
1451			page = pfn_swap_entry_to_page(entry);
1452		if (pte_marker_entry_uffd_wp(entry))
1453			flags |= PM_UFFD_WP;
1454	}
1455
1456	if (page && !PageAnon(page))
1457		flags |= PM_FILE;
1458	if (page && !migration && page_mapcount(page) == 1)
1459		flags |= PM_MMAP_EXCLUSIVE;
1460	if (vma->vm_flags & VM_SOFTDIRTY)
1461		flags |= PM_SOFT_DIRTY;
1462
1463	return make_pme(frame, flags);
1464}
1465
1466static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1467			     struct mm_walk *walk)
1468{
1469	struct vm_area_struct *vma = walk->vma;
1470	struct pagemapread *pm = walk->private;
1471	spinlock_t *ptl;
1472	pte_t *pte, *orig_pte;
1473	int err = 0;
1474#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1475	bool migration = false;
1476
1477	ptl = pmd_trans_huge_lock(pmdp, vma);
1478	if (ptl) {
1479		u64 flags = 0, frame = 0;
1480		pmd_t pmd = *pmdp;
1481		struct page *page = NULL;
1482
1483		if (vma->vm_flags & VM_SOFTDIRTY)
1484			flags |= PM_SOFT_DIRTY;
1485
1486		if (pmd_present(pmd)) {
1487			page = pmd_page(pmd);
1488
1489			flags |= PM_PRESENT;
1490			if (pmd_soft_dirty(pmd))
1491				flags |= PM_SOFT_DIRTY;
1492			if (pmd_uffd_wp(pmd))
1493				flags |= PM_UFFD_WP;
1494			if (pm->show_pfn)
1495				frame = pmd_pfn(pmd) +
1496					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1497		}
1498#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1499		else if (is_swap_pmd(pmd)) {
1500			swp_entry_t entry = pmd_to_swp_entry(pmd);
1501			unsigned long offset;
1502
1503			if (pm->show_pfn) {
1504				if (is_pfn_swap_entry(entry))
1505					offset = swp_offset_pfn(entry);
1506				else
1507					offset = swp_offset(entry);
1508				offset = offset +
1509					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1510				frame = swp_type(entry) |
1511					(offset << MAX_SWAPFILES_SHIFT);
1512			}
1513			flags |= PM_SWAP;
1514			if (pmd_swp_soft_dirty(pmd))
1515				flags |= PM_SOFT_DIRTY;
1516			if (pmd_swp_uffd_wp(pmd))
1517				flags |= PM_UFFD_WP;
1518			VM_BUG_ON(!is_pmd_migration_entry(pmd));
1519			migration = is_migration_entry(entry);
1520			page = pfn_swap_entry_to_page(entry);
1521		}
1522#endif
1523
1524		if (page && !migration && page_mapcount(page) == 1)
1525			flags |= PM_MMAP_EXCLUSIVE;
1526
1527		for (; addr != end; addr += PAGE_SIZE) {
1528			pagemap_entry_t pme = make_pme(frame, flags);
1529
1530			err = add_to_pagemap(addr, &pme, pm);
1531			if (err)
1532				break;
1533			if (pm->show_pfn) {
1534				if (flags & PM_PRESENT)
1535					frame++;
1536				else if (flags & PM_SWAP)
1537					frame += (1 << MAX_SWAPFILES_SHIFT);
1538			}
1539		}
1540		spin_unlock(ptl);
1541		return err;
1542	}
1543
1544	if (pmd_trans_unstable(pmdp))
1545		return 0;
1546#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1547
1548	/*
1549	 * We can assume that @vma always points to a valid one and @end never
1550	 * goes beyond vma->vm_end.
1551	 */
1552	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
1553	for (; addr < end; pte++, addr += PAGE_SIZE) {
1554		pagemap_entry_t pme;
1555
1556		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
1557		err = add_to_pagemap(addr, &pme, pm);
1558		if (err)
1559			break;
1560	}
1561	pte_unmap_unlock(orig_pte, ptl);
1562
1563	cond_resched();
1564
1565	return err;
1566}
1567
1568#ifdef CONFIG_HUGETLB_PAGE
 1569/* This function walks within one hugetlb entry in a single call */
1570static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
1571				 unsigned long addr, unsigned long end,
1572				 struct mm_walk *walk)
1573{
1574	struct pagemapread *pm = walk->private;
1575	struct vm_area_struct *vma = walk->vma;
1576	u64 flags = 0, frame = 0;
1577	int err = 0;
1578	pte_t pte;
1579
1580	if (vma->vm_flags & VM_SOFTDIRTY)
1581		flags |= PM_SOFT_DIRTY;
1582
1583	pte = huge_ptep_get(ptep);
1584	if (pte_present(pte)) {
1585		struct page *page = pte_page(pte);
1586
1587		if (!PageAnon(page))
1588			flags |= PM_FILE;
1589
1590		if (page_mapcount(page) == 1)
1591			flags |= PM_MMAP_EXCLUSIVE;
1592
1593		if (huge_pte_uffd_wp(pte))
1594			flags |= PM_UFFD_WP;
1595
1596		flags |= PM_PRESENT;
1597		if (pm->show_pfn)
1598			frame = pte_pfn(pte) +
1599				((addr & ~hmask) >> PAGE_SHIFT);
1600	} else if (pte_swp_uffd_wp_any(pte)) {
1601		flags |= PM_UFFD_WP;
1602	}
1603
1604	for (; addr != end; addr += PAGE_SIZE) {
1605		pagemap_entry_t pme = make_pme(frame, flags);
1606
1607		err = add_to_pagemap(addr, &pme, pm);
1608		if (err)
1609			return err;
1610		if (pm->show_pfn && (flags & PM_PRESENT))
1611			frame++;
1612	}
1613
1614	cond_resched();
1615
1616	return err;
1617}
1618#else
1619#define pagemap_hugetlb_range	NULL
1620#endif /* HUGETLB_PAGE */
1621
1622static const struct mm_walk_ops pagemap_ops = {
1623	.pmd_entry	= pagemap_pmd_range,
1624	.pte_hole	= pagemap_pte_hole,
1625	.hugetlb_entry	= pagemap_hugetlb_range,
1626};
1627
1628/*
1629 * /proc/pid/pagemap - an array mapping virtual pages to pfns
1630 *
1631 * For each page in the address space, this file contains one 64-bit entry
1632 * consisting of the following:
1633 *
1634 * Bits 0-54  page frame number (PFN) if present
1635 * Bits 0-4   swap type if swapped
1636 * Bits 5-54  swap offset if swapped
1637 * Bit  55    pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
1638 * Bit  56    page exclusively mapped
1639 * Bit  57    pte is uffd-wp write-protected
1640 * Bits 58-60 zero
1641 * Bit  61    page is file-page or shared-anon
1642 * Bit  62    page swapped
1643 * Bit  63    page present
1644 *
1645 * If the page is not present but in swap, then the PFN contains an
1646 * encoding of the swap file number and the page's offset into the
1647 * swap. Unmapped pages return a null PFN. This allows determining
1648 * precisely which pages are mapped (or in swap) and comparing mapped
1649 * pages between processes.
1650 *
1651 * Efficient users of this interface will use /proc/pid/maps to
1652 * determine which areas of memory are actually mapped and llseek to
1653 * skip over unmapped regions.
1654 */
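/*
 * Editor's sketch (not part of the original file): a minimal userspace
 * reader for the entry format documented above.  The helper name and error
 * handling are illustrative assumptions; only the layout (one 64-bit entry
 * per virtual page, bit 63 = present, bit 62 = swapped, bits 0-54 = PFN) is
 * taken from the comment above.  Note that PFNs are reported only to
 * readers with CAP_SYS_ADMIN (see pagemap_read() below).
 */
#if 0	/* example only, never built as part of this file */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static uint64_t read_pagemap_entry(pid_t pid, unsigned long vaddr)
{
	char path[64];
	uint64_t entry = 0;
	int fd;

	snprintf(path, sizeof(path), "/proc/%d/pagemap", (int)pid);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 0;
	/* One 64-bit entry per virtual page: seek to (vaddr / page_size) * 8. */
	if (pread(fd, &entry, sizeof(entry),
		  (off_t)(vaddr / sysconf(_SC_PAGESIZE)) * sizeof(entry)) !=
	    sizeof(entry))
		entry = 0;
	close(fd);
	return entry;
}
#endif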
1655static ssize_t pagemap_read(struct file *file, char __user *buf,
1656			    size_t count, loff_t *ppos)
1657{
1658	struct mm_struct *mm = file->private_data;
1659	struct pagemapread pm;
1660	unsigned long src;
1661	unsigned long svpfn;
1662	unsigned long start_vaddr;
1663	unsigned long end_vaddr;
1664	int ret = 0, copied = 0;
1665
1666	if (!mm || !mmget_not_zero(mm))
1667		goto out;
1668
1669	ret = -EINVAL;
1670	/* file position must be aligned */
1671	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1672		goto out_mm;
1673
1674	ret = 0;
1675	if (!count)
1676		goto out_mm;
1677
1678	/* do not disclose physical addresses: attack vector */
1679	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
1680
1681	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1682	pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL);
1683	ret = -ENOMEM;
1684	if (!pm.buffer)
1685		goto out_mm;
1686
1687	src = *ppos;
1688	svpfn = src / PM_ENTRY_BYTES;
1689	end_vaddr = mm->task_size;
1690
1691	/* watch out for wraparound */
1692	start_vaddr = end_vaddr;
1693	if (svpfn <= (ULONG_MAX >> PAGE_SHIFT))
1694		start_vaddr = untagged_addr(svpfn << PAGE_SHIFT);
1695
1696	/* Ensure the address is inside the task */
1697	if (start_vaddr > mm->task_size)
1698		start_vaddr = end_vaddr;
1699
1700	/*
1701	 * The odds are that this will stop walking way
1702	 * before end_vaddr, because the length of the
1703	 * user buffer is tracked in "pm", and the walk
1704	 * will stop when we hit the end of the buffer.
1705	 */
1706	ret = 0;
1707	while (count && (start_vaddr < end_vaddr)) {
1708		int len;
1709		unsigned long end;
1710
1711		pm.pos = 0;
1712		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
1713		/* overflow ? */
1714		if (end < start_vaddr || end > end_vaddr)
1715			end = end_vaddr;
1716		ret = mmap_read_lock_killable(mm);
1717		if (ret)
1718			goto out_free;
1719		ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
1720		mmap_read_unlock(mm);
1721		start_vaddr = end;
1722
1723		len = min(count, PM_ENTRY_BYTES * pm.pos);
1724		if (copy_to_user(buf, pm.buffer, len)) {
1725			ret = -EFAULT;
1726			goto out_free;
1727		}
1728		copied += len;
1729		buf += len;
1730		count -= len;
1731	}
1732	*ppos += copied;
1733	if (!ret || ret == PM_END_OF_BUFFER)
1734		ret = copied;
1735
1736out_free:
1737	kfree(pm.buffer);
1738out_mm:
1739	mmput(mm);
1740out:
1741	return ret;
1742}
1743
1744static int pagemap_open(struct inode *inode, struct file *file)
1745{
1746	struct mm_struct *mm;
1747
1748	mm = proc_mem_open(inode, PTRACE_MODE_READ);
1749	if (IS_ERR(mm))
1750		return PTR_ERR(mm);
1751	file->private_data = mm;
1752	return 0;
1753}
1754
1755static int pagemap_release(struct inode *inode, struct file *file)
1756{
1757	struct mm_struct *mm = file->private_data;
1758
1759	if (mm)
1760		mmdrop(mm);
1761	return 0;
1762}
1763
1764const struct file_operations proc_pagemap_operations = {
1765	.llseek		= mem_lseek, /* borrow this */
1766	.read		= pagemap_read,
1767	.open		= pagemap_open,
1768	.release	= pagemap_release,
1769};
1770#endif /* CONFIG_PROC_PAGE_MONITOR */
1771
1772#ifdef CONFIG_NUMA
1773
1774struct numa_maps {
1775	unsigned long pages;
1776	unsigned long anon;
1777	unsigned long active;
1778	unsigned long writeback;
1779	unsigned long mapcount_max;
1780	unsigned long dirty;
1781	unsigned long swapcache;
1782	unsigned long node[MAX_NUMNODES];
1783};
1784
1785struct numa_maps_private {
1786	struct proc_maps_private proc_maps;
1787	struct numa_maps md;
1788};
1789
1790static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
1791			unsigned long nr_pages)
1792{
1793	int count = page_mapcount(page);
1794
1795	md->pages += nr_pages;
1796	if (pte_dirty || PageDirty(page))
1797		md->dirty += nr_pages;
1798
1799	if (PageSwapCache(page))
1800		md->swapcache += nr_pages;
1801
1802	if (PageActive(page) || PageUnevictable(page))
1803		md->active += nr_pages;
1804
1805	if (PageWriteback(page))
1806		md->writeback += nr_pages;
1807
1808	if (PageAnon(page))
1809		md->anon += nr_pages;
1810
1811	if (count > md->mapcount_max)
1812		md->mapcount_max = count;
1813
1814	md->node[page_to_nid(page)] += nr_pages;
1815}
1816
1817static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1818		unsigned long addr)
1819{
1820	struct page *page;
1821	int nid;
1822
1823	if (!pte_present(pte))
1824		return NULL;
1825
1826	page = vm_normal_page(vma, addr, pte);
1827	if (!page || is_zone_device_page(page))
1828		return NULL;
1829
1830	if (PageReserved(page))
1831		return NULL;
1832
1833	nid = page_to_nid(page);
1834	if (!node_isset(nid, node_states[N_MEMORY]))
1835		return NULL;
1836
1837	return page;
1838}
1839
1840#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1841static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
1842					      struct vm_area_struct *vma,
1843					      unsigned long addr)
1844{
1845	struct page *page;
1846	int nid;
1847
1848	if (!pmd_present(pmd))
1849		return NULL;
1850
1851	page = vm_normal_page_pmd(vma, addr, pmd);
1852	if (!page)
1853		return NULL;
1854
1855	if (PageReserved(page))
1856		return NULL;
1857
1858	nid = page_to_nid(page);
1859	if (!node_isset(nid, node_states[N_MEMORY]))
1860		return NULL;
1861
1862	return page;
1863}
1864#endif
1865
1866static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1867		unsigned long end, struct mm_walk *walk)
1868{
1869	struct numa_maps *md = walk->private;
1870	struct vm_area_struct *vma = walk->vma;
1871	spinlock_t *ptl;
1872	pte_t *orig_pte;
1873	pte_t *pte;
1874
1875#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1876	ptl = pmd_trans_huge_lock(pmd, vma);
1877	if (ptl) {
1878		struct page *page;
1879
1880		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
1881		if (page)
1882			gather_stats(page, md, pmd_dirty(*pmd),
1883				     HPAGE_PMD_SIZE/PAGE_SIZE);
1884		spin_unlock(ptl);
1885		return 0;
1886	}
1887
1888	if (pmd_trans_unstable(pmd))
1889		return 0;
1890#endif
1891	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1892	do {
1893		struct page *page = can_gather_numa_stats(*pte, vma, addr);
1894		if (!page)
1895			continue;
1896		gather_stats(page, md, pte_dirty(*pte), 1);
1897
1898	} while (pte++, addr += PAGE_SIZE, addr != end);
1899	pte_unmap_unlock(orig_pte, ptl);
1900	cond_resched();
1901	return 0;
1902}
1903#ifdef CONFIG_HUGETLB_PAGE
1904static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1905		unsigned long addr, unsigned long end, struct mm_walk *walk)
1906{
1907	pte_t huge_pte = huge_ptep_get(pte);
1908	struct numa_maps *md;
1909	struct page *page;
1910
1911	if (!pte_present(huge_pte))
1912		return 0;
1913
1914	page = pte_page(huge_pte);
1915
1916	md = walk->private;
1917	gather_stats(page, md, pte_dirty(huge_pte), 1);
1918	return 0;
1919}
1920
1921#else
1922static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1923		unsigned long addr, unsigned long end, struct mm_walk *walk)
1924{
1925	return 0;
1926}
1927#endif
1928
1929static const struct mm_walk_ops show_numa_ops = {
1930	.hugetlb_entry = gather_hugetlb_stats,
1931	.pmd_entry = gather_pte_stats,
1932};
1933
1934/*
1935 * Display pages allocated per node and memory policy via /proc.
1936 */
1937static int show_numa_map(struct seq_file *m, void *v)
1938{
1939	struct numa_maps_private *numa_priv = m->private;
1940	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
1941	struct vm_area_struct *vma = v;
1942	struct numa_maps *md = &numa_priv->md;
1943	struct file *file = vma->vm_file;
1944	struct mm_struct *mm = vma->vm_mm;
1945	struct mempolicy *pol;
1946	char buffer[64];
1947	int nid;
1948
1949	if (!mm)
1950		return 0;
1951
1952	/* Ensure we start with an empty set of numa_maps statistics. */
1953	memset(md, 0, sizeof(*md));
1954
1955	pol = __get_vma_policy(vma, vma->vm_start);
1956	if (pol) {
1957		mpol_to_str(buffer, sizeof(buffer), pol);
1958		mpol_cond_put(pol);
1959	} else {
1960		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
1961	}
1962
1963	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1964
1965	if (file) {
1966		seq_puts(m, " file=");
1967		seq_file_path(m, file, "\n\t= ");
1968	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1969		seq_puts(m, " heap");
1970	} else if (is_stack(vma)) {
1971		seq_puts(m, " stack");
1972	}
1973
1974	if (is_vm_hugetlb_page(vma))
1975		seq_puts(m, " huge");
1976
1977	/* mmap_lock is held by m_start */
1978	walk_page_vma(vma, &show_numa_ops, md);
1979
1980	if (!md->pages)
1981		goto out;
1982
1983	if (md->anon)
1984		seq_printf(m, " anon=%lu", md->anon);
1985
1986	if (md->dirty)
1987		seq_printf(m, " dirty=%lu", md->dirty);
1988
1989	if (md->pages != md->anon && md->pages != md->dirty)
1990		seq_printf(m, " mapped=%lu", md->pages);
1991
1992	if (md->mapcount_max > 1)
1993		seq_printf(m, " mapmax=%lu", md->mapcount_max);
1994
1995	if (md->swapcache)
1996		seq_printf(m, " swapcache=%lu", md->swapcache);
1997
1998	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1999		seq_printf(m, " active=%lu", md->active);
2000
2001	if (md->writeback)
2002		seq_printf(m, " writeback=%lu", md->writeback);
2003
2004	for_each_node_state(nid, N_MEMORY)
2005		if (md->node[nid])
2006			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
2007
2008	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
2009out:
2010	seq_putc(m, '\n');
2011	return 0;
2012}
2013
2014static const struct seq_operations proc_pid_numa_maps_op = {
2015	.start  = m_start,
2016	.next   = m_next,
2017	.stop   = m_stop,
2018	.show   = show_numa_map,
2019};
2020
2021static int pid_numa_maps_open(struct inode *inode, struct file *file)
2022{
2023	return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
2024				sizeof(struct numa_maps_private));
2025}
2026
2027const struct file_operations proc_pid_numa_maps_operations = {
2028	.open		= pid_numa_maps_open,
2029	.read		= seq_read,
2030	.llseek		= seq_lseek,
2031	.release	= proc_map_release,
2032};
2033
2034#endif /* CONFIG_NUMA */
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/pagewalk.h>
   3#include <linux/mm_inline.h>
   4#include <linux/hugetlb.h>
   5#include <linux/huge_mm.h>
   6#include <linux/mount.h>
   7#include <linux/ksm.h>
   8#include <linux/seq_file.h>
   9#include <linux/highmem.h>
  10#include <linux/ptrace.h>
  11#include <linux/slab.h>
  12#include <linux/pagemap.h>
  13#include <linux/mempolicy.h>
  14#include <linux/rmap.h>
  15#include <linux/swap.h>
  16#include <linux/sched/mm.h>
  17#include <linux/swapops.h>
  18#include <linux/mmu_notifier.h>
  19#include <linux/page_idle.h>
  20#include <linux/shmem_fs.h>
  21#include <linux/uaccess.h>
  22#include <linux/pkeys.h>
  23#include <linux/minmax.h>
  24#include <linux/overflow.h>
  25#include <linux/buildid.h>
  26
  27#include <asm/elf.h>
  28#include <asm/tlb.h>
  29#include <asm/tlbflush.h>
  30#include "internal.h"
  31
  32#define SEQ_PUT_DEC(str, val) \
  33		seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
  34void task_mem(struct seq_file *m, struct mm_struct *mm)
  35{
  36	unsigned long text, lib, swap, anon, file, shmem;
  37	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
  38
  39	anon = get_mm_counter(mm, MM_ANONPAGES);
  40	file = get_mm_counter(mm, MM_FILEPAGES);
  41	shmem = get_mm_counter(mm, MM_SHMEMPAGES);
  42
  43	/*
  44	 * Note: to minimize their overhead, mm maintains hiwater_vm and
  45	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
  46	 * collector of these hiwater stats must therefore get total_vm
  47	 * and rss too, which will usually be the higher.  Barriers? not
  48	 * worth the effort, such snapshots can always be inconsistent.
  49	 */
  50	hiwater_vm = total_vm = mm->total_vm;
  51	if (hiwater_vm < mm->hiwater_vm)
  52		hiwater_vm = mm->hiwater_vm;
  53	hiwater_rss = total_rss = anon + file + shmem;
  54	if (hiwater_rss < mm->hiwater_rss)
  55		hiwater_rss = mm->hiwater_rss;
  56
  57	/* split executable areas between text and lib */
  58	text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
  59	text = min(text, mm->exec_vm << PAGE_SHIFT);
  60	lib = (mm->exec_vm << PAGE_SHIFT) - text;
  61
  62	swap = get_mm_counter(mm, MM_SWAPENTS);
  63	SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
  64	SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
  65	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
  66	SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
  67	SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
  68	SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
  69	SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
  70	SEQ_PUT_DEC(" kB\nRssFile:\t", file);
  71	SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
  72	SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
  73	SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
  74	seq_put_decimal_ull_width(m,
  75		    " kB\nVmExe:\t", text >> 10, 8);
  76	seq_put_decimal_ull_width(m,
  77		    " kB\nVmLib:\t", lib >> 10, 8);
  78	seq_put_decimal_ull_width(m,
  79		    " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
  80	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
  81	seq_puts(m, " kB\n");
  82	hugetlb_report_usage(m, mm);
  83}
  84#undef SEQ_PUT_DEC
  85
  86unsigned long task_vsize(struct mm_struct *mm)
  87{
  88	return PAGE_SIZE * mm->total_vm;
  89}
  90
  91unsigned long task_statm(struct mm_struct *mm,
  92			 unsigned long *shared, unsigned long *text,
  93			 unsigned long *data, unsigned long *resident)
  94{
  95	*shared = get_mm_counter(mm, MM_FILEPAGES) +
  96			get_mm_counter(mm, MM_SHMEMPAGES);
  97	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
  98								>> PAGE_SHIFT;
  99	*data = mm->data_vm + mm->stack_vm;
 100	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
 101	return mm->total_vm;
 102}
 103
 104#ifdef CONFIG_NUMA
 105/*
 106 * Save get_task_policy() for show_numa_map().
 107 */
 108static void hold_task_mempolicy(struct proc_maps_private *priv)
 109{
 110	struct task_struct *task = priv->task;
 111
 112	task_lock(task);
 113	priv->task_mempolicy = get_task_policy(task);
 114	mpol_get(priv->task_mempolicy);
 115	task_unlock(task);
 116}
 117static void release_task_mempolicy(struct proc_maps_private *priv)
 118{
 119	mpol_put(priv->task_mempolicy);
 120}
 121#else
 122static void hold_task_mempolicy(struct proc_maps_private *priv)
 123{
 124}
 125static void release_task_mempolicy(struct proc_maps_private *priv)
 126{
 127}
 128#endif
 129
 130static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
 131						loff_t *ppos)
 132{
 133	struct vm_area_struct *vma = vma_next(&priv->iter);
 134
 135	if (vma) {
 136		*ppos = vma->vm_start;
 137	} else {
 138		*ppos = -2UL;
 139		vma = get_gate_vma(priv->mm);
 140	}
 141
 142	return vma;
 143}
 144
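/*
 * m_start()/m_next() use *ppos as a resume cookie: the start address of
 * the next VMA while iterating, -2UL while the gate VMA is being emitted,
 * and -1UL once the walk is complete.
 */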
 145static void *m_start(struct seq_file *m, loff_t *ppos)
 146{
 147	struct proc_maps_private *priv = m->private;
 148	unsigned long last_addr = *ppos;
 149	struct mm_struct *mm;
 150
 151	/* See m_next(). Zero at the start or after lseek. */
 152	if (last_addr == -1UL)
 153		return NULL;
 154
 155	priv->task = get_proc_task(priv->inode);
 156	if (!priv->task)
 157		return ERR_PTR(-ESRCH);
 158
 159	mm = priv->mm;
 160	if (!mm || !mmget_not_zero(mm)) {
 161		put_task_struct(priv->task);
 162		priv->task = NULL;
 163		return NULL;
 164	}
 165
 166	if (mmap_read_lock_killable(mm)) {
 167		mmput(mm);
 168		put_task_struct(priv->task);
 169		priv->task = NULL;
 170		return ERR_PTR(-EINTR);
 171	}
 172
 173	vma_iter_init(&priv->iter, mm, last_addr);
 174	hold_task_mempolicy(priv);
 175	if (last_addr == -2UL)
 176		return get_gate_vma(mm);
 177
 178	return proc_get_vma(priv, ppos);
 179}
 180
 181static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
 182{
 183	if (*ppos == -2UL) {
 184		*ppos = -1UL;
 185		return NULL;
 186	}
 187	return proc_get_vma(m->private, ppos);
 188}
 189
 190static void m_stop(struct seq_file *m, void *v)
 191{
 192	struct proc_maps_private *priv = m->private;
 193	struct mm_struct *mm = priv->mm;
 194
 195	if (!priv->task)
 196		return;
 197
 198	release_task_mempolicy(priv);
 199	mmap_read_unlock(mm);
 200	mmput(mm);
 201	put_task_struct(priv->task);
 202	priv->task = NULL;
 203}
 204
 205static int proc_maps_open(struct inode *inode, struct file *file,
 206			const struct seq_operations *ops, int psize)
 207{
 208	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
 209
 210	if (!priv)
 211		return -ENOMEM;
 212
 213	priv->inode = inode;
 214	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
 215	if (IS_ERR(priv->mm)) {
 216		int err = PTR_ERR(priv->mm);
 217
 218		seq_release_private(inode, file);
 219		return err;
 220	}
 221
 222	return 0;
 223}
 224
 225static int proc_map_release(struct inode *inode, struct file *file)
 226{
 227	struct seq_file *seq = file->private_data;
 228	struct proc_maps_private *priv = seq->private;
 229
 230	if (priv->mm)
 231		mmdrop(priv->mm);
 232
 233	return seq_release_private(inode, file);
 234}
 235
 236static int do_maps_open(struct inode *inode, struct file *file,
 237			const struct seq_operations *ops)
 238{
 239	return proc_maps_open(inode, file, ops,
 240				sizeof(struct proc_maps_private));
 241}
 242
 243static void get_vma_name(struct vm_area_struct *vma,
 244			 const struct path **path,
 245			 const char **name,
 246			 const char **name_fmt)
 247{
 248	struct anon_vma_name *anon_name = vma->vm_mm ? anon_vma_name(vma) : NULL;
 249
 250	*name = NULL;
 251	*path = NULL;
 252	*name_fmt = NULL;
 253
 254	/*
 255	 * Print the dentry name for named mappings, and a
 256	 * special [heap] marker for the heap:
 257	 */
 258	if (vma->vm_file) {
 259		/*
 260		 * If user named this anon shared memory via
 261		 * prctl(PR_SET_VMA ..., use the provided name.
 262		 */
 263		if (anon_name) {
 264			*name_fmt = "[anon_shmem:%s]";
 265			*name = anon_name->name;
 266		} else {
 267			*path = file_user_path(vma->vm_file);
 268		}
 269		return;
 270	}
 271
 272	if (vma->vm_ops && vma->vm_ops->name) {
 273		*name = vma->vm_ops->name(vma);
 274		if (*name)
 275			return;
 276	}
 277
 278	*name = arch_vma_name(vma);
 279	if (*name)
 280		return;
 281
 282	if (!vma->vm_mm) {
 283		*name = "[vdso]";
 284		return;
 285	}
 286
 287	if (vma_is_initial_heap(vma)) {
 288		*name = "[heap]";
 289		return;
 290	}
 291
 292	if (vma_is_initial_stack(vma)) {
 293		*name = "[stack]";
 294		return;
 295	}
 296
 297	if (anon_name) {
 298		*name_fmt = "[anon:%s]";
 299		*name = anon_name->name;
 300		return;
 301	}
 302}
 303
 304static void show_vma_header_prefix(struct seq_file *m,
 305				   unsigned long start, unsigned long end,
 306				   vm_flags_t flags, unsigned long long pgoff,
 307				   dev_t dev, unsigned long ino)
 308{
 309	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 310	seq_put_hex_ll(m, NULL, start, 8);
 311	seq_put_hex_ll(m, "-", end, 8);
 312	seq_putc(m, ' ');
 313	seq_putc(m, flags & VM_READ ? 'r' : '-');
 314	seq_putc(m, flags & VM_WRITE ? 'w' : '-');
 315	seq_putc(m, flags & VM_EXEC ? 'x' : '-');
 316	seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
 317	seq_put_hex_ll(m, " ", pgoff, 8);
 318	seq_put_hex_ll(m, " ", MAJOR(dev), 2);
 319	seq_put_hex_ll(m, ":", MINOR(dev), 2);
 320	seq_put_decimal_ull(m, " ", ino);
 321	seq_putc(m, ' ');
 322}
 323
 324static void
 325show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 326{
 327	const struct path *path;
 328	const char *name_fmt, *name;
 329	vm_flags_t flags = vma->vm_flags;
 330	unsigned long ino = 0;
 331	unsigned long long pgoff = 0;
 332	unsigned long start, end;
 333	dev_t dev = 0;
 334
 335	if (vma->vm_file) {
 336		const struct inode *inode = file_user_inode(vma->vm_file);
 337
 338		dev = inode->i_sb->s_dev;
 339		ino = inode->i_ino;
 340		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
 341	}
 342
 343	start = vma->vm_start;
 344	end = vma->vm_end;
 345	show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
 346
 347	get_vma_name(vma, &path, &name, &name_fmt);
 348	if (path) {
 349		seq_pad(m, ' ');
 350		seq_path(m, path, "\n");
 351	} else if (name_fmt) {
 352		seq_pad(m, ' ');
 353		seq_printf(m, name_fmt, name);
 354	} else if (name) {
 355		seq_pad(m, ' ');
 356		seq_puts(m, name);
 357	}
 358	seq_putc(m, '\n');
 359}
 360
 361static int show_map(struct seq_file *m, void *v)
 362{
 363	show_map_vma(m, v);
 364	return 0;
 365}
 366
 367static const struct seq_operations proc_pid_maps_op = {
 368	.start	= m_start,
 369	.next	= m_next,
 370	.stop	= m_stop,
 371	.show	= show_map
 372};
 373
 374static int pid_maps_open(struct inode *inode, struct file *file)
 375{
 376	return do_maps_open(inode, file, &proc_pid_maps_op);
 377}
 378
 379#define PROCMAP_QUERY_VMA_FLAGS (				\
 380		PROCMAP_QUERY_VMA_READABLE |			\
 381		PROCMAP_QUERY_VMA_WRITABLE |			\
 382		PROCMAP_QUERY_VMA_EXECUTABLE |			\
 383		PROCMAP_QUERY_VMA_SHARED			\
 384)
 385
 386#define PROCMAP_QUERY_VALID_FLAGS_MASK (			\
 387		PROCMAP_QUERY_COVERING_OR_NEXT_VMA |		\
 388		PROCMAP_QUERY_FILE_BACKED_VMA |			\
 389		PROCMAP_QUERY_VMA_FLAGS				\
 390)
 391
 392static int query_vma_setup(struct mm_struct *mm)
 393{
 394	return mmap_read_lock_killable(mm);
 395}
 396
 397static void query_vma_teardown(struct mm_struct *mm, struct vm_area_struct *vma)
 398{
 399	mmap_read_unlock(mm);
 400}
 401
 402static struct vm_area_struct *query_vma_find_by_addr(struct mm_struct *mm, unsigned long addr)
 403{
 404	return find_vma(mm, addr);
 405}
 406
 407static struct vm_area_struct *query_matching_vma(struct mm_struct *mm,
 408						 unsigned long addr, u32 flags)
 409{
 410	struct vm_area_struct *vma;
 411
 412next_vma:
 413	vma = query_vma_find_by_addr(mm, addr);
 414	if (!vma)
 415		goto no_vma;
 416
 417	/* user requested only file-backed VMA, keep iterating */
 418	if ((flags & PROCMAP_QUERY_FILE_BACKED_VMA) && !vma->vm_file)
 419		goto skip_vma;
 420
 421	/* VMA permissions should satisfy query flags */
 422	if (flags & PROCMAP_QUERY_VMA_FLAGS) {
 423		u32 perm = 0;
 424
 425		if (flags & PROCMAP_QUERY_VMA_READABLE)
 426			perm |= VM_READ;
 427		if (flags & PROCMAP_QUERY_VMA_WRITABLE)
 428			perm |= VM_WRITE;
 429		if (flags & PROCMAP_QUERY_VMA_EXECUTABLE)
 430			perm |= VM_EXEC;
 431		if (flags & PROCMAP_QUERY_VMA_SHARED)
 432			perm |= VM_MAYSHARE;
 433
 434		if ((vma->vm_flags & perm) != perm)
 435			goto skip_vma;
 436	}
 437
 438	/* found covering VMA or user is OK with the matching next VMA */
 439	if ((flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA) || vma->vm_start <= addr)
 440		return vma;
 441
 442skip_vma:
 443	/*
 444	 * If the user needs closest matching VMA, keep iterating.
 445	 */
 446	addr = vma->vm_end;
 447	if (flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA)
 448		goto next_vma;
 449
 450no_vma:
 451	return ERR_PTR(-ENOENT);
 452}
 453
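/*
 * Handler for the PROCMAP_QUERY ioctl on /proc/<pid>/maps: copy a
 * struct procmap_query from userspace, look up the matching VMA under
 * mmap_lock, and fill in its range, permissions, file info and
 * (optionally) name and build ID before copying the result back.
 */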
 454static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
 455{
 456	struct procmap_query karg;
 457	struct vm_area_struct *vma;
 458	struct mm_struct *mm;
 459	const char *name = NULL;
 460	char build_id_buf[BUILD_ID_SIZE_MAX], *name_buf = NULL;
 461	__u64 usize;
 462	int err;
 463
 464	if (copy_from_user(&usize, (void __user *)uarg, sizeof(usize)))
 465		return -EFAULT;
 466	/* argument struct can never be that large, reject abuse */
 467	if (usize > PAGE_SIZE)
 468		return -E2BIG;
 469	/* argument struct should have at least query_flags and query_addr fields */
 470	if (usize < offsetofend(struct procmap_query, query_addr))
 471		return -EINVAL;
 472	err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 473	if (err)
 474		return err;
 475
 476	/* reject unknown flags */
 477	if (karg.query_flags & ~PROCMAP_QUERY_VALID_FLAGS_MASK)
 478		return -EINVAL;
 479	/* either both buffer address and size are set, or both should be zero */
 480	if (!!karg.vma_name_size != !!karg.vma_name_addr)
 481		return -EINVAL;
 482	if (!!karg.build_id_size != !!karg.build_id_addr)
 483		return -EINVAL;
 484
 485	mm = priv->mm;
 486	if (!mm || !mmget_not_zero(mm))
 487		return -ESRCH;
 488
 489	err = query_vma_setup(mm);
 490	if (err) {
 491		mmput(mm);
 492		return err;
 493	}
 494
 495	vma = query_matching_vma(mm, karg.query_addr, karg.query_flags);
 496	if (IS_ERR(vma)) {
 497		err = PTR_ERR(vma);
 498		vma = NULL;
 499		goto out;
 500	}
 501
 502	karg.vma_start = vma->vm_start;
 503	karg.vma_end = vma->vm_end;
 504
 505	karg.vma_flags = 0;
 506	if (vma->vm_flags & VM_READ)
 507		karg.vma_flags |= PROCMAP_QUERY_VMA_READABLE;
 508	if (vma->vm_flags & VM_WRITE)
 509		karg.vma_flags |= PROCMAP_QUERY_VMA_WRITABLE;
 510	if (vma->vm_flags & VM_EXEC)
 511		karg.vma_flags |= PROCMAP_QUERY_VMA_EXECUTABLE;
 512	if (vma->vm_flags & VM_MAYSHARE)
 513		karg.vma_flags |= PROCMAP_QUERY_VMA_SHARED;
 514
 515	karg.vma_page_size = vma_kernel_pagesize(vma);
 516
 517	if (vma->vm_file) {
 518		const struct inode *inode = file_user_inode(vma->vm_file);
 519
 520		karg.vma_offset = ((__u64)vma->vm_pgoff) << PAGE_SHIFT;
 521		karg.dev_major = MAJOR(inode->i_sb->s_dev);
 522		karg.dev_minor = MINOR(inode->i_sb->s_dev);
 523		karg.inode = inode->i_ino;
 524	} else {
 525		karg.vma_offset = 0;
 526		karg.dev_major = 0;
 527		karg.dev_minor = 0;
 528		karg.inode = 0;
 529	}
 530
 531	if (karg.build_id_size) {
 532		__u32 build_id_sz;
 533
 534		err = build_id_parse(vma, build_id_buf, &build_id_sz);
 535		if (err) {
 536			karg.build_id_size = 0;
 537		} else {
 538			if (karg.build_id_size < build_id_sz) {
 539				err = -ENAMETOOLONG;
 540				goto out;
 541			}
 542			karg.build_id_size = build_id_sz;
 543		}
 544	}
 545
 546	if (karg.vma_name_size) {
 547		size_t name_buf_sz = min_t(size_t, PATH_MAX, karg.vma_name_size);
 548		const struct path *path;
 549		const char *name_fmt;
 550		size_t name_sz = 0;
 551
 552		get_vma_name(vma, &path, &name, &name_fmt);
 553
 554		if (path || name_fmt || name) {
 555			name_buf = kmalloc(name_buf_sz, GFP_KERNEL);
 556			if (!name_buf) {
 557				err = -ENOMEM;
 558				goto out;
 559			}
 560		}
 561		if (path) {
 562			name = d_path(path, name_buf, name_buf_sz);
 563			if (IS_ERR(name)) {
 564				err = PTR_ERR(name);
 565				goto out;
 566			}
 567			name_sz = name_buf + name_buf_sz - name;
 568		} else if (name || name_fmt) {
 569			name_sz = 1 + snprintf(name_buf, name_buf_sz, name_fmt ?: "%s", name);
 570			name = name_buf;
 571		}
 572		if (name_sz > name_buf_sz) {
 573			err = -ENAMETOOLONG;
 574			goto out;
 575		}
 576		karg.vma_name_size = name_sz;
 577	}
 578
 579	/* unlock vma or mmap_lock, and put mm_struct before copying data to user */
 580	query_vma_teardown(mm, vma);
 581	mmput(mm);
 582
 583	if (karg.vma_name_size && copy_to_user(u64_to_user_ptr(karg.vma_name_addr),
 584					       name, karg.vma_name_size)) {
 585		kfree(name_buf);
 586		return -EFAULT;
 587	}
 588	kfree(name_buf);
 589
 590	if (karg.build_id_size && copy_to_user(u64_to_user_ptr(karg.build_id_addr),
 591					       build_id_buf, karg.build_id_size))
 592		return -EFAULT;
 593
 594	if (copy_to_user(uarg, &karg, min_t(size_t, sizeof(karg), usize)))
 595		return -EFAULT;
 596
 597	return 0;
 598
 599out:
 600	query_vma_teardown(mm, vma);
 601	mmput(mm);
 602	kfree(name_buf);
 603	return err;
 604}
 605
 606static long procfs_procmap_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 607{
 608	struct seq_file *seq = file->private_data;
 609	struct proc_maps_private *priv = seq->private;
 610
 611	switch (cmd) {
 612	case PROCMAP_QUERY:
 613		return do_procmap_query(priv, (void __user *)arg);
 614	default:
 615		return -ENOIOCTLCMD;
 616	}
 617}
 618
 619const struct file_operations proc_pid_maps_operations = {
 620	.open		= pid_maps_open,
 621	.read		= seq_read,
 622	.llseek		= seq_lseek,
 623	.release	= proc_map_release,
 624	.unlocked_ioctl = procfs_procmap_ioctl,
 625	.compat_ioctl	= compat_ptr_ioctl,
 626};
 627
 628/*
 629 * Proportional Set Size(PSS): my share of RSS.
 630 *
 631 * PSS of a process is the count of pages it has in memory, where each
 632 * page is divided by the number of processes sharing it.  So if a
 633 * process has 1000 pages all to itself, and 1000 shared with one other
 634 * process, its PSS will be 1500.
 635 *
 636 * To keep (accumulated) division errors low, we adopt a 64bit
 637 * fixed-point pss counter to minimize division errors. So (pss >>
 638 * PSS_SHIFT) would be the real byte count.
 639 *
 640 * A shift of 12 before division means (assuming 4K page size):
 641 * 	- 1M 3-user-pages add up to 8KB errors;
 642 * 	- supports mapcount up to 2^24, or 16M;
 643 * 	- supports PSS up to 2^52 bytes, or 4PB.
 644 */
 645#define PSS_SHIFT 12
 646
 647#ifdef CONFIG_PROC_PAGE_MONITOR
 648struct mem_size_stats {
 649	unsigned long resident;
 650	unsigned long shared_clean;
 651	unsigned long shared_dirty;
 652	unsigned long private_clean;
 653	unsigned long private_dirty;
 654	unsigned long referenced;
 655	unsigned long anonymous;
 656	unsigned long lazyfree;
 657	unsigned long anonymous_thp;
 658	unsigned long shmem_thp;
 659	unsigned long file_thp;
 660	unsigned long swap;
 661	unsigned long shared_hugetlb;
 662	unsigned long private_hugetlb;
 663	unsigned long ksm;
 664	u64 pss;
 665	u64 pss_anon;
 666	u64 pss_file;
 667	u64 pss_shmem;
 668	u64 pss_dirty;
 669	u64 pss_locked;
 670	u64 swap_pss;
 671};
 672
 673static void smaps_page_accumulate(struct mem_size_stats *mss,
 674		struct folio *folio, unsigned long size, unsigned long pss,
 675		bool dirty, bool locked, bool private)
 676{
 677	mss->pss += pss;
 678
 679	if (folio_test_anon(folio))
 680		mss->pss_anon += pss;
 681	else if (folio_test_swapbacked(folio))
 682		mss->pss_shmem += pss;
 683	else
 684		mss->pss_file += pss;
 685
 686	if (locked)
 687		mss->pss_locked += pss;
 688
 689	if (dirty || folio_test_dirty(folio)) {
 690		mss->pss_dirty += pss;
 691		if (private)
 692			mss->private_dirty += size;
 693		else
 694			mss->shared_dirty += size;
 695	} else {
 696		if (private)
 697			mss->private_clean += size;
 698		else
 699			mss->shared_clean += size;
 700	}
 701}
 702
 703static void smaps_account(struct mem_size_stats *mss, struct page *page,
 704		bool compound, bool young, bool dirty, bool locked,
 705		bool present)
 706{
 707	struct folio *folio = page_folio(page);
 708	int i, nr = compound ? compound_nr(page) : 1;
 709	unsigned long size = nr * PAGE_SIZE;
 710
 711	/*
 712	 * First accumulate quantities that depend only on |size| and the type
 713	 * of the compound page.
 714	 */
 715	if (folio_test_anon(folio)) {
 716		mss->anonymous += size;
 717		if (!folio_test_swapbacked(folio) && !dirty &&
 718		    !folio_test_dirty(folio))
 719			mss->lazyfree += size;
 720	}
 721
 722	if (folio_test_ksm(folio))
 723		mss->ksm += size;
 724
 725	mss->resident += size;
 726	/* Accumulate the size in pages that have been accessed. */
 727	if (young || folio_test_young(folio) || folio_test_referenced(folio))
 728		mss->referenced += size;
 729
 730	/*
 731	 * Then accumulate quantities that may depend on sharing, or that may
 732	 * differ page-by-page.
 733	 *
 734	 * refcount == 1 for present entries guarantees that the folio is mapped
 735	 * exactly once. For large folios this implies that exactly one
 736	 * PTE/PMD/... maps (a part of) this folio.
 737	 *
 738	 * Treat all non-present entries (where relying on the mapcount and
 739	 * refcount doesn't make sense) as "maybe shared, but not sure how
 740	 * often". We treat device private entries as being fake-present.
 741	 *
 742	 * Note that it would not be safe to read the mapcount especially for
 743	 * pages referenced by migration entries, even with the PTL held.
 744	 */
 745	if (folio_ref_count(folio) == 1 || !present) {
 746		smaps_page_accumulate(mss, folio, size, size << PSS_SHIFT,
 747				      dirty, locked, present);
 748		return;
 749	}
 750	/*
 751	 * We obtain a snapshot of the mapcount. Without holding the folio lock
 752	 * this snapshot can be slightly wrong as we cannot always read the
 753	 * mapcount atomically.
 754	 */
 755	for (i = 0; i < nr; i++, page++) {
 756		int mapcount = folio_precise_page_mapcount(folio, page);
 757		unsigned long pss = PAGE_SIZE << PSS_SHIFT;
 758		if (mapcount >= 2)
 759			pss /= mapcount;
 760		smaps_page_accumulate(mss, folio, PAGE_SIZE, pss,
 761				dirty, locked, mapcount < 2);
 762	}
 763}
 764
 765#ifdef CONFIG_SHMEM
 766static int smaps_pte_hole(unsigned long addr, unsigned long end,
 767			  __always_unused int depth, struct mm_walk *walk)
 768{
 769	struct mem_size_stats *mss = walk->private;
 770	struct vm_area_struct *vma = walk->vma;
 771
 772	mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping,
 773					      linear_page_index(vma, addr),
 774					      linear_page_index(vma, end));
 775
 776	return 0;
 777}
 778#else
 779#define smaps_pte_hole		NULL
 780#endif /* CONFIG_SHMEM */
 781
 782static void smaps_pte_hole_lookup(unsigned long addr, struct mm_walk *walk)
 783{
 784#ifdef CONFIG_SHMEM
 785	if (walk->ops->pte_hole) {
 786		/* depth is not used */
 787		smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk);
 788	}
 789#endif
 790}
 791
 792static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 793		struct mm_walk *walk)
 794{
 795	struct mem_size_stats *mss = walk->private;
 796	struct vm_area_struct *vma = walk->vma;
 797	bool locked = !!(vma->vm_flags & VM_LOCKED);
 798	struct page *page = NULL;
 799	bool present = false, young = false, dirty = false;
 800	pte_t ptent = ptep_get(pte);
 801
 802	if (pte_present(ptent)) {
 803		page = vm_normal_page(vma, addr, ptent);
 804		young = pte_young(ptent);
 805		dirty = pte_dirty(ptent);
 806		present = true;
 807	} else if (is_swap_pte(ptent)) {
 808		swp_entry_t swpent = pte_to_swp_entry(ptent);
 809
 810		if (!non_swap_entry(swpent)) {
 811			int mapcount;
 812
 813			mss->swap += PAGE_SIZE;
 814			mapcount = swp_swapcount(swpent);
 815			if (mapcount >= 2) {
 816				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
 817
 818				do_div(pss_delta, mapcount);
 819				mss->swap_pss += pss_delta;
 820			} else {
 821				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
 822			}
 823		} else if (is_pfn_swap_entry(swpent)) {
 824			if (is_device_private_entry(swpent))
 825				present = true;
 826			page = pfn_swap_entry_to_page(swpent);
 827		}
 828	} else {
 829		smaps_pte_hole_lookup(addr, walk);
 830		return;
 831	}
 832
 833	if (!page)
 834		return;
 835
 836	smaps_account(mss, page, false, young, dirty, locked, present);
 837}
 838
 839#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 840static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 841		struct mm_walk *walk)
 842{
 843	struct mem_size_stats *mss = walk->private;
 844	struct vm_area_struct *vma = walk->vma;
 845	bool locked = !!(vma->vm_flags & VM_LOCKED);
 846	struct page *page = NULL;
 847	bool present = false;
 848	struct folio *folio;
 849
 850	if (pmd_present(*pmd)) {
 851		page = vm_normal_page_pmd(vma, addr, *pmd);
 852		present = true;
 853	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
 854		swp_entry_t entry = pmd_to_swp_entry(*pmd);
 855
 856		if (is_pfn_swap_entry(entry))
 857			page = pfn_swap_entry_to_page(entry);
 858	}
 859	if (IS_ERR_OR_NULL(page))
 860		return;
 861	folio = page_folio(page);
 862	if (folio_test_anon(folio))
 863		mss->anonymous_thp += HPAGE_PMD_SIZE;
 864	else if (folio_test_swapbacked(folio))
 865		mss->shmem_thp += HPAGE_PMD_SIZE;
 866	else if (folio_is_zone_device(folio))
 867		/* pass */;
 868	else
 869		mss->file_thp += HPAGE_PMD_SIZE;
 870
 871	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
 872		      locked, present);
 873}
 874#else
 875static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 876		struct mm_walk *walk)
 877{
 878}
 879#endif
 880
 881static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 882			   struct mm_walk *walk)
 883{
 884	struct vm_area_struct *vma = walk->vma;
 885	pte_t *pte;
 886	spinlock_t *ptl;
 887
 888	ptl = pmd_trans_huge_lock(pmd, vma);
 889	if (ptl) {
 890		smaps_pmd_entry(pmd, addr, walk);
 891		spin_unlock(ptl);
 892		goto out;
 893	}
 894
 895	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 896	if (!pte) {
 897		walk->action = ACTION_AGAIN;
 898		return 0;
 899	}
 900	for (; addr != end; pte++, addr += PAGE_SIZE)
 901		smaps_pte_entry(pte, addr, walk);
 902	pte_unmap_unlock(pte - 1, ptl);
 903out:
 904	cond_resched();
 905	return 0;
 906}
 907
 908static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
 909{
 910	/*
 911	 * Don't forget to update Documentation/ on changes.
 912	 *
 913	 * The length of the second argument of mnemonics[]
 914	 * needs to be 3 instead of previously set 2
 915	 * (i.e. from [BITS_PER_LONG][2] to [BITS_PER_LONG][3])
 916	 * to avoid spurious
 917	 * -Werror=unterminated-string-initialization warning
 918	 *  with GCC 15
 919	 */
 920	static const char mnemonics[BITS_PER_LONG][3] = {
 921		/*
 922		 * In case if we meet a flag we don't know about.
 923		 */
 924		[0 ... (BITS_PER_LONG-1)] = "??",
 925
 926		[ilog2(VM_READ)]	= "rd",
 927		[ilog2(VM_WRITE)]	= "wr",
 928		[ilog2(VM_EXEC)]	= "ex",
 929		[ilog2(VM_SHARED)]	= "sh",
 930		[ilog2(VM_MAYREAD)]	= "mr",
 931		[ilog2(VM_MAYWRITE)]	= "mw",
 932		[ilog2(VM_MAYEXEC)]	= "me",
 933		[ilog2(VM_MAYSHARE)]	= "ms",
 934		[ilog2(VM_GROWSDOWN)]	= "gd",
 935		[ilog2(VM_PFNMAP)]	= "pf",
 936		[ilog2(VM_LOCKED)]	= "lo",
 937		[ilog2(VM_IO)]		= "io",
 938		[ilog2(VM_SEQ_READ)]	= "sr",
 939		[ilog2(VM_RAND_READ)]	= "rr",
 940		[ilog2(VM_DONTCOPY)]	= "dc",
 941		[ilog2(VM_DONTEXPAND)]	= "de",
 942		[ilog2(VM_LOCKONFAULT)]	= "lf",
 943		[ilog2(VM_ACCOUNT)]	= "ac",
 944		[ilog2(VM_NORESERVE)]	= "nr",
 945		[ilog2(VM_HUGETLB)]	= "ht",
 946		[ilog2(VM_SYNC)]	= "sf",
 947		[ilog2(VM_ARCH_1)]	= "ar",
 948		[ilog2(VM_WIPEONFORK)]	= "wf",
 949		[ilog2(VM_DONTDUMP)]	= "dd",
 950#ifdef CONFIG_ARM64_BTI
 951		[ilog2(VM_ARM64_BTI)]	= "bt",
 952#endif
 953#ifdef CONFIG_MEM_SOFT_DIRTY
 954		[ilog2(VM_SOFTDIRTY)]	= "sd",
 955#endif
 956		[ilog2(VM_MIXEDMAP)]	= "mm",
 957		[ilog2(VM_HUGEPAGE)]	= "hg",
 958		[ilog2(VM_NOHUGEPAGE)]	= "nh",
 959		[ilog2(VM_MERGEABLE)]	= "mg",
 960		[ilog2(VM_UFFD_MISSING)]= "um",
 961		[ilog2(VM_UFFD_WP)]	= "uw",
 962#ifdef CONFIG_ARM64_MTE
 963		[ilog2(VM_MTE)]		= "mt",
 964		[ilog2(VM_MTE_ALLOWED)]	= "",
 965#endif
 966#ifdef CONFIG_ARCH_HAS_PKEYS
 967		/* These come out via ProtectionKey: */
 968		[ilog2(VM_PKEY_BIT0)]	= "",
 969		[ilog2(VM_PKEY_BIT1)]	= "",
 970		[ilog2(VM_PKEY_BIT2)]	= "",
 971#if VM_PKEY_BIT3
 972		[ilog2(VM_PKEY_BIT3)]	= "",
 973#endif
 974#if VM_PKEY_BIT4
 975		[ilog2(VM_PKEY_BIT4)]	= "",
 976#endif
 977#endif /* CONFIG_ARCH_HAS_PKEYS */
 978#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
 979		[ilog2(VM_UFFD_MINOR)]	= "ui",
 980#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
 981#ifdef CONFIG_ARCH_HAS_USER_SHADOW_STACK
 982		[ilog2(VM_SHADOW_STACK)] = "ss",
 983#endif
 984#if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
 985		[ilog2(VM_DROPPABLE)] = "dp",
 986#endif
 987#ifdef CONFIG_64BIT
 988		[ilog2(VM_SEALED)] = "sl",
 989#endif
 990	};
 991	size_t i;
 992
 993	seq_puts(m, "VmFlags: ");
 994	for (i = 0; i < BITS_PER_LONG; i++) {
 995		if (!mnemonics[i][0])
 996			continue;
 997		if (vma->vm_flags & (1UL << i))
 998			seq_printf(m, "%s ", mnemonics[i]);
 999	}
1000	seq_putc(m, '\n');
1001}
1002
1003#ifdef CONFIG_HUGETLB_PAGE
1004static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
1005				 unsigned long addr, unsigned long end,
1006				 struct mm_walk *walk)
1007{
1008	struct mem_size_stats *mss = walk->private;
1009	struct vm_area_struct *vma = walk->vma;
1010	pte_t ptent = huge_ptep_get(walk->mm, addr, pte);
1011	struct folio *folio = NULL;
1012	bool present = false;
1013
1014	if (pte_present(ptent)) {
1015		folio = page_folio(pte_page(ptent));
1016		present = true;
1017	} else if (is_swap_pte(ptent)) {
1018		swp_entry_t swpent = pte_to_swp_entry(ptent);
1019
1020		if (is_pfn_swap_entry(swpent))
1021			folio = pfn_swap_entry_folio(swpent);
1022	}
1023
1024	if (folio) {
1025		/* We treat non-present entries as "maybe shared". */
1026		if (!present || folio_likely_mapped_shared(folio) ||
1027		    hugetlb_pmd_shared(pte))
1028			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
1029		else
1030			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
1031	}
1032	return 0;
1033}
1034#else
1035#define smaps_hugetlb_range	NULL
1036#endif /* HUGETLB_PAGE */
1037
1038static const struct mm_walk_ops smaps_walk_ops = {
1039	.pmd_entry		= smaps_pte_range,
1040	.hugetlb_entry		= smaps_hugetlb_range,
1041	.walk_lock		= PGWALK_RDLOCK,
1042};
1043
1044static const struct mm_walk_ops smaps_shmem_walk_ops = {
1045	.pmd_entry		= smaps_pte_range,
1046	.hugetlb_entry		= smaps_hugetlb_range,
1047	.pte_hole		= smaps_pte_hole,
1048	.walk_lock		= PGWALK_RDLOCK,
1049};
1050
1051/*
1052 * Gather mem stats from @vma with the indicated beginning
1053 * address @start, and keep them in @mss.
1054 *
1055 * Use vm_start of @vma as the beginning address if @start is 0.
1056 */
1057static void smap_gather_stats(struct vm_area_struct *vma,
1058		struct mem_size_stats *mss, unsigned long start)
1059{
1060	const struct mm_walk_ops *ops = &smaps_walk_ops;
1061
1062	/* Invalid start */
1063	if (start >= vma->vm_end)
1064		return;
1065
1066	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
1067		/*
1068		 * For shared or readonly shmem mappings we know that all
1069		 * swapped out pages belong to the shmem object, and we can
1070		 * obtain the swap value much more efficiently. For private
1071		 * writable mappings, we might have COW pages that are
1072		 * not affected by the parent swapped out pages of the shmem
1073		 * object, so we have to distinguish them during the page walk.
1074		 * Unless we know that the shmem object (or the part mapped by
1075		 * our VMA) has no swapped out pages at all.
1076		 */
1077		unsigned long shmem_swapped = shmem_swap_usage(vma);
1078
1079		if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
1080					!(vma->vm_flags & VM_WRITE))) {
1081			mss->swap += shmem_swapped;
1082		} else {
1083			ops = &smaps_shmem_walk_ops;
1084		}
1085	}
1086
1087	/* mmap_lock is held in m_start */
1088	if (!start)
1089		walk_page_vma(vma, ops, mss);
1090	else
1091		walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
1092}
1093
1094#define SEQ_PUT_DEC(str, val) \
1095		seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
1096
1097/* Show the contents common for smaps and smaps_rollup */
1098static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
1099	bool rollup_mode)
1100{
1101	SEQ_PUT_DEC("Rss:            ", mss->resident);
1102	SEQ_PUT_DEC(" kB\nPss:            ", mss->pss >> PSS_SHIFT);
1103	SEQ_PUT_DEC(" kB\nPss_Dirty:      ", mss->pss_dirty >> PSS_SHIFT);
1104	if (rollup_mode) {
1105		/*
1106		 * These are meaningful only for smaps_rollup, otherwise two of
1107		 * them are zero, and the other one is the same as Pss.
1108		 */
1109		SEQ_PUT_DEC(" kB\nPss_Anon:       ",
1110			mss->pss_anon >> PSS_SHIFT);
1111		SEQ_PUT_DEC(" kB\nPss_File:       ",
1112			mss->pss_file >> PSS_SHIFT);
1113		SEQ_PUT_DEC(" kB\nPss_Shmem:      ",
1114			mss->pss_shmem >> PSS_SHIFT);
1115	}
1116	SEQ_PUT_DEC(" kB\nShared_Clean:   ", mss->shared_clean);
1117	SEQ_PUT_DEC(" kB\nShared_Dirty:   ", mss->shared_dirty);
1118	SEQ_PUT_DEC(" kB\nPrivate_Clean:  ", mss->private_clean);
1119	SEQ_PUT_DEC(" kB\nPrivate_Dirty:  ", mss->private_dirty);
1120	SEQ_PUT_DEC(" kB\nReferenced:     ", mss->referenced);
1121	SEQ_PUT_DEC(" kB\nAnonymous:      ", mss->anonymous);
1122	SEQ_PUT_DEC(" kB\nKSM:            ", mss->ksm);
1123	SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
1124	SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
1125	SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
1126	SEQ_PUT_DEC(" kB\nFilePmdMapped:  ", mss->file_thp);
1127	SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
1128	seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
1129				  mss->private_hugetlb >> 10, 7);
1130	SEQ_PUT_DEC(" kB\nSwap:           ", mss->swap);
1131	SEQ_PUT_DEC(" kB\nSwapPss:        ",
1132					mss->swap_pss >> PSS_SHIFT);
1133	SEQ_PUT_DEC(" kB\nLocked:         ",
1134					mss->pss_locked >> PSS_SHIFT);
1135	seq_puts(m, " kB\n");
1136}
1137
1138static int show_smap(struct seq_file *m, void *v)
1139{
1140	struct vm_area_struct *vma = v;
1141	struct mem_size_stats mss = {};
1142
1143	smap_gather_stats(vma, &mss, 0);
1144
1145	show_map_vma(m, vma);
1146
1147	SEQ_PUT_DEC("Size:           ", vma->vm_end - vma->vm_start);
1148	SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
1149	SEQ_PUT_DEC(" kB\nMMUPageSize:    ", vma_mmu_pagesize(vma));
1150	seq_puts(m, " kB\n");
1151
1152	__show_smap(m, &mss, false);
1153
1154	seq_printf(m, "THPeligible:    %8u\n",
1155		   !!thp_vma_allowable_orders(vma, vma->vm_flags,
1156			   TVA_SMAPS | TVA_ENFORCE_SYSFS, THP_ORDERS_ALL));
1157
1158	if (arch_pkeys_enabled())
1159		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
1160	show_smap_vma_flags(m, vma);
1161
1162	return 0;
1163}
1164
1165static int show_smaps_rollup(struct seq_file *m, void *v)
1166{
1167	struct proc_maps_private *priv = m->private;
1168	struct mem_size_stats mss = {};
1169	struct mm_struct *mm = priv->mm;
1170	struct vm_area_struct *vma;
1171	unsigned long vma_start = 0, last_vma_end = 0;
1172	int ret = 0;
1173	VMA_ITERATOR(vmi, mm, 0);
1174
1175	priv->task = get_proc_task(priv->inode);
1176	if (!priv->task)
1177		return -ESRCH;
1178
1179	if (!mm || !mmget_not_zero(mm)) {
1180		ret = -ESRCH;
1181		goto out_put_task;
1182	}
1183
1184	ret = mmap_read_lock_killable(mm);
1185	if (ret)
1186		goto out_put_mm;
1187
1188	hold_task_mempolicy(priv);
1189	vma = vma_next(&vmi);
1190
1191	if (unlikely(!vma))
1192		goto empty_set;
1193
1194	vma_start = vma->vm_start;
1195	do {
1196		smap_gather_stats(vma, &mss, 0);
1197		last_vma_end = vma->vm_end;
1198
1199		/*
1200		 * Release mmap_lock temporarily if someone wants to
1201		 * access it for write request.
1202		 */
1203		if (mmap_lock_is_contended(mm)) {
1204			vma_iter_invalidate(&vmi);
1205			mmap_read_unlock(mm);
1206			ret = mmap_read_lock_killable(mm);
1207			if (ret) {
1208				release_task_mempolicy(priv);
1209				goto out_put_mm;
1210			}
1211
1212			/*
1213			 * After dropping the lock, there are four cases to
1214			 * consider. See the following example for explanation.
1215			 *
1216			 *   +------+------+-----------+
1217			 *   | VMA1 | VMA2 | VMA3      |
1218			 *   +------+------+-----------+
1219			 *   |      |      |           |
1220			 *  4k     8k     16k         400k
1221			 *
1222			 * Suppose we drop the lock after reading VMA2 due to
1223			 * contention, then we get:
1224			 *
1225			 *	last_vma_end = 16k
1226			 *
1227			 * 1) VMA2 is freed, but VMA3 exists:
1228			 *
1229			 *    vma_next(vmi) will return VMA3.
1230			 *    In this case, just continue from VMA3.
1231			 *
1232			 * 2) VMA2 still exists:
1233			 *
1234			 *    vma_next(vmi) will return VMA3.
1235			 *    In this case, just continue from VMA3.
1236			 *
1237			 * 3) No more VMAs can be found:
1238			 *
1239			 *    vma_next(vmi) will return NULL.
1240			 *    No more things to do, just break.
1241			 *
1242			 * 4) (last_vma_end - 1) is the middle of a vma (VMA'):
1243			 *
1244			 *    vma_next(vmi) will return VMA' whose range
1245			 *    contains last_vma_end.
1246			 *    Iterate VMA' from last_vma_end.
1247			 */
1248			vma = vma_next(&vmi);
1249			/* Case 3 above */
1250			if (!vma)
1251				break;
1252
1253			/* Case 1 and 2 above */
1254			if (vma->vm_start >= last_vma_end) {
1255				smap_gather_stats(vma, &mss, 0);
1256				last_vma_end = vma->vm_end;
1257				continue;
1258			}
1259
1260			/* Case 4 above */
1261			if (vma->vm_end > last_vma_end) {
1262				smap_gather_stats(vma, &mss, last_vma_end);
1263				last_vma_end = vma->vm_end;
1264			}
1265		}
1266	} for_each_vma(vmi, vma);
1267
1268empty_set:
1269	show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
1270	seq_pad(m, ' ');
1271	seq_puts(m, "[rollup]\n");
1272
1273	__show_smap(m, &mss, true);
1274
1275	release_task_mempolicy(priv);
1276	mmap_read_unlock(mm);
1277
1278out_put_mm:
1279	mmput(mm);
1280out_put_task:
1281	put_task_struct(priv->task);
1282	priv->task = NULL;
1283
1284	return ret;
1285}
1286#undef SEQ_PUT_DEC
1287
1288static const struct seq_operations proc_pid_smaps_op = {
1289	.start	= m_start,
1290	.next	= m_next,
1291	.stop	= m_stop,
1292	.show	= show_smap
1293};
1294
1295static int pid_smaps_open(struct inode *inode, struct file *file)
1296{
1297	return do_maps_open(inode, file, &proc_pid_smaps_op);
1298}
1299
1300static int smaps_rollup_open(struct inode *inode, struct file *file)
1301{
1302	int ret;
1303	struct proc_maps_private *priv;
1304
1305	priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
1306	if (!priv)
1307		return -ENOMEM;
1308
1309	ret = single_open(file, show_smaps_rollup, priv);
1310	if (ret)
1311		goto out_free;
1312
1313	priv->inode = inode;
1314	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
1315	if (IS_ERR(priv->mm)) {
1316		ret = PTR_ERR(priv->mm);
1317
1318		single_release(inode, file);
1319		goto out_free;
1320	}
1321
1322	return 0;
1323
1324out_free:
1325	kfree(priv);
1326	return ret;
1327}
1328
1329static int smaps_rollup_release(struct inode *inode, struct file *file)
1330{
1331	struct seq_file *seq = file->private_data;
1332	struct proc_maps_private *priv = seq->private;
1333
1334	if (priv->mm)
1335		mmdrop(priv->mm);
1336
1337	kfree(priv);
1338	return single_release(inode, file);
1339}
1340
1341const struct file_operations proc_pid_smaps_operations = {
1342	.open		= pid_smaps_open,
1343	.read		= seq_read,
1344	.llseek		= seq_lseek,
1345	.release	= proc_map_release,
1346};
1347
1348const struct file_operations proc_pid_smaps_rollup_operations = {
1349	.open		= smaps_rollup_open,
1350	.read		= seq_read,
1351	.llseek		= seq_lseek,
1352	.release	= smaps_rollup_release,
1353};
1354
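/*
 * Values accepted by writes to /proc/<pid>/clear_refs; see the comments
 * in clear_refs_test_walk() and clear_refs_write() for what each does.
 */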
1355enum clear_refs_types {
1356	CLEAR_REFS_ALL = 1,
1357	CLEAR_REFS_ANON,
1358	CLEAR_REFS_MAPPED,
1359	CLEAR_REFS_SOFT_DIRTY,
1360	CLEAR_REFS_MM_HIWATER_RSS,
1361	CLEAR_REFS_LAST,
1362};
1363
1364struct clear_refs_private {
1365	enum clear_refs_types type;
1366};
1367
1368#ifdef CONFIG_MEM_SOFT_DIRTY
1369
1370static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1371{
1372	struct folio *folio;
1373
1374	if (!pte_write(pte))
1375		return false;
1376	if (!is_cow_mapping(vma->vm_flags))
1377		return false;
1378	if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
1379		return false;
1380	folio = vm_normal_folio(vma, addr, pte);
1381	if (!folio)
1382		return false;
1383	return folio_maybe_dma_pinned(folio);
1384}
1385
1386static inline void clear_soft_dirty(struct vm_area_struct *vma,
1387		unsigned long addr, pte_t *pte)
1388{
1389	/*
1390	 * The soft-dirty tracker uses #PF-s to catch writes
1391	 * to pages, so write-protect the pte as well. See the
1392	 * Documentation/admin-guide/mm/soft-dirty.rst for full description
1393	 * of how soft-dirty works.
1394	 */
1395	pte_t ptent = ptep_get(pte);
1396
1397	if (pte_present(ptent)) {
1398		pte_t old_pte;
1399
1400		if (pte_is_pinned(vma, addr, ptent))
1401			return;
1402		old_pte = ptep_modify_prot_start(vma, addr, pte);
1403		ptent = pte_wrprotect(old_pte);
1404		ptent = pte_clear_soft_dirty(ptent);
1405		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
1406	} else if (is_swap_pte(ptent)) {
1407		ptent = pte_swp_clear_soft_dirty(ptent);
1408		set_pte_at(vma->vm_mm, addr, pte, ptent);
1409	}
1410}
1411#else
1412static inline void clear_soft_dirty(struct vm_area_struct *vma,
1413		unsigned long addr, pte_t *pte)
1414{
1415}
1416#endif
1417
1418#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1419static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1420		unsigned long addr, pmd_t *pmdp)
1421{
1422	pmd_t old, pmd = *pmdp;
1423
1424	if (pmd_present(pmd)) {
1425		/* See comment in change_huge_pmd() */
1426		old = pmdp_invalidate(vma, addr, pmdp);
1427		if (pmd_dirty(old))
1428			pmd = pmd_mkdirty(pmd);
1429		if (pmd_young(old))
1430			pmd = pmd_mkyoung(pmd);
1431
1432		pmd = pmd_wrprotect(pmd);
1433		pmd = pmd_clear_soft_dirty(pmd);
1434
1435		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1436	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
1437		pmd = pmd_swp_clear_soft_dirty(pmd);
1438		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1439	}
1440}
1441#else
1442static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1443		unsigned long addr, pmd_t *pmdp)
1444{
1445}
1446#endif
1447
1448static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
1449				unsigned long end, struct mm_walk *walk)
1450{
1451	struct clear_refs_private *cp = walk->private;
1452	struct vm_area_struct *vma = walk->vma;
1453	pte_t *pte, ptent;
1454	spinlock_t *ptl;
1455	struct folio *folio;
1456
1457	ptl = pmd_trans_huge_lock(pmd, vma);
1458	if (ptl) {
1459		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1460			clear_soft_dirty_pmd(vma, addr, pmd);
1461			goto out;
1462		}
1463
1464		if (!pmd_present(*pmd))
1465			goto out;
1466
1467		folio = pmd_folio(*pmd);
1468
1469		/* Clear accessed and referenced bits. */
1470		pmdp_test_and_clear_young(vma, addr, pmd);
1471		folio_test_clear_young(folio);
1472		folio_clear_referenced(folio);
1473out:
1474		spin_unlock(ptl);
1475		return 0;
1476	}
1477
1478	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1479	if (!pte) {
1480		walk->action = ACTION_AGAIN;
1481		return 0;
1482	}
1483	for (; addr != end; pte++, addr += PAGE_SIZE) {
1484		ptent = ptep_get(pte);
1485
1486		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1487			clear_soft_dirty(vma, addr, pte);
1488			continue;
1489		}
1490
1491		if (!pte_present(ptent))
1492			continue;
1493
1494		folio = vm_normal_folio(vma, addr, ptent);
1495		if (!folio)
1496			continue;
1497
1498		/* Clear accessed and referenced bits. */
1499		ptep_test_and_clear_young(vma, addr, pte);
1500		folio_test_clear_young(folio);
1501		folio_clear_referenced(folio);
1502	}
1503	pte_unmap_unlock(pte - 1, ptl);
1504	cond_resched();
1505	return 0;
1506}
1507
1508static int clear_refs_test_walk(unsigned long start, unsigned long end,
1509				struct mm_walk *walk)
1510{
1511	struct clear_refs_private *cp = walk->private;
1512	struct vm_area_struct *vma = walk->vma;
1513
1514	if (vma->vm_flags & VM_PFNMAP)
1515		return 1;
1516
1517	/*
1518	 * Writing 1 to /proc/pid/clear_refs affects all pages.
1519	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
1520	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
1521	 * Writing 4 to /proc/pid/clear_refs affects all pages.
1522	 */
1523	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
1524		return 1;
1525	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
1526		return 1;
1527	return 0;
1528}
1529
1530static const struct mm_walk_ops clear_refs_walk_ops = {
1531	.pmd_entry		= clear_refs_pte_range,
1532	.test_walk		= clear_refs_test_walk,
1533	.walk_lock		= PGWALK_WRLOCK,
1534};
1535
1536static ssize_t clear_refs_write(struct file *file, const char __user *buf,
1537				size_t count, loff_t *ppos)
1538{
1539	struct task_struct *task;
1540	char buffer[PROC_NUMBUF] = {};
1541	struct mm_struct *mm;
1542	struct vm_area_struct *vma;
1543	enum clear_refs_types type;
1544	int itype;
1545	int rv;
1546
1547	if (count > sizeof(buffer) - 1)
1548		count = sizeof(buffer) - 1;
1549	if (copy_from_user(buffer, buf, count))
1550		return -EFAULT;
1551	rv = kstrtoint(strstrip(buffer), 10, &itype);
1552	if (rv < 0)
1553		return rv;
1554	type = (enum clear_refs_types)itype;
1555	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
1556		return -EINVAL;
1557
1558	task = get_proc_task(file_inode(file));
1559	if (!task)
1560		return -ESRCH;
1561	mm = get_task_mm(task);
1562	if (mm) {
1563		VMA_ITERATOR(vmi, mm, 0);
1564		struct mmu_notifier_range range;
1565		struct clear_refs_private cp = {
1566			.type = type,
1567		};
1568
1569		if (mmap_write_lock_killable(mm)) {
1570			count = -EINTR;
1571			goto out_mm;
1572		}
1573		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
1574			/*
1575			 * Writing 5 to /proc/pid/clear_refs resets the peak
1576			 * resident set size to this mm's current rss value.
1577			 */
1578			reset_mm_hiwater_rss(mm);
1579			goto out_unlock;
1580		}
1581
1582		if (type == CLEAR_REFS_SOFT_DIRTY) {
1583			for_each_vma(vmi, vma) {
1584				if (!(vma->vm_flags & VM_SOFTDIRTY))
1585					continue;
1586				vm_flags_clear(vma, VM_SOFTDIRTY);
1587				vma_set_page_prot(vma);
1588			}
1589
1590			inc_tlb_flush_pending(mm);
1591			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
1592						0, mm, 0, -1UL);
1593			mmu_notifier_invalidate_range_start(&range);
1594		}
1595		walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
1596		if (type == CLEAR_REFS_SOFT_DIRTY) {
1597			mmu_notifier_invalidate_range_end(&range);
1598			flush_tlb_mm(mm);
1599			dec_tlb_flush_pending(mm);
1600		}
1601out_unlock:
1602		mmap_write_unlock(mm);
1603out_mm:
1604		mmput(mm);
1605	}
1606	put_task_struct(task);
1607
1608	return count;
1609}
1610
1611const struct file_operations proc_clear_refs_operations = {
1612	.write		= clear_refs_write,
1613	.llseek		= noop_llseek,
1614};
1615
1616typedef struct {
1617	u64 pme;
1618} pagemap_entry_t;
1619
1620struct pagemapread {
1621	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
1622	pagemap_entry_t *buffer;
1623	bool show_pfn;
1624};
1625
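/*
 * The pagemap read path fills pm->buffer with one pagemap_entry_t per
 * virtual page and walks the address space in PMD-sized
 * (PAGEMAP_WALK_SIZE) chunks.
 */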
1626#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
1627#define PAGEMAP_WALK_MASK	(PMD_MASK)
1628
1629#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
1630#define PM_PFRAME_BITS		55
1631#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1632#define PM_SOFT_DIRTY		BIT_ULL(55)
1633#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
1634#define PM_UFFD_WP		BIT_ULL(57)
1635#define PM_FILE			BIT_ULL(61)
1636#define PM_SWAP			BIT_ULL(62)
1637#define PM_PRESENT		BIT_ULL(63)
1638
1639#define PM_END_OF_BUFFER    1
1640
1641static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
1642{
1643	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
1644}
1645
1646static int add_to_pagemap(pagemap_entry_t *pme, struct pagemapread *pm)
1647{
1648	pm->buffer[pm->pos++] = *pme;
1649	if (pm->pos >= pm->len)
1650		return PM_END_OF_BUFFER;
1651	return 0;
1652}
1653
1654static int pagemap_pte_hole(unsigned long start, unsigned long end,
1655			    __always_unused int depth, struct mm_walk *walk)
1656{
1657	struct pagemapread *pm = walk->private;
1658	unsigned long addr = start;
1659	int err = 0;
1660
1661	while (addr < end) {
1662		struct vm_area_struct *vma = find_vma(walk->mm, addr);
1663		pagemap_entry_t pme = make_pme(0, 0);
1664		/* End of address space hole, which we mark as non-present. */
1665		unsigned long hole_end;
1666
1667		if (vma)
1668			hole_end = min(end, vma->vm_start);
1669		else
1670			hole_end = end;
1671
1672		for (; addr < hole_end; addr += PAGE_SIZE) {
1673			err = add_to_pagemap(&pme, pm);
1674			if (err)
1675				goto out;
1676		}
1677
1678		if (!vma)
1679			break;
1680
1681		/* Addresses in the VMA. */
1682		if (vma->vm_flags & VM_SOFTDIRTY)
1683			pme = make_pme(0, PM_SOFT_DIRTY);
1684		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
1685			err = add_to_pagemap(&pme, pm);
1686			if (err)
1687				goto out;
1688		}
1689	}
1690out:
1691	return err;
1692}
1693
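/*
 * Encode one PTE as a pagemap entry: the PFN (or swap type/offset) goes
 * in the low bits, PM_* flag bits in the high bits.
 */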
1694static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
1695		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1696{
1697	u64 frame = 0, flags = 0;
1698	struct page *page = NULL;
1699	struct folio *folio;
1700
1701	if (pte_present(pte)) {
1702		if (pm->show_pfn)
1703			frame = pte_pfn(pte);
1704		flags |= PM_PRESENT;
1705		page = vm_normal_page(vma, addr, pte);
1706		if (pte_soft_dirty(pte))
1707			flags |= PM_SOFT_DIRTY;
1708		if (pte_uffd_wp(pte))
1709			flags |= PM_UFFD_WP;
1710	} else if (is_swap_pte(pte)) {
1711		swp_entry_t entry;
1712		if (pte_swp_soft_dirty(pte))
1713			flags |= PM_SOFT_DIRTY;
1714		if (pte_swp_uffd_wp(pte))
1715			flags |= PM_UFFD_WP;
1716		entry = pte_to_swp_entry(pte);
1717		if (pm->show_pfn) {
1718			pgoff_t offset;
1719			/*
1720			 * For PFN swap offsets, keeping the offset field
1721			 * to be PFN only to be compatible with old smaps.
1722			 */
1723			if (is_pfn_swap_entry(entry))
1724				offset = swp_offset_pfn(entry);
1725			else
1726				offset = swp_offset(entry);
1727			frame = swp_type(entry) |
1728			    (offset << MAX_SWAPFILES_SHIFT);
1729		}
1730		flags |= PM_SWAP;
1731		if (is_pfn_swap_entry(entry))
1732			page = pfn_swap_entry_to_page(entry);
1733		if (pte_marker_entry_uffd_wp(entry))
1734			flags |= PM_UFFD_WP;
1735	}
1736
1737	if (page) {
1738		folio = page_folio(page);
1739		if (!folio_test_anon(folio))
1740			flags |= PM_FILE;
1741		if ((flags & PM_PRESENT) &&
1742		    folio_precise_page_mapcount(folio, page) == 1)
1743			flags |= PM_MMAP_EXCLUSIVE;
1744	}
1745	if (vma->vm_flags & VM_SOFTDIRTY)
1746		flags |= PM_SOFT_DIRTY;
1747
1748	return make_pme(frame, flags);
1749}
1750
1751static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1752			     struct mm_walk *walk)
1753{
1754	struct vm_area_struct *vma = walk->vma;
1755	struct pagemapread *pm = walk->private;
1756	spinlock_t *ptl;
1757	pte_t *pte, *orig_pte;
1758	int err = 0;
1759#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1760
1761	ptl = pmd_trans_huge_lock(pmdp, vma);
1762	if (ptl) {
1763		unsigned int idx = (addr & ~PMD_MASK) >> PAGE_SHIFT;
1764		u64 flags = 0, frame = 0;
1765		pmd_t pmd = *pmdp;
1766		struct page *page = NULL;
1767		struct folio *folio = NULL;
1768
1769		if (vma->vm_flags & VM_SOFTDIRTY)
1770			flags |= PM_SOFT_DIRTY;
1771
1772		if (pmd_present(pmd)) {
1773			page = pmd_page(pmd);
1774
1775			flags |= PM_PRESENT;
1776			if (pmd_soft_dirty(pmd))
1777				flags |= PM_SOFT_DIRTY;
1778			if (pmd_uffd_wp(pmd))
1779				flags |= PM_UFFD_WP;
1780			if (pm->show_pfn)
1781				frame = pmd_pfn(pmd) + idx;
1782		}
1783#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1784		else if (is_swap_pmd(pmd)) {
1785			swp_entry_t entry = pmd_to_swp_entry(pmd);
1786			unsigned long offset;
1787
1788			if (pm->show_pfn) {
1789				if (is_pfn_swap_entry(entry))
1790					offset = swp_offset_pfn(entry) + idx;
1791				else
1792					offset = swp_offset(entry) + idx;
1793				frame = swp_type(entry) |
1794					(offset << MAX_SWAPFILES_SHIFT);
1795			}
1796			flags |= PM_SWAP;
1797			if (pmd_swp_soft_dirty(pmd))
1798				flags |= PM_SOFT_DIRTY;
1799			if (pmd_swp_uffd_wp(pmd))
1800				flags |= PM_UFFD_WP;
1801			VM_BUG_ON(!is_pmd_migration_entry(pmd));
1802			page = pfn_swap_entry_to_page(entry);
1803		}
1804#endif
1805
1806		if (page) {
1807			folio = page_folio(page);
1808			if (!folio_test_anon(folio))
1809				flags |= PM_FILE;
1810		}
1811
1812		for (; addr != end; addr += PAGE_SIZE, idx++) {
1813			u64 cur_flags = flags;
1814			pagemap_entry_t pme;
1815
1816			if (folio && (flags & PM_PRESENT) &&
1817			    folio_precise_page_mapcount(folio, page + idx) == 1)
1818				cur_flags |= PM_MMAP_EXCLUSIVE;
1819
1820			pme = make_pme(frame, cur_flags);
1821			err = add_to_pagemap(&pme, pm);
1822			if (err)
1823				break;
1824			if (pm->show_pfn) {
1825				if (flags & PM_PRESENT)
1826					frame++;
1827				else if (flags & PM_SWAP)
1828					frame += (1 << MAX_SWAPFILES_SHIFT);
1829			}
1830		}
1831		spin_unlock(ptl);
1832		return err;
1833	}
1834#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1835
1836	/*
1837	 * We can assume that @vma always points to a valid VMA and that @end
1838	 * never goes beyond vma->vm_end.
1839	 */
1840	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
1841	if (!pte) {
1842		walk->action = ACTION_AGAIN;
1843		return err;
1844	}
1845	for (; addr < end; pte++, addr += PAGE_SIZE) {
1846		pagemap_entry_t pme;
1847
1848		pme = pte_to_pagemap_entry(pm, vma, addr, ptep_get(pte));
1849		err = add_to_pagemap(&pme, pm);
1850		if (err)
1851			break;
1852	}
1853	pte_unmap_unlock(orig_pte, ptl);
1854
1855	cond_resched();
1856
1857	return err;
1858}
1859
1860#ifdef CONFIG_HUGETLB_PAGE
1861/* This function walks within one hugetlb entry in a single call */
1862static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
1863				 unsigned long addr, unsigned long end,
1864				 struct mm_walk *walk)
1865{
1866	struct pagemapread *pm = walk->private;
1867	struct vm_area_struct *vma = walk->vma;
1868	u64 flags = 0, frame = 0;
1869	int err = 0;
1870	pte_t pte;
1871
1872	if (vma->vm_flags & VM_SOFTDIRTY)
1873		flags |= PM_SOFT_DIRTY;
1874
1875	pte = huge_ptep_get(walk->mm, addr, ptep);
1876	if (pte_present(pte)) {
1877		struct folio *folio = page_folio(pte_page(pte));
1878
1879		if (!folio_test_anon(folio))
1880			flags |= PM_FILE;
1881
1882		if (!folio_likely_mapped_shared(folio) &&
1883		    !hugetlb_pmd_shared(ptep))
1884			flags |= PM_MMAP_EXCLUSIVE;
1885
1886		if (huge_pte_uffd_wp(pte))
1887			flags |= PM_UFFD_WP;
1888
1889		flags |= PM_PRESENT;
1890		if (pm->show_pfn)
1891			frame = pte_pfn(pte) +
1892				((addr & ~hmask) >> PAGE_SHIFT);
1893	} else if (pte_swp_uffd_wp_any(pte)) {
1894		flags |= PM_UFFD_WP;
1895	}
1896
1897	for (; addr != end; addr += PAGE_SIZE) {
1898		pagemap_entry_t pme = make_pme(frame, flags);
1899
1900		err = add_to_pagemap(&pme, pm);
1901		if (err)
1902			return err;
1903		if (pm->show_pfn && (flags & PM_PRESENT))
1904			frame++;
1905	}
1906
1907	cond_resched();
1908
1909	return err;
1910}
1911#else
1912#define pagemap_hugetlb_range	NULL
1913#endif /* CONFIG_HUGETLB_PAGE */
1914
1915static const struct mm_walk_ops pagemap_ops = {
1916	.pmd_entry	= pagemap_pmd_range,
1917	.pte_hole	= pagemap_pte_hole,
1918	.hugetlb_entry	= pagemap_hugetlb_range,
1919	.walk_lock	= PGWALK_RDLOCK,
1920};
1921
1922/*
1923 * /proc/pid/pagemap - an array mapping virtual pages to pfns
1924 *
1925 * For each page in the address space, this file contains one 64-bit entry
1926 * consisting of the following:
1927 *
1928 * Bits 0-54  page frame number (PFN) if present
1929 * Bits 0-4   swap type if swapped
1930 * Bits 5-54  swap offset if swapped
1931 * Bit  55    pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
1932 * Bit  56    page exclusively mapped
1933 * Bit  57    pte is uffd-wp write-protected
1934 * Bits 58-60 zero
1935 * Bit  61    page is file-page or shared-anon
1936 * Bit  62    page swapped
1937 * Bit  63    page present
1938 *
1939 * If the page is not present but in swap, then the PFN contains an
1940 * encoding of the swap file number and the page's offset into the
1941 * swap. Unmapped pages return a null PFN. This allows determining
1942 * precisely which pages are mapped (or in swap) and comparing mapped
1943 * pages between processes.
1944 *
1945 * Efficient users of this interface will use /proc/pid/maps to
1946 * determine which areas of memory are actually mapped and llseek to
1947 * skip over unmapped regions.
1948 */
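/*
 * Illustrative userspace sketch (not part of this file): read and decode
 * the pagemap entry for one virtual address, following the bit layout
 * documented above.  The bit positions below are taken from that comment
 * rather than from a uapi header.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long psz = sysconf(_SC_PAGESIZE);
 *		int probe = 1;
 *		uint64_t ent;
 *		int fd = open("/proc/self/pagemap", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (pread(fd, &ent, sizeof(ent),
 *			  ((uintptr_t)&probe / psz) * sizeof(ent)) != sizeof(ent))
 *			return 1;
 *		printf("present=%d swapped=%d soft-dirty=%d excl=%d pfn=0x%llx\n",
 *		       (int)(ent >> 63 & 1), (int)(ent >> 62 & 1),
 *		       (int)(ent >> 55 & 1), (int)(ent >> 56 & 1),
 *		       (unsigned long long)(ent & ((1ULL << 55) - 1)));
 *		close(fd);
 *		return 0;
 *	}
 *
 * Note that the PFN field reads back as zero unless the reader has
 * CAP_SYS_ADMIN, as enforced in pagemap_read() below.
 */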
1949static ssize_t pagemap_read(struct file *file, char __user *buf,
1950			    size_t count, loff_t *ppos)
1951{
1952	struct mm_struct *mm = file->private_data;
1953	struct pagemapread pm;
1954	unsigned long src;
1955	unsigned long svpfn;
1956	unsigned long start_vaddr;
1957	unsigned long end_vaddr;
1958	int ret = 0, copied = 0;
1959
1960	if (!mm || !mmget_not_zero(mm))
1961		goto out;
1962
1963	ret = -EINVAL;
1964	/* file position must be aligned */
1965	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1966		goto out_mm;
1967
1968	ret = 0;
1969	if (!count)
1970		goto out_mm;
1971
1972	/* do not disclose physical addresses: attack vector */
1973	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
1974
1975	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1976	pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL);
1977	ret = -ENOMEM;
1978	if (!pm.buffer)
1979		goto out_mm;
1980
1981	src = *ppos;
1982	svpfn = src / PM_ENTRY_BYTES;
1983	end_vaddr = mm->task_size;
1984
1985	/* watch out for wraparound */
1986	start_vaddr = end_vaddr;
1987	if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) {
1988		unsigned long end;
1989
1990		ret = mmap_read_lock_killable(mm);
1991		if (ret)
1992			goto out_free;
1993		start_vaddr = untagged_addr_remote(mm, svpfn << PAGE_SHIFT);
1994		mmap_read_unlock(mm);
1995
1996		end = start_vaddr + ((count / PM_ENTRY_BYTES) << PAGE_SHIFT);
1997		if (end >= start_vaddr && end < mm->task_size)
1998			end_vaddr = end;
1999	}
2000
2001	/* Ensure the address is inside the task */
2002	if (start_vaddr > mm->task_size)
2003		start_vaddr = end_vaddr;
2004
2005	ret = 0;
2006	while (count && (start_vaddr < end_vaddr)) {
2007		int len;
2008		unsigned long end;
2009
2010		pm.pos = 0;
2011		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
2012		/* overflow ? */
2013		if (end < start_vaddr || end > end_vaddr)
2014			end = end_vaddr;
2015		ret = mmap_read_lock_killable(mm);
2016		if (ret)
2017			goto out_free;
2018		ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
2019		mmap_read_unlock(mm);
2020		start_vaddr = end;
2021
2022		len = min(count, PM_ENTRY_BYTES * pm.pos);
2023		if (copy_to_user(buf, pm.buffer, len)) {
2024			ret = -EFAULT;
2025			goto out_free;
2026		}
2027		copied += len;
2028		buf += len;
2029		count -= len;
2030	}
2031	*ppos += copied;
2032	if (!ret || ret == PM_END_OF_BUFFER)
2033		ret = copied;
2034
2035out_free:
2036	kfree(pm.buffer);
2037out_mm:
2038	mmput(mm);
2039out:
2040	return ret;
2041}
2042
2043static int pagemap_open(struct inode *inode, struct file *file)
2044{
2045	struct mm_struct *mm;
2046
2047	mm = proc_mem_open(inode, PTRACE_MODE_READ);
2048	if (IS_ERR(mm))
2049		return PTR_ERR(mm);
2050	file->private_data = mm;
2051	return 0;
2052}
2053
2054static int pagemap_release(struct inode *inode, struct file *file)
2055{
2056	struct mm_struct *mm = file->private_data;
2057
2058	if (mm)
2059		mmdrop(mm);
2060	return 0;
2061}
2062
2063#define PM_SCAN_CATEGORIES	(PAGE_IS_WPALLOWED | PAGE_IS_WRITTEN |	\
2064				 PAGE_IS_FILE |	PAGE_IS_PRESENT |	\
2065				 PAGE_IS_SWAPPED | PAGE_IS_PFNZERO |	\
2066				 PAGE_IS_HUGE | PAGE_IS_SOFT_DIRTY)
2067#define PM_SCAN_FLAGS		(PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC)
2068
2069struct pagemap_scan_private {
2070	struct pm_scan_arg arg;
2071	unsigned long masks_of_interest, cur_vma_category;
2072	struct page_region *vec_buf;
2073	unsigned long vec_buf_len, vec_buf_index, found_pages;
2074	struct page_region __user *vec_out;
2075};
2076
2077static unsigned long pagemap_page_category(struct pagemap_scan_private *p,
2078					   struct vm_area_struct *vma,
2079					   unsigned long addr, pte_t pte)
2080{
2081	unsigned long categories = 0;
2082
2083	if (pte_present(pte)) {
2084		struct page *page;
2085
2086		categories |= PAGE_IS_PRESENT;
2087		if (!pte_uffd_wp(pte))
2088			categories |= PAGE_IS_WRITTEN;
2089
2090		if (p->masks_of_interest & PAGE_IS_FILE) {
2091			page = vm_normal_page(vma, addr, pte);
2092			if (page && !PageAnon(page))
2093				categories |= PAGE_IS_FILE;
2094		}
2095
2096		if (is_zero_pfn(pte_pfn(pte)))
2097			categories |= PAGE_IS_PFNZERO;
2098		if (pte_soft_dirty(pte))
2099			categories |= PAGE_IS_SOFT_DIRTY;
2100	} else if (is_swap_pte(pte)) {
2101		swp_entry_t swp;
2102
2103		categories |= PAGE_IS_SWAPPED;
2104		if (!pte_swp_uffd_wp_any(pte))
2105			categories |= PAGE_IS_WRITTEN;
2106
2107		if (p->masks_of_interest & PAGE_IS_FILE) {
2108			swp = pte_to_swp_entry(pte);
2109			if (is_pfn_swap_entry(swp) &&
2110			    !folio_test_anon(pfn_swap_entry_folio(swp)))
2111				categories |= PAGE_IS_FILE;
2112		}
2113		if (pte_swp_soft_dirty(pte))
2114			categories |= PAGE_IS_SOFT_DIRTY;
2115	}
2116
2117	return categories;
2118}
2119
2120static void make_uffd_wp_pte(struct vm_area_struct *vma,
2121			     unsigned long addr, pte_t *pte, pte_t ptent)
2122{
2123	if (pte_present(ptent)) {
2124		pte_t old_pte;
2125
2126		old_pte = ptep_modify_prot_start(vma, addr, pte);
2127		ptent = pte_mkuffd_wp(old_pte);
2128		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
2129	} else if (is_swap_pte(ptent)) {
2130		ptent = pte_swp_mkuffd_wp(ptent);
2131		set_pte_at(vma->vm_mm, addr, pte, ptent);
2132	} else {
2133		set_pte_at(vma->vm_mm, addr, pte,
2134			   make_pte_marker(PTE_MARKER_UFFD_WP));
2135	}
2136}
2137
2138#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2139static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
2140					  struct vm_area_struct *vma,
2141					  unsigned long addr, pmd_t pmd)
2142{
2143	unsigned long categories = PAGE_IS_HUGE;
2144
2145	if (pmd_present(pmd)) {
2146		struct page *page;
2147
2148		categories |= PAGE_IS_PRESENT;
2149		if (!pmd_uffd_wp(pmd))
2150			categories |= PAGE_IS_WRITTEN;
2151
2152		if (p->masks_of_interest & PAGE_IS_FILE) {
2153			page = vm_normal_page_pmd(vma, addr, pmd);
2154			if (page && !PageAnon(page))
2155				categories |= PAGE_IS_FILE;
2156		}
2157
2158		if (is_zero_pfn(pmd_pfn(pmd)))
2159			categories |= PAGE_IS_PFNZERO;
2160		if (pmd_soft_dirty(pmd))
2161			categories |= PAGE_IS_SOFT_DIRTY;
2162	} else if (is_swap_pmd(pmd)) {
2163		swp_entry_t swp;
2164
2165		categories |= PAGE_IS_SWAPPED;
2166		if (!pmd_swp_uffd_wp(pmd))
2167			categories |= PAGE_IS_WRITTEN;
2168		if (pmd_swp_soft_dirty(pmd))
2169			categories |= PAGE_IS_SOFT_DIRTY;
2170
2171		if (p->masks_of_interest & PAGE_IS_FILE) {
2172			swp = pmd_to_swp_entry(pmd);
2173			if (is_pfn_swap_entry(swp) &&
2174			    !folio_test_anon(pfn_swap_entry_folio(swp)))
2175				categories |= PAGE_IS_FILE;
2176		}
2177	}
2178
2179	return categories;
2180}
2181
2182static void make_uffd_wp_pmd(struct vm_area_struct *vma,
2183			     unsigned long addr, pmd_t *pmdp)
2184{
2185	pmd_t old, pmd = *pmdp;
2186
2187	if (pmd_present(pmd)) {
2188		old = pmdp_invalidate_ad(vma, addr, pmdp);
2189		pmd = pmd_mkuffd_wp(old);
2190		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
2191	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
2192		pmd = pmd_swp_mkuffd_wp(pmd);
2193		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
2194	}
2195}
2196#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2197
2198#ifdef CONFIG_HUGETLB_PAGE
2199static unsigned long pagemap_hugetlb_category(pte_t pte)
2200{
2201	unsigned long categories = PAGE_IS_HUGE;
2202
2203	/*
2204	 * According to pagemap_hugetlb_range(), file-backed HugeTLB
2205	 * pages cannot be swapped out, so PAGE_IS_FILE is not checked
2206	 * for swapped pages.
2207	 */
2208	if (pte_present(pte)) {
2209		categories |= PAGE_IS_PRESENT;
2210		if (!huge_pte_uffd_wp(pte))
2211			categories |= PAGE_IS_WRITTEN;
2212		if (!PageAnon(pte_page(pte)))
2213			categories |= PAGE_IS_FILE;
2214		if (is_zero_pfn(pte_pfn(pte)))
2215			categories |= PAGE_IS_PFNZERO;
2216		if (pte_soft_dirty(pte))
2217			categories |= PAGE_IS_SOFT_DIRTY;
2218	} else if (is_swap_pte(pte)) {
2219		categories |= PAGE_IS_SWAPPED;
2220		if (!pte_swp_uffd_wp_any(pte))
2221			categories |= PAGE_IS_WRITTEN;
2222		if (pte_swp_soft_dirty(pte))
2223			categories |= PAGE_IS_SOFT_DIRTY;
2224	}
2225
2226	return categories;
2227}
2228
2229static void make_uffd_wp_huge_pte(struct vm_area_struct *vma,
2230				  unsigned long addr, pte_t *ptep,
2231				  pte_t ptent)
2232{
2233	unsigned long psize;
2234
2235	if (is_hugetlb_entry_hwpoisoned(ptent) || is_pte_marker(ptent))
2236		return;
2237
2238	psize = huge_page_size(hstate_vma(vma));
2239
2240	if (is_hugetlb_entry_migration(ptent))
2241		set_huge_pte_at(vma->vm_mm, addr, ptep,
2242				pte_swp_mkuffd_wp(ptent), psize);
2243	else if (!huge_pte_none(ptent))
2244		huge_ptep_modify_prot_commit(vma, addr, ptep, ptent,
2245					     huge_pte_mkuffd_wp(ptent));
2246	else
2247		set_huge_pte_at(vma->vm_mm, addr, ptep,
2248				make_pte_marker(PTE_MARKER_UFFD_WP), psize);
2249}
2250#endif /* CONFIG_HUGETLB_PAGE */
2251
2252#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
2253static void pagemap_scan_backout_range(struct pagemap_scan_private *p,
2254				       unsigned long addr, unsigned long end)
2255{
2256	struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];
2257
2258	if (cur_buf->start != addr)
2259		cur_buf->end = addr;
2260	else
2261		cur_buf->start = cur_buf->end = 0;
2262
2263	p->found_pages -= (end - addr) / PAGE_SIZE;
2264}
2265#endif
2266
2267static bool pagemap_scan_is_interesting_page(unsigned long categories,
2268					     const struct pagemap_scan_private *p)
2269{
2270	categories ^= p->arg.category_inverted;
2271	if ((categories & p->arg.category_mask) != p->arg.category_mask)
2272		return false;
2273	if (p->arg.category_anyof_mask && !(categories & p->arg.category_anyof_mask))
2274		return false;
2275
2276	return true;
2277}
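/*
 * A worked example of the matching above (values illustrative): with
 * category_inverted = PAGE_IS_WRITTEN, category_mask = PAGE_IS_WRITTEN |
 * PAGE_IS_PRESENT and no anyof mask, a page whose categories are just
 * PAGE_IS_PRESENT (present but not written) is reported: the XOR flips
 * PAGE_IS_WRITTEN on, so every bit of category_mask is satisfied.  The
 * same page with PAGE_IS_WRITTEN set has that bit flipped off by the
 * inversion and is skipped.
 */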
2278
2279static bool pagemap_scan_is_interesting_vma(unsigned long categories,
2280					    const struct pagemap_scan_private *p)
2281{
2282	unsigned long required = p->arg.category_mask & PAGE_IS_WPALLOWED;
2283
2284	categories ^= p->arg.category_inverted;
2285	if ((categories & required) != required)
2286		return false;
2287
2288	return true;
2289}
2290
2291static int pagemap_scan_test_walk(unsigned long start, unsigned long end,
2292				  struct mm_walk *walk)
2293{
2294	struct pagemap_scan_private *p = walk->private;
2295	struct vm_area_struct *vma = walk->vma;
2296	unsigned long vma_category = 0;
2297	bool wp_allowed = userfaultfd_wp_async(vma) &&
2298	    userfaultfd_wp_use_markers(vma);
2299
2300	if (!wp_allowed) {
2301		/* User requested explicit failure when wp-async is unsupported */
2302		if (p->arg.flags & PM_SCAN_CHECK_WPASYNC)
2303			return -EPERM;
2304		/*
2305		 * User requires wr-protect and allows unsupported vmas to be
2306		 * skipped silently.
2307		 */
2308		if (p->arg.flags & PM_SCAN_WP_MATCHING)
2309			return 1;
2310		/*
2311		 * Otherwise the request doesn't involve wr-protects at all;
2312		 * fall through to the remaining checks and allow the vma walk.
2313		 */
2314	}
2315
2316	if (vma->vm_flags & VM_PFNMAP)
2317		return 1;
2318
2319	if (wp_allowed)
2320		vma_category |= PAGE_IS_WPALLOWED;
2321
2322	if (vma->vm_flags & VM_SOFTDIRTY)
2323		vma_category |= PAGE_IS_SOFT_DIRTY;
2324
2325	if (!pagemap_scan_is_interesting_vma(vma_category, p))
2326		return 1;
2327
2328	p->cur_vma_category = vma_category;
2329
2330	return 0;
2331}
2332
2333static bool pagemap_scan_push_range(unsigned long categories,
2334				    struct pagemap_scan_private *p,
2335				    unsigned long addr, unsigned long end)
2336{
2337	struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];
2338
2339	/*
2340	 * When no output buffer is provided at all, the sentinel values won't
2341	 * match here. `cur_buf->end` can only be non-zero when the current
2342	 * region is non-empty.
2343	 */
2344	if (addr == cur_buf->end && categories == cur_buf->categories) {
2345		cur_buf->end = end;
2346		return true;
2347	}
2348
2349	if (cur_buf->end) {
2350		if (p->vec_buf_index >= p->vec_buf_len - 1)
2351			return false;
2352
2353		cur_buf = &p->vec_buf[++p->vec_buf_index];
2354	}
2355
2356	cur_buf->start = addr;
2357	cur_buf->end = end;
2358	cur_buf->categories = categories;
2359
2360	return true;
2361}
2362
2363static int pagemap_scan_output(unsigned long categories,
2364			       struct pagemap_scan_private *p,
2365			       unsigned long addr, unsigned long *end)
2366{
2367	unsigned long n_pages, total_pages;
2368	int ret = 0;
2369
2370	if (!p->vec_buf)
2371		return 0;
2372
2373	categories &= p->arg.return_mask;
2374
2375	n_pages = (*end - addr) / PAGE_SIZE;
2376	if (check_add_overflow(p->found_pages, n_pages, &total_pages) ||
2377	    total_pages > p->arg.max_pages) {
2378		size_t n_too_much = total_pages - p->arg.max_pages;
2379		*end -= n_too_much * PAGE_SIZE;
2380		n_pages -= n_too_much;
2381		ret = -ENOSPC;
2382	}
2383
2384	if (!pagemap_scan_push_range(categories, p, addr, *end)) {
2385		*end = addr;
2386		n_pages = 0;
2387		ret = -ENOSPC;
2388	}
2389
2390	p->found_pages += n_pages;
2391	if (ret)
2392		p->arg.walk_end = *end;
2393
2394	return ret;
2395}
2396
2397static int pagemap_scan_thp_entry(pmd_t *pmd, unsigned long start,
2398				  unsigned long end, struct mm_walk *walk)
2399{
2400#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2401	struct pagemap_scan_private *p = walk->private;
2402	struct vm_area_struct *vma = walk->vma;
2403	unsigned long categories;
2404	spinlock_t *ptl;
2405	int ret = 0;
2406
2407	ptl = pmd_trans_huge_lock(pmd, vma);
2408	if (!ptl)
2409		return -ENOENT;
2410
2411	categories = p->cur_vma_category |
2412		     pagemap_thp_category(p, vma, start, *pmd);
2413
2414	if (!pagemap_scan_is_interesting_page(categories, p))
2415		goto out_unlock;
2416
2417	ret = pagemap_scan_output(categories, p, start, &end);
2418	if (start == end)
2419		goto out_unlock;
2420
2421	if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2422		goto out_unlock;
2423	if (~categories & PAGE_IS_WRITTEN)
2424		goto out_unlock;
2425
2426	/*
2427	 * Break huge page into small pages if the WP operation
2428	 * needs to be performed on a portion of the huge page.
2429	 */
2430	if (end != start + HPAGE_SIZE) {
2431		spin_unlock(ptl);
2432		split_huge_pmd(vma, pmd, start);
2433		pagemap_scan_backout_range(p, start, end);
2434		/* Report as if there was no THP */
2435		return -ENOENT;
2436	}
2437
2438	make_uffd_wp_pmd(vma, start, pmd);
2439	flush_tlb_range(vma, start, end);
2440out_unlock:
2441	spin_unlock(ptl);
2442	return ret;
2443#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
2444	return -ENOENT;
2445#endif
2446}
2447
2448static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
2449				  unsigned long end, struct mm_walk *walk)
2450{
2451	struct pagemap_scan_private *p = walk->private;
2452	struct vm_area_struct *vma = walk->vma;
2453	unsigned long addr, flush_end = 0;
2454	pte_t *pte, *start_pte;
2455	spinlock_t *ptl;
2456	int ret;
2457
2458	arch_enter_lazy_mmu_mode();
2459
2460	ret = pagemap_scan_thp_entry(pmd, start, end, walk);
2461	if (ret != -ENOENT) {
2462		arch_leave_lazy_mmu_mode();
2463		return ret;
2464	}
2465
2466	ret = 0;
2467	start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
2468	if (!pte) {
2469		arch_leave_lazy_mmu_mode();
2470		walk->action = ACTION_AGAIN;
2471		return 0;
2472	}
2473
2474	if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) {
2475		/* Fast path for performing exclusive WP */
2476		for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
2477			pte_t ptent = ptep_get(pte);
2478
2479			if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
2480			    pte_swp_uffd_wp_any(ptent))
2481				continue;
2482			make_uffd_wp_pte(vma, addr, pte, ptent);
2483			if (!flush_end)
2484				start = addr;
2485			flush_end = addr + PAGE_SIZE;
2486		}
2487		goto flush_and_return;
2488	}
2489
2490	if (!p->arg.category_anyof_mask && !p->arg.category_inverted &&
2491	    p->arg.category_mask == PAGE_IS_WRITTEN &&
2492	    p->arg.return_mask == PAGE_IS_WRITTEN) {
2493		for (addr = start; addr < end; pte++, addr += PAGE_SIZE) {
2494			unsigned long next = addr + PAGE_SIZE;
2495			pte_t ptent = ptep_get(pte);
2496
2497			if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
2498			    pte_swp_uffd_wp_any(ptent))
2499				continue;
2500			ret = pagemap_scan_output(p->cur_vma_category | PAGE_IS_WRITTEN,
2501						  p, addr, &next);
2502			if (next == addr)
2503				break;
2504			if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2505				continue;
2506			make_uffd_wp_pte(vma, addr, pte, ptent);
2507			if (!flush_end)
2508				start = addr;
2509			flush_end = next;
2510		}
2511		goto flush_and_return;
2512	}
2513
2514	for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
2515		pte_t ptent = ptep_get(pte);
2516		unsigned long categories = p->cur_vma_category |
2517					   pagemap_page_category(p, vma, addr, ptent);
2518		unsigned long next = addr + PAGE_SIZE;
2519
2520		if (!pagemap_scan_is_interesting_page(categories, p))
2521			continue;
2522
2523		ret = pagemap_scan_output(categories, p, addr, &next);
2524		if (next == addr)
2525			break;
2526
2527		if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2528			continue;
2529		if (~categories & PAGE_IS_WRITTEN)
2530			continue;
2531
2532		make_uffd_wp_pte(vma, addr, pte, ptent);
2533		if (!flush_end)
2534			start = addr;
2535		flush_end = next;
2536	}
2537
2538flush_and_return:
2539	if (flush_end)
2540		flush_tlb_range(vma, start, addr);
2541
2542	pte_unmap_unlock(start_pte, ptl);
2543	arch_leave_lazy_mmu_mode();
2544
2545	cond_resched();
2546	return ret;
2547}
2548
2549#ifdef CONFIG_HUGETLB_PAGE
2550static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask,
2551				      unsigned long start, unsigned long end,
2552				      struct mm_walk *walk)
2553{
2554	struct pagemap_scan_private *p = walk->private;
2555	struct vm_area_struct *vma = walk->vma;
2556	unsigned long categories;
2557	spinlock_t *ptl;
2558	int ret = 0;
2559	pte_t pte;
2560
2561	if (~p->arg.flags & PM_SCAN_WP_MATCHING) {
2562		/* Go the short route when not write-protecting pages. */
2563
2564		pte = huge_ptep_get(walk->mm, start, ptep);
2565		categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
2566
2567		if (!pagemap_scan_is_interesting_page(categories, p))
2568			return 0;
2569
2570		return pagemap_scan_output(categories, p, start, &end);
2571	}
2572
2573	i_mmap_lock_write(vma->vm_file->f_mapping);
2574	ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, ptep);
2575
2576	pte = huge_ptep_get(walk->mm, start, ptep);
2577	categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
2578
2579	if (!pagemap_scan_is_interesting_page(categories, p))
2580		goto out_unlock;
2581
2582	ret = pagemap_scan_output(categories, p, start, &end);
2583	if (start == end)
2584		goto out_unlock;
2585
2586	if (~categories & PAGE_IS_WRITTEN)
2587		goto out_unlock;
2588
2589	if (end != start + HPAGE_SIZE) {
2590		/* Partial HugeTLB page WP isn't possible. */
2591		pagemap_scan_backout_range(p, start, end);
2592		p->arg.walk_end = start;
2593		ret = 0;
2594		goto out_unlock;
2595	}
2596
2597	make_uffd_wp_huge_pte(vma, start, ptep, pte);
2598	flush_hugetlb_tlb_range(vma, start, end);
2599
2600out_unlock:
2601	spin_unlock(ptl);
2602	i_mmap_unlock_write(vma->vm_file->f_mapping);
2603
2604	return ret;
2605}
2606#else
2607#define pagemap_scan_hugetlb_entry NULL
2608#endif
2609
2610static int pagemap_scan_pte_hole(unsigned long addr, unsigned long end,
2611				 int depth, struct mm_walk *walk)
2612{
2613	struct pagemap_scan_private *p = walk->private;
2614	struct vm_area_struct *vma = walk->vma;
2615	int ret, err;
2616
2617	if (!vma || !pagemap_scan_is_interesting_page(p->cur_vma_category, p))
2618		return 0;
2619
2620	ret = pagemap_scan_output(p->cur_vma_category, p, addr, &end);
2621	if (addr == end)
2622		return ret;
2623
2624	if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2625		return ret;
2626
2627	err = uffd_wp_range(vma, addr, end - addr, true);
2628	if (err < 0)
2629		ret = err;
2630
2631	return ret;
2632}
2633
2634static const struct mm_walk_ops pagemap_scan_ops = {
2635	.test_walk = pagemap_scan_test_walk,
2636	.pmd_entry = pagemap_scan_pmd_entry,
2637	.pte_hole = pagemap_scan_pte_hole,
2638	.hugetlb_entry = pagemap_scan_hugetlb_entry,
2639};
2640
2641static int pagemap_scan_get_args(struct pm_scan_arg *arg,
2642				 unsigned long uarg)
2643{
2644	if (copy_from_user(arg, (void __user *)uarg, sizeof(*arg)))
2645		return -EFAULT;
2646
2647	if (arg->size != sizeof(struct pm_scan_arg))
2648		return -EINVAL;
2649
2650	/* Validate requested features */
2651	if (arg->flags & ~PM_SCAN_FLAGS)
2652		return -EINVAL;
2653	if ((arg->category_inverted | arg->category_mask |
2654	     arg->category_anyof_mask | arg->return_mask) & ~PM_SCAN_CATEGORIES)
2655		return -EINVAL;
2656
2657	arg->start = untagged_addr((unsigned long)arg->start);
2658	arg->end = untagged_addr((unsigned long)arg->end);
2659	arg->vec = untagged_addr((unsigned long)arg->vec);
2660
2661	/* Validate memory pointers */
2662	if (!IS_ALIGNED(arg->start, PAGE_SIZE))
2663		return -EINVAL;
2664	if (!access_ok((void __user *)(long)arg->start, arg->end - arg->start))
2665		return -EFAULT;
2666	if (!arg->vec && arg->vec_len)
2667		return -EINVAL;
2668	if (UINT_MAX == SIZE_MAX && arg->vec_len > SIZE_MAX)
2669		return -EINVAL;
2670	if (arg->vec && !access_ok((void __user *)(long)arg->vec,
2671				   size_mul(arg->vec_len, sizeof(struct page_region))))
2672		return -EFAULT;
2673
2674	/* Fixup default values */
2675	arg->end = ALIGN(arg->end, PAGE_SIZE);
2676	arg->walk_end = 0;
2677	if (!arg->max_pages)
2678		arg->max_pages = ULONG_MAX;
2679
2680	return 0;
2681}
2682
2683static int pagemap_scan_writeback_args(struct pm_scan_arg *arg,
2684				       unsigned long uargl)
2685{
2686	struct pm_scan_arg __user *uarg	= (void __user *)uargl;
2687
2688	if (copy_to_user(&uarg->walk_end, &arg->walk_end, sizeof(arg->walk_end)))
2689		return -EFAULT;
2690
2691	return 0;
2692}
2693
2694static int pagemap_scan_init_bounce_buffer(struct pagemap_scan_private *p)
2695{
2696	if (!p->arg.vec_len)
2697		return 0;
2698
2699	p->vec_buf_len = min_t(size_t, PAGEMAP_WALK_SIZE >> PAGE_SHIFT,
2700			       p->arg.vec_len);
2701	p->vec_buf = kmalloc_array(p->vec_buf_len, sizeof(*p->vec_buf),
2702				   GFP_KERNEL);
2703	if (!p->vec_buf)
2704		return -ENOMEM;
2705
2706	p->vec_buf->start = p->vec_buf->end = 0;
2707	p->vec_out = (struct page_region __user *)(long)p->arg.vec;
2708
2709	return 0;
2710}
2711
2712static long pagemap_scan_flush_buffer(struct pagemap_scan_private *p)
2713{
2714	const struct page_region *buf = p->vec_buf;
2715	long n = p->vec_buf_index;
2716
2717	if (!p->vec_buf)
2718		return 0;
2719
2720	if (buf[n].end != buf[n].start)
2721		n++;
2722
2723	if (!n)
2724		return 0;
2725
2726	if (copy_to_user(p->vec_out, buf, n * sizeof(*buf)))
2727		return -EFAULT;
2728
2729	p->arg.vec_len -= n;
2730	p->vec_out += n;
2731
2732	p->vec_buf_index = 0;
2733	p->vec_buf_len = min_t(size_t, p->vec_buf_len, p->arg.vec_len);
2734	p->vec_buf->start = p->vec_buf->end = 0;
2735
2736	return n;
2737}
2738
2739static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
2740{
2741	struct pagemap_scan_private p = {0};
2742	unsigned long walk_start;
2743	size_t n_ranges_out = 0;
2744	int ret;
2745
2746	ret = pagemap_scan_get_args(&p.arg, uarg);
2747	if (ret)
2748		return ret;
2749
2750	p.masks_of_interest = p.arg.category_mask | p.arg.category_anyof_mask |
2751			      p.arg.return_mask;
2752	ret = pagemap_scan_init_bounce_buffer(&p);
2753	if (ret)
2754		return ret;
2755
2756	for (walk_start = p.arg.start; walk_start < p.arg.end;
2757			walk_start = p.arg.walk_end) {
2758		struct mmu_notifier_range range;
2759		long n_out;
2760
2761		if (fatal_signal_pending(current)) {
2762			ret = -EINTR;
2763			break;
2764		}
2765
2766		ret = mmap_read_lock_killable(mm);
2767		if (ret)
2768			break;
2769
2770		/* Protection change for the range is going to happen. */
2771		if (p.arg.flags & PM_SCAN_WP_MATCHING) {
2772			mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
2773						mm, walk_start, p.arg.end);
2774			mmu_notifier_invalidate_range_start(&range);
2775		}
2776
2777		ret = walk_page_range(mm, walk_start, p.arg.end,
2778				      &pagemap_scan_ops, &p);
2779
2780		if (p.arg.flags & PM_SCAN_WP_MATCHING)
2781			mmu_notifier_invalidate_range_end(&range);
2782
2783		mmap_read_unlock(mm);
2784
2785		n_out = pagemap_scan_flush_buffer(&p);
2786		if (n_out < 0)
2787			ret = n_out;
2788		else
2789			n_ranges_out += n_out;
2790
2791		if (ret != -ENOSPC)
2792			break;
2793
2794		if (p.arg.vec_len == 0 || p.found_pages == p.arg.max_pages)
2795			break;
2796	}
2797
2798	/* ENOSPC signifies early stop (buffer full) from the walk. */
2799	if (!ret || ret == -ENOSPC)
2800		ret = n_ranges_out;
2801
2802	/* The walk_end isn't set when ret is zero */
2803	if (!p.arg.walk_end)
2804		p.arg.walk_end = p.arg.end;
2805	if (pagemap_scan_writeback_args(&p.arg, uarg))
2806		ret = -EFAULT;
2807
2808	kfree(p.vec_buf);
2809	return ret;
2810}
2811
2812static long do_pagemap_cmd(struct file *file, unsigned int cmd,
2813			   unsigned long arg)
2814{
2815	struct mm_struct *mm = file->private_data;
2816
2817	switch (cmd) {
2818	case PAGEMAP_SCAN:
2819		return do_pagemap_scan(mm, arg);
2820
2821	default:
2822		return -EINVAL;
2823	}
2824}
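/*
 * Illustrative userspace sketch (not part of this file): using the
 * PAGEMAP_SCAN ioctl handled above to list which pages of a range are
 * reported as written.  It assumes the uapi definitions of PAGEMAP_SCAN,
 * struct pm_scan_arg and struct page_region from <linux/fs.h>.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	static int scan_written(int pagemap_fd, void *start, size_t len)
 *	{
 *		struct page_region regions[32];
 *		struct pm_scan_arg arg;
 *		int n, i;
 *
 *		memset(&arg, 0, sizeof(arg));
 *		arg.size = sizeof(arg);
 *		arg.start = (unsigned long)start;
 *		arg.end = (unsigned long)start + len;
 *		arg.vec = (unsigned long)regions;
 *		arg.vec_len = 32;
 *		arg.category_mask = PAGE_IS_WRITTEN;
 *		arg.return_mask = PAGE_IS_WRITTEN;
 *
 *		n = ioctl(pagemap_fd, PAGEMAP_SCAN, &arg);
 *		if (n < 0)
 *			return -1;
 *		for (i = 0; i < n; i++)
 *			printf("written: %llx-%llx\n",
 *			       (unsigned long long)regions[i].start,
 *			       (unsigned long long)regions[i].end);
 *		return n;
 *	}
 *
 * The fd is the /proc/<pid>/pagemap file opened above; the return value
 * counts the ranges written to regions[], matching n_ranges_out in
 * do_pagemap_scan().  Setting PM_SCAN_WP_MATCHING additionally resets
 * the written state, which requires a wp-async userfaultfd on the VMA
 * (see pagemap_scan_test_walk()).
 */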
2825
2826const struct file_operations proc_pagemap_operations = {
2827	.llseek		= mem_lseek, /* borrow this */
2828	.read		= pagemap_read,
2829	.open		= pagemap_open,
2830	.release	= pagemap_release,
2831	.unlocked_ioctl = do_pagemap_cmd,
2832	.compat_ioctl	= do_pagemap_cmd,
2833};
2834#endif /* CONFIG_PROC_PAGE_MONITOR */
2835
2836#ifdef CONFIG_NUMA
2837
2838struct numa_maps {
2839	unsigned long pages;
2840	unsigned long anon;
2841	unsigned long active;
2842	unsigned long writeback;
2843	unsigned long mapcount_max;
2844	unsigned long dirty;
2845	unsigned long swapcache;
2846	unsigned long node[MAX_NUMNODES];
2847};
2848
2849struct numa_maps_private {
2850	struct proc_maps_private proc_maps;
2851	struct numa_maps md;
2852};
2853
2854static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
2855			unsigned long nr_pages)
2856{
2857	struct folio *folio = page_folio(page);
2858	int count = folio_precise_page_mapcount(folio, page);
2859
2860	md->pages += nr_pages;
2861	if (pte_dirty || folio_test_dirty(folio))
2862		md->dirty += nr_pages;
2863
2864	if (folio_test_swapcache(folio))
2865		md->swapcache += nr_pages;
2866
2867	if (folio_test_active(folio) || folio_test_unevictable(folio))
2868		md->active += nr_pages;
2869
2870	if (folio_test_writeback(folio))
2871		md->writeback += nr_pages;
2872
2873	if (folio_test_anon(folio))
2874		md->anon += nr_pages;
2875
2876	if (count > md->mapcount_max)
2877		md->mapcount_max = count;
2878
2879	md->node[folio_nid(folio)] += nr_pages;
2880}
2881
2882static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
2883		unsigned long addr)
2884{
2885	struct page *page;
2886	int nid;
2887
2888	if (!pte_present(pte))
2889		return NULL;
2890
2891	page = vm_normal_page(vma, addr, pte);
2892	if (!page || is_zone_device_page(page))
2893		return NULL;
2894
2895	if (PageReserved(page))
2896		return NULL;
2897
2898	nid = page_to_nid(page);
2899	if (!node_isset(nid, node_states[N_MEMORY]))
2900		return NULL;
2901
2902	return page;
2903}
2904
2905#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2906static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
2907					      struct vm_area_struct *vma,
2908					      unsigned long addr)
2909{
2910	struct page *page;
2911	int nid;
2912
2913	if (!pmd_present(pmd))
2914		return NULL;
2915
2916	page = vm_normal_page_pmd(vma, addr, pmd);
2917	if (!page)
2918		return NULL;
2919
2920	if (PageReserved(page))
2921		return NULL;
2922
2923	nid = page_to_nid(page);
2924	if (!node_isset(nid, node_states[N_MEMORY]))
2925		return NULL;
2926
2927	return page;
2928}
2929#endif
2930
2931static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
2932		unsigned long end, struct mm_walk *walk)
2933{
2934	struct numa_maps *md = walk->private;
2935	struct vm_area_struct *vma = walk->vma;
2936	spinlock_t *ptl;
2937	pte_t *orig_pte;
2938	pte_t *pte;
2939
2940#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2941	ptl = pmd_trans_huge_lock(pmd, vma);
2942	if (ptl) {
2943		struct page *page;
2944
2945		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
2946		if (page)
2947			gather_stats(page, md, pmd_dirty(*pmd),
2948				     HPAGE_PMD_SIZE/PAGE_SIZE);
2949		spin_unlock(ptl);
2950		return 0;
2951	}
2952#endif
2953	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
2954	if (!pte) {
2955		walk->action = ACTION_AGAIN;
2956		return 0;
2957	}
2958	do {
2959		pte_t ptent = ptep_get(pte);
2960		struct page *page = can_gather_numa_stats(ptent, vma, addr);
2961		if (!page)
2962			continue;
2963		gather_stats(page, md, pte_dirty(ptent), 1);
2964
2965	} while (pte++, addr += PAGE_SIZE, addr != end);
2966	pte_unmap_unlock(orig_pte, ptl);
2967	cond_resched();
2968	return 0;
2969}
2970#ifdef CONFIG_HUGETLB_PAGE
2971static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
2972		unsigned long addr, unsigned long end, struct mm_walk *walk)
2973{
2974	pte_t huge_pte = huge_ptep_get(walk->mm, addr, pte);
2975	struct numa_maps *md;
2976	struct page *page;
2977
2978	if (!pte_present(huge_pte))
2979		return 0;
2980
2981	page = pte_page(huge_pte);
2982
2983	md = walk->private;
2984	gather_stats(page, md, pte_dirty(huge_pte), 1);
2985	return 0;
2986}
2987
2988#else
2989static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
2990		unsigned long addr, unsigned long end, struct mm_walk *walk)
2991{
2992	return 0;
2993}
2994#endif
2995
2996static const struct mm_walk_ops show_numa_ops = {
2997	.hugetlb_entry = gather_hugetlb_stats,
2998	.pmd_entry = gather_pte_stats,
2999	.walk_lock = PGWALK_RDLOCK,
3000};
3001
3002/*
3003 * Display pages allocated per node and memory policy via /proc.
3004 */
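/*
 * For example, one line emitted below for a file-backed mapping may look
 * like (values illustrative):
 *
 *	7f3c40000000 default file=/usr/lib/libc.so.6 mapped=12 mapmax=3 N0=4 N1=8 kernelpagesize_kB=4
 *
 * i.e. start address, mempolicy string, backing object, the non-zero
 * counters, the per-node page counts and the base page size used.
 */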
3005static int show_numa_map(struct seq_file *m, void *v)
3006{
3007	struct numa_maps_private *numa_priv = m->private;
3008	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
3009	struct vm_area_struct *vma = v;
3010	struct numa_maps *md = &numa_priv->md;
3011	struct file *file = vma->vm_file;
3012	struct mm_struct *mm = vma->vm_mm;
3013	char buffer[64];
3014	struct mempolicy *pol;
3015	pgoff_t ilx;
3016	int nid;
3017
3018	if (!mm)
3019		return 0;
3020
3021	/* Ensure we start with an empty set of numa_maps statistics. */
3022	memset(md, 0, sizeof(*md));
3023
3024	pol = __get_vma_policy(vma, vma->vm_start, &ilx);
3025	if (pol) {
3026		mpol_to_str(buffer, sizeof(buffer), pol);
3027		mpol_cond_put(pol);
3028	} else {
3029		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
3030	}
3031
3032	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
3033
3034	if (file) {
3035		seq_puts(m, " file=");
3036		seq_path(m, file_user_path(file), "\n\t= ");
3037	} else if (vma_is_initial_heap(vma)) {
3038		seq_puts(m, " heap");
3039	} else if (vma_is_initial_stack(vma)) {
3040		seq_puts(m, " stack");
3041	}
3042
3043	if (is_vm_hugetlb_page(vma))
3044		seq_puts(m, " huge");
3045
3046	/* mmap_lock is held by m_start */
3047	walk_page_vma(vma, &show_numa_ops, md);
3048
3049	if (!md->pages)
3050		goto out;
3051
3052	if (md->anon)
3053		seq_printf(m, " anon=%lu", md->anon);
3054
3055	if (md->dirty)
3056		seq_printf(m, " dirty=%lu", md->dirty);
3057
3058	if (md->pages != md->anon && md->pages != md->dirty)
3059		seq_printf(m, " mapped=%lu", md->pages);
3060
3061	if (md->mapcount_max > 1)
3062		seq_printf(m, " mapmax=%lu", md->mapcount_max);
3063
3064	if (md->swapcache)
3065		seq_printf(m, " swapcache=%lu", md->swapcache);
3066
3067	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
3068		seq_printf(m, " active=%lu", md->active);
3069
3070	if (md->writeback)
3071		seq_printf(m, " writeback=%lu", md->writeback);
3072
3073	for_each_node_state(nid, N_MEMORY)
3074		if (md->node[nid])
3075			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
3076
3077	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
3078out:
3079	seq_putc(m, '\n');
3080	return 0;
3081}
3082
3083static const struct seq_operations proc_pid_numa_maps_op = {
3084	.start  = m_start,
3085	.next   = m_next,
3086	.stop   = m_stop,
3087	.show   = show_numa_map,
3088};
3089
3090static int pid_numa_maps_open(struct inode *inode, struct file *file)
3091{
3092	return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
3093				sizeof(struct numa_maps_private));
3094}
3095
3096const struct file_operations proc_pid_numa_maps_operations = {
3097	.open		= pid_numa_maps_open,
3098	.read		= seq_read,
3099	.llseek		= seq_lseek,
3100	.release	= proc_map_release,
3101};
3102
3103#endif /* CONFIG_NUMA */