v5.9 (fs/proc/task_mmu.c)
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/pagewalk.h>
   3#include <linux/vmacache.h>
   4#include <linux/hugetlb.h>
   5#include <linux/huge_mm.h>
   6#include <linux/mount.h>
   7#include <linux/seq_file.h>
   8#include <linux/highmem.h>
   9#include <linux/ptrace.h>
  10#include <linux/slab.h>
  11#include <linux/pagemap.h>
  12#include <linux/mempolicy.h>
  13#include <linux/rmap.h>
  14#include <linux/swap.h>
  15#include <linux/sched/mm.h>
  16#include <linux/swapops.h>
  17#include <linux/mmu_notifier.h>
  18#include <linux/page_idle.h>
  19#include <linux/shmem_fs.h>
  20#include <linux/uaccess.h>
  21#include <linux/pkeys.h>
  22
  23#include <asm/elf.h>
  24#include <asm/tlb.h>
  25#include <asm/tlbflush.h>
  26#include "internal.h"
  27
  28#define SEQ_PUT_DEC(str, val) \
  29		seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
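/*
 * With 4 KiB pages (PAGE_SHIFT == 12) the shift in SEQ_PUT_DEC is 2,
 * so a page count converts to kB by a multiply-by-4 rather than a
 * division: e.g. 25 pages are printed as 100 kB.
 */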
  30void task_mem(struct seq_file *m, struct mm_struct *mm)
  31{
  32	unsigned long text, lib, swap, anon, file, shmem;
  33	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
  34
  35	anon = get_mm_counter(mm, MM_ANONPAGES);
  36	file = get_mm_counter(mm, MM_FILEPAGES);
  37	shmem = get_mm_counter(mm, MM_SHMEMPAGES);
  38
  39	/*
  40	 * Note: to minimize their overhead, mm maintains hiwater_vm and
  41	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
  42	 * collector of these hiwater stats must therefore get total_vm
  43	 * and rss too, which will usually be the higher.  Barriers? not
  44	 * worth the effort, such snapshots can always be inconsistent.
  45	 */
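/*
 * Worked example: if munmap() is about to drop total_vm from 300 to
 * 200 pages, hiwater_vm is bumped to 300 first, so VmPeak keeps
 * reporting the old maximum while VmSize reports 200.
 */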
  46	hiwater_vm = total_vm = mm->total_vm;
  47	if (hiwater_vm < mm->hiwater_vm)
  48		hiwater_vm = mm->hiwater_vm;
  49	hiwater_rss = total_rss = anon + file + shmem;
  50	if (hiwater_rss < mm->hiwater_rss)
  51		hiwater_rss = mm->hiwater_rss;
  52
  53	/* split executable areas between text and lib */
  54	text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
  55	text = min(text, mm->exec_vm << PAGE_SHIFT);
  56	lib = (mm->exec_vm << PAGE_SHIFT) - text;
  57
  58	swap = get_mm_counter(mm, MM_SWAPENTS);
  59	SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
  60	SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
  61	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
  62	SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
  63	SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
  64	SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
  65	SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
  66	SEQ_PUT_DEC(" kB\nRssFile:\t", file);
  67	SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
  68	SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
  69	SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
  70	seq_put_decimal_ull_width(m,
  71		    " kB\nVmExe:\t", text >> 10, 8);
  72	seq_put_decimal_ull_width(m,
  73		    " kB\nVmLib:\t", lib >> 10, 8);
  74	seq_put_decimal_ull_width(m,
  75		    " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
  76	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
  77	seq_puts(m, " kB\n");
  78	hugetlb_report_usage(m, mm);
  79}
  80#undef SEQ_PUT_DEC
  81
  82unsigned long task_vsize(struct mm_struct *mm)
  83{
  84	return PAGE_SIZE * mm->total_vm;
  85}
  86
  87unsigned long task_statm(struct mm_struct *mm,
  88			 unsigned long *shared, unsigned long *text,
  89			 unsigned long *data, unsigned long *resident)
  90{
  91	*shared = get_mm_counter(mm, MM_FILEPAGES) +
  92			get_mm_counter(mm, MM_SHMEMPAGES);
  93	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
  94								>> PAGE_SHIFT;
  95	*data = mm->data_vm + mm->stack_vm;
  96	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
  97	return mm->total_vm;
  98}
  99
 100#ifdef CONFIG_NUMA
 101/*
 102 * Save get_task_policy() for show_numa_map().
 103 */
 104static void hold_task_mempolicy(struct proc_maps_private *priv)
 105{
 106	struct task_struct *task = priv->task;
 107
 108	task_lock(task);
 109	priv->task_mempolicy = get_task_policy(task);
 110	mpol_get(priv->task_mempolicy);
 111	task_unlock(task);
 112}
 113static void release_task_mempolicy(struct proc_maps_private *priv)
 114{
 115	mpol_put(priv->task_mempolicy);
 116}
 117#else
 118static void hold_task_mempolicy(struct proc_maps_private *priv)
 119{
 120}
 121static void release_task_mempolicy(struct proc_maps_private *priv)
 122{
 123}
 124#endif
 125
 126static void *m_start(struct seq_file *m, loff_t *ppos)
 127{
 128	struct proc_maps_private *priv = m->private;
 129	unsigned long last_addr = *ppos;
 130	struct mm_struct *mm;
 131	struct vm_area_struct *vma;
 132
 133	/* See m_next(). Zero at the start or after lseek. */
 134	if (last_addr == -1UL)
 135		return NULL;
 136
 137	priv->task = get_proc_task(priv->inode);
 138	if (!priv->task)
 139		return ERR_PTR(-ESRCH);
 140
 141	mm = priv->mm;
 142	if (!mm || !mmget_not_zero(mm)) {
 143		put_task_struct(priv->task);
 144		priv->task = NULL;
 145		return NULL;
 146	}
 147
 148	if (mmap_read_lock_killable(mm)) {
 149		mmput(mm);
 150		put_task_struct(priv->task);
 151		priv->task = NULL;
 152		return ERR_PTR(-EINTR);
 153	}
 154
 
 155	hold_task_mempolicy(priv);
 156	priv->tail_vma = get_gate_vma(mm);
 157
 158	vma = find_vma(mm, last_addr);
 159	if (vma)
 160		return vma;
 161
 162	return priv->tail_vma;
 163}
 164
 165static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
 166{
 167	struct proc_maps_private *priv = m->private;
 168	struct vm_area_struct *next, *vma = v;
 169
 170	if (vma == priv->tail_vma)
 171		next = NULL;
 172	else if (vma->vm_next)
 173		next = vma->vm_next;
 174	else
 175		next = priv->tail_vma;
 176
 177	*ppos = next ? next->vm_start : -1UL;
 178
 179	return next;
 180}
 181
 182static void m_stop(struct seq_file *m, void *v)
 183{
 184	struct proc_maps_private *priv = m->private;
 185	struct mm_struct *mm = priv->mm;
 186
 187	if (!priv->task)
 188		return;
 189
 190	release_task_mempolicy(priv);
 191	mmap_read_unlock(mm);
 192	mmput(mm);
 193	put_task_struct(priv->task);
 194	priv->task = NULL;
 195}
 196
 197static int proc_maps_open(struct inode *inode, struct file *file,
 198			const struct seq_operations *ops, int psize)
 199{
 200	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
 201
 202	if (!priv)
 203		return -ENOMEM;
 204
 205	priv->inode = inode;
 206	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
 207	if (IS_ERR(priv->mm)) {
 208		int err = PTR_ERR(priv->mm);
 209
 210		seq_release_private(inode, file);
 211		return err;
 212	}
 213
 214	return 0;
 215}
 216
 217static int proc_map_release(struct inode *inode, struct file *file)
 218{
 219	struct seq_file *seq = file->private_data;
 220	struct proc_maps_private *priv = seq->private;
 221
 222	if (priv->mm)
 223		mmdrop(priv->mm);
 224
 225	return seq_release_private(inode, file);
 226}
 227
 228static int do_maps_open(struct inode *inode, struct file *file,
 229			const struct seq_operations *ops)
 230{
 231	return proc_maps_open(inode, file, ops,
 232				sizeof(struct proc_maps_private));
 233}
 234
 235/*
 236 * Indicate if the VMA is a stack for the given task; for
 237 * /proc/PID/maps that is the stack of the main task.
 238 */
 239static int is_stack(struct vm_area_struct *vma)
 240{
 241	/*
 242	 * We make no effort to guess what a given thread considers to be
 243	 * its "stack".  It's not even well-defined for programs written
  244	 * in languages like Go.
 245	 */
 246	return vma->vm_start <= vma->vm_mm->start_stack &&
 247		vma->vm_end >= vma->vm_mm->start_stack;
 248}
 249
 250static void show_vma_header_prefix(struct seq_file *m,
 251				   unsigned long start, unsigned long end,
 252				   vm_flags_t flags, unsigned long long pgoff,
 253				   dev_t dev, unsigned long ino)
 254{
 255	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 256	seq_put_hex_ll(m, NULL, start, 8);
 257	seq_put_hex_ll(m, "-", end, 8);
 258	seq_putc(m, ' ');
 259	seq_putc(m, flags & VM_READ ? 'r' : '-');
 260	seq_putc(m, flags & VM_WRITE ? 'w' : '-');
 261	seq_putc(m, flags & VM_EXEC ? 'x' : '-');
 262	seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
 263	seq_put_hex_ll(m, " ", pgoff, 8);
 264	seq_put_hex_ll(m, " ", MAJOR(dev), 2);
 265	seq_put_hex_ll(m, ":", MINOR(dev), 2);
 266	seq_put_decimal_ull(m, " ", ino);
 267	seq_putc(m, ' ');
 268}
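/*
 * The prefix built above is the familiar /proc/PID/maps header, e.g.
 * (illustrative):
 *   08048000-08049000 r-xp 00000000 03:00 8312
 * i.e. start-end, permission bits, file offset, major:minor, inode.
 */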
 269
 270static void
 271show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 272{
 273	struct mm_struct *mm = vma->vm_mm;
 274	struct file *file = vma->vm_file;
 275	vm_flags_t flags = vma->vm_flags;
 276	unsigned long ino = 0;
 277	unsigned long long pgoff = 0;
 278	unsigned long start, end;
 279	dev_t dev = 0;
 280	const char *name = NULL;
 281
 282	if (file) {
 283		struct inode *inode = file_inode(vma->vm_file);
 284		dev = inode->i_sb->s_dev;
 285		ino = inode->i_ino;
 286		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
 287	}
 288
 289	start = vma->vm_start;
 290	end = vma->vm_end;
 291	show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
 292
 293	/*
 294	 * Print the dentry name for named mappings, and a
 295	 * special [heap] marker for the heap:
 296	 */
 297	if (file) {
 298		seq_pad(m, ' ');
 299		seq_file_path(m, file, "\n");
 300		goto done;
 301	}
 302
 303	if (vma->vm_ops && vma->vm_ops->name) {
 304		name = vma->vm_ops->name(vma);
 305		if (name)
 306			goto done;
 307	}
 308
 309	name = arch_vma_name(vma);
 310	if (!name) {
 311		if (!mm) {
 312			name = "[vdso]";
 313			goto done;
 314		}
 315
 316		if (vma->vm_start <= mm->brk &&
 317		    vma->vm_end >= mm->start_brk) {
 318			name = "[heap]";
 319			goto done;
 320		}
 321
 322		if (is_stack(vma))
 323			name = "[stack]";
 324	}
 325
 326done:
 327	if (name) {
 328		seq_pad(m, ' ');
 329		seq_puts(m, name);
 330	}
 331	seq_putc(m, '\n');
 332}
 333
 334static int show_map(struct seq_file *m, void *v)
 335{
 336	show_map_vma(m, v);
 337	return 0;
 338}
 339
 340static const struct seq_operations proc_pid_maps_op = {
 341	.start	= m_start,
 342	.next	= m_next,
 343	.stop	= m_stop,
 344	.show	= show_map
 345};
 346
 347static int pid_maps_open(struct inode *inode, struct file *file)
 348{
 349	return do_maps_open(inode, file, &proc_pid_maps_op);
 350}
 351
 352const struct file_operations proc_pid_maps_operations = {
 353	.open		= pid_maps_open,
 354	.read		= seq_read,
 355	.llseek		= seq_lseek,
 356	.release	= proc_map_release,
 357};
 358
 359/*
  360 * Proportional Set Size (PSS): my share of RSS.
 361 *
 362 * PSS of a process is the count of pages it has in memory, where each
 363 * page is divided by the number of processes sharing it.  So if a
 364 * process has 1000 pages all to itself, and 1000 shared with one other
 365 * process, its PSS will be 1500.
 366 *
  367 * To keep accumulated division errors low, we adopt a 64-bit
  368 * fixed-point pss counter; (pss >> PSS_SHIFT) is then the real
  369 * byte count.
 370 *
 371 * A shift of 12 before division means (assuming 4K page size):
 372 * 	- 1M 3-user-pages add up to 8KB errors;
 373 * 	- supports mapcount up to 2^24, or 16M;
 374 * 	- supports PSS up to 2^52 bytes, or 4PB.
 375 */
 376#define PSS_SHIFT 12
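/*
 * Worked example: a 4096-byte page shared by three processes adds
 * (4096 << PSS_SHIFT) / 3 = 5592405 fixed-point units to pss, which
 * reads back as 5592405 >> PSS_SHIFT = 1365 bytes, within a byte of
 * the exact third of the page.
 */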
 377
 378#ifdef CONFIG_PROC_PAGE_MONITOR
 379struct mem_size_stats {
 380	unsigned long resident;
 381	unsigned long shared_clean;
 382	unsigned long shared_dirty;
 383	unsigned long private_clean;
 384	unsigned long private_dirty;
 385	unsigned long referenced;
 386	unsigned long anonymous;
 387	unsigned long lazyfree;
 388	unsigned long anonymous_thp;
 389	unsigned long shmem_thp;
 390	unsigned long file_thp;
 391	unsigned long swap;
 392	unsigned long shared_hugetlb;
 393	unsigned long private_hugetlb;
 394	u64 pss;
 395	u64 pss_anon;
 396	u64 pss_file;
 397	u64 pss_shmem;
 398	u64 pss_locked;
 399	u64 swap_pss;
 400	bool check_shmem_swap;
 401};
 402
 403static void smaps_page_accumulate(struct mem_size_stats *mss,
 404		struct page *page, unsigned long size, unsigned long pss,
 405		bool dirty, bool locked, bool private)
 406{
 407	mss->pss += pss;
 408
 409	if (PageAnon(page))
 410		mss->pss_anon += pss;
 411	else if (PageSwapBacked(page))
 412		mss->pss_shmem += pss;
 413	else
 414		mss->pss_file += pss;
 415
 416	if (locked)
 417		mss->pss_locked += pss;
 418
 419	if (dirty || PageDirty(page)) {
 420		if (private)
 421			mss->private_dirty += size;
 422		else
 423			mss->shared_dirty += size;
 424	} else {
 425		if (private)
 426			mss->private_clean += size;
 427		else
 428			mss->shared_clean += size;
 429	}
 430}
 431
 432static void smaps_account(struct mem_size_stats *mss, struct page *page,
 433		bool compound, bool young, bool dirty, bool locked)
 434{
 435	int i, nr = compound ? compound_nr(page) : 1;
 436	unsigned long size = nr * PAGE_SIZE;
 437
 438	/*
 439	 * First accumulate quantities that depend only on |size| and the type
 440	 * of the compound page.
 441	 */
 442	if (PageAnon(page)) {
 443		mss->anonymous += size;
 444		if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
 445			mss->lazyfree += size;
 446	}
 447
 448	mss->resident += size;
 449	/* Accumulate the size in pages that have been accessed. */
 450	if (young || page_is_young(page) || PageReferenced(page))
 451		mss->referenced += size;
 452
 453	/*
 454	 * Then accumulate quantities that may depend on sharing, or that may
 455	 * differ page-by-page.
 456	 *
 457	 * page_count(page) == 1 guarantees the page is mapped exactly once.
  458	 * If any subpage of the compound page were mapped by a PTE, it
  459	 * would elevate page_count().
 460	 */
 461	if (page_count(page) == 1) {
 462		smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
 463			locked, true);
 464		return;
 465	}
 466	for (i = 0; i < nr; i++, page++) {
 467		int mapcount = page_mapcount(page);
 468		unsigned long pss = PAGE_SIZE << PSS_SHIFT;
 469		if (mapcount >= 2)
 470			pss /= mapcount;
 471		smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked,
 472				      mapcount < 2);
 473	}
 474}
 475
 476#ifdef CONFIG_SHMEM
 477static int smaps_pte_hole(unsigned long addr, unsigned long end,
 478			  __always_unused int depth, struct mm_walk *walk)
 479{
 480	struct mem_size_stats *mss = walk->private;
 481
 482	mss->swap += shmem_partial_swap_usage(
 483			walk->vma->vm_file->f_mapping, addr, end);
 484
 485	return 0;
 486}
 487#else
 488#define smaps_pte_hole		NULL
 489#endif /* CONFIG_SHMEM */
 490
 491static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 492		struct mm_walk *walk)
 493{
 494	struct mem_size_stats *mss = walk->private;
 495	struct vm_area_struct *vma = walk->vma;
 496	bool locked = !!(vma->vm_flags & VM_LOCKED);
 497	struct page *page = NULL;
 498
 499	if (pte_present(*pte)) {
 500		page = vm_normal_page(vma, addr, *pte);
 501	} else if (is_swap_pte(*pte)) {
 502		swp_entry_t swpent = pte_to_swp_entry(*pte);
 503
 504		if (!non_swap_entry(swpent)) {
 505			int mapcount;
 506
 507			mss->swap += PAGE_SIZE;
 508			mapcount = swp_swapcount(swpent);
 509			if (mapcount >= 2) {
 510				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
 511
 512				do_div(pss_delta, mapcount);
 513				mss->swap_pss += pss_delta;
 514			} else {
 515				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
 516			}
 517		} else if (is_migration_entry(swpent))
 518			page = migration_entry_to_page(swpent);
 519		else if (is_device_private_entry(swpent))
 520			page = device_private_entry_to_page(swpent);
 521	} else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
 522							&& pte_none(*pte))) {
 523		page = find_get_entry(vma->vm_file->f_mapping,
 524						linear_page_index(vma, addr));
 525		if (!page)
 526			return;
 527
 528		if (xa_is_value(page))
 529			mss->swap += PAGE_SIZE;
 530		else
 531			put_page(page);
 532
 533		return;
 534	}
 535
 536	if (!page)
 537		return;
 538
 539	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
 540}
 541
 542#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 543static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 544		struct mm_walk *walk)
 545{
 546	struct mem_size_stats *mss = walk->private;
 547	struct vm_area_struct *vma = walk->vma;
 548	bool locked = !!(vma->vm_flags & VM_LOCKED);
 549	struct page *page = NULL;
 550
 551	if (pmd_present(*pmd)) {
 552		/* FOLL_DUMP will return -EFAULT on huge zero page */
 553		page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
 554	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
 555		swp_entry_t entry = pmd_to_swp_entry(*pmd);
 556
 557		if (is_migration_entry(entry))
 558			page = migration_entry_to_page(entry);
 559	}
 560	if (IS_ERR_OR_NULL(page))
 561		return;
 562	if (PageAnon(page))
 563		mss->anonymous_thp += HPAGE_PMD_SIZE;
 564	else if (PageSwapBacked(page))
 565		mss->shmem_thp += HPAGE_PMD_SIZE;
 566	else if (is_zone_device_page(page))
 567		/* pass */;
 568	else
 569		mss->file_thp += HPAGE_PMD_SIZE;
 570	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
 571}
 572#else
 573static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 574		struct mm_walk *walk)
 575{
 576}
 577#endif
 578
 579static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 580			   struct mm_walk *walk)
 581{
 582	struct vm_area_struct *vma = walk->vma;
 583	pte_t *pte;
 584	spinlock_t *ptl;
 585
 586	ptl = pmd_trans_huge_lock(pmd, vma);
 587	if (ptl) {
 588		smaps_pmd_entry(pmd, addr, walk);
 589		spin_unlock(ptl);
 590		goto out;
 591	}
 592
 593	if (pmd_trans_unstable(pmd))
 594		goto out;
 595	/*
 596	 * The mmap_lock held all the way back in m_start() is what
 597	 * keeps khugepaged out of here and from collapsing things
 598	 * in here.
 599	 */
 600	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 601	for (; addr != end; pte++, addr += PAGE_SIZE)
 602		smaps_pte_entry(pte, addr, walk);
 603	pte_unmap_unlock(pte - 1, ptl);
 604out:
 605	cond_resched();
 606	return 0;
 607}
 608
 609static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
 610{
 611	/*
 612	 * Don't forget to update Documentation/ on changes.
 613	 */
 614	static const char mnemonics[BITS_PER_LONG][2] = {
 615		/*
  616		 * In case we meet a flag we don't know about.
 617		 */
 618		[0 ... (BITS_PER_LONG-1)] = "??",
 619
 620		[ilog2(VM_READ)]	= "rd",
 621		[ilog2(VM_WRITE)]	= "wr",
 622		[ilog2(VM_EXEC)]	= "ex",
 623		[ilog2(VM_SHARED)]	= "sh",
 624		[ilog2(VM_MAYREAD)]	= "mr",
 625		[ilog2(VM_MAYWRITE)]	= "mw",
 626		[ilog2(VM_MAYEXEC)]	= "me",
 627		[ilog2(VM_MAYSHARE)]	= "ms",
 628		[ilog2(VM_GROWSDOWN)]	= "gd",
 629		[ilog2(VM_PFNMAP)]	= "pf",
 630		[ilog2(VM_DENYWRITE)]	= "dw",
 631		[ilog2(VM_LOCKED)]	= "lo",
 632		[ilog2(VM_IO)]		= "io",
 633		[ilog2(VM_SEQ_READ)]	= "sr",
 634		[ilog2(VM_RAND_READ)]	= "rr",
 635		[ilog2(VM_DONTCOPY)]	= "dc",
 636		[ilog2(VM_DONTEXPAND)]	= "de",
 637		[ilog2(VM_ACCOUNT)]	= "ac",
 638		[ilog2(VM_NORESERVE)]	= "nr",
 639		[ilog2(VM_HUGETLB)]	= "ht",
 640		[ilog2(VM_SYNC)]	= "sf",
 641		[ilog2(VM_ARCH_1)]	= "ar",
 642		[ilog2(VM_WIPEONFORK)]	= "wf",
 643		[ilog2(VM_DONTDUMP)]	= "dd",
 644#ifdef CONFIG_ARM64_BTI
 645		[ilog2(VM_ARM64_BTI)]	= "bt",
 646#endif
 647#ifdef CONFIG_MEM_SOFT_DIRTY
 648		[ilog2(VM_SOFTDIRTY)]	= "sd",
 649#endif
 650		[ilog2(VM_MIXEDMAP)]	= "mm",
 651		[ilog2(VM_HUGEPAGE)]	= "hg",
 652		[ilog2(VM_NOHUGEPAGE)]	= "nh",
 653		[ilog2(VM_MERGEABLE)]	= "mg",
 654		[ilog2(VM_UFFD_MISSING)]= "um",
 655		[ilog2(VM_UFFD_WP)]	= "uw",
 656#ifdef CONFIG_ARCH_HAS_PKEYS
 657		/* These come out via ProtectionKey: */
 658		[ilog2(VM_PKEY_BIT0)]	= "",
 659		[ilog2(VM_PKEY_BIT1)]	= "",
 660		[ilog2(VM_PKEY_BIT2)]	= "",
 661		[ilog2(VM_PKEY_BIT3)]	= "",
 662#if VM_PKEY_BIT4
 663		[ilog2(VM_PKEY_BIT4)]	= "",
 664#endif
 665#endif /* CONFIG_ARCH_HAS_PKEYS */
 666	};
 667	size_t i;
 668
 669	seq_puts(m, "VmFlags: ");
 670	for (i = 0; i < BITS_PER_LONG; i++) {
 671		if (!mnemonics[i][0])
 672			continue;
 673		if (vma->vm_flags & (1UL << i)) {
 674			seq_putc(m, mnemonics[i][0]);
 675			seq_putc(m, mnemonics[i][1]);
 676			seq_putc(m, ' ');
 677		}
 678	}
 679	seq_putc(m, '\n');
 680}
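/*
 * Sample output (illustrative): "VmFlags: rd ex mr mw me de" would
 * describe a readable, executable mapping that may not expand.
 */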
 681
 682#ifdef CONFIG_HUGETLB_PAGE
 683static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
 684				 unsigned long addr, unsigned long end,
 685				 struct mm_walk *walk)
 686{
 687	struct mem_size_stats *mss = walk->private;
 688	struct vm_area_struct *vma = walk->vma;
 689	struct page *page = NULL;
 690
 691	if (pte_present(*pte)) {
 692		page = vm_normal_page(vma, addr, *pte);
 693	} else if (is_swap_pte(*pte)) {
 694		swp_entry_t swpent = pte_to_swp_entry(*pte);
 695
 696		if (is_migration_entry(swpent))
 697			page = migration_entry_to_page(swpent);
 698		else if (is_device_private_entry(swpent))
 699			page = device_private_entry_to_page(swpent);
 700	}
 701	if (page) {
 702		int mapcount = page_mapcount(page);
 703
 704		if (mapcount >= 2)
 705			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
 706		else
 707			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
 708	}
 709	return 0;
 710}
 711#else
 712#define smaps_hugetlb_range	NULL
 713#endif /* HUGETLB_PAGE */
 714
 715static const struct mm_walk_ops smaps_walk_ops = {
 716	.pmd_entry		= smaps_pte_range,
 717	.hugetlb_entry		= smaps_hugetlb_range,
 718};
 719
 720static const struct mm_walk_ops smaps_shmem_walk_ops = {
 721	.pmd_entry		= smaps_pte_range,
 722	.hugetlb_entry		= smaps_hugetlb_range,
 723	.pte_hole		= smaps_pte_hole,
 724};
 725
 726static void smap_gather_stats(struct vm_area_struct *vma,
 727			     struct mem_size_stats *mss)
 728{
 729#ifdef CONFIG_SHMEM
 730	/* In case of smaps_rollup, reset the value from previous vma */
 731	mss->check_shmem_swap = false;
 732	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
 733		/*
 734		 * For shared or readonly shmem mappings we know that all
 735		 * swapped out pages belong to the shmem object, and we can
 736		 * obtain the swap value much more efficiently. For private
 737		 * writable mappings, we might have COW pages that are
 738		 * not affected by the parent swapped out pages of the shmem
 739		 * object, so we have to distinguish them during the page walk.
 740		 * Unless we know that the shmem object (or the part mapped by
 741		 * our VMA) has no swapped out pages at all.
 742		 */
 743		unsigned long shmem_swapped = shmem_swap_usage(vma);
 744
 745		if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
 746					!(vma->vm_flags & VM_WRITE)) {
 747			mss->swap += shmem_swapped;
 748		} else {
 749			mss->check_shmem_swap = true;
 750			walk_page_vma(vma, &smaps_shmem_walk_ops, mss);
 751			return;
 752		}
 753	}
 754#endif
 755	/* mmap_lock is held in m_start */
 756	walk_page_vma(vma, &smaps_walk_ops, mss);
 757}
 758
 759#define SEQ_PUT_DEC(str, val) \
 760		seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
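/*
 * Note: unlike the task_mem() SEQ_PUT_DEC above, here |val| is
 * already in bytes, so a plain >> 10 converts it to kB.
 */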
 761
 762/* Show the contents common for smaps and smaps_rollup */
 763static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
 764	bool rollup_mode)
 765{
 766	SEQ_PUT_DEC("Rss:            ", mss->resident);
 767	SEQ_PUT_DEC(" kB\nPss:            ", mss->pss >> PSS_SHIFT);
 768	if (rollup_mode) {
 769		/*
 770		 * These are meaningful only for smaps_rollup, otherwise two of
 771		 * them are zero, and the other one is the same as Pss.
 772		 */
 773		SEQ_PUT_DEC(" kB\nPss_Anon:       ",
 774			mss->pss_anon >> PSS_SHIFT);
 775		SEQ_PUT_DEC(" kB\nPss_File:       ",
 776			mss->pss_file >> PSS_SHIFT);
 777		SEQ_PUT_DEC(" kB\nPss_Shmem:      ",
 778			mss->pss_shmem >> PSS_SHIFT);
 779	}
 780	SEQ_PUT_DEC(" kB\nShared_Clean:   ", mss->shared_clean);
 781	SEQ_PUT_DEC(" kB\nShared_Dirty:   ", mss->shared_dirty);
 782	SEQ_PUT_DEC(" kB\nPrivate_Clean:  ", mss->private_clean);
 783	SEQ_PUT_DEC(" kB\nPrivate_Dirty:  ", mss->private_dirty);
 784	SEQ_PUT_DEC(" kB\nReferenced:     ", mss->referenced);
 785	SEQ_PUT_DEC(" kB\nAnonymous:      ", mss->anonymous);
 786	SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
 787	SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
 788	SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
 789	SEQ_PUT_DEC(" kB\nFilePmdMapped:  ", mss->file_thp);
 790	SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
 791	seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
 792				  mss->private_hugetlb >> 10, 7);
 793	SEQ_PUT_DEC(" kB\nSwap:           ", mss->swap);
 794	SEQ_PUT_DEC(" kB\nSwapPss:        ",
 795					mss->swap_pss >> PSS_SHIFT);
 796	SEQ_PUT_DEC(" kB\nLocked:         ",
 797					mss->pss_locked >> PSS_SHIFT);
 798	seq_puts(m, " kB\n");
 799}
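/*
 * In /proc/PID/smaps these fields appear once per VMA; in
 * /proc/PID/smaps_rollup they appear once, summed over all VMAs,
 * e.g. (illustrative): "Rss:                1024 kB".
 */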
 800
 801static int show_smap(struct seq_file *m, void *v)
 802{
 803	struct vm_area_struct *vma = v;
 804	struct mem_size_stats mss;
 805
 806	memset(&mss, 0, sizeof(mss));
 807
 808	smap_gather_stats(vma, &mss);
 809
 810	show_map_vma(m, vma);
 811
 812	SEQ_PUT_DEC("Size:           ", vma->vm_end - vma->vm_start);
 813	SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
 814	SEQ_PUT_DEC(" kB\nMMUPageSize:    ", vma_mmu_pagesize(vma));
 815	seq_puts(m, " kB\n");
 816
 817	__show_smap(m, &mss, false);
 818
 819	seq_printf(m, "THPeligible:    %d\n",
 820		   transparent_hugepage_enabled(vma));
 821
 822	if (arch_pkeys_enabled())
 823		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
 824	show_smap_vma_flags(m, vma);
 825
 826	return 0;
 827}
 828
 829static int show_smaps_rollup(struct seq_file *m, void *v)
 830{
 831	struct proc_maps_private *priv = m->private;
 832	struct mem_size_stats mss;
 833	struct mm_struct *mm;
 834	struct vm_area_struct *vma;
 835	unsigned long last_vma_end = 0;
 836	int ret = 0;
 837
 838	priv->task = get_proc_task(priv->inode);
 839	if (!priv->task)
 840		return -ESRCH;
 841
 842	mm = priv->mm;
 843	if (!mm || !mmget_not_zero(mm)) {
 844		ret = -ESRCH;
 845		goto out_put_task;
 846	}
 847
 848	memset(&mss, 0, sizeof(mss));
 849
 850	ret = mmap_read_lock_killable(mm);
 851	if (ret)
 852		goto out_put_mm;
 853
 854	hold_task_mempolicy(priv);
 855
 856	for (vma = priv->mm->mmap; vma; vma = vma->vm_next) {
 857		smap_gather_stats(vma, &mss);
 858		last_vma_end = vma->vm_end;
 859	}
 860
 861	show_vma_header_prefix(m, priv->mm->mmap->vm_start,
 862			       last_vma_end, 0, 0, 0, 0);
 863	seq_pad(m, ' ');
 864	seq_puts(m, "[rollup]\n");
 865
 866	__show_smap(m, &mss, true);
 867
 868	release_task_mempolicy(priv);
 869	mmap_read_unlock(mm);
 870
 871out_put_mm:
 872	mmput(mm);
 873out_put_task:
 874	put_task_struct(priv->task);
 875	priv->task = NULL;
 876
 877	return ret;
 878}
 879#undef SEQ_PUT_DEC
 880
 881static const struct seq_operations proc_pid_smaps_op = {
 882	.start	= m_start,
 883	.next	= m_next,
 884	.stop	= m_stop,
 885	.show	= show_smap
 886};
 887
 888static int pid_smaps_open(struct inode *inode, struct file *file)
 889{
 890	return do_maps_open(inode, file, &proc_pid_smaps_op);
 891}
 892
 893static int smaps_rollup_open(struct inode *inode, struct file *file)
 894{
 895	int ret;
 896	struct proc_maps_private *priv;
 897
 898	priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
 899	if (!priv)
 900		return -ENOMEM;
 901
 902	ret = single_open(file, show_smaps_rollup, priv);
 903	if (ret)
 904		goto out_free;
 905
 906	priv->inode = inode;
 907	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
 908	if (IS_ERR(priv->mm)) {
 909		ret = PTR_ERR(priv->mm);
 910
 911		single_release(inode, file);
 912		goto out_free;
 913	}
 914
 915	return 0;
 916
 917out_free:
 918	kfree(priv);
 919	return ret;
 920}
 921
 922static int smaps_rollup_release(struct inode *inode, struct file *file)
 923{
 924	struct seq_file *seq = file->private_data;
 925	struct proc_maps_private *priv = seq->private;
 926
 927	if (priv->mm)
 928		mmdrop(priv->mm);
 929
 930	kfree(priv);
 931	return single_release(inode, file);
 932}
 933
 934const struct file_operations proc_pid_smaps_operations = {
 935	.open		= pid_smaps_open,
 936	.read		= seq_read,
 937	.llseek		= seq_lseek,
 938	.release	= proc_map_release,
 939};
 940
 941const struct file_operations proc_pid_smaps_rollup_operations = {
 942	.open		= smaps_rollup_open,
 943	.read		= seq_read,
 944	.llseek		= seq_lseek,
 945	.release	= smaps_rollup_release,
 946};
 947
 948enum clear_refs_types {
 949	CLEAR_REFS_ALL = 1,
 950	CLEAR_REFS_ANON,
 951	CLEAR_REFS_MAPPED,
 952	CLEAR_REFS_SOFT_DIRTY,
 953	CLEAR_REFS_MM_HIWATER_RSS,
 954	CLEAR_REFS_LAST,
 955};
 956
 957struct clear_refs_private {
 958	enum clear_refs_types type;
 959};
 960
 961#ifdef CONFIG_MEM_SOFT_DIRTY
 962static inline void clear_soft_dirty(struct vm_area_struct *vma,
 963		unsigned long addr, pte_t *pte)
 964{
 965	/*
 966	 * The soft-dirty tracker uses #PF-s to catch writes
 967	 * to pages, so write-protect the pte as well. See the
 968	 * Documentation/admin-guide/mm/soft-dirty.rst for full description
 969	 * of how soft-dirty works.
 970	 */
 971	pte_t ptent = *pte;
 972
 973	if (pte_present(ptent)) {
 974		pte_t old_pte;
 975
 976		old_pte = ptep_modify_prot_start(vma, addr, pte);
 977		ptent = pte_wrprotect(old_pte);
 978		ptent = pte_clear_soft_dirty(ptent);
 979		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
 980	} else if (is_swap_pte(ptent)) {
 981		ptent = pte_swp_clear_soft_dirty(ptent);
 982		set_pte_at(vma->vm_mm, addr, pte, ptent);
 983	}
 984}
 985#else
 986static inline void clear_soft_dirty(struct vm_area_struct *vma,
 987		unsigned long addr, pte_t *pte)
 988{
 989}
 990#endif
 991
 992#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
 993static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 994		unsigned long addr, pmd_t *pmdp)
 995{
 996	pmd_t old, pmd = *pmdp;
 997
 998	if (pmd_present(pmd)) {
 999		/* See comment in change_huge_pmd() */
1000		old = pmdp_invalidate(vma, addr, pmdp);
1001		if (pmd_dirty(old))
1002			pmd = pmd_mkdirty(pmd);
1003		if (pmd_young(old))
1004			pmd = pmd_mkyoung(pmd);
1005
1006		pmd = pmd_wrprotect(pmd);
1007		pmd = pmd_clear_soft_dirty(pmd);
1008
1009		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1010	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
1011		pmd = pmd_swp_clear_soft_dirty(pmd);
1012		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1013	}
1014}
1015#else
1016static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1017		unsigned long addr, pmd_t *pmdp)
1018{
1019}
1020#endif
1021
1022static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
1023				unsigned long end, struct mm_walk *walk)
1024{
1025	struct clear_refs_private *cp = walk->private;
1026	struct vm_area_struct *vma = walk->vma;
1027	pte_t *pte, ptent;
1028	spinlock_t *ptl;
1029	struct page *page;
1030
1031	ptl = pmd_trans_huge_lock(pmd, vma);
1032	if (ptl) {
1033		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1034			clear_soft_dirty_pmd(vma, addr, pmd);
1035			goto out;
1036		}
1037
1038		if (!pmd_present(*pmd))
1039			goto out;
1040
1041		page = pmd_page(*pmd);
1042
1043		/* Clear accessed and referenced bits. */
1044		pmdp_test_and_clear_young(vma, addr, pmd);
1045		test_and_clear_page_young(page);
1046		ClearPageReferenced(page);
1047out:
1048		spin_unlock(ptl);
1049		return 0;
1050	}
1051
1052	if (pmd_trans_unstable(pmd))
1053		return 0;
1054
1055	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1056	for (; addr != end; pte++, addr += PAGE_SIZE) {
1057		ptent = *pte;
1058
1059		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1060			clear_soft_dirty(vma, addr, pte);
1061			continue;
1062		}
1063
1064		if (!pte_present(ptent))
1065			continue;
1066
1067		page = vm_normal_page(vma, addr, ptent);
1068		if (!page)
1069			continue;
1070
1071		/* Clear accessed and referenced bits. */
1072		ptep_test_and_clear_young(vma, addr, pte);
1073		test_and_clear_page_young(page);
1074		ClearPageReferenced(page);
1075	}
1076	pte_unmap_unlock(pte - 1, ptl);
1077	cond_resched();
1078	return 0;
1079}
1080
1081static int clear_refs_test_walk(unsigned long start, unsigned long end,
1082				struct mm_walk *walk)
1083{
1084	struct clear_refs_private *cp = walk->private;
1085	struct vm_area_struct *vma = walk->vma;
1086
1087	if (vma->vm_flags & VM_PFNMAP)
1088		return 1;
1089
1090	/*
1091	 * Writing 1 to /proc/pid/clear_refs affects all pages.
1092	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
1093	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
1094	 * Writing 4 to /proc/pid/clear_refs affects all pages.
1095	 */
1096	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
1097		return 1;
1098	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
1099		return 1;
1100	return 0;
1101}
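/*
 * Usage example (illustrative): "echo 4 > /proc/$pid/clear_refs"
 * clears the soft-dirty bits; pages written after that show up with
 * bit 55 (PM_SOFT_DIRTY) set in /proc/$pid/pagemap until the next
 * reset.
 */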
1102
1103static const struct mm_walk_ops clear_refs_walk_ops = {
1104	.pmd_entry		= clear_refs_pte_range,
1105	.test_walk		= clear_refs_test_walk,
1106};
1107
1108static ssize_t clear_refs_write(struct file *file, const char __user *buf,
1109				size_t count, loff_t *ppos)
1110{
1111	struct task_struct *task;
1112	char buffer[PROC_NUMBUF];
1113	struct mm_struct *mm;
1114	struct vm_area_struct *vma;
1115	enum clear_refs_types type;
1116	struct mmu_gather tlb;
1117	int itype;
1118	int rv;
1119
1120	memset(buffer, 0, sizeof(buffer));
1121	if (count > sizeof(buffer) - 1)
1122		count = sizeof(buffer) - 1;
1123	if (copy_from_user(buffer, buf, count))
1124		return -EFAULT;
1125	rv = kstrtoint(strstrip(buffer), 10, &itype);
1126	if (rv < 0)
1127		return rv;
1128	type = (enum clear_refs_types)itype;
1129	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
1130		return -EINVAL;
1131
1132	task = get_proc_task(file_inode(file));
1133	if (!task)
1134		return -ESRCH;
1135	mm = get_task_mm(task);
1136	if (mm) {
1137		struct mmu_notifier_range range;
1138		struct clear_refs_private cp = {
1139			.type = type,
1140		};
1141
1142		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
1143			if (mmap_write_lock_killable(mm)) {
1144				count = -EINTR;
1145				goto out_mm;
1146			}
1147
1148			/*
1149			 * Writing 5 to /proc/pid/clear_refs resets the peak
1150			 * resident set size to this mm's current rss value.
1151			 */
1152			reset_mm_hiwater_rss(mm);
1153			mmap_write_unlock(mm);
1154			goto out_mm;
1155		}
1156
1157		if (mmap_read_lock_killable(mm)) {
1158			count = -EINTR;
1159			goto out_mm;
1160		}
1161		tlb_gather_mmu(&tlb, mm, 0, -1);
1162		if (type == CLEAR_REFS_SOFT_DIRTY) {
1163			for (vma = mm->mmap; vma; vma = vma->vm_next) {
1164				if (!(vma->vm_flags & VM_SOFTDIRTY))
1165					continue;
1166				mmap_read_unlock(mm);
1167				if (mmap_write_lock_killable(mm)) {
1168					count = -EINTR;
1169					goto out_mm;
1170				}
1171				/*
 1172				 * Avoid modifying vma->vm_flags
 1173				 * without locked ops while the
 1174				 * coredump reads the vm_flags.
1175				 */
1176				if (!mmget_still_valid(mm)) {
1177					/*
1178					 * Silently return "count"
 1179					 * as if get_task_mm() had
 1180					 * failed. FIXME: should this
 1181					 * function have returned
 1182					 * -ESRCH if get_task_mm()
 1183					 * failed, the way it does
 1184					 * when get_proc_task() fails?
1185					 */
1186					mmap_write_unlock(mm);
1187					goto out_mm;
1188				}
1189				for (vma = mm->mmap; vma; vma = vma->vm_next) {
1190					vma->vm_flags &= ~VM_SOFTDIRTY;
1191					vma_set_page_prot(vma);
1192				}
1193				mmap_write_downgrade(mm);
1194				break;
1195			}
1196
1197			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
1198						0, NULL, mm, 0, -1UL);
1199			mmu_notifier_invalidate_range_start(&range);
1200		}
1201		walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops,
1202				&cp);
1203		if (type == CLEAR_REFS_SOFT_DIRTY)
1204			mmu_notifier_invalidate_range_end(&range);
1205		tlb_finish_mmu(&tlb, 0, -1);
1206		mmap_read_unlock(mm);
1207out_mm:
1208		mmput(mm);
1209	}
1210	put_task_struct(task);
1211
1212	return count;
1213}
1214
1215const struct file_operations proc_clear_refs_operations = {
1216	.write		= clear_refs_write,
1217	.llseek		= noop_llseek,
1218};
1219
1220typedef struct {
1221	u64 pme;
1222} pagemap_entry_t;
1223
1224struct pagemapread {
1225	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
1226	pagemap_entry_t *buffer;
1227	bool show_pfn;
1228};
1229
1230#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
1231#define PAGEMAP_WALK_MASK	(PMD_MASK)
1232
1233#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
1234#define PM_PFRAME_BITS		55
1235#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1236#define PM_SOFT_DIRTY		BIT_ULL(55)
1237#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
1238#define PM_FILE			BIT_ULL(61)
1239#define PM_SWAP			BIT_ULL(62)
1240#define PM_PRESENT		BIT_ULL(63)
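/*
 * For a swapped page the low 55 bits are repurposed: bits 0-4 hold
 * the swap type and bits 5-54 the swap offset, i.e.
 * frame = swp_type | (swp_offset << MAX_SWAPFILES_SHIFT), with
 * MAX_SWAPFILES_SHIFT == 5 (see pte_to_pagemap_entry() below).
 */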
1241
1242#define PM_END_OF_BUFFER    1
1243
1244static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
1245{
1246	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
1247}
1248
1249static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
1250			  struct pagemapread *pm)
1251{
1252	pm->buffer[pm->pos++] = *pme;
1253	if (pm->pos >= pm->len)
1254		return PM_END_OF_BUFFER;
1255	return 0;
1256}
1257
1258static int pagemap_pte_hole(unsigned long start, unsigned long end,
1259			    __always_unused int depth, struct mm_walk *walk)
1260{
1261	struct pagemapread *pm = walk->private;
1262	unsigned long addr = start;
1263	int err = 0;
1264
1265	while (addr < end) {
1266		struct vm_area_struct *vma = find_vma(walk->mm, addr);
1267		pagemap_entry_t pme = make_pme(0, 0);
1268		/* End of address space hole, which we mark as non-present. */
1269		unsigned long hole_end;
1270
1271		if (vma)
1272			hole_end = min(end, vma->vm_start);
1273		else
1274			hole_end = end;
1275
1276		for (; addr < hole_end; addr += PAGE_SIZE) {
1277			err = add_to_pagemap(addr, &pme, pm);
1278			if (err)
1279				goto out;
1280		}
1281
1282		if (!vma)
1283			break;
1284
1285		/* Addresses in the VMA. */
1286		if (vma->vm_flags & VM_SOFTDIRTY)
1287			pme = make_pme(0, PM_SOFT_DIRTY);
1288		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
1289			err = add_to_pagemap(addr, &pme, pm);
1290			if (err)
1291				goto out;
1292		}
1293	}
1294out:
1295	return err;
1296}
1297
1298static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
1299		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1300{
1301	u64 frame = 0, flags = 0;
1302	struct page *page = NULL;
1303
1304	if (pte_present(pte)) {
1305		if (pm->show_pfn)
1306			frame = pte_pfn(pte);
1307		flags |= PM_PRESENT;
1308		page = vm_normal_page(vma, addr, pte);
1309		if (pte_soft_dirty(pte))
1310			flags |= PM_SOFT_DIRTY;
1311	} else if (is_swap_pte(pte)) {
1312		swp_entry_t entry;
1313		if (pte_swp_soft_dirty(pte))
1314			flags |= PM_SOFT_DIRTY;
1315		entry = pte_to_swp_entry(pte);
1316		if (pm->show_pfn)
1317			frame = swp_type(entry) |
1318				(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
1319		flags |= PM_SWAP;
1320		if (is_migration_entry(entry))
1321			page = migration_entry_to_page(entry);
1322
1323		if (is_device_private_entry(entry))
1324			page = device_private_entry_to_page(entry);
1325	}
1326
1327	if (page && !PageAnon(page))
1328		flags |= PM_FILE;
1329	if (page && page_mapcount(page) == 1)
1330		flags |= PM_MMAP_EXCLUSIVE;
1331	if (vma->vm_flags & VM_SOFTDIRTY)
1332		flags |= PM_SOFT_DIRTY;
1333
1334	return make_pme(frame, flags);
1335}
1336
1337static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1338			     struct mm_walk *walk)
1339{
1340	struct vm_area_struct *vma = walk->vma;
1341	struct pagemapread *pm = walk->private;
1342	spinlock_t *ptl;
1343	pte_t *pte, *orig_pte;
1344	int err = 0;
1345
1346#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1347	ptl = pmd_trans_huge_lock(pmdp, vma);
1348	if (ptl) {
1349		u64 flags = 0, frame = 0;
1350		pmd_t pmd = *pmdp;
1351		struct page *page = NULL;
1352
1353		if (vma->vm_flags & VM_SOFTDIRTY)
1354			flags |= PM_SOFT_DIRTY;
1355
1356		if (pmd_present(pmd)) {
1357			page = pmd_page(pmd);
1358
1359			flags |= PM_PRESENT;
1360			if (pmd_soft_dirty(pmd))
1361				flags |= PM_SOFT_DIRTY;
1362			if (pm->show_pfn)
1363				frame = pmd_pfn(pmd) +
1364					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1365		}
1366#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1367		else if (is_swap_pmd(pmd)) {
1368			swp_entry_t entry = pmd_to_swp_entry(pmd);
1369			unsigned long offset;
1370
1371			if (pm->show_pfn) {
1372				offset = swp_offset(entry) +
1373					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1374				frame = swp_type(entry) |
1375					(offset << MAX_SWAPFILES_SHIFT);
1376			}
1377			flags |= PM_SWAP;
1378			if (pmd_swp_soft_dirty(pmd))
1379				flags |= PM_SOFT_DIRTY;
1380			VM_BUG_ON(!is_pmd_migration_entry(pmd));
1381			page = migration_entry_to_page(entry);
 
1382		}
1383#endif
1384
1385		if (page && page_mapcount(page) == 1)
1386			flags |= PM_MMAP_EXCLUSIVE;
1387
1388		for (; addr != end; addr += PAGE_SIZE) {
1389			pagemap_entry_t pme = make_pme(frame, flags);
1390
1391			err = add_to_pagemap(addr, &pme, pm);
1392			if (err)
1393				break;
1394			if (pm->show_pfn) {
1395				if (flags & PM_PRESENT)
1396					frame++;
1397				else if (flags & PM_SWAP)
1398					frame += (1 << MAX_SWAPFILES_SHIFT);
1399			}
1400		}
1401		spin_unlock(ptl);
1402		return err;
1403	}
1404
1405	if (pmd_trans_unstable(pmdp))
1406		return 0;
1407#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1408
1409	/*
 1410	 * We can assume that @vma always points to a valid VMA and @end never
1411	 * goes beyond vma->vm_end.
1412	 */
1413	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
1414	for (; addr < end; pte++, addr += PAGE_SIZE) {
1415		pagemap_entry_t pme;
1416
1417		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
1418		err = add_to_pagemap(addr, &pme, pm);
1419		if (err)
1420			break;
1421	}
1422	pte_unmap_unlock(orig_pte, ptl);
1423
1424	cond_resched();
1425
1426	return err;
1427}
1428
1429#ifdef CONFIG_HUGETLB_PAGE
 1430/* This function walks within one hugetlb entry in a single call */
1431static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
1432				 unsigned long addr, unsigned long end,
1433				 struct mm_walk *walk)
1434{
1435	struct pagemapread *pm = walk->private;
1436	struct vm_area_struct *vma = walk->vma;
1437	u64 flags = 0, frame = 0;
1438	int err = 0;
1439	pte_t pte;
1440
1441	if (vma->vm_flags & VM_SOFTDIRTY)
1442		flags |= PM_SOFT_DIRTY;
1443
1444	pte = huge_ptep_get(ptep);
1445	if (pte_present(pte)) {
1446		struct page *page = pte_page(pte);
1447
1448		if (!PageAnon(page))
1449			flags |= PM_FILE;
1450
1451		if (page_mapcount(page) == 1)
1452			flags |= PM_MMAP_EXCLUSIVE;
1453
1454		flags |= PM_PRESENT;
1455		if (pm->show_pfn)
1456			frame = pte_pfn(pte) +
1457				((addr & ~hmask) >> PAGE_SHIFT);
1458	}
1459
1460	for (; addr != end; addr += PAGE_SIZE) {
1461		pagemap_entry_t pme = make_pme(frame, flags);
1462
1463		err = add_to_pagemap(addr, &pme, pm);
1464		if (err)
1465			return err;
1466		if (pm->show_pfn && (flags & PM_PRESENT))
1467			frame++;
1468	}
1469
1470	cond_resched();
1471
1472	return err;
1473}
1474#else
1475#define pagemap_hugetlb_range	NULL
1476#endif /* HUGETLB_PAGE */
1477
1478static const struct mm_walk_ops pagemap_ops = {
1479	.pmd_entry	= pagemap_pmd_range,
1480	.pte_hole	= pagemap_pte_hole,
1481	.hugetlb_entry	= pagemap_hugetlb_range,
1482};
1483
1484/*
1485 * /proc/pid/pagemap - an array mapping virtual pages to pfns
1486 *
1487 * For each page in the address space, this file contains one 64-bit entry
1488 * consisting of the following:
1489 *
1490 * Bits 0-54  page frame number (PFN) if present
1491 * Bits 0-4   swap type if swapped
1492 * Bits 5-54  swap offset if swapped
1493 * Bit  55    pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
1494 * Bit  56    page exclusively mapped
1495 * Bits 57-60 zero
1496 * Bit  61    page is file-page or shared-anon
1497 * Bit  62    page swapped
1498 * Bit  63    page present
1499 *
1500 * If the page is not present but in swap, then the PFN contains an
1501 * encoding of the swap file number and the page's offset into the
1502 * swap. Unmapped pages return a null PFN. This allows determining
1503 * precisely which pages are mapped (or in swap) and comparing mapped
1504 * pages between processes.
1505 *
1506 * Efficient users of this interface will use /proc/pid/maps to
1507 * determine which areas of memory are actually mapped and llseek to
1508 * skip over unmapped regions.
1509 */
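/*
 * Example (userspace, illustrative): the entry for virtual address
 * va lives at byte offset (va / PAGE_SIZE) * 8 in /proc/pid/pagemap;
 * read 8 bytes there, test bit 63 for "present", and mask bits 0-54
 * for the PFN (non-zero only when the reader has CAP_SYS_ADMIN).
 */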
1510static ssize_t pagemap_read(struct file *file, char __user *buf,
1511			    size_t count, loff_t *ppos)
1512{
1513	struct mm_struct *mm = file->private_data;
1514	struct pagemapread pm;
1515	unsigned long src;
1516	unsigned long svpfn;
1517	unsigned long start_vaddr;
1518	unsigned long end_vaddr;
1519	int ret = 0, copied = 0;
1520
1521	if (!mm || !mmget_not_zero(mm))
1522		goto out;
1523
1524	ret = -EINVAL;
1525	/* file position must be aligned */
1526	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1527		goto out_mm;
1528
1529	ret = 0;
1530	if (!count)
1531		goto out_mm;
1532
1533	/* do not disclose physical addresses: attack vector */
1534	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
1535
1536	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1537	pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL);
1538	ret = -ENOMEM;
1539	if (!pm.buffer)
1540		goto out_mm;
1541
1542	src = *ppos;
1543	svpfn = src / PM_ENTRY_BYTES;
1544	start_vaddr = svpfn << PAGE_SHIFT;
1545	end_vaddr = mm->task_size;
1546
1547	/* watch out for wraparound */
1548	if (svpfn > mm->task_size >> PAGE_SHIFT)
1549		start_vaddr = end_vaddr;
1550
1551	/*
1552	 * The odds are that this will stop walking way
1553	 * before end_vaddr, because the length of the
1554	 * user buffer is tracked in "pm", and the walk
1555	 * will stop when we hit the end of the buffer.
1556	 */
1557	ret = 0;
1558	while (count && (start_vaddr < end_vaddr)) {
1559		int len;
1560		unsigned long end;
1561
1562		pm.pos = 0;
1563		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
1564		/* overflow ? */
1565		if (end < start_vaddr || end > end_vaddr)
1566			end = end_vaddr;
1567		ret = mmap_read_lock_killable(mm);
1568		if (ret)
1569			goto out_free;
1570		ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
1571		mmap_read_unlock(mm);
1572		start_vaddr = end;
1573
1574		len = min(count, PM_ENTRY_BYTES * pm.pos);
1575		if (copy_to_user(buf, pm.buffer, len)) {
1576			ret = -EFAULT;
1577			goto out_free;
1578		}
1579		copied += len;
1580		buf += len;
1581		count -= len;
1582	}
1583	*ppos += copied;
1584	if (!ret || ret == PM_END_OF_BUFFER)
1585		ret = copied;
1586
1587out_free:
1588	kfree(pm.buffer);
1589out_mm:
1590	mmput(mm);
1591out:
1592	return ret;
1593}
1594
1595static int pagemap_open(struct inode *inode, struct file *file)
1596{
1597	struct mm_struct *mm;
1598
1599	mm = proc_mem_open(inode, PTRACE_MODE_READ);
1600	if (IS_ERR(mm))
1601		return PTR_ERR(mm);
1602	file->private_data = mm;
1603	return 0;
1604}
1605
1606static int pagemap_release(struct inode *inode, struct file *file)
1607{
1608	struct mm_struct *mm = file->private_data;
1609
1610	if (mm)
1611		mmdrop(mm);
1612	return 0;
1613}
1614
1615const struct file_operations proc_pagemap_operations = {
1616	.llseek		= mem_lseek, /* borrow this */
1617	.read		= pagemap_read,
1618	.open		= pagemap_open,
1619	.release	= pagemap_release,
1620};
1621#endif /* CONFIG_PROC_PAGE_MONITOR */
1622
1623#ifdef CONFIG_NUMA
1624
1625struct numa_maps {
1626	unsigned long pages;
1627	unsigned long anon;
1628	unsigned long active;
1629	unsigned long writeback;
1630	unsigned long mapcount_max;
1631	unsigned long dirty;
1632	unsigned long swapcache;
1633	unsigned long node[MAX_NUMNODES];
1634};
1635
1636struct numa_maps_private {
1637	struct proc_maps_private proc_maps;
1638	struct numa_maps md;
1639};
1640
1641static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
1642			unsigned long nr_pages)
1643{
1644	int count = page_mapcount(page);
1645
1646	md->pages += nr_pages;
1647	if (pte_dirty || PageDirty(page))
1648		md->dirty += nr_pages;
1649
1650	if (PageSwapCache(page))
1651		md->swapcache += nr_pages;
1652
1653	if (PageActive(page) || PageUnevictable(page))
1654		md->active += nr_pages;
1655
1656	if (PageWriteback(page))
1657		md->writeback += nr_pages;
1658
1659	if (PageAnon(page))
1660		md->anon += nr_pages;
1661
1662	if (count > md->mapcount_max)
1663		md->mapcount_max = count;
1664
1665	md->node[page_to_nid(page)] += nr_pages;
1666}
1667
1668static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1669		unsigned long addr)
1670{
1671	struct page *page;
1672	int nid;
1673
1674	if (!pte_present(pte))
1675		return NULL;
1676
1677	page = vm_normal_page(vma, addr, pte);
1678	if (!page)
1679		return NULL;
1680
1681	if (PageReserved(page))
1682		return NULL;
1683
1684	nid = page_to_nid(page);
1685	if (!node_isset(nid, node_states[N_MEMORY]))
1686		return NULL;
1687
1688	return page;
1689}
1690
1691#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1692static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
1693					      struct vm_area_struct *vma,
1694					      unsigned long addr)
1695{
1696	struct page *page;
1697	int nid;
1698
1699	if (!pmd_present(pmd))
1700		return NULL;
1701
1702	page = vm_normal_page_pmd(vma, addr, pmd);
1703	if (!page)
1704		return NULL;
1705
1706	if (PageReserved(page))
1707		return NULL;
1708
1709	nid = page_to_nid(page);
1710	if (!node_isset(nid, node_states[N_MEMORY]))
1711		return NULL;
1712
1713	return page;
1714}
1715#endif
1716
1717static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1718		unsigned long end, struct mm_walk *walk)
1719{
1720	struct numa_maps *md = walk->private;
1721	struct vm_area_struct *vma = walk->vma;
1722	spinlock_t *ptl;
1723	pte_t *orig_pte;
1724	pte_t *pte;
1725
1726#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1727	ptl = pmd_trans_huge_lock(pmd, vma);
1728	if (ptl) {
1729		struct page *page;
1730
1731		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
1732		if (page)
1733			gather_stats(page, md, pmd_dirty(*pmd),
1734				     HPAGE_PMD_SIZE/PAGE_SIZE);
1735		spin_unlock(ptl);
1736		return 0;
1737	}
1738
1739	if (pmd_trans_unstable(pmd))
1740		return 0;
1741#endif
1742	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1743	do {
1744		struct page *page = can_gather_numa_stats(*pte, vma, addr);
1745		if (!page)
1746			continue;
1747		gather_stats(page, md, pte_dirty(*pte), 1);
1748
1749	} while (pte++, addr += PAGE_SIZE, addr != end);
1750	pte_unmap_unlock(orig_pte, ptl);
1751	cond_resched();
1752	return 0;
1753}
1754#ifdef CONFIG_HUGETLB_PAGE
1755static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1756		unsigned long addr, unsigned long end, struct mm_walk *walk)
1757{
1758	pte_t huge_pte = huge_ptep_get(pte);
1759	struct numa_maps *md;
1760	struct page *page;
1761
1762	if (!pte_present(huge_pte))
1763		return 0;
1764
1765	page = pte_page(huge_pte);
1766	if (!page)
1767		return 0;
1768
1769	md = walk->private;
1770	gather_stats(page, md, pte_dirty(huge_pte), 1);
1771	return 0;
1772}
1773
1774#else
1775static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1776		unsigned long addr, unsigned long end, struct mm_walk *walk)
1777{
1778	return 0;
1779}
1780#endif
1781
1782static const struct mm_walk_ops show_numa_ops = {
1783	.hugetlb_entry = gather_hugetlb_stats,
1784	.pmd_entry = gather_pte_stats,
1785};
1786
1787/*
1788 * Display pages allocated per node and memory policy via /proc.
1789 */
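/*
 * Sample line (illustrative):
 *   7f4e2aa00000 default anon=120 dirty=120 active=96 N0=80 N1=40
 *   kernelpagesize_kB=4
 */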
1790static int show_numa_map(struct seq_file *m, void *v)
1791{
1792	struct numa_maps_private *numa_priv = m->private;
1793	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
1794	struct vm_area_struct *vma = v;
1795	struct numa_maps *md = &numa_priv->md;
1796	struct file *file = vma->vm_file;
1797	struct mm_struct *mm = vma->vm_mm;
1798	struct mempolicy *pol;
1799	char buffer[64];
1800	int nid;
1801
1802	if (!mm)
1803		return 0;
1804
1805	/* Ensure we start with an empty set of numa_maps statistics. */
1806	memset(md, 0, sizeof(*md));
1807
1808	pol = __get_vma_policy(vma, vma->vm_start);
1809	if (pol) {
1810		mpol_to_str(buffer, sizeof(buffer), pol);
1811		mpol_cond_put(pol);
1812	} else {
1813		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
1814	}
1815
1816	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1817
1818	if (file) {
1819		seq_puts(m, " file=");
1820		seq_file_path(m, file, "\n\t= ");
1821	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1822		seq_puts(m, " heap");
1823	} else if (is_stack(vma)) {
1824		seq_puts(m, " stack");
1825	}
1826
1827	if (is_vm_hugetlb_page(vma))
1828		seq_puts(m, " huge");
1829
1830	/* mmap_lock is held by m_start */
1831	walk_page_vma(vma, &show_numa_ops, md);
1832
1833	if (!md->pages)
1834		goto out;
1835
1836	if (md->anon)
1837		seq_printf(m, " anon=%lu", md->anon);
1838
1839	if (md->dirty)
1840		seq_printf(m, " dirty=%lu", md->dirty);
1841
1842	if (md->pages != md->anon && md->pages != md->dirty)
1843		seq_printf(m, " mapped=%lu", md->pages);
1844
1845	if (md->mapcount_max > 1)
1846		seq_printf(m, " mapmax=%lu", md->mapcount_max);
1847
1848	if (md->swapcache)
1849		seq_printf(m, " swapcache=%lu", md->swapcache);
1850
1851	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1852		seq_printf(m, " active=%lu", md->active);
1853
1854	if (md->writeback)
1855		seq_printf(m, " writeback=%lu", md->writeback);
1856
1857	for_each_node_state(nid, N_MEMORY)
1858		if (md->node[nid])
1859			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
1860
1861	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
1862out:
1863	seq_putc(m, '\n');
1864	return 0;
1865}
1866
1867static const struct seq_operations proc_pid_numa_maps_op = {
1868	.start  = m_start,
1869	.next   = m_next,
1870	.stop   = m_stop,
1871	.show   = show_numa_map,
1872};
1873
1874static int pid_numa_maps_open(struct inode *inode, struct file *file)
1875{
1876	return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
1877				sizeof(struct numa_maps_private));
1878}
1879
1880const struct file_operations proc_pid_numa_maps_operations = {
1881	.open		= pid_numa_maps_open,
1882	.read		= seq_read,
1883	.llseek		= seq_lseek,
1884	.release	= proc_map_release,
1885};
1886
1887#endif /* CONFIG_NUMA */
v6.9.4 (fs/proc/task_mmu.c)
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/pagewalk.h>
   3#include <linux/mm_inline.h>
   4#include <linux/hugetlb.h>
   5#include <linux/huge_mm.h>
   6#include <linux/mount.h>
   7#include <linux/ksm.h>
   8#include <linux/seq_file.h>
   9#include <linux/highmem.h>
  10#include <linux/ptrace.h>
  11#include <linux/slab.h>
  12#include <linux/pagemap.h>
  13#include <linux/mempolicy.h>
  14#include <linux/rmap.h>
  15#include <linux/swap.h>
  16#include <linux/sched/mm.h>
  17#include <linux/swapops.h>
  18#include <linux/mmu_notifier.h>
  19#include <linux/page_idle.h>
  20#include <linux/shmem_fs.h>
  21#include <linux/uaccess.h>
  22#include <linux/pkeys.h>
  23#include <linux/minmax.h>
  24#include <linux/overflow.h>
  25
  26#include <asm/elf.h>
  27#include <asm/tlb.h>
  28#include <asm/tlbflush.h>
  29#include "internal.h"
  30
  31#define SEQ_PUT_DEC(str, val) \
  32		seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
  33void task_mem(struct seq_file *m, struct mm_struct *mm)
  34{
  35	unsigned long text, lib, swap, anon, file, shmem;
  36	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
  37
  38	anon = get_mm_counter(mm, MM_ANONPAGES);
  39	file = get_mm_counter(mm, MM_FILEPAGES);
  40	shmem = get_mm_counter(mm, MM_SHMEMPAGES);
  41
  42	/*
  43	 * Note: to minimize their overhead, mm maintains hiwater_vm and
  44	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
  45	 * collector of these hiwater stats must therefore get total_vm
  46	 * and rss too, which will usually be the higher.  Barriers? not
  47	 * worth the effort, such snapshots can always be inconsistent.
  48	 */
  49	hiwater_vm = total_vm = mm->total_vm;
  50	if (hiwater_vm < mm->hiwater_vm)
  51		hiwater_vm = mm->hiwater_vm;
  52	hiwater_rss = total_rss = anon + file + shmem;
  53	if (hiwater_rss < mm->hiwater_rss)
  54		hiwater_rss = mm->hiwater_rss;
  55
  56	/* split executable areas between text and lib */
  57	text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
  58	text = min(text, mm->exec_vm << PAGE_SHIFT);
  59	lib = (mm->exec_vm << PAGE_SHIFT) - text;
  60
  61	swap = get_mm_counter(mm, MM_SWAPENTS);
  62	SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
  63	SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
  64	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
  65	SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
  66	SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
  67	SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
  68	SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
  69	SEQ_PUT_DEC(" kB\nRssFile:\t", file);
  70	SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
  71	SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
  72	SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
  73	seq_put_decimal_ull_width(m,
  74		    " kB\nVmExe:\t", text >> 10, 8);
  75	seq_put_decimal_ull_width(m,
  76		    " kB\nVmLib:\t", lib >> 10, 8);
  77	seq_put_decimal_ull_width(m,
  78		    " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
  79	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
  80	seq_puts(m, " kB\n");
  81	hugetlb_report_usage(m, mm);
  82}
  83#undef SEQ_PUT_DEC
  84
  85unsigned long task_vsize(struct mm_struct *mm)
  86{
  87	return PAGE_SIZE * mm->total_vm;
  88}
  89
  90unsigned long task_statm(struct mm_struct *mm,
  91			 unsigned long *shared, unsigned long *text,
  92			 unsigned long *data, unsigned long *resident)
  93{
  94	*shared = get_mm_counter(mm, MM_FILEPAGES) +
  95			get_mm_counter(mm, MM_SHMEMPAGES);
  96	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
  97								>> PAGE_SHIFT;
  98	*data = mm->data_vm + mm->stack_vm;
  99	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
 100	return mm->total_vm;
 101}
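/*
 * How these values surface in /proc/<pid>/statm, all columns in pages
 * (proc_pid_statm() in fs/proc/array.c prints 0 for the lib and dt
 * columns in current kernels):
 *
 *	size     = return value (mm->total_vm)
 *	resident = *resident
 *	shared   = *shared
 *	text     = *text
 *	data     = *data
 */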
 102
 103#ifdef CONFIG_NUMA
 104/*
 105 * Save get_task_policy() for show_numa_map().
 106 */
 107static void hold_task_mempolicy(struct proc_maps_private *priv)
 108{
 109	struct task_struct *task = priv->task;
 110
 111	task_lock(task);
 112	priv->task_mempolicy = get_task_policy(task);
 113	mpol_get(priv->task_mempolicy);
 114	task_unlock(task);
 115}
 116static void release_task_mempolicy(struct proc_maps_private *priv)
 117{
 118	mpol_put(priv->task_mempolicy);
 119}
 120#else
 121static void hold_task_mempolicy(struct proc_maps_private *priv)
 122{
 123}
 124static void release_task_mempolicy(struct proc_maps_private *priv)
 125{
 126}
 127#endif
 128
 129static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
 130						loff_t *ppos)
 131{
 132	struct vm_area_struct *vma = vma_next(&priv->iter);
 133
 134	if (vma) {
 135		*ppos = vma->vm_start;
 136	} else {
 137		*ppos = -2UL;
 138		vma = get_gate_vma(priv->mm);
 139	}
 140
 141	return vma;
 142}
 143
 144static void *m_start(struct seq_file *m, loff_t *ppos)
 145{
 146	struct proc_maps_private *priv = m->private;
 147	unsigned long last_addr = *ppos;
 148	struct mm_struct *mm;
 149
 150	/* See m_next(). Zero at the start or after lseek. */
 151	if (last_addr == -1UL)
 152		return NULL;
 153
 154	priv->task = get_proc_task(priv->inode);
 155	if (!priv->task)
 156		return ERR_PTR(-ESRCH);
 157
 158	mm = priv->mm;
 159	if (!mm || !mmget_not_zero(mm)) {
 160		put_task_struct(priv->task);
 161		priv->task = NULL;
 162		return NULL;
 163	}
 164
 165	if (mmap_read_lock_killable(mm)) {
 166		mmput(mm);
 167		put_task_struct(priv->task);
 168		priv->task = NULL;
 169		return ERR_PTR(-EINTR);
 170	}
 171
 172	vma_iter_init(&priv->iter, mm, last_addr);
 173	hold_task_mempolicy(priv);
 174	if (last_addr == -2UL)
 175		return get_gate_vma(mm);
 176
 177	return proc_get_vma(priv, ppos);
 178}
 179
 180static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
 181{
 182	if (*ppos == -2UL) {
 183		*ppos = -1UL;
 184		return NULL;
 185	}
 186	return proc_get_vma(m->private, ppos);
 187}
 188
 189static void m_stop(struct seq_file *m, void *v)
 190{
 191	struct proc_maps_private *priv = m->private;
 192	struct mm_struct *mm = priv->mm;
 193
 194	if (!priv->task)
 195		return;
 196
 197	release_task_mempolicy(priv);
 198	mmap_read_unlock(mm);
 199	mmput(mm);
 200	put_task_struct(priv->task);
 201	priv->task = NULL;
 202}
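/*
 * Sketch of the *ppos protocol implemented by the three callbacks above,
 * for an mm with two VMAs plus a gate VMA:
 *
 *	m_start(*ppos == 0)    -> VMA1,     *ppos = VMA1->vm_start
 *	m_next                 -> VMA2,     *ppos = VMA2->vm_start
 *	m_next                 -> gate VMA, *ppos = -2UL
 *	m_next(*ppos == -2UL)  -> NULL,     *ppos = -1UL
 *	m_start(*ppos == -1UL) -> NULL      (EOF until a new lseek/open)
 *
 * Storing vm_start in *ppos lets a later m_start() resume at the right
 * address even if the VMA list changed between reads.
 */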
 203
 204static int proc_maps_open(struct inode *inode, struct file *file,
 205			const struct seq_operations *ops, int psize)
 206{
 207	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
 208
 209	if (!priv)
 210		return -ENOMEM;
 211
 212	priv->inode = inode;
 213	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
 214	if (IS_ERR(priv->mm)) {
 215		int err = PTR_ERR(priv->mm);
 216
 217		seq_release_private(inode, file);
 218		return err;
 219	}
 220
 221	return 0;
 222}
 223
 224static int proc_map_release(struct inode *inode, struct file *file)
 225{
 226	struct seq_file *seq = file->private_data;
 227	struct proc_maps_private *priv = seq->private;
 228
 229	if (priv->mm)
 230		mmdrop(priv->mm);
 231
 232	return seq_release_private(inode, file);
 233}
 234
 235static int do_maps_open(struct inode *inode, struct file *file,
 236			const struct seq_operations *ops)
 237{
 238	return proc_maps_open(inode, file, ops,
 239				sizeof(struct proc_maps_private));
 240}
 241
 242static void show_vma_header_prefix(struct seq_file *m,
 243				   unsigned long start, unsigned long end,
 244				   vm_flags_t flags, unsigned long long pgoff,
 245				   dev_t dev, unsigned long ino)
 246{
 247	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 248	seq_put_hex_ll(m, NULL, start, 8);
 249	seq_put_hex_ll(m, "-", end, 8);
 250	seq_putc(m, ' ');
 251	seq_putc(m, flags & VM_READ ? 'r' : '-');
 252	seq_putc(m, flags & VM_WRITE ? 'w' : '-');
 253	seq_putc(m, flags & VM_EXEC ? 'x' : '-');
 254	seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
 255	seq_put_hex_ll(m, " ", pgoff, 8);
 256	seq_put_hex_ll(m, " ", MAJOR(dev), 2);
 257	seq_put_hex_ll(m, ":", MINOR(dev), 2);
 258	seq_put_decimal_ull(m, " ", ino);
 259	seq_putc(m, ' ');
 260}
 261
 262static void
 263show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 264{
 265	struct anon_vma_name *anon_name = NULL;
 266	struct mm_struct *mm = vma->vm_mm;
 267	struct file *file = vma->vm_file;
 268	vm_flags_t flags = vma->vm_flags;
 269	unsigned long ino = 0;
 270	unsigned long long pgoff = 0;
 271	unsigned long start, end;
 272	dev_t dev = 0;
 273	const char *name = NULL;
 274
 275	if (file) {
 276		const struct inode *inode = file_user_inode(vma->vm_file);
 277
 278		dev = inode->i_sb->s_dev;
 279		ino = inode->i_ino;
 280		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
 281	}
 282
 283	start = vma->vm_start;
 284	end = vma->vm_end;
 285	show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
 286	if (mm)
 287		anon_name = anon_vma_name(vma);
 288
 289	/*
 290	 * Print the dentry name for named mappings, and a
 291	 * special [heap] marker for the heap:
 292	 */
 293	if (file) {
 294		seq_pad(m, ' ');
 295		/*
 296		 * If user named this anon shared memory via
  297		 * prctl(PR_SET_VMA, ...), use the provided name.
 298		 */
 299		if (anon_name)
 300			seq_printf(m, "[anon_shmem:%s]", anon_name->name);
 301		else
 302			seq_path(m, file_user_path(file), "\n");
 303		goto done;
 304	}
 305
 306	if (vma->vm_ops && vma->vm_ops->name) {
 307		name = vma->vm_ops->name(vma);
 308		if (name)
 309			goto done;
 310	}
 311
 312	name = arch_vma_name(vma);
 313	if (!name) {
 314		if (!mm) {
 315			name = "[vdso]";
 316			goto done;
 317		}
 318
 319		if (vma_is_initial_heap(vma)) {
 320			name = "[heap]";
 321			goto done;
 322		}
 323
 324		if (vma_is_initial_stack(vma)) {
 325			name = "[stack]";
 326			goto done;
 327		}
 328
 329		if (anon_name) {
 330			seq_pad(m, ' ');
 331			seq_printf(m, "[anon:%s]", anon_name->name);
 332		}
 333	}
 334
 335done:
 336	if (name) {
 337		seq_pad(m, ' ');
 338		seq_puts(m, name);
 339	}
 340	seq_putc(m, '\n');
 341}
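/*
 * Example /proc/<pid>/maps lines as produced above (illustrative values):
 *
 *	55d0a0c39000-55d0a0c3b000 r--p 00000000 103:02 1234567  /usr/bin/cat
 *	7ffe1c2d0000-7ffe1c2f1000 rw-p 00000000 00:00 0         [stack]
 *
 * Fields: start-end, perms (r/w/x plus s(hared) or p(rivate)), file
 * offset, major:minor device (hex), inode, then the backing path or a
 * [heap]/[stack]/[vdso]/[anon:<name>] tag.
 */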
 342
 343static int show_map(struct seq_file *m, void *v)
 344{
 345	show_map_vma(m, v);
 346	return 0;
 347}
 348
 349static const struct seq_operations proc_pid_maps_op = {
 350	.start	= m_start,
 351	.next	= m_next,
 352	.stop	= m_stop,
 353	.show	= show_map
 354};
 355
 356static int pid_maps_open(struct inode *inode, struct file *file)
 357{
 358	return do_maps_open(inode, file, &proc_pid_maps_op);
 359}
 360
 361const struct file_operations proc_pid_maps_operations = {
 362	.open		= pid_maps_open,
 363	.read		= seq_read,
 364	.llseek		= seq_lseek,
 365	.release	= proc_map_release,
 366};
 367
 368/*
 369 * Proportional Set Size (PSS): my share of RSS.
 370 *
 371 * PSS of a process is the count of pages it has in memory, where each
 372 * page is divided by the number of processes sharing it.  So if a
 373 * process has 1000 pages all to itself, and 1000 shared with one other
 374 * process, its PSS will be 1500.
 375 *
 376 * To keep (accumulated) division errors low, we adopt a 64-bit
 377 * fixed-point pss counter. So (pss >> PSS_SHIFT) is the real
 378 * byte count.
 379 *
 380 * A shift of 12 before division means (assuming 4K page size):
 381 * 	- 1M 3-user-pages add up to 8KB errors;
 382 * 	- supports mapcount up to 2^24, or 16M;
 383 * 	- supports PSS up to 2^52 bytes, or 4PB.
 384 */
 385#define PSS_SHIFT 12
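/*
 * Worked example of the fixed-point accounting above (a standalone
 * userspace sketch, not kernel code). It reproduces the example from the
 * comment: 1000 private pages plus 1000 pages shared with one other
 * process give 1500 pages of PSS, i.e. 6000 kB with 4K pages.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE	4096ULL
#define EX_PSS_SHIFT	12

int main(void)
{
	uint64_t pss = 0;
	int i;

	for (i = 0; i < 1000; i++)		/* private: full page each */
		pss += EX_PAGE_SIZE << EX_PSS_SHIFT;
	for (i = 0; i < 1000; i++)		/* mapcount == 2: half page */
		pss += (EX_PAGE_SIZE << EX_PSS_SHIFT) / 2;

	/* (pss >> EX_PSS_SHIFT) is bytes; this prints "Pss: 6000 kB" */
	printf("Pss: %llu kB\n",
	       (unsigned long long)((pss >> EX_PSS_SHIFT) >> 10));
	return 0;
}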
 386
 387#ifdef CONFIG_PROC_PAGE_MONITOR
 388struct mem_size_stats {
 389	unsigned long resident;
 390	unsigned long shared_clean;
 391	unsigned long shared_dirty;
 392	unsigned long private_clean;
 393	unsigned long private_dirty;
 394	unsigned long referenced;
 395	unsigned long anonymous;
 396	unsigned long lazyfree;
 397	unsigned long anonymous_thp;
 398	unsigned long shmem_thp;
 399	unsigned long file_thp;
 400	unsigned long swap;
 401	unsigned long shared_hugetlb;
 402	unsigned long private_hugetlb;
 403	unsigned long ksm;
 404	u64 pss;
 405	u64 pss_anon;
 406	u64 pss_file;
 407	u64 pss_shmem;
 408	u64 pss_dirty;
 409	u64 pss_locked;
 410	u64 swap_pss;
 411};
 412
 413static void smaps_page_accumulate(struct mem_size_stats *mss,
 414		struct page *page, unsigned long size, unsigned long pss,
 415		bool dirty, bool locked, bool private)
 416{
 417	mss->pss += pss;
 418
 419	if (PageAnon(page))
 420		mss->pss_anon += pss;
 421	else if (PageSwapBacked(page))
 422		mss->pss_shmem += pss;
 423	else
 424		mss->pss_file += pss;
 425
 426	if (locked)
 427		mss->pss_locked += pss;
 428
 429	if (dirty || PageDirty(page)) {
 430		mss->pss_dirty += pss;
 431		if (private)
 432			mss->private_dirty += size;
 433		else
 434			mss->shared_dirty += size;
 435	} else {
 436		if (private)
 437			mss->private_clean += size;
 438		else
 439			mss->shared_clean += size;
 440	}
 441}
 442
 443static void smaps_account(struct mem_size_stats *mss, struct page *page,
 444		bool compound, bool young, bool dirty, bool locked,
 445		bool migration)
 446{
 447	int i, nr = compound ? compound_nr(page) : 1;
 448	unsigned long size = nr * PAGE_SIZE;
 449
 450	/*
 451	 * First accumulate quantities that depend only on |size| and the type
 452	 * of the compound page.
 453	 */
 454	if (PageAnon(page)) {
 455		mss->anonymous += size;
 456		if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
 457			mss->lazyfree += size;
 458	}
 459
 460	if (PageKsm(page))
 461		mss->ksm += size;
 462
 463	mss->resident += size;
 464	/* Accumulate the size in pages that have been accessed. */
 465	if (young || page_is_young(page) || PageReferenced(page))
 466		mss->referenced += size;
 467
 468	/*
 469	 * Then accumulate quantities that may depend on sharing, or that may
 470	 * differ page-by-page.
 471	 *
 472	 * page_count(page) == 1 guarantees the page is mapped exactly once.
 473	 * If any subpage of the compound page were mapped by a PTE, it would
 474	 * elevate page_count().
 475	 *
 476	 * page_mapcount() is called to get a snapshot of the mapcount.
 477	 * Without holding the page lock this snapshot can be slightly wrong as
 478	 * we cannot always read the mapcount atomically.  It is not safe to
 479	 * call page_mapcount() even with PTL held if the page is not mapped,
 480	 * especially for migration entries.  Treat regular migration entries
 481	 * as mapcount == 1.
 482	 */
 483	if ((page_count(page) == 1) || migration) {
 484		smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
 485			locked, true);
 486		return;
 487	}
 488	for (i = 0; i < nr; i++, page++) {
 489		int mapcount = page_mapcount(page);
 490		unsigned long pss = PAGE_SIZE << PSS_SHIFT;
 491		if (mapcount >= 2)
 492			pss /= mapcount;
 493		smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked,
 494				      mapcount < 2);
 495	}
 496}
 497
 498#ifdef CONFIG_SHMEM
 499static int smaps_pte_hole(unsigned long addr, unsigned long end,
 500			  __always_unused int depth, struct mm_walk *walk)
 501{
 502	struct mem_size_stats *mss = walk->private;
 503	struct vm_area_struct *vma = walk->vma;
 504
 505	mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping,
 506					      linear_page_index(vma, addr),
 507					      linear_page_index(vma, end));
 508
 509	return 0;
 510}
 511#else
 512#define smaps_pte_hole		NULL
 513#endif /* CONFIG_SHMEM */
 514
 515static void smaps_pte_hole_lookup(unsigned long addr, struct mm_walk *walk)
 516{
 517#ifdef CONFIG_SHMEM
 518	if (walk->ops->pte_hole) {
 519		/* depth is not used */
 520		smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk);
 521	}
 522#endif
 523}
 524
 525static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 526		struct mm_walk *walk)
 527{
 528	struct mem_size_stats *mss = walk->private;
 529	struct vm_area_struct *vma = walk->vma;
 530	bool locked = !!(vma->vm_flags & VM_LOCKED);
 531	struct page *page = NULL;
 532	bool migration = false, young = false, dirty = false;
 533	pte_t ptent = ptep_get(pte);
 534
 535	if (pte_present(ptent)) {
 536		page = vm_normal_page(vma, addr, ptent);
 537		young = pte_young(ptent);
 538		dirty = pte_dirty(ptent);
 539	} else if (is_swap_pte(ptent)) {
 540		swp_entry_t swpent = pte_to_swp_entry(ptent);
 541
 542		if (!non_swap_entry(swpent)) {
 543			int mapcount;
 544
 545			mss->swap += PAGE_SIZE;
 546			mapcount = swp_swapcount(swpent);
 547			if (mapcount >= 2) {
 548				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
 549
 550				do_div(pss_delta, mapcount);
 551				mss->swap_pss += pss_delta;
 552			} else {
 553				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
 554			}
 555		} else if (is_pfn_swap_entry(swpent)) {
 556			if (is_migration_entry(swpent))
 557				migration = true;
 558			page = pfn_swap_entry_to_page(swpent);
 559		}
 560	} else {
 561		smaps_pte_hole_lookup(addr, walk);
 562		return;
 563	}
 564
 565	if (!page)
 566		return;
 567
 568	smaps_account(mss, page, false, young, dirty, locked, migration);
 569}
 570
 571#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 572static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 573		struct mm_walk *walk)
 574{
 575	struct mem_size_stats *mss = walk->private;
 576	struct vm_area_struct *vma = walk->vma;
 577	bool locked = !!(vma->vm_flags & VM_LOCKED);
 578	struct page *page = NULL;
 579	bool migration = false;
 580
 581	if (pmd_present(*pmd)) {
 582		page = vm_normal_page_pmd(vma, addr, *pmd);
 583	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
 584		swp_entry_t entry = pmd_to_swp_entry(*pmd);
 585
 586		if (is_migration_entry(entry)) {
 587			migration = true;
 588			page = pfn_swap_entry_to_page(entry);
 589		}
 590	}
 591	if (IS_ERR_OR_NULL(page))
 592		return;
 593	if (PageAnon(page))
 594		mss->anonymous_thp += HPAGE_PMD_SIZE;
 595	else if (PageSwapBacked(page))
 596		mss->shmem_thp += HPAGE_PMD_SIZE;
 597	else if (is_zone_device_page(page))
 598		/* pass */;
 599	else
 600		mss->file_thp += HPAGE_PMD_SIZE;
 601
 602	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
 603		      locked, migration);
 604}
 605#else
 606static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 607		struct mm_walk *walk)
 608{
 609}
 610#endif
 611
 612static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 613			   struct mm_walk *walk)
 614{
 615	struct vm_area_struct *vma = walk->vma;
 616	pte_t *pte;
 617	spinlock_t *ptl;
 618
 619	ptl = pmd_trans_huge_lock(pmd, vma);
 620	if (ptl) {
 621		smaps_pmd_entry(pmd, addr, walk);
 622		spin_unlock(ptl);
 623		goto out;
 624	}
 625
 626	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 627	if (!pte) {
 628		walk->action = ACTION_AGAIN;
 629		return 0;
 630	}
 631	for (; addr != end; pte++, addr += PAGE_SIZE)
 632		smaps_pte_entry(pte, addr, walk);
 633	pte_unmap_unlock(pte - 1, ptl);
 634out:
 635	cond_resched();
 636	return 0;
 637}
 638
 639static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
 640{
 641	/*
 642	 * Don't forget to update Documentation/ on changes.
 643	 */
 644	static const char mnemonics[BITS_PER_LONG][2] = {
 645		/*
 646		 * In case we meet a flag we don't know about.
 647		 */
 648		[0 ... (BITS_PER_LONG-1)] = "??",
 649
 650		[ilog2(VM_READ)]	= "rd",
 651		[ilog2(VM_WRITE)]	= "wr",
 652		[ilog2(VM_EXEC)]	= "ex",
 653		[ilog2(VM_SHARED)]	= "sh",
 654		[ilog2(VM_MAYREAD)]	= "mr",
 655		[ilog2(VM_MAYWRITE)]	= "mw",
 656		[ilog2(VM_MAYEXEC)]	= "me",
 657		[ilog2(VM_MAYSHARE)]	= "ms",
 658		[ilog2(VM_GROWSDOWN)]	= "gd",
 659		[ilog2(VM_PFNMAP)]	= "pf",
 660		[ilog2(VM_LOCKED)]	= "lo",
 661		[ilog2(VM_IO)]		= "io",
 662		[ilog2(VM_SEQ_READ)]	= "sr",
 663		[ilog2(VM_RAND_READ)]	= "rr",
 664		[ilog2(VM_DONTCOPY)]	= "dc",
 665		[ilog2(VM_DONTEXPAND)]	= "de",
 666		[ilog2(VM_LOCKONFAULT)]	= "lf",
 667		[ilog2(VM_ACCOUNT)]	= "ac",
 668		[ilog2(VM_NORESERVE)]	= "nr",
 669		[ilog2(VM_HUGETLB)]	= "ht",
 670		[ilog2(VM_SYNC)]	= "sf",
 671		[ilog2(VM_ARCH_1)]	= "ar",
 672		[ilog2(VM_WIPEONFORK)]	= "wf",
 673		[ilog2(VM_DONTDUMP)]	= "dd",
 674#ifdef CONFIG_ARM64_BTI
 675		[ilog2(VM_ARM64_BTI)]	= "bt",
 676#endif
 677#ifdef CONFIG_MEM_SOFT_DIRTY
 678		[ilog2(VM_SOFTDIRTY)]	= "sd",
 679#endif
 680		[ilog2(VM_MIXEDMAP)]	= "mm",
 681		[ilog2(VM_HUGEPAGE)]	= "hg",
 682		[ilog2(VM_NOHUGEPAGE)]	= "nh",
 683		[ilog2(VM_MERGEABLE)]	= "mg",
 684		[ilog2(VM_UFFD_MISSING)]= "um",
 685		[ilog2(VM_UFFD_WP)]	= "uw",
 686#ifdef CONFIG_ARM64_MTE
 687		[ilog2(VM_MTE)]		= "mt",
 688		[ilog2(VM_MTE_ALLOWED)]	= "",
 689#endif
 690#ifdef CONFIG_ARCH_HAS_PKEYS
 691		/* These come out via ProtectionKey: */
 692		[ilog2(VM_PKEY_BIT0)]	= "",
 693		[ilog2(VM_PKEY_BIT1)]	= "",
 694		[ilog2(VM_PKEY_BIT2)]	= "",
 695		[ilog2(VM_PKEY_BIT3)]	= "",
 696#if VM_PKEY_BIT4
 697		[ilog2(VM_PKEY_BIT4)]	= "",
 698#endif
 699#endif /* CONFIG_ARCH_HAS_PKEYS */
 700#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
 701		[ilog2(VM_UFFD_MINOR)]	= "ui",
 702#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
 703#ifdef CONFIG_X86_USER_SHADOW_STACK
 704		[ilog2(VM_SHADOW_STACK)] = "ss",
 705#endif
 706	};
 707	size_t i;
 708
 709	seq_puts(m, "VmFlags: ");
 710	for (i = 0; i < BITS_PER_LONG; i++) {
 711		if (!mnemonics[i][0])
 712			continue;
 713		if (vma->vm_flags & (1UL << i)) {
 714			seq_putc(m, mnemonics[i][0]);
 715			seq_putc(m, mnemonics[i][1]);
 716			seq_putc(m, ' ');
 717		}
 718	}
 719	seq_putc(m, '\n');
 720}
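/*
 * Example output (illustrative): a locked, soft-dirty, read-write
 * anonymous mapping prints
 *
 *	VmFlags: rd wr mr mw me lo ac sd
 *
 * Mnemonics whose first byte is '\0' (the pkey bits, VM_MTE_ALLOWED) are
 * skipped by the loop above, so those flags never show up here.
 */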
 721
 722#ifdef CONFIG_HUGETLB_PAGE
 723static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
 724				 unsigned long addr, unsigned long end,
 725				 struct mm_walk *walk)
 726{
 727	struct mem_size_stats *mss = walk->private;
 728	struct vm_area_struct *vma = walk->vma;
 729	struct page *page = NULL;
 730	pte_t ptent = ptep_get(pte);
 731
 732	if (pte_present(ptent)) {
 733		page = vm_normal_page(vma, addr, ptent);
 734	} else if (is_swap_pte(ptent)) {
 735		swp_entry_t swpent = pte_to_swp_entry(ptent);
 736
 737		if (is_pfn_swap_entry(swpent))
 738			page = pfn_swap_entry_to_page(swpent);
 739	}
 740	if (page) {
 741		if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte))
 742			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
 743		else
 744			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
 745	}
 746	return 0;
 747}
 748#else
 749#define smaps_hugetlb_range	NULL
 750#endif /* HUGETLB_PAGE */
 751
 752static const struct mm_walk_ops smaps_walk_ops = {
 753	.pmd_entry		= smaps_pte_range,
 754	.hugetlb_entry		= smaps_hugetlb_range,
 755	.walk_lock		= PGWALK_RDLOCK,
 756};
 757
 758static const struct mm_walk_ops smaps_shmem_walk_ops = {
 759	.pmd_entry		= smaps_pte_range,
 760	.hugetlb_entry		= smaps_hugetlb_range,
 761	.pte_hole		= smaps_pte_hole,
 762	.walk_lock		= PGWALK_RDLOCK,
 763};
 764
 765/*
 766 * Gather mem stats from @vma with the indicated beginning
 767 * address @start, and keep them in @mss.
 768 *
 769 * Use vm_start of @vma as the beginning address if @start is 0.
 770 */
 771static void smap_gather_stats(struct vm_area_struct *vma,
 772		struct mem_size_stats *mss, unsigned long start)
 773{
 774	const struct mm_walk_ops *ops = &smaps_walk_ops;
 775
 776	/* Invalid start */
 777	if (start >= vma->vm_end)
 778		return;
 779
 780	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
 781		/*
 782		 * For shared or readonly shmem mappings we know that all
 783		 * swapped out pages belong to the shmem object, and we can
 784		 * obtain the swap value much more efficiently. For private
 785		 * writable mappings, we might have COW pages that are
 786		 * not affected by the parent swapped out pages of the shmem
 787		 * object, so we have to distinguish them during the page walk.
 788		 * Unless we know that the shmem object (or the part mapped by
 789		 * our VMA) has no swapped out pages at all.
 790		 */
 791		unsigned long shmem_swapped = shmem_swap_usage(vma);
 792
 793		if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
 794					!(vma->vm_flags & VM_WRITE))) {
 795			mss->swap += shmem_swapped;
 796		} else {
 797			ops = &smaps_shmem_walk_ops;
 798		}
 799	}
 800
 801	/* mmap_lock is held in m_start */
 802	if (!start)
 803		walk_page_vma(vma, ops, mss);
 804	else
 805		walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
 806}
 807
 808#define SEQ_PUT_DEC(str, val) \
 809		seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
 810
 811/* Show the contents common for smaps and smaps_rollup */
 812static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
 813	bool rollup_mode)
 814{
 815	SEQ_PUT_DEC("Rss:            ", mss->resident);
 816	SEQ_PUT_DEC(" kB\nPss:            ", mss->pss >> PSS_SHIFT);
 817	SEQ_PUT_DEC(" kB\nPss_Dirty:      ", mss->pss_dirty >> PSS_SHIFT);
 818	if (rollup_mode) {
 819		/*
 820		 * These are meaningful only for smaps_rollup, otherwise two of
 821		 * them are zero, and the other one is the same as Pss.
 822		 */
 823		SEQ_PUT_DEC(" kB\nPss_Anon:       ",
 824			mss->pss_anon >> PSS_SHIFT);
 825		SEQ_PUT_DEC(" kB\nPss_File:       ",
 826			mss->pss_file >> PSS_SHIFT);
 827		SEQ_PUT_DEC(" kB\nPss_Shmem:      ",
 828			mss->pss_shmem >> PSS_SHIFT);
 829	}
 830	SEQ_PUT_DEC(" kB\nShared_Clean:   ", mss->shared_clean);
 831	SEQ_PUT_DEC(" kB\nShared_Dirty:   ", mss->shared_dirty);
 832	SEQ_PUT_DEC(" kB\nPrivate_Clean:  ", mss->private_clean);
 833	SEQ_PUT_DEC(" kB\nPrivate_Dirty:  ", mss->private_dirty);
 834	SEQ_PUT_DEC(" kB\nReferenced:     ", mss->referenced);
 835	SEQ_PUT_DEC(" kB\nAnonymous:      ", mss->anonymous);
 836	SEQ_PUT_DEC(" kB\nKSM:            ", mss->ksm);
 837	SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
 838	SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
 839	SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
 840	SEQ_PUT_DEC(" kB\nFilePmdMapped:  ", mss->file_thp);
 841	SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
 842	seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
 843				  mss->private_hugetlb >> 10, 7);
 844	SEQ_PUT_DEC(" kB\nSwap:           ", mss->swap);
 845	SEQ_PUT_DEC(" kB\nSwapPss:        ",
 846					mss->swap_pss >> PSS_SHIFT);
 847	SEQ_PUT_DEC(" kB\nLocked:         ",
 848					mss->pss_locked >> PSS_SHIFT);
 849	seq_puts(m, " kB\n");
 850}
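/*
 * Excerpt of the per-VMA smaps output emitted above (illustrative values;
 * the Pss_Anon/Pss_File/Pss_Shmem lines appear only in smaps_rollup):
 *
 *	Rss:                 828 kB
 *	Pss:                 420 kB    (816 kB shared by two + 12 kB private)
 *	Pss_Dirty:            12 kB
 *	Shared_Clean:        816 kB
 *	Shared_Dirty:          0 kB
 *	Private_Clean:         0 kB
 *	Private_Dirty:        12 kB
 *	Swap:                  0 kB
 *	Locked:                0 kB
 */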
 851
 852static int show_smap(struct seq_file *m, void *v)
 853{
 854	struct vm_area_struct *vma = v;
 855	struct mem_size_stats mss = {};
 856
 857	smap_gather_stats(vma, &mss, 0);
 858
 859	show_map_vma(m, vma);
 860
 861	SEQ_PUT_DEC("Size:           ", vma->vm_end - vma->vm_start);
 862	SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
 863	SEQ_PUT_DEC(" kB\nMMUPageSize:    ", vma_mmu_pagesize(vma));
 864	seq_puts(m, " kB\n");
 865
 866	__show_smap(m, &mss, false);
 867
 868	seq_printf(m, "THPeligible:    %8u\n",
 869		   !!thp_vma_allowable_orders(vma, vma->vm_flags, true, false,
 870					      true, THP_ORDERS_ALL));
 871
 872	if (arch_pkeys_enabled())
 873		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
 874	show_smap_vma_flags(m, vma);
 875
 876	return 0;
 877}
 878
 879static int show_smaps_rollup(struct seq_file *m, void *v)
 880{
 881	struct proc_maps_private *priv = m->private;
 882	struct mem_size_stats mss = {};
 883	struct mm_struct *mm = priv->mm;
 884	struct vm_area_struct *vma;
 885	unsigned long vma_start = 0, last_vma_end = 0;
 886	int ret = 0;
 887	VMA_ITERATOR(vmi, mm, 0);
 888
 889	priv->task = get_proc_task(priv->inode);
 890	if (!priv->task)
 891		return -ESRCH;
 892
 893	if (!mm || !mmget_not_zero(mm)) {
 894		ret = -ESRCH;
 895		goto out_put_task;
 896	}
 897
 898	ret = mmap_read_lock_killable(mm);
 899	if (ret)
 900		goto out_put_mm;
 901
 902	hold_task_mempolicy(priv);
 903	vma = vma_next(&vmi);
 904
 905	if (unlikely(!vma))
 906		goto empty_set;
 907
 908	vma_start = vma->vm_start;
 909	do {
 910		smap_gather_stats(vma, &mss, 0);
 911		last_vma_end = vma->vm_end;
 912
 913		/*
 914		 * Release mmap_lock temporarily if someone wants to
 915		 * access it for write request.
 916		 */
 917		if (mmap_lock_is_contended(mm)) {
 918			vma_iter_invalidate(&vmi);
 919			mmap_read_unlock(mm);
 920			ret = mmap_read_lock_killable(mm);
 921			if (ret) {
 922				release_task_mempolicy(priv);
 923				goto out_put_mm;
 924			}
 925
 926			/*
 927			 * After dropping the lock, there are four cases to
 928			 * consider. See the following example for explanation.
 929			 *
 930			 *   +------+------+-----------+
 931			 *   | VMA1 | VMA2 | VMA3      |
 932			 *   +------+------+-----------+
 933			 *   |      |      |           |
 934			 *  4k     8k     16k         400k
 935			 *
 936			 * Suppose we drop the lock after reading VMA2 due to
 937			 * contention, then we get:
 938			 *
 939			 *	last_vma_end = 16k
 940			 *
 941			 * 1) VMA2 is freed, but VMA3 exists:
 942			 *
 943			 *    vma_next(vmi) will return VMA3.
 944			 *    In this case, just continue from VMA3.
 945			 *
 946			 * 2) VMA2 still exists:
 947			 *
 948			 *    vma_next(vmi) will return VMA3.
 949			 *    In this case, just continue from VMA3.
 950			 *
 951			 * 3) No more VMAs can be found:
 952			 *
 953			 *    vma_next(vmi) will return NULL.
 954			 *    No more things to do, just break.
 955			 *
 956			 * 4) (last_vma_end - 1) is the middle of a vma (VMA'):
 957			 *
 958			 *    vma_next(vmi) will return VMA' whose range
 959			 *    contains last_vma_end.
 960			 *    Iterate VMA' from last_vma_end.
 961			 */
 962			vma = vma_next(&vmi);
 963			/* Case 3 above */
 964			if (!vma)
 965				break;
 966
 967			/* Case 1 and 2 above */
 968			if (vma->vm_start >= last_vma_end)
 969				continue;
 970
 971			/* Case 4 above */
 972			if (vma->vm_end > last_vma_end)
 973				smap_gather_stats(vma, &mss, last_vma_end);
 974		}
 975	} for_each_vma(vmi, vma);
 976
 977empty_set:
 978	show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
 979	seq_pad(m, ' ');
 980	seq_puts(m, "[rollup]\n");
 981
 982	__show_smap(m, &mss, true);
 983
 984	release_task_mempolicy(priv);
 985	mmap_read_unlock(mm);
 986
 987out_put_mm:
 988	mmput(mm);
 989out_put_task:
 990	put_task_struct(priv->task);
 991	priv->task = NULL;
 992
 993	return ret;
 994}
 995#undef SEQ_PUT_DEC
 996
 997static const struct seq_operations proc_pid_smaps_op = {
 998	.start	= m_start,
 999	.next	= m_next,
1000	.stop	= m_stop,
1001	.show	= show_smap
1002};
1003
1004static int pid_smaps_open(struct inode *inode, struct file *file)
1005{
1006	return do_maps_open(inode, file, &proc_pid_smaps_op);
1007}
1008
1009static int smaps_rollup_open(struct inode *inode, struct file *file)
1010{
1011	int ret;
1012	struct proc_maps_private *priv;
1013
1014	priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
1015	if (!priv)
1016		return -ENOMEM;
1017
1018	ret = single_open(file, show_smaps_rollup, priv);
1019	if (ret)
1020		goto out_free;
1021
1022	priv->inode = inode;
1023	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
1024	if (IS_ERR(priv->mm)) {
1025		ret = PTR_ERR(priv->mm);
1026
1027		single_release(inode, file);
1028		goto out_free;
1029	}
1030
1031	return 0;
1032
1033out_free:
1034	kfree(priv);
1035	return ret;
1036}
1037
1038static int smaps_rollup_release(struct inode *inode, struct file *file)
1039{
1040	struct seq_file *seq = file->private_data;
1041	struct proc_maps_private *priv = seq->private;
1042
1043	if (priv->mm)
1044		mmdrop(priv->mm);
1045
1046	kfree(priv);
1047	return single_release(inode, file);
1048}
1049
1050const struct file_operations proc_pid_smaps_operations = {
1051	.open		= pid_smaps_open,
1052	.read		= seq_read,
1053	.llseek		= seq_lseek,
1054	.release	= proc_map_release,
1055};
1056
1057const struct file_operations proc_pid_smaps_rollup_operations = {
1058	.open		= smaps_rollup_open,
1059	.read		= seq_read,
1060	.llseek		= seq_lseek,
1061	.release	= smaps_rollup_release,
1062};
1063
1064enum clear_refs_types {
1065	CLEAR_REFS_ALL = 1,
1066	CLEAR_REFS_ANON,
1067	CLEAR_REFS_MAPPED,
1068	CLEAR_REFS_SOFT_DIRTY,
1069	CLEAR_REFS_MM_HIWATER_RSS,
1070	CLEAR_REFS_LAST,
1071};
1072
1073struct clear_refs_private {
1074	enum clear_refs_types type;
1075};
1076
1077#ifdef CONFIG_MEM_SOFT_DIRTY
1078
1079static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1080{
1081	struct page *page;
1082
1083	if (!pte_write(pte))
1084		return false;
1085	if (!is_cow_mapping(vma->vm_flags))
1086		return false;
1087	if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
1088		return false;
1089	page = vm_normal_page(vma, addr, pte);
1090	if (!page)
1091		return false;
1092	return page_maybe_dma_pinned(page);
1093}
1094
1095static inline void clear_soft_dirty(struct vm_area_struct *vma,
1096		unsigned long addr, pte_t *pte)
1097{
1098	/*
1099	 * The soft-dirty tracker uses #PF-s to catch writes
1100	 * to pages, so write-protect the pte as well. See
1101	 * Documentation/admin-guide/mm/soft-dirty.rst for a full description
1102	 * of how soft-dirty works.
1103	 */
1104	pte_t ptent = ptep_get(pte);
1105
1106	if (pte_present(ptent)) {
1107		pte_t old_pte;
1108
1109		if (pte_is_pinned(vma, addr, ptent))
1110			return;
1111		old_pte = ptep_modify_prot_start(vma, addr, pte);
1112		ptent = pte_wrprotect(old_pte);
1113		ptent = pte_clear_soft_dirty(ptent);
1114		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
1115	} else if (is_swap_pte(ptent)) {
1116		ptent = pte_swp_clear_soft_dirty(ptent);
1117		set_pte_at(vma->vm_mm, addr, pte, ptent);
1118	}
1119}
1120#else
1121static inline void clear_soft_dirty(struct vm_area_struct *vma,
1122		unsigned long addr, pte_t *pte)
1123{
1124}
1125#endif
1126
1127#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1128static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1129		unsigned long addr, pmd_t *pmdp)
1130{
1131	pmd_t old, pmd = *pmdp;
1132
1133	if (pmd_present(pmd)) {
1134		/* See comment in change_huge_pmd() */
1135		old = pmdp_invalidate(vma, addr, pmdp);
1136		if (pmd_dirty(old))
1137			pmd = pmd_mkdirty(pmd);
1138		if (pmd_young(old))
1139			pmd = pmd_mkyoung(pmd);
1140
1141		pmd = pmd_wrprotect(pmd);
1142		pmd = pmd_clear_soft_dirty(pmd);
1143
1144		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1145	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
1146		pmd = pmd_swp_clear_soft_dirty(pmd);
1147		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1148	}
1149}
1150#else
1151static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1152		unsigned long addr, pmd_t *pmdp)
1153{
1154}
1155#endif
1156
1157static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
1158				unsigned long end, struct mm_walk *walk)
1159{
1160	struct clear_refs_private *cp = walk->private;
1161	struct vm_area_struct *vma = walk->vma;
1162	pte_t *pte, ptent;
1163	spinlock_t *ptl;
1164	struct page *page;
1165
1166	ptl = pmd_trans_huge_lock(pmd, vma);
1167	if (ptl) {
1168		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1169			clear_soft_dirty_pmd(vma, addr, pmd);
1170			goto out;
1171		}
1172
1173		if (!pmd_present(*pmd))
1174			goto out;
1175
1176		page = pmd_page(*pmd);
1177
1178		/* Clear accessed and referenced bits. */
1179		pmdp_test_and_clear_young(vma, addr, pmd);
1180		test_and_clear_page_young(page);
1181		ClearPageReferenced(page);
1182out:
1183		spin_unlock(ptl);
1184		return 0;
1185	}
1186
1187	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1188	if (!pte) {
1189		walk->action = ACTION_AGAIN;
1190		return 0;
1191	}
1192	for (; addr != end; pte++, addr += PAGE_SIZE) {
1193		ptent = ptep_get(pte);
1194
1195		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1196			clear_soft_dirty(vma, addr, pte);
1197			continue;
1198		}
1199
1200		if (!pte_present(ptent))
1201			continue;
1202
1203		page = vm_normal_page(vma, addr, ptent);
1204		if (!page)
1205			continue;
1206
1207		/* Clear accessed and referenced bits. */
1208		ptep_test_and_clear_young(vma, addr, pte);
1209		test_and_clear_page_young(page);
1210		ClearPageReferenced(page);
1211	}
1212	pte_unmap_unlock(pte - 1, ptl);
1213	cond_resched();
1214	return 0;
1215}
1216
1217static int clear_refs_test_walk(unsigned long start, unsigned long end,
1218				struct mm_walk *walk)
1219{
1220	struct clear_refs_private *cp = walk->private;
1221	struct vm_area_struct *vma = walk->vma;
1222
1223	if (vma->vm_flags & VM_PFNMAP)
1224		return 1;
1225
1226	/*
1227	 * Writing 1 to /proc/pid/clear_refs affects all pages.
1228	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
1229	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
1230	 * Writing 4 to /proc/pid/clear_refs affects all pages.
1231	 */
1232	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
1233		return 1;
1234	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
1235		return 1;
1236	return 0;
1237}
1238
1239static const struct mm_walk_ops clear_refs_walk_ops = {
1240	.pmd_entry		= clear_refs_pte_range,
1241	.test_walk		= clear_refs_test_walk,
1242	.walk_lock		= PGWALK_WRLOCK,
1243};
1244
1245static ssize_t clear_refs_write(struct file *file, const char __user *buf,
1246				size_t count, loff_t *ppos)
1247{
1248	struct task_struct *task;
1249	char buffer[PROC_NUMBUF] = {};
1250	struct mm_struct *mm;
1251	struct vm_area_struct *vma;
1252	enum clear_refs_types type;
1253	int itype;
1254	int rv;
1255
1256	if (count > sizeof(buffer) - 1)
1257		count = sizeof(buffer) - 1;
1258	if (copy_from_user(buffer, buf, count))
1259		return -EFAULT;
1260	rv = kstrtoint(strstrip(buffer), 10, &itype);
1261	if (rv < 0)
1262		return rv;
1263	type = (enum clear_refs_types)itype;
1264	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
1265		return -EINVAL;
1266
1267	task = get_proc_task(file_inode(file));
1268	if (!task)
1269		return -ESRCH;
1270	mm = get_task_mm(task);
1271	if (mm) {
1272		VMA_ITERATOR(vmi, mm, 0);
1273		struct mmu_notifier_range range;
1274		struct clear_refs_private cp = {
1275			.type = type,
1276		};
1277
1278		if (mmap_write_lock_killable(mm)) {
1279			count = -EINTR;
1280			goto out_mm;
1281		}
1282		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
1283			/*
1284			 * Writing 5 to /proc/pid/clear_refs resets the peak
1285			 * resident set size to this mm's current rss value.
1286			 */
1287			reset_mm_hiwater_rss(mm);
1288			goto out_unlock;
1289		}
1290
1291		if (type == CLEAR_REFS_SOFT_DIRTY) {
1292			for_each_vma(vmi, vma) {
1293				if (!(vma->vm_flags & VM_SOFTDIRTY))
1294					continue;
1295				vm_flags_clear(vma, VM_SOFTDIRTY);
1296				vma_set_page_prot(vma);
1297			}
1298
1299			inc_tlb_flush_pending(mm);
1300			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
1301						0, mm, 0, -1UL);
1302			mmu_notifier_invalidate_range_start(&range);
1303		}
1304		walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
1305		if (type == CLEAR_REFS_SOFT_DIRTY) {
1306			mmu_notifier_invalidate_range_end(&range);
1307			flush_tlb_mm(mm);
1308			dec_tlb_flush_pending(mm);
1309		}
1310out_unlock:
1311		mmap_write_unlock(mm);
1312out_mm:
1313		mmput(mm);
1314	}
1315	put_task_struct(task);
1316
1317	return count;
1318}
1319
1320const struct file_operations proc_clear_refs_operations = {
1321	.write		= clear_refs_write,
1322	.llseek		= noop_llseek,
1323};
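/*
 * Userspace usage sketch (illustrative, not kernel code): estimate a
 * working set by clearing referenced bits, running the workload, then
 * re-reading the Referenced: lines from /proc/<pid>/smaps.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static int clear_refs(pid_t pid, const char *type)
{
	char path[64];
	ssize_t n;
	int fd;

	/* type: "1" all pages, "2" anon only, "3" file-backed only,
	 * "4" clear soft-dirty bits, "5" reset peak RSS */
	snprintf(path, sizeof(path), "/proc/%d/clear_refs", (int)pid);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	n = write(fd, type, 1);
	close(fd);
	return n == 1 ? 0 : -1;
}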
1324
1325typedef struct {
1326	u64 pme;
1327} pagemap_entry_t;
1328
1329struct pagemapread {
1330	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
1331	pagemap_entry_t *buffer;
1332	bool show_pfn;
1333};
1334
1335#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
1336#define PAGEMAP_WALK_MASK	(PMD_MASK)
1337
1338#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
1339#define PM_PFRAME_BITS		55
1340#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1341#define PM_SOFT_DIRTY		BIT_ULL(55)
1342#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
1343#define PM_UFFD_WP		BIT_ULL(57)
1344#define PM_FILE			BIT_ULL(61)
1345#define PM_SWAP			BIT_ULL(62)
1346#define PM_PRESENT		BIT_ULL(63)
1347
1348#define PM_END_OF_BUFFER    1
1349
1350static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
1351{
1352	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
1353}
1354
1355static int add_to_pagemap(pagemap_entry_t *pme, struct pagemapread *pm)
1356{
1357	pm->buffer[pm->pos++] = *pme;
1358	if (pm->pos >= pm->len)
1359		return PM_END_OF_BUFFER;
1360	return 0;
1361}
1362
1363static int pagemap_pte_hole(unsigned long start, unsigned long end,
1364			    __always_unused int depth, struct mm_walk *walk)
1365{
1366	struct pagemapread *pm = walk->private;
1367	unsigned long addr = start;
1368	int err = 0;
1369
1370	while (addr < end) {
1371		struct vm_area_struct *vma = find_vma(walk->mm, addr);
1372		pagemap_entry_t pme = make_pme(0, 0);
1373		/* End of address space hole, which we mark as non-present. */
1374		unsigned long hole_end;
1375
1376		if (vma)
1377			hole_end = min(end, vma->vm_start);
1378		else
1379			hole_end = end;
1380
1381		for (; addr < hole_end; addr += PAGE_SIZE) {
1382			err = add_to_pagemap(&pme, pm);
1383			if (err)
1384				goto out;
1385		}
1386
1387		if (!vma)
1388			break;
1389
1390		/* Addresses in the VMA. */
1391		if (vma->vm_flags & VM_SOFTDIRTY)
1392			pme = make_pme(0, PM_SOFT_DIRTY);
1393		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
1394			err = add_to_pagemap(&pme, pm);
1395			if (err)
1396				goto out;
1397		}
1398	}
1399out:
1400	return err;
1401}
1402
1403static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
1404		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1405{
1406	u64 frame = 0, flags = 0;
1407	struct page *page = NULL;
1408	bool migration = false;
1409
1410	if (pte_present(pte)) {
1411		if (pm->show_pfn)
1412			frame = pte_pfn(pte);
1413		flags |= PM_PRESENT;
1414		page = vm_normal_page(vma, addr, pte);
1415		if (pte_soft_dirty(pte))
1416			flags |= PM_SOFT_DIRTY;
1417		if (pte_uffd_wp(pte))
1418			flags |= PM_UFFD_WP;
1419	} else if (is_swap_pte(pte)) {
1420		swp_entry_t entry;
1421		if (pte_swp_soft_dirty(pte))
1422			flags |= PM_SOFT_DIRTY;
1423		if (pte_swp_uffd_wp(pte))
1424			flags |= PM_UFFD_WP;
1425		entry = pte_to_swp_entry(pte);
1426		if (pm->show_pfn) {
1427			pgoff_t offset;
1428			/*
1429			 * For PFN swap entries, keep the offset field as the
1430			 * PFN, to stay compatible with old smaps.
1431			 */
1432			if (is_pfn_swap_entry(entry))
1433				offset = swp_offset_pfn(entry);
1434			else
1435				offset = swp_offset(entry);
1436			frame = swp_type(entry) |
1437			    (offset << MAX_SWAPFILES_SHIFT);
1438		}
1439		flags |= PM_SWAP;
1440		migration = is_migration_entry(entry);
1441		if (is_pfn_swap_entry(entry))
1442			page = pfn_swap_entry_to_page(entry);
1443		if (pte_marker_entry_uffd_wp(entry))
1444			flags |= PM_UFFD_WP;
1445	}
1446
1447	if (page && !PageAnon(page))
1448		flags |= PM_FILE;
1449	if (page && !migration && page_mapcount(page) == 1)
1450		flags |= PM_MMAP_EXCLUSIVE;
1451	if (vma->vm_flags & VM_SOFTDIRTY)
1452		flags |= PM_SOFT_DIRTY;
1453
1454	return make_pme(frame, flags);
1455}
1456
1457static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1458			     struct mm_walk *walk)
1459{
1460	struct vm_area_struct *vma = walk->vma;
1461	struct pagemapread *pm = walk->private;
1462	spinlock_t *ptl;
1463	pte_t *pte, *orig_pte;
1464	int err = 0;
1465#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1466	bool migration = false;
1467
1468	ptl = pmd_trans_huge_lock(pmdp, vma);
1469	if (ptl) {
1470		u64 flags = 0, frame = 0;
1471		pmd_t pmd = *pmdp;
1472		struct page *page = NULL;
1473
1474		if (vma->vm_flags & VM_SOFTDIRTY)
1475			flags |= PM_SOFT_DIRTY;
1476
1477		if (pmd_present(pmd)) {
1478			page = pmd_page(pmd);
1479
1480			flags |= PM_PRESENT;
1481			if (pmd_soft_dirty(pmd))
1482				flags |= PM_SOFT_DIRTY;
1483			if (pmd_uffd_wp(pmd))
1484				flags |= PM_UFFD_WP;
1485			if (pm->show_pfn)
1486				frame = pmd_pfn(pmd) +
1487					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1488		}
1489#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1490		else if (is_swap_pmd(pmd)) {
1491			swp_entry_t entry = pmd_to_swp_entry(pmd);
1492			unsigned long offset;
1493
1494			if (pm->show_pfn) {
1495				if (is_pfn_swap_entry(entry))
1496					offset = swp_offset_pfn(entry);
1497				else
1498					offset = swp_offset(entry);
1499				offset = offset +
1500					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1501				frame = swp_type(entry) |
1502					(offset << MAX_SWAPFILES_SHIFT);
1503			}
1504			flags |= PM_SWAP;
1505			if (pmd_swp_soft_dirty(pmd))
1506				flags |= PM_SOFT_DIRTY;
1507			if (pmd_swp_uffd_wp(pmd))
1508				flags |= PM_UFFD_WP;
1509			VM_BUG_ON(!is_pmd_migration_entry(pmd));
1510			migration = is_migration_entry(entry);
1511			page = pfn_swap_entry_to_page(entry);
1512		}
1513#endif
1514
1515		if (page && !migration && page_mapcount(page) == 1)
1516			flags |= PM_MMAP_EXCLUSIVE;
1517
1518		for (; addr != end; addr += PAGE_SIZE) {
1519			pagemap_entry_t pme = make_pme(frame, flags);
1520
1521			err = add_to_pagemap(&pme, pm);
1522			if (err)
1523				break;
1524			if (pm->show_pfn) {
1525				if (flags & PM_PRESENT)
1526					frame++;
1527				else if (flags & PM_SWAP)
1528					frame += (1 << MAX_SWAPFILES_SHIFT);
1529			}
1530		}
1531		spin_unlock(ptl);
1532		return err;
1533	}
1534#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1535
1536	/*
1537	 * We can assume that @vma always points to a valid VMA and @end never
1538	 * goes beyond vma->vm_end.
1539	 */
1540	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
1541	if (!pte) {
1542		walk->action = ACTION_AGAIN;
1543		return err;
1544	}
1545	for (; addr < end; pte++, addr += PAGE_SIZE) {
1546		pagemap_entry_t pme;
1547
1548		pme = pte_to_pagemap_entry(pm, vma, addr, ptep_get(pte));
1549		err = add_to_pagemap(&pme, pm);
1550		if (err)
1551			break;
1552	}
1553	pte_unmap_unlock(orig_pte, ptl);
1554
1555	cond_resched();
1556
1557	return err;
1558}
1559
1560#ifdef CONFIG_HUGETLB_PAGE
1561/* This function walks within one hugetlb entry in a single call */
1562static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
1563				 unsigned long addr, unsigned long end,
1564				 struct mm_walk *walk)
1565{
1566	struct pagemapread *pm = walk->private;
1567	struct vm_area_struct *vma = walk->vma;
1568	u64 flags = 0, frame = 0;
1569	int err = 0;
1570	pte_t pte;
1571
1572	if (vma->vm_flags & VM_SOFTDIRTY)
1573		flags |= PM_SOFT_DIRTY;
1574
1575	pte = huge_ptep_get(ptep);
1576	if (pte_present(pte)) {
1577		struct page *page = pte_page(pte);
1578
1579		if (!PageAnon(page))
1580			flags |= PM_FILE;
1581
1582		if (page_mapcount(page) == 1)
1583			flags |= PM_MMAP_EXCLUSIVE;
1584
1585		if (huge_pte_uffd_wp(pte))
1586			flags |= PM_UFFD_WP;
1587
1588		flags |= PM_PRESENT;
1589		if (pm->show_pfn)
1590			frame = pte_pfn(pte) +
1591				((addr & ~hmask) >> PAGE_SHIFT);
1592	} else if (pte_swp_uffd_wp_any(pte)) {
1593		flags |= PM_UFFD_WP;
1594	}
1595
1596	for (; addr != end; addr += PAGE_SIZE) {
1597		pagemap_entry_t pme = make_pme(frame, flags);
1598
1599		err = add_to_pagemap(&pme, pm);
1600		if (err)
1601			return err;
1602		if (pm->show_pfn && (flags & PM_PRESENT))
1603			frame++;
1604	}
1605
1606	cond_resched();
1607
1608	return err;
1609}
1610#else
1611#define pagemap_hugetlb_range	NULL
1612#endif /* HUGETLB_PAGE */
1613
1614static const struct mm_walk_ops pagemap_ops = {
1615	.pmd_entry	= pagemap_pmd_range,
1616	.pte_hole	= pagemap_pte_hole,
1617	.hugetlb_entry	= pagemap_hugetlb_range,
1618	.walk_lock	= PGWALK_RDLOCK,
1619};
1620
1621/*
1622 * /proc/pid/pagemap - an array mapping virtual pages to pfns
1623 *
1624 * For each page in the address space, this file contains one 64-bit entry
1625 * consisting of the following:
1626 *
1627 * Bits 0-54  page frame number (PFN) if present
1628 * Bits 0-4   swap type if swapped
1629 * Bits 5-54  swap offset if swapped
1630 * Bit  55    pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
1631 * Bit  56    page exclusively mapped
1632 * Bit  57    pte is uffd-wp write-protected
1633 * Bits 58-60 zero
1634 * Bit  61    page is file-page or shared-anon
1635 * Bit  62    page swapped
1636 * Bit  63    page present
1637 *
1638 * If the page is not present but in swap, then the PFN contains an
1639 * encoding of the swap file number and the page's offset into the
1640 * swap. Unmapped pages return a null PFN. This allows determining
1641 * precisely which pages are mapped (or in swap) and comparing mapped
1642 * pages between processes.
1643 *
1644 * Efficient users of this interface will use /proc/pid/maps to
1645 * determine which areas of memory are actually mapped and llseek to
1646 * skip over unmapped regions.
1647 */
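/*
 * Userspace sketch of the lookup described above (illustrative; the PFN
 * field reads as zero without CAP_SYS_ADMIN). Bit positions follow the
 * layout in the comment: 63 present, 62 swap, 56 exclusive, 0-54 PFN or
 * swap type (0-4) plus offset (5-54).
 */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int vaddr_to_pfn(int pagemap_fd, unsigned long vaddr, long page_size)
{
	uint64_t ent;
	off_t off = (off_t)(vaddr / page_size) * sizeof(ent);

	if (pread(pagemap_fd, &ent, sizeof(ent), off) != (ssize_t)sizeof(ent))
		return -1;
	if (ent & (1ULL << 63))			/* PM_PRESENT */
		printf("pfn=0x%llx%s\n",
		       (unsigned long long)(ent & ((1ULL << 55) - 1)),
		       (ent & (1ULL << 56)) ? " exclusive" : "");
	else if (ent & (1ULL << 62))		/* PM_SWAP */
		printf("swap type=%llu offset=0x%llx\n",
		       (unsigned long long)(ent & 0x1f),
		       (unsigned long long)((ent >> 5) & ((1ULL << 50) - 1)));
	else
		printf("not present\n");
	return 0;
}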
1648static ssize_t pagemap_read(struct file *file, char __user *buf,
1649			    size_t count, loff_t *ppos)
1650{
1651	struct mm_struct *mm = file->private_data;
1652	struct pagemapread pm;
1653	unsigned long src;
1654	unsigned long svpfn;
1655	unsigned long start_vaddr;
1656	unsigned long end_vaddr;
1657	int ret = 0, copied = 0;
1658
1659	if (!mm || !mmget_not_zero(mm))
1660		goto out;
1661
1662	ret = -EINVAL;
1663	/* file position must be aligned */
1664	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1665		goto out_mm;
1666
1667	ret = 0;
1668	if (!count)
1669		goto out_mm;
1670
1671	/* do not disclose physical addresses: attack vector */
1672	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
1673
1674	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1675	pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL);
1676	ret = -ENOMEM;
1677	if (!pm.buffer)
1678		goto out_mm;
1679
1680	src = *ppos;
1681	svpfn = src / PM_ENTRY_BYTES;
1682	end_vaddr = mm->task_size;
1683
1684	/* watch out for wraparound */
1685	start_vaddr = end_vaddr;
1686	if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) {
1687		unsigned long end;
1688
1689		ret = mmap_read_lock_killable(mm);
1690		if (ret)
1691			goto out_free;
1692		start_vaddr = untagged_addr_remote(mm, svpfn << PAGE_SHIFT);
1693		mmap_read_unlock(mm);
1694
1695		end = start_vaddr + ((count / PM_ENTRY_BYTES) << PAGE_SHIFT);
1696		if (end >= start_vaddr && end < mm->task_size)
1697			end_vaddr = end;
1698	}
1699
1700	/* Ensure the address is inside the task */
1701	if (start_vaddr > mm->task_size)
1702		start_vaddr = end_vaddr;
1703
1704	ret = 0;
1705	while (count && (start_vaddr < end_vaddr)) {
1706		int len;
1707		unsigned long end;
1708
1709		pm.pos = 0;
1710		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
1711		/* overflow ? */
1712		if (end < start_vaddr || end > end_vaddr)
1713			end = end_vaddr;
1714		ret = mmap_read_lock_killable(mm);
1715		if (ret)
1716			goto out_free;
1717		ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
1718		mmap_read_unlock(mm);
1719		start_vaddr = end;
1720
1721		len = min(count, PM_ENTRY_BYTES * pm.pos);
1722		if (copy_to_user(buf, pm.buffer, len)) {
1723			ret = -EFAULT;
1724			goto out_free;
1725		}
1726		copied += len;
1727		buf += len;
1728		count -= len;
1729	}
1730	*ppos += copied;
1731	if (!ret || ret == PM_END_OF_BUFFER)
1732		ret = copied;
1733
1734out_free:
1735	kfree(pm.buffer);
1736out_mm:
1737	mmput(mm);
1738out:
1739	return ret;
1740}
1741
1742static int pagemap_open(struct inode *inode, struct file *file)
1743{
1744	struct mm_struct *mm;
1745
1746	mm = proc_mem_open(inode, PTRACE_MODE_READ);
1747	if (IS_ERR(mm))
1748		return PTR_ERR(mm);
1749	file->private_data = mm;
1750	return 0;
1751}
1752
1753static int pagemap_release(struct inode *inode, struct file *file)
1754{
1755	struct mm_struct *mm = file->private_data;
1756
1757	if (mm)
1758		mmdrop(mm);
1759	return 0;
1760}
1761
1762#define PM_SCAN_CATEGORIES	(PAGE_IS_WPALLOWED | PAGE_IS_WRITTEN |	\
1763				 PAGE_IS_FILE |	PAGE_IS_PRESENT |	\
1764				 PAGE_IS_SWAPPED | PAGE_IS_PFNZERO |	\
1765				 PAGE_IS_HUGE | PAGE_IS_SOFT_DIRTY)
1766#define PM_SCAN_FLAGS		(PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC)
1767
1768struct pagemap_scan_private {
1769	struct pm_scan_arg arg;
1770	unsigned long masks_of_interest, cur_vma_category;
1771	struct page_region *vec_buf;
1772	unsigned long vec_buf_len, vec_buf_index, found_pages;
1773	struct page_region __user *vec_out;
1774};
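/*
 * Caller-side sketch of the PAGEMAP_SCAN ioctl this state backs, assuming
 * the struct pm_scan_arg / struct page_region / PAGE_IS_* uapi exported by
 * <linux/fs.h> (v6.7+). PAGE_IS_WRITTEN is only meaningful once userfaultfd
 * async write-protect has armed the range; the ioctl is issued on a
 * /proc/<pid>/pagemap fd.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static long scan_written(int pagemap_fd, unsigned long long start,
			 unsigned long long end,
			 struct page_region *vec, unsigned long long vec_len)
{
	struct pm_scan_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.size = sizeof(arg);
	arg.start = start;
	arg.end = end;
	arg.vec = (uintptr_t)vec;
	arg.vec_len = vec_len;
	arg.max_pages = ~0ULL;			/* effectively unlimited */
	arg.category_mask = PAGE_IS_WRITTEN;	/* require written pages */
	arg.return_mask = PAGE_IS_WRITTEN;

	/* on success, returns the number of page_region entries filled */
	return ioctl(pagemap_fd, PAGEMAP_SCAN, &arg);
}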
1775
1776static unsigned long pagemap_page_category(struct pagemap_scan_private *p,
1777					   struct vm_area_struct *vma,
1778					   unsigned long addr, pte_t pte)
1779{
1780	unsigned long categories = 0;
1781
1782	if (pte_present(pte)) {
1783		struct page *page;
1784
1785		categories |= PAGE_IS_PRESENT;
1786		if (!pte_uffd_wp(pte))
1787			categories |= PAGE_IS_WRITTEN;
1788
1789		if (p->masks_of_interest & PAGE_IS_FILE) {
1790			page = vm_normal_page(vma, addr, pte);
1791			if (page && !PageAnon(page))
1792				categories |= PAGE_IS_FILE;
1793		}
1794
1795		if (is_zero_pfn(pte_pfn(pte)))
1796			categories |= PAGE_IS_PFNZERO;
1797		if (pte_soft_dirty(pte))
1798			categories |= PAGE_IS_SOFT_DIRTY;
1799	} else if (is_swap_pte(pte)) {
1800		swp_entry_t swp;
1801
1802		categories |= PAGE_IS_SWAPPED;
1803		if (!pte_swp_uffd_wp_any(pte))
1804			categories |= PAGE_IS_WRITTEN;
1805
1806		if (p->masks_of_interest & PAGE_IS_FILE) {
1807			swp = pte_to_swp_entry(pte);
1808			if (is_pfn_swap_entry(swp) &&
1809			    !folio_test_anon(pfn_swap_entry_folio(swp)))
1810				categories |= PAGE_IS_FILE;
1811		}
1812		if (pte_swp_soft_dirty(pte))
1813			categories |= PAGE_IS_SOFT_DIRTY;
1814	}
1815
1816	return categories;
1817}
1818
1819static void make_uffd_wp_pte(struct vm_area_struct *vma,
1820			     unsigned long addr, pte_t *pte, pte_t ptent)
1821{
1822	if (pte_present(ptent)) {
1823		pte_t old_pte;
1824
1825		old_pte = ptep_modify_prot_start(vma, addr, pte);
1826		ptent = pte_mkuffd_wp(old_pte);
1827		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
1828	} else if (is_swap_pte(ptent)) {
1829		ptent = pte_swp_mkuffd_wp(ptent);
1830		set_pte_at(vma->vm_mm, addr, pte, ptent);
1831	} else {
1832		set_pte_at(vma->vm_mm, addr, pte,
1833			   make_pte_marker(PTE_MARKER_UFFD_WP));
1834	}
1835}
1836
1837#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1838static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
1839					  struct vm_area_struct *vma,
1840					  unsigned long addr, pmd_t pmd)
1841{
1842	unsigned long categories = PAGE_IS_HUGE;
1843
1844	if (pmd_present(pmd)) {
1845		struct page *page;
1846
1847		categories |= PAGE_IS_PRESENT;
1848		if (!pmd_uffd_wp(pmd))
1849			categories |= PAGE_IS_WRITTEN;
1850
1851		if (p->masks_of_interest & PAGE_IS_FILE) {
1852			page = vm_normal_page_pmd(vma, addr, pmd);
1853			if (page && !PageAnon(page))
1854				categories |= PAGE_IS_FILE;
1855		}
1856
1857		if (is_zero_pfn(pmd_pfn(pmd)))
1858			categories |= PAGE_IS_PFNZERO;
1859		if (pmd_soft_dirty(pmd))
1860			categories |= PAGE_IS_SOFT_DIRTY;
1861	} else if (is_swap_pmd(pmd)) {
1862		swp_entry_t swp;
1863
1864		categories |= PAGE_IS_SWAPPED;
1865		if (!pmd_swp_uffd_wp(pmd))
1866			categories |= PAGE_IS_WRITTEN;
1867		if (pmd_swp_soft_dirty(pmd))
1868			categories |= PAGE_IS_SOFT_DIRTY;
1869
1870		if (p->masks_of_interest & PAGE_IS_FILE) {
1871			swp = pmd_to_swp_entry(pmd);
1872			if (is_pfn_swap_entry(swp) &&
1873			    !folio_test_anon(pfn_swap_entry_folio(swp)))
1874				categories |= PAGE_IS_FILE;
1875		}
1876	}
1877
1878	return categories;
1879}
1880
1881static void make_uffd_wp_pmd(struct vm_area_struct *vma,
1882			     unsigned long addr, pmd_t *pmdp)
1883{
1884	pmd_t old, pmd = *pmdp;
1885
1886	if (pmd_present(pmd)) {
1887		old = pmdp_invalidate_ad(vma, addr, pmdp);
1888		pmd = pmd_mkuffd_wp(old);
1889		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1890	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
1891		pmd = pmd_swp_mkuffd_wp(pmd);
1892		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1893	}
1894}
1895#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1896
1897#ifdef CONFIG_HUGETLB_PAGE
1898static unsigned long pagemap_hugetlb_category(pte_t pte)
1899{
1900	unsigned long categories = PAGE_IS_HUGE;
1901
1902	/*
1903	 * According to pagemap_hugetlb_range(), a file-backed HugeTLB
1904	 * page cannot be swapped. So PAGE_IS_FILE is not checked for
1905	 * swapped pages.
1906	 */
1907	if (pte_present(pte)) {
1908		categories |= PAGE_IS_PRESENT;
1909		if (!huge_pte_uffd_wp(pte))
1910			categories |= PAGE_IS_WRITTEN;
1911		if (!PageAnon(pte_page(pte)))
1912			categories |= PAGE_IS_FILE;
1913		if (is_zero_pfn(pte_pfn(pte)))
1914			categories |= PAGE_IS_PFNZERO;
1915		if (pte_soft_dirty(pte))
1916			categories |= PAGE_IS_SOFT_DIRTY;
1917	} else if (is_swap_pte(pte)) {
1918		categories |= PAGE_IS_SWAPPED;
1919		if (!pte_swp_uffd_wp_any(pte))
1920			categories |= PAGE_IS_WRITTEN;
1921		if (pte_swp_soft_dirty(pte))
1922			categories |= PAGE_IS_SOFT_DIRTY;
1923	}
1924
1925	return categories;
1926}
1927
1928static void make_uffd_wp_huge_pte(struct vm_area_struct *vma,
1929				  unsigned long addr, pte_t *ptep,
1930				  pte_t ptent)
1931{
1932	unsigned long psize;
1933
1934	if (is_hugetlb_entry_hwpoisoned(ptent) || is_pte_marker(ptent))
1935		return;
1936
1937	psize = huge_page_size(hstate_vma(vma));
1938
1939	if (is_hugetlb_entry_migration(ptent))
1940		set_huge_pte_at(vma->vm_mm, addr, ptep,
1941				pte_swp_mkuffd_wp(ptent), psize);
1942	else if (!huge_pte_none(ptent))
1943		huge_ptep_modify_prot_commit(vma, addr, ptep, ptent,
1944					     huge_pte_mkuffd_wp(ptent));
1945	else
1946		set_huge_pte_at(vma->vm_mm, addr, ptep,
1947				make_pte_marker(PTE_MARKER_UFFD_WP), psize);
1948}
1949#endif /* CONFIG_HUGETLB_PAGE */
1950
1951#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
1952static void pagemap_scan_backout_range(struct pagemap_scan_private *p,
1953				       unsigned long addr, unsigned long end)
1954{
1955	struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];
1956
1957	if (cur_buf->start != addr)
1958		cur_buf->end = addr;
1959	else
1960		cur_buf->start = cur_buf->end = 0;
1961
1962	p->found_pages -= (end - addr) / PAGE_SIZE;
1963}
1964#endif
1965
1966static bool pagemap_scan_is_interesting_page(unsigned long categories,
1967					     const struct pagemap_scan_private *p)
1968{
1969	categories ^= p->arg.category_inverted;
1970	if ((categories & p->arg.category_mask) != p->arg.category_mask)
1971		return false;
1972	if (p->arg.category_anyof_mask && !(categories & p->arg.category_anyof_mask))
1973		return false;
1974
1975	return true;
1976}
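/*
 * Worked example (illustrative, values invented): with
 * category_inverted == PAGE_IS_FILE and category_mask ==
 * PAGE_IS_PRESENT | PAGE_IS_FILE, a page whose categories are
 * PAGE_IS_PRESENT | PAGE_IS_WRITTEN becomes
 * PAGE_IS_PRESENT | PAGE_IS_WRITTEN | PAGE_IS_FILE after the XOR and
 * matches the mask; a file-backed page would lose its PAGE_IS_FILE bit
 * and fail. The filter thus selects pages that are present and *not*
 * file-backed.
 */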
1977
1978static bool pagemap_scan_is_interesting_vma(unsigned long categories,
1979					    const struct pagemap_scan_private *p)
1980{
1981	unsigned long required = p->arg.category_mask & PAGE_IS_WPALLOWED;
1982
1983	categories ^= p->arg.category_inverted;
1984	if ((categories & required) != required)
1985		return false;
1986
1987	return true;
1988}
1989
1990static int pagemap_scan_test_walk(unsigned long start, unsigned long end,
1991				  struct mm_walk *walk)
1992{
1993	struct pagemap_scan_private *p = walk->private;
1994	struct vm_area_struct *vma = walk->vma;
1995	unsigned long vma_category = 0;
1996	bool wp_allowed = userfaultfd_wp_async(vma) &&
1997	    userfaultfd_wp_use_markers(vma);
1998
1999	if (!wp_allowed) {
2000		/* The user requested an explicit failure when wp-async is unavailable */
2001		if (p->arg.flags & PM_SCAN_CHECK_WPASYNC)
2002			return -EPERM;
2003		/*
2004		 * The user requires wr-protection and allows unsupported
2005		 * vmas to be silently skipped.
2006		 */
2007		if (p->arg.flags & PM_SCAN_WP_MATCHING)
2008			return 1;
2009		/*
2010		 * Otherwise the request doesn't involve wr-protects at all;
2011		 * fall through to the remaining checks and allow the vma walk.
2012		 */
2013	}
2014
2015	if (vma->vm_flags & VM_PFNMAP)
2016		return 1;
2017
2018	if (wp_allowed)
2019		vma_category |= PAGE_IS_WPALLOWED;
2020
2021	if (vma->vm_flags & VM_SOFTDIRTY)
2022		vma_category |= PAGE_IS_SOFT_DIRTY;
2023
2024	if (!pagemap_scan_is_interesting_vma(vma_category, p))
2025		return 1;
2026
2027	p->cur_vma_category = vma_category;
2028
2029	return 0;
2030}
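/*
 * Setup sketch (illustrative, not part of this file): wp_allowed above
 * requires the vma to be registered through a userfaultfd created with
 * the UFFD_FEATURE_WP_ASYNC feature. From userspace this looks roughly
 * like the following (error handling elided):
 *
 *	struct uffdio_api api = { .api = UFFD_API,
 *				  .features = UFFD_FEATURE_WP_ASYNC |
 *					      UFFD_FEATURE_WP_UNPOPULATED };
 *	struct uffdio_register reg = {
 *		.range = { .start = (__u64)(uintptr_t)area, .len = len },
 *		.mode = UFFDIO_REGISTER_MODE_WP,
 *	};
 *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
 *
 *	ioctl(uffd, UFFDIO_API, &api);
 *	ioctl(uffd, UFFDIO_REGISTER, &reg);
 */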
2031
2032static bool pagemap_scan_push_range(unsigned long categories,
2033				    struct pagemap_scan_private *p,
2034				    unsigned long addr, unsigned long end)
2035{
2036	struct page_region *cur_buf = &p->vec_buf[p->vec_buf_index];
2037
2038	/*
2039	 * If no output buffer was provided at all, the sentinel values can
2040	 * never match here: `cur_buf->end` is non-zero only when the current
2041	 * entry is non-empty.
2042	 */
2043	if (addr == cur_buf->end && categories == cur_buf->categories) {
2044		cur_buf->end = end;
2045		return true;
2046	}
2047
2048	if (cur_buf->end) {
2049		if (p->vec_buf_index >= p->vec_buf_len - 1)
2050			return false;
2051
2052		cur_buf = &p->vec_buf[++p->vec_buf_index];
2053	}
2054
2055	cur_buf->start = addr;
2056	cur_buf->end = end;
2057	cur_buf->categories = categories;
2058
2059	return true;
2060}
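/*
 * Coalescing example (illustrative, addresses invented): three
 * consecutive calls
 *
 *	pagemap_scan_push_range(C, p, 0x1000, 0x2000);
 *	pagemap_scan_push_range(C, p, 0x2000, 0x3000);
 *	pagemap_scan_push_range(D, p, 0x3000, 0x4000);
 *
 * produce two output regions, [0x1000, 0x3000) with categories C and
 * [0x3000, 0x4000) with categories D: the second call merely extends
 * cur_buf->end, while the third starts a new entry because the
 * categories differ.
 */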
2061
2062static int pagemap_scan_output(unsigned long categories,
2063			       struct pagemap_scan_private *p,
2064			       unsigned long addr, unsigned long *end)
2065{
2066	unsigned long n_pages, total_pages;
2067	int ret = 0;
2068
2069	if (!p->vec_buf)
2070		return 0;
2071
2072	categories &= p->arg.return_mask;
2073
2074	n_pages = (*end - addr) / PAGE_SIZE;
2075	if (check_add_overflow(p->found_pages, n_pages, &total_pages) ||
2076	    total_pages > p->arg.max_pages) {
2077		size_t n_too_much = total_pages - p->arg.max_pages;
2078		*end -= n_too_much * PAGE_SIZE;
2079		n_pages -= n_too_much;
2080		ret = -ENOSPC;
2081	}
2082
2083	if (!pagemap_scan_push_range(categories, p, addr, *end)) {
2084		*end = addr;
2085		n_pages = 0;
2086		ret = -ENOSPC;
2087	}
2088
2089	p->found_pages += n_pages;
2090	if (ret)
2091		p->arg.walk_end = *end;
2092
2093	return ret;
2094}
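/*
 * Clamping example (illustrative, values invented): with max_pages = 8,
 * found_pages = 6 and a 4-page range, total_pages = 10 exceeds the
 * limit by 2, so *end is pulled back by two pages, only 2 pages are
 * accounted, and -ENOSPC tells the caller to stop after this range.
 */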
2095
2096static int pagemap_scan_thp_entry(pmd_t *pmd, unsigned long start,
2097				  unsigned long end, struct mm_walk *walk)
2098{
2099#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2100	struct pagemap_scan_private *p = walk->private;
2101	struct vm_area_struct *vma = walk->vma;
2102	unsigned long categories;
2103	spinlock_t *ptl;
2104	int ret = 0;
2105
2106	ptl = pmd_trans_huge_lock(pmd, vma);
2107	if (!ptl)
2108		return -ENOENT;
2109
2110	categories = p->cur_vma_category |
2111		     pagemap_thp_category(p, vma, start, *pmd);
2112
2113	if (!pagemap_scan_is_interesting_page(categories, p))
2114		goto out_unlock;
2115
2116	ret = pagemap_scan_output(categories, p, start, &end);
2117	if (start == end)
2118		goto out_unlock;
2119
2120	if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2121		goto out_unlock;
2122	if (~categories & PAGE_IS_WRITTEN)
2123		goto out_unlock;
2124
2125	/*
2126	 * Break huge page into small pages if the WP operation
2127	 * needs to be performed on a portion of the huge page.
2128	 */
2129	if (end != start + HPAGE_SIZE) {
2130		spin_unlock(ptl);
2131		split_huge_pmd(vma, pmd, start);
2132		pagemap_scan_backout_range(p, start, end);
2133		/* Report as if there was no THP */
2134		return -ENOENT;
2135	}
2136
2137	make_uffd_wp_pmd(vma, start, pmd);
2138	flush_tlb_range(vma, start, end);
2139out_unlock:
2140	spin_unlock(ptl);
2141	return ret;
2142#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
2143	return -ENOENT;
2144#endif
2145}
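/*
 * Split fallback (illustrative): when only part of a THP needs
 * write-protection, e.g. end == start + HPAGE_SIZE / 2, the PMD is
 * split and -ENOENT is returned so that pagemap_scan_pmd_entry() falls
 * back to the per-PTE loop over the now-split range, as if no THP had
 * been mapped there.
 */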
2146
2147static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
2148				  unsigned long end, struct mm_walk *walk)
2149{
2150	struct pagemap_scan_private *p = walk->private;
2151	struct vm_area_struct *vma = walk->vma;
2152	unsigned long addr, flush_end = 0;
2153	pte_t *pte, *start_pte;
2154	spinlock_t *ptl;
2155	int ret;
2156
2157	arch_enter_lazy_mmu_mode();
2158
2159	ret = pagemap_scan_thp_entry(pmd, start, end, walk);
2160	if (ret != -ENOENT) {
2161		arch_leave_lazy_mmu_mode();
2162		return ret;
2163	}
2164
2165	ret = 0;
2166	start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
2167	if (!pte) {
2168		arch_leave_lazy_mmu_mode();
2169		walk->action = ACTION_AGAIN;
2170		return 0;
2171	}
2172
2173	if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) {
2174		/* Fast path for performing exclusive WP */
2175		for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
2176			pte_t ptent = ptep_get(pte);
2177
2178			if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
2179			    pte_swp_uffd_wp_any(ptent))
2180				continue;
2181			make_uffd_wp_pte(vma, addr, pte, ptent);
2182			if (!flush_end)
2183				start = addr;
2184			flush_end = addr + PAGE_SIZE;
2185		}
2186		goto flush_and_return;
2187	}
2188
2189	if (!p->arg.category_anyof_mask && !p->arg.category_inverted &&
2190	    p->arg.category_mask == PAGE_IS_WRITTEN &&
2191	    p->arg.return_mask == PAGE_IS_WRITTEN) {
2192		for (addr = start; addr < end; pte++, addr += PAGE_SIZE) {
2193			unsigned long next = addr + PAGE_SIZE;
2194			pte_t ptent = ptep_get(pte);
2195
2196			if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
2197			    pte_swp_uffd_wp_any(ptent))
2198				continue;
2199			ret = pagemap_scan_output(p->cur_vma_category | PAGE_IS_WRITTEN,
2200						  p, addr, &next);
2201			if (next == addr)
2202				break;
2203			if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2204				continue;
2205			make_uffd_wp_pte(vma, addr, pte, ptent);
2206			if (!flush_end)
2207				start = addr;
2208			flush_end = next;
2209		}
2210		goto flush_and_return;
2211	}
2212
2213	for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
2214		pte_t ptent = ptep_get(pte);
2215		unsigned long categories = p->cur_vma_category |
2216					   pagemap_page_category(p, vma, addr, ptent);
2217		unsigned long next = addr + PAGE_SIZE;
2218
2219		if (!pagemap_scan_is_interesting_page(categories, p))
2220			continue;
2221
2222		ret = pagemap_scan_output(categories, p, addr, &next);
2223		if (next == addr)
2224			break;
2225
2226		if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2227			continue;
2228		if (~categories & PAGE_IS_WRITTEN)
2229			continue;
2230
2231		make_uffd_wp_pte(vma, addr, pte, ptent);
2232		if (!flush_end)
2233			start = addr;
2234		flush_end = next;
2235	}
2236
2237flush_and_return:
2238	if (flush_end)
2239		flush_tlb_range(vma, start, addr);
2240
2241	pte_unmap_unlock(start_pte, ptl);
2242	arch_leave_lazy_mmu_mode();
2243
2244	cond_resched();
2245	return ret;
2246}
2247
2248#ifdef CONFIG_HUGETLB_PAGE
2249static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask,
2250				      unsigned long start, unsigned long end,
2251				      struct mm_walk *walk)
2252{
2253	struct pagemap_scan_private *p = walk->private;
2254	struct vm_area_struct *vma = walk->vma;
2255	unsigned long categories;
2256	spinlock_t *ptl;
2257	int ret = 0;
2258	pte_t pte;
2259
2260	if (~p->arg.flags & PM_SCAN_WP_MATCHING) {
2261		/* Go the short route when not write-protecting pages. */
2262
2263		pte = huge_ptep_get(ptep);
2264		categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
2265
2266		if (!pagemap_scan_is_interesting_page(categories, p))
2267			return 0;
2268
2269		return pagemap_scan_output(categories, p, start, &end);
2270	}
2271
2272	i_mmap_lock_write(vma->vm_file->f_mapping);
2273	ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, ptep);
2274
2275	pte = huge_ptep_get(ptep);
2276	categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
2277
2278	if (!pagemap_scan_is_interesting_page(categories, p))
2279		goto out_unlock;
2280
2281	ret = pagemap_scan_output(categories, p, start, &end);
2282	if (start == end)
2283		goto out_unlock;
2284
2285	if (~categories & PAGE_IS_WRITTEN)
2286		goto out_unlock;
2287
2288	if (end != start + HPAGE_SIZE) {
2289		/* Partial HugeTLB page WP isn't possible. */
2290		pagemap_scan_backout_range(p, start, end);
2291		p->arg.walk_end = start;
2292		ret = 0;
2293		goto out_unlock;
2294	}
2295
2296	make_uffd_wp_huge_pte(vma, start, ptep, pte);
2297	flush_hugetlb_tlb_range(vma, start, end);
2298
2299out_unlock:
2300	spin_unlock(ptl);
2301	i_mmap_unlock_write(vma->vm_file->f_mapping);
2302
2303	return ret;
2304}
2305#else
2306#define pagemap_scan_hugetlb_entry NULL
2307#endif
2308
2309static int pagemap_scan_pte_hole(unsigned long addr, unsigned long end,
2310				 int depth, struct mm_walk *walk)
2311{
2312	struct pagemap_scan_private *p = walk->private;
2313	struct vm_area_struct *vma = walk->vma;
2314	int ret, err;
2315
2316	if (!vma || !pagemap_scan_is_interesting_page(p->cur_vma_category, p))
2317		return 0;
2318
2319	ret = pagemap_scan_output(p->cur_vma_category, p, addr, &end);
2320	if (addr == end)
2321		return ret;
2322
2323	if (~p->arg.flags & PM_SCAN_WP_MATCHING)
2324		return ret;
2325
2326	err = uffd_wp_range(vma, addr, end - addr, true);
2327	if (err < 0)
2328		ret = err;
2329
2330	return ret;
2331}
2332
2333static const struct mm_walk_ops pagemap_scan_ops = {
2334	.test_walk = pagemap_scan_test_walk,
2335	.pmd_entry = pagemap_scan_pmd_entry,
2336	.pte_hole = pagemap_scan_pte_hole,
2337	.hugetlb_entry = pagemap_scan_hugetlb_entry,
2338};
2339
2340static int pagemap_scan_get_args(struct pm_scan_arg *arg,
2341				 unsigned long uarg)
2342{
2343	if (copy_from_user(arg, (void __user *)uarg, sizeof(*arg)))
2344		return -EFAULT;
2345
2346	if (arg->size != sizeof(struct pm_scan_arg))
2347		return -EINVAL;
2348
2349	/* Validate requested features */
2350	if (arg->flags & ~PM_SCAN_FLAGS)
2351		return -EINVAL;
2352	if ((arg->category_inverted | arg->category_mask |
2353	     arg->category_anyof_mask | arg->return_mask) & ~PM_SCAN_CATEGORIES)
2354		return -EINVAL;
2355
2356	arg->start = untagged_addr((unsigned long)arg->start);
2357	arg->end = untagged_addr((unsigned long)arg->end);
2358	arg->vec = untagged_addr((unsigned long)arg->vec);
2359
2360	/* Validate memory pointers */
2361	if (!IS_ALIGNED(arg->start, PAGE_SIZE))
2362		return -EINVAL;
2363	if (!access_ok((void __user *)(long)arg->start, arg->end - arg->start))
2364		return -EFAULT;
2365	if (!arg->vec && arg->vec_len)
2366		return -EINVAL;
2367	if (arg->vec && !access_ok((void __user *)(long)arg->vec,
2368			      arg->vec_len * sizeof(struct page_region)))
2369		return -EFAULT;
2370
2371	/* Fixup default values */
2372	arg->end = ALIGN(arg->end, PAGE_SIZE);
2373	arg->walk_end = 0;
2374	if (!arg->max_pages)
2375		arg->max_pages = ULONG_MAX;
2376
2377	return 0;
2378}
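/*
 * Userspace sketch (illustrative, not part of this file): a minimal
 * PAGEMAP_SCAN invocation that satisfies the validation above, assuming
 * the UAPI definitions from <linux/fs.h>; error handling elided.
 *
 *	struct page_region regions[32];
 *	struct pm_scan_arg arg = {
 *		.size = sizeof(arg),
 *		.start = (__u64)(uintptr_t)area,	// must be page-aligned
 *		.end = (__u64)(uintptr_t)area + len,
 *		.vec = (__u64)(uintptr_t)regions,
 *		.vec_len = sizeof(regions) / sizeof(regions[0]),
 *		.category_mask = PAGE_IS_WRITTEN,
 *		.return_mask = PAGE_IS_WRITTEN,
 *	};
 *	int fd = open("/proc/self/pagemap", O_RDONLY);
 *	int n = ioctl(fd, PAGEMAP_SCAN, &arg);	// n: regions filled
 */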
2379
2380static int pagemap_scan_writeback_args(struct pm_scan_arg *arg,
2381				       unsigned long uargl)
2382{
2383	struct pm_scan_arg __user *uarg = (void __user *)uargl;
2384
2385	if (copy_to_user(&uarg->walk_end, &arg->walk_end, sizeof(arg->walk_end)))
2386		return -EFAULT;
2387
2388	return 0;
2389}
2390
2391static int pagemap_scan_init_bounce_buffer(struct pagemap_scan_private *p)
2392{
2393	if (!p->arg.vec_len)
2394		return 0;
2395
2396	p->vec_buf_len = min_t(size_t, PAGEMAP_WALK_SIZE >> PAGE_SHIFT,
2397			       p->arg.vec_len);
2398	p->vec_buf = kmalloc_array(p->vec_buf_len, sizeof(*p->vec_buf),
2399				   GFP_KERNEL);
2400	if (!p->vec_buf)
2401		return -ENOMEM;
2402
2403	p->vec_buf->start = p->vec_buf->end = 0;
2404	p->vec_out = (struct page_region __user *)(long)p->arg.vec;
2405
2406	return 0;
2407}
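/*
 * Sizing note (illustrative): PAGEMAP_WALK_SIZE >> PAGE_SHIFT caps the
 * bounce buffer at one walk window's worth of worst-case single-page
 * regions (512 entries with 4K pages and a 2M window), so a huge
 * user-supplied vec_len cannot force an unbounded kernel allocation.
 */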
2408
2409static long pagemap_scan_flush_buffer(struct pagemap_scan_private *p)
2410{
2411	const struct page_region *buf = p->vec_buf;
2412	long n = p->vec_buf_index;
2413
2414	if (!p->vec_buf)
2415		return 0;
2416
2417	if (buf[n].end != buf[n].start)
2418		n++;
2419
2420	if (!n)
2421		return 0;
2422
2423	if (copy_to_user(p->vec_out, buf, n * sizeof(*buf)))
2424		return -EFAULT;
2425
2426	p->arg.vec_len -= n;
2427	p->vec_out += n;
2428
2429	p->vec_buf_index = 0;
2430	p->vec_buf_len = min_t(size_t, p->vec_buf_len, p->arg.vec_len);
2431	p->vec_buf->start = p->vec_buf->end = 0;
2432
2433	return n;
2434}
2435
2436static long do_pagemap_scan(struct mm_struct *mm, unsigned long uarg)
2437{
2438	struct pagemap_scan_private p = {0};
2439	unsigned long walk_start;
2440	size_t n_ranges_out = 0;
2441	int ret;
2442
2443	ret = pagemap_scan_get_args(&p.arg, uarg);
2444	if (ret)
2445		return ret;
2446
2447	p.masks_of_interest = p.arg.category_mask | p.arg.category_anyof_mask |
2448			      p.arg.return_mask;
2449	ret = pagemap_scan_init_bounce_buffer(&p);
2450	if (ret)
2451		return ret;
2452
2453	for (walk_start = p.arg.start; walk_start < p.arg.end;
2454			walk_start = p.arg.walk_end) {
2455		struct mmu_notifier_range range;
2456		long n_out;
2457
2458		if (fatal_signal_pending(current)) {
2459			ret = -EINTR;
2460			break;
2461		}
2462
2463		ret = mmap_read_lock_killable(mm);
2464		if (ret)
2465			break;
2466
2467		/* Protection change for the range is going to happen. */
2468		if (p.arg.flags & PM_SCAN_WP_MATCHING) {
2469			mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
2470						mm, walk_start, p.arg.end);
2471			mmu_notifier_invalidate_range_start(&range);
2472		}
2473
2474		ret = walk_page_range(mm, walk_start, p.arg.end,
2475				      &pagemap_scan_ops, &p);
2476
2477		if (p.arg.flags & PM_SCAN_WP_MATCHING)
2478			mmu_notifier_invalidate_range_end(&range);
2479
2480		mmap_read_unlock(mm);
2481
2482		n_out = pagemap_scan_flush_buffer(&p);
2483		if (n_out < 0)
2484			ret = n_out;
2485		else
2486			n_ranges_out += n_out;
2487
2488		if (ret != -ENOSPC)
2489			break;
2490
2491		if (p.arg.vec_len == 0 || p.found_pages == p.arg.max_pages)
2492			break;
2493	}
2494
2495	/* ENOSPC signifies early stop (buffer full) from the walk. */
2496	if (!ret || ret == -ENOSPC)
2497		ret = n_ranges_out;
2498
2499	/* walk_end is only set when the walk stopped early */
2500	if (!p.arg.walk_end)
2501		p.arg.walk_end = p.arg.end;
2502	if (pagemap_scan_writeback_args(&p.arg, uarg))
2503		ret = -EFAULT;
2504
2505	kfree(p.vec_buf);
2506	return ret;
2507}
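/*
 * Resumption sketch (illustrative, not part of this file): since
 * walk_end is copied back to userspace, a caller whose output vector
 * filled up can continue from where the previous call stopped:
 *
 *	arg.walk_end = arg.start;
 *	do {
 *		arg.start = arg.walk_end;
 *		n = ioctl(fd, PAGEMAP_SCAN, &arg);
 *		// consume the n regions reported via arg.vec ...
 *	} while (n > 0 && arg.walk_end < arg.end);
 */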
2508
2509static long do_pagemap_cmd(struct file *file, unsigned int cmd,
2510			   unsigned long arg)
2511{
2512	struct mm_struct *mm = file->private_data;
2513
2514	switch (cmd) {
2515	case PAGEMAP_SCAN:
2516		return do_pagemap_scan(mm, arg);
2517
2518	default:
2519		return -EINVAL;
2520	}
2521}
2522
2523const struct file_operations proc_pagemap_operations = {
2524	.llseek		= mem_lseek, /* borrow this */
2525	.read		= pagemap_read,
2526	.open		= pagemap_open,
2527	.release	= pagemap_release,
2528	.unlocked_ioctl = do_pagemap_cmd,
2529	.compat_ioctl	= do_pagemap_cmd,
2530};
2531#endif /* CONFIG_PROC_PAGE_MONITOR */
2532
2533#ifdef CONFIG_NUMA
2534
2535struct numa_maps {
2536	unsigned long pages;
2537	unsigned long anon;
2538	unsigned long active;
2539	unsigned long writeback;
2540	unsigned long mapcount_max;
2541	unsigned long dirty;
2542	unsigned long swapcache;
2543	unsigned long node[MAX_NUMNODES];
2544};
2545
2546struct numa_maps_private {
2547	struct proc_maps_private proc_maps;
2548	struct numa_maps md;
2549};
2550
2551static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
2552			unsigned long nr_pages)
2553{
2554	int count = page_mapcount(page);
2555
2556	md->pages += nr_pages;
2557	if (pte_dirty || PageDirty(page))
2558		md->dirty += nr_pages;
2559
2560	if (PageSwapCache(page))
2561		md->swapcache += nr_pages;
2562
2563	if (PageActive(page) || PageUnevictable(page))
2564		md->active += nr_pages;
2565
2566	if (PageWriteback(page))
2567		md->writeback += nr_pages;
2568
2569	if (PageAnon(page))
2570		md->anon += nr_pages;
2571
2572	if (count > md->mapcount_max)
2573		md->mapcount_max = count;
2574
2575	md->node[page_to_nid(page)] += nr_pages;
2576}
2577
2578static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
2579		unsigned long addr)
2580{
2581	struct page *page;
2582	int nid;
2583
2584	if (!pte_present(pte))
2585		return NULL;
2586
2587	page = vm_normal_page(vma, addr, pte);
2588	if (!page || is_zone_device_page(page))
2589		return NULL;
2590
2591	if (PageReserved(page))
2592		return NULL;
2593
2594	nid = page_to_nid(page);
2595	if (!node_isset(nid, node_states[N_MEMORY]))
2596		return NULL;
2597
2598	return page;
2599}
2600
2601#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2602static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
2603					      struct vm_area_struct *vma,
2604					      unsigned long addr)
2605{
2606	struct page *page;
2607	int nid;
2608
2609	if (!pmd_present(pmd))
2610		return NULL;
2611
2612	page = vm_normal_page_pmd(vma, addr, pmd);
2613	if (!page)
2614		return NULL;
2615
2616	if (PageReserved(page))
2617		return NULL;
2618
2619	nid = page_to_nid(page);
2620	if (!node_isset(nid, node_states[N_MEMORY]))
2621		return NULL;
2622
2623	return page;
2624}
2625#endif
2626
2627static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
2628		unsigned long end, struct mm_walk *walk)
2629{
2630	struct numa_maps *md = walk->private;
2631	struct vm_area_struct *vma = walk->vma;
2632	spinlock_t *ptl;
2633	pte_t *orig_pte;
2634	pte_t *pte;
2635
2636#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2637	ptl = pmd_trans_huge_lock(pmd, vma);
2638	if (ptl) {
2639		struct page *page;
2640
2641		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
2642		if (page)
2643			gather_stats(page, md, pmd_dirty(*pmd),
2644				     HPAGE_PMD_SIZE/PAGE_SIZE);
2645		spin_unlock(ptl);
2646		return 0;
2647	}
2648#endif
2649	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
2650	if (!pte) {
2651		walk->action = ACTION_AGAIN;
2652		return 0;
2653	}
2654	do {
2655		pte_t ptent = ptep_get(pte);
2656		struct page *page = can_gather_numa_stats(ptent, vma, addr);
2657		if (!page)
2658			continue;
2659		gather_stats(page, md, pte_dirty(ptent), 1);
2660
2661	} while (pte++, addr += PAGE_SIZE, addr != end);
2662	pte_unmap_unlock(orig_pte, ptl);
2663	cond_resched();
2664	return 0;
2665}
2666#ifdef CONFIG_HUGETLB_PAGE
2667static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
2668		unsigned long addr, unsigned long end, struct mm_walk *walk)
2669{
2670	pte_t huge_pte = huge_ptep_get(pte);
2671	struct numa_maps *md;
2672	struct page *page;
2673
2674	if (!pte_present(huge_pte))
2675		return 0;
2676
2677	page = pte_page(huge_pte);
2678
2679	md = walk->private;
2680	gather_stats(page, md, pte_dirty(huge_pte), 1);
2681	return 0;
2682}
2683
2684#else
2685static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
2686		unsigned long addr, unsigned long end, struct mm_walk *walk)
2687{
2688	return 0;
2689}
2690#endif
2691
2692static const struct mm_walk_ops show_numa_ops = {
2693	.hugetlb_entry = gather_hugetlb_stats,
2694	.pmd_entry = gather_pte_stats,
2695	.walk_lock = PGWALK_RDLOCK,
2696};
2697
2698/*
2699 * Display pages allocated per node and memory policy via /proc.
2700 */
2701static int show_numa_map(struct seq_file *m, void *v)
2702{
2703	struct numa_maps_private *numa_priv = m->private;
2704	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
2705	struct vm_area_struct *vma = v;
2706	struct numa_maps *md = &numa_priv->md;
2707	struct file *file = vma->vm_file;
2708	struct mm_struct *mm = vma->vm_mm;
2709	char buffer[64];
2710	struct mempolicy *pol;
2711	pgoff_t ilx;
2712	int nid;
2713
2714	if (!mm)
2715		return 0;
2716
2717	/* Ensure we start with an empty set of numa_maps statistics. */
2718	memset(md, 0, sizeof(*md));
2719
2720	pol = __get_vma_policy(vma, vma->vm_start, &ilx);
2721	if (pol) {
2722		mpol_to_str(buffer, sizeof(buffer), pol);
2723		mpol_cond_put(pol);
2724	} else {
2725		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
2726	}
2727
2728	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
2729
2730	if (file) {
2731		seq_puts(m, " file=");
2732		seq_path(m, file_user_path(file), "\n\t= ");
2733	} else if (vma_is_initial_heap(vma)) {
2734		seq_puts(m, " heap");
2735	} else if (vma_is_initial_stack(vma)) {
2736		seq_puts(m, " stack");
2737	}
2738
2739	if (is_vm_hugetlb_page(vma))
2740		seq_puts(m, " huge");
2741
2742	/* mmap_lock is held by m_start */
2743	walk_page_vma(vma, &show_numa_ops, md);
2744
2745	if (!md->pages)
2746		goto out;
2747
2748	if (md->anon)
2749		seq_printf(m, " anon=%lu", md->anon);
2750
2751	if (md->dirty)
2752		seq_printf(m, " dirty=%lu", md->dirty);
2753
2754	if (md->pages != md->anon && md->pages != md->dirty)
2755		seq_printf(m, " mapped=%lu", md->pages);
2756
2757	if (md->mapcount_max > 1)
2758		seq_printf(m, " mapmax=%lu", md->mapcount_max);
2759
2760	if (md->swapcache)
2761		seq_printf(m, " swapcache=%lu", md->swapcache);
2762
2763	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2764		seq_printf(m, " active=%lu", md->active);
2765
2766	if (md->writeback)
2767		seq_printf(m, " writeback=%lu", md->writeback);
2768
2769	for_each_node_state(nid, N_MEMORY)
2770		if (md->node[nid])
2771			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
2772
2773	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
2774out:
2775	seq_putc(m, '\n');
2776	return 0;
2777}
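/*
 * Sample output line (illustrative, values invented; printed as a
 * single line, wrapped here for readability):
 *
 *	7f4e32e00000 default file=/usr/lib/libc.so.6 mapped=42 mapmax=3
 *		N0=40 N1=2 kernelpagesize_kB=4
 */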
2778
2779static const struct seq_operations proc_pid_numa_maps_op = {
2780	.start  = m_start,
2781	.next   = m_next,
2782	.stop   = m_stop,
2783	.show   = show_numa_map,
2784};
2785
2786static int pid_numa_maps_open(struct inode *inode, struct file *file)
2787{
2788	return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
2789				sizeof(struct numa_maps_private));
2790}
2791
2792const struct file_operations proc_pid_numa_maps_operations = {
2793	.open		= pid_numa_maps_open,
2794	.read		= seq_read,
2795	.llseek		= seq_lseek,
2796	.release	= proc_map_release,
2797};
2798
2799#endif /* CONFIG_NUMA */