fs/proc/task_mmu.c (Linux v6.2)
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/pagewalk.h>
   3#include <linux/mm_inline.h>
   4#include <linux/hugetlb.h>
   5#include <linux/huge_mm.h>
   6#include <linux/mount.h>
   7#include <linux/seq_file.h>
   8#include <linux/highmem.h>
   9#include <linux/ptrace.h>
  10#include <linux/slab.h>
  11#include <linux/pagemap.h>
  12#include <linux/mempolicy.h>
  13#include <linux/rmap.h>
  14#include <linux/swap.h>
  15#include <linux/sched/mm.h>
  16#include <linux/swapops.h>
  17#include <linux/mmu_notifier.h>
  18#include <linux/page_idle.h>
  19#include <linux/shmem_fs.h>
  20#include <linux/uaccess.h>
  21#include <linux/pkeys.h>
  22
  23#include <asm/elf.h>
  24#include <asm/tlb.h>
  25#include <asm/tlbflush.h>
  26#include "internal.h"
  27
  28#define SEQ_PUT_DEC(str, val) \
  29		seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
  30void task_mem(struct seq_file *m, struct mm_struct *mm)
  31{
  32	unsigned long text, lib, swap, anon, file, shmem;
  33	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
  34
  35	anon = get_mm_counter(mm, MM_ANONPAGES);
  36	file = get_mm_counter(mm, MM_FILEPAGES);
  37	shmem = get_mm_counter(mm, MM_SHMEMPAGES);
  38
  39	/*
  40	 * Note: to minimize their overhead, mm maintains hiwater_vm and
  41	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
  42	 * collector of these hiwater stats must therefore get total_vm
  43	 * and rss too, which will usually be the higher.  Barriers? not
  44	 * worth the effort, such snapshots can always be inconsistent.
  45	 */
  46	hiwater_vm = total_vm = mm->total_vm;
  47	if (hiwater_vm < mm->hiwater_vm)
  48		hiwater_vm = mm->hiwater_vm;
  49	hiwater_rss = total_rss = anon + file + shmem;
  50	if (hiwater_rss < mm->hiwater_rss)
  51		hiwater_rss = mm->hiwater_rss;
  52
  53	/* split executable areas between text and lib */
  54	text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
  55	text = min(text, mm->exec_vm << PAGE_SHIFT);
  56	lib = (mm->exec_vm << PAGE_SHIFT) - text;
  57
  58	swap = get_mm_counter(mm, MM_SWAPENTS);
  59	SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
  60	SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
  61	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
  62	SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
  63	SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
  64	SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
  65	SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
  66	SEQ_PUT_DEC(" kB\nRssFile:\t", file);
  67	SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
  68	SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
  69	SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
  70	seq_put_decimal_ull_width(m,
  71		    " kB\nVmExe:\t", text >> 10, 8);
  72	seq_put_decimal_ull_width(m,
  73		    " kB\nVmLib:\t", lib >> 10, 8);
  74	seq_put_decimal_ull_width(m,
  75		    " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
  76	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
  77	seq_puts(m, " kB\n");
  78	hugetlb_report_usage(m, mm);
  79}
  80#undef SEQ_PUT_DEC
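
The counters task_mem() prints above are what appear as the VmPeak, VmSize, VmRSS, RssAnon and related fields of /proc/PID/status. A minimal user-space sketch (not part of this file) that dumps those lines for the current process:

/* User-space sketch: dump the Vm and Rss lines that task_mem() produces. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/self/status", "r");
	char line[256];

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "Vm", 2) || !strncmp(line, "Rss", 3))
			fputs(line, stdout);
	fclose(f);
	return 0;
}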
  81
  82unsigned long task_vsize(struct mm_struct *mm)
  83{
  84	return PAGE_SIZE * mm->total_vm;
  85}
  86
  87unsigned long task_statm(struct mm_struct *mm,
  88			 unsigned long *shared, unsigned long *text,
  89			 unsigned long *data, unsigned long *resident)
  90{
  91	*shared = get_mm_counter(mm, MM_FILEPAGES) +
  92			get_mm_counter(mm, MM_SHMEMPAGES);
  93	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
  94								>> PAGE_SHIFT;
  95	*data = mm->data_vm + mm->stack_vm;
  96	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
  97	return mm->total_vm;
  98}
  99
 100#ifdef CONFIG_NUMA
 101/*
 102 * Save get_task_policy() for show_numa_map().
 103 */
 104static void hold_task_mempolicy(struct proc_maps_private *priv)
 105{
 106	struct task_struct *task = priv->task;
 107
 108	task_lock(task);
 109	priv->task_mempolicy = get_task_policy(task);
 110	mpol_get(priv->task_mempolicy);
 111	task_unlock(task);
 112}
 113static void release_task_mempolicy(struct proc_maps_private *priv)
 114{
 115	mpol_put(priv->task_mempolicy);
 116}
 117#else
 118static void hold_task_mempolicy(struct proc_maps_private *priv)
 119{
 120}
 121static void release_task_mempolicy(struct proc_maps_private *priv)
 122{
 123}
 124#endif
 125
 126static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
 127						loff_t *ppos)
 128{
 129	struct vm_area_struct *vma = vma_next(&priv->iter);
 130
 131	if (vma) {
 132		*ppos = vma->vm_start;
 133	} else {
 134		*ppos = -2UL;
 135		vma = get_gate_vma(priv->mm);
 136	}
 137
 138	return vma;
 139}
 140
 141static void *m_start(struct seq_file *m, loff_t *ppos)
 142{
 143	struct proc_maps_private *priv = m->private;
 144	unsigned long last_addr = *ppos;
 145	struct mm_struct *mm;
 146
 147	/* See m_next(). Zero at the start or after lseek. */
 148	if (last_addr == -1UL)
 149		return NULL;
 150
 151	priv->task = get_proc_task(priv->inode);
 152	if (!priv->task)
 153		return ERR_PTR(-ESRCH);
 154
 155	mm = priv->mm;
 156	if (!mm || !mmget_not_zero(mm)) {
 157		put_task_struct(priv->task);
 158		priv->task = NULL;
 159		return NULL;
 160	}
 161
 162	if (mmap_read_lock_killable(mm)) {
 163		mmput(mm);
 164		put_task_struct(priv->task);
 165		priv->task = NULL;
 166		return ERR_PTR(-EINTR);
 167	}
 168
 169	vma_iter_init(&priv->iter, mm, last_addr);
 170	hold_task_mempolicy(priv);
 171	if (last_addr == -2UL)
 172		return get_gate_vma(mm);
 173
 174	return proc_get_vma(priv, ppos);
 175}
 176
 177static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
 178{
 179	if (*ppos == -2UL) {
 180		*ppos = -1UL;
 181		return NULL;
 182	}
 183	return proc_get_vma(m->private, ppos);
 184}
 185
 186static void m_stop(struct seq_file *m, void *v)
 187{
 188	struct proc_maps_private *priv = m->private;
 189	struct mm_struct *mm = priv->mm;
 190
 191	if (!priv->task)
 192		return;
 193
 194	release_task_mempolicy(priv);
 195	mmap_read_unlock(mm);
 196	mmput(mm);
 197	put_task_struct(priv->task);
 198	priv->task = NULL;
 199}
 200
 201static int proc_maps_open(struct inode *inode, struct file *file,
 202			const struct seq_operations *ops, int psize)
 203{
 204	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
 205
 206	if (!priv)
 207		return -ENOMEM;
 208
 209	priv->inode = inode;
 210	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
 211	if (IS_ERR(priv->mm)) {
 212		int err = PTR_ERR(priv->mm);
 213
 214		seq_release_private(inode, file);
 215		return err;
 216	}
 217
 218	return 0;
 219}
 220
 221static int proc_map_release(struct inode *inode, struct file *file)
 222{
 223	struct seq_file *seq = file->private_data;
 224	struct proc_maps_private *priv = seq->private;
 225
 226	if (priv->mm)
 227		mmdrop(priv->mm);
 228
 229	return seq_release_private(inode, file);
 230}
 231
 232static int do_maps_open(struct inode *inode, struct file *file,
 233			const struct seq_operations *ops)
 234{
 235	return proc_maps_open(inode, file, ops,
 236				sizeof(struct proc_maps_private));
 237}
 238
 239/*
 240 * Indicate if the VMA is a stack for the given task; for
 241 * /proc/PID/maps that is the stack of the main task.
 242 */
 243static int is_stack(struct vm_area_struct *vma)
 244{
 245	/*
 246	 * We make no effort to guess what a given thread considers to be
 247	 * its "stack".  It's not even well-defined for programs written
  248	 * in languages like Go.
 249	 */
 250	return vma->vm_start <= vma->vm_mm->start_stack &&
 251		vma->vm_end >= vma->vm_mm->start_stack;
 252}
 253
 254static void show_vma_header_prefix(struct seq_file *m,
 255				   unsigned long start, unsigned long end,
 256				   vm_flags_t flags, unsigned long long pgoff,
 257				   dev_t dev, unsigned long ino)
 258{
 259	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 260	seq_put_hex_ll(m, NULL, start, 8);
 261	seq_put_hex_ll(m, "-", end, 8);
 262	seq_putc(m, ' ');
 263	seq_putc(m, flags & VM_READ ? 'r' : '-');
 264	seq_putc(m, flags & VM_WRITE ? 'w' : '-');
 265	seq_putc(m, flags & VM_EXEC ? 'x' : '-');
 266	seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
 267	seq_put_hex_ll(m, " ", pgoff, 8);
 268	seq_put_hex_ll(m, " ", MAJOR(dev), 2);
 269	seq_put_hex_ll(m, ":", MINOR(dev), 2);
 270	seq_put_decimal_ull(m, " ", ino);
 271	seq_putc(m, ' ');
 272}
 273
 274static void
 275show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 276{
 277	struct anon_vma_name *anon_name = NULL;
 278	struct mm_struct *mm = vma->vm_mm;
 279	struct file *file = vma->vm_file;
 280	vm_flags_t flags = vma->vm_flags;
 281	unsigned long ino = 0;
 282	unsigned long long pgoff = 0;
 283	unsigned long start, end;
 284	dev_t dev = 0;
 285	const char *name = NULL;
 286
 287	if (file) {
 288		struct inode *inode = file_inode(vma->vm_file);
 289		dev = inode->i_sb->s_dev;
 290		ino = inode->i_ino;
 291		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
 292	}
 293
 294	start = vma->vm_start;
 295	end = vma->vm_end;
 296	show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
 297	if (mm)
 298		anon_name = anon_vma_name(vma);
 299
 300	/*
 301	 * Print the dentry name for named mappings, and a
 302	 * special [heap] marker for the heap:
 303	 */
 304	if (file) {
 305		seq_pad(m, ' ');
 306		/*
 307		 * If user named this anon shared memory via
 308		 * prctl(PR_SET_VMA ..., use the provided name.
 309		 */
 310		if (anon_name)
 311			seq_printf(m, "[anon_shmem:%s]", anon_name->name);
 312		else
 313			seq_file_path(m, file, "\n");
 314		goto done;
 315	}
 316
 317	if (vma->vm_ops && vma->vm_ops->name) {
 318		name = vma->vm_ops->name(vma);
 319		if (name)
 320			goto done;
 321	}
 322
 323	name = arch_vma_name(vma);
 324	if (!name) {
 325		if (!mm) {
 326			name = "[vdso]";
 327			goto done;
 328		}
 329
 330		if (vma->vm_start <= mm->brk &&
 331		    vma->vm_end >= mm->start_brk) {
 332			name = "[heap]";
 333			goto done;
 334		}
 335
 336		if (is_stack(vma)) {
 337			name = "[stack]";
 338			goto done;
 339		}
 340
 341		if (anon_name) {
 342			seq_pad(m, ' ');
 343			seq_printf(m, "[anon:%s]", anon_name->name);
 344		}
 345	}
 346
 347done:
 348	if (name) {
 349		seq_pad(m, ' ');
 350		seq_puts(m, name);
 351	}
 352	seq_putc(m, '\n');
 353}
 354
 355static int show_map(struct seq_file *m, void *v)
 356{
 357	show_map_vma(m, v);
 358	return 0;
 359}
 360
 361static const struct seq_operations proc_pid_maps_op = {
 362	.start	= m_start,
 363	.next	= m_next,
 364	.stop	= m_stop,
 365	.show	= show_map
 366};
 367
 368static int pid_maps_open(struct inode *inode, struct file *file)
 369{
 370	return do_maps_open(inode, file, &proc_pid_maps_op);
 371}
 372
 373const struct file_operations proc_pid_maps_operations = {
 374	.open		= pid_maps_open,
 375	.read		= seq_read,
 376	.llseek		= seq_lseek,
 377	.release	= proc_map_release,
 378};
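
For reference, each /proc/PID/maps line produced by show_map_vma() and show_vma_header_prefix() above carries the address range, permission flags, file offset, device, inode and an optional name. A small user-space parsing sketch; the sscanf format is my reading of those seq_* calls, not an interface exported by the kernel:

/* User-space sketch: parse /proc/self/maps lines in the format produced above. */
#include <stdio.h>

static int parse_maps_line(const char *line)
{
	unsigned long start, end, pgoff, ino;
	unsigned int major, minor;
	char perms[5], path[4096] = "";

	if (sscanf(line, "%lx-%lx %4s %lx %x:%x %lu %4095[^\n]",
		   &start, &end, perms, &pgoff, &major, &minor, &ino, path) < 7)
		return -1;
	printf("%lx-%lx %s ino=%lu %s\n", start, end, perms, ino, path);
	return 0;
}

int main(void)
{
	char line[4352];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		parse_maps_line(line);
	fclose(f);
	return 0;
}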
 379
 380/*
 381 * Proportional Set Size(PSS): my share of RSS.
 382 *
 383 * PSS of a process is the count of pages it has in memory, where each
 384 * page is divided by the number of processes sharing it.  So if a
 385 * process has 1000 pages all to itself, and 1000 shared with one other
 386 * process, its PSS will be 1500.
 387 *
 388 * To keep (accumulated) division errors low, we adopt a 64bit
 389 * fixed-point pss counter to minimize division errors. So (pss >>
 390 * PSS_SHIFT) would be the real byte count.
 391 *
 392 * A shift of 12 before division means (assuming 4K page size):
 393 * 	- 1M 3-user-pages add up to 8KB errors;
 394 * 	- supports mapcount up to 2^24, or 16M;
 395 * 	- supports PSS up to 2^52 bytes, or 4PB.
 396 */
 397#define PSS_SHIFT 12
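
To make the fixed-point scheme above concrete, here is a stand-alone user-space sketch (assuming 4 KiB pages) that reproduces the example from the comment: 1000 pages mapped only by this process plus 1000 pages shared with one other process accumulate to a PSS of 1500 pages, i.e. 6000 kB:

/* Illustrative user-space sketch of the PSS fixed-point accounting above. */
#include <stdio.h>

#define PAGE_SIZE	4096ULL		/* assumption: 4K pages */
#define PSS_SHIFT	12

int main(void)
{
	unsigned long long pss = 0;
	int i;

	for (i = 0; i < 1000; i++)	/* pages mapped only by us */
		pss += PAGE_SIZE << PSS_SHIFT;
	for (i = 0; i < 1000; i++)	/* pages shared with one other process */
		pss += (PAGE_SIZE << PSS_SHIFT) / 2;

	/* The real byte count is pss >> PSS_SHIFT: 1500 pages, i.e. 6000 kB. */
	printf("Pss: %llu kB\n", (pss >> PSS_SHIFT) >> 10);
	return 0;
}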
 398
 399#ifdef CONFIG_PROC_PAGE_MONITOR
 400struct mem_size_stats {
 401	unsigned long resident;
 402	unsigned long shared_clean;
 403	unsigned long shared_dirty;
 404	unsigned long private_clean;
 405	unsigned long private_dirty;
 406	unsigned long referenced;
 407	unsigned long anonymous;
 408	unsigned long lazyfree;
 409	unsigned long anonymous_thp;
 410	unsigned long shmem_thp;
 411	unsigned long file_thp;
 412	unsigned long swap;
 413	unsigned long shared_hugetlb;
 414	unsigned long private_hugetlb;
 415	u64 pss;
 416	u64 pss_anon;
 417	u64 pss_file;
 418	u64 pss_shmem;
 419	u64 pss_dirty;
 420	u64 pss_locked;
 421	u64 swap_pss;
 422};
 423
 424static void smaps_page_accumulate(struct mem_size_stats *mss,
 425		struct page *page, unsigned long size, unsigned long pss,
 426		bool dirty, bool locked, bool private)
 427{
 428	mss->pss += pss;
 429
 430	if (PageAnon(page))
 431		mss->pss_anon += pss;
 432	else if (PageSwapBacked(page))
 433		mss->pss_shmem += pss;
 434	else
 435		mss->pss_file += pss;
 436
 437	if (locked)
 438		mss->pss_locked += pss;
 439
 440	if (dirty || PageDirty(page)) {
 441		mss->pss_dirty += pss;
 442		if (private)
 443			mss->private_dirty += size;
 444		else
 445			mss->shared_dirty += size;
 446	} else {
 447		if (private)
 448			mss->private_clean += size;
 449		else
 450			mss->shared_clean += size;
 451	}
 452}
 453
 454static void smaps_account(struct mem_size_stats *mss, struct page *page,
 455		bool compound, bool young, bool dirty, bool locked,
 456		bool migration)
 457{
 458	int i, nr = compound ? compound_nr(page) : 1;
 459	unsigned long size = nr * PAGE_SIZE;
 460
 461	/*
 462	 * First accumulate quantities that depend only on |size| and the type
 463	 * of the compound page.
 464	 */
 465	if (PageAnon(page)) {
 466		mss->anonymous += size;
 467		if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
 468			mss->lazyfree += size;
 469	}
 470
 471	mss->resident += size;
 472	/* Accumulate the size in pages that have been accessed. */
 473	if (young || page_is_young(page) || PageReferenced(page))
 474		mss->referenced += size;
 475
 476	/*
 477	 * Then accumulate quantities that may depend on sharing, or that may
 478	 * differ page-by-page.
 479	 *
 480	 * page_count(page) == 1 guarantees the page is mapped exactly once.
 481	 * If any subpage of the compound page mapped with PTE it would elevate
 482	 * page_count().
 483	 *
 484	 * The page_mapcount() is called to get a snapshot of the mapcount.
 485	 * Without holding the page lock this snapshot can be slightly wrong as
 486	 * we cannot always read the mapcount atomically.  It is not safe to
 487	 * call page_mapcount() even with PTL held if the page is not mapped,
 488	 * especially for migration entries.  Treat regular migration entries
 489	 * as mapcount == 1.
 490	 */
 491	if ((page_count(page) == 1) || migration) {
 492		smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
 493			locked, true);
 494		return;
 495	}
 496	for (i = 0; i < nr; i++, page++) {
 497		int mapcount = page_mapcount(page);
 498		unsigned long pss = PAGE_SIZE << PSS_SHIFT;
 499		if (mapcount >= 2)
 500			pss /= mapcount;
 501		smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked,
 502				      mapcount < 2);
 503	}
 504}
 505
 506#ifdef CONFIG_SHMEM
 507static int smaps_pte_hole(unsigned long addr, unsigned long end,
 508			  __always_unused int depth, struct mm_walk *walk)
 509{
 510	struct mem_size_stats *mss = walk->private;
 511	struct vm_area_struct *vma = walk->vma;
 512
 513	mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping,
 514					      linear_page_index(vma, addr),
 515					      linear_page_index(vma, end));
 516
 517	return 0;
 518}
 519#else
 520#define smaps_pte_hole		NULL
 521#endif /* CONFIG_SHMEM */
 522
 523static void smaps_pte_hole_lookup(unsigned long addr, struct mm_walk *walk)
 524{
 525#ifdef CONFIG_SHMEM
 526	if (walk->ops->pte_hole) {
 527		/* depth is not used */
 528		smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk);
 529	}
 530#endif
 531}
 532
 533static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 534		struct mm_walk *walk)
 535{
 536	struct mem_size_stats *mss = walk->private;
 537	struct vm_area_struct *vma = walk->vma;
 538	bool locked = !!(vma->vm_flags & VM_LOCKED);
 539	struct page *page = NULL;
 540	bool migration = false, young = false, dirty = false;
 541
 542	if (pte_present(*pte)) {
 543		page = vm_normal_page(vma, addr, *pte);
 544		young = pte_young(*pte);
 545		dirty = pte_dirty(*pte);
 546	} else if (is_swap_pte(*pte)) {
 547		swp_entry_t swpent = pte_to_swp_entry(*pte);
 548
 549		if (!non_swap_entry(swpent)) {
 550			int mapcount;
 551
 552			mss->swap += PAGE_SIZE;
 553			mapcount = swp_swapcount(swpent);
 554			if (mapcount >= 2) {
 555				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
 556
 557				do_div(pss_delta, mapcount);
 558				mss->swap_pss += pss_delta;
 559			} else {
 560				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
 561			}
 562		} else if (is_pfn_swap_entry(swpent)) {
 563			if (is_migration_entry(swpent))
 564				migration = true;
 565			page = pfn_swap_entry_to_page(swpent);
 566		}
 567	} else {
 568		smaps_pte_hole_lookup(addr, walk);
 569		return;
 570	}
 571
 572	if (!page)
 573		return;
 574
 575	smaps_account(mss, page, false, young, dirty, locked, migration);
 576}
 577
 578#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 579static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 580		struct mm_walk *walk)
 581{
 582	struct mem_size_stats *mss = walk->private;
 583	struct vm_area_struct *vma = walk->vma;
 584	bool locked = !!(vma->vm_flags & VM_LOCKED);
 585	struct page *page = NULL;
 586	bool migration = false;
 587
 588	if (pmd_present(*pmd)) {
 589		/* FOLL_DUMP will return -EFAULT on huge zero page */
 590		page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
 591	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
 592		swp_entry_t entry = pmd_to_swp_entry(*pmd);
 593
 594		if (is_migration_entry(entry)) {
 595			migration = true;
 596			page = pfn_swap_entry_to_page(entry);
 597		}
 598	}
 599	if (IS_ERR_OR_NULL(page))
 600		return;
 601	if (PageAnon(page))
 602		mss->anonymous_thp += HPAGE_PMD_SIZE;
 603	else if (PageSwapBacked(page))
 604		mss->shmem_thp += HPAGE_PMD_SIZE;
 605	else if (is_zone_device_page(page))
 606		/* pass */;
 607	else
 608		mss->file_thp += HPAGE_PMD_SIZE;
 609
 610	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
 611		      locked, migration);
 612}
 613#else
 614static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 615		struct mm_walk *walk)
 616{
 617}
 618#endif
 619
 620static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 621			   struct mm_walk *walk)
 622{
 623	struct vm_area_struct *vma = walk->vma;
 624	pte_t *pte;
 625	spinlock_t *ptl;
 626
 627	ptl = pmd_trans_huge_lock(pmd, vma);
 628	if (ptl) {
 629		smaps_pmd_entry(pmd, addr, walk);
 630		spin_unlock(ptl);
 631		goto out;
 632	}
 633
 634	if (pmd_trans_unstable(pmd))
 635		goto out;
 636	/*
 637	 * The mmap_lock held all the way back in m_start() is what
 638	 * keeps khugepaged out of here and from collapsing things
 639	 * in here.
 640	 */
 641	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 642	for (; addr != end; pte++, addr += PAGE_SIZE)
 643		smaps_pte_entry(pte, addr, walk);
 644	pte_unmap_unlock(pte - 1, ptl);
 645out:
 646	cond_resched();
 647	return 0;
 648}
 649
 650static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
 651{
 652	/*
 653	 * Don't forget to update Documentation/ on changes.
 654	 */
 655	static const char mnemonics[BITS_PER_LONG][2] = {
 656		/*
 657		 * In case if we meet a flag we don't know about.
 658		 */
 659		[0 ... (BITS_PER_LONG-1)] = "??",
 660
 661		[ilog2(VM_READ)]	= "rd",
 662		[ilog2(VM_WRITE)]	= "wr",
 663		[ilog2(VM_EXEC)]	= "ex",
 664		[ilog2(VM_SHARED)]	= "sh",
 665		[ilog2(VM_MAYREAD)]	= "mr",
 666		[ilog2(VM_MAYWRITE)]	= "mw",
 667		[ilog2(VM_MAYEXEC)]	= "me",
 668		[ilog2(VM_MAYSHARE)]	= "ms",
 669		[ilog2(VM_GROWSDOWN)]	= "gd",
 670		[ilog2(VM_PFNMAP)]	= "pf",
 671		[ilog2(VM_LOCKED)]	= "lo",
 672		[ilog2(VM_IO)]		= "io",
 673		[ilog2(VM_SEQ_READ)]	= "sr",
 674		[ilog2(VM_RAND_READ)]	= "rr",
 675		[ilog2(VM_DONTCOPY)]	= "dc",
 676		[ilog2(VM_DONTEXPAND)]	= "de",
 677		[ilog2(VM_LOCKONFAULT)]	= "lf",
 678		[ilog2(VM_ACCOUNT)]	= "ac",
 679		[ilog2(VM_NORESERVE)]	= "nr",
 680		[ilog2(VM_HUGETLB)]	= "ht",
 681		[ilog2(VM_SYNC)]	= "sf",
 682		[ilog2(VM_ARCH_1)]	= "ar",
 683		[ilog2(VM_WIPEONFORK)]	= "wf",
 684		[ilog2(VM_DONTDUMP)]	= "dd",
 685#ifdef CONFIG_ARM64_BTI
 686		[ilog2(VM_ARM64_BTI)]	= "bt",
 687#endif
 688#ifdef CONFIG_MEM_SOFT_DIRTY
 689		[ilog2(VM_SOFTDIRTY)]	= "sd",
 690#endif
 691		[ilog2(VM_MIXEDMAP)]	= "mm",
 692		[ilog2(VM_HUGEPAGE)]	= "hg",
 693		[ilog2(VM_NOHUGEPAGE)]	= "nh",
 694		[ilog2(VM_MERGEABLE)]	= "mg",
 695		[ilog2(VM_UFFD_MISSING)]= "um",
 696		[ilog2(VM_UFFD_WP)]	= "uw",
 697#ifdef CONFIG_ARM64_MTE
 698		[ilog2(VM_MTE)]		= "mt",
 699		[ilog2(VM_MTE_ALLOWED)]	= "",
 700#endif
 701#ifdef CONFIG_ARCH_HAS_PKEYS
 702		/* These come out via ProtectionKey: */
 703		[ilog2(VM_PKEY_BIT0)]	= "",
 704		[ilog2(VM_PKEY_BIT1)]	= "",
 705		[ilog2(VM_PKEY_BIT2)]	= "",
 706		[ilog2(VM_PKEY_BIT3)]	= "",
 707#if VM_PKEY_BIT4
 708		[ilog2(VM_PKEY_BIT4)]	= "",
 709#endif
 710#endif /* CONFIG_ARCH_HAS_PKEYS */
 711#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
 712		[ilog2(VM_UFFD_MINOR)]	= "ui",
 713#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
 714	};
 715	size_t i;
 716
 717	seq_puts(m, "VmFlags: ");
 718	for (i = 0; i < BITS_PER_LONG; i++) {
 719		if (!mnemonics[i][0])
 720			continue;
 721		if (vma->vm_flags & (1UL << i)) {
 722			seq_putc(m, mnemonics[i][0]);
 723			seq_putc(m, mnemonics[i][1]);
 724			seq_putc(m, ' ');
 725		}
 726	}
 727	seq_putc(m, '\n');
 728}
 729
 730#ifdef CONFIG_HUGETLB_PAGE
 731static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
 732				 unsigned long addr, unsigned long end,
 733				 struct mm_walk *walk)
 734{
 735	struct mem_size_stats *mss = walk->private;
 736	struct vm_area_struct *vma = walk->vma;
 737	struct page *page = NULL;
 738
 739	if (pte_present(*pte)) {
 740		page = vm_normal_page(vma, addr, *pte);
 741	} else if (is_swap_pte(*pte)) {
 742		swp_entry_t swpent = pte_to_swp_entry(*pte);
 743
 744		if (is_pfn_swap_entry(swpent))
 745			page = pfn_swap_entry_to_page(swpent);
 746	}
 747	if (page) {
 748		if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte))
 749			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
 750		else
 751			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
 752	}
 753	return 0;
 754}
 755#else
 756#define smaps_hugetlb_range	NULL
 757#endif /* HUGETLB_PAGE */
 758
 759static const struct mm_walk_ops smaps_walk_ops = {
 760	.pmd_entry		= smaps_pte_range,
 761	.hugetlb_entry		= smaps_hugetlb_range,
 762};
 763
 764static const struct mm_walk_ops smaps_shmem_walk_ops = {
 765	.pmd_entry		= smaps_pte_range,
 766	.hugetlb_entry		= smaps_hugetlb_range,
 767	.pte_hole		= smaps_pte_hole,
 768};
 769
 770/*
 771 * Gather mem stats from @vma with the indicated beginning
 772 * address @start, and keep them in @mss.
 773 *
 774 * Use vm_start of @vma as the beginning address if @start is 0.
 775 */
 776static void smap_gather_stats(struct vm_area_struct *vma,
 777		struct mem_size_stats *mss, unsigned long start)
 778{
 779	const struct mm_walk_ops *ops = &smaps_walk_ops;
 780
 781	/* Invalid start */
 782	if (start >= vma->vm_end)
 783		return;
 784
 785#ifdef CONFIG_SHMEM
 786	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
 787		/*
 788		 * For shared or readonly shmem mappings we know that all
 789		 * swapped out pages belong to the shmem object, and we can
 790		 * obtain the swap value much more efficiently. For private
 791		 * writable mappings, we might have COW pages that are
 792		 * not affected by the parent swapped out pages of the shmem
 793		 * object, so we have to distinguish them during the page walk.
 794		 * Unless we know that the shmem object (or the part mapped by
 795		 * our VMA) has no swapped out pages at all.
 796		 */
 797		unsigned long shmem_swapped = shmem_swap_usage(vma);
 798
 799		if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
 800					!(vma->vm_flags & VM_WRITE))) {
 801			mss->swap += shmem_swapped;
 802		} else {
 803			ops = &smaps_shmem_walk_ops;
 804		}
 805	}
 806#endif
 807	/* mmap_lock is held in m_start */
 808	if (!start)
 809		walk_page_vma(vma, ops, mss);
 810	else
 811		walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
 812}
 813
 814#define SEQ_PUT_DEC(str, val) \
 815		seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
 816
 817/* Show the contents common for smaps and smaps_rollup */
 818static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
 819	bool rollup_mode)
 820{
 821	SEQ_PUT_DEC("Rss:            ", mss->resident);
 822	SEQ_PUT_DEC(" kB\nPss:            ", mss->pss >> PSS_SHIFT);
 823	SEQ_PUT_DEC(" kB\nPss_Dirty:      ", mss->pss_dirty >> PSS_SHIFT);
 824	if (rollup_mode) {
 825		/*
 826		 * These are meaningful only for smaps_rollup, otherwise two of
 827		 * them are zero, and the other one is the same as Pss.
 828		 */
 829		SEQ_PUT_DEC(" kB\nPss_Anon:       ",
 830			mss->pss_anon >> PSS_SHIFT);
 831		SEQ_PUT_DEC(" kB\nPss_File:       ",
 832			mss->pss_file >> PSS_SHIFT);
 833		SEQ_PUT_DEC(" kB\nPss_Shmem:      ",
 834			mss->pss_shmem >> PSS_SHIFT);
 835	}
 836	SEQ_PUT_DEC(" kB\nShared_Clean:   ", mss->shared_clean);
 837	SEQ_PUT_DEC(" kB\nShared_Dirty:   ", mss->shared_dirty);
 838	SEQ_PUT_DEC(" kB\nPrivate_Clean:  ", mss->private_clean);
 839	SEQ_PUT_DEC(" kB\nPrivate_Dirty:  ", mss->private_dirty);
 840	SEQ_PUT_DEC(" kB\nReferenced:     ", mss->referenced);
 841	SEQ_PUT_DEC(" kB\nAnonymous:      ", mss->anonymous);
 842	SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
 843	SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
 844	SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
 845	SEQ_PUT_DEC(" kB\nFilePmdMapped:  ", mss->file_thp);
 846	SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
 847	seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
 848				  mss->private_hugetlb >> 10, 7);
 849	SEQ_PUT_DEC(" kB\nSwap:           ", mss->swap);
 850	SEQ_PUT_DEC(" kB\nSwapPss:        ",
 851					mss->swap_pss >> PSS_SHIFT);
 852	SEQ_PUT_DEC(" kB\nLocked:         ",
 853					mss->pss_locked >> PSS_SHIFT);
 854	seq_puts(m, " kB\n");
 855}
 856
 857static int show_smap(struct seq_file *m, void *v)
 858{
 859	struct vm_area_struct *vma = v;
 860	struct mem_size_stats mss;
 861
 862	memset(&mss, 0, sizeof(mss));
 863
 864	smap_gather_stats(vma, &mss, 0);
 865
 866	show_map_vma(m, vma);
 867
 868	SEQ_PUT_DEC("Size:           ", vma->vm_end - vma->vm_start);
 869	SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
 870	SEQ_PUT_DEC(" kB\nMMUPageSize:    ", vma_mmu_pagesize(vma));
 871	seq_puts(m, " kB\n");
 872
 873	__show_smap(m, &mss, false);
 874
 875	seq_printf(m, "THPeligible:    %d\n",
 876		   hugepage_vma_check(vma, vma->vm_flags, true, false, true));
 877
 878	if (arch_pkeys_enabled())
 879		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
 880	show_smap_vma_flags(m, vma);
 881
 882	return 0;
 883}
 884
 885static int show_smaps_rollup(struct seq_file *m, void *v)
 886{
 887	struct proc_maps_private *priv = m->private;
 888	struct mem_size_stats mss;
 889	struct mm_struct *mm = priv->mm;
 890	struct vm_area_struct *vma;
 891	unsigned long vma_start = 0, last_vma_end = 0;
 892	int ret = 0;
 893	MA_STATE(mas, &mm->mm_mt, 0, 0);
 894
 895	priv->task = get_proc_task(priv->inode);
 896	if (!priv->task)
 897		return -ESRCH;
 898
 899	if (!mm || !mmget_not_zero(mm)) {
 900		ret = -ESRCH;
 901		goto out_put_task;
 902	}
 903
 904	memset(&mss, 0, sizeof(mss));
 905
 906	ret = mmap_read_lock_killable(mm);
 907	if (ret)
 908		goto out_put_mm;
 909
 910	hold_task_mempolicy(priv);
 911	vma = mas_find(&mas, ULONG_MAX);
 912
 913	if (unlikely(!vma))
 914		goto empty_set;
 915
 916	vma_start = vma->vm_start;
 917	do {
 918		smap_gather_stats(vma, &mss, 0);
 919		last_vma_end = vma->vm_end;
 920
 921		/*
 922		 * Release mmap_lock temporarily if someone wants to
 923		 * access it for write request.
 924		 */
 925		if (mmap_lock_is_contended(mm)) {
 926			mas_pause(&mas);
 927			mmap_read_unlock(mm);
 928			ret = mmap_read_lock_killable(mm);
 929			if (ret) {
 930				release_task_mempolicy(priv);
 931				goto out_put_mm;
 932			}
 933
 934			/*
 935			 * After dropping the lock, there are four cases to
 936			 * consider. See the following example for explanation.
 937			 *
 938			 *   +------+------+-----------+
 939			 *   | VMA1 | VMA2 | VMA3      |
 940			 *   +------+------+-----------+
 941			 *   |      |      |           |
 942			 *  4k     8k     16k         400k
 943			 *
 944			 * Suppose we drop the lock after reading VMA2 due to
 945			 * contention, then we get:
 946			 *
 947			 *	last_vma_end = 16k
 948			 *
 949			 * 1) VMA2 is freed, but VMA3 exists:
 950			 *
 951			 *    find_vma(mm, 16k - 1) will return VMA3.
 952			 *    In this case, just continue from VMA3.
 953			 *
 954			 * 2) VMA2 still exists:
 955			 *
 956			 *    find_vma(mm, 16k - 1) will return VMA2.
 957			 *    Iterate the loop like the original one.
 958			 *
 959			 * 3) No more VMAs can be found:
 960			 *
 961			 *    find_vma(mm, 16k - 1) will return NULL.
 962			 *    No more things to do, just break.
 963			 *
 964			 * 4) (last_vma_end - 1) is the middle of a vma (VMA'):
 965			 *
 966			 *    find_vma(mm, 16k - 1) will return VMA' whose range
 967			 *    contains last_vma_end.
 968			 *    Iterate VMA' from last_vma_end.
 969			 */
 970			vma = mas_find(&mas, ULONG_MAX);
 971			/* Case 3 above */
 972			if (!vma)
 973				break;
 974
 975			/* Case 1 above */
 976			if (vma->vm_start >= last_vma_end)
 977				continue;
 978
 979			/* Case 4 above */
 980			if (vma->vm_end > last_vma_end)
 981				smap_gather_stats(vma, &mss, last_vma_end);
 982		}
 983		/* Case 2 above */
 984	} while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
 985
 986empty_set:
 987	show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
 988	seq_pad(m, ' ');
 989	seq_puts(m, "[rollup]\n");
 990
 991	__show_smap(m, &mss, true);
 992
 993	release_task_mempolicy(priv);
 994	mmap_read_unlock(mm);
 995
 996out_put_mm:
 997	mmput(mm);
 998out_put_task:
 999	put_task_struct(priv->task);
1000	priv->task = NULL;
1001
1002	return ret;
1003}
1004#undef SEQ_PUT_DEC
1005
1006static const struct seq_operations proc_pid_smaps_op = {
1007	.start	= m_start,
1008	.next	= m_next,
1009	.stop	= m_stop,
1010	.show	= show_smap
1011};
1012
1013static int pid_smaps_open(struct inode *inode, struct file *file)
1014{
1015	return do_maps_open(inode, file, &proc_pid_smaps_op);
1016}
1017
1018static int smaps_rollup_open(struct inode *inode, struct file *file)
1019{
1020	int ret;
1021	struct proc_maps_private *priv;
1022
1023	priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
1024	if (!priv)
1025		return -ENOMEM;
1026
1027	ret = single_open(file, show_smaps_rollup, priv);
1028	if (ret)
1029		goto out_free;
1030
1031	priv->inode = inode;
1032	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
1033	if (IS_ERR(priv->mm)) {
1034		ret = PTR_ERR(priv->mm);
1035
1036		single_release(inode, file);
1037		goto out_free;
1038	}
1039
1040	return 0;
1041
1042out_free:
1043	kfree(priv);
1044	return ret;
1045}
1046
1047static int smaps_rollup_release(struct inode *inode, struct file *file)
1048{
1049	struct seq_file *seq = file->private_data;
1050	struct proc_maps_private *priv = seq->private;
1051
1052	if (priv->mm)
1053		mmdrop(priv->mm);
1054
1055	kfree(priv);
1056	return single_release(inode, file);
1057}
1058
1059const struct file_operations proc_pid_smaps_operations = {
1060	.open		= pid_smaps_open,
1061	.read		= seq_read,
1062	.llseek		= seq_lseek,
1063	.release	= proc_map_release,
1064};
1065
1066const struct file_operations proc_pid_smaps_rollup_operations = {
1067	.open		= smaps_rollup_open,
1068	.read		= seq_read,
1069	.llseek		= seq_lseek,
1070	.release	= smaps_rollup_release,
1071};
1072
1073enum clear_refs_types {
1074	CLEAR_REFS_ALL = 1,
1075	CLEAR_REFS_ANON,
1076	CLEAR_REFS_MAPPED,
1077	CLEAR_REFS_SOFT_DIRTY,
1078	CLEAR_REFS_MM_HIWATER_RSS,
1079	CLEAR_REFS_LAST,
1080};
1081
1082struct clear_refs_private {
1083	enum clear_refs_types type;
1084};
1085
1086#ifdef CONFIG_MEM_SOFT_DIRTY
1087
1088static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1089{
1090	struct page *page;
1091
1092	if (!pte_write(pte))
1093		return false;
1094	if (!is_cow_mapping(vma->vm_flags))
1095		return false;
1096	if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
1097		return false;
1098	page = vm_normal_page(vma, addr, pte);
1099	if (!page)
1100		return false;
1101	return page_maybe_dma_pinned(page);
1102}
1103
1104static inline void clear_soft_dirty(struct vm_area_struct *vma,
1105		unsigned long addr, pte_t *pte)
1106{
1107	/*
1108	 * The soft-dirty tracker uses #PF-s to catch writes
1109	 * to pages, so write-protect the pte as well. See the
1110	 * Documentation/admin-guide/mm/soft-dirty.rst for full description
1111	 * of how soft-dirty works.
1112	 */
1113	pte_t ptent = *pte;
1114
1115	if (pte_present(ptent)) {
1116		pte_t old_pte;
1117
1118		if (pte_is_pinned(vma, addr, ptent))
1119			return;
1120		old_pte = ptep_modify_prot_start(vma, addr, pte);
1121		ptent = pte_wrprotect(old_pte);
1122		ptent = pte_clear_soft_dirty(ptent);
1123		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
1124	} else if (is_swap_pte(ptent)) {
1125		ptent = pte_swp_clear_soft_dirty(ptent);
1126		set_pte_at(vma->vm_mm, addr, pte, ptent);
1127	}
1128}
1129#else
1130static inline void clear_soft_dirty(struct vm_area_struct *vma,
1131		unsigned long addr, pte_t *pte)
1132{
1133}
1134#endif
1135
1136#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1137static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1138		unsigned long addr, pmd_t *pmdp)
1139{
1140	pmd_t old, pmd = *pmdp;
1141
1142	if (pmd_present(pmd)) {
1143		/* See comment in change_huge_pmd() */
1144		old = pmdp_invalidate(vma, addr, pmdp);
1145		if (pmd_dirty(old))
1146			pmd = pmd_mkdirty(pmd);
1147		if (pmd_young(old))
1148			pmd = pmd_mkyoung(pmd);
1149
1150		pmd = pmd_wrprotect(pmd);
1151		pmd = pmd_clear_soft_dirty(pmd);
1152
1153		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1154	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
1155		pmd = pmd_swp_clear_soft_dirty(pmd);
1156		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1157	}
1158}
1159#else
1160static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1161		unsigned long addr, pmd_t *pmdp)
1162{
1163}
1164#endif
1165
1166static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
1167				unsigned long end, struct mm_walk *walk)
1168{
1169	struct clear_refs_private *cp = walk->private;
1170	struct vm_area_struct *vma = walk->vma;
1171	pte_t *pte, ptent;
1172	spinlock_t *ptl;
1173	struct page *page;
1174
1175	ptl = pmd_trans_huge_lock(pmd, vma);
1176	if (ptl) {
1177		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1178			clear_soft_dirty_pmd(vma, addr, pmd);
1179			goto out;
1180		}
1181
1182		if (!pmd_present(*pmd))
1183			goto out;
1184
1185		page = pmd_page(*pmd);
1186
1187		/* Clear accessed and referenced bits. */
1188		pmdp_test_and_clear_young(vma, addr, pmd);
1189		test_and_clear_page_young(page);
1190		ClearPageReferenced(page);
1191out:
1192		spin_unlock(ptl);
1193		return 0;
1194	}
1195
1196	if (pmd_trans_unstable(pmd))
1197		return 0;
1198
1199	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1200	for (; addr != end; pte++, addr += PAGE_SIZE) {
1201		ptent = *pte;
1202
1203		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1204			clear_soft_dirty(vma, addr, pte);
1205			continue;
1206		}
1207
1208		if (!pte_present(ptent))
1209			continue;
1210
1211		page = vm_normal_page(vma, addr, ptent);
1212		if (!page)
1213			continue;
1214
1215		/* Clear accessed and referenced bits. */
1216		ptep_test_and_clear_young(vma, addr, pte);
1217		test_and_clear_page_young(page);
1218		ClearPageReferenced(page);
1219	}
1220	pte_unmap_unlock(pte - 1, ptl);
1221	cond_resched();
1222	return 0;
1223}
1224
1225static int clear_refs_test_walk(unsigned long start, unsigned long end,
1226				struct mm_walk *walk)
1227{
1228	struct clear_refs_private *cp = walk->private;
1229	struct vm_area_struct *vma = walk->vma;
1230
1231	if (vma->vm_flags & VM_PFNMAP)
1232		return 1;
1233
1234	/*
1235	 * Writing 1 to /proc/pid/clear_refs affects all pages.
1236	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
1237	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
1238	 * Writing 4 to /proc/pid/clear_refs affects all pages.
1239	 */
1240	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
1241		return 1;
1242	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
1243		return 1;
1244	return 0;
1245}
1246
1247static const struct mm_walk_ops clear_refs_walk_ops = {
1248	.pmd_entry		= clear_refs_pte_range,
1249	.test_walk		= clear_refs_test_walk,
1250};
1251
1252static ssize_t clear_refs_write(struct file *file, const char __user *buf,
1253				size_t count, loff_t *ppos)
1254{
1255	struct task_struct *task;
1256	char buffer[PROC_NUMBUF];
1257	struct mm_struct *mm;
1258	struct vm_area_struct *vma;
1259	enum clear_refs_types type;
1260	int itype;
1261	int rv;
1262
1263	memset(buffer, 0, sizeof(buffer));
1264	if (count > sizeof(buffer) - 1)
1265		count = sizeof(buffer) - 1;
1266	if (copy_from_user(buffer, buf, count))
1267		return -EFAULT;
1268	rv = kstrtoint(strstrip(buffer), 10, &itype);
1269	if (rv < 0)
1270		return rv;
1271	type = (enum clear_refs_types)itype;
1272	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
1273		return -EINVAL;
1274
1275	task = get_proc_task(file_inode(file));
1276	if (!task)
1277		return -ESRCH;
1278	mm = get_task_mm(task);
1279	if (mm) {
1280		MA_STATE(mas, &mm->mm_mt, 0, 0);
1281		struct mmu_notifier_range range;
1282		struct clear_refs_private cp = {
1283			.type = type,
1284		};
1285
1286		if (mmap_write_lock_killable(mm)) {
1287			count = -EINTR;
1288			goto out_mm;
1289		}
1290		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
1291			/*
1292			 * Writing 5 to /proc/pid/clear_refs resets the peak
1293			 * resident set size to this mm's current rss value.
1294			 */
1295			reset_mm_hiwater_rss(mm);
1296			goto out_unlock;
1297		}
1298
1299		if (type == CLEAR_REFS_SOFT_DIRTY) {
1300			mas_for_each(&mas, vma, ULONG_MAX) {
1301				if (!(vma->vm_flags & VM_SOFTDIRTY))
1302					continue;
1303				vma->vm_flags &= ~VM_SOFTDIRTY;
1304				vma_set_page_prot(vma);
1305			}
1306
1307			inc_tlb_flush_pending(mm);
1308			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
1309						0, NULL, mm, 0, -1UL);
1310			mmu_notifier_invalidate_range_start(&range);
1311		}
1312		walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
1313		if (type == CLEAR_REFS_SOFT_DIRTY) {
1314			mmu_notifier_invalidate_range_end(&range);
1315			flush_tlb_mm(mm);
1316			dec_tlb_flush_pending(mm);
1317		}
1318out_unlock:
1319		mmap_write_unlock(mm);
1320out_mm:
1321		mmput(mm);
1322	}
1323	put_task_struct(task);
1324
1325	return count;
1326}
1327
1328const struct file_operations proc_clear_refs_operations = {
1329	.write		= clear_refs_write,
1330	.llseek		= noop_llseek,
1331};
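
The write handler above backs the soft-dirty workflow described in Documentation/admin-guide/mm/soft-dirty.rst: write "4" to /proc/PID/clear_refs to write-protect PTEs and clear the soft-dirty bits, let the workload run, then rescan pagemap bit 55 to find pages written since. A hedged user-space sketch of the first step, assuming the usual procfs layout:

/* User-space sketch: clear soft-dirty bits for a task via clear_refs. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>

static int clear_soft_dirty(pid_t pid)
{
	char path[64];
	int fd, ok;

	snprintf(path, sizeof(path), "/proc/%d/clear_refs", (int)pid);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ok = (write(fd, "4", 1) == 1);	/* 4 == CLEAR_REFS_SOFT_DIRTY */
	close(fd);
	return ok ? 0 : -1;
}

int main(void)
{
	return clear_soft_dirty(getpid()) ? 1 : 0;
}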
1332
1333typedef struct {
1334	u64 pme;
1335} pagemap_entry_t;
1336
1337struct pagemapread {
1338	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
1339	pagemap_entry_t *buffer;
1340	bool show_pfn;
1341};
1342
1343#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
1344#define PAGEMAP_WALK_MASK	(PMD_MASK)
1345
1346#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
1347#define PM_PFRAME_BITS		55
1348#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1349#define PM_SOFT_DIRTY		BIT_ULL(55)
1350#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
1351#define PM_UFFD_WP		BIT_ULL(57)
1352#define PM_FILE			BIT_ULL(61)
1353#define PM_SWAP			BIT_ULL(62)
1354#define PM_PRESENT		BIT_ULL(63)
1355
1356#define PM_END_OF_BUFFER    1
1357
1358static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
1359{
1360	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
1361}
1362
1363static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
1364			  struct pagemapread *pm)
1365{
1366	pm->buffer[pm->pos++] = *pme;
1367	if (pm->pos >= pm->len)
1368		return PM_END_OF_BUFFER;
1369	return 0;
1370}
1371
1372static int pagemap_pte_hole(unsigned long start, unsigned long end,
1373			    __always_unused int depth, struct mm_walk *walk)
1374{
1375	struct pagemapread *pm = walk->private;
1376	unsigned long addr = start;
1377	int err = 0;
1378
1379	while (addr < end) {
1380		struct vm_area_struct *vma = find_vma(walk->mm, addr);
1381		pagemap_entry_t pme = make_pme(0, 0);
1382		/* End of address space hole, which we mark as non-present. */
1383		unsigned long hole_end;
1384
1385		if (vma)
1386			hole_end = min(end, vma->vm_start);
1387		else
1388			hole_end = end;
1389
1390		for (; addr < hole_end; addr += PAGE_SIZE) {
1391			err = add_to_pagemap(addr, &pme, pm);
1392			if (err)
1393				goto out;
1394		}
1395
1396		if (!vma)
1397			break;
1398
1399		/* Addresses in the VMA. */
1400		if (vma->vm_flags & VM_SOFTDIRTY)
1401			pme = make_pme(0, PM_SOFT_DIRTY);
1402		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
1403			err = add_to_pagemap(addr, &pme, pm);
1404			if (err)
1405				goto out;
1406		}
1407	}
1408out:
1409	return err;
1410}
1411
1412static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
1413		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1414{
1415	u64 frame = 0, flags = 0;
1416	struct page *page = NULL;
1417	bool migration = false;
1418
1419	if (pte_present(pte)) {
1420		if (pm->show_pfn)
1421			frame = pte_pfn(pte);
1422		flags |= PM_PRESENT;
1423		page = vm_normal_page(vma, addr, pte);
1424		if (pte_soft_dirty(pte))
1425			flags |= PM_SOFT_DIRTY;
1426		if (pte_uffd_wp(pte))
1427			flags |= PM_UFFD_WP;
1428	} else if (is_swap_pte(pte)) {
1429		swp_entry_t entry;
1430		if (pte_swp_soft_dirty(pte))
1431			flags |= PM_SOFT_DIRTY;
1432		if (pte_swp_uffd_wp(pte))
1433			flags |= PM_UFFD_WP;
1434		entry = pte_to_swp_entry(pte);
1435		if (pm->show_pfn) {
1436			pgoff_t offset;
1437			/*
1438			 * For PFN swap offsets, keeping the offset field
1439			 * to be PFN only to be compatible with old smaps.
1440			 */
1441			if (is_pfn_swap_entry(entry))
1442				offset = swp_offset_pfn(entry);
1443			else
1444				offset = swp_offset(entry);
1445			frame = swp_type(entry) |
1446			    (offset << MAX_SWAPFILES_SHIFT);
1447		}
1448		flags |= PM_SWAP;
1449		migration = is_migration_entry(entry);
1450		if (is_pfn_swap_entry(entry))
1451			page = pfn_swap_entry_to_page(entry);
1452		if (pte_marker_entry_uffd_wp(entry))
1453			flags |= PM_UFFD_WP;
1454	}
1455
1456	if (page && !PageAnon(page))
1457		flags |= PM_FILE;
1458	if (page && !migration && page_mapcount(page) == 1)
1459		flags |= PM_MMAP_EXCLUSIVE;
1460	if (vma->vm_flags & VM_SOFTDIRTY)
1461		flags |= PM_SOFT_DIRTY;
1462
1463	return make_pme(frame, flags);
1464}
1465
1466static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1467			     struct mm_walk *walk)
1468{
1469	struct vm_area_struct *vma = walk->vma;
1470	struct pagemapread *pm = walk->private;
1471	spinlock_t *ptl;
1472	pte_t *pte, *orig_pte;
1473	int err = 0;
1474#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1475	bool migration = false;
1476
1477	ptl = pmd_trans_huge_lock(pmdp, vma);
1478	if (ptl) {
1479		u64 flags = 0, frame = 0;
1480		pmd_t pmd = *pmdp;
1481		struct page *page = NULL;
1482
1483		if (vma->vm_flags & VM_SOFTDIRTY)
1484			flags |= PM_SOFT_DIRTY;
1485
1486		if (pmd_present(pmd)) {
1487			page = pmd_page(pmd);
1488
1489			flags |= PM_PRESENT;
1490			if (pmd_soft_dirty(pmd))
1491				flags |= PM_SOFT_DIRTY;
1492			if (pmd_uffd_wp(pmd))
1493				flags |= PM_UFFD_WP;
1494			if (pm->show_pfn)
1495				frame = pmd_pfn(pmd) +
1496					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1497		}
1498#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1499		else if (is_swap_pmd(pmd)) {
1500			swp_entry_t entry = pmd_to_swp_entry(pmd);
1501			unsigned long offset;
1502
1503			if (pm->show_pfn) {
1504				if (is_pfn_swap_entry(entry))
1505					offset = swp_offset_pfn(entry);
1506				else
1507					offset = swp_offset(entry);
1508				offset = offset +
1509					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1510				frame = swp_type(entry) |
1511					(offset << MAX_SWAPFILES_SHIFT);
1512			}
1513			flags |= PM_SWAP;
1514			if (pmd_swp_soft_dirty(pmd))
1515				flags |= PM_SOFT_DIRTY;
1516			if (pmd_swp_uffd_wp(pmd))
1517				flags |= PM_UFFD_WP;
1518			VM_BUG_ON(!is_pmd_migration_entry(pmd));
1519			migration = is_migration_entry(entry);
1520			page = pfn_swap_entry_to_page(entry);
1521		}
1522#endif
1523
1524		if (page && !migration && page_mapcount(page) == 1)
1525			flags |= PM_MMAP_EXCLUSIVE;
1526
1527		for (; addr != end; addr += PAGE_SIZE) {
1528			pagemap_entry_t pme = make_pme(frame, flags);
1529
1530			err = add_to_pagemap(addr, &pme, pm);
1531			if (err)
1532				break;
1533			if (pm->show_pfn) {
1534				if (flags & PM_PRESENT)
1535					frame++;
1536				else if (flags & PM_SWAP)
1537					frame += (1 << MAX_SWAPFILES_SHIFT);
1538			}
1539		}
1540		spin_unlock(ptl);
1541		return err;
1542	}
1543
1544	if (pmd_trans_unstable(pmdp))
1545		return 0;
1546#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1547
1548	/*
1549	 * We can assume that @vma always points to a valid one and @end never
1550	 * goes beyond vma->vm_end.
1551	 */
1552	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
1553	for (; addr < end; pte++, addr += PAGE_SIZE) {
1554		pagemap_entry_t pme;
1555
1556		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
1557		err = add_to_pagemap(addr, &pme, pm);
1558		if (err)
1559			break;
1560	}
1561	pte_unmap_unlock(orig_pte, ptl);
1562
1563	cond_resched();
1564
1565	return err;
1566}
1567
1568#ifdef CONFIG_HUGETLB_PAGE
1569/* This function walks within one hugetlb entry in the single call */
1570static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
1571				 unsigned long addr, unsigned long end,
1572				 struct mm_walk *walk)
1573{
1574	struct pagemapread *pm = walk->private;
1575	struct vm_area_struct *vma = walk->vma;
1576	u64 flags = 0, frame = 0;
1577	int err = 0;
1578	pte_t pte;
1579
1580	if (vma->vm_flags & VM_SOFTDIRTY)
1581		flags |= PM_SOFT_DIRTY;
1582
1583	pte = huge_ptep_get(ptep);
1584	if (pte_present(pte)) {
1585		struct page *page = pte_page(pte);
1586
1587		if (!PageAnon(page))
1588			flags |= PM_FILE;
1589
1590		if (page_mapcount(page) == 1)
1591			flags |= PM_MMAP_EXCLUSIVE;
1592
1593		if (huge_pte_uffd_wp(pte))
1594			flags |= PM_UFFD_WP;
1595
1596		flags |= PM_PRESENT;
1597		if (pm->show_pfn)
1598			frame = pte_pfn(pte) +
1599				((addr & ~hmask) >> PAGE_SHIFT);
1600	} else if (pte_swp_uffd_wp_any(pte)) {
1601		flags |= PM_UFFD_WP;
1602	}
1603
1604	for (; addr != end; addr += PAGE_SIZE) {
1605		pagemap_entry_t pme = make_pme(frame, flags);
1606
1607		err = add_to_pagemap(addr, &pme, pm);
1608		if (err)
1609			return err;
1610		if (pm->show_pfn && (flags & PM_PRESENT))
1611			frame++;
1612	}
1613
1614	cond_resched();
1615
1616	return err;
1617}
1618#else
1619#define pagemap_hugetlb_range	NULL
1620#endif /* HUGETLB_PAGE */
1621
1622static const struct mm_walk_ops pagemap_ops = {
1623	.pmd_entry	= pagemap_pmd_range,
1624	.pte_hole	= pagemap_pte_hole,
1625	.hugetlb_entry	= pagemap_hugetlb_range,
1626};
1627
1628/*
1629 * /proc/pid/pagemap - an array mapping virtual pages to pfns
1630 *
1631 * For each page in the address space, this file contains one 64-bit entry
1632 * consisting of the following:
1633 *
1634 * Bits 0-54  page frame number (PFN) if present
1635 * Bits 0-4   swap type if swapped
1636 * Bits 5-54  swap offset if swapped
1637 * Bit  55    pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
1638 * Bit  56    page exclusively mapped
1639 * Bit  57    pte is uffd-wp write-protected
1640 * Bits 58-60 zero
1641 * Bit  61    page is file-page or shared-anon
1642 * Bit  62    page swapped
1643 * Bit  63    page present
1644 *
1645 * If the page is not present but in swap, then the PFN contains an
1646 * encoding of the swap file number and the page's offset into the
1647 * swap. Unmapped pages return a null PFN. This allows determining
1648 * precisely which pages are mapped (or in swap) and comparing mapped
1649 * pages between processes.
1650 *
1651 * Efficient users of this interface will use /proc/pid/maps to
1652 * determine which areas of memory are actually mapped and llseek to
1653 * skip over unmapped regions.
1654 */
1655static ssize_t pagemap_read(struct file *file, char __user *buf,
1656			    size_t count, loff_t *ppos)
1657{
1658	struct mm_struct *mm = file->private_data;
1659	struct pagemapread pm;
1660	unsigned long src;
1661	unsigned long svpfn;
1662	unsigned long start_vaddr;
1663	unsigned long end_vaddr;
1664	int ret = 0, copied = 0;
1665
1666	if (!mm || !mmget_not_zero(mm))
1667		goto out;
1668
1669	ret = -EINVAL;
1670	/* file position must be aligned */
1671	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1672		goto out_mm;
1673
1674	ret = 0;
1675	if (!count)
1676		goto out_mm;
1677
1678	/* do not disclose physical addresses: attack vector */
1679	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
1680
1681	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1682	pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL);
1683	ret = -ENOMEM;
1684	if (!pm.buffer)
1685		goto out_mm;
1686
1687	src = *ppos;
1688	svpfn = src / PM_ENTRY_BYTES;
1689	end_vaddr = mm->task_size;
1690
1691	/* watch out for wraparound */
1692	start_vaddr = end_vaddr;
1693	if (svpfn <= (ULONG_MAX >> PAGE_SHIFT))
1694		start_vaddr = untagged_addr(svpfn << PAGE_SHIFT);
1695
1696	/* Ensure the address is inside the task */
1697	if (start_vaddr > mm->task_size)
1698		start_vaddr = end_vaddr;
1699
1700	/*
1701	 * The odds are that this will stop walking way
1702	 * before end_vaddr, because the length of the
1703	 * user buffer is tracked in "pm", and the walk
1704	 * will stop when we hit the end of the buffer.
1705	 */
1706	ret = 0;
1707	while (count && (start_vaddr < end_vaddr)) {
1708		int len;
1709		unsigned long end;
1710
1711		pm.pos = 0;
1712		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
1713		/* overflow ? */
1714		if (end < start_vaddr || end > end_vaddr)
1715			end = end_vaddr;
1716		ret = mmap_read_lock_killable(mm);
1717		if (ret)
1718			goto out_free;
1719		ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
1720		mmap_read_unlock(mm);
1721		start_vaddr = end;
1722
1723		len = min(count, PM_ENTRY_BYTES * pm.pos);
1724		if (copy_to_user(buf, pm.buffer, len)) {
1725			ret = -EFAULT;
1726			goto out_free;
1727		}
1728		copied += len;
1729		buf += len;
1730		count -= len;
1731	}
1732	*ppos += copied;
1733	if (!ret || ret == PM_END_OF_BUFFER)
1734		ret = copied;
1735
1736out_free:
1737	kfree(pm.buffer);
1738out_mm:
1739	mmput(mm);
1740out:
1741	return ret;
1742}
1743
1744static int pagemap_open(struct inode *inode, struct file *file)
1745{
1746	struct mm_struct *mm;
1747
1748	mm = proc_mem_open(inode, PTRACE_MODE_READ);
1749	if (IS_ERR(mm))
1750		return PTR_ERR(mm);
1751	file->private_data = mm;
1752	return 0;
1753}
1754
1755static int pagemap_release(struct inode *inode, struct file *file)
1756{
1757	struct mm_struct *mm = file->private_data;
1758
1759	if (mm)
1760		mmdrop(mm);
1761	return 0;
1762}
1763
1764const struct file_operations proc_pagemap_operations = {
1765	.llseek		= mem_lseek, /* borrow this */
1766	.read		= pagemap_read,
1767	.open		= pagemap_open,
1768	.release	= pagemap_release,
1769};
1770#endif /* CONFIG_PROC_PAGE_MONITOR */
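
Using the 64-bit entry layout documented above pagemap_read(), user space can decode the entry for a single virtual address. A sketch follows; note that without CAP_SYS_ADMIN the kernel leaves the PFN field zero, so only the flag bits are meaningful in that case:

/* User-space sketch: decode one pagemap entry using the bit layout above. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	uintptr_t vaddr = (uintptr_t)&psize;	/* any mapped address */
	uint64_t ent;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return 1;
	if (pread(fd, &ent, sizeof(ent),
		  (off_t)(vaddr / psize) * sizeof(ent)) != sizeof(ent))
		return 1;
	printf("present=%d swap=%d file=%d exclusive=%d soft-dirty=%d pfn=%#llx\n",
	       !!(ent & (1ULL << 63)), !!(ent & (1ULL << 62)),
	       !!(ent & (1ULL << 61)), !!(ent & (1ULL << 56)),
	       !!(ent & (1ULL << 55)),
	       (unsigned long long)(ent & ((1ULL << 55) - 1)));
	close(fd);
	return 0;
}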
1771
1772#ifdef CONFIG_NUMA
1773
1774struct numa_maps {
1775	unsigned long pages;
1776	unsigned long anon;
1777	unsigned long active;
1778	unsigned long writeback;
1779	unsigned long mapcount_max;
1780	unsigned long dirty;
1781	unsigned long swapcache;
1782	unsigned long node[MAX_NUMNODES];
1783};
1784
1785struct numa_maps_private {
1786	struct proc_maps_private proc_maps;
1787	struct numa_maps md;
1788};
1789
1790static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
1791			unsigned long nr_pages)
1792{
1793	int count = page_mapcount(page);
1794
1795	md->pages += nr_pages;
1796	if (pte_dirty || PageDirty(page))
1797		md->dirty += nr_pages;
1798
1799	if (PageSwapCache(page))
1800		md->swapcache += nr_pages;
1801
1802	if (PageActive(page) || PageUnevictable(page))
1803		md->active += nr_pages;
1804
1805	if (PageWriteback(page))
1806		md->writeback += nr_pages;
1807
1808	if (PageAnon(page))
1809		md->anon += nr_pages;
1810
1811	if (count > md->mapcount_max)
1812		md->mapcount_max = count;
1813
1814	md->node[page_to_nid(page)] += nr_pages;
1815}
1816
1817static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1818		unsigned long addr)
1819{
1820	struct page *page;
1821	int nid;
1822
1823	if (!pte_present(pte))
1824		return NULL;
1825
1826	page = vm_normal_page(vma, addr, pte);
1827	if (!page || is_zone_device_page(page))
1828		return NULL;
1829
1830	if (PageReserved(page))
1831		return NULL;
1832
1833	nid = page_to_nid(page);
1834	if (!node_isset(nid, node_states[N_MEMORY]))
1835		return NULL;
1836
1837	return page;
1838}
1839
1840#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1841static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
1842					      struct vm_area_struct *vma,
1843					      unsigned long addr)
1844{
1845	struct page *page;
1846	int nid;
1847
1848	if (!pmd_present(pmd))
1849		return NULL;
1850
1851	page = vm_normal_page_pmd(vma, addr, pmd);
1852	if (!page)
1853		return NULL;
1854
1855	if (PageReserved(page))
1856		return NULL;
1857
1858	nid = page_to_nid(page);
1859	if (!node_isset(nid, node_states[N_MEMORY]))
1860		return NULL;
1861
1862	return page;
1863}
1864#endif
1865
1866static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1867		unsigned long end, struct mm_walk *walk)
1868{
1869	struct numa_maps *md = walk->private;
1870	struct vm_area_struct *vma = walk->vma;
1871	spinlock_t *ptl;
1872	pte_t *orig_pte;
1873	pte_t *pte;
1874
1875#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1876	ptl = pmd_trans_huge_lock(pmd, vma);
1877	if (ptl) {
1878		struct page *page;
1879
1880		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
1881		if (page)
1882			gather_stats(page, md, pmd_dirty(*pmd),
1883				     HPAGE_PMD_SIZE/PAGE_SIZE);
1884		spin_unlock(ptl);
1885		return 0;
1886	}
1887
1888	if (pmd_trans_unstable(pmd))
1889		return 0;
1890#endif
1891	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1892	do {
1893		struct page *page = can_gather_numa_stats(*pte, vma, addr);
1894		if (!page)
1895			continue;
1896		gather_stats(page, md, pte_dirty(*pte), 1);
1897
1898	} while (pte++, addr += PAGE_SIZE, addr != end);
1899	pte_unmap_unlock(orig_pte, ptl);
1900	cond_resched();
1901	return 0;
1902}
1903#ifdef CONFIG_HUGETLB_PAGE
1904static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1905		unsigned long addr, unsigned long end, struct mm_walk *walk)
1906{
1907	pte_t huge_pte = huge_ptep_get(pte);
1908	struct numa_maps *md;
1909	struct page *page;
1910
1911	if (!pte_present(huge_pte))
1912		return 0;
1913
1914	page = pte_page(huge_pte);
1915
1916	md = walk->private;
1917	gather_stats(page, md, pte_dirty(huge_pte), 1);
1918	return 0;
1919}
1920
1921#else
1922static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1923		unsigned long addr, unsigned long end, struct mm_walk *walk)
1924{
1925	return 0;
1926}
1927#endif
1928
1929static const struct mm_walk_ops show_numa_ops = {
1930	.hugetlb_entry = gather_hugetlb_stats,
1931	.pmd_entry = gather_pte_stats,
1932};
1933
1934/*
1935 * Display pages allocated per node and memory policy via /proc.
1936 */
1937static int show_numa_map(struct seq_file *m, void *v)
1938{
1939	struct numa_maps_private *numa_priv = m->private;
1940	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
1941	struct vm_area_struct *vma = v;
1942	struct numa_maps *md = &numa_priv->md;
1943	struct file *file = vma->vm_file;
1944	struct mm_struct *mm = vma->vm_mm;
1945	struct mempolicy *pol;
1946	char buffer[64];
1947	int nid;
1948
1949	if (!mm)
1950		return 0;
1951
1952	/* Ensure we start with an empty set of numa_maps statistics. */
1953	memset(md, 0, sizeof(*md));
1954
1955	pol = __get_vma_policy(vma, vma->vm_start);
1956	if (pol) {
1957		mpol_to_str(buffer, sizeof(buffer), pol);
1958		mpol_cond_put(pol);
1959	} else {
1960		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
1961	}
1962
1963	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1964
1965	if (file) {
1966		seq_puts(m, " file=");
1967		seq_file_path(m, file, "\n\t= ");
1968	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1969		seq_puts(m, " heap");
1970	} else if (is_stack(vma)) {
1971		seq_puts(m, " stack");
1972	}
1973
1974	if (is_vm_hugetlb_page(vma))
1975		seq_puts(m, " huge");
1976
1977	/* mmap_lock is held by m_start */
1978	walk_page_vma(vma, &show_numa_ops, md);
1979
1980	if (!md->pages)
1981		goto out;
1982
1983	if (md->anon)
1984		seq_printf(m, " anon=%lu", md->anon);
1985
1986	if (md->dirty)
1987		seq_printf(m, " dirty=%lu", md->dirty);
1988
1989	if (md->pages != md->anon && md->pages != md->dirty)
1990		seq_printf(m, " mapped=%lu", md->pages);
1991
1992	if (md->mapcount_max > 1)
1993		seq_printf(m, " mapmax=%lu", md->mapcount_max);
1994
1995	if (md->swapcache)
1996		seq_printf(m, " swapcache=%lu", md->swapcache);
1997
1998	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1999		seq_printf(m, " active=%lu", md->active);
2000
2001	if (md->writeback)
2002		seq_printf(m, " writeback=%lu", md->writeback);
2003
2004	for_each_node_state(nid, N_MEMORY)
2005		if (md->node[nid])
2006			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
2007
2008	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
2009out:
2010	seq_putc(m, '\n');
 
2011	return 0;
2012}
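A minimal user-space sketch (not part of the kernel source) that exercises the interface produced by show_numa_map() above: it maps and touches some anonymous memory, then dumps /proc/self/numa_maps so the per-node counters become visible. Error handling is kept to a minimum.

/* Illustrative user-space sketch, not kernel code. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 2 * 1024 * 1024;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        char line[512];
        FILE *f;

        if (p == MAP_FAILED)
                return 1;
        memset(p, 0xaa, len);           /* fault the pages in */

        f = fopen("/proc/self/numa_maps", "r");
        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);    /* one line per VMA, as built above */
        fclose(f);
        return 0;
}

On a single-node machine the anonymous mapping typically shows up with anon=, dirty= and a single N0= count.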
2013
2014static const struct seq_operations proc_pid_numa_maps_op = {
2015	.start  = m_start,
2016	.next   = m_next,
2017	.stop   = m_stop,
2018	.show   = show_numa_map,
2019};
2020
2021static int pid_numa_maps_open(struct inode *inode, struct file *file)
2022{
2023	return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
2024				sizeof(struct numa_maps_private));
2025}
2026
2027const struct file_operations proc_pid_numa_maps_operations = {
2028	.open		= pid_numa_maps_open,
2029	.read		= seq_read,
2030	.llseek		= seq_lseek,
2031	.release	= proc_map_release,
2032};
2033
2034#endif /* CONFIG_NUMA */
v4.10.11
   1#include <linux/mm.h>
   2#include <linux/vmacache.h>
 
   3#include <linux/hugetlb.h>
   4#include <linux/huge_mm.h>
   5#include <linux/mount.h>
   6#include <linux/seq_file.h>
   7#include <linux/highmem.h>
   8#include <linux/ptrace.h>
   9#include <linux/slab.h>
  10#include <linux/pagemap.h>
  11#include <linux/mempolicy.h>
  12#include <linux/rmap.h>
  13#include <linux/swap.h>
 
  14#include <linux/swapops.h>
  15#include <linux/mmu_notifier.h>
  16#include <linux/page_idle.h>
  17#include <linux/shmem_fs.h>
 
 
  18
  19#include <asm/elf.h>
  20#include <linux/uaccess.h>
  21#include <asm/tlbflush.h>
  22#include "internal.h"
  23
 
 
  24void task_mem(struct seq_file *m, struct mm_struct *mm)
  25{
  26	unsigned long text, lib, swap, ptes, pmds, anon, file, shmem;
  27	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
  28
  29	anon = get_mm_counter(mm, MM_ANONPAGES);
  30	file = get_mm_counter(mm, MM_FILEPAGES);
  31	shmem = get_mm_counter(mm, MM_SHMEMPAGES);
  32
  33	/*
  34	 * Note: to minimize their overhead, mm maintains hiwater_vm and
  35	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
  36	 * collector of these hiwater stats must therefore get total_vm
  37	 * and rss too, which will usually be the higher.  Barriers? not
  38	 * worth the effort, such snapshots can always be inconsistent.
  39	 */
  40	hiwater_vm = total_vm = mm->total_vm;
  41	if (hiwater_vm < mm->hiwater_vm)
  42		hiwater_vm = mm->hiwater_vm;
  43	hiwater_rss = total_rss = anon + file + shmem;
  44	if (hiwater_rss < mm->hiwater_rss)
  45		hiwater_rss = mm->hiwater_rss;
  46
  47	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
  48	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
 
 
 
  49	swap = get_mm_counter(mm, MM_SWAPENTS);
  50	ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
  51	pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
  52	seq_printf(m,
  53		"VmPeak:\t%8lu kB\n"
  54		"VmSize:\t%8lu kB\n"
  55		"VmLck:\t%8lu kB\n"
  56		"VmPin:\t%8lu kB\n"
  57		"VmHWM:\t%8lu kB\n"
  58		"VmRSS:\t%8lu kB\n"
  59		"RssAnon:\t%8lu kB\n"
  60		"RssFile:\t%8lu kB\n"
  61		"RssShmem:\t%8lu kB\n"
  62		"VmData:\t%8lu kB\n"
  63		"VmStk:\t%8lu kB\n"
  64		"VmExe:\t%8lu kB\n"
  65		"VmLib:\t%8lu kB\n"
  66		"VmPTE:\t%8lu kB\n"
  67		"VmPMD:\t%8lu kB\n"
  68		"VmSwap:\t%8lu kB\n",
  69		hiwater_vm << (PAGE_SHIFT-10),
  70		total_vm << (PAGE_SHIFT-10),
  71		mm->locked_vm << (PAGE_SHIFT-10),
  72		mm->pinned_vm << (PAGE_SHIFT-10),
  73		hiwater_rss << (PAGE_SHIFT-10),
  74		total_rss << (PAGE_SHIFT-10),
  75		anon << (PAGE_SHIFT-10),
  76		file << (PAGE_SHIFT-10),
  77		shmem << (PAGE_SHIFT-10),
  78		mm->data_vm << (PAGE_SHIFT-10),
  79		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
  80		ptes >> 10,
  81		pmds >> 10,
  82		swap << (PAGE_SHIFT-10));
  83	hugetlb_report_usage(m, mm);
  84}
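The fields emitted above land in /proc/<pid>/status; a small illustrative reader (user space, not kernel code) that picks out two of them might look like this:

/* Illustrative user-space sketch, not kernel code. */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/self/status", "r");

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f))
                if (!strncmp(line, "VmRSS:", 6) || !strncmp(line, "VmSwap:", 7))
                        fputs(line, stdout);
        fclose(f);
        return 0;
}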
 
  85
  86unsigned long task_vsize(struct mm_struct *mm)
  87{
  88	return PAGE_SIZE * mm->total_vm;
  89}
  90
  91unsigned long task_statm(struct mm_struct *mm,
  92			 unsigned long *shared, unsigned long *text,
  93			 unsigned long *data, unsigned long *resident)
  94{
  95	*shared = get_mm_counter(mm, MM_FILEPAGES) +
  96			get_mm_counter(mm, MM_SHMEMPAGES);
  97	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
  98								>> PAGE_SHIFT;
  99	*data = mm->data_vm + mm->stack_vm;
 100	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
 101	return mm->total_vm;
 102}
 103
 104#ifdef CONFIG_NUMA
 105/*
 106 * Save get_task_policy() for show_numa_map().
 107 */
 108static void hold_task_mempolicy(struct proc_maps_private *priv)
 109{
 110	struct task_struct *task = priv->task;
 111
 112	task_lock(task);
 113	priv->task_mempolicy = get_task_policy(task);
 114	mpol_get(priv->task_mempolicy);
 115	task_unlock(task);
 116}
 117static void release_task_mempolicy(struct proc_maps_private *priv)
 118{
 119	mpol_put(priv->task_mempolicy);
 120}
 121#else
 122static void hold_task_mempolicy(struct proc_maps_private *priv)
 123{
 124}
 125static void release_task_mempolicy(struct proc_maps_private *priv)
 126{
 127}
 128#endif
 129
 130static void vma_stop(struct proc_maps_private *priv)
 
 131{
 132	struct mm_struct *mm = priv->mm;
 133
 134	release_task_mempolicy(priv);
 135	up_read(&mm->mmap_sem);
 136	mmput(mm);
 137}
 138
 139static struct vm_area_struct *
 140m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
 141{
 142	if (vma == priv->tail_vma)
 143		return NULL;
 144	return vma->vm_next ?: priv->tail_vma;
 145}
 146
 147static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
 148{
 149	if (m->count < m->size)	/* vma is copied successfully */
 150		m->version = m_next_vma(m->private, vma) ? vma->vm_end : -1UL;
 151}
 152
 153static void *m_start(struct seq_file *m, loff_t *ppos)
 154{
 155	struct proc_maps_private *priv = m->private;
 156	unsigned long last_addr = m->version;
 157	struct mm_struct *mm;
 158	struct vm_area_struct *vma;
 159	unsigned int pos = *ppos;
 160
 161	/* See m_cache_vma(). Zero at the start or after lseek. */
 162	if (last_addr == -1UL)
 163		return NULL;
 164
 165	priv->task = get_proc_task(priv->inode);
 166	if (!priv->task)
 167		return ERR_PTR(-ESRCH);
 168
 169	mm = priv->mm;
 170	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
 
 
 171		return NULL;
 172
 173	down_read(&mm->mmap_sem);
 174	hold_task_mempolicy(priv);
 175	priv->tail_vma = get_gate_vma(mm);
 176
 177	if (last_addr) {
 178		vma = find_vma(mm, last_addr - 1);
 179		if (vma && vma->vm_start <= last_addr)
 180			vma = m_next_vma(priv, vma);
 181		if (vma)
 182			return vma;
 183	}
 184
 185	m->version = 0;
 186	if (pos < mm->map_count) {
 187		for (vma = mm->mmap; pos; pos--) {
 188			m->version = vma->vm_start;
 189			vma = vma->vm_next;
 190		}
 191		return vma;
 192	}
 193
 194	/* we do not bother to update m->version in this case */
 195	if (pos == mm->map_count && priv->tail_vma)
 196		return priv->tail_vma;
 
 197
 198	vma_stop(priv);
 199	return NULL;
 200}
 201
 202static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 203{
 204	struct proc_maps_private *priv = m->private;
 205	struct vm_area_struct *next;
 206
 207	(*pos)++;
 208	next = m_next_vma(priv, v);
 209	if (!next)
 210		vma_stop(priv);
 211	return next;
 212}
 213
 214static void m_stop(struct seq_file *m, void *v)
 215{
 216	struct proc_maps_private *priv = m->private;
 
 217
 218	if (!IS_ERR_OR_NULL(v))
 219		vma_stop(priv);
 220	if (priv->task) {
 221		put_task_struct(priv->task);
 222		priv->task = NULL;
 223	}
 
 
 224}
 225
 226static int proc_maps_open(struct inode *inode, struct file *file,
 227			const struct seq_operations *ops, int psize)
 228{
 229	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
 230
 231	if (!priv)
 232		return -ENOMEM;
 233
 234	priv->inode = inode;
 235	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
 236	if (IS_ERR(priv->mm)) {
 237		int err = PTR_ERR(priv->mm);
 238
 239		seq_release_private(inode, file);
 240		return err;
 241	}
 242
 243	return 0;
 244}
 245
 246static int proc_map_release(struct inode *inode, struct file *file)
 247{
 248	struct seq_file *seq = file->private_data;
 249	struct proc_maps_private *priv = seq->private;
 250
 251	if (priv->mm)
 252		mmdrop(priv->mm);
 253
 254	return seq_release_private(inode, file);
 255}
 256
 257static int do_maps_open(struct inode *inode, struct file *file,
 258			const struct seq_operations *ops)
 259{
 260	return proc_maps_open(inode, file, ops,
 261				sizeof(struct proc_maps_private));
 262}
 263
 264/*
 265 * Indicate if the VMA is a stack for the given task; for
 266 * /proc/PID/maps that is the stack of the main task.
 267 */
 268static int is_stack(struct proc_maps_private *priv,
 269		    struct vm_area_struct *vma)
 270{
 271	/*
 272	 * We make no effort to guess what a given thread considers to be
 273	 * its "stack".  It's not even well-defined for programs written
 274	 * in languages like Go.
 275	 */
 276	return vma->vm_start <= vma->vm_mm->start_stack &&
 277		vma->vm_end >= vma->vm_mm->start_stack;
 278}
 279
 280static void
 281show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 282{
 
 283	struct mm_struct *mm = vma->vm_mm;
 284	struct file *file = vma->vm_file;
 285	struct proc_maps_private *priv = m->private;
 286	vm_flags_t flags = vma->vm_flags;
 287	unsigned long ino = 0;
 288	unsigned long long pgoff = 0;
 289	unsigned long start, end;
 290	dev_t dev = 0;
 291	const char *name = NULL;
 292
 293	if (file) {
 294		struct inode *inode = file_inode(vma->vm_file);
 295		dev = inode->i_sb->s_dev;
 296		ino = inode->i_ino;
 297		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
 298	}
 299
 300	/* We don't show the stack guard page in /proc/maps */
 301	start = vma->vm_start;
 302	if (stack_guard_page_start(vma, start))
 303		start += PAGE_SIZE;
 304	end = vma->vm_end;
 305	if (stack_guard_page_end(vma, end))
 306		end -= PAGE_SIZE;
 307
 308	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 309	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
 310			start,
 311			end,
 312			flags & VM_READ ? 'r' : '-',
 313			flags & VM_WRITE ? 'w' : '-',
 314			flags & VM_EXEC ? 'x' : '-',
 315			flags & VM_MAYSHARE ? 's' : 'p',
 316			pgoff,
 317			MAJOR(dev), MINOR(dev), ino);
 318
 319	/*
 320	 * Print the dentry name for named mappings, and a
 321	 * special [heap] marker for the heap:
 322	 */
 323	if (file) {
 324		seq_pad(m, ' ');
 325		seq_file_path(m, file, "\n");
 326		goto done;
 327	}
 328
 329	if (vma->vm_ops && vma->vm_ops->name) {
 330		name = vma->vm_ops->name(vma);
 331		if (name)
 332			goto done;
 333	}
 334
 335	name = arch_vma_name(vma);
 336	if (!name) {
 337		if (!mm) {
 338			name = "[vdso]";
 339			goto done;
 340		}
 341
 342		if (vma->vm_start <= mm->brk &&
 343		    vma->vm_end >= mm->start_brk) {
 344			name = "[heap]";
 345			goto done;
 346		}
 347
 348		if (is_stack(priv, vma))
 349			name = "[stack]";
 350	}
 351
 352done:
 353	if (name) {
 354		seq_pad(m, ' ');
 355		seq_puts(m, name);
 356	}
 357	seq_putc(m, '\n');
 358}
 359
 360static int show_map(struct seq_file *m, void *v, int is_pid)
 361{
 362	show_map_vma(m, v, is_pid);
 363	m_cache_vma(m, v);
 364	return 0;
 365}
 366
 367static int show_pid_map(struct seq_file *m, void *v)
 368{
 369	return show_map(m, v, 1);
 370}
 371
 372static int show_tid_map(struct seq_file *m, void *v)
 373{
 374	return show_map(m, v, 0);
 375}
 376
 377static const struct seq_operations proc_pid_maps_op = {
 378	.start	= m_start,
 379	.next	= m_next,
 380	.stop	= m_stop,
 381	.show	= show_pid_map
 382};
 383
 384static const struct seq_operations proc_tid_maps_op = {
 385	.start	= m_start,
 386	.next	= m_next,
 387	.stop	= m_stop,
 388	.show	= show_tid_map
 389};
 390
 391static int pid_maps_open(struct inode *inode, struct file *file)
 392{
 393	return do_maps_open(inode, file, &proc_pid_maps_op);
 394}
 395
 396static int tid_maps_open(struct inode *inode, struct file *file)
 397{
 398	return do_maps_open(inode, file, &proc_tid_maps_op);
 399}
 400
 401const struct file_operations proc_pid_maps_operations = {
 402	.open		= pid_maps_open,
 403	.read		= seq_read,
 404	.llseek		= seq_lseek,
 405	.release	= proc_map_release,
 406};
 407
 408const struct file_operations proc_tid_maps_operations = {
 409	.open		= tid_maps_open,
 410	.read		= seq_read,
 411	.llseek		= seq_lseek,
 412	.release	= proc_map_release,
 413};
 414
 415/*
 416 * Proportional Set Size(PSS): my share of RSS.
 417 *
 418 * PSS of a process is the count of pages it has in memory, where each
 419 * page is divided by the number of processes sharing it.  So if a
 420 * process has 1000 pages all to itself, and 1000 shared with one other
 421 * process, its PSS will be 1500.
 422 *
 423 * To keep (accumulated) division errors low, we adopt a 64bit
 424 * fixed-point pss counter to minimize division errors. So (pss >>
 425 * PSS_SHIFT) would be the real byte count.
 426 *
 427 * A shift of 12 before division means (assuming 4K page size):
 428 * 	- 1M 3-user-pages add up to 8KB errors;
 429 * 	- supports mapcount up to 2^24, or 16M;
 430 * 	- supports PSS up to 2^52 bytes, or 4PB.
 431 */
 432#define PSS_SHIFT 12
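To make the fixed-point scheme above concrete, here is a small stand-alone sketch (user-space C, not kernel code); the EXAMPLE_* names are local to the sketch. A 4 KiB page shared by three processes contributes PAGE_SIZE/3, roughly 1365 bytes, and keeping the intermediate value shifted left by PSS_SHIFT bounds the per-page rounding error:

/* Illustrative user-space sketch, not kernel code.
 * EXAMPLE_* constants mirror the values discussed in the comment above. */
#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE       4096
#define EXAMPLE_PSS_SHIFT       12

int main(void)
{
        uint64_t pss = 0;
        int mapcount = 3;       /* page shared by three processes */

        /* one page's contribution, kept in fixed point */
        pss += ((uint64_t)EXAMPLE_PAGE_SIZE << EXAMPLE_PSS_SHIFT) / mapcount;

        /* the value a reader would eventually see, in bytes (~1365) */
        printf("pss contribution = %llu bytes\n",
               (unsigned long long)(pss >> EXAMPLE_PSS_SHIFT));
        return 0;
}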
 433
 434#ifdef CONFIG_PROC_PAGE_MONITOR
 435struct mem_size_stats {
 436	unsigned long resident;
 437	unsigned long shared_clean;
 438	unsigned long shared_dirty;
 439	unsigned long private_clean;
 440	unsigned long private_dirty;
 441	unsigned long referenced;
 442	unsigned long anonymous;
 
 443	unsigned long anonymous_thp;
 444	unsigned long shmem_thp;
 
 445	unsigned long swap;
 446	unsigned long shared_hugetlb;
 447	unsigned long private_hugetlb;
 448	u64 pss;
 449	u64 swap_pss;
 450	bool check_shmem_swap;
 451};
 452
 453static void smaps_account(struct mem_size_stats *mss, struct page *page,
 454		bool compound, bool young, bool dirty)
 
 455{
 456	int i, nr = compound ? 1 << compound_order(page) : 1;
 457	unsigned long size = nr * PAGE_SIZE;
 458
 459	if (PageAnon(page))
 
 
 
 
 460		mss->anonymous += size;
 
 
 
 461
 462	mss->resident += size;
 463	/* Accumulate the size in pages that have been accessed. */
 464	if (young || page_is_young(page) || PageReferenced(page))
 465		mss->referenced += size;
 466
 467	/*
 
 
 
 468	 * page_count(page) == 1 guarantees the page is mapped exactly once.
 469	 * If any subpage of the compound page mapped with PTE it would elevate
 470	 * page_count().
 471	 */
 472	if (page_count(page) == 1) {
 473		if (dirty || PageDirty(page))
 474			mss->private_dirty += size;
 475		else
 476			mss->private_clean += size;
 477		mss->pss += (u64)size << PSS_SHIFT;
 478		return;
 479	}
 480
 481	for (i = 0; i < nr; i++, page++) {
 482		int mapcount = page_mapcount(page);
 483
 484		if (mapcount >= 2) {
 485			if (dirty || PageDirty(page))
 486				mss->shared_dirty += PAGE_SIZE;
 487			else
 488				mss->shared_clean += PAGE_SIZE;
 489			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
 490		} else {
 491			if (dirty || PageDirty(page))
 492				mss->private_dirty += PAGE_SIZE;
 493			else
 494				mss->private_clean += PAGE_SIZE;
 495			mss->pss += PAGE_SIZE << PSS_SHIFT;
 496		}
 497	}
 498}
 499
 500#ifdef CONFIG_SHMEM
 501static int smaps_pte_hole(unsigned long addr, unsigned long end,
 502		struct mm_walk *walk)
 503{
 504	struct mem_size_stats *mss = walk->private;
 
 505
 506	mss->swap += shmem_partial_swap_usage(
 507			walk->vma->vm_file->f_mapping, addr, end);
 
 508
 509	return 0;
 510}
 511#endif
 
 512
 513static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 514		struct mm_walk *walk)
 515{
 516	struct mem_size_stats *mss = walk->private;
 517	struct vm_area_struct *vma = walk->vma;
 
 518	struct page *page = NULL;
 
 519
 520	if (pte_present(*pte)) {
 521		page = vm_normal_page(vma, addr, *pte);
 
 
 522	} else if (is_swap_pte(*pte)) {
 523		swp_entry_t swpent = pte_to_swp_entry(*pte);
 524
 525		if (!non_swap_entry(swpent)) {
 526			int mapcount;
 527
 528			mss->swap += PAGE_SIZE;
 529			mapcount = swp_swapcount(swpent);
 530			if (mapcount >= 2) {
 531				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
 532
 533				do_div(pss_delta, mapcount);
 534				mss->swap_pss += pss_delta;
 535			} else {
 536				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
 537			}
 538		} else if (is_migration_entry(swpent))
 539			page = migration_entry_to_page(swpent);
 540	} else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
 541							&& pte_none(*pte))) {
 542		page = find_get_entry(vma->vm_file->f_mapping,
 543						linear_page_index(vma, addr));
 544		if (!page)
 545			return;
 546
 547		if (radix_tree_exceptional_entry(page))
 548			mss->swap += PAGE_SIZE;
 549		else
 550			put_page(page);
 551
 552		return;
 553	}
 554
 555	if (!page)
 556		return;
 557
 558	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
 559}
 560
 561#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 562static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 563		struct mm_walk *walk)
 564{
 565	struct mem_size_stats *mss = walk->private;
 566	struct vm_area_struct *vma = walk->vma;
 567	struct page *page;
 
 
 568
 569	/* FOLL_DUMP will return -EFAULT on huge zero page */
 570	page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
 571	if (IS_ERR_OR_NULL(page))
 572		return;
 573	if (PageAnon(page))
 574		mss->anonymous_thp += HPAGE_PMD_SIZE;
 575	else if (PageSwapBacked(page))
 576		mss->shmem_thp += HPAGE_PMD_SIZE;
 577	else if (is_zone_device_page(page))
 578		/* pass */;
 579	else
 580		VM_BUG_ON_PAGE(1, page);
 581	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
 
 
 582}
 583#else
 584static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 585		struct mm_walk *walk)
 586{
 587}
 588#endif
 589
 590static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 591			   struct mm_walk *walk)
 592{
 593	struct vm_area_struct *vma = walk->vma;
 594	pte_t *pte;
 595	spinlock_t *ptl;
 596
 597	ptl = pmd_trans_huge_lock(pmd, vma);
 598	if (ptl) {
 599		smaps_pmd_entry(pmd, addr, walk);
 600		spin_unlock(ptl);
 601		return 0;
 602	}
 603
 604	if (pmd_trans_unstable(pmd))
 605		return 0;
 606	/*
 607	 * The mmap_sem held all the way back in m_start() is what
 608	 * keeps khugepaged out of here and from collapsing things
 609	 * in here.
 610	 */
 611	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 612	for (; addr != end; pte++, addr += PAGE_SIZE)
 613		smaps_pte_entry(pte, addr, walk);
 614	pte_unmap_unlock(pte - 1, ptl);
 
 615	cond_resched();
 616	return 0;
 617}
 618
 619static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
 620{
 621	/*
 622	 * Don't forget to update Documentation/ on changes.
 623	 */
 624	static const char mnemonics[BITS_PER_LONG][2] = {
 625		/*
 626		 * In case if we meet a flag we don't know about.
 627		 */
 628		[0 ... (BITS_PER_LONG-1)] = "??",
 629
 630		[ilog2(VM_READ)]	= "rd",
 631		[ilog2(VM_WRITE)]	= "wr",
 632		[ilog2(VM_EXEC)]	= "ex",
 633		[ilog2(VM_SHARED)]	= "sh",
 634		[ilog2(VM_MAYREAD)]	= "mr",
 635		[ilog2(VM_MAYWRITE)]	= "mw",
 636		[ilog2(VM_MAYEXEC)]	= "me",
 637		[ilog2(VM_MAYSHARE)]	= "ms",
 638		[ilog2(VM_GROWSDOWN)]	= "gd",
 639		[ilog2(VM_PFNMAP)]	= "pf",
 640		[ilog2(VM_DENYWRITE)]	= "dw",
 641#ifdef CONFIG_X86_INTEL_MPX
 642		[ilog2(VM_MPX)]		= "mp",
 643#endif
 644		[ilog2(VM_LOCKED)]	= "lo",
 645		[ilog2(VM_IO)]		= "io",
 646		[ilog2(VM_SEQ_READ)]	= "sr",
 647		[ilog2(VM_RAND_READ)]	= "rr",
 648		[ilog2(VM_DONTCOPY)]	= "dc",
 649		[ilog2(VM_DONTEXPAND)]	= "de",
 
 650		[ilog2(VM_ACCOUNT)]	= "ac",
 651		[ilog2(VM_NORESERVE)]	= "nr",
 652		[ilog2(VM_HUGETLB)]	= "ht",
 
 653		[ilog2(VM_ARCH_1)]	= "ar",
 
 654		[ilog2(VM_DONTDUMP)]	= "dd",
 
 
 
 655#ifdef CONFIG_MEM_SOFT_DIRTY
 656		[ilog2(VM_SOFTDIRTY)]	= "sd",
 657#endif
 658		[ilog2(VM_MIXEDMAP)]	= "mm",
 659		[ilog2(VM_HUGEPAGE)]	= "hg",
 660		[ilog2(VM_NOHUGEPAGE)]	= "nh",
 661		[ilog2(VM_MERGEABLE)]	= "mg",
 662		[ilog2(VM_UFFD_MISSING)]= "um",
 663		[ilog2(VM_UFFD_WP)]	= "uw",
 664#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 
 
 
 
 665		/* These come out via ProtectionKey: */
 666		[ilog2(VM_PKEY_BIT0)]	= "",
 667		[ilog2(VM_PKEY_BIT1)]	= "",
 668		[ilog2(VM_PKEY_BIT2)]	= "",
 669		[ilog2(VM_PKEY_BIT3)]	= "",
 
 
 670#endif
 
 
 
 
 671	};
 672	size_t i;
 673
 674	seq_puts(m, "VmFlags: ");
 675	for (i = 0; i < BITS_PER_LONG; i++) {
 676		if (!mnemonics[i][0])
 677			continue;
 678		if (vma->vm_flags & (1UL << i)) {
 679			seq_printf(m, "%c%c ",
 680				   mnemonics[i][0], mnemonics[i][1]);
 
 681		}
 682	}
 683	seq_putc(m, '\n');
 684}
 685
 686#ifdef CONFIG_HUGETLB_PAGE
 687static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
 688				 unsigned long addr, unsigned long end,
 689				 struct mm_walk *walk)
 690{
 691	struct mem_size_stats *mss = walk->private;
 692	struct vm_area_struct *vma = walk->vma;
 693	struct page *page = NULL;
 694
 695	if (pte_present(*pte)) {
 696		page = vm_normal_page(vma, addr, *pte);
 697	} else if (is_swap_pte(*pte)) {
 698		swp_entry_t swpent = pte_to_swp_entry(*pte);
 699
 700		if (is_migration_entry(swpent))
 701			page = migration_entry_to_page(swpent);
 702	}
 703	if (page) {
 704		int mapcount = page_mapcount(page);
 705
 706		if (mapcount >= 2)
 707			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
 708		else
 709			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
 710	}
 711	return 0;
 712}
 
 
 713#endif /* HUGETLB_PAGE */
 714
 715void __weak arch_show_smap(struct seq_file *m, struct vm_area_struct *vma)
 716{
 717}
 718
 719static int show_smap(struct seq_file *m, void *v, int is_pid)
 720{
 721	struct vm_area_struct *vma = v;
 722	struct mem_size_stats mss;
 723	struct mm_walk smaps_walk = {
 724		.pmd_entry = smaps_pte_range,
 725#ifdef CONFIG_HUGETLB_PAGE
 726		.hugetlb_entry = smaps_hugetlb_range,
 727#endif
 728		.mm = vma->vm_mm,
 729		.private = &mss,
 730	};
 731
 732	memset(&mss, 0, sizeof mss);
 
 
 733
 734#ifdef CONFIG_SHMEM
 735	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
 736		/*
 737		 * For shared or readonly shmem mappings we know that all
 738		 * swapped out pages belong to the shmem object, and we can
 739		 * obtain the swap value much more efficiently. For private
 740		 * writable mappings, we might have COW pages that are
 741		 * not affected by the parent swapped out pages of the shmem
 742		 * object, so we have to distinguish them during the page walk.
 743		 * Unless we know that the shmem object (or the part mapped by
 744		 * our VMA) has no swapped out pages at all.
 745		 */
 746		unsigned long shmem_swapped = shmem_swap_usage(vma);
 747
 748		if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
 749					!(vma->vm_flags & VM_WRITE)) {
 750			mss.swap = shmem_swapped;
 751		} else {
 752			mss.check_shmem_swap = true;
 753			smaps_walk.pte_hole = smaps_pte_hole;
 754		}
 755	}
 756#endif
 757
 758	/* mmap_sem is held in m_start */
 759	walk_page_vma(vma, &smaps_walk);
 760
 761	show_map_vma(m, vma, is_pid);
 762
 763	seq_printf(m,
 764		   "Size:           %8lu kB\n"
 765		   "Rss:            %8lu kB\n"
 766		   "Pss:            %8lu kB\n"
 767		   "Shared_Clean:   %8lu kB\n"
 768		   "Shared_Dirty:   %8lu kB\n"
 769		   "Private_Clean:  %8lu kB\n"
 770		   "Private_Dirty:  %8lu kB\n"
 771		   "Referenced:     %8lu kB\n"
 772		   "Anonymous:      %8lu kB\n"
 773		   "AnonHugePages:  %8lu kB\n"
 774		   "ShmemPmdMapped: %8lu kB\n"
 775		   "Shared_Hugetlb: %8lu kB\n"
 776		   "Private_Hugetlb: %7lu kB\n"
 777		   "Swap:           %8lu kB\n"
 778		   "SwapPss:        %8lu kB\n"
 779		   "KernelPageSize: %8lu kB\n"
 780		   "MMUPageSize:    %8lu kB\n"
 781		   "Locked:         %8lu kB\n",
 782		   (vma->vm_end - vma->vm_start) >> 10,
 783		   mss.resident >> 10,
 784		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
 785		   mss.shared_clean  >> 10,
 786		   mss.shared_dirty  >> 10,
 787		   mss.private_clean >> 10,
 788		   mss.private_dirty >> 10,
 789		   mss.referenced >> 10,
 790		   mss.anonymous >> 10,
 791		   mss.anonymous_thp >> 10,
 792		   mss.shmem_thp >> 10,
 793		   mss.shared_hugetlb >> 10,
 794		   mss.private_hugetlb >> 10,
 795		   mss.swap >> 10,
 796		   (unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)),
 797		   vma_kernel_pagesize(vma) >> 10,
 798		   vma_mmu_pagesize(vma) >> 10,
 799		   (vma->vm_flags & VM_LOCKED) ?
 800			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
 801
 802	arch_show_smap(m, vma);
 803	show_smap_vma_flags(m, vma);
 804	m_cache_vma(m, vma);
 805	return 0;
 806}
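A common consumer of the output assembled by show_smap() simply adds up the Pss: lines for a process; an illustrative user-space sketch (not kernel code, minimal error handling):

/* Illustrative user-space sketch, not kernel code. */
#include <stdio.h>

int main(void)
{
        char line[256];
        unsigned long kb, total = 0;
        FILE *f = fopen("/proc/self/smaps", "r");

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f))
                if (sscanf(line, "Pss: %lu kB", &kb) == 1)
                        total += kb;
        fclose(f);
        printf("total PSS: %lu kB\n", total);
        return 0;
}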
 807
 808static int show_pid_smap(struct seq_file *m, void *v)
 809{
 810	return show_smap(m, v, 1);
 811}
 812
 813static int show_tid_smap(struct seq_file *m, void *v)
 814{
 815	return show_smap(m, v, 0);
 816}
 
 817
 818static const struct seq_operations proc_pid_smaps_op = {
 819	.start	= m_start,
 820	.next	= m_next,
 821	.stop	= m_stop,
 822	.show	= show_pid_smap
 823};
 824
 825static const struct seq_operations proc_tid_smaps_op = {
 826	.start	= m_start,
 827	.next	= m_next,
 828	.stop	= m_stop,
 829	.show	= show_tid_smap
 830};
 831
 832static int pid_smaps_open(struct inode *inode, struct file *file)
 833{
 834	return do_maps_open(inode, file, &proc_pid_smaps_op);
 835}
 836
 837static int tid_smaps_open(struct inode *inode, struct file *file)
 838{
 839	return do_maps_open(inode, file, &proc_tid_smaps_op);
 840}
 841
 842const struct file_operations proc_pid_smaps_operations = {
 843	.open		= pid_smaps_open,
 844	.read		= seq_read,
 845	.llseek		= seq_lseek,
 846	.release	= proc_map_release,
 847};
 848
 849const struct file_operations proc_tid_smaps_operations = {
 850	.open		= tid_smaps_open,
 851	.read		= seq_read,
 852	.llseek		= seq_lseek,
 853	.release	= proc_map_release,
 854};
 855
 856enum clear_refs_types {
 857	CLEAR_REFS_ALL = 1,
 858	CLEAR_REFS_ANON,
 859	CLEAR_REFS_MAPPED,
 860	CLEAR_REFS_SOFT_DIRTY,
 861	CLEAR_REFS_MM_HIWATER_RSS,
 862	CLEAR_REFS_LAST,
 863};
 864
 865struct clear_refs_private {
 866	enum clear_refs_types type;
 867};
 868
 869#ifdef CONFIG_MEM_SOFT_DIRTY
 870static inline void clear_soft_dirty(struct vm_area_struct *vma,
 871		unsigned long addr, pte_t *pte)
 872{
 873	/*
 874	 * The soft-dirty tracker uses #PF-s to catch writes
 875	 * to pages, so write-protect the pte as well. See the
 876	 * Documentation/vm/soft-dirty.txt for full description
 877	 * of how soft-dirty works.
 878	 */
 879	pte_t ptent = *pte;
 880
 881	if (pte_present(ptent)) {
 882		ptent = ptep_modify_prot_start(vma->vm_mm, addr, pte);
 883		ptent = pte_wrprotect(ptent);
 
 
 
 
 884		ptent = pte_clear_soft_dirty(ptent);
 885		ptep_modify_prot_commit(vma->vm_mm, addr, pte, ptent);
 886	} else if (is_swap_pte(ptent)) {
 887		ptent = pte_swp_clear_soft_dirty(ptent);
 888		set_pte_at(vma->vm_mm, addr, pte, ptent);
 889	}
 890}
 891#else
 892static inline void clear_soft_dirty(struct vm_area_struct *vma,
 893		unsigned long addr, pte_t *pte)
 894{
 895}
 896#endif
 897
 898#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
 899static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 900		unsigned long addr, pmd_t *pmdp)
 901{
 902	pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
 903
 904	pmd = pmd_wrprotect(pmd);
 905	pmd = pmd_clear_soft_dirty(pmd);
 906
 907	set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
 908}
 909#else
 910static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 911		unsigned long addr, pmd_t *pmdp)
 912{
 913}
 914#endif
 915
 916static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 917				unsigned long end, struct mm_walk *walk)
 918{
 919	struct clear_refs_private *cp = walk->private;
 920	struct vm_area_struct *vma = walk->vma;
 921	pte_t *pte, ptent;
 922	spinlock_t *ptl;
 923	struct page *page;
 924
 925	ptl = pmd_trans_huge_lock(pmd, vma);
 926	if (ptl) {
 927		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
 928			clear_soft_dirty_pmd(vma, addr, pmd);
 929			goto out;
 930		}
 931
 
 
 
 932		page = pmd_page(*pmd);
 933
 934		/* Clear accessed and referenced bits. */
 935		pmdp_test_and_clear_young(vma, addr, pmd);
 936		test_and_clear_page_young(page);
 937		ClearPageReferenced(page);
 938out:
 939		spin_unlock(ptl);
 940		return 0;
 941	}
 942
 943	if (pmd_trans_unstable(pmd))
 944		return 0;
 945
 946	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 947	for (; addr != end; pte++, addr += PAGE_SIZE) {
 948		ptent = *pte;
 949
 950		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
 951			clear_soft_dirty(vma, addr, pte);
 952			continue;
 953		}
 954
 955		if (!pte_present(ptent))
 956			continue;
 957
 958		page = vm_normal_page(vma, addr, ptent);
 959		if (!page)
 960			continue;
 961
 962		/* Clear accessed and referenced bits. */
 963		ptep_test_and_clear_young(vma, addr, pte);
 964		test_and_clear_page_young(page);
 965		ClearPageReferenced(page);
 966	}
 967	pte_unmap_unlock(pte - 1, ptl);
 968	cond_resched();
 969	return 0;
 970}
 971
 972static int clear_refs_test_walk(unsigned long start, unsigned long end,
 973				struct mm_walk *walk)
 974{
 975	struct clear_refs_private *cp = walk->private;
 976	struct vm_area_struct *vma = walk->vma;
 977
 978	if (vma->vm_flags & VM_PFNMAP)
 979		return 1;
 980
 981	/*
 982	 * Writing 1 to /proc/pid/clear_refs affects all pages.
 983	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
 984	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
 985	 * Writing 4 to /proc/pid/clear_refs affects all pages.
 986	 */
 987	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
 988		return 1;
 989	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
 990		return 1;
 991	return 0;
 992}
 993
 994static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 995				size_t count, loff_t *ppos)
 996{
 997	struct task_struct *task;
 998	char buffer[PROC_NUMBUF];
 999	struct mm_struct *mm;
1000	struct vm_area_struct *vma;
1001	enum clear_refs_types type;
1002	int itype;
1003	int rv;
1004
1005	memset(buffer, 0, sizeof(buffer));
1006	if (count > sizeof(buffer) - 1)
1007		count = sizeof(buffer) - 1;
1008	if (copy_from_user(buffer, buf, count))
1009		return -EFAULT;
1010	rv = kstrtoint(strstrip(buffer), 10, &itype);
1011	if (rv < 0)
1012		return rv;
1013	type = (enum clear_refs_types)itype;
1014	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
1015		return -EINVAL;
1016
1017	task = get_proc_task(file_inode(file));
1018	if (!task)
1019		return -ESRCH;
1020	mm = get_task_mm(task);
1021	if (mm) {
 
 
1022		struct clear_refs_private cp = {
1023			.type = type,
1024		};
1025		struct mm_walk clear_refs_walk = {
1026			.pmd_entry = clear_refs_pte_range,
1027			.test_walk = clear_refs_test_walk,
1028			.mm = mm,
1029			.private = &cp,
1030		};
1031
 
 
 
 
1032		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
1033			if (down_write_killable(&mm->mmap_sem)) {
1034				count = -EINTR;
1035				goto out_mm;
1036			}
1037
1038			/*
1039			 * Writing 5 to /proc/pid/clear_refs resets the peak
1040			 * resident set size to this mm's current rss value.
1041			 */
1042			reset_mm_hiwater_rss(mm);
1043			up_write(&mm->mmap_sem);
1044			goto out_mm;
1045		}
1046
1047		down_read(&mm->mmap_sem);
1048		if (type == CLEAR_REFS_SOFT_DIRTY) {
1049			for (vma = mm->mmap; vma; vma = vma->vm_next) {
1050				if (!(vma->vm_flags & VM_SOFTDIRTY))
1051					continue;
1052				up_read(&mm->mmap_sem);
1053				if (down_write_killable(&mm->mmap_sem)) {
1054					count = -EINTR;
1055					goto out_mm;
1056				}
1057				for (vma = mm->mmap; vma; vma = vma->vm_next) {
1058					vma->vm_flags &= ~VM_SOFTDIRTY;
1059					vma_set_page_prot(vma);
1060				}
1061				downgrade_write(&mm->mmap_sem);
1062				break;
1063			}
1064			mmu_notifier_invalidate_range_start(mm, 0, -1);
 
 
 
 
1065		}
1066		walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
1067		if (type == CLEAR_REFS_SOFT_DIRTY)
1068			mmu_notifier_invalidate_range_end(mm, 0, -1);
1069		flush_tlb_mm(mm);
1070		up_read(&mm->mmap_sem);
 
 
 
1071out_mm:
1072		mmput(mm);
1073	}
1074	put_task_struct(task);
1075
1076	return count;
1077}
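For illustration, clearing the soft-dirty bits of the calling process only takes a write of "4" (CLEAR_REFS_SOFT_DIRTY) to the file implemented above; a minimal user-space sketch, not kernel code:

/* Illustrative user-space sketch, not kernel code. */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/proc/self/clear_refs", O_WRONLY);

        if (fd < 0)
                return 1;
        /* "4" == CLEAR_REFS_SOFT_DIRTY in the enum above */
        if (write(fd, "4\n", 2) != 2) {
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}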
1078
1079const struct file_operations proc_clear_refs_operations = {
1080	.write		= clear_refs_write,
1081	.llseek		= noop_llseek,
1082};
1083
1084typedef struct {
1085	u64 pme;
1086} pagemap_entry_t;
1087
1088struct pagemapread {
1089	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
1090	pagemap_entry_t *buffer;
1091	bool show_pfn;
1092};
1093
1094#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
1095#define PAGEMAP_WALK_MASK	(PMD_MASK)
1096
1097#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
1098#define PM_PFRAME_BITS		55
1099#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1100#define PM_SOFT_DIRTY		BIT_ULL(55)
1101#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
 
1102#define PM_FILE			BIT_ULL(61)
1103#define PM_SWAP			BIT_ULL(62)
1104#define PM_PRESENT		BIT_ULL(63)
1105
1106#define PM_END_OF_BUFFER    1
1107
1108static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
1109{
1110	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
1111}
1112
1113static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
1114			  struct pagemapread *pm)
1115{
1116	pm->buffer[pm->pos++] = *pme;
1117	if (pm->pos >= pm->len)
1118		return PM_END_OF_BUFFER;
1119	return 0;
1120}
1121
1122static int pagemap_pte_hole(unsigned long start, unsigned long end,
1123				struct mm_walk *walk)
1124{
1125	struct pagemapread *pm = walk->private;
1126	unsigned long addr = start;
1127	int err = 0;
1128
1129	while (addr < end) {
1130		struct vm_area_struct *vma = find_vma(walk->mm, addr);
1131		pagemap_entry_t pme = make_pme(0, 0);
1132		/* End of address space hole, which we mark as non-present. */
1133		unsigned long hole_end;
1134
1135		if (vma)
1136			hole_end = min(end, vma->vm_start);
1137		else
1138			hole_end = end;
1139
1140		for (; addr < hole_end; addr += PAGE_SIZE) {
1141			err = add_to_pagemap(addr, &pme, pm);
1142			if (err)
1143				goto out;
1144		}
1145
1146		if (!vma)
1147			break;
1148
1149		/* Addresses in the VMA. */
1150		if (vma->vm_flags & VM_SOFTDIRTY)
1151			pme = make_pme(0, PM_SOFT_DIRTY);
1152		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
1153			err = add_to_pagemap(addr, &pme, pm);
1154			if (err)
1155				goto out;
1156		}
1157	}
1158out:
1159	return err;
1160}
1161
1162static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
1163		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1164{
1165	u64 frame = 0, flags = 0;
1166	struct page *page = NULL;
 
1167
1168	if (pte_present(pte)) {
1169		if (pm->show_pfn)
1170			frame = pte_pfn(pte);
1171		flags |= PM_PRESENT;
1172		page = vm_normal_page(vma, addr, pte);
1173		if (pte_soft_dirty(pte))
1174			flags |= PM_SOFT_DIRTY;
 
 
1175	} else if (is_swap_pte(pte)) {
1176		swp_entry_t entry;
1177		if (pte_swp_soft_dirty(pte))
1178			flags |= PM_SOFT_DIRTY;
 
 
1179		entry = pte_to_swp_entry(pte);
1180		frame = swp_type(entry) |
1181			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
1182		flags |= PM_SWAP;
1183		if (is_migration_entry(entry))
1184			page = migration_entry_to_page(entry);
 
 
 
1185	}
1186
1187	if (page && !PageAnon(page))
1188		flags |= PM_FILE;
1189	if (page && page_mapcount(page) == 1)
1190		flags |= PM_MMAP_EXCLUSIVE;
1191	if (vma->vm_flags & VM_SOFTDIRTY)
1192		flags |= PM_SOFT_DIRTY;
1193
1194	return make_pme(frame, flags);
1195}
1196
1197static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1198			     struct mm_walk *walk)
1199{
1200	struct vm_area_struct *vma = walk->vma;
1201	struct pagemapread *pm = walk->private;
1202	spinlock_t *ptl;
1203	pte_t *pte, *orig_pte;
1204	int err = 0;
 
 
1205
1206#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1207	ptl = pmd_trans_huge_lock(pmdp, vma);
1208	if (ptl) {
1209		u64 flags = 0, frame = 0;
1210		pmd_t pmd = *pmdp;
 
1211
1212		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd))
1213			flags |= PM_SOFT_DIRTY;
1214
1215		/*
1216		 * Currently pmd for thp is always present because thp
1217		 * can not be swapped-out, migrated, or HWPOISONed
1218		 * (split in such cases instead.)
1219		 * This if-check is just to prepare for future implementation.
1220		 */
1221		if (pmd_present(pmd)) {
1222			struct page *page = pmd_page(pmd);
1223
1224			if (page_mapcount(page) == 1)
1225				flags |= PM_MMAP_EXCLUSIVE;
1226
1227			flags |= PM_PRESENT;
 
 
 
 
1228			if (pm->show_pfn)
1229				frame = pmd_pfn(pmd) +
1230					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1231		}
1232
1233		for (; addr != end; addr += PAGE_SIZE) {
1234			pagemap_entry_t pme = make_pme(frame, flags);
1235
1236			err = add_to_pagemap(addr, &pme, pm);
1237			if (err)
1238				break;
1239			if (pm->show_pfn && (flags & PM_PRESENT))
1240				frame++;
 
 
 
 
1241		}
1242		spin_unlock(ptl);
1243		return err;
1244	}
1245
1246	if (pmd_trans_unstable(pmdp))
1247		return 0;
1248#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1249
1250	/*
1251	 * We can assume that @vma always points to a valid one and @end never
1252	 * goes beyond vma->vm_end.
1253	 */
1254	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
1255	for (; addr < end; pte++, addr += PAGE_SIZE) {
1256		pagemap_entry_t pme;
1257
1258		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
1259		err = add_to_pagemap(addr, &pme, pm);
1260		if (err)
1261			break;
1262	}
1263	pte_unmap_unlock(orig_pte, ptl);
1264
1265	cond_resched();
1266
1267	return err;
1268}
1269
1270#ifdef CONFIG_HUGETLB_PAGE
1271/* This function walks within one hugetlb entry in the single call */
1272static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
1273				 unsigned long addr, unsigned long end,
1274				 struct mm_walk *walk)
1275{
1276	struct pagemapread *pm = walk->private;
1277	struct vm_area_struct *vma = walk->vma;
1278	u64 flags = 0, frame = 0;
1279	int err = 0;
1280	pte_t pte;
1281
1282	if (vma->vm_flags & VM_SOFTDIRTY)
1283		flags |= PM_SOFT_DIRTY;
1284
1285	pte = huge_ptep_get(ptep);
1286	if (pte_present(pte)) {
1287		struct page *page = pte_page(pte);
1288
1289		if (!PageAnon(page))
1290			flags |= PM_FILE;
1291
1292		if (page_mapcount(page) == 1)
1293			flags |= PM_MMAP_EXCLUSIVE;
1294
 
 
 
1295		flags |= PM_PRESENT;
1296		if (pm->show_pfn)
1297			frame = pte_pfn(pte) +
1298				((addr & ~hmask) >> PAGE_SHIFT);
 
 
1299	}
1300
1301	for (; addr != end; addr += PAGE_SIZE) {
1302		pagemap_entry_t pme = make_pme(frame, flags);
1303
1304		err = add_to_pagemap(addr, &pme, pm);
1305		if (err)
1306			return err;
1307		if (pm->show_pfn && (flags & PM_PRESENT))
1308			frame++;
1309	}
1310
1311	cond_resched();
1312
1313	return err;
1314}
 
 
1315#endif /* HUGETLB_PAGE */
1316
1317/*
1318 * /proc/pid/pagemap - an array mapping virtual pages to pfns
1319 *
1320 * For each page in the address space, this file contains one 64-bit entry
1321 * consisting of the following:
1322 *
1323 * Bits 0-54  page frame number (PFN) if present
1324 * Bits 0-4   swap type if swapped
1325 * Bits 5-54  swap offset if swapped
1326 * Bit  55    pte is soft-dirty (see Documentation/vm/soft-dirty.txt)
1327 * Bit  56    page exclusively mapped
1328 * Bits 57-60 zero
 
1329 * Bit  61    page is file-page or shared-anon
1330 * Bit  62    page swapped
1331 * Bit  63    page present
1332 *
1333 * If the page is not present but in swap, then the PFN contains an
1334 * encoding of the swap file number and the page's offset into the
1335 * swap. Unmapped pages return a null PFN. This allows determining
1336 * precisely which pages are mapped (or in swap) and comparing mapped
1337 * pages between processes.
1338 *
1339 * Efficient users of this interface will use /proc/pid/maps to
1340 * determine which areas of memory are actually mapped and llseek to
1341 * skip over unmapped regions.
1342 */
1343static ssize_t pagemap_read(struct file *file, char __user *buf,
1344			    size_t count, loff_t *ppos)
1345{
1346	struct mm_struct *mm = file->private_data;
1347	struct pagemapread pm;
1348	struct mm_walk pagemap_walk = {};
1349	unsigned long src;
1350	unsigned long svpfn;
1351	unsigned long start_vaddr;
1352	unsigned long end_vaddr;
1353	int ret = 0, copied = 0;
1354
1355	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
1356		goto out;
1357
1358	ret = -EINVAL;
1359	/* file position must be aligned */
1360	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1361		goto out_mm;
1362
1363	ret = 0;
1364	if (!count)
1365		goto out_mm;
1366
1367	/* do not disclose physical addresses: attack vector */
1368	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
1369
1370	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1371	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
1372	ret = -ENOMEM;
1373	if (!pm.buffer)
1374		goto out_mm;
1375
1376	pagemap_walk.pmd_entry = pagemap_pmd_range;
1377	pagemap_walk.pte_hole = pagemap_pte_hole;
1378#ifdef CONFIG_HUGETLB_PAGE
1379	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
1380#endif
1381	pagemap_walk.mm = mm;
1382	pagemap_walk.private = &pm;
1383
1384	src = *ppos;
1385	svpfn = src / PM_ENTRY_BYTES;
1386	start_vaddr = svpfn << PAGE_SHIFT;
1387	end_vaddr = mm->task_size;
1388
1389	/* watch out for wraparound */
1390	if (svpfn > mm->task_size >> PAGE_SHIFT)
1391		start_vaddr = end_vaddr;
1392
1393	/*
1394	 * The odds are that this will stop walking way
1395	 * before end_vaddr, because the length of the
1396	 * user buffer is tracked in "pm", and the walk
1397	 * will stop when we hit the end of the buffer.
1398	 */
1399	ret = 0;
1400	while (count && (start_vaddr < end_vaddr)) {
1401		int len;
1402		unsigned long end;
1403
1404		pm.pos = 0;
1405		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
1406		/* overflow ? */
1407		if (end < start_vaddr || end > end_vaddr)
1408			end = end_vaddr;
1409		down_read(&mm->mmap_sem);
1410		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
1411		up_read(&mm->mmap_sem);
 
 
1412		start_vaddr = end;
1413
1414		len = min(count, PM_ENTRY_BYTES * pm.pos);
1415		if (copy_to_user(buf, pm.buffer, len)) {
1416			ret = -EFAULT;
1417			goto out_free;
1418		}
1419		copied += len;
1420		buf += len;
1421		count -= len;
1422	}
1423	*ppos += copied;
1424	if (!ret || ret == PM_END_OF_BUFFER)
1425		ret = copied;
1426
1427out_free:
1428	kfree(pm.buffer);
1429out_mm:
1430	mmput(mm);
1431out:
1432	return ret;
1433}
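A minimal user-space reader for the entry format documented in the comment above pagemap_read() might look like the following sketch (illustrative only, not kernel code). Note that without CAP_SYS_ADMIN the PFN field reads back as zero, as enforced via pm.show_pfn above:

/* Illustrative user-space sketch, not kernel code. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        uintptr_t vaddr = (uintptr_t)&page_size;        /* any mapped address */
        uint64_t entry;
        int fd = open("/proc/self/pagemap", O_RDONLY);

        if (fd < 0)
                return 1;
        /* one 64-bit entry per virtual page, indexed by vaddr / page size */
        if (pread(fd, &entry, sizeof(entry),
                  (vaddr / page_size) * sizeof(entry)) != sizeof(entry)) {
                close(fd);
                return 1;
        }
        close(fd);

        printf("present=%d swapped=%d soft-dirty=%d exclusive=%d pfn=0x%llx\n",
               (int)(entry >> 63 & 1), (int)(entry >> 62 & 1),
               (int)(entry >> 55 & 1), (int)(entry >> 56 & 1),
               (unsigned long long)(entry & ((1ULL << 55) - 1)));
        return 0;
}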
1434
1435static int pagemap_open(struct inode *inode, struct file *file)
1436{
1437	struct mm_struct *mm;
1438
1439	mm = proc_mem_open(inode, PTRACE_MODE_READ);
1440	if (IS_ERR(mm))
1441		return PTR_ERR(mm);
1442	file->private_data = mm;
1443	return 0;
1444}
1445
1446static int pagemap_release(struct inode *inode, struct file *file)
1447{
1448	struct mm_struct *mm = file->private_data;
1449
1450	if (mm)
1451		mmdrop(mm);
1452	return 0;
1453}
1454
1455const struct file_operations proc_pagemap_operations = {
1456	.llseek		= mem_lseek, /* borrow this */
1457	.read		= pagemap_read,
1458	.open		= pagemap_open,
1459	.release	= pagemap_release,
1460};
1461#endif /* CONFIG_PROC_PAGE_MONITOR */
1462
1463#ifdef CONFIG_NUMA
1464
1465struct numa_maps {
1466	unsigned long pages;
1467	unsigned long anon;
1468	unsigned long active;
1469	unsigned long writeback;
1470	unsigned long mapcount_max;
1471	unsigned long dirty;
1472	unsigned long swapcache;
1473	unsigned long node[MAX_NUMNODES];
1474};
1475
1476struct numa_maps_private {
1477	struct proc_maps_private proc_maps;
1478	struct numa_maps md;
1479};
1480
1481static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
1482			unsigned long nr_pages)
1483{
1484	int count = page_mapcount(page);
1485
1486	md->pages += nr_pages;
1487	if (pte_dirty || PageDirty(page))
1488		md->dirty += nr_pages;
1489
1490	if (PageSwapCache(page))
1491		md->swapcache += nr_pages;
1492
1493	if (PageActive(page) || PageUnevictable(page))
1494		md->active += nr_pages;
1495
1496	if (PageWriteback(page))
1497		md->writeback += nr_pages;
1498
1499	if (PageAnon(page))
1500		md->anon += nr_pages;
1501
1502	if (count > md->mapcount_max)
1503		md->mapcount_max = count;
1504
1505	md->node[page_to_nid(page)] += nr_pages;
1506}
1507
1508static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1509		unsigned long addr)
1510{
1511	struct page *page;
1512	int nid;
1513
1514	if (!pte_present(pte))
1515		return NULL;
1516
1517	page = vm_normal_page(vma, addr, pte);
1518	if (!page)
1519		return NULL;
1520
1521	if (PageReserved(page))
1522		return NULL;
1523
1524	nid = page_to_nid(page);
1525	if (!node_isset(nid, node_states[N_MEMORY]))
1526		return NULL;
1527
1528	return page;
1529}
1530
1531#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1532static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
1533					      struct vm_area_struct *vma,
1534					      unsigned long addr)
1535{
1536	struct page *page;
1537	int nid;
1538
1539	if (!pmd_present(pmd))
1540		return NULL;
1541
1542	page = vm_normal_page_pmd(vma, addr, pmd);
1543	if (!page)
1544		return NULL;
1545
1546	if (PageReserved(page))
1547		return NULL;
1548
1549	nid = page_to_nid(page);
1550	if (!node_isset(nid, node_states[N_MEMORY]))
1551		return NULL;
1552
1553	return page;
1554}
1555#endif
1556
1557static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1558		unsigned long end, struct mm_walk *walk)
1559{
1560	struct numa_maps *md = walk->private;
1561	struct vm_area_struct *vma = walk->vma;
1562	spinlock_t *ptl;
1563	pte_t *orig_pte;
1564	pte_t *pte;
1565
1566#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1567	ptl = pmd_trans_huge_lock(pmd, vma);
1568	if (ptl) {
1569		struct page *page;
1570
1571		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
1572		if (page)
1573			gather_stats(page, md, pmd_dirty(*pmd),
1574				     HPAGE_PMD_SIZE/PAGE_SIZE);
1575		spin_unlock(ptl);
1576		return 0;
1577	}
1578
1579	if (pmd_trans_unstable(pmd))
1580		return 0;
1581#endif
1582	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1583	do {
1584		struct page *page = can_gather_numa_stats(*pte, vma, addr);
1585		if (!page)
1586			continue;
1587		gather_stats(page, md, pte_dirty(*pte), 1);
1588
1589	} while (pte++, addr += PAGE_SIZE, addr != end);
1590	pte_unmap_unlock(orig_pte, ptl);
1591	cond_resched();
1592	return 0;
1593}
1594#ifdef CONFIG_HUGETLB_PAGE
1595static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1596		unsigned long addr, unsigned long end, struct mm_walk *walk)
1597{
1598	pte_t huge_pte = huge_ptep_get(pte);
1599	struct numa_maps *md;
1600	struct page *page;
1601
1602	if (!pte_present(huge_pte))
1603		return 0;
1604
1605	page = pte_page(huge_pte);
1606	if (!page)
1607		return 0;
1608
1609	md = walk->private;
1610	gather_stats(page, md, pte_dirty(huge_pte), 1);
1611	return 0;
1612}
1613
1614#else
1615static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1616		unsigned long addr, unsigned long end, struct mm_walk *walk)
1617{
1618	return 0;
1619}
1620#endif
1621
1622/*
1623 * Display pages allocated per node and memory policy via /proc.
1624 */
1625static int show_numa_map(struct seq_file *m, void *v, int is_pid)
1626{
1627	struct numa_maps_private *numa_priv = m->private;
1628	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
1629	struct vm_area_struct *vma = v;
1630	struct numa_maps *md = &numa_priv->md;
1631	struct file *file = vma->vm_file;
1632	struct mm_struct *mm = vma->vm_mm;
1633	struct mm_walk walk = {
1634		.hugetlb_entry = gather_hugetlb_stats,
1635		.pmd_entry = gather_pte_stats,
1636		.private = md,
1637		.mm = mm,
1638	};
1639	struct mempolicy *pol;
1640	char buffer[64];
1641	int nid;
1642
1643	if (!mm)
1644		return 0;
1645
1646	/* Ensure we start with an empty set of numa_maps statistics. */
1647	memset(md, 0, sizeof(*md));
1648
1649	pol = __get_vma_policy(vma, vma->vm_start);
1650	if (pol) {
1651		mpol_to_str(buffer, sizeof(buffer), pol);
1652		mpol_cond_put(pol);
1653	} else {
1654		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
1655	}
1656
1657	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1658
1659	if (file) {
1660		seq_puts(m, " file=");
1661		seq_file_path(m, file, "\n\t= ");
1662	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1663		seq_puts(m, " heap");
1664	} else if (is_stack(proc_priv, vma)) {
1665		seq_puts(m, " stack");
1666	}
1667
1668	if (is_vm_hugetlb_page(vma))
1669		seq_puts(m, " huge");
1670
1671	/* mmap_sem is held by m_start */
1672	walk_page_vma(vma, &walk);
1673
1674	if (!md->pages)
1675		goto out;
1676
1677	if (md->anon)
1678		seq_printf(m, " anon=%lu", md->anon);
1679
1680	if (md->dirty)
1681		seq_printf(m, " dirty=%lu", md->dirty);
1682
1683	if (md->pages != md->anon && md->pages != md->dirty)
1684		seq_printf(m, " mapped=%lu", md->pages);
1685
1686	if (md->mapcount_max > 1)
1687		seq_printf(m, " mapmax=%lu", md->mapcount_max);
1688
1689	if (md->swapcache)
1690		seq_printf(m, " swapcache=%lu", md->swapcache);
1691
1692	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1693		seq_printf(m, " active=%lu", md->active);
1694
1695	if (md->writeback)
1696		seq_printf(m, " writeback=%lu", md->writeback);
1697
1698	for_each_node_state(nid, N_MEMORY)
1699		if (md->node[nid])
1700			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
1701
1702	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
1703out:
1704	seq_putc(m, '\n');
1705	m_cache_vma(m, vma);
1706	return 0;
1707}
1708
1709static int show_pid_numa_map(struct seq_file *m, void *v)
1710{
1711	return show_numa_map(m, v, 1);
1712}
1713
1714static int show_tid_numa_map(struct seq_file *m, void *v)
1715{
1716	return show_numa_map(m, v, 0);
1717}
1718
1719static const struct seq_operations proc_pid_numa_maps_op = {
1720	.start  = m_start,
1721	.next   = m_next,
1722	.stop   = m_stop,
1723	.show   = show_pid_numa_map,
1724};
1725
1726static const struct seq_operations proc_tid_numa_maps_op = {
1727	.start  = m_start,
1728	.next   = m_next,
1729	.stop   = m_stop,
1730	.show   = show_tid_numa_map,
1731};
1732
1733static int numa_maps_open(struct inode *inode, struct file *file,
1734			  const struct seq_operations *ops)
1735{
1736	return proc_maps_open(inode, file, ops,
1737				sizeof(struct numa_maps_private));
1738}
1739
1740static int pid_numa_maps_open(struct inode *inode, struct file *file)
1741{
1742	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
1743}
1744
1745static int tid_numa_maps_open(struct inode *inode, struct file *file)
1746{
1747	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
1748}
1749
1750const struct file_operations proc_pid_numa_maps_operations = {
1751	.open		= pid_numa_maps_open,
1752	.read		= seq_read,
1753	.llseek		= seq_lseek,
1754	.release	= proc_map_release,
1755};
1756
1757const struct file_operations proc_tid_numa_maps_operations = {
1758	.open		= tid_numa_maps_open,
1759	.read		= seq_read,
1760	.llseek		= seq_lseek,
1761	.release	= proc_map_release,
1762};
1763#endif /* CONFIG_NUMA */