v4.6
   1#include <linux/mm.h>
   2#include <linux/vmacache.h>
   3#include <linux/hugetlb.h>
   4#include <linux/huge_mm.h>
   5#include <linux/mount.h>
   6#include <linux/seq_file.h>
   7#include <linux/highmem.h>
   8#include <linux/ptrace.h>
   9#include <linux/slab.h>
  10#include <linux/pagemap.h>
  11#include <linux/mempolicy.h>
  12#include <linux/rmap.h>
  13#include <linux/swap.h>
  14#include <linux/swapops.h>
  15#include <linux/mmu_notifier.h>
  16#include <linux/page_idle.h>
  17#include <linux/shmem_fs.h>
  18
  19#include <asm/elf.h>
  20#include <asm/uaccess.h>
  21#include <asm/tlbflush.h>
  22#include "internal.h"
  23
  24void task_mem(struct seq_file *m, struct mm_struct *mm)
  25{
  26	unsigned long text, lib, swap, ptes, pmds, anon, file, shmem;
  27	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
  28
  29	anon = get_mm_counter(mm, MM_ANONPAGES);
  30	file = get_mm_counter(mm, MM_FILEPAGES);
  31	shmem = get_mm_counter(mm, MM_SHMEMPAGES);
  32
  33	/*
  34	 * Note: to minimize their overhead, mm maintains hiwater_vm and
  35	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
  36	 * collector of these hiwater stats must therefore get total_vm
  37	 * and rss too, which will usually be the higher.  Barriers? not
  38	 * worth the effort, such snapshots can always be inconsistent.
  39	 */
  40	hiwater_vm = total_vm = mm->total_vm;
  41	if (hiwater_vm < mm->hiwater_vm)
  42		hiwater_vm = mm->hiwater_vm;
  43	hiwater_rss = total_rss = anon + file + shmem;
  44	if (hiwater_rss < mm->hiwater_rss)
  45		hiwater_rss = mm->hiwater_rss;
  46
  47	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
  48	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
  49	swap = get_mm_counter(mm, MM_SWAPENTS);
  50	ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
  51	pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
  52	seq_printf(m,
  53		"VmPeak:\t%8lu kB\n"
  54		"VmSize:\t%8lu kB\n"
  55		"VmLck:\t%8lu kB\n"
  56		"VmPin:\t%8lu kB\n"
  57		"VmHWM:\t%8lu kB\n"
  58		"VmRSS:\t%8lu kB\n"
  59		"RssAnon:\t%8lu kB\n"
  60		"RssFile:\t%8lu kB\n"
  61		"RssShmem:\t%8lu kB\n"
  62		"VmData:\t%8lu kB\n"
  63		"VmStk:\t%8lu kB\n"
  64		"VmExe:\t%8lu kB\n"
  65		"VmLib:\t%8lu kB\n"
  66		"VmPTE:\t%8lu kB\n"
  67		"VmPMD:\t%8lu kB\n"
  68		"VmSwap:\t%8lu kB\n",
  69		hiwater_vm << (PAGE_SHIFT-10),
  70		total_vm << (PAGE_SHIFT-10),
  71		mm->locked_vm << (PAGE_SHIFT-10),
  72		mm->pinned_vm << (PAGE_SHIFT-10),
  73		hiwater_rss << (PAGE_SHIFT-10),
  74		total_rss << (PAGE_SHIFT-10),
  75		anon << (PAGE_SHIFT-10),
  76		file << (PAGE_SHIFT-10),
  77		shmem << (PAGE_SHIFT-10),
  78		mm->data_vm << (PAGE_SHIFT-10),
  79		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
  80		ptes >> 10,
  81		pmds >> 10,
  82		swap << (PAGE_SHIFT-10));
  83	hugetlb_report_usage(m, mm);
  84}
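
task_mem() above is the producer side of the VmPeak/VmHWM/VmRSS lines in /proc/<pid>/status, and the hiwater comment is the reason a consumer should treat the peak fields as "at least the current value" rather than as a consistent snapshot. A minimal consumer-side sketch (plain userspace C, not part of this file) that just prints the RSS-related lines for the current process:

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/self/status", "r");
	char line[256];

	if (!f)
		return 1;

	/* These lines are emitted by task_mem() via /proc/<pid>/status. */
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "VmHWM:", 6) ||
		    !strncmp(line, "VmRSS:", 6) ||
		    !strncmp(line, "RssAnon:", 8) ||
		    !strncmp(line, "RssFile:", 8) ||
		    !strncmp(line, "RssShmem:", 9))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}
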
  85
  86unsigned long task_vsize(struct mm_struct *mm)
  87{
  88	return PAGE_SIZE * mm->total_vm;
  89}
  90
  91unsigned long task_statm(struct mm_struct *mm,
  92			 unsigned long *shared, unsigned long *text,
  93			 unsigned long *data, unsigned long *resident)
  94{
  95	*shared = get_mm_counter(mm, MM_FILEPAGES) +
  96			get_mm_counter(mm, MM_SHMEMPAGES);
  97	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
  98								>> PAGE_SHIFT;
  99	*data = mm->data_vm + mm->stack_vm;
 100	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
 101	return mm->total_vm;
 102}
 103
 104#ifdef CONFIG_NUMA
 105/*
 106 * Save get_task_policy() for show_numa_map().
 107 */
 108static void hold_task_mempolicy(struct proc_maps_private *priv)
 109{
 110	struct task_struct *task = priv->task;
 111
 112	task_lock(task);
 113	priv->task_mempolicy = get_task_policy(task);
 114	mpol_get(priv->task_mempolicy);
 115	task_unlock(task);
 116}
 117static void release_task_mempolicy(struct proc_maps_private *priv)
 118{
 119	mpol_put(priv->task_mempolicy);
 120}
 121#else
 122static void hold_task_mempolicy(struct proc_maps_private *priv)
 123{
 124}
 125static void release_task_mempolicy(struct proc_maps_private *priv)
 126{
 127}
 128#endif
 129
 130static void vma_stop(struct proc_maps_private *priv)
 131{
 132	struct mm_struct *mm = priv->mm;
 133
 134	release_task_mempolicy(priv);
 135	up_read(&mm->mmap_sem);
 136	mmput(mm);
 137}
 138
 139static struct vm_area_struct *
 140m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
 141{
 142	if (vma == priv->tail_vma)
 143		return NULL;
 144	return vma->vm_next ?: priv->tail_vma;
 145}
 146
 147static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
 148{
 149	if (m->count < m->size)	/* vma is copied successfully */
 150		m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL;
 151}
 152
 153static void *m_start(struct seq_file *m, loff_t *ppos)
 154{
 155	struct proc_maps_private *priv = m->private;
 156	unsigned long last_addr = m->version;
 157	struct mm_struct *mm;
 158	struct vm_area_struct *vma;
 159	unsigned int pos = *ppos;
 160
 161	/* See m_cache_vma(). Zero at the start or after lseek. */
 162	if (last_addr == -1UL)
 163		return NULL;
 164
 165	priv->task = get_proc_task(priv->inode);
 166	if (!priv->task)
 167		return ERR_PTR(-ESRCH);
 168
 169	mm = priv->mm;
 170	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
 171		return NULL;
 172
 173	down_read(&mm->mmap_sem);
 174	hold_task_mempolicy(priv);
 175	priv->tail_vma = get_gate_vma(mm);
 176
 177	if (last_addr) {
 178		vma = find_vma(mm, last_addr);
  179		if (vma && (vma = m_next_vma(priv, vma)))
  180			return vma;
 181	}
 182
 183	m->version = 0;
 184	if (pos < mm->map_count) {
 185		for (vma = mm->mmap; pos; pos--) {
 186			m->version = vma->vm_start;
 187			vma = vma->vm_next;
 188		}
 189		return vma;
 190	}
 191
 192	/* we do not bother to update m->version in this case */
 193	if (pos == mm->map_count && priv->tail_vma)
 194		return priv->tail_vma;
 195
 196	vma_stop(priv);
 197	return NULL;
 198}
 199
 200static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 201{
 202	struct proc_maps_private *priv = m->private;
 203	struct vm_area_struct *next;
 204
 205	(*pos)++;
 206	next = m_next_vma(priv, v);
 207	if (!next)
 208		vma_stop(priv);
 209	return next;
 210}
 211
 212static void m_stop(struct seq_file *m, void *v)
 213{
 214	struct proc_maps_private *priv = m->private;
 215
 216	if (!IS_ERR_OR_NULL(v))
 217		vma_stop(priv);
 218	if (priv->task) {
 219		put_task_struct(priv->task);
 220		priv->task = NULL;
 221	}
 222}
 223
 224static int proc_maps_open(struct inode *inode, struct file *file,
 225			const struct seq_operations *ops, int psize)
 226{
 227	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
 228
 229	if (!priv)
 230		return -ENOMEM;
 231
 232	priv->inode = inode;
 233	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
 234	if (IS_ERR(priv->mm)) {
 235		int err = PTR_ERR(priv->mm);
 236
 237		seq_release_private(inode, file);
 238		return err;
 239	}
 240
 241	return 0;
 242}
 243
 244static int proc_map_release(struct inode *inode, struct file *file)
 245{
 246	struct seq_file *seq = file->private_data;
 247	struct proc_maps_private *priv = seq->private;
 248
 249	if (priv->mm)
 250		mmdrop(priv->mm);
 251
 252	return seq_release_private(inode, file);
 253}
 254
 255static int do_maps_open(struct inode *inode, struct file *file,
 256			const struct seq_operations *ops)
 257{
 258	return proc_maps_open(inode, file, ops,
 259				sizeof(struct proc_maps_private));
 260}
 261
 262/*
 263 * Indicate if the VMA is a stack for the given task; for
 264 * /proc/PID/maps that is the stack of the main task.
 265 */
 266static int is_stack(struct proc_maps_private *priv,
 267		    struct vm_area_struct *vma, int is_pid)
 268{
 269	int stack = 0;
 270
 271	if (is_pid) {
 272		stack = vma->vm_start <= vma->vm_mm->start_stack &&
 273			vma->vm_end >= vma->vm_mm->start_stack;
 274	} else {
 275		struct inode *inode = priv->inode;
 276		struct task_struct *task;
 277
 278		rcu_read_lock();
 279		task = pid_task(proc_pid(inode), PIDTYPE_PID);
 280		if (task)
 281			stack = vma_is_stack_for_task(vma, task);
 282		rcu_read_unlock();
 283	}
 284	return stack;
 285}
 286
 287static void
 288show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 289{
 290	struct mm_struct *mm = vma->vm_mm;
 291	struct file *file = vma->vm_file;
 292	struct proc_maps_private *priv = m->private;
 293	vm_flags_t flags = vma->vm_flags;
 294	unsigned long ino = 0;
 295	unsigned long long pgoff = 0;
 296	unsigned long start, end;
 297	dev_t dev = 0;
 298	const char *name = NULL;
 299
 300	if (file) {
 301		struct inode *inode = file_inode(vma->vm_file);
 302		dev = inode->i_sb->s_dev;
 303		ino = inode->i_ino;
 304		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
 305	}
 306
 307	/* We don't show the stack guard page in /proc/maps */
 308	start = vma->vm_start;
 309	if (stack_guard_page_start(vma, start))
 310		start += PAGE_SIZE;
 311	end = vma->vm_end;
 312	if (stack_guard_page_end(vma, end))
 313		end -= PAGE_SIZE;
 314
 315	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 316	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
 317			start,
 318			end,
 319			flags & VM_READ ? 'r' : '-',
 320			flags & VM_WRITE ? 'w' : '-',
 321			flags & VM_EXEC ? 'x' : '-',
 322			flags & VM_MAYSHARE ? 's' : 'p',
 323			pgoff,
 324			MAJOR(dev), MINOR(dev), ino);
 325
 326	/*
 327	 * Print the dentry name for named mappings, and a
 328	 * special [heap] marker for the heap:
 329	 */
 330	if (file) {
 331		seq_pad(m, ' ');
 332		seq_file_path(m, file, "\n");
 333		goto done;
 334	}
 335
 336	if (vma->vm_ops && vma->vm_ops->name) {
 337		name = vma->vm_ops->name(vma);
 338		if (name)
 339			goto done;
 340	}
 341
 342	name = arch_vma_name(vma);
 343	if (!name) {
 344		if (!mm) {
 345			name = "[vdso]";
 346			goto done;
 347		}
 348
 349		if (vma->vm_start <= mm->brk &&
 350		    vma->vm_end >= mm->start_brk) {
 351			name = "[heap]";
 352			goto done;
 353		}
 354
 355		if (is_stack(priv, vma, is_pid))
 356			name = "[stack]";
 357	}
 358
 359done:
 360	if (name) {
 361		seq_pad(m, ' ');
 362		seq_puts(m, name);
 363	}
 364	seq_putc(m, '\n');
 365}
 366
 367static int show_map(struct seq_file *m, void *v, int is_pid)
 368{
 369	show_map_vma(m, v, is_pid);
 370	m_cache_vma(m, v);
 371	return 0;
 372}
 373
 374static int show_pid_map(struct seq_file *m, void *v)
 375{
 376	return show_map(m, v, 1);
 377}
 378
 379static int show_tid_map(struct seq_file *m, void *v)
 380{
 381	return show_map(m, v, 0);
 382}
 383
 384static const struct seq_operations proc_pid_maps_op = {
 385	.start	= m_start,
 386	.next	= m_next,
 387	.stop	= m_stop,
 388	.show	= show_pid_map
 389};
 390
 391static const struct seq_operations proc_tid_maps_op = {
 392	.start	= m_start,
 393	.next	= m_next,
 394	.stop	= m_stop,
 395	.show	= show_tid_map
 396};
 397
 398static int pid_maps_open(struct inode *inode, struct file *file)
 399{
 400	return do_maps_open(inode, file, &proc_pid_maps_op);
 401}
 402
 403static int tid_maps_open(struct inode *inode, struct file *file)
 404{
 405	return do_maps_open(inode, file, &proc_tid_maps_op);
 406}
 407
 408const struct file_operations proc_pid_maps_operations = {
 409	.open		= pid_maps_open,
 410	.read		= seq_read,
 411	.llseek		= seq_lseek,
 412	.release	= proc_map_release,
 413};
 414
 415const struct file_operations proc_tid_maps_operations = {
 416	.open		= tid_maps_open,
 417	.read		= seq_read,
 418	.llseek		= seq_lseek,
 419	.release	= proc_map_release,
 420};
 421
 422/*
 423 * Proportional Set Size(PSS): my share of RSS.
 424 *
 425 * PSS of a process is the count of pages it has in memory, where each
 426 * page is divided by the number of processes sharing it.  So if a
 427 * process has 1000 pages all to itself, and 1000 shared with one other
 428 * process, its PSS will be 1500.
 429 *
 430 * To keep (accumulated) division errors low, we adopt a 64bit
 431 * fixed-point pss counter to minimize division errors. So (pss >>
 432 * PSS_SHIFT) would be the real byte count.
 433 *
 434 * A shift of 12 before division means (assuming 4K page size):
 435 * 	- 1M 3-user-pages add up to 8KB errors;
 436 * 	- supports mapcount up to 2^24, or 16M;
 437 * 	- supports PSS up to 2^52 bytes, or 4PB.
 438 */
 439#define PSS_SHIFT 12
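
A standalone sketch (userspace C, 4K pages assumed as in the comment above) that reproduces the worked example with the same fixed-point scheme: 1000 private pages plus 1000 pages shared with one other process give a PSS of 1500 pages, i.e. 6000 kB after the same >> (10 + PSS_SHIFT) conversion that show_smap() uses:

#include <stdint.h>
#include <stdio.h>

#define PSS_SHIFT 12
#define PAGE_SIZE 4096ULL

int main(void)
{
	uint64_t pss = 0;
	int i;

	for (i = 0; i < 1000; i++)		/* pages mapped only by us */
		pss += PAGE_SIZE << PSS_SHIFT;
	for (i = 0; i < 1000; i++)		/* pages shared with one other */
		pss += (PAGE_SIZE << PSS_SHIFT) / 2;

	/* Same conversion as show_smap(): drop the fixed point, then to kB. */
	printf("Pss: %llu kB\n", (unsigned long long)(pss >> (10 + PSS_SHIFT)));
	return 0;				/* prints "Pss: 6000 kB" */
}
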
 440
 441#ifdef CONFIG_PROC_PAGE_MONITOR
 442struct mem_size_stats {
 443	unsigned long resident;
 444	unsigned long shared_clean;
 445	unsigned long shared_dirty;
 446	unsigned long private_clean;
 447	unsigned long private_dirty;
 448	unsigned long referenced;
 449	unsigned long anonymous;
  450	unsigned long anonymous_thp;
  451	unsigned long swap;
 452	unsigned long shared_hugetlb;
 453	unsigned long private_hugetlb;
 454	u64 pss;
 455	u64 swap_pss;
 456	bool check_shmem_swap;
 457};
 458
 459static void smaps_account(struct mem_size_stats *mss, struct page *page,
 460		bool compound, bool young, bool dirty)
 461{
 462	int i, nr = compound ? 1 << compound_order(page) : 1;
 463	unsigned long size = nr * PAGE_SIZE;
 464
 465	if (PageAnon(page))
 466		mss->anonymous += size;
 467
 468	mss->resident += size;
 469	/* Accumulate the size in pages that have been accessed. */
 470	if (young || page_is_young(page) || PageReferenced(page))
 471		mss->referenced += size;
 472
 473	/*
 474	 * page_count(page) == 1 guarantees the page is mapped exactly once.
 475	 * If any subpage of the compound page mapped with PTE it would elevate
 476	 * page_count().
 477	 */
 478	if (page_count(page) == 1) {
 479		if (dirty || PageDirty(page))
 480			mss->private_dirty += size;
 481		else
 482			mss->private_clean += size;
 483		mss->pss += (u64)size << PSS_SHIFT;
 484		return;
 485	}
 486
 487	for (i = 0; i < nr; i++, page++) {
 488		int mapcount = page_mapcount(page);
 489
 490		if (mapcount >= 2) {
 491			if (dirty || PageDirty(page))
 492				mss->shared_dirty += PAGE_SIZE;
 493			else
 494				mss->shared_clean += PAGE_SIZE;
 495			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
 496		} else {
 497			if (dirty || PageDirty(page))
 498				mss->private_dirty += PAGE_SIZE;
 499			else
 500				mss->private_clean += PAGE_SIZE;
 501			mss->pss += PAGE_SIZE << PSS_SHIFT;
 502		}
 503	}
 504}
 505
 506#ifdef CONFIG_SHMEM
 507static int smaps_pte_hole(unsigned long addr, unsigned long end,
 508		struct mm_walk *walk)
 509{
 510	struct mem_size_stats *mss = walk->private;
 511
 512	mss->swap += shmem_partial_swap_usage(
 513			walk->vma->vm_file->f_mapping, addr, end);
 514
 515	return 0;
 516}
 517#endif
 518
 519static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 520		struct mm_walk *walk)
 521{
 522	struct mem_size_stats *mss = walk->private;
 523	struct vm_area_struct *vma = walk->vma;
 524	struct page *page = NULL;
 525
 526	if (pte_present(*pte)) {
 527		page = vm_normal_page(vma, addr, *pte);
 528	} else if (is_swap_pte(*pte)) {
 529		swp_entry_t swpent = pte_to_swp_entry(*pte);
 530
 531		if (!non_swap_entry(swpent)) {
 532			int mapcount;
 533
 534			mss->swap += PAGE_SIZE;
 535			mapcount = swp_swapcount(swpent);
 536			if (mapcount >= 2) {
 537				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
 538
 539				do_div(pss_delta, mapcount);
 540				mss->swap_pss += pss_delta;
 541			} else {
 542				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
 543			}
 544		} else if (is_migration_entry(swpent))
 545			page = migration_entry_to_page(swpent);
 546	} else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
 547							&& pte_none(*pte))) {
 548		page = find_get_entry(vma->vm_file->f_mapping,
 549						linear_page_index(vma, addr));
 550		if (!page)
 551			return;
 552
 553		if (radix_tree_exceptional_entry(page))
 554			mss->swap += PAGE_SIZE;
 555		else
 556			put_page(page);
 557
 558		return;
 559	}
 560
 561	if (!page)
 562		return;
 563
 564	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
 565}
 566
 567#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 568static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 569		struct mm_walk *walk)
 570{
 571	struct mem_size_stats *mss = walk->private;
 572	struct vm_area_struct *vma = walk->vma;
 573	struct page *page;
 574
 575	/* FOLL_DUMP will return -EFAULT on huge zero page */
 576	page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
 577	if (IS_ERR_OR_NULL(page))
 578		return;
  579	mss->anonymous_thp += HPAGE_PMD_SIZE;
  580	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
 581}
 582#else
 583static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 584		struct mm_walk *walk)
 585{
 586}
 587#endif
 588
 589static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 590			   struct mm_walk *walk)
 591{
 592	struct vm_area_struct *vma = walk->vma;
 593	pte_t *pte;
 594	spinlock_t *ptl;
 595
 596	ptl = pmd_trans_huge_lock(pmd, vma);
 597	if (ptl) {
 598		smaps_pmd_entry(pmd, addr, walk);
 599		spin_unlock(ptl);
 600		return 0;
 601	}
 602
 603	if (pmd_trans_unstable(pmd))
 604		return 0;
 605	/*
 606	 * The mmap_sem held all the way back in m_start() is what
 607	 * keeps khugepaged out of here and from collapsing things
 608	 * in here.
 609	 */
 610	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 611	for (; addr != end; pte++, addr += PAGE_SIZE)
 612		smaps_pte_entry(pte, addr, walk);
 613	pte_unmap_unlock(pte - 1, ptl);
 614	cond_resched();
 615	return 0;
 616}
 617
 618static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
 619{
 620	/*
 621	 * Don't forget to update Documentation/ on changes.
 622	 */
 623	static const char mnemonics[BITS_PER_LONG][2] = {
 624		/*
 625		 * In case if we meet a flag we don't know about.
 626		 */
 627		[0 ... (BITS_PER_LONG-1)] = "??",
 628
 629		[ilog2(VM_READ)]	= "rd",
 630		[ilog2(VM_WRITE)]	= "wr",
 631		[ilog2(VM_EXEC)]	= "ex",
 632		[ilog2(VM_SHARED)]	= "sh",
 633		[ilog2(VM_MAYREAD)]	= "mr",
 634		[ilog2(VM_MAYWRITE)]	= "mw",
 635		[ilog2(VM_MAYEXEC)]	= "me",
 636		[ilog2(VM_MAYSHARE)]	= "ms",
 637		[ilog2(VM_GROWSDOWN)]	= "gd",
 638		[ilog2(VM_PFNMAP)]	= "pf",
 639		[ilog2(VM_DENYWRITE)]	= "dw",
 640#ifdef CONFIG_X86_INTEL_MPX
 641		[ilog2(VM_MPX)]		= "mp",
 642#endif
 643		[ilog2(VM_LOCKED)]	= "lo",
 644		[ilog2(VM_IO)]		= "io",
 645		[ilog2(VM_SEQ_READ)]	= "sr",
 646		[ilog2(VM_RAND_READ)]	= "rr",
 647		[ilog2(VM_DONTCOPY)]	= "dc",
 648		[ilog2(VM_DONTEXPAND)]	= "de",
 649		[ilog2(VM_ACCOUNT)]	= "ac",
 650		[ilog2(VM_NORESERVE)]	= "nr",
 651		[ilog2(VM_HUGETLB)]	= "ht",
 652		[ilog2(VM_ARCH_1)]	= "ar",
 653		[ilog2(VM_DONTDUMP)]	= "dd",
 654#ifdef CONFIG_MEM_SOFT_DIRTY
 655		[ilog2(VM_SOFTDIRTY)]	= "sd",
 656#endif
 657		[ilog2(VM_MIXEDMAP)]	= "mm",
 658		[ilog2(VM_HUGEPAGE)]	= "hg",
 659		[ilog2(VM_NOHUGEPAGE)]	= "nh",
 660		[ilog2(VM_MERGEABLE)]	= "mg",
 661		[ilog2(VM_UFFD_MISSING)]= "um",
 662		[ilog2(VM_UFFD_WP)]	= "uw",
 663#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 664		/* These come out via ProtectionKey: */
 665		[ilog2(VM_PKEY_BIT0)]	= "",
 666		[ilog2(VM_PKEY_BIT1)]	= "",
 667		[ilog2(VM_PKEY_BIT2)]	= "",
 668		[ilog2(VM_PKEY_BIT3)]	= "",
 669#endif
 670	};
 671	size_t i;
 672
 673	seq_puts(m, "VmFlags: ");
 674	for (i = 0; i < BITS_PER_LONG; i++) {
 675		if (!mnemonics[i][0])
 676			continue;
 677		if (vma->vm_flags & (1UL << i)) {
 678			seq_printf(m, "%c%c ",
 679				   mnemonics[i][0], mnemonics[i][1]);
 680		}
 681	}
 682	seq_putc(m, '\n');
 683}
 684
 685#ifdef CONFIG_HUGETLB_PAGE
 686static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
 687				 unsigned long addr, unsigned long end,
 688				 struct mm_walk *walk)
 689{
 690	struct mem_size_stats *mss = walk->private;
 691	struct vm_area_struct *vma = walk->vma;
 692	struct page *page = NULL;
 693
 694	if (pte_present(*pte)) {
 695		page = vm_normal_page(vma, addr, *pte);
 696	} else if (is_swap_pte(*pte)) {
 697		swp_entry_t swpent = pte_to_swp_entry(*pte);
 698
 699		if (is_migration_entry(swpent))
 700			page = migration_entry_to_page(swpent);
 701	}
 702	if (page) {
 703		int mapcount = page_mapcount(page);
 704
 705		if (mapcount >= 2)
 706			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
 707		else
 708			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
 709	}
 710	return 0;
 711}
 712#endif /* HUGETLB_PAGE */
 713
 714void __weak arch_show_smap(struct seq_file *m, struct vm_area_struct *vma)
 715{
 716}
 717
 718static int show_smap(struct seq_file *m, void *v, int is_pid)
 719{
 720	struct vm_area_struct *vma = v;
 721	struct mem_size_stats mss;
 722	struct mm_walk smaps_walk = {
 723		.pmd_entry = smaps_pte_range,
 724#ifdef CONFIG_HUGETLB_PAGE
 725		.hugetlb_entry = smaps_hugetlb_range,
 726#endif
 727		.mm = vma->vm_mm,
 728		.private = &mss,
 729	};
 730
 731	memset(&mss, 0, sizeof mss);
 732
 733#ifdef CONFIG_SHMEM
 734	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
 735		/*
 736		 * For shared or readonly shmem mappings we know that all
 737		 * swapped out pages belong to the shmem object, and we can
 738		 * obtain the swap value much more efficiently. For private
 739		 * writable mappings, we might have COW pages that are
 740		 * not affected by the parent swapped out pages of the shmem
 741		 * object, so we have to distinguish them during the page walk.
 742		 * Unless we know that the shmem object (or the part mapped by
 743		 * our VMA) has no swapped out pages at all.
 744		 */
 745		unsigned long shmem_swapped = shmem_swap_usage(vma);
 746
 747		if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
 748					!(vma->vm_flags & VM_WRITE)) {
 749			mss.swap = shmem_swapped;
 750		} else {
 751			mss.check_shmem_swap = true;
 752			smaps_walk.pte_hole = smaps_pte_hole;
 753		}
 754	}
 755#endif
 756
 757	/* mmap_sem is held in m_start */
 758	walk_page_vma(vma, &smaps_walk);
 759
 760	show_map_vma(m, vma, is_pid);
 761
 762	seq_printf(m,
 763		   "Size:           %8lu kB\n"
 764		   "Rss:            %8lu kB\n"
 765		   "Pss:            %8lu kB\n"
 766		   "Shared_Clean:   %8lu kB\n"
 767		   "Shared_Dirty:   %8lu kB\n"
 768		   "Private_Clean:  %8lu kB\n"
 769		   "Private_Dirty:  %8lu kB\n"
 770		   "Referenced:     %8lu kB\n"
 771		   "Anonymous:      %8lu kB\n"
  772		   "AnonHugePages:  %8lu kB\n"
  773		   "Shared_Hugetlb: %8lu kB\n"
 774		   "Private_Hugetlb: %7lu kB\n"
 775		   "Swap:           %8lu kB\n"
 776		   "SwapPss:        %8lu kB\n"
 777		   "KernelPageSize: %8lu kB\n"
 778		   "MMUPageSize:    %8lu kB\n"
 779		   "Locked:         %8lu kB\n",
 780		   (vma->vm_end - vma->vm_start) >> 10,
 781		   mss.resident >> 10,
 782		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
 783		   mss.shared_clean  >> 10,
 784		   mss.shared_dirty  >> 10,
 785		   mss.private_clean >> 10,
 786		   mss.private_dirty >> 10,
 787		   mss.referenced >> 10,
 788		   mss.anonymous >> 10,
  789		   mss.anonymous_thp >> 10,
  790		   mss.shared_hugetlb >> 10,
 791		   mss.private_hugetlb >> 10,
 792		   mss.swap >> 10,
 793		   (unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)),
 794		   vma_kernel_pagesize(vma) >> 10,
 795		   vma_mmu_pagesize(vma) >> 10,
 796		   (vma->vm_flags & VM_LOCKED) ?
 797			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
 798
 799	arch_show_smap(m, vma);
 800	show_smap_vma_flags(m, vma);
 801	m_cache_vma(m, vma);
 802	return 0;
 803}
 804
 805static int show_pid_smap(struct seq_file *m, void *v)
 806{
 807	return show_smap(m, v, 1);
 808}
 809
 810static int show_tid_smap(struct seq_file *m, void *v)
 811{
 812	return show_smap(m, v, 0);
 813}
 814
 815static const struct seq_operations proc_pid_smaps_op = {
 816	.start	= m_start,
 817	.next	= m_next,
 818	.stop	= m_stop,
 819	.show	= show_pid_smap
 820};
 821
 822static const struct seq_operations proc_tid_smaps_op = {
 823	.start	= m_start,
 824	.next	= m_next,
 825	.stop	= m_stop,
 826	.show	= show_tid_smap
 827};
 828
 829static int pid_smaps_open(struct inode *inode, struct file *file)
 830{
 831	return do_maps_open(inode, file, &proc_pid_smaps_op);
 832}
 833
 834static int tid_smaps_open(struct inode *inode, struct file *file)
 835{
 836	return do_maps_open(inode, file, &proc_tid_smaps_op);
 837}
 838
 839const struct file_operations proc_pid_smaps_operations = {
 840	.open		= pid_smaps_open,
 841	.read		= seq_read,
 842	.llseek		= seq_lseek,
 843	.release	= proc_map_release,
 844};
 845
 846const struct file_operations proc_tid_smaps_operations = {
 847	.open		= tid_smaps_open,
 848	.read		= seq_read,
 849	.llseek		= seq_lseek,
 850	.release	= proc_map_release,
 851};
 852
 853enum clear_refs_types {
 854	CLEAR_REFS_ALL = 1,
 855	CLEAR_REFS_ANON,
 856	CLEAR_REFS_MAPPED,
 857	CLEAR_REFS_SOFT_DIRTY,
 858	CLEAR_REFS_MM_HIWATER_RSS,
 859	CLEAR_REFS_LAST,
 860};
 861
 862struct clear_refs_private {
 863	enum clear_refs_types type;
 864};
 865
 866#ifdef CONFIG_MEM_SOFT_DIRTY
 867static inline void clear_soft_dirty(struct vm_area_struct *vma,
 868		unsigned long addr, pte_t *pte)
 869{
 870	/*
 871	 * The soft-dirty tracker uses #PF-s to catch writes
 872	 * to pages, so write-protect the pte as well. See the
 873	 * Documentation/vm/soft-dirty.txt for full description
 874	 * of how soft-dirty works.
 875	 */
 876	pte_t ptent = *pte;
 877
 878	if (pte_present(ptent)) {
 879		ptent = ptep_modify_prot_start(vma->vm_mm, addr, pte);
 880		ptent = pte_wrprotect(ptent);
 881		ptent = pte_clear_soft_dirty(ptent);
 882		ptep_modify_prot_commit(vma->vm_mm, addr, pte, ptent);
 883	} else if (is_swap_pte(ptent)) {
 884		ptent = pte_swp_clear_soft_dirty(ptent);
 885		set_pte_at(vma->vm_mm, addr, pte, ptent);
 886	}
 887}
 888#else
 889static inline void clear_soft_dirty(struct vm_area_struct *vma,
 890		unsigned long addr, pte_t *pte)
 891{
 892}
 893#endif
 894
 895#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
 896static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 897		unsigned long addr, pmd_t *pmdp)
 898{
 899	pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
 900
 901	pmd = pmd_wrprotect(pmd);
 902	pmd = pmd_clear_soft_dirty(pmd);
 903
 904	set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
 905}
 906#else
 907static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 908		unsigned long addr, pmd_t *pmdp)
 909{
 910}
 911#endif
 912
 913static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 914				unsigned long end, struct mm_walk *walk)
 915{
 916	struct clear_refs_private *cp = walk->private;
 917	struct vm_area_struct *vma = walk->vma;
 918	pte_t *pte, ptent;
 919	spinlock_t *ptl;
 920	struct page *page;
 921
 922	ptl = pmd_trans_huge_lock(pmd, vma);
 923	if (ptl) {
 924		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
 925			clear_soft_dirty_pmd(vma, addr, pmd);
 926			goto out;
 927		}
 928
 929		page = pmd_page(*pmd);
 930
 931		/* Clear accessed and referenced bits. */
 932		pmdp_test_and_clear_young(vma, addr, pmd);
 933		test_and_clear_page_young(page);
 934		ClearPageReferenced(page);
 935out:
 936		spin_unlock(ptl);
 937		return 0;
 938	}
 939
 940	if (pmd_trans_unstable(pmd))
 941		return 0;
 942
 943	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 944	for (; addr != end; pte++, addr += PAGE_SIZE) {
 945		ptent = *pte;
 946
 947		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
 948			clear_soft_dirty(vma, addr, pte);
 949			continue;
 950		}
 951
 952		if (!pte_present(ptent))
 953			continue;
 954
 955		page = vm_normal_page(vma, addr, ptent);
 956		if (!page)
 957			continue;
 958
 959		/* Clear accessed and referenced bits. */
 960		ptep_test_and_clear_young(vma, addr, pte);
 961		test_and_clear_page_young(page);
 962		ClearPageReferenced(page);
 963	}
 964	pte_unmap_unlock(pte - 1, ptl);
 965	cond_resched();
 966	return 0;
 967}
 968
 969static int clear_refs_test_walk(unsigned long start, unsigned long end,
 970				struct mm_walk *walk)
 971{
 972	struct clear_refs_private *cp = walk->private;
 973	struct vm_area_struct *vma = walk->vma;
 974
 975	if (vma->vm_flags & VM_PFNMAP)
 976		return 1;
 977
 978	/*
 979	 * Writing 1 to /proc/pid/clear_refs affects all pages.
 980	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
 981	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
 982	 * Writing 4 to /proc/pid/clear_refs affects all pages.
 983	 */
 984	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
 985		return 1;
 986	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
 987		return 1;
 988	return 0;
 989}
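
To make the interface described in the comment above concrete: the following userspace sketch (not part of this file, and requiring CONFIG_MEM_SOFT_DIRTY for the "4" case) clears the calling process's soft-dirty bits and then re-dirties one page, which takes a write fault because clear_soft_dirty() also write-protected the PTE; the page's soft-dirty bit (bit 55 in pagemap, documented further below) is set again afterwards:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static char page[4096];

int main(void)
{
	int fd = open("/proc/self/clear_refs", O_WRONLY);

	if (fd < 0)
		return 1;
	/* "4" == CLEAR_REFS_SOFT_DIRTY: clear soft-dirty on all VMAs. */
	if (write(fd, "4", 1) != 1)
		return 1;
	close(fd);

	/* This store takes a write fault and marks the page soft-dirty. */
	memset(page, 0xaa, sizeof(page));
	return 0;
}
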
 990
 991static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 992				size_t count, loff_t *ppos)
 993{
 994	struct task_struct *task;
 995	char buffer[PROC_NUMBUF];
 996	struct mm_struct *mm;
 997	struct vm_area_struct *vma;
 998	enum clear_refs_types type;
 999	int itype;
1000	int rv;
1001
1002	memset(buffer, 0, sizeof(buffer));
1003	if (count > sizeof(buffer) - 1)
1004		count = sizeof(buffer) - 1;
1005	if (copy_from_user(buffer, buf, count))
1006		return -EFAULT;
1007	rv = kstrtoint(strstrip(buffer), 10, &itype);
1008	if (rv < 0)
1009		return rv;
1010	type = (enum clear_refs_types)itype;
1011	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
1012		return -EINVAL;
1013
1014	task = get_proc_task(file_inode(file));
1015	if (!task)
1016		return -ESRCH;
1017	mm = get_task_mm(task);
1018	if (mm) {
1019		struct clear_refs_private cp = {
1020			.type = type,
1021		};
1022		struct mm_walk clear_refs_walk = {
1023			.pmd_entry = clear_refs_pte_range,
1024			.test_walk = clear_refs_test_walk,
1025			.mm = mm,
1026			.private = &cp,
1027		};
1028
 1029		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
 1030			/*
1031			 * Writing 5 to /proc/pid/clear_refs resets the peak
1032			 * resident set size to this mm's current rss value.
1033			 */
1034			down_write(&mm->mmap_sem);
1035			reset_mm_hiwater_rss(mm);
1036			up_write(&mm->mmap_sem);
1037			goto out_mm;
1038		}
1039
1040		down_read(&mm->mmap_sem);
1041		if (type == CLEAR_REFS_SOFT_DIRTY) {
1042			for (vma = mm->mmap; vma; vma = vma->vm_next) {
1043				if (!(vma->vm_flags & VM_SOFTDIRTY))
1044					continue;
1045				up_read(&mm->mmap_sem);
 1046				down_write(&mm->mmap_sem);
 1047				for (vma = mm->mmap; vma; vma = vma->vm_next) {
1048					vma->vm_flags &= ~VM_SOFTDIRTY;
1049					vma_set_page_prot(vma);
1050				}
1051				downgrade_write(&mm->mmap_sem);
1052				break;
1053			}
1054			mmu_notifier_invalidate_range_start(mm, 0, -1);
1055		}
1056		walk_page_range(0, ~0UL, &clear_refs_walk);
1057		if (type == CLEAR_REFS_SOFT_DIRTY)
1058			mmu_notifier_invalidate_range_end(mm, 0, -1);
1059		flush_tlb_mm(mm);
1060		up_read(&mm->mmap_sem);
1061out_mm:
1062		mmput(mm);
1063	}
1064	put_task_struct(task);
1065
1066	return count;
1067}
1068
1069const struct file_operations proc_clear_refs_operations = {
1070	.write		= clear_refs_write,
1071	.llseek		= noop_llseek,
1072};
1073
1074typedef struct {
1075	u64 pme;
1076} pagemap_entry_t;
1077
1078struct pagemapread {
1079	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
1080	pagemap_entry_t *buffer;
1081	bool show_pfn;
1082};
1083
1084#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
1085#define PAGEMAP_WALK_MASK	(PMD_MASK)
1086
1087#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
1088#define PM_PFRAME_BITS		55
1089#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1090#define PM_SOFT_DIRTY		BIT_ULL(55)
1091#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
1092#define PM_FILE			BIT_ULL(61)
1093#define PM_SWAP			BIT_ULL(62)
1094#define PM_PRESENT		BIT_ULL(63)
1095
1096#define PM_END_OF_BUFFER    1
1097
1098static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
1099{
1100	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
1101}
1102
1103static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
1104			  struct pagemapread *pm)
1105{
1106	pm->buffer[pm->pos++] = *pme;
1107	if (pm->pos >= pm->len)
1108		return PM_END_OF_BUFFER;
1109	return 0;
1110}
1111
1112static int pagemap_pte_hole(unsigned long start, unsigned long end,
1113				struct mm_walk *walk)
1114{
1115	struct pagemapread *pm = walk->private;
1116	unsigned long addr = start;
1117	int err = 0;
1118
1119	while (addr < end) {
1120		struct vm_area_struct *vma = find_vma(walk->mm, addr);
1121		pagemap_entry_t pme = make_pme(0, 0);
1122		/* End of address space hole, which we mark as non-present. */
1123		unsigned long hole_end;
1124
1125		if (vma)
1126			hole_end = min(end, vma->vm_start);
1127		else
1128			hole_end = end;
1129
1130		for (; addr < hole_end; addr += PAGE_SIZE) {
1131			err = add_to_pagemap(addr, &pme, pm);
1132			if (err)
1133				goto out;
1134		}
1135
1136		if (!vma)
1137			break;
1138
1139		/* Addresses in the VMA. */
1140		if (vma->vm_flags & VM_SOFTDIRTY)
1141			pme = make_pme(0, PM_SOFT_DIRTY);
1142		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
1143			err = add_to_pagemap(addr, &pme, pm);
1144			if (err)
1145				goto out;
1146		}
1147	}
1148out:
1149	return err;
1150}
1151
1152static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
1153		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1154{
1155	u64 frame = 0, flags = 0;
1156	struct page *page = NULL;
1157
1158	if (pte_present(pte)) {
1159		if (pm->show_pfn)
1160			frame = pte_pfn(pte);
1161		flags |= PM_PRESENT;
1162		page = vm_normal_page(vma, addr, pte);
1163		if (pte_soft_dirty(pte))
1164			flags |= PM_SOFT_DIRTY;
1165	} else if (is_swap_pte(pte)) {
1166		swp_entry_t entry;
1167		if (pte_swp_soft_dirty(pte))
1168			flags |= PM_SOFT_DIRTY;
1169		entry = pte_to_swp_entry(pte);
1170		frame = swp_type(entry) |
1171			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
1172		flags |= PM_SWAP;
1173		if (is_migration_entry(entry))
1174			page = migration_entry_to_page(entry);
1175	}
1176
1177	if (page && !PageAnon(page))
1178		flags |= PM_FILE;
1179	if (page && page_mapcount(page) == 1)
1180		flags |= PM_MMAP_EXCLUSIVE;
1181	if (vma->vm_flags & VM_SOFTDIRTY)
1182		flags |= PM_SOFT_DIRTY;
1183
1184	return make_pme(frame, flags);
1185}
1186
1187static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1188			     struct mm_walk *walk)
1189{
1190	struct vm_area_struct *vma = walk->vma;
1191	struct pagemapread *pm = walk->private;
1192	spinlock_t *ptl;
1193	pte_t *pte, *orig_pte;
1194	int err = 0;
1195
1196#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1197	ptl = pmd_trans_huge_lock(pmdp, vma);
1198	if (ptl) {
1199		u64 flags = 0, frame = 0;
1200		pmd_t pmd = *pmdp;
1201
1202		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd))
1203			flags |= PM_SOFT_DIRTY;
1204
1205		/*
1206		 * Currently pmd for thp is always present because thp
1207		 * can not be swapped-out, migrated, or HWPOISONed
1208		 * (split in such cases instead.)
1209		 * This if-check is just to prepare for future implementation.
1210		 */
1211		if (pmd_present(pmd)) {
1212			struct page *page = pmd_page(pmd);
1213
1214			if (page_mapcount(page) == 1)
1215				flags |= PM_MMAP_EXCLUSIVE;
1216
1217			flags |= PM_PRESENT;
1218			if (pm->show_pfn)
1219				frame = pmd_pfn(pmd) +
1220					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1221		}
1222
1223		for (; addr != end; addr += PAGE_SIZE) {
1224			pagemap_entry_t pme = make_pme(frame, flags);
1225
1226			err = add_to_pagemap(addr, &pme, pm);
1227			if (err)
1228				break;
1229			if (pm->show_pfn && (flags & PM_PRESENT))
1230				frame++;
1231		}
1232		spin_unlock(ptl);
1233		return err;
1234	}
1235
1236	if (pmd_trans_unstable(pmdp))
1237		return 0;
1238#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1239
1240	/*
1241	 * We can assume that @vma always points to a valid one and @end never
1242	 * goes beyond vma->vm_end.
1243	 */
1244	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
1245	for (; addr < end; pte++, addr += PAGE_SIZE) {
1246		pagemap_entry_t pme;
1247
1248		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
1249		err = add_to_pagemap(addr, &pme, pm);
1250		if (err)
1251			break;
1252	}
1253	pte_unmap_unlock(orig_pte, ptl);
1254
1255	cond_resched();
1256
1257	return err;
1258}
1259
1260#ifdef CONFIG_HUGETLB_PAGE
1261/* This function walks within one hugetlb entry in the single call */
1262static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
1263				 unsigned long addr, unsigned long end,
1264				 struct mm_walk *walk)
1265{
1266	struct pagemapread *pm = walk->private;
1267	struct vm_area_struct *vma = walk->vma;
1268	u64 flags = 0, frame = 0;
1269	int err = 0;
1270	pte_t pte;
1271
1272	if (vma->vm_flags & VM_SOFTDIRTY)
1273		flags |= PM_SOFT_DIRTY;
1274
1275	pte = huge_ptep_get(ptep);
1276	if (pte_present(pte)) {
1277		struct page *page = pte_page(pte);
1278
1279		if (!PageAnon(page))
1280			flags |= PM_FILE;
1281
1282		if (page_mapcount(page) == 1)
1283			flags |= PM_MMAP_EXCLUSIVE;
1284
1285		flags |= PM_PRESENT;
1286		if (pm->show_pfn)
1287			frame = pte_pfn(pte) +
1288				((addr & ~hmask) >> PAGE_SHIFT);
1289	}
1290
1291	for (; addr != end; addr += PAGE_SIZE) {
1292		pagemap_entry_t pme = make_pme(frame, flags);
1293
1294		err = add_to_pagemap(addr, &pme, pm);
1295		if (err)
1296			return err;
1297		if (pm->show_pfn && (flags & PM_PRESENT))
1298			frame++;
1299	}
1300
1301	cond_resched();
1302
1303	return err;
1304}
1305#endif /* HUGETLB_PAGE */
1306
1307/*
1308 * /proc/pid/pagemap - an array mapping virtual pages to pfns
1309 *
1310 * For each page in the address space, this file contains one 64-bit entry
1311 * consisting of the following:
1312 *
1313 * Bits 0-54  page frame number (PFN) if present
1314 * Bits 0-4   swap type if swapped
1315 * Bits 5-54  swap offset if swapped
1316 * Bit  55    pte is soft-dirty (see Documentation/vm/soft-dirty.txt)
1317 * Bit  56    page exclusively mapped
1318 * Bits 57-60 zero
1319 * Bit  61    page is file-page or shared-anon
1320 * Bit  62    page swapped
1321 * Bit  63    page present
1322 *
1323 * If the page is not present but in swap, then the PFN contains an
1324 * encoding of the swap file number and the page's offset into the
1325 * swap. Unmapped pages return a null PFN. This allows determining
1326 * precisely which pages are mapped (or in swap) and comparing mapped
1327 * pages between processes.
1328 *
1329 * Efficient users of this interface will use /proc/pid/maps to
1330 * determine which areas of memory are actually mapped and llseek to
1331 * skip over unmapped regions.
1332 */
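
Since the comment above spells out the full entry layout, a userspace reader only needs to seek to the 8-byte slot for a virtual page and decode the bits. A minimal sketch (not part of this file) that looks up one address in the caller's own pagemap; the per-page indexing follows the "efficient users" advice above, and without CAP_SYS_ADMIN the kernel reports the PFN field as zero (see pm.show_pfn below):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	char *buf = malloc(psize);
	uint64_t entry;
	off_t offset;
	int fd;

	if (!buf)
		return 1;
	memset(buf, 0, psize);			/* fault the page in */

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0)
		return 1;

	/* One 64-bit entry per virtual page: index by virtual page number. */
	offset = (off_t)((uintptr_t)buf / psize) * sizeof(entry);
	if (pread(fd, &entry, sizeof(entry), offset) != sizeof(entry))
		return 1;

	printf("present=%llu swapped=%llu soft-dirty=%llu exclusive=%llu pfn=0x%llx\n",
	       (unsigned long long)(entry >> 63) & 1,		/* bit 63 */
	       (unsigned long long)(entry >> 62) & 1,		/* bit 62 */
	       (unsigned long long)(entry >> 55) & 1,		/* bit 55 */
	       (unsigned long long)(entry >> 56) & 1,		/* bit 56 */
	       (unsigned long long)(entry & ((1ULL << 55) - 1)));	/* bits 0-54 */

	close(fd);
	free(buf);
	return 0;
}
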
1333static ssize_t pagemap_read(struct file *file, char __user *buf,
1334			    size_t count, loff_t *ppos)
1335{
1336	struct mm_struct *mm = file->private_data;
1337	struct pagemapread pm;
1338	struct mm_walk pagemap_walk = {};
1339	unsigned long src;
1340	unsigned long svpfn;
1341	unsigned long start_vaddr;
1342	unsigned long end_vaddr;
1343	int ret = 0, copied = 0;
1344
1345	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
1346		goto out;
1347
1348	ret = -EINVAL;
1349	/* file position must be aligned */
1350	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1351		goto out_mm;
1352
1353	ret = 0;
1354	if (!count)
1355		goto out_mm;
1356
1357	/* do not disclose physical addresses: attack vector */
1358	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
1359
1360	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1361	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
1362	ret = -ENOMEM;
1363	if (!pm.buffer)
1364		goto out_mm;
1365
1366	pagemap_walk.pmd_entry = pagemap_pmd_range;
1367	pagemap_walk.pte_hole = pagemap_pte_hole;
1368#ifdef CONFIG_HUGETLB_PAGE
1369	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
1370#endif
1371	pagemap_walk.mm = mm;
1372	pagemap_walk.private = &pm;
1373
1374	src = *ppos;
1375	svpfn = src / PM_ENTRY_BYTES;
1376	start_vaddr = svpfn << PAGE_SHIFT;
1377	end_vaddr = mm->task_size;
1378
1379	/* watch out for wraparound */
1380	if (svpfn > mm->task_size >> PAGE_SHIFT)
1381		start_vaddr = end_vaddr;
1382
1383	/*
1384	 * The odds are that this will stop walking way
1385	 * before end_vaddr, because the length of the
1386	 * user buffer is tracked in "pm", and the walk
1387	 * will stop when we hit the end of the buffer.
1388	 */
1389	ret = 0;
1390	while (count && (start_vaddr < end_vaddr)) {
1391		int len;
1392		unsigned long end;
1393
1394		pm.pos = 0;
1395		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
1396		/* overflow ? */
1397		if (end < start_vaddr || end > end_vaddr)
1398			end = end_vaddr;
1399		down_read(&mm->mmap_sem);
1400		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
1401		up_read(&mm->mmap_sem);
1402		start_vaddr = end;
1403
1404		len = min(count, PM_ENTRY_BYTES * pm.pos);
1405		if (copy_to_user(buf, pm.buffer, len)) {
1406			ret = -EFAULT;
1407			goto out_free;
1408		}
1409		copied += len;
1410		buf += len;
1411		count -= len;
1412	}
1413	*ppos += copied;
1414	if (!ret || ret == PM_END_OF_BUFFER)
1415		ret = copied;
1416
1417out_free:
1418	kfree(pm.buffer);
1419out_mm:
1420	mmput(mm);
1421out:
1422	return ret;
1423}
1424
1425static int pagemap_open(struct inode *inode, struct file *file)
1426{
1427	struct mm_struct *mm;
1428
1429	mm = proc_mem_open(inode, PTRACE_MODE_READ);
1430	if (IS_ERR(mm))
1431		return PTR_ERR(mm);
1432	file->private_data = mm;
1433	return 0;
1434}
1435
1436static int pagemap_release(struct inode *inode, struct file *file)
1437{
1438	struct mm_struct *mm = file->private_data;
1439
1440	if (mm)
1441		mmdrop(mm);
1442	return 0;
1443}
1444
1445const struct file_operations proc_pagemap_operations = {
1446	.llseek		= mem_lseek, /* borrow this */
1447	.read		= pagemap_read,
1448	.open		= pagemap_open,
1449	.release	= pagemap_release,
1450};
1451#endif /* CONFIG_PROC_PAGE_MONITOR */
1452
1453#ifdef CONFIG_NUMA
1454
1455struct numa_maps {
1456	unsigned long pages;
1457	unsigned long anon;
1458	unsigned long active;
1459	unsigned long writeback;
1460	unsigned long mapcount_max;
1461	unsigned long dirty;
1462	unsigned long swapcache;
1463	unsigned long node[MAX_NUMNODES];
1464};
1465
1466struct numa_maps_private {
1467	struct proc_maps_private proc_maps;
1468	struct numa_maps md;
1469};
1470
1471static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
1472			unsigned long nr_pages)
1473{
1474	int count = page_mapcount(page);
1475
1476	md->pages += nr_pages;
1477	if (pte_dirty || PageDirty(page))
1478		md->dirty += nr_pages;
1479
1480	if (PageSwapCache(page))
1481		md->swapcache += nr_pages;
1482
1483	if (PageActive(page) || PageUnevictable(page))
1484		md->active += nr_pages;
1485
1486	if (PageWriteback(page))
1487		md->writeback += nr_pages;
1488
1489	if (PageAnon(page))
1490		md->anon += nr_pages;
1491
1492	if (count > md->mapcount_max)
1493		md->mapcount_max = count;
1494
1495	md->node[page_to_nid(page)] += nr_pages;
1496}
1497
1498static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1499		unsigned long addr)
1500{
1501	struct page *page;
1502	int nid;
1503
1504	if (!pte_present(pte))
1505		return NULL;
1506
1507	page = vm_normal_page(vma, addr, pte);
1508	if (!page)
1509		return NULL;
1510
1511	if (PageReserved(page))
1512		return NULL;
1513
1514	nid = page_to_nid(page);
1515	if (!node_isset(nid, node_states[N_MEMORY]))
1516		return NULL;
1517
1518	return page;
1519}
1520
1521#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1522static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
1523					      struct vm_area_struct *vma,
1524					      unsigned long addr)
1525{
1526	struct page *page;
1527	int nid;
1528
1529	if (!pmd_present(pmd))
1530		return NULL;
1531
1532	page = vm_normal_page_pmd(vma, addr, pmd);
1533	if (!page)
1534		return NULL;
1535
1536	if (PageReserved(page))
1537		return NULL;
1538
1539	nid = page_to_nid(page);
1540	if (!node_isset(nid, node_states[N_MEMORY]))
1541		return NULL;
1542
1543	return page;
1544}
1545#endif
1546
1547static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1548		unsigned long end, struct mm_walk *walk)
1549{
1550	struct numa_maps *md = walk->private;
1551	struct vm_area_struct *vma = walk->vma;
1552	spinlock_t *ptl;
1553	pte_t *orig_pte;
1554	pte_t *pte;
1555
1556#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1557	ptl = pmd_trans_huge_lock(pmd, vma);
1558	if (ptl) {
1559		struct page *page;
1560
1561		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
1562		if (page)
1563			gather_stats(page, md, pmd_dirty(*pmd),
1564				     HPAGE_PMD_SIZE/PAGE_SIZE);
1565		spin_unlock(ptl);
1566		return 0;
1567	}
1568
1569	if (pmd_trans_unstable(pmd))
1570		return 0;
1571#endif
1572	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1573	do {
1574		struct page *page = can_gather_numa_stats(*pte, vma, addr);
1575		if (!page)
1576			continue;
1577		gather_stats(page, md, pte_dirty(*pte), 1);
1578
1579	} while (pte++, addr += PAGE_SIZE, addr != end);
 1580	pte_unmap_unlock(orig_pte, ptl);
 1581	return 0;
1582}
1583#ifdef CONFIG_HUGETLB_PAGE
1584static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1585		unsigned long addr, unsigned long end, struct mm_walk *walk)
1586{
1587	pte_t huge_pte = huge_ptep_get(pte);
1588	struct numa_maps *md;
1589	struct page *page;
1590
1591	if (!pte_present(huge_pte))
1592		return 0;
1593
1594	page = pte_page(huge_pte);
1595	if (!page)
1596		return 0;
1597
1598	md = walk->private;
1599	gather_stats(page, md, pte_dirty(huge_pte), 1);
1600	return 0;
1601}
1602
1603#else
1604static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1605		unsigned long addr, unsigned long end, struct mm_walk *walk)
1606{
1607	return 0;
1608}
1609#endif
1610
1611/*
1612 * Display pages allocated per node and memory policy via /proc.
1613 */
1614static int show_numa_map(struct seq_file *m, void *v, int is_pid)
1615{
1616	struct numa_maps_private *numa_priv = m->private;
1617	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
1618	struct vm_area_struct *vma = v;
1619	struct numa_maps *md = &numa_priv->md;
1620	struct file *file = vma->vm_file;
1621	struct mm_struct *mm = vma->vm_mm;
1622	struct mm_walk walk = {
1623		.hugetlb_entry = gather_hugetlb_stats,
1624		.pmd_entry = gather_pte_stats,
1625		.private = md,
1626		.mm = mm,
1627	};
1628	struct mempolicy *pol;
1629	char buffer[64];
1630	int nid;
1631
1632	if (!mm)
1633		return 0;
1634
1635	/* Ensure we start with an empty set of numa_maps statistics. */
1636	memset(md, 0, sizeof(*md));
1637
1638	pol = __get_vma_policy(vma, vma->vm_start);
1639	if (pol) {
1640		mpol_to_str(buffer, sizeof(buffer), pol);
1641		mpol_cond_put(pol);
1642	} else {
1643		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
1644	}
1645
1646	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1647
1648	if (file) {
1649		seq_puts(m, " file=");
1650		seq_file_path(m, file, "\n\t= ");
1651	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1652		seq_puts(m, " heap");
1653	} else if (is_stack(proc_priv, vma, is_pid)) {
1654		seq_puts(m, " stack");
1655	}
1656
1657	if (is_vm_hugetlb_page(vma))
1658		seq_puts(m, " huge");
1659
1660	/* mmap_sem is held by m_start */
1661	walk_page_vma(vma, &walk);
1662
1663	if (!md->pages)
1664		goto out;
1665
1666	if (md->anon)
1667		seq_printf(m, " anon=%lu", md->anon);
1668
1669	if (md->dirty)
1670		seq_printf(m, " dirty=%lu", md->dirty);
1671
1672	if (md->pages != md->anon && md->pages != md->dirty)
1673		seq_printf(m, " mapped=%lu", md->pages);
1674
1675	if (md->mapcount_max > 1)
1676		seq_printf(m, " mapmax=%lu", md->mapcount_max);
1677
1678	if (md->swapcache)
1679		seq_printf(m, " swapcache=%lu", md->swapcache);
1680
1681	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1682		seq_printf(m, " active=%lu", md->active);
1683
1684	if (md->writeback)
1685		seq_printf(m, " writeback=%lu", md->writeback);
1686
1687	for_each_node_state(nid, N_MEMORY)
1688		if (md->node[nid])
1689			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
1690
1691	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
1692out:
1693	seq_putc(m, '\n');
1694	m_cache_vma(m, vma);
1695	return 0;
1696}
1697
1698static int show_pid_numa_map(struct seq_file *m, void *v)
1699{
1700	return show_numa_map(m, v, 1);
1701}
1702
1703static int show_tid_numa_map(struct seq_file *m, void *v)
1704{
1705	return show_numa_map(m, v, 0);
1706}
1707
1708static const struct seq_operations proc_pid_numa_maps_op = {
1709	.start  = m_start,
1710	.next   = m_next,
1711	.stop   = m_stop,
1712	.show   = show_pid_numa_map,
1713};
1714
1715static const struct seq_operations proc_tid_numa_maps_op = {
1716	.start  = m_start,
1717	.next   = m_next,
1718	.stop   = m_stop,
1719	.show   = show_tid_numa_map,
1720};
1721
1722static int numa_maps_open(struct inode *inode, struct file *file,
1723			  const struct seq_operations *ops)
1724{
1725	return proc_maps_open(inode, file, ops,
1726				sizeof(struct numa_maps_private));
1727}
1728
1729static int pid_numa_maps_open(struct inode *inode, struct file *file)
1730{
1731	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
1732}
1733
1734static int tid_numa_maps_open(struct inode *inode, struct file *file)
1735{
1736	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
1737}
1738
1739const struct file_operations proc_pid_numa_maps_operations = {
1740	.open		= pid_numa_maps_open,
1741	.read		= seq_read,
1742	.llseek		= seq_lseek,
1743	.release	= proc_map_release,
1744};
1745
1746const struct file_operations proc_tid_numa_maps_operations = {
1747	.open		= tid_numa_maps_open,
1748	.read		= seq_read,
1749	.llseek		= seq_lseek,
1750	.release	= proc_map_release,
1751};
1752#endif /* CONFIG_NUMA */
v4.10.11
   1#include <linux/mm.h>
   2#include <linux/vmacache.h>
   3#include <linux/hugetlb.h>
   4#include <linux/huge_mm.h>
   5#include <linux/mount.h>
   6#include <linux/seq_file.h>
   7#include <linux/highmem.h>
   8#include <linux/ptrace.h>
   9#include <linux/slab.h>
  10#include <linux/pagemap.h>
  11#include <linux/mempolicy.h>
  12#include <linux/rmap.h>
  13#include <linux/swap.h>
  14#include <linux/swapops.h>
  15#include <linux/mmu_notifier.h>
  16#include <linux/page_idle.h>
  17#include <linux/shmem_fs.h>
  18
  19#include <asm/elf.h>
  20#include <linux/uaccess.h>
  21#include <asm/tlbflush.h>
  22#include "internal.h"
  23
  24void task_mem(struct seq_file *m, struct mm_struct *mm)
  25{
  26	unsigned long text, lib, swap, ptes, pmds, anon, file, shmem;
  27	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
  28
  29	anon = get_mm_counter(mm, MM_ANONPAGES);
  30	file = get_mm_counter(mm, MM_FILEPAGES);
  31	shmem = get_mm_counter(mm, MM_SHMEMPAGES);
  32
  33	/*
  34	 * Note: to minimize their overhead, mm maintains hiwater_vm and
  35	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
  36	 * collector of these hiwater stats must therefore get total_vm
  37	 * and rss too, which will usually be the higher.  Barriers? not
  38	 * worth the effort, such snapshots can always be inconsistent.
  39	 */
  40	hiwater_vm = total_vm = mm->total_vm;
  41	if (hiwater_vm < mm->hiwater_vm)
  42		hiwater_vm = mm->hiwater_vm;
  43	hiwater_rss = total_rss = anon + file + shmem;
  44	if (hiwater_rss < mm->hiwater_rss)
  45		hiwater_rss = mm->hiwater_rss;
  46
  47	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
  48	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
  49	swap = get_mm_counter(mm, MM_SWAPENTS);
  50	ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
  51	pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
  52	seq_printf(m,
  53		"VmPeak:\t%8lu kB\n"
  54		"VmSize:\t%8lu kB\n"
  55		"VmLck:\t%8lu kB\n"
  56		"VmPin:\t%8lu kB\n"
  57		"VmHWM:\t%8lu kB\n"
  58		"VmRSS:\t%8lu kB\n"
  59		"RssAnon:\t%8lu kB\n"
  60		"RssFile:\t%8lu kB\n"
  61		"RssShmem:\t%8lu kB\n"
  62		"VmData:\t%8lu kB\n"
  63		"VmStk:\t%8lu kB\n"
  64		"VmExe:\t%8lu kB\n"
  65		"VmLib:\t%8lu kB\n"
  66		"VmPTE:\t%8lu kB\n"
  67		"VmPMD:\t%8lu kB\n"
  68		"VmSwap:\t%8lu kB\n",
  69		hiwater_vm << (PAGE_SHIFT-10),
  70		total_vm << (PAGE_SHIFT-10),
  71		mm->locked_vm << (PAGE_SHIFT-10),
  72		mm->pinned_vm << (PAGE_SHIFT-10),
  73		hiwater_rss << (PAGE_SHIFT-10),
  74		total_rss << (PAGE_SHIFT-10),
  75		anon << (PAGE_SHIFT-10),
  76		file << (PAGE_SHIFT-10),
  77		shmem << (PAGE_SHIFT-10),
  78		mm->data_vm << (PAGE_SHIFT-10),
  79		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
  80		ptes >> 10,
  81		pmds >> 10,
  82		swap << (PAGE_SHIFT-10));
  83	hugetlb_report_usage(m, mm);
  84}
  85
  86unsigned long task_vsize(struct mm_struct *mm)
  87{
  88	return PAGE_SIZE * mm->total_vm;
  89}
  90
  91unsigned long task_statm(struct mm_struct *mm,
  92			 unsigned long *shared, unsigned long *text,
  93			 unsigned long *data, unsigned long *resident)
  94{
  95	*shared = get_mm_counter(mm, MM_FILEPAGES) +
  96			get_mm_counter(mm, MM_SHMEMPAGES);
  97	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
  98								>> PAGE_SHIFT;
  99	*data = mm->data_vm + mm->stack_vm;
 100	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
 101	return mm->total_vm;
 102}
 103
 104#ifdef CONFIG_NUMA
 105/*
 106 * Save get_task_policy() for show_numa_map().
 107 */
 108static void hold_task_mempolicy(struct proc_maps_private *priv)
 109{
 110	struct task_struct *task = priv->task;
 111
 112	task_lock(task);
 113	priv->task_mempolicy = get_task_policy(task);
 114	mpol_get(priv->task_mempolicy);
 115	task_unlock(task);
 116}
 117static void release_task_mempolicy(struct proc_maps_private *priv)
 118{
 119	mpol_put(priv->task_mempolicy);
 120}
 121#else
 122static void hold_task_mempolicy(struct proc_maps_private *priv)
 123{
 124}
 125static void release_task_mempolicy(struct proc_maps_private *priv)
 126{
 127}
 128#endif
 129
 130static void vma_stop(struct proc_maps_private *priv)
 131{
 132	struct mm_struct *mm = priv->mm;
 133
 134	release_task_mempolicy(priv);
 135	up_read(&mm->mmap_sem);
 136	mmput(mm);
 137}
 138
 139static struct vm_area_struct *
 140m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
 141{
 142	if (vma == priv->tail_vma)
 143		return NULL;
 144	return vma->vm_next ?: priv->tail_vma;
 145}
 146
 147static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
 148{
 149	if (m->count < m->size)	/* vma is copied successfully */
 150		m->version = m_next_vma(m->private, vma) ? vma->vm_end : -1UL;
 151}
 152
 153static void *m_start(struct seq_file *m, loff_t *ppos)
 154{
 155	struct proc_maps_private *priv = m->private;
 156	unsigned long last_addr = m->version;
 157	struct mm_struct *mm;
 158	struct vm_area_struct *vma;
 159	unsigned int pos = *ppos;
 160
 161	/* See m_cache_vma(). Zero at the start or after lseek. */
 162	if (last_addr == -1UL)
 163		return NULL;
 164
 165	priv->task = get_proc_task(priv->inode);
 166	if (!priv->task)
 167		return ERR_PTR(-ESRCH);
 168
 169	mm = priv->mm;
 170	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
 171		return NULL;
 172
 173	down_read(&mm->mmap_sem);
 174	hold_task_mempolicy(priv);
 175	priv->tail_vma = get_gate_vma(mm);
 176
 177	if (last_addr) {
 178		vma = find_vma(mm, last_addr - 1);
 179		if (vma && vma->vm_start <= last_addr)
 180			vma = m_next_vma(priv, vma);
 181		if (vma)
 182			return vma;
 183	}
 184
 185	m->version = 0;
 186	if (pos < mm->map_count) {
 187		for (vma = mm->mmap; pos; pos--) {
 188			m->version = vma->vm_start;
 189			vma = vma->vm_next;
 190		}
 191		return vma;
 192	}
 193
 194	/* we do not bother to update m->version in this case */
 195	if (pos == mm->map_count && priv->tail_vma)
 196		return priv->tail_vma;
 197
 198	vma_stop(priv);
 199	return NULL;
 200}
 201
 202static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 203{
 204	struct proc_maps_private *priv = m->private;
 205	struct vm_area_struct *next;
 206
 207	(*pos)++;
 208	next = m_next_vma(priv, v);
 209	if (!next)
 210		vma_stop(priv);
 211	return next;
 212}
 213
 214static void m_stop(struct seq_file *m, void *v)
 215{
 216	struct proc_maps_private *priv = m->private;
 217
 218	if (!IS_ERR_OR_NULL(v))
 219		vma_stop(priv);
 220	if (priv->task) {
 221		put_task_struct(priv->task);
 222		priv->task = NULL;
 223	}
 224}
 225
 226static int proc_maps_open(struct inode *inode, struct file *file,
 227			const struct seq_operations *ops, int psize)
 228{
 229	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
 230
 231	if (!priv)
 232		return -ENOMEM;
 233
 234	priv->inode = inode;
 235	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
 236	if (IS_ERR(priv->mm)) {
 237		int err = PTR_ERR(priv->mm);
 238
 239		seq_release_private(inode, file);
 240		return err;
 241	}
 242
 243	return 0;
 244}
 245
 246static int proc_map_release(struct inode *inode, struct file *file)
 247{
 248	struct seq_file *seq = file->private_data;
 249	struct proc_maps_private *priv = seq->private;
 250
 251	if (priv->mm)
 252		mmdrop(priv->mm);
 253
 254	return seq_release_private(inode, file);
 255}
 256
 257static int do_maps_open(struct inode *inode, struct file *file,
 258			const struct seq_operations *ops)
 259{
 260	return proc_maps_open(inode, file, ops,
 261				sizeof(struct proc_maps_private));
 262}
 263
 264/*
 265 * Indicate if the VMA is a stack for the given task; for
 266 * /proc/PID/maps that is the stack of the main task.
 267 */
 268static int is_stack(struct proc_maps_private *priv,
 269		    struct vm_area_struct *vma)
 270{
 271	/*
 272	 * We make no effort to guess what a given thread considers to be
 273	 * its "stack".  It's not even well-defined for programs written
 274	 * in multiple languages like Go.
 275	 */
 276	return vma->vm_start <= vma->vm_mm->start_stack &&
 277		vma->vm_end >= vma->vm_mm->start_stack;
 278}
 279
 280static void
 281show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 282{
 283	struct mm_struct *mm = vma->vm_mm;
 284	struct file *file = vma->vm_file;
 285	struct proc_maps_private *priv = m->private;
 286	vm_flags_t flags = vma->vm_flags;
 287	unsigned long ino = 0;
 288	unsigned long long pgoff = 0;
 289	unsigned long start, end;
 290	dev_t dev = 0;
 291	const char *name = NULL;
 292
 293	if (file) {
 294		struct inode *inode = file_inode(vma->vm_file);
 295		dev = inode->i_sb->s_dev;
 296		ino = inode->i_ino;
 297		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
 298	}
 299
 300	/* We don't show the stack guard page in /proc/maps */
 301	start = vma->vm_start;
 302	if (stack_guard_page_start(vma, start))
 303		start += PAGE_SIZE;
 304	end = vma->vm_end;
 305	if (stack_guard_page_end(vma, end))
 306		end -= PAGE_SIZE;
 307
 308	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 309	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
 310			start,
 311			end,
 312			flags & VM_READ ? 'r' : '-',
 313			flags & VM_WRITE ? 'w' : '-',
 314			flags & VM_EXEC ? 'x' : '-',
 315			flags & VM_MAYSHARE ? 's' : 'p',
 316			pgoff,
 317			MAJOR(dev), MINOR(dev), ino);
 318
 319	/*
 320	 * Print the dentry name for named mappings, and a
 321	 * special [heap] marker for the heap:
 322	 */
 323	if (file) {
 324		seq_pad(m, ' ');
 325		seq_file_path(m, file, "\n");
 326		goto done;
 327	}
 328
 329	if (vma->vm_ops && vma->vm_ops->name) {
 330		name = vma->vm_ops->name(vma);
 331		if (name)
 332			goto done;
 333	}
 334
 335	name = arch_vma_name(vma);
 336	if (!name) {
 337		if (!mm) {
 338			name = "[vdso]";
 339			goto done;
 340		}
 341
 342		if (vma->vm_start <= mm->brk &&
 343		    vma->vm_end >= mm->start_brk) {
 344			name = "[heap]";
 345			goto done;
 346		}
 347
 348		if (is_stack(priv, vma))
 349			name = "[stack]";
 350	}
 351
 352done:
 353	if (name) {
 354		seq_pad(m, ' ');
 355		seq_puts(m, name);
 356	}
 357	seq_putc(m, '\n');
 358}
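/*
 * Example for illustration (values are made up): show_map_vma() emits
 * one /proc/PID/maps line per VMA in the form
 * "start-end perms offset dev:inode pathname", e.g.:
 *
 *	00400000-0040b000 r-xp 00000000 08:01 1048602            /bin/cat
 *	7ffd2f8a0000-7ffd2f8c1000 rw-p 00000000 00:00 0           [stack]
 */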
 359
 360static int show_map(struct seq_file *m, void *v, int is_pid)
 361{
 362	show_map_vma(m, v, is_pid);
 363	m_cache_vma(m, v);
 364	return 0;
 365}
 366
 367static int show_pid_map(struct seq_file *m, void *v)
 368{
 369	return show_map(m, v, 1);
 370}
 371
 372static int show_tid_map(struct seq_file *m, void *v)
 373{
 374	return show_map(m, v, 0);
 375}
 376
 377static const struct seq_operations proc_pid_maps_op = {
 378	.start	= m_start,
 379	.next	= m_next,
 380	.stop	= m_stop,
 381	.show	= show_pid_map
 382};
 383
 384static const struct seq_operations proc_tid_maps_op = {
 385	.start	= m_start,
 386	.next	= m_next,
 387	.stop	= m_stop,
 388	.show	= show_tid_map
 389};
 390
 391static int pid_maps_open(struct inode *inode, struct file *file)
 392{
 393	return do_maps_open(inode, file, &proc_pid_maps_op);
 394}
 395
 396static int tid_maps_open(struct inode *inode, struct file *file)
 397{
 398	return do_maps_open(inode, file, &proc_tid_maps_op);
 399}
 400
 401const struct file_operations proc_pid_maps_operations = {
 402	.open		= pid_maps_open,
 403	.read		= seq_read,
 404	.llseek		= seq_lseek,
 405	.release	= proc_map_release,
 406};
 407
 408const struct file_operations proc_tid_maps_operations = {
 409	.open		= tid_maps_open,
 410	.read		= seq_read,
 411	.llseek		= seq_lseek,
 412	.release	= proc_map_release,
 413};
 414
 415/*
 416 * Proportional Set Size (PSS): my share of RSS.
 417 *
 418 * PSS of a process is the count of pages it has in memory, where each
 419 * page is divided by the number of processes sharing it.  So if a
 420 * process has 1000 pages all to itself, and 1000 shared with one other
 421 * process, its PSS will be 1500.
 422 *
 423 * To keep (accumulated) division errors low, we adopt a 64-bit
 424 * fixed-point pss counter. So (pss >>
 425 * PSS_SHIFT) would be the real byte count.
 426 *
 427 * A shift of 12 before division means (assuming 4K page size):
 428 * 	- 1M 3-user-pages add up to 8KB errors;
 429 * 	- supports mapcount up to 2^24, or 16M;
 430 * 	- supports PSS up to 2^52 bytes, or 4PB.
 431 */
 432#define PSS_SHIFT 12
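/*
 * Illustrative user-space sketch (not part of this kernel file): it only
 * models the fixed-point accounting described above.  A private page
 * contributes its full size to pss, a page shared by N mappers
 * contributes size/N, and the total is shifted back by PSS_SHIFT (plus
 * 10 for kB) when reported, as show_smap() does further down.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE	4096ULL		/* assumes 4K pages */
#define EX_PSS_SHIFT	12

int main(void)
{
	uint64_t pss = 0;

	/* one private page: full cost */
	pss += EX_PAGE_SIZE << EX_PSS_SHIFT;
	/* one page shared by three mappers: a third of the cost */
	pss += (EX_PAGE_SIZE << EX_PSS_SHIFT) / 3;

	/* prints "Pss: 5 kB" (4 kB + 4/3 kB, rounded down) */
	printf("Pss: %llu kB\n",
	       (unsigned long long)(pss >> (10 + EX_PSS_SHIFT)));
	return 0;
}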
 433
 434#ifdef CONFIG_PROC_PAGE_MONITOR
 435struct mem_size_stats {
 436	unsigned long resident;
 437	unsigned long shared_clean;
 438	unsigned long shared_dirty;
 439	unsigned long private_clean;
 440	unsigned long private_dirty;
 441	unsigned long referenced;
 442	unsigned long anonymous;
 443	unsigned long anonymous_thp;
 444	unsigned long shmem_thp;
 445	unsigned long swap;
 446	unsigned long shared_hugetlb;
 447	unsigned long private_hugetlb;
 448	u64 pss;
 449	u64 swap_pss;
 450	bool check_shmem_swap;
 451};
 452
 453static void smaps_account(struct mem_size_stats *mss, struct page *page,
 454		bool compound, bool young, bool dirty)
 455{
 456	int i, nr = compound ? 1 << compound_order(page) : 1;
 457	unsigned long size = nr * PAGE_SIZE;
 458
 459	if (PageAnon(page))
 460		mss->anonymous += size;
 461
 462	mss->resident += size;
 463	/* Accumulate the size in pages that have been accessed. */
 464	if (young || page_is_young(page) || PageReferenced(page))
 465		mss->referenced += size;
 466
 467	/*
 468	 * page_count(page) == 1 guarantees the page is mapped exactly once.
 469	 * If any subpage of the compound page is mapped with a PTE, it would
 470	 * elevate page_count().
 471	 */
 472	if (page_count(page) == 1) {
 473		if (dirty || PageDirty(page))
 474			mss->private_dirty += size;
 475		else
 476			mss->private_clean += size;
 477		mss->pss += (u64)size << PSS_SHIFT;
 478		return;
 479	}
 480
 481	for (i = 0; i < nr; i++, page++) {
 482		int mapcount = page_mapcount(page);
 483
 484		if (mapcount >= 2) {
 485			if (dirty || PageDirty(page))
 486				mss->shared_dirty += PAGE_SIZE;
 487			else
 488				mss->shared_clean += PAGE_SIZE;
 489			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
 490		} else {
 491			if (dirty || PageDirty(page))
 492				mss->private_dirty += PAGE_SIZE;
 493			else
 494				mss->private_clean += PAGE_SIZE;
 495			mss->pss += PAGE_SIZE << PSS_SHIFT;
 496		}
 497	}
 498}
 499
 500#ifdef CONFIG_SHMEM
 501static int smaps_pte_hole(unsigned long addr, unsigned long end,
 502		struct mm_walk *walk)
 503{
 504	struct mem_size_stats *mss = walk->private;
 505
 506	mss->swap += shmem_partial_swap_usage(
 507			walk->vma->vm_file->f_mapping, addr, end);
 508
 509	return 0;
 510}
 511#endif
 512
 513static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 514		struct mm_walk *walk)
 515{
 516	struct mem_size_stats *mss = walk->private;
 517	struct vm_area_struct *vma = walk->vma;
 518	struct page *page = NULL;
 519
 520	if (pte_present(*pte)) {
 521		page = vm_normal_page(vma, addr, *pte);
 522	} else if (is_swap_pte(*pte)) {
 523		swp_entry_t swpent = pte_to_swp_entry(*pte);
 524
 525		if (!non_swap_entry(swpent)) {
 526			int mapcount;
 527
 528			mss->swap += PAGE_SIZE;
 529			mapcount = swp_swapcount(swpent);
 530			if (mapcount >= 2) {
 531				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
 532
 533				do_div(pss_delta, mapcount);
 534				mss->swap_pss += pss_delta;
 535			} else {
 536				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
 537			}
 538		} else if (is_migration_entry(swpent))
 539			page = migration_entry_to_page(swpent);
 540	} else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
 541							&& pte_none(*pte))) {
 542		page = find_get_entry(vma->vm_file->f_mapping,
 543						linear_page_index(vma, addr));
 544		if (!page)
 545			return;
 546
 547		if (radix_tree_exceptional_entry(page))
 548			mss->swap += PAGE_SIZE;
 549		else
 550			put_page(page);
 551
 552		return;
 553	}
 554
 555	if (!page)
 556		return;
 557
 558	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
 559}
 560
 561#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 562static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 563		struct mm_walk *walk)
 564{
 565	struct mem_size_stats *mss = walk->private;
 566	struct vm_area_struct *vma = walk->vma;
 567	struct page *page;
 568
 569	/* FOLL_DUMP will return -EFAULT on huge zero page */
 570	page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
 571	if (IS_ERR_OR_NULL(page))
 572		return;
 573	if (PageAnon(page))
 574		mss->anonymous_thp += HPAGE_PMD_SIZE;
 575	else if (PageSwapBacked(page))
 576		mss->shmem_thp += HPAGE_PMD_SIZE;
 577	else if (is_zone_device_page(page))
 578		/* pass */;
 579	else
 580		VM_BUG_ON_PAGE(1, page);
 581	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
 582}
 583#else
 584static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 585		struct mm_walk *walk)
 586{
 587}
 588#endif
 589
 590static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 591			   struct mm_walk *walk)
 592{
 593	struct vm_area_struct *vma = walk->vma;
 594	pte_t *pte;
 595	spinlock_t *ptl;
 596
 597	ptl = pmd_trans_huge_lock(pmd, vma);
 598	if (ptl) {
 599		smaps_pmd_entry(pmd, addr, walk);
 600		spin_unlock(ptl);
 601		return 0;
 602	}
 603
 604	if (pmd_trans_unstable(pmd))
 605		return 0;
 606	/*
 607	 * The mmap_sem held all the way back in m_start() is what
 608	 * keeps khugepaged out of here and from collapsing things
 609	 * in here.
 610	 */
 611	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 612	for (; addr != end; pte++, addr += PAGE_SIZE)
 613		smaps_pte_entry(pte, addr, walk);
 614	pte_unmap_unlock(pte - 1, ptl);
 615	cond_resched();
 616	return 0;
 617}
 618
 619static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
 620{
 621	/*
 622	 * Don't forget to update Documentation/ on changes.
 623	 */
 624	static const char mnemonics[BITS_PER_LONG][2] = {
 625		/*
 626		 * In case we meet a flag we don't know about.
 627		 */
 628		[0 ... (BITS_PER_LONG-1)] = "??",
 629
 630		[ilog2(VM_READ)]	= "rd",
 631		[ilog2(VM_WRITE)]	= "wr",
 632		[ilog2(VM_EXEC)]	= "ex",
 633		[ilog2(VM_SHARED)]	= "sh",
 634		[ilog2(VM_MAYREAD)]	= "mr",
 635		[ilog2(VM_MAYWRITE)]	= "mw",
 636		[ilog2(VM_MAYEXEC)]	= "me",
 637		[ilog2(VM_MAYSHARE)]	= "ms",
 638		[ilog2(VM_GROWSDOWN)]	= "gd",
 639		[ilog2(VM_PFNMAP)]	= "pf",
 640		[ilog2(VM_DENYWRITE)]	= "dw",
 641#ifdef CONFIG_X86_INTEL_MPX
 642		[ilog2(VM_MPX)]		= "mp",
 643#endif
 644		[ilog2(VM_LOCKED)]	= "lo",
 645		[ilog2(VM_IO)]		= "io",
 646		[ilog2(VM_SEQ_READ)]	= "sr",
 647		[ilog2(VM_RAND_READ)]	= "rr",
 648		[ilog2(VM_DONTCOPY)]	= "dc",
 649		[ilog2(VM_DONTEXPAND)]	= "de",
 650		[ilog2(VM_ACCOUNT)]	= "ac",
 651		[ilog2(VM_NORESERVE)]	= "nr",
 652		[ilog2(VM_HUGETLB)]	= "ht",
 653		[ilog2(VM_ARCH_1)]	= "ar",
 654		[ilog2(VM_DONTDUMP)]	= "dd",
 655#ifdef CONFIG_MEM_SOFT_DIRTY
 656		[ilog2(VM_SOFTDIRTY)]	= "sd",
 657#endif
 658		[ilog2(VM_MIXEDMAP)]	= "mm",
 659		[ilog2(VM_HUGEPAGE)]	= "hg",
 660		[ilog2(VM_NOHUGEPAGE)]	= "nh",
 661		[ilog2(VM_MERGEABLE)]	= "mg",
 662		[ilog2(VM_UFFD_MISSING)]= "um",
 663		[ilog2(VM_UFFD_WP)]	= "uw",
 664#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 665		/* These come out via ProtectionKey: */
 666		[ilog2(VM_PKEY_BIT0)]	= "",
 667		[ilog2(VM_PKEY_BIT1)]	= "",
 668		[ilog2(VM_PKEY_BIT2)]	= "",
 669		[ilog2(VM_PKEY_BIT3)]	= "",
 670#endif
 671	};
 672	size_t i;
 673
 674	seq_puts(m, "VmFlags: ");
 675	for (i = 0; i < BITS_PER_LONG; i++) {
 676		if (!mnemonics[i][0])
 677			continue;
 678		if (vma->vm_flags & (1UL << i)) {
 679			seq_printf(m, "%c%c ",
 680				   mnemonics[i][0], mnemonics[i][1]);
 681		}
 682	}
 683	seq_putc(m, '\n');
 684}
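/*
 * Example for illustration (flags are made up): the loop above prints
 * one two-letter mnemonic per set flag, so a typical private,
 * executable file mapping might show:
 *
 *	VmFlags: rd ex mr mw me dw
 */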
 685
 686#ifdef CONFIG_HUGETLB_PAGE
 687static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
 688				 unsigned long addr, unsigned long end,
 689				 struct mm_walk *walk)
 690{
 691	struct mem_size_stats *mss = walk->private;
 692	struct vm_area_struct *vma = walk->vma;
 693	struct page *page = NULL;
 694
 695	if (pte_present(*pte)) {
 696		page = vm_normal_page(vma, addr, *pte);
 697	} else if (is_swap_pte(*pte)) {
 698		swp_entry_t swpent = pte_to_swp_entry(*pte);
 699
 700		if (is_migration_entry(swpent))
 701			page = migration_entry_to_page(swpent);
 702	}
 703	if (page) {
 704		int mapcount = page_mapcount(page);
 705
 706		if (mapcount >= 2)
 707			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
 708		else
 709			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
 710	}
 711	return 0;
 712}
 713#endif /* HUGETLB_PAGE */
 714
 715void __weak arch_show_smap(struct seq_file *m, struct vm_area_struct *vma)
 716{
 717}
 718
 719static int show_smap(struct seq_file *m, void *v, int is_pid)
 720{
 721	struct vm_area_struct *vma = v;
 722	struct mem_size_stats mss;
 723	struct mm_walk smaps_walk = {
 724		.pmd_entry = smaps_pte_range,
 725#ifdef CONFIG_HUGETLB_PAGE
 726		.hugetlb_entry = smaps_hugetlb_range,
 727#endif
 728		.mm = vma->vm_mm,
 729		.private = &mss,
 730	};
 731
 732	memset(&mss, 0, sizeof mss);
 733
 734#ifdef CONFIG_SHMEM
 735	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
 736		/*
 737		 * For shared or readonly shmem mappings we know that all
 738		 * swapped out pages belong to the shmem object, and we can
 739		 * obtain the swap value much more efficiently. For private
 740		 * writable mappings, we might have COW pages that are
 741		 * not affected by the parent swapped out pages of the shmem
 742		 * object, so we have to distinguish them during the page walk.
 743		 * Unless we know that the shmem object (or the part mapped by
 744		 * our VMA) has no swapped out pages at all.
 745		 */
 746		unsigned long shmem_swapped = shmem_swap_usage(vma);
 747
 748		if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
 749					!(vma->vm_flags & VM_WRITE)) {
 750			mss.swap = shmem_swapped;
 751		} else {
 752			mss.check_shmem_swap = true;
 753			smaps_walk.pte_hole = smaps_pte_hole;
 754		}
 755	}
 756#endif
 757
 758	/* mmap_sem is held in m_start */
 759	walk_page_vma(vma, &smaps_walk);
 760
 761	show_map_vma(m, vma, is_pid);
 762
 763	seq_printf(m,
 764		   "Size:           %8lu kB\n"
 765		   "Rss:            %8lu kB\n"
 766		   "Pss:            %8lu kB\n"
 767		   "Shared_Clean:   %8lu kB\n"
 768		   "Shared_Dirty:   %8lu kB\n"
 769		   "Private_Clean:  %8lu kB\n"
 770		   "Private_Dirty:  %8lu kB\n"
 771		   "Referenced:     %8lu kB\n"
 772		   "Anonymous:      %8lu kB\n"
 773		   "AnonHugePages:  %8lu kB\n"
 774		   "ShmemPmdMapped: %8lu kB\n"
 775		   "Shared_Hugetlb: %8lu kB\n"
 776		   "Private_Hugetlb: %7lu kB\n"
 777		   "Swap:           %8lu kB\n"
 778		   "SwapPss:        %8lu kB\n"
 779		   "KernelPageSize: %8lu kB\n"
 780		   "MMUPageSize:    %8lu kB\n"
 781		   "Locked:         %8lu kB\n",
 782		   (vma->vm_end - vma->vm_start) >> 10,
 783		   mss.resident >> 10,
 784		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
 785		   mss.shared_clean  >> 10,
 786		   mss.shared_dirty  >> 10,
 787		   mss.private_clean >> 10,
 788		   mss.private_dirty >> 10,
 789		   mss.referenced >> 10,
 790		   mss.anonymous >> 10,
 791		   mss.anonymous_thp >> 10,
 792		   mss.shmem_thp >> 10,
 793		   mss.shared_hugetlb >> 10,
 794		   mss.private_hugetlb >> 10,
 795		   mss.swap >> 10,
 796		   (unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)),
 797		   vma_kernel_pagesize(vma) >> 10,
 798		   vma_mmu_pagesize(vma) >> 10,
 799		   (vma->vm_flags & VM_LOCKED) ?
 800			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
 801
 802	arch_show_smap(m, vma);
 803	show_smap_vma_flags(m, vma);
 804	m_cache_vma(m, vma);
 805	return 0;
 806}
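/*
 * Example for illustration (values are made up and abridged): for each
 * VMA, show_smap() prints the maps line followed by the counters
 * gathered by the page walk above, e.g.:
 *
 *	00400000-0040b000 r-xp 00000000 08:01 1048602    /bin/cat
 *	Size:                 44 kB
 *	Rss:                  20 kB
 *	Pss:                  12 kB
 *	...
 *	Swap:                  0 kB
 *	VmFlags: rd ex mr mw me dw
 */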
 807
 808static int show_pid_smap(struct seq_file *m, void *v)
 809{
 810	return show_smap(m, v, 1);
 811}
 812
 813static int show_tid_smap(struct seq_file *m, void *v)
 814{
 815	return show_smap(m, v, 0);
 816}
 817
 818static const struct seq_operations proc_pid_smaps_op = {
 819	.start	= m_start,
 820	.next	= m_next,
 821	.stop	= m_stop,
 822	.show	= show_pid_smap
 823};
 824
 825static const struct seq_operations proc_tid_smaps_op = {
 826	.start	= m_start,
 827	.next	= m_next,
 828	.stop	= m_stop,
 829	.show	= show_tid_smap
 830};
 831
 832static int pid_smaps_open(struct inode *inode, struct file *file)
 833{
 834	return do_maps_open(inode, file, &proc_pid_smaps_op);
 835}
 836
 837static int tid_smaps_open(struct inode *inode, struct file *file)
 838{
 839	return do_maps_open(inode, file, &proc_tid_smaps_op);
 840}
 841
 842const struct file_operations proc_pid_smaps_operations = {
 843	.open		= pid_smaps_open,
 844	.read		= seq_read,
 845	.llseek		= seq_lseek,
 846	.release	= proc_map_release,
 847};
 848
 849const struct file_operations proc_tid_smaps_operations = {
 850	.open		= tid_smaps_open,
 851	.read		= seq_read,
 852	.llseek		= seq_lseek,
 853	.release	= proc_map_release,
 854};
 855
 856enum clear_refs_types {
 857	CLEAR_REFS_ALL = 1,
 858	CLEAR_REFS_ANON,
 859	CLEAR_REFS_MAPPED,
 860	CLEAR_REFS_SOFT_DIRTY,
 861	CLEAR_REFS_MM_HIWATER_RSS,
 862	CLEAR_REFS_LAST,
 863};
 864
 865struct clear_refs_private {
 866	enum clear_refs_types type;
 867};
 868
 869#ifdef CONFIG_MEM_SOFT_DIRTY
 870static inline void clear_soft_dirty(struct vm_area_struct *vma,
 871		unsigned long addr, pte_t *pte)
 872{
 873	/*
 874	 * The soft-dirty tracker uses #PF-s to catch writes
 875	 * to pages, so write-protect the pte as well. See the
 876	 * to pages, so write-protect the pte as well. See
 877	 * Documentation/vm/soft-dirty.txt for a full description
 878	 */
 879	pte_t ptent = *pte;
 880
 881	if (pte_present(ptent)) {
 882		ptent = ptep_modify_prot_start(vma->vm_mm, addr, pte);
 883		ptent = pte_wrprotect(ptent);
 884		ptent = pte_clear_soft_dirty(ptent);
 885		ptep_modify_prot_commit(vma->vm_mm, addr, pte, ptent);
 886	} else if (is_swap_pte(ptent)) {
 887		ptent = pte_swp_clear_soft_dirty(ptent);
 888		set_pte_at(vma->vm_mm, addr, pte, ptent);
 889	}
 890}
 891#else
 892static inline void clear_soft_dirty(struct vm_area_struct *vma,
 893		unsigned long addr, pte_t *pte)
 894{
 895}
 896#endif
 897
 898#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
 899static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 900		unsigned long addr, pmd_t *pmdp)
 901{
 902	pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
 903
 904	pmd = pmd_wrprotect(pmd);
 905	pmd = pmd_clear_soft_dirty(pmd);
 906
 907	set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
 908}
 909#else
 910static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 911		unsigned long addr, pmd_t *pmdp)
 912{
 913}
 914#endif
 915
 916static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 917				unsigned long end, struct mm_walk *walk)
 918{
 919	struct clear_refs_private *cp = walk->private;
 920	struct vm_area_struct *vma = walk->vma;
 921	pte_t *pte, ptent;
 922	spinlock_t *ptl;
 923	struct page *page;
 924
 925	ptl = pmd_trans_huge_lock(pmd, vma);
 926	if (ptl) {
 927		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
 928			clear_soft_dirty_pmd(vma, addr, pmd);
 929			goto out;
 930		}
 931
 932		page = pmd_page(*pmd);
 933
 934		/* Clear accessed and referenced bits. */
 935		pmdp_test_and_clear_young(vma, addr, pmd);
 936		test_and_clear_page_young(page);
 937		ClearPageReferenced(page);
 938out:
 939		spin_unlock(ptl);
 940		return 0;
 941	}
 942
 943	if (pmd_trans_unstable(pmd))
 944		return 0;
 945
 946	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 947	for (; addr != end; pte++, addr += PAGE_SIZE) {
 948		ptent = *pte;
 949
 950		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
 951			clear_soft_dirty(vma, addr, pte);
 952			continue;
 953		}
 954
 955		if (!pte_present(ptent))
 956			continue;
 957
 958		page = vm_normal_page(vma, addr, ptent);
 959		if (!page)
 960			continue;
 961
 962		/* Clear accessed and referenced bits. */
 963		ptep_test_and_clear_young(vma, addr, pte);
 964		test_and_clear_page_young(page);
 965		ClearPageReferenced(page);
 966	}
 967	pte_unmap_unlock(pte - 1, ptl);
 968	cond_resched();
 969	return 0;
 970}
 971
 972static int clear_refs_test_walk(unsigned long start, unsigned long end,
 973				struct mm_walk *walk)
 974{
 975	struct clear_refs_private *cp = walk->private;
 976	struct vm_area_struct *vma = walk->vma;
 977
 978	if (vma->vm_flags & VM_PFNMAP)
 979		return 1;
 980
 981	/*
 982	 * Writing 1 to /proc/pid/clear_refs affects all pages.
 983	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
 984	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
 985	 * Writing 4 to /proc/pid/clear_refs affects all pages.
 986	 */
 987	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
 988		return 1;
 989	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
 990		return 1;
 991	return 0;
 992}
 993
 994static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 995				size_t count, loff_t *ppos)
 996{
 997	struct task_struct *task;
 998	char buffer[PROC_NUMBUF];
 999	struct mm_struct *mm;
1000	struct vm_area_struct *vma;
1001	enum clear_refs_types type;
1002	int itype;
1003	int rv;
1004
1005	memset(buffer, 0, sizeof(buffer));
1006	if (count > sizeof(buffer) - 1)
1007		count = sizeof(buffer) - 1;
1008	if (copy_from_user(buffer, buf, count))
1009		return -EFAULT;
1010	rv = kstrtoint(strstrip(buffer), 10, &itype);
1011	if (rv < 0)
1012		return rv;
1013	type = (enum clear_refs_types)itype;
1014	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
1015		return -EINVAL;
1016
1017	task = get_proc_task(file_inode(file));
1018	if (!task)
1019		return -ESRCH;
1020	mm = get_task_mm(task);
1021	if (mm) {
1022		struct clear_refs_private cp = {
1023			.type = type,
1024		};
1025		struct mm_walk clear_refs_walk = {
1026			.pmd_entry = clear_refs_pte_range,
1027			.test_walk = clear_refs_test_walk,
1028			.mm = mm,
1029			.private = &cp,
1030		};
1031
1032		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
1033			if (down_write_killable(&mm->mmap_sem)) {
1034				count = -EINTR;
1035				goto out_mm;
1036			}
1037
1038			/*
1039			 * Writing 5 to /proc/pid/clear_refs resets the peak
1040			 * resident set size to this mm's current rss value.
1041			 */
1042			reset_mm_hiwater_rss(mm);
1043			up_write(&mm->mmap_sem);
1044			goto out_mm;
1045		}
1046
1047		down_read(&mm->mmap_sem);
1048		if (type == CLEAR_REFS_SOFT_DIRTY) {
1049			for (vma = mm->mmap; vma; vma = vma->vm_next) {
1050				if (!(vma->vm_flags & VM_SOFTDIRTY))
1051					continue;
1052				up_read(&mm->mmap_sem);
1053				if (down_write_killable(&mm->mmap_sem)) {
1054					count = -EINTR;
1055					goto out_mm;
1056				}
1057				for (vma = mm->mmap; vma; vma = vma->vm_next) {
1058					vma->vm_flags &= ~VM_SOFTDIRTY;
1059					vma_set_page_prot(vma);
1060				}
1061				downgrade_write(&mm->mmap_sem);
1062				break;
1063			}
1064			mmu_notifier_invalidate_range_start(mm, 0, -1);
1065		}
1066		walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
1067		if (type == CLEAR_REFS_SOFT_DIRTY)
1068			mmu_notifier_invalidate_range_end(mm, 0, -1);
1069		flush_tlb_mm(mm);
1070		up_read(&mm->mmap_sem);
1071out_mm:
1072		mmput(mm);
1073	}
1074	put_task_struct(task);
1075
1076	return count;
1077}
1078
1079const struct file_operations proc_clear_refs_operations = {
1080	.write		= clear_refs_write,
1081	.llseek		= noop_llseek,
1082};
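/*
 * Illustrative user-space sketch (not part of this kernel file): it
 * exercises clear_refs_write() by writing "4" (CLEAR_REFS_SOFT_DIRTY)
 * to /proc/self/clear_refs; "1".."3" clear the referenced bits instead
 * and "5" resets the peak RSS, as the comments above describe.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/self/clear_refs", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/self/clear_refs");
		return 1;
	}
	if (write(fd, "4", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}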
1083
1084typedef struct {
1085	u64 pme;
1086} pagemap_entry_t;
1087
1088struct pagemapread {
1089	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
1090	pagemap_entry_t *buffer;
1091	bool show_pfn;
1092};
1093
1094#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
1095#define PAGEMAP_WALK_MASK	(PMD_MASK)
1096
1097#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
1098#define PM_PFRAME_BITS		55
1099#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1100#define PM_SOFT_DIRTY		BIT_ULL(55)
1101#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
1102#define PM_FILE			BIT_ULL(61)
1103#define PM_SWAP			BIT_ULL(62)
1104#define PM_PRESENT		BIT_ULL(63)
1105
1106#define PM_END_OF_BUFFER    1
1107
1108static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
1109{
1110	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
1111}
1112
1113static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
1114			  struct pagemapread *pm)
1115{
1116	pm->buffer[pm->pos++] = *pme;
1117	if (pm->pos >= pm->len)
1118		return PM_END_OF_BUFFER;
1119	return 0;
1120}
1121
1122static int pagemap_pte_hole(unsigned long start, unsigned long end,
1123				struct mm_walk *walk)
1124{
1125	struct pagemapread *pm = walk->private;
1126	unsigned long addr = start;
1127	int err = 0;
1128
1129	while (addr < end) {
1130		struct vm_area_struct *vma = find_vma(walk->mm, addr);
1131		pagemap_entry_t pme = make_pme(0, 0);
1132		/* End of address space hole, which we mark as non-present. */
1133		unsigned long hole_end;
1134
1135		if (vma)
1136			hole_end = min(end, vma->vm_start);
1137		else
1138			hole_end = end;
1139
1140		for (; addr < hole_end; addr += PAGE_SIZE) {
1141			err = add_to_pagemap(addr, &pme, pm);
1142			if (err)
1143				goto out;
1144		}
1145
1146		if (!vma)
1147			break;
1148
1149		/* Addresses in the VMA. */
1150		if (vma->vm_flags & VM_SOFTDIRTY)
1151			pme = make_pme(0, PM_SOFT_DIRTY);
1152		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
1153			err = add_to_pagemap(addr, &pme, pm);
1154			if (err)
1155				goto out;
1156		}
1157	}
1158out:
1159	return err;
1160}
1161
1162static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
1163		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1164{
1165	u64 frame = 0, flags = 0;
1166	struct page *page = NULL;
1167
1168	if (pte_present(pte)) {
1169		if (pm->show_pfn)
1170			frame = pte_pfn(pte);
1171		flags |= PM_PRESENT;
1172		page = vm_normal_page(vma, addr, pte);
1173		if (pte_soft_dirty(pte))
1174			flags |= PM_SOFT_DIRTY;
1175	} else if (is_swap_pte(pte)) {
1176		swp_entry_t entry;
1177		if (pte_swp_soft_dirty(pte))
1178			flags |= PM_SOFT_DIRTY;
1179		entry = pte_to_swp_entry(pte);
1180		frame = swp_type(entry) |
1181			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
1182		flags |= PM_SWAP;
1183		if (is_migration_entry(entry))
1184			page = migration_entry_to_page(entry);
1185	}
1186
1187	if (page && !PageAnon(page))
1188		flags |= PM_FILE;
1189	if (page && page_mapcount(page) == 1)
1190		flags |= PM_MMAP_EXCLUSIVE;
1191	if (vma->vm_flags & VM_SOFTDIRTY)
1192		flags |= PM_SOFT_DIRTY;
1193
1194	return make_pme(frame, flags);
1195}
1196
1197static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1198			     struct mm_walk *walk)
1199{
1200	struct vm_area_struct *vma = walk->vma;
1201	struct pagemapread *pm = walk->private;
1202	spinlock_t *ptl;
1203	pte_t *pte, *orig_pte;
1204	int err = 0;
1205
1206#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1207	ptl = pmd_trans_huge_lock(pmdp, vma);
1208	if (ptl) {
1209		u64 flags = 0, frame = 0;
1210		pmd_t pmd = *pmdp;
1211
1212		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd))
1213			flags |= PM_SOFT_DIRTY;
1214
1215		/*
1216		 * Currently pmd for thp is always present because thp
1217		 * can not be swapped-out, migrated, or HWPOISONed
1218		 * (split in such cases instead.)
1219		 * This if-check is just to prepare for future implementation.
1220		 */
1221		if (pmd_present(pmd)) {
1222			struct page *page = pmd_page(pmd);
1223
1224			if (page_mapcount(page) == 1)
1225				flags |= PM_MMAP_EXCLUSIVE;
1226
1227			flags |= PM_PRESENT;
1228			if (pm->show_pfn)
1229				frame = pmd_pfn(pmd) +
1230					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1231		}
1232
1233		for (; addr != end; addr += PAGE_SIZE) {
1234			pagemap_entry_t pme = make_pme(frame, flags);
1235
1236			err = add_to_pagemap(addr, &pme, pm);
1237			if (err)
1238				break;
1239			if (pm->show_pfn && (flags & PM_PRESENT))
1240				frame++;
1241		}
1242		spin_unlock(ptl);
1243		return err;
1244	}
1245
1246	if (pmd_trans_unstable(pmdp))
1247		return 0;
1248#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1249
1250	/*
1251	 * We can assume that @vma always points to a valid one and @end never
1252	 * goes beyond vma->vm_end.
1253	 */
1254	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
1255	for (; addr < end; pte++, addr += PAGE_SIZE) {
1256		pagemap_entry_t pme;
1257
1258		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
1259		err = add_to_pagemap(addr, &pme, pm);
1260		if (err)
1261			break;
1262	}
1263	pte_unmap_unlock(orig_pte, ptl);
1264
1265	cond_resched();
1266
1267	return err;
1268}
1269
1270#ifdef CONFIG_HUGETLB_PAGE
1271/* This function walks within one hugetlb entry in a single call */
1272static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
1273				 unsigned long addr, unsigned long end,
1274				 struct mm_walk *walk)
1275{
1276	struct pagemapread *pm = walk->private;
1277	struct vm_area_struct *vma = walk->vma;
1278	u64 flags = 0, frame = 0;
1279	int err = 0;
1280	pte_t pte;
1281
1282	if (vma->vm_flags & VM_SOFTDIRTY)
1283		flags |= PM_SOFT_DIRTY;
1284
1285	pte = huge_ptep_get(ptep);
1286	if (pte_present(pte)) {
1287		struct page *page = pte_page(pte);
1288
1289		if (!PageAnon(page))
1290			flags |= PM_FILE;
1291
1292		if (page_mapcount(page) == 1)
1293			flags |= PM_MMAP_EXCLUSIVE;
1294
1295		flags |= PM_PRESENT;
1296		if (pm->show_pfn)
1297			frame = pte_pfn(pte) +
1298				((addr & ~hmask) >> PAGE_SHIFT);
1299	}
1300
1301	for (; addr != end; addr += PAGE_SIZE) {
1302		pagemap_entry_t pme = make_pme(frame, flags);
1303
1304		err = add_to_pagemap(addr, &pme, pm);
1305		if (err)
1306			return err;
1307		if (pm->show_pfn && (flags & PM_PRESENT))
1308			frame++;
1309	}
1310
1311	cond_resched();
1312
1313	return err;
1314}
1315#endif /* HUGETLB_PAGE */
1316
1317/*
1318 * /proc/pid/pagemap - an array mapping virtual pages to pfns
1319 *
1320 * For each page in the address space, this file contains one 64-bit entry
1321 * consisting of the following:
1322 *
1323 * Bits 0-54  page frame number (PFN) if present
1324 * Bits 0-4   swap type if swapped
1325 * Bits 5-54  swap offset if swapped
1326 * Bit  55    pte is soft-dirty (see Documentation/vm/soft-dirty.txt)
1327 * Bit  56    page exclusively mapped
1328 * Bits 57-60 zero
1329 * Bit  61    page is file-page or shared-anon
1330 * Bit  62    page swapped
1331 * Bit  63    page present
1332 *
1333 * If the page is not present but in swap, then the PFN contains an
1334 * encoding of the swap file number and the page's offset into the
1335 * swap. Unmapped pages return a null PFN. This allows determining
1336 * precisely which pages are mapped (or in swap) and comparing mapped
1337 * pages between processes.
1338 *
1339 * Efficient users of this interface will use /proc/pid/maps to
1340 * determine which areas of memory are actually mapped and llseek to
1341 * skip over unmapped regions.
1342 */
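/*
 * Illustrative user-space sketch (not part of this kernel file): it
 * looks up a single virtual address in /proc/self/pagemap and decodes
 * the bits documented above; without CAP_SYS_ADMIN the PFN field reads
 * back as zero (see pagemap_read() below).
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned long page_size = (unsigned long)sysconf(_SC_PAGESIZE);
	/* inspect the page holding one of our own stack variables */
	unsigned long vaddr = (unsigned long)&page_size;
	off_t offset = (off_t)(vaddr / page_size) * sizeof(uint64_t);
	uint64_t entry;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0) {
		perror("open /proc/self/pagemap");
		return 1;
	}
	if (pread(fd, &entry, sizeof(entry), offset) != sizeof(entry)) {
		perror("pread");
		close(fd);
		return 1;
	}
	close(fd);

	printf("present=%d swap=%d file/shared-anon=%d soft-dirty=%d pfn=%llu\n",
	       (int)(entry >> 63 & 1), (int)(entry >> 62 & 1),
	       (int)(entry >> 61 & 1), (int)(entry >> 55 & 1),
	       (unsigned long long)(entry & ((1ULL << 55) - 1)));
	return 0;
}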
1343static ssize_t pagemap_read(struct file *file, char __user *buf,
1344			    size_t count, loff_t *ppos)
1345{
1346	struct mm_struct *mm = file->private_data;
1347	struct pagemapread pm;
1348	struct mm_walk pagemap_walk = {};
1349	unsigned long src;
1350	unsigned long svpfn;
1351	unsigned long start_vaddr;
1352	unsigned long end_vaddr;
1353	int ret = 0, copied = 0;
1354
1355	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
1356		goto out;
1357
1358	ret = -EINVAL;
1359	/* file position must be aligned */
1360	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1361		goto out_mm;
1362
1363	ret = 0;
1364	if (!count)
1365		goto out_mm;
1366
1367	/* do not disclose physical addresses: attack vector */
1368	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
1369
1370	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1371	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
1372	ret = -ENOMEM;
1373	if (!pm.buffer)
1374		goto out_mm;
1375
1376	pagemap_walk.pmd_entry = pagemap_pmd_range;
1377	pagemap_walk.pte_hole = pagemap_pte_hole;
1378#ifdef CONFIG_HUGETLB_PAGE
1379	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
1380#endif
1381	pagemap_walk.mm = mm;
1382	pagemap_walk.private = &pm;
1383
1384	src = *ppos;
1385	svpfn = src / PM_ENTRY_BYTES;
1386	start_vaddr = svpfn << PAGE_SHIFT;
1387	end_vaddr = mm->task_size;
1388
1389	/* watch out for wraparound */
1390	if (svpfn > mm->task_size >> PAGE_SHIFT)
1391		start_vaddr = end_vaddr;
1392
1393	/*
1394	 * The odds are that this will stop walking way
1395	 * before end_vaddr, because the length of the
1396	 * user buffer is tracked in "pm", and the walk
1397	 * will stop when we hit the end of the buffer.
1398	 */
1399	ret = 0;
1400	while (count && (start_vaddr < end_vaddr)) {
1401		int len;
1402		unsigned long end;
1403
1404		pm.pos = 0;
1405		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
1406		/* overflow ? */
1407		if (end < start_vaddr || end > end_vaddr)
1408			end = end_vaddr;
1409		down_read(&mm->mmap_sem);
1410		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
1411		up_read(&mm->mmap_sem);
1412		start_vaddr = end;
1413
1414		len = min(count, PM_ENTRY_BYTES * pm.pos);
1415		if (copy_to_user(buf, pm.buffer, len)) {
1416			ret = -EFAULT;
1417			goto out_free;
1418		}
1419		copied += len;
1420		buf += len;
1421		count -= len;
1422	}
1423	*ppos += copied;
1424	if (!ret || ret == PM_END_OF_BUFFER)
1425		ret = copied;
1426
1427out_free:
1428	kfree(pm.buffer);
1429out_mm:
1430	mmput(mm);
1431out:
1432	return ret;
1433}
1434
1435static int pagemap_open(struct inode *inode, struct file *file)
1436{
1437	struct mm_struct *mm;
1438
1439	mm = proc_mem_open(inode, PTRACE_MODE_READ);
1440	if (IS_ERR(mm))
1441		return PTR_ERR(mm);
1442	file->private_data = mm;
1443	return 0;
1444}
1445
1446static int pagemap_release(struct inode *inode, struct file *file)
1447{
1448	struct mm_struct *mm = file->private_data;
1449
1450	if (mm)
1451		mmdrop(mm);
1452	return 0;
1453}
1454
1455const struct file_operations proc_pagemap_operations = {
1456	.llseek		= mem_lseek, /* borrow this */
1457	.read		= pagemap_read,
1458	.open		= pagemap_open,
1459	.release	= pagemap_release,
1460};
1461#endif /* CONFIG_PROC_PAGE_MONITOR */
1462
1463#ifdef CONFIG_NUMA
1464
1465struct numa_maps {
1466	unsigned long pages;
1467	unsigned long anon;
1468	unsigned long active;
1469	unsigned long writeback;
1470	unsigned long mapcount_max;
1471	unsigned long dirty;
1472	unsigned long swapcache;
1473	unsigned long node[MAX_NUMNODES];
1474};
1475
1476struct numa_maps_private {
1477	struct proc_maps_private proc_maps;
1478	struct numa_maps md;
1479};
1480
1481static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
1482			unsigned long nr_pages)
1483{
1484	int count = page_mapcount(page);
1485
1486	md->pages += nr_pages;
1487	if (pte_dirty || PageDirty(page))
1488		md->dirty += nr_pages;
1489
1490	if (PageSwapCache(page))
1491		md->swapcache += nr_pages;
1492
1493	if (PageActive(page) || PageUnevictable(page))
1494		md->active += nr_pages;
1495
1496	if (PageWriteback(page))
1497		md->writeback += nr_pages;
1498
1499	if (PageAnon(page))
1500		md->anon += nr_pages;
1501
1502	if (count > md->mapcount_max)
1503		md->mapcount_max = count;
1504
1505	md->node[page_to_nid(page)] += nr_pages;
1506}
1507
1508static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1509		unsigned long addr)
1510{
1511	struct page *page;
1512	int nid;
1513
1514	if (!pte_present(pte))
1515		return NULL;
1516
1517	page = vm_normal_page(vma, addr, pte);
1518	if (!page)
1519		return NULL;
1520
1521	if (PageReserved(page))
1522		return NULL;
1523
1524	nid = page_to_nid(page);
1525	if (!node_isset(nid, node_states[N_MEMORY]))
1526		return NULL;
1527
1528	return page;
1529}
1530
1531#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1532static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
1533					      struct vm_area_struct *vma,
1534					      unsigned long addr)
1535{
1536	struct page *page;
1537	int nid;
1538
1539	if (!pmd_present(pmd))
1540		return NULL;
1541
1542	page = vm_normal_page_pmd(vma, addr, pmd);
1543	if (!page)
1544		return NULL;
1545
1546	if (PageReserved(page))
1547		return NULL;
1548
1549	nid = page_to_nid(page);
1550	if (!node_isset(nid, node_states[N_MEMORY]))
1551		return NULL;
1552
1553	return page;
1554}
1555#endif
1556
1557static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1558		unsigned long end, struct mm_walk *walk)
1559{
1560	struct numa_maps *md = walk->private;
1561	struct vm_area_struct *vma = walk->vma;
1562	spinlock_t *ptl;
1563	pte_t *orig_pte;
1564	pte_t *pte;
1565
1566#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1567	ptl = pmd_trans_huge_lock(pmd, vma);
1568	if (ptl) {
1569		struct page *page;
1570
1571		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
1572		if (page)
1573			gather_stats(page, md, pmd_dirty(*pmd),
1574				     HPAGE_PMD_SIZE/PAGE_SIZE);
1575		spin_unlock(ptl);
1576		return 0;
1577	}
1578
1579	if (pmd_trans_unstable(pmd))
1580		return 0;
1581#endif
1582	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1583	do {
1584		struct page *page = can_gather_numa_stats(*pte, vma, addr);
1585		if (!page)
1586			continue;
1587		gather_stats(page, md, pte_dirty(*pte), 1);
1588
1589	} while (pte++, addr += PAGE_SIZE, addr != end);
1590	pte_unmap_unlock(orig_pte, ptl);
1591	cond_resched();
1592	return 0;
1593}
1594#ifdef CONFIG_HUGETLB_PAGE
1595static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1596		unsigned long addr, unsigned long end, struct mm_walk *walk)
1597{
1598	pte_t huge_pte = huge_ptep_get(pte);
1599	struct numa_maps *md;
1600	struct page *page;
1601
1602	if (!pte_present(huge_pte))
1603		return 0;
1604
1605	page = pte_page(huge_pte);
1606	if (!page)
1607		return 0;
1608
1609	md = walk->private;
1610	gather_stats(page, md, pte_dirty(huge_pte), 1);
1611	return 0;
1612}
1613
1614#else
1615static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1616		unsigned long addr, unsigned long end, struct mm_walk *walk)
1617{
1618	return 0;
1619}
1620#endif
1621
1622/*
1623 * Display pages allocated per node and memory policy via /proc.
1624 */
1625static int show_numa_map(struct seq_file *m, void *v, int is_pid)
1626{
1627	struct numa_maps_private *numa_priv = m->private;
1628	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
1629	struct vm_area_struct *vma = v;
1630	struct numa_maps *md = &numa_priv->md;
1631	struct file *file = vma->vm_file;
1632	struct mm_struct *mm = vma->vm_mm;
1633	struct mm_walk walk = {
1634		.hugetlb_entry = gather_hugetlb_stats,
1635		.pmd_entry = gather_pte_stats,
1636		.private = md,
1637		.mm = mm,
1638	};
1639	struct mempolicy *pol;
1640	char buffer[64];
1641	int nid;
1642
1643	if (!mm)
1644		return 0;
1645
1646	/* Ensure we start with an empty set of numa_maps statistics. */
1647	memset(md, 0, sizeof(*md));
1648
1649	pol = __get_vma_policy(vma, vma->vm_start);
1650	if (pol) {
1651		mpol_to_str(buffer, sizeof(buffer), pol);
1652		mpol_cond_put(pol);
1653	} else {
1654		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
1655	}
1656
1657	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1658
1659	if (file) {
1660		seq_puts(m, " file=");
1661		seq_file_path(m, file, "\n\t= ");
1662	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1663		seq_puts(m, " heap");
1664	} else if (is_stack(proc_priv, vma)) {
1665		seq_puts(m, " stack");
1666	}
1667
1668	if (is_vm_hugetlb_page(vma))
1669		seq_puts(m, " huge");
1670
1671	/* mmap_sem is held by m_start */
1672	walk_page_vma(vma, &walk);
1673
1674	if (!md->pages)
1675		goto out;
1676
1677	if (md->anon)
1678		seq_printf(m, " anon=%lu", md->anon);
1679
1680	if (md->dirty)
1681		seq_printf(m, " dirty=%lu", md->dirty);
1682
1683	if (md->pages != md->anon && md->pages != md->dirty)
1684		seq_printf(m, " mapped=%lu", md->pages);
1685
1686	if (md->mapcount_max > 1)
1687		seq_printf(m, " mapmax=%lu", md->mapcount_max);
1688
1689	if (md->swapcache)
1690		seq_printf(m, " swapcache=%lu", md->swapcache);
1691
1692	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1693		seq_printf(m, " active=%lu", md->active);
1694
1695	if (md->writeback)
1696		seq_printf(m, " writeback=%lu", md->writeback);
1697
1698	for_each_node_state(nid, N_MEMORY)
1699		if (md->node[nid])
1700			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
1701
1702	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
1703out:
1704	seq_putc(m, '\n');
1705	m_cache_vma(m, vma);
1706	return 0;
1707}
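/*
 * Example for illustration (values are made up): show_numa_map() above
 * emits one /proc/PID/numa_maps line per VMA, e.g.:
 *
 *	00400000 default file=/bin/cat mapped=11 mapmax=2 active=9 N0=11 kernelpagesize_kB=4
 */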
1708
1709static int show_pid_numa_map(struct seq_file *m, void *v)
1710{
1711	return show_numa_map(m, v, 1);
1712}
1713
1714static int show_tid_numa_map(struct seq_file *m, void *v)
1715{
1716	return show_numa_map(m, v, 0);
1717}
1718
1719static const struct seq_operations proc_pid_numa_maps_op = {
1720	.start  = m_start,
1721	.next   = m_next,
1722	.stop   = m_stop,
1723	.show   = show_pid_numa_map,
1724};
1725
1726static const struct seq_operations proc_tid_numa_maps_op = {
1727	.start  = m_start,
1728	.next   = m_next,
1729	.stop   = m_stop,
1730	.show   = show_tid_numa_map,
1731};
1732
1733static int numa_maps_open(struct inode *inode, struct file *file,
1734			  const struct seq_operations *ops)
1735{
1736	return proc_maps_open(inode, file, ops,
1737				sizeof(struct numa_maps_private));
1738}
1739
1740static int pid_numa_maps_open(struct inode *inode, struct file *file)
1741{
1742	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
1743}
1744
1745static int tid_numa_maps_open(struct inode *inode, struct file *file)
1746{
1747	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
1748}
1749
1750const struct file_operations proc_pid_numa_maps_operations = {
1751	.open		= pid_numa_maps_open,
1752	.read		= seq_read,
1753	.llseek		= seq_lseek,
1754	.release	= proc_map_release,
1755};
1756
1757const struct file_operations proc_tid_numa_maps_operations = {
1758	.open		= tid_numa_maps_open,
1759	.read		= seq_read,
1760	.llseek		= seq_lseek,
1761	.release	= proc_map_release,
1762};
1763#endif /* CONFIG_NUMA */