v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/mm.h>
   3#include <linux/vmacache.h>
   4#include <linux/hugetlb.h>
   5#include <linux/huge_mm.h>
   6#include <linux/mount.h>
   7#include <linux/seq_file.h>
   8#include <linux/highmem.h>
   9#include <linux/ptrace.h>
  10#include <linux/slab.h>
  11#include <linux/pagemap.h>
  12#include <linux/mempolicy.h>
  13#include <linux/rmap.h>
  14#include <linux/swap.h>
  15#include <linux/sched/mm.h>
  16#include <linux/swapops.h>
  17#include <linux/mmu_notifier.h>
  18#include <linux/page_idle.h>
  19#include <linux/shmem_fs.h>
  20#include <linux/uaccess.h>
  21
  22#include <asm/elf.h>
  23#include <asm/tlb.h>
  24#include <asm/tlbflush.h>
  25#include "internal.h"
  26
  27#define SEQ_PUT_DEC(str, val) \
  28		seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
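/*
 * Editorial note: SEQ_PUT_DEC() prints a page count as kilobytes.
 * Shifting a page count left by (PAGE_SHIFT - 10) multiplies by PAGE_SIZE
 * and divides by 1024 in one step; with 4 KiB pages that is simply
 * "pages * 4".
 */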
  29void task_mem(struct seq_file *m, struct mm_struct *mm)
  30{
  31	unsigned long text, lib, swap, anon, file, shmem;
  32	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
  33
  34	anon = get_mm_counter(mm, MM_ANONPAGES);
  35	file = get_mm_counter(mm, MM_FILEPAGES);
  36	shmem = get_mm_counter(mm, MM_SHMEMPAGES);
  37
  38	/*
  39	 * Note: to minimize their overhead, mm maintains hiwater_vm and
  40	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
  41	 * collector of these hiwater stats must therefore get total_vm
  42	 * and rss too, which will usually be the higher.  Barriers? not
  43	 * worth the effort, such snapshots can always be inconsistent.
  44	 */
  45	hiwater_vm = total_vm = mm->total_vm;
  46	if (hiwater_vm < mm->hiwater_vm)
  47		hiwater_vm = mm->hiwater_vm;
  48	hiwater_rss = total_rss = anon + file + shmem;
  49	if (hiwater_rss < mm->hiwater_rss)
  50		hiwater_rss = mm->hiwater_rss;
  51
  52	/* split executable areas between text and lib */
  53	text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
  54	text = min(text, mm->exec_vm << PAGE_SHIFT);
  55	lib = (mm->exec_vm << PAGE_SHIFT) - text;
  56
  57	swap = get_mm_counter(mm, MM_SWAPENTS);
  58	SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
  59	SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
  60	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
  61	SEQ_PUT_DEC(" kB\nVmPin:\t", mm->pinned_vm);
  62	SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
  63	SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
  64	SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
  65	SEQ_PUT_DEC(" kB\nRssFile:\t", file);
  66	SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
  67	SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
  68	SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
  69	seq_put_decimal_ull_width(m,
  70		    " kB\nVmExe:\t", text >> 10, 8);
  71	seq_put_decimal_ull_width(m,
  72		    " kB\nVmLib:\t", lib >> 10, 8);
  73	seq_put_decimal_ull_width(m,
  74		    " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
  75	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
  76	seq_puts(m, " kB\n");
  77	hugetlb_report_usage(m, mm);
  78}
  79#undef SEQ_PUT_DEC
  80
  81unsigned long task_vsize(struct mm_struct *mm)
  82{
  83	return PAGE_SIZE * mm->total_vm;
  84}
  85
  86unsigned long task_statm(struct mm_struct *mm,
  87			 unsigned long *shared, unsigned long *text,
  88			 unsigned long *data, unsigned long *resident)
  89{
  90	*shared = get_mm_counter(mm, MM_FILEPAGES) +
  91			get_mm_counter(mm, MM_SHMEMPAGES);
  92	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
  93								>> PAGE_SHIFT;
  94	*data = mm->data_vm + mm->stack_vm;
  95	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
  96	return mm->total_vm;
  97}
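/*
 * Editorial note: task_statm() fills the values backing /proc/<pid>/statm,
 * whose seven space-separated fields are size, resident, shared, text,
 * lib, data and dt (lib and dt are reported as 0), all counted in pages.
 */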
  98
  99#ifdef CONFIG_NUMA
 100/*
 101 * Save get_task_policy() for show_numa_map().
 102 */
 103static void hold_task_mempolicy(struct proc_maps_private *priv)
 104{
 105	struct task_struct *task = priv->task;
 106
 107	task_lock(task);
 108	priv->task_mempolicy = get_task_policy(task);
 109	mpol_get(priv->task_mempolicy);
 110	task_unlock(task);
 111}
 112static void release_task_mempolicy(struct proc_maps_private *priv)
 113{
 114	mpol_put(priv->task_mempolicy);
 115}
 116#else
 117static void hold_task_mempolicy(struct proc_maps_private *priv)
 118{
 119}
 120static void release_task_mempolicy(struct proc_maps_private *priv)
 121{
 122}
 123#endif
 124
 125static void vma_stop(struct proc_maps_private *priv)
 126{
 127	struct mm_struct *mm = priv->mm;
 128
 129	release_task_mempolicy(priv);
 130	up_read(&mm->mmap_sem);
 131	mmput(mm);
 132}
 133
 134static struct vm_area_struct *
 135m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
 136{
 137	if (vma == priv->tail_vma)
 138		return NULL;
 139	return vma->vm_next ?: priv->tail_vma;
 140}
 141
 142static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
 143{
 144	if (m->count < m->size)	/* vma is copied successfully */
 145		m->version = m_next_vma(m->private, vma) ? vma->vm_end : -1UL;
 146}
 147
 148static void *m_start(struct seq_file *m, loff_t *ppos)
 149{
 150	struct proc_maps_private *priv = m->private;
 151	unsigned long last_addr = m->version;
 152	struct mm_struct *mm;
 153	struct vm_area_struct *vma;
 154	unsigned int pos = *ppos;
 155
 156	/* See m_cache_vma(). Zero at the start or after lseek. */
 157	if (last_addr == -1UL)
 158		return NULL;
 159
 160	priv->task = get_proc_task(priv->inode);
 161	if (!priv->task)
 162		return ERR_PTR(-ESRCH);
 163
 164	mm = priv->mm;
 165	if (!mm || !mmget_not_zero(mm))
 166		return NULL;
 167
 168	down_read(&mm->mmap_sem);
 169	hold_task_mempolicy(priv);
 170	priv->tail_vma = get_gate_vma(mm);
 171
 172	if (last_addr) {
 173		vma = find_vma(mm, last_addr - 1);
 174		if (vma && vma->vm_start <= last_addr)
 175			vma = m_next_vma(priv, vma);
 176		if (vma)
 177			return vma;
 178	}
 179
 180	m->version = 0;
 181	if (pos < mm->map_count) {
 182		for (vma = mm->mmap; pos; pos--) {
 183			m->version = vma->vm_start;
 184			vma = vma->vm_next;
 185		}
 186		return vma;
 187	}
 188
 189	/* we do not bother to update m->version in this case */
 190	if (pos == mm->map_count && priv->tail_vma)
 191		return priv->tail_vma;
 192
 193	vma_stop(priv);
 194	return NULL;
 195}
 196
 197static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 198{
 199	struct proc_maps_private *priv = m->private;
 200	struct vm_area_struct *next;
 
 201
 202	(*pos)++;
 203	next = m_next_vma(priv, v);
 204	if (!next)
 205		vma_stop(priv);
 206	return next;
 207}
 208
 209static void m_stop(struct seq_file *m, void *v)
 210{
 211	struct proc_maps_private *priv = m->private;
 
 212
 213	if (!IS_ERR_OR_NULL(v))
 214		vma_stop(priv);
 215	if (priv->task) {
 216		put_task_struct(priv->task);
 217		priv->task = NULL;
 218	}
 219}
 220
 221static int proc_maps_open(struct inode *inode, struct file *file,
 222			const struct seq_operations *ops, int psize)
 223{
 224	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
 225
 226	if (!priv)
 227		return -ENOMEM;
 228
 229	priv->inode = inode;
 230	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
 231	if (IS_ERR(priv->mm)) {
 232		int err = PTR_ERR(priv->mm);
 233
 234		seq_release_private(inode, file);
 235		return err;
 236	}
 237
 238	return 0;
 239}
 240
 241static int proc_map_release(struct inode *inode, struct file *file)
 242{
 243	struct seq_file *seq = file->private_data;
 244	struct proc_maps_private *priv = seq->private;
 245
 246	if (priv->mm)
 247		mmdrop(priv->mm);
 248
 249	kfree(priv->rollup);
 250	return seq_release_private(inode, file);
 251}
 252
 253static int do_maps_open(struct inode *inode, struct file *file,
 254			const struct seq_operations *ops)
 255{
 256	return proc_maps_open(inode, file, ops,
 257				sizeof(struct proc_maps_private));
 258}
 259
 260/*
 261 * Indicate if the VMA is a stack for the given task; for
 262 * /proc/PID/maps that is the stack of the main task.
 263 */
 264static int is_stack(struct vm_area_struct *vma)
 265{
 266	/*
 267	 * We make no effort to guess what a given thread considers to be
 268	 * its "stack".  It's not even well-defined for programs written
  269	 * in languages like Go.
 270	 */
 271	return vma->vm_start <= vma->vm_mm->start_stack &&
 272		vma->vm_end >= vma->vm_mm->start_stack;
 273}
 274
 275static void show_vma_header_prefix(struct seq_file *m,
 276				   unsigned long start, unsigned long end,
 277				   vm_flags_t flags, unsigned long long pgoff,
 278				   dev_t dev, unsigned long ino)
 279{
 280	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 281	seq_put_hex_ll(m, NULL, start, 8);
 282	seq_put_hex_ll(m, "-", end, 8);
 283	seq_putc(m, ' ');
 284	seq_putc(m, flags & VM_READ ? 'r' : '-');
 285	seq_putc(m, flags & VM_WRITE ? 'w' : '-');
 286	seq_putc(m, flags & VM_EXEC ? 'x' : '-');
 287	seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
 288	seq_put_hex_ll(m, " ", pgoff, 8);
 289	seq_put_hex_ll(m, " ", MAJOR(dev), 2);
 290	seq_put_hex_ll(m, ":", MINOR(dev), 2);
 291	seq_put_decimal_ull(m, " ", ino);
 292	seq_putc(m, ' ');
 293}
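/*
 * Editorial note: the prefix built above is the familiar left-hand part of
 * a /proc/<pid>/maps line, for example (illustrative values):
 *
 *   7f12a4c00000-7f12a4e00000 r-xp 00000000 08:01 1048603
 *
 * i.e. start-end, permissions (r/w/x, then s=shared or p=private), file
 * offset, major:minor device numbers and inode, followed later by the
 * pathname or a [heap]/[stack]-style marker.
 */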
 294
 295static void
 296show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 297{
 298	struct mm_struct *mm = vma->vm_mm;
 299	struct file *file = vma->vm_file;
 300	vm_flags_t flags = vma->vm_flags;
 301	unsigned long ino = 0;
 302	unsigned long long pgoff = 0;
 303	unsigned long start, end;
 304	dev_t dev = 0;
 305	const char *name = NULL;
 306
 307	if (file) {
 308		struct inode *inode = file_inode(vma->vm_file);
 309		dev = inode->i_sb->s_dev;
 310		ino = inode->i_ino;
 311		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
 312	}
 313
 
 314	start = vma->vm_start;
 315	end = vma->vm_end;
 316	show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
 317
 318	/*
 319	 * Print the dentry name for named mappings, and a
 320	 * special [heap] marker for the heap:
 321	 */
 322	if (file) {
 323		seq_pad(m, ' ');
 324		seq_file_path(m, file, "\n");
 325		goto done;
 326	}
 327
 328	if (vma->vm_ops && vma->vm_ops->name) {
 329		name = vma->vm_ops->name(vma);
 330		if (name)
 331			goto done;
 332	}
 333
 334	name = arch_vma_name(vma);
 335	if (!name) {
 336		if (!mm) {
 337			name = "[vdso]";
 338			goto done;
 339		}
 340
 341		if (vma->vm_start <= mm->brk &&
 342		    vma->vm_end >= mm->start_brk) {
 343			name = "[heap]";
 344			goto done;
 345		}
 346
 347		if (is_stack(vma))
 348			name = "[stack]";
 349	}
 350
 351done:
 352	if (name) {
 353		seq_pad(m, ' ');
 354		seq_puts(m, name);
 355	}
 356	seq_putc(m, '\n');
 357}
 358
 359static int show_map(struct seq_file *m, void *v, int is_pid)
 360{
 361	show_map_vma(m, v, is_pid);
 362	m_cache_vma(m, v);
 363	return 0;
 364}
 365
 366static int show_pid_map(struct seq_file *m, void *v)
 367{
 368	return show_map(m, v, 1);
 369}
 370
 371static int show_tid_map(struct seq_file *m, void *v)
 372{
 373	return show_map(m, v, 0);
 374}
 375
 376static const struct seq_operations proc_pid_maps_op = {
 377	.start	= m_start,
 378	.next	= m_next,
 379	.stop	= m_stop,
 380	.show	= show_pid_map
 381};
 382
 383static const struct seq_operations proc_tid_maps_op = {
 384	.start	= m_start,
 385	.next	= m_next,
 386	.stop	= m_stop,
 387	.show	= show_tid_map
 388};
 389
 390static int pid_maps_open(struct inode *inode, struct file *file)
 391{
 392	return do_maps_open(inode, file, &proc_pid_maps_op);
 393}
 394
 395static int tid_maps_open(struct inode *inode, struct file *file)
 396{
 397	return do_maps_open(inode, file, &proc_tid_maps_op);
 398}
 399
 400const struct file_operations proc_pid_maps_operations = {
 401	.open		= pid_maps_open,
 402	.read		= seq_read,
 403	.llseek		= seq_lseek,
 404	.release	= proc_map_release,
 405};
 406
 407const struct file_operations proc_tid_maps_operations = {
 408	.open		= tid_maps_open,
 409	.read		= seq_read,
 410	.llseek		= seq_lseek,
 411	.release	= proc_map_release,
 412};
 413
 414/*
  415 * Proportional Set Size (PSS): my share of RSS.
 416 *
 417 * PSS of a process is the count of pages it has in memory, where each
 418 * page is divided by the number of processes sharing it.  So if a
 419 * process has 1000 pages all to itself, and 1000 shared with one other
 420 * process, its PSS will be 1500.
 421 *
  422 * To keep accumulated division errors low, we adopt a 64-bit
  423 * fixed-point pss counter, so (pss >> PSS_SHIFT) is the real
  424 * byte count.
 425 *
 426 * A shift of 12 before division means (assuming 4K page size):
 427 * 	- 1M 3-user-pages add up to 8KB errors;
 428 * 	- supports mapcount up to 2^24, or 16M;
 429 * 	- supports PSS up to 2^52 bytes, or 4PB.
 430 */
 431#define PSS_SHIFT 12
 432
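/*
 * Editorial sketch (not part of the kernel source): the fixed-point PSS
 * arithmetic described above, written as standalone userspace-style C.
 * A page mapped by N processes contributes PAGE_SIZE/N bytes; accumulating
 * in units of (bytes << PSS_SHIFT) keeps the per-page rounding error below
 * 2^-PSS_SHIFT of a byte.  Names prefixed EX_ are illustrative only.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE	4096ULL
#define EX_PSS_SHIFT	12

int main(void)
{
	uint64_t pss = 0;
	int i;

	for (i = 0; i < 1000; i++)	/* 1000 pages mapped only by us */
		pss += EX_PAGE_SIZE << EX_PSS_SHIFT;
	for (i = 0; i < 1000; i++)	/* 1000 pages shared with one other process */
		pss += (EX_PAGE_SIZE << EX_PSS_SHIFT) / 2;

	/* 1500 "own" pages of 4 KiB -> prints "Pss: 6000 kB" */
	printf("Pss: %llu kB\n",
	       (unsigned long long)((pss >> EX_PSS_SHIFT) >> 10));
	return 0;
}
#endif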
 433#ifdef CONFIG_PROC_PAGE_MONITOR
 434struct mem_size_stats {
 435	bool first;
 436	unsigned long resident;
 437	unsigned long shared_clean;
 438	unsigned long shared_dirty;
 439	unsigned long private_clean;
 440	unsigned long private_dirty;
 441	unsigned long referenced;
 442	unsigned long anonymous;
 443	unsigned long lazyfree;
 444	unsigned long anonymous_thp;
 445	unsigned long shmem_thp;
 446	unsigned long swap;
 447	unsigned long shared_hugetlb;
 448	unsigned long private_hugetlb;
 449	unsigned long first_vma_start;
 450	u64 pss;
 451	u64 pss_locked;
 452	u64 swap_pss;
 453	bool check_shmem_swap;
 454};
 455
 456static void smaps_account(struct mem_size_stats *mss, struct page *page,
 457		bool compound, bool young, bool dirty)
 458{
 459	int i, nr = compound ? 1 << compound_order(page) : 1;
 460	unsigned long size = nr * PAGE_SIZE;
 461
 462	if (PageAnon(page)) {
 463		mss->anonymous += size;
 464		if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
 465			mss->lazyfree += size;
 466	}
 467
 468	mss->resident += size;
 469	/* Accumulate the size in pages that have been accessed. */
 470	if (young || page_is_young(page) || PageReferenced(page))
 471		mss->referenced += size;
 472
 473	/*
 474	 * page_count(page) == 1 guarantees the page is mapped exactly once.
 475	 * If any subpage of the compound page mapped with PTE it would elevate
 476	 * page_count().
 477	 */
 478	if (page_count(page) == 1) {
 479		if (dirty || PageDirty(page))
 480			mss->private_dirty += size;
 481		else
 482			mss->private_clean += size;
 483		mss->pss += (u64)size << PSS_SHIFT;
 484		return;
 485	}
 486
 487	for (i = 0; i < nr; i++, page++) {
 488		int mapcount = page_mapcount(page);
 489
 490		if (mapcount >= 2) {
 491			if (dirty || PageDirty(page))
 492				mss->shared_dirty += PAGE_SIZE;
 493			else
 494				mss->shared_clean += PAGE_SIZE;
 495			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
 496		} else {
 497			if (dirty || PageDirty(page))
 498				mss->private_dirty += PAGE_SIZE;
 499			else
 500				mss->private_clean += PAGE_SIZE;
 501			mss->pss += PAGE_SIZE << PSS_SHIFT;
 502		}
 503	}
 504}
 505
 506#ifdef CONFIG_SHMEM
 507static int smaps_pte_hole(unsigned long addr, unsigned long end,
 508		struct mm_walk *walk)
 509{
 510	struct mem_size_stats *mss = walk->private;
 511
 512	mss->swap += shmem_partial_swap_usage(
 513			walk->vma->vm_file->f_mapping, addr, end);
 514
 515	return 0;
 516}
 517#endif
 518
 519static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 520		struct mm_walk *walk)
 521{
 522	struct mem_size_stats *mss = walk->private;
 523	struct vm_area_struct *vma = walk->vma;
 524	struct page *page = NULL;
 
 525
 526	if (pte_present(*pte)) {
 527		page = vm_normal_page(vma, addr, *pte);
 528	} else if (is_swap_pte(*pte)) {
 529		swp_entry_t swpent = pte_to_swp_entry(*pte);
 530
 531		if (!non_swap_entry(swpent)) {
 532			int mapcount;
 533
 534			mss->swap += PAGE_SIZE;
 535			mapcount = swp_swapcount(swpent);
 536			if (mapcount >= 2) {
 537				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
 538
 539				do_div(pss_delta, mapcount);
 540				mss->swap_pss += pss_delta;
 541			} else {
 542				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
 543			}
 544		} else if (is_migration_entry(swpent))
 545			page = migration_entry_to_page(swpent);
 546		else if (is_device_private_entry(swpent))
 547			page = device_private_entry_to_page(swpent);
 548	} else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
 549							&& pte_none(*pte))) {
 550		page = find_get_entry(vma->vm_file->f_mapping,
 551						linear_page_index(vma, addr));
 552		if (!page)
 553			return;
 554
 555		if (radix_tree_exceptional_entry(page))
 556			mss->swap += PAGE_SIZE;
 557		else
 558			put_page(page);
 559
 560		return;
 561	}
 562
 563	if (!page)
 564		return;
 565
 566	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
 567}
 568
 569#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 570static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 571		struct mm_walk *walk)
 572{
 573	struct mem_size_stats *mss = walk->private;
 574	struct vm_area_struct *vma = walk->vma;
 575	struct page *page;
 576
 577	/* FOLL_DUMP will return -EFAULT on huge zero page */
 578	page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
 579	if (IS_ERR_OR_NULL(page))
 580		return;
 581	if (PageAnon(page))
 582		mss->anonymous_thp += HPAGE_PMD_SIZE;
 583	else if (PageSwapBacked(page))
 584		mss->shmem_thp += HPAGE_PMD_SIZE;
 585	else if (is_zone_device_page(page))
 586		/* pass */;
 587	else
 588		VM_BUG_ON_PAGE(1, page);
 589	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
 590}
 591#else
 592static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 593		struct mm_walk *walk)
 594{
 595}
 596#endif
 597
 598static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 599			   struct mm_walk *walk)
 600{
 601	struct vm_area_struct *vma = walk->vma;
 
 602	pte_t *pte;
 603	spinlock_t *ptl;
 604
 605	ptl = pmd_trans_huge_lock(pmd, vma);
 606	if (ptl) {
 607		if (pmd_present(*pmd))
 608			smaps_pmd_entry(pmd, addr, walk);
 609		spin_unlock(ptl);
 610		goto out;
 
 611	}
 612
 613	if (pmd_trans_unstable(pmd))
 614		goto out;
 615	/*
 616	 * The mmap_sem held all the way back in m_start() is what
 617	 * keeps khugepaged out of here and from collapsing things
 618	 * in here.
 619	 */
 620	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 621	for (; addr != end; pte++, addr += PAGE_SIZE)
 622		smaps_pte_entry(pte, addr, walk);
 623	pte_unmap_unlock(pte - 1, ptl);
 624out:
 625	cond_resched();
 626	return 0;
 627}
 628
 629static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
 630{
 631	/*
 632	 * Don't forget to update Documentation/ on changes.
 633	 */
 634	static const char mnemonics[BITS_PER_LONG][2] = {
 635		/*
  636		 * In case we meet a flag we don't know about.
 637		 */
 638		[0 ... (BITS_PER_LONG-1)] = "??",
 639
 640		[ilog2(VM_READ)]	= "rd",
 641		[ilog2(VM_WRITE)]	= "wr",
 642		[ilog2(VM_EXEC)]	= "ex",
 643		[ilog2(VM_SHARED)]	= "sh",
 644		[ilog2(VM_MAYREAD)]	= "mr",
 645		[ilog2(VM_MAYWRITE)]	= "mw",
 646		[ilog2(VM_MAYEXEC)]	= "me",
 647		[ilog2(VM_MAYSHARE)]	= "ms",
 648		[ilog2(VM_GROWSDOWN)]	= "gd",
 649		[ilog2(VM_PFNMAP)]	= "pf",
 650		[ilog2(VM_DENYWRITE)]	= "dw",
 651#ifdef CONFIG_X86_INTEL_MPX
 652		[ilog2(VM_MPX)]		= "mp",
 653#endif
 654		[ilog2(VM_LOCKED)]	= "lo",
 655		[ilog2(VM_IO)]		= "io",
 656		[ilog2(VM_SEQ_READ)]	= "sr",
 657		[ilog2(VM_RAND_READ)]	= "rr",
 658		[ilog2(VM_DONTCOPY)]	= "dc",
 659		[ilog2(VM_DONTEXPAND)]	= "de",
 660		[ilog2(VM_ACCOUNT)]	= "ac",
 661		[ilog2(VM_NORESERVE)]	= "nr",
 662		[ilog2(VM_HUGETLB)]	= "ht",
 663		[ilog2(VM_SYNC)]	= "sf",
 664		[ilog2(VM_ARCH_1)]	= "ar",
 665		[ilog2(VM_WIPEONFORK)]	= "wf",
 666		[ilog2(VM_DONTDUMP)]	= "dd",
 667#ifdef CONFIG_MEM_SOFT_DIRTY
 668		[ilog2(VM_SOFTDIRTY)]	= "sd",
 669#endif
 670		[ilog2(VM_MIXEDMAP)]	= "mm",
 671		[ilog2(VM_HUGEPAGE)]	= "hg",
 672		[ilog2(VM_NOHUGEPAGE)]	= "nh",
 673		[ilog2(VM_MERGEABLE)]	= "mg",
 674		[ilog2(VM_UFFD_MISSING)]= "um",
 675		[ilog2(VM_UFFD_WP)]	= "uw",
 676#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 677		/* These come out via ProtectionKey: */
 678		[ilog2(VM_PKEY_BIT0)]	= "",
 679		[ilog2(VM_PKEY_BIT1)]	= "",
 680		[ilog2(VM_PKEY_BIT2)]	= "",
 681		[ilog2(VM_PKEY_BIT3)]	= "",
 682#endif
 683	};
 684	size_t i;
 685
 686	seq_puts(m, "VmFlags: ");
 687	for (i = 0; i < BITS_PER_LONG; i++) {
 688		if (!mnemonics[i][0])
 689			continue;
 690		if (vma->vm_flags & (1UL << i)) {
 691			seq_putc(m, mnemonics[i][0]);
 692			seq_putc(m, mnemonics[i][1]);
 693			seq_putc(m, ' ');
 694		}
 695	}
 696	seq_putc(m, '\n');
 697}
 698
 699#ifdef CONFIG_HUGETLB_PAGE
 700static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
 701				 unsigned long addr, unsigned long end,
 702				 struct mm_walk *walk)
 703{
 704	struct mem_size_stats *mss = walk->private;
 705	struct vm_area_struct *vma = walk->vma;
 706	struct page *page = NULL;
 707
 708	if (pte_present(*pte)) {
 709		page = vm_normal_page(vma, addr, *pte);
 710	} else if (is_swap_pte(*pte)) {
 711		swp_entry_t swpent = pte_to_swp_entry(*pte);
 712
 713		if (is_migration_entry(swpent))
 714			page = migration_entry_to_page(swpent);
 715		else if (is_device_private_entry(swpent))
 716			page = device_private_entry_to_page(swpent);
 717	}
 718	if (page) {
 719		int mapcount = page_mapcount(page);
 720
 721		if (mapcount >= 2)
 722			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
 723		else
 724			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
 725	}
 726	return 0;
 727}
 728#endif /* HUGETLB_PAGE */
 729
 730void __weak arch_show_smap(struct seq_file *m, struct vm_area_struct *vma)
 731{
 732}
 733
 734#define SEQ_PUT_DEC(str, val) \
 735		seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
 736static int show_smap(struct seq_file *m, void *v, int is_pid)
 737{
 738	struct proc_maps_private *priv = m->private;
 
 739	struct vm_area_struct *vma = v;
 740	struct mem_size_stats mss_stack;
 741	struct mem_size_stats *mss;
 742	struct mm_walk smaps_walk = {
 743		.pmd_entry = smaps_pte_range,
 744#ifdef CONFIG_HUGETLB_PAGE
 745		.hugetlb_entry = smaps_hugetlb_range,
 746#endif
 747		.mm = vma->vm_mm,
 
 748	};
 749	int ret = 0;
 750	bool rollup_mode;
 751	bool last_vma;
 752
 753	if (priv->rollup) {
 754		rollup_mode = true;
 755		mss = priv->rollup;
 756		if (mss->first) {
 757			mss->first_vma_start = vma->vm_start;
 758			mss->first = false;
 759		}
 760		last_vma = !m_next_vma(priv, vma);
 761	} else {
 762		rollup_mode = false;
 763		memset(&mss_stack, 0, sizeof(mss_stack));
 764		mss = &mss_stack;
 765	}
 766
 767	smaps_walk.private = mss;
 768
 769#ifdef CONFIG_SHMEM
 770	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
 771		/*
 772		 * For shared or readonly shmem mappings we know that all
 773		 * swapped out pages belong to the shmem object, and we can
 774		 * obtain the swap value much more efficiently. For private
 775		 * writable mappings, we might have COW pages that are
 776		 * not affected by the parent swapped out pages of the shmem
 777		 * object, so we have to distinguish them during the page walk.
 778		 * Unless we know that the shmem object (or the part mapped by
 779		 * our VMA) has no swapped out pages at all.
 780		 */
 781		unsigned long shmem_swapped = shmem_swap_usage(vma);
 782
 783		if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
 784					!(vma->vm_flags & VM_WRITE)) {
 785			mss->swap = shmem_swapped;
 786		} else {
 787			mss->check_shmem_swap = true;
 788			smaps_walk.pte_hole = smaps_pte_hole;
 789		}
 790	}
 791#endif
 792
 793	/* mmap_sem is held in m_start */
 794	walk_page_vma(vma, &smaps_walk);
 795	if (vma->vm_flags & VM_LOCKED)
 796		mss->pss_locked += mss->pss;
 797
 798	if (!rollup_mode) {
 799		show_map_vma(m, vma, is_pid);
 800	} else if (last_vma) {
 801		show_vma_header_prefix(
 802			m, mss->first_vma_start, vma->vm_end, 0, 0, 0, 0);
 803		seq_pad(m, ' ');
 804		seq_puts(m, "[rollup]\n");
 805	} else {
 806		ret = SEQ_SKIP;
 807	}
 808
 809	if (!rollup_mode) {
 810		SEQ_PUT_DEC("Size:           ", vma->vm_end - vma->vm_start);
 811		SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
 812		SEQ_PUT_DEC(" kB\nMMUPageSize:    ", vma_mmu_pagesize(vma));
 813		seq_puts(m, " kB\n");
 814	}
 815
 816	if (!rollup_mode || last_vma) {
 817		SEQ_PUT_DEC("Rss:            ", mss->resident);
 818		SEQ_PUT_DEC(" kB\nPss:            ", mss->pss >> PSS_SHIFT);
 819		SEQ_PUT_DEC(" kB\nShared_Clean:   ", mss->shared_clean);
 820		SEQ_PUT_DEC(" kB\nShared_Dirty:   ", mss->shared_dirty);
 821		SEQ_PUT_DEC(" kB\nPrivate_Clean:  ", mss->private_clean);
 822		SEQ_PUT_DEC(" kB\nPrivate_Dirty:  ", mss->private_dirty);
 823		SEQ_PUT_DEC(" kB\nReferenced:     ", mss->referenced);
 824		SEQ_PUT_DEC(" kB\nAnonymous:      ", mss->anonymous);
 825		SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
 826		SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
 827		SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
 828		SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
 829		seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
 830					  mss->private_hugetlb >> 10, 7);
 831		SEQ_PUT_DEC(" kB\nSwap:           ", mss->swap);
 832		SEQ_PUT_DEC(" kB\nSwapPss:        ",
 833						mss->swap_pss >> PSS_SHIFT);
 834		SEQ_PUT_DEC(" kB\nLocked:         ", mss->pss >> PSS_SHIFT);
 835		seq_puts(m, " kB\n");
 836	}
 837	if (!rollup_mode) {
 838		arch_show_smap(m, vma);
 839		show_smap_vma_flags(m, vma);
 840	}
 841	m_cache_vma(m, vma);
 842	return ret;
 843}
 844#undef SEQ_PUT_DEC
 845
 846static int show_pid_smap(struct seq_file *m, void *v)
 847{
 848	return show_smap(m, v, 1);
 849}
 850
 851static int show_tid_smap(struct seq_file *m, void *v)
 852{
 853	return show_smap(m, v, 0);
 854}
 855
 856static const struct seq_operations proc_pid_smaps_op = {
 857	.start	= m_start,
 858	.next	= m_next,
 859	.stop	= m_stop,
 860	.show	= show_pid_smap
 861};
 862
 863static const struct seq_operations proc_tid_smaps_op = {
 864	.start	= m_start,
 865	.next	= m_next,
 866	.stop	= m_stop,
 867	.show	= show_tid_smap
 868};
 869
 870static int pid_smaps_open(struct inode *inode, struct file *file)
 871{
 872	return do_maps_open(inode, file, &proc_pid_smaps_op);
 873}
 874
 875static int pid_smaps_rollup_open(struct inode *inode, struct file *file)
 876{
 877	struct seq_file *seq;
 878	struct proc_maps_private *priv;
 879	int ret = do_maps_open(inode, file, &proc_pid_smaps_op);
 880
 881	if (ret < 0)
 882		return ret;
 883	seq = file->private_data;
 884	priv = seq->private;
 885	priv->rollup = kzalloc(sizeof(*priv->rollup), GFP_KERNEL);
 886	if (!priv->rollup) {
 887		proc_map_release(inode, file);
 888		return -ENOMEM;
 889	}
 890	priv->rollup->first = true;
 891	return 0;
 892}
 893
 894static int tid_smaps_open(struct inode *inode, struct file *file)
 895{
 896	return do_maps_open(inode, file, &proc_tid_smaps_op);
 897}
 898
 899const struct file_operations proc_pid_smaps_operations = {
 900	.open		= pid_smaps_open,
 901	.read		= seq_read,
 902	.llseek		= seq_lseek,
 903	.release	= proc_map_release,
 904};
 905
 906const struct file_operations proc_pid_smaps_rollup_operations = {
 907	.open		= pid_smaps_rollup_open,
 908	.read		= seq_read,
 909	.llseek		= seq_lseek,
 910	.release	= proc_map_release,
 911};
 912
 913const struct file_operations proc_tid_smaps_operations = {
 914	.open		= tid_smaps_open,
 915	.read		= seq_read,
 916	.llseek		= seq_lseek,
 917	.release	= proc_map_release,
 918};
 919
 920enum clear_refs_types {
 921	CLEAR_REFS_ALL = 1,
 922	CLEAR_REFS_ANON,
 923	CLEAR_REFS_MAPPED,
 924	CLEAR_REFS_SOFT_DIRTY,
 925	CLEAR_REFS_MM_HIWATER_RSS,
 926	CLEAR_REFS_LAST,
 927};
 928
 929struct clear_refs_private {
 
 930	enum clear_refs_types type;
 931};
 932
 933#ifdef CONFIG_MEM_SOFT_DIRTY
 934static inline void clear_soft_dirty(struct vm_area_struct *vma,
 935		unsigned long addr, pte_t *pte)
 936{
 
 937	/*
 938	 * The soft-dirty tracker uses #PF-s to catch writes
 939	 * to pages, so write-protect the pte as well. See the
 940	 * Documentation/vm/soft-dirty.txt for full description
 941	 * of how soft-dirty works.
 942	 */
 943	pte_t ptent = *pte;
 944
 945	if (pte_present(ptent)) {
 946		ptent = ptep_modify_prot_start(vma->vm_mm, addr, pte);
 947		ptent = pte_wrprotect(ptent);
 948		ptent = pte_clear_soft_dirty(ptent);
 949		ptep_modify_prot_commit(vma->vm_mm, addr, pte, ptent);
 950	} else if (is_swap_pte(ptent)) {
 951		ptent = pte_swp_clear_soft_dirty(ptent);
 952		set_pte_at(vma->vm_mm, addr, pte, ptent);
 
 953	}
 954}
 955#else
 956static inline void clear_soft_dirty(struct vm_area_struct *vma,
 957		unsigned long addr, pte_t *pte)
 958{
 959}
 960#endif
 961
 962#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
 963static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 964		unsigned long addr, pmd_t *pmdp)
 965{
 966	pmd_t old, pmd = *pmdp;
 967
 968	if (pmd_present(pmd)) {
 969		/* See comment in change_huge_pmd() */
 970		old = pmdp_invalidate(vma, addr, pmdp);
 971		if (pmd_dirty(old))
 972			pmd = pmd_mkdirty(pmd);
 973		if (pmd_young(old))
 974			pmd = pmd_mkyoung(pmd);
 975
 976		pmd = pmd_wrprotect(pmd);
 977		pmd = pmd_clear_soft_dirty(pmd);
 978
 979		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
 980	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
 981		pmd = pmd_swp_clear_soft_dirty(pmd);
 982		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
 983	}
 984}
 985#else
 986static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 987		unsigned long addr, pmd_t *pmdp)
 988{
 989}
 990#endif
 
 991
 992static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 993				unsigned long end, struct mm_walk *walk)
 994{
 995	struct clear_refs_private *cp = walk->private;
 996	struct vm_area_struct *vma = walk->vma;
 997	pte_t *pte, ptent;
 998	spinlock_t *ptl;
 999	struct page *page;
1000
1001	ptl = pmd_trans_huge_lock(pmd, vma);
1002	if (ptl) {
1003		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1004			clear_soft_dirty_pmd(vma, addr, pmd);
1005			goto out;
1006		}
1007
1008		if (!pmd_present(*pmd))
1009			goto out;
1010
1011		page = pmd_page(*pmd);
1012
1013		/* Clear accessed and referenced bits. */
1014		pmdp_test_and_clear_young(vma, addr, pmd);
1015		test_and_clear_page_young(page);
1016		ClearPageReferenced(page);
1017out:
1018		spin_unlock(ptl);
1019		return 0;
1020	}
1021
1022	if (pmd_trans_unstable(pmd))
1023		return 0;
1024
1025	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1026	for (; addr != end; pte++, addr += PAGE_SIZE) {
1027		ptent = *pte;
1028
1029		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1030			clear_soft_dirty(vma, addr, pte);
1031			continue;
1032		}
1033
1034		if (!pte_present(ptent))
1035			continue;
1036
1037		page = vm_normal_page(vma, addr, ptent);
1038		if (!page)
1039			continue;
1040
1041		/* Clear accessed and referenced bits. */
1042		ptep_test_and_clear_young(vma, addr, pte);
1043		test_and_clear_page_young(page);
1044		ClearPageReferenced(page);
1045	}
1046	pte_unmap_unlock(pte - 1, ptl);
1047	cond_resched();
1048	return 0;
1049}
1050
1051static int clear_refs_test_walk(unsigned long start, unsigned long end,
1052				struct mm_walk *walk)
1053{
1054	struct clear_refs_private *cp = walk->private;
1055	struct vm_area_struct *vma = walk->vma;
1056
1057	if (vma->vm_flags & VM_PFNMAP)
1058		return 1;
1059
1060	/*
1061	 * Writing 1 to /proc/pid/clear_refs affects all pages.
1062	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
1063	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
1064	 * Writing 4 to /proc/pid/clear_refs affects all pages.
1065	 */
1066	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
1067		return 1;
1068	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
1069		return 1;
1070	return 0;
1071}
1072
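/*
 * Editorial sketch (not part of the kernel source): typical userspace use
 * of the write handler below.  Writing "4" (CLEAR_REFS_SOFT_DIRTY) clears
 * the soft-dirty bits so that a later read of /proc/<pid>/pagemap (bit 55)
 * reports only pages written since this point.  The helper name and error
 * handling are illustrative.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int clear_soft_dirty(pid_t pid)
{
	char path[64];
	int fd, ret;

	snprintf(path, sizeof(path), "/proc/%d/clear_refs", (int)pid);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	/* "1"=all, "2"=anon, "3"=file-mapped, "4"=soft-dirty, "5"=reset peak RSS */
	ret = (write(fd, "4", 1) == 1) ? 0 : -1;
	close(fd);
	return ret;
}
#endif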
1073static ssize_t clear_refs_write(struct file *file, const char __user *buf,
1074				size_t count, loff_t *ppos)
1075{
1076	struct task_struct *task;
1077	char buffer[PROC_NUMBUF];
1078	struct mm_struct *mm;
1079	struct vm_area_struct *vma;
1080	enum clear_refs_types type;
1081	struct mmu_gather tlb;
1082	int itype;
1083	int rv;
1084
1085	memset(buffer, 0, sizeof(buffer));
1086	if (count > sizeof(buffer) - 1)
1087		count = sizeof(buffer) - 1;
1088	if (copy_from_user(buffer, buf, count))
1089		return -EFAULT;
1090	rv = kstrtoint(strstrip(buffer), 10, &itype);
1091	if (rv < 0)
1092		return rv;
1093	type = (enum clear_refs_types)itype;
1094	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
1095		return -EINVAL;
1096
1097	task = get_proc_task(file_inode(file));
1098	if (!task)
1099		return -ESRCH;
1100	mm = get_task_mm(task);
1101	if (mm) {
1102		struct clear_refs_private cp = {
1103			.type = type,
1104		};
1105		struct mm_walk clear_refs_walk = {
1106			.pmd_entry = clear_refs_pte_range,
1107			.test_walk = clear_refs_test_walk,
1108			.mm = mm,
1109			.private = &cp,
1110		};
1111
1112		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
1113			if (down_write_killable(&mm->mmap_sem)) {
1114				count = -EINTR;
1115				goto out_mm;
1116			}
1117
1118			/*
1119			 * Writing 5 to /proc/pid/clear_refs resets the peak
1120			 * resident set size to this mm's current rss value.
1121			 */
1122			reset_mm_hiwater_rss(mm);
1123			up_write(&mm->mmap_sem);
1124			goto out_mm;
1125		}
1126
1127		down_read(&mm->mmap_sem);
1128		tlb_gather_mmu(&tlb, mm, 0, -1);
1129		if (type == CLEAR_REFS_SOFT_DIRTY) {
1130			for (vma = mm->mmap; vma; vma = vma->vm_next) {
1131				if (!(vma->vm_flags & VM_SOFTDIRTY))
1132					continue;
1133				up_read(&mm->mmap_sem);
1134				if (down_write_killable(&mm->mmap_sem)) {
1135					count = -EINTR;
1136					goto out_mm;
1137				}
1138				for (vma = mm->mmap; vma; vma = vma->vm_next) {
1139					vma->vm_flags &= ~VM_SOFTDIRTY;
1140					vma_set_page_prot(vma);
1141				}
1142				downgrade_write(&mm->mmap_sem);
1143				break;
1144			}
1145			mmu_notifier_invalidate_range_start(mm, 0, -1);
1146		}
1147		walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
1148		if (type == CLEAR_REFS_SOFT_DIRTY)
1149			mmu_notifier_invalidate_range_end(mm, 0, -1);
1150		tlb_finish_mmu(&tlb, 0, -1);
1151		up_read(&mm->mmap_sem);
1152out_mm:
1153		mmput(mm);
1154	}
1155	put_task_struct(task);
1156
1157	return count;
1158}
1159
1160const struct file_operations proc_clear_refs_operations = {
1161	.write		= clear_refs_write,
1162	.llseek		= noop_llseek,
1163};
1164
1165typedef struct {
1166	u64 pme;
1167} pagemap_entry_t;
1168
1169struct pagemapread {
1170	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
1171	pagemap_entry_t *buffer;
1172	bool show_pfn;
1173};
1174
1175#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
1176#define PAGEMAP_WALK_MASK	(PMD_MASK)
1177
1178#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
1179#define PM_PFRAME_BITS		55
1180#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1181#define PM_SOFT_DIRTY		BIT_ULL(55)
1182#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
1183#define PM_FILE			BIT_ULL(61)
1184#define PM_SWAP			BIT_ULL(62)
1185#define PM_PRESENT		BIT_ULL(63)
1186
1187#define PM_END_OF_BUFFER    1
1188
1189static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
1190{
1191	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
1192}
1193
1194static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
1195			  struct pagemapread *pm)
1196{
1197	pm->buffer[pm->pos++] = *pme;
1198	if (pm->pos >= pm->len)
1199		return PM_END_OF_BUFFER;
1200	return 0;
1201}
1202
1203static int pagemap_pte_hole(unsigned long start, unsigned long end,
1204				struct mm_walk *walk)
1205{
1206	struct pagemapread *pm = walk->private;
1207	unsigned long addr = start;
1208	int err = 0;
 
1209
1210	while (addr < end) {
1211		struct vm_area_struct *vma = find_vma(walk->mm, addr);
1212		pagemap_entry_t pme = make_pme(0, 0);
1213		/* End of address space hole, which we mark as non-present. */
1214		unsigned long hole_end;
1215
1216		if (vma)
1217			hole_end = min(end, vma->vm_start);
1218		else
1219			hole_end = end;
1220
1221		for (; addr < hole_end; addr += PAGE_SIZE) {
1222			err = add_to_pagemap(addr, &pme, pm);
1223			if (err)
1224				goto out;
1225		}
1226
1227		if (!vma)
1228			break;
1229
1230		/* Addresses in the VMA. */
1231		if (vma->vm_flags & VM_SOFTDIRTY)
1232			pme = make_pme(0, PM_SOFT_DIRTY);
1233		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
1234			err = add_to_pagemap(addr, &pme, pm);
1235			if (err)
1236				goto out;
1237		}
1238	}
1239out:
1240	return err;
1241}
1242
1243static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
1244		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1245{
1246	u64 frame = 0, flags = 0;
1247	struct page *page = NULL;
 
1248
1249	if (pte_present(pte)) {
1250		if (pm->show_pfn)
1251			frame = pte_pfn(pte);
1252		flags |= PM_PRESENT;
1253		page = _vm_normal_page(vma, addr, pte, true);
1254		if (pte_soft_dirty(pte))
1255			flags |= PM_SOFT_DIRTY;
1256	} else if (is_swap_pte(pte)) {
1257		swp_entry_t entry;
1258		if (pte_swp_soft_dirty(pte))
1259			flags |= PM_SOFT_DIRTY;
1260		entry = pte_to_swp_entry(pte);
1261		frame = swp_type(entry) |
1262			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
1263		flags |= PM_SWAP;
1264		if (is_migration_entry(entry))
1265			page = migration_entry_to_page(entry);
1266
1267		if (is_device_private_entry(entry))
1268			page = device_private_entry_to_page(entry);
1269	}
1270
1271	if (page && !PageAnon(page))
1272		flags |= PM_FILE;
1273	if (page && page_mapcount(page) == 1)
1274		flags |= PM_MMAP_EXCLUSIVE;
1275	if (vma->vm_flags & VM_SOFTDIRTY)
1276		flags |= PM_SOFT_DIRTY;
1277
1278	return make_pme(frame, flags);
1279}
1280
1281static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1282			     struct mm_walk *walk)
1283{
1284	struct vm_area_struct *vma = walk->vma;
1285	struct pagemapread *pm = walk->private;
1286	spinlock_t *ptl;
1287	pte_t *pte, *orig_pte;
1288	int err = 0;
 
1289
1290#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1291	ptl = pmd_trans_huge_lock(pmdp, vma);
1292	if (ptl) {
1293		u64 flags = 0, frame = 0;
1294		pmd_t pmd = *pmdp;
1295		struct page *page = NULL;
1296
1297		if (vma->vm_flags & VM_SOFTDIRTY)
1298			flags |= PM_SOFT_DIRTY;
1299
1300		if (pmd_present(pmd)) {
1301			page = pmd_page(pmd);
1302
1303			flags |= PM_PRESENT;
1304			if (pmd_soft_dirty(pmd))
1305				flags |= PM_SOFT_DIRTY;
1306			if (pm->show_pfn)
1307				frame = pmd_pfn(pmd) +
1308					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1309		}
1310#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1311		else if (is_swap_pmd(pmd)) {
1312			swp_entry_t entry = pmd_to_swp_entry(pmd);
1313			unsigned long offset = swp_offset(entry);
1314
1315			offset += (addr & ~PMD_MASK) >> PAGE_SHIFT;
1316			frame = swp_type(entry) |
1317				(offset << MAX_SWAPFILES_SHIFT);
1318			flags |= PM_SWAP;
1319			if (pmd_swp_soft_dirty(pmd))
1320				flags |= PM_SOFT_DIRTY;
1321			VM_BUG_ON(!is_pmd_migration_entry(pmd));
1322			page = migration_entry_to_page(entry);
1323		}
1324#endif
1325
1326		if (page && page_mapcount(page) == 1)
1327			flags |= PM_MMAP_EXCLUSIVE;
1328
1329		for (; addr != end; addr += PAGE_SIZE) {
1330			pagemap_entry_t pme = make_pme(frame, flags);
1331
1332			err = add_to_pagemap(addr, &pme, pm);
1333			if (err)
1334				break;
1335			if (pm->show_pfn && (flags & PM_PRESENT))
1336				frame++;
1337			else if (flags & PM_SWAP)
1338				frame += (1 << MAX_SWAPFILES_SHIFT);
1339		}
1340		spin_unlock(ptl);
1341		return err;
1342	}
1343
1344	if (pmd_trans_unstable(pmdp))
1345		return 0;
1346#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
1347
1348	/*
 1349	 * We can assume that @vma always points to a valid VMA and @end never
1350	 * goes beyond vma->vm_end.
1351	 */
1352	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
1353	for (; addr < end; pte++, addr += PAGE_SIZE) {
1354		pagemap_entry_t pme;
1355
1356		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
1357		err = add_to_pagemap(addr, &pme, pm);
1358		if (err)
1359			break;
1360	}
1361	pte_unmap_unlock(orig_pte, ptl);
1362
1363	cond_resched();
1364
1365	return err;
1366}
1367
1368#ifdef CONFIG_HUGETLB_PAGE
1369/* This function walks within one hugetlb entry in the single call */
1370static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
1371				 unsigned long addr, unsigned long end,
1372				 struct mm_walk *walk)
1373{
1374	struct pagemapread *pm = walk->private;
1375	struct vm_area_struct *vma = walk->vma;
1376	u64 flags = 0, frame = 0;
1377	int err = 0;
1378	pte_t pte;
 
1379
1380	if (vma->vm_flags & VM_SOFTDIRTY)
1381		flags |= PM_SOFT_DIRTY;
1382
1383	pte = huge_ptep_get(ptep);
1384	if (pte_present(pte)) {
1385		struct page *page = pte_page(pte);
1386
1387		if (!PageAnon(page))
1388			flags |= PM_FILE;
1389
1390		if (page_mapcount(page) == 1)
1391			flags |= PM_MMAP_EXCLUSIVE;
1392
1393		flags |= PM_PRESENT;
1394		if (pm->show_pfn)
1395			frame = pte_pfn(pte) +
1396				((addr & ~hmask) >> PAGE_SHIFT);
1397	}
1398
1399	for (; addr != end; addr += PAGE_SIZE) {
1400		pagemap_entry_t pme = make_pme(frame, flags);
1401
1402		err = add_to_pagemap(addr, &pme, pm);
1403		if (err)
1404			return err;
1405		if (pm->show_pfn && (flags & PM_PRESENT))
1406			frame++;
1407	}
1408
1409	cond_resched();
1410
1411	return err;
1412}
1413#endif /* HUGETLB_PAGE */
1414
1415/*
1416 * /proc/pid/pagemap - an array mapping virtual pages to pfns
1417 *
1418 * For each page in the address space, this file contains one 64-bit entry
1419 * consisting of the following:
1420 *
1421 * Bits 0-54  page frame number (PFN) if present
1422 * Bits 0-4   swap type if swapped
1423 * Bits 5-54  swap offset if swapped
1424 * Bit  55    pte is soft-dirty (see Documentation/vm/soft-dirty.txt)
1425 * Bit  56    page exclusively mapped
1426 * Bits 57-60 zero
1427 * Bit  61    page is file-page or shared-anon
1428 * Bit  62    page swapped
1429 * Bit  63    page present
1430 *
1431 * If the page is not present but in swap, then the PFN contains an
1432 * encoding of the swap file number and the page's offset into the
1433 * swap. Unmapped pages return a null PFN. This allows determining
1434 * precisely which pages are mapped (or in swap) and comparing mapped
1435 * pages between processes.
1436 *
1437 * Efficient users of this interface will use /proc/pid/maps to
1438 * determine which areas of memory are actually mapped and llseek to
1439 * skip over unmapped regions.
1440 */
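/*
 * Editorial sketch (not part of the kernel source): reading and decoding a
 * single entry of the file described above, following the bit layout in the
 * comment (and the PM_* macros earlier in this file).  fd is assumed to be
 * an open /proc/<pid>/pagemap; note that without CAP_SYS_ADMIN the kernel
 * zeroes the PFN field (see pm.show_pfn below).  Function names are
 * illustrative.
 */
#if 0
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int read_pagemap_entry(int fd, unsigned long vaddr, uint64_t *entry)
{
	long psize = sysconf(_SC_PAGESIZE);
	off_t off = (off_t)(vaddr / psize) * sizeof(*entry);

	return pread(fd, entry, sizeof(*entry), off) == sizeof(*entry) ? 0 : -1;
}

static void decode_entry(uint64_t e)
{
	if (e & (1ULL << 63))		/* present */
		printf("pfn=%llu%s%s\n",
		       (unsigned long long)(e & ((1ULL << 55) - 1)),
		       (e & (1ULL << 56)) ? " exclusive" : "",
		       (e & (1ULL << 55)) ? " soft-dirty" : "");
	else if (e & (1ULL << 62))	/* swapped */
		printf("swap type=%llu offset=%llu\n",
		       (unsigned long long)(e & 0x1f),
		       (unsigned long long)((e >> 5) & ((1ULL << 50) - 1)));
	else
		printf("not present\n");
}
#endif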
1441static ssize_t pagemap_read(struct file *file, char __user *buf,
1442			    size_t count, loff_t *ppos)
1443{
1444	struct mm_struct *mm = file->private_data;
 
1445	struct pagemapread pm;
 
1446	struct mm_walk pagemap_walk = {};
1447	unsigned long src;
1448	unsigned long svpfn;
1449	unsigned long start_vaddr;
1450	unsigned long end_vaddr;
1451	int ret = 0, copied = 0;
1452
1453	if (!mm || !mmget_not_zero(mm))
1454		goto out;
1455
1456	ret = -EINVAL;
1457	/* file position must be aligned */
1458	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1459		goto out_mm;
1460
1461	ret = 0;
1462	if (!count)
1463		goto out_mm;
1464
1465	/* do not disclose physical addresses: attack vector */
1466	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
1467
 
1468	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1469	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_KERNEL);
1470	ret = -ENOMEM;
1471	if (!pm.buffer)
1472		goto out_mm;
1473
1474	pagemap_walk.pmd_entry = pagemap_pmd_range;
1475	pagemap_walk.pte_hole = pagemap_pte_hole;
1476#ifdef CONFIG_HUGETLB_PAGE
1477	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
1478#endif
1479	pagemap_walk.mm = mm;
1480	pagemap_walk.private = &pm;
1481
1482	src = *ppos;
1483	svpfn = src / PM_ENTRY_BYTES;
1484	start_vaddr = svpfn << PAGE_SHIFT;
1485	end_vaddr = mm->task_size;
1486
1487	/* watch out for wraparound */
1488	if (svpfn > mm->task_size >> PAGE_SHIFT)
1489		start_vaddr = end_vaddr;
1490
1491	/*
1492	 * The odds are that this will stop walking way
1493	 * before end_vaddr, because the length of the
1494	 * user buffer is tracked in "pm", and the walk
1495	 * will stop when we hit the end of the buffer.
1496	 */
1497	ret = 0;
1498	while (count && (start_vaddr < end_vaddr)) {
1499		int len;
1500		unsigned long end;
1501
1502		pm.pos = 0;
1503		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
1504		/* overflow ? */
1505		if (end < start_vaddr || end > end_vaddr)
1506			end = end_vaddr;
1507		down_read(&mm->mmap_sem);
1508		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
1509		up_read(&mm->mmap_sem);
1510		start_vaddr = end;
1511
1512		len = min(count, PM_ENTRY_BYTES * pm.pos);
1513		if (copy_to_user(buf, pm.buffer, len)) {
1514			ret = -EFAULT;
1515			goto out_free;
1516		}
1517		copied += len;
1518		buf += len;
1519		count -= len;
1520	}
1521	*ppos += copied;
1522	if (!ret || ret == PM_END_OF_BUFFER)
1523		ret = copied;
1524
1525out_free:
1526	kfree(pm.buffer);
1527out_mm:
1528	mmput(mm);
1529out:
1530	return ret;
1531}
1532
1533static int pagemap_open(struct inode *inode, struct file *file)
1534{
1535	struct mm_struct *mm;
1536
1537	mm = proc_mem_open(inode, PTRACE_MODE_READ);
1538	if (IS_ERR(mm))
1539		return PTR_ERR(mm);
1540	file->private_data = mm;
1541	return 0;
1542}
1543
1544static int pagemap_release(struct inode *inode, struct file *file)
1545{
1546	struct mm_struct *mm = file->private_data;
1547
1548	if (mm)
1549		mmdrop(mm);
1550	return 0;
1551}
1552
1553const struct file_operations proc_pagemap_operations = {
1554	.llseek		= mem_lseek, /* borrow this */
1555	.read		= pagemap_read,
1556	.open		= pagemap_open,
1557	.release	= pagemap_release,
1558};
1559#endif /* CONFIG_PROC_PAGE_MONITOR */
1560
1561#ifdef CONFIG_NUMA
1562
1563struct numa_maps {
 
1564	unsigned long pages;
1565	unsigned long anon;
1566	unsigned long active;
1567	unsigned long writeback;
1568	unsigned long mapcount_max;
1569	unsigned long dirty;
1570	unsigned long swapcache;
1571	unsigned long node[MAX_NUMNODES];
1572};
1573
1574struct numa_maps_private {
1575	struct proc_maps_private proc_maps;
1576	struct numa_maps md;
1577};
1578
1579static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
1580			unsigned long nr_pages)
1581{
1582	int count = page_mapcount(page);
1583
1584	md->pages += nr_pages;
1585	if (pte_dirty || PageDirty(page))
1586		md->dirty += nr_pages;
1587
1588	if (PageSwapCache(page))
1589		md->swapcache += nr_pages;
1590
1591	if (PageActive(page) || PageUnevictable(page))
1592		md->active += nr_pages;
1593
1594	if (PageWriteback(page))
1595		md->writeback += nr_pages;
1596
1597	if (PageAnon(page))
1598		md->anon += nr_pages;
1599
1600	if (count > md->mapcount_max)
1601		md->mapcount_max = count;
1602
1603	md->node[page_to_nid(page)] += nr_pages;
1604}
1605
1606static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1607		unsigned long addr)
1608{
1609	struct page *page;
1610	int nid;
1611
1612	if (!pte_present(pte))
1613		return NULL;
1614
1615	page = vm_normal_page(vma, addr, pte);
1616	if (!page)
1617		return NULL;
1618
1619	if (PageReserved(page))
1620		return NULL;
1621
1622	nid = page_to_nid(page);
1623	if (!node_isset(nid, node_states[N_MEMORY]))
1624		return NULL;
1625
1626	return page;
1627}
1628
1629#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1630static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
1631					      struct vm_area_struct *vma,
1632					      unsigned long addr)
1633{
1634	struct page *page;
1635	int nid;
1636
1637	if (!pmd_present(pmd))
1638		return NULL;
1639
1640	page = vm_normal_page_pmd(vma, addr, pmd);
1641	if (!page)
1642		return NULL;
1643
1644	if (PageReserved(page))
1645		return NULL;
1646
1647	nid = page_to_nid(page);
1648	if (!node_isset(nid, node_states[N_MEMORY]))
1649		return NULL;
1650
1651	return page;
1652}
1653#endif
1654
1655static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1656		unsigned long end, struct mm_walk *walk)
1657{
1658	struct numa_maps *md = walk->private;
1659	struct vm_area_struct *vma = walk->vma;
1660	spinlock_t *ptl;
1661	pte_t *orig_pte;
1662	pte_t *pte;
1663
1664#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1665	ptl = pmd_trans_huge_lock(pmd, vma);
1666	if (ptl) {
 
1667		struct page *page;
1668
1669		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
1670		if (page)
1671			gather_stats(page, md, pmd_dirty(*pmd),
1672				     HPAGE_PMD_SIZE/PAGE_SIZE);
1673		spin_unlock(ptl);
1674		return 0;
1675	}
1676
1677	if (pmd_trans_unstable(pmd))
1678		return 0;
1679#endif
1680	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1681	do {
1682		struct page *page = can_gather_numa_stats(*pte, vma, addr);
1683		if (!page)
1684			continue;
1685		gather_stats(page, md, pte_dirty(*pte), 1);
1686
1687	} while (pte++, addr += PAGE_SIZE, addr != end);
1688	pte_unmap_unlock(orig_pte, ptl);
1689	cond_resched();
1690	return 0;
1691}
1692#ifdef CONFIG_HUGETLB_PAGE
1693static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1694		unsigned long addr, unsigned long end, struct mm_walk *walk)
1695{
1696	pte_t huge_pte = huge_ptep_get(pte);
1697	struct numa_maps *md;
1698	struct page *page;
1699
1700	if (!pte_present(huge_pte))
1701		return 0;
1702
1703	page = pte_page(huge_pte);
1704	if (!page)
1705		return 0;
1706
1707	md = walk->private;
1708	gather_stats(page, md, pte_dirty(huge_pte), 1);
1709	return 0;
1710}
1711
1712#else
1713static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1714		unsigned long addr, unsigned long end, struct mm_walk *walk)
1715{
1716	return 0;
1717}
1718#endif
1719
1720/*
1721 * Display pages allocated per node and memory policy via /proc.
1722 */
1723static int show_numa_map(struct seq_file *m, void *v, int is_pid)
1724{
1725	struct numa_maps_private *numa_priv = m->private;
1726	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
1727	struct vm_area_struct *vma = v;
1728	struct numa_maps *md = &numa_priv->md;
1729	struct file *file = vma->vm_file;
 
1730	struct mm_struct *mm = vma->vm_mm;
1731	struct mm_walk walk = {
1732		.hugetlb_entry = gather_hugetlb_stats,
1733		.pmd_entry = gather_pte_stats,
1734		.private = md,
1735		.mm = mm,
1736	};
1737	struct mempolicy *pol;
1738	char buffer[64];
1739	int nid;
1740
1741	if (!mm)
1742		return 0;
1743
1744	/* Ensure we start with an empty set of numa_maps statistics. */
1745	memset(md, 0, sizeof(*md));
1746
1747	pol = __get_vma_policy(vma, vma->vm_start);
1748	if (pol) {
1749		mpol_to_str(buffer, sizeof(buffer), pol);
1750		mpol_cond_put(pol);
1751	} else {
1752		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
1753	}
1754
1755	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1756
1757	if (file) {
1758		seq_puts(m, " file=");
1759		seq_file_path(m, file, "\n\t= ");
1760	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1761		seq_puts(m, " heap");
1762	} else if (is_stack(vma)) {
1763		seq_puts(m, " stack");
1764	}
1765
1766	if (is_vm_hugetlb_page(vma))
1767		seq_puts(m, " huge");
1768
1769	/* mmap_sem is held by m_start */
1770	walk_page_vma(vma, &walk);
1771
1772	if (!md->pages)
1773		goto out;
1774
1775	if (md->anon)
1776		seq_printf(m, " anon=%lu", md->anon);
1777
1778	if (md->dirty)
1779		seq_printf(m, " dirty=%lu", md->dirty);
1780
1781	if (md->pages != md->anon && md->pages != md->dirty)
1782		seq_printf(m, " mapped=%lu", md->pages);
1783
1784	if (md->mapcount_max > 1)
1785		seq_printf(m, " mapmax=%lu", md->mapcount_max);
1786
1787	if (md->swapcache)
1788		seq_printf(m, " swapcache=%lu", md->swapcache);
1789
1790	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1791		seq_printf(m, " active=%lu", md->active);
1792
1793	if (md->writeback)
1794		seq_printf(m, " writeback=%lu", md->writeback);
1795
1796	for_each_node_state(nid, N_MEMORY)
1797		if (md->node[nid])
1798			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
1799
1800	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
1801out:
1802	seq_putc(m, '\n');
1803	m_cache_vma(m, vma);
1804	return 0;
1805}
1806
1807static int show_pid_numa_map(struct seq_file *m, void *v)
1808{
1809	return show_numa_map(m, v, 1);
1810}
1811
1812static int show_tid_numa_map(struct seq_file *m, void *v)
1813{
1814	return show_numa_map(m, v, 0);
1815}
1816
1817static const struct seq_operations proc_pid_numa_maps_op = {
1818	.start  = m_start,
1819	.next   = m_next,
1820	.stop   = m_stop,
1821	.show   = show_pid_numa_map,
1822};
1823
1824static const struct seq_operations proc_tid_numa_maps_op = {
1825	.start  = m_start,
1826	.next   = m_next,
1827	.stop   = m_stop,
1828	.show   = show_tid_numa_map,
1829};
1830
1831static int numa_maps_open(struct inode *inode, struct file *file,
1832			  const struct seq_operations *ops)
1833{
1834	return proc_maps_open(inode, file, ops,
1835				sizeof(struct numa_maps_private));
1836}
1837
1838static int pid_numa_maps_open(struct inode *inode, struct file *file)
1839{
1840	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
1841}
1842
1843static int tid_numa_maps_open(struct inode *inode, struct file *file)
1844{
1845	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
1846}
1847
1848const struct file_operations proc_pid_numa_maps_operations = {
1849	.open		= pid_numa_maps_open,
1850	.read		= seq_read,
1851	.llseek		= seq_lseek,
1852	.release	= proc_map_release,
1853};
1854
1855const struct file_operations proc_tid_numa_maps_operations = {
1856	.open		= tid_numa_maps_open,
1857	.read		= seq_read,
1858	.llseek		= seq_lseek,
1859	.release	= proc_map_release,
1860};
1861#endif /* CONFIG_NUMA */
v3.15
 
   1#include <linux/mm.h>
   2#include <linux/vmacache.h>
   3#include <linux/hugetlb.h>
   4#include <linux/huge_mm.h>
   5#include <linux/mount.h>
   6#include <linux/seq_file.h>
   7#include <linux/highmem.h>
   8#include <linux/ptrace.h>
   9#include <linux/slab.h>
  10#include <linux/pagemap.h>
  11#include <linux/mempolicy.h>
  12#include <linux/rmap.h>
  13#include <linux/swap.h>
 
  14#include <linux/swapops.h>
  15#include <linux/mmu_notifier.h>
  16
  17#include <asm/elf.h>
  18#include <asm/uaccess.h>
  19#include <asm/tlbflush.h>
  20#include "internal.h"
  21
  22void task_mem(struct seq_file *m, struct mm_struct *mm)
  23{
  24	unsigned long data, text, lib, swap;
  25	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
  26
  27	/*
  28	 * Note: to minimize their overhead, mm maintains hiwater_vm and
  29	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
  30	 * collector of these hiwater stats must therefore get total_vm
  31	 * and rss too, which will usually be the higher.  Barriers? not
  32	 * worth the effort, such snapshots can always be inconsistent.
  33	 */
  34	hiwater_vm = total_vm = mm->total_vm;
  35	if (hiwater_vm < mm->hiwater_vm)
  36		hiwater_vm = mm->hiwater_vm;
  37	hiwater_rss = total_rss = get_mm_rss(mm);
  38	if (hiwater_rss < mm->hiwater_rss)
  39		hiwater_rss = mm->hiwater_rss;
  40
  41	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
  42	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
  43	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
  44	swap = get_mm_counter(mm, MM_SWAPENTS);
  45	seq_printf(m,
  46		"VmPeak:\t%8lu kB\n"
  47		"VmSize:\t%8lu kB\n"
  48		"VmLck:\t%8lu kB\n"
  49		"VmPin:\t%8lu kB\n"
  50		"VmHWM:\t%8lu kB\n"
  51		"VmRSS:\t%8lu kB\n"
  52		"VmData:\t%8lu kB\n"
  53		"VmStk:\t%8lu kB\n"
  54		"VmExe:\t%8lu kB\n"
  55		"VmLib:\t%8lu kB\n"
  56		"VmPTE:\t%8lu kB\n"
  57		"VmSwap:\t%8lu kB\n",
  58		hiwater_vm << (PAGE_SHIFT-10),
  59		total_vm << (PAGE_SHIFT-10),
  60		mm->locked_vm << (PAGE_SHIFT-10),
  61		mm->pinned_vm << (PAGE_SHIFT-10),
  62		hiwater_rss << (PAGE_SHIFT-10),
  63		total_rss << (PAGE_SHIFT-10),
  64		data << (PAGE_SHIFT-10),
  65		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
  66		(PTRS_PER_PTE * sizeof(pte_t) *
  67		 atomic_long_read(&mm->nr_ptes)) >> 10,
  68		swap << (PAGE_SHIFT-10));
  69}
 
  70
  71unsigned long task_vsize(struct mm_struct *mm)
  72{
  73	return PAGE_SIZE * mm->total_vm;
  74}
  75
  76unsigned long task_statm(struct mm_struct *mm,
  77			 unsigned long *shared, unsigned long *text,
  78			 unsigned long *data, unsigned long *resident)
  79{
  80	*shared = get_mm_counter(mm, MM_FILEPAGES);
 
  81	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
  82								>> PAGE_SHIFT;
  83	*data = mm->total_vm - mm->shared_vm;
  84	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
  85	return mm->total_vm;
  86}
  87
  88#ifdef CONFIG_NUMA
  89/*
  90 * These functions are for numa_maps but called in generic **maps seq_file
  91 * ->start(), ->stop() ops.
  92 *
  93 * numa_maps scans all vmas under mmap_sem and checks their mempolicy.
  94 * Each mempolicy object is controlled by reference counting. The problem here
  95 * is how to avoid accessing a dead mempolicy object.
  96 *
  97 * Because we're holding mmap_sem while reading seq_file, it's safe to access
  98 * each vma's mempolicy; no vma will drop its ref to its mempolicy while we do so.
  99 *
 100 * A task's mempolicy (task->mempolicy) has different behavior. task->mempolicy
 101 * is set and replaced under mmap_sem but unrefed and cleared under task_lock().
 102 * So, without task_lock(), we cannot trust get_vma_policy() because we cannot
 103 * guarantee that the task never exits under us. But taking task_lock() around
 104 * get_vma_policy() causes a lock-order problem.
 105 *
 106 * To access task->mempolicy without lock, we hold a reference count of an
 107 * object pointed to by task->mempolicy and remember it. This will guarantee
 108 * that task->mempolicy points to an alive object or NULL in numa_maps accesses.
 109 */
 110static void hold_task_mempolicy(struct proc_maps_private *priv)
 111{
 112	struct task_struct *task = priv->task;
 113
 114	task_lock(task);
 115	priv->task_mempolicy = task->mempolicy;
 116	mpol_get(priv->task_mempolicy);
 117	task_unlock(task);
 118}
 119static void release_task_mempolicy(struct proc_maps_private *priv)
 120{
 121	mpol_put(priv->task_mempolicy);
 122}
 123#else
 124static void hold_task_mempolicy(struct proc_maps_private *priv)
 125{
 126}
 127static void release_task_mempolicy(struct proc_maps_private *priv)
 128{
 129}
 130#endif
 131
 132static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
 133{
 134	if (vma && vma != priv->tail_vma) {
 135		struct mm_struct *mm = vma->vm_mm;
 136		release_task_mempolicy(priv);
 137		up_read(&mm->mmap_sem);
 138		mmput(mm);
 139	}
 140}
 141
 142static void *m_start(struct seq_file *m, loff_t *pos)
 143{
 144	struct proc_maps_private *priv = m->private;
 145	unsigned long last_addr = m->version;
 146	struct mm_struct *mm;
 147	struct vm_area_struct *vma, *tail_vma = NULL;
 148	loff_t l = *pos;
 149
 150	/* Clear the per syscall fields in priv */
 151	priv->task = NULL;
 152	priv->tail_vma = NULL;
 153
 154	/*
 155	 * We remember last_addr rather than next_addr to hit with
 156	 * vmacache most of the time. We have zero last_addr at
 157	 * the beginning and also after lseek. We will have -1 last_addr
 158	 * after the end of the vmas.
 159	 */
 160
 161	if (last_addr == -1UL)
 162		return NULL;
 163
 164	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
 165	if (!priv->task)
 166		return ERR_PTR(-ESRCH);
 167
 168	mm = mm_access(priv->task, PTRACE_MODE_READ);
 169	if (!mm || IS_ERR(mm))
 170		return mm;
 171	down_read(&mm->mmap_sem);
 172
 173	tail_vma = get_gate_vma(priv->task->mm);
 174	priv->tail_vma = tail_vma;
 175	hold_task_mempolicy(priv);
 176	/* Start with last addr hint */
 177	vma = find_vma(mm, last_addr);
 178	if (last_addr && vma) {
 179		vma = vma->vm_next;
 180		goto out;
 181	}
 182
 183	/*
 184	 * Check that the vma index is within range and do a
 185	 * sequential scan until m_index.
 186	 */
 187	vma = NULL;
 188	if ((unsigned long)l < mm->map_count) {
 189		vma = mm->mmap;
 190		while (l-- && vma)
 191			vma = vma->vm_next;
 192		goto out;
 193	}
 194
 195	if (l != mm->map_count)
 196		tail_vma = NULL; /* After gate vma */
 197
 198out:
 199	if (vma)
 200		return vma;
 201
 202	release_task_mempolicy(priv);
 203	/* End of vmas has been reached */
 204	m->version = (tail_vma != NULL)? 0: -1UL;
 205	up_read(&mm->mmap_sem);
 206	mmput(mm);
 207	return tail_vma;
 208}
 209
 210static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 211{
 212	struct proc_maps_private *priv = m->private;
 213	struct vm_area_struct *vma = v;
 214	struct vm_area_struct *tail_vma = priv->tail_vma;
 215
 216	(*pos)++;
 217	if (vma && (vma != tail_vma) && vma->vm_next)
 218		return vma->vm_next;
 219	vma_stop(priv, vma);
 220	return (vma != tail_vma)? tail_vma: NULL;
 221}
 222
 223static void m_stop(struct seq_file *m, void *v)
 224{
 225	struct proc_maps_private *priv = m->private;
 226	struct vm_area_struct *vma = v;
 227
 228	if (!IS_ERR(vma))
 229		vma_stop(priv, vma);
 230	if (priv->task)
 231		put_task_struct(priv->task);
 232}
 233
 234static int do_maps_open(struct inode *inode, struct file *file,
 235			const struct seq_operations *ops)
 236{
 237	struct proc_maps_private *priv;
 238	int ret = -ENOMEM;
 239	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 240	if (priv) {
 241		priv->pid = proc_pid(inode);
 242		ret = seq_open(file, ops);
 243		if (!ret) {
 244			struct seq_file *m = file->private_data;
 245			m->private = priv;
 246		} else {
 247			kfree(priv);
 248		}
 249	}
 250	return ret;
 251}
 252
 253static void
 254show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 255{
 256	struct mm_struct *mm = vma->vm_mm;
 257	struct file *file = vma->vm_file;
 258	struct proc_maps_private *priv = m->private;
 259	struct task_struct *task = priv->task;
 260	vm_flags_t flags = vma->vm_flags;
 261	unsigned long ino = 0;
 262	unsigned long long pgoff = 0;
 263	unsigned long start, end;
 264	dev_t dev = 0;
 265	const char *name = NULL;
 266
 267	if (file) {
 268		struct inode *inode = file_inode(vma->vm_file);
 269		dev = inode->i_sb->s_dev;
 270		ino = inode->i_ino;
 271		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
 272	}
 273
 274	/* We don't show the stack guard page in /proc/maps */
 275	start = vma->vm_start;
 276	if (stack_guard_page_start(vma, start))
 277		start += PAGE_SIZE;
 278	end = vma->vm_end;
 279	if (stack_guard_page_end(vma, end))
 280		end -= PAGE_SIZE;
 281
 282	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 283	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
 284			start,
 285			end,
 286			flags & VM_READ ? 'r' : '-',
 287			flags & VM_WRITE ? 'w' : '-',
 288			flags & VM_EXEC ? 'x' : '-',
 289			flags & VM_MAYSHARE ? 's' : 'p',
 290			pgoff,
 291			MAJOR(dev), MINOR(dev), ino);
 292
 293	/*
 294	 * Print the dentry name for named mappings, and a
 295	 * special [heap] marker for the heap:
 296	 */
 297	if (file) {
 298		seq_pad(m, ' ');
 299		seq_path(m, &file->f_path, "\n");
 300		goto done;
 301	}
 302
 303	name = arch_vma_name(vma);
 304	if (!name) {
 305		pid_t tid;
 306
 307		if (!mm) {
 308			name = "[vdso]";
 309			goto done;
 310		}
 311
 312		if (vma->vm_start <= mm->brk &&
 313		    vma->vm_end >= mm->start_brk) {
 314			name = "[heap]";
 315			goto done;
 316		}
 317
 318		tid = vm_is_stack(task, vma, is_pid);
 319
 320		if (tid != 0) {
 321			/*
 322			 * Thread stack in /proc/PID/task/TID/maps or
 323			 * the main process stack.
 324			 */
 325			if (!is_pid || (vma->vm_start <= mm->start_stack &&
 326			    vma->vm_end >= mm->start_stack)) {
 327				name = "[stack]";
 328			} else {
 329				/* Thread stack in /proc/PID/maps */
 330				seq_pad(m, ' ');
 331				seq_printf(m, "[stack:%d]", tid);
 332			}
 333		}
 334	}
 335
 336done:
 337	if (name) {
 338		seq_pad(m, ' ');
 339		seq_puts(m, name);
 340	}
 341	seq_putc(m, '\n');
 342}
 343
 344static int show_map(struct seq_file *m, void *v, int is_pid)
 345{
 346	struct vm_area_struct *vma = v;
 347	struct proc_maps_private *priv = m->private;
 348	struct task_struct *task = priv->task;
 349
 350	show_map_vma(m, vma, is_pid);
 351
 352	if (m->count < m->size)  /* vma is copied successfully */
 353		m->version = (vma != get_gate_vma(task->mm))
 354			? vma->vm_start : 0;
 355	return 0;
 356}
 357
 358static int show_pid_map(struct seq_file *m, void *v)
 359{
 360	return show_map(m, v, 1);
 361}
 362
 363static int show_tid_map(struct seq_file *m, void *v)
 364{
 365	return show_map(m, v, 0);
 366}
 367
 368static const struct seq_operations proc_pid_maps_op = {
 369	.start	= m_start,
 370	.next	= m_next,
 371	.stop	= m_stop,
 372	.show	= show_pid_map
 373};
 374
 375static const struct seq_operations proc_tid_maps_op = {
 376	.start	= m_start,
 377	.next	= m_next,
 378	.stop	= m_stop,
 379	.show	= show_tid_map
 380};
 381
 382static int pid_maps_open(struct inode *inode, struct file *file)
 383{
 384	return do_maps_open(inode, file, &proc_pid_maps_op);
 385}
 386
 387static int tid_maps_open(struct inode *inode, struct file *file)
 388{
 389	return do_maps_open(inode, file, &proc_tid_maps_op);
 390}
 391
 392const struct file_operations proc_pid_maps_operations = {
 393	.open		= pid_maps_open,
 394	.read		= seq_read,
 395	.llseek		= seq_lseek,
 396	.release	= seq_release_private,
 397};
 398
 399const struct file_operations proc_tid_maps_operations = {
 400	.open		= tid_maps_open,
 401	.read		= seq_read,
 402	.llseek		= seq_lseek,
 403	.release	= seq_release_private,
 404};
 405
 406/*
 407 * Proportional Set Size(PSS): my share of RSS.
 408 *
 409 * PSS of a process is the count of pages it has in memory, where each
 410 * page is divided by the number of processes sharing it.  So if a
 411 * process has 1000 pages all to itself, and 1000 shared with one other
 412 * process, its PSS will be 1500.
 413 *
 414 * To keep (accumulated) division errors low, we use a 64-bit
 415 * fixed-point pss counter; (pss >> PSS_SHIFT) is the real
 416 * byte count.
 417 *
 418 * A shift of 12 before division means (assuming 4K page size):
 419 * 	- 1M 3-user-pages add up to 8KB errors;
 420 * 	- supports mapcount up to 2^24, or 16M;
 421 * 	- supports PSS up to 2^52 bytes, or 4PB.
 422 */
 423#define PSS_SHIFT 12
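/*
 * Worked example of the fixed-point accounting above (illustrative,
 * assuming a 4K page size): smaps_pte_entry() below adds
 * (4096 << PSS_SHIFT) / mapcount for each mapped page, and show_smap()
 * prints mss.pss >> (10 + PSS_SHIFT) as the Pss: value in kB.  For the
 * 1000-private + 1000-shared-with-one-other case quoted above:
 *
 *	1000 * (4096 << 12) + 1000 * (4096 << 12) / 2
 *		= 16777216000 + 8388608000 = 25165824000
 *	25165824000 >> (10 + 12) = 6000 kB, i.e. the 1500 pages quoted.
 */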
 424
 425#ifdef CONFIG_PROC_PAGE_MONITOR
 426struct mem_size_stats {
 427	struct vm_area_struct *vma;
 428	unsigned long resident;
 429	unsigned long shared_clean;
 430	unsigned long shared_dirty;
 431	unsigned long private_clean;
 432	unsigned long private_dirty;
 433	unsigned long referenced;
 434	unsigned long anonymous;
 435	unsigned long anonymous_thp;
 436	unsigned long swap;
 437	unsigned long nonlinear;
 438	u64 pss;
 439};
 440
 441
 442static void smaps_pte_entry(pte_t ptent, unsigned long addr,
 443		unsigned long ptent_size, struct mm_walk *walk)
 444{
 445	struct mem_size_stats *mss = walk->private;
 446	struct vm_area_struct *vma = mss->vma;
 447	pgoff_t pgoff = linear_page_index(vma, addr);
 448	struct page *page = NULL;
 449	int mapcount;
 450
 451	if (pte_present(ptent)) {
 452		page = vm_normal_page(vma, addr, ptent);
 453	} else if (is_swap_pte(ptent)) {
 454		swp_entry_t swpent = pte_to_swp_entry(ptent);
 455
 456		if (!non_swap_entry(swpent))
 457			mss->swap += ptent_size;
 458		else if (is_migration_entry(swpent))
 459			page = migration_entry_to_page(swpent);
 460	} else if (pte_file(ptent)) {
 461		if (pte_to_pgoff(ptent) != pgoff)
 462			mss->nonlinear += ptent_size;
 463	}
 464
 465	if (!page)
 466		return;
 467
 468	if (PageAnon(page))
 469		mss->anonymous += ptent_size;
 470
 471	if (page->index != pgoff)
 472		mss->nonlinear += ptent_size;
 473
 474	mss->resident += ptent_size;
 475	/* Accumulate the size in pages that have been accessed. */
 476	if (pte_young(ptent) || PageReferenced(page))
 477		mss->referenced += ptent_size;
 478	mapcount = page_mapcount(page);
 479	if (mapcount >= 2) {
 480		if (pte_dirty(ptent) || PageDirty(page))
 481			mss->shared_dirty += ptent_size;
 482		else
 483			mss->shared_clean += ptent_size;
 484		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
 485	} else {
 486		if (pte_dirty(ptent) || PageDirty(page))
 487			mss->private_dirty += ptent_size;
 488		else
 489			mss->private_clean += ptent_size;
 490		mss->pss += (ptent_size << PSS_SHIFT);
 491	}
 492}
 493
 494static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 495			   struct mm_walk *walk)
 496{
 497	struct mem_size_stats *mss = walk->private;
 498	struct vm_area_struct *vma = mss->vma;
 499	pte_t *pte;
 500	spinlock_t *ptl;
 501
 502	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 503		smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
 504		spin_unlock(ptl);
 505		mss->anonymous_thp += HPAGE_PMD_SIZE;
 506		return 0;
 507	}
 508
 509	if (pmd_trans_unstable(pmd))
 510		return 0;
 511	/*
 512	 * The mmap_sem held all the way back in m_start() is what
 513	 * keeps khugepaged out of here and from collapsing things
 514	 * in here.
 515	 */
 516	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 517	for (; addr != end; pte++, addr += PAGE_SIZE)
 518		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
 519	pte_unmap_unlock(pte - 1, ptl);
 520	cond_resched();
 521	return 0;
 522}
 523
 524static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
 525{
 526	/*
 527	 * Don't forget to update Documentation/ on changes.
 528	 */
 529	static const char mnemonics[BITS_PER_LONG][2] = {
 530		/*
 531		 * In case we meet a flag we don't know about.
 532		 */
 533		[0 ... (BITS_PER_LONG-1)] = "??",
 534
 535		[ilog2(VM_READ)]	= "rd",
 536		[ilog2(VM_WRITE)]	= "wr",
 537		[ilog2(VM_EXEC)]	= "ex",
 538		[ilog2(VM_SHARED)]	= "sh",
 539		[ilog2(VM_MAYREAD)]	= "mr",
 540		[ilog2(VM_MAYWRITE)]	= "mw",
 541		[ilog2(VM_MAYEXEC)]	= "me",
 542		[ilog2(VM_MAYSHARE)]	= "ms",
 543		[ilog2(VM_GROWSDOWN)]	= "gd",
 544		[ilog2(VM_PFNMAP)]	= "pf",
 545		[ilog2(VM_DENYWRITE)]	= "dw",
 546		[ilog2(VM_LOCKED)]	= "lo",
 547		[ilog2(VM_IO)]		= "io",
 548		[ilog2(VM_SEQ_READ)]	= "sr",
 549		[ilog2(VM_RAND_READ)]	= "rr",
 550		[ilog2(VM_DONTCOPY)]	= "dc",
 551		[ilog2(VM_DONTEXPAND)]	= "de",
 552		[ilog2(VM_ACCOUNT)]	= "ac",
 553		[ilog2(VM_NORESERVE)]	= "nr",
 554		[ilog2(VM_HUGETLB)]	= "ht",
 555		[ilog2(VM_NONLINEAR)]	= "nl",
 556		[ilog2(VM_ARCH_1)]	= "ar",
 557		[ilog2(VM_DONTDUMP)]	= "dd",
 558#ifdef CONFIG_MEM_SOFT_DIRTY
 559		[ilog2(VM_SOFTDIRTY)]	= "sd",
 560#endif
 561		[ilog2(VM_MIXEDMAP)]	= "mm",
 562		[ilog2(VM_HUGEPAGE)]	= "hg",
 563		[ilog2(VM_NOHUGEPAGE)]	= "nh",
 564		[ilog2(VM_MERGEABLE)]	= "mg",
 565	};
 566	size_t i;
 567
 568	seq_puts(m, "VmFlags: ");
 569	for (i = 0; i < BITS_PER_LONG; i++) {
 570		if (vma->vm_flags & (1UL << i)) {
 571			seq_printf(m, "%c%c ",
 572				   mnemonics[i][0], mnemonics[i][1]);
 573		}
 574	}
 575	seq_putc(m, '\n');
 576}
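
/*
 * Illustrative example (not from this file): for a read-only executable
 * file mapping carrying VM_READ | VM_EXEC, the VM_MAY* bits and
 * VM_DENYWRITE, the loop above would emit a line such as
 *
 *	VmFlags: rd ex mr mw me dw
 */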
 577
 578static int show_smap(struct seq_file *m, void *v, int is_pid)
 579{
 580	struct proc_maps_private *priv = m->private;
 581	struct task_struct *task = priv->task;
 582	struct vm_area_struct *vma = v;
 583	struct mem_size_stats mss;
 584	struct mm_walk smaps_walk = {
 585		.pmd_entry = smaps_pte_range,
 586		.mm = vma->vm_mm,
 587		.private = &mss,
 588	};
 589
 590	memset(&mss, 0, sizeof mss);
 591	mss.vma = vma;
 592	/* mmap_sem is held in m_start */
 593	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
 594		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
 595
 596	show_map_vma(m, vma, is_pid);
 597
 598	seq_printf(m,
 599		   "Size:           %8lu kB\n"
 600		   "Rss:            %8lu kB\n"
 601		   "Pss:            %8lu kB\n"
 602		   "Shared_Clean:   %8lu kB\n"
 603		   "Shared_Dirty:   %8lu kB\n"
 604		   "Private_Clean:  %8lu kB\n"
 605		   "Private_Dirty:  %8lu kB\n"
 606		   "Referenced:     %8lu kB\n"
 607		   "Anonymous:      %8lu kB\n"
 608		   "AnonHugePages:  %8lu kB\n"
 609		   "Swap:           %8lu kB\n"
 610		   "KernelPageSize: %8lu kB\n"
 611		   "MMUPageSize:    %8lu kB\n"
 612		   "Locked:         %8lu kB\n",
 613		   (vma->vm_end - vma->vm_start) >> 10,
 614		   mss.resident >> 10,
 615		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
 616		   mss.shared_clean  >> 10,
 617		   mss.shared_dirty  >> 10,
 618		   mss.private_clean >> 10,
 619		   mss.private_dirty >> 10,
 620		   mss.referenced >> 10,
 621		   mss.anonymous >> 10,
 622		   mss.anonymous_thp >> 10,
 623		   mss.swap >> 10,
 624		   vma_kernel_pagesize(vma) >> 10,
 625		   vma_mmu_pagesize(vma) >> 10,
 626		   (vma->vm_flags & VM_LOCKED) ?
 627			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
 628
 629	if (vma->vm_flags & VM_NONLINEAR)
 630		seq_printf(m, "Nonlinear:      %8lu kB\n",
 631				mss.nonlinear >> 10);
 632
 633	show_smap_vma_flags(m, vma);
 634
 635	if (m->count < m->size)  /* vma is copied successfully */
 636		m->version = (vma != get_gate_vma(task->mm))
 637			? vma->vm_start : 0;
 638	return 0;
 639}
 640
 641static int show_pid_smap(struct seq_file *m, void *v)
 642{
 643	return show_smap(m, v, 1);
 644}
 645
 646static int show_tid_smap(struct seq_file *m, void *v)
 647{
 648	return show_smap(m, v, 0);
 649}
 650
 651static const struct seq_operations proc_pid_smaps_op = {
 652	.start	= m_start,
 653	.next	= m_next,
 654	.stop	= m_stop,
 655	.show	= show_pid_smap
 656};
 657
 658static const struct seq_operations proc_tid_smaps_op = {
 659	.start	= m_start,
 660	.next	= m_next,
 661	.stop	= m_stop,
 662	.show	= show_tid_smap
 663};
 664
 665static int pid_smaps_open(struct inode *inode, struct file *file)
 666{
 667	return do_maps_open(inode, file, &proc_pid_smaps_op);
 668}
 669
 670static int tid_smaps_open(struct inode *inode, struct file *file)
 671{
 672	return do_maps_open(inode, file, &proc_tid_smaps_op);
 673}
 674
 675const struct file_operations proc_pid_smaps_operations = {
 676	.open		= pid_smaps_open,
 677	.read		= seq_read,
 678	.llseek		= seq_lseek,
 679	.release	= seq_release_private,
 680};
 681
 682const struct file_operations proc_tid_smaps_operations = {
 683	.open		= tid_smaps_open,
 684	.read		= seq_read,
 685	.llseek		= seq_lseek,
 686	.release	= seq_release_private,
 687};
 688
 689/*
 690 * We do not want to have constant page-shift bits sitting in
 691 * pagemap entries and are about to reuse them some time soon.
 692 *
 693 * Here's the "migration strategy":
 694 * 1. when the system boots these bits remain what they are,
 695 *    but a warning about the future change is printed in the log;
 696 * 2. once anyone clears soft-dirty bits via the clear_refs file,
 697 *    this flag is set to denote that the user is aware of the
 698 *    new API and those page-shift bits change their meaning.
 699 *    The respective warning is printed in dmesg;
 700 * 3. In a couple of releases we will remove all the mentions
 701 *    of page-shift in pagemap entries.
 702 */
 703
 704static bool soft_dirty_cleared __read_mostly;
 705
 706enum clear_refs_types {
 707	CLEAR_REFS_ALL = 1,
 708	CLEAR_REFS_ANON,
 709	CLEAR_REFS_MAPPED,
 710	CLEAR_REFS_SOFT_DIRTY,
 711	CLEAR_REFS_LAST,
 712};
 713
 714struct clear_refs_private {
 715	struct vm_area_struct *vma;
 716	enum clear_refs_types type;
 717};
 718
 719static inline void clear_soft_dirty(struct vm_area_struct *vma,
 720		unsigned long addr, pte_t *pte)
 721{
 722#ifdef CONFIG_MEM_SOFT_DIRTY
 723	/*
 724	 * The soft-dirty tracker uses #PF-s to catch writes
 725	 * to pages, so write-protect the pte as well. See the
 726	 * Documentation/vm/soft-dirty.txt for full description
 727	 * of how soft-dirty works.
 728	 */
 729	pte_t ptent = *pte;
 730
 731	if (pte_present(ptent)) {
 732		ptent = pte_wrprotect(ptent);
 733		ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
 734	} else if (is_swap_pte(ptent)) {
 735		ptent = pte_swp_clear_soft_dirty(ptent);
 736	} else if (pte_file(ptent)) {
 737		ptent = pte_file_clear_soft_dirty(ptent);
 738	}
 739
 740	if (vma->vm_flags & VM_SOFTDIRTY)
 741		vma->vm_flags &= ~VM_SOFTDIRTY;
 742
 743	set_pte_at(vma->vm_mm, addr, pte, ptent);
 744#endif
 745}
 746
 747static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 748				unsigned long end, struct mm_walk *walk)
 749{
 750	struct clear_refs_private *cp = walk->private;
 751	struct vm_area_struct *vma = cp->vma;
 752	pte_t *pte, ptent;
 753	spinlock_t *ptl;
 754	struct page *page;
 755
 756	split_huge_page_pmd(vma, addr, pmd);
 757	if (pmd_trans_unstable(pmd))
 758		return 0;
 759
 760	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 761	for (; addr != end; pte++, addr += PAGE_SIZE) {
 762		ptent = *pte;
 763
 764		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
 765			clear_soft_dirty(vma, addr, pte);
 766			continue;
 767		}
 768
 769		if (!pte_present(ptent))
 770			continue;
 771
 772		page = vm_normal_page(vma, addr, ptent);
 773		if (!page)
 774			continue;
 775
 776		/* Clear accessed and referenced bits. */
 777		ptep_test_and_clear_young(vma, addr, pte);
 778		ClearPageReferenced(page);
 779	}
 780	pte_unmap_unlock(pte - 1, ptl);
 781	cond_resched();
 782	return 0;
 783}
 784
 785static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 786				size_t count, loff_t *ppos)
 787{
 788	struct task_struct *task;
 789	char buffer[PROC_NUMBUF];
 790	struct mm_struct *mm;
 791	struct vm_area_struct *vma;
 792	enum clear_refs_types type;
 793	int itype;
 794	int rv;
 795
 796	memset(buffer, 0, sizeof(buffer));
 797	if (count > sizeof(buffer) - 1)
 798		count = sizeof(buffer) - 1;
 799	if (copy_from_user(buffer, buf, count))
 800		return -EFAULT;
 801	rv = kstrtoint(strstrip(buffer), 10, &itype);
 802	if (rv < 0)
 803		return rv;
 804	type = (enum clear_refs_types)itype;
 805	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
 806		return -EINVAL;
 807
 808	if (type == CLEAR_REFS_SOFT_DIRTY) {
 809		soft_dirty_cleared = true;
 810		pr_warn_once("The pagemap bits 55-60 have changed their meaning! "
 811				"See the linux/Documentation/vm/pagemap.txt for details.\n");
 812	}
 813
 814	task = get_proc_task(file_inode(file));
 815	if (!task)
 816		return -ESRCH;
 817	mm = get_task_mm(task);
 818	if (mm) {
 819		struct clear_refs_private cp = {
 820			.type = type,
 821		};
 822		struct mm_walk clear_refs_walk = {
 823			.pmd_entry = clear_refs_pte_range,
 824			.mm = mm,
 825			.private = &cp,
 826		};
 827		down_read(&mm->mmap_sem);
 828		if (type == CLEAR_REFS_SOFT_DIRTY)
 829			mmu_notifier_invalidate_range_start(mm, 0, -1);
 830		for (vma = mm->mmap; vma; vma = vma->vm_next) {
 831			cp.vma = vma;
 832			if (is_vm_hugetlb_page(vma))
 833				continue;
 834			/*
 835			 * Writing 1 to /proc/pid/clear_refs affects all pages.
 836			 *
 837			 * Writing 2 to /proc/pid/clear_refs only affects
 838			 * Anonymous pages.
 839			 *
 840			 * Writing 3 to /proc/pid/clear_refs only affects file
 841			 * mapped pages.
 842			 */
 843			if (type == CLEAR_REFS_ANON && vma->vm_file)
 844				continue;
 845			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
 846				continue;
 847			walk_page_range(vma->vm_start, vma->vm_end,
 848					&clear_refs_walk);
 849		}
 850		if (type == CLEAR_REFS_SOFT_DIRTY)
 851			mmu_notifier_invalidate_range_end(mm, 0, -1);
 852		flush_tlb_mm(mm);
 853		up_read(&mm->mmap_sem);
 854		mmput(mm);
 855	}
 856	put_task_struct(task);
 857
 858	return count;
 859}
 860
 861const struct file_operations proc_clear_refs_operations = {
 862	.write		= clear_refs_write,
 863	.llseek		= noop_llseek,
 864};
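
/*
 * Illustrative usage sketch (hypothetical userspace, not part of this
 * file): the values written here map onto enum clear_refs_types above.
 *
 *	echo 1 > /proc/$PID/clear_refs	# clear young/referenced, all pages
 *	echo 2 > /proc/$PID/clear_refs	# anonymous pages only
 *	echo 3 > /proc/$PID/clear_refs	# file-mapped pages only
 *	echo 4 > /proc/$PID/clear_refs	# clear soft-dirty bits, so later
 *					# writes show up as soft-dirty in
 *					# /proc/$PID/pagemap
 */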
 865
 866typedef struct {
 867	u64 pme;
 868} pagemap_entry_t;
 869
 870struct pagemapread {
 871	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
 872	pagemap_entry_t *buffer;
 873	bool v2;
 874};
 875
 876#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
 877#define PAGEMAP_WALK_MASK	(PMD_MASK)
 878
 879#define PM_ENTRY_BYTES      sizeof(pagemap_entry_t)
 880#define PM_STATUS_BITS      3
 881#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
 882#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
 883#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
 884#define PM_PSHIFT_BITS      6
 885#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
 886#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
 887#define __PM_PSHIFT(x)      (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
 888#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
 889#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)
 890/* in the "new" pagemap, the pshift bits are occupied by more status bits */
 891#define PM_STATUS2(v2, x)   (__PM_PSHIFT(v2 ? x : PAGE_SHIFT))
 892
 893#define __PM_SOFT_DIRTY      (1LL)
 894#define PM_PRESENT          PM_STATUS(4LL)
 895#define PM_SWAP             PM_STATUS(2LL)
 896#define PM_FILE             PM_STATUS(1LL)
 897#define PM_NOT_PRESENT(v2)  PM_STATUS2(v2, 0)
 898#define PM_END_OF_BUFFER    1
 899
 900static inline pagemap_entry_t make_pme(u64 val)
 901{
 902	return (pagemap_entry_t) { .pme = val };
 903}
 904
 905static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
 906			  struct pagemapread *pm)
 907{
 908	pm->buffer[pm->pos++] = *pme;
 909	if (pm->pos >= pm->len)
 910		return PM_END_OF_BUFFER;
 911	return 0;
 912}
 913
 914static int pagemap_pte_hole(unsigned long start, unsigned long end,
 915				struct mm_walk *walk)
 916{
 917	struct pagemapread *pm = walk->private;
 918	unsigned long addr;
 919	int err = 0;
 920	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
 921
 922	for (addr = start; addr < end; addr += PAGE_SIZE) {
 923		err = add_to_pagemap(addr, &pme, pm);
 924		if (err)
 925			break;
 926	}
 927	return err;
 928}
 929
 930static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
 931		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 932{
 933	u64 frame, flags;
 934	struct page *page = NULL;
 935	int flags2 = 0;
 936
 937	if (pte_present(pte)) {
 938		frame = pte_pfn(pte);
 939		flags = PM_PRESENT;
 940		page = vm_normal_page(vma, addr, pte);
 941		if (pte_soft_dirty(pte))
 942			flags2 |= __PM_SOFT_DIRTY;
 943	} else if (is_swap_pte(pte)) {
 944		swp_entry_t entry;
 945		if (pte_swp_soft_dirty(pte))
 946			flags2 |= __PM_SOFT_DIRTY;
 947		entry = pte_to_swp_entry(pte);
 948		frame = swp_type(entry) |
 949			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
 950		flags = PM_SWAP;
 951		if (is_migration_entry(entry))
 952			page = migration_entry_to_page(entry);
 953	} else {
 954		if (vma->vm_flags & VM_SOFTDIRTY)
 955			flags2 |= __PM_SOFT_DIRTY;
 956		*pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
 957		return;
 958	}
 959
 960	if (page && !PageAnon(page))
 961		flags |= PM_FILE;
 962	if ((vma->vm_flags & VM_SOFTDIRTY))
 963		flags2 |= __PM_SOFT_DIRTY;
 964
 965	*pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags);
 966}
 967
 968#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 969static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
 970		pmd_t pmd, int offset, int pmd_flags2)
 971{
 972	/*
 973	 * Currently the pmd for thp is always present because thp cannot be
 974	 * swapped out, migrated, or HWPOISONed (it is split in such cases instead).
 975	 * This if-check is just to prepare for future implementation.
 976	 */
 977	if (pmd_present(pmd))
 978		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
 979				| PM_STATUS2(pm->v2, pmd_flags2) | PM_PRESENT);
 980	else
 981		*pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, pmd_flags2));
 982}
 983#else
 984static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
 985		pmd_t pmd, int offset, int pmd_flags2)
 986{
 987}
 988#endif
 989
 990static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 991			     struct mm_walk *walk)
 992{
 993	struct vm_area_struct *vma;
 994	struct pagemapread *pm = walk->private;
 995	spinlock_t *ptl;
 996	pte_t *pte;
 997	int err = 0;
 998	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
 999
1000	/* find the first VMA at or above 'addr' */
1001	vma = find_vma(walk->mm, addr);
1002	if (vma && pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
1003		int pmd_flags2;
1004
1005		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
1006			pmd_flags2 = __PM_SOFT_DIRTY;
1007		else
1008			pmd_flags2 = 0;
1009
1010		for (; addr != end; addr += PAGE_SIZE) {
1011			unsigned long offset;
1012
1013			offset = (addr & ~PAGEMAP_WALK_MASK) >>
1014					PAGE_SHIFT;
1015			thp_pmd_to_pagemap_entry(&pme, pm, *pmd, offset, pmd_flags2);
1016			err = add_to_pagemap(addr, &pme, pm);
1017			if (err)
1018				break;
1019		}
1020		spin_unlock(ptl);
1021		return err;
1022	}
1023
1024	if (pmd_trans_unstable(pmd))
1025		return 0;
1026	for (; addr != end; addr += PAGE_SIZE) {
1027		int flags2;
1028
1029		/* check to see if we've left 'vma' behind
1030		 * and need a new, higher one */
1031		if (vma && (addr >= vma->vm_end)) {
1032			vma = find_vma(walk->mm, addr);
1033			if (vma && (vma->vm_flags & VM_SOFTDIRTY))
1034				flags2 = __PM_SOFT_DIRTY;
1035			else
1036				flags2 = 0;
1037			pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
1038		}
1039
1040		/* check that 'vma' actually covers this address,
1041		 * and that it isn't a huge page vma */
1042		if (vma && (vma->vm_start <= addr) &&
1043		    !is_vm_hugetlb_page(vma)) {
1044			pte = pte_offset_map(pmd, addr);
1045			pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
1046			/* unmap before userspace copy */
1047			pte_unmap(pte);
1048		}
1049		err = add_to_pagemap(addr, &pme, pm);
1050		if (err)
1051			return err;
1052	}
1053
1054	cond_resched();
1055
1056	return err;
1057}
1058
1059#ifdef CONFIG_HUGETLB_PAGE
1060static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
1061					pte_t pte, int offset, int flags2)
1062{
1063	if (pte_present(pte))
1064		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)	|
1065				PM_STATUS2(pm->v2, flags2)		|
1066				PM_PRESENT);
1067	else
1068		*pme = make_pme(PM_NOT_PRESENT(pm->v2)			|
1069				PM_STATUS2(pm->v2, flags2));
1070}
1071
1072/* This function walks within one hugetlb entry in a single call */
1073static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
1074				 unsigned long addr, unsigned long end,
1075				 struct mm_walk *walk)
1076{
1077	struct pagemapread *pm = walk->private;
1078	struct vm_area_struct *vma;
1079	int err = 0;
1080	int flags2;
1081	pagemap_entry_t pme;
1082
1083	vma = find_vma(walk->mm, addr);
1084	WARN_ON_ONCE(!vma);
1085
1086	if (vma && (vma->vm_flags & VM_SOFTDIRTY))
1087		flags2 = __PM_SOFT_DIRTY;
1088	else
1089		flags2 = 0;
1090
1091	for (; addr != end; addr += PAGE_SIZE) {
1092		int offset = (addr & ~hmask) >> PAGE_SHIFT;
1093		huge_pte_to_pagemap_entry(&pme, pm, *pte, offset, flags2);
1094		err = add_to_pagemap(addr, &pme, pm);
1095		if (err)
1096			return err;
1097	}
1098
1099	cond_resched();
1100
1101	return err;
1102}
1103#endif /* HUGETLB_PAGE */
1104
1105/*
1106 * /proc/pid/pagemap - an array mapping virtual pages to pfns
1107 *
1108 * For each page in the address space, this file contains one 64-bit entry
1109 * consisting of the following:
1110 *
1111 * Bits 0-54  page frame number (PFN) if present
1112 * Bits 0-4   swap type if swapped
1113 * Bits 5-54  swap offset if swapped
1114 * Bits 55-60 page shift (page size = 1<<page shift)
1115 * Bit  61    page is file-page or shared-anon
1116 * Bit  62    page swapped
1117 * Bit  63    page present
1118 *
1119 * If the page is not present but in swap, then the PFN contains an
1120 * encoding of the swap file number and the page's offset into the
1121 * swap. Unmapped pages return a null PFN. This allows determining
1122 * precisely which pages are mapped (or in swap) and comparing mapped
1123 * pages between processes.
1124 *
1125 * Efficient users of this interface will use /proc/pid/maps to
1126 * determine which areas of memory are actually mapped and llseek to
1127 * skip over unmapped regions.
1128 */
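/*
 * Illustrative userspace sketch (hypothetical, not part of this file):
 * read and decode one pagemap entry for a virtual address, assuming the
 * "old" layout above where bits 55-60 still carry the page shift.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static int read_pagemap_entry(int pid, unsigned long vaddr,
 *				      uint64_t *ent)
 *	{
 *		long psize = sysconf(_SC_PAGESIZE);
 *		char path[64];
 *		int fd;
 *
 *		snprintf(path, sizeof(path), "/proc/%d/pagemap", pid);
 *		fd = open(path, O_RDONLY);
 *		if (fd < 0)
 *			return -1;
 *		// one 64-bit entry per virtual page, as described above
 *		if (pread(fd, ent, sizeof(*ent),
 *			  (vaddr / psize) * sizeof(*ent)) != sizeof(*ent)) {
 *			close(fd);
 *			return -1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 *
 * Decoding then follows the table above: bit 63 is "present", bit 62 is
 * "swapped", and (when present) the PFN is ent & ((1ULL << 55) - 1).
 */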
1129static ssize_t pagemap_read(struct file *file, char __user *buf,
1130			    size_t count, loff_t *ppos)
1131{
1132	struct task_struct *task = get_proc_task(file_inode(file));
1133	struct mm_struct *mm;
1134	struct pagemapread pm;
1135	int ret = -ESRCH;
1136	struct mm_walk pagemap_walk = {};
1137	unsigned long src;
1138	unsigned long svpfn;
1139	unsigned long start_vaddr;
1140	unsigned long end_vaddr;
1141	int copied = 0;
1142
1143	if (!task)
1144		goto out;
1145
1146	ret = -EINVAL;
1147	/* file position must be aligned */
1148	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1149		goto out_task;
1150
1151	ret = 0;
1152	if (!count)
1153		goto out_task;
1154
1155	pm.v2 = soft_dirty_cleared;
1156	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1157	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
1158	ret = -ENOMEM;
1159	if (!pm.buffer)
1160		goto out_task;
1161
1162	mm = mm_access(task, PTRACE_MODE_READ);
1163	ret = PTR_ERR(mm);
1164	if (!mm || IS_ERR(mm))
1165		goto out_free;
1166
1167	pagemap_walk.pmd_entry = pagemap_pte_range;
1168	pagemap_walk.pte_hole = pagemap_pte_hole;
1169#ifdef CONFIG_HUGETLB_PAGE
1170	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
1171#endif
1172	pagemap_walk.mm = mm;
1173	pagemap_walk.private = &pm;
1174
1175	src = *ppos;
1176	svpfn = src / PM_ENTRY_BYTES;
1177	start_vaddr = svpfn << PAGE_SHIFT;
1178	end_vaddr = TASK_SIZE_OF(task);
1179
1180	/* watch out for wraparound */
1181	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
1182		start_vaddr = end_vaddr;
1183
1184	/*
1185	 * The odds are that this will stop walking way
1186	 * before end_vaddr, because the length of the
1187	 * user buffer is tracked in "pm", and the walk
1188	 * will stop when we hit the end of the buffer.
1189	 */
1190	ret = 0;
1191	while (count && (start_vaddr < end_vaddr)) {
1192		int len;
1193		unsigned long end;
1194
1195		pm.pos = 0;
1196		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
1197		/* overflow ? */
1198		if (end < start_vaddr || end > end_vaddr)
1199			end = end_vaddr;
1200		down_read(&mm->mmap_sem);
1201		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
1202		up_read(&mm->mmap_sem);
1203		start_vaddr = end;
1204
1205		len = min(count, PM_ENTRY_BYTES * pm.pos);
1206		if (copy_to_user(buf, pm.buffer, len)) {
1207			ret = -EFAULT;
1208			goto out_mm;
1209		}
1210		copied += len;
1211		buf += len;
1212		count -= len;
1213	}
1214	*ppos += copied;
1215	if (!ret || ret == PM_END_OF_BUFFER)
1216		ret = copied;
1217
1218out_mm:
1219	mmput(mm);
1220out_free:
1221	kfree(pm.buffer);
1222out_task:
1223	put_task_struct(task);
1224out:
1225	return ret;
1226}
1227
1228static int pagemap_open(struct inode *inode, struct file *file)
1229{
1230	pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about "
1231			"to stop being page-shift some time soon. See the "
1232			"linux/Documentation/vm/pagemap.txt for details.\n");
1233	return 0;
1234}
1235
1236const struct file_operations proc_pagemap_operations = {
1237	.llseek		= mem_lseek, /* borrow this */
1238	.read		= pagemap_read,
1239	.open		= pagemap_open,
1240};
1241#endif /* CONFIG_PROC_PAGE_MONITOR */
1242
1243#ifdef CONFIG_NUMA
1244
1245struct numa_maps {
1246	struct vm_area_struct *vma;
1247	unsigned long pages;
1248	unsigned long anon;
1249	unsigned long active;
1250	unsigned long writeback;
1251	unsigned long mapcount_max;
1252	unsigned long dirty;
1253	unsigned long swapcache;
1254	unsigned long node[MAX_NUMNODES];
1255};
1256
1257struct numa_maps_private {
1258	struct proc_maps_private proc_maps;
1259	struct numa_maps md;
1260};
1261
1262static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
1263			unsigned long nr_pages)
1264{
1265	int count = page_mapcount(page);
1266
1267	md->pages += nr_pages;
1268	if (pte_dirty || PageDirty(page))
1269		md->dirty += nr_pages;
1270
1271	if (PageSwapCache(page))
1272		md->swapcache += nr_pages;
1273
1274	if (PageActive(page) || PageUnevictable(page))
1275		md->active += nr_pages;
1276
1277	if (PageWriteback(page))
1278		md->writeback += nr_pages;
1279
1280	if (PageAnon(page))
1281		md->anon += nr_pages;
1282
1283	if (count > md->mapcount_max)
1284		md->mapcount_max = count;
1285
1286	md->node[page_to_nid(page)] += nr_pages;
1287}
1288
1289static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1290		unsigned long addr)
1291{
1292	struct page *page;
1293	int nid;
1294
1295	if (!pte_present(pte))
1296		return NULL;
1297
1298	page = vm_normal_page(vma, addr, pte);
1299	if (!page)
1300		return NULL;
1301
1302	if (PageReserved(page))
1303		return NULL;
1304
1305	nid = page_to_nid(page);
1306	if (!node_isset(nid, node_states[N_MEMORY]))
1307		return NULL;
1308
1309	return page;
1310}
1311
1312static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1313		unsigned long end, struct mm_walk *walk)
1314{
1315	struct numa_maps *md;
1316	spinlock_t *ptl;
1317	pte_t *orig_pte;
1318	pte_t *pte;
1319
1320	md = walk->private;
1321
1322	if (pmd_trans_huge_lock(pmd, md->vma, &ptl) == 1) {
1323		pte_t huge_pte = *(pte_t *)pmd;
1324		struct page *page;
1325
1326		page = can_gather_numa_stats(huge_pte, md->vma, addr);
1327		if (page)
1328			gather_stats(page, md, pte_dirty(huge_pte),
1329				     HPAGE_PMD_SIZE/PAGE_SIZE);
1330		spin_unlock(ptl);
1331		return 0;
1332	}
1333
1334	if (pmd_trans_unstable(pmd))
1335		return 0;
1336	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1337	do {
1338		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
1339		if (!page)
1340			continue;
1341		gather_stats(page, md, pte_dirty(*pte), 1);
1342
1343	} while (pte++, addr += PAGE_SIZE, addr != end);
1344	pte_unmap_unlock(orig_pte, ptl);
1345	return 0;
1346}
1347#ifdef CONFIG_HUGETLB_PAGE
1348static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
1349		unsigned long addr, unsigned long end, struct mm_walk *walk)
1350{
1351	struct numa_maps *md;
1352	struct page *page;
1353
1354	if (!pte_present(*pte))
1355		return 0;
1356
1357	page = pte_page(*pte);
1358	if (!page)
1359		return 0;
1360
1361	md = walk->private;
1362	gather_stats(page, md, pte_dirty(*pte), 1);
1363	return 0;
1364}
1365
1366#else
1367static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
1368		unsigned long addr, unsigned long end, struct mm_walk *walk)
1369{
1370	return 0;
1371}
1372#endif
1373
1374/*
1375 * Display pages allocated per node and memory policy via /proc.
1376 */
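/*
 * Illustrative example of one output line (values hypothetical), built
 * from the seq_printf() calls below:
 *
 *	00400000 default file=/bin/cat mapped=7 mapmax=2 N0=4 N1=3
 */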
1377static int show_numa_map(struct seq_file *m, void *v, int is_pid)
1378{
1379	struct numa_maps_private *numa_priv = m->private;
1380	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
1381	struct vm_area_struct *vma = v;
1382	struct numa_maps *md = &numa_priv->md;
1383	struct file *file = vma->vm_file;
1384	struct task_struct *task = proc_priv->task;
1385	struct mm_struct *mm = vma->vm_mm;
1386	struct mm_walk walk = {};
1387	struct mempolicy *pol;
1388	char buffer[64];
1389	int nid;
1390
1391	if (!mm)
1392		return 0;
1393
1394	/* Ensure we start with an empty set of numa_maps statistics. */
1395	memset(md, 0, sizeof(*md));
1396
1397	md->vma = vma;
1398
1399	walk.hugetlb_entry = gather_hugetbl_stats;
1400	walk.pmd_entry = gather_pte_stats;
1401	walk.private = md;
1402	walk.mm = mm;
1403
1404	pol = get_vma_policy(task, vma, vma->vm_start);
1405	mpol_to_str(buffer, sizeof(buffer), pol);
1406	mpol_cond_put(pol);
1407
1408	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1409
1410	if (file) {
1411		seq_printf(m, " file=");
1412		seq_path(m, &file->f_path, "\n\t= ");
1413	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1414		seq_printf(m, " heap");
1415	} else {
1416		pid_t tid = vm_is_stack(task, vma, is_pid);
1417		if (tid != 0) {
1418			/*
1419			 * Thread stack in /proc/PID/task/TID/maps or
1420			 * the main process stack.
1421			 */
1422			if (!is_pid || (vma->vm_start <= mm->start_stack &&
1423			    vma->vm_end >= mm->start_stack))
1424				seq_printf(m, " stack");
1425			else
1426				seq_printf(m, " stack:%d", tid);
1427		}
1428	}
1429
1430	if (is_vm_hugetlb_page(vma))
1431		seq_printf(m, " huge");
1432
1433	walk_page_range(vma->vm_start, vma->vm_end, &walk);
1434
1435	if (!md->pages)
1436		goto out;
1437
1438	if (md->anon)
1439		seq_printf(m, " anon=%lu", md->anon);
1440
1441	if (md->dirty)
1442		seq_printf(m, " dirty=%lu", md->dirty);
1443
1444	if (md->pages != md->anon && md->pages != md->dirty)
1445		seq_printf(m, " mapped=%lu", md->pages);
1446
1447	if (md->mapcount_max > 1)
1448		seq_printf(m, " mapmax=%lu", md->mapcount_max);
1449
1450	if (md->swapcache)
1451		seq_printf(m, " swapcache=%lu", md->swapcache);
1452
1453	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1454		seq_printf(m, " active=%lu", md->active);
1455
1456	if (md->writeback)
1457		seq_printf(m, " writeback=%lu", md->writeback);
1458
1459	for_each_node_state(nid, N_MEMORY)
1460		if (md->node[nid])
1461			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
1462out:
1463	seq_putc(m, '\n');
1464
1465	if (m->count < m->size)
1466		m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
1467	return 0;
1468}
1469
1470static int show_pid_numa_map(struct seq_file *m, void *v)
1471{
1472	return show_numa_map(m, v, 1);
1473}
1474
1475static int show_tid_numa_map(struct seq_file *m, void *v)
1476{
1477	return show_numa_map(m, v, 0);
1478}
1479
1480static const struct seq_operations proc_pid_numa_maps_op = {
1481	.start  = m_start,
1482	.next   = m_next,
1483	.stop   = m_stop,
1484	.show   = show_pid_numa_map,
1485};
1486
1487static const struct seq_operations proc_tid_numa_maps_op = {
1488	.start  = m_start,
1489	.next   = m_next,
1490	.stop   = m_stop,
1491	.show   = show_tid_numa_map,
1492};
1493
1494static int numa_maps_open(struct inode *inode, struct file *file,
1495			  const struct seq_operations *ops)
1496{
1497	struct numa_maps_private *priv;
1498	int ret = -ENOMEM;
1499	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1500	if (priv) {
1501		priv->proc_maps.pid = proc_pid(inode);
1502		ret = seq_open(file, ops);
1503		if (!ret) {
1504			struct seq_file *m = file->private_data;
1505			m->private = priv;
1506		} else {
1507			kfree(priv);
1508		}
1509	}
1510	return ret;
1511}
1512
1513static int pid_numa_maps_open(struct inode *inode, struct file *file)
1514{
1515	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
1516}
1517
1518static int tid_numa_maps_open(struct inode *inode, struct file *file)
1519{
1520	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
1521}
1522
1523const struct file_operations proc_pid_numa_maps_operations = {
1524	.open		= pid_numa_maps_open,
1525	.read		= seq_read,
1526	.llseek		= seq_lseek,
1527	.release	= seq_release_private,
1528};
1529
1530const struct file_operations proc_tid_numa_maps_operations = {
1531	.open		= tid_numa_maps_open,
1532	.read		= seq_read,
1533	.llseek		= seq_lseek,
1534	.release	= seq_release_private,
1535};
1536#endif /* CONFIG_NUMA */