v3.1 (fs/proc/task_mmu.c)
   1#include <linux/mm.h>
   2#include <linux/hugetlb.h>
   3#include <linux/huge_mm.h>
   4#include <linux/mount.h>
   5#include <linux/seq_file.h>
   6#include <linux/highmem.h>
   7#include <linux/ptrace.h>
   8#include <linux/slab.h>
   9#include <linux/pagemap.h>
  10#include <linux/mempolicy.h>
  11#include <linux/rmap.h>
  12#include <linux/swap.h>
  13#include <linux/swapops.h>
  14
  15#include <asm/elf.h>
  16#include <asm/uaccess.h>
  17#include <asm/tlbflush.h>
  18#include "internal.h"
  19
  20void task_mem(struct seq_file *m, struct mm_struct *mm)
  21{
  22	unsigned long data, text, lib, swap;
  23	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
  24
  25	/*
  26	 * Note: to minimize their overhead, mm maintains hiwater_vm and
  27	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
  28	 * collector of these hiwater stats must therefore get total_vm
  29	 * and rss too, which will usually be the higher.  Barriers? not
  30	 * worth the effort, such snapshots can always be inconsistent.
  31	 */
  32	hiwater_vm = total_vm = mm->total_vm;
  33	if (hiwater_vm < mm->hiwater_vm)
  34		hiwater_vm = mm->hiwater_vm;
  35	hiwater_rss = total_rss = get_mm_rss(mm);
  36	if (hiwater_rss < mm->hiwater_rss)
  37		hiwater_rss = mm->hiwater_rss;
  38
  39	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
  40	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
  41	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
  42	swap = get_mm_counter(mm, MM_SWAPENTS);
  43	seq_printf(m,
  44		"VmPeak:\t%8lu kB\n"
  45		"VmSize:\t%8lu kB\n"
  46		"VmLck:\t%8lu kB\n"
  47		"VmHWM:\t%8lu kB\n"
  48		"VmRSS:\t%8lu kB\n"
  49		"VmData:\t%8lu kB\n"
  50		"VmStk:\t%8lu kB\n"
  51		"VmExe:\t%8lu kB\n"
  52		"VmLib:\t%8lu kB\n"
  53		"VmPTE:\t%8lu kB\n"
  54		"VmSwap:\t%8lu kB\n",
  55		hiwater_vm << (PAGE_SHIFT-10),
  56		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
  57		mm->locked_vm << (PAGE_SHIFT-10),
  58		hiwater_rss << (PAGE_SHIFT-10),
  59		total_rss << (PAGE_SHIFT-10),
  60		data << (PAGE_SHIFT-10),
  61		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
  62		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
  63		swap << (PAGE_SHIFT-10));
  64}
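
task_mem() above is what generates the Vm* lines of /proc/PID/status. As a
minimal sketch of the consumer side (the field names are taken from the
seq_printf() format string above; the program itself is illustrative, not
part of the kernel):

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/self/status", "r");
        char line[256];

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f)) {
                unsigned long kb;

                /* each field is "Name:\t%8lu kB\n"; sscanf's literal
                 * space skips the tab and padding */
                if (sscanf(line, "VmRSS: %lu kB", &kb) == 1)
                        printf("resident: %lu kB\n", kb);
                else if (sscanf(line, "VmSwap: %lu kB", &kb) == 1)
                        printf("swapped:  %lu kB\n", kb);
        }
        fclose(f);
        return 0;
}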
  65
  66unsigned long task_vsize(struct mm_struct *mm)
  67{
  68	return PAGE_SIZE * mm->total_vm;
  69}
  70
  71unsigned long task_statm(struct mm_struct *mm,
  72			 unsigned long *shared, unsigned long *text,
  73			 unsigned long *data, unsigned long *resident)
  74{
  75	*shared = get_mm_counter(mm, MM_FILEPAGES);
  76	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
  77								>> PAGE_SHIFT;
  78	*data = mm->total_vm - mm->shared_vm;
  79	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
  80	return mm->total_vm;
  81}
  82
  83static void pad_len_spaces(struct seq_file *m, int len)
  84{
  85	len = 25 + sizeof(void*) * 6 - len;
  86	if (len < 1)
  87		len = 1;
  88	seq_printf(m, "%*c", len, ' ');
  89}
  90
  91static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
  92{
  93	if (vma && vma != priv->tail_vma) {
  94		struct mm_struct *mm = vma->vm_mm;
  95		up_read(&mm->mmap_sem);
  96		mmput(mm);
  97	}
  98}
  99
 100static void *m_start(struct seq_file *m, loff_t *pos)
 101{
 102	struct proc_maps_private *priv = m->private;
 103	unsigned long last_addr = m->version;
 104	struct mm_struct *mm;
 105	struct vm_area_struct *vma, *tail_vma = NULL;
 106	loff_t l = *pos;
 107
 108	/* Clear the per syscall fields in priv */
 109	priv->task = NULL;
 110	priv->tail_vma = NULL;
 111
 112	/*
 113	 * We remember last_addr rather than next_addr to hit with
 114	 * mmap_cache most of the time. We have zero last_addr at
 115	 * the beginning and also after lseek. We will have -1 last_addr
 116	 * after the end of the vmas.
 117	 */
 118
 119	if (last_addr == -1UL)
 120		return NULL;
 121
 122	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
 123	if (!priv->task)
 124		return ERR_PTR(-ESRCH);
 125
 126	mm = mm_for_maps(priv->task);
 127	if (!mm || IS_ERR(mm))
 128		return mm;
 129	down_read(&mm->mmap_sem);
 130
 131	tail_vma = get_gate_vma(priv->task->mm);
 132	priv->tail_vma = tail_vma;
 133
 134	/* Start with last addr hint */
 135	vma = find_vma(mm, last_addr);
 136	if (last_addr && vma) {
 137		vma = vma->vm_next;
 138		goto out;
 139	}
 140
 141	/*
 142	 * Check the vma index is within the range and do
 143	 * sequential scan until m_index.
 144	 */
 145	vma = NULL;
 146	if ((unsigned long)l < mm->map_count) {
 147		vma = mm->mmap;
 148		while (l-- && vma)
 149			vma = vma->vm_next;
 150		goto out;
 151	}
 152
 153	if (l != mm->map_count)
 154		tail_vma = NULL; /* After gate vma */
 155
 156out:
 157	if (vma)
 158		return vma;
 159
 160	/* End of vmas has been reached */
 161	m->version = (tail_vma != NULL)? 0: -1UL;
 162	up_read(&mm->mmap_sem);
 163	mmput(mm);
 164	return tail_vma;
 165}
 166
 167static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 168{
 169	struct proc_maps_private *priv = m->private;
 170	struct vm_area_struct *vma = v;
 171	struct vm_area_struct *tail_vma = priv->tail_vma;
 172
 173	(*pos)++;
 174	if (vma && (vma != tail_vma) && vma->vm_next)
 175		return vma->vm_next;
 176	vma_stop(priv, vma);
 177	return (vma != tail_vma)? tail_vma: NULL;
 178}
 179
 180static void m_stop(struct seq_file *m, void *v)
 181{
 182	struct proc_maps_private *priv = m->private;
 183	struct vm_area_struct *vma = v;
 184
 185	if (!IS_ERR(vma))
 186		vma_stop(priv, vma);
 187	if (priv->task)
 188		put_task_struct(priv->task);
 189}
 190
 191static int do_maps_open(struct inode *inode, struct file *file,
 192			const struct seq_operations *ops)
 193{
 194	struct proc_maps_private *priv;
 195	int ret = -ENOMEM;
 196	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 197	if (priv) {
 198		priv->pid = proc_pid(inode);
 199		ret = seq_open(file, ops);
 200		if (!ret) {
 201			struct seq_file *m = file->private_data;
 202			m->private = priv;
 203		} else {
 204			kfree(priv);
 205		}
 206	}
 207	return ret;
 208}
 209
 210static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 211{
 212	struct mm_struct *mm = vma->vm_mm;
 213	struct file *file = vma->vm_file;
 214	vm_flags_t flags = vma->vm_flags;
 215	unsigned long ino = 0;
 216	unsigned long long pgoff = 0;
 217	unsigned long start, end;
 218	dev_t dev = 0;
 219	int len;
 220
 221	if (file) {
 222		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
 223		dev = inode->i_sb->s_dev;
 224		ino = inode->i_ino;
 225		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
 226	}
 227
 228	/* We don't show the stack guard page in /proc/maps */
 229	start = vma->vm_start;
 230	if (stack_guard_page_start(vma, start))
 231		start += PAGE_SIZE;
 232	end = vma->vm_end;
 233	if (stack_guard_page_end(vma, end))
 234		end -= PAGE_SIZE;
 235
 236	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
 237			start,
 238			end,
 239			flags & VM_READ ? 'r' : '-',
 240			flags & VM_WRITE ? 'w' : '-',
 241			flags & VM_EXEC ? 'x' : '-',
 242			flags & VM_MAYSHARE ? 's' : 'p',
 243			pgoff,
 244			MAJOR(dev), MINOR(dev), ino, &len);
 245
 246	/*
 247	 * Print the dentry name for named mappings, and a
 248	 * special [heap] marker for the heap:
 249	 */
 250	if (file) {
 251		pad_len_spaces(m, len);
 252		seq_path(m, &file->f_path, "\n");
 253	} else {
 254		const char *name = arch_vma_name(vma);
 255		if (!name) {
 256			if (mm) {
 257				if (vma->vm_start <= mm->brk &&
 258						vma->vm_end >= mm->start_brk) {
 259					name = "[heap]";
 260				} else if (vma->vm_start <= mm->start_stack &&
 261					   vma->vm_end >= mm->start_stack) {
 262					name = "[stack]";
 263				}
 264			} else {
 265				name = "[vdso]";
 266			}
 267		}
 268		if (name) {
 269			pad_len_spaces(m, len);
 270			seq_puts(m, name);
 271		}
 272	}
 273	seq_putc(m, '\n');
 274}
 275
 276static int show_map(struct seq_file *m, void *v)
 277{
 278	struct vm_area_struct *vma = v;
 279	struct proc_maps_private *priv = m->private;
 280	struct task_struct *task = priv->task;
 281
 282	show_map_vma(m, vma);
 283
 284	if (m->count < m->size)  /* vma is copied successfully */
 285		m->version = (vma != get_gate_vma(task->mm))
 286			? vma->vm_start : 0;
 287	return 0;
 288}
 289
 290static const struct seq_operations proc_pid_maps_op = {
 291	.start	= m_start,
 292	.next	= m_next,
 293	.stop	= m_stop,
 294	.show	= show_map
 295};
 296
 297static int maps_open(struct inode *inode, struct file *file)
 298{
 299	return do_maps_open(inode, file, &proc_pid_maps_op);
 300}
 301
 302const struct file_operations proc_maps_operations = {
 303	.open		= maps_open,
 304	.read		= seq_read,
 305	.llseek		= seq_lseek,
 306	.release	= seq_release_private,
 307};
 308
 309/*
 310 * Proportional Set Size(PSS): my share of RSS.
 311 *
 312 * PSS of a process is the count of pages it has in memory, where each
 313 * page is divided by the number of processes sharing it.  So if a
 314 * process has 1000 pages all to itself, and 1000 shared with one other
 315 * process, its PSS will be 1500.
 316 *
 317 * To keep (accumulated) division errors low, we adopt a 64bit
 318 * fixed-point pss counter to minimize division errors. So (pss >>
 319 * PSS_SHIFT) would be the real byte count.
 320 *
 321 * A shift of 12 before division means (assuming 4K page size):
 322 * 	- 1M 3-user-pages add up to 8KB errors;
 323 * 	- supports mapcount up to 2^24, or 16M;
 324 * 	- supports PSS up to 2^52 bytes, or 4PB.
 325 */
 326#define PSS_SHIFT 12
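
A worked example of the fixed-point scheme the comment above describes,
assuming a 4K page size (a standalone demo, not kernel code):

#include <stdio.h>

#define PSS_SHIFT 12
#define PAGE_SIZE 4096ULL               /* assumption: 4K pages */

int main(void)
{
        unsigned long long pss = 0;

        /* 1000 pages mapped only by us: full weight */
        pss += (1000 * PAGE_SIZE) << PSS_SHIFT;
        /* 1000 pages shared with one other process: half weight */
        pss += ((1000 * PAGE_SIZE) << PSS_SHIFT) / 2;

        /* (pss >> PSS_SHIFT) is the real byte count: 1500 pages, as in
         * the 1000 + 1000-shared example in the comment above */
        printf("PSS = %llu kB\n", (pss >> PSS_SHIFT) >> 10);    /* 6000 */
        return 0;
}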
 327
 328#ifdef CONFIG_PROC_PAGE_MONITOR
 329struct mem_size_stats {
 330	struct vm_area_struct *vma;
 331	unsigned long resident;
 332	unsigned long shared_clean;
 333	unsigned long shared_dirty;
 334	unsigned long private_clean;
 335	unsigned long private_dirty;
 336	unsigned long referenced;
 337	unsigned long anonymous;
 338	unsigned long anonymous_thp;
 339	unsigned long swap;
 340	u64 pss;
 341};
 342
 343
 344static void smaps_pte_entry(pte_t ptent, unsigned long addr,
 345		unsigned long ptent_size, struct mm_walk *walk)
 346{
 347	struct mem_size_stats *mss = walk->private;
 348	struct vm_area_struct *vma = mss->vma;
 349	struct page *page;
 350	int mapcount;
 351
 352	if (is_swap_pte(ptent)) {
 353		mss->swap += ptent_size;
 354		return;
 355	}
 356
 357	if (!pte_present(ptent))
 358		return;
 359
 360	page = vm_normal_page(vma, addr, ptent);
 361	if (!page)
 362		return;
 363
 364	if (PageAnon(page))
 365		mss->anonymous += ptent_size;
 366
 367	mss->resident += ptent_size;
 368	/* Accumulate the size in pages that have been accessed. */
 369	if (pte_young(ptent) || PageReferenced(page))
 370		mss->referenced += ptent_size;
 371	mapcount = page_mapcount(page);
 372	if (mapcount >= 2) {
 373		if (pte_dirty(ptent) || PageDirty(page))
 374			mss->shared_dirty += ptent_size;
 375		else
 376			mss->shared_clean += ptent_size;
 377		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
 378	} else {
 379		if (pte_dirty(ptent) || PageDirty(page))
 380			mss->private_dirty += ptent_size;
 381		else
 382			mss->private_clean += ptent_size;
 383		mss->pss += (ptent_size << PSS_SHIFT);
 384	}
 385}
 386
 387static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 388			   struct mm_walk *walk)
 389{
 390	struct mem_size_stats *mss = walk->private;
 391	struct vm_area_struct *vma = mss->vma;
 392	pte_t *pte;
 393	spinlock_t *ptl;
 394
 395	spin_lock(&walk->mm->page_table_lock);
 396	if (pmd_trans_huge(*pmd)) {
 397		if (pmd_trans_splitting(*pmd)) {
 398			spin_unlock(&walk->mm->page_table_lock);
 399			wait_split_huge_page(vma->anon_vma, pmd);
 400		} else {
 401			smaps_pte_entry(*(pte_t *)pmd, addr,
 402					HPAGE_PMD_SIZE, walk);
 403			spin_unlock(&walk->mm->page_table_lock);
 404			mss->anonymous_thp += HPAGE_PMD_SIZE;
 405			return 0;
 406		}
 407	} else {
 408		spin_unlock(&walk->mm->page_table_lock);
 409	}
 410	/*
 411	 * The mmap_sem held all the way back in m_start() is what
 412	 * keeps khugepaged out of here and from collapsing things
 413	 * in here.
 414	 */
 415	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 416	for (; addr != end; pte++, addr += PAGE_SIZE)
 417		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
 418	pte_unmap_unlock(pte - 1, ptl);
 419	cond_resched();
 420	return 0;
 421}
 422
 423static int show_smap(struct seq_file *m, void *v)
 424{
 425	struct proc_maps_private *priv = m->private;
 426	struct task_struct *task = priv->task;
 427	struct vm_area_struct *vma = v;
 428	struct mem_size_stats mss;
 429	struct mm_walk smaps_walk = {
 430		.pmd_entry = smaps_pte_range,
 431		.mm = vma->vm_mm,
 432		.private = &mss,
 433	};
 434
 435	memset(&mss, 0, sizeof mss);
 436	mss.vma = vma;
 437	/* mmap_sem is held in m_start */
 438	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
 439		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
 440
 441	show_map_vma(m, vma);
 442
 443	seq_printf(m,
 444		   "Size:           %8lu kB\n"
 445		   "Rss:            %8lu kB\n"
 446		   "Pss:            %8lu kB\n"
 447		   "Shared_Clean:   %8lu kB\n"
 448		   "Shared_Dirty:   %8lu kB\n"
 449		   "Private_Clean:  %8lu kB\n"
 450		   "Private_Dirty:  %8lu kB\n"
 451		   "Referenced:     %8lu kB\n"
 452		   "Anonymous:      %8lu kB\n"
 453		   "AnonHugePages:  %8lu kB\n"
 454		   "Swap:           %8lu kB\n"
 455		   "KernelPageSize: %8lu kB\n"
 456		   "MMUPageSize:    %8lu kB\n"
 457		   "Locked:         %8lu kB\n",
 458		   (vma->vm_end - vma->vm_start) >> 10,
 459		   mss.resident >> 10,
 460		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
 461		   mss.shared_clean  >> 10,
 462		   mss.shared_dirty  >> 10,
 463		   mss.private_clean >> 10,
 464		   mss.private_dirty >> 10,
 465		   mss.referenced >> 10,
 466		   mss.anonymous >> 10,
 467		   mss.anonymous_thp >> 10,
 468		   mss.swap >> 10,
 469		   vma_kernel_pagesize(vma) >> 10,
 470		   vma_mmu_pagesize(vma) >> 10,
 471		   (vma->vm_flags & VM_LOCKED) ?
 472			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
 473
 474	if (m->count < m->size)  /* vma is copied successfully */
 475		m->version = (vma != get_gate_vma(task->mm))
 476			? vma->vm_start : 0;
 477	return 0;
 478}
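
show_smap() prints one block like the above per VMA, so process-wide
figures must be summed in userspace. A sketch that totals the Pss: lines
(the field name comes from the format string above; everything else is
illustrative):

#include <stdio.h>

int main(int argc, char **argv)
{
        char path[64], line[256];
        unsigned long kb, total = 0;
        FILE *f;

        snprintf(path, sizeof(path), "/proc/%s/smaps",
                 argc > 1 ? argv[1] : "self");
        f = fopen(path, "r");
        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f))
                if (sscanf(line, "Pss: %lu kB", &kb) == 1)
                        total += kb;    /* one Pss: line per VMA */
        fclose(f);
        printf("total PSS: %lu kB\n", total);
        return 0;
}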
 479
 480static const struct seq_operations proc_pid_smaps_op = {
 481	.start	= m_start,
 482	.next	= m_next,
 483	.stop	= m_stop,
 484	.show	= show_smap
 485};
 486
 487static int smaps_open(struct inode *inode, struct file *file)
 488{
 489	return do_maps_open(inode, file, &proc_pid_smaps_op);
 490}
 491
 492const struct file_operations proc_smaps_operations = {
 493	.open		= smaps_open,
 494	.read		= seq_read,
 495	.llseek		= seq_lseek,
 496	.release	= seq_release_private,
 497};
 498
 499static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 500				unsigned long end, struct mm_walk *walk)
 501{
 502	struct vm_area_struct *vma = walk->private;
 503	pte_t *pte, ptent;
 504	spinlock_t *ptl;
 505	struct page *page;
 506
 507	split_huge_page_pmd(walk->mm, pmd);
 508
 509	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 510	for (; addr != end; pte++, addr += PAGE_SIZE) {
 511		ptent = *pte;
 512		if (!pte_present(ptent))
 513			continue;
 514
 515		page = vm_normal_page(vma, addr, ptent);
 516		if (!page)
 517			continue;
 518
 519		/* Clear accessed and referenced bits. */
 520		ptep_test_and_clear_young(vma, addr, pte);
 521		ClearPageReferenced(page);
 522	}
 523	pte_unmap_unlock(pte - 1, ptl);
 524	cond_resched();
 525	return 0;
 526}
 527
 528#define CLEAR_REFS_ALL 1
 529#define CLEAR_REFS_ANON 2
 530#define CLEAR_REFS_MAPPED 3
 531
 532static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 533				size_t count, loff_t *ppos)
 534{
 535	struct task_struct *task;
 536	char buffer[PROC_NUMBUF];
 537	struct mm_struct *mm;
 538	struct vm_area_struct *vma;
 539	int type;
 540	int rv;
 541
 542	memset(buffer, 0, sizeof(buffer));
 543	if (count > sizeof(buffer) - 1)
 544		count = sizeof(buffer) - 1;
 545	if (copy_from_user(buffer, buf, count))
 546		return -EFAULT;
 547	rv = kstrtoint(strstrip(buffer), 10, &type);
 548	if (rv < 0)
 549		return rv;
 550	if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
 551		return -EINVAL;
 552	task = get_proc_task(file->f_path.dentry->d_inode);
 553	if (!task)
 554		return -ESRCH;
 555	mm = get_task_mm(task);
 556	if (mm) {
 557		struct mm_walk clear_refs_walk = {
 558			.pmd_entry = clear_refs_pte_range,
 559			.mm = mm,
 560		};
 561		down_read(&mm->mmap_sem);
 562		for (vma = mm->mmap; vma; vma = vma->vm_next) {
 563			clear_refs_walk.private = vma;
 564			if (is_vm_hugetlb_page(vma))
 565				continue;
 566			/*
 567			 * Writing 1 to /proc/pid/clear_refs affects all pages.
 568			 *
 569			 * Writing 2 to /proc/pid/clear_refs only affects
 570			 * Anonymous pages.
 571			 *
 572			 * Writing 3 to /proc/pid/clear_refs only affects file
 573			 * mapped pages.
 574			 */
 575			if (type == CLEAR_REFS_ANON && vma->vm_file)
 576				continue;
 577			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
 578				continue;
 579			walk_page_range(vma->vm_start, vma->vm_end,
 580					&clear_refs_walk);
 581		}
 582		flush_tlb_mm(mm);
 583		up_read(&mm->mmap_sem);
 584		mmput(mm);
 585	}
 586	put_task_struct(task);
 587
 588	return count;
 589}
 590
 591const struct file_operations proc_clear_refs_operations = {
 592	.write		= clear_refs_write,
 593	.llseek		= noop_llseek,
 594};
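
The comment inside clear_refs_write() above documents the accepted
commands. A sketch of the usual measurement cycle from userspace: clear
the accessed bits, let the workload run, then re-read the Referenced:
lines of smaps. The helper below is hypothetical, and pairing clear_refs
with smaps is convention rather than anything this file enforces.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* hypothetical helper, not from the kernel tree */
static int clear_soft_refs(const char *pid)
{
        char path[64];
        int fd;

        snprintf(path, sizeof(path), "/proc/%s/clear_refs", pid);
        fd = open(path, O_WRONLY);
        if (fd < 0)
                return -1;
        /* "1" = all pages, "2" = anonymous only, "3" = file-backed only,
         * per the comment in clear_refs_write() above */
        if (write(fd, "1", 1) != 1) {
                close(fd);
                return -1;
        }
        return close(fd);
}

int main(void)
{
        return clear_soft_refs("self") ? 1 : 0;
}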
 595
 596struct pagemapread {
 597	int pos, len;
 598	u64 *buffer;
 599};
 600
 601#define PM_ENTRY_BYTES      sizeof(u64)
 602#define PM_STATUS_BITS      3
 603#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
 604#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
 605#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
 606#define PM_PSHIFT_BITS      6
 607#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
 608#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
 609#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
 610#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
 611#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)
 612
 613#define PM_PRESENT          PM_STATUS(4LL)
 614#define PM_SWAP             PM_STATUS(2LL)
 615#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
 616#define PM_END_OF_BUFFER    1
 617
 618static int add_to_pagemap(unsigned long addr, u64 pfn,
 619			  struct pagemapread *pm)
 620{
 621	pm->buffer[pm->pos++] = pfn;
 622	if (pm->pos >= pm->len)
 623		return PM_END_OF_BUFFER;
 624	return 0;
 625}
 626
 627static int pagemap_pte_hole(unsigned long start, unsigned long end,
 628				struct mm_walk *walk)
 629{
 630	struct pagemapread *pm = walk->private;
 631	unsigned long addr;
 632	int err = 0;
 633	for (addr = start; addr < end; addr += PAGE_SIZE) {
 634		err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
 635		if (err)
 636			break;
 637	}
 638	return err;
 639}
 640
 641static u64 swap_pte_to_pagemap_entry(pte_t pte)
 642{
 643	swp_entry_t e = pte_to_swp_entry(pte);
 644	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
 645}
 646
 647static u64 pte_to_pagemap_entry(pte_t pte)
 648{
 649	u64 pme = 0;
 650	if (is_swap_pte(pte))
 651		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
 652			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
 653	else if (pte_present(pte))
 654		pme = PM_PFRAME(pte_pfn(pte))
 655			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
 656	return pme;
 657}
 658
 659static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 660			     struct mm_walk *walk)
 661{
 662	struct vm_area_struct *vma;
 663	struct pagemapread *pm = walk->private;
 664	pte_t *pte;
 665	int err = 0;
 666
 667	split_huge_page_pmd(walk->mm, pmd);
 668
 669	/* find the first VMA at or above 'addr' */
 670	vma = find_vma(walk->mm, addr);
 671	for (; addr != end; addr += PAGE_SIZE) {
 672		u64 pfn = PM_NOT_PRESENT;
 673
 674		/* check to see if we've left 'vma' behind
 675		 * and need a new, higher one */
 676		if (vma && (addr >= vma->vm_end))
 677			vma = find_vma(walk->mm, addr);
 678
 679		/* check that 'vma' actually covers this address,
 680		 * and that it isn't a huge page vma */
 681		if (vma && (vma->vm_start <= addr) &&
 682		    !is_vm_hugetlb_page(vma)) {
 683			pte = pte_offset_map(pmd, addr);
 684			pfn = pte_to_pagemap_entry(*pte);
 685			/* unmap before userspace copy */
 686			pte_unmap(pte);
 687		}
 688		err = add_to_pagemap(addr, pfn, pm);
 689		if (err)
 690			return err;
 691	}
 692
 693	cond_resched();
 694
 695	return err;
 696}
 697
 698#ifdef CONFIG_HUGETLB_PAGE
 699static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
 700{
 701	u64 pme = 0;
 702	if (pte_present(pte))
 703		pme = PM_PFRAME(pte_pfn(pte) + offset)
 704			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
 705	return pme;
 706}
 707
 708/* This function walks within one hugetlb entry in the single call */
 709static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
 710				 unsigned long addr, unsigned long end,
 711				 struct mm_walk *walk)
 712{
 713	struct pagemapread *pm = walk->private;
 714	int err = 0;
 715	u64 pfn;
 716
 717	for (; addr != end; addr += PAGE_SIZE) {
 718		int offset = (addr & ~hmask) >> PAGE_SHIFT;
 719		pfn = huge_pte_to_pagemap_entry(*pte, offset);
 720		err = add_to_pagemap(addr, pfn, pm);
 721		if (err)
 722			return err;
 723	}
 724
 725	cond_resched();
 726
 727	return err;
 728}
 729#endif /* HUGETLB_PAGE */
 730
 731/*
 732 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 733 *
 734 * For each page in the address space, this file contains one 64-bit entry
 735 * consisting of the following:
 736 *
 737 * Bits 0-55  page frame number (PFN) if present
 738 * Bits 0-4   swap type if swapped
 739 * Bits 5-55  swap offset if swapped
 740 * Bits 55-60 page shift (page size = 1<<page shift)
 741 * Bit  61    reserved for future use
 742 * Bit  62    page swapped
 743 * Bit  63    page present
 744 *
 745 * If the page is not present but in swap, then the PFN contains an
 746 * encoding of the swap file number and the page's offset into the
 747 * swap. Unmapped pages return a null PFN. This allows determining
 748 * precisely which pages are mapped (or in swap) and comparing mapped
 749 * pages between processes.
 750 *
 751 * Efficient users of this interface will use /proc/pid/maps to
 752 * determine which areas of memory are actually mapped and llseek to
 753 * skip over unmapped regions.
 754 */
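
A userspace decoder for this layout, as a sketch: the bit positions come
from the comment above and the PM_* macros earlier in the file; the rest
is illustrative.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        uintptr_t vaddr = (uintptr_t)&page_size;  /* any mapped address */
        uint64_t entry;
        FILE *f = fopen("/proc/self/pagemap", "rb");

        if (!f)
                return 1;
        /* one 64-bit entry per virtual page, indexed by page number */
        if (fseek(f, (long)(vaddr / page_size * sizeof(entry)), SEEK_SET) ||
            fread(&entry, sizeof(entry), 1, f) != 1) {
                fclose(f);
                return 1;
        }
        fclose(f);

        if (entry & (1ULL << 63))               /* PM_PRESENT */
                printf("present, pfn 0x%llx\n",
                       (unsigned long long)(entry & ((1ULL << 55) - 1)));
        else if (entry & (1ULL << 62))          /* PM_SWAP */
                printf("swap type %llu, offset %llu\n",
                       (unsigned long long)(entry & 0x1f),
                       (unsigned long long)((entry >> 5) & ((1ULL << 50) - 1)));
        else
                printf("not present\n");
        return 0;
}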
 755#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
 756#define PAGEMAP_WALK_MASK	(PMD_MASK)
 757static ssize_t pagemap_read(struct file *file, char __user *buf,
 758			    size_t count, loff_t *ppos)
 759{
 760	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
 761	struct mm_struct *mm;
 762	struct pagemapread pm;
 763	int ret = -ESRCH;
 764	struct mm_walk pagemap_walk = {};
 765	unsigned long src;
 766	unsigned long svpfn;
 767	unsigned long start_vaddr;
 768	unsigned long end_vaddr;
 769	int copied = 0;
 770
 771	if (!task)
 772		goto out;
 773
 774	ret = -EINVAL;
 775	/* file position must be aligned */
 776	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
 777		goto out_task;
 778
 779	ret = 0;
 780	if (!count)
 781		goto out_task;
 782
 783	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
 784	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
 785	ret = -ENOMEM;
 786	if (!pm.buffer)
 787		goto out_task;
 788
 789	mm = mm_for_maps(task);
 790	ret = PTR_ERR(mm);
 791	if (!mm || IS_ERR(mm))
 792		goto out_free;
 793
 794	pagemap_walk.pmd_entry = pagemap_pte_range;
 795	pagemap_walk.pte_hole = pagemap_pte_hole;
 796#ifdef CONFIG_HUGETLB_PAGE
 797	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
 798#endif
 799	pagemap_walk.mm = mm;
 800	pagemap_walk.private = &pm;
 801
 802	src = *ppos;
 803	svpfn = src / PM_ENTRY_BYTES;
 804	start_vaddr = svpfn << PAGE_SHIFT;
 805	end_vaddr = TASK_SIZE_OF(task);
 806
 807	/* watch out for wraparound */
 808	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
 809		start_vaddr = end_vaddr;
 810
 811	/*
 812	 * The odds are that this will stop walking way
 813	 * before end_vaddr, because the length of the
 814	 * user buffer is tracked in "pm", and the walk
 815	 * will stop when we hit the end of the buffer.
 816	 */
 817	ret = 0;
 818	while (count && (start_vaddr < end_vaddr)) {
 819		int len;
 820		unsigned long end;
 821
 822		pm.pos = 0;
 823		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
 824		/* overflow ? */
 825		if (end < start_vaddr || end > end_vaddr)
 826			end = end_vaddr;
 827		down_read(&mm->mmap_sem);
 828		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
 829		up_read(&mm->mmap_sem);
 830		start_vaddr = end;
 831
 832		len = min(count, PM_ENTRY_BYTES * pm.pos);
 833		if (copy_to_user(buf, pm.buffer, len)) {
 834			ret = -EFAULT;
 835			goto out_mm;
 836		}
 837		copied += len;
 838		buf += len;
 839		count -= len;
 840	}
 841	*ppos += copied;
 842	if (!ret || ret == PM_END_OF_BUFFER)
 843		ret = copied;
 844
 845out_mm:
 846	mmput(mm);
 847out_free:
 848	kfree(pm.buffer);
 849out_task:
 850	put_task_struct(task);
 851out:
 852	return ret;
 853}
 854
 855const struct file_operations proc_pagemap_operations = {
 856	.llseek		= mem_lseek, /* borrow this */
 857	.read		= pagemap_read,
 858};
 859#endif /* CONFIG_PROC_PAGE_MONITOR */
 860
 861#ifdef CONFIG_NUMA
 862
 863struct numa_maps {
 864	struct vm_area_struct *vma;
 865	unsigned long pages;
 866	unsigned long anon;
 867	unsigned long active;
 868	unsigned long writeback;
 869	unsigned long mapcount_max;
 870	unsigned long dirty;
 871	unsigned long swapcache;
 872	unsigned long node[MAX_NUMNODES];
 873};
 874
 875struct numa_maps_private {
 876	struct proc_maps_private proc_maps;
 877	struct numa_maps md;
 878};
 879
 880static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
 881			unsigned long nr_pages)
 882{
 883	int count = page_mapcount(page);
 884
 885	md->pages += nr_pages;
 886	if (pte_dirty || PageDirty(page))
 887		md->dirty += nr_pages;
 888
 889	if (PageSwapCache(page))
 890		md->swapcache += nr_pages;
 891
 892	if (PageActive(page) || PageUnevictable(page))
 893		md->active += nr_pages;
 894
 895	if (PageWriteback(page))
 896		md->writeback += nr_pages;
 897
 898	if (PageAnon(page))
 899		md->anon += nr_pages;
 900
 901	if (count > md->mapcount_max)
 902		md->mapcount_max = count;
 903
 904	md->node[page_to_nid(page)] += nr_pages;
 905}
 906
 907static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
 908		unsigned long addr)
 909{
 910	struct page *page;
 911	int nid;
 912
 913	if (!pte_present(pte))
 914		return NULL;
 915
 916	page = vm_normal_page(vma, addr, pte);
 917	if (!page)
 918		return NULL;
 919
 920	if (PageReserved(page))
 921		return NULL;
 922
 923	nid = page_to_nid(page);
 924	if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
 925		return NULL;
 926
 927	return page;
 928}
 929
 930static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 931		unsigned long end, struct mm_walk *walk)
 932{
 933	struct numa_maps *md;
 934	spinlock_t *ptl;
 935	pte_t *orig_pte;
 936	pte_t *pte;
 937
 938	md = walk->private;
 939	spin_lock(&walk->mm->page_table_lock);
 940	if (pmd_trans_huge(*pmd)) {
 941		if (pmd_trans_splitting(*pmd)) {
 942			spin_unlock(&walk->mm->page_table_lock);
 943			wait_split_huge_page(md->vma->anon_vma, pmd);
 944		} else {
 945			pte_t huge_pte = *(pte_t *)pmd;
 946			struct page *page;
 947
 948			page = can_gather_numa_stats(huge_pte, md->vma, addr);
 949			if (page)
 950				gather_stats(page, md, pte_dirty(huge_pte),
 951						HPAGE_PMD_SIZE/PAGE_SIZE);
 952			spin_unlock(&walk->mm->page_table_lock);
 953			return 0;
 954		}
 955	} else {
 956		spin_unlock(&walk->mm->page_table_lock);
 957	}
 958
 959	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 960	do {
 961		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
 962		if (!page)
 963			continue;
 964		gather_stats(page, md, pte_dirty(*pte), 1);
 965
 966	} while (pte++, addr += PAGE_SIZE, addr != end);
 967	pte_unmap_unlock(orig_pte, ptl);
 968	return 0;
 969}
 970#ifdef CONFIG_HUGETLB_PAGE
 971static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
 972		unsigned long addr, unsigned long end, struct mm_walk *walk)
 973{
 974	struct numa_maps *md;
 975	struct page *page;
 976
 977	if (pte_none(*pte))
 978		return 0;
 979
 980	page = pte_page(*pte);
 981	if (!page)
 982		return 0;
 983
 984	md = walk->private;
 985	gather_stats(page, md, pte_dirty(*pte), 1);
 986	return 0;
 987}
 988
 989#else
 990static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
 991		unsigned long addr, unsigned long end, struct mm_walk *walk)
 992{
 993	return 0;
 994}
 995#endif
 996
 997/*
 998 * Display pages allocated per node and memory policy via /proc.
 999 */
1000static int show_numa_map(struct seq_file *m, void *v)
1001{
1002	struct numa_maps_private *numa_priv = m->private;
1003	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
1004	struct vm_area_struct *vma = v;
1005	struct numa_maps *md = &numa_priv->md;
1006	struct file *file = vma->vm_file;
1007	struct mm_struct *mm = vma->vm_mm;
1008	struct mm_walk walk = {};
1009	struct mempolicy *pol;
1010	int n;
1011	char buffer[50];
1012
1013	if (!mm)
1014		return 0;
1015
1016	/* Ensure we start with an empty set of numa_maps statistics. */
1017	memset(md, 0, sizeof(*md));
1018
1019	md->vma = vma;
1020
1021	walk.hugetlb_entry = gather_hugetbl_stats;
1022	walk.pmd_entry = gather_pte_stats;
1023	walk.private = md;
1024	walk.mm = mm;
1025
1026	pol = get_vma_policy(proc_priv->task, vma, vma->vm_start);
1027	mpol_to_str(buffer, sizeof(buffer), pol, 0);
1028	mpol_cond_put(pol);
1029
1030	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1031
1032	if (file) {
1033		seq_printf(m, " file=");
1034		seq_path(m, &file->f_path, "\n\t= ");
1035	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1036		seq_printf(m, " heap");
1037	} else if (vma->vm_start <= mm->start_stack &&
1038			vma->vm_end >= mm->start_stack) {
1039		seq_printf(m, " stack");
1040	}
1041
1042	walk_page_range(vma->vm_start, vma->vm_end, &walk);
1043
1044	if (!md->pages)
1045		goto out;
1046
1047	if (md->anon)
1048		seq_printf(m, " anon=%lu", md->anon);
1049
1050	if (md->dirty)
1051		seq_printf(m, " dirty=%lu", md->dirty);
1052
1053	if (md->pages != md->anon && md->pages != md->dirty)
1054		seq_printf(m, " mapped=%lu", md->pages);
1055
1056	if (md->mapcount_max > 1)
1057		seq_printf(m, " mapmax=%lu", md->mapcount_max);
1058
1059	if (md->swapcache)
1060		seq_printf(m, " swapcache=%lu", md->swapcache);
1061
1062	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1063		seq_printf(m, " active=%lu", md->active);
1064
1065	if (md->writeback)
1066		seq_printf(m, " writeback=%lu", md->writeback);
1067
1068	for_each_node_state(n, N_HIGH_MEMORY)
1069		if (md->node[n])
1070			seq_printf(m, " N%d=%lu", n, md->node[n]);
1071out:
1072	seq_putc(m, '\n');
1073
1074	if (m->count < m->size)
1075		m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
1076	return 0;
1077}
1078
1079static const struct seq_operations proc_pid_numa_maps_op = {
1080        .start  = m_start,
1081        .next   = m_next,
1082        .stop   = m_stop,
1083        .show   = show_numa_map,
1084};
1085
1086static int numa_maps_open(struct inode *inode, struct file *file)
1087{
1088	struct numa_maps_private *priv;
1089	int ret = -ENOMEM;
1090	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1091	if (priv) {
1092		priv->proc_maps.pid = proc_pid(inode);
1093		ret = seq_open(file, &proc_pid_numa_maps_op);
1094		if (!ret) {
1095			struct seq_file *m = file->private_data;
1096			m->private = priv;
1097		} else {
1098			kfree(priv);
1099		}
1100	}
1101	return ret;
1102}
1103
1104const struct file_operations proc_numa_maps_operations = {
1105	.open		= numa_maps_open,
1106	.read		= seq_read,
1107	.llseek		= seq_lseek,
1108	.release	= seq_release_private,
1109};
1110#endif /* CONFIG_NUMA */
v5.14.15 (fs/proc/task_mmu.c)
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/pagewalk.h>
   3#include <linux/vmacache.h>
   4#include <linux/hugetlb.h>
   5#include <linux/huge_mm.h>
   6#include <linux/mount.h>
   7#include <linux/seq_file.h>
   8#include <linux/highmem.h>
   9#include <linux/ptrace.h>
  10#include <linux/slab.h>
  11#include <linux/pagemap.h>
  12#include <linux/mempolicy.h>
  13#include <linux/rmap.h>
  14#include <linux/swap.h>
  15#include <linux/sched/mm.h>
  16#include <linux/swapops.h>
  17#include <linux/mmu_notifier.h>
  18#include <linux/page_idle.h>
  19#include <linux/shmem_fs.h>
  20#include <linux/uaccess.h>
  21#include <linux/pkeys.h>
  22
  23#include <asm/elf.h>
  24#include <asm/tlb.h>
  25#include <asm/tlbflush.h>
  26#include "internal.h"
  27
  28#define SEQ_PUT_DEC(str, val) \
  29		seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
  30void task_mem(struct seq_file *m, struct mm_struct *mm)
  31{
  32	unsigned long text, lib, swap, anon, file, shmem;
  33	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
  34
  35	anon = get_mm_counter(mm, MM_ANONPAGES);
  36	file = get_mm_counter(mm, MM_FILEPAGES);
  37	shmem = get_mm_counter(mm, MM_SHMEMPAGES);
  38
  39	/*
  40	 * Note: to minimize their overhead, mm maintains hiwater_vm and
  41	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
  42	 * collector of these hiwater stats must therefore get total_vm
  43	 * and rss too, which will usually be the higher.  Barriers? not
  44	 * worth the effort, such snapshots can always be inconsistent.
  45	 */
  46	hiwater_vm = total_vm = mm->total_vm;
  47	if (hiwater_vm < mm->hiwater_vm)
  48		hiwater_vm = mm->hiwater_vm;
  49	hiwater_rss = total_rss = anon + file + shmem;
  50	if (hiwater_rss < mm->hiwater_rss)
  51		hiwater_rss = mm->hiwater_rss;
  52
  53	/* split executable areas between text and lib */
  54	text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
  55	text = min(text, mm->exec_vm << PAGE_SHIFT);
  56	lib = (mm->exec_vm << PAGE_SHIFT) - text;
  57
  58	swap = get_mm_counter(mm, MM_SWAPENTS);
  59	SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
  60	SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
  61	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
  62	SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
  63	SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
  64	SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
  65	SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
  66	SEQ_PUT_DEC(" kB\nRssFile:\t", file);
  67	SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
  68	SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
  69	SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
  70	seq_put_decimal_ull_width(m,
  71		    " kB\nVmExe:\t", text >> 10, 8);
  72	seq_put_decimal_ull_width(m,
  73		    " kB\nVmLib:\t", lib >> 10, 8);
  74	seq_put_decimal_ull_width(m,
  75		    " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
  76	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
  77	seq_puts(m, " kB\n");
  78	hugetlb_report_usage(m, mm);
  79}
  80#undef SEQ_PUT_DEC
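
The SEQ_PUT_DEC() chaining above works because each call prints the
previous field's " kB\n" terminator fused to the next label, so the byte
stream comes out identical to one large seq_printf(). A standalone
illustration, with plain printf() standing in for the seq_file helpers:

#include <stdio.h>

#define PUT_DEC(str, val) printf("%s%8lu", str, (unsigned long)(val))

int main(void)
{
        PUT_DEC("VmPeak:\t", 12345UL);
        PUT_DEC(" kB\nVmSize:\t", 12000UL);     /* closes VmPeak's line */
        printf(" kB\n");                        /* closes the last field */
        return 0;
}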
  81
  82unsigned long task_vsize(struct mm_struct *mm)
  83{
  84	return PAGE_SIZE * mm->total_vm;
  85}
  86
  87unsigned long task_statm(struct mm_struct *mm,
  88			 unsigned long *shared, unsigned long *text,
  89			 unsigned long *data, unsigned long *resident)
  90{
  91	*shared = get_mm_counter(mm, MM_FILEPAGES) +
  92			get_mm_counter(mm, MM_SHMEMPAGES);
  93	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
  94								>> PAGE_SHIFT;
  95	*data = mm->data_vm + mm->stack_vm;
  96	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
  97	return mm->total_vm;
  98}
  99
 100#ifdef CONFIG_NUMA
 101/*
 102 * Save get_task_policy() for show_numa_map().
 103 */
 104static void hold_task_mempolicy(struct proc_maps_private *priv)
 105{
 106	struct task_struct *task = priv->task;
 107
 108	task_lock(task);
 109	priv->task_mempolicy = get_task_policy(task);
 110	mpol_get(priv->task_mempolicy);
 111	task_unlock(task);
 112}
 113static void release_task_mempolicy(struct proc_maps_private *priv)
 114{
 115	mpol_put(priv->task_mempolicy);
 116}
 117#else
 118static void hold_task_mempolicy(struct proc_maps_private *priv)
 119{
 120}
 121static void release_task_mempolicy(struct proc_maps_private *priv)
 122{
 123}
 124#endif
 125
 126static void *m_start(struct seq_file *m, loff_t *ppos)
 127{
 128	struct proc_maps_private *priv = m->private;
 129	unsigned long last_addr = *ppos;
 130	struct mm_struct *mm;
 131	struct vm_area_struct *vma;
 132
 133	/* See m_next(). Zero at the start or after lseek. */
 134	if (last_addr == -1UL)
 135		return NULL;
 136
 137	priv->task = get_proc_task(priv->inode);
 138	if (!priv->task)
 139		return ERR_PTR(-ESRCH);
 140
 141	mm = priv->mm;
 142	if (!mm || !mmget_not_zero(mm)) {
 143		put_task_struct(priv->task);
 144		priv->task = NULL;
 145		return NULL;
 146	}
 147
 148	if (mmap_read_lock_killable(mm)) {
 149		mmput(mm);
 150		put_task_struct(priv->task);
 151		priv->task = NULL;
 152		return ERR_PTR(-EINTR);
 153	}
 154
 155	hold_task_mempolicy(priv);
 156	priv->tail_vma = get_gate_vma(mm);
 157
 158	vma = find_vma(mm, last_addr);
 159	if (vma)
 160		return vma;
 161
 162	return priv->tail_vma;
 163}
 164
 165static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
 166{
 167	struct proc_maps_private *priv = m->private;
 168	struct vm_area_struct *next, *vma = v;
 169
 170	if (vma == priv->tail_vma)
 171		next = NULL;
 172	else if (vma->vm_next)
 173		next = vma->vm_next;
 174	else
 175		next = priv->tail_vma;
 176
 177	*ppos = next ? next->vm_start : -1UL;
 178
 179	return next;
 180}
 181
 182static void m_stop(struct seq_file *m, void *v)
 183{
 184	struct proc_maps_private *priv = m->private;
 185	struct mm_struct *mm = priv->mm;
 186
 187	if (!priv->task)
 188		return;
 189
 190	release_task_mempolicy(priv);
 191	mmap_read_unlock(mm);
 192	mmput(mm);
 193	put_task_struct(priv->task);
 194	priv->task = NULL;
 195}
 196
 197static int proc_maps_open(struct inode *inode, struct file *file,
 198			const struct seq_operations *ops, int psize)
 199{
 200	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
 201
 202	if (!priv)
 203		return -ENOMEM;
 204
 205	priv->inode = inode;
 206	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
 207	if (IS_ERR(priv->mm)) {
 208		int err = PTR_ERR(priv->mm);
 209
 210		seq_release_private(inode, file);
 211		return err;
 212	}
 213
 214	return 0;
 215}
 216
 217static int proc_map_release(struct inode *inode, struct file *file)
 218{
 219	struct seq_file *seq = file->private_data;
 220	struct proc_maps_private *priv = seq->private;
 221
 222	if (priv->mm)
 223		mmdrop(priv->mm);
 224
 225	return seq_release_private(inode, file);
 226}
 227
 228static int do_maps_open(struct inode *inode, struct file *file,
 229			const struct seq_operations *ops)
 230{
 231	return proc_maps_open(inode, file, ops,
 232				sizeof(struct proc_maps_private));
 233}
 234
 235/*
 236 * Indicate if the VMA is a stack for the given task; for
 237 * /proc/PID/maps that is the stack of the main task.
 238 */
 239static int is_stack(struct vm_area_struct *vma)
 240{
 241	/*
 242	 * We make no effort to guess what a given thread considers to be
 243	 * its "stack".  It's not even well-defined for programs written
 244	 * in languages like Go.
 245	 */
 246	return vma->vm_start <= vma->vm_mm->start_stack &&
 247		vma->vm_end >= vma->vm_mm->start_stack;
 248}
 249
 250static void show_vma_header_prefix(struct seq_file *m,
 251				   unsigned long start, unsigned long end,
 252				   vm_flags_t flags, unsigned long long pgoff,
 253				   dev_t dev, unsigned long ino)
 254{
 255	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 256	seq_put_hex_ll(m, NULL, start, 8);
 257	seq_put_hex_ll(m, "-", end, 8);
 258	seq_putc(m, ' ');
 259	seq_putc(m, flags & VM_READ ? 'r' : '-');
 260	seq_putc(m, flags & VM_WRITE ? 'w' : '-');
 261	seq_putc(m, flags & VM_EXEC ? 'x' : '-');
 262	seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
 263	seq_put_hex_ll(m, " ", pgoff, 8);
 264	seq_put_hex_ll(m, " ", MAJOR(dev), 2);
 265	seq_put_hex_ll(m, ":", MINOR(dev), 2);
 266	seq_put_decimal_ull(m, " ", ino);
 267	seq_putc(m, ' ');
 268}
 269
 270static void
 271show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 272{
 273	struct mm_struct *mm = vma->vm_mm;
 274	struct file *file = vma->vm_file;
 275	vm_flags_t flags = vma->vm_flags;
 276	unsigned long ino = 0;
 277	unsigned long long pgoff = 0;
 278	unsigned long start, end;
 279	dev_t dev = 0;
 280	const char *name = NULL;
 281
 282	if (file) {
 283		struct inode *inode = file_inode(vma->vm_file);
 284		dev = inode->i_sb->s_dev;
 285		ino = inode->i_ino;
 286		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
 287	}
 288
 289	start = vma->vm_start;
 290	end = vma->vm_end;
 291	show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
 292
 293	/*
 294	 * Print the dentry name for named mappings, and a
 295	 * special [heap] marker for the heap:
 296	 */
 297	if (file) {
 298		seq_pad(m, ' ');
 299		seq_file_path(m, file, "\n");
 300		goto done;
 301	}
 302
 303	if (vma->vm_ops && vma->vm_ops->name) {
 304		name = vma->vm_ops->name(vma);
 305		if (name)
 306			goto done;
 307	}
 308
 309	name = arch_vma_name(vma);
 310	if (!name) {
 311		if (!mm) {
 312			name = "[vdso]";
 313			goto done;
 314		}
 315
 316		if (vma->vm_start <= mm->brk &&
 317		    vma->vm_end >= mm->start_brk) {
 318			name = "[heap]";
 319			goto done;
 320		}
 321
 322		if (is_stack(vma))
 323			name = "[stack]";
 324	}
 325
 326done:
 327	if (name) {
 328		seq_pad(m, ' ');
 329		seq_puts(m, name);
 330	}
 331	seq_putc(m, '\n');
 332}
 333
 334static int show_map(struct seq_file *m, void *v)
 335{
 336	show_map_vma(m, v);
 337	return 0;
 338}
 339
 340static const struct seq_operations proc_pid_maps_op = {
 341	.start	= m_start,
 342	.next	= m_next,
 343	.stop	= m_stop,
 344	.show	= show_map
 345};
 346
 347static int pid_maps_open(struct inode *inode, struct file *file)
 348{
 349	return do_maps_open(inode, file, &proc_pid_maps_op);
 350}
 351
 352const struct file_operations proc_pid_maps_operations = {
 353	.open		= pid_maps_open,
 354	.read		= seq_read,
 355	.llseek		= seq_lseek,
 356	.release	= proc_map_release,
 357};
 358
 359/*
 360 * Proportional Set Size(PSS): my share of RSS.
 361 *
 362 * PSS of a process is the count of pages it has in memory, where each
 363 * page is divided by the number of processes sharing it.  So if a
 364 * process has 1000 pages all to itself, and 1000 shared with one other
 365 * process, its PSS will be 1500.
 366 *
 367 * To keep (accumulated) division errors low, we adopt a 64bit
 368 * fixed-point pss counter to minimize division errors. So (pss >>
 369 * PSS_SHIFT) would be the real byte count.
 370 *
 371 * A shift of 12 before division means (assuming 4K page size):
 372 * 	- 1M 3-user-pages add up to 8KB errors;
 373 * 	- supports mapcount up to 2^24, or 16M;
 374 * 	- supports PSS up to 2^52 bytes, or 4PB.
 375 */
 376#define PSS_SHIFT 12
 377
 378#ifdef CONFIG_PROC_PAGE_MONITOR
 379struct mem_size_stats {
 380	unsigned long resident;
 381	unsigned long shared_clean;
 382	unsigned long shared_dirty;
 383	unsigned long private_clean;
 384	unsigned long private_dirty;
 385	unsigned long referenced;
 386	unsigned long anonymous;
 387	unsigned long lazyfree;
 388	unsigned long anonymous_thp;
 389	unsigned long shmem_thp;
 390	unsigned long file_thp;
 391	unsigned long swap;
 392	unsigned long shared_hugetlb;
 393	unsigned long private_hugetlb;
 394	u64 pss;
 395	u64 pss_anon;
 396	u64 pss_file;
 397	u64 pss_shmem;
 398	u64 pss_locked;
 399	u64 swap_pss;
 400	bool check_shmem_swap;
 401};
 402
 403static void smaps_page_accumulate(struct mem_size_stats *mss,
 404		struct page *page, unsigned long size, unsigned long pss,
 405		bool dirty, bool locked, bool private)
 406{
 407	mss->pss += pss;
 408
 409	if (PageAnon(page))
 410		mss->pss_anon += pss;
 411	else if (PageSwapBacked(page))
 412		mss->pss_shmem += pss;
 413	else
 414		mss->pss_file += pss;
 415
 416	if (locked)
 417		mss->pss_locked += pss;
 418
 419	if (dirty || PageDirty(page)) {
 420		if (private)
 421			mss->private_dirty += size;
 422		else
 423			mss->shared_dirty += size;
 424	} else {
 425		if (private)
 426			mss->private_clean += size;
 427		else
 428			mss->shared_clean += size;
 429	}
 430}
 431
 432static void smaps_account(struct mem_size_stats *mss, struct page *page,
 433		bool compound, bool young, bool dirty, bool locked)
 434{
 435	int i, nr = compound ? compound_nr(page) : 1;
 436	unsigned long size = nr * PAGE_SIZE;
 437
 438	/*
 439	 * First accumulate quantities that depend only on |size| and the type
 440	 * of the compound page.
 441	 */
 442	if (PageAnon(page)) {
 443		mss->anonymous += size;
 444		if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
 445			mss->lazyfree += size;
 446	}
 447
 448	mss->resident += size;
 449	/* Accumulate the size in pages that have been accessed. */
 450	if (young || page_is_young(page) || PageReferenced(page))
 451		mss->referenced += size;
 452
 453	/*
 454	 * Then accumulate quantities that may depend on sharing, or that may
 455	 * differ page-by-page.
 456	 *
 457	 * page_count(page) == 1 guarantees the page is mapped exactly once.
 458	 * If any subpage of the compound page mapped with PTE it would elevate
 459	 * page_count().
 460	 */
 461	if (page_count(page) == 1) {
 462		smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
 463			locked, true);
 464		return;
 465	}
 466	for (i = 0; i < nr; i++, page++) {
 467		int mapcount = page_mapcount(page);
 468		unsigned long pss = PAGE_SIZE << PSS_SHIFT;
 469		if (mapcount >= 2)
 470			pss /= mapcount;
 471		smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked,
 472				      mapcount < 2);
 473	}
 474}
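
A worked example of the two paths in smaps_account() above, for a
hypothetical 2M THP of 512 4K subpages where half the subpages are also
PTE-mapped by one other mm (all values made up for illustration):

#include <stdio.h>

#define PSS_SHIFT 12
#define PAGE_SIZE 4096ULL       /* assumption: 4K pages */
#define NR 512                  /* subpages in a 2M THP */

int main(void)
{
        unsigned long long pss = 0;
        int i;

        /* page_count() != 1, so charge each subpage by its own mapcount,
         * as in the for loop above; with page_count() == 1 the whole
         * size would be charged privately in a single step instead */
        for (i = 0; i < NR; i++) {
                int mapcount = (i < NR / 2) ? 2 : 1;
                unsigned long long p = PAGE_SIZE << PSS_SHIFT;

                if (mapcount >= 2)
                        p /= mapcount;
                pss += p;
        }
        printf("Pss: %llu kB\n", (pss >> PSS_SHIFT) >> 10);     /* 1536 */
        return 0;
}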
 475
 476#ifdef CONFIG_SHMEM
 477static int smaps_pte_hole(unsigned long addr, unsigned long end,
 478			  __always_unused int depth, struct mm_walk *walk)
 479{
 480	struct mem_size_stats *mss = walk->private;
 481
 482	mss->swap += shmem_partial_swap_usage(
 483			walk->vma->vm_file->f_mapping, addr, end);
 484
 485	return 0;
 486}
 487#else
 488#define smaps_pte_hole		NULL
 489#endif /* CONFIG_SHMEM */
 490
 491static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 492		struct mm_walk *walk)
 493{
 494	struct mem_size_stats *mss = walk->private;
 495	struct vm_area_struct *vma = walk->vma;
 496	bool locked = !!(vma->vm_flags & VM_LOCKED);
 497	struct page *page = NULL;
 498
 499	if (pte_present(*pte)) {
 500		page = vm_normal_page(vma, addr, *pte);
 501	} else if (is_swap_pte(*pte)) {
 502		swp_entry_t swpent = pte_to_swp_entry(*pte);
 503
 504		if (!non_swap_entry(swpent)) {
 505			int mapcount;
 506
 507			mss->swap += PAGE_SIZE;
 508			mapcount = swp_swapcount(swpent);
 509			if (mapcount >= 2) {
 510				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
 511
 512				do_div(pss_delta, mapcount);
 513				mss->swap_pss += pss_delta;
 514			} else {
 515				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
 516			}
 517		} else if (is_pfn_swap_entry(swpent))
 518			page = pfn_swap_entry_to_page(swpent);
 519	} else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
 520							&& pte_none(*pte))) {
 521		page = xa_load(&vma->vm_file->f_mapping->i_pages,
 522						linear_page_index(vma, addr));
 523		if (xa_is_value(page))
 524			mss->swap += PAGE_SIZE;
 525		return;
 526	}
 527
 528	if (!page)
 529		return;
 530
 531	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
 532}
 533
 534#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 535static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 536		struct mm_walk *walk)
 537{
 538	struct mem_size_stats *mss = walk->private;
 539	struct vm_area_struct *vma = walk->vma;
 540	bool locked = !!(vma->vm_flags & VM_LOCKED);
 541	struct page *page = NULL;
 542
 543	if (pmd_present(*pmd)) {
 544		/* FOLL_DUMP will return -EFAULT on huge zero page */
 545		page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
 546	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
 547		swp_entry_t entry = pmd_to_swp_entry(*pmd);
 548
 549		if (is_migration_entry(entry))
 550			page = pfn_swap_entry_to_page(entry);
 551	}
 552	if (IS_ERR_OR_NULL(page))
 553		return;
 554	if (PageAnon(page))
 555		mss->anonymous_thp += HPAGE_PMD_SIZE;
 556	else if (PageSwapBacked(page))
 557		mss->shmem_thp += HPAGE_PMD_SIZE;
 558	else if (is_zone_device_page(page))
 559		/* pass */;
 560	else
 561		mss->file_thp += HPAGE_PMD_SIZE;
 562	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
 563}
 564#else
 565static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 566		struct mm_walk *walk)
 567{
 568}
 569#endif
 570
 571static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 572			   struct mm_walk *walk)
 573{
 574	struct vm_area_struct *vma = walk->vma;
 575	pte_t *pte;
 576	spinlock_t *ptl;
 577
 578	ptl = pmd_trans_huge_lock(pmd, vma);
 579	if (ptl) {
 580		smaps_pmd_entry(pmd, addr, walk);
 581		spin_unlock(ptl);
 582		goto out;
 583	}
 584
 585	if (pmd_trans_unstable(pmd))
 586		goto out;
 587	/*
 588	 * The mmap_lock held all the way back in m_start() is what
 589	 * keeps khugepaged out of here and from collapsing things
 590	 * in here.
 591	 */
 592	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 593	for (; addr != end; pte++, addr += PAGE_SIZE)
 594		smaps_pte_entry(pte, addr, walk);
 595	pte_unmap_unlock(pte - 1, ptl);
 596out:
 597	cond_resched();
 598	return 0;
 599}
 600
 601static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
 602{
 603	/*
 604	 * Don't forget to update Documentation/ on changes.
 605	 */
 606	static const char mnemonics[BITS_PER_LONG][2] = {
 607		/*
 608		 * In case if we meet a flag we don't know about.
 609		 */
 610		[0 ... (BITS_PER_LONG-1)] = "??",
 611
 612		[ilog2(VM_READ)]	= "rd",
 613		[ilog2(VM_WRITE)]	= "wr",
 614		[ilog2(VM_EXEC)]	= "ex",
 615		[ilog2(VM_SHARED)]	= "sh",
 616		[ilog2(VM_MAYREAD)]	= "mr",
 617		[ilog2(VM_MAYWRITE)]	= "mw",
 618		[ilog2(VM_MAYEXEC)]	= "me",
 619		[ilog2(VM_MAYSHARE)]	= "ms",
 620		[ilog2(VM_GROWSDOWN)]	= "gd",
 621		[ilog2(VM_PFNMAP)]	= "pf",
 622		[ilog2(VM_DENYWRITE)]	= "dw",
 623		[ilog2(VM_LOCKED)]	= "lo",
 624		[ilog2(VM_IO)]		= "io",
 625		[ilog2(VM_SEQ_READ)]	= "sr",
 626		[ilog2(VM_RAND_READ)]	= "rr",
 627		[ilog2(VM_DONTCOPY)]	= "dc",
 628		[ilog2(VM_DONTEXPAND)]	= "de",
 629		[ilog2(VM_ACCOUNT)]	= "ac",
 630		[ilog2(VM_NORESERVE)]	= "nr",
 631		[ilog2(VM_HUGETLB)]	= "ht",
 632		[ilog2(VM_SYNC)]	= "sf",
 633		[ilog2(VM_ARCH_1)]	= "ar",
 634		[ilog2(VM_WIPEONFORK)]	= "wf",
 635		[ilog2(VM_DONTDUMP)]	= "dd",
 636#ifdef CONFIG_ARM64_BTI
 637		[ilog2(VM_ARM64_BTI)]	= "bt",
 638#endif
 639#ifdef CONFIG_MEM_SOFT_DIRTY
 640		[ilog2(VM_SOFTDIRTY)]	= "sd",
 641#endif
 642		[ilog2(VM_MIXEDMAP)]	= "mm",
 643		[ilog2(VM_HUGEPAGE)]	= "hg",
 644		[ilog2(VM_NOHUGEPAGE)]	= "nh",
 645		[ilog2(VM_MERGEABLE)]	= "mg",
 646		[ilog2(VM_UFFD_MISSING)]= "um",
 647		[ilog2(VM_UFFD_WP)]	= "uw",
 648#ifdef CONFIG_ARM64_MTE
 649		[ilog2(VM_MTE)]		= "mt",
 650		[ilog2(VM_MTE_ALLOWED)]	= "",
 651#endif
 652#ifdef CONFIG_ARCH_HAS_PKEYS
 653		/* These come out via ProtectionKey: */
 654		[ilog2(VM_PKEY_BIT0)]	= "",
 655		[ilog2(VM_PKEY_BIT1)]	= "",
 656		[ilog2(VM_PKEY_BIT2)]	= "",
 657		[ilog2(VM_PKEY_BIT3)]	= "",
 658#if VM_PKEY_BIT4
 659		[ilog2(VM_PKEY_BIT4)]	= "",
 660#endif
 661#endif /* CONFIG_ARCH_HAS_PKEYS */
 662#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
 663		[ilog2(VM_UFFD_MINOR)]	= "ui",
 664#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
 665	};
 666	size_t i;
 667
 668	seq_puts(m, "VmFlags: ");
 669	for (i = 0; i < BITS_PER_LONG; i++) {
 670		if (!mnemonics[i][0])
 671			continue;
 672		if (vma->vm_flags & (1UL << i)) {
 673			seq_putc(m, mnemonics[i][0]);
 674			seq_putc(m, mnemonics[i][1]);
 675			seq_putc(m, ' ');
 676		}
 677	}
 678	seq_putc(m, '\n');
 679}
 680
 681#ifdef CONFIG_HUGETLB_PAGE
 682static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
 683				 unsigned long addr, unsigned long end,
 684				 struct mm_walk *walk)
 685{
 686	struct mem_size_stats *mss = walk->private;
 687	struct vm_area_struct *vma = walk->vma;
 688	struct page *page = NULL;
 689
 690	if (pte_present(*pte)) {
 691		page = vm_normal_page(vma, addr, *pte);
 692	} else if (is_swap_pte(*pte)) {
 693		swp_entry_t swpent = pte_to_swp_entry(*pte);
 694
 695		if (is_pfn_swap_entry(swpent))
 696			page = pfn_swap_entry_to_page(swpent);
 697	}
 698	if (page) {
 699		int mapcount = page_mapcount(page);
 700
 701		if (mapcount >= 2)
 702			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
 703		else
 704			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
 705	}
 706	return 0;
 707}
 708#else
 709#define smaps_hugetlb_range	NULL
 710#endif /* HUGETLB_PAGE */
 711
 712static const struct mm_walk_ops smaps_walk_ops = {
 713	.pmd_entry		= smaps_pte_range,
 714	.hugetlb_entry		= smaps_hugetlb_range,
 715};
 716
 717static const struct mm_walk_ops smaps_shmem_walk_ops = {
 718	.pmd_entry		= smaps_pte_range,
 719	.hugetlb_entry		= smaps_hugetlb_range,
 720	.pte_hole		= smaps_pte_hole,
 721};
 722
 723/*
 724 * Gather mem stats from @vma with the indicated beginning
 725 * address @start, and keep them in @mss.
 726 *
 727 * Use vm_start of @vma as the beginning address if @start is 0.
 728 */
 729static void smap_gather_stats(struct vm_area_struct *vma,
 730		struct mem_size_stats *mss, unsigned long start)
 731{
 732	const struct mm_walk_ops *ops = &smaps_walk_ops;
 733
 734	/* Invalid start */
 735	if (start >= vma->vm_end)
 736		return;
 737
 738#ifdef CONFIG_SHMEM
 739	/* In case of smaps_rollup, reset the value from previous vma */
 740	mss->check_shmem_swap = false;
 741	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
 742		/*
 743		 * For shared or readonly shmem mappings we know that all
 744		 * swapped out pages belong to the shmem object, and we can
 745		 * obtain the swap value much more efficiently. For private
 746		 * writable mappings, we might have COW pages that are
 747		 * not affected by the parent swapped out pages of the shmem
 748		 * object, so we have to distinguish them during the page walk.
 749		 * Unless we know that the shmem object (or the part mapped by
 750		 * our VMA) has no swapped out pages at all.
 751		 */
 752		unsigned long shmem_swapped = shmem_swap_usage(vma);
 753
 754		if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
 755					!(vma->vm_flags & VM_WRITE))) {
 756			mss->swap += shmem_swapped;
 757		} else {
 758			mss->check_shmem_swap = true;
 759			ops = &smaps_shmem_walk_ops;
 760		}
 761	}
 762#endif
 763	/* mmap_lock is held in m_start */
 764	if (!start)
 765		walk_page_vma(vma, ops, mss);
 766	else
 767		walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
 768}
 769
 770#define SEQ_PUT_DEC(str, val) \
 771		seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
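/*
 * Each SEQ_PUT_DEC() prints @str followed by @val converted from bytes
 * to kilobytes (>> 10), right-aligned in an 8-character field.  The
 * trailing " kB\n" of every line is carried as the prefix of the next
 * call, so each field costs a single seq_file call.
 */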
 772
 773/* Show the contents common for smaps and smaps_rollup */
 774static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
 775	bool rollup_mode)
 776{
 777	SEQ_PUT_DEC("Rss:            ", mss->resident);
 778	SEQ_PUT_DEC(" kB\nPss:            ", mss->pss >> PSS_SHIFT);
 779	if (rollup_mode) {
 780		/*
 781		 * These are meaningful only for smaps_rollup, otherwise two of
 782		 * them are zero, and the other one is the same as Pss.
 783		 */
 784		SEQ_PUT_DEC(" kB\nPss_Anon:       ",
 785			mss->pss_anon >> PSS_SHIFT);
 786		SEQ_PUT_DEC(" kB\nPss_File:       ",
 787			mss->pss_file >> PSS_SHIFT);
 788		SEQ_PUT_DEC(" kB\nPss_Shmem:      ",
 789			mss->pss_shmem >> PSS_SHIFT);
 790	}
 791	SEQ_PUT_DEC(" kB\nShared_Clean:   ", mss->shared_clean);
 792	SEQ_PUT_DEC(" kB\nShared_Dirty:   ", mss->shared_dirty);
 793	SEQ_PUT_DEC(" kB\nPrivate_Clean:  ", mss->private_clean);
 794	SEQ_PUT_DEC(" kB\nPrivate_Dirty:  ", mss->private_dirty);
 795	SEQ_PUT_DEC(" kB\nReferenced:     ", mss->referenced);
 796	SEQ_PUT_DEC(" kB\nAnonymous:      ", mss->anonymous);
 797	SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
 798	SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
 799	SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
 800	SEQ_PUT_DEC(" kB\nFilePmdMapped:  ", mss->file_thp);
 801	SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
 802	seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
 803				  mss->private_hugetlb >> 10, 7);
 804	SEQ_PUT_DEC(" kB\nSwap:           ", mss->swap);
 805	SEQ_PUT_DEC(" kB\nSwapPss:        ",
 806					mss->swap_pss >> PSS_SHIFT);
 807	SEQ_PUT_DEC(" kB\nLocked:         ",
 808					mss->pss_locked >> PSS_SHIFT);
 809	seq_puts(m, " kB\n");
 810}
 811
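/*
 * Illustrative fragment of the output built above (numbers are made-up
 * placeholders; real values come from the gathered mss):
 *
 *	Rss:                 480 kB
 *	Pss:                 152 kB
 *	Shared_Clean:        312 kB
 *	...
 *	Locked:                0 kB
 */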
 812static int show_smap(struct seq_file *m, void *v)
 813{
 814	struct vm_area_struct *vma = v;
 815	struct mem_size_stats mss;
 816
 817	memset(&mss, 0, sizeof(mss));
 818
 819	smap_gather_stats(vma, &mss, 0);
 820
 821	show_map_vma(m, vma);
 822
 823	SEQ_PUT_DEC("Size:           ", vma->vm_end - vma->vm_start);
 824	SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
 825	SEQ_PUT_DEC(" kB\nMMUPageSize:    ", vma_mmu_pagesize(vma));
 826	seq_puts(m, " kB\n");
 827
 828	__show_smap(m, &mss, false);
 829
 830	seq_printf(m, "THPeligible:    %d\n",
 831		   transparent_hugepage_active(vma));
 832
 833	if (arch_pkeys_enabled())
 834		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
 835	show_smap_vma_flags(m, vma);
 836
 837	return 0;
 838}
 839
 840static int show_smaps_rollup(struct seq_file *m, void *v)
 841{
 842	struct proc_maps_private *priv = m->private;
 843	struct mem_size_stats mss;
 844	struct mm_struct *mm;
 845	struct vm_area_struct *vma;
 846	unsigned long last_vma_end = 0;
 847	int ret = 0;
 848
 849	priv->task = get_proc_task(priv->inode);
 850	if (!priv->task)
 851		return -ESRCH;
 852
 853	mm = priv->mm;
 854	if (!mm || !mmget_not_zero(mm)) {
 855		ret = -ESRCH;
 856		goto out_put_task;
 857	}
 858
 859	memset(&mss, 0, sizeof(mss));
 860
 861	ret = mmap_read_lock_killable(mm);
 862	if (ret)
 863		goto out_put_mm;
 864
 865	hold_task_mempolicy(priv);
 866
 867	for (vma = priv->mm->mmap; vma;) {
 868		smap_gather_stats(vma, &mss, 0);
 869		last_vma_end = vma->vm_end;
 870
 871		/*
 872		 * Release mmap_lock temporarily if someone else is
 873		 * waiting to take it for writing.
 874		 */
 875		if (mmap_lock_is_contended(mm)) {
 876			mmap_read_unlock(mm);
 877			ret = mmap_read_lock_killable(mm);
 878			if (ret) {
 879				release_task_mempolicy(priv);
 880				goto out_put_mm;
 881			}
 882
 883			/*
 884			 * After dropping the lock, there are four cases to
 885			 * consider. See the following example for explanation.
 886			 *
 887			 *   +------+------+-----------+
 888			 *   | VMA1 | VMA2 | VMA3      |
 889			 *   +------+------+-----------+
 890			 *   |      |      |           |
 891			 *  4k     8k     16k         400k
 892			 *
 893			 * Suppose we drop the lock after reading VMA2 due to
 894			 * contention, then we get:
 895			 *
 896			 *	last_vma_end = 16k
 897			 *
 898			 * 1) VMA2 is freed, but VMA3 exists:
 899			 *
 900			 *    find_vma(mm, 16k - 1) will return VMA3.
 901			 *    In this case, just continue from VMA3.
 902			 *
 903			 * 2) VMA2 still exists:
 904			 *
 905			 *    find_vma(mm, 16k - 1) will return VMA2.
 906			 *    Continue iterating from VMA2, as the original loop would.
 907			 *
 908			 * 3) No more VMAs can be found:
 909			 *
 910			 *    find_vma(mm, 16k - 1) will return NULL.
 911			 *    No more things to do, just break.
 912			 *
 913			 * 4) (last_vma_end - 1) is the middle of a vma (VMA'):
 914			 *
 915			 *    find_vma(mm, 16k - 1) will return VMA' whose range
 916			 *    contains last_vma_end.
 917			 *    Resume walking VMA' at last_vma_end.
 918			 */
 919			vma = find_vma(mm, last_vma_end - 1);
 920			/* Case 3 above */
 921			if (!vma)
 922				break;
 923
 924			/* Case 1 above */
 925			if (vma->vm_start >= last_vma_end)
 926				continue;
 927
 928			/* Case 4 above */
 929			if (vma->vm_end > last_vma_end)
 930				smap_gather_stats(vma, &mss, last_vma_end);
 931		}
 932		/* Case 2 above */
 933		vma = vma->vm_next;
 934	}
 935
 936	show_vma_header_prefix(m, priv->mm->mmap->vm_start,
 937			       last_vma_end, 0, 0, 0, 0);
 938	seq_pad(m, ' ');
 939	seq_puts(m, "[rollup]\n");
 940
 941	__show_smap(m, &mss, true);
 942
 943	release_task_mempolicy(priv);
 944	mmap_read_unlock(mm);
 945
 946out_put_mm:
 947	mmput(mm);
 948out_put_task:
 949	put_task_struct(priv->task);
 950	priv->task = NULL;
 951
 952	return ret;
 953}
 954#undef SEQ_PUT_DEC
 955
 956static const struct seq_operations proc_pid_smaps_op = {
 957	.start	= m_start,
 958	.next	= m_next,
 959	.stop	= m_stop,
 960	.show	= show_smap
 961};
 962
 963static int pid_smaps_open(struct inode *inode, struct file *file)
 964{
 965	return do_maps_open(inode, file, &proc_pid_smaps_op);
 966}
 967
 968static int smaps_rollup_open(struct inode *inode, struct file *file)
 969{
 970	int ret;
 971	struct proc_maps_private *priv;
 972
 973	priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
 974	if (!priv)
 975		return -ENOMEM;
 976
 977	ret = single_open(file, show_smaps_rollup, priv);
 978	if (ret)
 979		goto out_free;
 980
 981	priv->inode = inode;
 982	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
 983	if (IS_ERR(priv->mm)) {
 984		ret = PTR_ERR(priv->mm);
 985
 986		single_release(inode, file);
 987		goto out_free;
 988	}
 989
 990	return 0;
 991
 992out_free:
 993	kfree(priv);
 994	return ret;
 995}
 996
 997static int smaps_rollup_release(struct inode *inode, struct file *file)
 998{
 999	struct seq_file *seq = file->private_data;
1000	struct proc_maps_private *priv = seq->private;
1001
1002	if (priv->mm)
1003		mmdrop(priv->mm);
1004
1005	kfree(priv);
1006	return single_release(inode, file);
1007}
1008
1009const struct file_operations proc_pid_smaps_operations = {
1010	.open		= pid_smaps_open,
1011	.read		= seq_read,
1012	.llseek		= seq_lseek,
1013	.release	= proc_map_release,
1014};
1015
1016const struct file_operations proc_pid_smaps_rollup_operations = {
1017	.open		= smaps_rollup_open,
1018	.read		= seq_read,
1019	.llseek		= seq_lseek,
1020	.release	= smaps_rollup_release,
1021};
1022
1023enum clear_refs_types {
1024	CLEAR_REFS_ALL = 1,
1025	CLEAR_REFS_ANON,
1026	CLEAR_REFS_MAPPED,
1027	CLEAR_REFS_SOFT_DIRTY,
1028	CLEAR_REFS_MM_HIWATER_RSS,
1029	CLEAR_REFS_LAST,
1030};
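/*
 * The value written to /proc/pid/clear_refs is parsed as a decimal
 * integer and cast to this enum, so "1".."5" select CLEAR_REFS_ALL
 * through CLEAR_REFS_MM_HIWATER_RSS.  CLEAR_REFS_LAST is only a
 * range-check sentinel and cannot be selected.
 */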
1031
1032struct clear_refs_private {
1033	enum clear_refs_types type;
1034};
1035
1036#ifdef CONFIG_MEM_SOFT_DIRTY
1037
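/*
 * Rationale for the check below: a writable pte in a COW mapping whose
 * page may be DMA-pinned must not be write-protected by the soft-dirty
 * code, since the copy-on-write triggered later would break the pin.
 * The MMF_HAS_PINNED test keeps the page lookup off the common path.
 */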
1038static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1039{
1040	struct page *page;
1041
1042	if (!pte_write(pte))
1043		return false;
1044	if (!is_cow_mapping(vma->vm_flags))
1045		return false;
1046	if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
1047		return false;
1048	page = vm_normal_page(vma, addr, pte);
1049	if (!page)
1050		return false;
1051	return page_maybe_dma_pinned(page);
1052}
1053
1054static inline void clear_soft_dirty(struct vm_area_struct *vma,
1055		unsigned long addr, pte_t *pte)
1056{
1057	/*
 1058	 * The soft-dirty tracker uses page faults (#PF) to catch
 1059	 * writes to pages, so write-protect the pte as well.  See
 1060	 * Documentation/admin-guide/mm/soft-dirty.rst for a full
 1061	 * description of how soft-dirty works.
1062	 */
1063	pte_t ptent = *pte;
1064
1065	if (pte_present(ptent)) {
1066		pte_t old_pte;
1067
1068		if (pte_is_pinned(vma, addr, ptent))
1069			return;
1070		old_pte = ptep_modify_prot_start(vma, addr, pte);
1071		ptent = pte_wrprotect(old_pte);
1072		ptent = pte_clear_soft_dirty(ptent);
1073		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
1074	} else if (is_swap_pte(ptent)) {
1075		ptent = pte_swp_clear_soft_dirty(ptent);
1076		set_pte_at(vma->vm_mm, addr, pte, ptent);
1077	}
1078}
1079#else
1080static inline void clear_soft_dirty(struct vm_area_struct *vma,
1081		unsigned long addr, pte_t *pte)
1082{
1083}
1084#endif
1085
1086#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1087static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1088		unsigned long addr, pmd_t *pmdp)
1089{
1090	pmd_t old, pmd = *pmdp;
1091
1092	if (pmd_present(pmd)) {
1093		/* See comment in change_huge_pmd() */
1094		old = pmdp_invalidate(vma, addr, pmdp);
1095		if (pmd_dirty(old))
1096			pmd = pmd_mkdirty(pmd);
1097		if (pmd_young(old))
1098			pmd = pmd_mkyoung(pmd);
1099
1100		pmd = pmd_wrprotect(pmd);
1101		pmd = pmd_clear_soft_dirty(pmd);
1102
1103		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1104	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
1105		pmd = pmd_swp_clear_soft_dirty(pmd);
1106		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1107	}
1108}
1109#else
1110static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1111		unsigned long addr, pmd_t *pmdp)
1112{
1113}
1114#endif
1115
1116static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
1117				unsigned long end, struct mm_walk *walk)
1118{
1119	struct clear_refs_private *cp = walk->private;
1120	struct vm_area_struct *vma = walk->vma;
1121	pte_t *pte, ptent;
1122	spinlock_t *ptl;
1123	struct page *page;
1124
1125	ptl = pmd_trans_huge_lock(pmd, vma);
1126	if (ptl) {
1127		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1128			clear_soft_dirty_pmd(vma, addr, pmd);
1129			goto out;
1130		}
1131
1132		if (!pmd_present(*pmd))
1133			goto out;
1134
1135		page = pmd_page(*pmd);
1136
1137		/* Clear accessed and referenced bits. */
1138		pmdp_test_and_clear_young(vma, addr, pmd);
1139		test_and_clear_page_young(page);
1140		ClearPageReferenced(page);
1141out:
1142		spin_unlock(ptl);
1143		return 0;
1144	}
1145
1146	if (pmd_trans_unstable(pmd))
1147		return 0;
1148
1149	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1150	for (; addr != end; pte++, addr += PAGE_SIZE) {
1151		ptent = *pte;
1152
1153		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1154			clear_soft_dirty(vma, addr, pte);
1155			continue;
1156		}
1157
1158		if (!pte_present(ptent))
1159			continue;
1160
1161		page = vm_normal_page(vma, addr, ptent);
1162		if (!page)
1163			continue;
1164
1165		/* Clear accessed and referenced bits. */
1166		ptep_test_and_clear_young(vma, addr, pte);
1167		test_and_clear_page_young(page);
1168		ClearPageReferenced(page);
1169	}
1170	pte_unmap_unlock(pte - 1, ptl);
1171	cond_resched();
1172	return 0;
1173}
1174
1175static int clear_refs_test_walk(unsigned long start, unsigned long end,
1176				struct mm_walk *walk)
1177{
1178	struct clear_refs_private *cp = walk->private;
1179	struct vm_area_struct *vma = walk->vma;
1180
1181	if (vma->vm_flags & VM_PFNMAP)
1182		return 1;
1183
1184	/*
1185	 * Writing 1 to /proc/pid/clear_refs affects all pages.
1186	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
1187	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
 1188	 * Writing 4 to /proc/pid/clear_refs clears the soft-dirty bit on all pages.
1189	 */
1190	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
1191		return 1;
1192	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
1193		return 1;
1194	return 0;
1195}
1196
1197static const struct mm_walk_ops clear_refs_walk_ops = {
1198	.pmd_entry		= clear_refs_pte_range,
1199	.test_walk		= clear_refs_test_walk,
1200};
1201
1202static ssize_t clear_refs_write(struct file *file, const char __user *buf,
1203				size_t count, loff_t *ppos)
1204{
1205	struct task_struct *task;
1206	char buffer[PROC_NUMBUF];
1207	struct mm_struct *mm;
1208	struct vm_area_struct *vma;
1209	enum clear_refs_types type;
1210	int itype;
1211	int rv;
1212
1213	memset(buffer, 0, sizeof(buffer));
1214	if (count > sizeof(buffer) - 1)
1215		count = sizeof(buffer) - 1;
1216	if (copy_from_user(buffer, buf, count))
1217		return -EFAULT;
1218	rv = kstrtoint(strstrip(buffer), 10, &itype);
1219	if (rv < 0)
1220		return rv;
1221	type = (enum clear_refs_types)itype;
1222	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
1223		return -EINVAL;
1224
1225	task = get_proc_task(file_inode(file));
1226	if (!task)
1227		return -ESRCH;
1228	mm = get_task_mm(task);
1229	if (mm) {
1230		struct mmu_notifier_range range;
1231		struct clear_refs_private cp = {
1232			.type = type,
1233		};
1234
1235		if (mmap_write_lock_killable(mm)) {
1236			count = -EINTR;
1237			goto out_mm;
1238		}
1239		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
1240			/*
1241			 * Writing 5 to /proc/pid/clear_refs resets the peak
1242			 * resident set size to this mm's current rss value.
1243			 */
1244			reset_mm_hiwater_rss(mm);
1245			goto out_unlock;
1246		}
1247
1248		if (type == CLEAR_REFS_SOFT_DIRTY) {
1249			for (vma = mm->mmap; vma; vma = vma->vm_next) {
1250				if (!(vma->vm_flags & VM_SOFTDIRTY))
1251					continue;
1252				vma->vm_flags &= ~VM_SOFTDIRTY;
1253				vma_set_page_prot(vma);
1254			}
1255
1256			inc_tlb_flush_pending(mm);
1257			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
1258						0, NULL, mm, 0, -1UL);
1259			mmu_notifier_invalidate_range_start(&range);
1260		}
1261		walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops,
1262				&cp);
1263		if (type == CLEAR_REFS_SOFT_DIRTY) {
1264			mmu_notifier_invalidate_range_end(&range);
1265			flush_tlb_mm(mm);
1266			dec_tlb_flush_pending(mm);
1267		}
1268out_unlock:
1269		mmap_write_unlock(mm);
1270out_mm:
1271		mmput(mm);
1272	}
1273	put_task_struct(task);
1274
1275	return count;
1276}
1277
1278const struct file_operations proc_clear_refs_operations = {
1279	.write		= clear_refs_write,
1280	.llseek		= noop_llseek,
1281};
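/*
 * Illustrative userspace sketch (not part of this file; error handling
 * omitted and the pid is a made-up example): reset the soft-dirty bits,
 * let the task run, then look for pages written since the reset via
 * pagemap bit 55.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/proc/1234/clear_refs", O_WRONLY);
 *	write(fd, "4", 1);		(4 == CLEAR_REFS_SOFT_DIRTY)
 *	close(fd);
 *	... later, read /proc/1234/pagemap and test bit 55
 *	(PM_SOFT_DIRTY) in each 64-bit entry.
 */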
1282
1283typedef struct {
1284	u64 pme;
1285} pagemap_entry_t;
1286
1287struct pagemapread {
1288	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
1289	pagemap_entry_t *buffer;
1290	bool show_pfn;
1291};
1292
1293#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
1294#define PAGEMAP_WALK_MASK	(PMD_MASK)
1295
1296#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
1297#define PM_PFRAME_BITS		55
1298#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1299#define PM_SOFT_DIRTY		BIT_ULL(55)
1300#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
1301#define PM_UFFD_WP		BIT_ULL(57)
1302#define PM_FILE			BIT_ULL(61)
1303#define PM_SWAP			BIT_ULL(62)
1304#define PM_PRESENT		BIT_ULL(63)
1305
1306#define PM_END_OF_BUFFER    1
1307
1308static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
1309{
1310	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
1311}
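/*
 * For example, a present page at pfn 0x1234 whose pte is soft-dirty is
 * encoded as make_pme(0x1234, PM_PRESENT | PM_SOFT_DIRTY); the frame
 * part is non-zero only when the reader passed the CAP_SYS_ADMIN check
 * (pm->show_pfn).
 */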
1312
1313static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
1314			  struct pagemapread *pm)
1315{
1316	pm->buffer[pm->pos++] = *pme;
1317	if (pm->pos >= pm->len)
1318		return PM_END_OF_BUFFER;
1319	return 0;
1320}
1321
1322static int pagemap_pte_hole(unsigned long start, unsigned long end,
1323			    __always_unused int depth, struct mm_walk *walk)
1324{
1325	struct pagemapread *pm = walk->private;
1326	unsigned long addr = start;
1327	int err = 0;
1328
1329	while (addr < end) {
1330		struct vm_area_struct *vma = find_vma(walk->mm, addr);
1331		pagemap_entry_t pme = make_pme(0, 0);
1332		/* End of address space hole, which we mark as non-present. */
1333		unsigned long hole_end;
1334
1335		if (vma)
1336			hole_end = min(end, vma->vm_start);
1337		else
1338			hole_end = end;
1339
1340		for (; addr < hole_end; addr += PAGE_SIZE) {
1341			err = add_to_pagemap(addr, &pme, pm);
1342			if (err)
1343				goto out;
1344		}
1345
1346		if (!vma)
1347			break;
1348
1349		/* Addresses in the VMA. */
1350		if (vma->vm_flags & VM_SOFTDIRTY)
1351			pme = make_pme(0, PM_SOFT_DIRTY);
1352		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
1353			err = add_to_pagemap(addr, &pme, pm);
1354			if (err)
1355				goto out;
1356		}
1357	}
1358out:
1359	return err;
1360}
1361
1362static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
1363		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1364{
1365	u64 frame = 0, flags = 0;
1366	struct page *page = NULL;
1367
1368	if (pte_present(pte)) {
1369		if (pm->show_pfn)
1370			frame = pte_pfn(pte);
1371		flags |= PM_PRESENT;
1372		page = vm_normal_page(vma, addr, pte);
1373		if (pte_soft_dirty(pte))
1374			flags |= PM_SOFT_DIRTY;
1375		if (pte_uffd_wp(pte))
1376			flags |= PM_UFFD_WP;
1377	} else if (is_swap_pte(pte)) {
1378		swp_entry_t entry;
1379		if (pte_swp_soft_dirty(pte))
1380			flags |= PM_SOFT_DIRTY;
1381		if (pte_swp_uffd_wp(pte))
1382			flags |= PM_UFFD_WP;
1383		entry = pte_to_swp_entry(pte);
1384		if (pm->show_pfn)
1385			frame = swp_type(entry) |
1386				(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
1387		flags |= PM_SWAP;
1388		if (is_pfn_swap_entry(entry))
1389			page = pfn_swap_entry_to_page(entry);
1390	}
1391
1392	if (page && !PageAnon(page))
1393		flags |= PM_FILE;
1394	if (page && page_mapcount(page) == 1)
1395		flags |= PM_MMAP_EXCLUSIVE;
1396	if (vma->vm_flags & VM_SOFTDIRTY)
1397		flags |= PM_SOFT_DIRTY;
1398
1399	return make_pme(frame, flags);
1400}
1401
1402static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1403			     struct mm_walk *walk)
1404{
1405	struct vm_area_struct *vma = walk->vma;
1406	struct pagemapread *pm = walk->private;
1407	spinlock_t *ptl;
1408	pte_t *pte, *orig_pte;
1409	int err = 0;
1410
1411#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1412	ptl = pmd_trans_huge_lock(pmdp, vma);
1413	if (ptl) {
1414		u64 flags = 0, frame = 0;
1415		pmd_t pmd = *pmdp;
1416		struct page *page = NULL;
1417
1418		if (vma->vm_flags & VM_SOFTDIRTY)
1419			flags |= PM_SOFT_DIRTY;
1420
1421		if (pmd_present(pmd)) {
1422			page = pmd_page(pmd);
1423
1424			flags |= PM_PRESENT;
1425			if (pmd_soft_dirty(pmd))
1426				flags |= PM_SOFT_DIRTY;
1427			if (pmd_uffd_wp(pmd))
1428				flags |= PM_UFFD_WP;
1429			if (pm->show_pfn)
1430				frame = pmd_pfn(pmd) +
1431					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1432		}
1433#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1434		else if (is_swap_pmd(pmd)) {
1435			swp_entry_t entry = pmd_to_swp_entry(pmd);
1436			unsigned long offset;
1437
1438			if (pm->show_pfn) {
1439				offset = swp_offset(entry) +
1440					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1441				frame = swp_type(entry) |
1442					(offset << MAX_SWAPFILES_SHIFT);
1443			}
1444			flags |= PM_SWAP;
1445			if (pmd_swp_soft_dirty(pmd))
1446				flags |= PM_SOFT_DIRTY;
1447			if (pmd_swp_uffd_wp(pmd))
1448				flags |= PM_UFFD_WP;
1449			VM_BUG_ON(!is_pmd_migration_entry(pmd));
1450			page = pfn_swap_entry_to_page(entry);
1451		}
1452#endif
1453
1454		if (page && page_mapcount(page) == 1)
1455			flags |= PM_MMAP_EXCLUSIVE;
1456
1457		for (; addr != end; addr += PAGE_SIZE) {
1458			pagemap_entry_t pme = make_pme(frame, flags);
1459
1460			err = add_to_pagemap(addr, &pme, pm);
1461			if (err)
1462				break;
1463			if (pm->show_pfn) {
1464				if (flags & PM_PRESENT)
1465					frame++;
1466				else if (flags & PM_SWAP)
1467					frame += (1 << MAX_SWAPFILES_SHIFT);
1468			}
1469		}
1470		spin_unlock(ptl);
1471		return err;
1472	}
1473
1474	if (pmd_trans_unstable(pmdp))
1475		return 0;
1476#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1477
1478	/*
1479	 * We can assume that @vma always points to a valid one and @end never
1480	 * goes beyond vma->vm_end.
1481	 */
1482	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
1483	for (; addr < end; pte++, addr += PAGE_SIZE) {
1484		pagemap_entry_t pme;
1485
1486		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
1487		err = add_to_pagemap(addr, &pme, pm);
1488		if (err)
1489			break;
1490	}
1491	pte_unmap_unlock(orig_pte, ptl);
1492
1493	cond_resched();
1494
1495	return err;
1496}
1497
1498#ifdef CONFIG_HUGETLB_PAGE
 1499/* This function walks within one hugetlb entry in a single call */
1500static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
1501				 unsigned long addr, unsigned long end,
1502				 struct mm_walk *walk)
1503{
1504	struct pagemapread *pm = walk->private;
1505	struct vm_area_struct *vma = walk->vma;
1506	u64 flags = 0, frame = 0;
1507	int err = 0;
1508	pte_t pte;
1509
1510	if (vma->vm_flags & VM_SOFTDIRTY)
1511		flags |= PM_SOFT_DIRTY;
1512
1513	pte = huge_ptep_get(ptep);
1514	if (pte_present(pte)) {
1515		struct page *page = pte_page(pte);
1516
1517		if (!PageAnon(page))
1518			flags |= PM_FILE;
1519
1520		if (page_mapcount(page) == 1)
1521			flags |= PM_MMAP_EXCLUSIVE;
1522
1523		flags |= PM_PRESENT;
1524		if (pm->show_pfn)
1525			frame = pte_pfn(pte) +
1526				((addr & ~hmask) >> PAGE_SHIFT);
1527	}
1528
1529	for (; addr != end; addr += PAGE_SIZE) {
1530		pagemap_entry_t pme = make_pme(frame, flags);
1531
1532		err = add_to_pagemap(addr, &pme, pm);
1533		if (err)
1534			return err;
1535		if (pm->show_pfn && (flags & PM_PRESENT))
1536			frame++;
1537	}
1538
1539	cond_resched();
1540
1541	return err;
1542}
1543#else
1544#define pagemap_hugetlb_range	NULL
 1545#endif /* CONFIG_HUGETLB_PAGE */
1546
1547static const struct mm_walk_ops pagemap_ops = {
1548	.pmd_entry	= pagemap_pmd_range,
1549	.pte_hole	= pagemap_pte_hole,
1550	.hugetlb_entry	= pagemap_hugetlb_range,
1551};
1552
1553/*
1554 * /proc/pid/pagemap - an array mapping virtual pages to pfns
1555 *
1556 * For each page in the address space, this file contains one 64-bit entry
1557 * consisting of the following:
1558 *
1559 * Bits 0-54  page frame number (PFN) if present
1560 * Bits 0-4   swap type if swapped
1561 * Bits 5-54  swap offset if swapped
1562 * Bit  55    pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
1563 * Bit  56    page exclusively mapped
 1564 * Bit  57    pte is uffd-wp write-protected; bits 58-60 zero
1565 * Bit  61    page is file-page or shared-anon
1566 * Bit  62    page swapped
1567 * Bit  63    page present
1568 *
1569 * If the page is not present but in swap, then the PFN contains an
1570 * encoding of the swap file number and the page's offset into the
1571 * swap. Unmapped pages return a null PFN. This allows determining
1572 * precisely which pages are mapped (or in swap) and comparing mapped
1573 * pages between processes.
1574 *
1575 * Efficient users of this interface will use /proc/pid/maps to
1576 * determine which areas of memory are actually mapped and llseek to
1577 * skip over unmapped regions.
1578 */
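/*
 * Illustrative userspace sketch (not part of this file; error handling
 * omitted, "vaddr" is a hypothetical variable): fetch the entry that
 * covers one virtual address and decode it per the table above.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	uint64_t entry;
 *	long psize = sysconf(_SC_PAGESIZE);
 *	int fd = open("/proc/self/pagemap", O_RDONLY);
 *	pread(fd, &entry, sizeof(entry),
 *	      (uintptr_t)vaddr / psize * sizeof(entry));
 *	if (entry & (1ULL << 63))
 *		pfn = entry & ((1ULL << 55) - 1);	(bits 0-54)
 *	else if (entry & (1ULL << 62))
 *		swap_type = entry & 0x1f;		(bits 0-4)
 *	close(fd);
 */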
1579static ssize_t pagemap_read(struct file *file, char __user *buf,
1580			    size_t count, loff_t *ppos)
1581{
1582	struct mm_struct *mm = file->private_data;
1583	struct pagemapread pm;
1584	unsigned long src;
1585	unsigned long svpfn;
1586	unsigned long start_vaddr;
1587	unsigned long end_vaddr;
1588	int ret = 0, copied = 0;
1589
1590	if (!mm || !mmget_not_zero(mm))
1591		goto out;
1592
1593	ret = -EINVAL;
1594	/* file position must be aligned */
1595	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1596		goto out_mm;
1597
1598	ret = 0;
1599	if (!count)
1600		goto out_mm;
1601
1602	/* do not disclose physical addresses: attack vector */
1603	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
1604
1605	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1606	pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL);
1607	ret = -ENOMEM;
1608	if (!pm.buffer)
1609		goto out_mm;
1610
1611	src = *ppos;
1612	svpfn = src / PM_ENTRY_BYTES;
1613	end_vaddr = mm->task_size;
1614
1615	/* watch out for wraparound */
1616	start_vaddr = end_vaddr;
1617	if (svpfn <= (ULONG_MAX >> PAGE_SHIFT))
1618		start_vaddr = untagged_addr(svpfn << PAGE_SHIFT);
1619
 1620	/* Ensure the address is inside the task's address space */
1621	if (start_vaddr > mm->task_size)
1622		start_vaddr = end_vaddr;
1623
1624	/*
1625	 * The odds are that this will stop walking way
1626	 * before end_vaddr, because the length of the
1627	 * user buffer is tracked in "pm", and the walk
1628	 * will stop when we hit the end of the buffer.
1629	 */
1630	ret = 0;
1631	while (count && (start_vaddr < end_vaddr)) {
1632		int len;
1633		unsigned long end;
1634
1635		pm.pos = 0;
1636		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
 1637		/* overflow? */
1638		if (end < start_vaddr || end > end_vaddr)
1639			end = end_vaddr;
1640		ret = mmap_read_lock_killable(mm);
1641		if (ret)
1642			goto out_free;
1643		ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
1644		mmap_read_unlock(mm);
1645		start_vaddr = end;
1646
1647		len = min(count, PM_ENTRY_BYTES * pm.pos);
1648		if (copy_to_user(buf, pm.buffer, len)) {
1649			ret = -EFAULT;
1650			goto out_free;
1651		}
1652		copied += len;
1653		buf += len;
1654		count -= len;
1655	}
1656	*ppos += copied;
1657	if (!ret || ret == PM_END_OF_BUFFER)
1658		ret = copied;
1659
1660out_free:
1661	kfree(pm.buffer);
1662out_mm:
1663	mmput(mm);
1664out:
1665	return ret;
1666}
1667
1668static int pagemap_open(struct inode *inode, struct file *file)
1669{
1670	struct mm_struct *mm;
1671
1672	mm = proc_mem_open(inode, PTRACE_MODE_READ);
1673	if (IS_ERR(mm))
1674		return PTR_ERR(mm);
1675	file->private_data = mm;
1676	return 0;
1677}
1678
1679static int pagemap_release(struct inode *inode, struct file *file)
1680{
1681	struct mm_struct *mm = file->private_data;
1682
1683	if (mm)
1684		mmdrop(mm);
1685	return 0;
1686}
1687
1688const struct file_operations proc_pagemap_operations = {
1689	.llseek		= mem_lseek, /* borrow this */
1690	.read		= pagemap_read,
1691	.open		= pagemap_open,
1692	.release	= pagemap_release,
1693};
1694#endif /* CONFIG_PROC_PAGE_MONITOR */
1695
1696#ifdef CONFIG_NUMA
1697
1698struct numa_maps {
1699	unsigned long pages;
1700	unsigned long anon;
1701	unsigned long active;
1702	unsigned long writeback;
1703	unsigned long mapcount_max;
1704	unsigned long dirty;
1705	unsigned long swapcache;
1706	unsigned long node[MAX_NUMNODES];
1707};
1708
1709struct numa_maps_private {
1710	struct proc_maps_private proc_maps;
1711	struct numa_maps md;
1712};
1713
1714static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
1715			unsigned long nr_pages)
1716{
1717	int count = page_mapcount(page);
1718
1719	md->pages += nr_pages;
1720	if (pte_dirty || PageDirty(page))
1721		md->dirty += nr_pages;
1722
1723	if (PageSwapCache(page))
1724		md->swapcache += nr_pages;
1725
1726	if (PageActive(page) || PageUnevictable(page))
1727		md->active += nr_pages;
1728
1729	if (PageWriteback(page))
1730		md->writeback += nr_pages;
1731
1732	if (PageAnon(page))
1733		md->anon += nr_pages;
1734
1735	if (count > md->mapcount_max)
1736		md->mapcount_max = count;
1737
1738	md->node[page_to_nid(page)] += nr_pages;
1739}
1740
1741static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1742		unsigned long addr)
1743{
1744	struct page *page;
1745	int nid;
1746
1747	if (!pte_present(pte))
1748		return NULL;
1749
1750	page = vm_normal_page(vma, addr, pte);
1751	if (!page)
1752		return NULL;
1753
1754	if (PageReserved(page))
1755		return NULL;
1756
1757	nid = page_to_nid(page);
1758	if (!node_isset(nid, node_states[N_MEMORY]))
1759		return NULL;
1760
1761	return page;
1762}
1763
1764#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1765static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
1766					      struct vm_area_struct *vma,
1767					      unsigned long addr)
1768{
1769	struct page *page;
1770	int nid;
1771
1772	if (!pmd_present(pmd))
1773		return NULL;
1774
1775	page = vm_normal_page_pmd(vma, addr, pmd);
1776	if (!page)
1777		return NULL;
1778
1779	if (PageReserved(page))
1780		return NULL;
1781
1782	nid = page_to_nid(page);
1783	if (!node_isset(nid, node_states[N_MEMORY]))
1784		return NULL;
1785
1786	return page;
1787}
1788#endif
1789
1790static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1791		unsigned long end, struct mm_walk *walk)
1792{
1793	struct numa_maps *md = walk->private;
1794	struct vm_area_struct *vma = walk->vma;
1795	spinlock_t *ptl;
1796	pte_t *orig_pte;
1797	pte_t *pte;
1798
1799#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1800	ptl = pmd_trans_huge_lock(pmd, vma);
1801	if (ptl) {
1802		struct page *page;
1803
1804		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
1805		if (page)
1806			gather_stats(page, md, pmd_dirty(*pmd),
1807				     HPAGE_PMD_SIZE/PAGE_SIZE);
1808		spin_unlock(ptl);
1809		return 0;
1810	}
1811
1812	if (pmd_trans_unstable(pmd))
1813		return 0;
1814#endif
1815	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1816	do {
1817		struct page *page = can_gather_numa_stats(*pte, vma, addr);
1818		if (!page)
1819			continue;
1820		gather_stats(page, md, pte_dirty(*pte), 1);
1821
1822	} while (pte++, addr += PAGE_SIZE, addr != end);
1823	pte_unmap_unlock(orig_pte, ptl);
1824	cond_resched();
1825	return 0;
1826}
1827#ifdef CONFIG_HUGETLB_PAGE
1828static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1829		unsigned long addr, unsigned long end, struct mm_walk *walk)
1830{
1831	pte_t huge_pte = huge_ptep_get(pte);
1832	struct numa_maps *md;
1833	struct page *page;
1834
1835	if (!pte_present(huge_pte))
1836		return 0;
1837
1838	page = pte_page(huge_pte);
1839	if (!page)
1840		return 0;
1841
1842	md = walk->private;
1843	gather_stats(page, md, pte_dirty(huge_pte), 1);
1844	return 0;
1845}
1846
1847#else
1848static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1849		unsigned long addr, unsigned long end, struct mm_walk *walk)
1850{
1851	return 0;
1852}
1853#endif
1854
1855static const struct mm_walk_ops show_numa_ops = {
1856	.hugetlb_entry = gather_hugetlb_stats,
1857	.pmd_entry = gather_pte_stats,
1858};
1859
1860/*
1861 * Display pages allocated per node and memory policy via /proc.
1862 */
1863static int show_numa_map(struct seq_file *m, void *v)
1864{
1865	struct numa_maps_private *numa_priv = m->private;
1866	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
1867	struct vm_area_struct *vma = v;
1868	struct numa_maps *md = &numa_priv->md;
1869	struct file *file = vma->vm_file;
1870	struct mm_struct *mm = vma->vm_mm;
1871	struct mempolicy *pol;
1872	char buffer[64];
1873	int nid;
1874
1875	if (!mm)
1876		return 0;
1877
1878	/* Ensure we start with an empty set of numa_maps statistics. */
1879	memset(md, 0, sizeof(*md));
1880
1881	pol = __get_vma_policy(vma, vma->vm_start);
1882	if (pol) {
1883		mpol_to_str(buffer, sizeof(buffer), pol);
1884		mpol_cond_put(pol);
1885	} else {
1886		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
1887	}
1888
1889	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1890
1891	if (file) {
1892		seq_puts(m, " file=");
1893		seq_file_path(m, file, "\n\t= ");
1894	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1895		seq_puts(m, " heap");
1896	} else if (is_stack(vma)) {
1897		seq_puts(m, " stack");
1898	}
1899
1900	if (is_vm_hugetlb_page(vma))
1901		seq_puts(m, " huge");
1902
1903	/* mmap_lock is held by m_start */
1904	walk_page_vma(vma, &show_numa_ops, md);
1905
1906	if (!md->pages)
1907		goto out;
1908
1909	if (md->anon)
1910		seq_printf(m, " anon=%lu", md->anon);
1911
1912	if (md->dirty)
1913		seq_printf(m, " dirty=%lu", md->dirty);
1914
1915	if (md->pages != md->anon && md->pages != md->dirty)
1916		seq_printf(m, " mapped=%lu", md->pages);
1917
1918	if (md->mapcount_max > 1)
1919		seq_printf(m, " mapmax=%lu", md->mapcount_max);
1920
1921	if (md->swapcache)
1922		seq_printf(m, " swapcache=%lu", md->swapcache);
1923
1924	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1925		seq_printf(m, " active=%lu", md->active);
1926
1927	if (md->writeback)
1928		seq_printf(m, " writeback=%lu", md->writeback);
1929
1930	for_each_node_state(nid, N_MEMORY)
1931		if (md->node[nid])
1932			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
1933
1934	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
1935out:
1936	seq_putc(m, '\n');
1937	return 0;
1938}
1939
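/*
 * Illustrative /proc/<pid>/numa_maps line built above (values are
 * made-up placeholders):
 *
 *	7f2a4c000000 default anon=2048 dirty=2048 active=1024 N0=1024 N1=1024 kernelpagesize_kB=4
 */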
1940static const struct seq_operations proc_pid_numa_maps_op = {
1941	.start  = m_start,
1942	.next   = m_next,
1943	.stop   = m_stop,
1944	.show   = show_numa_map,
1945};
1946
1947static int pid_numa_maps_open(struct inode *inode, struct file *file)
1948{
1949	return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
1950				sizeof(struct numa_maps_private));
1951}
1952
1953const struct file_operations proc_pid_numa_maps_operations = {
1954	.open		= pid_numa_maps_open,
1955	.read		= seq_read,
1956	.llseek		= seq_lseek,
1957	.release	= proc_map_release,
1958};
1959
1960#endif /* CONFIG_NUMA */