v3.1 (fs/proc/task_mmu.c)
   1#include <linux/mm.h>
   2#include <linux/hugetlb.h>
   3#include <linux/huge_mm.h>
   4#include <linux/mount.h>
   5#include <linux/seq_file.h>
   6#include <linux/highmem.h>
   7#include <linux/ptrace.h>
   8#include <linux/slab.h>
   9#include <linux/pagemap.h>
  10#include <linux/mempolicy.h>
  11#include <linux/rmap.h>
  12#include <linux/swap.h>
  13#include <linux/swapops.h>
  14
  15#include <asm/elf.h>
  16#include <asm/uaccess.h>
  17#include <asm/tlbflush.h>
  18#include "internal.h"
  19
  20void task_mem(struct seq_file *m, struct mm_struct *mm)
  21{
  22	unsigned long data, text, lib, swap;
  23	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
  24
  25	/*
  26	 * Note: to minimize their overhead, mm maintains hiwater_vm and
  27	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
  28	 * collector of these hiwater stats must therefore get total_vm
  29	 * and rss too, which will usually be the higher.  Barriers? not
  30	 * worth the effort, such snapshots can always be inconsistent.
  31	 */
  32	hiwater_vm = total_vm = mm->total_vm;
  33	if (hiwater_vm < mm->hiwater_vm)
  34		hiwater_vm = mm->hiwater_vm;
  35	hiwater_rss = total_rss = get_mm_rss(mm);
  36	if (hiwater_rss < mm->hiwater_rss)
  37		hiwater_rss = mm->hiwater_rss;
  38
  39	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
  40	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
  41	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
  42	swap = get_mm_counter(mm, MM_SWAPENTS);
  43	seq_printf(m,
  44		"VmPeak:\t%8lu kB\n"
  45		"VmSize:\t%8lu kB\n"
  46		"VmLck:\t%8lu kB\n"
  47		"VmHWM:\t%8lu kB\n"
  48		"VmRSS:\t%8lu kB\n"
  49		"VmData:\t%8lu kB\n"
  50		"VmStk:\t%8lu kB\n"
  51		"VmExe:\t%8lu kB\n"
  52		"VmLib:\t%8lu kB\n"
  53		"VmPTE:\t%8lu kB\n"
  54		"VmSwap:\t%8lu kB\n",
  55		hiwater_vm << (PAGE_SHIFT-10),
  56		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
  57		mm->locked_vm << (PAGE_SHIFT-10),
  58		hiwater_rss << (PAGE_SHIFT-10),
  59		total_rss << (PAGE_SHIFT-10),
  60		data << (PAGE_SHIFT-10),
  61		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
  62		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
  63		swap << (PAGE_SHIFT-10));
  64}
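task_mem() above is what emits the Vm* lines of /proc/<pid>/status. A minimal userspace sketch of reading those fields back, not part of this kernel file; the helper name status_field_kb is made up for illustration.

#include <stdio.h>
#include <string.h>

/* Return the value (in kB) of one "Key:" line from a status file, or -1. */
static long status_field_kb(const char *path, const char *key)
{
        char line[256];
        long val = -1;
        FILE *f = fopen(path, "r");

        if (!f)
                return -1;
        while (fgets(line, sizeof(line), f)) {
                if (!strncmp(line, key, strlen(key))) {
                        sscanf(line + strlen(key), "%ld", &val);
                        break;
                }
        }
        fclose(f);
        return val;
}

int main(void)
{
        printf("VmRSS:  %ld kB\n", status_field_kb("/proc/self/status", "VmRSS:"));
        printf("VmSwap: %ld kB\n", status_field_kb("/proc/self/status", "VmSwap:"));
        return 0;
}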
  65
  66unsigned long task_vsize(struct mm_struct *mm)
  67{
  68	return PAGE_SIZE * mm->total_vm;
  69}
  70
  71unsigned long task_statm(struct mm_struct *mm,
  72			 unsigned long *shared, unsigned long *text,
  73			 unsigned long *data, unsigned long *resident)
  74{
  75	*shared = get_mm_counter(mm, MM_FILEPAGES);
  76	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
  77								>> PAGE_SHIFT;
  78	*data = mm->total_vm - mm->shared_vm;
  79	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
  80	return mm->total_vm;
  81}
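task_statm() backs /proc/<pid>/statm, whose fields are reported in pages. A small sketch, again not part of the kernel source, that converts them to kB, assuming the usual seven-field statm layout (size resident shared text lib data dt).

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        unsigned long size, resident, shared, text, lib, data, dt;
        long kb_per_page = sysconf(_SC_PAGESIZE) / 1024;
        FILE *f = fopen("/proc/self/statm", "r");

        if (!f)
                return 1;
        if (fscanf(f, "%lu %lu %lu %lu %lu %lu %lu",
                   &size, &resident, &shared, &text, &lib, &data, &dt) != 7) {
                fclose(f);
                return 1;
        }
        fclose(f);
        printf("total %lu kB, resident %lu kB, shared %lu kB\n",
               size * kb_per_page, resident * kb_per_page, shared * kb_per_page);
        return 0;
}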
  82
  83static void pad_len_spaces(struct seq_file *m, int len)
  84{
  85	len = 25 + sizeof(void*) * 6 - len;
  86	if (len < 1)
  87		len = 1;
  88	seq_printf(m, "%*c", len, ' ');
  89}
  90
  91static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
  92{
  93	if (vma && vma != priv->tail_vma) {
  94		struct mm_struct *mm = vma->vm_mm;
  95		up_read(&mm->mmap_sem);
  96		mmput(mm);
  97	}
  98}
  99
 100static void *m_start(struct seq_file *m, loff_t *pos)
 101{
 102	struct proc_maps_private *priv = m->private;
 103	unsigned long last_addr = m->version;
 104	struct mm_struct *mm;
 105	struct vm_area_struct *vma, *tail_vma = NULL;
 106	loff_t l = *pos;
 107
 108	/* Clear the per syscall fields in priv */
 109	priv->task = NULL;
 110	priv->tail_vma = NULL;
 111
 112	/*
 113	 * We remember last_addr rather than next_addr to hit with
 114	 * mmap_cache most of the time. We have zero last_addr at
 115	 * the beginning and also after lseek. We will have -1 last_addr
 116	 * after the end of the vmas.
 117	 */
 118
 119	if (last_addr == -1UL)
 120		return NULL;
 121
 122	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
 123	if (!priv->task)
 124		return ERR_PTR(-ESRCH);
 125
 126	mm = mm_for_maps(priv->task);
 127	if (!mm || IS_ERR(mm))
 128		return mm;
 129	down_read(&mm->mmap_sem);
 130
 131	tail_vma = get_gate_vma(priv->task->mm);
 132	priv->tail_vma = tail_vma;
 133
 134	/* Start with last addr hint */
 135	vma = find_vma(mm, last_addr);
 136	if (last_addr && vma) {
 137		vma = vma->vm_next;
 138		goto out;
 139	}
 140
 141	/*
 142	 * Check the vma index is within the range and do
 143	 * sequential scan until m_index.
 144	 */
 145	vma = NULL;
 146	if ((unsigned long)l < mm->map_count) {
 147		vma = mm->mmap;
 148		while (l-- && vma)
 149			vma = vma->vm_next;
 150		goto out;
 151	}
 152
 153	if (l != mm->map_count)
 154		tail_vma = NULL; /* After gate vma */
 155
 156out:
 157	if (vma)
 158		return vma;
 159
 160	/* End of vmas has been reached */
 161	m->version = (tail_vma != NULL)? 0: -1UL;
 162	up_read(&mm->mmap_sem);
 163	mmput(mm);
 164	return tail_vma;
 165}
 166
 167static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 168{
 169	struct proc_maps_private *priv = m->private;
 170	struct vm_area_struct *vma = v;
 171	struct vm_area_struct *tail_vma = priv->tail_vma;
 172
 173	(*pos)++;
 174	if (vma && (vma != tail_vma) && vma->vm_next)
 175		return vma->vm_next;
 176	vma_stop(priv, vma);
 177	return (vma != tail_vma)? tail_vma: NULL;
 178}
 179
 180static void m_stop(struct seq_file *m, void *v)
 181{
 182	struct proc_maps_private *priv = m->private;
 183	struct vm_area_struct *vma = v;
 184
 185	if (!IS_ERR(vma))
 186		vma_stop(priv, vma);
 187	if (priv->task)
 188		put_task_struct(priv->task);
 189}
 190
 191static int do_maps_open(struct inode *inode, struct file *file,
 192			const struct seq_operations *ops)
 193{
 194	struct proc_maps_private *priv;
 195	int ret = -ENOMEM;
 196	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 197	if (priv) {
 198		priv->pid = proc_pid(inode);
 199		ret = seq_open(file, ops);
 200		if (!ret) {
 201			struct seq_file *m = file->private_data;
 202			m->private = priv;
 203		} else {
 204			kfree(priv);
 205		}
 206	}
 207	return ret;
 208}
 209
 210static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 211{
 212	struct mm_struct *mm = vma->vm_mm;
 213	struct file *file = vma->vm_file;
 214	vm_flags_t flags = vma->vm_flags;
 215	unsigned long ino = 0;
 216	unsigned long long pgoff = 0;
 217	unsigned long start, end;
 218	dev_t dev = 0;
 219	int len;
 220
 221	if (file) {
 222		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
 223		dev = inode->i_sb->s_dev;
 224		ino = inode->i_ino;
 225		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
 226	}
 227
 228	/* We don't show the stack guard page in /proc/maps */
 229	start = vma->vm_start;
 230	if (stack_guard_page_start(vma, start))
 231		start += PAGE_SIZE;
 232	end = vma->vm_end;
 233	if (stack_guard_page_end(vma, end))
 234		end -= PAGE_SIZE;
 235
 236	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
 237			start,
 238			end,
 239			flags & VM_READ ? 'r' : '-',
 240			flags & VM_WRITE ? 'w' : '-',
 241			flags & VM_EXEC ? 'x' : '-',
 242			flags & VM_MAYSHARE ? 's' : 'p',
 243			pgoff,
 244			MAJOR(dev), MINOR(dev), ino, &len);
 245
 246	/*
 247	 * Print the dentry name for named mappings, and a
 248	 * special [heap] marker for the heap:
 249	 */
 250	if (file) {
 251		pad_len_spaces(m, len);
 252		seq_path(m, &file->f_path, "\n");
 253	} else {
 254		const char *name = arch_vma_name(vma);
 255		if (!name) {
 256			if (mm) {
 257				if (vma->vm_start <= mm->brk &&
 258						vma->vm_end >= mm->start_brk) {
 259					name = "[heap]";
 260				} else if (vma->vm_start <= mm->start_stack &&
 261					   vma->vm_end >= mm->start_stack) {
 262					name = "[stack]";
 263				}
 264			} else {
 265				name = "[vdso]";
 266			}
 267		}
 268		if (name) {
 269			pad_len_spaces(m, len);
 270			seq_puts(m, name);
 271		}
 272	}
 273	seq_putc(m, '\n');
 274}
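show_map_vma() produces one line of /proc/<pid>/maps in the format given to seq_printf() above. A hedged userspace sketch of parsing such a line with sscanf(); the field widths and the "[anon]" placeholder are illustrative choices, not kernel output.

#include <stdio.h>

int main(void)
{
        char line[512], perms[8], path[256];
        unsigned long start, end, ino;
        unsigned long long off;
        unsigned int maj, min;
        FILE *f = fopen("/proc/self/maps", "r");

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f)) {
                /* start-end perms offset dev:minor inode [path] */
                int n = sscanf(line, "%lx-%lx %7s %llx %x:%x %lu %255s",
                               &start, &end, perms, &off, &maj, &min, &ino, path);
                if (n < 7)
                        continue;
                printf("%lx-%lx %s %s\n", start, end, perms,
                       n == 8 ? path : "[anon]");
        }
        fclose(f);
        return 0;
}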
 275
 276static int show_map(struct seq_file *m, void *v)
 277{
 278	struct vm_area_struct *vma = v;
 279	struct proc_maps_private *priv = m->private;
 280	struct task_struct *task = priv->task;
 281
 282	show_map_vma(m, vma);
 283
 284	if (m->count < m->size)  /* vma is copied successfully */
 285		m->version = (vma != get_gate_vma(task->mm))
 286			? vma->vm_start : 0;
 287	return 0;
 288}
 289
 290static const struct seq_operations proc_pid_maps_op = {
 291	.start	= m_start,
 292	.next	= m_next,
 293	.stop	= m_stop,
 294	.show	= show_map
 295};
 296
 297static int maps_open(struct inode *inode, struct file *file)
 298{
 299	return do_maps_open(inode, file, &proc_pid_maps_op);
 300}
 301
 302const struct file_operations proc_maps_operations = {
 303	.open		= maps_open,
 304	.read		= seq_read,
 305	.llseek		= seq_lseek,
 306	.release	= seq_release_private,
 307};
 308
 309/*
 310 * Proportional Set Size(PSS): my share of RSS.
 311 *
 312 * PSS of a process is the count of pages it has in memory, where each
 313 * page is divided by the number of processes sharing it.  So if a
 314 * process has 1000 pages all to itself, and 1000 shared with one other
 315 * process, its PSS will be 1500.
 316 *
 317 * To keep (accumulated) division errors low, we adopt a 64bit
 318 * fixed-point pss counter to minimize division errors. So (pss >>
 319 * PSS_SHIFT) would be the real byte count.
 320 *
 321 * A shift of 12 before division means (assuming 4K page size):
 322 * 	- 1M 3-user-pages add up to 8KB errors;
 323 * 	- supports mapcount up to 2^24, or 16M;
 324 * 	- supports PSS up to 2^52 bytes, or 4PB.
 325 */
 326#define PSS_SHIFT 12
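A worked example of the fixed-point arithmetic the comment above describes, assuming a 4 KiB page size; PAGE_SIZE_EX and PSS_SHIFT_EX are local stand-ins for the kernel's PAGE_SIZE and PSS_SHIFT. It reproduces the "1000 private plus 1000 shared with one other process" scenario and prints the 6000 kB (1500-page) PSS that show_smap() below would report via pss >> (10 + PSS_SHIFT).

#include <stdio.h>

#define PAGE_SIZE_EX	4096UL
#define PSS_SHIFT_EX	12

int main(void)
{
        unsigned long long pss = 0;
        int i;

        /* 1000 private pages: mapcount == 1, full weight */
        for (i = 0; i < 1000; i++)
                pss += (PAGE_SIZE_EX << PSS_SHIFT_EX);
        /* 1000 pages shared with one other process: mapcount == 2 */
        for (i = 0; i < 1000; i++)
                pss += (PAGE_SIZE_EX << PSS_SHIFT_EX) / 2;

        printf("Pss: %llu kB\n", pss >> (PSS_SHIFT_EX + 10));	/* 6000 kB */
        return 0;
}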
 327
 328#ifdef CONFIG_PROC_PAGE_MONITOR
 329struct mem_size_stats {
 330	struct vm_area_struct *vma;
 331	unsigned long resident;
 332	unsigned long shared_clean;
 333	unsigned long shared_dirty;
 334	unsigned long private_clean;
 335	unsigned long private_dirty;
 336	unsigned long referenced;
 337	unsigned long anonymous;
 338	unsigned long anonymous_thp;
 339	unsigned long swap;
 340	u64 pss;
 341};
 342
 343
 344static void smaps_pte_entry(pte_t ptent, unsigned long addr,
 345		unsigned long ptent_size, struct mm_walk *walk)
 346{
 347	struct mem_size_stats *mss = walk->private;
 348	struct vm_area_struct *vma = mss->vma;
 349	struct page *page;
 350	int mapcount;
 351
 352	if (is_swap_pte(ptent)) {
 353		mss->swap += ptent_size;
 354		return;
 355	}
 356
 357	if (!pte_present(ptent))
 358		return;
 359
 360	page = vm_normal_page(vma, addr, ptent);
 361	if (!page)
 362		return;
 363
 364	if (PageAnon(page))
 365		mss->anonymous += ptent_size;
 366
 367	mss->resident += ptent_size;
 368	/* Accumulate the size in pages that have been accessed. */
 369	if (pte_young(ptent) || PageReferenced(page))
 370		mss->referenced += ptent_size;
 371	mapcount = page_mapcount(page);
 372	if (mapcount >= 2) {
 373		if (pte_dirty(ptent) || PageDirty(page))
 374			mss->shared_dirty += ptent_size;
 375		else
 376			mss->shared_clean += ptent_size;
 377		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
 378	} else {
 379		if (pte_dirty(ptent) || PageDirty(page))
 380			mss->private_dirty += ptent_size;
 381		else
 382			mss->private_clean += ptent_size;
 383		mss->pss += (ptent_size << PSS_SHIFT);
 384	}
 385}
 386
 387static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 388			   struct mm_walk *walk)
 389{
 390	struct mem_size_stats *mss = walk->private;
 391	struct vm_area_struct *vma = mss->vma;
 392	pte_t *pte;
 393	spinlock_t *ptl;
 394
 395	spin_lock(&walk->mm->page_table_lock);
 396	if (pmd_trans_huge(*pmd)) {
 397		if (pmd_trans_splitting(*pmd)) {
 398			spin_unlock(&walk->mm->page_table_lock);
 399			wait_split_huge_page(vma->anon_vma, pmd);
 400		} else {
 401			smaps_pte_entry(*(pte_t *)pmd, addr,
 402					HPAGE_PMD_SIZE, walk);
 403			spin_unlock(&walk->mm->page_table_lock);
 404			mss->anonymous_thp += HPAGE_PMD_SIZE;
 405			return 0;
 406		}
 407	} else {
 408		spin_unlock(&walk->mm->page_table_lock);
 409	}
 410	/*
 411	 * The mmap_sem held all the way back in m_start() is what
 412	 * keeps khugepaged out of here and from collapsing things
 413	 * in here.
 414	 */
 415	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 416	for (; addr != end; pte++, addr += PAGE_SIZE)
 417		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
 418	pte_unmap_unlock(pte - 1, ptl);
 419	cond_resched();
 420	return 0;
 421}
 422
 423static int show_smap(struct seq_file *m, void *v)
 424{
 425	struct proc_maps_private *priv = m->private;
 426	struct task_struct *task = priv->task;
 427	struct vm_area_struct *vma = v;
 428	struct mem_size_stats mss;
 429	struct mm_walk smaps_walk = {
 430		.pmd_entry = smaps_pte_range,
 431		.mm = vma->vm_mm,
 432		.private = &mss,
 433	};
 434
 435	memset(&mss, 0, sizeof mss);
 436	mss.vma = vma;
 437	/* mmap_sem is held in m_start */
 438	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
 439		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
 440
 441	show_map_vma(m, vma);
 442
 443	seq_printf(m,
 444		   "Size:           %8lu kB\n"
 445		   "Rss:            %8lu kB\n"
 446		   "Pss:            %8lu kB\n"
 447		   "Shared_Clean:   %8lu kB\n"
 448		   "Shared_Dirty:   %8lu kB\n"
 449		   "Private_Clean:  %8lu kB\n"
 450		   "Private_Dirty:  %8lu kB\n"
 451		   "Referenced:     %8lu kB\n"
 452		   "Anonymous:      %8lu kB\n"
 453		   "AnonHugePages:  %8lu kB\n"
 454		   "Swap:           %8lu kB\n"
 455		   "KernelPageSize: %8lu kB\n"
 456		   "MMUPageSize:    %8lu kB\n"
 457		   "Locked:         %8lu kB\n",
 458		   (vma->vm_end - vma->vm_start) >> 10,
 459		   mss.resident >> 10,
 460		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
 461		   mss.shared_clean  >> 10,
 462		   mss.shared_dirty  >> 10,
 463		   mss.private_clean >> 10,
 464		   mss.private_dirty >> 10,
 465		   mss.referenced >> 10,
 466		   mss.anonymous >> 10,
 467		   mss.anonymous_thp >> 10,
 468		   mss.swap >> 10,
 469		   vma_kernel_pagesize(vma) >> 10,
 470		   vma_mmu_pagesize(vma) >> 10,
 471		   (vma->vm_flags & VM_LOCKED) ?
 472			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
 473
 474	if (m->count < m->size)  /* vma is copied successfully */
 475		m->version = (vma != get_gate_vma(task->mm))
 476			? vma->vm_start : 0;
 477	return 0;
 478}
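show_smap() above emits one block of /proc/<pid>/smaps per VMA. A small sketch, not part of this file, that sums the per-VMA Pss: lines to obtain a whole-process proportional set size in kB.

#include <stdio.h>

int main(void)
{
        char line[256];
        unsigned long kb, total = 0;
        FILE *f = fopen("/proc/self/smaps", "r");

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f))
                if (sscanf(line, "Pss: %lu kB", &kb) == 1)
                        total += kb;
        fclose(f);
        printf("total Pss: %lu kB\n", total);
        return 0;
}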
 479
 480static const struct seq_operations proc_pid_smaps_op = {
 481	.start	= m_start,
 482	.next	= m_next,
 483	.stop	= m_stop,
 484	.show	= show_smap
 485};
 486
 487static int smaps_open(struct inode *inode, struct file *file)
 488{
 489	return do_maps_open(inode, file, &proc_pid_smaps_op);
 490}
 491
 492const struct file_operations proc_smaps_operations = {
 493	.open		= smaps_open,
 494	.read		= seq_read,
 495	.llseek		= seq_lseek,
 496	.release	= seq_release_private,
 497};
 498
 499static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 500				unsigned long end, struct mm_walk *walk)
 501{
 502	struct vm_area_struct *vma = walk->private;
 503	pte_t *pte, ptent;
 504	spinlock_t *ptl;
 505	struct page *page;
 506
 507	split_huge_page_pmd(walk->mm, pmd);
 508
 509	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 510	for (; addr != end; pte++, addr += PAGE_SIZE) {
 511		ptent = *pte;
 512		if (!pte_present(ptent))
 513			continue;
 514
 515		page = vm_normal_page(vma, addr, ptent);
 516		if (!page)
 517			continue;
 518
 519		/* Clear accessed and referenced bits. */
 520		ptep_test_and_clear_young(vma, addr, pte);
 521		ClearPageReferenced(page);
 522	}
 523	pte_unmap_unlock(pte - 1, ptl);
 524	cond_resched();
 525	return 0;
 526}
 527
 528#define CLEAR_REFS_ALL 1
 529#define CLEAR_REFS_ANON 2
 530#define CLEAR_REFS_MAPPED 3
 531
 532static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 533				size_t count, loff_t *ppos)
 534{
 535	struct task_struct *task;
 536	char buffer[PROC_NUMBUF];
 537	struct mm_struct *mm;
 538	struct vm_area_struct *vma;
 539	int type;
 540	int rv;
 541
 542	memset(buffer, 0, sizeof(buffer));
 543	if (count > sizeof(buffer) - 1)
 544		count = sizeof(buffer) - 1;
 545	if (copy_from_user(buffer, buf, count))
 546		return -EFAULT;
 547	rv = kstrtoint(strstrip(buffer), 10, &type);
 548	if (rv < 0)
 549		return rv;
 550	if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
 551		return -EINVAL;
 552	task = get_proc_task(file->f_path.dentry->d_inode);
 553	if (!task)
 554		return -ESRCH;
 555	mm = get_task_mm(task);
 556	if (mm) {
 557		struct mm_walk clear_refs_walk = {
 558			.pmd_entry = clear_refs_pte_range,
 559			.mm = mm,
 560		};
 561		down_read(&mm->mmap_sem);
 562		for (vma = mm->mmap; vma; vma = vma->vm_next) {
 563			clear_refs_walk.private = vma;
 564			if (is_vm_hugetlb_page(vma))
 565				continue;
 566			/*
 567			 * Writing 1 to /proc/pid/clear_refs affects all pages.
 568			 *
 569			 * Writing 2 to /proc/pid/clear_refs only affects
 570			 * Anonymous pages.
 571			 *
 572			 * Writing 3 to /proc/pid/clear_refs only affects file
 573			 * mapped pages.
 574			 */
 575			if (type == CLEAR_REFS_ANON && vma->vm_file)
 576				continue;
 577			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
 578				continue;
 579			walk_page_range(vma->vm_start, vma->vm_end,
 580					&clear_refs_walk);
 581		}
 582		flush_tlb_mm(mm);
 583		up_read(&mm->mmap_sem);
 584		mmput(mm);
 585	}
 586	put_task_struct(task);
 587
 588	return count;
 589}
 590
 591const struct file_operations proc_clear_refs_operations = {
 592	.write		= clear_refs_write,
 593	.llseek		= noop_llseek,
 594};
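clear_refs_write() implements the /proc/<pid>/clear_refs control described in the comment above (1 clears referenced bits for all pages, 2 for anonymous pages only, 3 for file-backed pages only). A minimal userspace sketch of using it to reset the bits before measuring a workload; re-reading the Referenced: fields of smaps afterwards then shows only pages touched since the reset.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/proc/self/clear_refs", O_WRONLY);

        if (fd < 0)
                return 1;
        if (write(fd, "1", 1) != 1)	/* "1": affect all pages */
                perror("write");
        close(fd);
        /* ... run the workload, then re-read /proc/self/smaps ... */
        return 0;
}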
 595
 596struct pagemapread {
 597	int pos, len;
 598	u64 *buffer;
 599};
 600
 601#define PM_ENTRY_BYTES      sizeof(u64)
 602#define PM_STATUS_BITS      3
 603#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
 604#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
 605#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
 606#define PM_PSHIFT_BITS      6
 607#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
 608#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
 609#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
 610#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
 611#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)
 612
 613#define PM_PRESENT          PM_STATUS(4LL)
 614#define PM_SWAP             PM_STATUS(2LL)
 615#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
 616#define PM_END_OF_BUFFER    1
 617
 618static int add_to_pagemap(unsigned long addr, u64 pfn,
 619			  struct pagemapread *pm)
 620{
 621	pm->buffer[pm->pos++] = pfn;
 622	if (pm->pos >= pm->len)
 623		return PM_END_OF_BUFFER;
 624	return 0;
 625}
 626
 627static int pagemap_pte_hole(unsigned long start, unsigned long end,
 628				struct mm_walk *walk)
 629{
 630	struct pagemapread *pm = walk->private;
 631	unsigned long addr;
 632	int err = 0;
 633	for (addr = start; addr < end; addr += PAGE_SIZE) {
 634		err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
 635		if (err)
 636			break;
 637	}
 638	return err;
 639}
 640
 641static u64 swap_pte_to_pagemap_entry(pte_t pte)
 642{
 643	swp_entry_t e = pte_to_swp_entry(pte);
 644	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
 645}
 646
 647static u64 pte_to_pagemap_entry(pte_t pte)
 648{
 649	u64 pme = 0;
 650	if (is_swap_pte(pte))
 651		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
 652			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
 653	else if (pte_present(pte))
 654		pme = PM_PFRAME(pte_pfn(pte))
 655			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
 656	return pme;
 657}
 658
 659static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 660			     struct mm_walk *walk)
 661{
 662	struct vm_area_struct *vma;
 663	struct pagemapread *pm = walk->private;
 664	pte_t *pte;
 665	int err = 0;
 666
 667	split_huge_page_pmd(walk->mm, pmd);
 668
 669	/* find the first VMA at or above 'addr' */
 670	vma = find_vma(walk->mm, addr);
 671	for (; addr != end; addr += PAGE_SIZE) {
 672		u64 pfn = PM_NOT_PRESENT;
 673
 674		/* check to see if we've left 'vma' behind
 675		 * and need a new, higher one */
 676		if (vma && (addr >= vma->vm_end))
 677			vma = find_vma(walk->mm, addr);
 678
 679		/* check that 'vma' actually covers this address,
 680		 * and that it isn't a huge page vma */
 681		if (vma && (vma->vm_start <= addr) &&
 682		    !is_vm_hugetlb_page(vma)) {
 683			pte = pte_offset_map(pmd, addr);
 684			pfn = pte_to_pagemap_entry(*pte);
 685			/* unmap before userspace copy */
 686			pte_unmap(pte);
 687		}
 688		err = add_to_pagemap(addr, pfn, pm);
 689		if (err)
 690			return err;
 691	}
 692
 693	cond_resched();
 694
 695	return err;
 696}
 697
 698#ifdef CONFIG_HUGETLB_PAGE
 699static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
 700{
 701	u64 pme = 0;
 702	if (pte_present(pte))
 703		pme = PM_PFRAME(pte_pfn(pte) + offset)
 704			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
 705	return pme;
 706}
 707
 708/* This function walks within one hugetlb entry in the single call */
 709static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
 710				 unsigned long addr, unsigned long end,
 711				 struct mm_walk *walk)
 712{
 713	struct pagemapread *pm = walk->private;
 714	int err = 0;
 715	u64 pfn;
 716
 717	for (; addr != end; addr += PAGE_SIZE) {
 718		int offset = (addr & ~hmask) >> PAGE_SHIFT;
 719		pfn = huge_pte_to_pagemap_entry(*pte, offset);
 720		err = add_to_pagemap(addr, pfn, pm);
 721		if (err)
 722			return err;
 723	}
 724
 725	cond_resched();
 726
 727	return err;
 728}
 729#endif /* HUGETLB_PAGE */
 730
 731/*
 732 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 733 *
 734 * For each page in the address space, this file contains one 64-bit entry
 735 * consisting of the following:
 736 *
 737 * Bits 0-54  page frame number (PFN) if present
 738 * Bits 0-4   swap type if swapped
 739 * Bits 5-54  swap offset if swapped
 740 * Bits 55-60 page shift (page size = 1<<page shift)
 741 * Bit  61    reserved for future use
 742 * Bit  62    page swapped
 743 * Bit  63    page present
 744 *
 745 * If the page is not present but in swap, then the PFN contains an
 746 * encoding of the swap file number and the page's offset into the
 747 * swap. Unmapped pages return a null PFN. This allows determining
 748 * precisely which pages are mapped (or in swap) and comparing mapped
 749 * pages between processes.
 750 *
 751 * Efficient users of this interface will use /proc/pid/maps to
 752 * determine which areas of memory are actually mapped and llseek to
 753 * skip over unmapped regions.
 754 */
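A userspace sketch of consuming this format: look up the pagemap entry for one virtual address and decode the present/swapped bits and the PFN field, mirroring the PM_* macros above. On much newer kernels the PFN bits read back as zero without CAP_SYS_ADMIN, so treat the decode as illustrative.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        uint64_t entry;
        int dummy = 42;				/* some mapped address to look up */
        uintptr_t vaddr = (uintptr_t)&dummy;
        int fd = open("/proc/self/pagemap", O_RDONLY);

        if (fd < 0)
                return 1;
        /* one 64-bit entry per virtual page, indexed by vaddr / page size */
        if (pread(fd, &entry, sizeof(entry),
                  (vaddr / page) * sizeof(entry)) != sizeof(entry)) {
                close(fd);
                return 1;
        }
        close(fd);

        if (entry & (1ULL << 63))
                printf("present, pfn 0x%llx\n",
                       (unsigned long long)(entry & ((1ULL << 55) - 1)));
        else if (entry & (1ULL << 62))
                printf("swapped\n");
        else
                printf("not present\n");
        return 0;
}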
 755#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
 756#define PAGEMAP_WALK_MASK	(PMD_MASK)
 757static ssize_t pagemap_read(struct file *file, char __user *buf,
 758			    size_t count, loff_t *ppos)
 759{
 760	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
 761	struct mm_struct *mm;
 762	struct pagemapread pm;
 763	int ret = -ESRCH;
 764	struct mm_walk pagemap_walk = {};
 765	unsigned long src;
 766	unsigned long svpfn;
 767	unsigned long start_vaddr;
 768	unsigned long end_vaddr;
 769	int copied = 0;
 770
 771	if (!task)
 772		goto out;
 773
 774	ret = -EINVAL;
 775	/* file position must be aligned */
 776	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
 777		goto out_task;
 778
 779	ret = 0;
 780	if (!count)
 781		goto out_task;
 782
 783	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
 784	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
 785	ret = -ENOMEM;
 786	if (!pm.buffer)
 787		goto out_task;
 788
 789	mm = mm_for_maps(task);
 790	ret = PTR_ERR(mm);
 791	if (!mm || IS_ERR(mm))
 792		goto out_free;
 793
 794	pagemap_walk.pmd_entry = pagemap_pte_range;
 795	pagemap_walk.pte_hole = pagemap_pte_hole;
 796#ifdef CONFIG_HUGETLB_PAGE
 797	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
 798#endif
 799	pagemap_walk.mm = mm;
 800	pagemap_walk.private = &pm;
 801
 802	src = *ppos;
 803	svpfn = src / PM_ENTRY_BYTES;
 804	start_vaddr = svpfn << PAGE_SHIFT;
 805	end_vaddr = TASK_SIZE_OF(task);
 806
 807	/* watch out for wraparound */
 808	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
 809		start_vaddr = end_vaddr;
 810
 811	/*
 812	 * The odds are that this will stop walking way
 813	 * before end_vaddr, because the length of the
 814	 * user buffer is tracked in "pm", and the walk
 815	 * will stop when we hit the end of the buffer.
 816	 */
 817	ret = 0;
 818	while (count && (start_vaddr < end_vaddr)) {
 819		int len;
 820		unsigned long end;
 821
 822		pm.pos = 0;
 823		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
 824		/* overflow ? */
 825		if (end < start_vaddr || end > end_vaddr)
 826			end = end_vaddr;
 827		down_read(&mm->mmap_sem);
 828		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
 829		up_read(&mm->mmap_sem);
 830		start_vaddr = end;
 831
 832		len = min(count, PM_ENTRY_BYTES * pm.pos);
 833		if (copy_to_user(buf, pm.buffer, len)) {
 834			ret = -EFAULT;
 835			goto out_mm;
 836		}
 837		copied += len;
 838		buf += len;
 839		count -= len;
 840	}
 841	*ppos += copied;
 842	if (!ret || ret == PM_END_OF_BUFFER)
 843		ret = copied;
 844
 845out_mm:
 846	mmput(mm);
 847out_free:
 848	kfree(pm.buffer);
 849out_task:
 850	put_task_struct(task);
 851out:
 852	return ret;
 853}
 854
 855const struct file_operations proc_pagemap_operations = {
 856	.llseek		= mem_lseek, /* borrow this */
 857	.read		= pagemap_read,
 858};
 859#endif /* CONFIG_PROC_PAGE_MONITOR */
 860
 861#ifdef CONFIG_NUMA
 862
 863struct numa_maps {
 864	struct vm_area_struct *vma;
 865	unsigned long pages;
 866	unsigned long anon;
 867	unsigned long active;
 868	unsigned long writeback;
 869	unsigned long mapcount_max;
 870	unsigned long dirty;
 871	unsigned long swapcache;
 872	unsigned long node[MAX_NUMNODES];
 873};
 874
 875struct numa_maps_private {
 876	struct proc_maps_private proc_maps;
 877	struct numa_maps md;
 878};
 879
 880static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
 881			unsigned long nr_pages)
 882{
 883	int count = page_mapcount(page);
 884
 885	md->pages += nr_pages;
 886	if (pte_dirty || PageDirty(page))
 887		md->dirty += nr_pages;
 888
 889	if (PageSwapCache(page))
 890		md->swapcache += nr_pages;
 891
 892	if (PageActive(page) || PageUnevictable(page))
 893		md->active += nr_pages;
 894
 895	if (PageWriteback(page))
 896		md->writeback += nr_pages;
 897
 898	if (PageAnon(page))
 899		md->anon += nr_pages;
 900
 901	if (count > md->mapcount_max)
 902		md->mapcount_max = count;
 903
 904	md->node[page_to_nid(page)] += nr_pages;
 905}
 906
 907static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
 908		unsigned long addr)
 909{
 910	struct page *page;
 911	int nid;
 912
 913	if (!pte_present(pte))
 914		return NULL;
 915
 916	page = vm_normal_page(vma, addr, pte);
 917	if (!page)
 918		return NULL;
 919
 920	if (PageReserved(page))
 921		return NULL;
 922
 923	nid = page_to_nid(page);
 924	if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
 925		return NULL;
 926
 927	return page;
 928}
 929
 930static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 931		unsigned long end, struct mm_walk *walk)
 932{
 933	struct numa_maps *md;
 934	spinlock_t *ptl;
 935	pte_t *orig_pte;
 936	pte_t *pte;
 937
 938	md = walk->private;
 939	spin_lock(&walk->mm->page_table_lock);
 940	if (pmd_trans_huge(*pmd)) {
 941		if (pmd_trans_splitting(*pmd)) {
 942			spin_unlock(&walk->mm->page_table_lock);
 943			wait_split_huge_page(md->vma->anon_vma, pmd);
 944		} else {
 945			pte_t huge_pte = *(pte_t *)pmd;
 946			struct page *page;
 947
 948			page = can_gather_numa_stats(huge_pte, md->vma, addr);
 949			if (page)
 950				gather_stats(page, md, pte_dirty(huge_pte),
 951						HPAGE_PMD_SIZE/PAGE_SIZE);
 952			spin_unlock(&walk->mm->page_table_lock);
 953			return 0;
 954		}
 955	} else {
 956		spin_unlock(&walk->mm->page_table_lock);
 957	}
 958
 959	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 960	do {
 961		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
 962		if (!page)
 963			continue;
 964		gather_stats(page, md, pte_dirty(*pte), 1);
 965
 966	} while (pte++, addr += PAGE_SIZE, addr != end);
 967	pte_unmap_unlock(orig_pte, ptl);
 968	return 0;
 969}
 970#ifdef CONFIG_HUGETLB_PAGE
 971static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
 972		unsigned long addr, unsigned long end, struct mm_walk *walk)
 973{
 974	struct numa_maps *md;
 975	struct page *page;
 976
 977	if (pte_none(*pte))
 978		return 0;
 979
 980	page = pte_page(*pte);
 981	if (!page)
 982		return 0;
 983
 984	md = walk->private;
 985	gather_stats(page, md, pte_dirty(*pte), 1);
 986	return 0;
 987}
 988
 989#else
 990static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
 991		unsigned long addr, unsigned long end, struct mm_walk *walk)
 992{
 993	return 0;
 994}
 995#endif
 996
 997/*
 998 * Display pages allocated per node and memory policy via /proc.
 999 */
1000static int show_numa_map(struct seq_file *m, void *v)
1001{
1002	struct numa_maps_private *numa_priv = m->private;
1003	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
1004	struct vm_area_struct *vma = v;
1005	struct numa_maps *md = &numa_priv->md;
1006	struct file *file = vma->vm_file;
1007	struct mm_struct *mm = vma->vm_mm;
1008	struct mm_walk walk = {};
1009	struct mempolicy *pol;
1010	int n;
1011	char buffer[50];
1012
1013	if (!mm)
1014		return 0;
1015
1016	/* Ensure we start with an empty set of numa_maps statistics. */
1017	memset(md, 0, sizeof(*md));
1018
1019	md->vma = vma;
1020
1021	walk.hugetlb_entry = gather_hugetbl_stats;
1022	walk.pmd_entry = gather_pte_stats;
1023	walk.private = md;
1024	walk.mm = mm;
1025
1026	pol = get_vma_policy(proc_priv->task, vma, vma->vm_start);
1027	mpol_to_str(buffer, sizeof(buffer), pol, 0);
1028	mpol_cond_put(pol);
1029
1030	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1031
1032	if (file) {
1033		seq_printf(m, " file=");
1034		seq_path(m, &file->f_path, "\n\t= ");
1035	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1036		seq_printf(m, " heap");
1037	} else if (vma->vm_start <= mm->start_stack &&
1038			vma->vm_end >= mm->start_stack) {
1039		seq_printf(m, " stack");
1040	}
1041
1042	walk_page_range(vma->vm_start, vma->vm_end, &walk);
1043
1044	if (!md->pages)
1045		goto out;
1046
1047	if (md->anon)
1048		seq_printf(m, " anon=%lu", md->anon);
1049
1050	if (md->dirty)
1051		seq_printf(m, " dirty=%lu", md->dirty);
1052
1053	if (md->pages != md->anon && md->pages != md->dirty)
1054		seq_printf(m, " mapped=%lu", md->pages);
1055
1056	if (md->mapcount_max > 1)
1057		seq_printf(m, " mapmax=%lu", md->mapcount_max);
1058
1059	if (md->swapcache)
1060		seq_printf(m, " swapcache=%lu", md->swapcache);
1061
1062	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1063		seq_printf(m, " active=%lu", md->active);
1064
1065	if (md->writeback)
1066		seq_printf(m, " writeback=%lu", md->writeback);
1067
1068	for_each_node_state(n, N_HIGH_MEMORY)
1069		if (md->node[n])
1070			seq_printf(m, " N%d=%lu", n, md->node[n]);
1071out:
1072	seq_putc(m, '\n');
1073
1074	if (m->count < m->size)
1075		m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
1076	return 0;
1077}
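show_numa_map() above backs /proc/<pid>/numa_maps. A small sketch, not part of the kernel source, that pulls the per-node N<node>=<pages> counters back out of each line it prints.

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[1024];
        FILE *f = fopen("/proc/self/numa_maps", "r");

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f)) {
                char *tok = strtok(line, " \n");

                while (tok) {
                        int node;
                        unsigned long pages;

                        if (sscanf(tok, "N%d=%lu", &node, &pages) == 2)
                                printf("node %d: %lu page(s)\n", node, pages);
                        tok = strtok(NULL, " \n");
                }
        }
        fclose(f);
        return 0;
}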
1078
1079static const struct seq_operations proc_pid_numa_maps_op = {
1080        .start  = m_start,
1081        .next   = m_next,
1082        .stop   = m_stop,
1083        .show   = show_numa_map,
1084};
1085
1086static int numa_maps_open(struct inode *inode, struct file *file)
1087{
1088	struct numa_maps_private *priv;
1089	int ret = -ENOMEM;
1090	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1091	if (priv) {
1092		priv->proc_maps.pid = proc_pid(inode);
1093		ret = seq_open(file, &proc_pid_numa_maps_op);
1094		if (!ret) {
1095			struct seq_file *m = file->private_data;
1096			m->private = priv;
1097		} else {
1098			kfree(priv);
1099		}
1100	}
1101	return ret;
1102}
1103
1104const struct file_operations proc_numa_maps_operations = {
1105	.open		= numa_maps_open,
1106	.read		= seq_read,
1107	.llseek		= seq_lseek,
1108	.release	= seq_release_private,
1109};
1110#endif /* CONFIG_NUMA */
v6.2 (fs/proc/task_mmu.c)
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/pagewalk.h>
   3#include <linux/mm_inline.h>
   4#include <linux/hugetlb.h>
   5#include <linux/huge_mm.h>
   6#include <linux/mount.h>
   7#include <linux/seq_file.h>
   8#include <linux/highmem.h>
   9#include <linux/ptrace.h>
  10#include <linux/slab.h>
  11#include <linux/pagemap.h>
  12#include <linux/mempolicy.h>
  13#include <linux/rmap.h>
  14#include <linux/swap.h>
  15#include <linux/sched/mm.h>
  16#include <linux/swapops.h>
  17#include <linux/mmu_notifier.h>
  18#include <linux/page_idle.h>
  19#include <linux/shmem_fs.h>
  20#include <linux/uaccess.h>
  21#include <linux/pkeys.h>
  22
  23#include <asm/elf.h>
  24#include <asm/tlb.h>
  25#include <asm/tlbflush.h>
  26#include "internal.h"
  27
  28#define SEQ_PUT_DEC(str, val) \
  29		seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
  30void task_mem(struct seq_file *m, struct mm_struct *mm)
  31{
  32	unsigned long text, lib, swap, anon, file, shmem;
  33	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
  34
  35	anon = get_mm_counter(mm, MM_ANONPAGES);
  36	file = get_mm_counter(mm, MM_FILEPAGES);
  37	shmem = get_mm_counter(mm, MM_SHMEMPAGES);
  38
  39	/*
  40	 * Note: to minimize their overhead, mm maintains hiwater_vm and
  41	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
  42	 * collector of these hiwater stats must therefore get total_vm
  43	 * and rss too, which will usually be the higher.  Barriers? not
  44	 * worth the effort, such snapshots can always be inconsistent.
  45	 */
  46	hiwater_vm = total_vm = mm->total_vm;
  47	if (hiwater_vm < mm->hiwater_vm)
  48		hiwater_vm = mm->hiwater_vm;
  49	hiwater_rss = total_rss = anon + file + shmem;
  50	if (hiwater_rss < mm->hiwater_rss)
  51		hiwater_rss = mm->hiwater_rss;
  52
  53	/* split executable areas between text and lib */
  54	text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
  55	text = min(text, mm->exec_vm << PAGE_SHIFT);
  56	lib = (mm->exec_vm << PAGE_SHIFT) - text;
  57
  58	swap = get_mm_counter(mm, MM_SWAPENTS);
  59	SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
  60	SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
  61	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
  62	SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
  63	SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
  64	SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
  65	SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
  66	SEQ_PUT_DEC(" kB\nRssFile:\t", file);
  67	SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
  68	SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
  69	SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
  70	seq_put_decimal_ull_width(m,
  71		    " kB\nVmExe:\t", text >> 10, 8);
  72	seq_put_decimal_ull_width(m,
  73		    " kB\nVmLib:\t", lib >> 10, 8);
  74	seq_put_decimal_ull_width(m,
  75		    " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
  76	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
  77	seq_puts(m, " kB\n");
  78	hugetlb_report_usage(m, mm);
  79}
  80#undef SEQ_PUT_DEC
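SEQ_PUT_DEC above converts a page count to kB by shifting left by PAGE_SHIFT - 10. A tiny standalone illustration of that conversion, assuming 4 KiB pages (PAGE_SHIFT == 12), so each page contributes 4 kB.

#include <stdio.h>

int main(void)
{
        unsigned long pages = 1234;
        unsigned long page_shift = 12;		/* assumed 4 KiB page size */

        /* 1234 pages -> 4936 kB, the same arithmetic SEQ_PUT_DEC applies */
        printf("%lu pages = %lu kB\n", pages, pages << (page_shift - 10));
        return 0;
}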
  81
  82unsigned long task_vsize(struct mm_struct *mm)
  83{
  84	return PAGE_SIZE * mm->total_vm;
  85}
  86
  87unsigned long task_statm(struct mm_struct *mm,
  88			 unsigned long *shared, unsigned long *text,
  89			 unsigned long *data, unsigned long *resident)
  90{
  91	*shared = get_mm_counter(mm, MM_FILEPAGES) +
  92			get_mm_counter(mm, MM_SHMEMPAGES);
  93	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
  94								>> PAGE_SHIFT;
  95	*data = mm->data_vm + mm->stack_vm;
  96	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
  97	return mm->total_vm;
  98}
  99
 100#ifdef CONFIG_NUMA
 101/*
 102 * Save get_task_policy() for show_numa_map().
 103 */
 104static void hold_task_mempolicy(struct proc_maps_private *priv)
 105{
 106	struct task_struct *task = priv->task;
 107
 108	task_lock(task);
 109	priv->task_mempolicy = get_task_policy(task);
 110	mpol_get(priv->task_mempolicy);
 111	task_unlock(task);
 112}
 113static void release_task_mempolicy(struct proc_maps_private *priv)
 114{
 115	mpol_put(priv->task_mempolicy);
 116}
 117#else
 118static void hold_task_mempolicy(struct proc_maps_private *priv)
 119{
 121static void release_task_mempolicy(struct proc_maps_private *priv)
 122{
 123}
 124#endif
 125
 126static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
 127						loff_t *ppos)
 128{
 129	struct vm_area_struct *vma = vma_next(&priv->iter);
 130
 131	if (vma) {
 132		*ppos = vma->vm_start;
 133	} else {
 134		*ppos = -2UL;
 135		vma = get_gate_vma(priv->mm);
 136	}
 137
 138	return vma;
 139}
 140
 141static void *m_start(struct seq_file *m, loff_t *ppos)
 142{
 143	struct proc_maps_private *priv = m->private;
 144	unsigned long last_addr = *ppos;
 145	struct mm_struct *mm;
 146
 147	/* See m_next(). Zero at the start or after lseek. */
 148	if (last_addr == -1UL)
 149		return NULL;
 150
 151	priv->task = get_proc_task(priv->inode);
 152	if (!priv->task)
 153		return ERR_PTR(-ESRCH);
 154
 155	mm = priv->mm;
 156	if (!mm || !mmget_not_zero(mm)) {
 157		put_task_struct(priv->task);
 158		priv->task = NULL;
 159		return NULL;
 160	}
 161
 162	if (mmap_read_lock_killable(mm)) {
 163		mmput(mm);
 164		put_task_struct(priv->task);
 165		priv->task = NULL;
 166		return ERR_PTR(-EINTR);
 167	}
 168
 169	vma_iter_init(&priv->iter, mm, last_addr);
 170	hold_task_mempolicy(priv);
 171	if (last_addr == -2UL)
 172		return get_gate_vma(mm);
 173
 174	return proc_get_vma(priv, ppos);
 175}
 176
 177static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
 178{
 179	if (*ppos == -2UL) {
 180		*ppos = -1UL;
 181		return NULL;
 182	}
 183	return proc_get_vma(m->private, ppos);
 184}
 185
 186static void m_stop(struct seq_file *m, void *v)
 187{
 188	struct proc_maps_private *priv = m->private;
 189	struct mm_struct *mm = priv->mm;
 190
 191	if (!priv->task)
 192		return;
 193
 194	release_task_mempolicy(priv);
 195	mmap_read_unlock(mm);
 196	mmput(mm);
 197	put_task_struct(priv->task);
 198	priv->task = NULL;
 199}
 200
 201static int proc_maps_open(struct inode *inode, struct file *file,
 202			const struct seq_operations *ops, int psize)
 203{
 204	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);
 205
 206	if (!priv)
 207		return -ENOMEM;
 208
 209	priv->inode = inode;
 210	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
 211	if (IS_ERR(priv->mm)) {
 212		int err = PTR_ERR(priv->mm);
 213
 214		seq_release_private(inode, file);
 215		return err;
 216	}
 217
 218	return 0;
 219}
 220
 221static int proc_map_release(struct inode *inode, struct file *file)
 222{
 223	struct seq_file *seq = file->private_data;
 224	struct proc_maps_private *priv = seq->private;
 225
 226	if (priv->mm)
 227		mmdrop(priv->mm);
 228
 229	return seq_release_private(inode, file);
 230}
 231
 232static int do_maps_open(struct inode *inode, struct file *file,
 233			const struct seq_operations *ops)
 234{
 235	return proc_maps_open(inode, file, ops,
 236				sizeof(struct proc_maps_private));
 237}
 238
 239/*
 240 * Indicate if the VMA is a stack for the given task; for
 241 * /proc/PID/maps that is the stack of the main task.
 242 */
 243static int is_stack(struct vm_area_struct *vma)
 244{
 245	/*
 246	 * We make no effort to guess what a given thread considers to be
 247	 * its "stack".  It's not even well-defined for programs written
  248	 * in languages like Go.
 249	 */
 250	return vma->vm_start <= vma->vm_mm->start_stack &&
 251		vma->vm_end >= vma->vm_mm->start_stack;
 252}
 253
 254static void show_vma_header_prefix(struct seq_file *m,
 255				   unsigned long start, unsigned long end,
 256				   vm_flags_t flags, unsigned long long pgoff,
 257				   dev_t dev, unsigned long ino)
 258{
 259	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 260	seq_put_hex_ll(m, NULL, start, 8);
 261	seq_put_hex_ll(m, "-", end, 8);
 262	seq_putc(m, ' ');
 263	seq_putc(m, flags & VM_READ ? 'r' : '-');
 264	seq_putc(m, flags & VM_WRITE ? 'w' : '-');
 265	seq_putc(m, flags & VM_EXEC ? 'x' : '-');
 266	seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
 267	seq_put_hex_ll(m, " ", pgoff, 8);
 268	seq_put_hex_ll(m, " ", MAJOR(dev), 2);
 269	seq_put_hex_ll(m, ":", MINOR(dev), 2);
 270	seq_put_decimal_ull(m, " ", ino);
 271	seq_putc(m, ' ');
 272}
 273
 274static void
 275show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 276{
 277	struct anon_vma_name *anon_name = NULL;
 278	struct mm_struct *mm = vma->vm_mm;
 279	struct file *file = vma->vm_file;
 280	vm_flags_t flags = vma->vm_flags;
 281	unsigned long ino = 0;
 282	unsigned long long pgoff = 0;
 283	unsigned long start, end;
 284	dev_t dev = 0;
 285	const char *name = NULL;
 286
 287	if (file) {
 288		struct inode *inode = file_inode(vma->vm_file);
 289		dev = inode->i_sb->s_dev;
 290		ino = inode->i_ino;
 291		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
 292	}
 293
 294	start = vma->vm_start;
 295	end = vma->vm_end;
 296	show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
 297	if (mm)
 298		anon_name = anon_vma_name(vma);
 299
 300	/*
 301	 * Print the dentry name for named mappings, and a
 302	 * special [heap] marker for the heap:
 303	 */
 304	if (file) {
 305		seq_pad(m, ' ');
 306		/*
 307		 * If user named this anon shared memory via
 308		 * prctl(PR_SET_VMA ..., use the provided name.
 309		 */
 310		if (anon_name)
 311			seq_printf(m, "[anon_shmem:%s]", anon_name->name);
 312		else
 313			seq_file_path(m, file, "\n");
 314		goto done;
 315	}
 316
 317	if (vma->vm_ops && vma->vm_ops->name) {
 318		name = vma->vm_ops->name(vma);
 319		if (name)
 320			goto done;
 321	}
 322
 323	name = arch_vma_name(vma);
 324	if (!name) {
 325		if (!mm) {
 326			name = "[vdso]";
 327			goto done;
 328		}
 329
 330		if (vma->vm_start <= mm->brk &&
 331		    vma->vm_end >= mm->start_brk) {
 332			name = "[heap]";
 333			goto done;
 334		}
 335
 336		if (is_stack(vma)) {
 337			name = "[stack]";
 338			goto done;
 339		}
 340
 341		if (anon_name) {
 342			seq_pad(m, ' ');
 343			seq_printf(m, "[anon:%s]", anon_name->name);
 344		}
 345	}
 346
 347done:
 348	if (name) {
 349		seq_pad(m, ' ');
 350		seq_puts(m, name);
 351	}
 352	seq_putc(m, '\n');
 353}
 354
 355static int show_map(struct seq_file *m, void *v)
 356{
 357	show_map_vma(m, v);
 358	return 0;
 359}
 360
 361static const struct seq_operations proc_pid_maps_op = {
 362	.start	= m_start,
 363	.next	= m_next,
 364	.stop	= m_stop,
 365	.show	= show_map
 366};
 367
 368static int pid_maps_open(struct inode *inode, struct file *file)
 369{
 370	return do_maps_open(inode, file, &proc_pid_maps_op);
 371}
 372
 373const struct file_operations proc_pid_maps_operations = {
 374	.open		= pid_maps_open,
 375	.read		= seq_read,
 376	.llseek		= seq_lseek,
 377	.release	= proc_map_release,
 378};
 379
 380/*
 381 * Proportional Set Size(PSS): my share of RSS.
 382 *
 383 * PSS of a process is the count of pages it has in memory, where each
 384 * page is divided by the number of processes sharing it.  So if a
 385 * process has 1000 pages all to itself, and 1000 shared with one other
 386 * process, its PSS will be 1500.
 387 *
 388 * To keep (accumulated) division errors low, we adopt a 64bit
 389 * fixed-point pss counter to minimize division errors. So (pss >>
 390 * PSS_SHIFT) would be the real byte count.
 391 *
 392 * A shift of 12 before division means (assuming 4K page size):
 393 * 	- 1M 3-user-pages add up to 8KB errors;
 394 * 	- supports mapcount up to 2^24, or 16M;
 395 * 	- supports PSS up to 2^52 bytes, or 4PB.
 396 */
 397#define PSS_SHIFT 12
 398
 399#ifdef CONFIG_PROC_PAGE_MONITOR
 400struct mem_size_stats {
 401	unsigned long resident;
 402	unsigned long shared_clean;
 403	unsigned long shared_dirty;
 404	unsigned long private_clean;
 405	unsigned long private_dirty;
 406	unsigned long referenced;
 407	unsigned long anonymous;
 408	unsigned long lazyfree;
 409	unsigned long anonymous_thp;
 410	unsigned long shmem_thp;
 411	unsigned long file_thp;
 412	unsigned long swap;
 413	unsigned long shared_hugetlb;
 414	unsigned long private_hugetlb;
 415	u64 pss;
 416	u64 pss_anon;
 417	u64 pss_file;
 418	u64 pss_shmem;
 419	u64 pss_dirty;
 420	u64 pss_locked;
 421	u64 swap_pss;
 422};
 423
 424static void smaps_page_accumulate(struct mem_size_stats *mss,
 425		struct page *page, unsigned long size, unsigned long pss,
 426		bool dirty, bool locked, bool private)
 427{
 428	mss->pss += pss;
 429
 430	if (PageAnon(page))
 431		mss->pss_anon += pss;
 432	else if (PageSwapBacked(page))
 433		mss->pss_shmem += pss;
 434	else
 435		mss->pss_file += pss;
 436
 437	if (locked)
 438		mss->pss_locked += pss;
 439
 440	if (dirty || PageDirty(page)) {
 441		mss->pss_dirty += pss;
 442		if (private)
 443			mss->private_dirty += size;
 444		else
 445			mss->shared_dirty += size;
 446	} else {
 447		if (private)
 448			mss->private_clean += size;
 449		else
 450			mss->shared_clean += size;
 451	}
 452}
 453
 454static void smaps_account(struct mem_size_stats *mss, struct page *page,
 455		bool compound, bool young, bool dirty, bool locked,
 456		bool migration)
 457{
 458	int i, nr = compound ? compound_nr(page) : 1;
 459	unsigned long size = nr * PAGE_SIZE;
 460
 461	/*
 462	 * First accumulate quantities that depend only on |size| and the type
 463	 * of the compound page.
 464	 */
 465	if (PageAnon(page)) {
 466		mss->anonymous += size;
 467		if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
 468			mss->lazyfree += size;
 469	}
 470
 471	mss->resident += size;
 472	/* Accumulate the size in pages that have been accessed. */
 473	if (young || page_is_young(page) || PageReferenced(page))
 474		mss->referenced += size;
 475
 476	/*
 477	 * Then accumulate quantities that may depend on sharing, or that may
 478	 * differ page-by-page.
 479	 *
 480	 * page_count(page) == 1 guarantees the page is mapped exactly once.
 481	 * If any subpage of the compound page mapped with PTE it would elevate
 482	 * page_count().
 483	 *
 484	 * The page_mapcount() is called to get a snapshot of the mapcount.
 485	 * Without holding the page lock this snapshot can be slightly wrong as
 486	 * we cannot always read the mapcount atomically.  It is not safe to
 487	 * call page_mapcount() even with PTL held if the page is not mapped,
 488	 * especially for migration entries.  Treat regular migration entries
 489	 * as mapcount == 1.
 490	 */
 491	if ((page_count(page) == 1) || migration) {
 492		smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
 493			locked, true);
 494		return;
 495	}
 496	for (i = 0; i < nr; i++, page++) {
 497		int mapcount = page_mapcount(page);
 498		unsigned long pss = PAGE_SIZE << PSS_SHIFT;
 499		if (mapcount >= 2)
 500			pss /= mapcount;
 501		smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked,
 502				      mapcount < 2);
 503	}
 504}
 505
 506#ifdef CONFIG_SHMEM
 507static int smaps_pte_hole(unsigned long addr, unsigned long end,
 508			  __always_unused int depth, struct mm_walk *walk)
 509{
 510	struct mem_size_stats *mss = walk->private;
 511	struct vm_area_struct *vma = walk->vma;
 512
 513	mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping,
 514					      linear_page_index(vma, addr),
 515					      linear_page_index(vma, end));
 516
 517	return 0;
 518}
 519#else
 520#define smaps_pte_hole		NULL
 521#endif /* CONFIG_SHMEM */
 522
 523static void smaps_pte_hole_lookup(unsigned long addr, struct mm_walk *walk)
 524{
 525#ifdef CONFIG_SHMEM
 526	if (walk->ops->pte_hole) {
 527		/* depth is not used */
 528		smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk);
 529	}
 530#endif
 531}
 532
 533static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 534		struct mm_walk *walk)
 535{
 536	struct mem_size_stats *mss = walk->private;
 537	struct vm_area_struct *vma = walk->vma;
 538	bool locked = !!(vma->vm_flags & VM_LOCKED);
 539	struct page *page = NULL;
 540	bool migration = false, young = false, dirty = false;
 541
 542	if (pte_present(*pte)) {
 543		page = vm_normal_page(vma, addr, *pte);
 544		young = pte_young(*pte);
 545		dirty = pte_dirty(*pte);
 546	} else if (is_swap_pte(*pte)) {
 547		swp_entry_t swpent = pte_to_swp_entry(*pte);
 548
 549		if (!non_swap_entry(swpent)) {
 550			int mapcount;
 551
 552			mss->swap += PAGE_SIZE;
 553			mapcount = swp_swapcount(swpent);
 554			if (mapcount >= 2) {
 555				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
 556
 557				do_div(pss_delta, mapcount);
 558				mss->swap_pss += pss_delta;
 559			} else {
 560				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
 561			}
 562		} else if (is_pfn_swap_entry(swpent)) {
 563			if (is_migration_entry(swpent))
 564				migration = true;
 565			page = pfn_swap_entry_to_page(swpent);
 566		}
 567	} else {
 568		smaps_pte_hole_lookup(addr, walk);
 569		return;
 570	}
 571
 572	if (!page)
 573		return;
 574
 575	smaps_account(mss, page, false, young, dirty, locked, migration);
 576}
 577
 578#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 579static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 580		struct mm_walk *walk)
 581{
 582	struct mem_size_stats *mss = walk->private;
 583	struct vm_area_struct *vma = walk->vma;
 584	bool locked = !!(vma->vm_flags & VM_LOCKED);
 585	struct page *page = NULL;
 586	bool migration = false;
 587
 588	if (pmd_present(*pmd)) {
 589		/* FOLL_DUMP will return -EFAULT on huge zero page */
 590		page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
 591	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
 592		swp_entry_t entry = pmd_to_swp_entry(*pmd);
 593
 594		if (is_migration_entry(entry)) {
 595			migration = true;
 596			page = pfn_swap_entry_to_page(entry);
 597		}
 598	}
 599	if (IS_ERR_OR_NULL(page))
 600		return;
 601	if (PageAnon(page))
 602		mss->anonymous_thp += HPAGE_PMD_SIZE;
 603	else if (PageSwapBacked(page))
 604		mss->shmem_thp += HPAGE_PMD_SIZE;
 605	else if (is_zone_device_page(page))
 606		/* pass */;
 607	else
 608		mss->file_thp += HPAGE_PMD_SIZE;
 609
 610	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
 611		      locked, migration);
 612}
 613#else
 614static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 615		struct mm_walk *walk)
 616{
 617}
 618#endif
 619
 620static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 621			   struct mm_walk *walk)
 622{
 623	struct vm_area_struct *vma = walk->vma;
 624	pte_t *pte;
 625	spinlock_t *ptl;
 626
 627	ptl = pmd_trans_huge_lock(pmd, vma);
 628	if (ptl) {
 629		smaps_pmd_entry(pmd, addr, walk);
 630		spin_unlock(ptl);
 631		goto out;
 632	}
 633
 634	if (pmd_trans_unstable(pmd))
 635		goto out;
 636	/*
 637	 * The mmap_lock held all the way back in m_start() is what
 638	 * keeps khugepaged out of here and from collapsing things
 639	 * in here.
 640	 */
 641	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 642	for (; addr != end; pte++, addr += PAGE_SIZE)
 643		smaps_pte_entry(pte, addr, walk);
 644	pte_unmap_unlock(pte - 1, ptl);
 645out:
 646	cond_resched();
 647	return 0;
 648}
 649
 650static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
 651{
 652	/*
 653	 * Don't forget to update Documentation/ on changes.
 654	 */
 655	static const char mnemonics[BITS_PER_LONG][2] = {
 656		/*
  657		 * In case we meet a flag we don't know about.
 658		 */
 659		[0 ... (BITS_PER_LONG-1)] = "??",
 660
 661		[ilog2(VM_READ)]	= "rd",
 662		[ilog2(VM_WRITE)]	= "wr",
 663		[ilog2(VM_EXEC)]	= "ex",
 664		[ilog2(VM_SHARED)]	= "sh",
 665		[ilog2(VM_MAYREAD)]	= "mr",
 666		[ilog2(VM_MAYWRITE)]	= "mw",
 667		[ilog2(VM_MAYEXEC)]	= "me",
 668		[ilog2(VM_MAYSHARE)]	= "ms",
 669		[ilog2(VM_GROWSDOWN)]	= "gd",
 670		[ilog2(VM_PFNMAP)]	= "pf",
 671		[ilog2(VM_LOCKED)]	= "lo",
 672		[ilog2(VM_IO)]		= "io",
 673		[ilog2(VM_SEQ_READ)]	= "sr",
 674		[ilog2(VM_RAND_READ)]	= "rr",
 675		[ilog2(VM_DONTCOPY)]	= "dc",
 676		[ilog2(VM_DONTEXPAND)]	= "de",
 677		[ilog2(VM_LOCKONFAULT)]	= "lf",
 678		[ilog2(VM_ACCOUNT)]	= "ac",
 679		[ilog2(VM_NORESERVE)]	= "nr",
 680		[ilog2(VM_HUGETLB)]	= "ht",
 681		[ilog2(VM_SYNC)]	= "sf",
 682		[ilog2(VM_ARCH_1)]	= "ar",
 683		[ilog2(VM_WIPEONFORK)]	= "wf",
 684		[ilog2(VM_DONTDUMP)]	= "dd",
 685#ifdef CONFIG_ARM64_BTI
 686		[ilog2(VM_ARM64_BTI)]	= "bt",
 687#endif
 688#ifdef CONFIG_MEM_SOFT_DIRTY
 689		[ilog2(VM_SOFTDIRTY)]	= "sd",
 690#endif
 691		[ilog2(VM_MIXEDMAP)]	= "mm",
 692		[ilog2(VM_HUGEPAGE)]	= "hg",
 693		[ilog2(VM_NOHUGEPAGE)]	= "nh",
 694		[ilog2(VM_MERGEABLE)]	= "mg",
 695		[ilog2(VM_UFFD_MISSING)]= "um",
 696		[ilog2(VM_UFFD_WP)]	= "uw",
 697#ifdef CONFIG_ARM64_MTE
 698		[ilog2(VM_MTE)]		= "mt",
 699		[ilog2(VM_MTE_ALLOWED)]	= "",
 700#endif
 701#ifdef CONFIG_ARCH_HAS_PKEYS
 702		/* These come out via ProtectionKey: */
 703		[ilog2(VM_PKEY_BIT0)]	= "",
 704		[ilog2(VM_PKEY_BIT1)]	= "",
 705		[ilog2(VM_PKEY_BIT2)]	= "",
 706		[ilog2(VM_PKEY_BIT3)]	= "",
 707#if VM_PKEY_BIT4
 708		[ilog2(VM_PKEY_BIT4)]	= "",
 709#endif
 710#endif /* CONFIG_ARCH_HAS_PKEYS */
 711#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
 712		[ilog2(VM_UFFD_MINOR)]	= "ui",
 713#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
 714	};
 715	size_t i;
 716
 717	seq_puts(m, "VmFlags: ");
 718	for (i = 0; i < BITS_PER_LONG; i++) {
 719		if (!mnemonics[i][0])
 720			continue;
 721		if (vma->vm_flags & (1UL << i)) {
 722			seq_putc(m, mnemonics[i][0]);
 723			seq_putc(m, mnemonics[i][1]);
 724			seq_putc(m, ' ');
 725		}
 726	}
 727	seq_putc(m, '\n');
 728}
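show_smap_vma_flags() above prints the two-letter mnemonics on the VmFlags: line of /proc/<pid>/smaps. A hedged userspace sketch that scans those lines for one mnemonic ("lo", VM_LOCKED); the choice of flag is only an example.

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[512];
        FILE *f = fopen("/proc/self/smaps", "r");

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f)) {
                if (strncmp(line, "VmFlags:", 8))
                        continue;
                /* mnemonics are space-separated two-letter tokens */
                if (strstr(line, " lo "))
                        printf("found a VM_LOCKED mapping\n");
        }
        fclose(f);
        return 0;
}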
 729
 730#ifdef CONFIG_HUGETLB_PAGE
 731static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
 732				 unsigned long addr, unsigned long end,
 733				 struct mm_walk *walk)
 734{
 735	struct mem_size_stats *mss = walk->private;
 736	struct vm_area_struct *vma = walk->vma;
 737	struct page *page = NULL;
 738
 739	if (pte_present(*pte)) {
 740		page = vm_normal_page(vma, addr, *pte);
 741	} else if (is_swap_pte(*pte)) {
 742		swp_entry_t swpent = pte_to_swp_entry(*pte);
 743
 744		if (is_pfn_swap_entry(swpent))
 745			page = pfn_swap_entry_to_page(swpent);
 746	}
 747	if (page) {
 748		if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte))
 749			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
 750		else
 751			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
 752	}
 753	return 0;
 754}
 755#else
 756#define smaps_hugetlb_range	NULL
 757#endif /* HUGETLB_PAGE */
 758
 759static const struct mm_walk_ops smaps_walk_ops = {
 760	.pmd_entry		= smaps_pte_range,
 761	.hugetlb_entry		= smaps_hugetlb_range,
 762};
 763
 764static const struct mm_walk_ops smaps_shmem_walk_ops = {
 765	.pmd_entry		= smaps_pte_range,
 766	.hugetlb_entry		= smaps_hugetlb_range,
 767	.pte_hole		= smaps_pte_hole,
 768};
 769
 770/*
 771 * Gather mem stats from @vma with the indicated beginning
 772 * address @start, and keep them in @mss.
 773 *
 774 * Use vm_start of @vma as the beginning address if @start is 0.
 775 */
 776static void smap_gather_stats(struct vm_area_struct *vma,
 777		struct mem_size_stats *mss, unsigned long start)
 778{
 779	const struct mm_walk_ops *ops = &smaps_walk_ops;
 780
 781	/* Invalid start */
 782	if (start >= vma->vm_end)
 783		return;
 784
 785#ifdef CONFIG_SHMEM
 786	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
 787		/*
 788		 * For shared or readonly shmem mappings we know that all
 789		 * swapped out pages belong to the shmem object, and we can
 790		 * obtain the swap value much more efficiently. For private
 791		 * writable mappings, we might have COW pages that are
 792		 * not affected by the parent swapped out pages of the shmem
 793		 * object, so we have to distinguish them during the page walk.
 794		 * Unless we know that the shmem object (or the part mapped by
 795		 * our VMA) has no swapped out pages at all.
 796		 */
 797		unsigned long shmem_swapped = shmem_swap_usage(vma);
 798
 799		if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
 800					!(vma->vm_flags & VM_WRITE))) {
 801			mss->swap += shmem_swapped;
 802		} else {
 803			ops = &smaps_shmem_walk_ops;
 804		}
 805	}
 806#endif
 807	/* mmap_lock is held in m_start */
 808	if (!start)
 809		walk_page_vma(vma, ops, mss);
 810	else
 811		walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
 812}
 813
 814#define SEQ_PUT_DEC(str, val) \
 815		seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
 816
 817/* Show the contents common for smaps and smaps_rollup */
 818static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
 819	bool rollup_mode)
 820{
 821	SEQ_PUT_DEC("Rss:            ", mss->resident);
 822	SEQ_PUT_DEC(" kB\nPss:            ", mss->pss >> PSS_SHIFT);
 823	SEQ_PUT_DEC(" kB\nPss_Dirty:      ", mss->pss_dirty >> PSS_SHIFT);
 824	if (rollup_mode) {
 825		/*
 826		 * These are meaningful only for smaps_rollup, otherwise two of
 827		 * them are zero, and the other one is the same as Pss.
 828		 */
 829		SEQ_PUT_DEC(" kB\nPss_Anon:       ",
 830			mss->pss_anon >> PSS_SHIFT);
 831		SEQ_PUT_DEC(" kB\nPss_File:       ",
 832			mss->pss_file >> PSS_SHIFT);
 833		SEQ_PUT_DEC(" kB\nPss_Shmem:      ",
 834			mss->pss_shmem >> PSS_SHIFT);
 835	}
 836	SEQ_PUT_DEC(" kB\nShared_Clean:   ", mss->shared_clean);
 837	SEQ_PUT_DEC(" kB\nShared_Dirty:   ", mss->shared_dirty);
 838	SEQ_PUT_DEC(" kB\nPrivate_Clean:  ", mss->private_clean);
 839	SEQ_PUT_DEC(" kB\nPrivate_Dirty:  ", mss->private_dirty);
 840	SEQ_PUT_DEC(" kB\nReferenced:     ", mss->referenced);
 841	SEQ_PUT_DEC(" kB\nAnonymous:      ", mss->anonymous);
 842	SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
 843	SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
 844	SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
 845	SEQ_PUT_DEC(" kB\nFilePmdMapped:  ", mss->file_thp);
 846	SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
 847	seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
 848				  mss->private_hugetlb >> 10, 7);
 849	SEQ_PUT_DEC(" kB\nSwap:           ", mss->swap);
 850	SEQ_PUT_DEC(" kB\nSwapPss:        ",
 851					mss->swap_pss >> PSS_SHIFT);
 852	SEQ_PUT_DEC(" kB\nLocked:         ",
 853					mss->pss_locked >> PSS_SHIFT);
 854	seq_puts(m, " kB\n");
 855}
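/*
 * Worked example (illustrative): Pss is kept in fixed point with PSS_SHIFT
 * fractional bits.  The accounting code earlier in this file adds roughly
 * (size << PSS_SHIFT) / mapcount per page, so a 4 KiB page shared by four
 * processes contributes (4096 << PSS_SHIFT) / 4 to mss->pss.  __show_smap()
 * then passes mss->pss >> PSS_SHIFT (a byte count) to SEQ_PUT_DEC(), which
 * shifts right by another 10 bits, so that page shows up as 1 kB of Pss.
 */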
 856
 857static int show_smap(struct seq_file *m, void *v)
 858{
 859	struct vm_area_struct *vma = v;
 860	struct mem_size_stats mss;
 861
 862	memset(&mss, 0, sizeof(mss));
 863
 864	smap_gather_stats(vma, &mss, 0);
 865
 866	show_map_vma(m, vma);
 867
 868	SEQ_PUT_DEC("Size:           ", vma->vm_end - vma->vm_start);
 869	SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
 870	SEQ_PUT_DEC(" kB\nMMUPageSize:    ", vma_mmu_pagesize(vma));
 871	seq_puts(m, " kB\n");
 872
 873	__show_smap(m, &mss, false);
 874
 875	seq_printf(m, "THPeligible:    %d\n",
 876		   hugepage_vma_check(vma, vma->vm_flags, true, false, true));
 877
 878	if (arch_pkeys_enabled())
 879		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
 880	show_smap_vma_flags(m, vma);
 881
 882	return 0;
 883}
 884
 885static int show_smaps_rollup(struct seq_file *m, void *v)
 886{
 887	struct proc_maps_private *priv = m->private;
 888	struct mem_size_stats mss;
 889	struct mm_struct *mm = priv->mm;
 890	struct vm_area_struct *vma;
 891	unsigned long vma_start = 0, last_vma_end = 0;
 892	int ret = 0;
 893	MA_STATE(mas, &mm->mm_mt, 0, 0);
 894
 895	priv->task = get_proc_task(priv->inode);
 896	if (!priv->task)
 897		return -ESRCH;
 898
 899	if (!mm || !mmget_not_zero(mm)) {
 900		ret = -ESRCH;
 901		goto out_put_task;
 902	}
 903
 904	memset(&mss, 0, sizeof(mss));
 905
 906	ret = mmap_read_lock_killable(mm);
 907	if (ret)
 908		goto out_put_mm;
 909
 910	hold_task_mempolicy(priv);
 911	vma = mas_find(&mas, ULONG_MAX);
 912
 913	if (unlikely(!vma))
 914		goto empty_set;
 915
 916	vma_start = vma->vm_start;
 917	do {
 918		smap_gather_stats(vma, &mss, 0);
 919		last_vma_end = vma->vm_end;
 920
 921		/*
 922		 * Release mmap_lock temporarily if another task is
 923		 * waiting to take it for write.
 924		 */
 925		if (mmap_lock_is_contended(mm)) {
 926			mas_pause(&mas);
 927			mmap_read_unlock(mm);
 928			ret = mmap_read_lock_killable(mm);
 929			if (ret) {
 930				release_task_mempolicy(priv);
 931				goto out_put_mm;
 932			}
 933
 934			/*
 935			 * After dropping the lock, there are four cases to
 936			 * consider. See the following example for explanation.
 937			 *
 938			 *   +------+------+-----------+
 939			 *   | VMA1 | VMA2 | VMA3      |
 940			 *   +------+------+-----------+
 941			 *   |      |      |           |
 942			 *  4k     8k     16k         400k
 943			 *
 944			 * Suppose we drop the lock after reading VMA2 due to
 945			 * contention, then we get:
 946			 *
 947			 *	last_vma_end = 16k
 948			 *
 949			 * 1) VMA2 is freed, but VMA3 exists:
 950			 *
 951			 *    find_vma(mm, 16k - 1) will return VMA3.
 952			 *    In this case, just continue from VMA3.
 953			 *
 954			 * 2) VMA2 still exists:
 955			 *
 956			 *    find_vma(mm, 16k - 1) will return VMA2.
 957			 *    Iterate the loop like the original one.
 958			 *
 959			 * 3) No more VMAs can be found:
 960			 *
 961			 *    find_vma(mm, 16k - 1) will return NULL.
 962			 *    No more things to do, just break.
 963			 *
 964			 * 4) (last_vma_end - 1) is the middle of a vma (VMA'):
 965			 *
 966			 *    find_vma(mm, 16k - 1) will return VMA' whose range
 967			 *    contains last_vma_end.
 968			 *    Iterate VMA' from last_vma_end.
 969			 */
 970			vma = mas_find(&mas, ULONG_MAX);
 971			/* Case 3 above */
 972			if (!vma)
 973				break;
 974
 975			/* Case 1 above */
 976			if (vma->vm_start >= last_vma_end)
 977				continue;
 978
 979			/* Case 4 above */
 980			if (vma->vm_end > last_vma_end)
 981				smap_gather_stats(vma, &mss, last_vma_end);
 982		}
 983		/* Case 2 above */
 984	} while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
 985
 986empty_set:
 987	show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
 988	seq_pad(m, ' ');
 989	seq_puts(m, "[rollup]\n");
 990
 991	__show_smap(m, &mss, true);
 992
 993	release_task_mempolicy(priv);
 994	mmap_read_unlock(mm);
 995
 996out_put_mm:
 997	mmput(mm);
 998out_put_task:
 999	put_task_struct(priv->task);
1000	priv->task = NULL;
1001
1002	return ret;
1003}
1004#undef SEQ_PUT_DEC
1005
1006static const struct seq_operations proc_pid_smaps_op = {
1007	.start	= m_start,
1008	.next	= m_next,
1009	.stop	= m_stop,
1010	.show	= show_smap
1011};
1012
1013static int pid_smaps_open(struct inode *inode, struct file *file)
1014{
1015	return do_maps_open(inode, file, &proc_pid_smaps_op);
1016}
1017
1018static int smaps_rollup_open(struct inode *inode, struct file *file)
1019{
1020	int ret;
1021	struct proc_maps_private *priv;
1022
1023	priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
1024	if (!priv)
1025		return -ENOMEM;
1026
1027	ret = single_open(file, show_smaps_rollup, priv);
1028	if (ret)
1029		goto out_free;
1030
1031	priv->inode = inode;
1032	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
1033	if (IS_ERR(priv->mm)) {
1034		ret = PTR_ERR(priv->mm);
1035
1036		single_release(inode, file);
1037		goto out_free;
1038	}
1039
1040	return 0;
1041
1042out_free:
1043	kfree(priv);
1044	return ret;
1045}
1046
1047static int smaps_rollup_release(struct inode *inode, struct file *file)
1048{
1049	struct seq_file *seq = file->private_data;
1050	struct proc_maps_private *priv = seq->private;
1051
1052	if (priv->mm)
1053		mmdrop(priv->mm);
1054
1055	kfree(priv);
1056	return single_release(inode, file);
1057}
1058
1059const struct file_operations proc_pid_smaps_operations = {
1060	.open		= pid_smaps_open,
1061	.read		= seq_read,
1062	.llseek		= seq_lseek,
1063	.release	= proc_map_release,
1064};
1065
1066const struct file_operations proc_pid_smaps_rollup_operations = {
1067	.open		= smaps_rollup_open,
1068	.read		= seq_read,
1069	.llseek		= seq_lseek,
1070	.release	= smaps_rollup_release,
1071};
1072
1073enum clear_refs_types {
1074	CLEAR_REFS_ALL = 1,
1075	CLEAR_REFS_ANON,
1076	CLEAR_REFS_MAPPED,
1077	CLEAR_REFS_SOFT_DIRTY,
1078	CLEAR_REFS_MM_HIWATER_RSS,
1079	CLEAR_REFS_LAST,
1080};
1081
1082struct clear_refs_private {
1083	enum clear_refs_types type;
1084};
1085
1086#ifdef CONFIG_MEM_SOFT_DIRTY
1087
1088static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1089{
1090	struct page *page;
1091
1092	if (!pte_write(pte))
1093		return false;
1094	if (!is_cow_mapping(vma->vm_flags))
1095		return false;
1096	if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
1097		return false;
1098	page = vm_normal_page(vma, addr, pte);
1099	if (!page)
1100		return false;
1101	return page_maybe_dma_pinned(page);
1102}
1103
1104static inline void clear_soft_dirty(struct vm_area_struct *vma,
1105		unsigned long addr, pte_t *pte)
1106{
1107	/*
1108	 * The soft-dirty tracker uses page faults (#PF) to catch writes
1109	 * to pages, so write-protect the pte as well. See
1110	 * Documentation/admin-guide/mm/soft-dirty.rst for a full
1111	 * description of how soft-dirty works.
1112	 */
1113	pte_t ptent = *pte;
1114
1115	if (pte_present(ptent)) {
1116		pte_t old_pte;
1117
1118		if (pte_is_pinned(vma, addr, ptent))
1119			return;
1120		old_pte = ptep_modify_prot_start(vma, addr, pte);
1121		ptent = pte_wrprotect(old_pte);
1122		ptent = pte_clear_soft_dirty(ptent);
1123		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
1124	} else if (is_swap_pte(ptent)) {
1125		ptent = pte_swp_clear_soft_dirty(ptent);
1126		set_pte_at(vma->vm_mm, addr, pte, ptent);
1127	}
1128}
1129#else
1130static inline void clear_soft_dirty(struct vm_area_struct *vma,
1131		unsigned long addr, pte_t *pte)
1132{
1133}
1134#endif
1135
1136#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1137static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1138		unsigned long addr, pmd_t *pmdp)
1139{
1140	pmd_t old, pmd = *pmdp;
1141
1142	if (pmd_present(pmd)) {
1143		/* See comment in change_huge_pmd() */
1144		old = pmdp_invalidate(vma, addr, pmdp);
1145		if (pmd_dirty(old))
1146			pmd = pmd_mkdirty(pmd);
1147		if (pmd_young(old))
1148			pmd = pmd_mkyoung(pmd);
1149
1150		pmd = pmd_wrprotect(pmd);
1151		pmd = pmd_clear_soft_dirty(pmd);
1152
1153		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1154	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
1155		pmd = pmd_swp_clear_soft_dirty(pmd);
1156		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1157	}
1158}
1159#else
1160static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1161		unsigned long addr, pmd_t *pmdp)
1162{
1163}
1164#endif
1165
1166static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
1167				unsigned long end, struct mm_walk *walk)
1168{
1169	struct clear_refs_private *cp = walk->private;
1170	struct vm_area_struct *vma = walk->vma;
1171	pte_t *pte, ptent;
1172	spinlock_t *ptl;
1173	struct page *page;
1174
1175	ptl = pmd_trans_huge_lock(pmd, vma);
1176	if (ptl) {
1177		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1178			clear_soft_dirty_pmd(vma, addr, pmd);
1179			goto out;
1180		}
1181
1182		if (!pmd_present(*pmd))
1183			goto out;
1184
1185		page = pmd_page(*pmd);
1186
1187		/* Clear accessed and referenced bits. */
1188		pmdp_test_and_clear_young(vma, addr, pmd);
1189		test_and_clear_page_young(page);
1190		ClearPageReferenced(page);
1191out:
1192		spin_unlock(ptl);
1193		return 0;
1194	}
1195
1196	if (pmd_trans_unstable(pmd))
1197		return 0;
1198
1199	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1200	for (; addr != end; pte++, addr += PAGE_SIZE) {
1201		ptent = *pte;
1202
1203		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1204			clear_soft_dirty(vma, addr, pte);
1205			continue;
1206		}
1207
1208		if (!pte_present(ptent))
1209			continue;
1210
1211		page = vm_normal_page(vma, addr, ptent);
1212		if (!page)
1213			continue;
1214
1215		/* Clear accessed and referenced bits. */
1216		ptep_test_and_clear_young(vma, addr, pte);
1217		test_and_clear_page_young(page);
1218		ClearPageReferenced(page);
1219	}
1220	pte_unmap_unlock(pte - 1, ptl);
1221	cond_resched();
1222	return 0;
1223}
1224
1225static int clear_refs_test_walk(unsigned long start, unsigned long end,
1226				struct mm_walk *walk)
1227{
1228	struct clear_refs_private *cp = walk->private;
1229	struct vm_area_struct *vma = walk->vma;
1230
1231	if (vma->vm_flags & VM_PFNMAP)
1232		return 1;
1233
1234	/*
1235	 * Writing 1 to /proc/pid/clear_refs affects all pages.
1236	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
1237	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
1238	 * Writing 4 to /proc/pid/clear_refs affects all pages.
1239	 */
1240	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
1241		return 1;
1242	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
1243		return 1;
1244	return 0;
1245}
1246
1247static const struct mm_walk_ops clear_refs_walk_ops = {
1248	.pmd_entry		= clear_refs_pte_range,
1249	.test_walk		= clear_refs_test_walk,
1250};
1251
1252static ssize_t clear_refs_write(struct file *file, const char __user *buf,
1253				size_t count, loff_t *ppos)
1254{
1255	struct task_struct *task;
1256	char buffer[PROC_NUMBUF];
1257	struct mm_struct *mm;
1258	struct vm_area_struct *vma;
1259	enum clear_refs_types type;
1260	int itype;
1261	int rv;
1262
1263	memset(buffer, 0, sizeof(buffer));
1264	if (count > sizeof(buffer) - 1)
1265		count = sizeof(buffer) - 1;
1266	if (copy_from_user(buffer, buf, count))
1267		return -EFAULT;
1268	rv = kstrtoint(strstrip(buffer), 10, &itype);
1269	if (rv < 0)
1270		return rv;
1271	type = (enum clear_refs_types)itype;
1272	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
1273		return -EINVAL;
1274
1275	task = get_proc_task(file_inode(file));
1276	if (!task)
1277		return -ESRCH;
1278	mm = get_task_mm(task);
1279	if (mm) {
1280		MA_STATE(mas, &mm->mm_mt, 0, 0);
1281		struct mmu_notifier_range range;
1282		struct clear_refs_private cp = {
1283			.type = type,
1284		};
1285
1286		if (mmap_write_lock_killable(mm)) {
1287			count = -EINTR;
1288			goto out_mm;
1289		}
1290		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
1291			/*
1292			 * Writing 5 to /proc/pid/clear_refs resets the peak
1293			 * resident set size to this mm's current rss value.
1294			 */
1295			reset_mm_hiwater_rss(mm);
1296			goto out_unlock;
1297		}
1298
1299		if (type == CLEAR_REFS_SOFT_DIRTY) {
1300			mas_for_each(&mas, vma, ULONG_MAX) {
1301				if (!(vma->vm_flags & VM_SOFTDIRTY))
1302					continue;
1303				vma->vm_flags &= ~VM_SOFTDIRTY;
1304				vma_set_page_prot(vma);
1305			}
1306
1307			inc_tlb_flush_pending(mm);
1308			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
1309						0, NULL, mm, 0, -1UL);
1310			mmu_notifier_invalidate_range_start(&range);
1311		}
1312		walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
1313		if (type == CLEAR_REFS_SOFT_DIRTY) {
1314			mmu_notifier_invalidate_range_end(&range);
1315			flush_tlb_mm(mm);
1316			dec_tlb_flush_pending(mm);
1317		}
1318out_unlock:
1319		mmap_write_unlock(mm);
1320out_mm:
1321		mmput(mm);
1322	}
1323	put_task_struct(task);
1324
1325	return count;
1326}
1327
1328const struct file_operations proc_clear_refs_operations = {
1329	.write		= clear_refs_write,
1330	.llseek		= noop_llseek,
1331};
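/*
 * Usage sketch (illustrative, not part of this file): the soft-dirty cycle
 * driven through this interface is typically
 *
 *	echo 4 > /proc/<pid>/clear_refs    (clear soft-dirty bits)
 *	... let the task run ...
 *	read /proc/<pid>/pagemap           (bit 55 set => written since then)
 *
 * Writing 1..3 instead clears the accessed/referenced bits on the subsets
 * of pages described in clear_refs_test_walk() above, and writing 5 resets
 * the peak RSS as noted in clear_refs_write().
 */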
1332
1333typedef struct {
1334	u64 pme;
1335} pagemap_entry_t;
1336
1337struct pagemapread {
1338	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
1339	pagemap_entry_t *buffer;
1340	bool show_pfn;
1341};
1342
1343#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
1344#define PAGEMAP_WALK_MASK	(PMD_MASK)
1345
1346#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
1347#define PM_PFRAME_BITS		55
1348#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1349#define PM_SOFT_DIRTY		BIT_ULL(55)
1350#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
1351#define PM_UFFD_WP		BIT_ULL(57)
1352#define PM_FILE			BIT_ULL(61)
1353#define PM_SWAP			BIT_ULL(62)
1354#define PM_PRESENT		BIT_ULL(63)
1355
1356#define PM_END_OF_BUFFER    1
1357
1358static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
1359{
1360	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
1361}
1362
1363static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
1364			  struct pagemapread *pm)
1365{
1366	pm->buffer[pm->pos++] = *pme;
1367	if (pm->pos >= pm->len)
1368		return PM_END_OF_BUFFER;
1369	return 0;
1370}
1371
1372static int pagemap_pte_hole(unsigned long start, unsigned long end,
1373			    __always_unused int depth, struct mm_walk *walk)
1374{
1375	struct pagemapread *pm = walk->private;
1376	unsigned long addr = start;
1377	int err = 0;
1378
1379	while (addr < end) {
1380		struct vm_area_struct *vma = find_vma(walk->mm, addr);
1381		pagemap_entry_t pme = make_pme(0, 0);
1382		/* End of address space hole, which we mark as non-present. */
1383		unsigned long hole_end;
1384
1385		if (vma)
1386			hole_end = min(end, vma->vm_start);
1387		else
1388			hole_end = end;
1389
1390		for (; addr < hole_end; addr += PAGE_SIZE) {
1391			err = add_to_pagemap(addr, &pme, pm);
1392			if (err)
1393				goto out;
1394		}
1395
1396		if (!vma)
1397			break;
1398
1399		/* Addresses in the VMA. */
1400		if (vma->vm_flags & VM_SOFTDIRTY)
1401			pme = make_pme(0, PM_SOFT_DIRTY);
1402		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
1403			err = add_to_pagemap(addr, &pme, pm);
1404			if (err)
1405				goto out;
1406		}
1407	}
1408out:
1409	return err;
1410}
1411
1412static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
1413		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1414{
1415	u64 frame = 0, flags = 0;
1416	struct page *page = NULL;
1417	bool migration = false;
1418
1419	if (pte_present(pte)) {
1420		if (pm->show_pfn)
1421			frame = pte_pfn(pte);
1422		flags |= PM_PRESENT;
1423		page = vm_normal_page(vma, addr, pte);
1424		if (pte_soft_dirty(pte))
1425			flags |= PM_SOFT_DIRTY;
1426		if (pte_uffd_wp(pte))
1427			flags |= PM_UFFD_WP;
1428	} else if (is_swap_pte(pte)) {
1429		swp_entry_t entry;
1430		if (pte_swp_soft_dirty(pte))
1431			flags |= PM_SOFT_DIRTY;
1432		if (pte_swp_uffd_wp(pte))
1433			flags |= PM_UFFD_WP;
1434		entry = pte_to_swp_entry(pte);
1435		if (pm->show_pfn) {
1436			pgoff_t offset;
1437			/*
1438			 * For PFN swap entries, keep the offset field as the
1439			 * PFN only, for compatibility with old smaps.
1440			 */
1441			if (is_pfn_swap_entry(entry))
1442				offset = swp_offset_pfn(entry);
1443			else
1444				offset = swp_offset(entry);
1445			frame = swp_type(entry) |
1446			    (offset << MAX_SWAPFILES_SHIFT);
1447		}
1448		flags |= PM_SWAP;
1449		migration = is_migration_entry(entry);
1450		if (is_pfn_swap_entry(entry))
1451			page = pfn_swap_entry_to_page(entry);
1452		if (pte_marker_entry_uffd_wp(entry))
1453			flags |= PM_UFFD_WP;
1454	}
1455
1456	if (page && !PageAnon(page))
1457		flags |= PM_FILE;
1458	if (page && !migration && page_mapcount(page) == 1)
1459		flags |= PM_MMAP_EXCLUSIVE;
1460	if (vma->vm_flags & VM_SOFTDIRTY)
1461		flags |= PM_SOFT_DIRTY;
1462
1463	return make_pme(frame, flags);
1464}
1465
1466static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1467			     struct mm_walk *walk)
1468{
1469	struct vm_area_struct *vma = walk->vma;
1470	struct pagemapread *pm = walk->private;
1471	spinlock_t *ptl;
1472	pte_t *pte, *orig_pte;
1473	int err = 0;
1474#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1475	bool migration = false;
1476
1477	ptl = pmd_trans_huge_lock(pmdp, vma);
1478	if (ptl) {
1479		u64 flags = 0, frame = 0;
1480		pmd_t pmd = *pmdp;
1481		struct page *page = NULL;
1482
1483		if (vma->vm_flags & VM_SOFTDIRTY)
1484			flags |= PM_SOFT_DIRTY;
1485
1486		if (pmd_present(pmd)) {
1487			page = pmd_page(pmd);
1488
1489			flags |= PM_PRESENT;
1490			if (pmd_soft_dirty(pmd))
1491				flags |= PM_SOFT_DIRTY;
1492			if (pmd_uffd_wp(pmd))
1493				flags |= PM_UFFD_WP;
1494			if (pm->show_pfn)
1495				frame = pmd_pfn(pmd) +
1496					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1497		}
1498#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1499		else if (is_swap_pmd(pmd)) {
1500			swp_entry_t entry = pmd_to_swp_entry(pmd);
1501			unsigned long offset;
1502
1503			if (pm->show_pfn) {
1504				if (is_pfn_swap_entry(entry))
1505					offset = swp_offset_pfn(entry);
1506				else
1507					offset = swp_offset(entry);
1508				offset = offset +
1509					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1510				frame = swp_type(entry) |
1511					(offset << MAX_SWAPFILES_SHIFT);
1512			}
1513			flags |= PM_SWAP;
1514			if (pmd_swp_soft_dirty(pmd))
1515				flags |= PM_SOFT_DIRTY;
1516			if (pmd_swp_uffd_wp(pmd))
1517				flags |= PM_UFFD_WP;
1518			VM_BUG_ON(!is_pmd_migration_entry(pmd));
1519			migration = is_migration_entry(entry);
1520			page = pfn_swap_entry_to_page(entry);
1521		}
1522#endif
1523
1524		if (page && !migration && page_mapcount(page) == 1)
1525			flags |= PM_MMAP_EXCLUSIVE;
1526
1527		for (; addr != end; addr += PAGE_SIZE) {
1528			pagemap_entry_t pme = make_pme(frame, flags);
1529
1530			err = add_to_pagemap(addr, &pme, pm);
1531			if (err)
1532				break;
1533			if (pm->show_pfn) {
1534				if (flags & PM_PRESENT)
1535					frame++;
1536				else if (flags & PM_SWAP)
1537					frame += (1 << MAX_SWAPFILES_SHIFT);
1538			}
1539		}
1540		spin_unlock(ptl);
1541		return err;
1542	}
1543
1544	if (pmd_trans_unstable(pmdp))
1545		return 0;
1546#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1547
1548	/*
1549	 * We can assume that @vma always points to a valid VMA and @end never
1550	 * goes beyond vma->vm_end.
1551	 */
1552	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
1553	for (; addr < end; pte++, addr += PAGE_SIZE) {
1554		pagemap_entry_t pme;
1555
1556		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
1557		err = add_to_pagemap(addr, &pme, pm);
1558		if (err)
1559			break;
1560	}
1561	pte_unmap_unlock(orig_pte, ptl);
1562
1563	cond_resched();
1564
1565	return err;
1566}
1567
1568#ifdef CONFIG_HUGETLB_PAGE
1569/* This function walks within one hugetlb entry in a single call */
1570static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
1571				 unsigned long addr, unsigned long end,
1572				 struct mm_walk *walk)
1573{
1574	struct pagemapread *pm = walk->private;
1575	struct vm_area_struct *vma = walk->vma;
1576	u64 flags = 0, frame = 0;
1577	int err = 0;
1578	pte_t pte;
1579
1580	if (vma->vm_flags & VM_SOFTDIRTY)
1581		flags |= PM_SOFT_DIRTY;
1582
1583	pte = huge_ptep_get(ptep);
1584	if (pte_present(pte)) {
1585		struct page *page = pte_page(pte);
1586
1587		if (!PageAnon(page))
1588			flags |= PM_FILE;
1589
1590		if (page_mapcount(page) == 1)
1591			flags |= PM_MMAP_EXCLUSIVE;
1592
1593		if (huge_pte_uffd_wp(pte))
1594			flags |= PM_UFFD_WP;
1595
1596		flags |= PM_PRESENT;
1597		if (pm->show_pfn)
1598			frame = pte_pfn(pte) +
1599				((addr & ~hmask) >> PAGE_SHIFT);
1600	} else if (pte_swp_uffd_wp_any(pte)) {
1601		flags |= PM_UFFD_WP;
1602	}
1603
1604	for (; addr != end; addr += PAGE_SIZE) {
1605		pagemap_entry_t pme = make_pme(frame, flags);
1606
1607		err = add_to_pagemap(addr, &pme, pm);
1608		if (err)
1609			return err;
1610		if (pm->show_pfn && (flags & PM_PRESENT))
1611			frame++;
1612	}
1613
1614	cond_resched();
1615
1616	return err;
1617}
1618#else
1619#define pagemap_hugetlb_range	NULL
1620#endif /* CONFIG_HUGETLB_PAGE */
1621
1622static const struct mm_walk_ops pagemap_ops = {
1623	.pmd_entry	= pagemap_pmd_range,
1624	.pte_hole	= pagemap_pte_hole,
1625	.hugetlb_entry	= pagemap_hugetlb_range,
1626};
1627
1628/*
1629 * /proc/pid/pagemap - an array mapping virtual pages to pfns
1630 *
1631 * For each page in the address space, this file contains one 64-bit entry
1632 * consisting of the following:
1633 *
1634 * Bits 0-54  page frame number (PFN) if present
1635 * Bits 0-4   swap type if swapped
1636 * Bits 5-54  swap offset if swapped
1637 * Bit  55    pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
1638 * Bit  56    page exclusively mapped
1639 * Bit  57    pte is uffd-wp write-protected
1640 * Bits 58-60 zero
1641 * Bit  61    page is file-page or shared-anon
1642 * Bit  62    page swapped
1643 * Bit  63    page present
1644 *
1645 * If the page is not present but in swap, then the PFN contains an
1646 * encoding of the swap file number and the page's offset into the
1647 * swap. Unmapped pages return a null PFN. This allows determining
1648 * precisely which pages are mapped (or in swap) and comparing mapped
1649 * pages between processes.
1650 *
1651 * Efficient users of this interface will use /proc/pid/maps to
1652 * determine which areas of memory are actually mapped and llseek to
1653 * skip over unmapped regions.
1654 */
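/*
 * Illustration (not part of this file): a user-space consumer of the layout
 * documented above might decode one 64-bit entry as below (the helper name
 * is made up for the example; PFNs are only shown to CAP_SYS_ADMIN readers):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	static void decode_pagemap_entry(uint64_t e)
 *	{
 *		if (e & (1ULL << 63))			// present
 *			printf("pfn %llu%s\n",
 *			       (unsigned long long)(e & ((1ULL << 55) - 1)),
 *			       (e & (1ULL << 56)) ? " exclusive" : "");
 *		else if (e & (1ULL << 62))		// swapped
 *			printf("swap type %llu offset %llu\n",
 *			       (unsigned long long)(e & 0x1f),
 *			       (unsigned long long)((e >> 5) & ((1ULL << 50) - 1)));
 *		else
 *			printf("not present\n");
 *	}
 *
 * The entry for virtual address va lives at file offset (va / PAGE_SIZE) * 8.
 */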
1655static ssize_t pagemap_read(struct file *file, char __user *buf,
1656			    size_t count, loff_t *ppos)
1657{
1658	struct mm_struct *mm = file->private_data;
1659	struct pagemapread pm;
1660	unsigned long src;
1661	unsigned long svpfn;
1662	unsigned long start_vaddr;
1663	unsigned long end_vaddr;
1664	int ret = 0, copied = 0;
1665
1666	if (!mm || !mmget_not_zero(mm))
1667		goto out;
1668
1669	ret = -EINVAL;
1670	/* file position must be aligned */
1671	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1672		goto out_mm;
1673
1674	ret = 0;
1675	if (!count)
1676		goto out_mm;
1677
1678	/* do not disclose physical addresses: attack vector */
1679	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
1680
1681	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1682	pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL);
1683	ret = -ENOMEM;
1684	if (!pm.buffer)
1685		goto out_mm;
1686
1687	src = *ppos;
1688	svpfn = src / PM_ENTRY_BYTES;
1689	end_vaddr = mm->task_size;
1690
1691	/* watch out for wraparound */
1692	start_vaddr = end_vaddr;
1693	if (svpfn <= (ULONG_MAX >> PAGE_SHIFT))
1694		start_vaddr = untagged_addr(svpfn << PAGE_SHIFT);
1695
1696	/* Ensure the address is inside the task */
1697	if (start_vaddr > mm->task_size)
1698		start_vaddr = end_vaddr;
1699
1700	/*
1701	 * The odds are that this will stop walking way
1702	 * before end_vaddr, because the length of the
1703	 * user buffer is tracked in "pm", and the walk
1704	 * will stop when we hit the end of the buffer.
1705	 */
1706	ret = 0;
1707	while (count && (start_vaddr < end_vaddr)) {
1708		int len;
1709		unsigned long end;
1710
1711		pm.pos = 0;
1712		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
1713		/* overflow ? */
1714		if (end < start_vaddr || end > end_vaddr)
1715			end = end_vaddr;
1716		ret = mmap_read_lock_killable(mm);
1717		if (ret)
1718			goto out_free;
1719		ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
1720		mmap_read_unlock(mm);
1721		start_vaddr = end;
1722
1723		len = min(count, PM_ENTRY_BYTES * pm.pos);
1724		if (copy_to_user(buf, pm.buffer, len)) {
1725			ret = -EFAULT;
1726			goto out_free;
1727		}
1728		copied += len;
1729		buf += len;
1730		count -= len;
1731	}
1732	*ppos += copied;
1733	if (!ret || ret == PM_END_OF_BUFFER)
1734		ret = copied;
1735
1736out_free:
1737	kfree(pm.buffer);
1738out_mm:
1739	mmput(mm);
1740out:
1741	return ret;
1742}
1743
1744static int pagemap_open(struct inode *inode, struct file *file)
1745{
1746	struct mm_struct *mm;
1747
1748	mm = proc_mem_open(inode, PTRACE_MODE_READ);
1749	if (IS_ERR(mm))
1750		return PTR_ERR(mm);
1751	file->private_data = mm;
1752	return 0;
1753}
1754
1755static int pagemap_release(struct inode *inode, struct file *file)
1756{
1757	struct mm_struct *mm = file->private_data;
1758
1759	if (mm)
1760		mmdrop(mm);
1761	return 0;
1762}
1763
1764const struct file_operations proc_pagemap_operations = {
1765	.llseek		= mem_lseek, /* borrow this */
1766	.read		= pagemap_read,
1767	.open		= pagemap_open,
1768	.release	= pagemap_release,
1769};
1770#endif /* CONFIG_PROC_PAGE_MONITOR */
1771
1772#ifdef CONFIG_NUMA
1773
1774struct numa_maps {
1775	unsigned long pages;
1776	unsigned long anon;
1777	unsigned long active;
1778	unsigned long writeback;
1779	unsigned long mapcount_max;
1780	unsigned long dirty;
1781	unsigned long swapcache;
1782	unsigned long node[MAX_NUMNODES];
1783};
1784
1785struct numa_maps_private {
1786	struct proc_maps_private proc_maps;
1787	struct numa_maps md;
1788};
1789
1790static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
1791			unsigned long nr_pages)
1792{
1793	int count = page_mapcount(page);
1794
1795	md->pages += nr_pages;
1796	if (pte_dirty || PageDirty(page))
1797		md->dirty += nr_pages;
1798
1799	if (PageSwapCache(page))
1800		md->swapcache += nr_pages;
1801
1802	if (PageActive(page) || PageUnevictable(page))
1803		md->active += nr_pages;
1804
1805	if (PageWriteback(page))
1806		md->writeback += nr_pages;
1807
1808	if (PageAnon(page))
1809		md->anon += nr_pages;
1810
1811	if (count > md->mapcount_max)
1812		md->mapcount_max = count;
1813
1814	md->node[page_to_nid(page)] += nr_pages;
1815}
1816
1817static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1818		unsigned long addr)
1819{
1820	struct page *page;
1821	int nid;
1822
1823	if (!pte_present(pte))
1824		return NULL;
1825
1826	page = vm_normal_page(vma, addr, pte);
1827	if (!page || is_zone_device_page(page))
1828		return NULL;
1829
1830	if (PageReserved(page))
1831		return NULL;
1832
1833	nid = page_to_nid(page);
1834	if (!node_isset(nid, node_states[N_MEMORY]))
1835		return NULL;
1836
1837	return page;
1838}
1839
1840#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1841static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
1842					      struct vm_area_struct *vma,
1843					      unsigned long addr)
1844{
1845	struct page *page;
1846	int nid;
1847
1848	if (!pmd_present(pmd))
1849		return NULL;
1850
1851	page = vm_normal_page_pmd(vma, addr, pmd);
1852	if (!page)
1853		return NULL;
1854
1855	if (PageReserved(page))
1856		return NULL;
1857
1858	nid = page_to_nid(page);
1859	if (!node_isset(nid, node_states[N_MEMORY]))
1860		return NULL;
1861
1862	return page;
1863}
1864#endif
1865
1866static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1867		unsigned long end, struct mm_walk *walk)
1868{
1869	struct numa_maps *md = walk->private;
1870	struct vm_area_struct *vma = walk->vma;
1871	spinlock_t *ptl;
1872	pte_t *orig_pte;
1873	pte_t *pte;
1874
1875#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1876	ptl = pmd_trans_huge_lock(pmd, vma);
1877	if (ptl) {
1878		struct page *page;
1879
1880		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
1881		if (page)
1882			gather_stats(page, md, pmd_dirty(*pmd),
1883				     HPAGE_PMD_SIZE/PAGE_SIZE);
1884		spin_unlock(ptl);
1885		return 0;
1886	}
1887
1888	if (pmd_trans_unstable(pmd))
1889		return 0;
1890#endif
1891	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1892	do {
1893		struct page *page = can_gather_numa_stats(*pte, vma, addr);
1894		if (!page)
1895			continue;
1896		gather_stats(page, md, pte_dirty(*pte), 1);
1897
1898	} while (pte++, addr += PAGE_SIZE, addr != end);
1899	pte_unmap_unlock(orig_pte, ptl);
1900	cond_resched();
1901	return 0;
1902}
1903#ifdef CONFIG_HUGETLB_PAGE
1904static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1905		unsigned long addr, unsigned long end, struct mm_walk *walk)
1906{
1907	pte_t huge_pte = huge_ptep_get(pte);
1908	struct numa_maps *md;
1909	struct page *page;
1910
1911	if (!pte_present(huge_pte))
1912		return 0;
1913
1914	page = pte_page(huge_pte);
1915
1916	md = walk->private;
1917	gather_stats(page, md, pte_dirty(huge_pte), 1);
1918	return 0;
1919}
1920
1921#else
1922static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1923		unsigned long addr, unsigned long end, struct mm_walk *walk)
1924{
1925	return 0;
1926}
1927#endif
1928
1929static const struct mm_walk_ops show_numa_ops = {
1930	.hugetlb_entry = gather_hugetlb_stats,
1931	.pmd_entry = gather_pte_stats,
1932};
1933
1934/*
1935 * Display pages allocated per node and memory policy via /proc.
1936 */
1937static int show_numa_map(struct seq_file *m, void *v)
1938{
1939	struct numa_maps_private *numa_priv = m->private;
1940	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
1941	struct vm_area_struct *vma = v;
1942	struct numa_maps *md = &numa_priv->md;
1943	struct file *file = vma->vm_file;
1944	struct mm_struct *mm = vma->vm_mm;
1945	struct mempolicy *pol;
1946	char buffer[64];
1947	int nid;
1948
1949	if (!mm)
1950		return 0;
1951
1952	/* Ensure we start with an empty set of numa_maps statistics. */
1953	memset(md, 0, sizeof(*md));
1954
1955	pol = __get_vma_policy(vma, vma->vm_start);
1956	if (pol) {
1957		mpol_to_str(buffer, sizeof(buffer), pol);
1958		mpol_cond_put(pol);
1959	} else {
1960		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
1961	}
1962
1963	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1964
1965	if (file) {
1966		seq_puts(m, " file=");
1967		seq_file_path(m, file, "\n\t= ");
1968	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1969		seq_puts(m, " heap");
1970	} else if (is_stack(vma)) {
1971		seq_puts(m, " stack");
1972	}
1973
1974	if (is_vm_hugetlb_page(vma))
1975		seq_puts(m, " huge");
1976
1977	/* mmap_lock is held by m_start */
1978	walk_page_vma(vma, &show_numa_ops, md);
1979
1980	if (!md->pages)
1981		goto out;
1982
1983	if (md->anon)
1984		seq_printf(m, " anon=%lu", md->anon);
1985
1986	if (md->dirty)
1987		seq_printf(m, " dirty=%lu", md->dirty);
1988
1989	if (md->pages != md->anon && md->pages != md->dirty)
1990		seq_printf(m, " mapped=%lu", md->pages);
1991
1992	if (md->mapcount_max > 1)
1993		seq_printf(m, " mapmax=%lu", md->mapcount_max);
1994
1995	if (md->swapcache)
1996		seq_printf(m, " swapcache=%lu", md->swapcache);
1997
1998	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1999		seq_printf(m, " active=%lu", md->active);
2000
2001	if (md->writeback)
2002		seq_printf(m, " writeback=%lu", md->writeback);
2003
2004	for_each_node_state(nid, N_MEMORY)
2005		if (md->node[nid])
2006			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
2007
2008	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
2009out:
2010	seq_putc(m, '\n');
2011	return 0;
2012}
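/*
 * Example line produced by show_numa_map() above (values are illustrative):
 *
 *	7f2a8c000000 default file=/usr/lib/libc.so.6 mapped=12 mapmax=4 N0=8 N1=4 kernelpagesize_kB=4
 *
 * i.e. start address, mempolicy string, an optional file/heap/stack tag,
 * then only the non-zero counters gathered by the walk and the per-node
 * page counts.
 */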
2013
2014static const struct seq_operations proc_pid_numa_maps_op = {
2015	.start  = m_start,
2016	.next   = m_next,
2017	.stop   = m_stop,
2018	.show   = show_numa_map,
2019};
2020
2021static int pid_numa_maps_open(struct inode *inode, struct file *file)
2022{
2023	return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
2024				sizeof(struct numa_maps_private));
2025}
2026
2027const struct file_operations proc_pid_numa_maps_operations = {
2028	.open		= pid_numa_maps_open,
2029	.read		= seq_read,
2030	.llseek		= seq_lseek,
2031	.release	= proc_map_release,
2032};
2033
2034#endif /* CONFIG_NUMA */