mm/gup.c (Linux v4.6)
   1#include <linux/kernel.h>
   2#include <linux/errno.h>
   3#include <linux/err.h>
   4#include <linux/spinlock.h>
   5
   6#include <linux/mm.h>
   7#include <linux/memremap.h>
   8#include <linux/pagemap.h>
   9#include <linux/rmap.h>
  10#include <linux/swap.h>
  11#include <linux/swapops.h>
  12
  13#include <linux/sched.h>
  14#include <linux/rwsem.h>
  15#include <linux/hugetlb.h>
  16
  17#include <asm/mmu_context.h>
  18#include <asm/pgtable.h>
  19#include <asm/tlbflush.h>
  20
  21#include "internal.h"
  22
  23static struct page *no_page_table(struct vm_area_struct *vma,
  24		unsigned int flags)
  25{
  26	/*
  27	 * When core dumping an enormous anonymous area that nobody
  28	 * has touched so far, we don't want to allocate unnecessary pages or
  29	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
  30	 * then get_dump_page() will return NULL to leave a hole in the dump.
  31	 * But we can only make this optimization where a hole would surely
  32	 * be zero-filled if handle_mm_fault() actually did handle it.
  33	 */
  34	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
  35		return ERR_PTR(-EFAULT);
  36	return NULL;
  37}
  38
  39static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
  40		pte_t *pte, unsigned int flags)
  41{
  42	/* No page to get reference */
  43	if (flags & FOLL_GET)
  44		return -EFAULT;
  45
  46	if (flags & FOLL_TOUCH) {
  47		pte_t entry = *pte;
  48
  49		if (flags & FOLL_WRITE)
  50			entry = pte_mkdirty(entry);
  51		entry = pte_mkyoung(entry);
  52
  53		if (!pte_same(*pte, entry)) {
  54			set_pte_at(vma->vm_mm, address, pte, entry);
  55			update_mmu_cache(vma, address, pte);
  56		}
  57	}
  58
  59	/* Proper page table entry exists, but no corresponding struct page */
  60	return -EEXIST;
  61}
  62
  63static struct page *follow_page_pte(struct vm_area_struct *vma,
  64		unsigned long address, pmd_t *pmd, unsigned int flags)
  65{
  66	struct mm_struct *mm = vma->vm_mm;
  67	struct dev_pagemap *pgmap = NULL;
  68	struct page *page;
  69	spinlock_t *ptl;
  70	pte_t *ptep, pte;
  71
  72retry:
  73	if (unlikely(pmd_bad(*pmd)))
  74		return no_page_table(vma, flags);
  75
  76	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
  77	pte = *ptep;
  78	if (!pte_present(pte)) {
  79		swp_entry_t entry;
  80		/*
  81		 * KSM's break_ksm() relies upon recognizing a ksm page
  82		 * even while it is being migrated, so for that case we
  83		 * need migration_entry_wait().
  84		 */
  85		if (likely(!(flags & FOLL_MIGRATION)))
  86			goto no_page;
  87		if (pte_none(pte))
  88			goto no_page;
  89		entry = pte_to_swp_entry(pte);
  90		if (!is_migration_entry(entry))
  91			goto no_page;
  92		pte_unmap_unlock(ptep, ptl);
  93		migration_entry_wait(mm, pmd, address);
  94		goto retry;
  95	}
  96	if ((flags & FOLL_NUMA) && pte_protnone(pte))
  97		goto no_page;
  98	if ((flags & FOLL_WRITE) && !pte_write(pte)) {
  99		pte_unmap_unlock(ptep, ptl);
 100		return NULL;
 101	}
 102
 103	page = vm_normal_page(vma, address, pte);
 104	if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
 105		/*
 106		 * Only return device mapping pages in the FOLL_GET case since
 107		 * they are only valid while holding the pgmap reference.
 108		 */
 109		pgmap = get_dev_pagemap(pte_pfn(pte), NULL);
 110		if (pgmap)
 111			page = pte_page(pte);
 112		else
 113			goto no_page;
 114	} else if (unlikely(!page)) {
 115		if (flags & FOLL_DUMP) {
 116			/* Avoid special (like zero) pages in core dumps */
 117			page = ERR_PTR(-EFAULT);
 118			goto out;
 119		}
 120
 121		if (is_zero_pfn(pte_pfn(pte))) {
 122			page = pte_page(pte);
 123		} else {
 124			int ret;
 125
 126			ret = follow_pfn_pte(vma, address, ptep, flags);
 127			page = ERR_PTR(ret);
 128			goto out;
 129		}
 130	}
 131
 132	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
 133		int ret;
 134		get_page(page);
 135		pte_unmap_unlock(ptep, ptl);
 136		lock_page(page);
 137		ret = split_huge_page(page);
 138		unlock_page(page);
 139		put_page(page);
 140		if (ret)
 141			return ERR_PTR(ret);
 142		goto retry;
 143	}
 144
 145	if (flags & FOLL_GET) {
 146		get_page(page);
 147
 148		/* drop the pgmap reference now that we hold the page */
 149		if (pgmap) {
 150			put_dev_pagemap(pgmap);
 151			pgmap = NULL;
 152		}
 153	}
 154	if (flags & FOLL_TOUCH) {
 155		if ((flags & FOLL_WRITE) &&
 156		    !pte_dirty(pte) && !PageDirty(page))
 157			set_page_dirty(page);
 158		/*
 159		 * pte_mkyoung() would be more correct here, but atomic care
 160		 * is needed to avoid losing the dirty bit: it is easier to use
 161		 * mark_page_accessed().
 162		 */
 163		mark_page_accessed(page);
 164	}
 165	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
 166		/* Do not mlock pte-mapped THP */
 167		if (PageTransCompound(page))
 168			goto out;
 169
 170		/*
 171		 * The preliminary mapping check is mainly to avoid the
 172		 * pointless overhead of lock_page on the ZERO_PAGE
 173		 * which might bounce very badly if there is contention.
 174		 *
 175		 * If the page is already locked, we don't need to
 176		 * handle it now - vmscan will handle it later if and
 177		 * when it attempts to reclaim the page.
 178		 */
 179		if (page->mapping && trylock_page(page)) {
 180			lru_add_drain();  /* push cached pages to LRU */
 181			/*
 182			 * Because we lock page here, and migration is
 183			 * blocked by the pte's page reference, and we
 184			 * know the page is still mapped, we don't even
 185			 * need to check for file-cache page truncation.
 186			 */
 187			mlock_vma_page(page);
 188			unlock_page(page);
 189		}
 190	}
 191out:
 192	pte_unmap_unlock(ptep, ptl);
 193	return page;
 194no_page:
 195	pte_unmap_unlock(ptep, ptl);
 196	if (!pte_none(pte))
 197		return NULL;
 198	return no_page_table(vma, flags);
 199}
 200
 201/**
 202 * follow_page_mask - look up a page descriptor from a user-virtual address
 203 * @vma: vm_area_struct mapping @address
 204 * @address: virtual address to look up
 205 * @flags: flags modifying lookup behaviour
 206 * @page_mask: on output, *page_mask is set according to the size of the page
 207 *
 208 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 209 *
 210 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 211 * an error pointer if there is a mapping to something not represented
 212 * by a page descriptor (see also vm_normal_page()).
 213 */
 214struct page *follow_page_mask(struct vm_area_struct *vma,
 215			      unsigned long address, unsigned int flags,
 216			      unsigned int *page_mask)
 217{
 218	pgd_t *pgd;
 219	pud_t *pud;
 220	pmd_t *pmd;
 221	spinlock_t *ptl;
 222	struct page *page;
 223	struct mm_struct *mm = vma->vm_mm;
 224
 225	*page_mask = 0;
 226
 227	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
 228	if (!IS_ERR(page)) {
 229		BUG_ON(flags & FOLL_GET);
 230		return page;
 231	}
 232
 233	pgd = pgd_offset(mm, address);
 234	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
 235		return no_page_table(vma, flags);
 236
 237	pud = pud_offset(pgd, address);
 238	if (pud_none(*pud))
 239		return no_page_table(vma, flags);
 240	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
 241		page = follow_huge_pud(mm, address, pud, flags);
 242		if (page)
 243			return page;
 244		return no_page_table(vma, flags);
 245	}
 246	if (unlikely(pud_bad(*pud)))
 247		return no_page_table(vma, flags);
 248
 249	pmd = pmd_offset(pud, address);
 250	if (pmd_none(*pmd))
 251		return no_page_table(vma, flags);
 252	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
 253		page = follow_huge_pmd(mm, address, pmd, flags);
 254		if (page)
 255			return page;
 256		return no_page_table(vma, flags);
 257	}
 258	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
 259		return no_page_table(vma, flags);
 260	if (pmd_devmap(*pmd)) {
 261		ptl = pmd_lock(mm, pmd);
 262		page = follow_devmap_pmd(vma, address, pmd, flags);
 263		spin_unlock(ptl);
 264		if (page)
 265			return page;
 266	}
 267	if (likely(!pmd_trans_huge(*pmd)))
 268		return follow_page_pte(vma, address, pmd, flags);
 269
 270	ptl = pmd_lock(mm, pmd);
 271	if (unlikely(!pmd_trans_huge(*pmd))) {
 272		spin_unlock(ptl);
 273		return follow_page_pte(vma, address, pmd, flags);
 274	}
 275	if (flags & FOLL_SPLIT) {
 276		int ret;
 277		page = pmd_page(*pmd);
 278		if (is_huge_zero_page(page)) {
 279			spin_unlock(ptl);
 280			ret = 0;
 281			split_huge_pmd(vma, pmd, address);
 282		} else {
 283			get_page(page);
 284			spin_unlock(ptl);
 285			lock_page(page);
 286			ret = split_huge_page(page);
 287			unlock_page(page);
 288			put_page(page);
 289		}
 290
 291		return ret ? ERR_PTR(ret) :
 292			follow_page_pte(vma, address, pmd, flags);
 293	}
 294
 295	page = follow_trans_huge_pmd(vma, address, pmd, flags);
 296	spin_unlock(ptl);
 297	*page_mask = HPAGE_PMD_NR - 1;
 298	return page;
 299}
 300
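As an aside on the interface above: most callers only want a single page and do not care about the huge-page mask. A hedged sketch of such a wrapper, in the spirit of the follow_page() helper, is shown below; it is not part of the kernel source listed here and the demo_ name is hypothetical.

static inline struct page *demo_follow_page(struct vm_area_struct *vma,
		unsigned long address, unsigned int foll_flags)
{
	unsigned int unused_page_mask;

	/* the page mask only matters to callers that batch huge pages */
	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
}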
 301static int get_gate_page(struct mm_struct *mm, unsigned long address,
 302		unsigned int gup_flags, struct vm_area_struct **vma,
 303		struct page **page)
 304{
 305	pgd_t *pgd;
 306	pud_t *pud;
 307	pmd_t *pmd;
 308	pte_t *pte;
 309	int ret = -EFAULT;
 310
 311	/* user gate pages are read-only */
 312	if (gup_flags & FOLL_WRITE)
 313		return -EFAULT;
 314	if (address > TASK_SIZE)
 315		pgd = pgd_offset_k(address);
 316	else
 317		pgd = pgd_offset_gate(mm, address);
 318	BUG_ON(pgd_none(*pgd));
 319	pud = pud_offset(pgd, address);
 320	BUG_ON(pud_none(*pud));
 321	pmd = pmd_offset(pud, address);
 322	if (pmd_none(*pmd))
 323		return -EFAULT;
 324	VM_BUG_ON(pmd_trans_huge(*pmd));
 325	pte = pte_offset_map(pmd, address);
 326	if (pte_none(*pte))
 327		goto unmap;
 328	*vma = get_gate_vma(mm);
 329	if (!page)
 330		goto out;
 331	*page = vm_normal_page(*vma, address, *pte);
 332	if (!*page) {
 333		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
 334			goto unmap;
 335		*page = pte_page(*pte);
 336	}
 337	get_page(*page);
 338out:
 339	ret = 0;
 340unmap:
 341	pte_unmap(pte);
 342	return ret;
 343}
 344
 345/*
 346 * mmap_sem must be held on entry.  If @nonblocking != NULL and
 347 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 348 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 349 */
 350static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 351		unsigned long address, unsigned int *flags, int *nonblocking)
 352{
 353	struct mm_struct *mm = vma->vm_mm;
 354	unsigned int fault_flags = 0;
 355	int ret;
 356
 357	/* mlock all present pages, but do not fault in new pages */
 358	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
 359		return -ENOENT;
 360	/* For mm_populate(), just skip the stack guard page. */
 361	if ((*flags & FOLL_POPULATE) &&
 362			(stack_guard_page_start(vma, address) ||
 363			 stack_guard_page_end(vma, address + PAGE_SIZE)))
 364		return -ENOENT;
 365	if (*flags & FOLL_WRITE)
 366		fault_flags |= FAULT_FLAG_WRITE;
 367	if (*flags & FOLL_REMOTE)
 368		fault_flags |= FAULT_FLAG_REMOTE;
 369	if (nonblocking)
 370		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
 371	if (*flags & FOLL_NOWAIT)
 372		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
 373	if (*flags & FOLL_TRIED) {
 374		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
 375		fault_flags |= FAULT_FLAG_TRIED;
 376	}
 377
 378	ret = handle_mm_fault(mm, vma, address, fault_flags);
 379	if (ret & VM_FAULT_ERROR) {
 380		if (ret & VM_FAULT_OOM)
 381			return -ENOMEM;
 382		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
 383			return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
 384		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
 385			return -EFAULT;
 386		BUG();
 387	}
 388
 389	if (tsk) {
 390		if (ret & VM_FAULT_MAJOR)
 391			tsk->maj_flt++;
 392		else
 393			tsk->min_flt++;
 394	}
 395
 396	if (ret & VM_FAULT_RETRY) {
 397		if (nonblocking)
 398			*nonblocking = 0;
 399		return -EBUSY;
 400	}
 401
 402	/*
 403	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
 404	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
 405	 * can thus safely do subsequent page lookups as if they were reads.
 406	 * But only do so when looping for pte_write is futile: in some cases
 407	 * userspace may also be wanting to write to the gotten user page,
 408	 * which a read fault here might prevent (a readonly page might get
 409	 * reCOWed by userspace write).
 410	 */
 411	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
 412		*flags &= ~FOLL_WRITE;
 413	return 0;
 414}
 415
 416static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
 417{
 418	vm_flags_t vm_flags = vma->vm_flags;
 419	int write = (gup_flags & FOLL_WRITE);
 420	int foreign = (gup_flags & FOLL_REMOTE);
 421
 422	if (vm_flags & (VM_IO | VM_PFNMAP))
 423		return -EFAULT;
 424
 425	if (write) {
 426		if (!(vm_flags & VM_WRITE)) {
 427			if (!(gup_flags & FOLL_FORCE))
 428				return -EFAULT;
 429			/*
 430			 * We used to let the write,force case do COW in a
 431			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
 432			 * set a breakpoint in a read-only mapping of an
 433			 * executable, without corrupting the file (yet only
 434			 * when that file had been opened for writing!).
 435			 * Anon pages in shared mappings are surprising: now
 436			 * just reject it.
 437			 */
 438			if (!is_cow_mapping(vm_flags))
 439				return -EFAULT;
 440		}
 441	} else if (!(vm_flags & VM_READ)) {
 442		if (!(gup_flags & FOLL_FORCE))
 443			return -EFAULT;
 444		/*
 445		 * Is there actually any vma we can reach here which does not
 446		 * have VM_MAYREAD set?
 447		 */
 448		if (!(vm_flags & VM_MAYREAD))
 449			return -EFAULT;
 450	}
 451	/*
 452	 * gups are always data accesses, not instruction
 453	 * fetches, so execute=false here
 454	 */
 455	if (!arch_vma_access_permitted(vma, write, false, foreign))
 456		return -EFAULT;
 457	return 0;
 458}
 459
 460/**
 461 * __get_user_pages() - pin user pages in memory
 462 * @tsk:	task_struct of target task
 463 * @mm:		mm_struct of target mm
 464 * @start:	starting user address
 465 * @nr_pages:	number of pages from start to pin
 466 * @gup_flags:	flags modifying pin behaviour
 467 * @pages:	array that receives pointers to the pages pinned.
 468 *		Should be at least nr_pages long. Or NULL, if caller
 469 *		only intends to ensure the pages are faulted in.
 470 * @vmas:	array of pointers to vmas corresponding to each page.
 471 *		Or NULL if the caller does not require them.
 472 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 473 *
 474 * Returns number of pages pinned. This may be fewer than the number
 475 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 476 * were pinned, returns -errno. Each page returned must be released
 477 * with a put_page() call when it is finished with. vmas will only
 478 * remain valid while mmap_sem is held.
 479 *
 480 * Must be called with mmap_sem held.  It may be released.  See below.
 481 *
 482 * __get_user_pages walks a process's page tables and takes a reference to
 483 * each struct page that each user address corresponds to at a given
 484 * instant. That is, it takes the page that would be accessed if a user
 485 * thread accesses the given user virtual address at that instant.
 486 *
 487 * This does not guarantee that the page exists in the user mappings when
 488 * __get_user_pages returns, and there may even be a completely different
 489 * page there in some cases (eg. if mmapped pagecache has been invalidated
 490 * and subsequently re faulted). However it does guarantee that the page
 491 * won't be freed completely. And mostly callers simply care that the page
 492 * contains data that was valid *at some point in time*. Typically, an IO
 493 * or similar operation cannot guarantee anything stronger anyway because
 494 * locks can't be held over the syscall boundary.
 495 *
 496 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 497 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 498 * appropriate) must be called after the page is finished with, and
 499 * before put_page is called.
 500 *
 501 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 502 * or mmap_sem contention, and if waiting is needed to pin all pages,
 503 * *@nonblocking will be set to 0.  Further, if @gup_flags does not
 504 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 505 * this case.
 506 *
 507 * A caller using such a combination of @nonblocking and @gup_flags
 508 * must therefore hold the mmap_sem for reading only, and recognize
 509 * when it's been released.  Otherwise, it must be held for either
 510 * reading or writing and will not be released.
 511 *
 512 * In most cases, get_user_pages or get_user_pages_fast should be used
 513 * instead of __get_user_pages. __get_user_pages should be used only if
 514 * you need some special @gup_flags.
 515 */
 516long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 517		unsigned long start, unsigned long nr_pages,
 518		unsigned int gup_flags, struct page **pages,
 519		struct vm_area_struct **vmas, int *nonblocking)
 520{
 521	long i = 0;
 522	unsigned int page_mask;
 523	struct vm_area_struct *vma = NULL;
 524
 525	if (!nr_pages)
 526		return 0;
 527
 528	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
 529
 530	/*
 531	 * If FOLL_FORCE is set then do not force a full fault as the hinting
 532	 * fault information is unrelated to the reference behaviour of a task
 533	 * using the address space
 534	 */
 535	if (!(gup_flags & FOLL_FORCE))
 536		gup_flags |= FOLL_NUMA;
 537
 538	do {
 539		struct page *page;
 540		unsigned int foll_flags = gup_flags;
 541		unsigned int page_increm;
 542
 543		/* first iteration or cross vma bound */
 544		if (!vma || start >= vma->vm_end) {
 545			vma = find_extend_vma(mm, start);
 546			if (!vma && in_gate_area(mm, start)) {
 547				int ret;
 548				ret = get_gate_page(mm, start & PAGE_MASK,
 549						gup_flags, &vma,
 550						pages ? &pages[i] : NULL);
 551				if (ret)
 552					return i ? : ret;
 553				page_mask = 0;
 554				goto next_page;
 555			}
 556
 557			if (!vma || check_vma_flags(vma, gup_flags))
 558				return i ? : -EFAULT;
 559			if (is_vm_hugetlb_page(vma)) {
 560				i = follow_hugetlb_page(mm, vma, pages, vmas,
 561						&start, &nr_pages, i,
 562						gup_flags);
 563				continue;
 564			}
 565		}
 566retry:
 567		/*
 568		 * If we have a pending SIGKILL, don't keep faulting pages and
 569		 * potentially allocating memory.
 570		 */
 571		if (unlikely(fatal_signal_pending(current)))
 572			return i ? i : -ERESTARTSYS;
 573		cond_resched();
 574		page = follow_page_mask(vma, start, foll_flags, &page_mask);
 575		if (!page) {
 576			int ret;
 577			ret = faultin_page(tsk, vma, start, &foll_flags,
 578					nonblocking);
 579			switch (ret) {
 580			case 0:
 581				goto retry;
 582			case -EFAULT:
 583			case -ENOMEM:
 584			case -EHWPOISON:
 585				return i ? i : ret;
 586			case -EBUSY:
 587				return i;
 588			case -ENOENT:
 589				goto next_page;
 590			}
 591			BUG();
 592		} else if (PTR_ERR(page) == -EEXIST) {
 593			/*
 594			 * Proper page table entry exists, but no corresponding
 595			 * struct page.
 596			 */
 597			goto next_page;
 598		} else if (IS_ERR(page)) {
 599			return i ? i : PTR_ERR(page);
 600		}
 601		if (pages) {
 602			pages[i] = page;
 603			flush_anon_page(vma, page, start);
 604			flush_dcache_page(page);
 605			page_mask = 0;
 606		}
 607next_page:
 608		if (vmas) {
 609			vmas[i] = vma;
 610			page_mask = 0;
 611		}
 612		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
 613		if (page_increm > nr_pages)
 614			page_increm = nr_pages;
 615		i += page_increm;
 616		start += page_increm * PAGE_SIZE;
 617		nr_pages -= page_increm;
 618	} while (nr_pages);
 619	return i;
 620}
 621EXPORT_SYMBOL(__get_user_pages);
 622
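To make the calling convention documented above concrete, here is a minimal hedged sketch of a caller (not part of the kernel source listed here; demo_pin_range is hypothetical): mmap_sem is held for read, FOLL_GET matches the non-NULL pages array, and each returned page must eventually be released with put_page().

static long demo_pin_range(unsigned long start, unsigned long nr_pages,
			   struct page **pages)
{
	long pinned;

	down_read(&current->mm->mmap_sem);
	pinned = __get_user_pages(current, current->mm, start, nr_pages,
				  FOLL_GET | FOLL_TOUCH, pages, NULL, NULL);
	up_read(&current->mm->mmap_sem);

	/* caller must put_page() each of pages[0..pinned-1] when done */
	return pinned;
}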
 623bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags)
 624{
 625	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
 626	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
 627	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
 628
 629	if (!(vm_flags & vma->vm_flags))
 630		return false;
 631
 632	/*
 633	 * The architecture might have a hardware protection
 634	 * mechanism other than read/write that can deny access.
 635	 *
 636	 * gup always represents data access, not instruction
 637	 * fetches, so execute=false here:
 638	 */
 639	if (!arch_vma_access_permitted(vma, write, false, foreign))
 640		return false;
 641
 642	return true;
 643}
 644
 645/*
 646 * fixup_user_fault() - manually resolve a user page fault
 647 * @tsk:	the task_struct to use for page fault accounting, or
 648 *		NULL if faults are not to be recorded.
 649 * @mm:		mm_struct of target mm
 650 * @address:	user address
 651 * @fault_flags:flags to pass down to handle_mm_fault()
 652 * @unlocked:	did we unlock the mmap_sem while retrying, maybe NULL if caller
 653 *		does not allow retry
 654 *
 655 * This is meant to be called in the specific scenario where for locking reasons
 656 * we try to access user memory in atomic context (within a pagefault_disable()
 657 * section), this returns -EFAULT, and we want to resolve the user fault before
 658 * trying again.
 659 *
 660 * Typically this is meant to be used by the futex code.
 661 *
 662 * The main difference with get_user_pages() is that this function will
 663 * unconditionally call handle_mm_fault() which will in turn perform all the
 664 * necessary SW fixup of the dirty and young bits in the PTE, while
 665 * get_user_pages() only guarantees to update these in the struct page.
 666 *
 667 * This is important for some architectures where those bits also gate the
 668 * access permission to the page because they are maintained in software.  On
 669 * such architectures, gup() will not be enough to make a subsequent access
 670 * succeed.
 671 *
 672 * This function will not return with an unlocked mmap_sem. So it has not the
 673 * same semantics wrt the @mm->mmap_sem as does filemap_fault().
 674 */
 675int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 676		     unsigned long address, unsigned int fault_flags,
 677		     bool *unlocked)
 678{
 679	struct vm_area_struct *vma;
 680	int ret, major = 0;
 681
 682	if (unlocked)
 683		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
 684
 685retry:
 686	vma = find_extend_vma(mm, address);
 687	if (!vma || address < vma->vm_start)
 688		return -EFAULT;
 689
 690	if (!vma_permits_fault(vma, fault_flags))
 691		return -EFAULT;
 692
 693	ret = handle_mm_fault(mm, vma, address, fault_flags);
 694	major |= ret & VM_FAULT_MAJOR;
 695	if (ret & VM_FAULT_ERROR) {
 696		if (ret & VM_FAULT_OOM)
 697			return -ENOMEM;
 698		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
 699			return -EHWPOISON;
 700		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
 701			return -EFAULT;
 702		BUG();
 703	}
 704
 705	if (ret & VM_FAULT_RETRY) {
 706		down_read(&mm->mmap_sem);
 707		if (!(fault_flags & FAULT_FLAG_TRIED)) {
 708			*unlocked = true;
 709			fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
 710			fault_flags |= FAULT_FLAG_TRIED;
 711			goto retry;
 712		}
 713	}
 714
 715	if (tsk) {
 716		if (major)
 717			tsk->maj_flt++;
 718		else
 719			tsk->min_flt++;
 720	}
 721	return 0;
 722}
 723
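A hedged sketch of the futex-style pattern described in the comment above (not part of the kernel source listed here; demo_read_user_word is hypothetical and would also need <linux/uaccess.h> for __get_user() and pagefault_disable()): the access is first attempted with page faults disabled, and only on -EFAULT is the fault resolved via fixup_user_fault() under mmap_sem before retrying.

static int demo_read_user_word(u32 __user *uaddr, u32 *val)
{
	int ret;

	do {
		pagefault_disable();
		ret = __get_user(*val, uaddr);
		pagefault_enable();
		if (!ret)
			return 0;

		/* resolve the fault, then try the atomic access again */
		down_read(&current->mm->mmap_sem);
		ret = fixup_user_fault(current, current->mm,
				       (unsigned long)uaddr, 0, NULL);
		up_read(&current->mm->mmap_sem);
	} while (!ret);

	return ret;
}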
 724static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 725						struct mm_struct *mm,
 726						unsigned long start,
 727						unsigned long nr_pages,
 728						int write, int force,
 729						struct page **pages,
 730						struct vm_area_struct **vmas,
 731						int *locked, bool notify_drop,
 732						unsigned int flags)
 733{
 734	long ret, pages_done;
 735	bool lock_dropped;
 736
 737	if (locked) {
 738		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
 739		BUG_ON(vmas);
 740		/* check caller initialized locked */
 741		BUG_ON(*locked != 1);
 742	}
 743
 744	if (pages)
 745		flags |= FOLL_GET;
 746	if (write)
 747		flags |= FOLL_WRITE;
 748	if (force)
 749		flags |= FOLL_FORCE;
 750
 751	pages_done = 0;
 752	lock_dropped = false;
 753	for (;;) {
 754		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
 755				       vmas, locked);
 756		if (!locked)
 757			/* VM_FAULT_RETRY couldn't trigger, bypass */
 758			return ret;
 759
 760		/* VM_FAULT_RETRY cannot return errors */
 761		if (!*locked) {
 762			BUG_ON(ret < 0);
 763			BUG_ON(ret >= nr_pages);
 764		}
 765
 766		if (!pages)
 767			/* If it's a prefault don't insist harder */
 768			return ret;
 769
 770		if (ret > 0) {
 771			nr_pages -= ret;
 772			pages_done += ret;
 773			if (!nr_pages)
 774				break;
 775		}
 776		if (*locked) {
 777			/* VM_FAULT_RETRY didn't trigger */
 778			if (!pages_done)
 779				pages_done = ret;
 780			break;
 781		}
 782		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
 783		pages += ret;
 784		start += ret << PAGE_SHIFT;
 785
 786		/*
 787		 * Repeat on the address that fired VM_FAULT_RETRY
 788		 * without FAULT_FLAG_ALLOW_RETRY but with
 789		 * FAULT_FLAG_TRIED.
 790		 */
 791		*locked = 1;
 792		lock_dropped = true;
 793		down_read(&mm->mmap_sem);
 794		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
 795				       pages, NULL, NULL);
 796		if (ret != 1) {
 797			BUG_ON(ret > 1);
 798			if (!pages_done)
 799				pages_done = ret;
 800			break;
 801		}
 802		nr_pages--;
 803		pages_done++;
 804		if (!nr_pages)
 805			break;
 806		pages++;
 807		start += PAGE_SIZE;
 808	}
 809	if (notify_drop && lock_dropped && *locked) {
 810		/*
 811		 * We must let the caller know we temporarily dropped the lock
 812		 * and so the critical section protected by it was lost.
 813		 */
 814		up_read(&mm->mmap_sem);
 815		*locked = 0;
 816	}
 817	return pages_done;
 818}
 819
 820/*
 821 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 822 * paths better by using either get_user_pages_locked() or
 823 * get_user_pages_unlocked().
 824 *
 825 * get_user_pages_locked() is suitable to replace the form:
 826 *
 827 *      down_read(&mm->mmap_sem);
 828 *      do_something()
 829 *      get_user_pages(tsk, mm, ..., pages, NULL);
 830 *      up_read(&mm->mmap_sem);
 831 *
 832 *  to:
 833 *
 834 *      int locked = 1;
 835 *      down_read(&mm->mmap_sem);
 836 *      do_something()
 837 *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
 838 *      if (locked)
 839 *          up_read(&mm->mmap_sem);
 840 */
 841long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 842			   int write, int force, struct page **pages,
 843			   int *locked)
 844{
 845	return __get_user_pages_locked(current, current->mm, start, nr_pages,
 846				       write, force, pages, NULL, locked, true,
 847				       FOLL_TOUCH);
 848}
 849EXPORT_SYMBOL(get_user_pages_locked);
 850
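For the v4.6 prototype just above, the replacement pattern from the comment comes out as the following hedged sketch (not part of the kernel source listed here; demo_gup_locked and its parameters are hypothetical):

static long demo_gup_locked(unsigned long start, unsigned long nr,
			    struct page **pages)
{
	int locked = 1;
	long ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages_locked(start, nr, 0, 0, pages, &locked);
	/* if *locked is now 0, gup already dropped mmap_sem for us */
	if (locked)
		up_read(&current->mm->mmap_sem);

	return ret;
}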
 851/*
 852 * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows to
 853 * pass additional gup_flags as last parameter (like FOLL_HWPOISON).
 854 *
 855 * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
 856 * caller if required (just like with __get_user_pages). "FOLL_GET",
 857 * "FOLL_WRITE" and "FOLL_FORCE" are set implicitly as needed
 858 * according to the parameters "pages", "write", "force"
 859 * respectively.
 860 */
 861__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 862					       unsigned long start, unsigned long nr_pages,
 863					       int write, int force, struct page **pages,
 864					       unsigned int gup_flags)
 865{
 866	long ret;
 867	int locked = 1;
 868	down_read(&mm->mmap_sem);
 869	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
 870				      pages, NULL, &locked, false, gup_flags);
 871	if (locked)
 872		up_read(&mm->mmap_sem);
 873	return ret;
 874}
 875EXPORT_SYMBOL(__get_user_pages_unlocked);
 876
 877/*
 878 * get_user_pages_unlocked() is suitable to replace the form:
 879 *
 880 *      down_read(&mm->mmap_sem);
 881 *      get_user_pages(tsk, mm, ..., pages, NULL);
 882 *      up_read(&mm->mmap_sem);
 883 *
 884 *  with:
 885 *
 886 *      get_user_pages_unlocked(tsk, mm, ..., pages);
 887 *
 888 * It is functionally equivalent to get_user_pages_fast so
 889 * get_user_pages_fast should be used instead, if the two parameters
 890 * "tsk" and "mm" are respectively equal to current and current->mm,
 891 * or if "force" shall be set to 1 (get_user_pages_fast misses the
 892 * "force" parameter).
 893 */
 894long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 895			     int write, int force, struct page **pages)
 896{
 897	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
 898					 write, force, pages, FOLL_TOUCH);
 899}
 900EXPORT_SYMBOL(get_user_pages_unlocked);
 901
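With the unlocked variant the call site needs no mmap_sem handling at all; a brief hedged sketch (not part of the kernel source listed here, demo_ names hypothetical):

static long demo_gup_unlocked(unsigned long start, unsigned long nr,
			      struct page **pages)
{
	/* read-only access (write = 0), no FOLL_FORCE (force = 0) */
	return get_user_pages_unlocked(start, nr, 0, 0, pages);
}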
 902/*
 903 * get_user_pages_remote() - pin user pages in memory
 904 * @tsk:	the task_struct to use for page fault accounting, or
 905 *		NULL if faults are not to be recorded.
 906 * @mm:		mm_struct of target mm
 907 * @start:	starting user address
 908 * @nr_pages:	number of pages from start to pin
 909 * @write:	whether pages will be written to by the caller
 910 * @force:	whether to force access even when user mapping is currently
 911 *		protected (but never forces write access to shared mapping).
 912 * @pages:	array that receives pointers to the pages pinned.
 913 *		Should be at least nr_pages long. Or NULL, if caller
 914 *		only intends to ensure the pages are faulted in.
 915 * @vmas:	array of pointers to vmas corresponding to each page.
 916 *		Or NULL if the caller does not require them.
 917 *
 918 * Returns number of pages pinned. This may be fewer than the number
 919 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 920 * were pinned, returns -errno. Each page returned must be released
 921 * with a put_page() call when it is finished with. vmas will only
 922 * remain valid while mmap_sem is held.
 923 *
 924 * Must be called with mmap_sem held for read or write.
 925 *
 926 * get_user_pages walks a process's page tables and takes a reference to
 927 * each struct page that each user address corresponds to at a given
 928 * instant. That is, it takes the page that would be accessed if a user
 929 * thread accesses the given user virtual address at that instant.
 930 *
 931 * This does not guarantee that the page exists in the user mappings when
 932 * get_user_pages returns, and there may even be a completely different
 933 * page there in some cases (eg. if mmapped pagecache has been invalidated
 934 * and subsequently re faulted). However it does guarantee that the page
 935 * won't be freed completely. And mostly callers simply care that the page
 936 * contains data that was valid *at some point in time*. Typically, an IO
 937 * or similar operation cannot guarantee anything stronger anyway because
 938 * locks can't be held over the syscall boundary.
 939 *
 940 * If write=0, the page must not be written to. If the page is written to,
 941 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
 942 * after the page is finished with, and before put_page is called.
 943 *
 944 * get_user_pages is typically used for fewer-copy IO operations, to get a
 945 * handle on the memory by some means other than accesses via the user virtual
 946 * addresses. The pages may be submitted for DMA to devices or accessed via
 947 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 948 * use the correct cache flushing APIs.
 949 *
 950 * See also get_user_pages_fast, for performance critical applications.
 951 *
 952 * get_user_pages should be phased out in favor of
 953 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
 954 * should use get_user_pages because it cannot pass
 955 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
 956 */
 957long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 958		unsigned long start, unsigned long nr_pages,
 959		int write, int force, struct page **pages,
 960		struct vm_area_struct **vmas)
 961{
 962	return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
 963				       pages, vmas, NULL, false,
 964				       FOLL_TOUCH | FOLL_REMOTE);
 965}
 966EXPORT_SYMBOL(get_user_pages_remote);
 967
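A hedged sketch of the remote case (not part of the kernel source listed here; demo_pin_remote_page is hypothetical): pinning one page in another task's address space, in the style of access_process_vm callers, with the caller assumed to already hold a reference on mm.

static struct page *demo_pin_remote_page(struct task_struct *tsk,
					 struct mm_struct *mm,
					 unsigned long addr, int write)
{
	struct page *page;
	long ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages_remote(tsk, mm, addr, 1, write, 0, &page, NULL);
	up_read(&mm->mmap_sem);

	/* on success the caller owns one reference and must put_page() it */
	return ret == 1 ? page : NULL;
}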
 968/*
 969 * This is the same as get_user_pages_remote(), just with a
 970 * less-flexible calling convention where we assume that the task
 971 * and mm being operated on are the current task's.  We also
 972 * obviously don't pass FOLL_REMOTE in here.
 973 */
 974long get_user_pages(unsigned long start, unsigned long nr_pages,
 975		int write, int force, struct page **pages,
 976		struct vm_area_struct **vmas)
 977{
 978	return __get_user_pages_locked(current, current->mm, start, nr_pages,
 979				       write, force, pages, vmas, NULL, false,
 980				       FOLL_TOUCH);
 981}
 982EXPORT_SYMBOL(get_user_pages);
 983
 984/**
 985 * populate_vma_page_range() -  populate a range of pages in the vma.
 986 * @vma:   target vma
 987 * @start: start address
 988 * @end:   end address
 989 * @nonblocking:
 990 *
 991 * This takes care of mlocking the pages too if VM_LOCKED is set.
 992 *
 993 * return 0 on success, negative error code on error.
 994 *
 995 * vma->vm_mm->mmap_sem must be held.
 996 *
 997 * If @nonblocking is NULL, it may be held for read or write and will
 998 * be unperturbed.
 999 *
1000 * If @nonblocking is non-NULL, it must held for read only and may be
1001 * released.  If it's released, *@nonblocking will be set to 0.
1002 */
1003long populate_vma_page_range(struct vm_area_struct *vma,
1004		unsigned long start, unsigned long end, int *nonblocking)
1005{
1006	struct mm_struct *mm = vma->vm_mm;
1007	unsigned long nr_pages = (end - start) / PAGE_SIZE;
1008	int gup_flags;
1009
1010	VM_BUG_ON(start & ~PAGE_MASK);
1011	VM_BUG_ON(end   & ~PAGE_MASK);
1012	VM_BUG_ON_VMA(start < vma->vm_start, vma);
1013	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
1014	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
1015
1016	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
1017	if (vma->vm_flags & VM_LOCKONFAULT)
1018		gup_flags &= ~FOLL_POPULATE;
1019	/*
1020	 * We want to touch writable mappings with a write fault in order
1021	 * to break COW, except for shared mappings because these don't COW
1022	 * and we would not want to dirty them for nothing.
1023	 */
1024	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
1025		gup_flags |= FOLL_WRITE;
1026
1027	/*
1028	 * We want mlock to succeed for regions that have any permissions
1029	 * other than PROT_NONE.
1030	 */
1031	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
1032		gup_flags |= FOLL_FORCE;
1033
1034	/*
1035	 * We made sure addr is within a VMA, so the following will
1036	 * not result in a stack expansion that recurses back here.
1037	 */
1038	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
1039				NULL, NULL, nonblocking);
1040}
1041
1042/*
1043 * __mm_populate - populate and/or mlock pages within a range of address space.
1044 *
1045 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
1046 * flags. VMAs must be already marked with the desired vm_flags, and
1047 * mmap_sem must not be held.
1048 */
1049int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1050{
1051	struct mm_struct *mm = current->mm;
1052	unsigned long end, nstart, nend;
1053	struct vm_area_struct *vma = NULL;
1054	int locked = 0;
1055	long ret = 0;
1056
1057	VM_BUG_ON(start & ~PAGE_MASK);
1058	VM_BUG_ON(len != PAGE_ALIGN(len));
1059	end = start + len;
1060
1061	for (nstart = start; nstart < end; nstart = nend) {
1062		/*
1063		 * We want to fault in pages for [nstart; end) address range.
1064		 * Find first corresponding VMA.
1065		 */
1066		if (!locked) {
1067			locked = 1;
1068			down_read(&mm->mmap_sem);
1069			vma = find_vma(mm, nstart);
1070		} else if (nstart >= vma->vm_end)
1071			vma = vma->vm_next;
1072		if (!vma || vma->vm_start >= end)
1073			break;
1074		/*
1075		 * Set [nstart; nend) to intersection of desired address
1076		 * range with the first VMA. Also, skip undesirable VMA types.
1077		 */
1078		nend = min(end, vma->vm_end);
1079		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1080			continue;
1081		if (nstart < vma->vm_start)
1082			nstart = vma->vm_start;
1083		/*
1084		 * Now fault in a range of pages. populate_vma_page_range()
1085		 * double checks the vma flags, so that it won't mlock pages
1086		 * if the vma was already munlocked.
1087		 */
1088		ret = populate_vma_page_range(vma, nstart, nend, &locked);
1089		if (ret < 0) {
1090			if (ignore_errors) {
1091				ret = 0;
1092				continue;	/* continue at next VMA */
1093			}
1094			break;
1095		}
1096		nend = nstart + ret * PAGE_SIZE;
1097		ret = 0;
1098	}
1099	if (locked)
1100		up_read(&mm->mmap_sem);
1101	return ret;	/* 0 or negative error code */
1102}
1103
1104/**
1105 * get_dump_page() - pin user page in memory while writing it to core dump
1106 * @addr: user address
1107 *
1108 * Returns struct page pointer of user page pinned for dump,
1109 * to be freed afterwards by put_page().
1110 *
1111 * Returns NULL on any kind of failure - a hole must then be inserted into
1112 * the corefile, to preserve alignment with its headers; and also returns
1113 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
1114 * allowing a hole to be left in the corefile to save diskspace.
1115 *
1116 * Called without mmap_sem, but after all other threads have been killed.
1117 */
1118#ifdef CONFIG_ELF_CORE
1119struct page *get_dump_page(unsigned long addr)
1120{
1121	struct vm_area_struct *vma;
1122	struct page *page;
1123
1124	if (__get_user_pages(current, current->mm, addr, 1,
1125			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
1126			     NULL) < 1)
1127		return NULL;
1128	flush_cache_page(vma, addr, page_to_pfn(page));
1129	return page;
1130}
1131#endif /* CONFIG_ELF_CORE */
1132
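The hole and put_page() semantics described above can be illustrated with a hedged sketch (not part of the kernel source listed here; demo_count_dumpable is hypothetical) that walks a range the way a core dumper would:

static long demo_count_dumpable(unsigned long addr, unsigned long end)
{
	long present = 0;

	for (; addr < end; addr += PAGE_SIZE) {
		struct page *page = get_dump_page(addr);

		if (!page)
			continue;	/* dumper would leave a hole here */
		present++;		/* dumper would write this page out */
		put_page(page);
	}
	return present;
}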
1133/*
1134 * Generic RCU Fast GUP
1135 *
1136 * get_user_pages_fast attempts to pin user pages by walking the page
1137 * tables directly and avoids taking locks. Thus the walker needs to be
1138 * protected from page table pages being freed from under it, and should
1139 * block any THP splits.
1140 *
1141 * One way to achieve this is to have the walker disable interrupts, and
1142 * rely on IPIs from the TLB flushing code blocking before the page table
1143 * pages are freed. This is unsuitable for architectures that do not need
1144 * to broadcast an IPI when invalidating TLBs.
1145 *
1146 * Another way to achieve this is to batch up page table containing pages
1147 * belonging to more than one mm_user, then rcu_sched a callback to free those
1148 * pages. Disabling interrupts will allow the fast_gup walker to both block
1149 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
1150 * (which is a relatively rare event). The code below adopts this strategy.
1151 *
1152 * Before activating this code, please be aware that the following assumptions
1153 * are currently made:
1154 *
1155 *  *) HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table is used to free
1156 *      pages containing page tables.
1157 *
1158 *  *) ptes can be read atomically by the architecture.
1159 *
1160 *  *) access_ok is sufficient to validate userspace address ranges.
1161 *
1162 * The last two assumptions can be relaxed by the addition of helper functions.
1163 *
1164 * This code is based heavily on the PowerPC implementation by Nick Piggin.
1165 */
1166#ifdef CONFIG_HAVE_GENERIC_RCU_GUP
1167
1168#ifdef __HAVE_ARCH_PTE_SPECIAL
1169static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
1170			 int write, struct page **pages, int *nr)
1171{
1172	pte_t *ptep, *ptem;
1173	int ret = 0;
1174
1175	ptem = ptep = pte_offset_map(&pmd, addr);
1176	do {
1177		/*
1178		 * In the line below we are assuming that the pte can be read
1179		 * atomically. If this is not the case for your architecture,
1180		 * please wrap this in a helper function!
1181		 *
1182		 * for an example see gup_get_pte in arch/x86/mm/gup.c
1183		 */
1184		pte_t pte = READ_ONCE(*ptep);
1185		struct page *head, *page;
1186
1187		/*
1188		 * Similar to the PMD case below, NUMA hinting must take slow
1189		 * path using the pte_protnone check.
1190		 */
1191		if (!pte_present(pte) || pte_special(pte) ||
1192			pte_protnone(pte) || (write && !pte_write(pte)))
1193			goto pte_unmap;
1194
1195		if (!arch_pte_access_permitted(pte, write))
1196			goto pte_unmap;
1197
1198		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
1199		page = pte_page(pte);
1200		head = compound_head(page);
1201
1202		if (!page_cache_get_speculative(head))
1203			goto pte_unmap;
1204
1205		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
1206			put_page(head);
1207			goto pte_unmap;
1208		}
1209
1210		VM_BUG_ON_PAGE(compound_head(page) != head, page);
1211		pages[*nr] = page;
1212		(*nr)++;
1213
1214	} while (ptep++, addr += PAGE_SIZE, addr != end);
1215
1216	ret = 1;
1217
1218pte_unmap:
1219	pte_unmap(ptem);
1220	return ret;
1221}
1222#else
1223
1224/*
1225 * If we can't determine whether or not a pte is special, then fail immediately
1226 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
1227 * to be special.
1228 *
1229 * For a futex to be placed on a THP tail page, get_futex_key requires a
1230 * __get_user_pages_fast implementation that can pin pages. Thus it's still
1231 * useful to have gup_huge_pmd even if we can't operate on ptes.
1232 */
1233static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
1234			 int write, struct page **pages, int *nr)
1235{
1236	return 0;
1237}
1238#endif /* __HAVE_ARCH_PTE_SPECIAL */
1239
1240static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
1241		unsigned long end, int write, struct page **pages, int *nr)
1242{
1243	struct page *head, *page;
1244	int refs;
1245
1246	if (write && !pmd_write(orig))
1247		return 0;
1248
1249	refs = 0;
1250	head = pmd_page(orig);
1251	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
1252	do {
1253		VM_BUG_ON_PAGE(compound_head(page) != head, page);
1254		pages[*nr] = page;
1255		(*nr)++;
1256		page++;
1257		refs++;
1258	} while (addr += PAGE_SIZE, addr != end);
1259
1260	if (!page_cache_add_speculative(head, refs)) {
1261		*nr -= refs;
1262		return 0;
1263	}
1264
1265	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
1266		*nr -= refs;
1267		while (refs--)
1268			put_page(head);
1269		return 0;
1270	}
1271
1272	return 1;
1273}
1274
1275static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
1276		unsigned long end, int write, struct page **pages, int *nr)
1277{
1278	struct page *head, *page;
1279	int refs;
1280
1281	if (write && !pud_write(orig))
1282		return 0;
1283
1284	refs = 0;
1285	head = pud_page(orig);
1286	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
1287	do {
1288		VM_BUG_ON_PAGE(compound_head(page) != head, page);
1289		pages[*nr] = page;
1290		(*nr)++;
1291		page++;
1292		refs++;
1293	} while (addr += PAGE_SIZE, addr != end);
1294
1295	if (!page_cache_add_speculative(head, refs)) {
1296		*nr -= refs;
1297		return 0;
1298	}
1299
1300	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
1301		*nr -= refs;
1302		while (refs--)
1303			put_page(head);
1304		return 0;
1305	}
1306
1307	return 1;
1308}
1309
1310static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
1311			unsigned long end, int write,
1312			struct page **pages, int *nr)
1313{
1314	int refs;
1315	struct page *head, *page;
1316
1317	if (write && !pgd_write(orig))
1318		return 0;
1319
1320	refs = 0;
1321	head = pgd_page(orig);
1322	page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
1323	do {
1324		VM_BUG_ON_PAGE(compound_head(page) != head, page);
1325		pages[*nr] = page;
1326		(*nr)++;
1327		page++;
1328		refs++;
1329	} while (addr += PAGE_SIZE, addr != end);
1330
1331	if (!page_cache_add_speculative(head, refs)) {
1332		*nr -= refs;
1333		return 0;
1334	}
1335
1336	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
1337		*nr -= refs;
1338		while (refs--)
1339			put_page(head);
1340		return 0;
1341	}
1342
1343	return 1;
1344}
1345
1346static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
1347		int write, struct page **pages, int *nr)
1348{
1349	unsigned long next;
1350	pmd_t *pmdp;
1351
1352	pmdp = pmd_offset(&pud, addr);
1353	do {
1354		pmd_t pmd = READ_ONCE(*pmdp);
1355
1356		next = pmd_addr_end(addr, end);
1357		if (pmd_none(pmd))
1358			return 0;
1359
1360		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
1361			/*
1362			 * NUMA hinting faults need to be handled in the GUP
1363			 * slowpath for accounting purposes and so that they
1364			 * can be serialised against THP migration.
1365			 */
1366			if (pmd_protnone(pmd))
1367				return 0;
1368
1369			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
1370				pages, nr))
1371				return 0;
1372
1373		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
1374			/*
1375			 * architecture have different format for hugetlbfs
1376			 * pmd format and THP pmd format
1377			 */
1378			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
1379					 PMD_SHIFT, next, write, pages, nr))
1380				return 0;
1381		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
1382				return 0;
1383	} while (pmdp++, addr = next, addr != end);
1384
1385	return 1;
1386}
1387
1388static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
1389			 int write, struct page **pages, int *nr)
1390{
1391	unsigned long next;
1392	pud_t *pudp;
1393
1394	pudp = pud_offset(&pgd, addr);
1395	do {
1396		pud_t pud = READ_ONCE(*pudp);
1397
1398		next = pud_addr_end(addr, end);
1399		if (pud_none(pud))
1400			return 0;
1401		if (unlikely(pud_huge(pud))) {
1402			if (!gup_huge_pud(pud, pudp, addr, next, write,
1403					  pages, nr))
1404				return 0;
1405		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
1406			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
1407					 PUD_SHIFT, next, write, pages, nr))
1408				return 0;
1409		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
1410			return 0;
1411	} while (pudp++, addr = next, addr != end);
1412
1413	return 1;
1414}
1415
1416/*
1417 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
1418 * the regular GUP. It will only return non-negative values.
1419 */
1420int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1421			  struct page **pages)
1422{
1423	struct mm_struct *mm = current->mm;
1424	unsigned long addr, len, end;
1425	unsigned long next, flags;
1426	pgd_t *pgdp;
1427	int nr = 0;
1428
1429	start &= PAGE_MASK;
1430	addr = start;
1431	len = (unsigned long) nr_pages << PAGE_SHIFT;
1432	end = start + len;
1433
1434	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
1435					start, len)))
1436		return 0;
1437
1438	/*
1439	 * Disable interrupts.  We use the nested form as we can already have
1440	 * interrupts disabled by get_futex_key.
1441	 *
1442	 * With interrupts disabled, we block page table pages from being
1443	 * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
1444	 * for more details.
1445	 *
1446	 * We do not adopt an rcu_read_lock(.) here as we also want to
1447	 * block IPIs that come from THPs splitting.
1448	 */
1449
1450	local_irq_save(flags);
1451	pgdp = pgd_offset(mm, addr);
1452	do {
1453		pgd_t pgd = READ_ONCE(*pgdp);
1454
1455		next = pgd_addr_end(addr, end);
1456		if (pgd_none(pgd))
1457			break;
1458		if (unlikely(pgd_huge(pgd))) {
1459			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
1460					  pages, &nr))
1461				break;
1462		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
1463			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
1464					 PGDIR_SHIFT, next, write, pages, &nr))
1465				break;
1466		} else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
1467			break;
1468	} while (pgdp++, addr = next, addr != end);
1469	local_irq_restore(flags);
1470
1471	return nr;
1472}
1473
1474/**
1475 * get_user_pages_fast() - pin user pages in memory
1476 * @start:	starting user address
1477 * @nr_pages:	number of pages from start to pin
1478 * @write:	whether pages will be written to
1479 * @pages:	array that receives pointers to the pages pinned.
1480 *		Should be at least nr_pages long.
1481 *
1482 * Attempt to pin user pages in memory without taking mm->mmap_sem.
1483 * If not successful, it will fall back to taking the lock and
1484 * calling get_user_pages().
1485 *
1486 * Returns number of pages pinned. This may be fewer than the number
1487 * requested. If nr_pages is 0 or negative, returns 0. If no pages
1488 * were pinned, returns -errno.
1489 */
1490int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1491			struct page **pages)
1492{
1493	int nr, ret;
1494
1495	start &= PAGE_MASK;
1496	nr = __get_user_pages_fast(start, nr_pages, write, pages);
1497	ret = nr;
1498
1499	if (nr < nr_pages) {
1500		/* Try to get the remaining pages with get_user_pages */
1501		start += nr << PAGE_SHIFT;
1502		pages += nr;
1503
1504		ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
1505
1506		/* Have to be a bit careful with return values */
1507		if (nr > 0) {
1508			if (ret < 0)
1509				ret = nr;
1510			else
1511				ret += nr;
1512		}
1513	}
1514
1515	return ret;
1516}
1517
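Typical usage of the fast path, as a hedged sketch (not part of the kernel source listed here; demo_pin_buffer_fast is hypothetical): pin a user buffer for writing, operate on the pages, then mark them dirty and drop the references as the get_user_pages documentation above requires.

static int demo_pin_buffer_fast(unsigned long start, int nr_pages,
				struct page **pages)
{
	int pinned, i;

	pinned = get_user_pages_fast(start, nr_pages, 1, pages);
	if (pinned <= 0)
		return pinned;

	/* ... kmap()/DMA-map and use the pages here ... */

	for (i = 0; i < pinned; i++) {
		set_page_dirty_lock(pages[i]);	/* we asked for write access */
		put_page(pages[i]);
	}
	return pinned;
}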
1518#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */
mm/gup.c (Linux v4.17)
   1#include <linux/kernel.h>
   2#include <linux/errno.h>
   3#include <linux/err.h>
   4#include <linux/spinlock.h>
   5
   6#include <linux/mm.h>
   7#include <linux/memremap.h>
   8#include <linux/pagemap.h>
   9#include <linux/rmap.h>
  10#include <linux/swap.h>
  11#include <linux/swapops.h>
  12
  13#include <linux/sched/signal.h>
  14#include <linux/rwsem.h>
  15#include <linux/hugetlb.h>
  16
  17#include <asm/mmu_context.h>
  18#include <asm/pgtable.h>
  19#include <asm/tlbflush.h>
  20
  21#include "internal.h"
  22
  23static struct page *no_page_table(struct vm_area_struct *vma,
  24		unsigned int flags)
  25{
  26	/*
  27	 * When core dumping an enormous anonymous area that nobody
  28	 * has touched so far, we don't want to allocate unnecessary pages or
  29	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
  30	 * then get_dump_page() will return NULL to leave a hole in the dump.
  31	 * But we can only make this optimization where a hole would surely
  32	 * be zero-filled if handle_mm_fault() actually did handle it.
  33	 */
  34	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
  35		return ERR_PTR(-EFAULT);
  36	return NULL;
  37}
  38
  39static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
  40		pte_t *pte, unsigned int flags)
  41{
  42	/* No page to get reference */
  43	if (flags & FOLL_GET)
  44		return -EFAULT;
  45
  46	if (flags & FOLL_TOUCH) {
  47		pte_t entry = *pte;
  48
  49		if (flags & FOLL_WRITE)
  50			entry = pte_mkdirty(entry);
  51		entry = pte_mkyoung(entry);
  52
  53		if (!pte_same(*pte, entry)) {
  54			set_pte_at(vma->vm_mm, address, pte, entry);
  55			update_mmu_cache(vma, address, pte);
  56		}
  57	}
  58
  59	/* Proper page table entry exists, but no corresponding struct page */
  60	return -EEXIST;
  61}
  62
  63/*
  64 * FOLL_FORCE can write to even unwritable pte's, but only
  65 * after we've gone through a COW cycle and they are dirty.
  66 */
  67static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
  68{
  69	return pte_write(pte) ||
  70		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
  71}
  72
  73static struct page *follow_page_pte(struct vm_area_struct *vma,
  74		unsigned long address, pmd_t *pmd, unsigned int flags)
  75{
  76	struct mm_struct *mm = vma->vm_mm;
  77	struct dev_pagemap *pgmap = NULL;
  78	struct page *page;
  79	spinlock_t *ptl;
  80	pte_t *ptep, pte;
  81
  82retry:
  83	if (unlikely(pmd_bad(*pmd)))
  84		return no_page_table(vma, flags);
  85
  86	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
  87	pte = *ptep;
  88	if (!pte_present(pte)) {
  89		swp_entry_t entry;
  90		/*
  91		 * KSM's break_ksm() relies upon recognizing a ksm page
  92		 * even while it is being migrated, so for that case we
  93		 * need migration_entry_wait().
  94		 */
  95		if (likely(!(flags & FOLL_MIGRATION)))
  96			goto no_page;
  97		if (pte_none(pte))
  98			goto no_page;
  99		entry = pte_to_swp_entry(pte);
 100		if (!is_migration_entry(entry))
 101			goto no_page;
 102		pte_unmap_unlock(ptep, ptl);
 103		migration_entry_wait(mm, pmd, address);
 104		goto retry;
 105	}
 106	if ((flags & FOLL_NUMA) && pte_protnone(pte))
 107		goto no_page;
 108	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
 109		pte_unmap_unlock(ptep, ptl);
 110		return NULL;
 111	}
 112
 113	page = vm_normal_page(vma, address, pte);
 114	if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
 115		/*
 116		 * Only return device mapping pages in the FOLL_GET case since
 117		 * they are only valid while holding the pgmap reference.
 118		 */
 119		pgmap = get_dev_pagemap(pte_pfn(pte), NULL);
 120		if (pgmap)
 121			page = pte_page(pte);
 122		else
 123			goto no_page;
 124	} else if (unlikely(!page)) {
 125		if (flags & FOLL_DUMP) {
 126			/* Avoid special (like zero) pages in core dumps */
 127			page = ERR_PTR(-EFAULT);
 128			goto out;
 129		}
 130
 131		if (is_zero_pfn(pte_pfn(pte))) {
 132			page = pte_page(pte);
 133		} else {
 134			int ret;
 135
 136			ret = follow_pfn_pte(vma, address, ptep, flags);
 137			page = ERR_PTR(ret);
 138			goto out;
 139		}
 140	}
 141
 142	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
 143		int ret;
 144		get_page(page);
 145		pte_unmap_unlock(ptep, ptl);
 146		lock_page(page);
 147		ret = split_huge_page(page);
 148		unlock_page(page);
 149		put_page(page);
 150		if (ret)
 151			return ERR_PTR(ret);
 152		goto retry;
 153	}
 154
 155	if (flags & FOLL_GET) {
 156		get_page(page);
 157
 158		/* drop the pgmap reference now that we hold the page */
 159		if (pgmap) {
 160			put_dev_pagemap(pgmap);
 161			pgmap = NULL;
 162		}
 163	}
 164	if (flags & FOLL_TOUCH) {
 165		if ((flags & FOLL_WRITE) &&
 166		    !pte_dirty(pte) && !PageDirty(page))
 167			set_page_dirty(page);
 168		/*
 169		 * pte_mkyoung() would be more correct here, but atomic care
 170		 * is needed to avoid losing the dirty bit: it is easier to use
 171		 * mark_page_accessed().
 172		 */
 173		mark_page_accessed(page);
 174	}
 175	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
 176		/* Do not mlock pte-mapped THP */
 177		if (PageTransCompound(page))
 178			goto out;
 179
 180		/*
 181		 * The preliminary mapping check is mainly to avoid the
 182		 * pointless overhead of lock_page on the ZERO_PAGE
 183		 * which might bounce very badly if there is contention.
 184		 *
 185		 * If the page is already locked, we don't need to
 186		 * handle it now - vmscan will handle it later if and
 187		 * when it attempts to reclaim the page.
 188		 */
 189		if (page->mapping && trylock_page(page)) {
 190			lru_add_drain();  /* push cached pages to LRU */
 191			/*
 192			 * Because we lock page here, and migration is
 193			 * blocked by the pte's page reference, and we
 194			 * know the page is still mapped, we don't even
 195			 * need to check for file-cache page truncation.
 196			 */
 197			mlock_vma_page(page);
 198			unlock_page(page);
 199		}
 200	}
 201out:
 202	pte_unmap_unlock(ptep, ptl);
 203	return page;
 204no_page:
 205	pte_unmap_unlock(ptep, ptl);
 206	if (!pte_none(pte))
 207		return NULL;
 208	return no_page_table(vma, flags);
 209}
 210
 211static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 212				    unsigned long address, pud_t *pudp,
 213				    unsigned int flags, unsigned int *page_mask)
 214{
 215	pmd_t *pmd;
 216	spinlock_t *ptl;
 217	struct page *page;
 218	struct mm_struct *mm = vma->vm_mm;
 219
 220	pmd = pmd_offset(pudp, address);
 221	if (pmd_none(*pmd))
 222		return no_page_table(vma, flags);
 223	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
 224		page = follow_huge_pmd(mm, address, pmd, flags);
 225		if (page)
 226			return page;
 227		return no_page_table(vma, flags);
 228	}
 229	if (is_hugepd(__hugepd(pmd_val(*pmd)))) {
 230		page = follow_huge_pd(vma, address,
 231				      __hugepd(pmd_val(*pmd)), flags,
 232				      PMD_SHIFT);
 233		if (page)
 234			return page;
 235		return no_page_table(vma, flags);
 236	}
 237retry:
 238	if (!pmd_present(*pmd)) {
 239		if (likely(!(flags & FOLL_MIGRATION)))
 240			return no_page_table(vma, flags);
 241		VM_BUG_ON(thp_migration_supported() &&
 242				  !is_pmd_migration_entry(*pmd));
 243		if (is_pmd_migration_entry(*pmd))
 244			pmd_migration_entry_wait(mm, pmd);
 245		goto retry;
 246	}
 247	if (pmd_devmap(*pmd)) {
 248		ptl = pmd_lock(mm, pmd);
 249		page = follow_devmap_pmd(vma, address, pmd, flags);
 250		spin_unlock(ptl);
 251		if (page)
 252			return page;
 253	}
 254	if (likely(!pmd_trans_huge(*pmd)))
 255		return follow_page_pte(vma, address, pmd, flags);
 256
 257	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
 258		return no_page_table(vma, flags);
 259
 260retry_locked:
 261	ptl = pmd_lock(mm, pmd);
 262	if (unlikely(!pmd_present(*pmd))) {
 263		spin_unlock(ptl);
 264		if (likely(!(flags & FOLL_MIGRATION)))
 265			return no_page_table(vma, flags);
 266		pmd_migration_entry_wait(mm, pmd);
 267		goto retry_locked;
 268	}
 269	if (unlikely(!pmd_trans_huge(*pmd))) {
 270		spin_unlock(ptl);
 271		return follow_page_pte(vma, address, pmd, flags);
 272	}
 273	if (flags & FOLL_SPLIT) {
 274		int ret;
 275		page = pmd_page(*pmd);
 276		if (is_huge_zero_page(page)) {
 277			spin_unlock(ptl);
 278			ret = 0;
 279			split_huge_pmd(vma, pmd, address);
 280			if (pmd_trans_unstable(pmd))
 281				ret = -EBUSY;
 282		} else {
 283			get_page(page);
 284			spin_unlock(ptl);
 285			lock_page(page);
 286			ret = split_huge_page(page);
 287			unlock_page(page);
 288			put_page(page);
 289			if (pmd_none(*pmd))
 290				return no_page_table(vma, flags);
 291		}
 292
 293		return ret ? ERR_PTR(ret) :
 294			follow_page_pte(vma, address, pmd, flags);
 295	}
 296	page = follow_trans_huge_pmd(vma, address, pmd, flags);
 297	spin_unlock(ptl);
 298	*page_mask = HPAGE_PMD_NR - 1;
 299	return page;
 300}
 301
 302
 303static struct page *follow_pud_mask(struct vm_area_struct *vma,
 304				    unsigned long address, p4d_t *p4dp,
 305				    unsigned int flags, unsigned int *page_mask)
 306{
 307	pud_t *pud;
 308	spinlock_t *ptl;
 309	struct page *page;
 310	struct mm_struct *mm = vma->vm_mm;
 311
 312	pud = pud_offset(p4dp, address);
 313	if (pud_none(*pud))
 314		return no_page_table(vma, flags);
 315	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
 316		page = follow_huge_pud(mm, address, pud, flags);
 317		if (page)
 318			return page;
 319		return no_page_table(vma, flags);
 320	}
 321	if (is_hugepd(__hugepd(pud_val(*pud)))) {
 322		page = follow_huge_pd(vma, address,
 323				      __hugepd(pud_val(*pud)), flags,
 324				      PUD_SHIFT);
 325		if (page)
 326			return page;
 327		return no_page_table(vma, flags);
 328	}
 329	if (pud_devmap(*pud)) {
 330		ptl = pud_lock(mm, pud);
 331		page = follow_devmap_pud(vma, address, pud, flags);
 332		spin_unlock(ptl);
 333		if (page)
 334			return page;
 335	}
 336	if (unlikely(pud_bad(*pud)))
 337		return no_page_table(vma, flags);
 338
 339	return follow_pmd_mask(vma, address, pud, flags, page_mask);
 340}
 341
 342
 343static struct page *follow_p4d_mask(struct vm_area_struct *vma,
 344				    unsigned long address, pgd_t *pgdp,
 345				    unsigned int flags, unsigned int *page_mask)
 346{
 347	p4d_t *p4d;
 348	struct page *page;
 349
 350	p4d = p4d_offset(pgdp, address);
 351	if (p4d_none(*p4d))
 352		return no_page_table(vma, flags);
 353	BUILD_BUG_ON(p4d_huge(*p4d));
 354	if (unlikely(p4d_bad(*p4d)))
 355		return no_page_table(vma, flags);
 356
 357	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
 358		page = follow_huge_pd(vma, address,
 359				      __hugepd(p4d_val(*p4d)), flags,
 360				      P4D_SHIFT);
 361		if (page)
 362			return page;
 363		return no_page_table(vma, flags);
 364	}
 365	return follow_pud_mask(vma, address, p4d, flags, page_mask);
 366}
 367
 368/**
 369 * follow_page_mask - look up a page descriptor from a user-virtual address
 370 * @vma: vm_area_struct mapping @address
 371 * @address: virtual address to look up
 372 * @flags: flags modifying lookup behaviour
 373 * @page_mask: on output, *page_mask is set according to the size of the page
 374 *
 375 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 376 *
 377 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 378 * an error pointer if there is a mapping to something not represented
 379 * by a page descriptor (see also vm_normal_page()).
 380 */
 381struct page *follow_page_mask(struct vm_area_struct *vma,
 382			      unsigned long address, unsigned int flags,
 383			      unsigned int *page_mask)
 384{
 385	pgd_t *pgd;
 386	struct page *page;
 387	struct mm_struct *mm = vma->vm_mm;
 388
 389	*page_mask = 0;
 390
 391	/* make this handle hugepd */
 392	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
 393	if (!IS_ERR(page)) {
 394		BUG_ON(flags & FOLL_GET);
 395		return page;
 396	}
 397
 398	pgd = pgd_offset(mm, address);
 399
 400	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
 401		return no_page_table(vma, flags);
 402
 403	if (pgd_huge(*pgd)) {
 404		page = follow_huge_pgd(mm, address, pgd, flags);
 405		if (page)
 406			return page;
 407		return no_page_table(vma, flags);
 408	}
 409	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
 410		page = follow_huge_pd(vma, address,
 411				      __hugepd(pgd_val(*pgd)), flags,
 412				      PGDIR_SHIFT);
 413		if (page)
 414			return page;
 415		return no_page_table(vma, flags);
 416	}
 417
 418	return follow_p4d_mask(vma, address, pgd, flags, page_mask);
 419}
 420
 421static int get_gate_page(struct mm_struct *mm, unsigned long address,
 422		unsigned int gup_flags, struct vm_area_struct **vma,
 423		struct page **page)
 424{
 425	pgd_t *pgd;
 426	p4d_t *p4d;
 427	pud_t *pud;
 428	pmd_t *pmd;
 429	pte_t *pte;
 430	int ret = -EFAULT;
 431
 432	/* user gate pages are read-only */
 433	if (gup_flags & FOLL_WRITE)
 434		return -EFAULT;
 435	if (address > TASK_SIZE)
 436		pgd = pgd_offset_k(address);
 437	else
 438		pgd = pgd_offset_gate(mm, address);
 439	BUG_ON(pgd_none(*pgd));
 440	p4d = p4d_offset(pgd, address);
 441	BUG_ON(p4d_none(*p4d));
 442	pud = pud_offset(p4d, address);
 443	BUG_ON(pud_none(*pud));
 444	pmd = pmd_offset(pud, address);
 445	if (!pmd_present(*pmd))
 446		return -EFAULT;
 447	VM_BUG_ON(pmd_trans_huge(*pmd));
 448	pte = pte_offset_map(pmd, address);
 449	if (pte_none(*pte))
 450		goto unmap;
 451	*vma = get_gate_vma(mm);
 452	if (!page)
 453		goto out;
 454	*page = vm_normal_page(*vma, address, *pte);
 455	if (!*page) {
 456		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
 457			goto unmap;
 458		*page = pte_page(*pte);
 459
 460		/*
 461		 * This should never happen (a device public page in the gate
 462		 * area).
 463		 */
 464		if (is_device_public_page(*page))
 465			goto unmap;
 466	}
 467	get_page(*page);
 468out:
 469	ret = 0;
 470unmap:
 471	pte_unmap(pte);
 472	return ret;
 473}
 474
 475/*
 476 * mmap_sem must be held on entry.  If @nonblocking != NULL and
 477 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 478 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 479 */
 480static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 481		unsigned long address, unsigned int *flags, int *nonblocking)
 482{
 483	unsigned int fault_flags = 0;
 484	int ret;
 485
 486	/* mlock all present pages, but do not fault in new pages */
 487	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
 488		return -ENOENT;
 489	if (*flags & FOLL_WRITE)
 490		fault_flags |= FAULT_FLAG_WRITE;
 491	if (*flags & FOLL_REMOTE)
 492		fault_flags |= FAULT_FLAG_REMOTE;
 493	if (nonblocking)
 494		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
 495	if (*flags & FOLL_NOWAIT)
 496		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
 497	if (*flags & FOLL_TRIED) {
 498		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
 499		fault_flags |= FAULT_FLAG_TRIED;
 500	}
 501
 502	ret = handle_mm_fault(vma, address, fault_flags);
 503	if (ret & VM_FAULT_ERROR) {
 504		int err = vm_fault_to_errno(ret, *flags);
 505
 506		if (err)
 507			return err;
 508		BUG();
 509	}
 510
 511	if (tsk) {
 512		if (ret & VM_FAULT_MAJOR)
 513			tsk->maj_flt++;
 514		else
 515			tsk->min_flt++;
 516	}
 517
 518	if (ret & VM_FAULT_RETRY) {
 519		if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
 520			*nonblocking = 0;
 521		return -EBUSY;
 522	}
 523
 524	/*
 525	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
 526	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
 527	 * can thus safely do subsequent page lookups as if they were reads.
 528	 * But only do so when looping for pte_write is futile: in some cases
 529	 * userspace may also want to write to the user page we just got,
 530	 * which a read fault here might prevent (a readonly page might get
 531	 * reCOWed by userspace write).
 532	 */
 533	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
 534		*flags |= FOLL_COW;
 535	return 0;
 536}
 537
 538static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
 539{
 540	vm_flags_t vm_flags = vma->vm_flags;
 541	int write = (gup_flags & FOLL_WRITE);
 542	int foreign = (gup_flags & FOLL_REMOTE);
 543
 544	if (vm_flags & (VM_IO | VM_PFNMAP))
 545		return -EFAULT;
 546
 547	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
 548		return -EFAULT;
 549
 550	if (write) {
 551		if (!(vm_flags & VM_WRITE)) {
 552			if (!(gup_flags & FOLL_FORCE))
 553				return -EFAULT;
 554			/*
 555			 * We used to let the write,force case do COW in a
 556			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
 557			 * set a breakpoint in a read-only mapping of an
 558			 * executable, without corrupting the file (yet only
 559			 * when that file had been opened for writing!).
 560			 * Anon pages in shared mappings are surprising: now we
 561			 * just reject such attempts.
 562			 */
 563			if (!is_cow_mapping(vm_flags))
 564				return -EFAULT;
 565		}
 566	} else if (!(vm_flags & VM_READ)) {
 567		if (!(gup_flags & FOLL_FORCE))
 568			return -EFAULT;
 569		/*
 570		 * Is there actually any vma we can reach here which does not
 571		 * have VM_MAYREAD set?
 572		 */
 573		if (!(vm_flags & VM_MAYREAD))
 574			return -EFAULT;
 575	}
 576	/*
 577	 * gups are always data accesses, not instruction
 578	 * fetches, so execute=false here
 579	 */
 580	if (!arch_vma_access_permitted(vma, write, false, foreign))
 581		return -EFAULT;
 582	return 0;
 583}
 584
 585/**
 586 * __get_user_pages() - pin user pages in memory
 587 * @tsk:	task_struct of target task
 588 * @mm:		mm_struct of target mm
 589 * @start:	starting user address
 590 * @nr_pages:	number of pages from start to pin
 591 * @gup_flags:	flags modifying pin behaviour
 592 * @pages:	array that receives pointers to the pages pinned.
 593 *		Should be at least nr_pages long. Or NULL, if caller
 594 *		only intends to ensure the pages are faulted in.
 595 * @vmas:	array of pointers to vmas corresponding to each page.
 596 *		Or NULL if the caller does not require them.
 597 * @nonblocking: output; set to 0 if waiting was needed to pin all pages
 *		(see below). Or NULL.
 598 *
 599 * Returns number of pages pinned. This may be fewer than the number
 600 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 601 * were pinned, returns -errno. Each page returned must be released
 602 * with a put_page() call when it is finished with. vmas will only
 603 * remain valid while mmap_sem is held.
 604 *
 605 * Must be called with mmap_sem held.  It may be released.  See below.
 606 *
 607 * __get_user_pages walks a process's page tables and takes a reference to
 608 * each struct page that each user address corresponds to at a given
 609 * instant. That is, it takes the page that would be accessed if a user
 610 * thread accesses the given user virtual address at that instant.
 611 *
 612 * This does not guarantee that the page exists in the user mappings when
 613 * __get_user_pages returns, and there may even be a completely different
 614 * page there in some cases (e.g. if mmapped pagecache has been invalidated
 615 * and subsequently re-faulted). However, it does guarantee that the page
 616 * won't be freed completely. And mostly callers simply care that the page
 617 * contains data that was valid *at some point in time*. Typically, an IO
 618 * or similar operation cannot guarantee anything stronger anyway because
 619 * locks can't be held over the syscall boundary.
 620 *
 621 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 622 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 623 * appropriate) must be called after the page is finished with, and
 624 * before put_page is called.
 625 *
 626 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 627 * or mmap_sem contention, and if waiting is needed to pin all pages,
 628 * *@nonblocking will be set to 0.  Further, if @gup_flags does not
 629 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 630 * this case.
 631 *
 632 * A caller using such a combination of @nonblocking and @gup_flags
 633 * must therefore hold the mmap_sem for reading only, and recognize
 634 * when it's been released.  Otherwise, it must be held for either
 635 * reading or writing and will not be released.
 636 *
 637 * In most cases, get_user_pages or get_user_pages_fast should be used
 638 * instead of __get_user_pages. __get_user_pages should be used only if
 639 * you need some special @gup_flags.
 640 */
 641static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 642		unsigned long start, unsigned long nr_pages,
 643		unsigned int gup_flags, struct page **pages,
 644		struct vm_area_struct **vmas, int *nonblocking)
 645{
 646	long i = 0;
 647	unsigned int page_mask;
 648	struct vm_area_struct *vma = NULL;
 649
 650	if (!nr_pages)
 651		return 0;
 652
 653	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
 654
 655	/*
 656	 * If FOLL_FORCE is set then do not force a full fault as the hinting
 657	 * fault information is unrelated to the reference behaviour of a task
 658	 * using the address space
 659	 */
 660	if (!(gup_flags & FOLL_FORCE))
 661		gup_flags |= FOLL_NUMA;
 662
 663	do {
 664		struct page *page;
 665		unsigned int foll_flags = gup_flags;
 666		unsigned int page_increm;
 667
 668		/* first iteration or crossing a vma boundary */
 669		if (!vma || start >= vma->vm_end) {
 670			vma = find_extend_vma(mm, start);
 671			if (!vma && in_gate_area(mm, start)) {
 672				int ret;
 673				ret = get_gate_page(mm, start & PAGE_MASK,
 674						gup_flags, &vma,
 675						pages ? &pages[i] : NULL);
 676				if (ret)
 677					return i ? : ret;
 678				page_mask = 0;
 679				goto next_page;
 680			}
 681
 682			if (!vma || check_vma_flags(vma, gup_flags))
 683				return i ? : -EFAULT;
 684			if (is_vm_hugetlb_page(vma)) {
 685				i = follow_hugetlb_page(mm, vma, pages, vmas,
 686						&start, &nr_pages, i,
 687						gup_flags, nonblocking);
 688				continue;
 689			}
 690		}
 691retry:
 692		/*
 693		 * If we have a pending SIGKILL, don't keep faulting pages and
 694		 * potentially allocating memory.
 695		 */
 696		if (unlikely(fatal_signal_pending(current)))
 697			return i ? i : -ERESTARTSYS;
 698		cond_resched();
 699		page = follow_page_mask(vma, start, foll_flags, &page_mask);
 700		if (!page) {
 701			int ret;
 702			ret = faultin_page(tsk, vma, start, &foll_flags,
 703					nonblocking);
 704			switch (ret) {
 705			case 0:
 706				goto retry;
 707			case -EFAULT:
 708			case -ENOMEM:
 709			case -EHWPOISON:
 710				return i ? i : ret;
 711			case -EBUSY:
 712				return i;
 713			case -ENOENT:
 714				goto next_page;
 715			}
 716			BUG();
 717		} else if (PTR_ERR(page) == -EEXIST) {
 718			/*
 719			 * Proper page table entry exists, but no corresponding
 720			 * struct page.
 721			 */
 722			goto next_page;
 723		} else if (IS_ERR(page)) {
 724			return i ? i : PTR_ERR(page);
 725		}
 726		if (pages) {
 727			pages[i] = page;
 728			flush_anon_page(vma, page, start);
 729			flush_dcache_page(page);
 730			page_mask = 0;
 731		}
 732next_page:
 733		if (vmas) {
 734			vmas[i] = vma;
 735			page_mask = 0;
 736		}
 737		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
 738		if (page_increm > nr_pages)
 739			page_increm = nr_pages;
 740		i += page_increm;
 741		start += page_increm * PAGE_SIZE;
 742		nr_pages -= page_increm;
 743	} while (nr_pages);
 744	return i;
 745}
 746
 747static bool vma_permits_fault(struct vm_area_struct *vma,
 748			      unsigned int fault_flags)
 749{
 750	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
 751	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
 752	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
 753
 754	if (!(vm_flags & vma->vm_flags))
 755		return false;
 756
 757	/*
 758	 * The architecture might have a hardware protection
 759	 * mechanism other than read/write that can deny access.
 760	 *
 761	 * gup always represents data access, not instruction
 762	 * fetches, so execute=false here:
 763	 */
 764	if (!arch_vma_access_permitted(vma, write, false, foreign))
 765		return false;
 766
 767	return true;
 768}
 769
 770/*
 771 * fixup_user_fault() - manually resolve a user page fault
 772 * @tsk:	the task_struct to use for page fault accounting, or
 773 *		NULL if faults are not to be recorded.
 774 * @mm:		mm_struct of target mm
 775 * @address:	user address
 776 * @fault_flags:flags to pass down to handle_mm_fault()
 777 * @unlocked:	did we unlock the mmap_sem while retrying, maybe NULL if caller
 778 *		does not allow retry
 779 *
 780 * This is meant for the specific scenario where, for locking reasons, we
 781 * try to access user memory in atomic context (within a pagefault_disable()
 782 * section), that access faults with -EFAULT, and we want to resolve the
 783 * user fault before trying again.
 784 *
 785 * Typically this is meant to be used by the futex code.
 786 *
 787 * The main difference with get_user_pages() is that this function will
 788 * unconditionally call handle_mm_fault() which will in turn perform all the
 789 * necessary SW fixup of the dirty and young bits in the PTE, while
 790 * get_user_pages() only guarantees to update these in the struct page.
 791 *
 792 * This is important for some architectures where those bits also gate the
 793 * access permission to the page because they are maintained in software.  On
 794 * such architectures, gup() will not be enough to make a subsequent access
 795 * succeed.
 796 *
 797 * This function will not return with an unlocked mmap_sem, so its semantics
 798 * wrt the @mm->mmap_sem differ from those of filemap_fault().
 799 */
 800int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 801		     unsigned long address, unsigned int fault_flags,
 802		     bool *unlocked)
 803{
 804	struct vm_area_struct *vma;
 805	int ret, major = 0;
 806
 807	if (unlocked)
 808		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
 809
 810retry:
 811	vma = find_extend_vma(mm, address);
 812	if (!vma || address < vma->vm_start)
 813		return -EFAULT;
 814
 815	if (!vma_permits_fault(vma, fault_flags))
 816		return -EFAULT;
 817
 818	ret = handle_mm_fault(vma, address, fault_flags);
 819	major |= ret & VM_FAULT_MAJOR;
 820	if (ret & VM_FAULT_ERROR) {
 821		int err = vm_fault_to_errno(ret, 0);
 822
 823		if (err)
 824			return err;
 825		BUG();
 826	}
 827
 828	if (ret & VM_FAULT_RETRY) {
 829		down_read(&mm->mmap_sem);
 830		if (!(fault_flags & FAULT_FLAG_TRIED)) {
 831			*unlocked = true;
 832			fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
 833			fault_flags |= FAULT_FLAG_TRIED;
 834			goto retry;
 835		}
 836	}
 837
 838	if (tsk) {
 839		if (major)
 840			tsk->maj_flt++;
 841		else
 842			tsk->min_flt++;
 843	}
 844	return 0;
 845}
 846EXPORT_SYMBOL_GPL(fixup_user_fault);
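
/*
 * Illustrative sketch (not part of gup.c): the retry pattern described
 * above, modelled on the futex code.  An access inside pagefault_disable()
 * may fail with -EFAULT; the caller then drops out of the atomic section,
 * resolves the fault with fixup_user_fault() under mmap_sem, and retries.
 * example_read_user_u32() is a hypothetical helper, shown only for
 * illustration.
 */
#if 0	/* usage sketch */
static int example_read_user_u32(u32 __user *uaddr, u32 *val)
{
	struct mm_struct *mm = current->mm;
	int ret;

retry:
	pagefault_disable();
	ret = get_user(*val, uaddr);		/* must not sleep here */
	pagefault_enable();
	if (ret) {
		down_read(&mm->mmap_sem);
		ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
				       0, NULL);
		up_read(&mm->mmap_sem);
		if (ret)
			return ret;
		goto retry;
	}
	return 0;
}
#endif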
 847
 848static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 849						struct mm_struct *mm,
 850						unsigned long start,
 851						unsigned long nr_pages,
 852						struct page **pages,
 853						struct vm_area_struct **vmas,
 854						int *locked,
 855						unsigned int flags)
 856{
 857	long ret, pages_done;
 858	bool lock_dropped;
 859
 860	if (locked) {
 861		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
 862		BUG_ON(vmas);
 863		/* check caller initialized locked */
 864		BUG_ON(*locked != 1);
 865	}
 866
 867	if (pages)
 868		flags |= FOLL_GET;
 869
 870	pages_done = 0;
 871	lock_dropped = false;
 872	for (;;) {
 873		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
 874				       vmas, locked);
 875		if (!locked)
 876			/* VM_FAULT_RETRY couldn't trigger, bypass */
 877			return ret;
 878
 879		/* VM_FAULT_RETRY cannot return errors */
 880		if (!*locked) {
 881			BUG_ON(ret < 0);
 882			BUG_ON(ret >= nr_pages);
 883		}
 884
 885		if (!pages)
 886			/* If it's a prefault don't insist harder */
 887			return ret;
 888
 889		if (ret > 0) {
 890			nr_pages -= ret;
 891			pages_done += ret;
 892			if (!nr_pages)
 893				break;
 894		}
 895		if (*locked) {
 896			/*
 897			 * VM_FAULT_RETRY didn't trigger or it was a
 898			 * FOLL_NOWAIT.
 899			 */
 900			if (!pages_done)
 901				pages_done = ret;
 902			break;
 903		}
 904		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
 905		pages += ret;
 906		start += ret << PAGE_SHIFT;
 907
 908		/*
 909		 * Repeat on the address that fired VM_FAULT_RETRY
 910		 * without FAULT_FLAG_ALLOW_RETRY but with
 911		 * FAULT_FLAG_TRIED.
 912		 */
 913		*locked = 1;
 914		lock_dropped = true;
 915		down_read(&mm->mmap_sem);
 916		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
 917				       pages, NULL, NULL);
 918		if (ret != 1) {
 919			BUG_ON(ret > 1);
 920			if (!pages_done)
 921				pages_done = ret;
 922			break;
 923		}
 924		nr_pages--;
 925		pages_done++;
 926		if (!nr_pages)
 927			break;
 928		pages++;
 929		start += PAGE_SIZE;
 930	}
 931	if (lock_dropped && *locked) {
 932		/*
 933		 * We must let the caller know we temporarily dropped the lock
 934		 * and so the critical section protected by it was lost.
 935		 */
 936		up_read(&mm->mmap_sem);
 937		*locked = 0;
 938	}
 939	return pages_done;
 940}
 941
 942/*
 943 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 944 * paths better by using either get_user_pages_locked() or
 945 * get_user_pages_unlocked().
 946 *
 947 * get_user_pages_locked() is suitable to replace the form:
 948 *
 949 *      down_read(&mm->mmap_sem);
 950 *      do_something()
 951 *      get_user_pages(..., pages, NULL);
 952 *      up_read(&mm->mmap_sem);
 953 *
 954 *  to:
 955 *
 956 *      int locked = 1;
 957 *      down_read(&mm->mmap_sem);
 958 *      do_something()
 959 *      get_user_pages_locked(..., pages, &locked);
 960 *      if (locked)
 961 *          up_read(&mm->mmap_sem);
 962 */
 963long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 964			   unsigned int gup_flags, struct page **pages,
 965			   int *locked)
 966{
 967	return __get_user_pages_locked(current, current->mm, start, nr_pages,
 968				       pages, NULL, locked,
 969				       gup_flags | FOLL_TOUCH);
 970}
 971EXPORT_SYMBOL(get_user_pages_locked);
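
/*
 * Illustrative sketch (not part of gup.c): the calling convention from the
 * comment above.  If *locked was cleared, get_user_pages_locked() already
 * dropped the mmap_sem and the caller must not unlock it again.
 * example_pin_locked() is a hypothetical helper.
 */
#if 0	/* usage sketch */
static long example_pin_locked(unsigned long start, unsigned long nr_pages,
			       struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int locked = 1;
	long ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages_locked(start, nr_pages, FOLL_WRITE, pages,
				    &locked);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
#endif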
 972
 973/*
 974 * get_user_pages_unlocked() is suitable to replace the form:
 975 *
 976 *      down_read(&mm->mmap_sem);
 977 *      get_user_pages(..., pages, NULL);
 978 *      up_read(&mm->mmap_sem);
 979 *
 980 *  with:
 981 *
 982 *      get_user_pages_unlocked(..., pages, gup_flags);
 983 *
 984 * It is functionally equivalent to get_user_pages_fast so
 985 * get_user_pages_fast should be used instead if specific gup_flags
 986 * (e.g. FOLL_FORCE) are not required.
 987 */
 988long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 989			     struct page **pages, unsigned int gup_flags)
 990{
 991	struct mm_struct *mm = current->mm;
 992	int locked = 1;
 993	long ret;
 994
 995	down_read(&mm->mmap_sem);
 996	ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
 997				      &locked, gup_flags | FOLL_TOUCH);
 998	if (locked)
 999		up_read(&mm->mmap_sem);
1000	return ret;
1001}
1002EXPORT_SYMBOL(get_user_pages_unlocked);
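
/*
 * Illustrative sketch (not part of gup.c): pinning a user buffer without
 * managing mmap_sem in the caller, then dirtying and releasing the pages
 * once the caller is done with them.  example_with_pinned_buffer() and the
 * elided "use the pages" step are hypothetical.
 */
#if 0	/* usage sketch */
static long example_with_pinned_buffer(unsigned long start,
				       unsigned long nr_pages,
				       struct page **pages)
{
	long pinned, i;

	pinned = get_user_pages_unlocked(start, nr_pages, pages, FOLL_WRITE);
	if (pinned <= 0)
		return pinned;

	/* ... access pages[0..pinned-1], e.g. via kmap() or DMA ... */

	for (i = 0; i < pinned; i++) {
		set_page_dirty_lock(pages[i]);	/* we asked for FOLL_WRITE */
		put_page(pages[i]);
	}
	return pinned;
}
#endif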
1003
1004/*
1005 * get_user_pages_remote() - pin user pages in memory
1006 * @tsk:	the task_struct to use for page fault accounting, or
1007 *		NULL if faults are not to be recorded.
1008 * @mm:		mm_struct of target mm
1009 * @start:	starting user address
1010 * @nr_pages:	number of pages from start to pin
1011 * @gup_flags:	flags modifying lookup behaviour
1012 * @pages:	array that receives pointers to the pages pinned.
1013 *		Should be at least nr_pages long. Or NULL, if caller
1014 *		only intends to ensure the pages are faulted in.
1015 * @vmas:	array of pointers to vmas corresponding to each page.
1016 *		Or NULL if the caller does not require them.
1017 * @locked:	pointer to lock flag indicating whether lock is held and
1018 *		subsequently whether VM_FAULT_RETRY functionality can be
1019 *		utilised. Lock must initially be held.
1020 *
1021 * Returns number of pages pinned. This may be fewer than the number
1022 * requested. If nr_pages is 0 or negative, returns 0. If no pages
1023 * were pinned, returns -errno. Each page returned must be released
1024 * with a put_page() call when it is finished with. vmas will only
1025 * remain valid while mmap_sem is held.
1026 *
1027 * Must be called with mmap_sem held for read or write.
1028 *
1029 * get_user_pages walks a process's page tables and takes a reference to
1030 * each struct page that each user address corresponds to at a given
1031 * instant. That is, it takes the page that would be accessed if a user
1032 * thread accesses the given user virtual address at that instant.
1033 *
1034 * This does not guarantee that the page exists in the user mappings when
1035 * get_user_pages returns, and there may even be a completely different
1036 * page there in some cases (e.g. if mmapped pagecache has been invalidated
1037 * and subsequently re-faulted). However, it does guarantee that the page
1038 * won't be freed completely. And mostly callers simply care that the page
1039 * contains data that was valid *at some point in time*. Typically, an IO
1040 * or similar operation cannot guarantee anything stronger anyway because
1041 * locks can't be held over the syscall boundary.
1042 *
1043 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
1044 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
1045 * be called after the page is finished with, and before put_page is called.
1046 *
1047 * get_user_pages is typically used for fewer-copy IO operations, to get a
1048 * handle on the memory by some means other than accesses via the user virtual
1049 * addresses. The pages may be submitted for DMA to devices or accessed via
1050 * their kernel linear mapping (via the kmap APIs). Care should be taken to
1051 * use the correct cache flushing APIs.
1052 *
1053 * See also get_user_pages_fast, for performance critical applications.
1054 *
1055 * get_user_pages should be phased out in favor of
1056 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
1057 * should use get_user_pages because it cannot pass
1058 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
1059 */
1060long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
1061		unsigned long start, unsigned long nr_pages,
1062		unsigned int gup_flags, struct page **pages,
1063		struct vm_area_struct **vmas, int *locked)
1064{
1065	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
1066				       locked,
1067				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
1068}
1069EXPORT_SYMBOL(get_user_pages_remote);
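
/*
 * Illustrative sketch (not part of gup.c): pinning one page of a foreign
 * mm, in the style of access_remote_vm()/ptrace.  The caller is assumed to
 * hold a reference on @mm (e.g. from get_task_mm()) and takes mmap_sem
 * around the call.  example_peek_remote_page() is a hypothetical helper;
 * the returned page must be released with put_page().
 */
#if 0	/* usage sketch */
static struct page *example_peek_remote_page(struct task_struct *tsk,
					     struct mm_struct *mm,
					     unsigned long addr)
{
	struct page *page;
	long ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages_remote(tsk, mm, addr & PAGE_MASK, 1,
				    FOLL_FORCE, &page, NULL, NULL);
	up_read(&mm->mmap_sem);

	return ret == 1 ? page : NULL;
}
#endif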
1070
1071/*
1072 * This is the same as get_user_pages_remote(), just with a
1073 * less-flexible calling convention where we assume that the task
1074 * and mm being operated on are the current task's and don't allow
1075 * passing of a locked parameter.  We also obviously don't pass
1076 * FOLL_REMOTE in here.
1077 */
1078long get_user_pages(unsigned long start, unsigned long nr_pages,
1079		unsigned int gup_flags, struct page **pages,
1080		struct vm_area_struct **vmas)
1081{
1082	return __get_user_pages_locked(current, current->mm, start, nr_pages,
1083				       pages, vmas, NULL,
1084				       gup_flags | FOLL_TOUCH);
1085}
1086EXPORT_SYMBOL(get_user_pages);
1087
1088#ifdef CONFIG_FS_DAX
1089/*
1090 * This is the same as get_user_pages() in that it assumes we are
1091 * operating on the current task's mm, but it goes further to validate
1092 * that the vmas associated with the address range are suitable for
1093 * longterm elevated page reference counts. For example, filesystem-dax
1094 * mappings are subject to the lifetime enforced by the filesystem and
1095 * we need guarantees that longterm users like RDMA and V4L2 only
1096 * establish mappings that have a kernel enforced revocation mechanism.
1097 *
1098 * "longterm" == userspace controlled elevated page count lifetime.
1099 * Contrast this to iov_iter_get_pages() usages which are transient.
1100 */
1101long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
1102		unsigned int gup_flags, struct page **pages,
1103		struct vm_area_struct **vmas_arg)
1104{
1105	struct vm_area_struct **vmas = vmas_arg;
1106	struct vm_area_struct *vma_prev = NULL;
1107	long rc, i;
1108
1109	if (!pages)
1110		return -EINVAL;
1111
1112	if (!vmas) {
1113		vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *),
1114			       GFP_KERNEL);
1115		if (!vmas)
1116			return -ENOMEM;
1117	}
1118
1119	rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);
1120
1121	for (i = 0; i < rc; i++) {
1122		struct vm_area_struct *vma = vmas[i];
1123
1124		if (vma == vma_prev)
1125			continue;
1126
1127		vma_prev = vma;
1128
1129		if (vma_is_fsdax(vma))
1130			break;
1131	}
1132
1133	/*
1134	 * Either get_user_pages() failed, or the vma validation
1135	 * succeeded, in either case we don't need to put_page() before
1136	 * returning.
1137	 */
1138	if (i >= rc)
1139		goto out;
1140
1141	for (i = 0; i < rc; i++)
1142		put_page(pages[i]);
1143	rc = -EOPNOTSUPP;
1144out:
1145	if (vmas != vmas_arg)
1146		kfree(vmas);
1147	return rc;
1148}
1149EXPORT_SYMBOL(get_user_pages_longterm);
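
/*
 * Illustrative sketch (not part of gup.c): a long-lived pin such as an RDMA
 * memory registration would use get_user_pages_longterm() so that
 * filesystem-dax mappings, which cannot tolerate indefinitely elevated page
 * counts, are refused with -EOPNOTSUPP rather than pinned.
 * example_pin_longterm() is a hypothetical helper.
 */
#if 0	/* usage sketch */
static long example_pin_longterm(unsigned long start, unsigned long nr_pages,
				 struct page **pages)
{
	long pinned;

	down_read(&current->mm->mmap_sem);
	pinned = get_user_pages_longterm(start, nr_pages, FOLL_WRITE,
					 pages, NULL);
	up_read(&current->mm->mmap_sem);

	return pinned;	/* caller put_page()s each page when done */
}
#endif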
1150#endif /* CONFIG_FS_DAX */
1151
1152/**
1153 * populate_vma_page_range() -  populate a range of pages in the vma.
1154 * @vma:   target vma
1155 * @start: start address
1156 * @end:   end address
1157 * @nonblocking: output; set to 0 if mmap_sem was released (see below). Or NULL.
1158 *
1159 * This takes care of mlocking the pages too if VM_LOCKED is set.
1160 *
1161 * return 0 on success, negative error code on error.
1162 *
1163 * vma->vm_mm->mmap_sem must be held.
1164 *
1165 * If @nonblocking is NULL, it may be held for read or write and will
1166 * be unperturbed.
1167 *
1168 * If @nonblocking is non-NULL, it must be held for read only and may be
1169 * released.  If it's released, *@nonblocking will be set to 0.
1170 */
1171long populate_vma_page_range(struct vm_area_struct *vma,
1172		unsigned long start, unsigned long end, int *nonblocking)
1173{
1174	struct mm_struct *mm = vma->vm_mm;
1175	unsigned long nr_pages = (end - start) / PAGE_SIZE;
1176	int gup_flags;
1177
1178	VM_BUG_ON(start & ~PAGE_MASK);
1179	VM_BUG_ON(end   & ~PAGE_MASK);
1180	VM_BUG_ON_VMA(start < vma->vm_start, vma);
1181	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
1182	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
1183
1184	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
1185	if (vma->vm_flags & VM_LOCKONFAULT)
1186		gup_flags &= ~FOLL_POPULATE;
1187	/*
1188	 * We want to touch writable mappings with a write fault in order
1189	 * to break COW, except for shared mappings because these don't COW
1190	 * and we would not want to dirty them for nothing.
1191	 */
1192	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
1193		gup_flags |= FOLL_WRITE;
1194
1195	/*
1196	 * We want mlock to succeed for regions that have any permissions
1197	 * other than PROT_NONE.
1198	 */
1199	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
1200		gup_flags |= FOLL_FORCE;
1201
1202	/*
1203	 * We made sure addr is within a VMA, so the following will
1204	 * not result in a stack expansion that recurses back here.
1205	 */
1206	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
1207				NULL, NULL, nonblocking);
1208}
1209
1210/*
1211 * __mm_populate - populate and/or mlock pages within a range of address space.
1212 *
1213 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
1214 * flags. VMAs must be already marked with the desired vm_flags, and
1215 * mmap_sem must not be held.
1216 */
1217int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1218{
1219	struct mm_struct *mm = current->mm;
1220	unsigned long end, nstart, nend;
1221	struct vm_area_struct *vma = NULL;
1222	int locked = 0;
1223	long ret = 0;
1224
1225	VM_BUG_ON(start & ~PAGE_MASK);
1226	VM_BUG_ON(len != PAGE_ALIGN(len));
1227	end = start + len;
1228
1229	for (nstart = start; nstart < end; nstart = nend) {
1230		/*
1231		 * We want to fault in pages for [nstart; end) address range.
1232		 * Find first corresponding VMA.
1233		 */
1234		if (!locked) {
1235			locked = 1;
1236			down_read(&mm->mmap_sem);
1237			vma = find_vma(mm, nstart);
1238		} else if (nstart >= vma->vm_end)
1239			vma = vma->vm_next;
1240		if (!vma || vma->vm_start >= end)
1241			break;
1242		/*
1243		 * Set [nstart; nend) to intersection of desired address
1244		 * range with the first VMA. Also, skip undesirable VMA types.
1245		 */
1246		nend = min(end, vma->vm_end);
1247		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1248			continue;
1249		if (nstart < vma->vm_start)
1250			nstart = vma->vm_start;
1251		/*
1252		 * Now fault in a range of pages. populate_vma_page_range()
1253		 * double checks the vma flags, so that it won't mlock pages
1254		 * if the vma was already munlocked.
1255		 */
1256		ret = populate_vma_page_range(vma, nstart, nend, &locked);
1257		if (ret < 0) {
1258			if (ignore_errors) {
1259				ret = 0;
1260				continue;	/* continue at next VMA */
1261			}
1262			break;
1263		}
1264		nend = nstart + ret * PAGE_SIZE;
1265		ret = 0;
1266	}
1267	if (locked)
1268		up_read(&mm->mmap_sem);
1269	return ret;	/* 0 or negative error code */
1270}
1271
1272/**
1273 * get_dump_page() - pin user page in memory while writing it to core dump
1274 * @addr: user address
1275 *
1276 * Returns struct page pointer of user page pinned for dump,
1277 * to be freed afterwards by put_page().
1278 *
1279 * Returns NULL on any kind of failure - a hole must then be inserted into
1280 * the corefile, to preserve alignment with its headers; and also returns
1281 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
1282 * allowing a hole to be left in the corefile to save diskspace.
1283 *
1284 * Called without mmap_sem, but after all other threads have been killed.
1285 */
1286#ifdef CONFIG_ELF_CORE
1287struct page *get_dump_page(unsigned long addr)
1288{
1289	struct vm_area_struct *vma;
1290	struct page *page;
1291
1292	if (__get_user_pages(current, current->mm, addr, 1,
1293			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
1294			     NULL) < 1)
1295		return NULL;
1296	flush_cache_page(vma, addr, page_to_pfn(page));
1297	return page;
1298}
1299#endif /* CONFIG_ELF_CORE */
1300
1301/*
1302 * Generic Fast GUP
1303 *
1304 * get_user_pages_fast attempts to pin user pages by walking the page
1305 * tables directly and avoids taking locks. Thus the walker needs to be
1306 * protected from page table pages being freed from under it, and should
1307 * block any THP splits.
1308 *
1309 * One way to achieve this is to have the walker disable interrupts, and
1310 * rely on IPIs from the TLB flushing code blocking before the page table
1311 * pages are freed. This is unsuitable for architectures that do not need
1312 * to broadcast an IPI when invalidating TLBs.
1313 *
1314 * Another way to achieve this is to batch up the pages containing page tables
1315 * belonging to more than one mm_user, then rcu_sched a callback to free those
1316 * pages. Disabling interrupts will allow the fast_gup walker to both block
1317 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
1318 * (which is a relatively rare event). The code below adopts this strategy.
1319 *
1320 * Before activating this code, please be aware that the following assumptions
1321 * are currently made:
1322 *
1323 *  *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
1324 *  free pages containing page tables or TLB flushing requires IPI broadcast.
1325 *
1326 *  *) ptes can be read atomically by the architecture.
1327 *
1328 *  *) access_ok is sufficient to validate userspace address ranges.
1329 *
1330 * The last two assumptions can be relaxed by the addition of helper functions.
1331 *
1332 * This code is based heavily on the PowerPC implementation by Nick Piggin.
1333 */
1334#ifdef CONFIG_HAVE_GENERIC_GUP
1335
1336#ifndef gup_get_pte
1337/*
1338 * We assume that the PTE can be read atomically. If this is not the case for
1339 * your architecture, please provide the helper.
1340 */
1341static inline pte_t gup_get_pte(pte_t *ptep)
1342{
1343	return READ_ONCE(*ptep);
1344}
1345#endif
1346
1347static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
1348{
1349	while ((*nr) - nr_start) {
1350		struct page *page = pages[--(*nr)];
1351
1352		ClearPageReferenced(page);
1353		put_page(page);
1354	}
1355}
1356
1357#ifdef __HAVE_ARCH_PTE_SPECIAL
1358static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
1359			 int write, struct page **pages, int *nr)
1360{
1361	struct dev_pagemap *pgmap = NULL;
1362	int nr_start = *nr, ret = 0;
1363	pte_t *ptep, *ptem;
1364
1365	ptem = ptep = pte_offset_map(&pmd, addr);
1366	do {
1367		pte_t pte = gup_get_pte(ptep);
1368		struct page *head, *page;
1369
1370		/*
1371		 * Similar to the PMD case below, NUMA hinting must take slow
1372		 * path using the pte_protnone check.
1373		 */
1374		if (pte_protnone(pte))
1375			goto pte_unmap;
1376
1377		if (!pte_access_permitted(pte, write))
1378			goto pte_unmap;
1379
1380		if (pte_devmap(pte)) {
1381			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
1382			if (unlikely(!pgmap)) {
1383				undo_dev_pagemap(nr, nr_start, pages);
1384				goto pte_unmap;
1385			}
1386		} else if (pte_special(pte))
1387			goto pte_unmap;
1388
1389		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
1390		page = pte_page(pte);
1391		head = compound_head(page);
1392
1393		if (!page_cache_get_speculative(head))
1394			goto pte_unmap;
1395
1396		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
1397			put_page(head);
1398			goto pte_unmap;
1399		}
1400
1401		VM_BUG_ON_PAGE(compound_head(page) != head, page);
1402
1403		SetPageReferenced(page);
1404		pages[*nr] = page;
1405		(*nr)++;
1406
1407	} while (ptep++, addr += PAGE_SIZE, addr != end);
1408
1409	ret = 1;
1410
1411pte_unmap:
1412	if (pgmap)
1413		put_dev_pagemap(pgmap);
1414	pte_unmap(ptem);
1415	return ret;
1416}
1417#else
1418
1419/*
1420 * If we can't determine whether or not a pte is special, then fail immediately
1421 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
1422 * to be special.
1423 *
1424 * For a futex to be placed on a THP tail page, get_futex_key requires a
1425 * __get_user_pages_fast implementation that can pin pages. Thus it's still
1426 * useful to have gup_huge_pmd even if we can't operate on ptes.
1427 */
1428static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
1429			 int write, struct page **pages, int *nr)
1430{
1431	return 0;
1432}
1433#endif /* __HAVE_ARCH_PTE_SPECIAL */
1434
1435#if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1436static int __gup_device_huge(unsigned long pfn, unsigned long addr,
1437		unsigned long end, struct page **pages, int *nr)
1438{
1439	int nr_start = *nr;
1440	struct dev_pagemap *pgmap = NULL;
1441
1442	do {
1443		struct page *page = pfn_to_page(pfn);
1444
1445		pgmap = get_dev_pagemap(pfn, pgmap);
1446		if (unlikely(!pgmap)) {
1447			undo_dev_pagemap(nr, nr_start, pages);
1448			return 0;
1449		}
1450		SetPageReferenced(page);
1451		pages[*nr] = page;
1452		get_page(page);
1453		(*nr)++;
1454		pfn++;
1455	} while (addr += PAGE_SIZE, addr != end);
1456
1457	if (pgmap)
1458		put_dev_pagemap(pgmap);
1459	return 1;
1460}
1461
1462static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
1463		unsigned long end, struct page **pages, int *nr)
1464{
1465	unsigned long fault_pfn;
1466
1467	fault_pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
1468	return __gup_device_huge(fault_pfn, addr, end, pages, nr);
1469}
1470
1471static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
1472		unsigned long end, struct page **pages, int *nr)
1473{
1474	unsigned long fault_pfn;
1475
1476	fault_pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
1477	return __gup_device_huge(fault_pfn, addr, end, pages, nr);
1478}
1479#else
1480static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
1481		unsigned long end, struct page **pages, int *nr)
1482{
1483	BUILD_BUG();
1484	return 0;
1485}
1486
1487static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
1488		unsigned long end, struct page **pages, int *nr)
1489{
1490	BUILD_BUG();
1491	return 0;
1492}
1493#endif
1494
1495static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
1496		unsigned long end, int write, struct page **pages, int *nr)
1497{
1498	struct page *head, *page;
1499	int refs;
1500
1501	if (!pmd_access_permitted(orig, write))
1502		return 0;
1503
1504	if (pmd_devmap(orig))
1505		return __gup_device_huge_pmd(orig, addr, end, pages, nr);
1506
1507	refs = 0;
1508	page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
1509	do {
1510		pages[*nr] = page;
1511		(*nr)++;
1512		page++;
1513		refs++;
1514	} while (addr += PAGE_SIZE, addr != end);
1515
1516	head = compound_head(pmd_page(orig));
1517	if (!page_cache_add_speculative(head, refs)) {
1518		*nr -= refs;
1519		return 0;
1520	}
1521
1522	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
1523		*nr -= refs;
1524		while (refs--)
1525			put_page(head);
1526		return 0;
1527	}
1528
1529	SetPageReferenced(head);
1530	return 1;
1531}
1532
1533static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
1534		unsigned long end, int write, struct page **pages, int *nr)
1535{
1536	struct page *head, *page;
1537	int refs;
1538
1539	if (!pud_access_permitted(orig, write))
1540		return 0;
1541
1542	if (pud_devmap(orig))
1543		return __gup_device_huge_pud(orig, addr, end, pages, nr);
1544
1545	refs = 0;
1546	page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
1547	do {
1548		pages[*nr] = page;
1549		(*nr)++;
1550		page++;
1551		refs++;
1552	} while (addr += PAGE_SIZE, addr != end);
1553
1554	head = compound_head(pud_page(orig));
1555	if (!page_cache_add_speculative(head, refs)) {
1556		*nr -= refs;
1557		return 0;
1558	}
1559
1560	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
1561		*nr -= refs;
1562		while (refs--)
1563			put_page(head);
1564		return 0;
1565	}
1566
1567	SetPageReferenced(head);
1568	return 1;
1569}
1570
1571static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
1572			unsigned long end, int write,
1573			struct page **pages, int *nr)
1574{
1575	int refs;
1576	struct page *head, *page;
1577
1578	if (!pgd_access_permitted(orig, write))
1579		return 0;
1580
1581	BUILD_BUG_ON(pgd_devmap(orig));
1582	refs = 0;
1583	page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
1584	do {
1585		pages[*nr] = page;
1586		(*nr)++;
1587		page++;
1588		refs++;
1589	} while (addr += PAGE_SIZE, addr != end);
1590
1591	head = compound_head(pgd_page(orig));
1592	if (!page_cache_add_speculative(head, refs)) {
1593		*nr -= refs;
1594		return 0;
1595	}
1596
1597	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
1598		*nr -= refs;
1599		while (refs--)
1600			put_page(head);
1601		return 0;
1602	}
1603
1604	SetPageReferenced(head);
1605	return 1;
1606}
1607
1608static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
1609		int write, struct page **pages, int *nr)
1610{
1611	unsigned long next;
1612	pmd_t *pmdp;
1613
1614	pmdp = pmd_offset(&pud, addr);
1615	do {
1616		pmd_t pmd = READ_ONCE(*pmdp);
1617
1618		next = pmd_addr_end(addr, end);
1619		if (!pmd_present(pmd))
1620			return 0;
1621
1622		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
1623			/*
1624			 * NUMA hinting faults need to be handled in the GUP
1625			 * slowpath for accounting purposes and so that they
1626			 * can be serialised against THP migration.
1627			 */
1628			if (pmd_protnone(pmd))
1629				return 0;
1630
1631			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
1632				pages, nr))
1633				return 0;
1634
1635		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
1636			/*
1637			 * architectures may use a different format for the
1638			 * hugetlbfs pmd than for the THP pmd
1639			 */
1640			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
1641					 PMD_SHIFT, next, write, pages, nr))
1642				return 0;
1643		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
1644			return 0;
1645	} while (pmdp++, addr = next, addr != end);
1646
1647	return 1;
1648}
1649
1650static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
1651			 int write, struct page **pages, int *nr)
1652{
1653	unsigned long next;
1654	pud_t *pudp;
1655
1656	pudp = pud_offset(&p4d, addr);
1657	do {
1658		pud_t pud = READ_ONCE(*pudp);
1659
1660		next = pud_addr_end(addr, end);
1661		if (pud_none(pud))
1662			return 0;
1663		if (unlikely(pud_huge(pud))) {
1664			if (!gup_huge_pud(pud, pudp, addr, next, write,
1665					  pages, nr))
1666				return 0;
1667		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
1668			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
1669					 PUD_SHIFT, next, write, pages, nr))
1670				return 0;
1671		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
1672			return 0;
1673	} while (pudp++, addr = next, addr != end);
1674
1675	return 1;
1676}
1677
1678static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
1679			 int write, struct page **pages, int *nr)
1680{
1681	unsigned long next;
1682	p4d_t *p4dp;
1683
1684	p4dp = p4d_offset(&pgd, addr);
1685	do {
1686		p4d_t p4d = READ_ONCE(*p4dp);
1687
1688		next = p4d_addr_end(addr, end);
1689		if (p4d_none(p4d))
1690			return 0;
1691		BUILD_BUG_ON(p4d_huge(p4d));
1692		if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
1693			if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
1694					 P4D_SHIFT, next, write, pages, nr))
1695				return 0;
1696		} else if (!gup_pud_range(p4d, addr, next, write, pages, nr))
1697			return 0;
1698	} while (p4dp++, addr = next, addr != end);
1699
1700	return 1;
1701}
1702
1703static void gup_pgd_range(unsigned long addr, unsigned long end,
1704		int write, struct page **pages, int *nr)
1705{
1706	unsigned long next;
1707	pgd_t *pgdp;
1708
1709	pgdp = pgd_offset(current->mm, addr);
1710	do {
1711		pgd_t pgd = READ_ONCE(*pgdp);
1712
1713		next = pgd_addr_end(addr, end);
1714		if (pgd_none(pgd))
1715			return;
1716		if (unlikely(pgd_huge(pgd))) {
1717			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
1718					  pages, nr))
1719				return;
1720		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
1721			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
1722					 PGDIR_SHIFT, next, write, pages, nr))
1723				return;
1724		} else if (!gup_p4d_range(pgd, addr, next, write, pages, nr))
1725			return;
1726	} while (pgdp++, addr = next, addr != end);
1727}
1728
1729#ifndef gup_fast_permitted
1730/*
1731 * Check if it's allowed to use __get_user_pages_fast() for the range, or
1732 * we need to fall back to the slow version:
1733 */
1734bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
1735{
1736	unsigned long len, end;
1737
1738	len = (unsigned long) nr_pages << PAGE_SHIFT;
1739	end = start + len;
1740	return end >= start;
1741}
1742#endif
1743
1744/*
1745 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
1746 * the regular GUP.
1747 * Note a difference with get_user_pages_fast: this always returns the
1748 * number of pages pinned, 0 if no pages were pinned.
1749 */
1750int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1751			  struct page **pages)
1752{
1753	unsigned long addr, len, end;
1754	unsigned long flags;
1755	int nr = 0;
1756
1757	start &= PAGE_MASK;
1758	addr = start;
1759	len = (unsigned long) nr_pages << PAGE_SHIFT;
1760	end = start + len;
1761
1762	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
1763					(void __user *)start, len)))
1764		return 0;
1765
1766	/*
1767	 * Disable interrupts.  We use the nested form as we can already have
1768	 * interrupts disabled by get_futex_key.
1769	 *
1770	 * With interrupts disabled, we block page table pages from being
1771	 * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
1772	 * for more details.
1773	 *
1774	 * We do not adopt an rcu_read_lock(.) here as we also want to
1775	 * block IPIs that come from THPs splitting.
1776	 */
1777
1778	if (gup_fast_permitted(start, nr_pages, write)) {
1779		local_irq_save(flags);
1780		gup_pgd_range(addr, end, write, pages, &nr);
1781		local_irq_restore(flags);
1782	}
1783
1784	return nr;
1785}
1786
1787/**
1788 * get_user_pages_fast() - pin user pages in memory
1789 * @start:	starting user address
1790 * @nr_pages:	number of pages from start to pin
1791 * @write:	whether pages will be written to
1792 * @pages:	array that receives pointers to the pages pinned.
1793 *		Should be at least nr_pages long.
1794 *
1795 * Attempt to pin user pages in memory without taking mm->mmap_sem.
1796 * If not successful, it will fall back to taking the lock and
1797 * calling get_user_pages().
1798 *
1799 * Returns number of pages pinned. This may be fewer than the number
1800 * requested. If nr_pages is 0 or negative, returns 0. If no pages
1801 * were pinned, returns -errno.
1802 */
1803int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1804			struct page **pages)
1805{
1806	unsigned long addr, len, end;
1807	int nr = 0, ret = 0;
1808
1809	start &= PAGE_MASK;
1810	addr = start;
1811	len = (unsigned long) nr_pages << PAGE_SHIFT;
1812	end = start + len;
1813
1814	if (nr_pages <= 0)
1815		return 0;
1816
1817	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
1818					(void __user *)start, len)))
1819		return -EFAULT;
1820
1821	if (gup_fast_permitted(start, nr_pages, write)) {
1822		local_irq_disable();
1823		gup_pgd_range(addr, end, write, pages, &nr);
1824		local_irq_enable();
1825		ret = nr;
1826	}
1827
1828	if (nr < nr_pages) {
1829		/* Try to get the remaining pages with get_user_pages */
1830		start += (unsigned long)nr << PAGE_SHIFT;
1831		pages += nr;
1832
1833		ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
1834				write ? FOLL_WRITE : 0);
1835
1836		/* Have to be a bit careful with return values */
1837		if (nr > 0) {
1838			if (ret < 0)
1839				ret = nr;
1840			else
1841				ret += nr;
1842		}
1843	}
1844
1845	return ret;
1846}
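
/*
 * Illustrative sketch (not part of gup.c): get_user_pages_fast() may pin
 * fewer pages than requested, so a caller that needs the whole range either
 * loops or treats a short pin as failure.  example_pin_all_fast() is a
 * hypothetical helper taking the latter approach.
 */
#if 0	/* usage sketch */
static int example_pin_all_fast(unsigned long start, int nr_pages,
				struct page **pages)
{
	int pinned;

	pinned = get_user_pages_fast(start, nr_pages, 1 /* write */, pages);
	if (pinned < 0)
		return pinned;
	if (pinned != nr_pages) {
		while (pinned)
			put_page(pages[--pinned]);
		return -EFAULT;
	}
	return 0;
}
#endif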
1847
1848#endif /* CONFIG_HAVE_GENERIC_GUP */