Linux Audio

Check our new training course

Loading...
v3.15
   1/*
   2 * Generic hugetlb support.
   3 * (C) Nadia Yvette Chambers, April 2004
   4 */
   5#include <linux/list.h>
   6#include <linux/init.h>
   7#include <linux/module.h>
   8#include <linux/mm.h>
   9#include <linux/seq_file.h>
  10#include <linux/sysctl.h>
  11#include <linux/highmem.h>
  12#include <linux/mmu_notifier.h>
  13#include <linux/nodemask.h>
  14#include <linux/pagemap.h>
  15#include <linux/mempolicy.h>
  16#include <linux/compiler.h>
  17#include <linux/cpuset.h>
  18#include <linux/mutex.h>
  19#include <linux/bootmem.h>
  20#include <linux/sysfs.h>
  21#include <linux/slab.h>
  22#include <linux/rmap.h>
  23#include <linux/swap.h>
  24#include <linux/swapops.h>
  25#include <linux/page-isolation.h>
  26#include <linux/jhash.h>
  27
  28#include <asm/page.h>
  29#include <asm/pgtable.h>
  30#include <asm/tlb.h>
  31
  32#include <linux/io.h>
 
  33#include <linux/hugetlb.h>
  34#include <linux/hugetlb_cgroup.h>
  35#include <linux/node.h>
  36#include "internal.h"
  37
  38const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
 
  39unsigned long hugepages_treat_as_movable;
  40
  41int hugetlb_max_hstate __read_mostly;
  42unsigned int default_hstate_idx;
  43struct hstate hstates[HUGE_MAX_HSTATE];
  44
  45__initdata LIST_HEAD(huge_boot_pages);
  46
  47/* for command line parsing */
  48static struct hstate * __initdata parsed_hstate;
  49static unsigned long __initdata default_hstate_max_huge_pages;
  50static unsigned long __initdata default_hstate_size;
  51
  52/*
  53 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
  54 * free_huge_pages, and surplus_huge_pages.
  55 */
  56DEFINE_SPINLOCK(hugetlb_lock);
  57
  58/*
  59 * Serializes faults on the same logical page.  This is used to
  60 * prevent spurious OOMs when the hugepage pool is fully utilized.
  61 */
  62static int num_fault_mutexes;
  63static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp;
  64
  65static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
  66{
  67	bool free = (spool->count == 0) && (spool->used_hpages == 0);
  68
  69	spin_unlock(&spool->lock);
  70
  71	/* If no pages are used, and no other handles to the subpool
  72	 * remain, free the subpool the subpool remain */
  73	if (free)
  74		kfree(spool);
  75}
  76
  77struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
  78{
  79	struct hugepage_subpool *spool;
  80
  81	spool = kmalloc(sizeof(*spool), GFP_KERNEL);
  82	if (!spool)
  83		return NULL;
  84
  85	spin_lock_init(&spool->lock);
  86	spool->count = 1;
  87	spool->max_hpages = nr_blocks;
  88	spool->used_hpages = 0;
  89
  90	return spool;
  91}
  92
  93void hugepage_put_subpool(struct hugepage_subpool *spool)
  94{
  95	spin_lock(&spool->lock);
  96	BUG_ON(!spool->count);
  97	spool->count--;
  98	unlock_or_release_subpool(spool);
  99}
 100
 101static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
 102				      long delta)
 103{
 104	int ret = 0;
 105
 106	if (!spool)
 107		return 0;
 108
 109	spin_lock(&spool->lock);
 110	if ((spool->used_hpages + delta) <= spool->max_hpages) {
 111		spool->used_hpages += delta;
 112	} else {
 113		ret = -ENOMEM;
 114	}
 115	spin_unlock(&spool->lock);
 116
 117	return ret;
 118}
 119
 120static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
 121				       long delta)
 122{
 123	if (!spool)
 124		return;
 125
 126	spin_lock(&spool->lock);
 127	spool->used_hpages -= delta;
 128	/* If hugetlbfs_put_super couldn't free spool due to
 129	* an outstanding quota reference, free it now. */
 130	unlock_or_release_subpool(spool);
 131}
 132
 133static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
 134{
 135	return HUGETLBFS_SB(inode->i_sb)->spool;
 136}
 137
 138static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
 139{
 140	return subpool_inode(file_inode(vma->vm_file));
 141}
 142
 143/*
 144 * Region tracking -- allows tracking of reservations and instantiated pages
 145 *                    across the pages in a mapping.
 146 *
 147 * The region data structures are embedded into a resv_map and
 148 * protected by a resv_map's lock
 
 
 
 
 
 
 
 149 */
 150struct file_region {
 151	struct list_head link;
 152	long from;
 153	long to;
 154};
 155
 156static long region_add(struct resv_map *resv, long f, long t)
 157{
 158	struct list_head *head = &resv->regions;
 159	struct file_region *rg, *nrg, *trg;
 160
 161	spin_lock(&resv->lock);
 162	/* Locate the region we are either in or before. */
 163	list_for_each_entry(rg, head, link)
 164		if (f <= rg->to)
 165			break;
 166
 167	/* Round our left edge to the current segment if it encloses us. */
 168	if (f > rg->from)
 169		f = rg->from;
 170
 171	/* Check for and consume any regions we now overlap with. */
 172	nrg = rg;
 173	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
 174		if (&rg->link == head)
 175			break;
 176		if (rg->from > t)
 177			break;
 178
 179		/* If this area reaches higher then extend our area to
 180		 * include it completely.  If this is not the first area
 181		 * which we intend to reuse, free it. */
 182		if (rg->to > t)
 183			t = rg->to;
 184		if (rg != nrg) {
 185			list_del(&rg->link);
 186			kfree(rg);
 187		}
 188	}
 189	nrg->from = f;
 190	nrg->to = t;
 191	spin_unlock(&resv->lock);
 192	return 0;
 193}
 194
 195static long region_chg(struct resv_map *resv, long f, long t)
 196{
 197	struct list_head *head = &resv->regions;
 198	struct file_region *rg, *nrg = NULL;
 199	long chg = 0;
 200
 201retry:
 202	spin_lock(&resv->lock);
 203	/* Locate the region we are before or in. */
 204	list_for_each_entry(rg, head, link)
 205		if (f <= rg->to)
 206			break;
 207
 208	/* If we are below the current region then a new region is required.
 209	 * Subtle, allocate a new region at the position but make it zero
 210	 * size such that we can guarantee to record the reservation. */
 211	if (&rg->link == head || t < rg->from) {
 212		if (!nrg) {
 213			spin_unlock(&resv->lock);
 214			nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
 215			if (!nrg)
 216				return -ENOMEM;
 217
 218			nrg->from = f;
 219			nrg->to   = f;
 220			INIT_LIST_HEAD(&nrg->link);
 221			goto retry;
 222		}
 223
 224		list_add(&nrg->link, rg->link.prev);
 225		chg = t - f;
 226		goto out_nrg;
 227	}
 228
 229	/* Round our left edge to the current segment if it encloses us. */
 230	if (f > rg->from)
 231		f = rg->from;
 232	chg = t - f;
 233
 234	/* Check for and consume any regions we now overlap with. */
 235	list_for_each_entry(rg, rg->link.prev, link) {
 236		if (&rg->link == head)
 237			break;
 238		if (rg->from > t)
 239			goto out;
 240
 241		/* We overlap with this area, if it extends further than
 242		 * us then we must extend ourselves.  Account for its
 243		 * existing reservation. */
 244		if (rg->to > t) {
 245			chg += rg->to - t;
 246			t = rg->to;
 247		}
 248		chg -= rg->to - rg->from;
 249	}
 250
 251out:
 252	spin_unlock(&resv->lock);
 253	/*  We already know we raced and no longer need the new region */
 254	kfree(nrg);
 255	return chg;
 256out_nrg:
 257	spin_unlock(&resv->lock);
 258	return chg;
 259}
 260
 261static long region_truncate(struct resv_map *resv, long end)
 262{
 263	struct list_head *head = &resv->regions;
 264	struct file_region *rg, *trg;
 265	long chg = 0;
 266
 267	spin_lock(&resv->lock);
 268	/* Locate the region we are either in or before. */
 269	list_for_each_entry(rg, head, link)
 270		if (end <= rg->to)
 271			break;
 272	if (&rg->link == head)
 273		goto out;
 274
 275	/* If we are in the middle of a region then adjust it. */
 276	if (end > rg->from) {
 277		chg = rg->to - end;
 278		rg->to = end;
 279		rg = list_entry(rg->link.next, typeof(*rg), link);
 280	}
 281
 282	/* Drop any remaining regions. */
 283	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
 284		if (&rg->link == head)
 285			break;
 286		chg += rg->to - rg->from;
 287		list_del(&rg->link);
 288		kfree(rg);
 289	}
 290
 291out:
 292	spin_unlock(&resv->lock);
 293	return chg;
 294}
 295
 296static long region_count(struct resv_map *resv, long f, long t)
 297{
 298	struct list_head *head = &resv->regions;
 299	struct file_region *rg;
 300	long chg = 0;
 301
 302	spin_lock(&resv->lock);
 303	/* Locate each segment we overlap with, and count that overlap. */
 304	list_for_each_entry(rg, head, link) {
 305		long seg_from;
 306		long seg_to;
 307
 308		if (rg->to <= f)
 309			continue;
 310		if (rg->from >= t)
 311			break;
 312
 313		seg_from = max(rg->from, f);
 314		seg_to = min(rg->to, t);
 315
 316		chg += seg_to - seg_from;
 317	}
 318	spin_unlock(&resv->lock);
 319
 320	return chg;
 321}
 322
 323/*
 324 * Convert the address within this vma to the page offset within
 325 * the mapping, in pagecache page units; huge pages here.
 326 */
 327static pgoff_t vma_hugecache_offset(struct hstate *h,
 328			struct vm_area_struct *vma, unsigned long address)
 329{
 330	return ((address - vma->vm_start) >> huge_page_shift(h)) +
 331			(vma->vm_pgoff >> huge_page_order(h));
 332}
 333
 334pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
 335				     unsigned long address)
 336{
 337	return vma_hugecache_offset(hstate_vma(vma), vma, address);
 338}
 339
 340/*
 341 * Return the size of the pages allocated when backing a VMA. In the majority
 342 * cases this will be same size as used by the page table entries.
 343 */
 344unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
 345{
 346	struct hstate *hstate;
 347
 348	if (!is_vm_hugetlb_page(vma))
 349		return PAGE_SIZE;
 350
 351	hstate = hstate_vma(vma);
 352
 353	return 1UL << huge_page_shift(hstate);
 354}
 355EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
 356
 357/*
 358 * Return the page size being used by the MMU to back a VMA. In the majority
 359 * of cases, the page size used by the kernel matches the MMU size. On
 360 * architectures where it differs, an architecture-specific version of this
 361 * function is required.
 362 */
 363#ifndef vma_mmu_pagesize
 364unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
 365{
 366	return vma_kernel_pagesize(vma);
 367}
 368#endif
 369
 370/*
 371 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 372 * bits of the reservation map pointer, which are always clear due to
 373 * alignment.
 374 */
 375#define HPAGE_RESV_OWNER    (1UL << 0)
 376#define HPAGE_RESV_UNMAPPED (1UL << 1)
 377#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
 378
 379/*
 380 * These helpers are used to track how many pages are reserved for
 381 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 382 * is guaranteed to have their future faults succeed.
 383 *
 384 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 385 * the reserve counters are updated with the hugetlb_lock held. It is safe
 386 * to reset the VMA at fork() time as it is not in use yet and there is no
 387 * chance of the global counters getting corrupted as a result of the values.
 388 *
 389 * The private mapping reservation is represented in a subtly different
 390 * manner to a shared mapping.  A shared mapping has a region map associated
 391 * with the underlying file, this region map represents the backing file
 392 * pages which have ever had a reservation assigned which this persists even
 393 * after the page is instantiated.  A private mapping has a region map
 394 * associated with the original mmap which is attached to all VMAs which
 395 * reference it, this region map represents those offsets which have consumed
 396 * reservation ie. where pages have been instantiated.
 397 */
 398static unsigned long get_vma_private_data(struct vm_area_struct *vma)
 399{
 400	return (unsigned long)vma->vm_private_data;
 401}
 402
 403static void set_vma_private_data(struct vm_area_struct *vma,
 404							unsigned long value)
 405{
 406	vma->vm_private_data = (void *)value;
 407}
 408
 409struct resv_map *resv_map_alloc(void)
 
 
 
 
 
 410{
 411	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
 412	if (!resv_map)
 413		return NULL;
 414
 415	kref_init(&resv_map->refs);
 416	spin_lock_init(&resv_map->lock);
 417	INIT_LIST_HEAD(&resv_map->regions);
 418
 419	return resv_map;
 420}
 421
 422void resv_map_release(struct kref *ref)
 423{
 424	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
 425
 426	/* Clear out any active regions before we release the map. */
 427	region_truncate(resv_map, 0);
 428	kfree(resv_map);
 429}
 430
 431static inline struct resv_map *inode_resv_map(struct inode *inode)
 432{
 433	return inode->i_mapping->private_data;
 434}
 435
 436static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 437{
 438	VM_BUG_ON(!is_vm_hugetlb_page(vma));
 439	if (vma->vm_flags & VM_MAYSHARE) {
 440		struct address_space *mapping = vma->vm_file->f_mapping;
 441		struct inode *inode = mapping->host;
 442
 443		return inode_resv_map(inode);
 444
 445	} else {
 446		return (struct resv_map *)(get_vma_private_data(vma) &
 447							~HPAGE_RESV_MASK);
 448	}
 449}
 450
 451static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 452{
 453	VM_BUG_ON(!is_vm_hugetlb_page(vma));
 454	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
 455
 456	set_vma_private_data(vma, (get_vma_private_data(vma) &
 457				HPAGE_RESV_MASK) | (unsigned long)map);
 458}
 459
 460static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
 461{
 462	VM_BUG_ON(!is_vm_hugetlb_page(vma));
 463	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
 464
 465	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
 466}
 467
 468static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
 469{
 470	VM_BUG_ON(!is_vm_hugetlb_page(vma));
 471
 472	return (get_vma_private_data(vma) & flag) != 0;
 473}
 474
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 475/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
 476void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 477{
 478	VM_BUG_ON(!is_vm_hugetlb_page(vma));
 479	if (!(vma->vm_flags & VM_MAYSHARE))
 480		vma->vm_private_data = (void *)0;
 481}
 482
 483/* Returns true if the VMA has associated reserve pages */
 484static int vma_has_reserves(struct vm_area_struct *vma, long chg)
 485{
 486	if (vma->vm_flags & VM_NORESERVE) {
 487		/*
 488		 * This address is already reserved by other process(chg == 0),
 489		 * so, we should decrement reserved count. Without decrementing,
 490		 * reserve count remains after releasing inode, because this
 491		 * allocated page will go into page cache and is regarded as
 492		 * coming from reserved pool in releasing step.  Currently, we
 493		 * don't have any other solution to deal with this situation
 494		 * properly, so add work-around here.
 495		 */
 496		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
 497			return 1;
 498		else
 499			return 0;
 500	}
 501
 502	/* Shared mappings always use reserves */
 503	if (vma->vm_flags & VM_MAYSHARE)
 504		return 1;
 505
 506	/*
 507	 * Only the process that called mmap() has reserves for
 508	 * private mappings.
 509	 */
 510	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
 511		return 1;
 512
 513	return 0;
 514}
 515
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 516static void enqueue_huge_page(struct hstate *h, struct page *page)
 517{
 518	int nid = page_to_nid(page);
 519	list_move(&page->lru, &h->hugepage_freelists[nid]);
 520	h->free_huge_pages++;
 521	h->free_huge_pages_node[nid]++;
 522}
 523
 524static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
 525{
 526	struct page *page;
 527
 528	list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
 529		if (!is_migrate_isolate_page(page))
 530			break;
 531	/*
 532	 * if 'non-isolated free hugepage' not found on the list,
 533	 * the allocation fails.
 534	 */
 535	if (&h->hugepage_freelists[nid] == &page->lru)
 536		return NULL;
 537	list_move(&page->lru, &h->hugepage_activelist);
 
 538	set_page_refcounted(page);
 539	h->free_huge_pages--;
 540	h->free_huge_pages_node[nid]--;
 541	return page;
 542}
 543
 544/* Movability of hugepages depends on migration support. */
 545static inline gfp_t htlb_alloc_mask(struct hstate *h)
 546{
 547	if (hugepages_treat_as_movable || hugepage_migration_support(h))
 548		return GFP_HIGHUSER_MOVABLE;
 549	else
 550		return GFP_HIGHUSER;
 551}
 552
 553static struct page *dequeue_huge_page_vma(struct hstate *h,
 554				struct vm_area_struct *vma,
 555				unsigned long address, int avoid_reserve,
 556				long chg)
 557{
 558	struct page *page = NULL;
 559	struct mempolicy *mpol;
 560	nodemask_t *nodemask;
 561	struct zonelist *zonelist;
 562	struct zone *zone;
 563	struct zoneref *z;
 564	unsigned int cpuset_mems_cookie;
 565
 
 
 
 566	/*
 567	 * A child process with MAP_PRIVATE mappings created by their parent
 568	 * have no page reserves. This check ensures that reservations are
 569	 * not "stolen". The child may still get SIGKILLed
 570	 */
 571	if (!vma_has_reserves(vma, chg) &&
 572			h->free_huge_pages - h->resv_huge_pages == 0)
 573		goto err;
 574
 575	/* If reserves cannot be used, ensure enough pages are in the pool */
 576	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
 577		goto err;
 578
 579retry_cpuset:
 580	cpuset_mems_cookie = read_mems_allowed_begin();
 581	zonelist = huge_zonelist(vma, address,
 582					htlb_alloc_mask(h), &mpol, &nodemask);
 583
 584	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 585						MAX_NR_ZONES - 1, nodemask) {
 586		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask(h))) {
 587			page = dequeue_huge_page_node(h, zone_to_nid(zone));
 588			if (page) {
 589				if (avoid_reserve)
 590					break;
 591				if (!vma_has_reserves(vma, chg))
 592					break;
 593
 594				SetPagePrivate(page);
 595				h->resv_huge_pages--;
 596				break;
 597			}
 598		}
 599	}
 600
 601	mpol_cond_put(mpol);
 602	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
 603		goto retry_cpuset;
 604	return page;
 605
 606err:
 607	return NULL;
 608}
 609
 610static void update_and_free_page(struct hstate *h, struct page *page)
 611{
 612	int i;
 613
 614	VM_BUG_ON(h->order >= MAX_ORDER);
 615
 616	h->nr_huge_pages--;
 617	h->nr_huge_pages_node[page_to_nid(page)]--;
 618	for (i = 0; i < pages_per_huge_page(h); i++) {
 619		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
 620				1 << PG_referenced | 1 << PG_dirty |
 621				1 << PG_active | 1 << PG_reserved |
 622				1 << PG_private | 1 << PG_writeback);
 623	}
 624	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
 625	set_compound_page_dtor(page, NULL);
 626	set_page_refcounted(page);
 627	arch_release_hugepage(page);
 628	__free_pages(page, huge_page_order(h));
 629}
 630
 631struct hstate *size_to_hstate(unsigned long size)
 632{
 633	struct hstate *h;
 634
 635	for_each_hstate(h) {
 636		if (huge_page_size(h) == size)
 637			return h;
 638	}
 639	return NULL;
 640}
 641
 642static void free_huge_page(struct page *page)
 643{
 644	/*
 645	 * Can't pass hstate in here because it is called from the
 646	 * compound page destructor.
 647	 */
 648	struct hstate *h = page_hstate(page);
 649	int nid = page_to_nid(page);
 650	struct hugepage_subpool *spool =
 651		(struct hugepage_subpool *)page_private(page);
 652	bool restore_reserve;
 653
 
 654	set_page_private(page, 0);
 655	page->mapping = NULL;
 656	BUG_ON(page_count(page));
 657	BUG_ON(page_mapcount(page));
 658	restore_reserve = PagePrivate(page);
 659	ClearPagePrivate(page);
 660
 661	spin_lock(&hugetlb_lock);
 662	hugetlb_cgroup_uncharge_page(hstate_index(h),
 663				     pages_per_huge_page(h), page);
 664	if (restore_reserve)
 665		h->resv_huge_pages++;
 666
 667	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
 668		/* remove the page from active list */
 669		list_del(&page->lru);
 670		update_and_free_page(h, page);
 671		h->surplus_huge_pages--;
 672		h->surplus_huge_pages_node[nid]--;
 673	} else {
 674		arch_clear_hugepage_flags(page);
 675		enqueue_huge_page(h, page);
 676	}
 677	spin_unlock(&hugetlb_lock);
 678	hugepage_subpool_put_pages(spool, 1);
 
 679}
 680
 681static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 682{
 683	INIT_LIST_HEAD(&page->lru);
 684	set_compound_page_dtor(page, free_huge_page);
 685	spin_lock(&hugetlb_lock);
 686	set_hugetlb_cgroup(page, NULL);
 687	h->nr_huge_pages++;
 688	h->nr_huge_pages_node[nid]++;
 689	spin_unlock(&hugetlb_lock);
 690	put_page(page); /* free it into the hugepage allocator */
 691}
 692
 693static void __init prep_compound_gigantic_page(struct page *page,
 694					       unsigned long order)
 695{
 696	int i;
 697	int nr_pages = 1 << order;
 698	struct page *p = page + 1;
 699
 700	/* we rely on prep_new_huge_page to set the destructor */
 701	set_compound_order(page, order);
 702	__SetPageHead(page);
 703	__ClearPageReserved(page);
 704	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
 705		__SetPageTail(p);
 706		/*
 707		 * For gigantic hugepages allocated through bootmem at
 708		 * boot, it's safer to be consistent with the not-gigantic
 709		 * hugepages and clear the PG_reserved bit from all tail pages
 710		 * too.  Otherwse drivers using get_user_pages() to access tail
 711		 * pages may get the reference counting wrong if they see
 712		 * PG_reserved set on a tail page (despite the head page not
 713		 * having PG_reserved set).  Enforcing this consistency between
 714		 * head and tail pages allows drivers to optimize away a check
 715		 * on the head page when they need know if put_page() is needed
 716		 * after get_user_pages().
 717		 */
 718		__ClearPageReserved(p);
 719		set_page_count(p, 0);
 720		p->first_page = page;
 721	}
 722}
 723
 724/*
 725 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 726 * transparent huge pages.  See the PageTransHuge() documentation for more
 727 * details.
 728 */
 729int PageHuge(struct page *page)
 730{
 
 
 731	if (!PageCompound(page))
 732		return 0;
 733
 734	page = compound_head(page);
 735	return get_compound_page_dtor(page) == free_huge_page;
 736}
 737EXPORT_SYMBOL_GPL(PageHuge);
 738
 739/*
 740 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
 741 * normal or transparent huge pages.
 742 */
 743int PageHeadHuge(struct page *page_head)
 744{
 745	if (!PageHead(page_head))
 746		return 0;
 747
 748	return get_compound_page_dtor(page_head) == free_huge_page;
 749}
 750
 751pgoff_t __basepage_index(struct page *page)
 752{
 753	struct page *page_head = compound_head(page);
 754	pgoff_t index = page_index(page_head);
 755	unsigned long compound_idx;
 756
 757	if (!PageHuge(page_head))
 758		return page_index(page);
 759
 760	if (compound_order(page_head) >= MAX_ORDER)
 761		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
 762	else
 763		compound_idx = page - page_head;
 764
 765	return (index << compound_order(page_head)) + compound_idx;
 766}
 
 767
 768static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 769{
 770	struct page *page;
 771
 772	if (h->order >= MAX_ORDER)
 773		return NULL;
 774
 775	page = alloc_pages_exact_node(nid,
 776		htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
 777						__GFP_REPEAT|__GFP_NOWARN,
 778		huge_page_order(h));
 779	if (page) {
 780		if (arch_prepare_hugepage(page)) {
 781			__free_pages(page, huge_page_order(h));
 782			return NULL;
 783		}
 784		prep_new_huge_page(h, page, nid);
 785	}
 786
 787	return page;
 788}
 789
 790/*
 791 * common helper functions for hstate_next_node_to_{alloc|free}.
 792 * We may have allocated or freed a huge page based on a different
 793 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 794 * be outside of *nodes_allowed.  Ensure that we use an allowed
 795 * node for alloc or free.
 796 */
 797static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
 798{
 799	nid = next_node(nid, *nodes_allowed);
 800	if (nid == MAX_NUMNODES)
 801		nid = first_node(*nodes_allowed);
 802	VM_BUG_ON(nid >= MAX_NUMNODES);
 803
 804	return nid;
 805}
 806
 807static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
 808{
 809	if (!node_isset(nid, *nodes_allowed))
 810		nid = next_node_allowed(nid, nodes_allowed);
 811	return nid;
 812}
 813
 814/*
 815 * returns the previously saved node ["this node"] from which to
 816 * allocate a persistent huge page for the pool and advance the
 817 * next node from which to allocate, handling wrap at end of node
 818 * mask.
 819 */
 820static int hstate_next_node_to_alloc(struct hstate *h,
 821					nodemask_t *nodes_allowed)
 822{
 823	int nid;
 824
 825	VM_BUG_ON(!nodes_allowed);
 826
 827	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
 828	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
 829
 830	return nid;
 831}
 832
 833/*
 834 * helper for free_pool_huge_page() - return the previously saved
 835 * node ["this node"] from which to free a huge page.  Advance the
 836 * next node id whether or not we find a free huge page to free so
 837 * that the next attempt to free addresses the next node.
 838 */
 839static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
 840{
 841	int nid;
 842
 843	VM_BUG_ON(!nodes_allowed);
 844
 845	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
 846	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
 847
 848	return nid;
 849}
 850
 851#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
 852	for (nr_nodes = nodes_weight(*mask);				\
 853		nr_nodes > 0 &&						\
 854		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
 855		nr_nodes--)
 856
 857#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
 858	for (nr_nodes = nodes_weight(*mask);				\
 859		nr_nodes > 0 &&						\
 860		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
 861		nr_nodes--)
 862
 863static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
 864{
 865	struct page *page;
 866	int nr_nodes, node;
 
 867	int ret = 0;
 868
 869	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 870		page = alloc_fresh_huge_page_node(h, node);
 
 
 
 871		if (page) {
 872			ret = 1;
 873			break;
 874		}
 875	}
 
 876
 877	if (ret)
 878		count_vm_event(HTLB_BUDDY_PGALLOC);
 879	else
 880		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
 881
 882	return ret;
 883}
 884
 885/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 886 * Free huge page from pool from next node to free.
 887 * Attempt to keep persistent huge pages more or less
 888 * balanced over allowed nodes.
 889 * Called with hugetlb_lock locked.
 890 */
 891static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 892							 bool acct_surplus)
 893{
 894	int nr_nodes, node;
 
 895	int ret = 0;
 896
 897	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
 
 
 
 898		/*
 899		 * If we're returning unused surplus pages, only examine
 900		 * nodes with surplus pages.
 901		 */
 902		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
 903		    !list_empty(&h->hugepage_freelists[node])) {
 904			struct page *page =
 905				list_entry(h->hugepage_freelists[node].next,
 906					  struct page, lru);
 907			list_del(&page->lru);
 908			h->free_huge_pages--;
 909			h->free_huge_pages_node[node]--;
 910			if (acct_surplus) {
 911				h->surplus_huge_pages--;
 912				h->surplus_huge_pages_node[node]--;
 913			}
 914			update_and_free_page(h, page);
 915			ret = 1;
 916			break;
 917		}
 918	}
 
 919
 920	return ret;
 921}
 922
 923/*
 924 * Dissolve a given free hugepage into free buddy pages. This function does
 925 * nothing for in-use (including surplus) hugepages.
 926 */
 927static void dissolve_free_huge_page(struct page *page)
 928{
 929	spin_lock(&hugetlb_lock);
 930	if (PageHuge(page) && !page_count(page)) {
 931		struct hstate *h = page_hstate(page);
 932		int nid = page_to_nid(page);
 933		list_del(&page->lru);
 934		h->free_huge_pages--;
 935		h->free_huge_pages_node[nid]--;
 936		update_and_free_page(h, page);
 937	}
 938	spin_unlock(&hugetlb_lock);
 939}
 940
 941/*
 942 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 943 * make specified memory blocks removable from the system.
 944 * Note that start_pfn should aligned with (minimum) hugepage size.
 945 */
 946void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 947{
 948	unsigned int order = 8 * sizeof(void *);
 949	unsigned long pfn;
 950	struct hstate *h;
 951
 952	/* Set scan step to minimum hugepage size */
 953	for_each_hstate(h)
 954		if (order > huge_page_order(h))
 955			order = huge_page_order(h);
 956	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
 957	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
 958		dissolve_free_huge_page(pfn_to_page(pfn));
 959}
 960
 961static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
 962{
 963	struct page *page;
 964	unsigned int r_nid;
 965
 966	if (h->order >= MAX_ORDER)
 967		return NULL;
 968
 969	/*
 970	 * Assume we will successfully allocate the surplus page to
 971	 * prevent racing processes from causing the surplus to exceed
 972	 * overcommit
 973	 *
 974	 * This however introduces a different race, where a process B
 975	 * tries to grow the static hugepage pool while alloc_pages() is
 976	 * called by process A. B will only examine the per-node
 977	 * counters in determining if surplus huge pages can be
 978	 * converted to normal huge pages in adjust_pool_surplus(). A
 979	 * won't be able to increment the per-node counter, until the
 980	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
 981	 * no more huge pages can be converted from surplus to normal
 982	 * state (and doesn't try to convert again). Thus, we have a
 983	 * case where a surplus huge page exists, the pool is grown, and
 984	 * the surplus huge page still exists after, even though it
 985	 * should just have been converted to a normal huge page. This
 986	 * does not leak memory, though, as the hugepage will be freed
 987	 * once it is out of use. It also does not allow the counters to
 988	 * go out of whack in adjust_pool_surplus() as we don't modify
 989	 * the node values until we've gotten the hugepage and only the
 990	 * per-node value is checked there.
 991	 */
 992	spin_lock(&hugetlb_lock);
 993	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
 994		spin_unlock(&hugetlb_lock);
 995		return NULL;
 996	} else {
 997		h->nr_huge_pages++;
 998		h->surplus_huge_pages++;
 999	}
1000	spin_unlock(&hugetlb_lock);
1001
1002	if (nid == NUMA_NO_NODE)
1003		page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP|
1004				   __GFP_REPEAT|__GFP_NOWARN,
1005				   huge_page_order(h));
1006	else
1007		page = alloc_pages_exact_node(nid,
1008			htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1009			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
1010
1011	if (page && arch_prepare_hugepage(page)) {
1012		__free_pages(page, huge_page_order(h));
1013		page = NULL;
1014	}
1015
1016	spin_lock(&hugetlb_lock);
1017	if (page) {
1018		INIT_LIST_HEAD(&page->lru);
1019		r_nid = page_to_nid(page);
1020		set_compound_page_dtor(page, free_huge_page);
1021		set_hugetlb_cgroup(page, NULL);
1022		/*
1023		 * We incremented the global counters already
1024		 */
1025		h->nr_huge_pages_node[r_nid]++;
1026		h->surplus_huge_pages_node[r_nid]++;
1027		__count_vm_event(HTLB_BUDDY_PGALLOC);
1028	} else {
1029		h->nr_huge_pages--;
1030		h->surplus_huge_pages--;
1031		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1032	}
1033	spin_unlock(&hugetlb_lock);
1034
1035	return page;
1036}
1037
1038/*
1039 * This allocation function is useful in the context where vma is irrelevant.
1040 * E.g. soft-offlining uses this function because it only cares physical
1041 * address of error page.
1042 */
1043struct page *alloc_huge_page_node(struct hstate *h, int nid)
1044{
1045	struct page *page = NULL;
1046
1047	spin_lock(&hugetlb_lock);
1048	if (h->free_huge_pages - h->resv_huge_pages > 0)
1049		page = dequeue_huge_page_node(h, nid);
1050	spin_unlock(&hugetlb_lock);
1051
1052	if (!page)
1053		page = alloc_buddy_huge_page(h, nid);
1054
1055	return page;
1056}
1057
1058/*
1059 * Increase the hugetlb pool such that it can accommodate a reservation
1060 * of size 'delta'.
1061 */
1062static int gather_surplus_pages(struct hstate *h, int delta)
1063{
1064	struct list_head surplus_list;
1065	struct page *page, *tmp;
1066	int ret, i;
1067	int needed, allocated;
1068	bool alloc_ok = true;
1069
1070	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1071	if (needed <= 0) {
1072		h->resv_huge_pages += delta;
1073		return 0;
1074	}
1075
1076	allocated = 0;
1077	INIT_LIST_HEAD(&surplus_list);
1078
1079	ret = -ENOMEM;
1080retry:
1081	spin_unlock(&hugetlb_lock);
1082	for (i = 0; i < needed; i++) {
1083		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1084		if (!page) {
1085			alloc_ok = false;
1086			break;
1087		}
 
 
 
 
1088		list_add(&page->lru, &surplus_list);
1089	}
1090	allocated += i;
1091
1092	/*
1093	 * After retaking hugetlb_lock, we need to recalculate 'needed'
1094	 * because either resv_huge_pages or free_huge_pages may have changed.
1095	 */
1096	spin_lock(&hugetlb_lock);
1097	needed = (h->resv_huge_pages + delta) -
1098			(h->free_huge_pages + allocated);
1099	if (needed > 0) {
1100		if (alloc_ok)
1101			goto retry;
1102		/*
1103		 * We were not able to allocate enough pages to
1104		 * satisfy the entire reservation so we free what
1105		 * we've allocated so far.
1106		 */
1107		goto free;
1108	}
1109	/*
1110	 * The surplus_list now contains _at_least_ the number of extra pages
1111	 * needed to accommodate the reservation.  Add the appropriate number
1112	 * of pages to the hugetlb pool and free the extras back to the buddy
1113	 * allocator.  Commit the entire reservation here to prevent another
1114	 * process from stealing the pages as they are added to the pool but
1115	 * before they are reserved.
1116	 */
1117	needed += allocated;
1118	h->resv_huge_pages += delta;
1119	ret = 0;
1120
 
1121	/* Free the needed pages to the hugetlb pool */
1122	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1123		if ((--needed) < 0)
1124			break;
 
1125		/*
1126		 * This page is now managed by the hugetlb allocator and has
1127		 * no users -- drop the buddy allocator's reference.
1128		 */
1129		put_page_testzero(page);
1130		VM_BUG_ON_PAGE(page_count(page), page);
1131		enqueue_huge_page(h, page);
1132	}
1133free:
1134	spin_unlock(&hugetlb_lock);
1135
1136	/* Free unnecessary surplus pages to the buddy allocator */
1137	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1138		put_page(page);
 
 
 
 
 
1139	spin_lock(&hugetlb_lock);
1140
1141	return ret;
1142}
1143
1144/*
1145 * When releasing a hugetlb pool reservation, any surplus pages that were
1146 * allocated to satisfy the reservation must be explicitly freed if they were
1147 * never used.
1148 * Called with hugetlb_lock held.
1149 */
1150static void return_unused_surplus_pages(struct hstate *h,
1151					unsigned long unused_resv_pages)
1152{
1153	unsigned long nr_pages;
1154
1155	/* Uncommit the reservation */
1156	h->resv_huge_pages -= unused_resv_pages;
1157
1158	/* Cannot return gigantic pages currently */
1159	if (h->order >= MAX_ORDER)
1160		return;
1161
1162	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1163
1164	/*
1165	 * We want to release as many surplus pages as possible, spread
1166	 * evenly across all nodes with memory. Iterate across these nodes
1167	 * until we can no longer free unreserved surplus pages. This occurs
1168	 * when the nodes with surplus pages have no free pages.
1169	 * free_pool_huge_page() will balance the the freed pages across the
1170	 * on-line nodes with memory and will handle the hstate accounting.
1171	 */
1172	while (nr_pages--) {
1173		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1174			break;
1175		cond_resched_lock(&hugetlb_lock);
1176	}
1177}
1178
1179/*
1180 * Determine if the huge page at addr within the vma has an associated
1181 * reservation.  Where it does not we will need to logically increase
1182 * reservation and actually increase subpool usage before an allocation
1183 * can occur.  Where any new reservation would be required the
1184 * reservation change is prepared, but not committed.  Once the page
1185 * has been allocated from the subpool and instantiated the change should
1186 * be committed via vma_commit_reservation.  No action is required on
1187 * failure.
1188 */
1189static long vma_needs_reservation(struct hstate *h,
1190			struct vm_area_struct *vma, unsigned long addr)
1191{
1192	struct resv_map *resv;
1193	pgoff_t idx;
1194	long chg;
1195
1196	resv = vma_resv_map(vma);
1197	if (!resv)
1198		return 1;
 
1199
1200	idx = vma_hugecache_offset(h, vma, addr);
1201	chg = region_chg(resv, idx, idx + 1);
1202
1203	if (vma->vm_flags & VM_MAYSHARE)
1204		return chg;
1205	else
1206		return chg < 0 ? chg : 0;
 
 
 
 
 
 
1207}
1208static void vma_commit_reservation(struct hstate *h,
1209			struct vm_area_struct *vma, unsigned long addr)
1210{
1211	struct resv_map *resv;
1212	pgoff_t idx;
1213
1214	resv = vma_resv_map(vma);
1215	if (!resv)
1216		return;
 
 
 
 
1217
1218	idx = vma_hugecache_offset(h, vma, addr);
1219	region_add(resv, idx, idx + 1);
 
1220}
1221
1222static struct page *alloc_huge_page(struct vm_area_struct *vma,
1223				    unsigned long addr, int avoid_reserve)
1224{
1225	struct hugepage_subpool *spool = subpool_vma(vma);
1226	struct hstate *h = hstate_vma(vma);
1227	struct page *page;
 
 
1228	long chg;
1229	int ret, idx;
1230	struct hugetlb_cgroup *h_cg;
1231
1232	idx = hstate_index(h);
1233	/*
1234	 * Processes that did not create the mapping will have no
1235	 * reserves and will not have accounted against subpool
1236	 * limit. Check that the subpool limit can be made before
1237	 * satisfying the allocation MAP_NORESERVE mappings may also
1238	 * need pages and subpool limit allocated allocated if no reserve
1239	 * mapping overlaps.
1240	 */
1241	chg = vma_needs_reservation(h, vma, addr);
1242	if (chg < 0)
1243		return ERR_PTR(-ENOMEM);
1244	if (chg || avoid_reserve)
1245		if (hugepage_subpool_get_pages(spool, 1))
1246			return ERR_PTR(-ENOSPC);
1247
1248	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1249	if (ret) {
1250		if (chg || avoid_reserve)
1251			hugepage_subpool_put_pages(spool, 1);
1252		return ERR_PTR(-ENOSPC);
1253	}
1254	spin_lock(&hugetlb_lock);
1255	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
 
 
1256	if (!page) {
1257		spin_unlock(&hugetlb_lock);
1258		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1259		if (!page) {
1260			hugetlb_cgroup_uncharge_cgroup(idx,
1261						       pages_per_huge_page(h),
1262						       h_cg);
1263			if (chg || avoid_reserve)
1264				hugepage_subpool_put_pages(spool, 1);
1265			return ERR_PTR(-ENOSPC);
1266		}
1267		spin_lock(&hugetlb_lock);
1268		list_move(&page->lru, &h->hugepage_activelist);
1269		/* Fall through */
1270	}
1271	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1272	spin_unlock(&hugetlb_lock);
1273
1274	set_page_private(page, (unsigned long)spool);
1275
1276	vma_commit_reservation(h, vma, addr);
1277	return page;
1278}
1279
1280/*
1281 * alloc_huge_page()'s wrapper which simply returns the page if allocation
1282 * succeeds, otherwise NULL. This function is called from new_vma_page(),
1283 * where no ERR_VALUE is expected to be returned.
1284 */
1285struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
1286				unsigned long addr, int avoid_reserve)
1287{
1288	struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
1289	if (IS_ERR(page))
1290		page = NULL;
1291	return page;
1292}
1293
1294int __weak alloc_bootmem_huge_page(struct hstate *h)
1295{
1296	struct huge_bootmem_page *m;
1297	int nr_nodes, node;
1298
1299	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
1300		void *addr;
1301
1302		addr = memblock_virt_alloc_try_nid_nopanic(
1303				huge_page_size(h), huge_page_size(h),
1304				0, BOOTMEM_ALLOC_ACCESSIBLE, node);
 
 
1305		if (addr) {
1306			/*
1307			 * Use the beginning of the huge page to store the
1308			 * huge_bootmem_page struct (until gather_bootmem
1309			 * puts them into the mem_map).
1310			 */
1311			m = addr;
1312			goto found;
1313		}
 
1314	}
1315	return 0;
1316
1317found:
1318	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
1319	/* Put them into a private list first because mem_map is not up yet */
1320	list_add(&m->list, &huge_boot_pages);
1321	m->hstate = h;
1322	return 1;
1323}
1324
1325static void __init prep_compound_huge_page(struct page *page, int order)
1326{
1327	if (unlikely(order > (MAX_ORDER - 1)))
1328		prep_compound_gigantic_page(page, order);
1329	else
1330		prep_compound_page(page, order);
1331}
1332
1333/* Put bootmem huge pages into the standard lists after mem_map is up */
1334static void __init gather_bootmem_prealloc(void)
1335{
1336	struct huge_bootmem_page *m;
1337
1338	list_for_each_entry(m, &huge_boot_pages, list) {
1339		struct hstate *h = m->hstate;
1340		struct page *page;
1341
1342#ifdef CONFIG_HIGHMEM
1343		page = pfn_to_page(m->phys >> PAGE_SHIFT);
1344		memblock_free_late(__pa(m),
1345				   sizeof(struct huge_bootmem_page));
1346#else
1347		page = virt_to_page(m);
1348#endif
 
1349		WARN_ON(page_count(page) != 1);
1350		prep_compound_huge_page(page, h->order);
1351		WARN_ON(PageReserved(page));
1352		prep_new_huge_page(h, page, page_to_nid(page));
1353		/*
1354		 * If we had gigantic hugepages allocated at boot time, we need
1355		 * to restore the 'stolen' pages to totalram_pages in order to
1356		 * fix confusing memory reports from free(1) and another
1357		 * side-effects, like CommitLimit going negative.
1358		 */
1359		if (h->order > (MAX_ORDER - 1))
1360			adjust_managed_page_count(page, 1 << h->order);
1361	}
1362}
1363
1364static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1365{
1366	unsigned long i;
1367
1368	for (i = 0; i < h->max_huge_pages; ++i) {
1369		if (h->order >= MAX_ORDER) {
1370			if (!alloc_bootmem_huge_page(h))
1371				break;
1372		} else if (!alloc_fresh_huge_page(h,
1373					 &node_states[N_MEMORY]))
1374			break;
1375	}
1376	h->max_huge_pages = i;
1377}
1378
1379static void __init hugetlb_init_hstates(void)
1380{
1381	struct hstate *h;
1382
1383	for_each_hstate(h) {
1384		/* oversize hugepages were init'ed in early boot */
1385		if (h->order < MAX_ORDER)
1386			hugetlb_hstate_alloc_pages(h);
1387	}
1388}
1389
1390static char * __init memfmt(char *buf, unsigned long n)
1391{
1392	if (n >= (1UL << 30))
1393		sprintf(buf, "%lu GB", n >> 30);
1394	else if (n >= (1UL << 20))
1395		sprintf(buf, "%lu MB", n >> 20);
1396	else
1397		sprintf(buf, "%lu KB", n >> 10);
1398	return buf;
1399}
1400
1401static void __init report_hugepages(void)
1402{
1403	struct hstate *h;
1404
1405	for_each_hstate(h) {
1406		char buf[32];
1407		pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
 
1408			memfmt(buf, huge_page_size(h)),
1409			h->free_huge_pages);
1410	}
1411}
1412
1413#ifdef CONFIG_HIGHMEM
1414static void try_to_free_low(struct hstate *h, unsigned long count,
1415						nodemask_t *nodes_allowed)
1416{
1417	int i;
1418
1419	if (h->order >= MAX_ORDER)
1420		return;
1421
1422	for_each_node_mask(i, *nodes_allowed) {
1423		struct page *page, *next;
1424		struct list_head *freel = &h->hugepage_freelists[i];
1425		list_for_each_entry_safe(page, next, freel, lru) {
1426			if (count >= h->nr_huge_pages)
1427				return;
1428			if (PageHighMem(page))
1429				continue;
1430			list_del(&page->lru);
1431			update_and_free_page(h, page);
1432			h->free_huge_pages--;
1433			h->free_huge_pages_node[page_to_nid(page)]--;
1434		}
1435	}
1436}
1437#else
1438static inline void try_to_free_low(struct hstate *h, unsigned long count,
1439						nodemask_t *nodes_allowed)
1440{
1441}
1442#endif
1443
1444/*
1445 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1446 * balanced by operating on them in a round-robin fashion.
1447 * Returns 1 if an adjustment was made.
1448 */
1449static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1450				int delta)
1451{
1452	int nr_nodes, node;
 
1453
1454	VM_BUG_ON(delta != -1 && delta != 1);
1455
1456	if (delta < 0) {
1457		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1458			if (h->surplus_huge_pages_node[node])
1459				goto found;
 
 
 
 
 
 
 
 
 
 
 
 
 
1460		}
1461	} else {
1462		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1463			if (h->surplus_huge_pages_node[node] <
1464					h->nr_huge_pages_node[node])
1465				goto found;
 
 
 
 
 
1466		}
1467	}
1468	return 0;
1469
1470found:
1471	h->surplus_huge_pages += delta;
1472	h->surplus_huge_pages_node[node] += delta;
1473	return 1;
 
 
 
1474}
1475
1476#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1477static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1478						nodemask_t *nodes_allowed)
1479{
1480	unsigned long min_count, ret;
1481
1482	if (h->order >= MAX_ORDER)
1483		return h->max_huge_pages;
1484
1485	/*
1486	 * Increase the pool size
1487	 * First take pages out of surplus state.  Then make up the
1488	 * remaining difference by allocating fresh huge pages.
1489	 *
1490	 * We might race with alloc_buddy_huge_page() here and be unable
1491	 * to convert a surplus huge page to a normal huge page. That is
1492	 * not critical, though, it just means the overall size of the
1493	 * pool might be one hugepage larger than it needs to be, but
1494	 * within all the constraints specified by the sysctls.
1495	 */
1496	spin_lock(&hugetlb_lock);
1497	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1498		if (!adjust_pool_surplus(h, nodes_allowed, -1))
1499			break;
1500	}
1501
1502	while (count > persistent_huge_pages(h)) {
1503		/*
1504		 * If this allocation races such that we no longer need the
1505		 * page, free_huge_page will handle it by freeing the page
1506		 * and reducing the surplus.
1507		 */
1508		spin_unlock(&hugetlb_lock);
1509		ret = alloc_fresh_huge_page(h, nodes_allowed);
1510		spin_lock(&hugetlb_lock);
1511		if (!ret)
1512			goto out;
1513
1514		/* Bail for signals. Probably ctrl-c from user */
1515		if (signal_pending(current))
1516			goto out;
1517	}
1518
1519	/*
1520	 * Decrease the pool size
1521	 * First return free pages to the buddy allocator (being careful
1522	 * to keep enough around to satisfy reservations).  Then place
1523	 * pages into surplus state as needed so the pool will shrink
1524	 * to the desired size as pages become free.
1525	 *
1526	 * By placing pages into the surplus state independent of the
1527	 * overcommit value, we are allowing the surplus pool size to
1528	 * exceed overcommit. There are few sane options here. Since
1529	 * alloc_buddy_huge_page() is checking the global counter,
1530	 * though, we'll note that we're not allowed to exceed surplus
1531	 * and won't grow the pool anywhere else. Not until one of the
1532	 * sysctls are changed, or the surplus pages go out of use.
1533	 */
1534	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1535	min_count = max(count, min_count);
1536	try_to_free_low(h, min_count, nodes_allowed);
1537	while (min_count < persistent_huge_pages(h)) {
1538		if (!free_pool_huge_page(h, nodes_allowed, 0))
1539			break;
1540		cond_resched_lock(&hugetlb_lock);
1541	}
1542	while (count < persistent_huge_pages(h)) {
1543		if (!adjust_pool_surplus(h, nodes_allowed, 1))
1544			break;
1545	}
1546out:
1547	ret = persistent_huge_pages(h);
1548	spin_unlock(&hugetlb_lock);
1549	return ret;
1550}
1551
1552#define HSTATE_ATTR_RO(_name) \
1553	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1554
1555#define HSTATE_ATTR(_name) \
1556	static struct kobj_attribute _name##_attr = \
1557		__ATTR(_name, 0644, _name##_show, _name##_store)
1558
1559static struct kobject *hugepages_kobj;
1560static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1561
1562static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1563
1564static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1565{
1566	int i;
1567
1568	for (i = 0; i < HUGE_MAX_HSTATE; i++)
1569		if (hstate_kobjs[i] == kobj) {
1570			if (nidp)
1571				*nidp = NUMA_NO_NODE;
1572			return &hstates[i];
1573		}
1574
1575	return kobj_to_node_hstate(kobj, nidp);
1576}
1577
1578static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1579					struct kobj_attribute *attr, char *buf)
1580{
1581	struct hstate *h;
1582	unsigned long nr_huge_pages;
1583	int nid;
1584
1585	h = kobj_to_hstate(kobj, &nid);
1586	if (nid == NUMA_NO_NODE)
1587		nr_huge_pages = h->nr_huge_pages;
1588	else
1589		nr_huge_pages = h->nr_huge_pages_node[nid];
1590
1591	return sprintf(buf, "%lu\n", nr_huge_pages);
1592}
1593
1594static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1595			struct kobject *kobj, struct kobj_attribute *attr,
1596			const char *buf, size_t len)
1597{
1598	int err;
1599	int nid;
1600	unsigned long count;
1601	struct hstate *h;
1602	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1603
1604	err = kstrtoul(buf, 10, &count);
1605	if (err)
1606		goto out;
1607
1608	h = kobj_to_hstate(kobj, &nid);
1609	if (h->order >= MAX_ORDER) {
1610		err = -EINVAL;
1611		goto out;
1612	}
1613
1614	if (nid == NUMA_NO_NODE) {
1615		/*
1616		 * global hstate attribute
1617		 */
1618		if (!(obey_mempolicy &&
1619				init_nodemask_of_mempolicy(nodes_allowed))) {
1620			NODEMASK_FREE(nodes_allowed);
1621			nodes_allowed = &node_states[N_MEMORY];
1622		}
1623	} else if (nodes_allowed) {
1624		/*
1625		 * per node hstate attribute: adjust count to global,
1626		 * but restrict alloc/free to the specified node.
1627		 */
1628		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1629		init_nodemask_of_node(nodes_allowed, nid);
1630	} else
1631		nodes_allowed = &node_states[N_MEMORY];
1632
1633	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1634
1635	if (nodes_allowed != &node_states[N_MEMORY])
1636		NODEMASK_FREE(nodes_allowed);
1637
1638	return len;
1639out:
1640	NODEMASK_FREE(nodes_allowed);
1641	return err;
1642}
1643
1644static ssize_t nr_hugepages_show(struct kobject *kobj,
1645				       struct kobj_attribute *attr, char *buf)
1646{
1647	return nr_hugepages_show_common(kobj, attr, buf);
1648}
1649
1650static ssize_t nr_hugepages_store(struct kobject *kobj,
1651	       struct kobj_attribute *attr, const char *buf, size_t len)
1652{
1653	return nr_hugepages_store_common(false, kobj, attr, buf, len);
1654}
1655HSTATE_ATTR(nr_hugepages);
1656
1657#ifdef CONFIG_NUMA
1658
1659/*
1660 * hstate attribute for optionally mempolicy-based constraint on persistent
1661 * huge page alloc/free.
1662 */
1663static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1664				       struct kobj_attribute *attr, char *buf)
1665{
1666	return nr_hugepages_show_common(kobj, attr, buf);
1667}
1668
1669static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1670	       struct kobj_attribute *attr, const char *buf, size_t len)
1671{
1672	return nr_hugepages_store_common(true, kobj, attr, buf, len);
1673}
1674HSTATE_ATTR(nr_hugepages_mempolicy);
1675#endif
1676
1677
1678static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1679					struct kobj_attribute *attr, char *buf)
1680{
1681	struct hstate *h = kobj_to_hstate(kobj, NULL);
1682	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1683}
1684
1685static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1686		struct kobj_attribute *attr, const char *buf, size_t count)
1687{
1688	int err;
1689	unsigned long input;
1690	struct hstate *h = kobj_to_hstate(kobj, NULL);
1691
1692	if (h->order >= MAX_ORDER)
1693		return -EINVAL;
1694
1695	err = kstrtoul(buf, 10, &input);
1696	if (err)
1697		return err;
1698
1699	spin_lock(&hugetlb_lock);
1700	h->nr_overcommit_huge_pages = input;
1701	spin_unlock(&hugetlb_lock);
1702
1703	return count;
1704}
1705HSTATE_ATTR(nr_overcommit_hugepages);
1706
1707static ssize_t free_hugepages_show(struct kobject *kobj,
1708					struct kobj_attribute *attr, char *buf)
1709{
1710	struct hstate *h;
1711	unsigned long free_huge_pages;
1712	int nid;
1713
1714	h = kobj_to_hstate(kobj, &nid);
1715	if (nid == NUMA_NO_NODE)
1716		free_huge_pages = h->free_huge_pages;
1717	else
1718		free_huge_pages = h->free_huge_pages_node[nid];
1719
1720	return sprintf(buf, "%lu\n", free_huge_pages);
1721}
1722HSTATE_ATTR_RO(free_hugepages);
1723
1724static ssize_t resv_hugepages_show(struct kobject *kobj,
1725					struct kobj_attribute *attr, char *buf)
1726{
1727	struct hstate *h = kobj_to_hstate(kobj, NULL);
1728	return sprintf(buf, "%lu\n", h->resv_huge_pages);
1729}
1730HSTATE_ATTR_RO(resv_hugepages);
1731
1732static ssize_t surplus_hugepages_show(struct kobject *kobj,
1733					struct kobj_attribute *attr, char *buf)
1734{
1735	struct hstate *h;
1736	unsigned long surplus_huge_pages;
1737	int nid;
1738
1739	h = kobj_to_hstate(kobj, &nid);
1740	if (nid == NUMA_NO_NODE)
1741		surplus_huge_pages = h->surplus_huge_pages;
1742	else
1743		surplus_huge_pages = h->surplus_huge_pages_node[nid];
1744
1745	return sprintf(buf, "%lu\n", surplus_huge_pages);
1746}
1747HSTATE_ATTR_RO(surplus_hugepages);
1748
1749static struct attribute *hstate_attrs[] = {
1750	&nr_hugepages_attr.attr,
1751	&nr_overcommit_hugepages_attr.attr,
1752	&free_hugepages_attr.attr,
1753	&resv_hugepages_attr.attr,
1754	&surplus_hugepages_attr.attr,
1755#ifdef CONFIG_NUMA
1756	&nr_hugepages_mempolicy_attr.attr,
1757#endif
1758	NULL,
1759};
1760
1761static struct attribute_group hstate_attr_group = {
1762	.attrs = hstate_attrs,
1763};
1764
1765static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1766				    struct kobject **hstate_kobjs,
1767				    struct attribute_group *hstate_attr_group)
1768{
1769	int retval;
1770	int hi = hstate_index(h);
1771
1772	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1773	if (!hstate_kobjs[hi])
1774		return -ENOMEM;
1775
1776	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1777	if (retval)
1778		kobject_put(hstate_kobjs[hi]);
1779
1780	return retval;
1781}
1782
1783static void __init hugetlb_sysfs_init(void)
1784{
1785	struct hstate *h;
1786	int err;
1787
1788	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1789	if (!hugepages_kobj)
1790		return;
1791
1792	for_each_hstate(h) {
1793		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1794					 hstate_kobjs, &hstate_attr_group);
1795		if (err)
1796			pr_err("Hugetlb: Unable to add hstate %s", h->name);
 
1797	}
1798}
1799
1800#ifdef CONFIG_NUMA
1801
1802/*
1803 * node_hstate/s - associate per node hstate attributes, via their kobjects,
1804 * with node devices in node_devices[] using a parallel array.  The array
1805 * index of a node device or _hstate == node id.
1806 * This is here to avoid any static dependency of the node device driver, in
1807 * the base kernel, on the hugetlb module.
1808 */
1809struct node_hstate {
1810	struct kobject		*hugepages_kobj;
1811	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
1812};
1813struct node_hstate node_hstates[MAX_NUMNODES];
1814
1815/*
1816 * A subset of global hstate attributes for node devices
1817 */
1818static struct attribute *per_node_hstate_attrs[] = {
1819	&nr_hugepages_attr.attr,
1820	&free_hugepages_attr.attr,
1821	&surplus_hugepages_attr.attr,
1822	NULL,
1823};
1824
1825static struct attribute_group per_node_hstate_attr_group = {
1826	.attrs = per_node_hstate_attrs,
1827};
1828
1829/*
1830 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
1831 * Returns node id via non-NULL nidp.
1832 */
1833static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1834{
1835	int nid;
1836
1837	for (nid = 0; nid < nr_node_ids; nid++) {
1838		struct node_hstate *nhs = &node_hstates[nid];
1839		int i;
1840		for (i = 0; i < HUGE_MAX_HSTATE; i++)
1841			if (nhs->hstate_kobjs[i] == kobj) {
1842				if (nidp)
1843					*nidp = nid;
1844				return &hstates[i];
1845			}
1846	}
1847
1848	BUG();
1849	return NULL;
1850}
1851
1852/*
1853 * Unregister hstate attributes from a single node device.
1854 * No-op if no hstate attributes attached.
1855 */
1856static void hugetlb_unregister_node(struct node *node)
1857{
1858	struct hstate *h;
1859	struct node_hstate *nhs = &node_hstates[node->dev.id];
1860
1861	if (!nhs->hugepages_kobj)
1862		return;		/* no hstate attributes */
1863
1864	for_each_hstate(h) {
1865		int idx = hstate_index(h);
1866		if (nhs->hstate_kobjs[idx]) {
1867			kobject_put(nhs->hstate_kobjs[idx]);
1868			nhs->hstate_kobjs[idx] = NULL;
1869		}
1870	}
1871
1872	kobject_put(nhs->hugepages_kobj);
1873	nhs->hugepages_kobj = NULL;
1874}
1875
1876/*
1877 * hugetlb module exit:  unregister hstate attributes from node devices
1878 * that have them.
1879 */
1880static void hugetlb_unregister_all_nodes(void)
1881{
1882	int nid;
1883
1884	/*
1885	 * disable node device registrations.
1886	 */
1887	register_hugetlbfs_with_node(NULL, NULL);
1888
1889	/*
1890	 * remove hstate attributes from any nodes that have them.
1891	 */
1892	for (nid = 0; nid < nr_node_ids; nid++)
1893		hugetlb_unregister_node(node_devices[nid]);
1894}
1895
1896/*
1897 * Register hstate attributes for a single node device.
1898 * No-op if attributes already registered.
1899 */
1900static void hugetlb_register_node(struct node *node)
1901{
1902	struct hstate *h;
1903	struct node_hstate *nhs = &node_hstates[node->dev.id];
1904	int err;
1905
1906	if (nhs->hugepages_kobj)
1907		return;		/* already allocated */
1908
1909	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
1910							&node->dev.kobj);
1911	if (!nhs->hugepages_kobj)
1912		return;
1913
1914	for_each_hstate(h) {
1915		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
1916						nhs->hstate_kobjs,
1917						&per_node_hstate_attr_group);
1918		if (err) {
1919			pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
1920				h->name, node->dev.id);
 
1921			hugetlb_unregister_node(node);
1922			break;
1923		}
1924	}
1925}
1926
1927/*
1928 * hugetlb init time:  register hstate attributes for all registered node
1929 * devices of nodes that have memory.  All on-line nodes should have
1930 * registered their associated device by this time.
1931 */
1932static void hugetlb_register_all_nodes(void)
1933{
1934	int nid;
1935
1936	for_each_node_state(nid, N_MEMORY) {
1937		struct node *node = node_devices[nid];
1938		if (node->dev.id == nid)
1939			hugetlb_register_node(node);
1940	}
1941
1942	/*
1943	 * Let the node device driver know we're here so it can
1944	 * [un]register hstate attributes on node hotplug.
1945	 */
1946	register_hugetlbfs_with_node(hugetlb_register_node,
1947				     hugetlb_unregister_node);
1948}
1949#else	/* !CONFIG_NUMA */
1950
1951static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1952{
1953	BUG();
1954	if (nidp)
1955		*nidp = -1;
1956	return NULL;
1957}
1958
1959static void hugetlb_unregister_all_nodes(void) { }
1960
1961static void hugetlb_register_all_nodes(void) { }
1962
1963#endif
1964
1965static void __exit hugetlb_exit(void)
1966{
1967	struct hstate *h;
1968
1969	hugetlb_unregister_all_nodes();
1970
1971	for_each_hstate(h) {
1972		kobject_put(hstate_kobjs[hstate_index(h)]);
1973	}
1974
1975	kobject_put(hugepages_kobj);
1976	kfree(htlb_fault_mutex_table);
1977}
1978module_exit(hugetlb_exit);
1979
1980static int __init hugetlb_init(void)
1981{
1982	int i;
1983
1984	if (!hugepages_supported())
 
 
1985		return 0;
1986
1987	if (!size_to_hstate(default_hstate_size)) {
1988		default_hstate_size = HPAGE_SIZE;
1989		if (!size_to_hstate(default_hstate_size))
1990			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1991	}
1992	default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
1993	if (default_hstate_max_huge_pages)
1994		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1995
1996	hugetlb_init_hstates();
 
1997	gather_bootmem_prealloc();
 
1998	report_hugepages();
1999
2000	hugetlb_sysfs_init();
2001	hugetlb_register_all_nodes();
2002	hugetlb_cgroup_file_init();
2003
2004#ifdef CONFIG_SMP
2005	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2006#else
2007	num_fault_mutexes = 1;
2008#endif
2009	htlb_fault_mutex_table =
2010		kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2011	BUG_ON(!htlb_fault_mutex_table);
2012
2013	for (i = 0; i < num_fault_mutexes; i++)
2014		mutex_init(&htlb_fault_mutex_table[i]);
2015	return 0;
2016}
2017module_init(hugetlb_init);
2018
2019/* Should be called on processing a hugepagesz=... option */
2020void __init hugetlb_add_hstate(unsigned order)
2021{
2022	struct hstate *h;
2023	unsigned long i;
2024
2025	if (size_to_hstate(PAGE_SIZE << order)) {
2026		pr_warning("hugepagesz= specified twice, ignoring\n");
2027		return;
2028	}
2029	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2030	BUG_ON(order == 0);
2031	h = &hstates[hugetlb_max_hstate++];
2032	h->order = order;
2033	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2034	h->nr_huge_pages = 0;
2035	h->free_huge_pages = 0;
2036	for (i = 0; i < MAX_NUMNODES; ++i)
2037		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2038	INIT_LIST_HEAD(&h->hugepage_activelist);
2039	h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
2040	h->next_nid_to_free = first_node(node_states[N_MEMORY]);
2041	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2042					huge_page_size(h)/1024);
2043
2044	parsed_hstate = h;
2045}
2046
2047static int __init hugetlb_nrpages_setup(char *s)
2048{
2049	unsigned long *mhp;
2050	static unsigned long *last_mhp;
2051
2052	/*
2053	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2054	 * so this hugepages= parameter goes to the "default hstate".
2055	 */
2056	if (!hugetlb_max_hstate)
2057		mhp = &default_hstate_max_huge_pages;
2058	else
2059		mhp = &parsed_hstate->max_huge_pages;
2060
2061	if (mhp == last_mhp) {
2062		pr_warning("hugepages= specified twice without "
2063			   "interleaving hugepagesz=, ignoring\n");
2064		return 1;
2065	}
2066
2067	if (sscanf(s, "%lu", mhp) <= 0)
2068		*mhp = 0;
2069
2070	/*
2071	 * Global state is always initialized later in hugetlb_init.
2072	 * But we need to allocate >= MAX_ORDER hstates here early to still
2073	 * use the bootmem allocator.
2074	 */
2075	if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2076		hugetlb_hstate_alloc_pages(parsed_hstate);
2077
2078	last_mhp = mhp;
2079
2080	return 1;
2081}
2082__setup("hugepages=", hugetlb_nrpages_setup);
2083
2084static int __init hugetlb_default_setup(char *s)
2085{
2086	default_hstate_size = memparse(s, &s);
2087	return 1;
2088}
2089__setup("default_hugepagesz=", hugetlb_default_setup);
2090
2091static unsigned int cpuset_mems_nr(unsigned int *array)
2092{
2093	int node;
2094	unsigned int nr = 0;
2095
2096	for_each_node_mask(node, cpuset_current_mems_allowed)
2097		nr += array[node];
2098
2099	return nr;
2100}
2101
2102#ifdef CONFIG_SYSCTL
2103static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2104			 struct ctl_table *table, int write,
2105			 void __user *buffer, size_t *length, loff_t *ppos)
2106{
2107	struct hstate *h = &default_hstate;
2108	unsigned long tmp;
2109	int ret;
2110
2111	if (!hugepages_supported())
2112		return -ENOTSUPP;
2113
2114	tmp = h->max_huge_pages;
2115
2116	if (write && h->order >= MAX_ORDER)
2117		return -EINVAL;
2118
2119	table->data = &tmp;
2120	table->maxlen = sizeof(unsigned long);
2121	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2122	if (ret)
2123		goto out;
2124
2125	if (write) {
2126		NODEMASK_ALLOC(nodemask_t, nodes_allowed,
2127						GFP_KERNEL | __GFP_NORETRY);
2128		if (!(obey_mempolicy &&
2129			       init_nodemask_of_mempolicy(nodes_allowed))) {
2130			NODEMASK_FREE(nodes_allowed);
2131			nodes_allowed = &node_states[N_MEMORY];
2132		}
2133		h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
2134
2135		if (nodes_allowed != &node_states[N_MEMORY])
2136			NODEMASK_FREE(nodes_allowed);
2137	}
2138out:
2139	return ret;
2140}
2141
2142int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2143			  void __user *buffer, size_t *length, loff_t *ppos)
2144{
2145
2146	return hugetlb_sysctl_handler_common(false, table, write,
2147							buffer, length, ppos);
2148}
2149
2150#ifdef CONFIG_NUMA
2151int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2152			  void __user *buffer, size_t *length, loff_t *ppos)
2153{
2154	return hugetlb_sysctl_handler_common(true, table, write,
2155							buffer, length, ppos);
2156}
2157#endif /* CONFIG_NUMA */
2158
 
 
 
 
 
 
 
 
 
 
 
 
2159int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2160			void __user *buffer,
2161			size_t *length, loff_t *ppos)
2162{
2163	struct hstate *h = &default_hstate;
2164	unsigned long tmp;
2165	int ret;
2166
2167	if (!hugepages_supported())
2168		return -ENOTSUPP;
2169
2170	tmp = h->nr_overcommit_huge_pages;
2171
2172	if (write && h->order >= MAX_ORDER)
2173		return -EINVAL;
2174
2175	table->data = &tmp;
2176	table->maxlen = sizeof(unsigned long);
2177	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2178	if (ret)
2179		goto out;
2180
2181	if (write) {
2182		spin_lock(&hugetlb_lock);
2183		h->nr_overcommit_huge_pages = tmp;
2184		spin_unlock(&hugetlb_lock);
2185	}
2186out:
2187	return ret;
2188}
2189
2190#endif /* CONFIG_SYSCTL */
2191
2192void hugetlb_report_meminfo(struct seq_file *m)
2193{
2194	struct hstate *h = &default_hstate;
2195	if (!hugepages_supported())
2196		return;
2197	seq_printf(m,
2198			"HugePages_Total:   %5lu\n"
2199			"HugePages_Free:    %5lu\n"
2200			"HugePages_Rsvd:    %5lu\n"
2201			"HugePages_Surp:    %5lu\n"
2202			"Hugepagesize:   %8lu kB\n",
2203			h->nr_huge_pages,
2204			h->free_huge_pages,
2205			h->resv_huge_pages,
2206			h->surplus_huge_pages,
2207			1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2208}
2209
2210int hugetlb_report_node_meminfo(int nid, char *buf)
2211{
2212	struct hstate *h = &default_hstate;
2213	if (!hugepages_supported())
2214		return 0;
2215	return sprintf(buf,
2216		"Node %d HugePages_Total: %5u\n"
2217		"Node %d HugePages_Free:  %5u\n"
2218		"Node %d HugePages_Surp:  %5u\n",
2219		nid, h->nr_huge_pages_node[nid],
2220		nid, h->free_huge_pages_node[nid],
2221		nid, h->surplus_huge_pages_node[nid]);
2222}
2223
2224void hugetlb_show_meminfo(void)
2225{
2226	struct hstate *h;
2227	int nid;
2228
2229	if (!hugepages_supported())
2230		return;
2231
2232	for_each_node_state(nid, N_MEMORY)
2233		for_each_hstate(h)
2234			pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2235				nid,
2236				h->nr_huge_pages_node[nid],
2237				h->free_huge_pages_node[nid],
2238				h->surplus_huge_pages_node[nid],
2239				1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2240}
2241
2242/* Return the number pages of memory we physically have, in PAGE_SIZE units. */
2243unsigned long hugetlb_total_pages(void)
2244{
2245	struct hstate *h;
2246	unsigned long nr_total_pages = 0;
2247
2248	for_each_hstate(h)
2249		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2250	return nr_total_pages;
2251}
2252
2253static int hugetlb_acct_memory(struct hstate *h, long delta)
2254{
2255	int ret = -ENOMEM;
2256
2257	spin_lock(&hugetlb_lock);
2258	/*
2259	 * When cpuset is configured, it breaks the strict hugetlb page
2260	 * reservation as the accounting is done on a global variable. Such
2261	 * reservation is completely rubbish in the presence of cpuset because
2262	 * the reservation is not checked against page availability for the
2263	 * current cpuset. Application can still potentially OOM'ed by kernel
2264	 * with lack of free htlb page in cpuset that the task is in.
2265	 * Attempt to enforce strict accounting with cpuset is almost
2266	 * impossible (or too ugly) because cpuset is too fluid that
2267	 * task or memory node can be dynamically moved between cpusets.
2268	 *
2269	 * The change of semantics for shared hugetlb mapping with cpuset is
2270	 * undesirable. However, in order to preserve some of the semantics,
2271	 * we fall back to check against current free page availability as
2272	 * a best attempt and hopefully to minimize the impact of changing
2273	 * semantics that cpuset has.
2274	 */
2275	if (delta > 0) {
2276		if (gather_surplus_pages(h, delta) < 0)
2277			goto out;
2278
2279		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2280			return_unused_surplus_pages(h, delta);
2281			goto out;
2282		}
2283	}
2284
2285	ret = 0;
2286	if (delta < 0)
2287		return_unused_surplus_pages(h, (unsigned long) -delta);
2288
2289out:
2290	spin_unlock(&hugetlb_lock);
2291	return ret;
2292}
2293
2294static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2295{
2296	struct resv_map *resv = vma_resv_map(vma);
2297
2298	/*
2299	 * This new VMA should share its siblings reservation map if present.
2300	 * The VMA will only ever have a valid reservation map pointer where
2301	 * it is being copied for another still existing VMA.  As that VMA
2302	 * has a reference to the reservation map it cannot disappear until
2303	 * after this open call completes.  It is therefore safe to take a
2304	 * new reference here without additional locking.
2305	 */
2306	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2307		kref_get(&resv->refs);
2308}
2309
2310static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2311{
2312	struct hstate *h = hstate_vma(vma);
2313	struct resv_map *resv = vma_resv_map(vma);
2314	struct hugepage_subpool *spool = subpool_vma(vma);
2315	unsigned long reserve, start, end;
2316
2317	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2318		return;
2319
2320	start = vma_hugecache_offset(h, vma, vma->vm_start);
2321	end = vma_hugecache_offset(h, vma, vma->vm_end);
2322
2323	reserve = (end - start) - region_count(resv, start, end);
2324
2325	kref_put(&resv->refs, resv_map_release);
2326
2327	if (reserve) {
2328		hugetlb_acct_memory(h, -reserve);
2329		hugepage_subpool_put_pages(spool, reserve);
 
2330	}
2331}
2332
2333/*
2334 * We cannot handle pagefaults against hugetlb pages at all.  They cause
2335 * handle_mm_fault() to try to instantiate regular-sized pages in the
2336 * hugegpage VMA.  do_page_fault() is supposed to trap this, so BUG is we get
2337 * this far.
2338 */
2339static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2340{
2341	BUG();
2342	return 0;
2343}
2344
2345const struct vm_operations_struct hugetlb_vm_ops = {
2346	.fault = hugetlb_vm_op_fault,
2347	.open = hugetlb_vm_op_open,
2348	.close = hugetlb_vm_op_close,
2349};
2350
2351static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2352				int writable)
2353{
2354	pte_t entry;
2355
2356	if (writable) {
2357		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
2358					 vma->vm_page_prot)));
2359	} else {
2360		entry = huge_pte_wrprotect(mk_huge_pte(page,
2361					   vma->vm_page_prot));
2362	}
2363	entry = pte_mkyoung(entry);
2364	entry = pte_mkhuge(entry);
2365	entry = arch_make_huge_pte(entry, vma, page, writable);
2366
2367	return entry;
2368}
2369
2370static void set_huge_ptep_writable(struct vm_area_struct *vma,
2371				   unsigned long address, pte_t *ptep)
2372{
2373	pte_t entry;
2374
2375	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
2376	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2377		update_mmu_cache(vma, address, ptep);
2378}
2379
2380
2381int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2382			    struct vm_area_struct *vma)
2383{
2384	pte_t *src_pte, *dst_pte, entry;
2385	struct page *ptepage;
2386	unsigned long addr;
2387	int cow;
2388	struct hstate *h = hstate_vma(vma);
2389	unsigned long sz = huge_page_size(h);
2390	unsigned long mmun_start;	/* For mmu_notifiers */
2391	unsigned long mmun_end;		/* For mmu_notifiers */
2392	int ret = 0;
2393
2394	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2395
2396	mmun_start = vma->vm_start;
2397	mmun_end = vma->vm_end;
2398	if (cow)
2399		mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
2400
2401	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2402		spinlock_t *src_ptl, *dst_ptl;
2403		src_pte = huge_pte_offset(src, addr);
2404		if (!src_pte)
2405			continue;
2406		dst_pte = huge_pte_alloc(dst, addr, sz);
2407		if (!dst_pte) {
2408			ret = -ENOMEM;
2409			break;
2410		}
2411
2412		/* If the pagetables are shared don't copy or take references */
2413		if (dst_pte == src_pte)
2414			continue;
2415
2416		dst_ptl = huge_pte_lock(h, dst, dst_pte);
2417		src_ptl = huge_pte_lockptr(h, src, src_pte);
2418		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
2419		if (!huge_pte_none(huge_ptep_get(src_pte))) {
2420			if (cow)
2421				huge_ptep_set_wrprotect(src, addr, src_pte);
2422			entry = huge_ptep_get(src_pte);
2423			ptepage = pte_page(entry);
2424			get_page(ptepage);
2425			page_dup_rmap(ptepage);
2426			set_huge_pte_at(dst, addr, dst_pte, entry);
2427		}
2428		spin_unlock(src_ptl);
2429		spin_unlock(dst_ptl);
2430	}
 
2431
2432	if (cow)
2433		mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
2434
2435	return ret;
2436}
2437
2438static int is_hugetlb_entry_migration(pte_t pte)
2439{
2440	swp_entry_t swp;
2441
2442	if (huge_pte_none(pte) || pte_present(pte))
2443		return 0;
2444	swp = pte_to_swp_entry(pte);
2445	if (non_swap_entry(swp) && is_migration_entry(swp))
2446		return 1;
2447	else
2448		return 0;
2449}
2450
2451static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2452{
2453	swp_entry_t swp;
2454
2455	if (huge_pte_none(pte) || pte_present(pte))
2456		return 0;
2457	swp = pte_to_swp_entry(pte);
2458	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2459		return 1;
2460	else
2461		return 0;
2462}
2463
2464void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2465			    unsigned long start, unsigned long end,
2466			    struct page *ref_page)
2467{
2468	int force_flush = 0;
2469	struct mm_struct *mm = vma->vm_mm;
2470	unsigned long address;
2471	pte_t *ptep;
2472	pte_t pte;
2473	spinlock_t *ptl;
2474	struct page *page;
 
2475	struct hstate *h = hstate_vma(vma);
2476	unsigned long sz = huge_page_size(h);
2477	const unsigned long mmun_start = start;	/* For mmu_notifiers */
2478	const unsigned long mmun_end   = end;	/* For mmu_notifiers */
 
 
 
 
 
2479
2480	WARN_ON(!is_vm_hugetlb_page(vma));
2481	BUG_ON(start & ~huge_page_mask(h));
2482	BUG_ON(end & ~huge_page_mask(h));
2483
2484	tlb_start_vma(tlb, vma);
2485	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2486again:
2487	for (address = start; address < end; address += sz) {
2488		ptep = huge_pte_offset(mm, address);
2489		if (!ptep)
2490			continue;
2491
2492		ptl = huge_pte_lock(h, mm, ptep);
2493		if (huge_pmd_unshare(mm, &address, ptep))
2494			goto unlock;
2495
2496		pte = huge_ptep_get(ptep);
2497		if (huge_pte_none(pte))
2498			goto unlock;
2499
2500		/*
2501		 * HWPoisoned hugepage is already unmapped and dropped reference
2502		 */
2503		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
2504			huge_pte_clear(mm, address, ptep);
2505			goto unlock;
2506		}
2507
2508		page = pte_page(pte);
2509		/*
2510		 * If a reference page is supplied, it is because a specific
2511		 * page is being unmapped, not a range. Ensure the page we
2512		 * are about to unmap is the actual page of interest.
2513		 */
2514		if (ref_page) {
 
 
 
 
2515			if (page != ref_page)
2516				goto unlock;
2517
2518			/*
2519			 * Mark the VMA as having unmapped its page so that
2520			 * future faults in this VMA will fail rather than
2521			 * looking like data was lost
2522			 */
2523			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2524		}
2525
2526		pte = huge_ptep_get_and_clear(mm, address, ptep);
2527		tlb_remove_tlb_entry(tlb, ptep, address);
2528		if (huge_pte_dirty(pte))
2529			set_page_dirty(page);
2530
2531		page_remove_rmap(page);
2532		force_flush = !__tlb_remove_page(tlb, page);
2533		if (force_flush) {
2534			spin_unlock(ptl);
2535			break;
2536		}
2537		/* Bail out after unmapping reference page if supplied */
2538		if (ref_page) {
2539			spin_unlock(ptl);
2540			break;
2541		}
2542unlock:
2543		spin_unlock(ptl);
2544	}
2545	/*
2546	 * mmu_gather ran out of room to batch pages, we break out of
2547	 * the PTE lock to avoid doing the potential expensive TLB invalidate
2548	 * and page-free while holding it.
2549	 */
2550	if (force_flush) {
2551		force_flush = 0;
2552		tlb_flush_mmu(tlb);
2553		if (address < end && !ref_page)
2554			goto again;
2555	}
2556	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2557	tlb_end_vma(tlb, vma);
2558}
2559
2560void __unmap_hugepage_range_final(struct mmu_gather *tlb,
2561			  struct vm_area_struct *vma, unsigned long start,
2562			  unsigned long end, struct page *ref_page)
2563{
2564	__unmap_hugepage_range(tlb, vma, start, end, ref_page);
2565
2566	/*
2567	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
2568	 * test will fail on a vma being torn down, and not grab a page table
2569	 * on its way out.  We're lucky that the flag has such an appropriate
2570	 * name, and can in fact be safely cleared here. We could clear it
2571	 * before the __unmap_hugepage_range above, but all that's necessary
2572	 * is to clear it before releasing the i_mmap_mutex. This works
2573	 * because in the context this is called, the VMA is about to be
2574	 * destroyed and the i_mmap_mutex is held.
2575	 */
2576	vma->vm_flags &= ~VM_MAYSHARE;
2577}
2578
2579void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2580			  unsigned long end, struct page *ref_page)
2581{
2582	struct mm_struct *mm;
2583	struct mmu_gather tlb;
2584
2585	mm = vma->vm_mm;
2586
2587	tlb_gather_mmu(&tlb, mm, start, end);
2588	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2589	tlb_finish_mmu(&tlb, start, end);
2590}
2591
2592/*
2593 * This is called when the original mapper is failing to COW a MAP_PRIVATE
2594 * mappping it owns the reserve page for. The intention is to unmap the page
2595 * from other VMAs and let the children be SIGKILLed if they are faulting the
2596 * same region.
2597 */
2598static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2599				struct page *page, unsigned long address)
2600{
2601	struct hstate *h = hstate_vma(vma);
2602	struct vm_area_struct *iter_vma;
2603	struct address_space *mapping;
 
2604	pgoff_t pgoff;
2605
2606	/*
2607	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2608	 * from page cache lookup which is in HPAGE_SIZE units.
2609	 */
2610	address = address & huge_page_mask(h);
2611	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
2612			vma->vm_pgoff;
2613	mapping = file_inode(vma->vm_file)->i_mapping;
2614
2615	/*
2616	 * Take the mapping lock for the duration of the table walk. As
2617	 * this mapping should be shared between all the VMAs,
2618	 * __unmap_hugepage_range() is called as the lock is already held
2619	 */
2620	mutex_lock(&mapping->i_mmap_mutex);
2621	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
2622		/* Do not unmap the current VMA */
2623		if (iter_vma == vma)
2624			continue;
2625
2626		/*
2627		 * Unmap the page from other VMAs without their own reserves.
2628		 * They get marked to be SIGKILLed if they fault in these
2629		 * areas. This is because a future no-page fault on this VMA
2630		 * could insert a zeroed page instead of the data existing
2631		 * from the time of fork. This would look like data corruption
2632		 */
2633		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2634			unmap_hugepage_range(iter_vma, address,
2635					     address + huge_page_size(h), page);
 
2636	}
2637	mutex_unlock(&mapping->i_mmap_mutex);
2638
2639	return 1;
2640}
2641
2642/*
2643 * Hugetlb_cow() should be called with page lock of the original hugepage held.
2644 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
2645 * cannot race with other handlers or page migration.
2646 * Keep the pte_same checks anyway to make transition from the mutex easier.
2647 */
2648static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2649			unsigned long address, pte_t *ptep, pte_t pte,
2650			struct page *pagecache_page, spinlock_t *ptl)
2651{
2652	struct hstate *h = hstate_vma(vma);
2653	struct page *old_page, *new_page;
 
2654	int outside_reserve = 0;
2655	unsigned long mmun_start;	/* For mmu_notifiers */
2656	unsigned long mmun_end;		/* For mmu_notifiers */
2657
2658	old_page = pte_page(pte);
2659
2660retry_avoidcopy:
2661	/* If no-one else is actually using this page, avoid the copy
2662	 * and just make the page writable */
2663	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
2664		page_move_anon_rmap(old_page, vma, address);
 
 
2665		set_huge_ptep_writable(vma, address, ptep);
2666		return 0;
2667	}
2668
2669	/*
2670	 * If the process that created a MAP_PRIVATE mapping is about to
2671	 * perform a COW due to a shared page count, attempt to satisfy
2672	 * the allocation without using the existing reserves. The pagecache
2673	 * page is used to determine if the reserve at this address was
2674	 * consumed or not. If reserves were used, a partial faulted mapping
2675	 * at the time of fork() could consume its reserves on COW instead
2676	 * of the full address range.
2677	 */
2678	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
 
2679			old_page != pagecache_page)
2680		outside_reserve = 1;
2681
2682	page_cache_get(old_page);
2683
2684	/* Drop page table lock as buddy allocator may be called */
2685	spin_unlock(ptl);
2686	new_page = alloc_huge_page(vma, address, outside_reserve);
2687
2688	if (IS_ERR(new_page)) {
2689		long err = PTR_ERR(new_page);
2690		page_cache_release(old_page);
2691
2692		/*
2693		 * If a process owning a MAP_PRIVATE mapping fails to COW,
2694		 * it is due to references held by a child and an insufficient
2695		 * huge page pool. To guarantee the original mappers
2696		 * reliability, unmap the page from child processes. The child
2697		 * may get SIGKILLed if it later faults.
2698		 */
2699		if (outside_reserve) {
2700			BUG_ON(huge_pte_none(pte));
2701			if (unmap_ref_private(mm, vma, old_page, address)) {
 
2702				BUG_ON(huge_pte_none(pte));
2703				spin_lock(ptl);
2704				ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2705				if (likely(ptep &&
2706					   pte_same(huge_ptep_get(ptep), pte)))
2707					goto retry_avoidcopy;
2708				/*
2709				 * race occurs while re-acquiring page table
2710				 * lock, and our job is done.
2711				 */
2712				return 0;
2713			}
2714			WARN_ON_ONCE(1);
2715		}
2716
2717		/* Caller expects lock to be held */
2718		spin_lock(ptl);
2719		if (err == -ENOMEM)
2720			return VM_FAULT_OOM;
2721		else
2722			return VM_FAULT_SIGBUS;
2723	}
2724
2725	/*
2726	 * When the original hugepage is shared one, it does not have
2727	 * anon_vma prepared.
2728	 */
2729	if (unlikely(anon_vma_prepare(vma))) {
2730		page_cache_release(new_page);
2731		page_cache_release(old_page);
2732		/* Caller expects lock to be held */
2733		spin_lock(ptl);
2734		return VM_FAULT_OOM;
2735	}
2736
2737	copy_user_huge_page(new_page, old_page, address, vma,
2738			    pages_per_huge_page(h));
2739	__SetPageUptodate(new_page);
2740
2741	mmun_start = address & huge_page_mask(h);
2742	mmun_end = mmun_start + huge_page_size(h);
2743	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2744	/*
2745	 * Retake the page table lock to check for racing updates
2746	 * before the page tables are altered
2747	 */
2748	spin_lock(ptl);
2749	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2750	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
2751		ClearPagePrivate(new_page);
2752
2753		/* Break COW */
 
 
 
2754		huge_ptep_clear_flush(vma, address, ptep);
2755		set_huge_pte_at(mm, address, ptep,
2756				make_huge_pte(vma, new_page, 1));
2757		page_remove_rmap(old_page);
2758		hugepage_add_new_anon_rmap(new_page, vma, address);
2759		/* Make the old page be freed below */
2760		new_page = old_page;
 
 
 
2761	}
2762	spin_unlock(ptl);
2763	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2764	page_cache_release(new_page);
2765	page_cache_release(old_page);
2766
2767	/* Caller expects lock to be held */
2768	spin_lock(ptl);
2769	return 0;
2770}
2771
2772/* Return the pagecache page at a given address within a VMA */
2773static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2774			struct vm_area_struct *vma, unsigned long address)
2775{
2776	struct address_space *mapping;
2777	pgoff_t idx;
2778
2779	mapping = vma->vm_file->f_mapping;
2780	idx = vma_hugecache_offset(h, vma, address);
2781
2782	return find_lock_page(mapping, idx);
2783}
2784
2785/*
2786 * Return whether there is a pagecache page to back given address within VMA.
2787 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
2788 */
2789static bool hugetlbfs_pagecache_present(struct hstate *h,
2790			struct vm_area_struct *vma, unsigned long address)
2791{
2792	struct address_space *mapping;
2793	pgoff_t idx;
2794	struct page *page;
2795
2796	mapping = vma->vm_file->f_mapping;
2797	idx = vma_hugecache_offset(h, vma, address);
2798
2799	page = find_get_page(mapping, idx);
2800	if (page)
2801		put_page(page);
2802	return page != NULL;
2803}
2804
2805static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2806			   struct address_space *mapping, pgoff_t idx,
2807			   unsigned long address, pte_t *ptep, unsigned int flags)
2808{
2809	struct hstate *h = hstate_vma(vma);
2810	int ret = VM_FAULT_SIGBUS;
2811	int anon_rmap = 0;
2812	unsigned long size;
2813	struct page *page;
 
2814	pte_t new_pte;
2815	spinlock_t *ptl;
2816
2817	/*
2818	 * Currently, we are forced to kill the process in the event the
2819	 * original mapper has unmapped pages from the child due to a failed
2820	 * COW. Warn that such a situation has occurred as it may not be obvious
2821	 */
2822	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2823		pr_warning("PID %d killed due to inadequate hugepage pool\n",
2824			   current->pid);
 
2825		return ret;
2826	}
2827
 
 
 
2828	/*
2829	 * Use page lock to guard against racing truncation
2830	 * before we get page_table_lock.
2831	 */
2832retry:
2833	page = find_lock_page(mapping, idx);
2834	if (!page) {
2835		size = i_size_read(mapping->host) >> huge_page_shift(h);
2836		if (idx >= size)
2837			goto out;
2838		page = alloc_huge_page(vma, address, 0);
2839		if (IS_ERR(page)) {
2840			ret = PTR_ERR(page);
2841			if (ret == -ENOMEM)
2842				ret = VM_FAULT_OOM;
2843			else
2844				ret = VM_FAULT_SIGBUS;
2845			goto out;
2846		}
2847		clear_huge_page(page, address, pages_per_huge_page(h));
2848		__SetPageUptodate(page);
2849
2850		if (vma->vm_flags & VM_MAYSHARE) {
2851			int err;
2852			struct inode *inode = mapping->host;
2853
2854			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
2855			if (err) {
2856				put_page(page);
2857				if (err == -EEXIST)
2858					goto retry;
2859				goto out;
2860			}
2861			ClearPagePrivate(page);
2862
2863			spin_lock(&inode->i_lock);
2864			inode->i_blocks += blocks_per_huge_page(h);
2865			spin_unlock(&inode->i_lock);
 
2866		} else {
2867			lock_page(page);
2868			if (unlikely(anon_vma_prepare(vma))) {
2869				ret = VM_FAULT_OOM;
2870				goto backout_unlocked;
2871			}
2872			anon_rmap = 1;
2873		}
2874	} else {
2875		/*
2876		 * If memory error occurs between mmap() and fault, some process
2877		 * don't have hwpoisoned swap entry for errored virtual address.
2878		 * So we need to block hugepage fault by PG_hwpoison bit check.
2879		 */
2880		if (unlikely(PageHWPoison(page))) {
2881			ret = VM_FAULT_HWPOISON |
2882				VM_FAULT_SET_HINDEX(hstate_index(h));
2883			goto backout_unlocked;
2884		}
 
2885	}
2886
2887	/*
2888	 * If we are going to COW a private mapping later, we examine the
2889	 * pending reservations for this page now. This will ensure that
2890	 * any allocations necessary to record that reservation occur outside
2891	 * the spinlock.
2892	 */
2893	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2894		if (vma_needs_reservation(h, vma, address) < 0) {
2895			ret = VM_FAULT_OOM;
2896			goto backout_unlocked;
2897		}
2898
2899	ptl = huge_pte_lockptr(h, mm, ptep);
2900	spin_lock(ptl);
2901	size = i_size_read(mapping->host) >> huge_page_shift(h);
2902	if (idx >= size)
2903		goto backout;
2904
2905	ret = 0;
2906	if (!huge_pte_none(huge_ptep_get(ptep)))
2907		goto backout;
2908
2909	if (anon_rmap) {
2910		ClearPagePrivate(page);
2911		hugepage_add_new_anon_rmap(page, vma, address);
2912	} else
2913		page_dup_rmap(page);
2914	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2915				&& (vma->vm_flags & VM_SHARED)));
2916	set_huge_pte_at(mm, address, ptep, new_pte);
2917
2918	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2919		/* Optimization, do the COW without a second fault */
2920		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
2921	}
2922
2923	spin_unlock(ptl);
2924	unlock_page(page);
2925out:
2926	return ret;
2927
2928backout:
2929	spin_unlock(ptl);
2930backout_unlocked:
2931	unlock_page(page);
2932	put_page(page);
2933	goto out;
2934}
2935
2936#ifdef CONFIG_SMP
2937static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
2938			    struct vm_area_struct *vma,
2939			    struct address_space *mapping,
2940			    pgoff_t idx, unsigned long address)
2941{
2942	unsigned long key[2];
2943	u32 hash;
2944
2945	if (vma->vm_flags & VM_SHARED) {
2946		key[0] = (unsigned long) mapping;
2947		key[1] = idx;
2948	} else {
2949		key[0] = (unsigned long) mm;
2950		key[1] = address >> huge_page_shift(h);
2951	}
2952
2953	hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
2954
2955	return hash & (num_fault_mutexes - 1);
2956}
2957#else
2958/*
2959 * For uniprocesor systems we always use a single mutex, so just
2960 * return 0 and avoid the hashing overhead.
2961 */
2962static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
2963			    struct vm_area_struct *vma,
2964			    struct address_space *mapping,
2965			    pgoff_t idx, unsigned long address)
2966{
2967	return 0;
2968}
2969#endif
2970
2971int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2972			unsigned long address, unsigned int flags)
2973{
2974	pte_t *ptep, entry;
2975	spinlock_t *ptl;
2976	int ret;
2977	u32 hash;
2978	pgoff_t idx;
2979	struct page *page = NULL;
2980	struct page *pagecache_page = NULL;
 
2981	struct hstate *h = hstate_vma(vma);
2982	struct address_space *mapping;
2983
2984	address &= huge_page_mask(h);
2985
2986	ptep = huge_pte_offset(mm, address);
2987	if (ptep) {
2988		entry = huge_ptep_get(ptep);
2989		if (unlikely(is_hugetlb_entry_migration(entry))) {
2990			migration_entry_wait_huge(vma, mm, ptep);
2991			return 0;
2992		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2993			return VM_FAULT_HWPOISON_LARGE |
2994				VM_FAULT_SET_HINDEX(hstate_index(h));
2995	}
2996
2997	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
2998	if (!ptep)
2999		return VM_FAULT_OOM;
3000
3001	mapping = vma->vm_file->f_mapping;
3002	idx = vma_hugecache_offset(h, vma, address);
3003
3004	/*
3005	 * Serialize hugepage allocation and instantiation, so that we don't
3006	 * get spurious allocation failures if two CPUs race to instantiate
3007	 * the same page in the page cache.
3008	 */
3009	hash = fault_mutex_hash(h, mm, vma, mapping, idx, address);
3010	mutex_lock(&htlb_fault_mutex_table[hash]);
3011
3012	entry = huge_ptep_get(ptep);
3013	if (huge_pte_none(entry)) {
3014		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3015		goto out_mutex;
3016	}
3017
3018	ret = 0;
3019
3020	/*
3021	 * If we are going to COW the mapping later, we examine the pending
3022	 * reservations for this page now. This will ensure that any
3023	 * allocations necessary to record that reservation occur outside the
3024	 * spinlock. For private mappings, we also lookup the pagecache
3025	 * page now as it is used to determine if a reservation has been
3026	 * consumed.
3027	 */
3028	if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3029		if (vma_needs_reservation(h, vma, address) < 0) {
3030			ret = VM_FAULT_OOM;
3031			goto out_mutex;
3032		}
3033
3034		if (!(vma->vm_flags & VM_MAYSHARE))
3035			pagecache_page = hugetlbfs_pagecache_page(h,
3036								vma, address);
3037	}
3038
3039	/*
3040	 * hugetlb_cow() requires page locks of pte_page(entry) and
3041	 * pagecache_page, so here we need take the former one
3042	 * when page != pagecache_page or !pagecache_page.
3043	 * Note that locking order is always pagecache_page -> page,
3044	 * so no worry about deadlock.
3045	 */
3046	page = pte_page(entry);
3047	get_page(page);
3048	if (page != pagecache_page)
3049		lock_page(page);
3050
3051	ptl = huge_pte_lockptr(h, mm, ptep);
3052	spin_lock(ptl);
3053	/* Check for a racing update before calling hugetlb_cow */
3054	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3055		goto out_ptl;
3056
3057
3058	if (flags & FAULT_FLAG_WRITE) {
3059		if (!huge_pte_write(entry)) {
3060			ret = hugetlb_cow(mm, vma, address, ptep, entry,
3061					pagecache_page, ptl);
3062			goto out_ptl;
3063		}
3064		entry = huge_pte_mkdirty(entry);
3065	}
3066	entry = pte_mkyoung(entry);
3067	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3068						flags & FAULT_FLAG_WRITE))
3069		update_mmu_cache(vma, address, ptep);
3070
3071out_ptl:
3072	spin_unlock(ptl);
3073
3074	if (pagecache_page) {
3075		unlock_page(pagecache_page);
3076		put_page(pagecache_page);
3077	}
3078	if (page != pagecache_page)
3079		unlock_page(page);
3080	put_page(page);
3081
3082out_mutex:
3083	mutex_unlock(&htlb_fault_mutex_table[hash]);
 
3084	return ret;
3085}
3086
3087long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3088			 struct page **pages, struct vm_area_struct **vmas,
3089			 unsigned long *position, unsigned long *nr_pages,
3090			 long i, unsigned int flags)
 
 
 
 
 
 
 
 
 
3091{
3092	unsigned long pfn_offset;
3093	unsigned long vaddr = *position;
3094	unsigned long remainder = *nr_pages;
3095	struct hstate *h = hstate_vma(vma);
3096
 
3097	while (vaddr < vma->vm_end && remainder) {
3098		pte_t *pte;
3099		spinlock_t *ptl = NULL;
3100		int absent;
3101		struct page *page;
3102
3103		/*
3104		 * Some archs (sparc64, sh*) have multiple pte_ts to
3105		 * each hugepage.  We have to make sure we get the
3106		 * first, for the page indexing below to work.
3107		 *
3108		 * Note that page table lock is not held when pte is null.
3109		 */
3110		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3111		if (pte)
3112			ptl = huge_pte_lock(h, mm, pte);
3113		absent = !pte || huge_pte_none(huge_ptep_get(pte));
3114
3115		/*
3116		 * When coredumping, it suits get_dump_page if we just return
3117		 * an error where there's an empty slot with no huge pagecache
3118		 * to back it.  This way, we avoid allocating a hugepage, and
3119		 * the sparse dumpfile avoids allocating disk blocks, but its
3120		 * huge holes still show up with zeroes where they need to be.
3121		 */
3122		if (absent && (flags & FOLL_DUMP) &&
3123		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3124			if (pte)
3125				spin_unlock(ptl);
3126			remainder = 0;
3127			break;
3128		}
3129
3130		/*
3131		 * We need call hugetlb_fault for both hugepages under migration
3132		 * (in which case hugetlb_fault waits for the migration,) and
3133		 * hwpoisoned hugepages (in which case we need to prevent the
3134		 * caller from accessing to them.) In order to do this, we use
3135		 * here is_swap_pte instead of is_hugetlb_entry_migration and
3136		 * is_hugetlb_entry_hwpoisoned. This is because it simply covers
3137		 * both cases, and because we can't follow correct pages
3138		 * directly from any kind of swap entries.
3139		 */
3140		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3141		    ((flags & FOLL_WRITE) &&
3142		      !huge_pte_write(huge_ptep_get(pte)))) {
3143			int ret;
3144
3145			if (pte)
3146				spin_unlock(ptl);
3147			ret = hugetlb_fault(mm, vma, vaddr,
3148				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
 
3149			if (!(ret & VM_FAULT_ERROR))
3150				continue;
3151
3152			remainder = 0;
3153			break;
3154		}
3155
3156		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3157		page = pte_page(huge_ptep_get(pte));
3158same_page:
3159		if (pages) {
3160			pages[i] = mem_map_offset(page, pfn_offset);
3161			get_page_foll(pages[i]);
3162		}
3163
3164		if (vmas)
3165			vmas[i] = vma;
3166
3167		vaddr += PAGE_SIZE;
3168		++pfn_offset;
3169		--remainder;
3170		++i;
3171		if (vaddr < vma->vm_end && remainder &&
3172				pfn_offset < pages_per_huge_page(h)) {
3173			/*
3174			 * We use pfn_offset to avoid touching the pageframes
3175			 * of this compound page.
3176			 */
3177			goto same_page;
3178		}
3179		spin_unlock(ptl);
3180	}
3181	*nr_pages = remainder;
 
3182	*position = vaddr;
3183
3184	return i ? i : -EFAULT;
3185}
3186
3187unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3188		unsigned long address, unsigned long end, pgprot_t newprot)
3189{
3190	struct mm_struct *mm = vma->vm_mm;
3191	unsigned long start = address;
3192	pte_t *ptep;
3193	pte_t pte;
3194	struct hstate *h = hstate_vma(vma);
3195	unsigned long pages = 0;
3196
3197	BUG_ON(address >= end);
3198	flush_cache_range(vma, address, end);
3199
3200	mmu_notifier_invalidate_range_start(mm, start, end);
3201	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
 
3202	for (; address < end; address += huge_page_size(h)) {
3203		spinlock_t *ptl;
3204		ptep = huge_pte_offset(mm, address);
3205		if (!ptep)
3206			continue;
3207		ptl = huge_pte_lock(h, mm, ptep);
3208		if (huge_pmd_unshare(mm, &address, ptep)) {
3209			pages++;
3210			spin_unlock(ptl);
3211			continue;
3212		}
3213		if (!huge_pte_none(huge_ptep_get(ptep))) {
3214			pte = huge_ptep_get_and_clear(mm, address, ptep);
3215			pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3216			pte = arch_make_huge_pte(pte, vma, NULL, 0);
3217			set_huge_pte_at(mm, address, ptep, pte);
3218			pages++;
3219		}
3220		spin_unlock(ptl);
3221	}
3222	/*
3223	 * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
3224	 * may have cleared our pud entry and done put_page on the page table:
3225	 * once we release i_mmap_mutex, another task can do the final put_page
3226	 * and that page table be reused and filled with junk.
3227	 */
3228	flush_tlb_range(vma, start, end);
3229	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
3230	mmu_notifier_invalidate_range_end(mm, start, end);
3231
3232	return pages << h->order;
3233}
3234
3235int hugetlb_reserve_pages(struct inode *inode,
3236					long from, long to,
3237					struct vm_area_struct *vma,
3238					vm_flags_t vm_flags)
3239{
3240	long ret, chg;
3241	struct hstate *h = hstate_inode(inode);
3242	struct hugepage_subpool *spool = subpool_inode(inode);
3243	struct resv_map *resv_map;
3244
3245	/*
3246	 * Only apply hugepage reservation if asked. At fault time, an
3247	 * attempt will be made for VM_NORESERVE to allocate a page
3248	 * without using reserves
3249	 */
3250	if (vm_flags & VM_NORESERVE)
3251		return 0;
3252
3253	/*
3254	 * Shared mappings base their reservation on the number of pages that
3255	 * are already allocated on behalf of the file. Private mappings need
3256	 * to reserve the full area even if read-only as mprotect() may be
3257	 * called to make the mapping read-write. Assume !vma is a shm mapping
3258	 */
3259	if (!vma || vma->vm_flags & VM_MAYSHARE) {
3260		resv_map = inode_resv_map(inode);
3261
3262		chg = region_chg(resv_map, from, to);
3263
3264	} else {
3265		resv_map = resv_map_alloc();
3266		if (!resv_map)
3267			return -ENOMEM;
3268
3269		chg = to - from;
3270
3271		set_vma_resv_map(vma, resv_map);
3272		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3273	}
3274
3275	if (chg < 0) {
3276		ret = chg;
3277		goto out_err;
3278	}
3279
3280	/* There must be enough pages in the subpool for the mapping */
3281	if (hugepage_subpool_get_pages(spool, chg)) {
3282		ret = -ENOSPC;
3283		goto out_err;
3284	}
3285
3286	/*
3287	 * Check enough hugepages are available for the reservation.
3288	 * Hand the pages back to the subpool if there are not
3289	 */
3290	ret = hugetlb_acct_memory(h, chg);
3291	if (ret < 0) {
3292		hugepage_subpool_put_pages(spool, chg);
3293		goto out_err;
3294	}
3295
3296	/*
3297	 * Account for the reservations made. Shared mappings record regions
3298	 * that have reservations as they are shared by multiple VMAs.
3299	 * When the last VMA disappears, the region map says how much
3300	 * the reservation was and the page cache tells how much of
3301	 * the reservation was consumed. Private mappings are per-VMA and
3302	 * only the consumed reservations are tracked. When the VMA
3303	 * disappears, the original reservation is the VMA size and the
3304	 * consumed reservations are stored in the map. Hence, nothing
3305	 * else has to be done for private mappings here
3306	 */
3307	if (!vma || vma->vm_flags & VM_MAYSHARE)
3308		region_add(resv_map, from, to);
3309	return 0;
3310out_err:
3311	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3312		kref_put(&resv_map->refs, resv_map_release);
3313	return ret;
3314}
3315
3316void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3317{
3318	struct hstate *h = hstate_inode(inode);
3319	struct resv_map *resv_map = inode_resv_map(inode);
3320	long chg = 0;
3321	struct hugepage_subpool *spool = subpool_inode(inode);
3322
3323	if (resv_map)
3324		chg = region_truncate(resv_map, offset);
3325	spin_lock(&inode->i_lock);
3326	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
3327	spin_unlock(&inode->i_lock);
3328
3329	hugepage_subpool_put_pages(spool, (chg - freed));
3330	hugetlb_acct_memory(h, -(chg - freed));
3331}
3332
3333#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
3334static unsigned long page_table_shareable(struct vm_area_struct *svma,
3335				struct vm_area_struct *vma,
3336				unsigned long addr, pgoff_t idx)
3337{
3338	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
3339				svma->vm_start;
3340	unsigned long sbase = saddr & PUD_MASK;
3341	unsigned long s_end = sbase + PUD_SIZE;
3342
3343	/* Allow segments to share if only one is marked locked */
3344	unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
3345	unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
3346
3347	/*
3348	 * match the virtual addresses, permission and the alignment of the
3349	 * page table page.
3350	 */
3351	if (pmd_index(addr) != pmd_index(saddr) ||
3352	    vm_flags != svm_flags ||
3353	    sbase < svma->vm_start || svma->vm_end < s_end)
3354		return 0;
3355
3356	return saddr;
3357}
3358
3359static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
3360{
3361	unsigned long base = addr & PUD_MASK;
3362	unsigned long end = base + PUD_SIZE;
3363
3364	/*
3365	 * check on proper vm_flags and page table alignment
3366	 */
3367	if (vma->vm_flags & VM_MAYSHARE &&
3368	    vma->vm_start <= base && end <= vma->vm_end)
3369		return 1;
3370	return 0;
3371}
3372
3373/*
3374 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
3375 * and returns the corresponding pte. While this is not necessary for the
3376 * !shared pmd case because we can allocate the pmd later as well, it makes the
3377 * code much cleaner. pmd allocation is essential for the shared case because
3378 * pud has to be populated inside the same i_mmap_mutex section - otherwise
3379 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
3380 * bad pmd for sharing.
3381 */
3382pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3383{
3384	struct vm_area_struct *vma = find_vma(mm, addr);
3385	struct address_space *mapping = vma->vm_file->f_mapping;
3386	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
3387			vma->vm_pgoff;
3388	struct vm_area_struct *svma;
3389	unsigned long saddr;
3390	pte_t *spte = NULL;
3391	pte_t *pte;
3392	spinlock_t *ptl;
3393
3394	if (!vma_shareable(vma, addr))
3395		return (pte_t *)pmd_alloc(mm, pud, addr);
3396
3397	mutex_lock(&mapping->i_mmap_mutex);
3398	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
3399		if (svma == vma)
3400			continue;
3401
3402		saddr = page_table_shareable(svma, vma, addr, idx);
3403		if (saddr) {
3404			spte = huge_pte_offset(svma->vm_mm, saddr);
3405			if (spte) {
3406				get_page(virt_to_page(spte));
3407				break;
3408			}
3409		}
3410	}
3411
3412	if (!spte)
3413		goto out;
3414
3415	ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
3416	spin_lock(ptl);
3417	if (pud_none(*pud))
3418		pud_populate(mm, pud,
3419				(pmd_t *)((unsigned long)spte & PAGE_MASK));
3420	else
3421		put_page(virt_to_page(spte));
3422	spin_unlock(ptl);
3423out:
3424	pte = (pte_t *)pmd_alloc(mm, pud, addr);
3425	mutex_unlock(&mapping->i_mmap_mutex);
3426	return pte;
3427}
3428
3429/*
3430 * unmap huge page backed by shared pte.
3431 *
3432 * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
3433 * indicated by page_count > 1, unmap is achieved by clearing pud and
3434 * decrementing the ref count. If count == 1, the pte page is not shared.
3435 *
3436 * called with page table lock held.
3437 *
3438 * returns: 1 successfully unmapped a shared pte page
3439 *	    0 the underlying pte page is not shared, or it is the last user
3440 */
3441int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
3442{
3443	pgd_t *pgd = pgd_offset(mm, *addr);
3444	pud_t *pud = pud_offset(pgd, *addr);
3445
3446	BUG_ON(page_count(virt_to_page(ptep)) == 0);
3447	if (page_count(virt_to_page(ptep)) == 1)
3448		return 0;
3449
3450	pud_clear(pud);
3451	put_page(virt_to_page(ptep));
3452	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
3453	return 1;
3454}
3455#define want_pmd_share()	(1)
3456#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3457pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3458{
3459	return NULL;
3460}
3461#define want_pmd_share()	(0)
3462#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3463
3464#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
3465pte_t *huge_pte_alloc(struct mm_struct *mm,
3466			unsigned long addr, unsigned long sz)
3467{
3468	pgd_t *pgd;
3469	pud_t *pud;
3470	pte_t *pte = NULL;
3471
3472	pgd = pgd_offset(mm, addr);
3473	pud = pud_alloc(mm, pgd, addr);
3474	if (pud) {
3475		if (sz == PUD_SIZE) {
3476			pte = (pte_t *)pud;
3477		} else {
3478			BUG_ON(sz != PMD_SIZE);
3479			if (want_pmd_share() && pud_none(*pud))
3480				pte = huge_pmd_share(mm, addr, pud);
3481			else
3482				pte = (pte_t *)pmd_alloc(mm, pud, addr);
3483		}
3484	}
3485	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
3486
3487	return pte;
3488}
3489
3490pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
3491{
3492	pgd_t *pgd;
3493	pud_t *pud;
3494	pmd_t *pmd = NULL;
3495
3496	pgd = pgd_offset(mm, addr);
3497	if (pgd_present(*pgd)) {
3498		pud = pud_offset(pgd, addr);
3499		if (pud_present(*pud)) {
3500			if (pud_huge(*pud))
3501				return (pte_t *)pud;
3502			pmd = pmd_offset(pud, addr);
3503		}
3504	}
3505	return (pte_t *) pmd;
3506}
3507
3508struct page *
3509follow_huge_pmd(struct mm_struct *mm, unsigned long address,
3510		pmd_t *pmd, int write)
3511{
3512	struct page *page;
3513
3514	page = pte_page(*(pte_t *)pmd);
3515	if (page)
3516		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
3517	return page;
3518}
3519
3520struct page *
3521follow_huge_pud(struct mm_struct *mm, unsigned long address,
3522		pud_t *pud, int write)
3523{
3524	struct page *page;
3525
3526	page = pte_page(*(pte_t *)pud);
3527	if (page)
3528		page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
3529	return page;
3530}
3531
3532#else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */
3533
3534/* Can be overriden by architectures */
3535struct page * __weak
3536follow_huge_pud(struct mm_struct *mm, unsigned long address,
3537	       pud_t *pud, int write)
3538{
3539	BUG();
3540	return NULL;
3541}
3542
3543#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
3544
3545#ifdef CONFIG_MEMORY_FAILURE
3546
3547/* Should be called in hugetlb_lock */
3548static int is_hugepage_on_freelist(struct page *hpage)
3549{
3550	struct page *page;
3551	struct page *tmp;
3552	struct hstate *h = page_hstate(hpage);
3553	int nid = page_to_nid(hpage);
3554
3555	list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
3556		if (page == hpage)
3557			return 1;
3558	return 0;
3559}
3560
3561/*
3562 * This function is called from memory failure code.
3563 * Assume the caller holds page lock of the head page.
3564 */
3565int dequeue_hwpoisoned_huge_page(struct page *hpage)
3566{
3567	struct hstate *h = page_hstate(hpage);
3568	int nid = page_to_nid(hpage);
3569	int ret = -EBUSY;
3570
3571	spin_lock(&hugetlb_lock);
3572	if (is_hugepage_on_freelist(hpage)) {
3573		/*
3574		 * Hwpoisoned hugepage isn't linked to activelist or freelist,
3575		 * but dangling hpage->lru can trigger list-debug warnings
3576		 * (this happens when we call unpoison_memory() on it),
3577		 * so let it point to itself with list_del_init().
3578		 */
3579		list_del_init(&hpage->lru);
3580		set_page_refcounted(hpage);
3581		h->free_huge_pages--;
3582		h->free_huge_pages_node[nid]--;
3583		ret = 0;
3584	}
3585	spin_unlock(&hugetlb_lock);
3586	return ret;
3587}
3588#endif
3589
3590bool isolate_huge_page(struct page *page, struct list_head *list)
3591{
3592	VM_BUG_ON_PAGE(!PageHead(page), page);
3593	if (!get_page_unless_zero(page))
3594		return false;
3595	spin_lock(&hugetlb_lock);
3596	list_move_tail(&page->lru, list);
3597	spin_unlock(&hugetlb_lock);
3598	return true;
3599}
3600
3601void putback_active_hugepage(struct page *page)
3602{
3603	VM_BUG_ON_PAGE(!PageHead(page), page);
3604	spin_lock(&hugetlb_lock);
3605	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
3606	spin_unlock(&hugetlb_lock);
3607	put_page(page);
3608}
3609
3610bool is_hugepage_active(struct page *page)
3611{
3612	VM_BUG_ON_PAGE(!PageHuge(page), page);
3613	/*
3614	 * This function can be called for a tail page because the caller,
3615	 * scan_movable_pages, scans through a given pfn-range which typically
3616	 * covers one memory block. In systems using gigantic hugepage (1GB
3617	 * for x86_64,) a hugepage is larger than a memory block, and we don't
3618	 * support migrating such large hugepages for now, so return false
3619	 * when called for tail pages.
3620	 */
3621	if (PageTail(page))
3622		return false;
3623	/*
3624	 * Refcount of a hwpoisoned hugepages is 1, but they are not active,
3625	 * so we should return false for them.
3626	 */
3627	if (unlikely(PageHWPoison(page)))
3628		return false;
3629	return page_count(page) > 0;
3630}
v3.1
   1/*
   2 * Generic hugetlb support.
   3 * (C) William Irwin, April 2004
   4 */
   5#include <linux/list.h>
   6#include <linux/init.h>
   7#include <linux/module.h>
   8#include <linux/mm.h>
   9#include <linux/seq_file.h>
  10#include <linux/sysctl.h>
  11#include <linux/highmem.h>
  12#include <linux/mmu_notifier.h>
  13#include <linux/nodemask.h>
  14#include <linux/pagemap.h>
  15#include <linux/mempolicy.h>
 
  16#include <linux/cpuset.h>
  17#include <linux/mutex.h>
  18#include <linux/bootmem.h>
  19#include <linux/sysfs.h>
  20#include <linux/slab.h>
  21#include <linux/rmap.h>
  22#include <linux/swap.h>
  23#include <linux/swapops.h>
 
 
  24
  25#include <asm/page.h>
  26#include <asm/pgtable.h>
 
 
  27#include <linux/io.h>
  28
  29#include <linux/hugetlb.h>
 
  30#include <linux/node.h>
  31#include "internal.h"
  32
  33const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
  34static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
  35unsigned long hugepages_treat_as_movable;
  36
  37static int max_hstate;
  38unsigned int default_hstate_idx;
  39struct hstate hstates[HUGE_MAX_HSTATE];
  40
  41__initdata LIST_HEAD(huge_boot_pages);
  42
  43/* for command line parsing */
  44static struct hstate * __initdata parsed_hstate;
  45static unsigned long __initdata default_hstate_max_huge_pages;
  46static unsigned long __initdata default_hstate_size;
  47
  48#define for_each_hstate(h) \
  49	for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
 
 
 
  50
  51/*
  52 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 
  53 */
  54static DEFINE_SPINLOCK(hugetlb_lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  55
  56/*
  57 * Region tracking -- allows tracking of reservations and instantiated pages
  58 *                    across the pages in a mapping.
  59 *
  60 * The region data structures are protected by a combination of the mmap_sem
  61 * and the hugetlb_instantion_mutex.  To access or modify a region the caller
  62 * must either hold the mmap_sem for write, or the mmap_sem for read and
  63 * the hugetlb_instantiation mutex:
  64 *
  65 *	down_write(&mm->mmap_sem);
  66 * or
  67 *	down_read(&mm->mmap_sem);
  68 *	mutex_lock(&hugetlb_instantiation_mutex);
  69 */
  70struct file_region {
  71	struct list_head link;
  72	long from;
  73	long to;
  74};
  75
  76static long region_add(struct list_head *head, long f, long t)
  77{
 
  78	struct file_region *rg, *nrg, *trg;
  79
 
  80	/* Locate the region we are either in or before. */
  81	list_for_each_entry(rg, head, link)
  82		if (f <= rg->to)
  83			break;
  84
  85	/* Round our left edge to the current segment if it encloses us. */
  86	if (f > rg->from)
  87		f = rg->from;
  88
  89	/* Check for and consume any regions we now overlap with. */
  90	nrg = rg;
  91	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
  92		if (&rg->link == head)
  93			break;
  94		if (rg->from > t)
  95			break;
  96
  97		/* If this area reaches higher then extend our area to
  98		 * include it completely.  If this is not the first area
  99		 * which we intend to reuse, free it. */
 100		if (rg->to > t)
 101			t = rg->to;
 102		if (rg != nrg) {
 103			list_del(&rg->link);
 104			kfree(rg);
 105		}
 106	}
 107	nrg->from = f;
 108	nrg->to = t;
 
 109	return 0;
 110}
 111
 112static long region_chg(struct list_head *head, long f, long t)
 113{
 114	struct file_region *rg, *nrg;
 
 115	long chg = 0;
 116
 
 
 117	/* Locate the region we are before or in. */
 118	list_for_each_entry(rg, head, link)
 119		if (f <= rg->to)
 120			break;
 121
 122	/* If we are below the current region then a new region is required.
 123	 * Subtle, allocate a new region at the position but make it zero
 124	 * size such that we can guarantee to record the reservation. */
 125	if (&rg->link == head || t < rg->from) {
 126		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
 127		if (!nrg)
 128			return -ENOMEM;
 129		nrg->from = f;
 130		nrg->to   = f;
 131		INIT_LIST_HEAD(&nrg->link);
 
 
 
 
 
 
 132		list_add(&nrg->link, rg->link.prev);
 133
 134		return t - f;
 135	}
 136
 137	/* Round our left edge to the current segment if it encloses us. */
 138	if (f > rg->from)
 139		f = rg->from;
 140	chg = t - f;
 141
 142	/* Check for and consume any regions we now overlap with. */
 143	list_for_each_entry(rg, rg->link.prev, link) {
 144		if (&rg->link == head)
 145			break;
 146		if (rg->from > t)
 147			return chg;
 148
 149		/* We overlap with this area, if it extends further than
 150		 * us then we must extend ourselves.  Account for its
 151		 * existing reservation. */
 152		if (rg->to > t) {
 153			chg += rg->to - t;
 154			t = rg->to;
 155		}
 156		chg -= rg->to - rg->from;
 157	}
 
 
 
 
 
 
 
 
 158	return chg;
 159}
 160
 161static long region_truncate(struct list_head *head, long end)
 162{
 
 163	struct file_region *rg, *trg;
 164	long chg = 0;
 165
 
 166	/* Locate the region we are either in or before. */
 167	list_for_each_entry(rg, head, link)
 168		if (end <= rg->to)
 169			break;
 170	if (&rg->link == head)
 171		return 0;
 172
 173	/* If we are in the middle of a region then adjust it. */
 174	if (end > rg->from) {
 175		chg = rg->to - end;
 176		rg->to = end;
 177		rg = list_entry(rg->link.next, typeof(*rg), link);
 178	}
 179
 180	/* Drop any remaining regions. */
 181	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
 182		if (&rg->link == head)
 183			break;
 184		chg += rg->to - rg->from;
 185		list_del(&rg->link);
 186		kfree(rg);
 187	}
 
 
 
 188	return chg;
 189}
 190
 191static long region_count(struct list_head *head, long f, long t)
 192{
 
 193	struct file_region *rg;
 194	long chg = 0;
 195
 
 196	/* Locate each segment we overlap with, and count that overlap. */
 197	list_for_each_entry(rg, head, link) {
 198		int seg_from;
 199		int seg_to;
 200
 201		if (rg->to <= f)
 202			continue;
 203		if (rg->from >= t)
 204			break;
 205
 206		seg_from = max(rg->from, f);
 207		seg_to = min(rg->to, t);
 208
 209		chg += seg_to - seg_from;
 210	}
 
 211
 212	return chg;
 213}
 214
 215/*
 216 * Convert the address within this vma to the page offset within
 217 * the mapping, in pagecache page units; huge pages here.
 218 */
 219static pgoff_t vma_hugecache_offset(struct hstate *h,
 220			struct vm_area_struct *vma, unsigned long address)
 221{
 222	return ((address - vma->vm_start) >> huge_page_shift(h)) +
 223			(vma->vm_pgoff >> huge_page_order(h));
 224}
 225
 226pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
 227				     unsigned long address)
 228{
 229	return vma_hugecache_offset(hstate_vma(vma), vma, address);
 230}
 231
 232/*
 233 * Return the size of the pages allocated when backing a VMA. In the majority
 234 * cases this will be same size as used by the page table entries.
 235 */
 236unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
 237{
 238	struct hstate *hstate;
 239
 240	if (!is_vm_hugetlb_page(vma))
 241		return PAGE_SIZE;
 242
 243	hstate = hstate_vma(vma);
 244
 245	return 1UL << (hstate->order + PAGE_SHIFT);
 246}
 247EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
 248
 249/*
 250 * Return the page size being used by the MMU to back a VMA. In the majority
 251 * of cases, the page size used by the kernel matches the MMU size. On
 252 * architectures where it differs, an architecture-specific version of this
 253 * function is required.
 254 */
 255#ifndef vma_mmu_pagesize
 256unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
 257{
 258	return vma_kernel_pagesize(vma);
 259}
 260#endif
 261
 262/*
 263 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 264 * bits of the reservation map pointer, which are always clear due to
 265 * alignment.
 266 */
 267#define HPAGE_RESV_OWNER    (1UL << 0)
 268#define HPAGE_RESV_UNMAPPED (1UL << 1)
 269#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
 270
 271/*
 272 * These helpers are used to track how many pages are reserved for
 273 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 274 * is guaranteed to have their future faults succeed.
 275 *
 276 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 277 * the reserve counters are updated with the hugetlb_lock held. It is safe
 278 * to reset the VMA at fork() time as it is not in use yet and there is no
 279 * chance of the global counters getting corrupted as a result of the values.
 280 *
 281 * The private mapping reservation is represented in a subtly different
 282 * manner to a shared mapping.  A shared mapping has a region map associated
 283 * with the underlying file, this region map represents the backing file
 284 * pages which have ever had a reservation assigned which this persists even
 285 * after the page is instantiated.  A private mapping has a region map
 286 * associated with the original mmap which is attached to all VMAs which
 287 * reference it, this region map represents those offsets which have consumed
 288 * reservation ie. where pages have been instantiated.
 289 */
 290static unsigned long get_vma_private_data(struct vm_area_struct *vma)
 291{
 292	return (unsigned long)vma->vm_private_data;
 293}
 294
 295static void set_vma_private_data(struct vm_area_struct *vma,
 296							unsigned long value)
 297{
 298	vma->vm_private_data = (void *)value;
 299}
 300
 301struct resv_map {
 302	struct kref refs;
 303	struct list_head regions;
 304};
 305
 306static struct resv_map *resv_map_alloc(void)
 307{
 308	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
 309	if (!resv_map)
 310		return NULL;
 311
 312	kref_init(&resv_map->refs);
 
 313	INIT_LIST_HEAD(&resv_map->regions);
 314
 315	return resv_map;
 316}
 317
 318static void resv_map_release(struct kref *ref)
 319{
 320	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
 321
 322	/* Clear out any active regions before we release the map. */
 323	region_truncate(&resv_map->regions, 0);
 324	kfree(resv_map);
 325}
 326
 
 
 
 
 
 327static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 328{
 329	VM_BUG_ON(!is_vm_hugetlb_page(vma));
 330	if (!(vma->vm_flags & VM_MAYSHARE))
 
 
 
 
 
 
 331		return (struct resv_map *)(get_vma_private_data(vma) &
 332							~HPAGE_RESV_MASK);
 333	return NULL;
 334}
 335
 336static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 337{
 338	VM_BUG_ON(!is_vm_hugetlb_page(vma));
 339	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
 340
 341	set_vma_private_data(vma, (get_vma_private_data(vma) &
 342				HPAGE_RESV_MASK) | (unsigned long)map);
 343}
 344
 345static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
 346{
 347	VM_BUG_ON(!is_vm_hugetlb_page(vma));
 348	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
 349
 350	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
 351}
 352
 353static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
 354{
 355	VM_BUG_ON(!is_vm_hugetlb_page(vma));
 356
 357	return (get_vma_private_data(vma) & flag) != 0;
 358}
 359
 360/* Decrement the reserved pages in the hugepage pool by one */
 361static void decrement_hugepage_resv_vma(struct hstate *h,
 362			struct vm_area_struct *vma)
 363{
 364	if (vma->vm_flags & VM_NORESERVE)
 365		return;
 366
 367	if (vma->vm_flags & VM_MAYSHARE) {
 368		/* Shared mappings always use reserves */
 369		h->resv_huge_pages--;
 370	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
 371		/*
 372		 * Only the process that called mmap() has reserves for
 373		 * private mappings.
 374		 */
 375		h->resv_huge_pages--;
 376	}
 377}
 378
 379/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
 380void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 381{
 382	VM_BUG_ON(!is_vm_hugetlb_page(vma));
 383	if (!(vma->vm_flags & VM_MAYSHARE))
 384		vma->vm_private_data = (void *)0;
 385}
 386
 387/* Returns true if the VMA has associated reserve pages */
 388static int vma_has_reserves(struct vm_area_struct *vma)
 389{
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 390	if (vma->vm_flags & VM_MAYSHARE)
 391		return 1;
 
 
 
 
 
 392	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
 393		return 1;
 
 394	return 0;
 395}
 396
 397static void copy_gigantic_page(struct page *dst, struct page *src)
 398{
 399	int i;
 400	struct hstate *h = page_hstate(src);
 401	struct page *dst_base = dst;
 402	struct page *src_base = src;
 403
 404	for (i = 0; i < pages_per_huge_page(h); ) {
 405		cond_resched();
 406		copy_highpage(dst, src);
 407
 408		i++;
 409		dst = mem_map_next(dst, dst_base, i);
 410		src = mem_map_next(src, src_base, i);
 411	}
 412}
 413
 414void copy_huge_page(struct page *dst, struct page *src)
 415{
 416	int i;
 417	struct hstate *h = page_hstate(src);
 418
 419	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
 420		copy_gigantic_page(dst, src);
 421		return;
 422	}
 423
 424	might_sleep();
 425	for (i = 0; i < pages_per_huge_page(h); i++) {
 426		cond_resched();
 427		copy_highpage(dst + i, src + i);
 428	}
 429}
 430
 431static void enqueue_huge_page(struct hstate *h, struct page *page)
 432{
 433	int nid = page_to_nid(page);
 434	list_add(&page->lru, &h->hugepage_freelists[nid]);
 435	h->free_huge_pages++;
 436	h->free_huge_pages_node[nid]++;
 437}
 438
 439static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
 440{
 441	struct page *page;
 442
 443	if (list_empty(&h->hugepage_freelists[nid]))
 
 
 
 
 
 
 
 444		return NULL;
 445	page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
 446	list_del(&page->lru);
 447	set_page_refcounted(page);
 448	h->free_huge_pages--;
 449	h->free_huge_pages_node[nid]--;
 450	return page;
 451}
 452
 
 
 
 
 
 
 
 
 
 453static struct page *dequeue_huge_page_vma(struct hstate *h,
 454				struct vm_area_struct *vma,
 455				unsigned long address, int avoid_reserve)
 
 456{
 457	struct page *page = NULL;
 458	struct mempolicy *mpol;
 459	nodemask_t *nodemask;
 460	struct zonelist *zonelist;
 461	struct zone *zone;
 462	struct zoneref *z;
 
 463
 464	get_mems_allowed();
 465	zonelist = huge_zonelist(vma, address,
 466					htlb_alloc_mask, &mpol, &nodemask);
 467	/*
 468	 * A child process with MAP_PRIVATE mappings created by their parent
 469	 * have no page reserves. This check ensures that reservations are
 470	 * not "stolen". The child may still get SIGKILLed
 471	 */
 472	if (!vma_has_reserves(vma) &&
 473			h->free_huge_pages - h->resv_huge_pages == 0)
 474		goto err;
 475
 476	/* If reserves cannot be used, ensure enough pages are in the pool */
 477	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
 478		goto err;
 479
 
 
 
 
 
 480	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 481						MAX_NR_ZONES - 1, nodemask) {
 482		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
 483			page = dequeue_huge_page_node(h, zone_to_nid(zone));
 484			if (page) {
 485				if (!avoid_reserve)
 486					decrement_hugepage_resv_vma(h, vma);
 
 
 
 
 
 487				break;
 488			}
 489		}
 490	}
 491err:
 492	mpol_cond_put(mpol);
 493	put_mems_allowed();
 
 494	return page;
 
 
 
 495}
 496
 497static void update_and_free_page(struct hstate *h, struct page *page)
 498{
 499	int i;
 500
 501	VM_BUG_ON(h->order >= MAX_ORDER);
 502
 503	h->nr_huge_pages--;
 504	h->nr_huge_pages_node[page_to_nid(page)]--;
 505	for (i = 0; i < pages_per_huge_page(h); i++) {
 506		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
 507				1 << PG_referenced | 1 << PG_dirty |
 508				1 << PG_active | 1 << PG_reserved |
 509				1 << PG_private | 1 << PG_writeback);
 510	}
 
 511	set_compound_page_dtor(page, NULL);
 512	set_page_refcounted(page);
 513	arch_release_hugepage(page);
 514	__free_pages(page, huge_page_order(h));
 515}
 516
 517struct hstate *size_to_hstate(unsigned long size)
 518{
 519	struct hstate *h;
 520
 521	for_each_hstate(h) {
 522		if (huge_page_size(h) == size)
 523			return h;
 524	}
 525	return NULL;
 526}
 527
 528static void free_huge_page(struct page *page)
 529{
 530	/*
 531	 * Can't pass hstate in here because it is called from the
 532	 * compound page destructor.
 533	 */
 534	struct hstate *h = page_hstate(page);
 535	int nid = page_to_nid(page);
 536	struct address_space *mapping;
 
 
 537
 538	mapping = (struct address_space *) page_private(page);
 539	set_page_private(page, 0);
 540	page->mapping = NULL;
 541	BUG_ON(page_count(page));
 542	BUG_ON(page_mapcount(page));
 543	INIT_LIST_HEAD(&page->lru);
 
 544
 545	spin_lock(&hugetlb_lock);
 
 
 
 
 
 546	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
 
 
 547		update_and_free_page(h, page);
 548		h->surplus_huge_pages--;
 549		h->surplus_huge_pages_node[nid]--;
 550	} else {
 
 551		enqueue_huge_page(h, page);
 552	}
 553	spin_unlock(&hugetlb_lock);
 554	if (mapping)
 555		hugetlb_put_quota(mapping, 1);
 556}
 557
 558static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 559{
 
 560	set_compound_page_dtor(page, free_huge_page);
 561	spin_lock(&hugetlb_lock);
 
 562	h->nr_huge_pages++;
 563	h->nr_huge_pages_node[nid]++;
 564	spin_unlock(&hugetlb_lock);
 565	put_page(page); /* free it into the hugepage allocator */
 566}
 567
 568static void prep_compound_gigantic_page(struct page *page, unsigned long order)
 
 569{
 570	int i;
 571	int nr_pages = 1 << order;
 572	struct page *p = page + 1;
 573
 574	/* we rely on prep_new_huge_page to set the destructor */
 575	set_compound_order(page, order);
 576	__SetPageHead(page);
 
 577	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
 578		__SetPageTail(p);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 579		p->first_page = page;
 580	}
 581}
 582
 
 
 
 
 
 583int PageHuge(struct page *page)
 584{
 585	compound_page_dtor *dtor;
 586
 587	if (!PageCompound(page))
 588		return 0;
 589
 590	page = compound_head(page);
 591	dtor = get_compound_page_dtor(page);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 592
 593	return dtor == free_huge_page;
 
 
 
 
 
 
 
 
 594}
 595EXPORT_SYMBOL_GPL(PageHuge);
 596
 597static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 598{
 599	struct page *page;
 600
 601	if (h->order >= MAX_ORDER)
 602		return NULL;
 603
 604	page = alloc_pages_exact_node(nid,
 605		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
 606						__GFP_REPEAT|__GFP_NOWARN,
 607		huge_page_order(h));
 608	if (page) {
 609		if (arch_prepare_hugepage(page)) {
 610			__free_pages(page, huge_page_order(h));
 611			return NULL;
 612		}
 613		prep_new_huge_page(h, page, nid);
 614	}
 615
 616	return page;
 617}
 618
 619/*
 620 * common helper functions for hstate_next_node_to_{alloc|free}.
 621 * We may have allocated or freed a huge page based on a different
 622 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 623 * be outside of *nodes_allowed.  Ensure that we use an allowed
 624 * node for alloc or free.
 625 */
 626static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
 627{
 628	nid = next_node(nid, *nodes_allowed);
 629	if (nid == MAX_NUMNODES)
 630		nid = first_node(*nodes_allowed);
 631	VM_BUG_ON(nid >= MAX_NUMNODES);
 632
 633	return nid;
 634}
 635
 636static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
 637{
 638	if (!node_isset(nid, *nodes_allowed))
 639		nid = next_node_allowed(nid, nodes_allowed);
 640	return nid;
 641}
 642
 643/*
 644 * returns the previously saved node ["this node"] from which to
 645 * allocate a persistent huge page for the pool and advance the
 646 * next node from which to allocate, handling wrap at end of node
 647 * mask.
 648 */
 649static int hstate_next_node_to_alloc(struct hstate *h,
 650					nodemask_t *nodes_allowed)
 651{
 652	int nid;
 653
 654	VM_BUG_ON(!nodes_allowed);
 655
 656	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
 657	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
 658
 659	return nid;
 660}
 661
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 662static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
 663{
 664	struct page *page;
 665	int start_nid;
 666	int next_nid;
 667	int ret = 0;
 668
 669	start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
 670	next_nid = start_nid;
 671
 672	do {
 673		page = alloc_fresh_huge_page_node(h, next_nid);
 674		if (page) {
 675			ret = 1;
 676			break;
 677		}
 678		next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
 679	} while (next_nid != start_nid);
 680
 681	if (ret)
 682		count_vm_event(HTLB_BUDDY_PGALLOC);
 683	else
 684		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
 685
 686	return ret;
 687}
 688
 689/*
 690 * helper for free_pool_huge_page() - return the previously saved
 691 * node ["this node"] from which to free a huge page.  Advance the
 692 * next node id whether or not we find a free huge page to free so
 693 * that the next attempt to free addresses the next node.
 694 */
 695static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
 696{
 697	int nid;
 698
 699	VM_BUG_ON(!nodes_allowed);
 700
 701	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
 702	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
 703
 704	return nid;
 705}
 706
 707/*
 708 * Free huge page from pool from next node to free.
 709 * Attempt to keep persistent huge pages more or less
 710 * balanced over allowed nodes.
 711 * Called with hugetlb_lock locked.
 712 */
 713static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 714							 bool acct_surplus)
 715{
 716	int start_nid;
 717	int next_nid;
 718	int ret = 0;
 719
 720	start_nid = hstate_next_node_to_free(h, nodes_allowed);
 721	next_nid = start_nid;
 722
 723	do {
 724		/*
 725		 * If we're returning unused surplus pages, only examine
 726		 * nodes with surplus pages.
 727		 */
 728		if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
 729		    !list_empty(&h->hugepage_freelists[next_nid])) {
 730			struct page *page =
 731				list_entry(h->hugepage_freelists[next_nid].next,
 732					  struct page, lru);
 733			list_del(&page->lru);
 734			h->free_huge_pages--;
 735			h->free_huge_pages_node[next_nid]--;
 736			if (acct_surplus) {
 737				h->surplus_huge_pages--;
 738				h->surplus_huge_pages_node[next_nid]--;
 739			}
 740			update_and_free_page(h, page);
 741			ret = 1;
 742			break;
 743		}
 744		next_nid = hstate_next_node_to_free(h, nodes_allowed);
 745	} while (next_nid != start_nid);
 746
 747	return ret;
 748}
 749
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 750static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
 751{
 752	struct page *page;
 753	unsigned int r_nid;
 754
 755	if (h->order >= MAX_ORDER)
 756		return NULL;
 757
 758	/*
 759	 * Assume we will successfully allocate the surplus page to
 760	 * prevent racing processes from causing the surplus to exceed
 761	 * overcommit
 762	 *
 763	 * This however introduces a different race, where a process B
 764	 * tries to grow the static hugepage pool while alloc_pages() is
 765	 * called by process A. B will only examine the per-node
 766	 * counters in determining if surplus huge pages can be
 767	 * converted to normal huge pages in adjust_pool_surplus(). A
 768	 * won't be able to increment the per-node counter, until the
 769	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
 770	 * no more huge pages can be converted from surplus to normal
 771	 * state (and doesn't try to convert again). Thus, we have a
 772	 * case where a surplus huge page exists, the pool is grown, and
 773	 * the surplus huge page still exists after, even though it
 774	 * should just have been converted to a normal huge page. This
 775	 * does not leak memory, though, as the hugepage will be freed
 776	 * once it is out of use. It also does not allow the counters to
 777	 * go out of whack in adjust_pool_surplus() as we don't modify
 778	 * the node values until we've gotten the hugepage and only the
 779	 * per-node value is checked there.
 780	 */
 781	spin_lock(&hugetlb_lock);
 782	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
 783		spin_unlock(&hugetlb_lock);
 784		return NULL;
 785	} else {
 786		h->nr_huge_pages++;
 787		h->surplus_huge_pages++;
 788	}
 789	spin_unlock(&hugetlb_lock);
 790
 791	if (nid == NUMA_NO_NODE)
 792		page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
 793				   __GFP_REPEAT|__GFP_NOWARN,
 794				   huge_page_order(h));
 795	else
 796		page = alloc_pages_exact_node(nid,
 797			htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
 798			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
 799
 800	if (page && arch_prepare_hugepage(page)) {
 801		__free_pages(page, huge_page_order(h));
 802		return NULL;
 803	}
 804
 805	spin_lock(&hugetlb_lock);
 806	if (page) {
 
 807		r_nid = page_to_nid(page);
 808		set_compound_page_dtor(page, free_huge_page);
 
 809		/*
 810		 * We incremented the global counters already
 811		 */
 812		h->nr_huge_pages_node[r_nid]++;
 813		h->surplus_huge_pages_node[r_nid]++;
 814		__count_vm_event(HTLB_BUDDY_PGALLOC);
 815	} else {
 816		h->nr_huge_pages--;
 817		h->surplus_huge_pages--;
 818		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
 819	}
 820	spin_unlock(&hugetlb_lock);
 821
 822	return page;
 823}
 824
 825/*
 826 * This allocation function is useful in the context where vma is irrelevant.
 827 * E.g. soft-offlining uses this function because it only cares physical
 828 * address of error page.
 829 */
 830struct page *alloc_huge_page_node(struct hstate *h, int nid)
 831{
 832	struct page *page;
 833
 834	spin_lock(&hugetlb_lock);
 835	page = dequeue_huge_page_node(h, nid);
 
 836	spin_unlock(&hugetlb_lock);
 837
 838	if (!page)
 839		page = alloc_buddy_huge_page(h, nid);
 840
 841	return page;
 842}
 843
 844/*
 845 * Increase the hugetlb pool such that it can accommodate a reservation
 846 * of size 'delta'.
 847 */
 848static int gather_surplus_pages(struct hstate *h, int delta)
 849{
 850	struct list_head surplus_list;
 851	struct page *page, *tmp;
 852	int ret, i;
 853	int needed, allocated;
 
 854
 855	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
 856	if (needed <= 0) {
 857		h->resv_huge_pages += delta;
 858		return 0;
 859	}
 860
 861	allocated = 0;
 862	INIT_LIST_HEAD(&surplus_list);
 863
 864	ret = -ENOMEM;
 865retry:
 866	spin_unlock(&hugetlb_lock);
 867	for (i = 0; i < needed; i++) {
 868		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
 869		if (!page)
 870			/*
 871			 * We were not able to allocate enough pages to
 872			 * satisfy the entire reservation so we free what
 873			 * we've allocated so far.
 874			 */
 875			goto free;
 876
 877		list_add(&page->lru, &surplus_list);
 878	}
 879	allocated += needed;
 880
 881	/*
 882	 * After retaking hugetlb_lock, we need to recalculate 'needed'
 883	 * because either resv_huge_pages or free_huge_pages may have changed.
 884	 */
 885	spin_lock(&hugetlb_lock);
 886	needed = (h->resv_huge_pages + delta) -
 887			(h->free_huge_pages + allocated);
 888	if (needed > 0)
 889		goto retry;
 890
 
 
 
 
 
 
 
 891	/*
 892	 * The surplus_list now contains _at_least_ the number of extra pages
 893	 * needed to accommodate the reservation.  Add the appropriate number
 894	 * of pages to the hugetlb pool and free the extras back to the buddy
 895	 * allocator.  Commit the entire reservation here to prevent another
 896	 * process from stealing the pages as they are added to the pool but
 897	 * before they are reserved.
 898	 */
 899	needed += allocated;
 900	h->resv_huge_pages += delta;
 901	ret = 0;
 902
 903	spin_unlock(&hugetlb_lock);
 904	/* Free the needed pages to the hugetlb pool */
 905	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
 906		if ((--needed) < 0)
 907			break;
 908		list_del(&page->lru);
 909		/*
 910		 * This page is now managed by the hugetlb allocator and has
 911		 * no users -- drop the buddy allocator's reference.
 912		 */
 913		put_page_testzero(page);
 914		VM_BUG_ON(page_count(page));
 915		enqueue_huge_page(h, page);
 916	}
 
 
 917
 918	/* Free unnecessary surplus pages to the buddy allocator */
 919free:
 920	if (!list_empty(&surplus_list)) {
 921		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
 922			list_del(&page->lru);
 923			put_page(page);
 924		}
 925	}
 926	spin_lock(&hugetlb_lock);
 927
 928	return ret;
 929}
 930
 931/*
 932 * When releasing a hugetlb pool reservation, any surplus pages that were
 933 * allocated to satisfy the reservation must be explicitly freed if they were
 934 * never used.
 935 * Called with hugetlb_lock held.
 936 */
 937static void return_unused_surplus_pages(struct hstate *h,
 938					unsigned long unused_resv_pages)
 939{
 940	unsigned long nr_pages;
 941
 942	/* Uncommit the reservation */
 943	h->resv_huge_pages -= unused_resv_pages;
 944
 945	/* Cannot return gigantic pages currently */
 946	if (h->order >= MAX_ORDER)
 947		return;
 948
 949	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
 950
 951	/*
 952	 * We want to release as many surplus pages as possible, spread
 953	 * evenly across all nodes with memory. Iterate across these nodes
 954	 * until we can no longer free unreserved surplus pages. This occurs
 955	 * when the nodes with surplus pages have no free pages.
 956	 * free_pool_huge_page() will balance the the freed pages across the
 957	 * on-line nodes with memory and will handle the hstate accounting.
 958	 */
 959	while (nr_pages--) {
 960		if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
 961			break;
 
 962	}
 963}
 964
 965/*
 966 * Determine if the huge page at addr within the vma has an associated
 967 * reservation.  Where it does not we will need to logically increase
 968 * reservation and actually increase quota before an allocation can occur.
 969 * Where any new reservation would be required the reservation change is
 970 * prepared, but not committed.  Once the page has been quota'd allocated
 971 * an instantiated the change should be committed via vma_commit_reservation.
 972 * No action is required on failure.
 
 973 */
 974static long vma_needs_reservation(struct hstate *h,
 975			struct vm_area_struct *vma, unsigned long addr)
 976{
 977	struct address_space *mapping = vma->vm_file->f_mapping;
 978	struct inode *inode = mapping->host;
 
 979
 980	if (vma->vm_flags & VM_MAYSHARE) {
 981		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
 982		return region_chg(&inode->i_mapping->private_list,
 983							idx, idx + 1);
 984
 985	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
 986		return 1;
 987
 988	} else  {
 989		long err;
 990		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
 991		struct resv_map *reservations = vma_resv_map(vma);
 992
 993		err = region_chg(&reservations->regions, idx, idx + 1);
 994		if (err < 0)
 995			return err;
 996		return 0;
 997	}
 998}
 999static void vma_commit_reservation(struct hstate *h,
1000			struct vm_area_struct *vma, unsigned long addr)
1001{
1002	struct address_space *mapping = vma->vm_file->f_mapping;
1003	struct inode *inode = mapping->host;
1004
1005	if (vma->vm_flags & VM_MAYSHARE) {
1006		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1007		region_add(&inode->i_mapping->private_list, idx, idx + 1);
1008
1009	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1010		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
1011		struct resv_map *reservations = vma_resv_map(vma);
1012
1013		/* Mark this page used in the map. */
1014		region_add(&reservations->regions, idx, idx + 1);
1015	}
1016}
1017
1018static struct page *alloc_huge_page(struct vm_area_struct *vma,
1019				    unsigned long addr, int avoid_reserve)
1020{
 
1021	struct hstate *h = hstate_vma(vma);
1022	struct page *page;
1023	struct address_space *mapping = vma->vm_file->f_mapping;
1024	struct inode *inode = mapping->host;
1025	long chg;
 
 
1026
 
1027	/*
1028	 * Processes that did not create the mapping will have no reserves and
1029	 * will not have accounted against quota. Check that the quota can be
1030	 * made before satisfying the allocation
1031	 * MAP_NORESERVE mappings may also need pages and quota allocated
1032	 * if no reserve mapping overlaps.
 
1033	 */
1034	chg = vma_needs_reservation(h, vma, addr);
1035	if (chg < 0)
1036		return ERR_PTR(-VM_FAULT_OOM);
1037	if (chg)
1038		if (hugetlb_get_quota(inode->i_mapping, chg))
1039			return ERR_PTR(-VM_FAULT_SIGBUS);
1040
 
 
 
 
 
 
1041	spin_lock(&hugetlb_lock);
1042	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
1043	spin_unlock(&hugetlb_lock);
1044
1045	if (!page) {
 
1046		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1047		if (!page) {
1048			hugetlb_put_quota(inode->i_mapping, chg);
1049			return ERR_PTR(-VM_FAULT_SIGBUS);
 
 
 
 
1050		}
 
 
 
1051	}
 
 
1052
1053	set_page_private(page, (unsigned long) mapping);
1054
1055	vma_commit_reservation(h, vma, addr);
 
 
1056
 
 
 
 
 
 
 
 
 
 
 
1057	return page;
1058}
1059
1060int __weak alloc_bootmem_huge_page(struct hstate *h)
1061{
1062	struct huge_bootmem_page *m;
1063	int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
1064
1065	while (nr_nodes) {
1066		void *addr;
1067
1068		addr = __alloc_bootmem_node_nopanic(
1069				NODE_DATA(hstate_next_node_to_alloc(h,
1070						&node_states[N_HIGH_MEMORY])),
1071				huge_page_size(h), huge_page_size(h), 0);
1072
1073		if (addr) {
1074			/*
1075			 * Use the beginning of the huge page to store the
1076			 * huge_bootmem_page struct (until gather_bootmem
1077			 * puts them into the mem_map).
1078			 */
1079			m = addr;
1080			goto found;
1081		}
1082		nr_nodes--;
1083	}
1084	return 0;
1085
1086found:
1087	BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
1088	/* Put them into a private list first because mem_map is not up yet */
1089	list_add(&m->list, &huge_boot_pages);
1090	m->hstate = h;
1091	return 1;
1092}
1093
1094static void prep_compound_huge_page(struct page *page, int order)
1095{
1096	if (unlikely(order > (MAX_ORDER - 1)))
1097		prep_compound_gigantic_page(page, order);
1098	else
1099		prep_compound_page(page, order);
1100}
1101
1102/* Put bootmem huge pages into the standard lists after mem_map is up */
1103static void __init gather_bootmem_prealloc(void)
1104{
1105	struct huge_bootmem_page *m;
1106
1107	list_for_each_entry(m, &huge_boot_pages, list) {
1108		struct hstate *h = m->hstate;
1109		struct page *page;
1110
1111#ifdef CONFIG_HIGHMEM
1112		page = pfn_to_page(m->phys >> PAGE_SHIFT);
1113		free_bootmem_late((unsigned long)m,
1114				  sizeof(struct huge_bootmem_page));
1115#else
1116		page = virt_to_page(m);
1117#endif
1118		__ClearPageReserved(page);
1119		WARN_ON(page_count(page) != 1);
1120		prep_compound_huge_page(page, h->order);
 
1121		prep_new_huge_page(h, page, page_to_nid(page));
1122		/*
1123		 * If we had gigantic hugepages allocated at boot time, we need
1124		 * to restore the 'stolen' pages to totalram_pages in order to
1125		 * fix confusing memory reports from free(1) and another
1126		 * side-effects, like CommitLimit going negative.
1127		 */
1128		if (h->order > (MAX_ORDER - 1))
1129			totalram_pages += 1 << h->order;
1130	}
1131}
1132
1133static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1134{
1135	unsigned long i;
1136
1137	for (i = 0; i < h->max_huge_pages; ++i) {
1138		if (h->order >= MAX_ORDER) {
1139			if (!alloc_bootmem_huge_page(h))
1140				break;
1141		} else if (!alloc_fresh_huge_page(h,
1142					 &node_states[N_HIGH_MEMORY]))
1143			break;
1144	}
1145	h->max_huge_pages = i;
1146}
1147
1148static void __init hugetlb_init_hstates(void)
1149{
1150	struct hstate *h;
1151
1152	for_each_hstate(h) {
1153		/* oversize hugepages were init'ed in early boot */
1154		if (h->order < MAX_ORDER)
1155			hugetlb_hstate_alloc_pages(h);
1156	}
1157}
1158
1159static char * __init memfmt(char *buf, unsigned long n)
1160{
1161	if (n >= (1UL << 30))
1162		sprintf(buf, "%lu GB", n >> 30);
1163	else if (n >= (1UL << 20))
1164		sprintf(buf, "%lu MB", n >> 20);
1165	else
1166		sprintf(buf, "%lu KB", n >> 10);
1167	return buf;
1168}
1169
1170static void __init report_hugepages(void)
1171{
1172	struct hstate *h;
1173
1174	for_each_hstate(h) {
1175		char buf[32];
1176		printk(KERN_INFO "HugeTLB registered %s page size, "
1177				 "pre-allocated %ld pages\n",
1178			memfmt(buf, huge_page_size(h)),
1179			h->free_huge_pages);
1180	}
1181}
1182
1183#ifdef CONFIG_HIGHMEM
1184static void try_to_free_low(struct hstate *h, unsigned long count,
1185						nodemask_t *nodes_allowed)
1186{
1187	int i;
1188
1189	if (h->order >= MAX_ORDER)
1190		return;
1191
1192	for_each_node_mask(i, *nodes_allowed) {
1193		struct page *page, *next;
1194		struct list_head *freel = &h->hugepage_freelists[i];
1195		list_for_each_entry_safe(page, next, freel, lru) {
1196			if (count >= h->nr_huge_pages)
1197				return;
1198			if (PageHighMem(page))
1199				continue;
1200			list_del(&page->lru);
1201			update_and_free_page(h, page);
1202			h->free_huge_pages--;
1203			h->free_huge_pages_node[page_to_nid(page)]--;
1204		}
1205	}
1206}
1207#else
1208static inline void try_to_free_low(struct hstate *h, unsigned long count,
1209						nodemask_t *nodes_allowed)
1210{
1211}
1212#endif
1213
1214/*
1215 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1216 * balanced by operating on them in a round-robin fashion.
1217 * Returns 1 if an adjustment was made.
1218 */
1219static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1220				int delta)
1221{
1222	int start_nid, next_nid;
1223	int ret = 0;
1224
1225	VM_BUG_ON(delta != -1 && delta != 1);
1226
1227	if (delta < 0)
1228		start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
1229	else
1230		start_nid = hstate_next_node_to_free(h, nodes_allowed);
1231	next_nid = start_nid;
1232
1233	do {
1234		int nid = next_nid;
1235		if (delta < 0)  {
1236			/*
1237			 * To shrink on this node, there must be a surplus page
1238			 */
1239			if (!h->surplus_huge_pages_node[nid]) {
1240				next_nid = hstate_next_node_to_alloc(h,
1241								nodes_allowed);
1242				continue;
1243			}
1244		}
1245		if (delta > 0) {
1246			/*
1247			 * Surplus cannot exceed the total number of pages
1248			 */
1249			if (h->surplus_huge_pages_node[nid] >=
1250						h->nr_huge_pages_node[nid]) {
1251				next_nid = hstate_next_node_to_free(h,
1252								nodes_allowed);
1253				continue;
1254			}
1255		}
 
 
1256
1257		h->surplus_huge_pages += delta;
1258		h->surplus_huge_pages_node[nid] += delta;
1259		ret = 1;
1260		break;
1261	} while (next_nid != start_nid);
1262
1263	return ret;
1264}
1265
1266#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1267static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1268						nodemask_t *nodes_allowed)
1269{
1270	unsigned long min_count, ret;
1271
1272	if (h->order >= MAX_ORDER)
1273		return h->max_huge_pages;
1274
1275	/*
1276	 * Increase the pool size
1277	 * First take pages out of surplus state.  Then make up the
1278	 * remaining difference by allocating fresh huge pages.
1279	 *
1280	 * We might race with alloc_buddy_huge_page() here and be unable
1281	 * to convert a surplus huge page to a normal huge page. That is
1282	 * not critical, though, it just means the overall size of the
1283	 * pool might be one hugepage larger than it needs to be, but
1284	 * within all the constraints specified by the sysctls.
1285	 */
1286	spin_lock(&hugetlb_lock);
1287	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1288		if (!adjust_pool_surplus(h, nodes_allowed, -1))
1289			break;
1290	}
1291
1292	while (count > persistent_huge_pages(h)) {
1293		/*
1294		 * If this allocation races such that we no longer need the
1295		 * page, free_huge_page will handle it by freeing the page
1296		 * and reducing the surplus.
1297		 */
1298		spin_unlock(&hugetlb_lock);
1299		ret = alloc_fresh_huge_page(h, nodes_allowed);
1300		spin_lock(&hugetlb_lock);
1301		if (!ret)
1302			goto out;
1303
1304		/* Bail for signals. Probably ctrl-c from user */
1305		if (signal_pending(current))
1306			goto out;
1307	}
1308
1309	/*
1310	 * Decrease the pool size
1311	 * First return free pages to the buddy allocator (being careful
1312	 * to keep enough around to satisfy reservations).  Then place
1313	 * pages into surplus state as needed so the pool will shrink
1314	 * to the desired size as pages become free.
1315	 *
1316	 * By placing pages into the surplus state independent of the
1317	 * overcommit value, we are allowing the surplus pool size to
1318	 * exceed overcommit. There are few sane options here. Since
1319	 * alloc_buddy_huge_page() is checking the global counter,
1320	 * though, we'll note that we're not allowed to exceed surplus
1321	 * and won't grow the pool anywhere else. Not until one of the
1322	 * sysctls are changed, or the surplus pages go out of use.
1323	 */
1324	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1325	min_count = max(count, min_count);
1326	try_to_free_low(h, min_count, nodes_allowed);
1327	while (min_count < persistent_huge_pages(h)) {
1328		if (!free_pool_huge_page(h, nodes_allowed, 0))
1329			break;
 
1330	}
1331	while (count < persistent_huge_pages(h)) {
1332		if (!adjust_pool_surplus(h, nodes_allowed, 1))
1333			break;
1334	}
1335out:
1336	ret = persistent_huge_pages(h);
1337	spin_unlock(&hugetlb_lock);
1338	return ret;
1339}
1340
1341#define HSTATE_ATTR_RO(_name) \
1342	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1343
1344#define HSTATE_ATTR(_name) \
1345	static struct kobj_attribute _name##_attr = \
1346		__ATTR(_name, 0644, _name##_show, _name##_store)
1347
1348static struct kobject *hugepages_kobj;
1349static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1350
1351static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1352
1353static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1354{
1355	int i;
1356
1357	for (i = 0; i < HUGE_MAX_HSTATE; i++)
1358		if (hstate_kobjs[i] == kobj) {
1359			if (nidp)
1360				*nidp = NUMA_NO_NODE;
1361			return &hstates[i];
1362		}
1363
1364	return kobj_to_node_hstate(kobj, nidp);
1365}
1366
1367static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1368					struct kobj_attribute *attr, char *buf)
1369{
1370	struct hstate *h;
1371	unsigned long nr_huge_pages;
1372	int nid;
1373
1374	h = kobj_to_hstate(kobj, &nid);
1375	if (nid == NUMA_NO_NODE)
1376		nr_huge_pages = h->nr_huge_pages;
1377	else
1378		nr_huge_pages = h->nr_huge_pages_node[nid];
1379
1380	return sprintf(buf, "%lu\n", nr_huge_pages);
1381}
1382
1383static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1384			struct kobject *kobj, struct kobj_attribute *attr,
1385			const char *buf, size_t len)
1386{
1387	int err;
1388	int nid;
1389	unsigned long count;
1390	struct hstate *h;
1391	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1392
1393	err = strict_strtoul(buf, 10, &count);
1394	if (err)
1395		goto out;
1396
1397	h = kobj_to_hstate(kobj, &nid);
1398	if (h->order >= MAX_ORDER) {
1399		err = -EINVAL;
1400		goto out;
1401	}
1402
1403	if (nid == NUMA_NO_NODE) {
1404		/*
1405		 * global hstate attribute
1406		 */
1407		if (!(obey_mempolicy &&
1408				init_nodemask_of_mempolicy(nodes_allowed))) {
1409			NODEMASK_FREE(nodes_allowed);
1410			nodes_allowed = &node_states[N_HIGH_MEMORY];
1411		}
1412	} else if (nodes_allowed) {
1413		/*
1414		 * per node hstate attribute: adjust count to global,
1415		 * but restrict alloc/free to the specified node.
1416		 */
1417		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1418		init_nodemask_of_node(nodes_allowed, nid);
1419	} else
1420		nodes_allowed = &node_states[N_HIGH_MEMORY];
1421
1422	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1423
1424	if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1425		NODEMASK_FREE(nodes_allowed);
1426
1427	return len;
1428out:
1429	NODEMASK_FREE(nodes_allowed);
1430	return err;
1431}
1432
1433static ssize_t nr_hugepages_show(struct kobject *kobj,
1434				       struct kobj_attribute *attr, char *buf)
1435{
1436	return nr_hugepages_show_common(kobj, attr, buf);
1437}
1438
1439static ssize_t nr_hugepages_store(struct kobject *kobj,
1440	       struct kobj_attribute *attr, const char *buf, size_t len)
1441{
1442	return nr_hugepages_store_common(false, kobj, attr, buf, len);
1443}
1444HSTATE_ATTR(nr_hugepages);
1445
1446#ifdef CONFIG_NUMA
1447
1448/*
1449 * hstate attribute for optionally mempolicy-based constraint on persistent
1450 * huge page alloc/free.
1451 */
1452static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1453				       struct kobj_attribute *attr, char *buf)
1454{
1455	return nr_hugepages_show_common(kobj, attr, buf);
1456}
1457
1458static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1459	       struct kobj_attribute *attr, const char *buf, size_t len)
1460{
1461	return nr_hugepages_store_common(true, kobj, attr, buf, len);
1462}
1463HSTATE_ATTR(nr_hugepages_mempolicy);
1464#endif
1465
1466
1467static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1468					struct kobj_attribute *attr, char *buf)
1469{
1470	struct hstate *h = kobj_to_hstate(kobj, NULL);
1471	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1472}
1473
1474static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1475		struct kobj_attribute *attr, const char *buf, size_t count)
1476{
1477	int err;
1478	unsigned long input;
1479	struct hstate *h = kobj_to_hstate(kobj, NULL);
1480
1481	if (h->order >= MAX_ORDER)
1482		return -EINVAL;
1483
1484	err = strict_strtoul(buf, 10, &input);
1485	if (err)
1486		return err;
1487
1488	spin_lock(&hugetlb_lock);
1489	h->nr_overcommit_huge_pages = input;
1490	spin_unlock(&hugetlb_lock);
1491
1492	return count;
1493}
1494HSTATE_ATTR(nr_overcommit_hugepages);
1495
1496static ssize_t free_hugepages_show(struct kobject *kobj,
1497					struct kobj_attribute *attr, char *buf)
1498{
1499	struct hstate *h;
1500	unsigned long free_huge_pages;
1501	int nid;
1502
1503	h = kobj_to_hstate(kobj, &nid);
1504	if (nid == NUMA_NO_NODE)
1505		free_huge_pages = h->free_huge_pages;
1506	else
1507		free_huge_pages = h->free_huge_pages_node[nid];
1508
1509	return sprintf(buf, "%lu\n", free_huge_pages);
1510}
1511HSTATE_ATTR_RO(free_hugepages);
1512
1513static ssize_t resv_hugepages_show(struct kobject *kobj,
1514					struct kobj_attribute *attr, char *buf)
1515{
1516	struct hstate *h = kobj_to_hstate(kobj, NULL);
1517	return sprintf(buf, "%lu\n", h->resv_huge_pages);
1518}
1519HSTATE_ATTR_RO(resv_hugepages);
1520
1521static ssize_t surplus_hugepages_show(struct kobject *kobj,
1522					struct kobj_attribute *attr, char *buf)
1523{
1524	struct hstate *h;
1525	unsigned long surplus_huge_pages;
1526	int nid;
1527
1528	h = kobj_to_hstate(kobj, &nid);
1529	if (nid == NUMA_NO_NODE)
1530		surplus_huge_pages = h->surplus_huge_pages;
1531	else
1532		surplus_huge_pages = h->surplus_huge_pages_node[nid];
1533
1534	return sprintf(buf, "%lu\n", surplus_huge_pages);
1535}
1536HSTATE_ATTR_RO(surplus_hugepages);
1537
1538static struct attribute *hstate_attrs[] = {
1539	&nr_hugepages_attr.attr,
1540	&nr_overcommit_hugepages_attr.attr,
1541	&free_hugepages_attr.attr,
1542	&resv_hugepages_attr.attr,
1543	&surplus_hugepages_attr.attr,
1544#ifdef CONFIG_NUMA
1545	&nr_hugepages_mempolicy_attr.attr,
1546#endif
1547	NULL,
1548};
1549
1550static struct attribute_group hstate_attr_group = {
1551	.attrs = hstate_attrs,
1552};
1553
1554static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1555				    struct kobject **hstate_kobjs,
1556				    struct attribute_group *hstate_attr_group)
1557{
1558	int retval;
1559	int hi = h - hstates;
1560
1561	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1562	if (!hstate_kobjs[hi])
1563		return -ENOMEM;
1564
1565	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1566	if (retval)
1567		kobject_put(hstate_kobjs[hi]);
1568
1569	return retval;
1570}
1571
1572static void __init hugetlb_sysfs_init(void)
1573{
1574	struct hstate *h;
1575	int err;
1576
1577	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1578	if (!hugepages_kobj)
1579		return;
1580
1581	for_each_hstate(h) {
1582		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1583					 hstate_kobjs, &hstate_attr_group);
1584		if (err)
1585			printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
1586								h->name);
1587	}
1588}
1589
1590#ifdef CONFIG_NUMA
1591
1592/*
1593 * node_hstate/s - associate per node hstate attributes, via their kobjects,
1594 * with node sysdevs in node_devices[] using a parallel array.  The array
1595 * index of a node sysdev or _hstate == node id.
1596 * This is here to avoid any static dependency of the node sysdev driver, in
1597 * the base kernel, on the hugetlb module.
1598 */
1599struct node_hstate {
1600	struct kobject		*hugepages_kobj;
1601	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
1602};
1603struct node_hstate node_hstates[MAX_NUMNODES];
1604
1605/*
1606 * A subset of global hstate attributes for node sysdevs
1607 */
1608static struct attribute *per_node_hstate_attrs[] = {
1609	&nr_hugepages_attr.attr,
1610	&free_hugepages_attr.attr,
1611	&surplus_hugepages_attr.attr,
1612	NULL,
1613};
1614
1615static struct attribute_group per_node_hstate_attr_group = {
1616	.attrs = per_node_hstate_attrs,
1617};
1618
1619/*
1620 * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
1621 * Returns node id via non-NULL nidp.
1622 */
1623static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1624{
1625	int nid;
1626
1627	for (nid = 0; nid < nr_node_ids; nid++) {
1628		struct node_hstate *nhs = &node_hstates[nid];
1629		int i;
1630		for (i = 0; i < HUGE_MAX_HSTATE; i++)
1631			if (nhs->hstate_kobjs[i] == kobj) {
1632				if (nidp)
1633					*nidp = nid;
1634				return &hstates[i];
1635			}
1636	}
1637
1638	BUG();
1639	return NULL;
1640}
1641
1642/*
1643 * Unregister hstate attributes from a single node sysdev.
1644 * No-op if no hstate attributes attached.
1645 */
1646void hugetlb_unregister_node(struct node *node)
1647{
1648	struct hstate *h;
1649	struct node_hstate *nhs = &node_hstates[node->sysdev.id];
1650
1651	if (!nhs->hugepages_kobj)
1652		return;		/* no hstate attributes */
1653
1654	for_each_hstate(h)
1655		if (nhs->hstate_kobjs[h - hstates]) {
1656			kobject_put(nhs->hstate_kobjs[h - hstates]);
1657			nhs->hstate_kobjs[h - hstates] = NULL;
 
1658		}
 
1659
1660	kobject_put(nhs->hugepages_kobj);
1661	nhs->hugepages_kobj = NULL;
1662}
1663
1664/*
1665 * hugetlb module exit:  unregister hstate attributes from node sysdevs
1666 * that have them.
1667 */
1668static void hugetlb_unregister_all_nodes(void)
1669{
1670	int nid;
1671
1672	/*
1673	 * disable node sysdev registrations.
1674	 */
1675	register_hugetlbfs_with_node(NULL, NULL);
1676
1677	/*
1678	 * remove hstate attributes from any nodes that have them.
1679	 */
1680	for (nid = 0; nid < nr_node_ids; nid++)
1681		hugetlb_unregister_node(&node_devices[nid]);
1682}
1683
1684/*
1685 * Register hstate attributes for a single node sysdev.
1686 * No-op if attributes already registered.
1687 */
1688void hugetlb_register_node(struct node *node)
1689{
1690	struct hstate *h;
1691	struct node_hstate *nhs = &node_hstates[node->sysdev.id];
1692	int err;
1693
1694	if (nhs->hugepages_kobj)
1695		return;		/* already allocated */
1696
1697	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
1698							&node->sysdev.kobj);
1699	if (!nhs->hugepages_kobj)
1700		return;
1701
1702	for_each_hstate(h) {
1703		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
1704						nhs->hstate_kobjs,
1705						&per_node_hstate_attr_group);
1706		if (err) {
1707			printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
1708					" for node %d\n",
1709						h->name, node->sysdev.id);
1710			hugetlb_unregister_node(node);
1711			break;
1712		}
1713	}
1714}
1715
1716/*
1717 * hugetlb init time:  register hstate attributes for all registered node
1718 * sysdevs of nodes that have memory.  All on-line nodes should have
1719 * registered their associated sysdev by this time.
1720 */
1721static void hugetlb_register_all_nodes(void)
1722{
1723	int nid;
1724
1725	for_each_node_state(nid, N_HIGH_MEMORY) {
1726		struct node *node = &node_devices[nid];
1727		if (node->sysdev.id == nid)
1728			hugetlb_register_node(node);
1729	}
1730
1731	/*
1732	 * Let the node sysdev driver know we're here so it can
1733	 * [un]register hstate attributes on node hotplug.
1734	 */
1735	register_hugetlbfs_with_node(hugetlb_register_node,
1736				     hugetlb_unregister_node);
1737}
1738#else	/* !CONFIG_NUMA */
1739
1740static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1741{
1742	BUG();
1743	if (nidp)
1744		*nidp = -1;
1745	return NULL;
1746}
1747
1748static void hugetlb_unregister_all_nodes(void) { }
1749
1750static void hugetlb_register_all_nodes(void) { }
1751
1752#endif
1753
1754static void __exit hugetlb_exit(void)
1755{
1756	struct hstate *h;
1757
1758	hugetlb_unregister_all_nodes();
1759
1760	for_each_hstate(h) {
1761		kobject_put(hstate_kobjs[h - hstates]);
1762	}
1763
1764	kobject_put(hugepages_kobj);
 
1765}
1766module_exit(hugetlb_exit);
1767
1768static int __init hugetlb_init(void)
1769{
1770	/* Some platform decide whether they support huge pages at boot
1771	 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
1772	 * there is no such support
1773	 */
1774	if (HPAGE_SHIFT == 0)
1775		return 0;
1776
1777	if (!size_to_hstate(default_hstate_size)) {
1778		default_hstate_size = HPAGE_SIZE;
1779		if (!size_to_hstate(default_hstate_size))
1780			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1781	}
1782	default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
1783	if (default_hstate_max_huge_pages)
1784		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1785
1786	hugetlb_init_hstates();
1787
1788	gather_bootmem_prealloc();
1789
1790	report_hugepages();
1791
1792	hugetlb_sysfs_init();
 
 
1793
1794	hugetlb_register_all_nodes();
 
 
 
 
 
 
 
1795
 
 
1796	return 0;
1797}
1798module_init(hugetlb_init);
1799
1800/* Should be called on processing a hugepagesz=... option */
1801void __init hugetlb_add_hstate(unsigned order)
1802{
1803	struct hstate *h;
1804	unsigned long i;
1805
1806	if (size_to_hstate(PAGE_SIZE << order)) {
1807		printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
1808		return;
1809	}
1810	BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
1811	BUG_ON(order == 0);
1812	h = &hstates[max_hstate++];
1813	h->order = order;
1814	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
1815	h->nr_huge_pages = 0;
1816	h->free_huge_pages = 0;
1817	for (i = 0; i < MAX_NUMNODES; ++i)
1818		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1819	h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
1820	h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
 
1821	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1822					huge_page_size(h)/1024);
1823
1824	parsed_hstate = h;
1825}
1826
1827static int __init hugetlb_nrpages_setup(char *s)
1828{
1829	unsigned long *mhp;
1830	static unsigned long *last_mhp;
1831
1832	/*
1833	 * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
1834	 * so this hugepages= parameter goes to the "default hstate".
1835	 */
1836	if (!max_hstate)
1837		mhp = &default_hstate_max_huge_pages;
1838	else
1839		mhp = &parsed_hstate->max_huge_pages;
1840
1841	if (mhp == last_mhp) {
1842		printk(KERN_WARNING "hugepages= specified twice without "
1843			"interleaving hugepagesz=, ignoring\n");
1844		return 1;
1845	}
1846
1847	if (sscanf(s, "%lu", mhp) <= 0)
1848		*mhp = 0;
1849
1850	/*
1851	 * Global state is always initialized later in hugetlb_init.
1852	 * But we need to allocate >= MAX_ORDER hstates here early to still
1853	 * use the bootmem allocator.
1854	 */
1855	if (max_hstate && parsed_hstate->order >= MAX_ORDER)
1856		hugetlb_hstate_alloc_pages(parsed_hstate);
1857
1858	last_mhp = mhp;
1859
1860	return 1;
1861}
1862__setup("hugepages=", hugetlb_nrpages_setup);
1863
1864static int __init hugetlb_default_setup(char *s)
1865{
1866	default_hstate_size = memparse(s, &s);
1867	return 1;
1868}
1869__setup("default_hugepagesz=", hugetlb_default_setup);
1870
1871static unsigned int cpuset_mems_nr(unsigned int *array)
1872{
1873	int node;
1874	unsigned int nr = 0;
1875
1876	for_each_node_mask(node, cpuset_current_mems_allowed)
1877		nr += array[node];
1878
1879	return nr;
1880}
1881
1882#ifdef CONFIG_SYSCTL
1883static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
1884			 struct ctl_table *table, int write,
1885			 void __user *buffer, size_t *length, loff_t *ppos)
1886{
1887	struct hstate *h = &default_hstate;
1888	unsigned long tmp;
1889	int ret;
1890
 
 
 
1891	tmp = h->max_huge_pages;
1892
1893	if (write && h->order >= MAX_ORDER)
1894		return -EINVAL;
1895
1896	table->data = &tmp;
1897	table->maxlen = sizeof(unsigned long);
1898	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
1899	if (ret)
1900		goto out;
1901
1902	if (write) {
1903		NODEMASK_ALLOC(nodemask_t, nodes_allowed,
1904						GFP_KERNEL | __GFP_NORETRY);
1905		if (!(obey_mempolicy &&
1906			       init_nodemask_of_mempolicy(nodes_allowed))) {
1907			NODEMASK_FREE(nodes_allowed);
1908			nodes_allowed = &node_states[N_HIGH_MEMORY];
1909		}
1910		h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
1911
1912		if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1913			NODEMASK_FREE(nodes_allowed);
1914	}
1915out:
1916	return ret;
1917}
1918
1919int hugetlb_sysctl_handler(struct ctl_table *table, int write,
1920			  void __user *buffer, size_t *length, loff_t *ppos)
1921{
1922
1923	return hugetlb_sysctl_handler_common(false, table, write,
1924							buffer, length, ppos);
1925}
1926
1927#ifdef CONFIG_NUMA
1928int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
1929			  void __user *buffer, size_t *length, loff_t *ppos)
1930{
1931	return hugetlb_sysctl_handler_common(true, table, write,
1932							buffer, length, ppos);
1933}
1934#endif /* CONFIG_NUMA */
1935
1936int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
1937			void __user *buffer,
1938			size_t *length, loff_t *ppos)
1939{
1940	proc_dointvec(table, write, buffer, length, ppos);
1941	if (hugepages_treat_as_movable)
1942		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
1943	else
1944		htlb_alloc_mask = GFP_HIGHUSER;
1945	return 0;
1946}
1947
1948int hugetlb_overcommit_handler(struct ctl_table *table, int write,
1949			void __user *buffer,
1950			size_t *length, loff_t *ppos)
1951{
1952	struct hstate *h = &default_hstate;
1953	unsigned long tmp;
1954	int ret;
1955
 
 
 
1956	tmp = h->nr_overcommit_huge_pages;
1957
1958	if (write && h->order >= MAX_ORDER)
1959		return -EINVAL;
1960
1961	table->data = &tmp;
1962	table->maxlen = sizeof(unsigned long);
1963	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
1964	if (ret)
1965		goto out;
1966
1967	if (write) {
1968		spin_lock(&hugetlb_lock);
1969		h->nr_overcommit_huge_pages = tmp;
1970		spin_unlock(&hugetlb_lock);
1971	}
1972out:
1973	return ret;
1974}
1975
1976#endif /* CONFIG_SYSCTL */
1977
1978void hugetlb_report_meminfo(struct seq_file *m)
1979{
1980	struct hstate *h = &default_hstate;
 
 
1981	seq_printf(m,
1982			"HugePages_Total:   %5lu\n"
1983			"HugePages_Free:    %5lu\n"
1984			"HugePages_Rsvd:    %5lu\n"
1985			"HugePages_Surp:    %5lu\n"
1986			"Hugepagesize:   %8lu kB\n",
1987			h->nr_huge_pages,
1988			h->free_huge_pages,
1989			h->resv_huge_pages,
1990			h->surplus_huge_pages,
1991			1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
1992}
1993
1994int hugetlb_report_node_meminfo(int nid, char *buf)
1995{
1996	struct hstate *h = &default_hstate;
 
 
1997	return sprintf(buf,
1998		"Node %d HugePages_Total: %5u\n"
1999		"Node %d HugePages_Free:  %5u\n"
2000		"Node %d HugePages_Surp:  %5u\n",
2001		nid, h->nr_huge_pages_node[nid],
2002		nid, h->free_huge_pages_node[nid],
2003		nid, h->surplus_huge_pages_node[nid]);
2004}
2005
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2006/* Return the number pages of memory we physically have, in PAGE_SIZE units. */
2007unsigned long hugetlb_total_pages(void)
2008{
2009	struct hstate *h = &default_hstate;
2010	return h->nr_huge_pages * pages_per_huge_page(h);
 
 
 
 
2011}
2012
2013static int hugetlb_acct_memory(struct hstate *h, long delta)
2014{
2015	int ret = -ENOMEM;
2016
2017	spin_lock(&hugetlb_lock);
2018	/*
2019	 * When cpuset is configured, it breaks the strict hugetlb page
2020	 * reservation as the accounting is done on a global variable. Such
2021	 * reservation is completely rubbish in the presence of cpuset because
2022	 * the reservation is not checked against page availability for the
2023	 * current cpuset. Application can still potentially OOM'ed by kernel
2024	 * with lack of free htlb page in cpuset that the task is in.
2025	 * Attempt to enforce strict accounting with cpuset is almost
2026	 * impossible (or too ugly) because cpuset is too fluid that
2027	 * task or memory node can be dynamically moved between cpusets.
2028	 *
2029	 * The change of semantics for shared hugetlb mapping with cpuset is
2030	 * undesirable. However, in order to preserve some of the semantics,
2031	 * we fall back to check against current free page availability as
2032	 * a best attempt and hopefully to minimize the impact of changing
2033	 * semantics that cpuset has.
2034	 */
2035	if (delta > 0) {
2036		if (gather_surplus_pages(h, delta) < 0)
2037			goto out;
2038
2039		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2040			return_unused_surplus_pages(h, delta);
2041			goto out;
2042		}
2043	}
2044
2045	ret = 0;
2046	if (delta < 0)
2047		return_unused_surplus_pages(h, (unsigned long) -delta);
2048
2049out:
2050	spin_unlock(&hugetlb_lock);
2051	return ret;
2052}
2053
2054static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2055{
2056	struct resv_map *reservations = vma_resv_map(vma);
2057
2058	/*
2059	 * This new VMA should share its siblings reservation map if present.
2060	 * The VMA will only ever have a valid reservation map pointer where
2061	 * it is being copied for another still existing VMA.  As that VMA
2062	 * has a reference to the reservation map it cannot disappear until
2063	 * after this open call completes.  It is therefore safe to take a
2064	 * new reference here without additional locking.
2065	 */
2066	if (reservations)
2067		kref_get(&reservations->refs);
2068}
2069
2070static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2071{
2072	struct hstate *h = hstate_vma(vma);
2073	struct resv_map *reservations = vma_resv_map(vma);
2074	unsigned long reserve;
2075	unsigned long start;
2076	unsigned long end;
2077
2078	if (reservations) {
2079		start = vma_hugecache_offset(h, vma, vma->vm_start);
2080		end = vma_hugecache_offset(h, vma, vma->vm_end);
2081
2082		reserve = (end - start) -
2083			region_count(&reservations->regions, start, end);
2084
2085		kref_put(&reservations->refs, resv_map_release);
2086
2087		if (reserve) {
2088			hugetlb_acct_memory(h, -reserve);
2089			hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
2090		}
2091	}
2092}
2093
2094/*
2095 * We cannot handle pagefaults against hugetlb pages at all.  They cause
2096 * handle_mm_fault() to try to instantiate regular-sized pages in the
2097 * hugegpage VMA.  do_page_fault() is supposed to trap this, so BUG is we get
2098 * this far.
2099 */
2100static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2101{
2102	BUG();
2103	return 0;
2104}
2105
2106const struct vm_operations_struct hugetlb_vm_ops = {
2107	.fault = hugetlb_vm_op_fault,
2108	.open = hugetlb_vm_op_open,
2109	.close = hugetlb_vm_op_close,
2110};
2111
2112static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2113				int writable)
2114{
2115	pte_t entry;
2116
2117	if (writable) {
2118		entry =
2119		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
2120	} else {
2121		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
 
2122	}
2123	entry = pte_mkyoung(entry);
2124	entry = pte_mkhuge(entry);
 
2125
2126	return entry;
2127}
2128
2129static void set_huge_ptep_writable(struct vm_area_struct *vma,
2130				   unsigned long address, pte_t *ptep)
2131{
2132	pte_t entry;
2133
2134	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
2135	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2136		update_mmu_cache(vma, address, ptep);
2137}
2138
2139
2140int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2141			    struct vm_area_struct *vma)
2142{
2143	pte_t *src_pte, *dst_pte, entry;
2144	struct page *ptepage;
2145	unsigned long addr;
2146	int cow;
2147	struct hstate *h = hstate_vma(vma);
2148	unsigned long sz = huge_page_size(h);
 
 
 
2149
2150	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2151
 
 
 
 
 
2152	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
 
2153		src_pte = huge_pte_offset(src, addr);
2154		if (!src_pte)
2155			continue;
2156		dst_pte = huge_pte_alloc(dst, addr, sz);
2157		if (!dst_pte)
2158			goto nomem;
 
 
2159
2160		/* If the pagetables are shared don't copy or take references */
2161		if (dst_pte == src_pte)
2162			continue;
2163
2164		spin_lock(&dst->page_table_lock);
2165		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
 
2166		if (!huge_pte_none(huge_ptep_get(src_pte))) {
2167			if (cow)
2168				huge_ptep_set_wrprotect(src, addr, src_pte);
2169			entry = huge_ptep_get(src_pte);
2170			ptepage = pte_page(entry);
2171			get_page(ptepage);
2172			page_dup_rmap(ptepage);
2173			set_huge_pte_at(dst, addr, dst_pte, entry);
2174		}
2175		spin_unlock(&src->page_table_lock);
2176		spin_unlock(&dst->page_table_lock);
2177	}
2178	return 0;
2179
2180nomem:
2181	return -ENOMEM;
 
 
2182}
2183
2184static int is_hugetlb_entry_migration(pte_t pte)
2185{
2186	swp_entry_t swp;
2187
2188	if (huge_pte_none(pte) || pte_present(pte))
2189		return 0;
2190	swp = pte_to_swp_entry(pte);
2191	if (non_swap_entry(swp) && is_migration_entry(swp))
2192		return 1;
2193	else
2194		return 0;
2195}
2196
2197static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2198{
2199	swp_entry_t swp;
2200
2201	if (huge_pte_none(pte) || pte_present(pte))
2202		return 0;
2203	swp = pte_to_swp_entry(pte);
2204	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2205		return 1;
2206	else
2207		return 0;
2208}
2209
2210void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2211			    unsigned long end, struct page *ref_page)
 
2212{
 
2213	struct mm_struct *mm = vma->vm_mm;
2214	unsigned long address;
2215	pte_t *ptep;
2216	pte_t pte;
 
2217	struct page *page;
2218	struct page *tmp;
2219	struct hstate *h = hstate_vma(vma);
2220	unsigned long sz = huge_page_size(h);
2221
2222	/*
2223	 * A page gathering list, protected by per file i_mmap_mutex. The
2224	 * lock is used to avoid list corruption from multiple unmapping
2225	 * of the same page since we are using page->lru.
2226	 */
2227	LIST_HEAD(page_list);
2228
2229	WARN_ON(!is_vm_hugetlb_page(vma));
2230	BUG_ON(start & ~huge_page_mask(h));
2231	BUG_ON(end & ~huge_page_mask(h));
2232
2233	mmu_notifier_invalidate_range_start(mm, start, end);
2234	spin_lock(&mm->page_table_lock);
 
2235	for (address = start; address < end; address += sz) {
2236		ptep = huge_pte_offset(mm, address);
2237		if (!ptep)
2238			continue;
2239
 
2240		if (huge_pmd_unshare(mm, &address, ptep))
2241			continue;
 
 
 
 
 
 
 
 
 
 
 
 
2242
 
2243		/*
2244		 * If a reference page is supplied, it is because a specific
2245		 * page is being unmapped, not a range. Ensure the page we
2246		 * are about to unmap is the actual page of interest.
2247		 */
2248		if (ref_page) {
2249			pte = huge_ptep_get(ptep);
2250			if (huge_pte_none(pte))
2251				continue;
2252			page = pte_page(pte);
2253			if (page != ref_page)
2254				continue;
2255
2256			/*
2257			 * Mark the VMA as having unmapped its page so that
2258			 * future faults in this VMA will fail rather than
2259			 * looking like data was lost
2260			 */
2261			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2262		}
2263
2264		pte = huge_ptep_get_and_clear(mm, address, ptep);
2265		if (huge_pte_none(pte))
2266			continue;
 
2267
2268		/*
2269		 * HWPoisoned hugepage is already unmapped and dropped reference
2270		 */
2271		if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
2272			continue;
2273
2274		page = pte_page(pte);
2275		if (pte_dirty(pte))
2276			set_page_dirty(page);
2277		list_add(&page->lru, &page_list);
 
 
 
2278	}
2279	spin_unlock(&mm->page_table_lock);
2280	flush_tlb_range(vma, start, end);
2281	mmu_notifier_invalidate_range_end(mm, start, end);
2282	list_for_each_entry_safe(page, tmp, &page_list, lru) {
2283		page_remove_rmap(page);
2284		list_del(&page->lru);
2285		put_page(page);
 
 
 
2286	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2287}
2288
2289void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2290			  unsigned long end, struct page *ref_page)
2291{
2292	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
2293	__unmap_hugepage_range(vma, start, end, ref_page);
2294	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 
 
 
 
 
2295}
2296
2297/*
2298 * This is called when the original mapper is failing to COW a MAP_PRIVATE
2299 * mappping it owns the reserve page for. The intention is to unmap the page
2300 * from other VMAs and let the children be SIGKILLed if they are faulting the
2301 * same region.
2302 */
2303static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2304				struct page *page, unsigned long address)
2305{
2306	struct hstate *h = hstate_vma(vma);
2307	struct vm_area_struct *iter_vma;
2308	struct address_space *mapping;
2309	struct prio_tree_iter iter;
2310	pgoff_t pgoff;
2311
2312	/*
2313	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2314	 * from page cache lookup which is in HPAGE_SIZE units.
2315	 */
2316	address = address & huge_page_mask(h);
2317	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
2318		+ (vma->vm_pgoff >> PAGE_SHIFT);
2319	mapping = (struct address_space *)page_private(page);
2320
2321	/*
2322	 * Take the mapping lock for the duration of the table walk. As
2323	 * this mapping should be shared between all the VMAs,
2324	 * __unmap_hugepage_range() is called as the lock is already held
2325	 */
2326	mutex_lock(&mapping->i_mmap_mutex);
2327	vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
2328		/* Do not unmap the current VMA */
2329		if (iter_vma == vma)
2330			continue;
2331
2332		/*
2333		 * Unmap the page from other VMAs without their own reserves.
2334		 * They get marked to be SIGKILLed if they fault in these
2335		 * areas. This is because a future no-page fault on this VMA
2336		 * could insert a zeroed page instead of the data existing
2337		 * from the time of fork. This would look like data corruption
2338		 */
2339		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2340			__unmap_hugepage_range(iter_vma,
2341				address, address + huge_page_size(h),
2342				page);
2343	}
2344	mutex_unlock(&mapping->i_mmap_mutex);
2345
2346	return 1;
2347}
2348
2349/*
2350 * Hugetlb_cow() should be called with page lock of the original hugepage held.
 
 
 
2351 */
2352static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2353			unsigned long address, pte_t *ptep, pte_t pte,
2354			struct page *pagecache_page)
2355{
2356	struct hstate *h = hstate_vma(vma);
2357	struct page *old_page, *new_page;
2358	int avoidcopy;
2359	int outside_reserve = 0;
 
 
2360
2361	old_page = pte_page(pte);
2362
2363retry_avoidcopy:
2364	/* If no-one else is actually using this page, avoid the copy
2365	 * and just make the page writable */
2366	avoidcopy = (page_mapcount(old_page) == 1);
2367	if (avoidcopy) {
2368		if (PageAnon(old_page))
2369			page_move_anon_rmap(old_page, vma, address);
2370		set_huge_ptep_writable(vma, address, ptep);
2371		return 0;
2372	}
2373
2374	/*
2375	 * If the process that created a MAP_PRIVATE mapping is about to
2376	 * perform a COW due to a shared page count, attempt to satisfy
2377	 * the allocation without using the existing reserves. The pagecache
2378	 * page is used to determine if the reserve at this address was
2379	 * consumed or not. If reserves were used, a partial faulted mapping
2380	 * at the time of fork() could consume its reserves on COW instead
2381	 * of the full address range.
2382	 */
2383	if (!(vma->vm_flags & VM_MAYSHARE) &&
2384			is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2385			old_page != pagecache_page)
2386		outside_reserve = 1;
2387
2388	page_cache_get(old_page);
2389
2390	/* Drop page_table_lock as buddy allocator may be called */
2391	spin_unlock(&mm->page_table_lock);
2392	new_page = alloc_huge_page(vma, address, outside_reserve);
2393
2394	if (IS_ERR(new_page)) {
 
2395		page_cache_release(old_page);
2396
2397		/*
2398		 * If a process owning a MAP_PRIVATE mapping fails to COW,
2399		 * it is due to references held by a child and an insufficient
2400		 * huge page pool. To guarantee the original mappers
2401		 * reliability, unmap the page from child processes. The child
2402		 * may get SIGKILLed if it later faults.
2403		 */
2404		if (outside_reserve) {
2405			BUG_ON(huge_pte_none(pte));
2406			if (unmap_ref_private(mm, vma, old_page, address)) {
2407				BUG_ON(page_count(old_page) != 1);
2408				BUG_ON(huge_pte_none(pte));
2409				spin_lock(&mm->page_table_lock);
2410				goto retry_avoidcopy;
 
 
 
 
 
 
 
 
2411			}
2412			WARN_ON_ONCE(1);
2413		}
2414
2415		/* Caller expects lock to be held */
2416		spin_lock(&mm->page_table_lock);
2417		return -PTR_ERR(new_page);
 
 
 
2418	}
2419
2420	/*
2421	 * When the original hugepage is shared one, it does not have
2422	 * anon_vma prepared.
2423	 */
2424	if (unlikely(anon_vma_prepare(vma))) {
 
 
2425		/* Caller expects lock to be held */
2426		spin_lock(&mm->page_table_lock);
2427		return VM_FAULT_OOM;
2428	}
2429
2430	copy_user_huge_page(new_page, old_page, address, vma,
2431			    pages_per_huge_page(h));
2432	__SetPageUptodate(new_page);
2433
 
 
 
2434	/*
2435	 * Retake the page_table_lock to check for racing updates
2436	 * before the page tables are altered
2437	 */
2438	spin_lock(&mm->page_table_lock);
2439	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2440	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
 
 
2441		/* Break COW */
2442		mmu_notifier_invalidate_range_start(mm,
2443			address & huge_page_mask(h),
2444			(address & huge_page_mask(h)) + huge_page_size(h));
2445		huge_ptep_clear_flush(vma, address, ptep);
2446		set_huge_pte_at(mm, address, ptep,
2447				make_huge_pte(vma, new_page, 1));
2448		page_remove_rmap(old_page);
2449		hugepage_add_new_anon_rmap(new_page, vma, address);
2450		/* Make the old page be freed below */
2451		new_page = old_page;
2452		mmu_notifier_invalidate_range_end(mm,
2453			address & huge_page_mask(h),
2454			(address & huge_page_mask(h)) + huge_page_size(h));
2455	}
 
 
2456	page_cache_release(new_page);
2457	page_cache_release(old_page);
 
 
 
2458	return 0;
2459}
2460
2461/* Return the pagecache page at a given address within a VMA */
2462static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2463			struct vm_area_struct *vma, unsigned long address)
2464{
2465	struct address_space *mapping;
2466	pgoff_t idx;
2467
2468	mapping = vma->vm_file->f_mapping;
2469	idx = vma_hugecache_offset(h, vma, address);
2470
2471	return find_lock_page(mapping, idx);
2472}
2473
2474/*
2475 * Return whether there is a pagecache page to back given address within VMA.
2476 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
2477 */
2478static bool hugetlbfs_pagecache_present(struct hstate *h,
2479			struct vm_area_struct *vma, unsigned long address)
2480{
2481	struct address_space *mapping;
2482	pgoff_t idx;
2483	struct page *page;
2484
2485	mapping = vma->vm_file->f_mapping;
2486	idx = vma_hugecache_offset(h, vma, address);
2487
2488	page = find_get_page(mapping, idx);
2489	if (page)
2490		put_page(page);
2491	return page != NULL;
2492}
2493
2494static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2495			unsigned long address, pte_t *ptep, unsigned int flags)
 
2496{
2497	struct hstate *h = hstate_vma(vma);
2498	int ret = VM_FAULT_SIGBUS;
2499	pgoff_t idx;
2500	unsigned long size;
2501	struct page *page;
2502	struct address_space *mapping;
2503	pte_t new_pte;
 
2504
2505	/*
2506	 * Currently, we are forced to kill the process in the event the
2507	 * original mapper has unmapped pages from the child due to a failed
2508	 * COW. Warn that such a situation has occurred as it may not be obvious
2509	 */
2510	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2511		printk(KERN_WARNING
2512			"PID %d killed due to inadequate hugepage pool\n",
2513			current->pid);
2514		return ret;
2515	}
2516
2517	mapping = vma->vm_file->f_mapping;
2518	idx = vma_hugecache_offset(h, vma, address);
2519
2520	/*
2521	 * Use page lock to guard against racing truncation
2522	 * before we get page_table_lock.
2523	 */
2524retry:
2525	page = find_lock_page(mapping, idx);
2526	if (!page) {
2527		size = i_size_read(mapping->host) >> huge_page_shift(h);
2528		if (idx >= size)
2529			goto out;
2530		page = alloc_huge_page(vma, address, 0);
2531		if (IS_ERR(page)) {
2532			ret = -PTR_ERR(page);
 
 
 
 
2533			goto out;
2534		}
2535		clear_huge_page(page, address, pages_per_huge_page(h));
2536		__SetPageUptodate(page);
2537
2538		if (vma->vm_flags & VM_MAYSHARE) {
2539			int err;
2540			struct inode *inode = mapping->host;
2541
2542			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
2543			if (err) {
2544				put_page(page);
2545				if (err == -EEXIST)
2546					goto retry;
2547				goto out;
2548			}
 
2549
2550			spin_lock(&inode->i_lock);
2551			inode->i_blocks += blocks_per_huge_page(h);
2552			spin_unlock(&inode->i_lock);
2553			page_dup_rmap(page);
2554		} else {
2555			lock_page(page);
2556			if (unlikely(anon_vma_prepare(vma))) {
2557				ret = VM_FAULT_OOM;
2558				goto backout_unlocked;
2559			}
2560			hugepage_add_new_anon_rmap(page, vma, address);
2561		}
2562	} else {
2563		/*
2564		 * If memory error occurs between mmap() and fault, some process
2565		 * don't have hwpoisoned swap entry for errored virtual address.
2566		 * So we need to block hugepage fault by PG_hwpoison bit check.
2567		 */
2568		if (unlikely(PageHWPoison(page))) {
2569			ret = VM_FAULT_HWPOISON |
2570			      VM_FAULT_SET_HINDEX(h - hstates);
2571			goto backout_unlocked;
2572		}
2573		page_dup_rmap(page);
2574	}
2575
2576	/*
2577	 * If we are going to COW a private mapping later, we examine the
2578	 * pending reservations for this page now. This will ensure that
2579	 * any allocations necessary to record that reservation occur outside
2580	 * the spinlock.
2581	 */
2582	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2583		if (vma_needs_reservation(h, vma, address) < 0) {
2584			ret = VM_FAULT_OOM;
2585			goto backout_unlocked;
2586		}
2587
2588	spin_lock(&mm->page_table_lock);
 
2589	size = i_size_read(mapping->host) >> huge_page_shift(h);
2590	if (idx >= size)
2591		goto backout;
2592
2593	ret = 0;
2594	if (!huge_pte_none(huge_ptep_get(ptep)))
2595		goto backout;
2596
 
 
 
 
 
2597	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2598				&& (vma->vm_flags & VM_SHARED)));
2599	set_huge_pte_at(mm, address, ptep, new_pte);
2600
2601	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2602		/* Optimization, do the COW without a second fault */
2603		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
2604	}
2605
2606	spin_unlock(&mm->page_table_lock);
2607	unlock_page(page);
2608out:
2609	return ret;
2610
2611backout:
2612	spin_unlock(&mm->page_table_lock);
2613backout_unlocked:
2614	unlock_page(page);
2615	put_page(page);
2616	goto out;
2617}
2618
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2619int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2620			unsigned long address, unsigned int flags)
2621{
2622	pte_t *ptep;
2623	pte_t entry;
2624	int ret;
 
 
2625	struct page *page = NULL;
2626	struct page *pagecache_page = NULL;
2627	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
2628	struct hstate *h = hstate_vma(vma);
 
 
 
2629
2630	ptep = huge_pte_offset(mm, address);
2631	if (ptep) {
2632		entry = huge_ptep_get(ptep);
2633		if (unlikely(is_hugetlb_entry_migration(entry))) {
2634			migration_entry_wait(mm, (pmd_t *)ptep, address);
2635			return 0;
2636		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2637			return VM_FAULT_HWPOISON_LARGE |
2638			       VM_FAULT_SET_HINDEX(h - hstates);
2639	}
2640
2641	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
2642	if (!ptep)
2643		return VM_FAULT_OOM;
2644
 
 
 
2645	/*
2646	 * Serialize hugepage allocation and instantiation, so that we don't
2647	 * get spurious allocation failures if two CPUs race to instantiate
2648	 * the same page in the page cache.
2649	 */
2650	mutex_lock(&hugetlb_instantiation_mutex);
 
 
2651	entry = huge_ptep_get(ptep);
2652	if (huge_pte_none(entry)) {
2653		ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2654		goto out_mutex;
2655	}
2656
2657	ret = 0;
2658
2659	/*
2660	 * If we are going to COW the mapping later, we examine the pending
2661	 * reservations for this page now. This will ensure that any
2662	 * allocations necessary to record that reservation occur outside the
2663	 * spinlock. For private mappings, we also lookup the pagecache
2664	 * page now as it is used to determine if a reservation has been
2665	 * consumed.
2666	 */
2667	if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
2668		if (vma_needs_reservation(h, vma, address) < 0) {
2669			ret = VM_FAULT_OOM;
2670			goto out_mutex;
2671		}
2672
2673		if (!(vma->vm_flags & VM_MAYSHARE))
2674			pagecache_page = hugetlbfs_pagecache_page(h,
2675								vma, address);
2676	}
2677
2678	/*
2679	 * hugetlb_cow() requires page locks of pte_page(entry) and
2680	 * pagecache_page, so here we need take the former one
2681	 * when page != pagecache_page or !pagecache_page.
2682	 * Note that locking order is always pagecache_page -> page,
2683	 * so no worry about deadlock.
2684	 */
2685	page = pte_page(entry);
 
2686	if (page != pagecache_page)
2687		lock_page(page);
2688
2689	spin_lock(&mm->page_table_lock);
 
2690	/* Check for a racing update before calling hugetlb_cow */
2691	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2692		goto out_page_table_lock;
2693
2694
2695	if (flags & FAULT_FLAG_WRITE) {
2696		if (!pte_write(entry)) {
2697			ret = hugetlb_cow(mm, vma, address, ptep, entry,
2698							pagecache_page);
2699			goto out_page_table_lock;
2700		}
2701		entry = pte_mkdirty(entry);
2702	}
2703	entry = pte_mkyoung(entry);
2704	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2705						flags & FAULT_FLAG_WRITE))
2706		update_mmu_cache(vma, address, ptep);
2707
2708out_page_table_lock:
2709	spin_unlock(&mm->page_table_lock);
2710
2711	if (pagecache_page) {
2712		unlock_page(pagecache_page);
2713		put_page(pagecache_page);
2714	}
2715	if (page != pagecache_page)
2716		unlock_page(page);
 
2717
2718out_mutex:
2719	mutex_unlock(&hugetlb_instantiation_mutex);
2720
2721	return ret;
2722}
2723
2724/* Can be overriden by architectures */
2725__attribute__((weak)) struct page *
2726follow_huge_pud(struct mm_struct *mm, unsigned long address,
2727	       pud_t *pud, int write)
2728{
2729	BUG();
2730	return NULL;
2731}
2732
2733int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2734			struct page **pages, struct vm_area_struct **vmas,
2735			unsigned long *position, int *length, int i,
2736			unsigned int flags)
2737{
2738	unsigned long pfn_offset;
2739	unsigned long vaddr = *position;
2740	int remainder = *length;
2741	struct hstate *h = hstate_vma(vma);
2742
2743	spin_lock(&mm->page_table_lock);
2744	while (vaddr < vma->vm_end && remainder) {
2745		pte_t *pte;
 
2746		int absent;
2747		struct page *page;
2748
2749		/*
2750		 * Some archs (sparc64, sh*) have multiple pte_ts to
2751		 * each hugepage.  We have to make sure we get the
2752		 * first, for the page indexing below to work.
 
 
2753		 */
2754		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
 
 
2755		absent = !pte || huge_pte_none(huge_ptep_get(pte));
2756
2757		/*
2758		 * When coredumping, it suits get_dump_page if we just return
2759		 * an error where there's an empty slot with no huge pagecache
2760		 * to back it.  This way, we avoid allocating a hugepage, and
2761		 * the sparse dumpfile avoids allocating disk blocks, but its
2762		 * huge holes still show up with zeroes where they need to be.
2763		 */
2764		if (absent && (flags & FOLL_DUMP) &&
2765		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
 
 
2766			remainder = 0;
2767			break;
2768		}
2769
2770		if (absent ||
2771		    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
 
 
 
 
 
 
 
 
 
 
 
2772			int ret;
2773
2774			spin_unlock(&mm->page_table_lock);
 
2775			ret = hugetlb_fault(mm, vma, vaddr,
2776				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
2777			spin_lock(&mm->page_table_lock);
2778			if (!(ret & VM_FAULT_ERROR))
2779				continue;
2780
2781			remainder = 0;
2782			break;
2783		}
2784
2785		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
2786		page = pte_page(huge_ptep_get(pte));
2787same_page:
2788		if (pages) {
2789			pages[i] = mem_map_offset(page, pfn_offset);
2790			get_page(pages[i]);
2791		}
2792
2793		if (vmas)
2794			vmas[i] = vma;
2795
2796		vaddr += PAGE_SIZE;
2797		++pfn_offset;
2798		--remainder;
2799		++i;
2800		if (vaddr < vma->vm_end && remainder &&
2801				pfn_offset < pages_per_huge_page(h)) {
2802			/*
2803			 * We use pfn_offset to avoid touching the pageframes
2804			 * of this compound page.
2805			 */
2806			goto same_page;
2807		}
 
2808	}
2809	spin_unlock(&mm->page_table_lock);
2810	*length = remainder;
2811	*position = vaddr;
2812
2813	return i ? i : -EFAULT;
2814}
2815
2816void hugetlb_change_protection(struct vm_area_struct *vma,
2817		unsigned long address, unsigned long end, pgprot_t newprot)
2818{
2819	struct mm_struct *mm = vma->vm_mm;
2820	unsigned long start = address;
2821	pte_t *ptep;
2822	pte_t pte;
2823	struct hstate *h = hstate_vma(vma);
 
2824
2825	BUG_ON(address >= end);
2826	flush_cache_range(vma, address, end);
2827
 
2828	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
2829	spin_lock(&mm->page_table_lock);
2830	for (; address < end; address += huge_page_size(h)) {
 
2831		ptep = huge_pte_offset(mm, address);
2832		if (!ptep)
2833			continue;
2834		if (huge_pmd_unshare(mm, &address, ptep))
 
 
 
2835			continue;
 
2836		if (!huge_pte_none(huge_ptep_get(ptep))) {
2837			pte = huge_ptep_get_and_clear(mm, address, ptep);
2838			pte = pte_mkhuge(pte_modify(pte, newprot));
 
2839			set_huge_pte_at(mm, address, ptep, pte);
 
2840		}
 
2841	}
2842	spin_unlock(&mm->page_table_lock);
 
 
 
 
 
 
2843	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 
2844
2845	flush_tlb_range(vma, start, end);
2846}
2847
2848int hugetlb_reserve_pages(struct inode *inode,
2849					long from, long to,
2850					struct vm_area_struct *vma,
2851					vm_flags_t vm_flags)
2852{
2853	long ret, chg;
2854	struct hstate *h = hstate_inode(inode);
 
 
2855
2856	/*
2857	 * Only apply hugepage reservation if asked. At fault time, an
2858	 * attempt will be made for VM_NORESERVE to allocate a page
2859	 * and filesystem quota without using reserves
2860	 */
2861	if (vm_flags & VM_NORESERVE)
2862		return 0;
2863
2864	/*
2865	 * Shared mappings base their reservation on the number of pages that
2866	 * are already allocated on behalf of the file. Private mappings need
2867	 * to reserve the full area even if read-only as mprotect() may be
2868	 * called to make the mapping read-write. Assume !vma is a shm mapping
2869	 */
2870	if (!vma || vma->vm_flags & VM_MAYSHARE)
2871		chg = region_chg(&inode->i_mapping->private_list, from, to);
2872	else {
2873		struct resv_map *resv_map = resv_map_alloc();
 
 
 
2874		if (!resv_map)
2875			return -ENOMEM;
2876
2877		chg = to - from;
2878
2879		set_vma_resv_map(vma, resv_map);
2880		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
2881	}
2882
2883	if (chg < 0)
2884		return chg;
 
 
2885
2886	/* There must be enough filesystem quota for the mapping */
2887	if (hugetlb_get_quota(inode->i_mapping, chg))
2888		return -ENOSPC;
 
 
2889
2890	/*
2891	 * Check enough hugepages are available for the reservation.
2892	 * Hand back the quota if there are not
2893	 */
2894	ret = hugetlb_acct_memory(h, chg);
2895	if (ret < 0) {
2896		hugetlb_put_quota(inode->i_mapping, chg);
2897		return ret;
2898	}
2899
2900	/*
2901	 * Account for the reservations made. Shared mappings record regions
2902	 * that have reservations as they are shared by multiple VMAs.
2903	 * When the last VMA disappears, the region map says how much
2904	 * the reservation was and the page cache tells how much of
2905	 * the reservation was consumed. Private mappings are per-VMA and
2906	 * only the consumed reservations are tracked. When the VMA
2907	 * disappears, the original reservation is the VMA size and the
2908	 * consumed reservations are stored in the map. Hence, nothing
2909	 * else has to be done for private mappings here
2910	 */
2911	if (!vma || vma->vm_flags & VM_MAYSHARE)
2912		region_add(&inode->i_mapping->private_list, from, to);
2913	return 0;
 
 
 
 
2914}
2915
2916void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
2917{
2918	struct hstate *h = hstate_inode(inode);
2919	long chg = region_truncate(&inode->i_mapping->private_list, offset);
 
 
2920
 
 
2921	spin_lock(&inode->i_lock);
2922	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
2923	spin_unlock(&inode->i_lock);
2924
2925	hugetlb_put_quota(inode->i_mapping, (chg - freed));
2926	hugetlb_acct_memory(h, -(chg - freed));
2927}
2928
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2929#ifdef CONFIG_MEMORY_FAILURE
2930
2931/* Should be called in hugetlb_lock */
2932static int is_hugepage_on_freelist(struct page *hpage)
2933{
2934	struct page *page;
2935	struct page *tmp;
2936	struct hstate *h = page_hstate(hpage);
2937	int nid = page_to_nid(hpage);
2938
2939	list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
2940		if (page == hpage)
2941			return 1;
2942	return 0;
2943}
2944
2945/*
2946 * This function is called from memory failure code.
2947 * Assume the caller holds page lock of the head page.
2948 */
2949int dequeue_hwpoisoned_huge_page(struct page *hpage)
2950{
2951	struct hstate *h = page_hstate(hpage);
2952	int nid = page_to_nid(hpage);
2953	int ret = -EBUSY;
2954
2955	spin_lock(&hugetlb_lock);
2956	if (is_hugepage_on_freelist(hpage)) {
2957		list_del(&hpage->lru);
 
 
 
 
 
 
2958		set_page_refcounted(hpage);
2959		h->free_huge_pages--;
2960		h->free_huge_pages_node[nid]--;
2961		ret = 0;
2962	}
2963	spin_unlock(&hugetlb_lock);
2964	return ret;
2965}
2966#endif