v4.6
   1/*
   2 *  linux/arch/x86_64/mm/init.c
   3 *
   4 *  Copyright (C) 1995  Linus Torvalds
   5 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
   6 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
   7 */
   8
   9#include <linux/signal.h>
  10#include <linux/sched.h>
  11#include <linux/kernel.h>
  12#include <linux/errno.h>
  13#include <linux/string.h>
  14#include <linux/types.h>
  15#include <linux/ptrace.h>
  16#include <linux/mman.h>
  17#include <linux/mm.h>
  18#include <linux/swap.h>
  19#include <linux/smp.h>
  20#include <linux/init.h>
  21#include <linux/initrd.h>
  22#include <linux/pagemap.h>
  23#include <linux/bootmem.h>
  24#include <linux/memblock.h>
  25#include <linux/proc_fs.h>
  26#include <linux/pci.h>
  27#include <linux/pfn.h>
  28#include <linux/poison.h>
  29#include <linux/dma-mapping.h>
  30#include <linux/module.h>
  31#include <linux/memory.h>
  32#include <linux/memory_hotplug.h>
  33#include <linux/memremap.h>
  34#include <linux/nmi.h>
  35#include <linux/gfp.h>
  36#include <linux/kcore.h>
  37
  38#include <asm/processor.h>
  39#include <asm/bios_ebda.h>
  40#include <asm/uaccess.h>
  41#include <asm/pgtable.h>
  42#include <asm/pgalloc.h>
  43#include <asm/dma.h>
  44#include <asm/fixmap.h>
  45#include <asm/e820.h>
  46#include <asm/apic.h>
  47#include <asm/tlb.h>
  48#include <asm/mmu_context.h>
  49#include <asm/proto.h>
  50#include <asm/smp.h>
  51#include <asm/sections.h>
  52#include <asm/kdebug.h>
  53#include <asm/numa.h>
  54#include <asm/cacheflush.h>
  55#include <asm/init.h>
  56#include <asm/uv/uv.h>
  57#include <asm/setup.h>
  58
  59#include "mm_internal.h"
  60
  61static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
  62			   unsigned long addr, unsigned long end)
  63{
  64	addr &= PMD_MASK;
  65	for (; addr < end; addr += PMD_SIZE) {
  66		pmd_t *pmd = pmd_page + pmd_index(addr);
  67
  68		if (!pmd_present(*pmd))
  69			set_pmd(pmd, __pmd(addr | pmd_flag));
  70	}
  71}
  72static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
  73			  unsigned long addr, unsigned long end)
  74{
  75	unsigned long next;
  76
  77	for (; addr < end; addr = next) {
  78		pud_t *pud = pud_page + pud_index(addr);
  79		pmd_t *pmd;
  80
  81		next = (addr & PUD_MASK) + PUD_SIZE;
  82		if (next > end)
  83			next = end;
  84
  85		if (pud_present(*pud)) {
  86			pmd = pmd_offset(pud, 0);
  87			ident_pmd_init(info->pmd_flag, pmd, addr, next);
  88			continue;
  89		}
  90		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
  91		if (!pmd)
  92			return -ENOMEM;
  93		ident_pmd_init(info->pmd_flag, pmd, addr, next);
  94		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
  95	}
  96
  97	return 0;
  98}
  99
 100int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
 101			      unsigned long addr, unsigned long end)
 102{
 103	unsigned long next;
 104	int result;
 105	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
 106
 107	for (; addr < end; addr = next) {
 108		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
 109		pud_t *pud;
 110
 111		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
 112		if (next > end)
 113			next = end;
 114
 115		if (pgd_present(*pgd)) {
 116			pud = pud_offset(pgd, 0);
 117			result = ident_pud_init(info, pud, addr, next);
 118			if (result)
 119				return result;
 120			continue;
 121		}
 122
 123		pud = (pud_t *)info->alloc_pgt_page(info->context);
 124		if (!pud)
 125			return -ENOMEM;
 126		result = ident_pud_init(info, pud, addr, next);
 127		if (result)
 128			return result;
 129		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
 130	}
 131
 132	return 0;
 133}
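
A minimal sketch of how a caller can drive kernel_ident_mapping_init(). The allocator helper, the GFP flags and the 1 GiB range below are illustrative assumptions, not taken from this file; real users (kexec, hibernation) supply their own page-table allocators.

	static void *example_alloc_pgt_page(void *context)	/* hypothetical helper */
	{
		return (void *)get_zeroed_page(GFP_KERNEL);
	}

	static int example_build_identmap(void)
	{
		struct x86_mapping_info info = {
			.alloc_pgt_page	= example_alloc_pgt_page,
			.context	= NULL,
			.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
			.kernel_mapping	= false,	/* build a 1:1 (identity) mapping */
		};
		pgd_t *pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);

		if (!pgd)
			return -ENOMEM;
		/* Identity-map the first 1 GiB; error unwinding is omitted in this sketch. */
		return kernel_ident_mapping_init(&info, pgd, 0, 1UL << 30);
	}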
 134
 135/*
 136 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
 137 * physical space, so we can cache the location of the first one and move
 138 * around without checking the pgd every time.
 139 */
 140
 141pteval_t __supported_pte_mask __read_mostly = ~0;
 142EXPORT_SYMBOL_GPL(__supported_pte_mask);
 143
 144int force_personality32;
 145
 146/*
 147 * noexec32=on|off
 148 * Control the non-executable heap for 32-bit processes.
 149 * To control the stack as well, use noexec=off
 150 *
 151 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 152 * off	PROT_READ implies PROT_EXEC
 153 */
 154static int __init nonx32_setup(char *str)
 155{
 156	if (!strcmp(str, "on"))
 157		force_personality32 &= ~READ_IMPLIES_EXEC;
 158	else if (!strcmp(str, "off"))
 159		force_personality32 |= READ_IMPLIES_EXEC;
 160	return 1;
 161}
 162__setup("noexec32=", nonx32_setup);
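
Usage example (boot command line): the default behaviour corresponds to noexec32=on; passing noexec32=off makes PROT_READ imply PROT_EXEC again for 32-bit processes, e.g.

	... noexec32=off noexec=off

where the second parameter, per the comment above, is only needed if the stack should be affected as well.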
 163
 164/*
 165 * When memory is added or removed, make sure all process MMs have
 166 * suitable PGD entries in their local PGD-level page.
 167 */
 168void sync_global_pgds(unsigned long start, unsigned long end, int removed)
 169{
 170	unsigned long address;
 171
 172	for (address = start; address <= end; address += PGDIR_SIZE) {
 173		const pgd_t *pgd_ref = pgd_offset_k(address);
 174		struct page *page;
 175
 176		/*
 177		 * When it is called after memory hot remove, pgd_none()
 178		 * returns true. In this case (removed == 1), we must clear
 179		 * the PGD entries in the local PGD level page.
 180		 */
 181		if (pgd_none(*pgd_ref) && !removed)
 182			continue;
 183
 184		spin_lock(&pgd_lock);
 185		list_for_each_entry(page, &pgd_list, lru) {
 186			pgd_t *pgd;
 187			spinlock_t *pgt_lock;
 188
 189			pgd = (pgd_t *)page_address(page) + pgd_index(address);
 190			/* the pgt_lock only for Xen */
 191			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 192			spin_lock(pgt_lock);
 193
 194			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
 195				BUG_ON(pgd_page_vaddr(*pgd)
 196				       != pgd_page_vaddr(*pgd_ref));
 197
 198			if (removed) {
 199				if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
 200					pgd_clear(pgd);
 201			} else {
 202				if (pgd_none(*pgd))
 203					set_pgd(pgd, *pgd_ref);
 204			}
 205
 206			spin_unlock(pgt_lock);
 207		}
 208		spin_unlock(&pgd_lock);
 209	}
 210}
 211
 212/*
 213 * NOTE: This function is marked __ref because it calls __init function
 214 * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
 215 */
 216static __ref void *spp_getpage(void)
 217{
 218	void *ptr;
 219
 220	if (after_bootmem)
 221		ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
 222	else
 223		ptr = alloc_bootmem_pages(PAGE_SIZE);
 224
 225	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
 226		panic("set_pte_phys: cannot allocate page data %s\n",
 227			after_bootmem ? "after bootmem" : "");
 228	}
 229
 230	pr_debug("spp_getpage %p\n", ptr);
 231
 232	return ptr;
 233}
 234
 235static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
 236{
 237	if (pgd_none(*pgd)) {
 238		pud_t *pud = (pud_t *)spp_getpage();
 239		pgd_populate(&init_mm, pgd, pud);
 240		if (pud != pud_offset(pgd, 0))
 241			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
 242			       pud, pud_offset(pgd, 0));
 243	}
 244	return pud_offset(pgd, vaddr);
 245}
 246
 247static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
 248{
 249	if (pud_none(*pud)) {
 250		pmd_t *pmd = (pmd_t *) spp_getpage();
 251		pud_populate(&init_mm, pud, pmd);
 252		if (pmd != pmd_offset(pud, 0))
 253			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
 254			       pmd, pmd_offset(pud, 0));
 255	}
 256	return pmd_offset(pud, vaddr);
 257}
 258
 259static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
 260{
 261	if (pmd_none(*pmd)) {
 262		pte_t *pte = (pte_t *) spp_getpage();
 263		pmd_populate_kernel(&init_mm, pmd, pte);
 264		if (pte != pte_offset_kernel(pmd, 0))
 265			printk(KERN_ERR "PAGETABLE BUG #02!\n");
 266	}
 267	return pte_offset_kernel(pmd, vaddr);
 268}
 269
 270void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
 271{
 272	pud_t *pud;
 273	pmd_t *pmd;
 274	pte_t *pte;
 275
 276	pud = pud_page + pud_index(vaddr);
 277	pmd = fill_pmd(pud, vaddr);
 278	pte = fill_pte(pmd, vaddr);
 279
 280	set_pte(pte, new_pte);
 281
 282	/*
 283	 * It's enough to flush this one mapping.
 284	 * (PGE mappings get flushed as well)
 285	 */
 286	__flush_tlb_one(vaddr);
 287}
 288
 289void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
 290{
 291	pgd_t *pgd;
 292	pud_t *pud_page;
 293
 294	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));
 295
 296	pgd = pgd_offset_k(vaddr);
 297	if (pgd_none(*pgd)) {
 298		printk(KERN_ERR
 299			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
 300		return;
 301	}
 302	pud_page = (pud_t*)pgd_page_vaddr(*pgd);
 303	set_pte_vaddr_pud(pud_page, vaddr, pteval);
 304}
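
A hedged usage sketch: install a single 4K kernel mapping through set_pte_vaddr(), which is roughly what the x86-64 __set_fixmap() path boils down to. FIX_APIC_BASE and the phys variable are only illustrative here.

	/* phys: page-aligned physical address supplied by the caller (hypothetical). */
	unsigned long va = fix_to_virt(FIX_APIC_BASE);

	set_pte_vaddr(va, pfn_pte(phys >> PAGE_SHIFT, PAGE_KERNEL_NOCACHE));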
 305
 306pmd_t * __init populate_extra_pmd(unsigned long vaddr)
 307{
 308	pgd_t *pgd;
 309	pud_t *pud;
 310
 311	pgd = pgd_offset_k(vaddr);
 312	pud = fill_pud(pgd, vaddr);
 313	return fill_pmd(pud, vaddr);
 314}
 315
 316pte_t * __init populate_extra_pte(unsigned long vaddr)
 317{
 318	pmd_t *pmd;
 319
 320	pmd = populate_extra_pmd(vaddr);
 321	return fill_pte(pmd, vaddr);
 322}
 323
 324/*
 325 * Create large page table mappings for a range of physical addresses.
 326 */
 327static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
 328					enum page_cache_mode cache)
 329{
 330	pgd_t *pgd;
 331	pud_t *pud;
 332	pmd_t *pmd;
 333	pgprot_t prot;
 334
 335	pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
 336		pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
 337	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
 338	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
 339		pgd = pgd_offset_k((unsigned long)__va(phys));
 340		if (pgd_none(*pgd)) {
 341			pud = (pud_t *) spp_getpage();
 342			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
 343						_PAGE_USER));
 344		}
 345		pud = pud_offset(pgd, (unsigned long)__va(phys));
 346		if (pud_none(*pud)) {
 347			pmd = (pmd_t *) spp_getpage();
 348			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
 349						_PAGE_USER));
 350		}
 351		pmd = pmd_offset(pud, phys);
 352		BUG_ON(!pmd_none(*pmd));
 353		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
 354	}
 355}
 356
 357void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
 358{
 359	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
 360}
 361
 362void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
 363{
 364	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
 365}
 366
 367/*
 368 * The head.S code sets up the kernel high mapping:
 369 *
 370 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 371 *
 372 * phys_base holds the negative offset to the kernel, which is added
 373 * to the compile time generated pmds. This results in invalid pmds up
 374 * to the point where we hit the physaddr 0 mapping.
 375 *
 376 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 377 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 378 * well, as they are located before _text:
 379 */
 380void __init cleanup_highmap(void)
 381{
 382	unsigned long vaddr = __START_KERNEL_map;
 383	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
 384	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
 385	pmd_t *pmd = level2_kernel_pgt;
 386
 387	/*
 388	 * Native path, max_pfn_mapped is not set yet.
 389	 * Xen has valid max_pfn_mapped set in
 390	 *	arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
 391	 */
 392	if (max_pfn_mapped)
 393		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
 394
 395	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
 396		if (pmd_none(*pmd))
 397			continue;
 398		if (vaddr < (unsigned long) _text || vaddr > end)
 399			set_pmd(pmd, __pmd(0));
 400	}
 401}
 402
 403static unsigned long __meminit
 404phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
 405	      pgprot_t prot)
 406{
 407	unsigned long pages = 0, next;
 408	unsigned long last_map_addr = end;
 409	int i;
 410
 411	pte_t *pte = pte_page + pte_index(addr);
 412
 413	for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr = next, pte++) {
 414		next = (addr & PAGE_MASK) + PAGE_SIZE;
 415		if (addr >= end) {
 416			if (!after_bootmem &&
 417			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RAM) &&
 418			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RESERVED_KERN))
 419				set_pte(pte, __pte(0));
 420			continue;
 421		}
 422
 423		/*
 424		 * We will re-use the existing mapping.
 425		 * Xen for example has some special requirements, like mapping
  426		 * pagetable pages as RO. So assume that whoever pre-set up
  427		 * these mappings knew what they were doing.
 428		 */
 429		if (pte_val(*pte)) {
 430			if (!after_bootmem)
 431				pages++;
 432			continue;
 433		}
 434
 435		if (0)
 436			printk("   pte=%p addr=%lx pte=%016lx\n",
 437			       pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
 438		pages++;
 439		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
 440		last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
 441	}
 442
 443	update_page_count(PG_LEVEL_4K, pages);
 444
 445	return last_map_addr;
 446}
 447
 448static unsigned long __meminit
 449phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 450	      unsigned long page_size_mask, pgprot_t prot)
 451{
 452	unsigned long pages = 0, next;
 453	unsigned long last_map_addr = end;
 454
 455	int i = pmd_index(address);
 456
 457	for (; i < PTRS_PER_PMD; i++, address = next) {
 458		pmd_t *pmd = pmd_page + pmd_index(address);
 459		pte_t *pte;
 460		pgprot_t new_prot = prot;
 461
 462		next = (address & PMD_MASK) + PMD_SIZE;
 463		if (address >= end) {
 464			if (!after_bootmem &&
 465			    !e820_any_mapped(address & PMD_MASK, next, E820_RAM) &&
 466			    !e820_any_mapped(address & PMD_MASK, next, E820_RESERVED_KERN))
 467				set_pmd(pmd, __pmd(0));
 468			continue;
 469		}
 470
 471		if (pmd_val(*pmd)) {
 472			if (!pmd_large(*pmd)) {
 473				spin_lock(&init_mm.page_table_lock);
 474				pte = (pte_t *)pmd_page_vaddr(*pmd);
 475				last_map_addr = phys_pte_init(pte, address,
 476								end, prot);
 477				spin_unlock(&init_mm.page_table_lock);
 478				continue;
 479			}
 480			/*
 481			 * If we are ok with PG_LEVEL_2M mapping, then we will
 482			 * use the existing mapping,
 483			 *
 484			 * Otherwise, we will split the large page mapping but
 485			 * use the same existing protection bits except for
 486			 * large page, so that we don't violate Intel's TLB
 487			 * Application note (317080) which says, while changing
 488			 * the page sizes, new and old translations should
 489			 * not differ with respect to page frame and
 490			 * attributes.
 491			 */
 492			if (page_size_mask & (1 << PG_LEVEL_2M)) {
 493				if (!after_bootmem)
 494					pages++;
 495				last_map_addr = next;
 496				continue;
 497			}
 498			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
 499		}
 500
 501		if (page_size_mask & (1<<PG_LEVEL_2M)) {
 502			pages++;
 503			spin_lock(&init_mm.page_table_lock);
 504			set_pte((pte_t *)pmd,
 505				pfn_pte((address & PMD_MASK) >> PAGE_SHIFT,
 506					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
 507			spin_unlock(&init_mm.page_table_lock);
 508			last_map_addr = next;
 509			continue;
 510		}
 511
 512		pte = alloc_low_page();
 513		last_map_addr = phys_pte_init(pte, address, end, new_prot);
 514
 515		spin_lock(&init_mm.page_table_lock);
 516		pmd_populate_kernel(&init_mm, pmd, pte);
 517		spin_unlock(&init_mm.page_table_lock);
 518	}
 519	update_page_count(PG_LEVEL_2M, pages);
 520	return last_map_addr;
 521}
 522
 523static unsigned long __meminit
 524phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 525			 unsigned long page_size_mask)
 526{
 527	unsigned long pages = 0, next;
 528	unsigned long last_map_addr = end;
 529	int i = pud_index(addr);
 530
 531	for (; i < PTRS_PER_PUD; i++, addr = next) {
 532		pud_t *pud = pud_page + pud_index(addr);
 533		pmd_t *pmd;
 534		pgprot_t prot = PAGE_KERNEL;
 535
 536		next = (addr & PUD_MASK) + PUD_SIZE;
 537		if (addr >= end) {
 538			if (!after_bootmem &&
 539			    !e820_any_mapped(addr & PUD_MASK, next, E820_RAM) &&
 540			    !e820_any_mapped(addr & PUD_MASK, next, E820_RESERVED_KERN))
 541				set_pud(pud, __pud(0));
 542			continue;
 543		}
 544
 545		if (pud_val(*pud)) {
 546			if (!pud_large(*pud)) {
 547				pmd = pmd_offset(pud, 0);
 548				last_map_addr = phys_pmd_init(pmd, addr, end,
 549							 page_size_mask, prot);
 550				__flush_tlb_all();
 551				continue;
 552			}
 553			/*
 554			 * If we are ok with PG_LEVEL_1G mapping, then we will
 555			 * use the existing mapping.
 556			 *
 557			 * Otherwise, we will split the gbpage mapping but use
 558			 * the same existing protection  bits except for large
 559			 * page, so that we don't violate Intel's TLB
 560			 * Application note (317080) which says, while changing
 561			 * the page sizes, new and old translations should
 562			 * not differ with respect to page frame and
 563			 * attributes.
 564			 */
 565			if (page_size_mask & (1 << PG_LEVEL_1G)) {
 566				if (!after_bootmem)
 567					pages++;
 568				last_map_addr = next;
 569				continue;
 570			}
 571			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
 572		}
 573
 574		if (page_size_mask & (1<<PG_LEVEL_1G)) {
 575			pages++;
 576			spin_lock(&init_mm.page_table_lock);
 577			set_pte((pte_t *)pud,
 578				pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT,
 579					PAGE_KERNEL_LARGE));
 580			spin_unlock(&init_mm.page_table_lock);
 581			last_map_addr = next;
 582			continue;
 583		}
 584
 585		pmd = alloc_low_page();
 586		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
 587					      prot);
 588
 589		spin_lock(&init_mm.page_table_lock);
 590		pud_populate(&init_mm, pud, pmd);
 591		spin_unlock(&init_mm.page_table_lock);
 592	}
 593	__flush_tlb_all();
 594
 595	update_page_count(PG_LEVEL_1G, pages);
 596
 597	return last_map_addr;
 598}
 599
 600unsigned long __meminit
 601kernel_physical_mapping_init(unsigned long start,
 602			     unsigned long end,
 603			     unsigned long page_size_mask)
 604{
 605	bool pgd_changed = false;
 606	unsigned long next, last_map_addr = end;
 607	unsigned long addr;
 608
 609	start = (unsigned long)__va(start);
 610	end = (unsigned long)__va(end);
 611	addr = start;
 612
 613	for (; start < end; start = next) {
 614		pgd_t *pgd = pgd_offset_k(start);
 615		pud_t *pud;
 616
 617		next = (start & PGDIR_MASK) + PGDIR_SIZE;
 618
 619		if (pgd_val(*pgd)) {
 620			pud = (pud_t *)pgd_page_vaddr(*pgd);
 621			last_map_addr = phys_pud_init(pud, __pa(start),
 622						 __pa(end), page_size_mask);
 623			continue;
 624		}
 625
 626		pud = alloc_low_page();
 627		last_map_addr = phys_pud_init(pud, __pa(start), __pa(end),
 628						 page_size_mask);
 629
 630		spin_lock(&init_mm.page_table_lock);
 631		pgd_populate(&init_mm, pgd, pud);
 632		spin_unlock(&init_mm.page_table_lock);
 633		pgd_changed = true;
 634	}
 635
 636	if (pgd_changed)
 637		sync_global_pgds(addr, end - 1, 0);
 638
 639	__flush_tlb_all();
 640
 641	return last_map_addr;
 642}
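
Illustrative sketch only: callers normally reach kernel_physical_mapping_init() through init_memory_mapping(), which derives page_size_mask from CPU features (PSE, 1G pages); the explicit range and mask below are assumptions made for the example.

	/* Ask for 2M pages where possible while mapping physical [4 GiB, 5 GiB). */
	unsigned long page_size_mask = 1UL << PG_LEVEL_2M;

	kernel_physical_mapping_init(4UL << 30, 5UL << 30, page_size_mask);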
 643
 644#ifndef CONFIG_NUMA
 645void __init initmem_init(void)
 646{
 647	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
 648}
 649#endif
 650
 651void __init paging_init(void)
 652{
 653	sparse_memory_present_with_active_regions(MAX_NUMNODES);
 654	sparse_init();
 655
 656	/*
 657	 * clear the default setting with node 0
 658	 * note: don't use nodes_clear here, that is really clearing when
 659	 *	 numa support is not compiled in, and later node_set_state
 660	 *	 will not set it back.
 661	 */
 662	node_clear_state(0, N_MEMORY);
 663	if (N_MEMORY != N_NORMAL_MEMORY)
 664		node_clear_state(0, N_NORMAL_MEMORY);
 665
 666	zone_sizes_init();
 667}
 668
 669/*
 670 * Memory hotplug specific functions
 671 */
 672#ifdef CONFIG_MEMORY_HOTPLUG
 673/*
 674 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 675 * updating.
 676 */
 677static void  update_end_of_memory_vars(u64 start, u64 size)
 678{
 679	unsigned long end_pfn = PFN_UP(start + size);
 680
 681	if (end_pfn > max_pfn) {
 682		max_pfn = end_pfn;
 683		max_low_pfn = end_pfn;
 684		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
 685	}
 686}
 687
 688/*
 689 * Memory is always added to the NORMAL zone. This means you will never get
 690 * additional DMA/DMA32 memory.
 691 */
 692int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 693{
 694	struct pglist_data *pgdat = NODE_DATA(nid);
 695	struct zone *zone = pgdat->node_zones +
 696		zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
 697	unsigned long start_pfn = start >> PAGE_SHIFT;
 698	unsigned long nr_pages = size >> PAGE_SHIFT;
 699	int ret;
 700
 701	init_memory_mapping(start, start + size);
 702
 703	ret = __add_pages(nid, zone, start_pfn, nr_pages);
 704	WARN_ON_ONCE(ret);
 705
 706	/* update max_pfn, max_low_pfn and high_memory */
 707	update_end_of_memory_vars(start, size);
 708
 709	return ret;
 710}
 711EXPORT_SYMBOL_GPL(arch_add_memory);
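
Drivers do not call arch_add_memory() directly; they go through the generic add_memory() entry point, which resolves the node and eventually lands here. A hedged sketch (the address, size and nid lookup are illustrative):

	u64 start = 0x100000000ULL;			/* 4 GiB, example only */
	u64 size  = 128ULL << 20;			/* 128 MiB */
	int nid   = memory_add_physaddr_to_nid(start);
	int err   = add_memory(nid, start, size);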
 712
 713#define PAGE_INUSE 0xFD
 714
 715static void __meminit free_pagetable(struct page *page, int order)
 716{
 717	unsigned long magic;
 718	unsigned int nr_pages = 1 << order;
 719	struct vmem_altmap *altmap = to_vmem_altmap((unsigned long) page);
 720
 721	if (altmap) {
 722		vmem_altmap_free(altmap, nr_pages);
 723		return;
 724	}
 725
 726	/* bootmem page has reserved flag */
 727	if (PageReserved(page)) {
 728		__ClearPageReserved(page);
 729
 730		magic = (unsigned long)page->lru.next;
 731		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
 732			while (nr_pages--)
 733				put_page_bootmem(page++);
 734		} else
 735			while (nr_pages--)
 736				free_reserved_page(page++);
 737	} else
 738		free_pages((unsigned long)page_address(page), order);
 739}
 740
 741static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
 742{
 743	pte_t *pte;
 744	int i;
 745
 746	for (i = 0; i < PTRS_PER_PTE; i++) {
 747		pte = pte_start + i;
 748		if (pte_val(*pte))
 749			return;
 750	}
 751
 752	/* free a pte table */
 753	free_pagetable(pmd_page(*pmd), 0);
 754	spin_lock(&init_mm.page_table_lock);
 755	pmd_clear(pmd);
 756	spin_unlock(&init_mm.page_table_lock);
 757}
 758
 759static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
 760{
 761	pmd_t *pmd;
 762	int i;
 763
 764	for (i = 0; i < PTRS_PER_PMD; i++) {
 765		pmd = pmd_start + i;
 766		if (pmd_val(*pmd))
 767			return;
 768	}
 769
 770	/* free a pmd table */
 771	free_pagetable(pud_page(*pud), 0);
 772	spin_lock(&init_mm.page_table_lock);
 773	pud_clear(pud);
 774	spin_unlock(&init_mm.page_table_lock);
 775}
 776
 777/* Return true if pgd is changed, otherwise return false. */
 778static bool __meminit free_pud_table(pud_t *pud_start, pgd_t *pgd)
 779{
 780	pud_t *pud;
 781	int i;
 782
 783	for (i = 0; i < PTRS_PER_PUD; i++) {
 784		pud = pud_start + i;
 785		if (pud_val(*pud))
 786			return false;
 787	}
 788
 789	/* free a pud table */
 790	free_pagetable(pgd_page(*pgd), 0);
 791	spin_lock(&init_mm.page_table_lock);
 792	pgd_clear(pgd);
 793	spin_unlock(&init_mm.page_table_lock);
 794
 795	return true;
 796}
 797
 798static void __meminit
 799remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
 800		 bool direct)
 801{
 802	unsigned long next, pages = 0;
 803	pte_t *pte;
 804	void *page_addr;
 805	phys_addr_t phys_addr;
 806
 807	pte = pte_start + pte_index(addr);
 808	for (; addr < end; addr = next, pte++) {
 809		next = (addr + PAGE_SIZE) & PAGE_MASK;
 810		if (next > end)
 811			next = end;
 812
 813		if (!pte_present(*pte))
 814			continue;
 815
 816		/*
 817		 * We mapped [0,1G) memory as identity mapping when
 818		 * initializing, in arch/x86/kernel/head_64.S. These
 819		 * pagetables cannot be removed.
 820		 */
 821		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
 822		if (phys_addr < (phys_addr_t)0x40000000)
 823			return;
 824
 825		if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
 826			/*
 827			 * Do not free direct mapping pages since they were
 828			 * freed when offlining, or simply not in use.
 829			 */
 830			if (!direct)
 831				free_pagetable(pte_page(*pte), 0);
 832
 833			spin_lock(&init_mm.page_table_lock);
 834			pte_clear(&init_mm, addr, pte);
 835			spin_unlock(&init_mm.page_table_lock);
 836
 837			/* For non-direct mappings, the pages count means nothing. */
 838			pages++;
 839		} else {
 840			/*
 841			 * If we are here, we are freeing vmemmap pages since
 842			 * direct mapped memory ranges to be freed are aligned.
 843			 *
 844			 * If we are not removing the whole page, it means
 845			 * other page structs in this page are being used and
 846			 * we cannot remove them. So fill the unused page_structs
 847			 * with 0xFD, and remove the page when it is wholly
 848			 * filled with 0xFD.
 849			 */
 850			memset((void *)addr, PAGE_INUSE, next - addr);
 851
 852			page_addr = page_address(pte_page(*pte));
 853			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
 854				free_pagetable(pte_page(*pte), 0);
 855
 856				spin_lock(&init_mm.page_table_lock);
 857				pte_clear(&init_mm, addr, pte);
 858				spin_unlock(&init_mm.page_table_lock);
 859			}
 860		}
 861	}
 862
 863	/* Call free_pte_table() in remove_pmd_table(). */
 864	flush_tlb_all();
 865	if (direct)
 866		update_page_count(PG_LEVEL_4K, -pages);
 867}
 868
 869static void __meminit
 870remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
 871		 bool direct)
 872{
 873	unsigned long next, pages = 0;
 874	pte_t *pte_base;
 875	pmd_t *pmd;
 876	void *page_addr;
 877
 878	pmd = pmd_start + pmd_index(addr);
 879	for (; addr < end; addr = next, pmd++) {
 880		next = pmd_addr_end(addr, end);
 881
 882		if (!pmd_present(*pmd))
 883			continue;
 884
 885		if (pmd_large(*pmd)) {
 886			if (IS_ALIGNED(addr, PMD_SIZE) &&
 887			    IS_ALIGNED(next, PMD_SIZE)) {
 888				if (!direct)
 889					free_pagetable(pmd_page(*pmd),
 890						       get_order(PMD_SIZE));
 891
 892				spin_lock(&init_mm.page_table_lock);
 893				pmd_clear(pmd);
 894				spin_unlock(&init_mm.page_table_lock);
 895				pages++;
 896			} else {
 897				/* If here, we are freeing vmemmap pages. */
 898				memset((void *)addr, PAGE_INUSE, next - addr);
 899
 900				page_addr = page_address(pmd_page(*pmd));
 901				if (!memchr_inv(page_addr, PAGE_INUSE,
 902						PMD_SIZE)) {
 903					free_pagetable(pmd_page(*pmd),
 904						       get_order(PMD_SIZE));
 905
 906					spin_lock(&init_mm.page_table_lock);
 907					pmd_clear(pmd);
 908					spin_unlock(&init_mm.page_table_lock);
 909				}
 910			}
 911
 912			continue;
 913		}
 914
 915		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
 916		remove_pte_table(pte_base, addr, next, direct);
 917		free_pte_table(pte_base, pmd);
 918	}
 919
 920	/* Call free_pmd_table() in remove_pud_table(). */
 921	if (direct)
 922		update_page_count(PG_LEVEL_2M, -pages);
 923}
 924
 925static void __meminit
 926remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
 927		 bool direct)
 928{
 929	unsigned long next, pages = 0;
 930	pmd_t *pmd_base;
 931	pud_t *pud;
 932	void *page_addr;
 933
 934	pud = pud_start + pud_index(addr);
 935	for (; addr < end; addr = next, pud++) {
 936		next = pud_addr_end(addr, end);
 937
 938		if (!pud_present(*pud))
 939			continue;
 940
 941		if (pud_large(*pud)) {
 942			if (IS_ALIGNED(addr, PUD_SIZE) &&
 943			    IS_ALIGNED(next, PUD_SIZE)) {
 944				if (!direct)
 945					free_pagetable(pud_page(*pud),
 946						       get_order(PUD_SIZE));
 947
 948				spin_lock(&init_mm.page_table_lock);
 949				pud_clear(pud);
 950				spin_unlock(&init_mm.page_table_lock);
 951				pages++;
 952			} else {
 953				/* If here, we are freeing vmemmap pages. */
 954				memset((void *)addr, PAGE_INUSE, next - addr);
 955
 956				page_addr = page_address(pud_page(*pud));
 957				if (!memchr_inv(page_addr, PAGE_INUSE,
 958						PUD_SIZE)) {
 959					free_pagetable(pud_page(*pud),
 960						       get_order(PUD_SIZE));
 961
 962					spin_lock(&init_mm.page_table_lock);
 963					pud_clear(pud);
 964					spin_unlock(&init_mm.page_table_lock);
 965				}
 966			}
 967
 968			continue;
 969		}
 970
 971		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
 972		remove_pmd_table(pmd_base, addr, next, direct);
 973		free_pmd_table(pmd_base, pud);
 974	}
 975
 976	if (direct)
 977		update_page_count(PG_LEVEL_1G, -pages);
 978}
 979
 980/* start and end are both virtual address. */
 981static void __meminit
 982remove_pagetable(unsigned long start, unsigned long end, bool direct)
 983{
 984	unsigned long next;
 985	unsigned long addr;
 986	pgd_t *pgd;
 987	pud_t *pud;
 988	bool pgd_changed = false;
 989
 990	for (addr = start; addr < end; addr = next) {
 991		next = pgd_addr_end(addr, end);
 992
 993		pgd = pgd_offset_k(addr);
 994		if (!pgd_present(*pgd))
 995			continue;
 996
 997		pud = (pud_t *)pgd_page_vaddr(*pgd);
 998		remove_pud_table(pud, addr, next, direct);
 999		if (free_pud_table(pud, pgd))
1000			pgd_changed = true;
1001	}
1002
1003	if (pgd_changed)
1004		sync_global_pgds(start, end - 1, 1);
1005
1006	flush_tlb_all();
1007}
1008
1009void __ref vmemmap_free(unsigned long start, unsigned long end)
1010{
1011	remove_pagetable(start, end, false);
1012}
1013
1014#ifdef CONFIG_MEMORY_HOTREMOVE
1015static void __meminit
1016kernel_physical_mapping_remove(unsigned long start, unsigned long end)
1017{
1018	start = (unsigned long)__va(start);
1019	end = (unsigned long)__va(end);
1020
1021	remove_pagetable(start, end, true);
1022}
1023
1024int __ref arch_remove_memory(u64 start, u64 size)
1025{
1026	unsigned long start_pfn = start >> PAGE_SHIFT;
1027	unsigned long nr_pages = size >> PAGE_SHIFT;
1028	struct page *page = pfn_to_page(start_pfn);
1029	struct vmem_altmap *altmap;
1030	struct zone *zone;
1031	int ret;
1032
1033	/* With altmap the first mapped page is offset from @start */
1034	altmap = to_vmem_altmap((unsigned long) page);
1035	if (altmap)
1036		page += vmem_altmap_offset(altmap);
1037	zone = page_zone(page);
1038	ret = __remove_pages(zone, start_pfn, nr_pages);
1039	WARN_ON_ONCE(ret);
1040	kernel_physical_mapping_remove(start, start + size);
1041
1042	return ret;
1043}
1044#endif
1045#endif /* CONFIG_MEMORY_HOTPLUG */
1046
1047static struct kcore_list kcore_vsyscall;
1048
1049static void __init register_page_bootmem_info(void)
1050{
1051#ifdef CONFIG_NUMA
1052	int i;
1053
1054	for_each_online_node(i)
1055		register_page_bootmem_info_node(NODE_DATA(i));
1056#endif
1057}
1058
1059void __init mem_init(void)
1060{
1061	pci_iommu_alloc();
1062
1063	/* clear_bss() already cleared the empty_zero_page */
1064
1065	register_page_bootmem_info();
1066
1067	/* this will put all memory onto the freelists */
1068	free_all_bootmem();
1069	after_bootmem = 1;
1070
1071	/* Register memory areas for /proc/kcore */
1072	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
1073			 PAGE_SIZE, KCORE_OTHER);
1074
1075	mem_init_print_info(NULL);
1076}
1077
1078const int rodata_test_data = 0xC3;
1079EXPORT_SYMBOL_GPL(rodata_test_data);
1080
1081int kernel_set_to_readonly;
1082
1083void set_kernel_text_rw(void)
1084{
1085	unsigned long start = PFN_ALIGN(_text);
1086	unsigned long end = PFN_ALIGN(__stop___ex_table);
1087
1088	if (!kernel_set_to_readonly)
1089		return;
1090
1091	pr_debug("Set kernel text: %lx - %lx for read write\n",
1092		 start, end);
1093
1094	/*
1095	 * Make the kernel identity mapping for text RW. Kernel text
1096	 * mapping will always be RO. Refer to the comment in
1097	 * static_protections() in pageattr.c
1098	 */
1099	set_memory_rw(start, (end - start) >> PAGE_SHIFT);
1100}
1101
1102void set_kernel_text_ro(void)
1103{
1104	unsigned long start = PFN_ALIGN(_text);
1105	unsigned long end = PFN_ALIGN(__stop___ex_table);
1106
1107	if (!kernel_set_to_readonly)
1108		return;
1109
1110	pr_debug("Set kernel text: %lx - %lx for read only\n",
1111		 start, end);
1112
1113	/*
1114	 * Set the kernel identity mapping for text RO.
1115	 */
1116	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
1117}
1118
1119void mark_rodata_ro(void)
1120{
1121	unsigned long start = PFN_ALIGN(_text);
1122	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
1123	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
1124	unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
1125	unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
1126	unsigned long all_end;
1127
1128	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
1129	       (end - start) >> 10);
1130	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
1131
1132	kernel_set_to_readonly = 1;
1133
1134	/*
1135	 * The rodata/data/bss/brk section (but not the kernel text!)
1136	 * should also be non-executable.
1137	 *
1138	 * We align all_end to PMD_SIZE because the existing mapping
1139	 * is a full PMD. If we aligned _brk_end to PAGE_SIZE instead, we would
1140	 * split the PMD, and the remainder between _brk_end and the end
1141	 * of the PMD would remain mapped executable.
1142	 *
1143	 * Any PMD which was setup after the one which covers _brk_end
1144	 * has been zapped already via cleanup_highmap().
1145	 */
1146	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
1147	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
1148
1149	rodata_test();
1150
1151#ifdef CONFIG_CPA_DEBUG
1152	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
1153	set_memory_rw(start, (end-start) >> PAGE_SHIFT);
1154
1155	printk(KERN_INFO "Testing CPA: again\n");
1156	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
1157#endif
1158
1159	free_init_pages("unused kernel",
1160			(unsigned long) __va(__pa_symbol(text_end)),
1161			(unsigned long) __va(__pa_symbol(rodata_start)));
1162	free_init_pages("unused kernel",
1163			(unsigned long) __va(__pa_symbol(rodata_end)),
1164			(unsigned long) __va(__pa_symbol(_sdata)));
1165
1166	debug_checkwx();
1167}
1168
1169int kern_addr_valid(unsigned long addr)
1170{
1171	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
1172	pgd_t *pgd;
1173	pud_t *pud;
1174	pmd_t *pmd;
1175	pte_t *pte;
1176
1177	if (above != 0 && above != -1UL)
1178		return 0;
1179
1180	pgd = pgd_offset_k(addr);
1181	if (pgd_none(*pgd))
1182		return 0;
1183
1184	pud = pud_offset(pgd, addr);
1185	if (pud_none(*pud))
1186		return 0;
1187
1188	if (pud_large(*pud))
1189		return pfn_valid(pud_pfn(*pud));
1190
1191	pmd = pmd_offset(pud, addr);
1192	if (pmd_none(*pmd))
1193		return 0;
1194
1195	if (pmd_large(*pmd))
1196		return pfn_valid(pmd_pfn(*pmd));
1197
1198	pte = pte_offset_kernel(pmd, addr);
1199	if (pte_none(*pte))
1200		return 0;
1201
1202	return pfn_valid(pte_pfn(*pte));
1203}
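
A hedged example of the intended use of kern_addr_valid(): check an arbitrary kernel virtual address before touching it, much as the /proc/kcore reader does; addr and buf are hypothetical.

	if (kern_addr_valid(addr))
		memcpy(buf, (void *)addr, sizeof(long));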
1204
1205static unsigned long probe_memory_block_size(void)
1206{
1207	unsigned long bz = MIN_MEMORY_BLOCK_SIZE;
1208
1209	/* if system is UV or has 64GB of RAM or more, use large blocks */
1210	if (is_uv_system() || ((max_pfn << PAGE_SHIFT) >= (64UL << 30)))
1211		bz = 2UL << 30; /* 2GB */
1212
1213	pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);
1214
1215	return bz;
1216}
1217
1218static unsigned long memory_block_size_probed;
1219unsigned long memory_block_size_bytes(void)
1220{
1221	if (!memory_block_size_probed)
1222		memory_block_size_probed = probe_memory_block_size();
1223
1224	return memory_block_size_probed;
1225}
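
The probed value defines the granularity in which memory is hot-added and hot-removed. A hedged sketch of aligning a request to it (start and size are hypothetical; both possible block sizes are powers of two, so round_down()/round_up() apply):

	u64 bz    = memory_block_size_bytes();
	u64 first = round_down(start, bz);
	u64 bytes = round_up(start + size, bz) - first;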
1226
1227#ifdef CONFIG_SPARSEMEM_VMEMMAP
1228/*
1229 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
1230 */
1231static long __meminitdata addr_start, addr_end;
1232static void __meminitdata *p_start, *p_end;
1233static int __meminitdata node_start;
1234
1235static int __meminit vmemmap_populate_hugepages(unsigned long start,
1236		unsigned long end, int node, struct vmem_altmap *altmap)
1237{
1238	unsigned long addr;
1239	unsigned long next;
1240	pgd_t *pgd;
1241	pud_t *pud;
1242	pmd_t *pmd;
1243
1244	for (addr = start; addr < end; addr = next) {
1245		next = pmd_addr_end(addr, end);
1246
1247		pgd = vmemmap_pgd_populate(addr, node);
1248		if (!pgd)
1249			return -ENOMEM;
1250
1251		pud = vmemmap_pud_populate(pgd, addr, node);
1252		if (!pud)
1253			return -ENOMEM;
1254
1255		pmd = pmd_offset(pud, addr);
1256		if (pmd_none(*pmd)) {
1257			void *p;
1258
1259			p = __vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
1260			if (p) {
1261				pte_t entry;
1262
1263				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
1264						PAGE_KERNEL_LARGE);
1265				set_pmd(pmd, __pmd(pte_val(entry)));
1266
1267				/* check to see if we have contiguous blocks */
1268				if (p_end != p || node_start != node) {
1269					if (p_start)
1270						pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
1271						       addr_start, addr_end-1, p_start, p_end-1, node_start);
1272					addr_start = addr;
1273					node_start = node;
1274					p_start = p;
1275				}
1276
1277				addr_end = addr + PMD_SIZE;
1278				p_end = p + PMD_SIZE;
1279				continue;
1280			} else if (altmap)
1281				return -ENOMEM; /* no fallback */
1282		} else if (pmd_large(*pmd)) {
1283			vmemmap_verify((pte_t *)pmd, node, addr, next);
1284			continue;
1285		}
1286		pr_warn_once("vmemmap: falling back to regular page backing\n");
1287		if (vmemmap_populate_basepages(addr, next, node))
1288			return -ENOMEM;
1289	}
1290	return 0;
1291}
1292
1293int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
1294{
1295	struct vmem_altmap *altmap = to_vmem_altmap(start);
1296	int err;
1297
1298	if (cpu_has_pse)
1299		err = vmemmap_populate_hugepages(start, end, node, altmap);
1300	else if (altmap) {
1301		pr_err_once("%s: no cpu support for altmap allocations\n",
1302				__func__);
1303		err = -ENOMEM;
1304	} else
1305		err = vmemmap_populate_basepages(start, end, node);
1306	if (!err)
1307		sync_global_pgds(start, end - 1, 0);
1308	return err;
1309}
1310
1311#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
1312void register_page_bootmem_memmap(unsigned long section_nr,
1313				  struct page *start_page, unsigned long size)
1314{
1315	unsigned long addr = (unsigned long)start_page;
1316	unsigned long end = (unsigned long)(start_page + size);
1317	unsigned long next;
1318	pgd_t *pgd;
1319	pud_t *pud;
1320	pmd_t *pmd;
1321	unsigned int nr_pages;
1322	struct page *page;
1323
1324	for (; addr < end; addr = next) {
1325		pte_t *pte = NULL;
1326
1327		pgd = pgd_offset_k(addr);
1328		if (pgd_none(*pgd)) {
1329			next = (addr + PAGE_SIZE) & PAGE_MASK;
1330			continue;
1331		}
1332		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);
1333
1334		pud = pud_offset(pgd, addr);
1335		if (pud_none(*pud)) {
1336			next = (addr + PAGE_SIZE) & PAGE_MASK;
1337			continue;
1338		}
1339		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);
1340
1341		if (!cpu_has_pse) {
1342			next = (addr + PAGE_SIZE) & PAGE_MASK;
1343			pmd = pmd_offset(pud, addr);
1344			if (pmd_none(*pmd))
1345				continue;
1346			get_page_bootmem(section_nr, pmd_page(*pmd),
1347					 MIX_SECTION_INFO);
1348
1349			pte = pte_offset_kernel(pmd, addr);
1350			if (pte_none(*pte))
1351				continue;
1352			get_page_bootmem(section_nr, pte_page(*pte),
1353					 SECTION_INFO);
1354		} else {
1355			next = pmd_addr_end(addr, end);
1356
1357			pmd = pmd_offset(pud, addr);
1358			if (pmd_none(*pmd))
1359				continue;
1360
1361			nr_pages = 1 << (get_order(PMD_SIZE));
1362			page = pmd_page(*pmd);
1363			while (nr_pages--)
1364				get_page_bootmem(section_nr, page++,
1365						 SECTION_INFO);
1366		}
1367	}
1368}
1369#endif
1370
1371void __meminit vmemmap_populate_print_last(void)
1372{
1373	if (p_start) {
1374		pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
1375			addr_start, addr_end-1, p_start, p_end-1, node_start);
1376		p_start = NULL;
1377		p_end = NULL;
1378		node_start = 0;
1379	}
1380}
1381#endif
v3.15
   1/*
   2 *  linux/arch/x86_64/mm/init.c
   3 *
   4 *  Copyright (C) 1995  Linus Torvalds
   5 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
   6 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
   7 */
   8
   9#include <linux/signal.h>
  10#include <linux/sched.h>
  11#include <linux/kernel.h>
  12#include <linux/errno.h>
  13#include <linux/string.h>
  14#include <linux/types.h>
  15#include <linux/ptrace.h>
  16#include <linux/mman.h>
  17#include <linux/mm.h>
  18#include <linux/swap.h>
  19#include <linux/smp.h>
  20#include <linux/init.h>
  21#include <linux/initrd.h>
  22#include <linux/pagemap.h>
  23#include <linux/bootmem.h>
  24#include <linux/memblock.h>
  25#include <linux/proc_fs.h>
  26#include <linux/pci.h>
  27#include <linux/pfn.h>
  28#include <linux/poison.h>
  29#include <linux/dma-mapping.h>
  30#include <linux/module.h>
  31#include <linux/memory.h>
  32#include <linux/memory_hotplug.h>
 
  33#include <linux/nmi.h>
  34#include <linux/gfp.h>
  35#include <linux/kcore.h>
  36
  37#include <asm/processor.h>
  38#include <asm/bios_ebda.h>
  39#include <asm/uaccess.h>
  40#include <asm/pgtable.h>
  41#include <asm/pgalloc.h>
  42#include <asm/dma.h>
  43#include <asm/fixmap.h>
  44#include <asm/e820.h>
  45#include <asm/apic.h>
  46#include <asm/tlb.h>
  47#include <asm/mmu_context.h>
  48#include <asm/proto.h>
  49#include <asm/smp.h>
  50#include <asm/sections.h>
  51#include <asm/kdebug.h>
  52#include <asm/numa.h>
  53#include <asm/cacheflush.h>
  54#include <asm/init.h>
  55#include <asm/uv/uv.h>
  56#include <asm/setup.h>
  57
  58#include "mm_internal.h"
  59
  60static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
  61			   unsigned long addr, unsigned long end)
  62{
  63	addr &= PMD_MASK;
  64	for (; addr < end; addr += PMD_SIZE) {
  65		pmd_t *pmd = pmd_page + pmd_index(addr);
  66
  67		if (!pmd_present(*pmd))
  68			set_pmd(pmd, __pmd(addr | pmd_flag));
  69	}
  70}
  71static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
  72			  unsigned long addr, unsigned long end)
  73{
  74	unsigned long next;
  75
  76	for (; addr < end; addr = next) {
  77		pud_t *pud = pud_page + pud_index(addr);
  78		pmd_t *pmd;
  79
  80		next = (addr & PUD_MASK) + PUD_SIZE;
  81		if (next > end)
  82			next = end;
  83
  84		if (pud_present(*pud)) {
  85			pmd = pmd_offset(pud, 0);
  86			ident_pmd_init(info->pmd_flag, pmd, addr, next);
  87			continue;
  88		}
  89		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
  90		if (!pmd)
  91			return -ENOMEM;
  92		ident_pmd_init(info->pmd_flag, pmd, addr, next);
  93		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
  94	}
  95
  96	return 0;
  97}
  98
  99int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
 100			      unsigned long addr, unsigned long end)
 101{
 102	unsigned long next;
 103	int result;
 104	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
 105
 106	for (; addr < end; addr = next) {
 107		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
 108		pud_t *pud;
 109
 110		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
 111		if (next > end)
 112			next = end;
 113
 114		if (pgd_present(*pgd)) {
 115			pud = pud_offset(pgd, 0);
 116			result = ident_pud_init(info, pud, addr, next);
 117			if (result)
 118				return result;
 119			continue;
 120		}
 121
 122		pud = (pud_t *)info->alloc_pgt_page(info->context);
 123		if (!pud)
 124			return -ENOMEM;
 125		result = ident_pud_init(info, pud, addr, next);
 126		if (result)
 127			return result;
 128		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
 129	}
 130
 131	return 0;
 132}
 133
 134static int __init parse_direct_gbpages_off(char *arg)
 135{
 136	direct_gbpages = 0;
 137	return 0;
 138}
 139early_param("nogbpages", parse_direct_gbpages_off);
 140
 141static int __init parse_direct_gbpages_on(char *arg)
 142{
 143	direct_gbpages = 1;
 144	return 0;
 145}
 146early_param("gbpages", parse_direct_gbpages_on);
 147
 148/*
 149 * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the
 150 * physical space so we can cache the place of the first one and move
 151 * around without checking the pgd every time.
 152 */
 153
 154pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
 155EXPORT_SYMBOL_GPL(__supported_pte_mask);
 156
 157int force_personality32;
 158
 159/*
 160 * noexec32=on|off
 161 * Control non executable heap for 32bit processes.
 162 * To control the stack too use noexec=off
 163 *
 164 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 165 * off	PROT_READ implies PROT_EXEC
 166 */
 167static int __init nonx32_setup(char *str)
 168{
 169	if (!strcmp(str, "on"))
 170		force_personality32 &= ~READ_IMPLIES_EXEC;
 171	else if (!strcmp(str, "off"))
 172		force_personality32 |= READ_IMPLIES_EXEC;
 173	return 1;
 174}
 175__setup("noexec32=", nonx32_setup);
 176
 177/*
 178 * When memory was added/removed make sure all the processes MM have
 179 * suitable PGD entries in the local PGD level page.
 180 */
 181void sync_global_pgds(unsigned long start, unsigned long end)
 182{
 183	unsigned long address;
 184
 185	for (address = start; address <= end; address += PGDIR_SIZE) {
 186		const pgd_t *pgd_ref = pgd_offset_k(address);
 187		struct page *page;
 188
 189		if (pgd_none(*pgd_ref))
 
 
 
 
 
 190			continue;
 191
 192		spin_lock(&pgd_lock);
 193		list_for_each_entry(page, &pgd_list, lru) {
 194			pgd_t *pgd;
 195			spinlock_t *pgt_lock;
 196
 197			pgd = (pgd_t *)page_address(page) + pgd_index(address);
 198			/* the pgt_lock only for Xen */
 199			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 200			spin_lock(pgt_lock);
 201
 202			if (pgd_none(*pgd))
 203				set_pgd(pgd, *pgd_ref);
 204			else
 205				BUG_ON(pgd_page_vaddr(*pgd)
 206				       != pgd_page_vaddr(*pgd_ref));
 207
 
 
 
 
 
 
 
 
 208			spin_unlock(pgt_lock);
 209		}
 210		spin_unlock(&pgd_lock);
 211	}
 212}
 213
 214/*
 215 * NOTE: This function is marked __ref because it calls __init function
 216 * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
 217 */
 218static __ref void *spp_getpage(void)
 219{
 220	void *ptr;
 221
 222	if (after_bootmem)
 223		ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
 224	else
 225		ptr = alloc_bootmem_pages(PAGE_SIZE);
 226
 227	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
 228		panic("set_pte_phys: cannot allocate page data %s\n",
 229			after_bootmem ? "after bootmem" : "");
 230	}
 231
 232	pr_debug("spp_getpage %p\n", ptr);
 233
 234	return ptr;
 235}
 236
 237static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
 238{
 239	if (pgd_none(*pgd)) {
 240		pud_t *pud = (pud_t *)spp_getpage();
 241		pgd_populate(&init_mm, pgd, pud);
 242		if (pud != pud_offset(pgd, 0))
 243			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
 244			       pud, pud_offset(pgd, 0));
 245	}
 246	return pud_offset(pgd, vaddr);
 247}
 248
 249static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
 250{
 251	if (pud_none(*pud)) {
 252		pmd_t *pmd = (pmd_t *) spp_getpage();
 253		pud_populate(&init_mm, pud, pmd);
 254		if (pmd != pmd_offset(pud, 0))
 255			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
 256			       pmd, pmd_offset(pud, 0));
 257	}
 258	return pmd_offset(pud, vaddr);
 259}
 260
 261static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
 262{
 263	if (pmd_none(*pmd)) {
 264		pte_t *pte = (pte_t *) spp_getpage();
 265		pmd_populate_kernel(&init_mm, pmd, pte);
 266		if (pte != pte_offset_kernel(pmd, 0))
 267			printk(KERN_ERR "PAGETABLE BUG #02!\n");
 268	}
 269	return pte_offset_kernel(pmd, vaddr);
 270}
 271
 272void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
 273{
 274	pud_t *pud;
 275	pmd_t *pmd;
 276	pte_t *pte;
 277
 278	pud = pud_page + pud_index(vaddr);
 279	pmd = fill_pmd(pud, vaddr);
 280	pte = fill_pte(pmd, vaddr);
 281
 282	set_pte(pte, new_pte);
 283
 284	/*
 285	 * It's enough to flush this one mapping.
 286	 * (PGE mappings get flushed as well)
 287	 */
 288	__flush_tlb_one(vaddr);
 289}
 290
 291void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
 292{
 293	pgd_t *pgd;
 294	pud_t *pud_page;
 295
 296	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));
 297
 298	pgd = pgd_offset_k(vaddr);
 299	if (pgd_none(*pgd)) {
 300		printk(KERN_ERR
 301			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
 302		return;
 303	}
 304	pud_page = (pud_t*)pgd_page_vaddr(*pgd);
 305	set_pte_vaddr_pud(pud_page, vaddr, pteval);
 306}
 307
 308pmd_t * __init populate_extra_pmd(unsigned long vaddr)
 309{
 310	pgd_t *pgd;
 311	pud_t *pud;
 312
 313	pgd = pgd_offset_k(vaddr);
 314	pud = fill_pud(pgd, vaddr);
 315	return fill_pmd(pud, vaddr);
 316}
 317
 318pte_t * __init populate_extra_pte(unsigned long vaddr)
 319{
 320	pmd_t *pmd;
 321
 322	pmd = populate_extra_pmd(vaddr);
 323	return fill_pte(pmd, vaddr);
 324}
 325
 326/*
 327 * Create large page table mappings for a range of physical addresses.
 328 */
 329static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
 330						pgprot_t prot)
 331{
 332	pgd_t *pgd;
 333	pud_t *pud;
 334	pmd_t *pmd;
 
 335
 
 
 336	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
 337	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
 338		pgd = pgd_offset_k((unsigned long)__va(phys));
 339		if (pgd_none(*pgd)) {
 340			pud = (pud_t *) spp_getpage();
 341			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
 342						_PAGE_USER));
 343		}
 344		pud = pud_offset(pgd, (unsigned long)__va(phys));
 345		if (pud_none(*pud)) {
 346			pmd = (pmd_t *) spp_getpage();
 347			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
 348						_PAGE_USER));
 349		}
 350		pmd = pmd_offset(pud, phys);
 351		BUG_ON(!pmd_none(*pmd));
 352		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
 353	}
 354}
 355
 356void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
 357{
 358	__init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
 359}
 360
 361void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
 362{
 363	__init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
 364}
 365
 366/*
 367 * The head.S code sets up the kernel high mapping:
 368 *
 369 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 370 *
 371 * phys_base holds the negative offset to the kernel, which is added
 372 * to the compile time generated pmds. This results in invalid pmds up
 373 * to the point where we hit the physaddr 0 mapping.
 374 *
 375 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 376 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 377 * well, as they are located before _text:
 378 */
 379void __init cleanup_highmap(void)
 380{
 381	unsigned long vaddr = __START_KERNEL_map;
 382	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
 383	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
 384	pmd_t *pmd = level2_kernel_pgt;
 385
 386	/*
 387	 * Native path, max_pfn_mapped is not set yet.
 388	 * Xen has valid max_pfn_mapped set in
 389	 *	arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
 390	 */
 391	if (max_pfn_mapped)
 392		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
 393
 394	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
 395		if (pmd_none(*pmd))
 396			continue;
 397		if (vaddr < (unsigned long) _text || vaddr > end)
 398			set_pmd(pmd, __pmd(0));
 399	}
 400}
 401
 402static unsigned long __meminit
 403phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
 404	      pgprot_t prot)
 405{
 406	unsigned long pages = 0, next;
 407	unsigned long last_map_addr = end;
 408	int i;
 409
 410	pte_t *pte = pte_page + pte_index(addr);
 411
 412	for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr = next, pte++) {
 413		next = (addr & PAGE_MASK) + PAGE_SIZE;
 414		if (addr >= end) {
 415			if (!after_bootmem &&
 416			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RAM) &&
 417			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RESERVED_KERN))
 418				set_pte(pte, __pte(0));
 419			continue;
 420		}
 421
 422		/*
 423		 * We will re-use the existing mapping.
 424		 * Xen for example has some special requirements, like mapping
 425		 * pagetable pages as RO. So assume someone who pre-setup
 426		 * these mappings are more intelligent.
 427		 */
 428		if (pte_val(*pte)) {
 429			if (!after_bootmem)
 430				pages++;
 431			continue;
 432		}
 433
 434		if (0)
 435			printk("   pte=%p addr=%lx pte=%016lx\n",
 436			       pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
 437		pages++;
 438		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
 439		last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
 440	}
 441
 442	update_page_count(PG_LEVEL_4K, pages);
 443
 444	return last_map_addr;
 445}
 446
 447static unsigned long __meminit
 448phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 449	      unsigned long page_size_mask, pgprot_t prot)
 450{
 451	unsigned long pages = 0, next;
 452	unsigned long last_map_addr = end;
 453
 454	int i = pmd_index(address);
 455
 456	for (; i < PTRS_PER_PMD; i++, address = next) {
 457		pmd_t *pmd = pmd_page + pmd_index(address);
 458		pte_t *pte;
 459		pgprot_t new_prot = prot;
 460
 461		next = (address & PMD_MASK) + PMD_SIZE;
 462		if (address >= end) {
 463			if (!after_bootmem &&
 464			    !e820_any_mapped(address & PMD_MASK, next, E820_RAM) &&
 465			    !e820_any_mapped(address & PMD_MASK, next, E820_RESERVED_KERN))
 466				set_pmd(pmd, __pmd(0));
 467			continue;
 468		}
 469
 470		if (pmd_val(*pmd)) {
 471			if (!pmd_large(*pmd)) {
 472				spin_lock(&init_mm.page_table_lock);
 473				pte = (pte_t *)pmd_page_vaddr(*pmd);
 474				last_map_addr = phys_pte_init(pte, address,
 475								end, prot);
 476				spin_unlock(&init_mm.page_table_lock);
 477				continue;
 478			}
 479			/*
 480			 * If we are ok with PG_LEVEL_2M mapping, then we will
 481			 * use the existing mapping,
 482			 *
 483			 * Otherwise, we will split the large page mapping but
 484			 * use the same existing protection bits except for
 485			 * large page, so that we don't violate Intel's TLB
 486			 * Application note (317080) which says, while changing
 487			 * the page sizes, new and old translations should
 488			 * not differ with respect to page frame and
 489			 * attributes.
 490			 */
 491			if (page_size_mask & (1 << PG_LEVEL_2M)) {
 492				if (!after_bootmem)
 493					pages++;
 494				last_map_addr = next;
 495				continue;
 496			}
 497			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
 498		}
 499
 500		if (page_size_mask & (1<<PG_LEVEL_2M)) {
 501			pages++;
 502			spin_lock(&init_mm.page_table_lock);
 503			set_pte((pte_t *)pmd,
 504				pfn_pte((address & PMD_MASK) >> PAGE_SHIFT,
 505					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
 506			spin_unlock(&init_mm.page_table_lock);
 507			last_map_addr = next;
 508			continue;
 509		}
 510
 511		pte = alloc_low_page();
 512		last_map_addr = phys_pte_init(pte, address, end, new_prot);
 513
 514		spin_lock(&init_mm.page_table_lock);
 515		pmd_populate_kernel(&init_mm, pmd, pte);
 516		spin_unlock(&init_mm.page_table_lock);
 517	}
 518	update_page_count(PG_LEVEL_2M, pages);
 519	return last_map_addr;
 520}
 521
 522static unsigned long __meminit
 523phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 524			 unsigned long page_size_mask)
 525{
 526	unsigned long pages = 0, next;
 527	unsigned long last_map_addr = end;
 528	int i = pud_index(addr);
 529
 530	for (; i < PTRS_PER_PUD; i++, addr = next) {
 531		pud_t *pud = pud_page + pud_index(addr);
 532		pmd_t *pmd;
 533		pgprot_t prot = PAGE_KERNEL;
 534
 535		next = (addr & PUD_MASK) + PUD_SIZE;
 536		if (addr >= end) {
 537			if (!after_bootmem &&
 538			    !e820_any_mapped(addr & PUD_MASK, next, E820_RAM) &&
 539			    !e820_any_mapped(addr & PUD_MASK, next, E820_RESERVED_KERN))
 540				set_pud(pud, __pud(0));
 541			continue;
 542		}
 543
 544		if (pud_val(*pud)) {
 545			if (!pud_large(*pud)) {
 546				pmd = pmd_offset(pud, 0);
 547				last_map_addr = phys_pmd_init(pmd, addr, end,
 548							 page_size_mask, prot);
 549				__flush_tlb_all();
 550				continue;
 551			}
 552			/*
 553			 * If we are ok with PG_LEVEL_1G mapping, then we will
 554			 * use the existing mapping.
 555			 *
 556			 * Otherwise, we will split the gbpage mapping but use
 557			 * the same existing protection  bits except for large
 558			 * page, so that we don't violate Intel's TLB
 559			 * Application note (317080) which says, while changing
 560			 * the page sizes, new and old translations should
 561			 * not differ with respect to page frame and
 562			 * attributes.
 563			 */
 564			if (page_size_mask & (1 << PG_LEVEL_1G)) {
 565				if (!after_bootmem)
 566					pages++;
 567				last_map_addr = next;
 568				continue;
 569			}
 570			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
 571		}
 572
 573		if (page_size_mask & (1<<PG_LEVEL_1G)) {
 574			pages++;
 575			spin_lock(&init_mm.page_table_lock);
 576			set_pte((pte_t *)pud,
 577				pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT,
 578					PAGE_KERNEL_LARGE));
 579			spin_unlock(&init_mm.page_table_lock);
 580			last_map_addr = next;
 581			continue;
 582		}
 583
 584		pmd = alloc_low_page();
 585		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
 586					      prot);
 587
 588		spin_lock(&init_mm.page_table_lock);
 589		pud_populate(&init_mm, pud, pmd);
 590		spin_unlock(&init_mm.page_table_lock);
 591	}
 592	__flush_tlb_all();
 593
 594	update_page_count(PG_LEVEL_1G, pages);
 595
 596	return last_map_addr;
 597}
 598
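/*
 * Create the kernel direct mapping for the physical range [start, end),
 * using the largest page sizes permitted by page_size_mask.  Newly
 * populated PGD entries are propagated to all page tables via
 * sync_global_pgds().  Returns the last physical address mapped.
 */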
 599unsigned long __meminit
 600kernel_physical_mapping_init(unsigned long start,
 601			     unsigned long end,
 602			     unsigned long page_size_mask)
 603{
 604	bool pgd_changed = false;
 605	unsigned long next, last_map_addr = end;
 606	unsigned long addr;
 607
 608	start = (unsigned long)__va(start);
 609	end = (unsigned long)__va(end);
 610	addr = start;
 611
 612	for (; start < end; start = next) {
 613		pgd_t *pgd = pgd_offset_k(start);
 614		pud_t *pud;
 615
 616		next = (start & PGDIR_MASK) + PGDIR_SIZE;
 617
 618		if (pgd_val(*pgd)) {
 619			pud = (pud_t *)pgd_page_vaddr(*pgd);
 620			last_map_addr = phys_pud_init(pud, __pa(start),
 621						 __pa(end), page_size_mask);
 622			continue;
 623		}
 624
 625		pud = alloc_low_page();
 626		last_map_addr = phys_pud_init(pud, __pa(start), __pa(end),
 627						 page_size_mask);
 628
 629		spin_lock(&init_mm.page_table_lock);
 630		pgd_populate(&init_mm, pgd, pud);
 631		spin_unlock(&init_mm.page_table_lock);
 632		pgd_changed = true;
 633	}
 634
 635	if (pgd_changed)
 636		sync_global_pgds(addr, end - 1);
 637
 638	__flush_tlb_all();
 639
 640	return last_map_addr;
 641}
 642
 643#ifndef CONFIG_NUMA
 644void __init initmem_init(void)
 645{
 646	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
 647}
 648#endif
 649
 650void __init paging_init(void)
 651{
 652	sparse_memory_present_with_active_regions(MAX_NUMNODES);
 653	sparse_init();
 654
 655	/*
 656	 * Clear the default node state for node 0.
 657	 * Note: don't use nodes_clear() here; that really clears the state
 658	 *	 when NUMA support is not compiled in, and a later
 659	 *	 node_set_state() will not set it back.
 660	 */
 661	node_clear_state(0, N_MEMORY);
 662	if (N_MEMORY != N_NORMAL_MEMORY)
 663		node_clear_state(0, N_NORMAL_MEMORY);
 664
 665	zone_sizes_init();
 666}
 667
 668/*
 669 * Memory hotplug specific functions
 670 */
 671#ifdef CONFIG_MEMORY_HOTPLUG
 672/*
 673 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 674 * updating.
 675 */
 676static void  update_end_of_memory_vars(u64 start, u64 size)
 677{
 678	unsigned long end_pfn = PFN_UP(start + size);
 679
 680	if (end_pfn > max_pfn) {
 681		max_pfn = end_pfn;
 682		max_low_pfn = end_pfn;
 683		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
 684	}
 685}
 686
 687/*
 688 * Memory is always added to the NORMAL zone. This means you will never get
 689 * additional DMA/DMA32 memory.
 690 */
 691int arch_add_memory(int nid, u64 start, u64 size)
 692{
 693	struct pglist_data *pgdat = NODE_DATA(nid);
 694	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
 695	unsigned long start_pfn = start >> PAGE_SHIFT;
 696	unsigned long nr_pages = size >> PAGE_SHIFT;
 697	int ret;
 698
 699	init_memory_mapping(start, start + size);
 700
 701	ret = __add_pages(nid, zone, start_pfn, nr_pages);
 702	WARN_ON_ONCE(ret);
 703
 704	/* update max_pfn, max_low_pfn and high_memory */
 705	update_end_of_memory_vars(start, size);
 706
 707	return ret;
 708}
 709EXPORT_SYMBOL_GPL(arch_add_memory);
 710
 711#define PAGE_INUSE 0xFD
 712
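/*
 * Release a page-table page.  Pages still carrying the bootmem reserved
 * flag are returned through the bootmem accounting helpers; anything else
 * goes straight back to the page allocator.
 */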
 713static void __meminit free_pagetable(struct page *page, int order)
 714{
 715	unsigned long magic;
 716	unsigned int nr_pages = 1 << order;
 717
 718	/* bootmem pages have the reserved flag set */
 719	if (PageReserved(page)) {
 720		__ClearPageReserved(page);
 721
 722		magic = (unsigned long)page->lru.next;
 723		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
 724			while (nr_pages--)
 725				put_page_bootmem(page++);
 726		} else
 727			while (nr_pages--)
 728				free_reserved_page(page++);
 729	} else
 730		free_pages((unsigned long)page_address(page), order);
 731}
 732
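/* If the PTE page is completely empty, free it and clear the PMD entry. */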
 733static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
 734{
 735	pte_t *pte;
 736	int i;
 737
 738	for (i = 0; i < PTRS_PER_PTE; i++) {
 739		pte = pte_start + i;
 740		if (pte_val(*pte))
 741			return;
 742	}
 743
 744	/* free a pte table */
 745	free_pagetable(pmd_page(*pmd), 0);
 746	spin_lock(&init_mm.page_table_lock);
 747	pmd_clear(pmd);
 748	spin_unlock(&init_mm.page_table_lock);
 749}
 750
 751static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
 752{
 753	pmd_t *pmd;
 754	int i;
 755
 756	for (i = 0; i < PTRS_PER_PMD; i++) {
 757		pmd = pmd_start + i;
 758		if (pmd_val(*pmd))
 759			return;
 760	}
 761
 762	/* free a pmd table */
 763	free_pagetable(pud_page(*pud), 0);
 764	spin_lock(&init_mm.page_table_lock);
 765	pud_clear(pud);
 766	spin_unlock(&init_mm.page_table_lock);
 767}
 768
 769/* Return true if pgd is changed, otherwise return false. */
 770static bool __meminit free_pud_table(pud_t *pud_start, pgd_t *pgd)
 771{
 772	pud_t *pud;
 773	int i;
 774
 775	for (i = 0; i < PTRS_PER_PUD; i++) {
 776		pud = pud_start + i;
 777		if (pud_val(*pud))
 778			return false;
 779	}
 780
 781	/* free a pud table */
 782	free_pagetable(pgd_page(*pgd), 0);
 783	spin_lock(&init_mm.page_table_lock);
 784	pgd_clear(pgd);
 785	spin_unlock(&init_mm.page_table_lock);
 786
 787	return true;
 788}
 789
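/*
 * Unmap [addr, end) at the PTE level.  For the direct mapping (direct ==
 * true) only the mappings are cleared; for vmemmap ranges the backing
 * pages are freed as well, either immediately or once the whole page has
 * been marked unused with PAGE_INUSE.
 */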
 790static void __meminit
 791remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
 792		 bool direct)
 793{
 794	unsigned long next, pages = 0;
 795	pte_t *pte;
 796	void *page_addr;
 797	phys_addr_t phys_addr;
 798
 799	pte = pte_start + pte_index(addr);
 800	for (; addr < end; addr = next, pte++) {
 801		next = (addr + PAGE_SIZE) & PAGE_MASK;
 802		if (next > end)
 803			next = end;
 804
 805		if (!pte_present(*pte))
 806			continue;
 807
 808		/*
 809		 * We mapped [0,1G) memory with an identity mapping when
 810		 * initializing, in arch/x86/kernel/head_64.S. These
 811		 * pagetables cannot be removed.
 812		 */
 813		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
 814		if (phys_addr < (phys_addr_t)0x40000000)
 815			return;
 816
 817		if (IS_ALIGNED(addr, PAGE_SIZE) &&
 818		    IS_ALIGNED(next, PAGE_SIZE)) {
 819			/*
 820			 * Do not free direct mapping pages since they were
 821			 * freed when offlining, or simply not in use.
 822			 */
 823			if (!direct)
 824				free_pagetable(pte_page(*pte), 0);
 825
 826			spin_lock(&init_mm.page_table_lock);
 827			pte_clear(&init_mm, addr, pte);
 828			spin_unlock(&init_mm.page_table_lock);
 829
 830			/* For non-direct mappings, the pages count means nothing. */
 831			pages++;
 832		} else {
 833			/*
 834			 * If we are here, we are freeing vmemmap pages since
 835			 * direct mapped memory ranges to be freed are aligned.
 836			 *
 837			 * If we are not removing the whole page, it means
 838			 * other page structs in this page are being used and
 839			 * we cannot remove them. So fill the unused page_structs
 840			 * with 0xFD, and remove the page when it is wholly
 841			 * filled with 0xFD.
 842			 */
 843			memset((void *)addr, PAGE_INUSE, next - addr);
 844
 845			page_addr = page_address(pte_page(*pte));
 846			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
 847				free_pagetable(pte_page(*pte), 0);
 848
 849				spin_lock(&init_mm.page_table_lock);
 850				pte_clear(&init_mm, addr, pte);
 851				spin_unlock(&init_mm.page_table_lock);
 852			}
 853		}
 854	}
 855
 856	/* Call free_pte_table() in remove_pmd_table(). */
 857	flush_tlb_all();
 858	if (direct)
 859		update_page_count(PG_LEVEL_4K, -pages);
 860}
 861
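/*
 * PMD-level counterpart of remove_pte_table(): 2MB mappings are torn down
 * directly, 4KB mappings are handed to remove_pte_table(), and PTE pages
 * that end up empty are freed via free_pte_table().
 */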
 862static void __meminit
 863remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
 864		 bool direct)
 865{
 866	unsigned long next, pages = 0;
 867	pte_t *pte_base;
 868	pmd_t *pmd;
 869	void *page_addr;
 870
 871	pmd = pmd_start + pmd_index(addr);
 872	for (; addr < end; addr = next, pmd++) {
 873		next = pmd_addr_end(addr, end);
 874
 875		if (!pmd_present(*pmd))
 876			continue;
 877
 878		if (pmd_large(*pmd)) {
 879			if (IS_ALIGNED(addr, PMD_SIZE) &&
 880			    IS_ALIGNED(next, PMD_SIZE)) {
 881				if (!direct)
 882					free_pagetable(pmd_page(*pmd),
 883						       get_order(PMD_SIZE));
 884
 885				spin_lock(&init_mm.page_table_lock);
 886				pmd_clear(pmd);
 887				spin_unlock(&init_mm.page_table_lock);
 888				pages++;
 889			} else {
 890				/* If here, we are freeing vmemmap pages. */
 891				memset((void *)addr, PAGE_INUSE, next - addr);
 892
 893				page_addr = page_address(pmd_page(*pmd));
 894				if (!memchr_inv(page_addr, PAGE_INUSE,
 895						PMD_SIZE)) {
 896					free_pagetable(pmd_page(*pmd),
 897						       get_order(PMD_SIZE));
 898
 899					spin_lock(&init_mm.page_table_lock);
 900					pmd_clear(pmd);
 901					spin_unlock(&init_mm.page_table_lock);
 902				}
 903			}
 904
 905			continue;
 906		}
 907
 908		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
 909		remove_pte_table(pte_base, addr, next, direct);
 910		free_pte_table(pte_base, pmd);
 911	}
 912
 913	/* Call free_pmd_table() in remove_pud_table(). */
 914	if (direct)
 915		update_page_count(PG_LEVEL_2M, -pages);
 916}
 917
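/*
 * PUD-level counterpart of remove_pmd_table(): 1GB mappings are torn down
 * directly, smaller mappings are handed to remove_pmd_table(), and PMD
 * pages that end up empty are freed via free_pmd_table().
 */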
 918static void __meminit
 919remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
 920		 bool direct)
 921{
 922	unsigned long next, pages = 0;
 923	pmd_t *pmd_base;
 924	pud_t *pud;
 925	void *page_addr;
 926
 927	pud = pud_start + pud_index(addr);
 928	for (; addr < end; addr = next, pud++) {
 929		next = pud_addr_end(addr, end);
 930
 931		if (!pud_present(*pud))
 932			continue;
 933
 934		if (pud_large(*pud)) {
 935			if (IS_ALIGNED(addr, PUD_SIZE) &&
 936			    IS_ALIGNED(next, PUD_SIZE)) {
 937				if (!direct)
 938					free_pagetable(pud_page(*pud),
 939						       get_order(PUD_SIZE));
 940
 941				spin_lock(&init_mm.page_table_lock);
 942				pud_clear(pud);
 943				spin_unlock(&init_mm.page_table_lock);
 944				pages++;
 945			} else {
 946				/* If here, we are freeing vmemmap pages. */
 947				memset((void *)addr, PAGE_INUSE, next - addr);
 948
 949				page_addr = page_address(pud_page(*pud));
 950				if (!memchr_inv(page_addr, PAGE_INUSE,
 951						PUD_SIZE)) {
 952					free_pagetable(pud_page(*pud),
 953						       get_order(PUD_SIZE));
 954
 955					spin_lock(&init_mm.page_table_lock);
 956					pud_clear(pud);
 957					spin_unlock(&init_mm.page_table_lock);
 958				}
 959			}
 960
 961			continue;
 962		}
 963
 964		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
 965		remove_pmd_table(pmd_base, addr, next, direct);
 966		free_pmd_table(pmd_base, pud);
 967	}
 968
 969	if (direct)
 970		update_page_count(PG_LEVEL_1G, -pages);
 971}
 972
 973/* start and end are both virtual addresses. */
 974static void __meminit
 975remove_pagetable(unsigned long start, unsigned long end, bool direct)
 976{
 977	unsigned long next;
 978	pgd_t *pgd;
 979	pud_t *pud;
 980	bool pgd_changed = false;
 981
 982	for (; start < end; start = next) {
 983		next = pgd_addr_end(start, end);
 984
 985		pgd = pgd_offset_k(start);
 986		if (!pgd_present(*pgd))
 987			continue;
 988
 989		pud = (pud_t *)pgd_page_vaddr(*pgd);
 990		remove_pud_table(pud, start, next, direct);
 991		if (free_pud_table(pud, pgd))
 992			pgd_changed = true;
 993	}
 994
 995	if (pgd_changed)
 996		sync_global_pgds(start, end - 1);
 997
 998	flush_tlb_all();
 999}
1000
1001void __ref vmemmap_free(unsigned long start, unsigned long end)
1002{
1003	remove_pagetable(start, end, false);
1004}
1005
1006#ifdef CONFIG_MEMORY_HOTREMOVE
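/* Tear down the kernel direct mapping for the physical range [start, end). */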
1007static void __meminit
1008kernel_physical_mapping_remove(unsigned long start, unsigned long end)
1009{
1010	start = (unsigned long)__va(start);
1011	end = (unsigned long)__va(end);
1012
1013	remove_pagetable(start, end, true);
1014}
1015
1016int __ref arch_remove_memory(u64 start, u64 size)
1017{
1018	unsigned long start_pfn = start >> PAGE_SHIFT;
1019	unsigned long nr_pages = size >> PAGE_SHIFT;
1020	struct zone *zone;
1021	int ret;
1022
1023	zone = page_zone(pfn_to_page(start_pfn));
1024	kernel_physical_mapping_remove(start, start + size);
1025	ret = __remove_pages(zone, start_pfn, nr_pages);
1026	WARN_ON_ONCE(ret);
1027
1028	return ret;
1029}
1030#endif
1031#endif /* CONFIG_MEMORY_HOTPLUG */
1032
1033static struct kcore_list kcore_vsyscall;
1034
1035static void __init register_page_bootmem_info(void)
1036{
1037#ifdef CONFIG_NUMA
1038	int i;
1039
1040	for_each_online_node(i)
1041		register_page_bootmem_info_node(NODE_DATA(i));
1042#endif
1043}
1044
1045void __init mem_init(void)
1046{
1047	pci_iommu_alloc();
1048
1049	/* clear_bss() already cleared the empty_zero_page */
1050
1051	register_page_bootmem_info();
1052
1053	/* this will put all memory onto the freelists */
1054	free_all_bootmem();
1055	after_bootmem = 1;
1056
1057	/* Register memory areas for /proc/kcore */
1058	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
1059			 VSYSCALL_END - VSYSCALL_START, KCORE_OTHER);
1060
1061	mem_init_print_info(NULL);
1062}
1063
1064#ifdef CONFIG_DEBUG_RODATA
1065const int rodata_test_data = 0xC3;
1066EXPORT_SYMBOL_GPL(rodata_test_data);
1067
1068int kernel_set_to_readonly;
1069
1070void set_kernel_text_rw(void)
1071{
1072	unsigned long start = PFN_ALIGN(_text);
1073	unsigned long end = PFN_ALIGN(__stop___ex_table);
1074
1075	if (!kernel_set_to_readonly)
1076		return;
1077
1078	pr_debug("Set kernel text: %lx - %lx for read write\n",
1079		 start, end);
1080
1081	/*
1082	 * Make the kernel identity mapping for text RW. Kernel text
1083	 * mapping will always be RO. Refer to the comment in
1084	 * static_protections() in pageattr.c
1085	 */
1086	set_memory_rw(start, (end - start) >> PAGE_SHIFT);
1087}
1088
1089void set_kernel_text_ro(void)
1090{
1091	unsigned long start = PFN_ALIGN(_text);
1092	unsigned long end = PFN_ALIGN(__stop___ex_table);
1093
1094	if (!kernel_set_to_readonly)
1095		return;
1096
1097	pr_debug("Set kernel text: %lx - %lx for read only\n",
1098		 start, end);
1099
1100	/*
1101	 * Set the kernel identity mapping for text RO.
1102	 */
1103	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
1104}
1105
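/*
 * Write-protect the kernel text and read-only data, mark everything from
 * rodata to the end of the image non-executable, and free the alignment
 * padding between the sections.
 */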
1106void mark_rodata_ro(void)
1107{
1108	unsigned long start = PFN_ALIGN(_text);
1109	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
1110	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
1111	unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
1112	unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
1113	unsigned long all_end = PFN_ALIGN(&_end);
1114
1115	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
1116	       (end - start) >> 10);
1117	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
1118
1119	kernel_set_to_readonly = 1;
1120
1121	/*
1122	 * The rodata/data/bss/brk section (but not the kernel text!)
1123	 * should also be non-executable.
1124	 */
1125	set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
1126
1127	rodata_test();
1128
1129#ifdef CONFIG_CPA_DEBUG
1130	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
1131	set_memory_rw(start, (end-start) >> PAGE_SHIFT);
1132
1133	printk(KERN_INFO "Testing CPA: again\n");
1134	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
1135#endif
1136
1137	free_init_pages("unused kernel",
1138			(unsigned long) __va(__pa_symbol(text_end)),
1139			(unsigned long) __va(__pa_symbol(rodata_start)));
1140	free_init_pages("unused kernel",
1141			(unsigned long) __va(__pa_symbol(rodata_end)),
1142			(unsigned long) __va(__pa_symbol(_sdata)));
1143}
1144
1145#endif
1146
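/*
 * Report whether a kernel virtual address is backed by a valid page frame,
 * walking the page tables and handling 1GB and 2MB mappings along the way.
 */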
1147int kern_addr_valid(unsigned long addr)
1148{
1149	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
1150	pgd_t *pgd;
1151	pud_t *pud;
1152	pmd_t *pmd;
1153	pte_t *pte;
1154
1155	if (above != 0 && above != -1UL)
1156		return 0;
1157
1158	pgd = pgd_offset_k(addr);
1159	if (pgd_none(*pgd))
1160		return 0;
1161
1162	pud = pud_offset(pgd, addr);
1163	if (pud_none(*pud))
1164		return 0;
1165
1166	if (pud_large(*pud))
1167		return pfn_valid(pud_pfn(*pud));
1168
1169	pmd = pmd_offset(pud, addr);
1170	if (pmd_none(*pmd))
1171		return 0;
1172
1173	if (pmd_large(*pmd))
1174		return pfn_valid(pmd_pfn(*pmd));
1175
1176	pte = pte_offset_kernel(pmd, addr);
1177	if (pte_none(*pte))
1178		return 0;
1179
1180	return pfn_valid(pte_pfn(*pte));
1181}
1182
1183/*
1184 * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
1185 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
1186 * not need special handling anymore.
1187 */
1188static struct vm_area_struct gate_vma = {
1189	.vm_start	= VSYSCALL_START,
1190	.vm_end		= VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
1191	.vm_page_prot	= PAGE_READONLY_EXEC,
1192	.vm_flags	= VM_READ | VM_EXEC
1193};
1194
1195struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
1196{
1197#ifdef CONFIG_IA32_EMULATION
1198	if (!mm || mm->context.ia32_compat)
1199		return NULL;
1200#endif
1201	return &gate_vma;
1202}
1203
1204int in_gate_area(struct mm_struct *mm, unsigned long addr)
1205{
1206	struct vm_area_struct *vma = get_gate_vma(mm);
1207
1208	if (!vma)
1209		return 0;
1210
1211	return (addr >= vma->vm_start) && (addr < vma->vm_end);
1212}
1213
1214/*
1215 * Use this when you have no reliable mm, typically from interrupt
1216 * context. It is less reliable than using a task's mm and may give
1217 * false positives.
1218 */
1219int in_gate_area_no_mm(unsigned long addr)
1220{
1221	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
1222}
1223
1224const char *arch_vma_name(struct vm_area_struct *vma)
1225{
1226	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
1227		return "[vdso]";
1228	if (vma == &gate_vma)
1229		return "[vsyscall]";
1230	return NULL;
1231}
1232
1233#ifdef CONFIG_X86_UV
1234unsigned long memory_block_size_bytes(void)
1235{
1236	if (is_uv_system()) {
1237		printk(KERN_INFO "UV: memory block size 2GB\n");
1238		return 2UL * 1024 * 1024 * 1024;
1239	}
1240	return MIN_MEMORY_BLOCK_SIZE;
1241}
1242#endif
1243
1244#ifdef CONFIG_SPARSEMEM_VMEMMAP
1245/*
1246 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
1247 */
1248static long __meminitdata addr_start, addr_end;
1249static void __meminitdata *p_start, *p_end;
1250static int __meminitdata node_start;
1251
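/*
 * Back the vmemmap range [start, end) with 2MB pages where possible,
 * falling back to base pages when a large allocation cannot be satisfied.
 */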
1252static int __meminit vmemmap_populate_hugepages(unsigned long start,
1253						unsigned long end, int node)
1254{
1255	unsigned long addr;
1256	unsigned long next;
1257	pgd_t *pgd;
1258	pud_t *pud;
1259	pmd_t *pmd;
1260
1261	for (addr = start; addr < end; addr = next) {
1262		next = pmd_addr_end(addr, end);
1263
1264		pgd = vmemmap_pgd_populate(addr, node);
1265		if (!pgd)
1266			return -ENOMEM;
1267
1268		pud = vmemmap_pud_populate(pgd, addr, node);
1269		if (!pud)
1270			return -ENOMEM;
1271
1272		pmd = pmd_offset(pud, addr);
1273		if (pmd_none(*pmd)) {
1274			void *p;
1275
1276			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
1277			if (p) {
1278				pte_t entry;
1279
1280				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
1281						PAGE_KERNEL_LARGE);
1282				set_pmd(pmd, __pmd(pte_val(entry)));
1283
1284				/* check to see if we have contiguous blocks */
1285				if (p_end != p || node_start != node) {
1286					if (p_start)
1287						printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
1288						       addr_start, addr_end-1, p_start, p_end-1, node_start);
1289					addr_start = addr;
1290					node_start = node;
1291					p_start = p;
1292				}
1293
1294				addr_end = addr + PMD_SIZE;
1295				p_end = p + PMD_SIZE;
1296				continue;
1297			}
1298		} else if (pmd_large(*pmd)) {
1299			vmemmap_verify((pte_t *)pmd, node, addr, next);
1300			continue;
1301		}
1302		pr_warn_once("vmemmap: falling back to regular page backing\n");
1303		if (vmemmap_populate_basepages(addr, next, node))
1304			return -ENOMEM;
1305	}
1306	return 0;
1307}
1308
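/*
 * Populate the vmemmap for [start, end): huge pages are used when the CPU
 * supports PSE, base pages otherwise.  New mappings are synchronized into
 * all page tables via sync_global_pgds().
 */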
1309int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
1310{
1311	int err;
1312
1313	if (cpu_has_pse)
1314		err = vmemmap_populate_hugepages(start, end, node);
1315	else
1316		err = vmemmap_populate_basepages(start, end, node);
1317	if (!err)
1318		sync_global_pgds(start, end - 1);
1319	return err;
1320}
1321
1322#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
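/*
 * Tag the vmemmap pages of a section, and the page-table pages that map
 * them, with bootmem info so that memory hot-remove can account for them.
 */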
1323void register_page_bootmem_memmap(unsigned long section_nr,
1324				  struct page *start_page, unsigned long size)
1325{
1326	unsigned long addr = (unsigned long)start_page;
1327	unsigned long end = (unsigned long)(start_page + size);
1328	unsigned long next;
1329	pgd_t *pgd;
1330	pud_t *pud;
1331	pmd_t *pmd;
1332	unsigned int nr_pages;
1333	struct page *page;
1334
1335	for (; addr < end; addr = next) {
1336		pte_t *pte = NULL;
1337
1338		pgd = pgd_offset_k(addr);
1339		if (pgd_none(*pgd)) {
1340			next = (addr + PAGE_SIZE) & PAGE_MASK;
1341			continue;
1342		}
1343		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);
1344
1345		pud = pud_offset(pgd, addr);
1346		if (pud_none(*pud)) {
1347			next = (addr + PAGE_SIZE) & PAGE_MASK;
1348			continue;
1349		}
1350		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);
1351
1352		if (!cpu_has_pse) {
1353			next = (addr + PAGE_SIZE) & PAGE_MASK;
1354			pmd = pmd_offset(pud, addr);
1355			if (pmd_none(*pmd))
1356				continue;
1357			get_page_bootmem(section_nr, pmd_page(*pmd),
1358					 MIX_SECTION_INFO);
1359
1360			pte = pte_offset_kernel(pmd, addr);
1361			if (pte_none(*pte))
1362				continue;
1363			get_page_bootmem(section_nr, pte_page(*pte),
1364					 SECTION_INFO);
1365		} else {
1366			next = pmd_addr_end(addr, end);
1367
1368			pmd = pmd_offset(pud, addr);
1369			if (pmd_none(*pmd))
1370				continue;
1371
1372			nr_pages = 1 << (get_order(PMD_SIZE));
1373			page = pmd_page(*pmd);
1374			while (nr_pages--)
1375				get_page_bootmem(section_nr, page++,
1376						 SECTION_INFO);
1377		}
1378	}
1379}
1380#endif
1381
1382void __meminit vmemmap_populate_print_last(void)
1383{
1384	if (p_start) {
1385		printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
1386			addr_start, addr_end-1, p_start, p_end-1, node_start);
1387		p_start = NULL;
1388		p_end = NULL;
1389		node_start = 0;
1390	}
1391}
1392#endif