v4.6
 
   1/*
   2 *  linux/arch/x86_64/mm/init.c
   3 *
   4 *  Copyright (C) 1995  Linus Torvalds
   5 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
   6 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
   7 */
   8
   9#include <linux/signal.h>
  10#include <linux/sched.h>
  11#include <linux/kernel.h>
  12#include <linux/errno.h>
  13#include <linux/string.h>
  14#include <linux/types.h>
  15#include <linux/ptrace.h>
  16#include <linux/mman.h>
  17#include <linux/mm.h>
  18#include <linux/swap.h>
  19#include <linux/smp.h>
  20#include <linux/init.h>
  21#include <linux/initrd.h>
  22#include <linux/pagemap.h>
  23#include <linux/bootmem.h>
  24#include <linux/memblock.h>
  25#include <linux/proc_fs.h>
  26#include <linux/pci.h>
  27#include <linux/pfn.h>
  28#include <linux/poison.h>
  29#include <linux/dma-mapping.h>
  30#include <linux/module.h>
  31#include <linux/memory.h>
  32#include <linux/memory_hotplug.h>
  33#include <linux/memremap.h>
  34#include <linux/nmi.h>
  35#include <linux/gfp.h>
  36#include <linux/kcore.h>
  37
  38#include <asm/processor.h>
  39#include <asm/bios_ebda.h>
  40#include <asm/uaccess.h>
  41#include <asm/pgtable.h>
  42#include <asm/pgalloc.h>
  43#include <asm/dma.h>
  44#include <asm/fixmap.h>
  45#include <asm/e820.h>
  46#include <asm/apic.h>
  47#include <asm/tlb.h>
  48#include <asm/mmu_context.h>
  49#include <asm/proto.h>
  50#include <asm/smp.h>
  51#include <asm/sections.h>
  52#include <asm/kdebug.h>
  53#include <asm/numa.h>
  54#include <asm/cacheflush.h>
  55#include <asm/init.h>
  56#include <asm/uv/uv.h>
  57#include <asm/setup.h>
  58
  59#include "mm_internal.h"
  60
  61static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
  62			   unsigned long addr, unsigned long end)
  63{
  64	addr &= PMD_MASK;
  65	for (; addr < end; addr += PMD_SIZE) {
  66		pmd_t *pmd = pmd_page + pmd_index(addr);
  67
  68		if (!pmd_present(*pmd))
  69			set_pmd(pmd, __pmd(addr | pmd_flag));
  70	}
  71}
  72static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
  73			  unsigned long addr, unsigned long end)
  74{
  75	unsigned long next;
  76
  77	for (; addr < end; addr = next) {
  78		pud_t *pud = pud_page + pud_index(addr);
  79		pmd_t *pmd;
  80
  81		next = (addr & PUD_MASK) + PUD_SIZE;
  82		if (next > end)
  83			next = end;
  84
  85		if (pud_present(*pud)) {
  86			pmd = pmd_offset(pud, 0);
  87			ident_pmd_init(info->pmd_flag, pmd, addr, next);
  88			continue;
  89		}
  90		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
  91		if (!pmd)
  92			return -ENOMEM;
  93		ident_pmd_init(info->pmd_flag, pmd, addr, next);
  94		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
  95	}
  96
  97	return 0;
  98}
  99
 100int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
 101			      unsigned long addr, unsigned long end)
 102{
 103	unsigned long next;
 104	int result;
 105	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
 106
 107	for (; addr < end; addr = next) {
 108		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
 109		pud_t *pud;
 110
 111		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
 112		if (next > end)
 113			next = end;
 114
 115		if (pgd_present(*pgd)) {
 116			pud = pud_offset(pgd, 0);
 117			result = ident_pud_init(info, pud, addr, next);
 118			if (result)
 119				return result;
 120			continue;
 121		}
 122
 123		pud = (pud_t *)info->alloc_pgt_page(info->context);
 124		if (!pud)
 125			return -ENOMEM;
 126		result = ident_pud_init(info, pud, addr, next);
 127		if (result)
 128			return result;
 129		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
 130	}
 131
 132	return 0;
 133}
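/*
 * Sketch of a typical caller (assumption: modeled on the kexec path in
 * arch/x86/kernel/machine_kexec_64.c; the names below are illustrative).
 * The caller supplies the page-table allocator and the PMD flags:
 *
 *	struct x86_mapping_info info = {
 *		.alloc_pgt_page	= alloc_pgt_page,  // returns a zeroed page or NULL
 *		.context	= image,           // passed back to the allocator
 *		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
 *	};
 *	result = kernel_ident_mapping_init(&info, level4p, mstart, mend);
 */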
 134
 135/*
  136 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
 137 * physical space so we can cache the place of the first one and move
 138 * around without checking the pgd every time.
 139 */
 140
 141pteval_t __supported_pte_mask __read_mostly = ~0;
 142EXPORT_SYMBOL_GPL(__supported_pte_mask);
 143
 144int force_personality32;
 145
 146/*
 147 * noexec32=on|off
  148 * Control the non-executable heap for 32-bit processes.
  149 * To control the stack too, use noexec=off
 150 *
 151 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 152 * off	PROT_READ implies PROT_EXEC
 153 */
 154static int __init nonx32_setup(char *str)
 155{
 156	if (!strcmp(str, "on"))
 157		force_personality32 &= ~READ_IMPLIES_EXEC;
 158	else if (!strcmp(str, "off"))
 159		force_personality32 |= READ_IMPLIES_EXEC;
 160	return 1;
 161}
 162__setup("noexec32=", nonx32_setup);
 163
 164/*
  165 * When memory is added or removed, make sure all the processes' MMs have
 166 * suitable PGD entries in the local PGD level page.
 167 */
 168void sync_global_pgds(unsigned long start, unsigned long end, int removed)
 169{
 170	unsigned long address;
 171
 172	for (address = start; address <= end; address += PGDIR_SIZE) {
 173		const pgd_t *pgd_ref = pgd_offset_k(address);
 174		struct page *page;
 175
 176		/*
 177		 * When it is called after memory hot remove, pgd_none()
 178		 * returns true. In this case (removed == 1), we must clear
 179		 * the PGD entries in the local PGD level page.
 180		 */
 181		if (pgd_none(*pgd_ref) && !removed)
 182			continue;
 183
 184		spin_lock(&pgd_lock);
 185		list_for_each_entry(page, &pgd_list, lru) {
 186			pgd_t *pgd;
 187			spinlock_t *pgt_lock;
 188
 189			pgd = (pgd_t *)page_address(page) + pgd_index(address);
 190			/* the pgt_lock only for Xen */
 191			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 192			spin_lock(pgt_lock);
 193
 194			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
 195				BUG_ON(pgd_page_vaddr(*pgd)
 196				       != pgd_page_vaddr(*pgd_ref));
 197
 198			if (removed) {
 199				if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
 200					pgd_clear(pgd);
 201			} else {
 202				if (pgd_none(*pgd))
 203					set_pgd(pgd, *pgd_ref);
 204			}
 205
 206			spin_unlock(pgt_lock);
 207		}
 208		spin_unlock(&pgd_lock);
 209	}
 210}
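/*
 * Within this file, sync_global_pgds() is invoked from
 * kernel_physical_mapping_init(), remove_pagetable() and
 * vmemmap_populate() once they have modified the init_mm PGD level.
 */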
 211
 212/*
  213 * NOTE: This function is marked __ref because it calls an __init function
 214 * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
 215 */
 216static __ref void *spp_getpage(void)
 217{
 218	void *ptr;
 219
 220	if (after_bootmem)
 221		ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
 222	else
 223		ptr = alloc_bootmem_pages(PAGE_SIZE);
 224
 225	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
 226		panic("set_pte_phys: cannot allocate page data %s\n",
 227			after_bootmem ? "after bootmem" : "");
 228	}
 229
 230	pr_debug("spp_getpage %p\n", ptr);
 231
 232	return ptr;
 233}
 234
 235static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
 236{
 237	if (pgd_none(*pgd)) {
 238		pud_t *pud = (pud_t *)spp_getpage();
 239		pgd_populate(&init_mm, pgd, pud);
 240		if (pud != pud_offset(pgd, 0))
 241			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
 242			       pud, pud_offset(pgd, 0));
 243	}
 244	return pud_offset(pgd, vaddr);
 245}
 246
 247static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
 248{
 249	if (pud_none(*pud)) {
 250		pmd_t *pmd = (pmd_t *) spp_getpage();
 251		pud_populate(&init_mm, pud, pmd);
 252		if (pmd != pmd_offset(pud, 0))
 253			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
 254			       pmd, pmd_offset(pud, 0));
 255	}
 256	return pmd_offset(pud, vaddr);
 257}
 258
 259static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
 260{
 261	if (pmd_none(*pmd)) {
 262		pte_t *pte = (pte_t *) spp_getpage();
 263		pmd_populate_kernel(&init_mm, pmd, pte);
 264		if (pte != pte_offset_kernel(pmd, 0))
 265			printk(KERN_ERR "PAGETABLE BUG #02!\n");
 266	}
 267	return pte_offset_kernel(pmd, vaddr);
 268}
 269
 270void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
 271{
 272	pud_t *pud;
 273	pmd_t *pmd;
 274	pte_t *pte;
 275
 276	pud = pud_page + pud_index(vaddr);
 277	pmd = fill_pmd(pud, vaddr);
 278	pte = fill_pte(pmd, vaddr);
 279
 280	set_pte(pte, new_pte);
 281
 282	/*
 283	 * It's enough to flush this one mapping.
 284	 * (PGE mappings get flushed as well)
 285	 */
 286	__flush_tlb_one(vaddr);
 287}
 288
 289void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
 290{
 291	pgd_t *pgd;
 292	pud_t *pud_page;
 293
 294	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));
 295
 296	pgd = pgd_offset_k(vaddr);
 297	if (pgd_none(*pgd)) {
 298		printk(KERN_ERR
 299			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
 300		return;
 301	}
 302	pud_page = (pud_t*)pgd_page_vaddr(*pgd);
 303	set_pte_vaddr_pud(pud_page, vaddr, pteval);
 304}
 305
 306pmd_t * __init populate_extra_pmd(unsigned long vaddr)
 307{
 308	pgd_t *pgd;
 309	pud_t *pud;
 310
 311	pgd = pgd_offset_k(vaddr);
 312	pud = fill_pud(pgd, vaddr);
 313	return fill_pmd(pud, vaddr);
 314}
 315
 316pte_t * __init populate_extra_pte(unsigned long vaddr)
 317{
 318	pmd_t *pmd;
 319
 320	pmd = populate_extra_pmd(vaddr);
 321	return fill_pte(pmd, vaddr);
 322}
 323
 324/*
 325 * Create large page table mappings for a range of physical addresses.
 326 */
 327static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
 328					enum page_cache_mode cache)
 329{
 330	pgd_t *pgd;
 331	pud_t *pud;
 332	pmd_t *pmd;
 333	pgprot_t prot;
 334
 335	pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
 336		pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
 337	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
 338	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
 339		pgd = pgd_offset_k((unsigned long)__va(phys));
 340		if (pgd_none(*pgd)) {
 341			pud = (pud_t *) spp_getpage();
 342			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
 343						_PAGE_USER));
 344		}
 345		pud = pud_offset(pgd, (unsigned long)__va(phys));
 346		if (pud_none(*pud)) {
 347			pmd = (pmd_t *) spp_getpage();
 348			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
 349						_PAGE_USER));
 350		}
 351		pmd = pmd_offset(pud, phys);
 352		BUG_ON(!pmd_none(*pmd));
 353		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
 354	}
 355}
 356
 357void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
 358{
 359	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
 360}
 361
 362void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
 363{
 364	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
 365}
 366
 367/*
 368 * The head.S code sets up the kernel high mapping:
 369 *
 370 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 371 *
 372 * phys_base holds the negative offset to the kernel, which is added
 373 * to the compile time generated pmds. This results in invalid pmds up
 374 * to the point where we hit the physaddr 0 mapping.
 375 *
 376 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 377 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 378 * well, as they are located before _text:
 379 */
 380void __init cleanup_highmap(void)
 381{
 382	unsigned long vaddr = __START_KERNEL_map;
 383	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
 384	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
 385	pmd_t *pmd = level2_kernel_pgt;
 386
 387	/*
 388	 * Native path, max_pfn_mapped is not set yet.
 389	 * Xen has valid max_pfn_mapped set in
 390	 *	arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
 391	 */
 392	if (max_pfn_mapped)
 393		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
 394
 395	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
 396		if (pmd_none(*pmd))
 397			continue;
 398		if (vaddr < (unsigned long) _text || vaddr > end)
 399			set_pmd(pmd, __pmd(0));
 400	}
 401}
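/*
 * Worked example (assuming KERNEL_IMAGE_SIZE == 512M and PMD_SIZE == 2M):
 * the loop above scans 512M / 2M = 256 pmd entries of level2_kernel_pgt
 * and zaps every entry below _text or above the rounded-up _brk_end,
 * so only the pmds that actually back the kernel image survive.
 */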
 402
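/*
 * Create PTE level page table mappings for a range of physical addresses
 * and return the last physical address mapped.
 */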
 403static unsigned long __meminit
 404phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
 405	      pgprot_t prot)
 406{
 407	unsigned long pages = 0, next;
 408	unsigned long last_map_addr = end;
 409	int i;
 410
 411	pte_t *pte = pte_page + pte_index(addr);
 412
 413	for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr = next, pte++) {
 414		next = (addr & PAGE_MASK) + PAGE_SIZE;
 415		if (addr >= end) {
 416			if (!after_bootmem &&
 417			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RAM) &&
 418			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RESERVED_KERN))
 419				set_pte(pte, __pte(0));
 420			continue;
 421		}
 422
 423		/*
 424		 * We will re-use the existing mapping.
 425		 * Xen for example has some special requirements, like mapping
  426		 * pagetable pages as RO. So assume whoever pre-set up
  427		 * these mappings knew what they were doing.
 428		 */
 429		if (pte_val(*pte)) {
 430			if (!after_bootmem)
 431				pages++;
 432			continue;
 433		}
 434
 435		if (0)
 436			printk("   pte=%p addr=%lx pte=%016lx\n",
 437			       pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
 438		pages++;
 439		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
 440		last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
 441	}
 442
 443	update_page_count(PG_LEVEL_4K, pages);
 444
 445	return last_map_addr;
 446}
 447
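/*
 * Create PMD level page table mappings for a range of physical addresses
 * and return the last physical address mapped.
 */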
 448static unsigned long __meminit
 449phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 450	      unsigned long page_size_mask, pgprot_t prot)
 451{
 452	unsigned long pages = 0, next;
 453	unsigned long last_map_addr = end;
 454
 455	int i = pmd_index(address);
 456
 457	for (; i < PTRS_PER_PMD; i++, address = next) {
 458		pmd_t *pmd = pmd_page + pmd_index(address);
 459		pte_t *pte;
 460		pgprot_t new_prot = prot;
 461
 462		next = (address & PMD_MASK) + PMD_SIZE;
 463		if (address >= end) {
 464			if (!after_bootmem &&
 465			    !e820_any_mapped(address & PMD_MASK, next, E820_RAM) &&
 466			    !e820_any_mapped(address & PMD_MASK, next, E820_RESERVED_KERN))
 467				set_pmd(pmd, __pmd(0));
 468			continue;
 469		}
 470
 471		if (pmd_val(*pmd)) {
 472			if (!pmd_large(*pmd)) {
 473				spin_lock(&init_mm.page_table_lock);
 474				pte = (pte_t *)pmd_page_vaddr(*pmd);
 475				last_map_addr = phys_pte_init(pte, address,
 476								end, prot);
 477				spin_unlock(&init_mm.page_table_lock);
 478				continue;
 479			}
 480			/*
 481			 * If we are ok with PG_LEVEL_2M mapping, then we will
  482			 * use the existing mapping.
 483			 *
 484			 * Otherwise, we will split the large page mapping but
 485			 * use the same existing protection bits except for
 486			 * large page, so that we don't violate Intel's TLB
 487			 * Application note (317080) which says, while changing
 488			 * the page sizes, new and old translations should
 489			 * not differ with respect to page frame and
 490			 * attributes.
 491			 */
 492			if (page_size_mask & (1 << PG_LEVEL_2M)) {
 493				if (!after_bootmem)
 494					pages++;
 495				last_map_addr = next;
 496				continue;
 497			}
 498			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
 499		}
 500
 501		if (page_size_mask & (1<<PG_LEVEL_2M)) {
 502			pages++;
 503			spin_lock(&init_mm.page_table_lock);
 504			set_pte((pte_t *)pmd,
 505				pfn_pte((address & PMD_MASK) >> PAGE_SHIFT,
 506					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
 507			spin_unlock(&init_mm.page_table_lock);
 508			last_map_addr = next;
 509			continue;
 510		}
 511
 512		pte = alloc_low_page();
 513		last_map_addr = phys_pte_init(pte, address, end, new_prot);
 514
 515		spin_lock(&init_mm.page_table_lock);
 516		pmd_populate_kernel(&init_mm, pmd, pte);
 517		spin_unlock(&init_mm.page_table_lock);
 518	}
 519	update_page_count(PG_LEVEL_2M, pages);
 520	return last_map_addr;
 521}
 522
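/*
 * Create PUD level page table mappings for a range of physical addresses
 * and return the last physical address mapped.
 */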
 523static unsigned long __meminit
 524phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 525			 unsigned long page_size_mask)
 526{
 527	unsigned long pages = 0, next;
 528	unsigned long last_map_addr = end;
 529	int i = pud_index(addr);
 530
 531	for (; i < PTRS_PER_PUD; i++, addr = next) {
 532		pud_t *pud = pud_page + pud_index(addr);
 533		pmd_t *pmd;
 534		pgprot_t prot = PAGE_KERNEL;
 535
 536		next = (addr & PUD_MASK) + PUD_SIZE;
 537		if (addr >= end) {
 538			if (!after_bootmem &&
 539			    !e820_any_mapped(addr & PUD_MASK, next, E820_RAM) &&
 540			    !e820_any_mapped(addr & PUD_MASK, next, E820_RESERVED_KERN))
 541				set_pud(pud, __pud(0));
 542			continue;
 543		}
 544
 545		if (pud_val(*pud)) {
 546			if (!pud_large(*pud)) {
 547				pmd = pmd_offset(pud, 0);
 548				last_map_addr = phys_pmd_init(pmd, addr, end,
 549							 page_size_mask, prot);
 550				__flush_tlb_all();
 551				continue;
 552			}
 553			/*
 554			 * If we are ok with PG_LEVEL_1G mapping, then we will
 555			 * use the existing mapping.
 556			 *
 557			 * Otherwise, we will split the gbpage mapping but use
  558			 * the same existing protection bits except for large
 559			 * page, so that we don't violate Intel's TLB
 560			 * Application note (317080) which says, while changing
 561			 * the page sizes, new and old translations should
 562			 * not differ with respect to page frame and
 563			 * attributes.
 564			 */
 565			if (page_size_mask & (1 << PG_LEVEL_1G)) {
 566				if (!after_bootmem)
 567					pages++;
 568				last_map_addr = next;
 569				continue;
 570			}
 571			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
 572		}
 573
 574		if (page_size_mask & (1<<PG_LEVEL_1G)) {
 575			pages++;
 576			spin_lock(&init_mm.page_table_lock);
 577			set_pte((pte_t *)pud,
 578				pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT,
 579					PAGE_KERNEL_LARGE));
 580			spin_unlock(&init_mm.page_table_lock);
 581			last_map_addr = next;
 582			continue;
 583		}
 584
 585		pmd = alloc_low_page();
 586		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
 587					      prot);
 588
 589		spin_lock(&init_mm.page_table_lock);
 590		pud_populate(&init_mm, pud, pmd);
 591		spin_unlock(&init_mm.page_table_lock);
 592	}
 593	__flush_tlb_all();
 594
 595	update_page_count(PG_LEVEL_1G, pages);
 596
 597	return last_map_addr;
 598}
 599
 600unsigned long __meminit
 601kernel_physical_mapping_init(unsigned long start,
 602			     unsigned long end,
 603			     unsigned long page_size_mask)
 604{
 605	bool pgd_changed = false;
 606	unsigned long next, last_map_addr = end;
 607	unsigned long addr;
 608
 609	start = (unsigned long)__va(start);
 610	end = (unsigned long)__va(end);
 611	addr = start;
 612
 613	for (; start < end; start = next) {
 614		pgd_t *pgd = pgd_offset_k(start);
 615		pud_t *pud;
 616
 617		next = (start & PGDIR_MASK) + PGDIR_SIZE;
 618
 619		if (pgd_val(*pgd)) {
 620			pud = (pud_t *)pgd_page_vaddr(*pgd);
 621			last_map_addr = phys_pud_init(pud, __pa(start),
 622						 __pa(end), page_size_mask);
 623			continue;
 624		}
 625
 626		pud = alloc_low_page();
 627		last_map_addr = phys_pud_init(pud, __pa(start), __pa(end),
 628						 page_size_mask);
 629
 630		spin_lock(&init_mm.page_table_lock);
 631		pgd_populate(&init_mm, pgd, pud);
 632		spin_unlock(&init_mm.page_table_lock);
 633		pgd_changed = true;
 634	}
 635
 636	if (pgd_changed)
 637		sync_global_pgds(addr, end - 1, 0);
 638
 639	__flush_tlb_all();
 640
 641	return last_map_addr;
 642}
 643
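/*
 * Illustrative only (the real logic lives in probe_page_size_mask() in
 * arch/x86/mm/init.c, not in this file): callers derive page_size_mask
 * from CPU features, roughly
 *
 *	if (cpu_has_pse)
 *		page_size_mask |= 1 << PG_LEVEL_2M;
 *	if (cpu_has_gbpages)
 *		page_size_mask |= 1 << PG_LEVEL_1G;
 *
 * before handing it to kernel_physical_mapping_init().
 */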
 644#ifndef CONFIG_NUMA
 645void __init initmem_init(void)
 646{
 647	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
 648}
 649#endif
 650
 651void __init paging_init(void)
 652{
 653	sparse_memory_present_with_active_regions(MAX_NUMNODES);
 654	sparse_init();
 655
 656	/*
 657	 * clear the default setting with node 0
  658	 * note: don't use nodes_clear here; that really clears the state when
  659	 *	 NUMA support is not compiled in, and a later node_set_state
  660	 *	 will not set it back.
 661	 */
 662	node_clear_state(0, N_MEMORY);
 663	if (N_MEMORY != N_NORMAL_MEMORY)
 664		node_clear_state(0, N_NORMAL_MEMORY);
 665
 666	zone_sizes_init();
 667}
 668
 669/*
 670 * Memory hotplug specific functions
 671 */
 672#ifdef CONFIG_MEMORY_HOTPLUG
 673/*
 674 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 675 * updating.
 676 */
 677static void  update_end_of_memory_vars(u64 start, u64 size)
 678{
 679	unsigned long end_pfn = PFN_UP(start + size);
 680
 681	if (end_pfn > max_pfn) {
 682		max_pfn = end_pfn;
 683		max_low_pfn = end_pfn;
 684		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
 685	}
 686}
 687
 688/*
  689 * Memory is always added to the NORMAL zone. This means you will never get
 690 * additional DMA/DMA32 memory.
 691 */
 692int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 693{
 694	struct pglist_data *pgdat = NODE_DATA(nid);
 695	struct zone *zone = pgdat->node_zones +
 696		zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
 697	unsigned long start_pfn = start >> PAGE_SHIFT;
 698	unsigned long nr_pages = size >> PAGE_SHIFT;
 699	int ret;
 700
 701	init_memory_mapping(start, start + size);
 702
 703	ret = __add_pages(nid, zone, start_pfn, nr_pages);
 704	WARN_ON_ONCE(ret);
 705
 706	/* update max_pfn, max_low_pfn and high_memory */
 707	update_end_of_memory_vars(start, size);
 708
 709	return ret;
 710}
 711EXPORT_SYMBOL_GPL(arch_add_memory);
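/*
 * Rough hotplug call chain, for orientation (assumption, not spelled out
 * in this file): add_memory() -> arch_add_memory() above, which extends
 * the direct mapping via init_memory_mapping() and then hands the new
 * pfn range to the core with __add_pages().
 */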
 712
 713#define PAGE_INUSE 0xFD
 714
 715static void __meminit free_pagetable(struct page *page, int order)
 716{
 717	unsigned long magic;
 718	unsigned int nr_pages = 1 << order;
 719	struct vmem_altmap *altmap = to_vmem_altmap((unsigned long) page);
 720
 721	if (altmap) {
 722		vmem_altmap_free(altmap, nr_pages);
 723		return;
 724	}
 725
 726	/* bootmem page has reserved flag */
 727	if (PageReserved(page)) {
 728		__ClearPageReserved(page);
 729
 730		magic = (unsigned long)page->lru.next;
 731		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
 732			while (nr_pages--)
 733				put_page_bootmem(page++);
 734		} else
 735			while (nr_pages--)
 736				free_reserved_page(page++);
 737	} else
 738		free_pages((unsigned long)page_address(page), order);
 739}
 740
 741static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
 742{
 743	pte_t *pte;
 744	int i;
 745
 746	for (i = 0; i < PTRS_PER_PTE; i++) {
 747		pte = pte_start + i;
 748		if (pte_val(*pte))
 749			return;
 750	}
 751
  752	/* free a pte table */
 753	free_pagetable(pmd_page(*pmd), 0);
 754	spin_lock(&init_mm.page_table_lock);
 755	pmd_clear(pmd);
 756	spin_unlock(&init_mm.page_table_lock);
 757}
 758
 759static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
 760{
 761	pmd_t *pmd;
 762	int i;
 763
 764	for (i = 0; i < PTRS_PER_PMD; i++) {
 765		pmd = pmd_start + i;
 766		if (pmd_val(*pmd))
 767			return;
 768	}
 769
  770	/* free a pmd table */
 771	free_pagetable(pud_page(*pud), 0);
 772	spin_lock(&init_mm.page_table_lock);
 773	pud_clear(pud);
 774	spin_unlock(&init_mm.page_table_lock);
 775}
 776
 777/* Return true if pgd is changed, otherwise return false. */
 778static bool __meminit free_pud_table(pud_t *pud_start, pgd_t *pgd)
 779{
 780	pud_t *pud;
 781	int i;
 782
 783	for (i = 0; i < PTRS_PER_PUD; i++) {
 784		pud = pud_start + i;
 785		if (pud_val(*pud))
 786			return false;
 787	}
 788
 789	/* free a pud table */
 790	free_pagetable(pgd_page(*pgd), 0);
 791	spin_lock(&init_mm.page_table_lock);
 792	pgd_clear(pgd);
 793	spin_unlock(&init_mm.page_table_lock);
 794
 795	return true;
 796}
 797
 798static void __meminit
 799remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
 800		 bool direct)
 801{
 802	unsigned long next, pages = 0;
 803	pte_t *pte;
 804	void *page_addr;
 805	phys_addr_t phys_addr;
 806
 807	pte = pte_start + pte_index(addr);
 808	for (; addr < end; addr = next, pte++) {
 809		next = (addr + PAGE_SIZE) & PAGE_MASK;
 810		if (next > end)
 811			next = end;
 812
 813		if (!pte_present(*pte))
 814			continue;
 815
 816		/*
 817		 * We mapped [0,1G) memory as identity mapping when
 818		 * initializing, in arch/x86/kernel/head_64.S. These
 819		 * pagetables cannot be removed.
 820		 */
 821		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
 822		if (phys_addr < (phys_addr_t)0x40000000)
 823			return;
 824
 825		if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
 826			/*
 827			 * Do not free direct mapping pages since they were
  828			 * freed when offlining, or simply not in use.
 829			 */
 830			if (!direct)
 831				free_pagetable(pte_page(*pte), 0);
 832
 833			spin_lock(&init_mm.page_table_lock);
 834			pte_clear(&init_mm, addr, pte);
 835			spin_unlock(&init_mm.page_table_lock);
 836
  837			/* For non-direct mappings, the pages count means nothing. */
 838			pages++;
 839		} else {
 840			/*
 841			 * If we are here, we are freeing vmemmap pages since
 842			 * direct mapped memory ranges to be freed are aligned.
 843			 *
 844			 * If we are not removing the whole page, it means
 845			 * other page structs in this page are being used and
  846			 * we cannot remove them. So fill the unused page_structs
 847			 * with 0xFD, and remove the page when it is wholly
 848			 * filled with 0xFD.
 849			 */
 850			memset((void *)addr, PAGE_INUSE, next - addr);
 851
 852			page_addr = page_address(pte_page(*pte));
 853			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
 854				free_pagetable(pte_page(*pte), 0);
 855
 856				spin_lock(&init_mm.page_table_lock);
 857				pte_clear(&init_mm, addr, pte);
 858				spin_unlock(&init_mm.page_table_lock);
 859			}
 860		}
 861	}
 862
  863	/* free_pte_table() is called from remove_pmd_table(). */
 864	flush_tlb_all();
 865	if (direct)
 866		update_page_count(PG_LEVEL_4K, -pages);
 867}
 868
 869static void __meminit
 870remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
 871		 bool direct)
 872{
 873	unsigned long next, pages = 0;
 874	pte_t *pte_base;
 875	pmd_t *pmd;
 876	void *page_addr;
 877
 878	pmd = pmd_start + pmd_index(addr);
 879	for (; addr < end; addr = next, pmd++) {
 880		next = pmd_addr_end(addr, end);
 881
 882		if (!pmd_present(*pmd))
 883			continue;
 884
 885		if (pmd_large(*pmd)) {
 886			if (IS_ALIGNED(addr, PMD_SIZE) &&
 887			    IS_ALIGNED(next, PMD_SIZE)) {
 888				if (!direct)
 889					free_pagetable(pmd_page(*pmd),
 890						       get_order(PMD_SIZE));
 891
 892				spin_lock(&init_mm.page_table_lock);
 893				pmd_clear(pmd);
 894				spin_unlock(&init_mm.page_table_lock);
 895				pages++;
 896			} else {
 897				/* If here, we are freeing vmemmap pages. */
 898				memset((void *)addr, PAGE_INUSE, next - addr);
 899
 900				page_addr = page_address(pmd_page(*pmd));
 901				if (!memchr_inv(page_addr, PAGE_INUSE,
 902						PMD_SIZE)) {
 903					free_pagetable(pmd_page(*pmd),
 904						       get_order(PMD_SIZE));
 905
 906					spin_lock(&init_mm.page_table_lock);
 907					pmd_clear(pmd);
 908					spin_unlock(&init_mm.page_table_lock);
 909				}
 910			}
 911
 912			continue;
 913		}
 914
 915		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
 916		remove_pte_table(pte_base, addr, next, direct);
 917		free_pte_table(pte_base, pmd);
 918	}
 919
  920	/* free_pmd_table() is called from remove_pud_table(). */
 921	if (direct)
 922		update_page_count(PG_LEVEL_2M, -pages);
 923}
 924
 925static void __meminit
 926remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
 927		 bool direct)
 928{
 929	unsigned long next, pages = 0;
 930	pmd_t *pmd_base;
 931	pud_t *pud;
 932	void *page_addr;
 933
 934	pud = pud_start + pud_index(addr);
 935	for (; addr < end; addr = next, pud++) {
 936		next = pud_addr_end(addr, end);
 937
 938		if (!pud_present(*pud))
 939			continue;
 940
 941		if (pud_large(*pud)) {
 942			if (IS_ALIGNED(addr, PUD_SIZE) &&
 943			    IS_ALIGNED(next, PUD_SIZE)) {
 944				if (!direct)
 945					free_pagetable(pud_page(*pud),
 946						       get_order(PUD_SIZE));
 947
 948				spin_lock(&init_mm.page_table_lock);
 949				pud_clear(pud);
 950				spin_unlock(&init_mm.page_table_lock);
 951				pages++;
 952			} else {
 953				/* If here, we are freeing vmemmap pages. */
 954				memset((void *)addr, PAGE_INUSE, next - addr);
 955
 956				page_addr = page_address(pud_page(*pud));
 957				if (!memchr_inv(page_addr, PAGE_INUSE,
 958						PUD_SIZE)) {
 959					free_pagetable(pud_page(*pud),
 960						       get_order(PUD_SIZE));
 961
 962					spin_lock(&init_mm.page_table_lock);
 963					pud_clear(pud);
 964					spin_unlock(&init_mm.page_table_lock);
 965				}
 966			}
 967
 968			continue;
 969		}
 970
 971		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
 972		remove_pmd_table(pmd_base, addr, next, direct);
 973		free_pmd_table(pmd_base, pud);
 974	}
 975
 976	if (direct)
 977		update_page_count(PG_LEVEL_1G, -pages);
 978}
 979
  980/* start and end are both virtual addresses. */
 981static void __meminit
 982remove_pagetable(unsigned long start, unsigned long end, bool direct)
 983{
 984	unsigned long next;
 985	unsigned long addr;
 986	pgd_t *pgd;
 987	pud_t *pud;
 988	bool pgd_changed = false;
 989
 990	for (addr = start; addr < end; addr = next) {
 991		next = pgd_addr_end(addr, end);
 992
 993		pgd = pgd_offset_k(addr);
 994		if (!pgd_present(*pgd))
 995			continue;
 996
 997		pud = (pud_t *)pgd_page_vaddr(*pgd);
 998		remove_pud_table(pud, addr, next, direct);
 999		if (free_pud_table(pud, pgd))
1000			pgd_changed = true;
1001	}
1002
1003	if (pgd_changed)
1004		sync_global_pgds(start, end - 1, 1);
1005
1006	flush_tlb_all();
1007}
1008
1009void __ref vmemmap_free(unsigned long start, unsigned long end)
1010{
1011	remove_pagetable(start, end, false);
1012}
1013
1014#ifdef CONFIG_MEMORY_HOTREMOVE
1015static void __meminit
1016kernel_physical_mapping_remove(unsigned long start, unsigned long end)
1017{
1018	start = (unsigned long)__va(start);
1019	end = (unsigned long)__va(end);
1020
1021	remove_pagetable(start, end, true);
1022}
1023
1024int __ref arch_remove_memory(u64 start, u64 size)
1025{
1026	unsigned long start_pfn = start >> PAGE_SHIFT;
1027	unsigned long nr_pages = size >> PAGE_SHIFT;
1028	struct page *page = pfn_to_page(start_pfn);
1029	struct vmem_altmap *altmap;
1030	struct zone *zone;
1031	int ret;
1032
1033	/* With altmap the first mapped page is offset from @start */
1034	altmap = to_vmem_altmap((unsigned long) page);
1035	if (altmap)
1036		page += vmem_altmap_offset(altmap);
1037	zone = page_zone(page);
1038	ret = __remove_pages(zone, start_pfn, nr_pages);
1039	WARN_ON_ONCE(ret);
1040	kernel_physical_mapping_remove(start, start + size);
1041
1042	return ret;
1043}
1044#endif
1045#endif /* CONFIG_MEMORY_HOTPLUG */
1046
1047static struct kcore_list kcore_vsyscall;
1048
1049static void __init register_page_bootmem_info(void)
1050{
1051#ifdef CONFIG_NUMA
1052	int i;
1053
1054	for_each_online_node(i)
1055		register_page_bootmem_info_node(NODE_DATA(i));
1056#endif
1057}
1058
1059void __init mem_init(void)
1060{
1061	pci_iommu_alloc();
1062
 1063	/* clear_bss() already cleared the empty_zero_page */
1064
1065	register_page_bootmem_info();
1066
1067	/* this will put all memory onto the freelists */
1068	free_all_bootmem();
1069	after_bootmem = 1;
1070
1071	/* Register memory areas for /proc/kcore */
1072	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
1073			 PAGE_SIZE, KCORE_OTHER);
1074
1075	mem_init_print_info(NULL);
1076}
1077
1078const int rodata_test_data = 0xC3;
1079EXPORT_SYMBOL_GPL(rodata_test_data);
1080
1081int kernel_set_to_readonly;
1082
1083void set_kernel_text_rw(void)
1084{
1085	unsigned long start = PFN_ALIGN(_text);
1086	unsigned long end = PFN_ALIGN(__stop___ex_table);
1087
1088	if (!kernel_set_to_readonly)
1089		return;
1090
1091	pr_debug("Set kernel text: %lx - %lx for read write\n",
1092		 start, end);
1093
1094	/*
1095	 * Make the kernel identity mapping for text RW. Kernel text
1096	 * mapping will always be RO. Refer to the comment in
1097	 * static_protections() in pageattr.c
1098	 */
1099	set_memory_rw(start, (end - start) >> PAGE_SHIFT);
1100}
1101
1102void set_kernel_text_ro(void)
1103{
1104	unsigned long start = PFN_ALIGN(_text);
1105	unsigned long end = PFN_ALIGN(__stop___ex_table);
1106
1107	if (!kernel_set_to_readonly)
1108		return;
1109
1110	pr_debug("Set kernel text: %lx - %lx for read only\n",
1111		 start, end);
1112
1113	/*
1114	 * Set the kernel identity mapping for text RO.
1115	 */
1116	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
1117}
1118
1119void mark_rodata_ro(void)
1120{
1121	unsigned long start = PFN_ALIGN(_text);
1122	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
1123	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
1124	unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
1125	unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
1126	unsigned long all_end;
1127
1128	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
1129	       (end - start) >> 10);
1130	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
1131
1132	kernel_set_to_readonly = 1;
1133
1134	/*
1135	 * The rodata/data/bss/brk section (but not the kernel text!)
 1136	 * should also be non-executable.
1137	 *
1138	 * We align all_end to PMD_SIZE because the existing mapping
 1139	 * is a full PMD. If we aligned _brk_end to PAGE_SIZE instead, we
 1140	 * would split the PMD, and the remainder between _brk_end and the end
 1141	 * of the PMD would remain mapped executable.
1142	 *
 1143	 * Any PMD which was set up after the one which covers _brk_end
 1144	 * has been zapped already via cleanup_highmap().
1145	 */
1146	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
1147	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
1148
1149	rodata_test();
1150
1151#ifdef CONFIG_CPA_DEBUG
1152	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
1153	set_memory_rw(start, (end-start) >> PAGE_SHIFT);
1154
1155	printk(KERN_INFO "Testing CPA: again\n");
1156	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
1157#endif
1158
1159	free_init_pages("unused kernel",
1160			(unsigned long) __va(__pa_symbol(text_end)),
1161			(unsigned long) __va(__pa_symbol(rodata_start)));
1162	free_init_pages("unused kernel",
1163			(unsigned long) __va(__pa_symbol(rodata_end)),
1164			(unsigned long) __va(__pa_symbol(_sdata)));
1165
1166	debug_checkwx();
1167}
1168
1169int kern_addr_valid(unsigned long addr)
1170{
1171	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
1172	pgd_t *pgd;
1173	pud_t *pud;
1174	pmd_t *pmd;
1175	pte_t *pte;
1176
1177	if (above != 0 && above != -1UL)
1178		return 0;
1179
1180	pgd = pgd_offset_k(addr);
1181	if (pgd_none(*pgd))
1182		return 0;
1183
1184	pud = pud_offset(pgd, addr);
1185	if (pud_none(*pud))
1186		return 0;
1187
1188	if (pud_large(*pud))
1189		return pfn_valid(pud_pfn(*pud));
1190
1191	pmd = pmd_offset(pud, addr);
1192	if (pmd_none(*pmd))
1193		return 0;
1194
1195	if (pmd_large(*pmd))
1196		return pfn_valid(pmd_pfn(*pmd));
1197
1198	pte = pte_offset_kernel(pmd, addr);
1199	if (pte_none(*pte))
1200		return 0;
1201
1202	return pfn_valid(pte_pfn(*pte));
1203}
1204
1205static unsigned long probe_memory_block_size(void)
1206{
1207	unsigned long bz = MIN_MEMORY_BLOCK_SIZE;
1208
1209	/* if system is UV or has 64GB of RAM or more, use large blocks */
1210	if (is_uv_system() || ((max_pfn << PAGE_SHIFT) >= (64UL << 30)))
1211		bz = 2UL << 30; /* 2GB */
1212
1213	pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);
1214
1215	return bz;
1216}
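/*
 * Example: MIN_MEMORY_BLOCK_SIZE is the sparsemem section size (128M on
 * x86_64), so a 32GB non-UV machine gets 128M blocks while a 128GB box
 * gets 2GB blocks, which keeps the number of /sys/devices/system/memory
 * entries manageable.
 */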
1217
1218static unsigned long memory_block_size_probed;
1219unsigned long memory_block_size_bytes(void)
1220{
1221	if (!memory_block_size_probed)
1222		memory_block_size_probed = probe_memory_block_size();
1223
1224	return memory_block_size_probed;
1225}
1226
1227#ifdef CONFIG_SPARSEMEM_VMEMMAP
1228/*
1229 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
1230 */
1231static long __meminitdata addr_start, addr_end;
1232static void __meminitdata *p_start, *p_end;
1233static int __meminitdata node_start;
1234
1235static int __meminit vmemmap_populate_hugepages(unsigned long start,
1236		unsigned long end, int node, struct vmem_altmap *altmap)
1237{
1238	unsigned long addr;
1239	unsigned long next;
1240	pgd_t *pgd;
1241	pud_t *pud;
1242	pmd_t *pmd;
1243
1244	for (addr = start; addr < end; addr = next) {
1245		next = pmd_addr_end(addr, end);
1246
1247		pgd = vmemmap_pgd_populate(addr, node);
1248		if (!pgd)
1249			return -ENOMEM;
1250
1251		pud = vmemmap_pud_populate(pgd, addr, node);
1252		if (!pud)
1253			return -ENOMEM;
1254
1255		pmd = pmd_offset(pud, addr);
1256		if (pmd_none(*pmd)) {
1257			void *p;
1258
1259			p = __vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
1260			if (p) {
1261				pte_t entry;
1262
1263				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
1264						PAGE_KERNEL_LARGE);
1265				set_pmd(pmd, __pmd(pte_val(entry)));
1266
1267				/* check to see if we have contiguous blocks */
1268				if (p_end != p || node_start != node) {
1269					if (p_start)
1270						pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
1271						       addr_start, addr_end-1, p_start, p_end-1, node_start);
1272					addr_start = addr;
1273					node_start = node;
1274					p_start = p;
1275				}
1276
1277				addr_end = addr + PMD_SIZE;
1278				p_end = p + PMD_SIZE;
1279				continue;
1280			} else if (altmap)
1281				return -ENOMEM; /* no fallback */
1282		} else if (pmd_large(*pmd)) {
1283			vmemmap_verify((pte_t *)pmd, node, addr, next);
1284			continue;
1285		}
1286		pr_warn_once("vmemmap: falling back to regular page backing\n");
1287		if (vmemmap_populate_basepages(addr, next, node))
1288			return -ENOMEM;
1289	}
1290	return 0;
1291}
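/*
 * Back-of-the-envelope (assuming sizeof(struct page) == 64): one 2M
 * vmemmap PMD holds 2M / 64 = 32768 struct pages, which describe
 * 32768 * 4K = 128M of memory, so the hugepage path above costs one
 * allocation per 128M of RAM.
 */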
1292
1293int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
1294{
1295	struct vmem_altmap *altmap = to_vmem_altmap(start);
1296	int err;
1297
1298	if (cpu_has_pse)
1299		err = vmemmap_populate_hugepages(start, end, node, altmap);
1300	else if (altmap) {
1301		pr_err_once("%s: no cpu support for altmap allocations\n",
1302				__func__);
1303		err = -ENOMEM;
1304	} else
1305		err = vmemmap_populate_basepages(start, end, node);
1306	if (!err)
1307		sync_global_pgds(start, end - 1, 0);
1308	return err;
1309}
1310
1311#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
1312void register_page_bootmem_memmap(unsigned long section_nr,
1313				  struct page *start_page, unsigned long size)
1314{
1315	unsigned long addr = (unsigned long)start_page;
1316	unsigned long end = (unsigned long)(start_page + size);
1317	unsigned long next;
1318	pgd_t *pgd;
1319	pud_t *pud;
1320	pmd_t *pmd;
1321	unsigned int nr_pages;
1322	struct page *page;
1323
1324	for (; addr < end; addr = next) {
1325		pte_t *pte = NULL;
1326
1327		pgd = pgd_offset_k(addr);
1328		if (pgd_none(*pgd)) {
1329			next = (addr + PAGE_SIZE) & PAGE_MASK;
1330			continue;
1331		}
1332		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);
1333
1334		pud = pud_offset(pgd, addr);
1335		if (pud_none(*pud)) {
1336			next = (addr + PAGE_SIZE) & PAGE_MASK;
1337			continue;
1338		}
1339		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);
1340
1341		if (!cpu_has_pse) {
1342			next = (addr + PAGE_SIZE) & PAGE_MASK;
1343			pmd = pmd_offset(pud, addr);
1344			if (pmd_none(*pmd))
1345				continue;
1346			get_page_bootmem(section_nr, pmd_page(*pmd),
1347					 MIX_SECTION_INFO);
1348
1349			pte = pte_offset_kernel(pmd, addr);
1350			if (pte_none(*pte))
1351				continue;
1352			get_page_bootmem(section_nr, pte_page(*pte),
1353					 SECTION_INFO);
1354		} else {
1355			next = pmd_addr_end(addr, end);
1356
1357			pmd = pmd_offset(pud, addr);
1358			if (pmd_none(*pmd))
1359				continue;
1360
1361			nr_pages = 1 << (get_order(PMD_SIZE));
1362			page = pmd_page(*pmd);
1363			while (nr_pages--)
1364				get_page_bootmem(section_nr, page++,
1365						 SECTION_INFO);
1366		}
1367	}
1368}
1369#endif
1370
1371void __meminit vmemmap_populate_print_last(void)
1372{
1373	if (p_start) {
1374		pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
1375			addr_start, addr_end-1, p_start, p_end-1, node_start);
1376		p_start = NULL;
1377		p_end = NULL;
1378		node_start = 0;
1379	}
1380}
1381#endif
v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/arch/x86_64/mm/init.c
   4 *
   5 *  Copyright (C) 1995  Linus Torvalds
   6 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
   7 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
   8 */
   9
  10#include <linux/signal.h>
  11#include <linux/sched.h>
  12#include <linux/kernel.h>
  13#include <linux/errno.h>
  14#include <linux/string.h>
  15#include <linux/types.h>
  16#include <linux/ptrace.h>
  17#include <linux/mman.h>
  18#include <linux/mm.h>
  19#include <linux/swap.h>
  20#include <linux/smp.h>
  21#include <linux/init.h>
  22#include <linux/initrd.h>
  23#include <linux/pagemap.h>
  24#include <linux/memblock.h>
  25#include <linux/proc_fs.h>
  26#include <linux/pci.h>
  27#include <linux/pfn.h>
  28#include <linux/poison.h>
  29#include <linux/dma-mapping.h>
  30#include <linux/memory.h>
  31#include <linux/memory_hotplug.h>
  32#include <linux/memremap.h>
  33#include <linux/nmi.h>
  34#include <linux/gfp.h>
  35#include <linux/kcore.h>
  36
  37#include <asm/processor.h>
  38#include <asm/bios_ebda.h>
  39#include <linux/uaccess.h>
  40#include <asm/pgtable.h>
  41#include <asm/pgalloc.h>
  42#include <asm/dma.h>
  43#include <asm/fixmap.h>
  44#include <asm/e820/api.h>
  45#include <asm/apic.h>
  46#include <asm/tlb.h>
  47#include <asm/mmu_context.h>
  48#include <asm/proto.h>
  49#include <asm/smp.h>
  50#include <asm/sections.h>
  51#include <asm/kdebug.h>
  52#include <asm/numa.h>
  53#include <asm/set_memory.h>
  54#include <asm/init.h>
  55#include <asm/uv/uv.h>
  56#include <asm/setup.h>
  57
  58#include "mm_internal.h"
  59
  60#include "ident_map.c"
  61
  62#define DEFINE_POPULATE(fname, type1, type2, init)		\
  63static inline void fname##_init(struct mm_struct *mm,		\
  64		type1##_t *arg1, type2##_t *arg2, bool init)	\
  65{								\
  66	if (init)						\
  67		fname##_safe(mm, arg1, arg2);			\
  68	else							\
  69		fname(mm, arg1, arg2);				\
  70}
  71
  72DEFINE_POPULATE(p4d_populate, p4d, pud, init)
  73DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
  74DEFINE_POPULATE(pud_populate, pud, pmd, init)
  75DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init)
  76
  77#define DEFINE_ENTRY(type1, type2, init)			\
  78static inline void set_##type1##_init(type1##_t *arg1,		\
  79			type2##_t arg2, bool init)		\
  80{								\
  81	if (init)						\
  82		set_##type1##_safe(arg1, arg2);			\
  83	else							\
  84		set_##type1(arg1, arg2);			\
  85}
  86
  87DEFINE_ENTRY(p4d, p4d, init)
  88DEFINE_ENTRY(pud, pud, init)
  89DEFINE_ENTRY(pmd, pmd, init)
  90DEFINE_ENTRY(pte, pte, init)
  91
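/*
 * For example, DEFINE_ENTRY(pte, pte, init) above expands to:
 *
 *	static inline void set_pte_init(pte_t *arg1, pte_t arg2, bool init)
 *	{
 *		if (init)
 *			set_pte_safe(arg1, arg2);
 *		else
 *			set_pte(arg1, arg2);
 *	}
 *
 * so the *_init helpers used below pick the _safe (overwrite-checking)
 * variants only while the page tables are first being populated.
 */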
  92
  93/*
   94 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
  95 * physical space so we can cache the place of the first one and move
  96 * around without checking the pgd every time.
  97 */
  98
  99/* Bits supported by the hardware: */
 100pteval_t __supported_pte_mask __read_mostly = ~0;
 101/* Bits allowed in normal kernel mappings: */
 102pteval_t __default_kernel_pte_mask __read_mostly = ~0;
 103EXPORT_SYMBOL_GPL(__supported_pte_mask);
 104/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
 105EXPORT_SYMBOL(__default_kernel_pte_mask);
 106
 107int force_personality32;
 108
 109/*
 110 * noexec32=on|off
  111 * Control the non-executable heap for 32-bit processes.
  112 * To control the stack too, use noexec=off
 113 *
 114 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 115 * off	PROT_READ implies PROT_EXEC
 116 */
 117static int __init nonx32_setup(char *str)
 118{
 119	if (!strcmp(str, "on"))
 120		force_personality32 &= ~READ_IMPLIES_EXEC;
 121	else if (!strcmp(str, "off"))
 122		force_personality32 |= READ_IMPLIES_EXEC;
 123	return 1;
 124}
 125__setup("noexec32=", nonx32_setup);
 126
 127static void sync_global_pgds_l5(unsigned long start, unsigned long end)
 128{
 129	unsigned long addr;
 130
 131	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
 132		const pgd_t *pgd_ref = pgd_offset_k(addr);
 133		struct page *page;
 134
 135		/* Check for overflow */
 136		if (addr < start)
 137			break;
 138
 139		if (pgd_none(*pgd_ref))
 140			continue;
 141
 142		spin_lock(&pgd_lock);
 143		list_for_each_entry(page, &pgd_list, lru) {
 144			pgd_t *pgd;
 145			spinlock_t *pgt_lock;
 146
 147			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
 148			/* the pgt_lock only for Xen */
 149			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 150			spin_lock(pgt_lock);
 151
 152			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
 153				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
 154
 155			if (pgd_none(*pgd))
 156				set_pgd(pgd, *pgd_ref);
 157
 158			spin_unlock(pgt_lock);
 159		}
 160		spin_unlock(&pgd_lock);
 161	}
 162}
 163
 164static void sync_global_pgds_l4(unsigned long start, unsigned long end)
 165{
 166	unsigned long addr;
 167
 168	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
 169		pgd_t *pgd_ref = pgd_offset_k(addr);
 170		const p4d_t *p4d_ref;
 171		struct page *page;
 172
 173		/*
 174		 * With folded p4d, pgd_none() is always false, we need to
  175		 * handle synchronization on p4d level.
 176		 */
 177		MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref));
 178		p4d_ref = p4d_offset(pgd_ref, addr);
 179
 180		if (p4d_none(*p4d_ref))
 181			continue;
 182
 183		spin_lock(&pgd_lock);
 184		list_for_each_entry(page, &pgd_list, lru) {
 185			pgd_t *pgd;
 186			p4d_t *p4d;
 187			spinlock_t *pgt_lock;
 188
 189			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
 190			p4d = p4d_offset(pgd, addr);
 191			/* the pgt_lock only for Xen */
 192			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 193			spin_lock(pgt_lock);
 194
 195			if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
 196				BUG_ON(p4d_page_vaddr(*p4d)
 197				       != p4d_page_vaddr(*p4d_ref));
 198
 199			if (p4d_none(*p4d))
 200				set_p4d(p4d, *p4d_ref);
 201
 202			spin_unlock(pgt_lock);
 203		}
 204		spin_unlock(&pgd_lock);
 205	}
 206}
 207
 208/*
  209 * When memory is added, make sure all the processes' MMs have
 210 * suitable PGD entries in the local PGD level page.
 211 */
 212void sync_global_pgds(unsigned long start, unsigned long end)
 213{
 214	if (pgtable_l5_enabled())
 215		sync_global_pgds_l5(start, end);
 216	else
 217		sync_global_pgds_l4(start, end);
 218}
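/*
 * pgtable_l5_enabled() is fixed at boot: the l5 variant runs only when
 * the kernel is built with CONFIG_X86_5LEVEL and LA57 was enabled during
 * early boot; otherwise the l4 variant runs with a folded p4d level.
 */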
 219
 220/*
  221 * NOTE: This function is marked __ref because it calls an __init function
  222 * (memblock_alloc). It's safe to do it ONLY when after_bootmem == 0.
 223 */
 224static __ref void *spp_getpage(void)
 225{
 226	void *ptr;
 227
 228	if (after_bootmem)
 229		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
 230	else
 231		ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 232
 233	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
 234		panic("set_pte_phys: cannot allocate page data %s\n",
 235			after_bootmem ? "after bootmem" : "");
 236	}
 237
 238	pr_debug("spp_getpage %p\n", ptr);
 239
 240	return ptr;
 241}
 242
 243static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
 244{
 245	if (pgd_none(*pgd)) {
 246		p4d_t *p4d = (p4d_t *)spp_getpage();
 247		pgd_populate(&init_mm, pgd, p4d);
 248		if (p4d != p4d_offset(pgd, 0))
 249			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
 250			       p4d, p4d_offset(pgd, 0));
 251	}
 252	return p4d_offset(pgd, vaddr);
 253}
 254
 255static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
 256{
 257	if (p4d_none(*p4d)) {
 258		pud_t *pud = (pud_t *)spp_getpage();
 259		p4d_populate(&init_mm, p4d, pud);
 260		if (pud != pud_offset(p4d, 0))
 261			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
 262			       pud, pud_offset(p4d, 0));
 263	}
 264	return pud_offset(p4d, vaddr);
 265}
 266
 267static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
 268{
 269	if (pud_none(*pud)) {
 270		pmd_t *pmd = (pmd_t *) spp_getpage();
 271		pud_populate(&init_mm, pud, pmd);
 272		if (pmd != pmd_offset(pud, 0))
 273			printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n",
 274			       pmd, pmd_offset(pud, 0));
 275	}
 276	return pmd_offset(pud, vaddr);
 277}
 278
 279static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
 280{
 281	if (pmd_none(*pmd)) {
 282		pte_t *pte = (pte_t *) spp_getpage();
 283		pmd_populate_kernel(&init_mm, pmd, pte);
 284		if (pte != pte_offset_kernel(pmd, 0))
 285			printk(KERN_ERR "PAGETABLE BUG #03!\n");
 286	}
 287	return pte_offset_kernel(pmd, vaddr);
 288}
 289
 290static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
 291{
 292	pmd_t *pmd = fill_pmd(pud, vaddr);
 293	pte_t *pte = fill_pte(pmd, vaddr);
 294
 295	set_pte(pte, new_pte);
 296
 297	/*
 298	 * It's enough to flush this one mapping.
 299	 * (PGE mappings get flushed as well)
 300	 */
 301	__flush_tlb_one_kernel(vaddr);
 302}
 303
 304void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
 305{
 306	p4d_t *p4d = p4d_page + p4d_index(vaddr);
 307	pud_t *pud = fill_pud(p4d, vaddr);
 308
 309	__set_pte_vaddr(pud, vaddr, new_pte);
 310}
 311
 312void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
 313{
 314	pud_t *pud = pud_page + pud_index(vaddr);
 315
 316	__set_pte_vaddr(pud, vaddr, new_pte);
 317}
 318
 319void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
 320{
 321	pgd_t *pgd;
 322	p4d_t *p4d_page;
 323
 324	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));
 325
 326	pgd = pgd_offset_k(vaddr);
 327	if (pgd_none(*pgd)) {
 328		printk(KERN_ERR
 329			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
 330		return;
 331	}
 332
 333	p4d_page = p4d_offset(pgd, 0);
 334	set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
 335}
 336
 337pmd_t * __init populate_extra_pmd(unsigned long vaddr)
 338{
 339	pgd_t *pgd;
 340	p4d_t *p4d;
 341	pud_t *pud;
 342
 343	pgd = pgd_offset_k(vaddr);
 344	p4d = fill_p4d(pgd, vaddr);
 345	pud = fill_pud(p4d, vaddr);
 346	return fill_pmd(pud, vaddr);
 347}
 348
 349pte_t * __init populate_extra_pte(unsigned long vaddr)
 350{
 351	pmd_t *pmd;
 352
 353	pmd = populate_extra_pmd(vaddr);
 354	return fill_pte(pmd, vaddr);
 355}
 356
 357/*
 358 * Create large page table mappings for a range of physical addresses.
 359 */
 360static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
 361					enum page_cache_mode cache)
 362{
 363	pgd_t *pgd;
 364	p4d_t *p4d;
 365	pud_t *pud;
 366	pmd_t *pmd;
 367	pgprot_t prot;
 368
 369	pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
 370		pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
 371	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
 372	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
 373		pgd = pgd_offset_k((unsigned long)__va(phys));
 374		if (pgd_none(*pgd)) {
 375			p4d = (p4d_t *) spp_getpage();
 376			set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE |
 377						_PAGE_USER));
 378		}
 379		p4d = p4d_offset(pgd, (unsigned long)__va(phys));
 380		if (p4d_none(*p4d)) {
 381			pud = (pud_t *) spp_getpage();
 382			set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE |
 383						_PAGE_USER));
 384		}
 385		pud = pud_offset(p4d, (unsigned long)__va(phys));
 386		if (pud_none(*pud)) {
 387			pmd = (pmd_t *) spp_getpage();
 388			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
 389						_PAGE_USER));
 390		}
 391		pmd = pmd_offset(pud, phys);
 392		BUG_ON(!pmd_none(*pmd));
 393		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
 394	}
 395}
 396
 397void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
 398{
 399	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
 400}
 401
 402void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
 403{
 404	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
 405}
 406
 407/*
 408 * The head.S code sets up the kernel high mapping:
 409 *
 410 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 411 *
 412 * phys_base holds the negative offset to the kernel, which is added
 413 * to the compile time generated pmds. This results in invalid pmds up
 414 * to the point where we hit the physaddr 0 mapping.
 415 *
 416 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 417 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 418 * well, as they are located before _text:
 419 */
 420void __init cleanup_highmap(void)
 421{
 422	unsigned long vaddr = __START_KERNEL_map;
 423	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
 424	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
 425	pmd_t *pmd = level2_kernel_pgt;
 426
 427	/*
 428	 * Native path, max_pfn_mapped is not set yet.
 429	 * Xen has valid max_pfn_mapped set in
 430	 *	arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
 431	 */
 432	if (max_pfn_mapped)
 433		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
 434
 435	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
 436		if (pmd_none(*pmd))
 437			continue;
 438		if (vaddr < (unsigned long) _text || vaddr > end)
 439			set_pmd(pmd, __pmd(0));
 440	}
 441}
 442
 443/*
 444 * Create PTE level page table mapping for physical addresses.
 445 * It returns the last physical address mapped.
 446 */
 447static unsigned long __meminit
 448phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
 449	      pgprot_t prot, bool init)
 450{
 451	unsigned long pages = 0, paddr_next;
 452	unsigned long paddr_last = paddr_end;
 453	pte_t *pte;
 454	int i;
 455
 456	pte = pte_page + pte_index(paddr);
 457	i = pte_index(paddr);
 458
 459	for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
 460		paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
 461		if (paddr >= paddr_end) {
 462			if (!after_bootmem &&
 463			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
 464					     E820_TYPE_RAM) &&
 465			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
 466					     E820_TYPE_RESERVED_KERN))
 467				set_pte_init(pte, __pte(0), init);
 468			continue;
 469		}
 470
 471		/*
 472		 * We will re-use the existing mapping.
 473		 * Xen for example has some special requirements, like mapping
 474		 * pagetable pages as RO. So assume someone who pre-setup
 475		 * these mappings are more intelligent.
 476		 */
 477		if (!pte_none(*pte)) {
 478			if (!after_bootmem)
 479				pages++;
 480			continue;
 481		}
 482
 483		if (0)
 484			pr_info("   pte=%p addr=%lx pte=%016lx\n", pte, paddr,
 485				pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
 486		pages++;
 487		set_pte_init(pte, pfn_pte(paddr >> PAGE_SHIFT, prot), init);
 488		paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
 489	}
 490
 491	update_page_count(PG_LEVEL_4K, pages);
 492
 493	return paddr_last;
 494}
 495
 496/*
 497 * Create PMD level page table mapping for physical addresses. The virtual
  498 * and physical addresses have to be aligned at this level.
 499 * It returns the last physical address mapped.
 500 */
 501static unsigned long __meminit
 502phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
 503	      unsigned long page_size_mask, pgprot_t prot, bool init)
 504{
 505	unsigned long pages = 0, paddr_next;
 506	unsigned long paddr_last = paddr_end;
 507
 508	int i = pmd_index(paddr);
 509
 510	for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
 511		pmd_t *pmd = pmd_page + pmd_index(paddr);
 512		pte_t *pte;
 513		pgprot_t new_prot = prot;
 514
 515		paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
 516		if (paddr >= paddr_end) {
 517			if (!after_bootmem &&
 518			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
 519					     E820_TYPE_RAM) &&
 520			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
 521					     E820_TYPE_RESERVED_KERN))
 522				set_pmd_init(pmd, __pmd(0), init);
 523			continue;
 524		}
 525
 526		if (!pmd_none(*pmd)) {
 527			if (!pmd_large(*pmd)) {
 528				spin_lock(&init_mm.page_table_lock);
 529				pte = (pte_t *)pmd_page_vaddr(*pmd);
 530				paddr_last = phys_pte_init(pte, paddr,
 531							   paddr_end, prot,
 532							   init);
 533				spin_unlock(&init_mm.page_table_lock);
 534				continue;
 535			}
 536			/*
 537			 * If we are ok with PG_LEVEL_2M mapping, then we will
  538			 * use the existing mapping.
 539			 *
 540			 * Otherwise, we will split the large page mapping but
 541			 * use the same existing protection bits except for
 542			 * large page, so that we don't violate Intel's TLB
 543			 * Application note (317080) which says, while changing
 544			 * the page sizes, new and old translations should
 545			 * not differ with respect to page frame and
 546			 * attributes.
 547			 */
 548			if (page_size_mask & (1 << PG_LEVEL_2M)) {
 549				if (!after_bootmem)
 550					pages++;
 551				paddr_last = paddr_next;
 552				continue;
 553			}
 554			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
 555		}
 556
 557		if (page_size_mask & (1<<PG_LEVEL_2M)) {
 558			pages++;
 559			spin_lock(&init_mm.page_table_lock);
 560			set_pte_init((pte_t *)pmd,
 561				     pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
 562					     __pgprot(pgprot_val(prot) | _PAGE_PSE)),
 563				     init);
 564			spin_unlock(&init_mm.page_table_lock);
 565			paddr_last = paddr_next;
 566			continue;
 567		}
 568
 569		pte = alloc_low_page();
 570		paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot, init);
 571
 572		spin_lock(&init_mm.page_table_lock);
 573		pmd_populate_kernel_init(&init_mm, pmd, pte, init);
 574		spin_unlock(&init_mm.page_table_lock);
 575	}
 576	update_page_count(PG_LEVEL_2M, pages);
 577	return paddr_last;
 578}
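/*
 * A sketch of the two paths above (the values are hypothetical): with
 * (1 << PG_LEVEL_2M) set in page_size_mask, a call such as
 *
 *	phys_pmd_init(pmd_page, 0x40000000, 0x40200000,
 *		      1 << PG_LEVEL_2M, PAGE_KERNEL, true);
 *
 * installs one 2 MiB PSE entry for the range, while a mask of 0 makes
 * it allocate a PTE page and fall back to 512 4 KiB mappings via
 * phys_pte_init().
 */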
 579
 580/*
 581 * Create PUD level page table mapping for physical addresses. The virtual
 582 * and physical addresses do not have to be aligned at this level. KASLR can
 583 * randomize virtual addresses up to this level.
 584 * It returns the last physical address mapped.
 585 */
 586static unsigned long __meminit
 587phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 588	      unsigned long page_size_mask, bool init)
 589{
 590	unsigned long pages = 0, paddr_next;
 591	unsigned long paddr_last = paddr_end;
 592	unsigned long vaddr = (unsigned long)__va(paddr);
 593	int i = pud_index(vaddr);
 594
 595	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
 596		pud_t *pud;
 597		pmd_t *pmd;
 598		pgprot_t prot = PAGE_KERNEL;
 599
 600		vaddr = (unsigned long)__va(paddr);
 601		pud = pud_page + pud_index(vaddr);
 602		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
 603
 604		if (paddr >= paddr_end) {
 605			if (!after_bootmem &&
 606			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
 607					     E820_TYPE_RAM) &&
 608			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
 609					     E820_TYPE_RESERVED_KERN))
 610				set_pud_init(pud, __pud(0), init);
 611			continue;
 612		}
 613
 614		if (!pud_none(*pud)) {
 615			if (!pud_large(*pud)) {
 616				pmd = pmd_offset(pud, 0);
 617				paddr_last = phys_pmd_init(pmd, paddr,
 618							   paddr_end,
 619							   page_size_mask,
 620							   prot, init);
 621				continue;
 622			}
 623			/*
 624			 * If we are OK with a PG_LEVEL_1G mapping, we will
 625			 * reuse the existing mapping.
 626			 *
 627			 * Otherwise, we will split the gbpage mapping but
 628			 * keep the existing protection bits (minus the
 629			 * large-page bit), so that we don't violate Intel's
 630			 * TLB Application Note (317080), which says that when
 631			 * changing page sizes the new and old translations
 632			 * must not differ with respect to page frame and
 633			 * attributes.
 634			 */
 635			if (page_size_mask & (1 << PG_LEVEL_1G)) {
 636				if (!after_bootmem)
 637					pages++;
 638				paddr_last = paddr_next;
 639				continue;
 640			}
 641			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
 642		}
 643
 644		if (page_size_mask & (1<<PG_LEVEL_1G)) {
 645			pages++;
 646			spin_lock(&init_mm.page_table_lock);
 647			set_pte_init((pte_t *)pud,
 648				     pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
 649					     PAGE_KERNEL_LARGE),
 650				     init);
 651			spin_unlock(&init_mm.page_table_lock);
 652			paddr_last = paddr_next;
 653			continue;
 654		}
 655
 656		pmd = alloc_low_page();
 657		paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
 658					   page_size_mask, prot, init);
 659
 660		spin_lock(&init_mm.page_table_lock);
 661		pud_populate_init(&init_mm, pud, pmd, init);
 662		spin_unlock(&init_mm.page_table_lock);
 663	}
 664
 665	update_page_count(PG_LEVEL_1G, pages);
 666
 667	return paddr_last;
 668}
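/*
 * Hypothetical example of why the PUD slot comes from the virtual
 * address: with the direct-map base randomized by KASLR so that
 * __va(0) == 0xffff908000000000, physical address 0x80000000 lands in
 * slot pud_index(0xffff908080000000), which generally differs from
 * pud_index(0x80000000).
 */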
 669
 670static unsigned long __meminit
 671phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
 672	      unsigned long page_size_mask, bool init)
 673{
 674	unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last;
 675
 676	paddr_last = paddr_end;
 677	vaddr = (unsigned long)__va(paddr);
 678	vaddr_end = (unsigned long)__va(paddr_end);
 679
 680	if (!pgtable_l5_enabled())
 681		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
 682				     page_size_mask, init);
 683
 684	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
 685		p4d_t *p4d = p4d_page + p4d_index(vaddr);
 686		pud_t *pud;
 687
 688		vaddr_next = (vaddr & P4D_MASK) + P4D_SIZE;
 689		paddr = __pa(vaddr);
 690
 691		if (paddr >= paddr_end) {
 692			paddr_next = __pa(vaddr_next);
 693			if (!after_bootmem &&
 694			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
 695					     E820_TYPE_RAM) &&
 696			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
 697					     E820_TYPE_RESERVED_KERN))
 698				set_p4d_init(p4d, __p4d(0), init);
 699			continue;
 700		}
 701
 702		if (!p4d_none(*p4d)) {
 703			pud = pud_offset(p4d, 0);
 704			paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
 705					page_size_mask, init);
 706			continue;
 707		}
 708
 709		pud = alloc_low_page();
 710		paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
 711					   page_size_mask, init);
 712
 713		spin_lock(&init_mm.page_table_lock);
 714		p4d_populate_init(&init_mm, p4d, pud, init);
 715		spin_unlock(&init_mm.page_table_lock);
 716	}
 717
 718	return paddr_last;
 719}
 720
 721static unsigned long __meminit
 722__kernel_physical_mapping_init(unsigned long paddr_start,
 723			       unsigned long paddr_end,
 724			       unsigned long page_size_mask,
 725			       bool init)
 726{
 727	bool pgd_changed = false;
 728	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
 729
 730	paddr_last = paddr_end;
 731	vaddr = (unsigned long)__va(paddr_start);
 732	vaddr_end = (unsigned long)__va(paddr_end);
 733	vaddr_start = vaddr;
 734
 735	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
 736		pgd_t *pgd = pgd_offset_k(vaddr);
 737		p4d_t *p4d;
 738
 739		vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;
 740
 741		if (pgd_val(*pgd)) {
 742			p4d = (p4d_t *)pgd_page_vaddr(*pgd);
 743			paddr_last = phys_p4d_init(p4d, __pa(vaddr),
 744						   __pa(vaddr_end),
 745						   page_size_mask,
 746						   init);
 747			continue;
 748		}
 749
 750		p4d = alloc_low_page();
 751		paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
 752					   page_size_mask, init);
 753
 754		spin_lock(&init_mm.page_table_lock);
 755		if (pgtable_l5_enabled())
 756			pgd_populate_init(&init_mm, pgd, p4d, init);
 757		else
 758			p4d_populate_init(&init_mm, p4d_offset(pgd, vaddr),
 759					  (pud_t *) p4d, init);
 760
 761		spin_unlock(&init_mm.page_table_lock);
 762		pgd_changed = true;
 763	}
 764
 765	if (pgd_changed)
 766		sync_global_pgds(vaddr_start, vaddr_end - 1);
 767
 768	return paddr_last;
 769}
 770
 771
 772/*
 773 * Create page table mappings for the physical memory at specific physical
 774 * addresses. Note that it can only be used to populate non-present entries.
 775 * The virtual and physical addresses have to be aligned down to PMD
 776 * level. It returns the last physical address mapped.
 777 */
 778unsigned long __meminit
 779kernel_physical_mapping_init(unsigned long paddr_start,
 780			     unsigned long paddr_end,
 781			     unsigned long page_size_mask)
 782{
 783	return __kernel_physical_mapping_init(paddr_start, paddr_end,
 784					      page_size_mask, true);
 785}
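/*
 * A hedged usage sketch (the mask construction is an assumption, not
 * the exact caller): boot code derives page_size_mask from CPU
 * features before mapping a range, e.g.
 *
 *	mask  = boot_cpu_has(X86_FEATURE_PSE) ? 1 << PG_LEVEL_2M : 0;
 *	mask |= boot_cpu_has(X86_FEATURE_GBPAGES) ? 1 << PG_LEVEL_1G : 0;
 *	kernel_physical_mapping_init(0, 0x100000000UL, mask);
 *
 * would map the first 4 GiB with the largest page sizes available.
 */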
 786
 787/*
 788 * This function is similar to kernel_physical_mapping_init() above with the
 789 * exception that it uses set_{pud,pmd}() instead of the set_{pud,pmd}_safe()
 790 * variants when updating the mapping. The caller is responsible for flushing
 791 * the TLBs after the function returns.
 792 */
 793unsigned long __meminit
 794kernel_physical_mapping_change(unsigned long paddr_start,
 795			       unsigned long paddr_end,
 796			       unsigned long page_size_mask)
 797{
 798	return __kernel_physical_mapping_init(paddr_start, paddr_end,
 799					      page_size_mask, false);
 800}
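/*
 * Sketch of the caller's side of the contract stated above: because
 * the non-_safe setters may overwrite live entries, stale translations
 * must be flushed by the caller, e.g.
 *
 *	kernel_physical_mapping_change(start, end, page_size_mask);
 *	flush_tlb_all();
 */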
 801
 802#ifndef CONFIG_NUMA
 803void __init initmem_init(void)
 804{
 805	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
 806}
 807#endif
 808
 809void __init paging_init(void)
 810{
 811	sparse_memory_present_with_active_regions(MAX_NUMNODES);
 812	sparse_init();
 813
 814	/*
 815	 * Clear the default state for node 0.
 816	 * Note: don't use nodes_clear() here; when NUMA support is not
 817	 *	 compiled in it really clears the state, and a later
 818	 *	 node_set_state() will not set it back.
 819	 */
 820	node_clear_state(0, N_MEMORY);
 821	if (N_MEMORY != N_NORMAL_MEMORY)
 822		node_clear_state(0, N_NORMAL_MEMORY);
 823
 824	zone_sizes_init();
 825}
 826
 827/*
 828 * Memory hotplug specific functions
 829 */
 830#ifdef CONFIG_MEMORY_HOTPLUG
 831/*
 832 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 833 * updating.
 834 */
 835static void update_end_of_memory_vars(u64 start, u64 size)
 836{
 837	unsigned long end_pfn = PFN_UP(start + size);
 838
 839	if (end_pfn > max_pfn) {
 840		max_pfn = end_pfn;
 841		max_low_pfn = end_pfn;
 842		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
 843	}
 844}
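/*
 * Worked example: hot-adding 1 GiB at physical 4 GiB gives
 * end_pfn = PFN_UP(0x100000000 + 0x40000000) = 0x140000; if that
 * exceeds the current max_pfn, both max_pfn and max_low_pfn move up
 * and high_memory becomes (effectively) __va(0x140000000).
 */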
 845
 846int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
 847				struct mhp_restrictions *restrictions)
 848{
 849	int ret;
 850
 851	ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
 852	WARN_ON_ONCE(ret);
 853
 854	/* update max_pfn, max_low_pfn and high_memory */
 855	update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
 856				  nr_pages << PAGE_SHIFT);
 857
 858	return ret;
 859}
 860
 861int arch_add_memory(int nid, u64 start, u64 size,
 862			struct mhp_restrictions *restrictions)
 863{
 864	unsigned long start_pfn = start >> PAGE_SHIFT;
 865	unsigned long nr_pages = size >> PAGE_SHIFT;
 866
 867	init_memory_mapping(start, start + size);
 868
 869	return add_pages(nid, start_pfn, nr_pages, restrictions);
 870}
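/*
 * A hypothetical invocation, as the generic hotplug core might issue
 * it for a 2 GiB DIMM hot-added at physical 16 GiB:
 *
 *	arch_add_memory(nid, 0x400000000ULL, 0x80000000ULL, &restrictions);
 *
 * which first extends the direct mapping and then hands the pfn range
 * to __add_pages() via add_pages().
 */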
 871
 872#define PAGE_INUSE 0xFD
 873
 874static void __meminit free_pagetable(struct page *page, int order)
 875{
 876	unsigned long magic;
 877	unsigned int nr_pages = 1 << order;
 878
 879	/* bootmem page has reserved flag */
 880	if (PageReserved(page)) {
 881		__ClearPageReserved(page);
 882
 883		magic = (unsigned long)page->freelist;
 884		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
 885			while (nr_pages--)
 886				put_page_bootmem(page++);
 887		} else
 888			while (nr_pages--)
 889				free_reserved_page(page++);
 890	} else
 891		free_pages((unsigned long)page_address(page), order);
 892}
 893
 894static void __meminit free_hugepage_table(struct page *page,
 895		struct vmem_altmap *altmap)
 896{
 897	if (altmap)
 898		vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
 899	else
 900		free_pagetable(page, get_order(PMD_SIZE));
 901}
 902
 903static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
 904{
 905	pte_t *pte;
 906	int i;
 907
 908	for (i = 0; i < PTRS_PER_PTE; i++) {
 909		pte = pte_start + i;
 910		if (!pte_none(*pte))
 911			return;
 912	}
 913
 914	/* free a pte table */
 915	free_pagetable(pmd_page(*pmd), 0);
 916	spin_lock(&init_mm.page_table_lock);
 917	pmd_clear(pmd);
 918	spin_unlock(&init_mm.page_table_lock);
 919}
 920
 921static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
 922{
 923	pmd_t *pmd;
 924	int i;
 925
 926	for (i = 0; i < PTRS_PER_PMD; i++) {
 927		pmd = pmd_start + i;
 928		if (!pmd_none(*pmd))
 929			return;
 930	}
 931
 932	/* free a pmd table */
 933	free_pagetable(pud_page(*pud), 0);
 934	spin_lock(&init_mm.page_table_lock);
 935	pud_clear(pud);
 936	spin_unlock(&init_mm.page_table_lock);
 937}
 938
 939static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
 940{
 941	pud_t *pud;
 942	int i;
 943
 944	for (i = 0; i < PTRS_PER_PUD; i++) {
 945		pud = pud_start + i;
 946		if (!pud_none(*pud))
 947			return;
 948	}
 949
 950	/* free a pud table */
 951	free_pagetable(p4d_page(*p4d), 0);
 952	spin_lock(&init_mm.page_table_lock);
 953	p4d_clear(p4d);
 954	spin_unlock(&init_mm.page_table_lock);
 955}
 956
 957static void __meminit
 958remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
 959		 bool direct)
 960{
 961	unsigned long next, pages = 0;
 962	pte_t *pte;
 963	void *page_addr;
 964	phys_addr_t phys_addr;
 965
 966	pte = pte_start + pte_index(addr);
 967	for (; addr < end; addr = next, pte++) {
 968		next = (addr + PAGE_SIZE) & PAGE_MASK;
 969		if (next > end)
 970			next = end;
 971
 972		if (!pte_present(*pte))
 973			continue;
 974
 975		/*
 976		 * We mapped [0,1G) memory as identity mapping when
 977		 * initializing, in arch/x86/kernel/head_64.S. These
 978		 * pagetables cannot be removed.
 979		 */
 980		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
 981		if (phys_addr < (phys_addr_t)0x40000000)
 982			return;
 983
 984		if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
 985			/*
 986			 * Do not free direct mapping pages since they were
 987			 * freed when offlining, or were simply not in use.
 988			 */
 989			if (!direct)
 990				free_pagetable(pte_page(*pte), 0);
 991
 992			spin_lock(&init_mm.page_table_lock);
 993			pte_clear(&init_mm, addr, pte);
 994			spin_unlock(&init_mm.page_table_lock);
 995
 996			/* For non-direct mappings, the pages count means nothing. */
 997			pages++;
 998		} else {
 999			/*
1000			 * If we are here, we are freeing vmemmap pages since
1001			 * direct mapped memory ranges to be freed are aligned.
1002			 *
1003			 * If we are not removing the whole page, it means
1004			 * other page structs in this page are being used and
1005			 * we cannot remove them. So fill the unused page
1006			 * structs with 0xFD, and remove the page when it is
1007			 * wholly filled with 0xFD.
1008			 */
1009			memset((void *)addr, PAGE_INUSE, next - addr);
1010
1011			page_addr = page_address(pte_page(*pte));
1012			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
1013				free_pagetable(pte_page(*pte), 0);
1014
1015				spin_lock(&init_mm.page_table_lock);
1016				pte_clear(&init_mm, addr, pte);
1017				spin_unlock(&init_mm.page_table_lock);
1018			}
1019		}
1020	}
1021
1022	/* free_pte_table() is called from remove_pmd_table(). */
1023	flush_tlb_all();
1024	if (direct)
1025		update_page_count(PG_LEVEL_4K, -pages);
1026}
1027
1028static void __meminit
1029remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
1030		 bool direct, struct vmem_altmap *altmap)
1031{
1032	unsigned long next, pages = 0;
1033	pte_t *pte_base;
1034	pmd_t *pmd;
1035	void *page_addr;
1036
1037	pmd = pmd_start + pmd_index(addr);
1038	for (; addr < end; addr = next, pmd++) {
1039		next = pmd_addr_end(addr, end);
1040
1041		if (!pmd_present(*pmd))
1042			continue;
1043
1044		if (pmd_large(*pmd)) {
1045			if (IS_ALIGNED(addr, PMD_SIZE) &&
1046			    IS_ALIGNED(next, PMD_SIZE)) {
1047				if (!direct)
1048					free_hugepage_table(pmd_page(*pmd),
1049							    altmap);
1050
1051				spin_lock(&init_mm.page_table_lock);
1052				pmd_clear(pmd);
1053				spin_unlock(&init_mm.page_table_lock);
1054				pages++;
1055			} else {
1056				/* If here, we are freeing vmemmap pages. */
1057				memset((void *)addr, PAGE_INUSE, next - addr);
1058
1059				page_addr = page_address(pmd_page(*pmd));
1060				if (!memchr_inv(page_addr, PAGE_INUSE,
1061						PMD_SIZE)) {
1062					free_hugepage_table(pmd_page(*pmd),
1063							    altmap);
1064
1065					spin_lock(&init_mm.page_table_lock);
1066					pmd_clear(pmd);
1067					spin_unlock(&init_mm.page_table_lock);
1068				}
1069			}
1070
1071			continue;
1072		}
1073
1074		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
1075		remove_pte_table(pte_base, addr, next, direct);
1076		free_pte_table(pte_base, pmd);
1077	}
1078
1079	/* free_pmd_table() is called from remove_pud_table(). */
1080	if (direct)
1081		update_page_count(PG_LEVEL_2M, -pages);
1082}
1083
1084static void __meminit
1085remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
1086		 struct vmem_altmap *altmap, bool direct)
1087{
1088	unsigned long next, pages = 0;
1089	pmd_t *pmd_base;
1090	pud_t *pud;
1091	void *page_addr;
1092
1093	pud = pud_start + pud_index(addr);
1094	for (; addr < end; addr = next, pud++) {
1095		next = pud_addr_end(addr, end);
1096
1097		if (!pud_present(*pud))
1098			continue;
1099
1100		if (pud_large(*pud)) {
1101			if (IS_ALIGNED(addr, PUD_SIZE) &&
1102			    IS_ALIGNED(next, PUD_SIZE)) {
1103				if (!direct)
1104					free_pagetable(pud_page(*pud),
1105						       get_order(PUD_SIZE));
1106
1107				spin_lock(&init_mm.page_table_lock);
1108				pud_clear(pud);
1109				spin_unlock(&init_mm.page_table_lock);
1110				pages++;
1111			} else {
1112				/* If here, we are freeing vmemmap pages. */
1113				memset((void *)addr, PAGE_INUSE, next - addr);
1114
1115				page_addr = page_address(pud_page(*pud));
1116				if (!memchr_inv(page_addr, PAGE_INUSE,
1117						PUD_SIZE)) {
1118					free_pagetable(pud_page(*pud),
1119						       get_order(PUD_SIZE));
1120
1121					spin_lock(&init_mm.page_table_lock);
1122					pud_clear(pud);
1123					spin_unlock(&init_mm.page_table_lock);
1124				}
1125			}
1126
1127			continue;
1128		}
1129
1130		pmd_base = pmd_offset(pud, 0);
1131		remove_pmd_table(pmd_base, addr, next, direct, altmap);
1132		free_pmd_table(pmd_base, pud);
1133	}
1134
1135	if (direct)
1136		update_page_count(PG_LEVEL_1G, -pages);
1137}
1138
1139static void __meminit
1140remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
1141		 struct vmem_altmap *altmap, bool direct)
1142{
1143	unsigned long next, pages = 0;
1144	pud_t *pud_base;
1145	p4d_t *p4d;
1146
1147	p4d = p4d_start + p4d_index(addr);
1148	for (; addr < end; addr = next, p4d++) {
1149		next = p4d_addr_end(addr, end);
1150
1151		if (!p4d_present(*p4d))
1152			continue;
1153
1154		BUILD_BUG_ON(p4d_large(*p4d));
1155
1156		pud_base = pud_offset(p4d, 0);
1157		remove_pud_table(pud_base, addr, next, altmap, direct);
1158		/*
1159		 * For 4-level page tables we do not want to free PUDs, but in the
1160		 * 5-level case we should free them. This code will have to change
1161		 * to adapt for boot-time switching between 4 and 5 level page tables.
1162		 */
1163		if (pgtable_l5_enabled())
1164			free_pud_table(pud_base, p4d);
1165	}
1166
1167	if (direct)
1168		update_page_count(PG_LEVEL_512G, -pages);
1169}
1170
1171/* start and end are both virtual addresses. */
1172static void __meminit
1173remove_pagetable(unsigned long start, unsigned long end, bool direct,
1174		struct vmem_altmap *altmap)
1175{
1176	unsigned long next;
1177	unsigned long addr;
1178	pgd_t *pgd;
1179	p4d_t *p4d;
1180
1181	for (addr = start; addr < end; addr = next) {
1182		next = pgd_addr_end(addr, end);
1183
1184		pgd = pgd_offset_k(addr);
1185		if (!pgd_present(*pgd))
1186			continue;
1187
1188		p4d = p4d_offset(pgd, 0);
1189		remove_p4d_table(p4d, addr, next, altmap, direct);
1190	}
1191
1192	flush_tlb_all();
1193}
1194
1195void __ref vmemmap_free(unsigned long start, unsigned long end,
1196		struct vmem_altmap *altmap)
1197{
1198	remove_pagetable(start, end, false, altmap);
1199}
1200
1201static void __meminit
1202kernel_physical_mapping_remove(unsigned long start, unsigned long end)
1203{
1204	start = (unsigned long)__va(start);
1205	end = (unsigned long)__va(end);
1206
1207	remove_pagetable(start, end, true, NULL);
1208}
1209
1210void __ref arch_remove_memory(int nid, u64 start, u64 size,
1211			      struct vmem_altmap *altmap)
1212{
1213	unsigned long start_pfn = start >> PAGE_SHIFT;
1214	unsigned long nr_pages = size >> PAGE_SHIFT;
1215	struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
1216	struct zone *zone = page_zone(page);
1217
1218	__remove_pages(zone, start_pfn, nr_pages, altmap);
1219	kernel_physical_mapping_remove(start, start + size);
1220}
1221#endif /* CONFIG_MEMORY_HOTPLUG */
1222
1223static struct kcore_list kcore_vsyscall;
1224
1225static void __init register_page_bootmem_info(void)
1226{
1227#ifdef CONFIG_NUMA
1228	int i;
1229
1230	for_each_online_node(i)
1231		register_page_bootmem_info_node(NODE_DATA(i));
1232#endif
1233}
1234
1235void __init mem_init(void)
1236{
1237	pci_iommu_alloc();
1238
1239	/* clear_bss() already cleared the empty_zero_page */
1240
1241	/* this will put all memory onto the freelists */
1242	memblock_free_all();
1243	after_bootmem = 1;
1244	x86_init.hyper.init_after_bootmem();
1245
1246	/*
1247	 * Must be done after boot memory is put on freelist, because here we
1248	 * might set fields in deferred struct pages that have not yet been
1249	 * initialized, and memblock_free_all() initializes all the reserved
1250	 * deferred pages for us.
1251	 */
1252	register_page_bootmem_info();
1253
1254	/* Register memory areas for /proc/kcore */
1255	if (get_gate_vma(&init_mm))
1256		kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);
1257
1258	mem_init_print_info(NULL);
1259}
1260
1261int kernel_set_to_readonly;
1262
1263void set_kernel_text_rw(void)
1264{
1265	unsigned long start = PFN_ALIGN(_text);
1266	unsigned long end = PFN_ALIGN(__stop___ex_table);
1267
1268	if (!kernel_set_to_readonly)
1269		return;
1270
1271	pr_debug("Set kernel text: %lx - %lx for read write\n",
1272		 start, end);
1273
1274	/*
1275	 * Make the kernel identity mapping for text RW. Kernel text
1276	 * mapping will always be RO. Refer to the comment in
1277	 * static_protections() in pageattr.c
1278	 */
1279	set_memory_rw(start, (end - start) >> PAGE_SHIFT);
1280}
1281
1282void set_kernel_text_ro(void)
1283{
1284	unsigned long start = PFN_ALIGN(_text);
1285	unsigned long end = PFN_ALIGN(__stop___ex_table);
1286
1287	if (!kernel_set_to_readonly)
1288		return;
1289
1290	pr_debug("Set kernel text: %lx - %lx for read only\n",
1291		 start, end);
1292
1293	/*
1294	 * Set the kernel identity mapping for text RO.
1295	 */
1296	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
1297}
1298
1299void mark_rodata_ro(void)
1300{
1301	unsigned long start = PFN_ALIGN(_text);
1302	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
1303	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
1304	unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
1305	unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
1306	unsigned long all_end;
1307
1308	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
1309	       (end - start) >> 10);
1310	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
1311
1312	kernel_set_to_readonly = 1;
1313
1314	/*
1315	 * The rodata/data/bss/brk section (but not the kernel text!)
1316	 * should also be not-executable.
1317	 *
1318	 * We align all_end to PMD_SIZE because the existing mapping
1319	 * is a full PMD. If we aligned _brk_end to PAGE_SIZE instead,
1320	 * we would split the PMD, and the remainder between _brk_end
1321	 * and the end of the PMD would remain mapped executable.
1322	 *
1323	 * Any PMD which was set up after the one which covers _brk_end
1324	 * has been zapped already via cleanup_highmap().
1325	 */
1326	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
1327	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
1328
1329#ifdef CONFIG_CPA_DEBUG
1330	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
1331	set_memory_rw(start, (end-start) >> PAGE_SHIFT);
1332
1333	printk(KERN_INFO "Testing CPA: again\n");
1334	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
1335#endif
1336
1337	free_kernel_image_pages((void *)text_end, (void *)rodata_start);
1338	free_kernel_image_pages((void *)rodata_end, (void *)_sdata);
1339
1340	debug_checkwx();
1341}
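/*
 * Example of the all_end rounding above (with a hypothetical brk):
 * for _brk_end == 0xffffffff82345000, roundup(_brk_end, PMD_SIZE)
 * yields 0xffffffff82400000, so the whole 2 MiB PMD covering _brk_end
 * is marked NX instead of being split.
 */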
1342
1343int kern_addr_valid(unsigned long addr)
1344{
1345	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
1346	pgd_t *pgd;
1347	p4d_t *p4d;
1348	pud_t *pud;
1349	pmd_t *pmd;
1350	pte_t *pte;
1351
1352	if (above != 0 && above != -1UL)
1353		return 0;
1354
1355	pgd = pgd_offset_k(addr);
1356	if (pgd_none(*pgd))
1357		return 0;
1358
1359	p4d = p4d_offset(pgd, addr);
1360	if (p4d_none(*p4d))
1361		return 0;
1362
1363	pud = pud_offset(p4d, addr);
1364	if (pud_none(*pud))
1365		return 0;
1366
1367	if (pud_large(*pud))
1368		return pfn_valid(pud_pfn(*pud));
1369
1370	pmd = pmd_offset(pud, addr);
1371	if (pmd_none(*pmd))
1372		return 0;
1373
1374	if (pmd_large(*pmd))
1375		return pfn_valid(pmd_pfn(*pmd));
1376
1377	pte = pte_offset_kernel(pmd, addr);
1378	if (pte_none(*pte))
1379		return 0;
1380
1381	return pfn_valid(pte_pfn(*pte));
1382}
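/*
 * The canonical-address check above in numbers (assuming 4-level
 * paging, __VIRTUAL_MASK_SHIFT == 47): for addr == 0xffff888000000000
 * the arithmetic shift yields -1 and the walk proceeds, while the
 * non-canonical addr == 0x0000800000000000 yields 1 and the function
 * returns 0 without touching the page tables.
 */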
1383
1384/*
1385 * Block size is the minimum amount of memory which can be hotplugged or
1386 * hotremoved. It must be a power of two and must be equal to or larger than
1387 * MIN_MEMORY_BLOCK_SIZE.
1388 */
1389#define MAX_BLOCK_SIZE (2UL << 30)
1390
1391/* Amount of RAM needed to start using large blocks */
1392#define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)
1393
1394/* Adjustable memory block size */
1395static unsigned long set_memory_block_size;
1396int __init set_memory_block_size_order(unsigned int order)
1397{
1398	unsigned long size = 1UL << order;
1399
1400	if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
1401		return -EINVAL;
1402
1403	set_memory_block_size = size;
1404	return 0;
1405}
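/*
 * Usage sketch: platform code may pin the block size before the first
 * probe, e.g. set_memory_block_size_order(31) selects 2 GiB blocks.
 * Assuming the usual 128 MiB MIN_MEMORY_BLOCK_SIZE on x86-64, orders
 * outside [27, 36] are rejected with -EINVAL.
 */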
1406
1407static unsigned long probe_memory_block_size(void)
1408{
1409	unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
1410	unsigned long bz;
1411
1412	/* If memory block size has been set, then use it */
1413	bz = set_memory_block_size;
1414	if (bz)
1415		goto done;
1416
1417	/* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
1418	if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
1419		bz = MIN_MEMORY_BLOCK_SIZE;
1420		goto done;
1421	}
1422
1423	/* Find the largest allowed block size that aligns to memory end */
1424	for (bz = MAX_BLOCK_SIZE; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) {
1425		if (IS_ALIGNED(boot_mem_end, bz))
1426			break;
1427	}
1428done:
1429	pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);
1430
1431	return bz;
1432}
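/*
 * Two worked examples of the probe: a 32 GiB machine is below
 * MEM_SIZE_FOR_LARGE_BLOCK, so it keeps MIN_MEMORY_BLOCK_SIZE; a
 * 130 GiB machine (boot_mem_end == 0x2080000000) is 2 GiB aligned, so
 * the loop settles on the 2 GiB MAX_BLOCK_SIZE immediately.
 */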
1433
1434static unsigned long memory_block_size_probed;
1435unsigned long memory_block_size_bytes(void)
1436{
1437	if (!memory_block_size_probed)
1438		memory_block_size_probed = probe_memory_block_size();
1439
1440	return memory_block_size_probed;
1441}
1442
1443#ifdef CONFIG_SPARSEMEM_VMEMMAP
1444/*
1445 * Initialise the sparsemem vmemmap using huge pages at the PMD level.
1446 */
1447static long __meminitdata addr_start, addr_end;
1448static void __meminitdata *p_start, *p_end;
1449static int __meminitdata node_start;
1450
1451static int __meminit vmemmap_populate_hugepages(unsigned long start,
1452		unsigned long end, int node, struct vmem_altmap *altmap)
1453{
1454	unsigned long addr;
1455	unsigned long next;
1456	pgd_t *pgd;
1457	p4d_t *p4d;
1458	pud_t *pud;
1459	pmd_t *pmd;
1460
1461	for (addr = start; addr < end; addr = next) {
1462		next = pmd_addr_end(addr, end);
1463
1464		pgd = vmemmap_pgd_populate(addr, node);
1465		if (!pgd)
1466			return -ENOMEM;
1467
1468		p4d = vmemmap_p4d_populate(pgd, addr, node);
1469		if (!p4d)
1470			return -ENOMEM;
1471
1472		pud = vmemmap_pud_populate(p4d, addr, node);
1473		if (!pud)
1474			return -ENOMEM;
1475
1476		pmd = pmd_offset(pud, addr);
1477		if (pmd_none(*pmd)) {
1478			void *p;
1479
1480			if (altmap)
1481				p = altmap_alloc_block_buf(PMD_SIZE, altmap);
1482			else
1483				p = vmemmap_alloc_block_buf(PMD_SIZE, node);
1484			if (p) {
1485				pte_t entry;
1486
1487				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
1488						PAGE_KERNEL_LARGE);
1489				set_pmd(pmd, __pmd(pte_val(entry)));
1490
1491				/* check to see if we have contiguous blocks */
1492				if (p_end != p || node_start != node) {
1493					if (p_start)
1494						pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
1495						       addr_start, addr_end-1, p_start, p_end-1, node_start);
1496					addr_start = addr;
1497					node_start = node;
1498					p_start = p;
1499				}
1500
1501				addr_end = addr + PMD_SIZE;
1502				p_end = p + PMD_SIZE;
1503				continue;
1504			} else if (altmap)
1505				return -ENOMEM; /* no fallback */
1506		} else if (pmd_large(*pmd)) {
1507			vmemmap_verify((pte_t *)pmd, node, addr, next);
1508			continue;
1509		}
1510		if (vmemmap_populate_basepages(addr, next, node))
1511			return -ENOMEM;
1512	}
1513	return 0;
1514}
1515
1516int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
1517		struct vmem_altmap *altmap)
1518{
1519	int err;
1520
1521	if (end - start < PAGES_PER_SECTION * sizeof(struct page))
1522		err = vmemmap_populate_basepages(start, end, node);
1523	else if (boot_cpu_has(X86_FEATURE_PSE))
1524		err = vmemmap_populate_hugepages(start, end, node, altmap);
1525	else if (altmap) {
1526		pr_err_once("%s: no cpu support for altmap allocations\n",
1527				__func__);
1528		err = -ENOMEM;
1529	} else
1530		err = vmemmap_populate_basepages(start, end, node);
1531	if (!err)
1532		sync_global_pgds(start, end - 1);
1533	return err;
1534}
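/*
 * The size check above in numbers, assuming a 64-byte struct page and
 * 128 MiB sections: PAGES_PER_SECTION * sizeof(struct page) ==
 * 32768 * 64 == 2 MiB, so a full section's memmap is exactly one PMD
 * huge page and only smaller requests fall back to base pages.
 */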
1535
1536#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
1537void register_page_bootmem_memmap(unsigned long section_nr,
1538				  struct page *start_page, unsigned long nr_pages)
1539{
1540	unsigned long addr = (unsigned long)start_page;
1541	unsigned long end = (unsigned long)(start_page + nr_pages);
1542	unsigned long next;
1543	pgd_t *pgd;
1544	p4d_t *p4d;
1545	pud_t *pud;
1546	pmd_t *pmd;
1547	unsigned int nr_pmd_pages;
1548	struct page *page;
1549
1550	for (; addr < end; addr = next) {
1551		pte_t *pte = NULL;
1552
1553		pgd = pgd_offset_k(addr);
1554		if (pgd_none(*pgd)) {
1555			next = (addr + PAGE_SIZE) & PAGE_MASK;
1556			continue;
1557		}
1558		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);
1559
1560		p4d = p4d_offset(pgd, addr);
1561		if (p4d_none(*p4d)) {
1562			next = (addr + PAGE_SIZE) & PAGE_MASK;
1563			continue;
1564		}
1565		get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO);
1566
1567		pud = pud_offset(p4d, addr);
1568		if (pud_none(*pud)) {
1569			next = (addr + PAGE_SIZE) & PAGE_MASK;
1570			continue;
1571		}
1572		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);
1573
1574		if (!boot_cpu_has(X86_FEATURE_PSE)) {
1575			next = (addr + PAGE_SIZE) & PAGE_MASK;
1576			pmd = pmd_offset(pud, addr);
1577			if (pmd_none(*pmd))
1578				continue;
1579			get_page_bootmem(section_nr, pmd_page(*pmd),
1580					 MIX_SECTION_INFO);
1581
1582			pte = pte_offset_kernel(pmd, addr);
1583			if (pte_none(*pte))
1584				continue;
1585			get_page_bootmem(section_nr, pte_page(*pte),
1586					 SECTION_INFO);
1587		} else {
1588			next = pmd_addr_end(addr, end);
1589
1590			pmd = pmd_offset(pud, addr);
1591			if (pmd_none(*pmd))
1592				continue;
1593
1594			nr_pmd_pages = 1 << get_order(PMD_SIZE);
1595			page = pmd_page(*pmd);
1596			while (nr_pmd_pages--)
1597				get_page_bootmem(section_nr, page++,
1598						 SECTION_INFO);
1599		}
1600	}
1601}
1602#endif
1603
1604void __meminit vmemmap_populate_print_last(void)
1605{
1606	if (p_start) {
1607		pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
1608			addr_start, addr_end-1, p_start, p_end-1, node_start);
1609		p_start = NULL;
1610		p_end = NULL;
1611		node_start = 0;
1612	}
1613}
1614#endif