v5.4 (arch/x86/kernel/setup.c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *
 *  Memory region support
 *	David Parsons <orc@pell.chi.il.us>, July-August 1999
 *
 *  Added E820 sanitization routine (removes overlapping memory regions);
 *  Brian Moyle <bmoyle@mvista.com>, February 2001
 *
 * Moved CPU detection code to cpu/${cpu}.c
 *    Patrick Mochel <mochel@osdl.org>, March 2002
 *
 *  Provisions for empty E820 memory regions (reported by certain BIOSes).
 *  Alex Achenbach <xela@slit.de>, December 2002.
 *
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/sfi.h>
#include <linux/apm_bios.h>
#include <linux/initrd.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/edd.h>
#include <linux/iscsi_ibft.h>
#include <linux/nodemask.h>
#include <linux/kexec.h>
#include <linux/dmi.h>
#include <linux/pfn.h>
#include <linux/pci.h>
#include <asm/pci-direct.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/kvm_para.h>
#include <linux/dma-contiguous.h>
#include <xen/xen.h>
#include <uapi/linux/mount.h>

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/delay.h>

#include <linux/kallsyms.h>
#include <linux/cpufreq.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>

#include <linux/percpu.h>
#include <linux/crash_dump.h>
#include <linux/tboot.h>
#include <linux/jiffies.h>
#include <linux/mem_encrypt.h>
#include <linux/sizes.h>

#include <linux/usb/xhci-dbgp.h>
#include <video/edid.h>

#include <asm/mtrr.h>
#include <asm/apic.h>
#include <asm/realmode.h>
#include <asm/e820/api.h>
#include <asm/mpspec.h>
#include <asm/setup.h>
#include <asm/efi.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/sections.h>
#include <asm/io_apic.h>
#include <asm/ist.h>
#include <asm/setup_arch.h>
#include <asm/bios_ebda.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/bugs.h>
#include <asm/kasan.h>

#include <asm/vsyscall.h>
#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>

#include <asm/paravirt.h>
#include <asm/hypervisor.h>
#include <asm/olpc_ofw.h>

#include <asm/percpu.h>
#include <asm/topology.h>
#include <asm/apicdef.h>
#include <asm/amd_nb.h>
#include <asm/mce.h>
#include <asm/alternative.h>
#include <asm/prom.h>
#include <asm/microcode.h>
#include <asm/kaslr.h>
#include <asm/unwind.h>

/*
 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
 * max_pfn_mapped:     highest direct mapped pfn over 4GB
 *
 * The direct mapping only covers E820_TYPE_RAM regions, so the ranges and gaps are
 * represented by pfn_mapped
 */
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

#ifdef CONFIG_DMI
RESERVE_BRK(dmi_alloc, 65536);
#endif


static __initdata unsigned long _brk_start = (unsigned long)__brk_base;
unsigned long _brk_end = (unsigned long)__brk_base;

struct boot_params boot_params;

/*
 * Machine setup..
 */
static struct resource data_resource = {
	.name	= "Kernel data",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};


#ifdef CONFIG_X86_32
/* cpu data as detected by the assembly code in head_32.S */
struct cpuinfo_x86 new_cpu_data;

/* common cpu data for all cpus */
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

unsigned int def_to_bigsmp;

/* for MCA, but anyone else can use it if they want */
unsigned int machine_id;
unsigned int machine_submodel_id;
unsigned int BIOS_revision;

struct apm_info apm_info;
EXPORT_SYMBOL(apm_info);

#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
	defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
struct ist_info ist_info;
EXPORT_SYMBOL(ist_info);
#else
struct ist_info ist_info;
#endif

#else
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);
#endif


#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
__visible unsigned long mmu_cr4_features __ro_after_init;
#else
__visible unsigned long mmu_cr4_features __ro_after_init = X86_CR4_PAE;
#endif

/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
int bootloader_type, bootloader_version;

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

unsigned long saved_video_mode;

#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

 229
 230#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
 231struct edd edd;
 232#ifdef CONFIG_EDD_MODULE
 233EXPORT_SYMBOL(edd);
 234#endif
 235/**
 236 * copy_edd() - Copy the BIOS EDD information
 237 *              from boot_params into a safe place.
 238 *
 239 */
 240static inline void __init copy_edd(void)
 241{
 242     memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
 243	    sizeof(edd.mbr_signature));
 244     memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
 245     edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
 246     edd.edd_info_nr = boot_params.eddbuf_entries;
 247}
 248#else
 249static inline void __init copy_edd(void)
 250{
 251}
 252#endif
 253
 254void * __init extend_brk(size_t size, size_t align)
 255{
 256	size_t mask = align - 1;
 257	void *ret;
 258
 259	BUG_ON(_brk_start == 0);
 260	BUG_ON(align & mask);
 261
 262	_brk_end = (_brk_end + mask) & ~mask;
 263	BUG_ON((char *)(_brk_end + size) > __brk_limit);
 264
 265	ret = (void *)_brk_end;
 266	_brk_end += size;
 267
 268	memset(ret, 0, size);
 269
 270	return ret;
 271}
 272
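The brk allocator above hands out zeroed, aligned chunks from a region set aside at link time, and is shut off once reserve_brk() runs. A minimal sketch of a caller, with a hypothetical early_table user (the dmi_alloc reservation above is a real one):

/*
 * Hypothetical early-boot user of the brk allocator: RESERVE_BRK()
 * sets aside space in the .brk section at link time; extend_brk()
 * then carves out zeroed, aligned chunks of it and BUG()s if the
 * reservation overflows. All calls must happen before reserve_brk().
 */
RESERVE_BRK(early_table, PAGE_SIZE);

static u32 * __init early_table_alloc(unsigned int entries)
{
	return extend_brk(entries * sizeof(u32), sizeof(u32));
}
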
#ifdef CONFIG_X86_32
static void __init cleanup_highmap(void)
{
}
#endif

static void __init reserve_brk(void)
{
	if (_brk_end > _brk_start)
		memblock_reserve(__pa_symbol(_brk_start),
				 _brk_end - _brk_start);

	/* Mark brk area as locked down and no longer taking any
	   new allocations */
	_brk_start = 0;
}

u64 relocated_ramdisk;

#ifdef CONFIG_BLK_DEV_INITRD

static u64 __init get_ramdisk_image(void)
{
	u64 ramdisk_image = boot_params.hdr.ramdisk_image;

	ramdisk_image |= (u64)boot_params.ext_ramdisk_image << 32;

	return ramdisk_image;
}
static u64 __init get_ramdisk_size(void)
{
	u64 ramdisk_size = boot_params.hdr.ramdisk_size;

	ramdisk_size |= (u64)boot_params.ext_ramdisk_size << 32;

	return ramdisk_size;
}

static void __init relocate_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 area_size     = PAGE_ALIGN(ramdisk_size);

	/* We need to move the initrd down into directly mapped mem */
	relocated_ramdisk = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
						   area_size, PAGE_SIZE);

	if (!relocated_ramdisk)
		panic("Cannot find place for new RAMDISK of size %lld\n",
		      ramdisk_size);

	/* Note: this includes all the mem currently occupied by
	   the initrd, we rely on that fact to keep the data intact. */
	memblock_reserve(relocated_ramdisk, area_size);
	initrd_start = relocated_ramdisk + PAGE_OFFSET;
	initrd_end   = initrd_start + ramdisk_size;
	printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
	       relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);

	copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size);

	printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
		" [mem %#010llx-%#010llx]\n",
		ramdisk_image, ramdisk_image + ramdisk_size - 1,
		relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
}

static void __init early_reserve_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
}
static void __init reserve_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
	u64 mapped_size;

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	initrd_start = 0;

	mapped_size = memblock_mem_size(max_pfn_mapped);
	if (ramdisk_size >= (mapped_size>>1))
		panic("initrd too large to handle, "
		       "disabling initrd (%lld needed, %lld available)\n",
		       ramdisk_size, mapped_size>>1);

	printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
			ramdisk_end - 1);

	if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
				PFN_DOWN(ramdisk_end))) {
		/* All are mapped, easy case */
		initrd_start = ramdisk_image + PAGE_OFFSET;
		initrd_end = initrd_start + ramdisk_size;
		return;
	}

	relocate_initrd();

	memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
}

#else
static void __init early_reserve_initrd(void)
{
}
static void __init reserve_initrd(void)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */

static void __init parse_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data, pa_next;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		u32 data_len, data_type;

		data = early_memremap(pa_data, sizeof(*data));
		data_len = data->len + sizeof(struct setup_data);
		data_type = data->type;
		pa_next = data->next;
		early_memunmap(data, sizeof(*data));

		switch (data_type) {
		case SETUP_E820_EXT:
			e820__memory_setup_extended(pa_data, data_len);
			break;
		case SETUP_DTB:
			add_dtb(pa_data);
			break;
		case SETUP_EFI:
			parse_efi_setup(pa_data, data_len);
			break;
		default:
			break;
		}
		pa_data = pa_next;
	}
}

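The records walked above form a singly linked list in physical memory, handed over by the boot loader. For reference, the node layout defined by the x86 boot protocol (declaration abbreviated from the kernel's UAPI bootparam header):

struct setup_data {
	__u64 next;	/* physical address of the next node, 0 terminates */
	__u32 type;	/* SETUP_E820_EXT, SETUP_DTB, SETUP_EFI, ... */
	__u32 len;	/* length of the payload that follows */
	__u8  data[];	/* type-specific payload */
};

Each iteration maps only the fixed-size header, copies out the fields it needs, and unmaps again before dispatching on the type.
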
static void __init memblock_x86_reserve_range_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		memblock_reserve(pa_data, sizeof(*data) + data->len);
		pa_data = data->next;
		early_memunmap(data, sizeof(*data));
	}
}

/*
 * --------- Crashkernel reservation ------------------------------
 */

#ifdef CONFIG_KEXEC_CORE

/* 16M alignment for crash kernel regions */
#define CRASH_ALIGN		SZ_16M

/*
 * Keep the crash kernel below this limit.
 *
 * On 32-bit, earlier kernels would limit the crash kernel to the low
 * 512 MiB due to mapping restrictions.
 *
 * On 64-bit, the kdump kernel must be restricted to be below 64 TB,
 * which is the upper limit of system RAM in 4-level paging mode. Since
 * the kdump jump could be from 5-level paging to 4-level paging, the
 * jump will fail if the kernel is put above 64 TB, and there is no way
 * to detect the paging mode of the kernel that will be loaded for
 * dumping during the first kernel's bootup.
 */
#ifdef CONFIG_X86_32
# define CRASH_ADDR_LOW_MAX	SZ_512M
# define CRASH_ADDR_HIGH_MAX	SZ_512M
#else
# define CRASH_ADDR_LOW_MAX	SZ_4G
# define CRASH_ADDR_HIGH_MAX	SZ_64T
#endif

static int __init reserve_crashkernel_low(void)
{
#ifdef CONFIG_X86_64
	unsigned long long base, low_base = 0, low_size = 0;
	unsigned long total_low_mem;
	int ret;

	total_low_mem = memblock_mem_size(1UL << (32 - PAGE_SHIFT));

	/* crashkernel=Y,low */
	ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base);
	if (ret) {
		/*
		 * two parts from kernel/dma/swiotlb.c:
		 * -swiotlb size: user-specified with swiotlb= or default.
		 *
		 * -swiotlb overflow buffer: now hardcoded to 32k. We round it
		 * to 8M for other buffers that may need to stay low too. Also
		 * make sure we allocate enough extra low memory so that we
		 * don't run out of DMA buffers for 32-bit devices.
		 */
		low_size = max(swiotlb_size_or_default() + (8UL << 20), 256UL << 20);
	} else {
		/* passed with crashkernel=0,low ? */
		if (!low_size)
			return 0;
	}

	low_base = memblock_find_in_range(0, 1ULL << 32, low_size, CRASH_ALIGN);
	if (!low_base) {
		pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n",
		       (unsigned long)(low_size >> 20));
		return -ENOMEM;
	}

	ret = memblock_reserve(low_base, low_size);
	if (ret) {
		pr_err("%s: Error reserving crashkernel low memblock.\n", __func__);
		return ret;
	}

	pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n",
		(unsigned long)(low_size >> 20),
		(unsigned long)(low_base >> 20),
		(unsigned long)(total_low_mem >> 20));

	crashk_low_res.start = low_base;
	crashk_low_res.end   = low_base + low_size - 1;
	insert_resource(&iomem_resource, &crashk_low_res);
#endif
	return 0;
}

static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base, total_mem;
	bool high = false;
	int ret;

	total_mem = memblock_phys_mem_size();

	/* crashkernel=XM */
	ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0) {
		/* crashkernel=X,high */
		ret = parse_crashkernel_high(boot_command_line, total_mem,
					     &crash_size, &crash_base);
		if (ret != 0 || crash_size <= 0)
			return;
		high = true;
	}

	if (xen_pv_domain()) {
		pr_info("Ignoring crashkernel for a Xen PV domain\n");
		return;
	}

	/* 0 means: find the address automatically */
	if (!crash_base) {
		/*
		 * Set CRASH_ADDR_LOW_MAX upper bound for crash memory,
		 * crashkernel=x,high reserves memory over 4G, also allocates
		 * 256M extra low memory for DMA buffers and swiotlb.
		 * But the extra memory is not required for all machines.
		 * So try low memory first and fall back to high memory
		 * unless "crashkernel=size[KMG],high" is specified.
		 */
		if (!high)
			crash_base = memblock_find_in_range(CRASH_ALIGN,
						CRASH_ADDR_LOW_MAX,
						crash_size, CRASH_ALIGN);
		if (!crash_base)
			crash_base = memblock_find_in_range(CRASH_ALIGN,
						CRASH_ADDR_HIGH_MAX,
						crash_size, CRASH_ALIGN);
		if (!crash_base) {
			pr_info("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
					       crash_base + crash_size,
					       crash_size, 1 << 20);
		if (start != crash_base) {
			pr_info("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}
	ret = memblock_reserve(crash_base, crash_size);
	if (ret) {
		pr_err("%s: Error reserving crashkernel memblock.\n", __func__);
		return;
	}

	if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
		memblock_free(crash_base, crash_size);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end   = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif

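The whole reservation above is driven by the crashkernel= boot parameter. A few illustrative forms (see Documentation/admin-guide/kdump/ for the full syntax):

/*
 * Illustrative crashkernel= command lines handled by the code above:
 *
 *   crashkernel=256M       - reserve 256 MiB; low memory is tried first,
 *                            then anything below CRASH_ADDR_HIGH_MAX
 *   crashkernel=256M@1G    - reserve 256 MiB at the fixed address 1 GiB
 *   crashkernel=512M,high  - allow the reservation above 4 GiB; extra low
 *                            memory for DMA/swiotlb is added automatically
 *   crashkernel=128M,low   - size that extra low-memory block by hand
 */
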
static struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x60,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x64, .end = 0x64,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

void __init reserve_standard_io_resources(void)
{
	int i;

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);

}

static __init void reserve_ibft_region(void)
{
	unsigned long addr, size = 0;

	addr = find_ibft_region(&size);

	if (size)
		memblock_reserve(addr, size);
}

static bool __init snb_gfx_workaround_needed(void)
{
#ifdef CONFIG_PCI
	int i;
	u16 vendor, devid;
	static const __initconst u16 snb_ids[] = {
		0x0102,
		0x0112,
		0x0122,
		0x0106,
		0x0116,
		0x0126,
		0x010a,
	};

	/* Assume no if something weird is going on with PCI */
	if (!early_pci_allowed())
		return false;

	vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
	if (vendor != 0x8086)
		return false;

	devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
	for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
		if (devid == snb_ids[i])
			return true;
#endif

	return false;
}

/*
 * Sandy Bridge graphics has trouble with certain ranges, exclude
 * them from allocation.
 */
static void __init trim_snb_memory(void)
{
	static const __initconst unsigned long bad_pages[] = {
		0x20050000,
		0x20110000,
		0x20130000,
		0x20138000,
		0x40004000,
	};
	int i;

	if (!snb_gfx_workaround_needed())
		return;

	printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");

	/*
	 * Reserve all memory below the 1 MB mark that has not
	 * already been reserved.
	 */
	memblock_reserve(0, 1<<20);

	for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
		if (memblock_reserve(bad_pages[i], PAGE_SIZE))
			printk(KERN_WARNING "failed to reserve 0x%08lx\n",
			       bad_pages[i]);
	}
}

/*
 * Here we put platform-specific memory range workarounds, i.e.
 * memory known to be corrupt or otherwise in need of reservation on
 * specific platforms.
 *
 * If this gets used more widely it could use a real dispatch mechanism.
 */
static void __init trim_platform_memory_ranges(void)
{
	trim_snb_memory();
}

static void __init trim_bios_range(void)
{
	/*
	 * A special case is the first 4 KiB of memory;
	 * this is a BIOS-owned area, not kernel RAM, but generally
	 * not listed as such in the E820 table.
	 *
	 * This typically reserves additional memory (64 KiB by default)
	 * since some BIOSes are known to corrupt low memory.  See the
	 * Kconfig help text for X86_RESERVE_LOW.
	 */
	e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);

	/*
	 * Special case: some BIOSes report the PC BIOS
	 * area (640 KiB -> 1 MiB) as RAM even though it is not.
	 * Take it out.
	 */
	e820__range_remove(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_TYPE_RAM, 1);

	e820__update_table(e820_table);
}

/* called before trim_bios_range() to spare extra sanitize */
static void __init e820_add_kernel_range(void)
{
	u64 start = __pa_symbol(_text);
	u64 size = __pa_symbol(_end) - start;

	/*
	 * Complain if .text, .data and .bss are not marked as E820_TYPE_RAM
	 * and attempt to fix it by adding the range. We may have a confused
	 * BIOS, or the user may have used memmap=exactmap or memmap=xxM$yyM
	 * to exclude the kernel range. If we really are running on top of
	 * non-RAM, we will crash later anyway.
	 */
	if (e820__mapped_all(start, start + size, E820_TYPE_RAM))
		return;

	pr_warn(".text .data .bss are not marked as E820_TYPE_RAM!\n");
	e820__range_remove(start, size, E820_TYPE_RAM, 0);
	e820__range_add(start, size, E820_TYPE_RAM);
}

static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;

static int __init parse_reservelow(char *p)
{
	unsigned long long size;

	if (!p)
		return -EINVAL;

	size = memparse(p, &p);

	if (size < 4096)
		size = 4096;

	if (size > 640*1024)
		size = 640*1024;

	reserve_low = size;

	return 0;
}

early_param("reservelow", parse_reservelow);

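parse_reservelow() clamps the user's value into the [4 KiB, 640 KiB] window; trim_low_memory_range() below then reserves that many bytes starting at address zero. An illustrative boot parameter:

/*
 * Example: "reservelow=64k" keeps the bottom 64 KiB away from the
 * allocator. memparse() accepts the usual K/M/G suffixes, and the
 * result is clamped to the 4 KiB .. 640 KiB range above.
 */
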
static void __init trim_low_memory_range(void)
{
	memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
}

/*
 * Dump out kernel offset information on panic.
 */
static int
dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
{
	if (kaslr_enabled()) {
		pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
			 kaslr_offset(),
			 __START_KERNEL,
			 __START_KERNEL_map,
			 MODULES_VADDR-1);
	} else {
		pr_emerg("Kernel Offset: disabled\n");
	}

	return 0;
}

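With KASLR active, the notifier above adds one line to every panic report. An illustrative rendering (the offset varies per boot; the range endpoints shown assume the default x86-64 layout, where __START_KERNEL_map is 0xffffffff80000000):

/*
 * Kernel Offset: 0x1a000000 from 0xffffffff81000000
 *    (relocation range: 0xffffffff80000000-0xffffffffbfffffff)
 */
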
/*
 * Determine if we were loaded by an EFI loader.  If so, then we have also been
 * passed the efi memmap, systab, etc., so we should use these data structures
 * for initialization.  Note, the efi init code path is determined by the
 * global efi_enabled. This allows the same kernel image to be used on existing
 * systems (with a traditional BIOS) as well as on EFI systems.
 */
/*
 * setup_arch - architecture-specific boot-time initializations
 *
 * Note: On x86_64, fixmaps are ready for use even before this is called.
 */

void __init setup_arch(char **cmdline_p)
{
	/*
	 * Reserve the memory occupied by the kernel between _text and
	 * __end_of_kernel_reserve symbols. Any kernel sections after the
	 * __end_of_kernel_reserve symbol must be explicitly reserved with a
	 * separate memblock_reserve() or they will be discarded.
	 */
	memblock_reserve(__pa_symbol(_text),
			 (unsigned long)__end_of_kernel_reserve - (unsigned long)_text);

	/*
	 * Make sure page 0 is always reserved because on systems with
	 * L1TF its contents can be leaked to user processes.
	 */
	memblock_reserve(0, PAGE_SIZE);

	early_reserve_initrd();

	/*
	 * At this point everything still needed from the boot loader
	 * or BIOS or kernel text should be early reserved or marked not
	 * RAM in e820. All other memory is free game.
	 */

#ifdef CONFIG_X86_32
	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));

	/*
	 * copy kernel address range established so far and switch
	 * to the proper swapper page table
	 */
	clone_pgd_range(swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			initial_page_table + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	load_cr3(swapper_pg_dir);
	/*
	 * Note: Quark X1000 CPUs advertise PGE incorrectly and require
	 * a cr3 based tlb flush, so the following __flush_tlb_all()
	 * will not flush anything because the cpu quirk which clears
	 * X86_FEATURE_PGE has not been invoked yet. Though due to the
	 * load_cr3() above the TLB has been flushed already. The
	 * quirk is invoked before subsequent calls to __flush_tlb_all()
	 * so proper operation is guaranteed.
	 */
	__flush_tlb_all();
#else
	printk(KERN_INFO "Command line: %s\n", boot_command_line);
	boot_cpu_data.x86_phys_bits = MAX_PHYSMEM_BITS;
#endif

	/*
	 * If we have OLPC OFW, we might end up relocating the fixmap due to
	 * reserve_top(), so do this before touching the ioremap area.
	 */
	olpc_ofw_detect();

	idt_setup_early_traps();
	early_cpu_init();
	arch_init_ideal_nops();
	jump_label_init();
	early_ioremap_init();

	setup_olpc_ofw_pgd();

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
#ifdef CONFIG_X86_32
	apm_info.bios = boot_params.apm_bios_info;
	ist_info = boot_params.ist_info;
#endif
	saved_video_mode = boot_params.hdr.vid_mode;
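	/*
	 * Boot-protocol note: type_of_loader packs the loader ID and
	 * version as 0xTV. A type nibble of 0xE means "see the
	 * ext_loader_type field"; ext_loader_ver supplies the high bits
	 * of the version, which is what the decode below reassembles.
	 */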
	bootloader_type = boot_params.hdr.type_of_loader;
	if ((bootloader_type >> 4) == 0xe) {
		bootloader_type &= 0xf;
		bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
	}
	bootloader_version  = bootloader_type & 0xf;
	bootloader_version |= boot_params.hdr.ext_loader_ver << 4;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     EFI32_LOADER_SIGNATURE, 4)) {
		set_bit(EFI_BOOT, &efi.flags);
	} else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     EFI64_LOADER_SIGNATURE, 4)) {
		set_bit(EFI_BOOT, &efi.flags);
		set_bit(EFI_64BIT, &efi.flags);
	}
#endif

	x86_init.oem.arch_setup();

	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
	e820__memory_setup();
	parse_setup_data();

	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = _brk_end;

	mpx_mm_init(&init_mm);

	code_resource.start = __pa_symbol(_text);
	code_resource.end = __pa_symbol(_etext)-1;
	data_resource.start = __pa_symbol(_etext);
	data_resource.end = __pa_symbol(_edata)-1;
	bss_resource.start = __pa_symbol(__bss_start);
	bss_resource.end = __pa_symbol(__bss_stop)-1;

#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if (builtin_cmdline[0]) {
		/* append boot loader cmdline to builtin */
		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif
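
	/*
	 * Net effect of the block above: with CONFIG_CMDLINE_OVERRIDE
	 * the built-in command line replaces the boot loader's line
	 * entirely; otherwise the boot loader's line is appended, e.g.
	 * a built-in "console=ttyS0" plus a boot loader "root=/dev/sda1"
	 * yields "console=ttyS0 root=/dev/sda1" (illustrative values).
	 */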

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	/*
	 * x86_configure_nx() is called before parse_early_param() to detect
	 * whether hardware doesn't support NX (so that the early EHCI debug
	 * console setup can safely call set_fixmap()). It may then be called
	 * again from within noexec_setup() during parsing early parameters
	 * to honor the respective command line option.
	 */
	x86_configure_nx();

	parse_early_param();

	if (efi_enabled(EFI_BOOT))
		efi_memblock_x86_reserve_range();
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Memory used by the kernel cannot be hot-removed because Linux
	 * cannot migrate the kernel pages. When memory hotplug is
	 * enabled, we should prevent memblock from allocating memory
	 * for the kernel.
	 *
	 * ACPI SRAT records all hotpluggable memory ranges. But before
	 * SRAT is parsed, we don't know about it.
	 *
	 * The kernel image is loaded into memory very early. We
	 * cannot prevent this anyway. So on NUMA systems, we set any
	 * node the kernel resides in as un-hotpluggable.
	 *
	 * Since on modern servers one node could have double-digit
	 * gigabytes of memory, we can assume the memory around the kernel
	 * image is also un-hotpluggable. So before SRAT is parsed, just
	 * allocate memory near the kernel image to try our best to keep
	 * the kernel away from hotpluggable memory.
	 */
	if (movable_node_is_enabled())
		memblock_set_bottom_up(true);
#endif

	x86_report_nx();

	/* after early param, so could get panic from serial */
	memblock_x86_reserve_range_setup_data();

	if (acpi_mps_check()) {
#ifdef CONFIG_X86_LOCAL_APIC
		disable_apic = 1;
#endif
		setup_clear_cpu_cap(X86_FEATURE_APIC);
	}

	e820__reserve_setup_data();
	e820__finish_early_params();

	if (efi_enabled(EFI_BOOT))
		efi_init();

	dmi_setup();

	/*
	 * VMware detection requires dmi to be available, so this
	 * needs to be done after dmi_setup(), for the boot CPU.
	 */
	init_hypervisor_platform();

	tsc_early_init();
	x86_init.resources.probe_roms();

	/* after parse_early_param, so could debug it */
	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);
	insert_resource(&iomem_resource, &bss_resource);

	e820_add_kernel_range();
	trim_bios_range();
#ifdef CONFIG_X86_32
	if (ppro_with_ram_bug()) {
		e820__range_update(0x70000000ULL, 0x40000ULL, E820_TYPE_RAM,
				  E820_TYPE_RESERVED);
		e820__update_table(e820_table);
		printk(KERN_INFO "fixed physical RAM map:\n");
		e820__print_table("bad_ppro");
	}
#else
	early_gart_iommu_check();
#endif

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	max_pfn = e820__end_of_ram_pfn();

	/* update e820 for memory not covered by WB MTRRs */
	mtrr_bp_init();
	if (mtrr_trim_uncached_memory(max_pfn))
		max_pfn = e820__end_of_ram_pfn();

	max_possible_pfn = max_pfn;

	/*
	 * This call is required when the CPU does not support PAT. If
	 * mtrr_bp_init() invoked it already via pat_init() the call has no
	 * effect.
	 */
	init_cache_modes();

	/*
	 * Define random base addresses for memory sections after max_pfn is
	 * defined and before each memory section base is used.
	 */
	kernel_randomize_memory();

#ifdef CONFIG_X86_32
	/* max_low_pfn gets updated here */
	find_low_pfn_range();
#else
	check_x2apic();

	/* How many end-of-memory variables you have, grandma! */
	/* need this before calling reserve_initrd */
	if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
		max_low_pfn = e820__end_of_low_ram_pfn();
	else
		max_low_pfn = max_pfn;

	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
#endif

	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();

	reserve_ibft_region();

	early_alloc_pgt_buf();

	/*
	 * We need to conclude brk before e820__memblock_setup(): it could
	 * use memblock_find_in_range() and could overlap with the brk area.
	 */
	reserve_brk();

	cleanup_highmap();

	memblock_set_current_limit(ISA_END_ADDRESS);
	e820__memblock_setup();

	reserve_bios_regions();

	if (efi_enabled(EFI_MEMMAP)) {
		efi_fake_memmap();
		efi_find_mirror();
		efi_esrt_init();

		/*
		 * The EFI specification says that boot service code won't be
		 * called after ExitBootServices(). This is, in fact, a lie.
		 */
		efi_reserve_boot_services();
	}

	/* preallocate 4k for mptable mpc */
	e820__memblock_alloc_reserved_mpc_new();

#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
	setup_bios_corruption_check();
#endif

#ifdef CONFIG_X86_32
	printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
			(max_pfn_mapped<<PAGE_SHIFT) - 1);
#endif

	reserve_real_mode();

	trim_platform_memory_ranges();
	trim_low_memory_range();

	init_mem_mapping();

	idt_setup_early_pf();

	/*
	 * Update mmu_cr4_features (and, indirectly, trampoline_cr4_features)
	 * with the current CR4 value.  This may not be necessary, but
	 * auditing all the early-boot CR4 manipulation would be needed to
	 * rule it out.
	 *
	 * Mask off features that don't work outside long mode (just
	 * PCIDE for now).
	 */
	mmu_cr4_features = __read_cr4() & ~X86_CR4_PCIDE;

	memblock_set_current_limit(get_max_mapped());

	/*
	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
	 */

#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	if (init_ohci1394_dma_early)
		init_ohci1394_dma_on_all_controllers();
#endif
	/* Allocate bigger log buffer */
	setup_log_buf(1);

	if (efi_enabled(EFI_BOOT)) {
		switch (boot_params.secure_boot) {
		case efi_secureboot_mode_disabled:
			pr_info("Secure boot disabled\n");
			break;
		case efi_secureboot_mode_enabled:
			pr_info("Secure boot enabled\n");
			break;
		default:
			pr_info("Secure boot could not be determined\n");
			break;
		}
	}

	reserve_initrd();

	acpi_table_upgrade();

	vsmp_init();

	io_delay_init();

	early_platform_quirks();

	/*
	 * Parse the ACPI tables for possible boot-time SMP configuration.
	 */
	acpi_boot_table_init();

	early_acpi_boot_init();

	initmem_init();
	dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);

	/*
	 * Reserve memory for crash kernel after SRAT is parsed so that it
	 * won't consume hotpluggable memory.
	 */
	reserve_crashkernel();

	memblock_find_dma_reserve();

	if (!early_xdbc_setup_hardware())
		early_xdbc_register_console();

	x86_init.paging.pagetable_init();

	kasan_init();

	/*
	 * Sync back kernel address range.
	 *
	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
	 * this call?
	 */
	sync_initial_page_table();

	tboot_probe();

	map_vsyscall();

	generic_apic_probe();

	early_quirks();

	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
	sfi_init();
	x86_dtb_init();

	/*
	 * get boot-time SMP configuration:
	 */
	get_smp_config();

	/*
	 * Systems without ACPI or MP tables might not have the local APIC
	 * mapped yet, but prefill_possible_map() might need to access it.
	 */
	init_apic_mappings();

	prefill_possible_map();

	init_cpu_to_node();

	io_apic_init_mappings();

	x86_init.hyper.guest_late_init();

	e820__reserve_resources();
	e820__register_nosave_regions(max_pfn);

	x86_init.resources.reserve_resources();

	e820__setup_pci_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	x86_init.oem.banner();

	x86_init.timers.wallclock_init();

	mcheck_init();

	register_refined_jiffies(CLOCK_TICK_RATE);

#ifdef CONFIG_EFI
	if (efi_enabled(EFI_BOOT))
		efi_apply_memmap_quirks();
#endif

	unwind_init();
}

#ifdef CONFIG_X86_32

static struct resource video_ram_resource = {
	.name	= "Video RAM area",
	.start	= 0xa0000,
	.end	= 0xbffff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

void __init i386_reserve_resources(void)
{
	request_resource(&iomem_resource, &video_ram_resource);
	reserve_standard_io_resources();
}

#endif /* CONFIG_X86_32 */

static struct notifier_block kernel_offset_notifier = {
	.notifier_call = dump_kernel_offset
};

static int __init register_kernel_offset_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
					&kernel_offset_notifier);
	return 0;
}
__initcall(register_kernel_offset_dumper);
v4.6 (arch/x86/kernel/setup.c)
 
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *
 *  Memory region support
 *	David Parsons <orc@pell.chi.il.us>, July-August 1999
 *
 *  Added E820 sanitization routine (removes overlapping memory regions);
 *  Brian Moyle <bmoyle@mvista.com>, February 2001
 *
 * Moved CPU detection code to cpu/${cpu}.c
 *    Patrick Mochel <mochel@osdl.org>, March 2002
 *
 *  Provisions for empty E820 memory regions (reported by certain BIOSes).
 *  Alex Achenbach <xela@slit.de>, December 2002.
 *
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/sfi.h>
#include <linux/apm_bios.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/edd.h>
#include <linux/iscsi_ibft.h>
#include <linux/nodemask.h>
#include <linux/kexec.h>
#include <linux/dmi.h>
#include <linux/pfn.h>
#include <linux/pci.h>
#include <asm/pci-direct.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/kvm_para.h>
#include <linux/dma-contiguous.h>

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/delay.h>

#include <linux/kallsyms.h>
#include <linux/cpufreq.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>

#include <linux/percpu.h>
#include <linux/crash_dump.h>
#include <linux/tboot.h>
#include <linux/jiffies.h>

#include <video/edid.h>

#include <asm/mtrr.h>
#include <asm/apic.h>
#include <asm/realmode.h>
#include <asm/e820.h>
#include <asm/mpspec.h>
#include <asm/setup.h>
#include <asm/efi.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/sections.h>
#include <asm/io_apic.h>
#include <asm/ist.h>
#include <asm/setup_arch.h>
#include <asm/bios_ebda.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/bugs.h>
#include <asm/kasan.h>

#include <asm/vsyscall.h>
#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>

#include <asm/paravirt.h>
#include <asm/hypervisor.h>
#include <asm/olpc_ofw.h>

#include <asm/percpu.h>
#include <asm/topology.h>
#include <asm/apicdef.h>
#include <asm/amd_nb.h>
#include <asm/mce.h>
#include <asm/alternative.h>
#include <asm/prom.h>
#include <asm/microcode.h>
#include <asm/mmu_context.h>

/*
 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
 * max_pfn_mapped:     highest direct mapped pfn over 4GB
 *
 * The direct mapping only covers E820_RAM regions, so the ranges and gaps are
 * represented by pfn_mapped
 */
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

#ifdef CONFIG_DMI
RESERVE_BRK(dmi_alloc, 65536);
#endif


static __initdata unsigned long _brk_start = (unsigned long)__brk_base;
unsigned long _brk_end = (unsigned long)__brk_base;

#ifdef CONFIG_X86_64
int default_cpu_present_to_apicid(int mps_cpu)
{
	return __default_cpu_present_to_apicid(mps_cpu);
}

int default_check_phys_apicid_present(int phys_apicid)
{
	return __default_check_phys_apicid_present(phys_apicid);
}
#endif

struct boot_params boot_params;

/*
 * Machine setup..
 */
static struct resource data_resource = {
	.name	= "Kernel data",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};


#ifdef CONFIG_X86_32
/* cpu data as detected by the assembly code in head.S */
struct cpuinfo_x86 new_cpu_data = {
	.wp_works_ok = -1,
};
/* common cpu data for all cpus */
struct cpuinfo_x86 boot_cpu_data __read_mostly = {
	.wp_works_ok = -1,
};
EXPORT_SYMBOL(boot_cpu_data);
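
/*
 * Note: wp_works_ok == -1 means "not yet tested". On 32-bit,
 * test_wp_bit() later checks whether the CPU honors the WP bit from
 * supervisor mode and fills this in, which is why both structs start
 * at -1 rather than 0.
 */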

unsigned int def_to_bigsmp;

/* for MCA, but anyone else can use it if they want */
unsigned int machine_id;
unsigned int machine_submodel_id;
unsigned int BIOS_revision;

struct apm_info apm_info;
EXPORT_SYMBOL(apm_info);

#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
	defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
struct ist_info ist_info;
EXPORT_SYMBOL(ist_info);
#else
struct ist_info ist_info;
#endif

#else
struct cpuinfo_x86 boot_cpu_data __read_mostly = {
	.x86_phys_bits = MAX_PHYSMEM_BITS,
};
EXPORT_SYMBOL(boot_cpu_data);
#endif


#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
__visible unsigned long mmu_cr4_features;
#else
__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
#endif

/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
int bootloader_type, bootloader_version;

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

unsigned long saved_video_mode;

#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 *
 */
static inline void __init copy_edd(void)
{
     memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	    sizeof(edd.mbr_signature));
     memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
     edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
     edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void __init copy_edd(void)
{
}
#endif

void * __init extend_brk(size_t size, size_t align)
{
	size_t mask = align - 1;
	void *ret;

	BUG_ON(_brk_start == 0);
	BUG_ON(align & mask);

	_brk_end = (_brk_end + mask) & ~mask;
	BUG_ON((char *)(_brk_end + size) > __brk_limit);

	ret = (void *)_brk_end;
	_brk_end += size;

	memset(ret, 0, size);

	return ret;
}

#ifdef CONFIG_X86_32
static void __init cleanup_highmap(void)
{
}
#endif

static void __init reserve_brk(void)
{
	if (_brk_end > _brk_start)
		memblock_reserve(__pa_symbol(_brk_start),
				 _brk_end - _brk_start);

	/* Mark brk area as locked down and no longer taking any
	   new allocations */
	_brk_start = 0;
}

u64 relocated_ramdisk;

#ifdef CONFIG_BLK_DEV_INITRD

static u64 __init get_ramdisk_image(void)
{
	u64 ramdisk_image = boot_params.hdr.ramdisk_image;

	ramdisk_image |= (u64)boot_params.ext_ramdisk_image << 32;

	return ramdisk_image;
}
static u64 __init get_ramdisk_size(void)
{
	u64 ramdisk_size = boot_params.hdr.ramdisk_size;

	ramdisk_size |= (u64)boot_params.ext_ramdisk_size << 32;

	return ramdisk_size;
}

static void __init relocate_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 area_size     = PAGE_ALIGN(ramdisk_size);

	/* We need to move the initrd down into directly mapped mem */
	relocated_ramdisk = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
						   area_size, PAGE_SIZE);

	if (!relocated_ramdisk)
		panic("Cannot find place for new RAMDISK of size %lld\n",
		      ramdisk_size);

	/* Note: this includes all the mem currently occupied by
	   the initrd, we rely on that fact to keep the data intact. */
	memblock_reserve(relocated_ramdisk, area_size);
	initrd_start = relocated_ramdisk + PAGE_OFFSET;
	initrd_end   = initrd_start + ramdisk_size;
	printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
	       relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);

	copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size);

	printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
		" [mem %#010llx-%#010llx]\n",
		ramdisk_image, ramdisk_image + ramdisk_size - 1,
		relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
}

static void __init early_reserve_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
}
static void __init reserve_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
	u64 mapped_size;

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	initrd_start = 0;

	mapped_size = memblock_mem_size(max_pfn_mapped);
	if (ramdisk_size >= (mapped_size>>1))
		panic("initrd too large to handle, "
		       "disabling initrd (%lld needed, %lld available)\n",
		       ramdisk_size, mapped_size>>1);

	printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
			ramdisk_end - 1);

	if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
				PFN_DOWN(ramdisk_end))) {
		/* All are mapped, easy case */
		initrd_start = ramdisk_image + PAGE_OFFSET;
		initrd_end = initrd_start + ramdisk_size;
		return;
	}

	relocate_initrd();

	memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
}
#else
static void __init early_reserve_initrd(void)
{
}
static void __init reserve_initrd(void)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */

static void __init parse_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data, pa_next;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		u32 data_len, data_type;

		data = early_memremap(pa_data, sizeof(*data));
		data_len = data->len + sizeof(struct setup_data);
		data_type = data->type;
		pa_next = data->next;
		early_memunmap(data, sizeof(*data));

		switch (data_type) {
		case SETUP_E820_EXT:
			parse_e820_ext(pa_data, data_len);
			break;
		case SETUP_DTB:
			add_dtb(pa_data);
			break;
		case SETUP_EFI:
			parse_efi_setup(pa_data, data_len);
			break;
		default:
			break;
		}
		pa_data = pa_next;
	}
}

static void __init e820_reserve_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;

	pa_data = boot_params.hdr.setup_data;
	if (!pa_data)
		return;

	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		e820_update_range(pa_data, sizeof(*data)+data->len,
			 E820_RAM, E820_RESERVED_KERN);
		pa_data = data->next;
		early_memunmap(data, sizeof(*data));
	}

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
	memcpy(&e820_saved, &e820, sizeof(struct e820map));
	printk(KERN_INFO "extended physical RAM map:\n");
	e820_print_map("reserve setup_data");
}

static void __init memblock_x86_reserve_range_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		memblock_reserve(pa_data, sizeof(*data) + data->len);
		pa_data = data->next;
		early_memunmap(data, sizeof(*data));
	}
}

/*
 * --------- Crashkernel reservation ------------------------------
 */

#ifdef CONFIG_KEXEC_CORE

/* 16M alignment for crash kernel regions */
#define CRASH_ALIGN		(16 << 20)

/*
 * Keep the crash kernel below this limit.  On 32-bit, earlier kernels
 * would limit the crash kernel to the low 512 MiB due to mapping
 * restrictions. On 64-bit, old kexec-tools needs the crash kernel to
 * be below 896 MiB.
 */
#ifdef CONFIG_X86_32
# define CRASH_ADDR_LOW_MAX	(512 << 20)
# define CRASH_ADDR_HIGH_MAX	(512 << 20)
#else
# define CRASH_ADDR_LOW_MAX	(896UL << 20)
# define CRASH_ADDR_HIGH_MAX	MAXMEM
#endif

static int __init reserve_crashkernel_low(void)
{
#ifdef CONFIG_X86_64
	unsigned long long base, low_base = 0, low_size = 0;
	unsigned long total_low_mem;
	int ret;

	total_low_mem = memblock_mem_size(1UL << (32 - PAGE_SHIFT));

	/* crashkernel=Y,low */
	ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base);
	if (ret) {
		/*
		 * two parts from lib/swiotlb.c:
		 * -swiotlb size: user-specified with swiotlb= or default.
		 *
		 * -swiotlb overflow buffer: now hardcoded to 32k. We round it
		 * to 8M for other buffers that may need to stay low too. Also
		 * make sure we allocate enough extra low memory so that we
		 * don't run out of DMA buffers for 32-bit devices.
		 */
		low_size = max(swiotlb_size_or_default() + (8UL << 20), 256UL << 20);
	} else {
		/* passed with crashkernel=0,low ? */
		if (!low_size)
			return 0;
	}

	low_base = memblock_find_in_range(low_size, 1ULL << 32, low_size, CRASH_ALIGN);
	if (!low_base) {
		pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n",
		       (unsigned long)(low_size >> 20));
		return -ENOMEM;
	}

	ret = memblock_reserve(low_base, low_size);
	if (ret) {
		pr_err("%s: Error reserving crashkernel low memblock.\n", __func__);
		return ret;
	}

	pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n",
		(unsigned long)(low_size >> 20),
		(unsigned long)(low_base >> 20),
		(unsigned long)(total_low_mem >> 20));

	crashk_low_res.start = low_base;
	crashk_low_res.end   = low_base + low_size - 1;
	insert_resource(&iomem_resource, &crashk_low_res);
#endif
	return 0;
}

 554static void __init reserve_crashkernel(void)
 555{
 556	unsigned long long crash_size, crash_base, total_mem;
 557	bool high = false;
 558	int ret;
 559
 560	total_mem = memblock_phys_mem_size();
 561
 562	/* crashkernel=XM */
 563	ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
 564	if (ret != 0 || crash_size <= 0) {
 565		/* crashkernel=X,high */
 566		ret = parse_crashkernel_high(boot_command_line, total_mem,
 567					     &crash_size, &crash_base);
 568		if (ret != 0 || crash_size <= 0)
 569			return;
 570		high = true;
 571	}
 572
 
 
 
 
 
 573	/* 0 means: find the address automatically */
 574	if (crash_base <= 0) {
 575		/*
 576		 *  kexec want bzImage is below CRASH_KERNEL_ADDR_MAX
 
 
 
 
 
 577		 */
 578		crash_base = memblock_find_in_range(CRASH_ALIGN,
 579						    high ? CRASH_ADDR_HIGH_MAX
 580							 : CRASH_ADDR_LOW_MAX,
 581						    crash_size, CRASH_ALIGN);
 
 
 
 
 582		if (!crash_base) {
 583			pr_info("crashkernel reservation failed - No suitable area found.\n");
 584			return;
 585		}
 586
 587	} else {
 588		unsigned long long start;
 589
 590		start = memblock_find_in_range(crash_base,
 591					       crash_base + crash_size,
 592					       crash_size, 1 << 20);
 593		if (start != crash_base) {
 594			pr_info("crashkernel reservation failed - memory is in use.\n");
 595			return;
 596		}
 597	}
 598	ret = memblock_reserve(crash_base, crash_size);
 599	if (ret) {
 600		pr_err("%s: Error reserving crashkernel memblock.\n", __func__);
 601		return;
 602	}
 603
 604	if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
 605		memblock_free(crash_base, crash_size);
 606		return;
 607	}
 608
 609	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
 610		(unsigned long)(crash_size >> 20),
 611		(unsigned long)(crash_base >> 20),
 612		(unsigned long)(total_mem >> 20));
 613
 614	crashk_res.start = crash_base;
 615	crashk_res.end   = crash_base + crash_size - 1;
 616	insert_resource(&iomem_resource, &crashk_res);
 617}
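
/*
 * Command-line forms handled above (summary; the sizes are examples
 * only):
 *   crashkernel=256M        - auto-placed below CRASH_ADDR_LOW_MAX
 *   crashkernel=256M,high   - may land above 4G; reserve_crashkernel_low()
 *                             then adds a chunk below 4G for DMA
 *   crashkernel=256M@16M    - fixed base; fails if that range is busy
 *   crashkernel=0,low       - suppresses the low reservation
 */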
#else
static void __init reserve_crashkernel(void)
{
}
#endif

static struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x60,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x64, .end = 0x64,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

void __init reserve_standard_io_resources(void)
{
	int i;

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);
}
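
/*
 * Illustrative sketch (hypothetical port range, not wired into boot):
 * claiming one more legacy range works the same way. request_resource()
 * returns 0 on success or -EBUSY when the range overlaps an existing
 * claim, which the loop above deliberately ignores.
 */
#if 0
static struct resource example_io_resource = {
	.name	= "example", .start = 0x100, .end = 0x10f,
	.flags	= IORESOURCE_BUSY | IORESOURCE_IO,
};

static void __init claim_example_port(void)
{
	if (request_resource(&ioport_resource, &example_io_resource))
		pr_warn("example I/O range 0x100-0x10f is busy\n");
}
#endif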

static __init void reserve_ibft_region(void)
{
	unsigned long addr, size = 0;

	addr = find_ibft_region(&size);

	if (size)
		memblock_reserve(addr, size);
}

static bool __init snb_gfx_workaround_needed(void)
{
#ifdef CONFIG_PCI
	int i;
	u16 vendor, devid;
	static const __initconst u16 snb_ids[] = {
		0x0102,
		0x0112,
		0x0122,
		0x0106,
		0x0116,
		0x0126,
		0x010a,
	};

	/* Assume no workaround is needed if something weird is going on with PCI */
	if (!early_pci_allowed())
		return false;

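	/* 00:02.0 is the integrated graphics device on Sandy Bridge */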
	vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
	if (vendor != 0x8086)
		return false;

	devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
	for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
		if (devid == snb_ids[i])
			return true;
#endif

	return false;
}

/*
 * Sandy Bridge graphics has trouble with certain memory ranges;
 * exclude them from allocation.
 */
static void __init trim_snb_memory(void)
{
	static const __initconst unsigned long bad_pages[] = {
		0x20050000,
		0x20110000,
		0x20130000,
		0x20138000,
		0x40004000,
	};
	int i;

	if (!snb_gfx_workaround_needed())
		return;

	printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");

	/*
	 * Reserve all memory below the 1 MB mark that has not
	 * already been reserved.
	 */
	memblock_reserve(0, 1 << 20);

	for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
		if (memblock_reserve(bad_pages[i], PAGE_SIZE))
			printk(KERN_WARNING "failed to reserve 0x%08lx\n",
			       bad_pages[i]);
	}
}

/*
 * Here we put platform-specific memory range workarounds, i.e.
 * memory known to be corrupt or otherwise in need of being reserved
 * on specific platforms.
 *
 * If this gets used more widely it could use a real dispatch mechanism.
 */
static void __init trim_platform_memory_ranges(void)
{
	trim_snb_memory();
}

static void __init trim_bios_range(void)
{
	/*
	 * A special case is the first 4 KiB of memory;
	 * this is a BIOS-owned area, not kernel RAM, but generally
	 * not listed as such in the E820 table.
	 *
	 * This typically reserves additional memory (64 KiB by default)
	 * since some BIOSes are known to corrupt low memory.  See the
	 * Kconfig help text for X86_RESERVE_LOW.
	 */
	e820_update_range(0, PAGE_SIZE, E820_RAM, E820_RESERVED);

	/*
	 * Special case: some BIOSes report the PC BIOS
	 * area (640 KiB -> 1 MiB) as RAM even though it is not.
	 * Take them out.
	 */
	e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}
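
/*
 * Worked example for the helpers above (assumed firmware map, for
 * illustration only): if E820 reported [0x0, 0x9fc00) as E820_RAM,
 * e820_update_range() retypes just page zero to E820_RESERVED, and
 * e820_remove_range() drops any RAM-typed entries inside
 * [BIOS_BEGIN, BIOS_END); sanitize_e820_map() then merges and sorts
 * what is left.
 */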

/* called before trim_bios_range() to spare extra sanitize */
static void __init e820_add_kernel_range(void)
{
	u64 start = __pa_symbol(_text);
	u64 size = __pa_symbol(_end) - start;

	/*
	 * Complain if .text, .data and .bss are not marked as E820_RAM and
	 * attempt to fix it by adding the range. We may have a confused BIOS,
	 * or the user may have used memmap=exactmap or memmap=xxM$yyM to
	 * exclude the kernel range. If we really are running on top of
	 * non-RAM, we will crash later anyway.
	 */
	if (e820_all_mapped(start, start + size, E820_RAM))
		return;

	pr_warn(".text .data .bss are not marked as E820_RAM!\n");
	e820_remove_range(start, size, E820_RAM, 0);
	e820_add_region(start, size, E820_RAM);
}

static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;

static int __init parse_reservelow(char *p)
{
	unsigned long long size;

	if (!p)
		return -EINVAL;

	size = memparse(p, &p);

	if (size < 4096)
		size = 4096;

	if (size > 640*1024)
		size = 640*1024;

	reserve_low = size;

	return 0;
}

early_param("reservelow", parse_reservelow);
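
/*
 * Worked example for parse_reservelow() (hypothetical command lines):
 *   reservelow=64k -> memparse() yields 65536, inside the
 *                     [4096, 640*1024] clamp, so reserve_low = 65536
 *   reservelow=1   -> clamped up to 4096
 *   reservelow=2M  -> clamped down to 640*1024
 */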

static void __init trim_low_memory_range(void)
{
	memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
}

/*
 * Dump out kernel offset information on panic.
 */
static int
dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
{
	if (kaslr_enabled()) {
		pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
			 kaslr_offset(),
			 __START_KERNEL,
			 __START_KERNEL_map,
			 MODULES_VADDR-1);
	} else {
		pr_emerg("Kernel Offset: disabled\n");
	}

	return 0;
}
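
/*
 * Sample of the KASLR line above, with hypothetical values:
 *   Kernel Offset: 0x10000000 from 0xffffffff81000000 (relocation range: 0xffffffff80000000-0xffffffffbfffffff)
 * i.e. the randomization delta, the link-time start address, and the
 * virtual window the image may have been relocated within.
 */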

/*
 * Determine if we were loaded by an EFI loader.  If so, then we have also been
 * passed the efi memmap, systab, etc., so we should use these data structures
 * for initialization.  Note, the efi init code path is determined by the
 * global efi_enabled. This allows the same kernel image to be used on existing
 * systems (with a traditional BIOS) as well as on EFI systems.
 */
/*
 * setup_arch - architecture-specific boot-time initializations
 *
 * Note: On x86_64, fixmaps are ready for use even before this is called.
 */

void __init setup_arch(char **cmdline_p)
{
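	/* Reserve the kernel image: everything from _text to the end of .bss */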
	memblock_reserve(__pa_symbol(_text),
			 (unsigned long)__bss_stop - (unsigned long)_text);

	early_reserve_initrd();

	/*
	 * At this point everything still needed from the boot loader
	 * or BIOS or kernel text should be early reserved or marked not
	 * RAM in e820. All other memory is free game.
	 */

#ifdef CONFIG_X86_32
	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));

	/*
	 * copy kernel address range established so far and switch
	 * to the proper swapper page table
	 */
	clone_pgd_range(swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			initial_page_table + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	load_cr3(swapper_pg_dir);
	/*
	 * Note: Quark X1000 CPUs advertise PGE incorrectly and require
	 * a cr3 based tlb flush, so the following __flush_tlb_all()
	 * will not flush anything because the cpu quirk which clears
	 * X86_FEATURE_PGE has not been invoked yet. Though due to the
	 * load_cr3() above the TLB has been flushed already. The
	 * quirk is invoked before subsequent calls to __flush_tlb_all()
	 * so proper operation is guaranteed.
	 */
	__flush_tlb_all();
#else
	printk(KERN_INFO "Command line: %s\n", boot_command_line);
#endif

	/*
	 * If we have OLPC OFW, we might end up relocating the fixmap due to
	 * reserve_top(), so do this before touching the ioremap area.
	 */
	olpc_ofw_detect();

	early_trap_init();
	early_cpu_init();
	early_ioremap_init();

	setup_olpc_ofw_pgd();

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
#ifdef CONFIG_X86_32
	apm_info.bios = boot_params.apm_bios_info;
	ist_info = boot_params.ist_info;
#endif
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;
	if ((bootloader_type >> 4) == 0xe) {
		bootloader_type &= 0xf;
		bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
	}
	bootloader_version  = bootloader_type & 0xf;
	bootloader_version |= boot_params.hdr.ext_loader_ver << 4;
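	/*
	 * Worked example of the extended-loader decoding above
	 * (hypothetical field values): type_of_loader = 0xe4 with
	 * ext_loader_type = 0x05 gives bootloader_type =
	 * ((0x05 + 0x10) << 4) | 0x4 = 0x154, and with
	 * ext_loader_ver = 0x2, bootloader_version = (0x2 << 4) | 0x4 = 0x24.
	 */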

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     EFI32_LOADER_SIGNATURE, 4)) {
		set_bit(EFI_BOOT, &efi.flags);
	} else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     EFI64_LOADER_SIGNATURE, 4)) {
		set_bit(EFI_BOOT, &efi.flags);
		set_bit(EFI_64BIT, &efi.flags);
	}

	if (efi_enabled(EFI_BOOT))
		efi_memblock_x86_reserve_range();
#endif

	x86_init.oem.arch_setup();

	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
	setup_memory_map();
	parse_setup_data();

	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = _brk_end;

	mpx_mm_init(&init_mm);

	code_resource.start = __pa_symbol(_text);
	code_resource.end = __pa_symbol(_etext)-1;
	data_resource.start = __pa_symbol(_etext);
	data_resource.end = __pa_symbol(_edata)-1;
	bss_resource.start = __pa_symbol(__bss_start);
	bss_resource.end = __pa_symbol(__bss_stop)-1;

#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if (builtin_cmdline[0]) {
		/* append boot loader cmdline to builtin */
		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif
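	/*
	 * Example of the append path above (hypothetical strings): with
	 * CONFIG_CMDLINE="console=ttyS0" built in and the boot loader
	 * passing "root=/dev/sda1", boot_command_line ends up as
	 * "console=ttyS0 root=/dev/sda1"; with CONFIG_CMDLINE_OVERRIDE set,
	 * the boot loader string would be discarded instead.
	 */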

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	/*
	 * x86_configure_nx() is called before parse_early_param() to detect
	 * whether hardware doesn't support NX (so that the early EHCI debug
	 * console setup can safely call set_fixmap()). It may then be called
	 * again from within noexec_setup() during parsing early parameters
	 * to honor the respective command line option.
	 */
	x86_configure_nx();

	parse_early_param();

	x86_report_nx();

	/* after early param, so could get panic from serial */
	memblock_x86_reserve_range_setup_data();

	if (acpi_mps_check()) {
#ifdef CONFIG_X86_LOCAL_APIC
		disable_apic = 1;
#endif
		setup_clear_cpu_cap(X86_FEATURE_APIC);
	}

#ifdef CONFIG_PCI
	if (pci_early_dump_regs)
		early_dump_pci_devices();
#endif

	/* update the e820_saved too */
	e820_reserve_setup_data();
	finish_e820_parsing();

	if (efi_enabled(EFI_BOOT))
		efi_init();

	dmi_scan_machine();
	dmi_memdev_walk();
	dmi_set_dump_stack_arch_desc();

	/*
	 * VMware detection requires dmi to be available, so this
	 * needs to be done after dmi_scan_machine(), for the boot CPU.
	 */
	init_hypervisor_platform();

	x86_init.resources.probe_roms();

	/* after parse_early_param, so could debug it */
	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);
	insert_resource(&iomem_resource, &bss_resource);

	e820_add_kernel_range();
	trim_bios_range();
#ifdef CONFIG_X86_32
	if (ppro_with_ram_bug()) {
		e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM,
				  E820_RESERVED);
		sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
		printk(KERN_INFO "fixed physical RAM map:\n");
		e820_print_map("bad_ppro");
	}
#else
	early_gart_iommu_check();
#endif

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	max_pfn = e820_end_of_ram_pfn();

	/* update e820 for memory not covered by WB MTRRs */
	mtrr_bp_init();
	if (mtrr_trim_uncached_memory(max_pfn))
		max_pfn = e820_end_of_ram_pfn();

	max_possible_pfn = max_pfn;

#ifdef CONFIG_X86_32
	/* max_low_pfn gets updated here */
	find_low_pfn_range();
#else
	check_x2apic();

	/* How many end-of-memory variables you have, grandma! */
	/* need this before calling reserve_initrd */
	if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
		max_low_pfn = e820_end_of_low_ram_pfn();
	else
		max_low_pfn = max_pfn;

	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
#endif

	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();

	reserve_ibft_region();

	early_alloc_pgt_buf();

	/*
	 * Need to conclude brk before memblock_x86_fill():
	 * it could use memblock_find_in_range, and could overlap with the
	 * brk area.
	 */
	reserve_brk();

	cleanup_highmap();

	memblock_set_current_limit(ISA_END_ADDRESS);
	memblock_x86_fill();

	if (efi_enabled(EFI_BOOT)) {
		efi_fake_memmap();
		efi_find_mirror();
	}

	/*
	 * The EFI specification says that boot service code won't be called
	 * after ExitBootServices(). This is, in fact, a lie.
	 */
	if (efi_enabled(EFI_MEMMAP))
		efi_reserve_boot_services();

	/* preallocate 4k for mptable mpc */
	early_reserve_e820_mpc_new();

#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
	setup_bios_corruption_check();
#endif

#ifdef CONFIG_X86_32
	printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
			(max_pfn_mapped<<PAGE_SHIFT) - 1);
#endif

	reserve_real_mode();

	trim_platform_memory_ranges();
	trim_low_memory_range();

	init_mem_mapping();

	early_trap_pf_init();

	setup_real_mode();

	memblock_set_current_limit(get_max_mapped());

	/*
	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
	 */

#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	if (init_ohci1394_dma_early)
		init_ohci1394_dma_on_all_controllers();
#endif
	/* Allocate bigger log buffer */
	setup_log_buf(1);

	reserve_initrd();

#if defined(CONFIG_ACPI) && defined(CONFIG_BLK_DEV_INITRD)
	acpi_initrd_override((void *)initrd_start, initrd_end - initrd_start);
#endif

	vsmp_init();

	io_delay_init();

	/*
	 * Parse the ACPI tables for possible boot-time SMP configuration.
	 */
	acpi_boot_table_init();

	early_acpi_boot_init();

	initmem_init();
	dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);

	/*
	 * Reserve memory for the crash kernel after SRAT is parsed so that
	 * it won't consume hotpluggable memory.
	 */
	reserve_crashkernel();

	memblock_find_dma_reserve();

#ifdef CONFIG_KVM_GUEST
	kvmclock_init();
#endif

	x86_init.paging.pagetable_init();

	kasan_init();

	if (boot_cpu_data.cpuid_level >= 0) {
		/* A CPU has %cr4 if and only if it has CPUID */
		mmu_cr4_features = __read_cr4();
		if (trampoline_cr4_features)
			*trampoline_cr4_features = mmu_cr4_features;
	}

#ifdef CONFIG_X86_32
	/* sync back kernel address range */
	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	/*
	 * sync back the low identity map too.  It is used, for example,
	 * in the 32-bit EFI stub.
	 */
	clone_pgd_range(initial_page_table,
			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
#endif

	tboot_probe();

	map_vsyscall();

	generic_apic_probe();

	early_quirks();

	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
	sfi_init();
	x86_dtb_init();

	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();

	prefill_possible_map();

	init_cpu_to_node();

	init_apic_mappings();
	io_apic_init_mappings();

	kvm_guest_init();

	e820_reserve_resources();
	e820_mark_nosave_regions(max_low_pfn);

	x86_init.resources.reserve_resources();

	e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	x86_init.oem.banner();

	x86_init.timers.wallclock_init();

	mcheck_init();

	arch_init_ideal_nops();

	register_refined_jiffies(CLOCK_TICK_RATE);

#ifdef CONFIG_EFI
	if (efi_enabled(EFI_BOOT))
		efi_apply_memmap_quirks();
#endif
}

#ifdef CONFIG_X86_32

static struct resource video_ram_resource = {
	.name	= "Video RAM area",
	.start	= 0xa0000,
	.end	= 0xbffff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

void __init i386_reserve_resources(void)
{
	request_resource(&iomem_resource, &video_ram_resource);
	reserve_standard_io_resources();
}

#endif /* CONFIG_X86_32 */

static struct notifier_block kernel_offset_notifier = {
	.notifier_call = dump_kernel_offset
};

static int __init register_kernel_offset_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
					&kernel_offset_notifier);
	return 0;
}
__initcall(register_kernel_offset_dumper);

void arch_show_smap(struct seq_file *m, struct vm_area_struct *vma)
{
	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return;

	seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
}