arch/x86/kernel/setup.c (Linux v6.9.4)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 * This file contains the setup_arch() code, which handles the architecture-dependent
 * parts of early kernel initialization.
 */
#include <linux/acpi.h>
#include <linux/console.h>
#include <linux/crash_dump.h>
#include <linux/dma-map-ops.h>
#include <linux/efi.h>
#include <linux/ima.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/initrd.h>
#include <linux/iscsi_ibft.h>
#include <linux/memblock.h>
#include <linux/panic_notifier.h>
#include <linux/pci.h>
#include <linux/root_dev.h>
#include <linux/hugetlb.h>
#include <linux/tboot.h>
#include <linux/usb/xhci-dbgp.h>
#include <linux/static_call.h>
#include <linux/swiotlb.h>
#include <linux/random.h>

#include <uapi/linux/mount.h>

#include <xen/xen.h>

#include <asm/apic.h>
#include <asm/efi.h>
#include <asm/numa.h>
#include <asm/bios_ebda.h>
#include <asm/bugs.h>
#include <asm/cacheinfo.h>
#include <asm/coco.h>
#include <asm/cpu.h>
#include <asm/efi.h>
#include <asm/gart.h>
#include <asm/hypervisor.h>
#include <asm/io_apic.h>
#include <asm/kasan.h>
#include <asm/kaslr.h>
#include <asm/mce.h>
#include <asm/memtype.h>
#include <asm/mtrr.h>
#include <asm/realmode.h>
#include <asm/olpc_ofw.h>
#include <asm/pci-direct.h>
#include <asm/prom.h>
#include <asm/proto.h>
#include <asm/thermal.h>
#include <asm/unwind.h>
#include <asm/vsyscall.h>
#include <linux/vmalloc.h>

/*
 * max_low_pfn_mapped: highest directly mapped pfn < 4 GB
 * max_pfn_mapped:     highest directly mapped pfn > 4 GB
 *
 * The direct mapping only covers E820_TYPE_RAM regions, so the ranges and gaps are
 * represented by pfn_mapped[].
 */
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

#ifdef CONFIG_DMI
RESERVE_BRK(dmi_alloc, 65536);
#endif


unsigned long _brk_start = (unsigned long)__brk_base;
unsigned long _brk_end   = (unsigned long)__brk_base;

struct boot_params boot_params;

/*
 * These are the four main kernel memory regions, we put them into
 * the resource tree so that kdump tools and other debugging tools
 * can recover them:
 */

static struct resource rodata_resource = {
	.name	= "Kernel rodata",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource data_resource = {
	.name	= "Kernel data",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};


#ifdef CONFIG_X86_32
/* CPU data as detected by the assembly code in head_32.S */
struct cpuinfo_x86 new_cpu_data;

struct apm_info apm_info;
EXPORT_SYMBOL(apm_info);

#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
	defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
struct ist_info ist_info;
EXPORT_SYMBOL(ist_info);
#else
struct ist_info ist_info;
#endif

#endif

struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
__visible unsigned long mmu_cr4_features __ro_after_init;
#else
__visible unsigned long mmu_cr4_features __ro_after_init = X86_CR4_PAE;
#endif

#ifdef CONFIG_IMA
static phys_addr_t ima_kexec_buffer_phys;
static size_t ima_kexec_buffer_size;
#endif

/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
int bootloader_type, bootloader_version;

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

unsigned long saved_video_mode;

#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 *
 */
static inline void __init copy_edd(void)
{
     memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	    sizeof(edd.mbr_signature));
     memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
     edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
     edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void __init copy_edd(void)
{
}
#endif

void * __init extend_brk(size_t size, size_t align)
{
	size_t mask = align - 1;
	void *ret;

	BUG_ON(_brk_start == 0);
	BUG_ON(align & mask);

	_brk_end = (_brk_end + mask) & ~mask;
	BUG_ON((char *)(_brk_end + size) > __brk_limit);

	ret = (void *)_brk_end;
	_brk_end += size;

	memset(ret, 0, size);

	return ret;
}
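/*
 * Worked example of the round-up above (illustrative values, not from the
 * source): with _brk_end = 0x10000123 and align = 64, mask is 0x3f, so
 * (_brk_end + mask) & ~mask = 0x10000140. The zeroed allocation starts
 * there and _brk_end advances by 'size'.
 */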

#ifdef CONFIG_X86_32
static void __init cleanup_highmap(void)
{
}
#endif

static void __init reserve_brk(void)
{
	if (_brk_end > _brk_start)
		memblock_reserve(__pa_symbol(_brk_start),
				 _brk_end - _brk_start);

	/* Mark brk area as locked down and no longer taking any
	   new allocations */
	_brk_start = 0;
}

#ifdef CONFIG_BLK_DEV_INITRD

static u64 __init get_ramdisk_image(void)
{
	u64 ramdisk_image = boot_params.hdr.ramdisk_image;

	ramdisk_image |= (u64)boot_params.ext_ramdisk_image << 32;

	if (ramdisk_image == 0)
		ramdisk_image = phys_initrd_start;

	return ramdisk_image;
}
static u64 __init get_ramdisk_size(void)
{
	u64 ramdisk_size = boot_params.hdr.ramdisk_size;

	ramdisk_size |= (u64)boot_params.ext_ramdisk_size << 32;

	if (ramdisk_size == 0)
		ramdisk_size = phys_initrd_size;

	return ramdisk_size;
}
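/*
 * Example with hypothetical values: a bootloader that loads the initrd at
 * physical 0x17f000000 stores hdr.ramdisk_image = 0x7f000000 (low 32 bits)
 * and ext_ramdisk_image = 0x1 (high 32 bits); the helpers above splice the
 * halves back into one 64-bit value, and the size fields work the same way.
 */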

static void __init relocate_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 area_size     = PAGE_ALIGN(ramdisk_size);

	/* We need to move the initrd down into directly mapped mem */
	u64 relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0,
						      PFN_PHYS(max_pfn_mapped));
	if (!relocated_ramdisk)
		panic("Cannot find place for new RAMDISK of size %lld\n",
		      ramdisk_size);

	initrd_start = relocated_ramdisk + PAGE_OFFSET;
	initrd_end   = initrd_start + ramdisk_size;
	printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
	       relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);

	copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size);

	printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
		" [mem %#010llx-%#010llx]\n",
		ramdisk_image, ramdisk_image + ramdisk_size - 1,
		relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
}
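/*
 * The PAGE_OFFSET arithmetic above turns a physical address into its
 * virtual alias in the kernel's direct mapping, which is also why the
 * relocation target is constrained to PFN_PHYS(max_pfn_mapped): only
 * already direct-mapped memory can be addressed that way this early.
 */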

static void __init early_reserve_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
}

static void __init reserve_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	initrd_start = 0;

	printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
			ramdisk_end - 1);

	if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
				PFN_DOWN(ramdisk_end))) {
		/* All are mapped, easy case */
		initrd_start = ramdisk_image + PAGE_OFFSET;
		initrd_end = initrd_start + ramdisk_size;
		return;
	}

	relocate_initrd();

	memblock_phys_free(ramdisk_image, ramdisk_end - ramdisk_image);
}

#else
static void __init early_reserve_initrd(void)
{
}
static void __init reserve_initrd(void)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */

static void __init add_early_ima_buffer(u64 phys_addr)
{
#ifdef CONFIG_IMA
	struct ima_setup_data *data;

	data = early_memremap(phys_addr + sizeof(struct setup_data), sizeof(*data));
	if (!data) {
		pr_warn("setup: failed to memremap ima_setup_data entry\n");
		return;
	}

	if (data->size) {
		memblock_reserve(data->addr, data->size);
		ima_kexec_buffer_phys = data->addr;
		ima_kexec_buffer_size = data->size;
	}

	early_memunmap(data, sizeof(*data));
#else
	pr_warn("Passed IMA kexec data, but CONFIG_IMA not set. Ignoring.\n");
#endif
}

#if defined(CONFIG_HAVE_IMA_KEXEC) && !defined(CONFIG_OF_FLATTREE)
int __init ima_free_kexec_buffer(void)
{
	if (!ima_kexec_buffer_size)
		return -ENOENT;

	memblock_free_late(ima_kexec_buffer_phys,
			   ima_kexec_buffer_size);

	ima_kexec_buffer_phys = 0;
	ima_kexec_buffer_size = 0;

	return 0;
}

int __init ima_get_kexec_buffer(void **addr, size_t *size)
{
	if (!ima_kexec_buffer_size)
		return -ENOENT;

	*addr = __va(ima_kexec_buffer_phys);
	*size = ima_kexec_buffer_size;

	return 0;
}
#endif

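/*
 * For reference (declared in arch/x86/include/uapi/asm/bootparam.h), each
 * setup_data entry is a small header followed by a payload, chained via a
 * physical-address 'next' pointer that the loop below walks:
 *
 *	struct setup_data {
 *		__u64 next;	// physical address of the next node, 0 ends the list
 *		__u32 type;	// SETUP_E820_EXT, SETUP_DTB, SETUP_EFI, ...
 *		__u32 len;	// length of the payload in data[]
 *		__u8  data[];
 *	};
 */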
static void __init parse_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data, pa_next;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		u32 data_len, data_type;

		data = early_memremap(pa_data, sizeof(*data));
		data_len = data->len + sizeof(struct setup_data);
		data_type = data->type;
		pa_next = data->next;
		early_memunmap(data, sizeof(*data));

		switch (data_type) {
		case SETUP_E820_EXT:
			e820__memory_setup_extended(pa_data, data_len);
			break;
		case SETUP_DTB:
			add_dtb(pa_data);
			break;
		case SETUP_EFI:
			parse_efi_setup(pa_data, data_len);
			break;
		case SETUP_IMA:
			add_early_ima_buffer(pa_data);
			break;
		case SETUP_RNG_SEED:
			data = early_memremap(pa_data, data_len);
			add_bootloader_randomness(data->data, data->len);
			/* Zero seed for forward secrecy. */
			memzero_explicit(data->data, data->len);
			/* Zero length in case we find ourselves back here by accident. */
			memzero_explicit(&data->len, sizeof(data->len));
			early_memunmap(data, data_len);
			break;
		default:
			break;
		}
		pa_data = pa_next;
	}
}

static void __init memblock_x86_reserve_range_setup_data(void)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 pa_data, pa_next;
	u32 len;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		if (!data) {
			pr_warn("setup: failed to memremap setup_data entry\n");
			return;
		}

		len = sizeof(*data);
		pa_next = data->next;

		memblock_reserve(pa_data, sizeof(*data) + data->len);

		if (data->type == SETUP_INDIRECT) {
			len += data->len;
			early_memunmap(data, sizeof(*data));
			data = early_memremap(pa_data, len);
			if (!data) {
				pr_warn("setup: failed to memremap indirect setup_data\n");
				return;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT)
				memblock_reserve(indirect->addr, indirect->len);
		}

		pa_data = pa_next;
		early_memunmap(data, len);
	}
}
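/*
 * A SETUP_INDIRECT entry's payload is a struct setup_indirect pointing at
 * a separate blob elsewhere in physical memory, which is why the loop
 * above remaps the node a second time (header plus payload) and also
 * reserves the out-of-line region at indirect->addr.
 */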

static void __init arch_reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size, low_size = 0;
	char *cmdline = boot_command_line;
	bool high = false;
	int ret;

	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
		return;

	ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
				&crash_size, &crash_base,
				&low_size, &high);
	if (ret)
		return;

	if (xen_pv_domain()) {
		pr_info("Ignoring crashkernel for a Xen PV domain\n");
		return;
	}

	reserve_crashkernel_generic(cmdline, crash_size, crash_base,
				    low_size, high);
}
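/*
 * The accepted syntax is the documented crashkernel= family, e.g.
 * "crashkernel=512M" for an automatically placed reservation, or
 * "crashkernel=512M,high" to allow placement above 4G with 'low_size'
 * sizing an extra below-4G block for DMA; see
 * Documentation/admin-guide/kernel-parameters.txt.
 */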
 
static struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x60,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x64, .end = 0x64,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

void __init reserve_standard_io_resources(void)
{
	int i;

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);

}

static bool __init snb_gfx_workaround_needed(void)
{
#ifdef CONFIG_PCI
	int i;
	u16 vendor, devid;
	static const __initconst u16 snb_ids[] = {
		0x0102,
		0x0112,
		0x0122,
		0x0106,
		0x0116,
		0x0126,
		0x010a,
	};

	/* Assume no if something weird is going on with PCI */
	if (!early_pci_allowed())
		return false;

	vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
	if (vendor != 0x8086)
		return false;

	devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
	for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
		if (devid == snb_ids[i])
			return true;
#endif

	return false;
}
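/*
 * Bus 0, device 2, function 0 is where Intel integrated graphics
 * conventionally lives, and 0x8086 is Intel's PCI vendor ID, so the check
 * above asks: is the IGD an affected Sandy Bridge part?
 */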

/*
 * Sandy Bridge graphics has trouble with certain ranges, exclude
 * them from allocation.
 */
static void __init trim_snb_memory(void)
{
	static const __initconst unsigned long bad_pages[] = {
		0x20050000,
		0x20110000,
		0x20130000,
		0x20138000,
		0x40004000,
	};
	int i;

	if (!snb_gfx_workaround_needed())
		return;

	printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");

	/*
	 * SandyBridge integrated graphics devices have a bug that prevents
	 * them from accessing certain memory ranges, namely anything below
	 * 1M and in the pages listed in bad_pages[] above.
	 *
	 * To avoid these pages being ever accessed by SNB gfx devices reserve
	 * bad_pages that have not already been reserved at boot time.
	 * All memory below the 1 MB mark is anyway reserved later during
	 * setup_arch(), so there is no need to reserve it here.
	 */

	for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
		if (memblock_reserve(bad_pages[i], PAGE_SIZE))
			printk(KERN_WARNING "failed to reserve 0x%08lx\n",
			       bad_pages[i]);
	}
}

static void __init trim_bios_range(void)
{
	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 *
	 * This typically reserves additional memory (64KiB by default)
	 * since some BIOSes are known to corrupt low memory.  See the
	 * Kconfig help text for X86_RESERVE_LOW.
	 */
	e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);

	/*
	 * special case: Some BIOSes report the PC BIOS
	 * area (640Kb -> 1Mb) as RAM even though it is not.
	 * take them out.
	 */
	e820__range_remove(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_TYPE_RAM, 1);

	e820__update_table(e820_table);
}

/* called before trim_bios_range() to spare extra sanitize */
static void __init e820_add_kernel_range(void)
{
	u64 start = __pa_symbol(_text);
	u64 size = __pa_symbol(_end) - start;

	/*
	 * Complain if .text .data and .bss are not marked as E820_TYPE_RAM and
	 * attempt to fix it by adding the range. We may have a confused BIOS,
	 * or the user may have used memmap=exactmap or memmap=xxM$yyM to
	 * exclude the kernel range. If we really are running on top of non-RAM,
	 * we will crash later anyway.
	 */
	if (e820__mapped_all(start, start + size, E820_TYPE_RAM))
		return;

	pr_warn(".text .data .bss are not marked as E820_TYPE_RAM!\n");
	e820__range_remove(start, size, E820_TYPE_RAM, 0);
	e820__range_add(start, size, E820_TYPE_RAM);
}

static void __init early_reserve_memory(void)
{
	/*
	 * Reserve the memory occupied by the kernel between _text and
	 * __end_of_kernel_reserve symbols. Any kernel sections after the
	 * __end_of_kernel_reserve symbol must be explicitly reserved with a
	 * separate memblock_reserve() or they will be discarded.
	 */
	memblock_reserve(__pa_symbol(_text),
			 (unsigned long)__end_of_kernel_reserve - (unsigned long)_text);

	/*
	 * The first 4Kb of memory is a BIOS owned area, but generally it is
	 * not listed as such in the E820 table.
	 *
	 * Reserve the first 64K of memory since some BIOSes are known to
	 * corrupt low memory. After the real mode trampoline is allocated the
	 * rest of the memory below 640k is reserved.
	 *
	 * In addition, make sure page 0 is always reserved because on
	 * systems with L1TF its contents can be leaked to user processes.
	 */
	memblock_reserve(0, SZ_64K);

	early_reserve_initrd();

	memblock_x86_reserve_range_setup_data();

	reserve_bios_regions();
	trim_snb_memory();
}

/*
 * Dump out kernel offset information on panic.
 */
static int
dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
{
	if (kaslr_enabled()) {
		pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
			 kaslr_offset(),
			 __START_KERNEL,
			 __START_KERNEL_map,
			 MODULES_VADDR-1);
	} else {
		pr_emerg("Kernel Offset: disabled\n");
	}

	return 0;
}
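/*
 * Example output (illustrative numbers, assuming the default x86-64
 * __START_KERNEL of 0xffffffff81000000): a KASLR slide of 0xc000000
 * would print "Kernel Offset: 0xc000000 from 0xffffffff81000000 ...".
 */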

void x86_configure_nx(void)
{
	if (boot_cpu_has(X86_FEATURE_NX))
		__supported_pte_mask |= _PAGE_NX;
	else
		__supported_pte_mask &= ~_PAGE_NX;
}
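/*
 * _PAGE_NX is the execute-disable bit, bit 63 of a page table entry; it
 * only exists in the 64-bit PTE formats, which is why the non-PAE 32-bit
 * case reported below cannot use it.
 */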

static void __init x86_report_nx(void)
{
	if (!boot_cpu_has(X86_FEATURE_NX)) {
		printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
		       "missing in CPU!\n");
	} else {
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#else
		/* 32bit non-PAE kernel, NX cannot be used */
		printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
		       "cannot be enabled: non-PAE kernel!\n");
#endif
	}
}

/*
 * Determine if we were loaded by an EFI loader.  If so, then we have also been
 * passed the efi memmap, systab, etc., so we should use these data structures
 * for initialization.  Note, the efi init code path is determined by the
 * global efi_enabled. This allows the same kernel image to be used on existing
 * systems (with a traditional BIOS) as well as on EFI systems.
 */
/*
 * setup_arch - architecture-specific boot-time initializations
 *
 * Note: On x86_64, fixmaps are ready for use even before this is called.
 */

void __init setup_arch(char **cmdline_p)
{
#ifdef CONFIG_X86_32
	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));

	/*
	 * copy kernel address range established so far and switch
	 * to the proper swapper page table
	 */
	clone_pgd_range(swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			initial_page_table + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	load_cr3(swapper_pg_dir);
	/*
	 * Note: Quark X1000 CPUs advertise PGE incorrectly and require
	 * a cr3 based tlb flush, so the following __flush_tlb_all()
	 * will not flush anything because the CPU quirk which clears
	 * X86_FEATURE_PGE has not been invoked yet. Though due to the
	 * load_cr3() above the TLB has been flushed already. The
	 * quirk is invoked before subsequent calls to __flush_tlb_all()
	 * so proper operation is guaranteed.
	 */
	__flush_tlb_all();
#else
	printk(KERN_INFO "Command line: %s\n", boot_command_line);
	boot_cpu_data.x86_phys_bits = MAX_PHYSMEM_BITS;
#endif

	/*
	 * If we have OLPC OFW, we might end up relocating the fixmap due to
	 * reserve_top(), so do this before touching the ioremap area.
	 */
	olpc_ofw_detect();

	idt_setup_early_traps();
	early_cpu_init();
	jump_label_init();
	static_call_init();
	early_ioremap_init();

	setup_olpc_ofw_pgd();

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
#ifdef CONFIG_X86_32
	apm_info.bios = boot_params.apm_bios_info;
	ist_info = boot_params.ist_info;
#endif
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;
	if ((bootloader_type >> 4) == 0xe) {
		bootloader_type &= 0xf;
		bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
	}
	bootloader_version  = bootloader_type & 0xf;
	bootloader_version |= boot_params.hdr.ext_loader_ver << 4;
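	/*
	 * Worked example of the 0xE escape above (hypothetical values): a
	 * loader reporting type_of_loader = 0xE4 with ext_loader_type = 0x05
	 * decodes to bootloader_type = ((0x05 + 0x10) << 4) | 0x4 = 0x154,
	 * keeping the low nibble as the loader's version field.
	 */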

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
#endif
#ifdef CONFIG_EFI
	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     EFI32_LOADER_SIGNATURE, 4)) {
		set_bit(EFI_BOOT, &efi.flags);
	} else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     EFI64_LOADER_SIGNATURE, 4)) {
		set_bit(EFI_BOOT, &efi.flags);
		set_bit(EFI_64BIT, &efi.flags);
	}
#endif

	x86_init.oem.arch_setup();

	/*
	 * Do some memory reservations *before* memory is added to memblock, so
	 * memblock allocations won't overwrite it.
	 *
	 * After this point, everything still needed from the boot loader or
	 * firmware or kernel text should be early reserved or marked not RAM in
	 * e820. All other memory is free game.
	 *
	 * This call needs to happen before e820__memory_setup() which calls the
	 * xen_memory_setup() on Xen dom0 which relies on the fact that those
	 * early reservations have happened already.
	 */
	early_reserve_memory();

	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
	e820__memory_setup();
	parse_setup_data();

	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	setup_initial_init_mm(_text, _etext, _edata, (void *)_brk_end);

	code_resource.start = __pa_symbol(_text);
	code_resource.end = __pa_symbol(_etext)-1;
	rodata_resource.start = __pa_symbol(__start_rodata);
	rodata_resource.end = __pa_symbol(__end_rodata)-1;
	data_resource.start = __pa_symbol(_sdata);
	data_resource.end = __pa_symbol(_edata)-1;
	bss_resource.start = __pa_symbol(__bss_start);
	bss_resource.end = __pa_symbol(__bss_stop)-1;

#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
	strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if (builtin_cmdline[0]) {
		/* append boot loader cmdline to builtin */
		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
		strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif
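	/*
	 * Example for the non-override path above (hypothetical values): with
	 * CONFIG_CMDLINE="quiet" built in and a bootloader passing
	 * "root=/dev/sda1", the kernel parses "quiet root=/dev/sda1"; with
	 * CONFIG_CMDLINE_OVERRIDE the bootloader string is discarded entirely.
	 */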

	strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	/*
	 * x86_configure_nx() is called before parse_early_param() to detect
	 * whether the hardware supports NX (so that the early EHCI debug
	 * console setup can safely call set_fixmap()).
	 */
	x86_configure_nx();

	parse_early_param();

	if (efi_enabled(EFI_BOOT))
		efi_memblock_x86_reserve_range();

#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Memory used by the kernel cannot be hot-removed because Linux
	 * cannot migrate the kernel pages. When memory hotplug is
	 * enabled, we should prevent memblock from allocating memory
	 * for the kernel.
	 *
	 * ACPI SRAT records all hotpluggable memory ranges. But before
	 * SRAT is parsed, we don't know about it.
	 *
	 * The kernel image is loaded into memory at very early time. We
	 * cannot prevent this anyway. So on NUMA system, we set any
	 * node the kernel resides in as un-hotpluggable.
	 *
	 * Since on modern servers, one node could have double-digit
	 * gigabytes memory, we can assume the memory around the kernel
	 * image is also un-hotpluggable. So before SRAT is parsed, just
	 * allocate memory near the kernel image to try the best to keep
	 * the kernel away from hotpluggable memory.
	 */
	if (movable_node_is_enabled())
		memblock_set_bottom_up(true);
#endif

	x86_report_nx();

	apic_setup_apic_calls();

	if (acpi_mps_check()) {
#ifdef CONFIG_X86_LOCAL_APIC
		apic_is_disabled = true;
#endif
		setup_clear_cpu_cap(X86_FEATURE_APIC);
	}

	e820__reserve_setup_data();
	e820__finish_early_params();

	if (efi_enabled(EFI_BOOT))
		efi_init();

	reserve_ibft_region();
	x86_init.resources.dmi_setup();

	/*
	 * VMware detection requires dmi to be available, so this
	 * needs to be done after dmi_setup(), for the boot CPU.
	 * For some guest types (Xen PV, SEV-SNP, TDX) it is required to be
	 * called before cache_bp_init() for setting up MTRR state.
	 */
	init_hypervisor_platform();

	tsc_early_init();
	x86_init.resources.probe_roms();

	/* after parse_early_param, so could debug it */
	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &rodata_resource);
	insert_resource(&iomem_resource, &data_resource);
	insert_resource(&iomem_resource, &bss_resource);

	e820_add_kernel_range();
	trim_bios_range();
#ifdef CONFIG_X86_32
	if (ppro_with_ram_bug()) {
		e820__range_update(0x70000000ULL, 0x40000ULL, E820_TYPE_RAM,
				  E820_TYPE_RESERVED);
		e820__update_table(e820_table);
		printk(KERN_INFO "fixed physical RAM map:\n");
		e820__print_table("bad_ppro");
	}
#else
	early_gart_iommu_check();
#endif

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	max_pfn = e820__end_of_ram_pfn();

	/* update e820 for memory not covered by WB MTRRs */
	cache_bp_init();
	if (mtrr_trim_uncached_memory(max_pfn))
		max_pfn = e820__end_of_ram_pfn();

	max_possible_pfn = max_pfn;

	/*
	 * Define random base addresses for memory sections after max_pfn is
	 * defined and before each memory section base is used.
	 */
	kernel_randomize_memory();

#ifdef CONFIG_X86_32
	/* max_low_pfn gets updated here */
	find_low_pfn_range();
#else
	check_x2apic();

	/* How many end-of-memory variables you have, grandma! */
	/* need this before calling reserve_initrd */
	if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
		max_low_pfn = e820__end_of_low_ram_pfn();
	else
		max_low_pfn = max_pfn;

	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
#endif

	/* Find and reserve MPTABLE area */
	x86_init.mpparse.find_mptable();

	early_alloc_pgt_buf();

	/*
	 * Need to conclude brk, before e820__memblock_setup()
	 * it could use memblock_find_in_range, could overlap with
	 * brk area.
	 */
	reserve_brk();

	cleanup_highmap();

	memblock_set_current_limit(ISA_END_ADDRESS);
	e820__memblock_setup();

	/*
	 * Needs to run after memblock setup because it needs the physical
	 * memory size.
	 */
	mem_encrypt_setup_arch();
	cc_random_init();

	efi_fake_memmap();
	efi_find_mirror();
	efi_esrt_init();
	efi_mokvar_table_init();

	/*
	 * The EFI specification says that boot service code won't be
	 * called after ExitBootServices(). This is, in fact, a lie.
	 */
	efi_reserve_boot_services();

	/* preallocate 4k for mptable mpc */
	e820__memblock_alloc_reserved_mpc_new();

#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
	setup_bios_corruption_check();
#endif

#ifdef CONFIG_X86_32
	printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
			(max_pfn_mapped<<PAGE_SHIFT) - 1);
#endif

	/*
	 * Find free memory for the real mode trampoline and place it there. If
	 * there is not enough free memory under 1M, on EFI-enabled systems
	 * there will be an additional attempt to reclaim the memory for the
	 * real mode trampoline at efi_free_boot_services().
	 *
	 * Unconditionally reserve the entire first 1M of RAM because BIOSes
	 * are known to corrupt low memory and several hundred kilobytes are not
	 * worth complex detection of what memory gets clobbered. Windows does
	 * the same thing for very similar reasons.
	 *
	 * Moreover, on machines with SandyBridge graphics or in setups that use
	 * crashkernel the entire 1M is reserved anyway.
	 *
	 * Note the host kernel TDX also requires the first 1MB to be reserved.
	 */
	x86_platform.realmode_reserve();

	init_mem_mapping();

	idt_setup_early_pf();

	/*
	 * Update mmu_cr4_features (and, indirectly, trampoline_cr4_features)
	 * with the current CR4 value.  This may not be necessary, but
	 * auditing all the early-boot CR4 manipulation would be needed to
	 * rule it out.
	 *
	 * Mask off features that don't work outside long mode (just
	 * PCIDE for now).
	 */
	mmu_cr4_features = __read_cr4() & ~X86_CR4_PCIDE;

	memblock_set_current_limit(get_max_mapped());

	/*
	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
	 */

#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	if (init_ohci1394_dma_early)
		init_ohci1394_dma_on_all_controllers();
#endif
	/* Allocate bigger log buffer */
	setup_log_buf(1);

	if (efi_enabled(EFI_BOOT)) {
		switch (boot_params.secure_boot) {
		case efi_secureboot_mode_disabled:
			pr_info("Secure boot disabled\n");
			break;
		case efi_secureboot_mode_enabled:
			pr_info("Secure boot enabled\n");
			break;
		default:
			pr_info("Secure boot could not be determined\n");
			break;
		}
	}

	reserve_initrd();

	acpi_table_upgrade();
	/* Look for ACPI tables and reserve memory occupied by them. */
	acpi_boot_table_init();

	vsmp_init();

	io_delay_init();

	early_platform_quirks();

	/* Some platforms need the APIC registered for NUMA configuration */
	early_acpi_boot_init();
	x86_init.mpparse.early_parse_smp_cfg();

	x86_flattree_get_config();

	initmem_init();
	dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);

	if (boot_cpu_has(X86_FEATURE_GBPAGES))
		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);

	/*
	 * Reserve memory for crash kernel after SRAT is parsed so that it
	 * won't consume hotpluggable memory.
	 */
	arch_reserve_crashkernel();

	memblock_find_dma_reserve();

	if (!early_xdbc_setup_hardware())
		early_xdbc_register_console();

	x86_init.paging.pagetable_init();

	kasan_init();

	/*
	 * Sync back kernel address range.
	 *
	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
	 * this call?
	 */
	sync_initial_page_table();

	tboot_probe();

	map_vsyscall();

	x86_32_probe_apic();

	early_quirks();

	topology_apply_cmdline_limits_early();

	/*
	 * Parse SMP configuration. Try ACPI first and then the platform
	 * specific parser.
	 */
	acpi_boot_init();
	x86_init.mpparse.parse_smp_cfg();

	/* Last opportunity to detect and map the local APIC */
	init_apic_mappings();

	topology_init_possible_cpus();

	init_cpu_to_node();
	init_gi_nodes();

	io_apic_init_mappings();

	x86_init.hyper.guest_late_init();

	e820__reserve_resources();
	e820__register_nosave_regions(max_pfn);

	x86_init.resources.reserve_resources();

	e820__setup_pci_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		vgacon_register_screen(&screen_info);
#endif
#endif
	x86_init.oem.banner();

	x86_init.timers.wallclock_init();

	/*
	 * This needs to run before setup_local_APIC() which soft-disables the
	 * local APIC temporarily and that masks the thermal LVT interrupt,
	 * leading to softlockups on machines which have configured SMI
	 * interrupt delivery.
	 */
	therm_lvt_init();

	mcheck_init();

	register_refined_jiffies(CLOCK_TICK_RATE);

#ifdef CONFIG_EFI
	if (efi_enabled(EFI_BOOT))
		efi_apply_memmap_quirks();
#endif

	unwind_init();
}

#ifdef CONFIG_X86_32

static struct resource video_ram_resource = {
	.name	= "Video RAM area",
	.start	= 0xa0000,
	.end	= 0xbffff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

void __init i386_reserve_resources(void)
{
	request_resource(&iomem_resource, &video_ram_resource);
	reserve_standard_io_resources();
}

#endif /* CONFIG_X86_32 */

static struct notifier_block kernel_offset_notifier = {
	.notifier_call = dump_kernel_offset
};

static int __init register_kernel_offset_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
					&kernel_offset_notifier);
	return 0;
}
__initcall(register_kernel_offset_dumper);
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  Copyright (C) 1995  Linus Torvalds
   4 *
   5 * This file contains the setup_arch() code, which handles the architecture-dependent
   6 * parts of early kernel initialization.
   7 */
 
   8#include <linux/console.h>
   9#include <linux/crash_dump.h>
  10#include <linux/dmi.h>
  11#include <linux/efi.h>
 
  12#include <linux/init_ohci1394_dma.h>
  13#include <linux/initrd.h>
  14#include <linux/iscsi_ibft.h>
  15#include <linux/memblock.h>
 
  16#include <linux/pci.h>
  17#include <linux/root_dev.h>
  18#include <linux/sfi.h>
  19#include <linux/hugetlb.h>
  20#include <linux/tboot.h>
  21#include <linux/usb/xhci-dbgp.h>
 
 
 
  22
  23#include <uapi/linux/mount.h>
  24
  25#include <xen/xen.h>
  26
  27#include <asm/apic.h>
 
  28#include <asm/numa.h>
  29#include <asm/bios_ebda.h>
  30#include <asm/bugs.h>
 
 
  31#include <asm/cpu.h>
  32#include <asm/efi.h>
  33#include <asm/gart.h>
  34#include <asm/hypervisor.h>
  35#include <asm/io_apic.h>
  36#include <asm/kasan.h>
  37#include <asm/kaslr.h>
  38#include <asm/mce.h>
 
  39#include <asm/mtrr.h>
  40#include <asm/realmode.h>
  41#include <asm/olpc_ofw.h>
  42#include <asm/pci-direct.h>
  43#include <asm/prom.h>
  44#include <asm/proto.h>
 
  45#include <asm/unwind.h>
  46#include <asm/vsyscall.h>
  47#include <linux/vmalloc.h>
  48
  49/*
  50 * max_low_pfn_mapped: highest directly mapped pfn < 4 GB
  51 * max_pfn_mapped:     highest directly mapped pfn > 4 GB
  52 *
  53 * The direct mapping only covers E820_TYPE_RAM regions, so the ranges and gaps are
  54 * represented by pfn_mapped[].
  55 */
  56unsigned long max_low_pfn_mapped;
  57unsigned long max_pfn_mapped;
  58
  59#ifdef CONFIG_DMI
  60RESERVE_BRK(dmi_alloc, 65536);
  61#endif
  62
  63
  64/*
  65 * Range of the BSS area. The size of the BSS area is determined
  66 * at link time, with RESERVE_BRK*() facility reserving additional
  67 * chunks.
  68 */
  69unsigned long _brk_start = (unsigned long)__brk_base;
  70unsigned long _brk_end   = (unsigned long)__brk_base;
  71
  72struct boot_params boot_params;
  73
  74/*
  75 * These are the four main kernel memory regions, we put them into
  76 * the resource tree so that kdump tools and other debugging tools
  77 * recover it:
  78 */
  79
  80static struct resource rodata_resource = {
  81	.name	= "Kernel rodata",
  82	.start	= 0,
  83	.end	= 0,
  84	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
  85};
  86
  87static struct resource data_resource = {
  88	.name	= "Kernel data",
  89	.start	= 0,
  90	.end	= 0,
  91	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
  92};
  93
  94static struct resource code_resource = {
  95	.name	= "Kernel code",
  96	.start	= 0,
  97	.end	= 0,
  98	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
  99};
 100
 101static struct resource bss_resource = {
 102	.name	= "Kernel bss",
 103	.start	= 0,
 104	.end	= 0,
 105	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
 106};
 107
 108
 109#ifdef CONFIG_X86_32
 110/* CPU data as detected by the assembly code in head_32.S */
 111struct cpuinfo_x86 new_cpu_data;
 112
 113/* Common CPU data for all CPUs */
 114struct cpuinfo_x86 boot_cpu_data __read_mostly;
 115EXPORT_SYMBOL(boot_cpu_data);
 116
 117unsigned int def_to_bigsmp;
 118
 119/* For MCA, but anyone else can use it if they want */
 120unsigned int machine_id;
 121unsigned int machine_submodel_id;
 122unsigned int BIOS_revision;
 123
 124struct apm_info apm_info;
 125EXPORT_SYMBOL(apm_info);
 126
 127#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
 128	defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
 129struct ist_info ist_info;
 130EXPORT_SYMBOL(ist_info);
 131#else
 132struct ist_info ist_info;
 133#endif
 134
 135#else
 
 136struct cpuinfo_x86 boot_cpu_data __read_mostly;
 137EXPORT_SYMBOL(boot_cpu_data);
 138#endif
 139
 140
 141#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
 142__visible unsigned long mmu_cr4_features __ro_after_init;
 143#else
 144__visible unsigned long mmu_cr4_features __ro_after_init = X86_CR4_PAE;
 145#endif
 146
 
 
 
 
 
 147/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
 148int bootloader_type, bootloader_version;
 149
 150/*
 151 * Setup options
 152 */
 153struct screen_info screen_info;
 154EXPORT_SYMBOL(screen_info);
 155struct edid_info edid_info;
 156EXPORT_SYMBOL_GPL(edid_info);
 157
 158extern int root_mountflags;
 159
 160unsigned long saved_video_mode;
 161
 162#define RAMDISK_IMAGE_START_MASK	0x07FF
 163#define RAMDISK_PROMPT_FLAG		0x8000
 164#define RAMDISK_LOAD_FLAG		0x4000
 165
 166static char __initdata command_line[COMMAND_LINE_SIZE];
 167#ifdef CONFIG_CMDLINE_BOOL
 168static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
 169#endif
 170
 171#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
 172struct edd edd;
 173#ifdef CONFIG_EDD_MODULE
 174EXPORT_SYMBOL(edd);
 175#endif
 176/**
 177 * copy_edd() - Copy the BIOS EDD information
 178 *              from boot_params into a safe place.
 179 *
 180 */
 181static inline void __init copy_edd(void)
 182{
 183     memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
 184	    sizeof(edd.mbr_signature));
 185     memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
 186     edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
 187     edd.edd_info_nr = boot_params.eddbuf_entries;
 188}
 189#else
 190static inline void __init copy_edd(void)
 191{
 192}
 193#endif
 194
 195void * __init extend_brk(size_t size, size_t align)
 196{
 197	size_t mask = align - 1;
 198	void *ret;
 199
 200	BUG_ON(_brk_start == 0);
 201	BUG_ON(align & mask);
 202
 203	_brk_end = (_brk_end + mask) & ~mask;
 204	BUG_ON((char *)(_brk_end + size) > __brk_limit);
 205
 206	ret = (void *)_brk_end;
 207	_brk_end += size;
 208
 209	memset(ret, 0, size);
 210
 211	return ret;
 212}
 213
 214#ifdef CONFIG_X86_32
 215static void __init cleanup_highmap(void)
 216{
 217}
 218#endif
 219
 220static void __init reserve_brk(void)
 221{
 222	if (_brk_end > _brk_start)
 223		memblock_reserve(__pa_symbol(_brk_start),
 224				 _brk_end - _brk_start);
 225
 226	/* Mark brk area as locked down and no longer taking any
 227	   new allocations */
 228	_brk_start = 0;
 229}
 230
 231u64 relocated_ramdisk;
 232
 233#ifdef CONFIG_BLK_DEV_INITRD
 234
 235static u64 __init get_ramdisk_image(void)
 236{
 237	u64 ramdisk_image = boot_params.hdr.ramdisk_image;
 238
 239	ramdisk_image |= (u64)boot_params.ext_ramdisk_image << 32;
 240
 241	if (ramdisk_image == 0)
 242		ramdisk_image = phys_initrd_start;
 243
 244	return ramdisk_image;
 245}
 246static u64 __init get_ramdisk_size(void)
 247{
 248	u64 ramdisk_size = boot_params.hdr.ramdisk_size;
 249
 250	ramdisk_size |= (u64)boot_params.ext_ramdisk_size << 32;
 251
 252	if (ramdisk_size == 0)
 253		ramdisk_size = phys_initrd_size;
 254
 255	return ramdisk_size;
 256}
 257
 258static void __init relocate_initrd(void)
 259{
 260	/* Assume only end is not page aligned */
 261	u64 ramdisk_image = get_ramdisk_image();
 262	u64 ramdisk_size  = get_ramdisk_size();
 263	u64 area_size     = PAGE_ALIGN(ramdisk_size);
 264
 265	/* We need to move the initrd down into directly mapped mem */
 266	relocated_ramdisk = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
 267						   area_size, PAGE_SIZE);
 268
 269	if (!relocated_ramdisk)
 270		panic("Cannot find place for new RAMDISK of size %lld\n",
 271		      ramdisk_size);
 272
 273	/* Note: this includes all the mem currently occupied by
 274	   the initrd, we rely on that fact to keep the data intact. */
 275	memblock_reserve(relocated_ramdisk, area_size);
 276	initrd_start = relocated_ramdisk + PAGE_OFFSET;
 277	initrd_end   = initrd_start + ramdisk_size;
 278	printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
 279	       relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
 280
 281	copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size);
 282
 283	printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
 284		" [mem %#010llx-%#010llx]\n",
 285		ramdisk_image, ramdisk_image + ramdisk_size - 1,
 286		relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
 287}
 288
 289static void __init early_reserve_initrd(void)
 290{
 291	/* Assume only end is not page aligned */
 292	u64 ramdisk_image = get_ramdisk_image();
 293	u64 ramdisk_size  = get_ramdisk_size();
 294	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
 295
 296	if (!boot_params.hdr.type_of_loader ||
 297	    !ramdisk_image || !ramdisk_size)
 298		return;		/* No initrd provided by bootloader */
 299
 300	memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
 301}
 
 302static void __init reserve_initrd(void)
 303{
 304	/* Assume only end is not page aligned */
 305	u64 ramdisk_image = get_ramdisk_image();
 306	u64 ramdisk_size  = get_ramdisk_size();
 307	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
 308	u64 mapped_size;
 309
 310	if (!boot_params.hdr.type_of_loader ||
 311	    !ramdisk_image || !ramdisk_size)
 312		return;		/* No initrd provided by bootloader */
 313
 314	initrd_start = 0;
 315
 316	mapped_size = memblock_mem_size(max_pfn_mapped);
 317	if (ramdisk_size >= (mapped_size>>1))
 318		panic("initrd too large to handle, "
 319		       "disabling initrd (%lld needed, %lld available)\n",
 320		       ramdisk_size, mapped_size>>1);
 321
 322	printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
 323			ramdisk_end - 1);
 324
 325	if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
 326				PFN_DOWN(ramdisk_end))) {
 327		/* All are mapped, easy case */
 328		initrd_start = ramdisk_image + PAGE_OFFSET;
 329		initrd_end = initrd_start + ramdisk_size;
 330		return;
 331	}
 332
 333	relocate_initrd();
 334
 335	memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
 336}
 337
 338#else
 339static void __init early_reserve_initrd(void)
 340{
 341}
 342static void __init reserve_initrd(void)
 343{
 344}
 345#endif /* CONFIG_BLK_DEV_INITRD */
 346
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 347static void __init parse_setup_data(void)
 348{
 349	struct setup_data *data;
 350	u64 pa_data, pa_next;
 351
 352	pa_data = boot_params.hdr.setup_data;
 353	while (pa_data) {
 354		u32 data_len, data_type;
 355
 356		data = early_memremap(pa_data, sizeof(*data));
 357		data_len = data->len + sizeof(struct setup_data);
 358		data_type = data->type;
 359		pa_next = data->next;
 360		early_memunmap(data, sizeof(*data));
 361
 362		switch (data_type) {
 363		case SETUP_E820_EXT:
 364			e820__memory_setup_extended(pa_data, data_len);
 365			break;
 366		case SETUP_DTB:
 367			add_dtb(pa_data);
 368			break;
 369		case SETUP_EFI:
 370			parse_efi_setup(pa_data, data_len);
 371			break;
 
 
 
 
 
 
 
 
 
 
 
 
 372		default:
 373			break;
 374		}
 375		pa_data = pa_next;
 376	}
 377}
 378
 379static void __init memblock_x86_reserve_range_setup_data(void)
 380{
 
 381	struct setup_data *data;
 382	u64 pa_data;
 
 383
 384	pa_data = boot_params.hdr.setup_data;
 385	while (pa_data) {
 386		data = early_memremap(pa_data, sizeof(*data));
 387		memblock_reserve(pa_data, sizeof(*data) + data->len);
 
 
 
 388
 389		if (data->type == SETUP_INDIRECT &&
 390		    ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT)
 391			memblock_reserve(((struct setup_indirect *)data->data)->addr,
 392					 ((struct setup_indirect *)data->data)->len);
 393
 394		pa_data = data->next;
 395		early_memunmap(data, sizeof(*data));
 396	}
 397}
 398
 399/*
 400 * --------- Crashkernel reservation ------------------------------
 401 */
 
 
 
 
 
 402
 403#ifdef CONFIG_KEXEC_CORE
 404
 405/* 16M alignment for crash kernel regions */
 406#define CRASH_ALIGN		SZ_16M
 
 407
 408/*
 409 * Keep the crash kernel below this limit.
 410 *
 411 * Earlier 32-bits kernels would limit the kernel to the low 512 MB range
 412 * due to mapping restrictions.
 413 *
 414 * 64-bit kdump kernels need to be restricted to be under 64 TB, which is
 415 * the upper limit of system RAM in 4-level paging mode. Since the kdump
 416 * jump could be from 5-level paging to 4-level paging, the jump will fail if
 417 * the kernel is put above 64 TB, and during the 1st kernel bootup there's
 418 * no good way to detect the paging mode of the target kernel which will be
 419 * loaded for dumping.
 420 */
 421#ifdef CONFIG_X86_32
 422# define CRASH_ADDR_LOW_MAX	SZ_512M
 423# define CRASH_ADDR_HIGH_MAX	SZ_512M
 424#else
 425# define CRASH_ADDR_LOW_MAX	SZ_4G
 426# define CRASH_ADDR_HIGH_MAX	SZ_64T
 427#endif
 428
 429static int __init reserve_crashkernel_low(void)
 430{
 431#ifdef CONFIG_X86_64
 432	unsigned long long base, low_base = 0, low_size = 0;
 433	unsigned long total_low_mem;
 434	int ret;
 435
 436	total_low_mem = memblock_mem_size(1UL << (32 - PAGE_SHIFT));
 437
 438	/* crashkernel=Y,low */
 439	ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base);
 440	if (ret) {
 441		/*
 442		 * two parts from kernel/dma/swiotlb.c:
 443		 * -swiotlb size: user-specified with swiotlb= or default.
 444		 *
 445		 * -swiotlb overflow buffer: now hardcoded to 32k. We round it
 446		 * to 8M for other buffers that may need to stay low too. Also
 447		 * make sure we allocate enough extra low memory so that we
 448		 * don't run out of DMA buffers for 32-bit devices.
 449		 */
 450		low_size = max(swiotlb_size_or_default() + (8UL << 20), 256UL << 20);
 451	} else {
 452		/* passed with crashkernel=0,low ? */
 453		if (!low_size)
 454			return 0;
 455	}
 456
 457	low_base = memblock_find_in_range(0, 1ULL << 32, low_size, CRASH_ALIGN);
 458	if (!low_base) {
 459		pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n",
 460		       (unsigned long)(low_size >> 20));
 461		return -ENOMEM;
 462	}
 463
 464	ret = memblock_reserve(low_base, low_size);
 465	if (ret) {
 466		pr_err("%s: Error reserving crashkernel low memblock.\n", __func__);
 467		return ret;
 468	}
 469
 470	pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n",
 471		(unsigned long)(low_size >> 20),
 472		(unsigned long)(low_base >> 20),
 473		(unsigned long)(total_low_mem >> 20));
 474
 475	crashk_low_res.start = low_base;
 476	crashk_low_res.end   = low_base + low_size - 1;
 477	insert_resource(&iomem_resource, &crashk_low_res);
 478#endif
 479	return 0;
 480}
 481
 482static void __init reserve_crashkernel(void)
 483{
 484	unsigned long long crash_size, crash_base, total_mem;
 
 485	bool high = false;
 486	int ret;
 487
 488	total_mem = memblock_phys_mem_size();
 
 489
 490	/* crashkernel=XM */
 491	ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
 492	if (ret != 0 || crash_size <= 0) {
 493		/* crashkernel=X,high */
 494		ret = parse_crashkernel_high(boot_command_line, total_mem,
 495					     &crash_size, &crash_base);
 496		if (ret != 0 || crash_size <= 0)
 497			return;
 498		high = true;
 499	}
 500
 501	if (xen_pv_domain()) {
 502		pr_info("Ignoring crashkernel for a Xen PV domain\n");
 503		return;
 504	}
 505
 506	/* 0 means: find the address automatically */
 507	if (!crash_base) {
 508		/*
 509		 * Set CRASH_ADDR_LOW_MAX upper bound for crash memory,
 510		 * crashkernel=x,high reserves memory over 4G, also allocates
 511		 * 256M extra low memory for DMA buffers and swiotlb.
 512		 * But the extra memory is not required for all machines.
 513		 * So try low memory first and fall back to high memory
 514		 * unless "crashkernel=size[KMG],high" is specified.
 515		 */
 516		if (!high)
 517			crash_base = memblock_find_in_range(CRASH_ALIGN,
 518						CRASH_ADDR_LOW_MAX,
 519						crash_size, CRASH_ALIGN);
 520		if (!crash_base)
 521			crash_base = memblock_find_in_range(CRASH_ALIGN,
 522						CRASH_ADDR_HIGH_MAX,
 523						crash_size, CRASH_ALIGN);
 524		if (!crash_base) {
 525			pr_info("crashkernel reservation failed - No suitable area found.\n");
 526			return;
 527		}
 528	} else {
 529		unsigned long long start;
 530
 531		start = memblock_find_in_range(crash_base,
 532					       crash_base + crash_size,
 533					       crash_size, 1 << 20);
 534		if (start != crash_base) {
 535			pr_info("crashkernel reservation failed - memory is in use.\n");
 536			return;
 537		}
 538	}
 539	ret = memblock_reserve(crash_base, crash_size);
 540	if (ret) {
 541		pr_err("%s: Error reserving crashkernel memblock.\n", __func__);
 542		return;
 543	}
 544
 545	if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
 546		memblock_free(crash_base, crash_size);
 547		return;
 548	}
 549
 550	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
 551		(unsigned long)(crash_size >> 20),
 552		(unsigned long)(crash_base >> 20),
 553		(unsigned long)(total_mem >> 20));
 554
 555	crashk_res.start = crash_base;
 556	crashk_res.end   = crash_base + crash_size - 1;
 557	insert_resource(&iomem_resource, &crashk_res);
 558}
 559#else
 560static void __init reserve_crashkernel(void)
 561{
 562}
 563#endif
 564
 565static struct resource standard_io_resources[] = {
 566	{ .name = "dma1", .start = 0x00, .end = 0x1f,
 567		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
 568	{ .name = "pic1", .start = 0x20, .end = 0x21,
 569		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
 570	{ .name = "timer0", .start = 0x40, .end = 0x43,
 571		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
 572	{ .name = "timer1", .start = 0x50, .end = 0x53,
 573		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
 574	{ .name = "keyboard", .start = 0x60, .end = 0x60,
 575		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
 576	{ .name = "keyboard", .start = 0x64, .end = 0x64,
 577		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
 578	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
 579		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
 580	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
 581		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
 582	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
 583		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
 584	{ .name = "fpu", .start = 0xf0, .end = 0xff,
 585		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
 586};
 587
 588void __init reserve_standard_io_resources(void)
 589{
 590	int i;
 591
 592	/* request I/O space for devices used on all i[345]86 PCs */
 593	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
 594		request_resource(&ioport_resource, &standard_io_resources[i]);
 595
 596}
 597
 598static __init void reserve_ibft_region(void)
 599{
 600	unsigned long addr, size = 0;
 601
 602	addr = find_ibft_region(&size);
 603
 604	if (size)
 605		memblock_reserve(addr, size);
 606}
 607
 608static bool __init snb_gfx_workaround_needed(void)
 609{
 610#ifdef CONFIG_PCI
 611	int i;
 612	u16 vendor, devid;
 613	static const __initconst u16 snb_ids[] = {
 614		0x0102,
 615		0x0112,
 616		0x0122,
 617		0x0106,
 618		0x0116,
 619		0x0126,
 620		0x010a,
 621	};
 622
 623	/* Assume no if something weird is going on with PCI */
 624	if (!early_pci_allowed())
 625		return false;
 626
 627	vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
 628	if (vendor != 0x8086)
 629		return false;
 630
 631	devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
 632	for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
 633		if (devid == snb_ids[i])
 634			return true;
 635#endif
 636
 637	return false;
 638}
 639
 640/*
 641 * Sandy Bridge graphics has trouble with certain ranges, exclude
 642 * them from allocation.
 643 */
 644static void __init trim_snb_memory(void)
 645{
 646	static const __initconst unsigned long bad_pages[] = {
 647		0x20050000,
 648		0x20110000,
 649		0x20130000,
 650		0x20138000,
 651		0x40004000,
 652	};
 653	int i;
 654
 655	if (!snb_gfx_workaround_needed())
 656		return;
 657
 658	printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");
 659
 660	/*
 661	 * Reserve all memory below the 1 MB mark that has not
 662	 * already been reserved.
 663	 */
 664	memblock_reserve(0, 1<<20);
 665
 666	for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
 667		if (memblock_reserve(bad_pages[i], PAGE_SIZE))
 668			printk(KERN_WARNING "failed to reserve 0x%08lx\n",
 669			       bad_pages[i]);
 670	}
 671}
 672
 673/*
 674 * Here we put platform-specific memory range workarounds, i.e.
 675 * memory known to be corrupt or that otherwise needs to be reserved
 676 * on specific platforms.
 677 *
 678 * If this gets used more widely it could use a real dispatch mechanism.
 679 */
 680static void __init trim_platform_memory_ranges(void)
 681{
 682	trim_snb_memory();
 683}
 684
 685static void __init trim_bios_range(void)
 686{
 687	/*
 688	 * A special case is the first 4 KiB of memory; this is a
 689	 * BIOS-owned area, not kernel RAM, but it is generally not
 690	 * listed as such in the E820 table.
 691	 *
 692	 * This typically reserves additional memory (64KiB by default)
 693	 * since some BIOSes are known to corrupt low memory.  See the
 694	 * Kconfig help text for X86_RESERVE_LOW.
 695	 */
 696	e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
 697
 698	/*
 699	 * Special case: some BIOSes report the PC BIOS
 700	 * area (640 KiB -> 1 MiB) as RAM even though it is not;
 701	 * take it out.
 702	 */
 703	e820__range_remove(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_TYPE_RAM, 1);
 704
 705	e820__update_table(e820_table);
 706}
 707
 708/* Called before trim_bios_range() to spare an extra sanitizing pass */
 709static void __init e820_add_kernel_range(void)
 710{
 711	u64 start = __pa_symbol(_text);
 712	u64 size = __pa_symbol(_end) - start;
 713
 714	/*
 715	 * Complain if .text, .data and .bss are not marked as E820_TYPE_RAM
 716	 * and attempt to fix it by adding the range. We may have a confused
 717	 * BIOS, or the user may have used memmap=exactmap or memmap=xxM$yyM
 718	 * to exclude the kernel range. If we really are running on top of
 719	 * non-RAM, we will crash later anyway.
 720	 */
 721	if (e820__mapped_all(start, start + size, E820_TYPE_RAM))
 722		return;
 723
 724	pr_warn(".text .data .bss are not marked as E820_TYPE_RAM!\n");
 725	e820__range_remove(start, size, E820_TYPE_RAM, 0);
 726	e820__range_add(start, size, E820_TYPE_RAM);
 727}
 728
 729static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
 730
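/*
 * "reservelow=64k" on the command line overrides the Kconfig default
 * above (CONFIG_X86_RESERVE_LOW is given in KiB, hence the shift by
 * 10). memparse() accepts the usual k/m/g suffixes, and the result is
 * clamped to the [4 KiB, 640 KiB] range below.
 */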
 731static int __init parse_reservelow(char *p)
 732{
 733	unsigned long long size;
 734
 735	if (!p)
 736		return -EINVAL;
 737
 738	size = memparse(p, &p);
 739
 740	if (size < 4096)
 741		size = 4096;
 742
 743	if (size > 640*1024)
 744		size = 640*1024;
 745
 746	reserve_low = size;
 747
 748	return 0;
 749}
 750
 751early_param("reservelow", parse_reservelow);
 752
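/*
 * Carve the first reserve_low bytes, rounded up to a full page, out of
 * the memblock allocator; see the X86_RESERVE_LOW help text for why
 * low memory is treated as fragile.
 */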
 753static void __init trim_low_memory_range(void)
 754{
 755	memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
 756}
 757
 758/*
 759 * Dump out kernel offset information on panic.
 760 */
 761static int
 762dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
 763{
 764	if (kaslr_enabled()) {
 765		pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
 766			 kaslr_offset(),
 767			 __START_KERNEL,
 768			 __START_KERNEL_map,
 769			 MODULES_VADDR-1);
 770	} else {
 771		pr_emerg("Kernel Offset: disabled\n");
 772	}
 773
 774	return 0;
 775}
 776
 777/*
 778 * Determine if we were loaded by an EFI loader.  If so, then we have also been
 779 * passed the efi memmap, systab, etc., so we should use these data structures
 780 * for initialization.  Note: the EFI init code path is determined by the
 781 * global efi_enabled. This allows the same kernel image to be used on existing
 782 * systems (with a traditional BIOS) as well as on EFI systems.
 783 */
 784/*
 785 * setup_arch - architecture-specific boot-time initializations
 786 *
 787 * Note: On x86_64, fixmaps are ready for use even before this is called.
 788 */
 789
 790void __init setup_arch(char **cmdline_p)
 791{
 792	/*
 793	 * Reserve the memory occupied by the kernel between _text and
 794	 * __end_of_kernel_reserve symbols. Any kernel sections after the
 795	 * __end_of_kernel_reserve symbol must be explicitly reserved with a
 796	 * separate memblock_reserve() or they will be discarded.
 797	 */
 798	memblock_reserve(__pa_symbol(_text),
 799			 (unsigned long)__end_of_kernel_reserve - (unsigned long)_text);
 800
 801	/*
 802	 * Make sure page 0 is always reserved because on systems with
 803	 * L1TF its contents can be leaked to user processes.
 804	 */
 805	memblock_reserve(0, PAGE_SIZE);
 806
 807	early_reserve_initrd();
 808
 809	/*
 810	 * At this point everything still needed from the boot loader,
 811	 * the BIOS or the kernel text should be early-reserved or marked
 812	 * not RAM in e820. All other memory is fair game.
 813	 */
 814
 815#ifdef CONFIG_X86_32
 816	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
 817
 818	/*
 819	 * Copy the kernel address range established so far and switch
 820	 * to the proper swapper page table.
 821	 */
 822	clone_pgd_range(swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
 823			initial_page_table + KERNEL_PGD_BOUNDARY,
 824			KERNEL_PGD_PTRS);
 825
 826	load_cr3(swapper_pg_dir);
 827	/*
 828	 * Note: Quark X1000 CPUs advertise PGE incorrectly and require
 829	 * a cr3 based tlb flush, so the following __flush_tlb_all()
 830	 * will not flush anything because the CPU quirk which clears
 831	 * X86_FEATURE_PGE has not been invoked yet. However, the
 832	 * load_cr3() above has already flushed the TLB. The
 833	 * quirk is invoked before subsequent calls to __flush_tlb_all(),
 834	 * so proper operation is guaranteed.
 835	 */
 836	__flush_tlb_all();
 837#else
 838	printk(KERN_INFO "Command line: %s\n", boot_command_line);
 839	boot_cpu_data.x86_phys_bits = MAX_PHYSMEM_BITS;
 840#endif
 841
 842	/*
 843	 * If we have OLPC OFW, we might end up relocating the fixmap due to
 844	 * reserve_top(), so do this before touching the ioremap area.
 845	 */
 846	olpc_ofw_detect();
 847
 848	idt_setup_early_traps();
 849	early_cpu_init();
 850	arch_init_ideal_nops();
 851	jump_label_init();
 852	early_ioremap_init();
 853
 854	setup_olpc_ofw_pgd();
 855
 856	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
 857	screen_info = boot_params.screen_info;
 858	edid_info = boot_params.edid_info;
 859#ifdef CONFIG_X86_32
 860	apm_info.bios = boot_params.apm_bios_info;
 861	ist_info = boot_params.ist_info;
 862#endif
 863	saved_video_mode = boot_params.hdr.vid_mode;
 864	bootloader_type = boot_params.hdr.type_of_loader;
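	/*
	 * Per the boot protocol, a loader type of 0xe in the high
	 * nibble means "extended": the real loader ID is
	 * ext_loader_type + 0x10, shifted back into the high bits.
	 */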
 865	if ((bootloader_type >> 4) == 0xe) {
 866		bootloader_type &= 0xf;
 867		bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
 868	}
 869	bootloader_version  = bootloader_type & 0xf;
 870	bootloader_version |= boot_params.hdr.ext_loader_ver << 4;
 871
 872#ifdef CONFIG_BLK_DEV_RAM
 873	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
 874#endif
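	/*
	 * The EFI loader records a four byte signature ("EL32" or
	 * "EL64") in boot_params, telling the kernel whether it was
	 * booted from EFI firmware and at which word size.
	 */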
 875#ifdef CONFIG_EFI
 876	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
 877		     EFI32_LOADER_SIGNATURE, 4)) {
 878		set_bit(EFI_BOOT, &efi.flags);
 879	} else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
 880		     EFI64_LOADER_SIGNATURE, 4)) {
 881		set_bit(EFI_BOOT, &efi.flags);
 882		set_bit(EFI_64BIT, &efi.flags);
 883	}
 884#endif
 885
 886	x86_init.oem.arch_setup();
 887
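	/*
	 * Clip iomem_resource to the CPU's physical address width so
	 * resource allocations cannot land above what the CPU can
	 * actually address.
	 */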
 888	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
 889	e820__memory_setup();
 890	parse_setup_data();
 891
 892	copy_edd();
 893
 894	if (!boot_params.hdr.root_flags)
 895		root_mountflags &= ~MS_RDONLY;
 896	init_mm.start_code = (unsigned long) _text;
 897	init_mm.end_code = (unsigned long) _etext;
 898	init_mm.end_data = (unsigned long) _edata;
 899	init_mm.brk = _brk_end;
 900
 901	code_resource.start = __pa_symbol(_text);
 902	code_resource.end = __pa_symbol(_etext)-1;
 903	rodata_resource.start = __pa_symbol(__start_rodata);
 904	rodata_resource.end = __pa_symbol(__end_rodata)-1;
 905	data_resource.start = __pa_symbol(_sdata);
 906	data_resource.end = __pa_symbol(_edata)-1;
 907	bss_resource.start = __pa_symbol(__bss_start);
 908	bss_resource.end = __pa_symbol(__bss_stop)-1;
 909
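	/*
	 * A built-in command line (CONFIG_CMDLINE_BOOL) is normally
	 * prepended to the one passed in by the boot loader; with
	 * CONFIG_CMDLINE_OVERRIDE it replaces it outright.
	 */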
 910#ifdef CONFIG_CMDLINE_BOOL
 911#ifdef CONFIG_CMDLINE_OVERRIDE
 912	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
 913#else
 914	if (builtin_cmdline[0]) {
 915		/* append boot loader cmdline to builtin */
 916		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
 917		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
 918		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
 919	}
 920#endif
 921#endif
 922
 923	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
 924	*cmdline_p = command_line;
 925
 926	/*
 927	 * x86_configure_nx() is called before parse_early_param() to detect
 928	 * whether the hardware lacks NX support (so that the early EHCI
 929	 * debug console setup can safely call set_fixmap()). It may then be
 930	 * called again from within noexec_setup() while early parameters
 931	 * are parsed, to honor the respective command line option.
 932	 */
 933	x86_configure_nx();
 934
 935	parse_early_param();
 936
 937	if (efi_enabled(EFI_BOOT))
 938		efi_memblock_x86_reserve_range();
 939#ifdef CONFIG_MEMORY_HOTPLUG
 940	/*
 941	 * Memory used by the kernel cannot be hot-removed because Linux
 942	 * cannot migrate the kernel pages. When memory hotplug is
 943	 * enabled, we should prevent memblock from allocating memory
 944	 * for the kernel.
 945	 *
 946	 * ACPI SRAT records all hotpluggable memory ranges, but we do not
 947	 * know about them until SRAT has been parsed.
 948	 *
 949	 * The kernel image is loaded into memory very early; we cannot
 950	 * prevent that. So on NUMA systems we mark any node the kernel
 951	 * resides in as non-hotpluggable.
 952	 *
 953	 * Since a single node on a modern server can hold tens of
 954	 * gigabytes of memory, we assume the memory around the kernel
 955	 * image is also non-hotpluggable. So before SRAT is parsed,
 956	 * allocate memory near the kernel image to do our best to keep
 957	 * the kernel away from hotpluggable memory.
 958	 */
 959	if (movable_node_is_enabled())
 960		memblock_set_bottom_up(true);
 961#endif
 962
 963	x86_report_nx();
 964
 965	/* After parse_early_param(), so a panic can make it out over serial */
 966	memblock_x86_reserve_range_setup_data();
 967
 968	if (acpi_mps_check()) {
 969#ifdef CONFIG_X86_LOCAL_APIC
 970		disable_apic = 1;
 971#endif
 972		setup_clear_cpu_cap(X86_FEATURE_APIC);
 973	}
 974
 975	e820__reserve_setup_data();
 976	e820__finish_early_params();
 977
 978	if (efi_enabled(EFI_BOOT))
 979		efi_init();
 980
 981	dmi_setup();
 982
 983	/*
 984	 * VMware detection requires dmi to be available, so this
 985	 * needs to be done after dmi_setup(), for the boot CPU.
 986	 */
 987	init_hypervisor_platform();
 988
 989	tsc_early_init();
 990	x86_init.resources.probe_roms();
 991
 992	/* After parse_early_param(), so this can be debugged */
 993	insert_resource(&iomem_resource, &code_resource);
 994	insert_resource(&iomem_resource, &rodata_resource);
 995	insert_resource(&iomem_resource, &data_resource);
 996	insert_resource(&iomem_resource, &bss_resource);
 997
 998	e820_add_kernel_range();
 999	trim_bios_range();
1000#ifdef CONFIG_X86_32
1001	if (ppro_with_ram_bug()) {
1002		e820__range_update(0x70000000ULL, 0x40000ULL, E820_TYPE_RAM,
1003				  E820_TYPE_RESERVED);
1004		e820__update_table(e820_table);
1005		printk(KERN_INFO "fixed physical RAM map:\n");
1006		e820__print_table("bad_ppro");
1007	}
1008#else
1009	early_gart_iommu_check();
1010#endif
1011
1012	/*
1013	 * partially used pages are not usable - thus
1014	 * we are rounding upwards:
1015	 */
1016	max_pfn = e820__end_of_ram_pfn();
1017
1018	/* update e820 for memory not covered by WB MTRRs */
1019	mtrr_bp_init();
1020	if (mtrr_trim_uncached_memory(max_pfn))
1021		max_pfn = e820__end_of_ram_pfn();
1022
1023	max_possible_pfn = max_pfn;
1024
1025	/*
1026	 * This call is required when the CPU does not support PAT. If
1027	 * mtrr_bp_init() invoked it already via pat_init() the call has no
1028	 * effect.
1029	 */
1030	init_cache_modes();
1031
1032	/*
1033	 * Define random base addresses for memory sections after max_pfn is
1034	 * defined and before each memory section base is used.
1035	 */
1036	kernel_randomize_memory();
1037
1038#ifdef CONFIG_X86_32
1039	/* max_low_pfn gets updated here */
1040	find_low_pfn_range();
1041#else
1042	check_x2apic();
1043
1044	/* How many end-of-memory variables you have, grandma! */
1045	/* need this before calling reserve_initrd */
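	/* 1UL << (32 - PAGE_SHIFT) is the PFN at the 4 GiB boundary. */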
1046	if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
1047		max_low_pfn = e820__end_of_low_ram_pfn();
1048	else
1049		max_low_pfn = max_pfn;
1050
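	/*
	 * high_memory is the first virtual address past the direct
	 * mapping of all physical RAM.
	 */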
1051	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
1052#endif
1053
1054	/*
1055	 * Find and reserve possible boot-time SMP configuration:
1056	 */
1057	find_smp_config();
1058
1059	reserve_ibft_region();
1060
1061	early_alloc_pgt_buf();
1062
1063	/*
1064	 * Need to conclude brk allocations before e820__memblock_setup():
1065	 * it could use memblock_find_in_range(), which could overlap
1066	 * with the brk area.
1067	 */
1068	reserve_brk();
1069
1070	cleanup_highmap();
1071
1072	memblock_set_current_limit(ISA_END_ADDRESS);
1073	e820__memblock_setup();
1074
1075	reserve_bios_regions();
1076
1077	efi_fake_memmap();
1078	efi_find_mirror();
1079	efi_esrt_init();
1080
1081	/*
1082	 * The EFI specification says that boot service code won't be
1083	 * called after ExitBootServices(). This is, in fact, a lie.
1084	 */
1085	efi_reserve_boot_services();
1086
1087	/* Preallocate 4 KiB for the MP-table mpc */
1088	e820__memblock_alloc_reserved_mpc_new();
1089
1090#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
1091	setup_bios_corruption_check();
1092#endif
1093
1094#ifdef CONFIG_X86_32
1095	printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
1096			(max_pfn_mapped<<PAGE_SHIFT) - 1);
1097#endif
1098
1099	reserve_real_mode();
1100
1101	trim_platform_memory_ranges();
1102	trim_low_memory_range();
1103
1104	init_mem_mapping();
1105
1106	idt_setup_early_pf();
1107
1108	/*
1109	 * Update mmu_cr4_features (and, indirectly, trampoline_cr4_features)
1110	 * with the current CR4 value.  This may not be necessary, but
1111	 * auditing all the early-boot CR4 manipulation would be needed to
1112	 * rule it out.
1113	 *
1114	 * Mask off features that don't work outside long mode (just
1115	 * PCIDE for now).
1116	 */
1117	mmu_cr4_features = __read_cr4() & ~X86_CR4_PCIDE;
1118
1119	memblock_set_current_limit(get_max_mapped());
1120
1121	/*
1122	 * NOTE: On x86-32, fixmaps are ready for use only from this point on.
1123	 */
1124
1125#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
1126	if (init_ohci1394_dma_early)
1127		init_ohci1394_dma_on_all_controllers();
1128#endif
1129	/* Allocate bigger log buffer */
1130	setup_log_buf(1);
1131
1132	if (efi_enabled(EFI_BOOT)) {
1133		switch (boot_params.secure_boot) {
1134		case efi_secureboot_mode_disabled:
1135			pr_info("Secure boot disabled\n");
1136			break;
1137		case efi_secureboot_mode_enabled:
1138			pr_info("Secure boot enabled\n");
1139			break;
1140		default:
1141			pr_info("Secure boot could not be determined\n");
1142			break;
1143		}
1144	}
1145
1146	reserve_initrd();
1147
1148	acpi_table_upgrade();
1149
1150	vsmp_init();
1151
1152	io_delay_init();
1153
1154	early_platform_quirks();
1155
1156	/*
1157	 * Parse the ACPI tables for possible boot-time SMP configuration.
1158	 */
1159	acpi_boot_table_init();
1160
1161	early_acpi_boot_init();
1162
1163	initmem_init();
1164	dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
1165
1166	if (boot_cpu_has(X86_FEATURE_GBPAGES))
1167		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
1168
1169	/*
1170	 * Reserve memory for crash kernel after SRAT is parsed so that it
1171	 * won't consume hotpluggable memory.
1172	 */
1173	reserve_crashkernel();
1174
1175	memblock_find_dma_reserve();
1176
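	/*
	 * Bring up the xHCI debug capability (DbC) based early console
	 * if one was requested on the command line.
	 */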
1177	if (!early_xdbc_setup_hardware())
1178		early_xdbc_register_console();
1179
1180	x86_init.paging.pagetable_init();
1181
1182	kasan_init();
1183
1184	/*
1185	 * Sync back kernel address range.
1186	 *
1187	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
1188	 * this call?
1189	 */
1190	sync_initial_page_table();
1191
1192	tboot_probe();
1193
1194	map_vsyscall();
1195
1196	generic_apic_probe();
1197
1198	early_quirks();
1199
1200	/*
1201	 * Read APIC and some other early information from ACPI tables.
1202	 */
1203	acpi_boot_init();
1204	sfi_init();
1205	x86_dtb_init();
1206
1207	/*
1208	 * Get the boot-time SMP configuration:
1209	 */
1210	get_smp_config();
1211
1212	/*
1213	 * Systems w/o ACPI and mptables might not have the local APIC
1214	 * mapped yet, but prefill_possible_map() might need to access it.
1215	 */
1216	init_apic_mappings();
1217
1218	prefill_possible_map();
1219
1220	init_cpu_to_node();
1221
1222	io_apic_init_mappings();
1223
1224	x86_init.hyper.guest_late_init();
1225
1226	e820__reserve_resources();
1227	e820__register_nosave_regions(max_pfn);
1228
1229	x86_init.resources.reserve_resources();
1230
1231	e820__setup_pci_gap();
1232
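	/*
	 * Default to the VGA text console unless EFI reports the legacy
	 * VGA range at 0xa0000 as conventional memory, in which case no
	 * VGA device can be present there.
	 */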
1233#ifdef CONFIG_VT
1234#if defined(CONFIG_VGA_CONSOLE)
1235	if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
1236		conswitchp = &vga_con;
1237#endif
1238#endif
1239	x86_init.oem.banner();
1240
1241	x86_init.timers.wallclock_init();
1242
1243	mcheck_init();
1244
1245	register_refined_jiffies(CLOCK_TICK_RATE);
1246
1247#ifdef CONFIG_EFI
1248	if (efi_enabled(EFI_BOOT))
1249		efi_apply_memmap_quirks();
1250#endif
1251
1252	unwind_init();
1253}
1254
1255#ifdef CONFIG_X86_32
1256
1257static struct resource video_ram_resource = {
1258	.name	= "Video RAM area",
1259	.start	= 0xa0000,
1260	.end	= 0xbffff,
1261	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
1262};
1263
1264void __init i386_reserve_resources(void)
1265{
1266	request_resource(&iomem_resource, &video_ram_resource);
1267	reserve_standard_io_resources();
1268}
1269
1270#endif /* CONFIG_X86_32 */
1271
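/*
 * Hook dump_kernel_offset() into the panic notifier chain so the KASLR
 * offset is part of every panic report.
 */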
1272static struct notifier_block kernel_offset_notifier = {
1273	.notifier_call = dump_kernel_offset
1274};
1275
1276static int __init register_kernel_offset_dumper(void)
1277{
1278	atomic_notifier_chain_register(&panic_notifier_list,
1279					&kernel_offset_notifier);
1280	return 0;
1281}
1282__initcall(register_kernel_offset_dumper);