/* Linux v5.4 */
// SPDX-License-Identifier: GPL-2.0
/*
 *	Intel Multiprocessor Specification 1.1 and 1.4
 *	compliant MP-table parsing routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 *      (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/smp.h>
#include <linux/pci.h>

#include <asm/irqdomain.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/pgalloc.h>
#include <asm/io_apic.h>
#include <asm/proto.h>
#include <asm/bios_ebda.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/smp.h>

#include <asm/apic.h>
/*
 * Checksum an MP configuration block.
 */

static int __init mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;

	return sum & 0xFF;
}

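/*
 * Default ->mpc_apic_id() hook: use the APIC ID exactly as it appears in
 * the MP-table processor entry.  Platforms can override this through
 * x86_init.mpparse.mpc_apic_id.
 */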
int __init default_mpc_apic_id(struct mpc_cpu *m)
{
	return m->apicid;
}

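/*
 * Register one MP-table processor entry: count disabled CPUs, remember the
 * boot CPU's physical APIC ID and hand the rest to generic_processor_info().
 */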
static void __init MP_processor_info(struct mpc_cpu *m)
{
	int apicid;
	char *bootup_cpu = "";

	if (!(m->cpuflag & CPU_ENABLED)) {
		disabled_cpus++;
		return;
	}

	apicid = x86_init.mpparse.mpc_apic_id(m);

	if (m->cpuflag & CPU_BOOTPROCESSOR) {
		bootup_cpu = " (Bootup-CPU)";
		boot_cpu_physical_apicid = m->apicid;
	}

	pr_info("Processor #%d%s\n", m->apicid, bootup_cpu);
	generic_processor_info(apicid, m->apicver);
}

#ifdef CONFIG_X86_IO_APIC
void __init default_mpc_oem_bus_info(struct mpc_bus *m, char *str)
{
	memcpy(str, m->bustype, 6);
	str[6] = 0;
	apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
}

static void __init MP_bus_info(struct mpc_bus *m)
{
	char str[7];

	x86_init.mpparse.mpc_oem_bus_info(m, str);

#if MAX_MP_BUSSES < 256
	if (m->busid >= MAX_MP_BUSSES) {
		pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n",
			m->busid, str, MAX_MP_BUSSES - 1);
		return;
	}
#endif

	set_bit(m->busid, mp_bus_not_pci);
	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
#endif
	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
		if (x86_init.mpparse.mpc_oem_pci_bus)
			x86_init.mpparse.mpc_oem_pci_bus(m);

		clear_bit(m->busid, mp_bus_not_pci);
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
		mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
#endif
	} else
		pr_warn("Unknown bustype %s - ignoring\n", str);
}

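/*
 * Register a usable I/O APIC from its MP-table entry with the legacy IRQ
 * domain, placing its pins after the GSIs that are already registered.
 */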
static void __init MP_ioapic_info(struct mpc_ioapic *m)
{
	struct ioapic_domain_cfg cfg = {
		.type = IOAPIC_DOMAIN_LEGACY,
		.ops = &mp_ioapic_irqdomain_ops,
	};

	if (m->flags & MPC_APIC_USABLE)
		mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg);
}

static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
{
	apic_printk(APIC_VERBOSE,
		"Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n",
		mp_irq->irqtype, mp_irq->irqflag & 3,
		(mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
		mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
}

#else /* CONFIG_X86_IO_APIC */
static inline void __init MP_bus_info(struct mpc_bus *m) {}
static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
#endif /* CONFIG_X86_IO_APIC */

static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
{
	apic_printk(APIC_VERBOSE,
		"Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
		m->srcbusirq, m->destapic, m->destapiclint);
}

/*
 * Read/parse the MPC
 */
static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
{

	if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
		pr_err("MPTABLE: bad signature [%c%c%c%c]!\n",
		       mpc->signature[0], mpc->signature[1],
		       mpc->signature[2], mpc->signature[3]);
		return 0;
	}
	if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
		pr_err("MPTABLE: checksum error!\n");
		return 0;
	}
	if (mpc->spec != 0x01 && mpc->spec != 0x04) {
		pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec);
		return 0;
	}
	if (!mpc->lapic) {
		pr_err("MPTABLE: null local APIC address!\n");
		return 0;
	}
	memcpy(oem, mpc->oem, 8);
	oem[8] = 0;
	pr_info("MPTABLE: OEM ID: %s\n", oem);

	memcpy(str, mpc->productid, 12);
	str[12] = 0;

	pr_info("MPTABLE: Product ID: %s\n", str);

	pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic);

	return 1;
}

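/* Advance the entry pointer and the running byte count by one entry. */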
static void skip_entry(unsigned char **ptr, int *count, int size)
{
	*ptr += size;
	*count += size;
}

static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
{
	pr_err("Your mptable is wrong, contact your HW vendor!\n");
	pr_cont("type %x\n", *mpt);
	print_hex_dump(KERN_ERR, "  ", DUMP_PREFIX_ADDRESS, 16,
			1, mpc, mpc->length, 1);
}

void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }

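/*
 * Walk the variable-length entries that follow the fixed MPC header.  When
 * called early, only the local APIC address is registered; the full pass
 * also registers buses, I/O APICs and interrupt routing entries.
 */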
static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
{
	char str[16];
	char oem[10];

	int count = sizeof(*mpc);
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	if (!smp_check_mpc(mpc, oem, str))
		return 0;

	/* Initialize the lapic mapping */
	if (!acpi_lapic)
		register_lapic_address(mpc->lapic);

	if (early)
		return 1;

	if (mpc->oemptr)
		x86_init.mpparse.smp_read_mpc_oem(mpc);

	/*
	 *      Now process the configuration blocks.
	 */
	x86_init.mpparse.mpc_record(0);

	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			/* ACPI may have already provided this data */
			if (!acpi_lapic)
				MP_processor_info((struct mpc_cpu *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			MP_bus_info((struct mpc_bus *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			MP_ioapic_info((struct mpc_ioapic *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			mp_save_irq((struct mpc_intsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			MP_lintsrc_info((struct mpc_lintsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			count = mpc->length;
			break;
		}
		x86_init.mpparse.mpc_record(1);
	}

	if (!num_processors)
		pr_err("MPTABLE: no processors registered!\n");
	return num_processors;
}

#ifdef CONFIG_X86_IO_APIC

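/*
 * Read the edge/level control registers at I/O ports 0x4d0/0x4d1: one bit
 * per ISA IRQ, set when the line is configured as level triggered.
 */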
static int __init ELCR_trigger(unsigned int irq)
{
	unsigned int port;

	port = 0x4d0 + (irq >> 3);
	return (inb(port) >> (irq & 7)) & 1;
}

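/*
 * Build the interrupt-source entries for an MP "default configuration":
 * one mp_INT entry per usable ISA IRQ plus a final mp_ExtINT entry that
 * routes the 8259A output to INTIN0 of the I/O APIC.
 */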
static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
	struct mpc_intsrc intsrc;
	int i;
	int ELCR_fallback = 0;

	intsrc.type = MP_INTSRC;
	intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	intsrc.srcbus = 0;
	intsrc.dstapic = mpc_ioapic_id(0);

	intsrc.irqtype = mp_INT;

	/*
	 *  If true, we have an ISA/PCI system with no IRQ entries
	 *  in the MP table. To prevent the PCI interrupts from being set up
	 *  incorrectly, we try to use the ELCR. The sanity check to see if
	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
	 *  never be level sensitive, so we simply see if the ELCR agrees.
	 *  If it does, we assume it's valid.
	 */
	if (mpc_default_type == 5) {
		pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");

		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
		    ELCR_trigger(13))
			pr_err("ELCR contains invalid data... not using ELCR\n");
		else {
			pr_info("Using ELCR to identify PCI interrupts\n");
			ELCR_fallback = 1;
		}
	}

	for (i = 0; i < 16; i++) {
		switch (mpc_default_type) {
		case 2:
			if (i == 0 || i == 13)
				continue;	/* IRQ0 & IRQ13 not connected */
			/* fall through */
		default:
			if (i == 2)
				continue;	/* IRQ2 is never connected */
		}

		if (ELCR_fallback) {
			/*
			 *  If the ELCR indicates a level-sensitive interrupt, we
			 *  copy that information over to the MP table in the
			 *  irqflag field (level sensitive, active high polarity).
			 */
			if (ELCR_trigger(i)) {
				intsrc.irqflag = MP_IRQTRIG_LEVEL |
						 MP_IRQPOL_ACTIVE_HIGH;
			} else {
				intsrc.irqflag = MP_IRQTRIG_DEFAULT |
						 MP_IRQPOL_DEFAULT;
			}
		}

		intsrc.srcbusirq = i;
		intsrc.dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
		mp_save_irq(&intsrc);
	}

	intsrc.irqtype = mp_ExtINT;
	intsrc.srcbusirq = 0;
	intsrc.dstirq = 0;	/* 8259A to INTIN0 */
	mp_save_irq(&intsrc);
}


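/*
 * Synthesize the bus and I/O APIC entries implied by an MP default
 * configuration: an ISA or EISA bus 0, an additional PCI bus 1 for the
 * PCI variants, and a single I/O APIC at the architectural default address.
 */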
static void __init construct_ioapic_table(int mpc_default_type)
{
	struct mpc_ioapic ioapic;
	struct mpc_bus bus;

	bus.type = MP_BUS;
	bus.busid = 0;
	switch (mpc_default_type) {
	default:
		pr_err("???\nUnknown standard configuration %d\n",
		       mpc_default_type);
		/* fall through */
	case 1:
	case 5:
		memcpy(bus.bustype, "ISA   ", 6);
		break;
	case 2:
	case 6:
	case 3:
		memcpy(bus.bustype, "EISA  ", 6);
		break;
	}
	MP_bus_info(&bus);
	if (mpc_default_type > 4) {
		bus.busid = 1;
		memcpy(bus.bustype, "PCI   ", 6);
		MP_bus_info(&bus);
	}

	ioapic.type	= MP_IOAPIC;
	ioapic.apicid	= 2;
	ioapic.apicver	= mpc_default_type > 4 ? 0x10 : 0x01;
	ioapic.flags	= MPC_APIC_USABLE;
	ioapic.apicaddr	= IO_APIC_DEFAULT_PHYS_BASE;
	MP_ioapic_info(&ioapic);

	/*
	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
	 */
	construct_default_ioirq_mptable(mpc_default_type);
}
#else
static inline void __init construct_ioapic_table(int mpc_default_type) { }
#endif

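/*
 * Build a complete synthetic MP configuration for one of the "default
 * configurations" defined by the MP specification: two identical CPUs with
 * APIC IDs 0 and 1, the implied buses and I/O APIC, and the two
 * local-interrupt (ExtINT/NMI) entries.
 */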
static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
	struct mpc_cpu processor;
	struct mpc_lintsrc lintsrc;
	int linttypes[2] = { mp_ExtINT, mp_NMI };
	int i;

	/*
	 * local APIC has default address
	 */
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/*
	 * 2 CPUs, numbered 0 & 1.
	 */
	processor.type = MP_PROCESSOR;
	/* Either an integrated APIC or a discrete 82489DX. */
	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	processor.cpuflag = CPU_ENABLED;
	processor.cpufeature = (boot_cpu_data.x86 << 8) |
	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
	processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
	processor.reserved[0] = 0;
	processor.reserved[1] = 0;
	for (i = 0; i < 2; i++) {
		processor.apicid = i;
		MP_processor_info(&processor);
	}

	construct_ioapic_table(mpc_default_type);

	lintsrc.type = MP_LINTSRC;
	lintsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	lintsrc.srcbusid = 0;
	lintsrc.srcbusirq = 0;
	lintsrc.destapic = MP_APIC_ALL;
	for (i = 0; i < 2; i++) {
		lintsrc.irqtype = linttypes[i];
		lintsrc.destapiclint = i;
		MP_lintsrc_info(&lintsrc);
	}
}

static unsigned long mpf_base;
static bool mpf_found;

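/*
 * Map just enough of the MP configuration table to read its total length,
 * so callers know how much to map (or reserve) for the real parse.
 */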
static unsigned long __init get_mpc_size(unsigned long physptr)
{
	struct mpc_table *mpc;
	unsigned long size;

	mpc = early_memremap(physptr, PAGE_SIZE);
	size = mpc->length;
	early_memunmap(mpc, PAGE_SIZE);
	apic_printk(APIC_VERBOSE, "  mpc: %lx-%lx\n", physptr, physptr + size);

	return size;
}

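/*
 * Map and parse the configuration table that the floating pointer refers
 * to.  If the table is unusable, SMP support is disabled; if it carries no
 * IRQ entries, fall back to the default ISA interrupt routing.
 */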
static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
{
	struct mpc_table *mpc;
	unsigned long size;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);

	/*
	 * Read the physical hardware table.  Anything here will
	 * override the defaults.
	 */
	if (!smp_read_mpc(mpc, early)) {
#ifdef CONFIG_X86_LOCAL_APIC
		smp_found_config = 0;
#endif
		pr_err("BIOS bug, MP table errors detected!...\n");
		pr_cont("... disabling SMP support. (tell your hw vendor)\n");
		early_memunmap(mpc, size);
		return -1;
	}
	early_memunmap(mpc, size);

	if (early)
		return -1;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * If there are no explicit MP IRQ entries, then we are
	 * broken.  We set up most of the low 16 IO-APIC pins to
	 * ISA defaults and hope it will work.
	 */
	if (!mp_irq_entries) {
		struct mpc_bus bus;

		pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

		bus.type = MP_BUS;
		bus.busid = 0;
		memcpy(bus.bustype, "ISA   ", 6);
		MP_bus_info(&bus);

		construct_default_ioirq_mptable(0);
	}
#endif

	return 0;
}

/*
 * Scan the memory blocks for an SMP configuration block.
 */
void __init default_get_smp_config(unsigned int early)
{
	struct mpf_intel *mpf;

	if (!smp_found_config)
		return;

	if (!mpf_found)
		return;

	if (acpi_lapic && early)
		return;

	/*
	 * MPS doesn't support hyperthreading, aka only have
	 * thread 0 apic id in MPS table
	 */
	if (acpi_lapic && acpi_ioapic)
		return;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: error mapping MP table\n");
		return;
	}

	pr_info("Intel MultiProcessor Specification v1.%d\n",
		mpf->specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
	if (mpf->feature2 & (1 << 7)) {
		pr_info("    IMCR and PIC compatibility mode.\n");
		pic_mode = 1;
	} else {
		pr_info("    Virtual Wire compatibility mode.\n");
		pic_mode = 0;
	}
#endif
	/*
	 * Now see if we need to read further.
	 */
	if (mpf->feature1) {
		if (early) {
			/*
			 * local APIC has default address
			 */
			mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
			goto out;
		}

		pr_info("Default MP configuration #%d\n", mpf->feature1);
		construct_default_ISA_mptable(mpf->feature1);

	} else if (mpf->physptr) {
		if (check_physptr(mpf, early))
			goto out;
	} else
		BUG();

	if (!early)
		pr_info("Processors: %d\n", num_processors);
	/*
	 * Only use the first configuration found.
	 */
out:
	early_memunmap(mpf, sizeof(*mpf));
}

static void __init smp_reserve_memory(struct mpf_intel *mpf)
{
	memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
}

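/*
 * Scan a physical memory range in 16-byte steps for the "_MP_" floating
 * pointer signature.  A hit is validated by length, checksum and spec
 * revision, then recorded and reserved so it survives memblock allocations.
 */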
static int __init smp_scan_config(unsigned long base, unsigned long length)
{
	unsigned int *bp;
	struct mpf_intel *mpf;
	int ret = 0;

	apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
		    base, base + length - 1);
	BUILD_BUG_ON(sizeof(*mpf) != 16);

	while (length > 0) {
		bp = early_memremap(base, length);
		mpf = (struct mpf_intel *)bp;
		if ((*bp == SMP_MAGIC_IDENT) &&
		    (mpf->length == 1) &&
		    !mpf_checksum((unsigned char *)bp, 16) &&
		    ((mpf->specification == 1)
		     || (mpf->specification == 4))) {
#ifdef CONFIG_X86_LOCAL_APIC
			smp_found_config = 1;
#endif
			mpf_base = base;
			mpf_found = true;

			pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
				base, base + sizeof(*mpf) - 1);

			memblock_reserve(base, sizeof(*mpf));
			if (mpf->physptr)
				smp_reserve_memory(mpf);

			ret = 1;
		}
		early_memunmap(bp, length);

		if (ret)
			break;

		base += 16;
		length -= 16;
	}
	return ret;
}

void __init default_find_smp_config(void)
{
	unsigned int address;

	/*
	 * FIXME: Linux assumes you have 640K of base ram..
	 * this continues the error...
	 *
	 * 1) Scan the bottom 1K for a signature
	 * 2) Scan the top 1K of base RAM
	 * 3) Scan the 64K of bios
	 */
	if (smp_scan_config(0x0, 0x400) ||
	    smp_scan_config(639 * 0x400, 0x400) ||
	    smp_scan_config(0xF0000, 0x10000))
		return;
	/*
	 * If it is an SMP machine we should know now, unless the
	 * configuration is in an EISA bus machine with an
	 * extended bios data area.
	 *
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E, calculate and scan it here.
	 *
	 * NOTE! There are Linux loaders that will corrupt the EBDA
	 * area, and as such this kind of SMP config may be less
	 * trustworthy, simply because the SMP table may have been
	 * stomped on during early boot. These loaders are buggy and
	 * should be fixed.
	 *
	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
	 */

	address = get_bios_ebda();
	if (address)
		smp_scan_config(address, 0x400);
}

#ifdef CONFIG_X86_IO_APIC
static u8 __initdata irq_used[MAX_IRQ_SOURCES];

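/*
 * Look up an MP-table interrupt-source entry in the mp_irqs[] array built
 * from ACPI.  Returns the index of the matching slot, 0 for legacy entries
 * (not mp_INT or not level/active-low) that are left alone, -1 when no
 * match exists and -2 when the matching slot was already claimed.
 */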
static int  __init get_MP_intsrc_index(struct mpc_intsrc *m)
{
	int i;

	if (m->irqtype != mp_INT)
		return 0;

	if (m->irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW))
		return 0;

	/* not legacy */

	for (i = 0; i < mp_irq_entries; i++) {
		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (mp_irqs[i].srcbus != m->srcbus)
			continue;
		if (mp_irqs[i].srcbusirq != m->srcbusirq)
			continue;
		if (irq_used[i]) {
			/* already claimed */
			return -2;
		}
		irq_used[i] = 1;
		return i;
	}

	/* not found */
	return -1;
}

#define SPARE_SLOT_NUM 20

static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];

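/*
 * Compare one MP-table interrupt-source entry against mp_irqs[].  Matching
 * entries are overwritten with the mp_irqs[] data; unmatched or duplicated
 * level/active-low entries are remembered as spare slots that
 * replace_intsrc_all() can reuse later.
 */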
static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
{
	int i;

	apic_printk(APIC_VERBOSE, "OLD ");
	print_mp_irq_info(m);

	i = get_MP_intsrc_index(m);
	if (i > 0) {
		memcpy(m, &mp_irqs[i], sizeof(*m));
		apic_printk(APIC_VERBOSE, "NEW ");
		print_mp_irq_info(&mp_irqs[i]);
		return;
	}
	if (!i) {
		/* legacy, do nothing */
		return;
	}
	if (*nr_m_spare < SPARE_SLOT_NUM) {
		/*
		 * not found (-1), or duplicated (-2) are invalid entries,
		 * we need to use the slot later
		 */
		m_spare[*nr_m_spare] = m;
		*nr_m_spare += 1;
	}
}

static int __init
check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
{
	if (!mpc_new_phys || count <= mpc_new_length) {
		WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
		return -1;
	}

	return 0;
}
#else /* CONFIG_X86_IO_APIC */
static
inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
#endif /* CONFIG_X86_IO_APIC */

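/*
 * Rewrite every level/active-low mp_INT entry in the configuration table so
 * it matches mp_irqs[], filling spare slots (or appending to the enlarged
 * copy of the table) for entries that are missing, then recompute the
 * table checksum.
 */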
static int  __init replace_intsrc_all(struct mpc_table *mpc,
					unsigned long mpc_new_phys,
					unsigned long mpc_new_length)
{
#ifdef CONFIG_X86_IO_APIC
	int i;
#endif
	int count = sizeof(*mpc);
	int nr_m_spare = 0;
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	pr_info("mpc_length %x\n", mpc->length);
	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			goto out;
		}
	}

#ifdef CONFIG_X86_IO_APIC
	for (i = 0; i < mp_irq_entries; i++) {
		if (irq_used[i])
			continue;

		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (nr_m_spare > 0) {
			apic_printk(APIC_VERBOSE, "*NEW* found\n");
			nr_m_spare--;
			memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
			m_spare[nr_m_spare] = NULL;
		} else {
			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
			count += sizeof(struct mpc_intsrc);
			if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
				goto out;
			memcpy(m, &mp_irqs[i], sizeof(*m));
			mpc->length = count;
			mpt += sizeof(struct mpc_intsrc);
		}
		print_mp_irq_info(&mp_irqs[i]);
	}
#endif
out:
	/* update checksum */
	mpc->checksum = 0;
	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);

	return 0;
}

int enable_update_mptable;

static int __init update_mptable_setup(char *str)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	return 0;
}
early_param("update_mptable", update_mptable_setup);

static unsigned long __initdata mpc_new_phys;
static unsigned long mpc_new_length __initdata = 4096;

/* alloc_mptable or alloc_mptable=4k */
static int __initdata alloc_mptable;
static int __init parse_alloc_mptable_opt(char *p)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	alloc_mptable = 1;
	if (!p)
		return 0;
	mpc_new_length = memparse(p, &p);
	return 0;
}
early_param("alloc_mptable", parse_alloc_mptable_opt);

void __init e820__memblock_alloc_reserved_mpc_new(void)
{
	if (enable_update_mptable && alloc_mptable)
		mpc_new_phys = e820__memblock_alloc_reserved(mpc_new_length, 4);
}

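/*
 * late_initcall triggered by the "update_mptable" command-line option: map
 * the floating pointer and configuration table again and rewrite the
 * interrupt-source entries, either in place or in the spare block that
 * "alloc_mptable" reserved via e820__memblock_alloc_reserved_mpc_new().
 */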
static int __init update_mp_table(void)
{
	char str[16];
	char oem[10];
	struct mpf_intel *mpf;
	struct mpc_table *mpc, *mpc_new;
	unsigned long size;

	if (!enable_update_mptable)
		return 0;

	if (!mpf_found)
		return 0;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: mpf early_memremap() failed\n");
		return 0;
	}

	/*
	 * Now see if we need to go further.
	 */
	if (mpf->feature1)
		goto do_unmap_mpf;

	if (!mpf->physptr)
		goto do_unmap_mpf;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);
	if (!mpc) {
		pr_err("MPTABLE: mpc early_memremap() failed\n");
		goto do_unmap_mpf;
	}

	if (!smp_check_mpc(mpc, oem, str))
		goto do_unmap_mpc;

	pr_info("mpf: %llx\n", (u64)mpf_base);
	pr_info("physptr: %x\n", mpf->physptr);

	if (mpc_new_phys && mpc->length > mpc_new_length) {
		mpc_new_phys = 0;
		pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n",
			mpc_new_length);
	}

	if (!mpc_new_phys) {
		unsigned char old, new;
		/* check if we can change the position */
		mpc->checksum = 0;
		old = mpf_checksum((unsigned char *)mpc, mpc->length);
		mpc->checksum = 0xff;
		new = mpf_checksum((unsigned char *)mpc, mpc->length);
		if (old == new) {
			pr_info("mpc is readonly, please try alloc_mptable instead\n");
			goto do_unmap_mpc;
		}
		pr_info("use in-position replacing\n");
	} else {
		mpc_new = early_memremap(mpc_new_phys, mpc_new_length);
		if (!mpc_new) {
			pr_err("MPTABLE: new mpc early_memremap() failed\n");
			goto do_unmap_mpc;
		}
		mpf->physptr = mpc_new_phys;
		memcpy(mpc_new, mpc, mpc->length);
		early_memunmap(mpc, size);
		mpc = mpc_new;
		size = mpc_new_length;
		/* check if we can modify that */
		if (mpc_new_phys - mpf->physptr) {
			struct mpf_intel *mpf_new;
			/* steal 16 bytes from [0, 1k) */
			mpf_new = early_memremap(0x400 - 16, sizeof(*mpf_new));
			if (!mpf_new) {
				pr_err("MPTABLE: new mpf early_memremap() failed\n");
				goto do_unmap_mpc;
			}
			pr_info("mpf new: %x\n", 0x400 - 16);
			memcpy(mpf_new, mpf, 16);
			early_memunmap(mpf, sizeof(*mpf));
			mpf = mpf_new;
			mpf->physptr = mpc_new_phys;
		}
		mpf->checksum = 0;
		mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
		pr_info("physptr new: %x\n", mpf->physptr);
	}

	/*
	 * only replace the one with mp_INT and
	 *	 MP_IRQ_TRIGGER_LEVEL|MP_IRQ_POLARITY_LOW,
	 * already in mp_irqs , stored by ... and mp_config_acpi_gsi,
	 * may need pci=routeirq for all coverage
	 */
	replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);

do_unmap_mpc:
	early_memunmap(mpc, size);

do_unmap_mpf:
	early_memunmap(mpf, sizeof(*mpf));

	return 0;
}

late_initcall(update_mp_table);