v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *	Intel Multiprocessor Specification 1.1 and 1.4
  4 *	compliant MP-table parsing routines.
  5 *
  6 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
  7 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
  8 *      (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
  9 */
 10
 11#include <linux/mm.h>
 12#include <linux/init.h>
 13#include <linux/delay.h>
 14#include <linux/memblock.h>
 15#include <linux/kernel_stat.h>
 16#include <linux/mc146818rtc.h>
 17#include <linux/bitops.h>
 18#include <linux/acpi.h>
 19#include <linux/smp.h>
 20#include <linux/pci.h>
  21
 22#include <asm/irqdomain.h>
 23#include <asm/mtrr.h>
 24#include <asm/mpspec.h>
 25#include <asm/pgalloc.h>
 26#include <asm/io_apic.h>
 27#include <asm/proto.h>
 28#include <asm/bios_ebda.h>
 29#include <asm/e820/api.h>
 30#include <asm/setup.h>
 31#include <asm/smp.h>
 32
 33#include <asm/apic.h>
 34/*
 35 * Checksum an MP configuration block.
 36 */
 37
 38static int __init mpf_checksum(unsigned char *mp, int len)
 39{
 40	int sum = 0;
 41
 42	while (len--)
 43		sum += *mp++;
 44
 45	return sum & 0xFF;
 46}
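/*
 * Illustrative sketch (not part of this file): a valid MP structure is
 * one whose bytes sum to 0 modulo 256, so the BIOS stores the two's
 * complement of the other bytes' sum in the checksum field and
 * mpf_checksum() above returns 0 for it.  A hypothetical stand-alone
 * verifier over a dumped table could look like this:
 */
static inline int mp_block_is_valid(const unsigned char *blk, int len)
{
	unsigned int sum = 0;

	while (len--)
		sum += *blk++;		/* wraps naturally; only the low byte matters */

	return (sum & 0xFF) == 0;	/* 0 => checksum consistent */
}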
 47
 48int __init default_mpc_apic_id(struct mpc_cpu *m)
 49{
 50	return m->apicid;
 51}
 52
 53static void __init MP_processor_info(struct mpc_cpu *m)
 54{
 55	int apicid;
 56	char *bootup_cpu = "";
 57
 58	if (!(m->cpuflag & CPU_ENABLED)) {
 59		disabled_cpus++;
 60		return;
 61	}
 62
 63	apicid = x86_init.mpparse.mpc_apic_id(m);
 64
 65	if (m->cpuflag & CPU_BOOTPROCESSOR) {
 66		bootup_cpu = " (Bootup-CPU)";
 67		boot_cpu_physical_apicid = m->apicid;
 68	}
 69
 70	pr_info("Processor #%d%s\n", m->apicid, bootup_cpu);
 71	generic_processor_info(apicid, m->apicver);
 72}
 73
 74#ifdef CONFIG_X86_IO_APIC
 75void __init default_mpc_oem_bus_info(struct mpc_bus *m, char *str)
 76{
 77	memcpy(str, m->bustype, 6);
 78	str[6] = 0;
 79	apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
 80}
 81
 82static void __init MP_bus_info(struct mpc_bus *m)
 83{
 84	char str[7];
 85
 86	x86_init.mpparse.mpc_oem_bus_info(m, str);
 87
 88#if MAX_MP_BUSSES < 256
 89	if (m->busid >= MAX_MP_BUSSES) {
 90		pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n",
 91			m->busid, str, MAX_MP_BUSSES - 1);
 92		return;
 93	}
 94#endif
 95
 96	set_bit(m->busid, mp_bus_not_pci);
 97	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
 98#ifdef CONFIG_EISA
 99		mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
100#endif
101	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
102		if (x86_init.mpparse.mpc_oem_pci_bus)
103			x86_init.mpparse.mpc_oem_pci_bus(m);
104
105		clear_bit(m->busid, mp_bus_not_pci);
106#ifdef CONFIG_EISA
107		mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
108	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
109		mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
110#endif
111	} else
112		pr_warn("Unknown bustype %s - ignoring\n", str);
113}
114
115static void __init MP_ioapic_info(struct mpc_ioapic *m)
116{
117	struct ioapic_domain_cfg cfg = {
118		.type = IOAPIC_DOMAIN_LEGACY,
119		.ops = &mp_ioapic_irqdomain_ops,
120	};
121
122	if (m->flags & MPC_APIC_USABLE)
123		mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg);
124}
125
126static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
127{
128	apic_printk(APIC_VERBOSE,
129		"Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n",
130		mp_irq->irqtype, mp_irq->irqflag & 3,
131		(mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
132		mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
133}
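/*
 * Note on the irqflag decoding above (MP spec 1.4 interrupt entries,
 * matching the MP_IRQPOL_ and MP_IRQTRIG_ masks used elsewhere in this
 * file): bits 1:0 carry the polarity (0 = bus default, 1 = active high,
 * 3 = active low) and bits 3:2 the trigger mode (0 = bus default,
 * 1 = edge, 3 = level), which is why "pol" is printed as irqflag & 3
 * and "trig" as (irqflag >> 2) & 3.
 */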
134
135#else /* CONFIG_X86_IO_APIC */
136static inline void __init MP_bus_info(struct mpc_bus *m) {}
137static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
138#endif /* CONFIG_X86_IO_APIC */
139
140static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
141{
142	apic_printk(APIC_VERBOSE,
143		"Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n",
144		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
145		m->srcbusirq, m->destapic, m->destapiclint);
146}
147
148/*
149 * Read/parse the MPC
150 */
151static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
152{
153
154	if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
155		pr_err("MPTABLE: bad signature [%c%c%c%c]!\n",
156		       mpc->signature[0], mpc->signature[1],
157		       mpc->signature[2], mpc->signature[3]);
158		return 0;
159	}
160	if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
161		pr_err("MPTABLE: checksum error!\n");
162		return 0;
163	}
164	if (mpc->spec != 0x01 && mpc->spec != 0x04) {
165		pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec);
166		return 0;
167	}
168	if (!mpc->lapic) {
169		pr_err("MPTABLE: null local APIC address!\n");
170		return 0;
171	}
172	memcpy(oem, mpc->oem, 8);
173	oem[8] = 0;
174	pr_info("MPTABLE: OEM ID: %s\n", oem);
175
176	memcpy(str, mpc->productid, 12);
177	str[12] = 0;
178
179	pr_info("MPTABLE: Product ID: %s\n", str);
180
181	pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic);
182
183	return 1;
184}
185
186static void skip_entry(unsigned char **ptr, int *count, int size)
187{
188	*ptr += size;
189	*count += size;
190}
191
192static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
193{
194	pr_err("Your mptable is wrong, contact your HW vendor!\n");
195	pr_cont("type %x\n", *mpt);
196	print_hex_dump(KERN_ERR, "  ", DUMP_PREFIX_ADDRESS, 16,
197			1, mpc, mpc->length, 1);
198}
199
200void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }
201
202static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
203{
204	char str[16];
205	char oem[10];
206
207	int count = sizeof(*mpc);
208	unsigned char *mpt = ((unsigned char *)mpc) + count;
209
210	if (!smp_check_mpc(mpc, oem, str))
211		return 0;
212
213	/* Initialize the lapic mapping */
214	if (!acpi_lapic)
215		register_lapic_address(mpc->lapic);
216
217	if (early)
218		return 1;
219
220	if (mpc->oemptr)
221		x86_init.mpparse.smp_read_mpc_oem(mpc);
222
223	/*
224	 *      Now process the configuration blocks.
225	 */
226	x86_init.mpparse.mpc_record(0);
227
228	while (count < mpc->length) {
229		switch (*mpt) {
230		case MP_PROCESSOR:
231			/* ACPI may have already provided this data */
232			if (!acpi_lapic)
233				MP_processor_info((struct mpc_cpu *)mpt);
234			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
235			break;
236		case MP_BUS:
237			MP_bus_info((struct mpc_bus *)mpt);
238			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
239			break;
240		case MP_IOAPIC:
241			MP_ioapic_info((struct mpc_ioapic *)mpt);
242			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
243			break;
244		case MP_INTSRC:
245			mp_save_irq((struct mpc_intsrc *)mpt);
246			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
247			break;
248		case MP_LINTSRC:
249			MP_lintsrc_info((struct mpc_lintsrc *)mpt);
250			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
251			break;
252		default:
253			/* wrong mptable */
254			smp_dump_mptable(mpc, mpt);
255			count = mpc->length;
256			break;
257		}
258		x86_init.mpparse.mpc_record(1);
259	}
260
261	if (!num_processors)
262		pr_err("MPTABLE: no processors registered!\n");
263	return num_processors;
264}
265
266#ifdef CONFIG_X86_IO_APIC
267
268static int __init ELCR_trigger(unsigned int irq)
269{
270	unsigned int port;
271
272	port = 0x4d0 + (irq >> 3);
273	return (inb(port) >> (irq & 7)) & 1;
274}
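/*
 * The helper above reads the chipset's Edge/Level Control Register:
 * one bit per ISA IRQ, IRQs 0-7 at I/O port 0x4d0 and IRQs 8-15 at
 * 0x4d1, with a set bit meaning level triggered.  For example, for
 * IRQ 10 it reads port 0x4d0 + (10 >> 3) = 0x4d1 and tests bit
 * 10 & 7 = 2.  (Worked example added for illustration only.)
 */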
275
276static void __init construct_default_ioirq_mptable(int mpc_default_type)
277{
278	struct mpc_intsrc intsrc;
279	int i;
280	int ELCR_fallback = 0;
281
282	intsrc.type = MP_INTSRC;
283	intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
284	intsrc.srcbus = 0;
285	intsrc.dstapic = mpc_ioapic_id(0);
286
287	intsrc.irqtype = mp_INT;
288
289	/*
290	 *  If true, we have an ISA/PCI system with no IRQ entries
291	 *  in the MP table. To prevent the PCI interrupts from being set up
292	 *  incorrectly, we try to use the ELCR. The sanity check to see if
293	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
294	 *  never be level sensitive, so we simply see if the ELCR agrees.
295	 *  If it does, we assume it's valid.
296	 */
297	if (mpc_default_type == 5) {
298		pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
299
300		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
301		    ELCR_trigger(13))
302			pr_err("ELCR contains invalid data... not using ELCR\n");
303		else {
304			pr_info("Using ELCR to identify PCI interrupts\n");
305			ELCR_fallback = 1;
306		}
307	}
308
309	for (i = 0; i < 16; i++) {
310		switch (mpc_default_type) {
311		case 2:
312			if (i == 0 || i == 13)
313				continue;	/* IRQ0 & IRQ13 not connected */
314			/* fall through */
315		default:
316			if (i == 2)
317				continue;	/* IRQ2 is never connected */
318		}
319
320		if (ELCR_fallback) {
321			/*
322			 *  If the ELCR indicates a level-sensitive interrupt, we
323			 *  copy that information over to the MP table in the
324			 *  irqflag field (level sensitive, active high polarity).
325			 */
326			if (ELCR_trigger(i)) {
327				intsrc.irqflag = MP_IRQTRIG_LEVEL |
328						 MP_IRQPOL_ACTIVE_HIGH;
329			} else {
330				intsrc.irqflag = MP_IRQTRIG_DEFAULT |
331						 MP_IRQPOL_DEFAULT;
332			}
333		}
334
335		intsrc.srcbusirq = i;
336		intsrc.dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
337		mp_save_irq(&intsrc);
338	}
339
340	intsrc.irqtype = mp_ExtINT;
341	intsrc.srcbusirq = 0;
342	intsrc.dstirq = 0;	/* 8259A to INTIN0 */
343	mp_save_irq(&intsrc);
344}
345
346
347static void __init construct_ioapic_table(int mpc_default_type)
348{
349	struct mpc_ioapic ioapic;
350	struct mpc_bus bus;
351
352	bus.type = MP_BUS;
353	bus.busid = 0;
354	switch (mpc_default_type) {
355	default:
356		pr_err("???\nUnknown standard configuration %d\n",
357		       mpc_default_type);
358		/* fall through */
359	case 1:
360	case 5:
361		memcpy(bus.bustype, "ISA   ", 6);
362		break;
363	case 2:
364	case 6:
365	case 3:
366		memcpy(bus.bustype, "EISA  ", 6);
367		break;
368	}
369	MP_bus_info(&bus);
370	if (mpc_default_type > 4) {
371		bus.busid = 1;
372		memcpy(bus.bustype, "PCI   ", 6);
373		MP_bus_info(&bus);
374	}
375
376	ioapic.type	= MP_IOAPIC;
377	ioapic.apicid	= 2;
378	ioapic.apicver	= mpc_default_type > 4 ? 0x10 : 0x01;
379	ioapic.flags	= MPC_APIC_USABLE;
380	ioapic.apicaddr	= IO_APIC_DEFAULT_PHYS_BASE;
381	MP_ioapic_info(&ioapic);
382
383	/*
384	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
385	 */
386	construct_default_ioirq_mptable(mpc_default_type);
387}
388#else
389static inline void __init construct_ioapic_table(int mpc_default_type) { }
390#endif
391
392static inline void __init construct_default_ISA_mptable(int mpc_default_type)
393{
394	struct mpc_cpu processor;
395	struct mpc_lintsrc lintsrc;
396	int linttypes[2] = { mp_ExtINT, mp_NMI };
397	int i;
398
399	/*
400	 * local APIC has default address
401	 */
402	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
403
404	/*
405	 * 2 CPUs, numbered 0 & 1.
406	 */
407	processor.type = MP_PROCESSOR;
408	/* Either an integrated APIC or a discrete 82489DX. */
409	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
410	processor.cpuflag = CPU_ENABLED;
411	processor.cpufeature = (boot_cpu_data.x86 << 8) |
412	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
413	processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
414	processor.reserved[0] = 0;
415	processor.reserved[1] = 0;
416	for (i = 0; i < 2; i++) {
417		processor.apicid = i;
418		MP_processor_info(&processor);
419	}
420
421	construct_ioapic_table(mpc_default_type);
422
423	lintsrc.type = MP_LINTSRC;
424	lintsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
425	lintsrc.srcbusid = 0;
426	lintsrc.srcbusirq = 0;
427	lintsrc.destapic = MP_APIC_ALL;
428	for (i = 0; i < 2; i++) {
429		lintsrc.irqtype = linttypes[i];
430		lintsrc.destapiclint = i;
431		MP_lintsrc_info(&lintsrc);
432	}
433}
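/*
 * Illustration of the cpufeature packing done above (assumed values,
 * not taken from this file): the synthetic CPU entry stores
 * (family << 8) | (model << 4) | stepping, so a family 6, model 14,
 * stepping 3 part would be encoded as 0x600 | 0xe0 | 0x3 = 0x6e3,
 * alongside the raw CPUID leaf 1 EDX feature bits in featureflag.
 */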
434
435static unsigned long mpf_base;
436static bool mpf_found;
437
438static unsigned long __init get_mpc_size(unsigned long physptr)
439{
440	struct mpc_table *mpc;
441	unsigned long size;
442
443	mpc = early_memremap(physptr, PAGE_SIZE);
444	size = mpc->length;
445	early_memunmap(mpc, PAGE_SIZE);
446	apic_printk(APIC_VERBOSE, "  mpc: %lx-%lx\n", physptr, physptr + size);
447
448	return size;
449}
450
451static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
452{
453	struct mpc_table *mpc;
454	unsigned long size;
455
456	size = get_mpc_size(mpf->physptr);
457	mpc = early_memremap(mpf->physptr, size);
458
459	/*
460	 * Read the physical hardware table.  Anything here will
461	 * override the defaults.
462	 */
463	if (!smp_read_mpc(mpc, early)) {
464#ifdef CONFIG_X86_LOCAL_APIC
465		smp_found_config = 0;
466#endif
467		pr_err("BIOS bug, MP table errors detected!...\n");
468		pr_cont("... disabling SMP support. (tell your hw vendor)\n");
469		early_memunmap(mpc, size);
470		return -1;
471	}
472	early_memunmap(mpc, size);
473
474	if (early)
475		return -1;
476
477#ifdef CONFIG_X86_IO_APIC
478	/*
479	 * If there are no explicit MP IRQ entries, then we are
480	 * broken.  We set up most of the low 16 IO-APIC pins to
481	 * ISA defaults and hope it will work.
482	 */
483	if (!mp_irq_entries) {
484		struct mpc_bus bus;
485
486		pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
487
488		bus.type = MP_BUS;
489		bus.busid = 0;
490		memcpy(bus.bustype, "ISA   ", 6);
491		MP_bus_info(&bus);
492
493		construct_default_ioirq_mptable(0);
494	}
495#endif
496
497	return 0;
498}
499
500/*
501 * Scan the memory blocks for an SMP configuration block.
502 */
503void __init default_get_smp_config(unsigned int early)
504{
505	struct mpf_intel *mpf;
506
507	if (!smp_found_config)
508		return;
509
510	if (!mpf_found)
511		return;
512
513	if (acpi_lapic && early)
514		return;
515
516	/*
517	 * MPS doesn't support hyperthreading, aka only have
518	 * thread 0 apic id in MPS table
519	 */
520	if (acpi_lapic && acpi_ioapic)
521		return;
522
523	mpf = early_memremap(mpf_base, sizeof(*mpf));
524	if (!mpf) {
525		pr_err("MPTABLE: error mapping MP table\n");
526		return;
527	}
528
529	pr_info("Intel MultiProcessor Specification v1.%d\n",
530		mpf->specification);
531#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
532	if (mpf->feature2 & (1 << 7)) {
533		pr_info("    IMCR and PIC compatibility mode.\n");
534		pic_mode = 1;
535	} else {
536		pr_info("    Virtual Wire compatibility mode.\n");
537		pic_mode = 0;
538	}
539#endif
540	/*
541	 * Now see if we need to read further.
542	 */
543	if (mpf->feature1) {
544		if (early) {
545			/*
546			 * local APIC has default address
547			 */
548			mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
549			goto out;
550		}
551
552		pr_info("Default MP configuration #%d\n", mpf->feature1);
553		construct_default_ISA_mptable(mpf->feature1);
554
555	} else if (mpf->physptr) {
556		if (check_physptr(mpf, early))
557			goto out;
558	} else
559		BUG();
560
561	if (!early)
562		pr_info("Processors: %d\n", num_processors);
563	/*
564	 * Only use the first configuration found.
565	 */
566out:
567	early_memunmap(mpf, sizeof(*mpf));
568}
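/*
 * Recap of the floating-pointer feature bytes consumed above: a
 * non-zero feature1 selects one of the MP spec's pre-defined default
 * configurations (there is no configuration table, so
 * construct_default_ISA_mptable() synthesizes one), feature1 == 0
 * means physptr points at a real MP configuration table, and bit 7 of
 * feature2 indicates an IMCR is present, i.e. PIC compatibility mode
 * rather than virtual wire mode.
 */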
569
570static void __init smp_reserve_memory(struct mpf_intel *mpf)
571{
572	memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
573}
574
575static int __init smp_scan_config(unsigned long base, unsigned long length)
576{
577	unsigned int *bp;
578	struct mpf_intel *mpf;
579	int ret = 0;
580
581	apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
582		    base, base + length - 1);
583	BUILD_BUG_ON(sizeof(*mpf) != 16);
584
585	while (length > 0) {
586		bp = early_memremap(base, length);
587		mpf = (struct mpf_intel *)bp;
588		if ((*bp == SMP_MAGIC_IDENT) &&
589		    (mpf->length == 1) &&
590		    !mpf_checksum((unsigned char *)bp, 16) &&
591		    ((mpf->specification == 1)
592		     || (mpf->specification == 4))) {
593#ifdef CONFIG_X86_LOCAL_APIC
594			smp_found_config = 1;
595#endif
596			mpf_base = base;
597			mpf_found = true;
598
599			pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
600				base, base + sizeof(*mpf) - 1);
601
602			memblock_reserve(base, sizeof(*mpf));
603			if (mpf->physptr)
604				smp_reserve_memory(mpf);
605
606			ret = 1;
607		}
608		early_memunmap(bp, length);
609
610		if (ret)
611			break;
612
613		base += 16;
614		length -= 16;
615	}
616	return ret;
617}
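/*
 * Sketch of what the scan above is matching (illustrative): the
 * floating pointer structure begins with the ASCII signature "_MP_"
 * packed into a 32-bit word, is exactly one 16-byte paragraph long
 * (mpf->length == 1) and must checksum to zero, so the loop walks the
 * region 16 bytes at a time.  Read from little-endian memory the
 * signature word is
 *
 *	u32 magic = '_' | ('M' << 8) | ('P' << 16) | ('_' << 24);
 *
 * which is the value compared against SMP_MAGIC_IDENT.
 */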
618
619void __init default_find_smp_config(void)
620{
621	unsigned int address;
622
623	/*
624	 * FIXME: Linux assumes you have 640K of base ram..
625	 * this continues the error...
626	 *
627	 * 1) Scan the bottom 1K for a signature
628	 * 2) Scan the top 1K of base RAM
629	 * 3) Scan the 64K of bios
630	 */
631	if (smp_scan_config(0x0, 0x400) ||
632	    smp_scan_config(639 * 0x400, 0x400) ||
633	    smp_scan_config(0xF0000, 0x10000))
634		return;
635	/*
636	 * If it is an SMP machine we should know now, unless the
637	 * configuration is in an EISA bus machine with an
638	 * extended bios data area.
639	 *
640	 * there is a real-mode segmented pointer pointing to the
641	 * 4K EBDA area at 0x40E, calculate and scan it here.
642	 *
643	 * NOTE! There are Linux loaders that will corrupt the EBDA
644	 * area, and as such this kind of SMP config may be less
645	 * trustworthy, simply because the SMP table may have been
646	 * stomped on during early boot. These loaders are buggy and
647	 * should be fixed.
648	 *
649	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
650	 */
651
652	address = get_bios_ebda();
653	if (address)
654		smp_scan_config(address, 0x400);
655}
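/*
 * Illustration of the EBDA lookup mentioned above (example value is
 * assumed): the BIOS data area word at physical 0x40E holds the EBDA's
 * real-mode segment, which get_bios_ebda() converts with a 4-bit
 * shift; a typical value of 0x9fc0 yields 0x9fc0 << 4 = 0x9fc00, and
 * that physical base is what gets passed to smp_scan_config() for the
 * 1K scan.
 */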
656
657#ifdef CONFIG_X86_IO_APIC
658static u8 __initdata irq_used[MAX_IRQ_SOURCES];
659
660static int  __init get_MP_intsrc_index(struct mpc_intsrc *m)
661{
662	int i;
663
664	if (m->irqtype != mp_INT)
665		return 0;
666
667	if (m->irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW))
668		return 0;
669
670	/* not legacy */
671
672	for (i = 0; i < mp_irq_entries; i++) {
673		if (mp_irqs[i].irqtype != mp_INT)
674			continue;
675
676		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
677					   MP_IRQPOL_ACTIVE_LOW))
678			continue;
679
680		if (mp_irqs[i].srcbus != m->srcbus)
681			continue;
682		if (mp_irqs[i].srcbusirq != m->srcbusirq)
683			continue;
684		if (irq_used[i]) {
685			/* already claimed */
686			return -2;
687		}
688		irq_used[i] = 1;
689		return i;
690	}
691
692	/* not found */
693	return -1;
694}
695
696#define SPARE_SLOT_NUM 20
697
698static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
699
700static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
701{
702	int i;
703
704	apic_printk(APIC_VERBOSE, "OLD ");
705	print_mp_irq_info(m);
706
707	i = get_MP_intsrc_index(m);
708	if (i > 0) {
709		memcpy(m, &mp_irqs[i], sizeof(*m));
710		apic_printk(APIC_VERBOSE, "NEW ");
711		print_mp_irq_info(&mp_irqs[i]);
712		return;
713	}
714	if (!i) {
715		/* legacy, do nothing */
716		return;
717	}
718	if (*nr_m_spare < SPARE_SLOT_NUM) {
719		/*
720		 * not found (-1), or duplicated (-2) are invalid entries,
721		 * we need to use the slot later
722		 */
723		m_spare[*nr_m_spare] = m;
724		*nr_m_spare += 1;
725	}
726}
727
728static int __init
729check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
730{
731	if (!mpc_new_phys || count <= mpc_new_length) {
732		WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
733		return -1;
734	}
735
736	return 0;
737}
738#else /* CONFIG_X86_IO_APIC */
739static
740inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
741#endif /* CONFIG_X86_IO_APIC */
742
743static int  __init replace_intsrc_all(struct mpc_table *mpc,
744					unsigned long mpc_new_phys,
745					unsigned long mpc_new_length)
746{
747#ifdef CONFIG_X86_IO_APIC
748	int i;
749#endif
750	int count = sizeof(*mpc);
751	int nr_m_spare = 0;
752	unsigned char *mpt = ((unsigned char *)mpc) + count;
753
754	pr_info("mpc_length %x\n", mpc->length);
755	while (count < mpc->length) {
756		switch (*mpt) {
757		case MP_PROCESSOR:
758			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
759			break;
760		case MP_BUS:
761			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
762			break;
763		case MP_IOAPIC:
764			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
765			break;
766		case MP_INTSRC:
767			check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
768			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
769			break;
770		case MP_LINTSRC:
771			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
772			break;
773		default:
774			/* wrong mptable */
775			smp_dump_mptable(mpc, mpt);
776			goto out;
777		}
778	}
779
780#ifdef CONFIG_X86_IO_APIC
781	for (i = 0; i < mp_irq_entries; i++) {
782		if (irq_used[i])
783			continue;
784
785		if (mp_irqs[i].irqtype != mp_INT)
786			continue;
787
788		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
789					   MP_IRQPOL_ACTIVE_LOW))
790			continue;
791
792		if (nr_m_spare > 0) {
793			apic_printk(APIC_VERBOSE, "*NEW* found\n");
794			nr_m_spare--;
795			memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
796			m_spare[nr_m_spare] = NULL;
797		} else {
798			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
799			count += sizeof(struct mpc_intsrc);
800			if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
801				goto out;
802			memcpy(m, &mp_irqs[i], sizeof(*m));
803			mpc->length = count;
804			mpt += sizeof(struct mpc_intsrc);
805		}
806		print_mp_irq_info(&mp_irqs[i]);
807	}
808#endif
809out:
810	/* update checksum */
811	mpc->checksum = 0;
812	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);
813
814	return 0;
815}
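/*
 * Worked example of the checksum update above (numbers illustrative):
 * with the checksum byte zeroed, mpf_checksum() returns the low byte
 * of the sum of all other bytes, say 0x37; storing -0x37 (0xc9 as an
 * unsigned char) makes the complete table sum to 0x37 + 0xc9 = 0x100,
 * i.e. 0 modulo 256, which is exactly what smp_check_mpc() demands.
 */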
816
817int enable_update_mptable;
818
819static int __init update_mptable_setup(char *str)
820{
821	enable_update_mptable = 1;
822#ifdef CONFIG_PCI
823	pci_routeirq = 1;
824#endif
825	return 0;
826}
827early_param("update_mptable", update_mptable_setup);
828
829static unsigned long __initdata mpc_new_phys;
830static unsigned long mpc_new_length __initdata = 4096;
831
832/* alloc_mptable or alloc_mptable=4k */
833static int __initdata alloc_mptable;
834static int __init parse_alloc_mptable_opt(char *p)
835{
836	enable_update_mptable = 1;
837#ifdef CONFIG_PCI
838	pci_routeirq = 1;
839#endif
840	alloc_mptable = 1;
841	if (!p)
842		return 0;
843	mpc_new_length = memparse(p, &p);
844	return 0;
845}
846early_param("alloc_mptable", parse_alloc_mptable_opt);
847
848void __init e820__memblock_alloc_reserved_mpc_new(void)
849{
850	if (enable_update_mptable && alloc_mptable)
851		mpc_new_phys = e820__memblock_alloc_reserved(mpc_new_length, 4);
852}
853
854static int __init update_mp_table(void)
855{
856	char str[16];
857	char oem[10];
858	struct mpf_intel *mpf;
859	struct mpc_table *mpc, *mpc_new;
860	unsigned long size;
861
862	if (!enable_update_mptable)
863		return 0;
864
865	if (!mpf_found)
866		return 0;
867
868	mpf = early_memremap(mpf_base, sizeof(*mpf));
869	if (!mpf) {
870		pr_err("MPTABLE: mpf early_memremap() failed\n");
871		return 0;
872	}
873
874	/*
875	 * Now see if we need to go further.
876	 */
877	if (mpf->feature1)
878		goto do_unmap_mpf;
879
880	if (!mpf->physptr)
881		goto do_unmap_mpf;
882
883	size = get_mpc_size(mpf->physptr);
884	mpc = early_memremap(mpf->physptr, size);
885	if (!mpc) {
886		pr_err("MPTABLE: mpc early_memremap() failed\n");
887		goto do_unmap_mpf;
888	}
889
890	if (!smp_check_mpc(mpc, oem, str))
891		goto do_unmap_mpc;
892
893	pr_info("mpf: %llx\n", (u64)mpf_base);
894	pr_info("physptr: %x\n", mpf->physptr);
895
896	if (mpc_new_phys && mpc->length > mpc_new_length) {
897		mpc_new_phys = 0;
898		pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n",
899			mpc_new_length);
900	}
901
902	if (!mpc_new_phys) {
903		unsigned char old, new;
904		/* check if we can change the position */
905		mpc->checksum = 0;
906		old = mpf_checksum((unsigned char *)mpc, mpc->length);
907		mpc->checksum = 0xff;
908		new = mpf_checksum((unsigned char *)mpc, mpc->length);
909		if (old == new) {
910			pr_info("mpc is readonly, please try alloc_mptable instead\n");
911			goto do_unmap_mpc;
912		}
913		pr_info("use in-position replacing\n");
914	} else {
915		mpc_new = early_memremap(mpc_new_phys, mpc_new_length);
916		if (!mpc_new) {
917			pr_err("MPTABLE: new mpc early_memremap() failed\n");
918			goto do_unmap_mpc;
919		}
920		mpf->physptr = mpc_new_phys;
921		memcpy(mpc_new, mpc, mpc->length);
922		early_memunmap(mpc, size);
923		mpc = mpc_new;
924		size = mpc_new_length;
925		/* check if we can modify that */
926		if (mpc_new_phys - mpf->physptr) {
927			struct mpf_intel *mpf_new;
928			/* steal 16 bytes from [0, 1k) */
929			mpf_new = early_memremap(0x400 - 16, sizeof(*mpf_new));
930			if (!mpf_new) {
931				pr_err("MPTABLE: new mpf early_memremap() failed\n");
932				goto do_unmap_mpc;
933			}
934			pr_info("mpf new: %x\n", 0x400 - 16);
935			memcpy(mpf_new, mpf, 16);
936			early_memunmap(mpf, sizeof(*mpf));
937			mpf = mpf_new;
938			mpf->physptr = mpc_new_phys;
939		}
940		mpf->checksum = 0;
941		mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
942		pr_info("physptr new: %x\n", mpf->physptr);
943	}
944
945	/*
946	 * only replace the one with mp_INT and
947	 *	 MP_IRQ_TRIGGER_LEVEL|MP_IRQ_POLARITY_LOW,
948	 * already in mp_irqs , stored by ... and mp_config_acpi_gsi,
949	 * may need pci=routeirq for all coverage
950	 */
951	replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);
952
953do_unmap_mpc:
954	early_memunmap(mpc, size);
955
956do_unmap_mpf:
957	early_memunmap(mpf, sizeof(*mpf));
958
959	return 0;
960}
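/*
 * Note on the "can we change the position" probe above: the checksum
 * byte is written as 0x00 and then as 0xff, with mpf_checksum() run
 * after each write.  If the two sums come out identical the writes
 * never reached memory, so the table lives in read-only space and can
 * only be replaced via a freshly allocated buffer (alloc_mptable=).
 */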
961
962late_initcall(update_mp_table);
v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *	Intel Multiprocessor Specification 1.1 and 1.4
  4 *	compliant MP-table parsing routines.
  5 *
  6 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
  7 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
  8 *      (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
  9 */
 10
 11#include <linux/mm.h>
 12#include <linux/init.h>
 13#include <linux/delay.h>
 14#include <linux/memblock.h>
 15#include <linux/kernel_stat.h>
 16#include <linux/mc146818rtc.h>
 17#include <linux/bitops.h>
 18#include <linux/acpi.h>
 19#include <linux/smp.h>
 20#include <linux/pci.h>
 21
 22#include <asm/i8259.h>
 23#include <asm/io_apic.h>
 24#include <asm/acpi.h>
 25#include <asm/irqdomain.h>
 26#include <asm/mtrr.h>
  27#include <asm/mpspec.h>
 28#include <asm/proto.h>
 29#include <asm/bios_ebda.h>
 30#include <asm/e820/api.h>
 31#include <asm/setup.h>
 32#include <asm/smp.h>
 33
 34#include <asm/apic.h>
 35/*
 36 * Checksum an MP configuration block.
 37 */
 38
 39static int __init mpf_checksum(unsigned char *mp, int len)
 40{
 41	int sum = 0;
 42
 43	while (len--)
 44		sum += *mp++;
 45
 46	return sum & 0xFF;
 47}
  48
 49static void __init MP_processor_info(struct mpc_cpu *m)
  50{
 51	char *bootup_cpu = "";
 52
 53	if (!(m->cpuflag & CPU_ENABLED)) {
 54		disabled_cpus++;
 55		return;
 56	}
 57
  58	if (m->cpuflag & CPU_BOOTPROCESSOR)
  59		bootup_cpu = " (Bootup-CPU)";
 60
 61	pr_info("Processor #%d%s\n", m->apicid, bootup_cpu);
 62	generic_processor_info(m->apicid);
 63}
 64
 65#ifdef CONFIG_X86_IO_APIC
 66static void __init mpc_oem_bus_info(struct mpc_bus *m, char *str)
 67{
 68	memcpy(str, m->bustype, 6);
 69	str[6] = 0;
 70	apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
 71}
 72
 73static void __init MP_bus_info(struct mpc_bus *m)
 74{
 75	char str[7];
 76
 77	mpc_oem_bus_info(m, str);
 78
 79#if MAX_MP_BUSSES < 256
 80	if (m->busid >= MAX_MP_BUSSES) {
 81		pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n",
 82			m->busid, str, MAX_MP_BUSSES - 1);
 83		return;
 84	}
 85#endif
 86
 87	set_bit(m->busid, mp_bus_not_pci);
 88	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
 89#ifdef CONFIG_EISA
 90		mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
 91#endif
  92	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
 93		clear_bit(m->busid, mp_bus_not_pci);
 94#ifdef CONFIG_EISA
 95		mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
 96	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
 97		mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
 98#endif
 99	} else
100		pr_warn("Unknown bustype %s - ignoring\n", str);
101}
102
103static void __init MP_ioapic_info(struct mpc_ioapic *m)
104{
105	struct ioapic_domain_cfg cfg = {
106		.type = IOAPIC_DOMAIN_LEGACY,
107		.ops = &mp_ioapic_irqdomain_ops,
108	};
109
110	if (m->flags & MPC_APIC_USABLE)
111		mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg);
112}
113
114static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
115{
116	apic_printk(APIC_VERBOSE,
117		"Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n",
118		mp_irq->irqtype, mp_irq->irqflag & 3,
119		(mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
120		mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
121}
122
123#else /* CONFIG_X86_IO_APIC */
124static inline void __init MP_bus_info(struct mpc_bus *m) {}
125static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
126#endif /* CONFIG_X86_IO_APIC */
127
128static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
129{
130	apic_printk(APIC_VERBOSE,
131		"Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n",
132		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
133		m->srcbusirq, m->destapic, m->destapiclint);
134}
135
136/*
137 * Read/parse the MPC
138 */
139static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
140{
141
142	if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
143		pr_err("MPTABLE: bad signature [%c%c%c%c]!\n",
144		       mpc->signature[0], mpc->signature[1],
145		       mpc->signature[2], mpc->signature[3]);
146		return 0;
147	}
148	if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
149		pr_err("MPTABLE: checksum error!\n");
150		return 0;
151	}
152	if (mpc->spec != 0x01 && mpc->spec != 0x04) {
153		pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec);
154		return 0;
155	}
156	if (!mpc->lapic) {
157		pr_err("MPTABLE: null local APIC address!\n");
158		return 0;
159	}
160	memcpy(oem, mpc->oem, 8);
161	oem[8] = 0;
162	pr_info("MPTABLE: OEM ID: %s\n", oem);
163
164	memcpy(str, mpc->productid, 12);
165	str[12] = 0;
166
167	pr_info("MPTABLE: Product ID: %s\n", str);
168
169	pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic);
170
171	return 1;
172}
173
174static void skip_entry(unsigned char **ptr, int *count, int size)
175{
176	*ptr += size;
177	*count += size;
178}
179
180static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
181{
182	pr_err("Your mptable is wrong, contact your HW vendor!\n");
183	pr_cont("type %x\n", *mpt);
184	print_hex_dump(KERN_ERR, "  ", DUMP_PREFIX_ADDRESS, 16,
185			1, mpc, mpc->length, 1);
186}
 187
188static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
189{
190	char str[16];
191	char oem[10];
192
193	int count = sizeof(*mpc);
194	unsigned char *mpt = ((unsigned char *)mpc) + count;
195
196	if (!smp_check_mpc(mpc, oem, str))
197		return 0;
198
199	/* Initialize the lapic mapping */
200	if (!acpi_lapic)
201		register_lapic_address(mpc->lapic);
202
203	if (early)
204		return 1;
205
 206	/* Now process the configuration blocks. */
207	while (count < mpc->length) {
208		switch (*mpt) {
209		case MP_PROCESSOR:
210			/* ACPI may have already provided this data */
211			if (!acpi_lapic)
212				MP_processor_info((struct mpc_cpu *)mpt);
213			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
214			break;
215		case MP_BUS:
216			MP_bus_info((struct mpc_bus *)mpt);
217			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
218			break;
219		case MP_IOAPIC:
220			MP_ioapic_info((struct mpc_ioapic *)mpt);
221			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
222			break;
223		case MP_INTSRC:
224			mp_save_irq((struct mpc_intsrc *)mpt);
225			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
226			break;
227		case MP_LINTSRC:
228			MP_lintsrc_info((struct mpc_lintsrc *)mpt);
229			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
230			break;
231		default:
232			/* wrong mptable */
233			smp_dump_mptable(mpc, mpt);
234			count = mpc->length;
235			break;
 236		}
237	}
238
239	if (!num_processors)
240		pr_err("MPTABLE: no processors registered!\n");
241	return num_processors;
242}
243
244#ifdef CONFIG_X86_IO_APIC
245
246static int __init ELCR_trigger(unsigned int irq)
247{
248	unsigned int port;
249
250	port = PIC_ELCR1 + (irq >> 3);
251	return (inb(port) >> (irq & 7)) & 1;
252}
253
254static void __init construct_default_ioirq_mptable(int mpc_default_type)
255{
256	struct mpc_intsrc intsrc;
257	int i;
258	int ELCR_fallback = 0;
259
260	intsrc.type = MP_INTSRC;
261	intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
262	intsrc.srcbus = 0;
263	intsrc.dstapic = mpc_ioapic_id(0);
264
265	intsrc.irqtype = mp_INT;
266
267	/*
268	 *  If true, we have an ISA/PCI system with no IRQ entries
269	 *  in the MP table. To prevent the PCI interrupts from being set up
270	 *  incorrectly, we try to use the ELCR. The sanity check to see if
271	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
272	 *  never be level sensitive, so we simply see if the ELCR agrees.
273	 *  If it does, we assume it's valid.
274	 */
275	if (mpc_default_type == 5) {
276		pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
277
278		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
279		    ELCR_trigger(13))
280			pr_err("ELCR contains invalid data... not using ELCR\n");
281		else {
282			pr_info("Using ELCR to identify PCI interrupts\n");
283			ELCR_fallback = 1;
284		}
285	}
286
287	for (i = 0; i < 16; i++) {
288		switch (mpc_default_type) {
289		case 2:
290			if (i == 0 || i == 13)
291				continue;	/* IRQ0 & IRQ13 not connected */
292			fallthrough;
293		default:
294			if (i == 2)
295				continue;	/* IRQ2 is never connected */
296		}
297
298		if (ELCR_fallback) {
299			/*
300			 *  If the ELCR indicates a level-sensitive interrupt, we
301			 *  copy that information over to the MP table in the
302			 *  irqflag field (level sensitive, active high polarity).
303			 */
304			if (ELCR_trigger(i)) {
305				intsrc.irqflag = MP_IRQTRIG_LEVEL |
306						 MP_IRQPOL_ACTIVE_HIGH;
307			} else {
308				intsrc.irqflag = MP_IRQTRIG_DEFAULT |
309						 MP_IRQPOL_DEFAULT;
310			}
311		}
312
313		intsrc.srcbusirq = i;
314		intsrc.dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
315		mp_save_irq(&intsrc);
316	}
317
318	intsrc.irqtype = mp_ExtINT;
319	intsrc.srcbusirq = 0;
320	intsrc.dstirq = 0;	/* 8259A to INTIN0 */
321	mp_save_irq(&intsrc);
322}
323
324
325static void __init construct_ioapic_table(int mpc_default_type)
326{
327	struct mpc_ioapic ioapic;
328	struct mpc_bus bus;
329
330	bus.type = MP_BUS;
331	bus.busid = 0;
332	switch (mpc_default_type) {
333	default:
334		pr_err("???\nUnknown standard configuration %d\n",
335		       mpc_default_type);
336		fallthrough;
337	case 1:
338	case 5:
339		memcpy(bus.bustype, "ISA   ", 6);
340		break;
341	case 2:
342	case 6:
343	case 3:
344		memcpy(bus.bustype, "EISA  ", 6);
345		break;
346	}
347	MP_bus_info(&bus);
348	if (mpc_default_type > 4) {
349		bus.busid = 1;
350		memcpy(bus.bustype, "PCI   ", 6);
351		MP_bus_info(&bus);
352	}
353
354	ioapic.type	= MP_IOAPIC;
355	ioapic.apicid	= 2;
356	ioapic.apicver	= mpc_default_type > 4 ? 0x10 : 0x01;
357	ioapic.flags	= MPC_APIC_USABLE;
358	ioapic.apicaddr	= IO_APIC_DEFAULT_PHYS_BASE;
359	MP_ioapic_info(&ioapic);
360
361	/*
362	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
363	 */
364	construct_default_ioirq_mptable(mpc_default_type);
365}
366#else
367static inline void __init construct_ioapic_table(int mpc_default_type) { }
368#endif
369
370static inline void __init construct_default_ISA_mptable(int mpc_default_type)
371{
372	struct mpc_cpu processor;
373	struct mpc_lintsrc lintsrc;
374	int linttypes[2] = { mp_ExtINT, mp_NMI };
375	int i;
376
 377	/*
378	 * 2 CPUs, numbered 0 & 1.
379	 */
380	processor.type = MP_PROCESSOR;
381	/* Either an integrated APIC or a discrete 82489DX. */
382	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
383	processor.cpuflag = CPU_ENABLED;
384	processor.cpufeature = (boot_cpu_data.x86 << 8) |
385	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
386	processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
387	processor.reserved[0] = 0;
388	processor.reserved[1] = 0;
389	for (i = 0; i < 2; i++) {
390		processor.apicid = i;
391		MP_processor_info(&processor);
392	}
393
394	construct_ioapic_table(mpc_default_type);
395
396	lintsrc.type = MP_LINTSRC;
397	lintsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
398	lintsrc.srcbusid = 0;
399	lintsrc.srcbusirq = 0;
400	lintsrc.destapic = MP_APIC_ALL;
401	for (i = 0; i < 2; i++) {
402		lintsrc.irqtype = linttypes[i];
403		lintsrc.destapiclint = i;
404		MP_lintsrc_info(&lintsrc);
405	}
406}
407
408static unsigned long mpf_base;
409static bool mpf_found;
410
411static unsigned long __init get_mpc_size(unsigned long physptr)
412{
413	struct mpc_table *mpc;
414	unsigned long size;
415
416	mpc = early_memremap(physptr, PAGE_SIZE);
417	size = mpc->length;
418	early_memunmap(mpc, PAGE_SIZE);
419	apic_printk(APIC_VERBOSE, "  mpc: %lx-%lx\n", physptr, physptr + size);
420
421	return size;
422}
423
424static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
425{
426	struct mpc_table *mpc;
427	unsigned long size;
428
429	size = get_mpc_size(mpf->physptr);
430	mpc = early_memremap(mpf->physptr, size);
431
432	/*
433	 * Read the physical hardware table.  Anything here will
434	 * override the defaults.
435	 */
436	if (!smp_read_mpc(mpc, early)) {
437#ifdef CONFIG_X86_LOCAL_APIC
438		smp_found_config = 0;
439#endif
440		pr_err("BIOS bug, MP table errors detected!...\n");
441		pr_cont("... disabling SMP support. (tell your hw vendor)\n");
442		early_memunmap(mpc, size);
443		return -1;
444	}
445	early_memunmap(mpc, size);
446
447	if (early)
448		return -1;
449
450#ifdef CONFIG_X86_IO_APIC
451	/*
452	 * If there are no explicit MP IRQ entries, then we are
453	 * broken.  We set up most of the low 16 IO-APIC pins to
454	 * ISA defaults and hope it will work.
455	 */
456	if (!mp_irq_entries) {
457		struct mpc_bus bus;
458
459		pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
460
461		bus.type = MP_BUS;
462		bus.busid = 0;
463		memcpy(bus.bustype, "ISA   ", 6);
464		MP_bus_info(&bus);
465
466		construct_default_ioirq_mptable(0);
467	}
468#endif
469
470	return 0;
471}
472
473/*
474 * Scan the memory blocks for an SMP configuration block.
475 */
476void __init default_get_smp_config(unsigned int early)
477{
478	struct mpf_intel *mpf;
479
480	if (!smp_found_config)
481		return;
482
483	if (!mpf_found)
484		return;
485
486	if (acpi_lapic && early)
487		return;
488
489	/*
490	 * MPS doesn't support hyperthreading, aka only have
491	 * thread 0 apic id in MPS table
492	 */
493	if (acpi_lapic && acpi_ioapic)
494		return;
495
496	mpf = early_memremap(mpf_base, sizeof(*mpf));
497	if (!mpf) {
498		pr_err("MPTABLE: error mapping MP table\n");
499		return;
500	}
501
502	pr_info("Intel MultiProcessor Specification v1.%d\n",
503		mpf->specification);
504#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
505	if (mpf->feature2 & (1 << 7)) {
506		pr_info("    IMCR and PIC compatibility mode.\n");
507		pic_mode = 1;
508	} else {
509		pr_info("    Virtual Wire compatibility mode.\n");
510		pic_mode = 0;
511	}
512#endif
513	/*
514	 * Now see if we need to read further.
515	 */
516	if (mpf->feature1) {
517		if (early) {
518			/* Local APIC has default address */
 519		register_lapic_address(APIC_DEFAULT_PHYS_BASE);
520			goto out;
521		}
522
523		pr_info("Default MP configuration #%d\n", mpf->feature1);
524		construct_default_ISA_mptable(mpf->feature1);
525
526	} else if (mpf->physptr) {
527		if (check_physptr(mpf, early))
528			goto out;
529	} else
530		BUG();
531
532	if (!early)
533		pr_info("Processors: %d\n", num_processors);
534	/*
535	 * Only use the first configuration found.
536	 */
537out:
538	early_memunmap(mpf, sizeof(*mpf));
539}
540
541static void __init smp_reserve_memory(struct mpf_intel *mpf)
542{
543	memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
544}
545
546static int __init smp_scan_config(unsigned long base, unsigned long length)
547{
548	unsigned int *bp;
549	struct mpf_intel *mpf;
550	int ret = 0;
551
552	apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
553		    base, base + length - 1);
554	BUILD_BUG_ON(sizeof(*mpf) != 16);
555
556	while (length > 0) {
557		bp = early_memremap(base, length);
558		mpf = (struct mpf_intel *)bp;
559		if ((*bp == SMP_MAGIC_IDENT) &&
560		    (mpf->length == 1) &&
561		    !mpf_checksum((unsigned char *)bp, 16) &&
562		    ((mpf->specification == 1)
563		     || (mpf->specification == 4))) {
564#ifdef CONFIG_X86_LOCAL_APIC
565			smp_found_config = 1;
566#endif
567			mpf_base = base;
568			mpf_found = true;
569
570			pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
571				base, base + sizeof(*mpf) - 1);
572
573			memblock_reserve(base, sizeof(*mpf));
574			if (mpf->physptr)
575				smp_reserve_memory(mpf);
576
577			ret = 1;
578		}
579		early_memunmap(bp, length);
580
581		if (ret)
582			break;
583
584		base += 16;
585		length -= 16;
586	}
587	return ret;
588}
589
590void __init default_find_smp_config(void)
591{
592	unsigned int address;
593
594	/*
595	 * FIXME: Linux assumes you have 640K of base ram..
596	 * this continues the error...
597	 *
598	 * 1) Scan the bottom 1K for a signature
599	 * 2) Scan the top 1K of base RAM
600	 * 3) Scan the 64K of bios
601	 */
602	if (smp_scan_config(0x0, 0x400) ||
603	    smp_scan_config(639 * 0x400, 0x400) ||
604	    smp_scan_config(0xF0000, 0x10000))
605		return;
606	/*
607	 * If it is an SMP machine we should know now, unless the
608	 * configuration is in an EISA bus machine with an
609	 * extended bios data area.
610	 *
611	 * there is a real-mode segmented pointer pointing to the
612	 * 4K EBDA area at 0x40E, calculate and scan it here.
613	 *
614	 * NOTE! There are Linux loaders that will corrupt the EBDA
615	 * area, and as such this kind of SMP config may be less
616	 * trustworthy, simply because the SMP table may have been
617	 * stomped on during early boot. These loaders are buggy and
618	 * should be fixed.
619	 *
620	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
621	 */
622
623	address = get_bios_ebda();
624	if (address)
625		smp_scan_config(address, 0x400);
626}
627
628#ifdef CONFIG_X86_IO_APIC
629static u8 __initdata irq_used[MAX_IRQ_SOURCES];
630
631static int  __init get_MP_intsrc_index(struct mpc_intsrc *m)
632{
633	int i;
634
635	if (m->irqtype != mp_INT)
636		return 0;
637
638	if (m->irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW))
639		return 0;
640
641	/* not legacy */
642
643	for (i = 0; i < mp_irq_entries; i++) {
644		if (mp_irqs[i].irqtype != mp_INT)
645			continue;
646
647		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
648					   MP_IRQPOL_ACTIVE_LOW))
649			continue;
650
651		if (mp_irqs[i].srcbus != m->srcbus)
652			continue;
653		if (mp_irqs[i].srcbusirq != m->srcbusirq)
654			continue;
655		if (irq_used[i]) {
656			/* already claimed */
657			return -2;
658		}
659		irq_used[i] = 1;
660		return i;
661	}
662
663	/* not found */
664	return -1;
665}
666
667#define SPARE_SLOT_NUM 20
668
669static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
670
671static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
672{
673	int i;
674
675	apic_printk(APIC_VERBOSE, "OLD ");
676	print_mp_irq_info(m);
677
678	i = get_MP_intsrc_index(m);
679	if (i > 0) {
680		memcpy(m, &mp_irqs[i], sizeof(*m));
681		apic_printk(APIC_VERBOSE, "NEW ");
682		print_mp_irq_info(&mp_irqs[i]);
683		return;
684	}
685	if (!i) {
686		/* legacy, do nothing */
687		return;
688	}
689	if (*nr_m_spare < SPARE_SLOT_NUM) {
690		/*
691		 * not found (-1), or duplicated (-2) are invalid entries,
692		 * we need to use the slot later
693		 */
694		m_spare[*nr_m_spare] = m;
695		*nr_m_spare += 1;
696	}
697}
698
699static int __init
700check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
701{
702	if (!mpc_new_phys || count <= mpc_new_length) {
703		WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
704		return -1;
705	}
706
707	return 0;
708}
709#else /* CONFIG_X86_IO_APIC */
710static
711inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
712#endif /* CONFIG_X86_IO_APIC */
713
714static int  __init replace_intsrc_all(struct mpc_table *mpc,
715					unsigned long mpc_new_phys,
716					unsigned long mpc_new_length)
717{
718#ifdef CONFIG_X86_IO_APIC
719	int i;
720#endif
721	int count = sizeof(*mpc);
722	int nr_m_spare = 0;
723	unsigned char *mpt = ((unsigned char *)mpc) + count;
724
725	pr_info("mpc_length %x\n", mpc->length);
726	while (count < mpc->length) {
727		switch (*mpt) {
728		case MP_PROCESSOR:
729			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
730			break;
731		case MP_BUS:
732			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
733			break;
734		case MP_IOAPIC:
735			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
736			break;
737		case MP_INTSRC:
738			check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
739			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
740			break;
741		case MP_LINTSRC:
742			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
743			break;
744		default:
745			/* wrong mptable */
746			smp_dump_mptable(mpc, mpt);
747			goto out;
748		}
749	}
750
751#ifdef CONFIG_X86_IO_APIC
752	for (i = 0; i < mp_irq_entries; i++) {
753		if (irq_used[i])
754			continue;
755
756		if (mp_irqs[i].irqtype != mp_INT)
757			continue;
758
759		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
760					   MP_IRQPOL_ACTIVE_LOW))
761			continue;
762
763		if (nr_m_spare > 0) {
764			apic_printk(APIC_VERBOSE, "*NEW* found\n");
765			nr_m_spare--;
766			memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
767			m_spare[nr_m_spare] = NULL;
768		} else {
769			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
770			count += sizeof(struct mpc_intsrc);
771			if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
772				goto out;
773			memcpy(m, &mp_irqs[i], sizeof(*m));
774			mpc->length = count;
775			mpt += sizeof(struct mpc_intsrc);
776		}
777		print_mp_irq_info(&mp_irqs[i]);
778	}
779#endif
780out:
781	/* update checksum */
782	mpc->checksum = 0;
783	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);
784
785	return 0;
786}
787
788int enable_update_mptable;
789
790static int __init update_mptable_setup(char *str)
791{
792	enable_update_mptable = 1;
793#ifdef CONFIG_PCI
794	pci_routeirq = 1;
795#endif
796	return 0;
797}
798early_param("update_mptable", update_mptable_setup);
799
800static unsigned long __initdata mpc_new_phys;
801static unsigned long mpc_new_length __initdata = 4096;
802
803/* alloc_mptable or alloc_mptable=4k */
804static int __initdata alloc_mptable;
805static int __init parse_alloc_mptable_opt(char *p)
806{
807	enable_update_mptable = 1;
808#ifdef CONFIG_PCI
809	pci_routeirq = 1;
810#endif
811	alloc_mptable = 1;
812	if (!p)
813		return 0;
814	mpc_new_length = memparse(p, &p);
815	return 0;
816}
817early_param("alloc_mptable", parse_alloc_mptable_opt);
818
819void __init e820__memblock_alloc_reserved_mpc_new(void)
820{
821	if (enable_update_mptable && alloc_mptable)
822		mpc_new_phys = e820__memblock_alloc_reserved(mpc_new_length, 4);
823}
824
825static int __init update_mp_table(void)
826{
827	char str[16];
828	char oem[10];
829	struct mpf_intel *mpf;
830	struct mpc_table *mpc, *mpc_new;
831	unsigned long size;
832
833	if (!enable_update_mptable)
834		return 0;
835
836	if (!mpf_found)
837		return 0;
838
839	mpf = early_memremap(mpf_base, sizeof(*mpf));
840	if (!mpf) {
841		pr_err("MPTABLE: mpf early_memremap() failed\n");
842		return 0;
843	}
844
845	/*
846	 * Now see if we need to go further.
847	 */
848	if (mpf->feature1)
849		goto do_unmap_mpf;
850
851	if (!mpf->physptr)
852		goto do_unmap_mpf;
853
854	size = get_mpc_size(mpf->physptr);
855	mpc = early_memremap(mpf->physptr, size);
856	if (!mpc) {
857		pr_err("MPTABLE: mpc early_memremap() failed\n");
858		goto do_unmap_mpf;
859	}
860
861	if (!smp_check_mpc(mpc, oem, str))
862		goto do_unmap_mpc;
863
864	pr_info("mpf: %llx\n", (u64)mpf_base);
865	pr_info("physptr: %x\n", mpf->physptr);
866
867	if (mpc_new_phys && mpc->length > mpc_new_length) {
868		mpc_new_phys = 0;
869		pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n",
870			mpc_new_length);
871	}
872
873	if (!mpc_new_phys) {
874		unsigned char old, new;
875		/* check if we can change the position */
876		mpc->checksum = 0;
877		old = mpf_checksum((unsigned char *)mpc, mpc->length);
878		mpc->checksum = 0xff;
879		new = mpf_checksum((unsigned char *)mpc, mpc->length);
880		if (old == new) {
881			pr_info("mpc is readonly, please try alloc_mptable instead\n");
882			goto do_unmap_mpc;
883		}
884		pr_info("use in-position replacing\n");
885	} else {
886		mpc_new = early_memremap(mpc_new_phys, mpc_new_length);
887		if (!mpc_new) {
888			pr_err("MPTABLE: new mpc early_memremap() failed\n");
889			goto do_unmap_mpc;
890		}
891		mpf->physptr = mpc_new_phys;
892		memcpy(mpc_new, mpc, mpc->length);
893		early_memunmap(mpc, size);
894		mpc = mpc_new;
895		size = mpc_new_length;
896		/* check if we can modify that */
897		if (mpc_new_phys - mpf->physptr) {
898			struct mpf_intel *mpf_new;
899			/* steal 16 bytes from [0, 1k) */
900			mpf_new = early_memremap(0x400 - 16, sizeof(*mpf_new));
901			if (!mpf_new) {
902				pr_err("MPTABLE: new mpf early_memremap() failed\n");
903				goto do_unmap_mpc;
904			}
905			pr_info("mpf new: %x\n", 0x400 - 16);
906			memcpy(mpf_new, mpf, 16);
907			early_memunmap(mpf, sizeof(*mpf));
908			mpf = mpf_new;
909			mpf->physptr = mpc_new_phys;
910		}
911		mpf->checksum = 0;
912		mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
913		pr_info("physptr new: %x\n", mpf->physptr);
914	}
915
916	/*
917	 * only replace the one with mp_INT and
918	 *	 MP_IRQ_TRIGGER_LEVEL|MP_IRQ_POLARITY_LOW,
919	 * already in mp_irqs , stored by ... and mp_config_acpi_gsi,
920	 * may need pci=routeirq for all coverage
921	 */
922	replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);
923
924do_unmap_mpc:
925	early_memunmap(mpc, size);
926
927do_unmap_mpf:
928	early_memunmap(mpf, sizeof(*mpf));
929
930	return 0;
931}
932
933late_initcall(update_mp_table);