v5.4 — arch/x86/kernel/mpparse.c
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *	Intel Multiprocessor Specification 1.1 and 1.4
  4 *	compliant MP-table parsing routines.
  5 *
  6 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
  7 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
  8 *      (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
  9 */
 10
 11#include <linux/mm.h>
 12#include <linux/init.h>
 13#include <linux/delay.h>
 14#include <linux/memblock.h>
 15#include <linux/kernel_stat.h>
 16#include <linux/mc146818rtc.h>
 17#include <linux/bitops.h>
 18#include <linux/acpi.h>
 19#include <linux/smp.h>
 20#include <linux/pci.h>
 21
 22#include <asm/irqdomain.h>
 23#include <asm/mtrr.h>
 24#include <asm/mpspec.h>
 25#include <asm/pgalloc.h>
 26#include <asm/io_apic.h>
 27#include <asm/proto.h>
 28#include <asm/bios_ebda.h>
 29#include <asm/e820/api.h>
 30#include <asm/setup.h>
 31#include <asm/smp.h>
 32
 33#include <asm/apic.h>
 34/*
 35 * Checksum an MP configuration block.
 36 */
 37
 38static int __init mpf_checksum(unsigned char *mp, int len)
 39{
 40	int sum = 0;
 41
 42	while (len--)
 43		sum += *mp++;
 44
 45	return sum & 0xFF;
 46}
 47
 48int __init default_mpc_apic_id(struct mpc_cpu *m)
 49{
 50	return m->apicid;
 51}
 52
 53static void __init MP_processor_info(struct mpc_cpu *m)
 54{
 55	int apicid;
 56	char *bootup_cpu = "";
 57
 58	if (!(m->cpuflag & CPU_ENABLED)) {
 59		disabled_cpus++;
 60		return;
 61	}
 62
 63	apicid = x86_init.mpparse.mpc_apic_id(m);
 64
 65	if (m->cpuflag & CPU_BOOTPROCESSOR) {
 66		bootup_cpu = " (Bootup-CPU)";
 67		boot_cpu_physical_apicid = m->apicid;
 68	}
 69
 70	pr_info("Processor #%d%s\n", m->apicid, bootup_cpu);
 71	generic_processor_info(apicid, m->apicver);
 72}
 73
 74#ifdef CONFIG_X86_IO_APIC
 75void __init default_mpc_oem_bus_info(struct mpc_bus *m, char *str)
 76{
 77	memcpy(str, m->bustype, 6);
 78	str[6] = 0;
 79	apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
 80}
 81
 82static void __init MP_bus_info(struct mpc_bus *m)
 83{
 84	char str[7];
 85
 86	x86_init.mpparse.mpc_oem_bus_info(m, str);
 87
 88#if MAX_MP_BUSSES < 256
 89	if (m->busid >= MAX_MP_BUSSES) {
 90		pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n",
 91			m->busid, str, MAX_MP_BUSSES - 1);
 92		return;
 93	}
 94#endif
 95
 96	set_bit(m->busid, mp_bus_not_pci);
 97	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
 98#ifdef CONFIG_EISA
 99		mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
100#endif
101	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
102		if (x86_init.mpparse.mpc_oem_pci_bus)
103			x86_init.mpparse.mpc_oem_pci_bus(m);
104
105		clear_bit(m->busid, mp_bus_not_pci);
106#ifdef CONFIG_EISA
107		mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
108	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
109		mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
110#endif
111	} else
112		pr_warn("Unknown bustype %s - ignoring\n", str);
113}
114
115static void __init MP_ioapic_info(struct mpc_ioapic *m)
116{
117	struct ioapic_domain_cfg cfg = {
118		.type = IOAPIC_DOMAIN_LEGACY,
119		.ops = &mp_ioapic_irqdomain_ops,
120	};
121
122	if (m->flags & MPC_APIC_USABLE)
123		mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg);
124}
125
126static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
127{
128	apic_printk(APIC_VERBOSE,
129		"Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n",
130		mp_irq->irqtype, mp_irq->irqflag & 3,
131		(mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
132		mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
133}
134
135#else /* CONFIG_X86_IO_APIC */
136static inline void __init MP_bus_info(struct mpc_bus *m) {}
137static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
138#endif /* CONFIG_X86_IO_APIC */
139
140static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
141{
142	apic_printk(APIC_VERBOSE,
143		"Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n",
144		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
145		m->srcbusirq, m->destapic, m->destapiclint);
146}
147
148/*
149 * Read/parse the MPC
150 */
151static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
152{
153
154	if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
155		pr_err("MPTABLE: bad signature [%c%c%c%c]!\n",
156		       mpc->signature[0], mpc->signature[1],
157		       mpc->signature[2], mpc->signature[3]);
158		return 0;
159	}
160	if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
161		pr_err("MPTABLE: checksum error!\n");
162		return 0;
163	}
164	if (mpc->spec != 0x01 && mpc->spec != 0x04) {
165		pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec);
166		return 0;
167	}
168	if (!mpc->lapic) {
169		pr_err("MPTABLE: null local APIC address!\n");
170		return 0;
171	}
172	memcpy(oem, mpc->oem, 8);
173	oem[8] = 0;
174	pr_info("MPTABLE: OEM ID: %s\n", oem);
175
176	memcpy(str, mpc->productid, 12);
177	str[12] = 0;
178
179	pr_info("MPTABLE: Product ID: %s\n", str);
180
181	pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic);
182
183	return 1;
184}
185
186static void skip_entry(unsigned char **ptr, int *count, int size)
187{
188	*ptr += size;
189	*count += size;
190}
191
192static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
193{
194	pr_err("Your mptable is wrong, contact your HW vendor!\n");
195	pr_cont("type %x\n", *mpt);
196	print_hex_dump(KERN_ERR, "  ", DUMP_PREFIX_ADDRESS, 16,
197			1, mpc, mpc->length, 1);
198}
199
200void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }
201
202static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
203{
204	char str[16];
205	char oem[10];
206
207	int count = sizeof(*mpc);
208	unsigned char *mpt = ((unsigned char *)mpc) + count;
209
210	if (!smp_check_mpc(mpc, oem, str))
211		return 0;
212
213	/* Initialize the lapic mapping */
214	if (!acpi_lapic)
215		register_lapic_address(mpc->lapic);
216
217	if (early)
218		return 1;
219
220	if (mpc->oemptr)
221		x86_init.mpparse.smp_read_mpc_oem(mpc);
222
223	/*
224	 *      Now process the configuration blocks.
225	 */
226	x86_init.mpparse.mpc_record(0);
227
228	while (count < mpc->length) {
229		switch (*mpt) {
230		case MP_PROCESSOR:
231			/* ACPI may have already provided this data */
232			if (!acpi_lapic)
233				MP_processor_info((struct mpc_cpu *)mpt);
234			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
235			break;
236		case MP_BUS:
237			MP_bus_info((struct mpc_bus *)mpt);
238			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
239			break;
240		case MP_IOAPIC:
241			MP_ioapic_info((struct mpc_ioapic *)mpt);
242			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
243			break;
244		case MP_INTSRC:
245			mp_save_irq((struct mpc_intsrc *)mpt);
246			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
247			break;
248		case MP_LINTSRC:
249			MP_lintsrc_info((struct mpc_lintsrc *)mpt);
250			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
251			break;
252		default:
253			/* wrong mptable */
254			smp_dump_mptable(mpc, mpt);
255			count = mpc->length;
256			break;
257		}
258		x86_init.mpparse.mpc_record(1);
259	}
260
261	if (!num_processors)
262		pr_err("MPTABLE: no processors registered!\n");
263	return num_processors;
264}
265
266#ifdef CONFIG_X86_IO_APIC
267
268static int __init ELCR_trigger(unsigned int irq)
269{
270	unsigned int port;
271
272	port = 0x4d0 + (irq >> 3);
273	return (inb(port) >> (irq & 7)) & 1;
274}
275
276static void __init construct_default_ioirq_mptable(int mpc_default_type)
277{
278	struct mpc_intsrc intsrc;
279	int i;
280	int ELCR_fallback = 0;
281
282	intsrc.type = MP_INTSRC;
283	intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
284	intsrc.srcbus = 0;
285	intsrc.dstapic = mpc_ioapic_id(0);
286
287	intsrc.irqtype = mp_INT;
288
289	/*
290	 *  If true, we have an ISA/PCI system with no IRQ entries
291	 *  in the MP table. To prevent the PCI interrupts from being set up
292	 *  incorrectly, we try to use the ELCR. The sanity check to see if
293	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
294	 *  never be level sensitive, so we simply see if the ELCR agrees.
295	 *  If it does, we assume it's valid.
296	 */
297	if (mpc_default_type == 5) {
298		pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
299
300		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
301		    ELCR_trigger(13))
302			pr_err("ELCR contains invalid data... not using ELCR\n");
303		else {
304			pr_info("Using ELCR to identify PCI interrupts\n");
305			ELCR_fallback = 1;
306		}
307	}
308
309	for (i = 0; i < 16; i++) {
310		switch (mpc_default_type) {
311		case 2:
312			if (i == 0 || i == 13)
313				continue;	/* IRQ0 & IRQ13 not connected */
314			/* fall through */
315		default:
316			if (i == 2)
317				continue;	/* IRQ2 is never connected */
318		}
319
320		if (ELCR_fallback) {
321			/*
322			 *  If the ELCR indicates a level-sensitive interrupt, we
323			 *  copy that information over to the MP table in the
324			 *  irqflag field (level sensitive, active high polarity).
325			 */
326			if (ELCR_trigger(i)) {
327				intsrc.irqflag = MP_IRQTRIG_LEVEL |
328						 MP_IRQPOL_ACTIVE_HIGH;
329			} else {
330				intsrc.irqflag = MP_IRQTRIG_DEFAULT |
331						 MP_IRQPOL_DEFAULT;
332			}
333		}
334
335		intsrc.srcbusirq = i;
336		intsrc.dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
337		mp_save_irq(&intsrc);
338	}
339
340	intsrc.irqtype = mp_ExtINT;
341	intsrc.srcbusirq = 0;
342	intsrc.dstirq = 0;	/* 8259A to INTIN0 */
343	mp_save_irq(&intsrc);
344}
345
346
347static void __init construct_ioapic_table(int mpc_default_type)
348{
349	struct mpc_ioapic ioapic;
350	struct mpc_bus bus;
351
352	bus.type = MP_BUS;
353	bus.busid = 0;
354	switch (mpc_default_type) {
355	default:
356		pr_err("???\nUnknown standard configuration %d\n",
357		       mpc_default_type);
358		/* fall through */
359	case 1:
360	case 5:
361		memcpy(bus.bustype, "ISA   ", 6);
362		break;
363	case 2:
364	case 6:
365	case 3:
366		memcpy(bus.bustype, "EISA  ", 6);
367		break;
368	}
369	MP_bus_info(&bus);
370	if (mpc_default_type > 4) {
371		bus.busid = 1;
372		memcpy(bus.bustype, "PCI   ", 6);
373		MP_bus_info(&bus);
374	}
375
376	ioapic.type	= MP_IOAPIC;
377	ioapic.apicid	= 2;
378	ioapic.apicver	= mpc_default_type > 4 ? 0x10 : 0x01;
379	ioapic.flags	= MPC_APIC_USABLE;
380	ioapic.apicaddr	= IO_APIC_DEFAULT_PHYS_BASE;
381	MP_ioapic_info(&ioapic);
382
383	/*
384	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
385	 */
386	construct_default_ioirq_mptable(mpc_default_type);
387}
388#else
389static inline void __init construct_ioapic_table(int mpc_default_type) { }
390#endif
391
392static inline void __init construct_default_ISA_mptable(int mpc_default_type)
393{
394	struct mpc_cpu processor;
395	struct mpc_lintsrc lintsrc;
396	int linttypes[2] = { mp_ExtINT, mp_NMI };
397	int i;
398
399	/*
400	 * local APIC has default address
401	 */
402	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
403
404	/*
405	 * 2 CPUs, numbered 0 & 1.
406	 */
407	processor.type = MP_PROCESSOR;
408	/* Either an integrated APIC or a discrete 82489DX. */
409	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
410	processor.cpuflag = CPU_ENABLED;
411	processor.cpufeature = (boot_cpu_data.x86 << 8) |
412	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
413	processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
414	processor.reserved[0] = 0;
415	processor.reserved[1] = 0;
416	for (i = 0; i < 2; i++) {
417		processor.apicid = i;
418		MP_processor_info(&processor);
419	}
420
421	construct_ioapic_table(mpc_default_type);
422
423	lintsrc.type = MP_LINTSRC;
424	lintsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
425	lintsrc.srcbusid = 0;
426	lintsrc.srcbusirq = 0;
427	lintsrc.destapic = MP_APIC_ALL;
428	for (i = 0; i < 2; i++) {
429		lintsrc.irqtype = linttypes[i];
430		lintsrc.destapiclint = i;
431		MP_lintsrc_info(&lintsrc);
432	}
433}
434
435static unsigned long mpf_base;
436static bool mpf_found;
437
438static unsigned long __init get_mpc_size(unsigned long physptr)
439{
440	struct mpc_table *mpc;
441	unsigned long size;
442
443	mpc = early_memremap(physptr, PAGE_SIZE);
444	size = mpc->length;
445	early_memunmap(mpc, PAGE_SIZE);
446	apic_printk(APIC_VERBOSE, "  mpc: %lx-%lx\n", physptr, physptr + size);
447
448	return size;
449}
450
451static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
452{
453	struct mpc_table *mpc;
454	unsigned long size;
455
456	size = get_mpc_size(mpf->physptr);
457	mpc = early_memremap(mpf->physptr, size);
458
459	/*
460	 * Read the physical hardware table.  Anything here will
461	 * override the defaults.
462	 */
463	if (!smp_read_mpc(mpc, early)) {
464#ifdef CONFIG_X86_LOCAL_APIC
465		smp_found_config = 0;
466#endif
467		pr_err("BIOS bug, MP table errors detected!...\n");
468		pr_cont("... disabling SMP support. (tell your hw vendor)\n");
469		early_memunmap(mpc, size);
470		return -1;
471	}
472	early_memunmap(mpc, size);
473
474	if (early)
475		return -1;
476
477#ifdef CONFIG_X86_IO_APIC
478	/*
479	 * If there are no explicit MP IRQ entries, then we are
480	 * broken.  We set up most of the low 16 IO-APIC pins to
481	 * ISA defaults and hope it will work.
482	 */
483	if (!mp_irq_entries) {
484		struct mpc_bus bus;
485
486		pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
487
488		bus.type = MP_BUS;
489		bus.busid = 0;
490		memcpy(bus.bustype, "ISA   ", 6);
491		MP_bus_info(&bus);
492
493		construct_default_ioirq_mptable(0);
494	}
495#endif
496
497	return 0;
498}
499
500/*
501 * Scan the memory blocks for an SMP configuration block.
502 */
503void __init default_get_smp_config(unsigned int early)
504{
505	struct mpf_intel *mpf;
506
507	if (!smp_found_config)
508		return;
509
510	if (!mpf_found)
511		return;
512
513	if (acpi_lapic && early)
514		return;
515
516	/*
517	 * MPS doesn't support hyperthreading, aka only have
518	 * thread 0 apic id in MPS table
519	 */
520	if (acpi_lapic && acpi_ioapic)
521		return;
522
523	mpf = early_memremap(mpf_base, sizeof(*mpf));
524	if (!mpf) {
525		pr_err("MPTABLE: error mapping MP table\n");
526		return;
527	}
528
529	pr_info("Intel MultiProcessor Specification v1.%d\n",
530		mpf->specification);
531#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
532	if (mpf->feature2 & (1 << 7)) {
533		pr_info("    IMCR and PIC compatibility mode.\n");
534		pic_mode = 1;
535	} else {
536		pr_info("    Virtual Wire compatibility mode.\n");
537		pic_mode = 0;
538	}
539#endif
540	/*
541	 * Now see if we need to read further.
542	 */
543	if (mpf->feature1) {
544		if (early) {
545			/*
546			 * local APIC has default address
547			 */
548			mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
549			goto out;
550		}
551
552		pr_info("Default MP configuration #%d\n", mpf->feature1);
553		construct_default_ISA_mptable(mpf->feature1);
554
555	} else if (mpf->physptr) {
556		if (check_physptr(mpf, early))
557			goto out;
558	} else
559		BUG();
560
561	if (!early)
562		pr_info("Processors: %d\n", num_processors);
563	/*
564	 * Only use the first configuration found.
565	 */
566out:
567	early_memunmap(mpf, sizeof(*mpf));
568}
569
570static void __init smp_reserve_memory(struct mpf_intel *mpf)
571{
572	memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
573}
574
575static int __init smp_scan_config(unsigned long base, unsigned long length)
576{
577	unsigned int *bp;
578	struct mpf_intel *mpf;
579	int ret = 0;
580
581	apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
582		    base, base + length - 1);
583	BUILD_BUG_ON(sizeof(*mpf) != 16);
584
585	while (length > 0) {
586		bp = early_memremap(base, length);
587		mpf = (struct mpf_intel *)bp;
588		if ((*bp == SMP_MAGIC_IDENT) &&
589		    (mpf->length == 1) &&
590		    !mpf_checksum((unsigned char *)bp, 16) &&
591		    ((mpf->specification == 1)
592		     || (mpf->specification == 4))) {
593#ifdef CONFIG_X86_LOCAL_APIC
594			smp_found_config = 1;
595#endif
596			mpf_base = base;
597			mpf_found = true;
598
599			pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
600				base, base + sizeof(*mpf) - 1);
601
602			memblock_reserve(base, sizeof(*mpf));
603			if (mpf->physptr)
604				smp_reserve_memory(mpf);
605
606			ret = 1;
607		}
608		early_memunmap(bp, length);
609
610		if (ret)
611			break;
612
613		base += 16;
614		length -= 16;
615	}
616	return ret;
617}
618
619void __init default_find_smp_config(void)
620{
621	unsigned int address;
622
623	/*
624	 * FIXME: Linux assumes you have 640K of base ram..
625	 * this continues the error...
626	 *
627	 * 1) Scan the bottom 1K for a signature
628	 * 2) Scan the top 1K of base RAM
629	 * 3) Scan the 64K of bios
630	 */
631	if (smp_scan_config(0x0, 0x400) ||
632	    smp_scan_config(639 * 0x400, 0x400) ||
633	    smp_scan_config(0xF0000, 0x10000))
634		return;
635	/*
636	 * If it is an SMP machine we should know now, unless the
637	 * configuration is in an EISA bus machine with an
638	 * extended bios data area.
639	 *
640	 * there is a real-mode segmented pointer pointing to the
641	 * 4K EBDA area at 0x40E, calculate and scan it here.
642	 *
643	 * NOTE! There are Linux loaders that will corrupt the EBDA
644	 * area, and as such this kind of SMP config may be less
645	 * trustworthy, simply because the SMP table may have been
646	 * stomped on during early boot. These loaders are buggy and
647	 * should be fixed.
648	 *
649	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
650	 */
651
652	address = get_bios_ebda();
653	if (address)
654		smp_scan_config(address, 0x400);
655}
656
657#ifdef CONFIG_X86_IO_APIC
658static u8 __initdata irq_used[MAX_IRQ_SOURCES];
659
660static int  __init get_MP_intsrc_index(struct mpc_intsrc *m)
661{
662	int i;
663
664	if (m->irqtype != mp_INT)
665		return 0;
666
667	if (m->irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW))
668		return 0;
669
670	/* not legacy */
671
672	for (i = 0; i < mp_irq_entries; i++) {
673		if (mp_irqs[i].irqtype != mp_INT)
674			continue;
675
676		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
677					   MP_IRQPOL_ACTIVE_LOW))
678			continue;
679
680		if (mp_irqs[i].srcbus != m->srcbus)
681			continue;
682		if (mp_irqs[i].srcbusirq != m->srcbusirq)
683			continue;
684		if (irq_used[i]) {
685			/* already claimed */
686			return -2;
687		}
688		irq_used[i] = 1;
689		return i;
690	}
691
692	/* not found */
693	return -1;
694}
695
696#define SPARE_SLOT_NUM 20
697
698static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
699
700static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
701{
702	int i;
703
704	apic_printk(APIC_VERBOSE, "OLD ");
705	print_mp_irq_info(m);
706
707	i = get_MP_intsrc_index(m);
708	if (i > 0) {
709		memcpy(m, &mp_irqs[i], sizeof(*m));
710		apic_printk(APIC_VERBOSE, "NEW ");
711		print_mp_irq_info(&mp_irqs[i]);
712		return;
713	}
714	if (!i) {
715		/* legacy, do nothing */
716		return;
717	}
718	if (*nr_m_spare < SPARE_SLOT_NUM) {
719		/*
720		 * not found (-1), or duplicated (-2) are invalid entries,
721		 * we need to use the slot later
722		 */
723		m_spare[*nr_m_spare] = m;
724		*nr_m_spare += 1;
725	}
726}
727
728static int __init
729check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
730{
731	if (!mpc_new_phys || count <= mpc_new_length) {
732		WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
733		return -1;
734	}
735
736	return 0;
737}
738#else /* CONFIG_X86_IO_APIC */
739static
740inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
741#endif /* CONFIG_X86_IO_APIC */
742
743static int  __init replace_intsrc_all(struct mpc_table *mpc,
744					unsigned long mpc_new_phys,
745					unsigned long mpc_new_length)
746{
747#ifdef CONFIG_X86_IO_APIC
748	int i;
749#endif
750	int count = sizeof(*mpc);
751	int nr_m_spare = 0;
752	unsigned char *mpt = ((unsigned char *)mpc) + count;
753
754	pr_info("mpc_length %x\n", mpc->length);
755	while (count < mpc->length) {
756		switch (*mpt) {
757		case MP_PROCESSOR:
758			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
759			break;
760		case MP_BUS:
761			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
762			break;
763		case MP_IOAPIC:
764			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
765			break;
766		case MP_INTSRC:
767			check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
768			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
769			break;
770		case MP_LINTSRC:
771			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
772			break;
773		default:
774			/* wrong mptable */
775			smp_dump_mptable(mpc, mpt);
776			goto out;
777		}
778	}
779
780#ifdef CONFIG_X86_IO_APIC
781	for (i = 0; i < mp_irq_entries; i++) {
782		if (irq_used[i])
783			continue;
784
785		if (mp_irqs[i].irqtype != mp_INT)
786			continue;
787
788		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
789					   MP_IRQPOL_ACTIVE_LOW))
790			continue;
791
792		if (nr_m_spare > 0) {
793			apic_printk(APIC_VERBOSE, "*NEW* found\n");
794			nr_m_spare--;
795			memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
796			m_spare[nr_m_spare] = NULL;
797		} else {
798			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
799			count += sizeof(struct mpc_intsrc);
800			if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
801				goto out;
802			memcpy(m, &mp_irqs[i], sizeof(*m));
803			mpc->length = count;
804			mpt += sizeof(struct mpc_intsrc);
805		}
806		print_mp_irq_info(&mp_irqs[i]);
807	}
808#endif
809out:
810	/* update checksum */
811	mpc->checksum = 0;
812	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);
813
814	return 0;
815}
816
817int enable_update_mptable;
818
819static int __init update_mptable_setup(char *str)
820{
821	enable_update_mptable = 1;
822#ifdef CONFIG_PCI
823	pci_routeirq = 1;
824#endif
825	return 0;
826}
827early_param("update_mptable", update_mptable_setup);
828
829static unsigned long __initdata mpc_new_phys;
830static unsigned long mpc_new_length __initdata = 4096;
831
832/* alloc_mptable or alloc_mptable=4k */
833static int __initdata alloc_mptable;
834static int __init parse_alloc_mptable_opt(char *p)
835{
836	enable_update_mptable = 1;
837#ifdef CONFIG_PCI
838	pci_routeirq = 1;
839#endif
840	alloc_mptable = 1;
841	if (!p)
842		return 0;
843	mpc_new_length = memparse(p, &p);
844	return 0;
845}
846early_param("alloc_mptable", parse_alloc_mptable_opt);
847
848void __init e820__memblock_alloc_reserved_mpc_new(void)
849{
850	if (enable_update_mptable && alloc_mptable)
851		mpc_new_phys = e820__memblock_alloc_reserved(mpc_new_length, 4);
852}
853
854static int __init update_mp_table(void)
855{
856	char str[16];
857	char oem[10];
858	struct mpf_intel *mpf;
859	struct mpc_table *mpc, *mpc_new;
860	unsigned long size;
861
862	if (!enable_update_mptable)
863		return 0;
864
865	if (!mpf_found)
866		return 0;
867
868	mpf = early_memremap(mpf_base, sizeof(*mpf));
869	if (!mpf) {
870		pr_err("MPTABLE: mpf early_memremap() failed\n");
871		return 0;
872	}
873
874	/*
875	 * Now see if we need to go further.
876	 */
877	if (mpf->feature1)
878		goto do_unmap_mpf;
879
880	if (!mpf->physptr)
881		goto do_unmap_mpf;
882
883	size = get_mpc_size(mpf->physptr);
884	mpc = early_memremap(mpf->physptr, size);
885	if (!mpc) {
886		pr_err("MPTABLE: mpc early_memremap() failed\n");
887		goto do_unmap_mpf;
888	}
889
890	if (!smp_check_mpc(mpc, oem, str))
891		goto do_unmap_mpc;
892
893	pr_info("mpf: %llx\n", (u64)mpf_base);
894	pr_info("physptr: %x\n", mpf->physptr);
895
896	if (mpc_new_phys && mpc->length > mpc_new_length) {
897		mpc_new_phys = 0;
898		pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n",
899			mpc_new_length);
900	}
901
902	if (!mpc_new_phys) {
903		unsigned char old, new;
904		/* check if we can change the position */
905		mpc->checksum = 0;
906		old = mpf_checksum((unsigned char *)mpc, mpc->length);
907		mpc->checksum = 0xff;
908		new = mpf_checksum((unsigned char *)mpc, mpc->length);
909		if (old == new) {
910			pr_info("mpc is readonly, please try alloc_mptable instead\n");
911			goto do_unmap_mpc;
912		}
913		pr_info("use in-position replacing\n");
914	} else {
915		mpc_new = early_memremap(mpc_new_phys, mpc_new_length);
916		if (!mpc_new) {
917			pr_err("MPTABLE: new mpc early_memremap() failed\n");
918			goto do_unmap_mpc;
919		}
920		mpf->physptr = mpc_new_phys;
921		memcpy(mpc_new, mpc, mpc->length);
922		early_memunmap(mpc, size);
923		mpc = mpc_new;
924		size = mpc_new_length;
925		/* check if we can modify that */
926		if (mpc_new_phys - mpf->physptr) {
927			struct mpf_intel *mpf_new;
928			/* steal 16 bytes from [0, 1k) */
929			mpf_new = early_memremap(0x400 - 16, sizeof(*mpf_new));
930			if (!mpf_new) {
931				pr_err("MPTABLE: new mpf early_memremap() failed\n");
932				goto do_unmap_mpc;
933			}
934			pr_info("mpf new: %x\n", 0x400 - 16);
935			memcpy(mpf_new, mpf, 16);
936			early_memunmap(mpf, sizeof(*mpf));
937			mpf = mpf_new;
938			mpf->physptr = mpc_new_phys;
939		}
940		mpf->checksum = 0;
941		mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
942		pr_info("physptr new: %x\n", mpf->physptr);
943	}
944
945	/*
946	 * only replace the one with mp_INT and
947	 *	 MP_IRQ_TRIGGER_LEVEL|MP_IRQ_POLARITY_LOW,
948	 * already in mp_irqs , stored by ... and mp_config_acpi_gsi,
949	 * may need pci=routeirq for all coverage
950	 */
951	replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);
952
953do_unmap_mpc:
954	early_memunmap(mpc, size);
955
956do_unmap_mpf:
957	early_memunmap(mpf, sizeof(*mpf));
958
959	return 0;
960}
961
962late_initcall(update_mp_table);
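
Both listings locate the MP table the same way: smp_scan_config() walks candidate memory regions in 16-byte steps looking for the "_MP_" floating pointer structure, and accepts a hit only if the signature matches, the structure length is 1 (one 16-byte paragraph), the 16 bytes sum to zero modulo 256 (mpf_checksum()), and the spec revision is 1 or 4. As a minimal, hedged illustration of that acceptance test outside the kernel, here is a standalone userspace sketch; the struct layout and the helper names (struct mpf, byte_sum(), find_mpf()) are assumptions chosen to mirror struct mpf_intel, mpf_checksum() and smp_scan_config(), not kernel APIs.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Illustrative only: a simplified copy of the 16-byte MP floating pointer
 * layout from MP spec 1.1/1.4; field names mirror struct mpf_intel. */
struct mpf {
	char     signature[4];   /* "_MP_" */
	uint32_t physptr;        /* physical address of the MP config table */
	uint8_t  length;         /* structure length in 16-byte units (1) */
	uint8_t  specification;  /* MP spec revision: 1 or 4 */
	uint8_t  checksum;       /* makes the 16 bytes sum to 0 mod 256 */
	uint8_t  feature1;
	uint8_t  feature2;
	uint8_t  feature3[3];
};

/* Same byte-sum rule as mpf_checksum() above: valid data sums to 0 & 0xFF. */
static int byte_sum(const unsigned char *p, size_t len)
{
	unsigned int sum = 0;

	while (len--)
		sum += *p++;
	return sum & 0xFF;
}

/* Scan a buffer (standing in for low physical memory) in 16-byte steps,
 * applying the same acceptance tests as smp_scan_config(). Returns the
 * offset of the first match, or -1 if none is found. */
static long find_mpf(const unsigned char *mem, size_t size)
{
	size_t off;

	for (off = 0; off + sizeof(struct mpf) <= size; off += 16) {
		const struct mpf *m = (const struct mpf *)(mem + off);

		if (memcmp(m->signature, "_MP_", 4) == 0 &&
		    m->length == 1 &&
		    byte_sum(mem + off, 16) == 0 &&
		    (m->specification == 1 || m->specification == 4))
			return (long)off;
	}
	return -1;
}

Because the checksum byte forces the whole 16-byte structure to sum to zero, a stray "_MP_" string in memory is unlikely to pass all four tests at once, which is what makes this blind scan of the EBDA, top of base RAM and BIOS area workable.
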
v6.13.7 — arch/x86/kernel/mpparse.c
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *	Intel Multiprocessor Specification 1.1 and 1.4
  4 *	compliant MP-table parsing routines.
  5 *
  6 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
  7 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
  8 *      (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
  9 */
 10
 11#include <linux/mm.h>
 12#include <linux/init.h>
 13#include <linux/delay.h>
 14#include <linux/memblock.h>
 15#include <linux/kernel_stat.h>
 16#include <linux/mc146818rtc.h>
 17#include <linux/bitops.h>
 18#include <linux/acpi.h>
 19#include <linux/smp.h>
 20#include <linux/pci.h>
 21
 22#include <asm/i8259.h>
 23#include <asm/io_apic.h>
 24#include <asm/acpi.h>
 25#include <asm/irqdomain.h>
 26#include <asm/mtrr.h>
 27#include <asm/mpspec.h>
 28#include <asm/proto.h>
 29#include <asm/bios_ebda.h>
 30#include <asm/e820/api.h>
 31#include <asm/setup.h>
 32#include <asm/smp.h>
 33
 34#include <asm/apic.h>
 35/*
 36 * Checksum an MP configuration block.
 37 */
 38
 39static unsigned int num_procs __initdata;
 40
 41static int __init mpf_checksum(unsigned char *mp, int len)
 42{
 43	int sum = 0;
 44
 45	while (len--)
 46		sum += *mp++;
 47
 48	return sum & 0xFF;
 49}
 50
 51static void __init MP_processor_info(struct mpc_cpu *m)
 52{
 53	char *bootup_cpu = "";
 54
 55	topology_register_apic(m->apicid, CPU_ACPIID_INVALID, m->cpuflag & CPU_ENABLED);
 56	if (!(m->cpuflag & CPU_ENABLED))
 57		return;
 58
 59	if (m->cpuflag & CPU_BOOTPROCESSOR)
 60		bootup_cpu = " (Bootup-CPU)";
 61
 62	pr_info("Processor #%d%s\n", m->apicid, bootup_cpu);
 63	num_procs++;
 64}
 65
 66#ifdef CONFIG_X86_IO_APIC
 67static void __init mpc_oem_bus_info(struct mpc_bus *m, char *str)
 68{
 69	memcpy(str, m->bustype, 6);
 70	str[6] = 0;
 71	apic_pr_verbose("Bus #%d is %s\n", m->busid, str);
 72}
 73
 74static void __init MP_bus_info(struct mpc_bus *m)
 75{
 76	char str[7];
 77
 78	mpc_oem_bus_info(m, str);
 79
 80#if MAX_MP_BUSSES < 256
 81	if (m->busid >= MAX_MP_BUSSES) {
 82		pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n",
 83			m->busid, str, MAX_MP_BUSSES - 1);
 84		return;
 85	}
 86#endif
 87
 88	set_bit(m->busid, mp_bus_not_pci);
 89	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
 90#ifdef CONFIG_EISA
 91		mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
 92#endif
 93	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
 94		clear_bit(m->busid, mp_bus_not_pci);
 95#ifdef CONFIG_EISA
 96		mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
 97	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
 98		mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
 99#endif
100	} else
101		pr_warn("Unknown bustype %s - ignoring\n", str);
102}
103
104static void __init MP_ioapic_info(struct mpc_ioapic *m)
105{
106	struct ioapic_domain_cfg cfg = {
107		.type = IOAPIC_DOMAIN_LEGACY,
108		.ops = &mp_ioapic_irqdomain_ops,
109	};
110
111	if (m->flags & MPC_APIC_USABLE)
112		mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg);
113}
114
115static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
116{
117	apic_printk(APIC_VERBOSE,
118		"Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n",
119		mp_irq->irqtype, mp_irq->irqflag & 3,
120		(mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
121		mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
122}
123
124#else /* CONFIG_X86_IO_APIC */
125static inline void __init MP_bus_info(struct mpc_bus *m) {}
126static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
127#endif /* CONFIG_X86_IO_APIC */
128
129static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
130{
131	apic_printk(APIC_VERBOSE,
132		"Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n",
133		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
134		m->srcbusirq, m->destapic, m->destapiclint);
135}
136
137/*
138 * Read/parse the MPC
139 */
140static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
141{
142
143	if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
144		pr_err("MPTABLE: bad signature [%c%c%c%c]!\n",
145		       mpc->signature[0], mpc->signature[1],
146		       mpc->signature[2], mpc->signature[3]);
147		return 0;
148	}
149	if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
150		pr_err("MPTABLE: checksum error!\n");
151		return 0;
152	}
153	if (mpc->spec != 0x01 && mpc->spec != 0x04) {
154		pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec);
155		return 0;
156	}
157	if (!mpc->lapic) {
158		pr_err("MPTABLE: null local APIC address!\n");
159		return 0;
160	}
161	memcpy(oem, mpc->oem, 8);
162	oem[8] = 0;
163	pr_info("MPTABLE: OEM ID: %s\n", oem);
164
165	memcpy(str, mpc->productid, 12);
166	str[12] = 0;
167
168	pr_info("MPTABLE: Product ID: %s\n", str);
169
170	pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic);
171
172	return 1;
173}
174
175static void skip_entry(unsigned char **ptr, int *count, int size)
176{
177	*ptr += size;
178	*count += size;
179}
180
181static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
182{
183	pr_err("Your mptable is wrong, contact your HW vendor!\n");
184	pr_cont("type %x\n", *mpt);
185	print_hex_dump(KERN_ERR, "  ", DUMP_PREFIX_ADDRESS, 16,
186			1, mpc, mpc->length, 1);
187}
188
189static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
190{
191	char str[16];
192	char oem[10];
193
194	int count = sizeof(*mpc);
195	unsigned char *mpt = ((unsigned char *)mpc) + count;
196
197	if (!smp_check_mpc(mpc, oem, str))
198		return 0;
199
200	if (early) {
201		/* Initialize the lapic mapping */
202		if (!acpi_lapic)
203			register_lapic_address(mpc->lapic);
204		return 1;
205	}
206
207	/* Now process the configuration blocks. */
208	while (count < mpc->length) {
209		switch (*mpt) {
210		case MP_PROCESSOR:
211			/* ACPI may have already provided this data */
212			if (!acpi_lapic)
213				MP_processor_info((struct mpc_cpu *)mpt);
214			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
215			break;
216		case MP_BUS:
217			MP_bus_info((struct mpc_bus *)mpt);
218			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
219			break;
220		case MP_IOAPIC:
221			MP_ioapic_info((struct mpc_ioapic *)mpt);
222			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
223			break;
224		case MP_INTSRC:
225			mp_save_irq((struct mpc_intsrc *)mpt);
226			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
227			break;
228		case MP_LINTSRC:
229			MP_lintsrc_info((struct mpc_lintsrc *)mpt);
230			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
231			break;
232		default:
233			/* wrong mptable */
234			smp_dump_mptable(mpc, mpt);
235			count = mpc->length;
236			break;
237		}
238	}
239
240	if (!num_procs && !acpi_lapic)
241		pr_err("MPTABLE: no processors registered!\n");
242	return num_procs || acpi_lapic;
243}
244
245#ifdef CONFIG_X86_IO_APIC
246
247static int __init ELCR_trigger(unsigned int irq)
248{
249	unsigned int port;
250
251	port = PIC_ELCR1 + (irq >> 3);
252	return (inb(port) >> (irq & 7)) & 1;
253}
254
255static void __init construct_default_ioirq_mptable(int mpc_default_type)
256{
257	struct mpc_intsrc intsrc;
258	int i;
259	int ELCR_fallback = 0;
260
261	intsrc.type = MP_INTSRC;
262	intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
263	intsrc.srcbus = 0;
264	intsrc.dstapic = mpc_ioapic_id(0);
265
266	intsrc.irqtype = mp_INT;
267
268	/*
269	 *  If true, we have an ISA/PCI system with no IRQ entries
270	 *  in the MP table. To prevent the PCI interrupts from being set up
271	 *  incorrectly, we try to use the ELCR. The sanity check to see if
272	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
273	 *  never be level sensitive, so we simply see if the ELCR agrees.
274	 *  If it does, we assume it's valid.
275	 */
276	if (mpc_default_type == 5) {
277		pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
278
279		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
280		    ELCR_trigger(13))
281			pr_err("ELCR contains invalid data... not using ELCR\n");
282		else {
283			pr_info("Using ELCR to identify PCI interrupts\n");
284			ELCR_fallback = 1;
285		}
286	}
287
288	for (i = 0; i < 16; i++) {
289		switch (mpc_default_type) {
290		case 2:
291			if (i == 0 || i == 13)
292				continue;	/* IRQ0 & IRQ13 not connected */
293			fallthrough;
294		default:
295			if (i == 2)
296				continue;	/* IRQ2 is never connected */
297		}
298
299		if (ELCR_fallback) {
300			/*
301			 *  If the ELCR indicates a level-sensitive interrupt, we
302			 *  copy that information over to the MP table in the
303			 *  irqflag field (level sensitive, active high polarity).
304			 */
305			if (ELCR_trigger(i)) {
306				intsrc.irqflag = MP_IRQTRIG_LEVEL |
307						 MP_IRQPOL_ACTIVE_HIGH;
308			} else {
309				intsrc.irqflag = MP_IRQTRIG_DEFAULT |
310						 MP_IRQPOL_DEFAULT;
311			}
312		}
313
314		intsrc.srcbusirq = i;
315		intsrc.dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
316		mp_save_irq(&intsrc);
317	}
318
319	intsrc.irqtype = mp_ExtINT;
320	intsrc.srcbusirq = 0;
321	intsrc.dstirq = 0;	/* 8259A to INTIN0 */
322	mp_save_irq(&intsrc);
323}
324
325
326static void __init construct_ioapic_table(int mpc_default_type)
327{
328	struct mpc_ioapic ioapic;
329	struct mpc_bus bus;
330
331	bus.type = MP_BUS;
332	bus.busid = 0;
333	switch (mpc_default_type) {
334	default:
335		pr_err("???\nUnknown standard configuration %d\n",
336		       mpc_default_type);
337		fallthrough;
338	case 1:
339	case 5:
340		memcpy(bus.bustype, "ISA   ", 6);
341		break;
342	case 2:
343	case 6:
344	case 3:
345		memcpy(bus.bustype, "EISA  ", 6);
346		break;
347	}
348	MP_bus_info(&bus);
349	if (mpc_default_type > 4) {
350		bus.busid = 1;
351		memcpy(bus.bustype, "PCI   ", 6);
352		MP_bus_info(&bus);
353	}
354
355	ioapic.type	= MP_IOAPIC;
356	ioapic.apicid	= 2;
357	ioapic.apicver	= mpc_default_type > 4 ? 0x10 : 0x01;
358	ioapic.flags	= MPC_APIC_USABLE;
359	ioapic.apicaddr	= IO_APIC_DEFAULT_PHYS_BASE;
360	MP_ioapic_info(&ioapic);
361
362	/*
363	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
364	 */
365	construct_default_ioirq_mptable(mpc_default_type);
366}
367#else
368static inline void __init construct_ioapic_table(int mpc_default_type) { }
369#endif
370
371static inline void __init construct_default_ISA_mptable(int mpc_default_type)
372{
373	struct mpc_cpu processor;
374	struct mpc_lintsrc lintsrc;
375	int linttypes[2] = { mp_ExtINT, mp_NMI };
376	int i;
377
378	/*
379	 * 2 CPUs, numbered 0 & 1.
380	 */
381	processor.type = MP_PROCESSOR;
382	/* Either an integrated APIC or a discrete 82489DX. */
383	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
384	processor.cpuflag = CPU_ENABLED;
385	processor.cpufeature = (boot_cpu_data.x86 << 8) |
386	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
387	processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
388	processor.reserved[0] = 0;
389	processor.reserved[1] = 0;
390	for (i = 0; i < 2; i++) {
391		processor.apicid = i;
392		MP_processor_info(&processor);
393	}
394
395	construct_ioapic_table(mpc_default_type);
396
397	lintsrc.type = MP_LINTSRC;
398	lintsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
399	lintsrc.srcbusid = 0;
400	lintsrc.srcbusirq = 0;
401	lintsrc.destapic = MP_APIC_ALL;
402	for (i = 0; i < 2; i++) {
403		lintsrc.irqtype = linttypes[i];
404		lintsrc.destapiclint = i;
405		MP_lintsrc_info(&lintsrc);
406	}
407}
408
409static unsigned long mpf_base;
410static bool mpf_found;
411
412static unsigned long __init get_mpc_size(unsigned long physptr)
413{
414	struct mpc_table *mpc;
415	unsigned long size;
416
417	mpc = early_memremap(physptr, PAGE_SIZE);
418	size = mpc->length;
419	early_memunmap(mpc, PAGE_SIZE);
420	apic_pr_verbose("  mpc: %lx-%lx\n", physptr, physptr + size);
421
422	return size;
423}
424
425static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
426{
427	struct mpc_table *mpc;
428	unsigned long size;
429
430	size = get_mpc_size(mpf->physptr);
431	mpc = early_memremap(mpf->physptr, size);
432
433	/*
434	 * Read the physical hardware table.  Anything here will
435	 * override the defaults.
436	 */
437	if (!smp_read_mpc(mpc, early)) {
438#ifdef CONFIG_X86_LOCAL_APIC
439		smp_found_config = 0;
440#endif
441		pr_err("BIOS bug, MP table errors detected!...\n");
442		pr_cont("... disabling SMP support. (tell your hw vendor)\n");
443		early_memunmap(mpc, size);
444		return -1;
445	}
446	early_memunmap(mpc, size);
447
448	if (early)
449		return -1;
450
451#ifdef CONFIG_X86_IO_APIC
452	/*
453	 * If there are no explicit MP IRQ entries, then we are
454	 * broken.  We set up most of the low 16 IO-APIC pins to
455	 * ISA defaults and hope it will work.
456	 */
457	if (!mp_irq_entries) {
458		struct mpc_bus bus;
459
460		pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
461
462		bus.type = MP_BUS;
463		bus.busid = 0;
464		memcpy(bus.bustype, "ISA   ", 6);
465		MP_bus_info(&bus);
466
467		construct_default_ioirq_mptable(0);
468	}
469#endif
470
471	return 0;
472}
473
474/*
475 * Scan the memory blocks for an SMP configuration block.
476 */
477static __init void mpparse_get_smp_config(unsigned int early)
478{
479	struct mpf_intel *mpf;
480
481	if (!smp_found_config)
482		return;
483
484	if (!mpf_found)
485		return;
486
487	if (acpi_lapic && early)
488		return;
489
490	/*
491	 * MPS doesn't support hyperthreading, aka only have
492	 * thread 0 apic id in MPS table
493	 */
494	if (acpi_lapic && acpi_ioapic)
495		return;
496
497	mpf = early_memremap(mpf_base, sizeof(*mpf));
498	if (!mpf) {
499		pr_err("MPTABLE: error mapping MP table\n");
500		return;
501	}
502
503	pr_info("Intel MultiProcessor Specification v1.%d\n",
504		mpf->specification);
505#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
506	if (mpf->feature2 & (1 << 7)) {
507		pr_info("    IMCR and PIC compatibility mode.\n");
508		pic_mode = 1;
509	} else {
510		pr_info("    Virtual Wire compatibility mode.\n");
511		pic_mode = 0;
512	}
513#endif
514	/*
515	 * Now see if we need to read further.
516	 */
517	if (mpf->feature1) {
518		if (early) {
519			/* Local APIC has default address */
520			register_lapic_address(APIC_DEFAULT_PHYS_BASE);
521			goto out;
522		}
523
524		pr_info("Default MP configuration #%d\n", mpf->feature1);
525		construct_default_ISA_mptable(mpf->feature1);
526
527	} else if (mpf->physptr) {
528		if (check_physptr(mpf, early))
529			goto out;
530	} else
531		BUG();
532
533	if (!early && !acpi_lapic)
534		pr_info("Processors: %d\n", num_procs);
535	/*
536	 * Only use the first configuration found.
537	 */
538out:
539	early_memunmap(mpf, sizeof(*mpf));
540}
541
542void __init mpparse_parse_early_smp_config(void)
543{
544	mpparse_get_smp_config(true);
545}
546
547void __init mpparse_parse_smp_config(void)
548{
549	mpparse_get_smp_config(false);
550}
551
552static void __init smp_reserve_memory(struct mpf_intel *mpf)
553{
554	memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
555}
556
557static int __init smp_scan_config(unsigned long base, unsigned long length)
558{
559	unsigned int *bp;
560	struct mpf_intel *mpf;
561	int ret = 0;
562
563	apic_pr_verbose("Scan for SMP in [mem %#010lx-%#010lx]\n", base, base + length - 1);
564	BUILD_BUG_ON(sizeof(*mpf) != 16);
565
566	while (length > 0) {
567		bp = early_memremap(base, length);
568		mpf = (struct mpf_intel *)bp;
569		if ((*bp == SMP_MAGIC_IDENT) &&
570		    (mpf->length == 1) &&
571		    !mpf_checksum((unsigned char *)bp, 16) &&
572		    ((mpf->specification == 1)
573		     || (mpf->specification == 4))) {
574#ifdef CONFIG_X86_LOCAL_APIC
575			smp_found_config = 1;
576#endif
577			mpf_base = base;
578			mpf_found = true;
579
580			pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
581				base, base + sizeof(*mpf) - 1);
582
583			memblock_reserve(base, sizeof(*mpf));
584			if (mpf->physptr)
585				smp_reserve_memory(mpf);
586
587			ret = 1;
588		}
589		early_memunmap(bp, length);
590
591		if (ret)
592			break;
593
594		base += 16;
595		length -= 16;
596	}
597	return ret;
598}
599
600void __init mpparse_find_mptable(void)
601{
602	unsigned int address;
603
604	/*
605	 * FIXME: Linux assumes you have 640K of base ram..
606	 * this continues the error...
607	 *
608	 * 1) Scan the bottom 1K for a signature
609	 * 2) Scan the top 1K of base RAM
610	 * 3) Scan the 64K of bios
611	 */
612	if (smp_scan_config(0x0, 0x400) ||
613	    smp_scan_config(639 * 0x400, 0x400) ||
614	    smp_scan_config(0xF0000, 0x10000))
615		return;
616	/*
617	 * If it is an SMP machine we should know now, unless the
618	 * configuration is in an EISA bus machine with an
619	 * extended bios data area.
620	 *
621	 * there is a real-mode segmented pointer pointing to the
622	 * 4K EBDA area at 0x40E, calculate and scan it here.
623	 *
624	 * NOTE! There are Linux loaders that will corrupt the EBDA
625	 * area, and as such this kind of SMP config may be less
626	 * trustworthy, simply because the SMP table may have been
627	 * stomped on during early boot. These loaders are buggy and
628	 * should be fixed.
629	 *
630	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
631	 */
632
633	address = get_bios_ebda();
634	if (address)
635		smp_scan_config(address, 0x400);
636}
637
638#ifdef CONFIG_X86_IO_APIC
639static u8 __initdata irq_used[MAX_IRQ_SOURCES];
640
641static int  __init get_MP_intsrc_index(struct mpc_intsrc *m)
642{
643	int i;
644
645	if (m->irqtype != mp_INT)
646		return 0;
647
648	if (m->irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW))
649		return 0;
650
651	/* not legacy */
652
653	for (i = 0; i < mp_irq_entries; i++) {
654		if (mp_irqs[i].irqtype != mp_INT)
655			continue;
656
657		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
658					   MP_IRQPOL_ACTIVE_LOW))
659			continue;
660
661		if (mp_irqs[i].srcbus != m->srcbus)
662			continue;
663		if (mp_irqs[i].srcbusirq != m->srcbusirq)
664			continue;
665		if (irq_used[i]) {
666			/* already claimed */
667			return -2;
668		}
669		irq_used[i] = 1;
670		return i;
671	}
672
673	/* not found */
674	return -1;
675}
676
677#define SPARE_SLOT_NUM 20
678
679static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
680
681static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
682{
683	int i;
684
685	apic_pr_verbose("OLD ");
686	print_mp_irq_info(m);
687
688	i = get_MP_intsrc_index(m);
689	if (i > 0) {
690		memcpy(m, &mp_irqs[i], sizeof(*m));
691		apic_pr_verbose("NEW ");
692		print_mp_irq_info(&mp_irqs[i]);
693		return;
694	}
695	if (!i) {
696		/* legacy, do nothing */
697		return;
698	}
699	if (*nr_m_spare < SPARE_SLOT_NUM) {
700		/*
701		 * not found (-1), or duplicated (-2) are invalid entries,
702		 * we need to use the slot later
703		 */
704		m_spare[*nr_m_spare] = m;
705		*nr_m_spare += 1;
706	}
707}
708
709static int __init
710check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
711{
712	if (!mpc_new_phys || count <= mpc_new_length) {
713		WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
714		return -1;
715	}
716
717	return 0;
718}
719#else /* CONFIG_X86_IO_APIC */
720static
721inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
722#endif /* CONFIG_X86_IO_APIC */
723
724static int  __init replace_intsrc_all(struct mpc_table *mpc,
725					unsigned long mpc_new_phys,
726					unsigned long mpc_new_length)
727{
728#ifdef CONFIG_X86_IO_APIC
729	int i;
730#endif
731	int count = sizeof(*mpc);
732	int nr_m_spare = 0;
733	unsigned char *mpt = ((unsigned char *)mpc) + count;
734
735	pr_info("mpc_length %x\n", mpc->length);
736	while (count < mpc->length) {
737		switch (*mpt) {
738		case MP_PROCESSOR:
739			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
740			break;
741		case MP_BUS:
742			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
743			break;
744		case MP_IOAPIC:
745			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
746			break;
747		case MP_INTSRC:
748			check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
749			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
750			break;
751		case MP_LINTSRC:
752			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
753			break;
754		default:
755			/* wrong mptable */
756			smp_dump_mptable(mpc, mpt);
757			goto out;
758		}
759	}
760
761#ifdef CONFIG_X86_IO_APIC
762	for (i = 0; i < mp_irq_entries; i++) {
763		if (irq_used[i])
764			continue;
765
766		if (mp_irqs[i].irqtype != mp_INT)
767			continue;
768
769		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
770					   MP_IRQPOL_ACTIVE_LOW))
771			continue;
772
773		if (nr_m_spare > 0) {
774			apic_pr_verbose("*NEW* found\n");
775			nr_m_spare--;
776			memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
777			m_spare[nr_m_spare] = NULL;
778		} else {
779			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
780			count += sizeof(struct mpc_intsrc);
781			if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
782				goto out;
783			memcpy(m, &mp_irqs[i], sizeof(*m));
784			mpc->length = count;
785			mpt += sizeof(struct mpc_intsrc);
786		}
787		print_mp_irq_info(&mp_irqs[i]);
788	}
789#endif
790out:
791	/* update checksum */
792	mpc->checksum = 0;
793	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);
794
795	return 0;
796}
797
798int enable_update_mptable;
799
800static int __init update_mptable_setup(char *str)
801{
802	enable_update_mptable = 1;
803#ifdef CONFIG_PCI
804	pci_routeirq = 1;
805#endif
806	return 0;
807}
808early_param("update_mptable", update_mptable_setup);
809
810static unsigned long __initdata mpc_new_phys;
811static unsigned long mpc_new_length __initdata = 4096;
812
813/* alloc_mptable or alloc_mptable=4k */
814static int __initdata alloc_mptable;
815static int __init parse_alloc_mptable_opt(char *p)
816{
817	enable_update_mptable = 1;
818#ifdef CONFIG_PCI
819	pci_routeirq = 1;
820#endif
821	alloc_mptable = 1;
822	if (!p)
823		return 0;
824	mpc_new_length = memparse(p, &p);
825	return 0;
826}
827early_param("alloc_mptable", parse_alloc_mptable_opt);
828
829void __init e820__memblock_alloc_reserved_mpc_new(void)
830{
831	if (enable_update_mptable && alloc_mptable)
832		mpc_new_phys = e820__memblock_alloc_reserved(mpc_new_length, 4);
833}
834
835static int __init update_mp_table(void)
836{
837	char str[16];
838	char oem[10];
839	struct mpf_intel *mpf;
840	struct mpc_table *mpc, *mpc_new;
841	unsigned long size;
842
843	if (!enable_update_mptable)
844		return 0;
845
846	if (!mpf_found)
847		return 0;
848
849	mpf = early_memremap(mpf_base, sizeof(*mpf));
850	if (!mpf) {
851		pr_err("MPTABLE: mpf early_memremap() failed\n");
852		return 0;
853	}
854
855	/*
856	 * Now see if we need to go further.
857	 */
858	if (mpf->feature1)
859		goto do_unmap_mpf;
860
861	if (!mpf->physptr)
862		goto do_unmap_mpf;
863
864	size = get_mpc_size(mpf->physptr);
865	mpc = early_memremap(mpf->physptr, size);
866	if (!mpc) {
867		pr_err("MPTABLE: mpc early_memremap() failed\n");
868		goto do_unmap_mpf;
869	}
870
871	if (!smp_check_mpc(mpc, oem, str))
872		goto do_unmap_mpc;
873
874	pr_info("mpf: %llx\n", (u64)mpf_base);
875	pr_info("physptr: %x\n", mpf->physptr);
876
877	if (mpc_new_phys && mpc->length > mpc_new_length) {
878		mpc_new_phys = 0;
879		pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n",
880			mpc_new_length);
881	}
882
883	if (!mpc_new_phys) {
884		unsigned char old, new;
885		/* check if we can change the position */
886		mpc->checksum = 0;
887		old = mpf_checksum((unsigned char *)mpc, mpc->length);
888		mpc->checksum = 0xff;
889		new = mpf_checksum((unsigned char *)mpc, mpc->length);
890		if (old == new) {
891			pr_info("mpc is readonly, please try alloc_mptable instead\n");
892			goto do_unmap_mpc;
893		}
894		pr_info("use in-position replacing\n");
895	} else {
896		mpc_new = early_memremap(mpc_new_phys, mpc_new_length);
897		if (!mpc_new) {
898			pr_err("MPTABLE: new mpc early_memremap() failed\n");
899			goto do_unmap_mpc;
900		}
901		mpf->physptr = mpc_new_phys;
902		memcpy(mpc_new, mpc, mpc->length);
903		early_memunmap(mpc, size);
904		mpc = mpc_new;
905		size = mpc_new_length;
906		/* check if we can modify that */
907		if (mpc_new_phys - mpf->physptr) {
908			struct mpf_intel *mpf_new;
909			/* steal 16 bytes from [0, 1k) */
910			mpf_new = early_memremap(0x400 - 16, sizeof(*mpf_new));
911			if (!mpf_new) {
912				pr_err("MPTABLE: new mpf early_memremap() failed\n");
913				goto do_unmap_mpc;
914			}
915			pr_info("mpf new: %x\n", 0x400 - 16);
916			memcpy(mpf_new, mpf, 16);
917			early_memunmap(mpf, sizeof(*mpf));
918			mpf = mpf_new;
919			mpf->physptr = mpc_new_phys;
920		}
921		mpf->checksum = 0;
922		mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
923		pr_info("physptr new: %x\n", mpf->physptr);
924	}
925
926	/*
927	 * only replace the one with mp_INT and
928	 *	 MP_IRQ_TRIGGER_LEVEL|MP_IRQ_POLARITY_LOW,
929	 * already in mp_irqs , stored by ... and mp_config_acpi_gsi,
930	 * may need pci=routeirq for all coverage
931	 */
932	replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);
933
934do_unmap_mpc:
935	early_memunmap(mpc, size);
936
937do_unmap_mpf:
938	early_memunmap(mpf, sizeof(*mpf));
939
940	return 0;
941}
942
943late_initcall(update_mp_table);
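
A detail both versions share is the checksum fix-up after the table has been edited: replace_intsrc_all() and update_mp_table() clear the checksum byte, sum the remaining bytes with mpf_checksum(), and store the negated sum so the whole table once again sums to zero modulo 256. The sketch below demonstrates that identity on an arbitrary buffer; fix_checksum(), the sample bytes and the standalone main() are illustrative, only the arithmetic mirrors the "mpc->checksum = 0; mpc->checksum -= mpf_checksum(...)" sequence above.

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

/* Same rule as mpf_checksum(): sum of all bytes, truncated to 8 bits. */
static unsigned int byte_sum(const unsigned char *p, size_t len)
{
	unsigned int sum = 0;

	while (len--)
		sum += *p++;
	return sum & 0xFF;
}

/* Recompute the checksum byte stored at index csum_at: clear it, then
 * write the negated sum of the buffer so the total wraps to zero. */
static void fix_checksum(unsigned char *tbl, size_t len, size_t csum_at)
{
	tbl[csum_at] = 0;
	tbl[csum_at] = (unsigned char)(0x100 - byte_sum(tbl, len));
}

int main(void)
{
	/* Hypothetical 8-byte "table" with the checksum in the last byte. */
	unsigned char tbl[8] = { 'P', 'C', 'M', 'P', 0x12, 0x34, 0x56, 0x00 };

	fix_checksum(tbl, sizeof(tbl), 7);
	/* After the fix-up the table passes the byte-sum validity test again. */
	assert(byte_sum(tbl, sizeof(tbl)) == 0);
	return 0;
}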