// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Multiprocessor Specification 1.1 and 1.4
 * compliant MP-table parsing routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 * (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/smp.h>
#include <linux/pci.h>

#include <asm/i8259.h>
#include <asm/io_apic.h>
#include <asm/acpi.h>
#include <asm/irqdomain.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/proto.h>
#include <asm/bios_ebda.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/smp.h>

#include <asm/apic.h>
/*
 * Checksum an MP configuration block.
 */

static int __init mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;

	return sum & 0xFF;
}

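/*
 * Register one processor entry from the MP table: disabled CPUs are only
 * counted, the boot CPU's APIC ID is recorded, and the entry is then
 * handed to generic_processor_info().
 */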
static void __init MP_processor_info(struct mpc_cpu *m)
{
	int apicid;
	char *bootup_cpu = "";

	if (!(m->cpuflag & CPU_ENABLED)) {
		disabled_cpus++;
		return;
	}

	apicid = m->apicid;

	if (m->cpuflag & CPU_BOOTPROCESSOR) {
		bootup_cpu = " (Bootup-CPU)";
		boot_cpu_physical_apicid = m->apicid;
	}

	pr_info("Processor #%d%s\n", m->apicid, bootup_cpu);
	generic_processor_info(apicid, m->apicver);
}

#ifdef CONFIG_X86_IO_APIC
static void __init mpc_oem_bus_info(struct mpc_bus *m, char *str)
{
	memcpy(str, m->bustype, 6);
	str[6] = 0;
	apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
}

static void __init MP_bus_info(struct mpc_bus *m)
{
	char str[7];

	mpc_oem_bus_info(m, str);

#if MAX_MP_BUSSES < 256
	if (m->busid >= MAX_MP_BUSSES) {
		pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n",
			m->busid, str, MAX_MP_BUSSES - 1);
		return;
	}
#endif

	set_bit(m->busid, mp_bus_not_pci);
	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
#endif
	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
		clear_bit(m->busid, mp_bus_not_pci);
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
		mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
#endif
	} else
		pr_warn("Unknown bustype %s - ignoring\n", str);
}

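/*
 * Register an I/O APIC entry with the legacy IRQ domain, but only if the
 * firmware marked it usable.
 */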
static void __init MP_ioapic_info(struct mpc_ioapic *m)
{
	struct ioapic_domain_cfg cfg = {
		.type = IOAPIC_DOMAIN_LEGACY,
		.ops = &mp_ioapic_irqdomain_ops,
	};

	if (m->flags & MPC_APIC_USABLE)
		mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg);
}

static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
{
	apic_printk(APIC_VERBOSE,
		"Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n",
		mp_irq->irqtype, mp_irq->irqflag & 3,
		(mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
		mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
}

#else /* CONFIG_X86_IO_APIC */
static inline void __init MP_bus_info(struct mpc_bus *m) {}
static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
#endif /* CONFIG_X86_IO_APIC */

static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
{
	apic_printk(APIC_VERBOSE,
		"Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n",
		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
		m->srcbusirq, m->destapic, m->destapiclint);
}

/*
 * Read/parse the MPC
 */
static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
{

	if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
		pr_err("MPTABLE: bad signature [%c%c%c%c]!\n",
		       mpc->signature[0], mpc->signature[1],
		       mpc->signature[2], mpc->signature[3]);
		return 0;
	}
	if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
		pr_err("MPTABLE: checksum error!\n");
		return 0;
	}
	if (mpc->spec != 0x01 && mpc->spec != 0x04) {
		pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec);
		return 0;
	}
	if (!mpc->lapic) {
		pr_err("MPTABLE: null local APIC address!\n");
		return 0;
	}
	memcpy(oem, mpc->oem, 8);
	oem[8] = 0;
	pr_info("MPTABLE: OEM ID: %s\n", oem);

	memcpy(str, mpc->productid, 12);
	str[12] = 0;

	pr_info("MPTABLE: Product ID: %s\n", str);

	pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic);

	return 1;
}

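/* Advance the entry pointer and the running byte count by one entry. */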
static void skip_entry(unsigned char **ptr, int *count, int size)
{
	*ptr += size;
	*count += size;
}

static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
{
	pr_err("Your mptable is wrong, contact your HW vendor!\n");
	pr_cont("type %x\n", *mpt);
	print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16,
		       1, mpc, mpc->length, 1);
}

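/*
 * Walk all configuration entries of a validated MP table and hand each one
 * to its type-specific handler. Returns 0 if the table is malformed, 1 for
 * an early scan, otherwise the number of processors registered.
 */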
static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
{
	char str[16];
	char oem[10];

	int count = sizeof(*mpc);
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	if (!smp_check_mpc(mpc, oem, str))
		return 0;

	/* Initialize the lapic mapping */
	if (!acpi_lapic)
		register_lapic_address(mpc->lapic);

	if (early)
		return 1;

	/* Now process the configuration blocks. */
	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			/* ACPI may have already provided this data */
			if (!acpi_lapic)
				MP_processor_info((struct mpc_cpu *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			MP_bus_info((struct mpc_bus *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			MP_ioapic_info((struct mpc_ioapic *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			mp_save_irq((struct mpc_intsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			MP_lintsrc_info((struct mpc_lintsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			count = mpc->length;
			break;
		}
	}

	if (!num_processors)
		pr_err("MPTABLE: no processors registered!\n");
	return num_processors;
}

#ifdef CONFIG_X86_IO_APIC

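/* Read the trigger mode of an ISA IRQ from the chipset's ELCR registers. */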
static int __init ELCR_trigger(unsigned int irq)
{
	unsigned int port;

	port = PIC_ELCR1 + (irq >> 3);
	return (inb(port) >> (irq & 7)) & 1;
}

static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
	struct mpc_intsrc intsrc;
	int i;
	int ELCR_fallback = 0;

	intsrc.type = MP_INTSRC;
	intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	intsrc.srcbus = 0;
	intsrc.dstapic = mpc_ioapic_id(0);

	intsrc.irqtype = mp_INT;

	/*
	 * If true, we have an ISA/PCI system with no IRQ entries
	 * in the MP table. To prevent the PCI interrupts from being set up
	 * incorrectly, we try to use the ELCR. The sanity check to see if
	 * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
	 * never be level sensitive, so we simply see if the ELCR agrees.
	 * If it does, we assume it's valid.
	 */
	if (mpc_default_type == 5) {
		pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");

		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
		    ELCR_trigger(13))
			pr_err("ELCR contains invalid data... not using ELCR\n");
		else {
			pr_info("Using ELCR to identify PCI interrupts\n");
			ELCR_fallback = 1;
		}
	}

	for (i = 0; i < 16; i++) {
		switch (mpc_default_type) {
		case 2:
			if (i == 0 || i == 13)
				continue;	/* IRQ0 & IRQ13 not connected */
			fallthrough;
		default:
			if (i == 2)
				continue;	/* IRQ2 is never connected */
		}

		if (ELCR_fallback) {
			/*
			 * If the ELCR indicates a level-sensitive interrupt, we
			 * copy that information over to the MP table in the
			 * irqflag field (level sensitive, active high polarity).
			 */
			if (ELCR_trigger(i)) {
				intsrc.irqflag = MP_IRQTRIG_LEVEL |
						 MP_IRQPOL_ACTIVE_HIGH;
			} else {
				intsrc.irqflag = MP_IRQTRIG_DEFAULT |
						 MP_IRQPOL_DEFAULT;
			}
		}

		intsrc.srcbusirq = i;
		intsrc.dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
		mp_save_irq(&intsrc);
	}

	intsrc.irqtype = mp_ExtINT;
	intsrc.srcbusirq = 0;
	intsrc.dstirq = 0;	/* 8259A to INTIN0 */
	mp_save_irq(&intsrc);
}


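/*
 * Synthesize the bus and I/O APIC entries for one of the MPS default
 * configurations, then fill in the default IRQ routing.
 */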
static void __init construct_ioapic_table(int mpc_default_type)
{
	struct mpc_ioapic ioapic;
	struct mpc_bus bus;

	bus.type = MP_BUS;
	bus.busid = 0;
	switch (mpc_default_type) {
	default:
		pr_err("???\nUnknown standard configuration %d\n",
		       mpc_default_type);
		fallthrough;
	case 1:
	case 5:
		memcpy(bus.bustype, "ISA   ", 6);
		break;
	case 2:
	case 6:
	case 3:
		memcpy(bus.bustype, "EISA  ", 6);
		break;
	}
	MP_bus_info(&bus);
	if (mpc_default_type > 4) {
		bus.busid = 1;
		memcpy(bus.bustype, "PCI   ", 6);
		MP_bus_info(&bus);
	}

	ioapic.type = MP_IOAPIC;
	ioapic.apicid = 2;
	ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	ioapic.flags = MPC_APIC_USABLE;
	ioapic.apicaddr = IO_APIC_DEFAULT_PHYS_BASE;
	MP_ioapic_info(&ioapic);

	/*
	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
	 */
	construct_default_ioirq_mptable(mpc_default_type);
}
#else
static inline void __init construct_ioapic_table(int mpc_default_type) { }
#endif

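/*
 * Build a complete two-CPU MPS "default configuration" entirely from the
 * spec's defaults: local APIC at the standard address, two processors,
 * the default bus/IO-APIC layout and both local interrupt lines.
 */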
static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
	struct mpc_cpu processor;
	struct mpc_lintsrc lintsrc;
	int linttypes[2] = { mp_ExtINT, mp_NMI };
	int i;

	/*
	 * local APIC has default address
	 */
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/*
	 * 2 CPUs, numbered 0 & 1.
	 */
	processor.type = MP_PROCESSOR;
	/* Either an integrated APIC or a discrete 82489DX. */
	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	processor.cpuflag = CPU_ENABLED;
	processor.cpufeature = (boot_cpu_data.x86 << 8) |
		(boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
	processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
	processor.reserved[0] = 0;
	processor.reserved[1] = 0;
	for (i = 0; i < 2; i++) {
		processor.apicid = i;
		MP_processor_info(&processor);
	}

	construct_ioapic_table(mpc_default_type);

	lintsrc.type = MP_LINTSRC;
	lintsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	lintsrc.srcbusid = 0;
	lintsrc.srcbusirq = 0;
	lintsrc.destapic = MP_APIC_ALL;
	for (i = 0; i < 2; i++) {
		lintsrc.irqtype = linttypes[i];
		lintsrc.destapiclint = i;
		MP_lintsrc_info(&lintsrc);
	}
}

static unsigned long mpf_base;
static bool mpf_found;

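/* Temporarily map the MP config table header just to read its length. */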
static unsigned long __init get_mpc_size(unsigned long physptr)
{
	struct mpc_table *mpc;
	unsigned long size;

	mpc = early_memremap(physptr, PAGE_SIZE);
	size = mpc->length;
	early_memunmap(mpc, PAGE_SIZE);
	apic_printk(APIC_VERBOSE, " mpc: %lx-%lx\n", physptr, physptr + size);

	return size;
}

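/*
 * Map and parse the MP config table that the floating pointer refers to.
 * If the table is unusable, SMP support is disabled; if it lacks IRQ
 * entries, fall back to the default ISA IRQ routing.
 */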
static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
{
	struct mpc_table *mpc;
	unsigned long size;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);

	/*
	 * Read the physical hardware table. Anything here will
	 * override the defaults.
	 */
	if (!smp_read_mpc(mpc, early)) {
#ifdef CONFIG_X86_LOCAL_APIC
		smp_found_config = 0;
#endif
		pr_err("BIOS bug, MP table errors detected!...\n");
		pr_cont("... disabling SMP support. (tell your hw vendor)\n");
		early_memunmap(mpc, size);
		return -1;
	}
	early_memunmap(mpc, size);

	if (early)
		return -1;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * If there are no explicit MP IRQ entries, then we are
	 * broken. We set up most of the low 16 IO-APIC pins to
	 * ISA defaults and hope it will work.
	 */
	if (!mp_irq_entries) {
		struct mpc_bus bus;

		pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

		bus.type = MP_BUS;
		bus.busid = 0;
		memcpy(bus.bustype, "ISA   ", 6);
		MP_bus_info(&bus);

		construct_default_ioirq_mptable(0);
	}
#endif

	return 0;
}

/*
 * Scan the memory blocks for an SMP configuration block.
 */
void __init default_get_smp_config(unsigned int early)
{
	struct mpf_intel *mpf;

	if (!smp_found_config)
		return;

	if (!mpf_found)
		return;

	if (acpi_lapic && early)
		return;

	/*
	 * MPS doesn't support hyperthreading, aka only have
	 * thread 0 apic id in MPS table
	 */
	if (acpi_lapic && acpi_ioapic)
		return;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: error mapping MP table\n");
		return;
	}

	pr_info("Intel MultiProcessor Specification v1.%d\n",
		mpf->specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
	if (mpf->feature2 & (1 << 7)) {
		pr_info(" IMCR and PIC compatibility mode.\n");
		pic_mode = 1;
	} else {
		pr_info(" Virtual Wire compatibility mode.\n");
		pic_mode = 0;
	}
#endif
	/*
	 * Now see if we need to read further.
	 */
	if (mpf->feature1) {
		if (early) {
			/*
			 * local APIC has default address
			 */
			mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
			goto out;
		}

		pr_info("Default MP configuration #%d\n", mpf->feature1);
		construct_default_ISA_mptable(mpf->feature1);

	} else if (mpf->physptr) {
		if (check_physptr(mpf, early))
			goto out;
	} else
		BUG();

	if (!early)
		pr_info("Processors: %d\n", num_processors);
	/*
	 * Only use the first configuration found.
	 */
out:
	early_memunmap(mpf, sizeof(*mpf));
}

static void __init smp_reserve_memory(struct mpf_intel *mpf)
{
	memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
}

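/*
 * Scan a physical memory range, 16 bytes at a time, for a valid MP
 * floating pointer structure (magic signature plus checksum) and reserve
 * it, and the config table it points to, in memblock.
 */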
static int __init smp_scan_config(unsigned long base, unsigned long length)
{
	unsigned int *bp;
	struct mpf_intel *mpf;
	int ret = 0;

	apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
		    base, base + length - 1);
	BUILD_BUG_ON(sizeof(*mpf) != 16);

	while (length > 0) {
		bp = early_memremap(base, length);
		mpf = (struct mpf_intel *)bp;
		if ((*bp == SMP_MAGIC_IDENT) &&
		    (mpf->length == 1) &&
		    !mpf_checksum((unsigned char *)bp, 16) &&
		    ((mpf->specification == 1)
		     || (mpf->specification == 4))) {
#ifdef CONFIG_X86_LOCAL_APIC
			smp_found_config = 1;
#endif
			mpf_base = base;
			mpf_found = true;

			pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
				base, base + sizeof(*mpf) - 1);

			memblock_reserve(base, sizeof(*mpf));
			if (mpf->physptr)
				smp_reserve_memory(mpf);

			ret = 1;
		}
		early_memunmap(bp, length);

		if (ret)
			break;

		base += 16;
		length -= 16;
	}
	return ret;
}

void __init default_find_smp_config(void)
{
	unsigned int address;

	/*
	 * FIXME: Linux assumes you have 640K of base ram..
	 * this continues the error...
	 *
	 * 1) Scan the bottom 1K for a signature
	 * 2) Scan the top 1K of base RAM
	 * 3) Scan the 64K of bios
	 */
	if (smp_scan_config(0x0, 0x400) ||
	    smp_scan_config(639 * 0x400, 0x400) ||
	    smp_scan_config(0xF0000, 0x10000))
		return;
	/*
	 * If it is an SMP machine we should know now, unless the
	 * configuration is in an EISA bus machine with an
	 * extended bios data area.
	 *
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E, calculate and scan it here.
	 *
	 * NOTE! There are Linux loaders that will corrupt the EBDA
	 * area, and as such this kind of SMP config may be less
	 * trustworthy, simply because the SMP table may have been
	 * stomped on during early boot. These loaders are buggy and
	 * should be fixed.
	 *
	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
	 */

	address = get_bios_ebda();
	if (address)
		smp_scan_config(address, 0x400);
}

#ifdef CONFIG_X86_IO_APIC
static u8 __initdata irq_used[MAX_IRQ_SOURCES];

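/*
 * Look up an MP interrupt-source entry in mp_irqs[]. Returns the matching
 * index, 0 for legacy (non level/active-low) entries, -1 if not found and
 * -2 if the slot was already claimed.
 */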
static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
{
	int i;

	if (m->irqtype != mp_INT)
		return 0;

	if (m->irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW))
		return 0;

	/* not legacy */

	for (i = 0; i < mp_irq_entries; i++) {
		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (mp_irqs[i].srcbus != m->srcbus)
			continue;
		if (mp_irqs[i].srcbusirq != m->srcbusirq)
			continue;
		if (irq_used[i]) {
			/* already claimed */
			return -2;
		}
		irq_used[i] = 1;
		return i;
	}

	/* not found */
	return -1;
}

#define SPARE_SLOT_NUM 20

static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];

static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
{
	int i;

	apic_printk(APIC_VERBOSE, "OLD ");
	print_mp_irq_info(m);

	i = get_MP_intsrc_index(m);
	if (i > 0) {
		memcpy(m, &mp_irqs[i], sizeof(*m));
		apic_printk(APIC_VERBOSE, "NEW ");
		print_mp_irq_info(&mp_irqs[i]);
		return;
	}
	if (!i) {
		/* legacy, do nothing */
		return;
	}
	if (*nr_m_spare < SPARE_SLOT_NUM) {
		/*
		 * not found (-1), or duplicated (-2) are invalid entries,
		 * we need to use the slot later
		 */
		m_spare[*nr_m_spare] = m;
		*nr_m_spare += 1;
	}
}

static int __init
check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
{
	if (!mpc_new_phys || count <= mpc_new_length) {
		WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
		return -1;
	}

	return 0;
}
#else /* CONFIG_X86_IO_APIC */
static
inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
#endif /* CONFIG_X86_IO_APIC */

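/*
 * Rewrite the MP table's interrupt-source entries from the kernel's
 * mp_irqs[] array: known entries are refreshed in place, spare slots are
 * reused, and new entries are appended when a new table buffer is
 * available. The table checksum is recomputed at the end.
 */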
static int __init replace_intsrc_all(struct mpc_table *mpc,
				     unsigned long mpc_new_phys,
				     unsigned long mpc_new_length)
{
#ifdef CONFIG_X86_IO_APIC
	int i;
#endif
	int count = sizeof(*mpc);
	int nr_m_spare = 0;
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	pr_info("mpc_length %x\n", mpc->length);
	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			goto out;
		}
	}

#ifdef CONFIG_X86_IO_APIC
	for (i = 0; i < mp_irq_entries; i++) {
		if (irq_used[i])
			continue;

		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (nr_m_spare > 0) {
			apic_printk(APIC_VERBOSE, "*NEW* found\n");
			nr_m_spare--;
			memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
			m_spare[nr_m_spare] = NULL;
		} else {
			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
			count += sizeof(struct mpc_intsrc);
			if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
				goto out;
			memcpy(m, &mp_irqs[i], sizeof(*m));
			mpc->length = count;
			mpt += sizeof(struct mpc_intsrc);
		}
		print_mp_irq_info(&mp_irqs[i]);
	}
#endif
out:
	/* update checksum */
	mpc->checksum = 0;
	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);

	return 0;
}

int enable_update_mptable;

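/*
 * "update_mptable" on the kernel command line enables rewriting the MP
 * table at late init (and, with PCI, sets pci_routeirq so all PCI IRQs end
 * up in mp_irqs[]). For example, a boot line such as
 *
 *	update_mptable alloc_mptable=4k
 *
 * would also reserve a 4k buffer for an enlarged table; the size shown is
 * illustrative only.
 */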
static int __init update_mptable_setup(char *str)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	return 0;
}
early_param("update_mptable", update_mptable_setup);

static unsigned long __initdata mpc_new_phys;
static unsigned long mpc_new_length __initdata = 4096;

/* alloc_mptable or alloc_mptable=4k */
static int __initdata alloc_mptable;
static int __init parse_alloc_mptable_opt(char *p)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	alloc_mptable = 1;
	if (!p)
		return 0;
	mpc_new_length = memparse(p, &p);
	return 0;
}
early_param("alloc_mptable", parse_alloc_mptable_opt);

void __init e820__memblock_alloc_reserved_mpc_new(void)
{
	if (enable_update_mptable && alloc_mptable)
		mpc_new_phys = e820__memblock_alloc_reserved(mpc_new_length, 4);
}

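/*
 * Late-initcall driver of the optional MP-table update: validate the
 * existing table, pick either in-place replacement or the buffer reserved
 * via alloc_mptable, and then rewrite the interrupt-source entries.
 */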
static int __init update_mp_table(void)
{
	char str[16];
	char oem[10];
	struct mpf_intel *mpf;
	struct mpc_table *mpc, *mpc_new;
	unsigned long size;

	if (!enable_update_mptable)
		return 0;

	if (!mpf_found)
		return 0;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: mpf early_memremap() failed\n");
		return 0;
	}

	/*
	 * Now see if we need to go further.
	 */
	if (mpf->feature1)
		goto do_unmap_mpf;

	if (!mpf->physptr)
		goto do_unmap_mpf;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);
	if (!mpc) {
		pr_err("MPTABLE: mpc early_memremap() failed\n");
		goto do_unmap_mpf;
	}

	if (!smp_check_mpc(mpc, oem, str))
		goto do_unmap_mpc;

	pr_info("mpf: %llx\n", (u64)mpf_base);
	pr_info("physptr: %x\n", mpf->physptr);

	if (mpc_new_phys && mpc->length > mpc_new_length) {
		mpc_new_phys = 0;
		pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n",
			mpc_new_length);
	}

	if (!mpc_new_phys) {
		unsigned char old, new;
		/* check if we can change the position */
		mpc->checksum = 0;
		old = mpf_checksum((unsigned char *)mpc, mpc->length);
		mpc->checksum = 0xff;
		new = mpf_checksum((unsigned char *)mpc, mpc->length);
		if (old == new) {
			pr_info("mpc is readonly, please try alloc_mptable instead\n");
			goto do_unmap_mpc;
		}
		pr_info("use in-position replacing\n");
	} else {
		mpc_new = early_memremap(mpc_new_phys, mpc_new_length);
		if (!mpc_new) {
			pr_err("MPTABLE: new mpc early_memremap() failed\n");
			goto do_unmap_mpc;
		}
		mpf->physptr = mpc_new_phys;
		memcpy(mpc_new, mpc, mpc->length);
		early_memunmap(mpc, size);
		mpc = mpc_new;
		size = mpc_new_length;
		/* check if we can modify that */
		if (mpc_new_phys - mpf->physptr) {
			struct mpf_intel *mpf_new;
			/* steal 16 bytes from [0, 1k) */
			mpf_new = early_memremap(0x400 - 16, sizeof(*mpf_new));
			if (!mpf_new) {
				pr_err("MPTABLE: new mpf early_memremap() failed\n");
				goto do_unmap_mpc;
			}
			pr_info("mpf new: %x\n", 0x400 - 16);
			memcpy(mpf_new, mpf, 16);
			early_memunmap(mpf, sizeof(*mpf));
			mpf = mpf_new;
			mpf->physptr = mpc_new_phys;
		}
		mpf->checksum = 0;
		mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
		pr_info("physptr new: %x\n", mpf->physptr);
	}

	/*
	 * only replace the one with mp_INT and
	 * MP_IRQ_TRIGGER_LEVEL|MP_IRQ_POLARITY_LOW,
	 * already in mp_irqs , stored by ... and mp_config_acpi_gsi,
	 * may need pci=routeirq for all coverage
	 */
	replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);

do_unmap_mpc:
	early_memunmap(mpc, size);

do_unmap_mpf:
	early_memunmap(mpf, sizeof(*mpf));

	return 0;
}

late_initcall(update_mp_table);