1/*
2 * Intel Multiprocessor Specification 1.1 and 1.4
3 * compliant MP-table parsing routines.
4 *
5 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
6 * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
7 * (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
8 */
9
10#include <linux/mm.h>
11#include <linux/init.h>
12#include <linux/delay.h>
13#include <linux/bootmem.h>
14#include <linux/memblock.h>
15#include <linux/kernel_stat.h>
16#include <linux/mc146818rtc.h>
17#include <linux/bitops.h>
18#include <linux/acpi.h>
19#include <linux/module.h>
20#include <linux/smp.h>
21#include <linux/pci.h>
22
23#include <asm/mtrr.h>
24#include <asm/mpspec.h>
25#include <asm/pgalloc.h>
26#include <asm/io_apic.h>
27#include <asm/proto.h>
28#include <asm/bios_ebda.h>
29#include <asm/e820.h>
30#include <asm/setup.h>
31#include <asm/smp.h>
32
33#include <asm/apic.h>
34/*
35 * Checksum an MP configuration block.
36 */
37
38static int __init mpf_checksum(unsigned char *mp, int len)
39{
40 int sum = 0;
41
42 while (len--)
43 sum += *mp++;
44
45 return sum & 0xFF;
46}
47
48int __init default_mpc_apic_id(struct mpc_cpu *m)
49{
50 return m->apicid;
51}
52
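/*
 * Handle one MP-table processor entry: skip CPUs the BIOS marked as
 * disabled, note the boot CPU's APIC ID, and hand the entry to the
 * generic APIC code via generic_processor_info().
 */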
53static void __init MP_processor_info(struct mpc_cpu *m)
54{
55 int apicid;
56 char *bootup_cpu = "";
57
58 if (!(m->cpuflag & CPU_ENABLED)) {
59 disabled_cpus++;
60 return;
61 }
62
63 apicid = x86_init.mpparse.mpc_apic_id(m);
64
65 if (m->cpuflag & CPU_BOOTPROCESSOR) {
66 bootup_cpu = " (Bootup-CPU)";
67 boot_cpu_physical_apicid = m->apicid;
68 }
69
70 printk(KERN_INFO "Processor #%d%s\n", m->apicid, bootup_cpu);
71 generic_processor_info(apicid, m->apicver);
72}
73
74#ifdef CONFIG_X86_IO_APIC
75void __init default_mpc_oem_bus_info(struct mpc_bus *m, char *str)
76{
77 memcpy(str, m->bustype, 6);
78 str[6] = 0;
79 apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
80}
81
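/*
 * Handle one MP-table bus entry: decode the bus type string, track
 * non-PCI buses in the mp_bus_not_pci bitmap and, when EISA support is
 * enabled, record the bus type in mp_bus_id_to_type[].
 */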
82static void __init MP_bus_info(struct mpc_bus *m)
83{
84 char str[7];
85
86 x86_init.mpparse.mpc_oem_bus_info(m, str);
87
88#if MAX_MP_BUSSES < 256
89 if (m->busid >= MAX_MP_BUSSES) {
 90 printk(KERN_WARNING "MP table busid value (%d) for bustype %s"
 91 " is too large, max. supported is %d\n",
 92 m->busid, str, MAX_MP_BUSSES - 1);
93 return;
94 }
95#endif
96
97 set_bit(m->busid, mp_bus_not_pci);
98 if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
99#ifdef CONFIG_EISA
100 mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
101#endif
102 } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
103 if (x86_init.mpparse.mpc_oem_pci_bus)
104 x86_init.mpparse.mpc_oem_pci_bus(m);
105
106 clear_bit(m->busid, mp_bus_not_pci);
107#ifdef CONFIG_EISA
108 mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
109 } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
110 mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
111#endif
112 } else
113 printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
114}
115
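/* Register a usable I/O APIC entry with the I/O APIC/GSI code. */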
116static void __init MP_ioapic_info(struct mpc_ioapic *m)
117{
118 if (m->flags & MPC_APIC_USABLE)
119 mp_register_ioapic(m->apicid, m->apicaddr, gsi_top);
120}
121
122static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
123{
124 apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
125 " IRQ %02x, APIC ID %x, APIC INT %02x\n",
126 mp_irq->irqtype, mp_irq->irqflag & 3,
127 (mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
128 mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
129}
130
131#else /* CONFIG_X86_IO_APIC */
132static inline void __init MP_bus_info(struct mpc_bus *m) {}
133static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
134#endif /* CONFIG_X86_IO_APIC */
135
136static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
137{
138 apic_printk(APIC_VERBOSE, "Lint: type %d, pol %d, trig %d, bus %02x,"
139 " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
140 m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
141 m->srcbusirq, m->destapic, m->destapiclint);
142}
143
144/*
145 * Read/parse the MPC
146 */
147static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
148{
149
150 if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
151 printk(KERN_ERR "MPTABLE: bad signature [%c%c%c%c]!\n",
152 mpc->signature[0], mpc->signature[1],
153 mpc->signature[2], mpc->signature[3]);
154 return 0;
155 }
156 if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
157 printk(KERN_ERR "MPTABLE: checksum error!\n");
158 return 0;
159 }
160 if (mpc->spec != 0x01 && mpc->spec != 0x04) {
161 printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n",
162 mpc->spec);
163 return 0;
164 }
165 if (!mpc->lapic) {
166 printk(KERN_ERR "MPTABLE: null local APIC address!\n");
167 return 0;
168 }
169 memcpy(oem, mpc->oem, 8);
170 oem[8] = 0;
171 printk(KERN_INFO "MPTABLE: OEM ID: %s\n", oem);
172
173 memcpy(str, mpc->productid, 12);
174 str[12] = 0;
175
176 printk(KERN_INFO "MPTABLE: Product ID: %s\n", str);
177
178 printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->lapic);
179
180 return 1;
181}
182
183static void skip_entry(unsigned char **ptr, int *count, int size)
184{
185 *ptr += size;
186 *count += size;
187}
188
189static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
190{
191 printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n"
192 "type %x\n", *mpt);
193 print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16,
194 1, mpc, mpc->length, 1);
195}
196
197void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }
198
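/*
 * Parse the MP configuration table: register the local APIC address
 * (unless ACPI already did), then walk the variable-length entry list
 * and dispatch processor, bus, I/O APIC and interrupt source entries
 * to their handlers. Returns 0 if the table is invalid or no
 * processors were registered.
 */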
199static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
200{
201 char str[16];
202 char oem[10];
203
204 int count = sizeof(*mpc);
205 unsigned char *mpt = ((unsigned char *)mpc) + count;
206
207 if (!smp_check_mpc(mpc, oem, str))
208 return 0;
209
210#ifdef CONFIG_X86_32
211 generic_mps_oem_check(mpc, oem, str);
212#endif
213 /* Initialize the lapic mapping */
214 if (!acpi_lapic)
215 register_lapic_address(mpc->lapic);
216
217 if (early)
218 return 1;
219
220 if (mpc->oemptr)
221 x86_init.mpparse.smp_read_mpc_oem(mpc);
222
223 /*
224 * Now process the configuration blocks.
225 */
226 x86_init.mpparse.mpc_record(0);
227
228 while (count < mpc->length) {
229 switch (*mpt) {
230 case MP_PROCESSOR:
231 /* ACPI may have already provided this data */
232 if (!acpi_lapic)
233 MP_processor_info((struct mpc_cpu *)mpt);
234 skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
235 break;
236 case MP_BUS:
237 MP_bus_info((struct mpc_bus *)mpt);
238 skip_entry(&mpt, &count, sizeof(struct mpc_bus));
239 break;
240 case MP_IOAPIC:
241 MP_ioapic_info((struct mpc_ioapic *)mpt);
242 skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
243 break;
244 case MP_INTSRC:
245 mp_save_irq((struct mpc_intsrc *)mpt);
246 skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
247 break;
248 case MP_LINTSRC:
249 MP_lintsrc_info((struct mpc_lintsrc *)mpt);
250 skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
251 break;
252 default:
253 /* wrong mptable */
254 smp_dump_mptable(mpc, mpt);
255 count = mpc->length;
256 break;
257 }
258 x86_init.mpparse.mpc_record(1);
259 }
260
261 if (!num_processors)
262 printk(KERN_ERR "MPTABLE: no processors registered!\n");
263 return num_processors;
264}
265
266#ifdef CONFIG_X86_IO_APIC
267
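/*
 * Read the ELCR (edge/level control registers) of the two 8259A PICs
 * at I/O ports 0x4d0/0x4d1 and report whether the given ISA IRQ is
 * configured as level-triggered.
 */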
268static int __init ELCR_trigger(unsigned int irq)
269{
270 unsigned int port;
271
272 port = 0x4d0 + (irq >> 3);
273 return (inb(port) >> (irq & 7)) & 1;
274}
275
276static void __init construct_default_ioirq_mptable(int mpc_default_type)
277{
278 struct mpc_intsrc intsrc;
279 int i;
280 int ELCR_fallback = 0;
281
282 intsrc.type = MP_INTSRC;
283 intsrc.irqflag = 0; /* conforming */
284 intsrc.srcbus = 0;
285 intsrc.dstapic = mpc_ioapic_id(0);
286
287 intsrc.irqtype = mp_INT;
288
289 /*
290 * If true, we have an ISA/PCI system with no IRQ entries
291 * in the MP table. To prevent the PCI interrupts from being set up
292 * incorrectly, we try to use the ELCR. The sanity check to see if
293 * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
294 * never be level sensitive, so we simply see if the ELCR agrees.
295 * If it does, we assume it's valid.
296 */
297 if (mpc_default_type == 5) {
298 printk(KERN_INFO "ISA/PCI bus type with no IRQ information... "
299 "falling back to ELCR\n");
300
301 if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
302 ELCR_trigger(13))
303 printk(KERN_ERR "ELCR contains invalid data... "
304 "not using ELCR\n");
305 else {
306 printk(KERN_INFO
307 "Using ELCR to identify PCI interrupts\n");
308 ELCR_fallback = 1;
309 }
310 }
311
312 for (i = 0; i < 16; i++) {
313 switch (mpc_default_type) {
314 case 2:
315 if (i == 0 || i == 13)
316 continue; /* IRQ0 & IRQ13 not connected */
317 /* fall through */
318 default:
319 if (i == 2)
320 continue; /* IRQ2 is never connected */
321 }
322
323 if (ELCR_fallback) {
324 /*
325 * If the ELCR indicates a level-sensitive interrupt, we
326 * copy that information over to the MP table in the
327 * irqflag field (level sensitive, active high polarity).
328 */
329 if (ELCR_trigger(i))
330 intsrc.irqflag = 13;
331 else
332 intsrc.irqflag = 0;
333 }
334
335 intsrc.srcbusirq = i;
336 intsrc.dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
337 mp_save_irq(&intsrc);
338 }
339
340 intsrc.irqtype = mp_ExtINT;
341 intsrc.srcbusirq = 0;
342 intsrc.dstirq = 0; /* 8259A to INTIN0 */
343 mp_save_irq(&intsrc);
344}
345
346
347static void __init construct_ioapic_table(int mpc_default_type)
348{
349 struct mpc_ioapic ioapic;
350 struct mpc_bus bus;
351
352 bus.type = MP_BUS;
353 bus.busid = 0;
354 switch (mpc_default_type) {
355 default:
356 printk(KERN_ERR "???\nUnknown standard configuration %d\n",
357 mpc_default_type);
358 /* fall through */
359 case 1:
360 case 5:
361 memcpy(bus.bustype, "ISA ", 6);
362 break;
363 case 2:
364 case 6:
365 case 3:
366 memcpy(bus.bustype, "EISA ", 6);
367 break;
368 }
369 MP_bus_info(&bus);
370 if (mpc_default_type > 4) {
371 bus.busid = 1;
372 memcpy(bus.bustype, "PCI ", 6);
373 MP_bus_info(&bus);
374 }
375
376 ioapic.type = MP_IOAPIC;
377 ioapic.apicid = 2;
378 ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
379 ioapic.flags = MPC_APIC_USABLE;
380 ioapic.apicaddr = IO_APIC_DEFAULT_PHYS_BASE;
381 MP_ioapic_info(&ioapic);
382
383 /*
384 * We set up most of the low 16 IO-APIC pins according to MPS rules.
385 */
386 construct_default_ioirq_mptable(mpc_default_type);
387}
388#else
389static inline void __init construct_ioapic_table(int mpc_default_type) { }
390#endif
391
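/*
 * Build a synthetic MP configuration for one of the MPS "default"
 * configurations (selected by the floating pointer's feature byte
 * instead of a full table): two CPUs, the default local APIC address,
 * an I/O APIC with ISA IRQ routing and the standard local interrupt
 * sources.
 */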
392static inline void __init construct_default_ISA_mptable(int mpc_default_type)
393{
394 struct mpc_cpu processor;
395 struct mpc_lintsrc lintsrc;
396 int linttypes[2] = { mp_ExtINT, mp_NMI };
397 int i;
398
399 /*
400 * local APIC has default address
401 */
402 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
403
404 /*
405 * 2 CPUs, numbered 0 & 1.
406 */
407 processor.type = MP_PROCESSOR;
408 /* Either an integrated APIC or a discrete 82489DX. */
409 processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
410 processor.cpuflag = CPU_ENABLED;
411 processor.cpufeature = (boot_cpu_data.x86 << 8) |
412 (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
413 processor.featureflag = boot_cpu_data.x86_capability[0];
414 processor.reserved[0] = 0;
415 processor.reserved[1] = 0;
416 for (i = 0; i < 2; i++) {
417 processor.apicid = i;
418 MP_processor_info(&processor);
419 }
420
421 construct_ioapic_table(mpc_default_type);
422
423 lintsrc.type = MP_LINTSRC;
424 lintsrc.irqflag = 0; /* conforming */
425 lintsrc.srcbusid = 0;
426 lintsrc.srcbusirq = 0;
427 lintsrc.destapic = MP_APIC_ALL;
428 for (i = 0; i < 2; i++) {
429 lintsrc.irqtype = linttypes[i];
430 lintsrc.destapiclint = i;
431 MP_lintsrc_info(&lintsrc);
432 }
433}
434
435static struct mpf_intel *mpf_found;
436
437static unsigned long __init get_mpc_size(unsigned long physptr)
438{
439 struct mpc_table *mpc;
440 unsigned long size;
441
442 mpc = early_ioremap(physptr, PAGE_SIZE);
443 size = mpc->length;
444 early_iounmap(mpc, PAGE_SIZE);
445 apic_printk(APIC_VERBOSE, " mpc: %lx-%lx\n", physptr, physptr + size);
446
447 return size;
448}
449
450static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
451{
452 struct mpc_table *mpc;
453 unsigned long size;
454
455 size = get_mpc_size(mpf->physptr);
456 mpc = early_ioremap(mpf->physptr, size);
457 /*
458 * Read the physical hardware table. Anything here will
459 * override the defaults.
460 */
461 if (!smp_read_mpc(mpc, early)) {
462#ifdef CONFIG_X86_LOCAL_APIC
463 smp_found_config = 0;
464#endif
465 printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"
466 "... disabling SMP support. (tell your hw vendor)\n");
467 early_iounmap(mpc, size);
468 return -1;
469 }
470 early_iounmap(mpc, size);
471
472 if (early)
473 return -1;
474
475#ifdef CONFIG_X86_IO_APIC
476 /*
477 * If there are no explicit MP IRQ entries, then we are
478 * broken. We set up most of the low 16 IO-APIC pins to
479 * ISA defaults and hope it will work.
480 */
481 if (!mp_irq_entries) {
482 struct mpc_bus bus;
483
484 printk(KERN_ERR "BIOS bug, no explicit IRQ entries, "
485 "using default mptable. (tell your hw vendor)\n");
486
487 bus.type = MP_BUS;
488 bus.busid = 0;
489 memcpy(bus.bustype, "ISA ", 6);
490 MP_bus_info(&bus);
491
492 construct_default_ioirq_mptable(0);
493 }
494#endif
495
496 return 0;
497}
498
499/*
 500 * Parse the MP configuration block found earlier by the memory scan.
501 */
502void __init default_get_smp_config(unsigned int early)
503{
504 struct mpf_intel *mpf = mpf_found;
505
506 if (!mpf)
507 return;
508
509 if (acpi_lapic && early)
510 return;
511
512 /*
 513 * MPS doesn't support hyperthreading: the MPS table only
 514 * lists the APIC ID of thread 0 of each core.
515 */
516 if (acpi_lapic && acpi_ioapic)
517 return;
518
519 printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
520 mpf->specification);
521#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
522 if (mpf->feature2 & (1 << 7)) {
523 printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
524 pic_mode = 1;
525 } else {
526 printk(KERN_INFO " Virtual Wire compatibility mode.\n");
527 pic_mode = 0;
528 }
529#endif
530 /*
531 * Now see if we need to read further.
532 */
533 if (mpf->feature1 != 0) {
534 if (early) {
535 /*
536 * local APIC has default address
537 */
538 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
539 return;
540 }
541
542 printk(KERN_INFO "Default MP configuration #%d\n",
543 mpf->feature1);
544 construct_default_ISA_mptable(mpf->feature1);
545
546 } else if (mpf->physptr) {
547 if (check_physptr(mpf, early))
548 return;
549 } else
550 BUG();
551
552 if (!early)
553 printk(KERN_INFO "Processors: %d\n", num_processors);
554 /*
555 * Only use the first configuration found.
556 */
557}
558
559static void __init smp_reserve_memory(struct mpf_intel *mpf)
560{
561 memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
562}
563
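/*
 * Scan a physical memory range in 16-byte steps for the MP floating
 * pointer structure ("_MP_" signature, valid checksum, spec revision
 * 1.1 or 1.4). If found, remember it and reserve both the structure
 * and the MP table it points to in memblock.
 */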
564static int __init smp_scan_config(unsigned long base, unsigned long length)
565{
566 unsigned int *bp = phys_to_virt(base);
567 struct mpf_intel *mpf;
568 unsigned long mem;
569
570 apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
571 base, base + length - 1);
572 BUILD_BUG_ON(sizeof(*mpf) != 16);
573
574 while (length > 0) {
575 mpf = (struct mpf_intel *)bp;
576 if ((*bp == SMP_MAGIC_IDENT) &&
577 (mpf->length == 1) &&
578 !mpf_checksum((unsigned char *)bp, 16) &&
579 ((mpf->specification == 1)
580 || (mpf->specification == 4))) {
581#ifdef CONFIG_X86_LOCAL_APIC
582 smp_found_config = 1;
583#endif
584 mpf_found = mpf;
585
586 printk(KERN_INFO "found SMP MP-table at [mem %#010llx-%#010llx] mapped at [%p]\n",
587 (unsigned long long) virt_to_phys(mpf),
588 (unsigned long long) virt_to_phys(mpf) +
589 sizeof(*mpf) - 1, mpf);
590
591 mem = virt_to_phys(mpf);
592 memblock_reserve(mem, sizeof(*mpf));
593 if (mpf->physptr)
594 smp_reserve_memory(mpf);
595
596 return 1;
597 }
598 bp += 4;
599 length -= 16;
600 }
601 return 0;
602}
603
604void __init default_find_smp_config(void)
605{
606 unsigned int address;
607
608 /*
609 * FIXME: Linux assumes you have 640K of base ram..
610 * this continues the error...
611 *
612 * 1) Scan the bottom 1K for a signature
613 * 2) Scan the top 1K of base RAM
614 * 3) Scan the 64K of bios
615 */
616 if (smp_scan_config(0x0, 0x400) ||
617 smp_scan_config(639 * 0x400, 0x400) ||
618 smp_scan_config(0xF0000, 0x10000))
619 return;
620 /*
621 * If it is an SMP machine we should know now, unless the
622 * configuration is in an EISA bus machine with an
623 * extended bios data area.
624 *
625 * there is a real-mode segmented pointer pointing to the
626 * 4K EBDA area at 0x40E, calculate and scan it here.
627 *
628 * NOTE! There are Linux loaders that will corrupt the EBDA
629 * area, and as such this kind of SMP config may be less
630 * trustworthy, simply because the SMP table may have been
631 * stomped on during early boot. These loaders are buggy and
632 * should be fixed.
633 *
634 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
635 */
636
637 address = get_bios_ebda();
638 if (address)
639 smp_scan_config(address, 0x400);
640}
641
642#ifdef CONFIG_X86_IO_APIC
643static u8 __initdata irq_used[MAX_IRQ_SOURCES];
644
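/*
 * Look for an mp_irqs[] entry matching this MP-table interrupt source
 * (only level-triggered, active-low mp_INT entries are considered).
 * Returns 0 for legacy entries, the mp_irqs[] index on a match, -1 if
 * no match exists and -2 if the match was already claimed.
 */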
645static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
646{
647 int i;
648
649 if (m->irqtype != mp_INT)
650 return 0;
651
652 if (m->irqflag != 0x0f)
653 return 0;
654
655 /* not legacy */
656
657 for (i = 0; i < mp_irq_entries; i++) {
658 if (mp_irqs[i].irqtype != mp_INT)
659 continue;
660
661 if (mp_irqs[i].irqflag != 0x0f)
662 continue;
663
664 if (mp_irqs[i].srcbus != m->srcbus)
665 continue;
666 if (mp_irqs[i].srcbusirq != m->srcbusirq)
667 continue;
668 if (irq_used[i]) {
669 /* already claimed */
670 return -2;
671 }
672 irq_used[i] = 1;
673 return i;
674 }
675
676 /* not found */
677 return -1;
678}
679
680#define SPARE_SLOT_NUM 20
681
682static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
683
684static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
685{
686 int i;
687
688 apic_printk(APIC_VERBOSE, "OLD ");
689 print_mp_irq_info(m);
690
691 i = get_MP_intsrc_index(m);
692 if (i > 0) {
693 memcpy(m, &mp_irqs[i], sizeof(*m));
694 apic_printk(APIC_VERBOSE, "NEW ");
695 print_mp_irq_info(&mp_irqs[i]);
696 return;
697 }
698 if (!i) {
699 /* legacy, do nothing */
700 return;
701 }
702 if (*nr_m_spare < SPARE_SLOT_NUM) {
703 /*
 704 * Not found (-1) or duplicated (-2) entries are invalid;
 705 * remember this slot so it can be reused later.
706 */
707 m_spare[*nr_m_spare] = m;
708 *nr_m_spare += 1;
709 }
710}
711
712static int __init
713check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
714{
715 if (!mpc_new_phys || count <= mpc_new_length) {
716 WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
717 return -1;
718 }
719
720 return 0;
721}
722#else /* CONFIG_X86_IO_APIC */
723static
724inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
725#endif /* CONFIG_X86_IO_APIC */
726
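/*
 * Rewrite the MP table's interrupt source entries from the kernel's
 * current mp_irqs[] array: matching entries are updated in place,
 * invalid ones are remembered as spare slots to be reused, and any
 * remaining mp_irqs[] entries are appended if the table has room.
 * The table checksum is recomputed at the end.
 */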
727static int __init replace_intsrc_all(struct mpc_table *mpc,
728 unsigned long mpc_new_phys,
729 unsigned long mpc_new_length)
730{
731#ifdef CONFIG_X86_IO_APIC
732 int i;
733#endif
734 int count = sizeof(*mpc);
735 int nr_m_spare = 0;
736 unsigned char *mpt = ((unsigned char *)mpc) + count;
737
738 printk(KERN_INFO "mpc_length %x\n", mpc->length);
739 while (count < mpc->length) {
740 switch (*mpt) {
741 case MP_PROCESSOR:
742 skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
743 break;
744 case MP_BUS:
745 skip_entry(&mpt, &count, sizeof(struct mpc_bus));
746 break;
747 case MP_IOAPIC:
748 skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
749 break;
750 case MP_INTSRC:
751 check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
752 skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
753 break;
754 case MP_LINTSRC:
755 skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
756 break;
757 default:
758 /* wrong mptable */
759 smp_dump_mptable(mpc, mpt);
760 goto out;
761 }
762 }
763
764#ifdef CONFIG_X86_IO_APIC
765 for (i = 0; i < mp_irq_entries; i++) {
766 if (irq_used[i])
767 continue;
768
769 if (mp_irqs[i].irqtype != mp_INT)
770 continue;
771
772 if (mp_irqs[i].irqflag != 0x0f)
773 continue;
774
775 if (nr_m_spare > 0) {
776 apic_printk(APIC_VERBOSE, "*NEW* found\n");
777 nr_m_spare--;
778 memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
779 m_spare[nr_m_spare] = NULL;
780 } else {
781 struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
782 count += sizeof(struct mpc_intsrc);
783 if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
784 goto out;
785 memcpy(m, &mp_irqs[i], sizeof(*m));
786 mpc->length = count;
787 mpt += sizeof(struct mpc_intsrc);
788 }
789 print_mp_irq_info(&mp_irqs[i]);
790 }
791#endif
792out:
793 /* update checksum */
794 mpc->checksum = 0;
795 mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);
796
797 return 0;
798}
799
800int enable_update_mptable;
801
802static int __init update_mptable_setup(char *str)
803{
804 enable_update_mptable = 1;
805#ifdef CONFIG_PCI
806 pci_routeirq = 1;
807#endif
808 return 0;
809}
810early_param("update_mptable", update_mptable_setup);
811
812static unsigned long __initdata mpc_new_phys;
813static unsigned long mpc_new_length __initdata = 4096;
814
815/* alloc_mptable or alloc_mptable=4k */
816static int __initdata alloc_mptable;
817static int __init parse_alloc_mptable_opt(char *p)
818{
819 enable_update_mptable = 1;
820#ifdef CONFIG_PCI
821 pci_routeirq = 1;
822#endif
823 alloc_mptable = 1;
824 if (!p)
825 return 0;
826 mpc_new_length = memparse(p, &p);
827 return 0;
828}
829early_param("alloc_mptable", parse_alloc_mptable_opt);
830
831void __init early_reserve_e820_mpc_new(void)
832{
833 if (enable_update_mptable && alloc_mptable)
834 mpc_new_phys = early_reserve_e820(mpc_new_length, 4);
835}
836
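/*
 * Late initcall for the "update_mptable" option: re-validate the BIOS
 * MP table and rewrite its interrupt entries, either in place or in a
 * new area reserved via "alloc_mptable", so that the table reflects
 * the kernel's interrupt routing.
 */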
837static int __init update_mp_table(void)
838{
839 char str[16];
840 char oem[10];
841 struct mpf_intel *mpf;
842 struct mpc_table *mpc, *mpc_new;
843
844 if (!enable_update_mptable)
845 return 0;
846
847 mpf = mpf_found;
848 if (!mpf)
849 return 0;
850
851 /*
852 * Now see if we need to go further.
853 */
854 if (mpf->feature1 != 0)
855 return 0;
856
857 if (!mpf->physptr)
858 return 0;
859
860 mpc = phys_to_virt(mpf->physptr);
861
862 if (!smp_check_mpc(mpc, oem, str))
863 return 0;
864
865 printk(KERN_INFO "mpf: %llx\n", (u64)virt_to_phys(mpf));
866 printk(KERN_INFO "physptr: %x\n", mpf->physptr);
867
868 if (mpc_new_phys && mpc->length > mpc_new_length) {
869 mpc_new_phys = 0;
870 printk(KERN_INFO "mpc_new_length is %ld, please use alloc_mptable=8k\n",
871 mpc_new_length);
872 }
873
874 if (!mpc_new_phys) {
875 unsigned char old, new;
876 /* check if we can change the position */
877 mpc->checksum = 0;
878 old = mpf_checksum((unsigned char *)mpc, mpc->length);
879 mpc->checksum = 0xff;
880 new = mpf_checksum((unsigned char *)mpc, mpc->length);
881 if (old == new) {
882 printk(KERN_INFO "mpc is readonly, please try alloc_mptable instead\n");
883 return 0;
884 }
885 printk(KERN_INFO "use in-position replacing\n");
886 } else {
887 mpf->physptr = mpc_new_phys;
888 mpc_new = phys_to_virt(mpc_new_phys);
889 memcpy(mpc_new, mpc, mpc->length);
890 mpc = mpc_new;
891 /* check if we can modify that */
892 if (mpc_new_phys - mpf->physptr) {
893 struct mpf_intel *mpf_new;
894 /* steal 16 bytes from [0, 1k) */
895 printk(KERN_INFO "mpf new: %x\n", 0x400 - 16);
896 mpf_new = phys_to_virt(0x400 - 16);
897 memcpy(mpf_new, mpf, 16);
898 mpf = mpf_new;
899 mpf->physptr = mpc_new_phys;
900 }
901 mpf->checksum = 0;
902 mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
903 printk(KERN_INFO "physptr new: %x\n", mpf->physptr);
904 }
905
906 /*
 907 * Only replace entries of type mp_INT with
 908 * MP_IRQ_TRIGGER_LEVEL|MP_IRQ_POLARITY_LOW that are
 909 * already in mp_irqs, stored by ... and mp_config_acpi_gsi;
 910 * pci=routeirq may be needed for full coverage.
911 */
912 replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);
913
914 return 0;
915}
916
917late_initcall(update_mp_table);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Intel Multiprocessor Specification 1.1 and 1.4
4 * compliant MP-table parsing routines.
5 *
6 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
7 * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
8 * (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
9 */
10
11#include <linux/mm.h>
12#include <linux/init.h>
13#include <linux/delay.h>
14#include <linux/memblock.h>
15#include <linux/kernel_stat.h>
16#include <linux/mc146818rtc.h>
17#include <linux/bitops.h>
18#include <linux/acpi.h>
19#include <linux/smp.h>
20#include <linux/pci.h>
21
22#include <asm/i8259.h>
23#include <asm/io_apic.h>
24#include <asm/acpi.h>
25#include <asm/irqdomain.h>
26#include <asm/mtrr.h>
27#include <asm/mpspec.h>
28#include <asm/proto.h>
29#include <asm/bios_ebda.h>
30#include <asm/e820/api.h>
31#include <asm/setup.h>
32#include <asm/smp.h>
33
34#include <asm/apic.h>
35/*
36 * Checksum an MP configuration block.
37 */
38
39static unsigned int num_procs __initdata;
40
41static int __init mpf_checksum(unsigned char *mp, int len)
42{
43 int sum = 0;
44
45 while (len--)
46 sum += *mp++;
47
48 return sum & 0xFF;
49}
50
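/*
 * Handle one MP-table processor entry: register its APIC ID with the
 * topology code (MPS has no ACPI ID, hence CPU_ACPIID_INVALID) and
 * count the enabled processors.
 */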
51static void __init MP_processor_info(struct mpc_cpu *m)
52{
53 char *bootup_cpu = "";
54
55 topology_register_apic(m->apicid, CPU_ACPIID_INVALID, m->cpuflag & CPU_ENABLED);
56 if (!(m->cpuflag & CPU_ENABLED))
57 return;
58
59 if (m->cpuflag & CPU_BOOTPROCESSOR)
60 bootup_cpu = " (Bootup-CPU)";
61
62 pr_info("Processor #%d%s\n", m->apicid, bootup_cpu);
63 num_procs++;
64}
65
66#ifdef CONFIG_X86_IO_APIC
67static void __init mpc_oem_bus_info(struct mpc_bus *m, char *str)
68{
69 memcpy(str, m->bustype, 6);
70 str[6] = 0;
71 apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str);
72}
73
74static void __init MP_bus_info(struct mpc_bus *m)
75{
76 char str[7];
77
78 mpc_oem_bus_info(m, str);
79
80#if MAX_MP_BUSSES < 256
81 if (m->busid >= MAX_MP_BUSSES) {
82 pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n",
83 m->busid, str, MAX_MP_BUSSES - 1);
84 return;
85 }
86#endif
87
88 set_bit(m->busid, mp_bus_not_pci);
89 if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
90#ifdef CONFIG_EISA
91 mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
92#endif
93 } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
94 clear_bit(m->busid, mp_bus_not_pci);
95#ifdef CONFIG_EISA
96 mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
97 } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
98 mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
99#endif
100 } else
101 pr_warn("Unknown bustype %s - ignoring\n", str);
102}
103
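/*
 * Register a usable I/O APIC entry, using the legacy irqdomain
 * configuration for its interrupt pins.
 */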
104static void __init MP_ioapic_info(struct mpc_ioapic *m)
105{
106 struct ioapic_domain_cfg cfg = {
107 .type = IOAPIC_DOMAIN_LEGACY,
108 .ops = &mp_ioapic_irqdomain_ops,
109 };
110
111 if (m->flags & MPC_APIC_USABLE)
112 mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg);
113}
114
115static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
116{
117 apic_printk(APIC_VERBOSE,
118 "Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n",
119 mp_irq->irqtype, mp_irq->irqflag & 3,
120 (mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
121 mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
122}
123
124#else /* CONFIG_X86_IO_APIC */
125static inline void __init MP_bus_info(struct mpc_bus *m) {}
126static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
127#endif /* CONFIG_X86_IO_APIC */
128
129static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
130{
131 apic_printk(APIC_VERBOSE,
132 "Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n",
133 m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
134 m->srcbusirq, m->destapic, m->destapiclint);
135}
136
137/*
138 * Read/parse the MPC
139 */
140static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
141{
142
143 if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
144 pr_err("MPTABLE: bad signature [%c%c%c%c]!\n",
145 mpc->signature[0], mpc->signature[1],
146 mpc->signature[2], mpc->signature[3]);
147 return 0;
148 }
149 if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
150 pr_err("MPTABLE: checksum error!\n");
151 return 0;
152 }
153 if (mpc->spec != 0x01 && mpc->spec != 0x04) {
154 pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec);
155 return 0;
156 }
157 if (!mpc->lapic) {
158 pr_err("MPTABLE: null local APIC address!\n");
159 return 0;
160 }
161 memcpy(oem, mpc->oem, 8);
162 oem[8] = 0;
163 pr_info("MPTABLE: OEM ID: %s\n", oem);
164
165 memcpy(str, mpc->productid, 12);
166 str[12] = 0;
167
168 pr_info("MPTABLE: Product ID: %s\n", str);
169
170 pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic);
171
172 return 1;
173}
174
175static void skip_entry(unsigned char **ptr, int *count, int size)
176{
177 *ptr += size;
178 *count += size;
179}
180
181static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
182{
183 pr_err("Your mptable is wrong, contact your HW vendor!\n");
184 pr_cont("type %x\n", *mpt);
185 print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16,
186 1, mpc, mpc->length, 1);
187}
188
189static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
190{
191 char str[16];
192 char oem[10];
193
194 int count = sizeof(*mpc);
195 unsigned char *mpt = ((unsigned char *)mpc) + count;
196
197 if (!smp_check_mpc(mpc, oem, str))
198 return 0;
199
200 if (early) {
201 /* Initialize the lapic mapping */
202 if (!acpi_lapic)
203 register_lapic_address(mpc->lapic);
204 return 1;
205 }
206
207 /* Now process the configuration blocks. */
208 while (count < mpc->length) {
209 switch (*mpt) {
210 case MP_PROCESSOR:
211 /* ACPI may have already provided this data */
212 if (!acpi_lapic)
213 MP_processor_info((struct mpc_cpu *)mpt);
214 skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
215 break;
216 case MP_BUS:
217 MP_bus_info((struct mpc_bus *)mpt);
218 skip_entry(&mpt, &count, sizeof(struct mpc_bus));
219 break;
220 case MP_IOAPIC:
221 MP_ioapic_info((struct mpc_ioapic *)mpt);
222 skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
223 break;
224 case MP_INTSRC:
225 mp_save_irq((struct mpc_intsrc *)mpt);
226 skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
227 break;
228 case MP_LINTSRC:
229 MP_lintsrc_info((struct mpc_lintsrc *)mpt);
230 skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
231 break;
232 default:
233 /* wrong mptable */
234 smp_dump_mptable(mpc, mpt);
235 count = mpc->length;
236 break;
237 }
238 }
239
240 if (!num_procs && !acpi_lapic)
241 pr_err("MPTABLE: no processors registered!\n");
242 return num_procs || acpi_lapic;
243}
244
245#ifdef CONFIG_X86_IO_APIC
246
247static int __init ELCR_trigger(unsigned int irq)
248{
249 unsigned int port;
250
251 port = PIC_ELCR1 + (irq >> 3);
252 return (inb(port) >> (irq & 7)) & 1;
253}
254
255static void __init construct_default_ioirq_mptable(int mpc_default_type)
256{
257 struct mpc_intsrc intsrc;
258 int i;
259 int ELCR_fallback = 0;
260
261 intsrc.type = MP_INTSRC;
262 intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
263 intsrc.srcbus = 0;
264 intsrc.dstapic = mpc_ioapic_id(0);
265
266 intsrc.irqtype = mp_INT;
267
268 /*
269 * If true, we have an ISA/PCI system with no IRQ entries
270 * in the MP table. To prevent the PCI interrupts from being set up
271 * incorrectly, we try to use the ELCR. The sanity check to see if
272 * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
273 * never be level sensitive, so we simply see if the ELCR agrees.
274 * If it does, we assume it's valid.
275 */
276 if (mpc_default_type == 5) {
277 pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
278
279 if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
280 ELCR_trigger(13))
281 pr_err("ELCR contains invalid data... not using ELCR\n");
282 else {
283 pr_info("Using ELCR to identify PCI interrupts\n");
284 ELCR_fallback = 1;
285 }
286 }
287
288 for (i = 0; i < 16; i++) {
289 switch (mpc_default_type) {
290 case 2:
291 if (i == 0 || i == 13)
292 continue; /* IRQ0 & IRQ13 not connected */
293 fallthrough;
294 default:
295 if (i == 2)
296 continue; /* IRQ2 is never connected */
297 }
298
299 if (ELCR_fallback) {
300 /*
301 * If the ELCR indicates a level-sensitive interrupt, we
302 * copy that information over to the MP table in the
303 * irqflag field (level sensitive, active high polarity).
304 */
305 if (ELCR_trigger(i)) {
306 intsrc.irqflag = MP_IRQTRIG_LEVEL |
307 MP_IRQPOL_ACTIVE_HIGH;
308 } else {
309 intsrc.irqflag = MP_IRQTRIG_DEFAULT |
310 MP_IRQPOL_DEFAULT;
311 }
312 }
313
314 intsrc.srcbusirq = i;
315 intsrc.dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
316 mp_save_irq(&intsrc);
317 }
318
319 intsrc.irqtype = mp_ExtINT;
320 intsrc.srcbusirq = 0;
321 intsrc.dstirq = 0; /* 8259A to INTIN0 */
322 mp_save_irq(&intsrc);
323}
324
325
326static void __init construct_ioapic_table(int mpc_default_type)
327{
328 struct mpc_ioapic ioapic;
329 struct mpc_bus bus;
330
331 bus.type = MP_BUS;
332 bus.busid = 0;
333 switch (mpc_default_type) {
334 default:
335 pr_err("???\nUnknown standard configuration %d\n",
336 mpc_default_type);
337 fallthrough;
338 case 1:
339 case 5:
340 memcpy(bus.bustype, "ISA ", 6);
341 break;
342 case 2:
343 case 6:
344 case 3:
345 memcpy(bus.bustype, "EISA ", 6);
346 break;
347 }
348 MP_bus_info(&bus);
349 if (mpc_default_type > 4) {
350 bus.busid = 1;
351 memcpy(bus.bustype, "PCI ", 6);
352 MP_bus_info(&bus);
353 }
354
355 ioapic.type = MP_IOAPIC;
356 ioapic.apicid = 2;
357 ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
358 ioapic.flags = MPC_APIC_USABLE;
359 ioapic.apicaddr = IO_APIC_DEFAULT_PHYS_BASE;
360 MP_ioapic_info(&ioapic);
361
362 /*
363 * We set up most of the low 16 IO-APIC pins according to MPS rules.
364 */
365 construct_default_ioirq_mptable(mpc_default_type);
366}
367#else
368static inline void __init construct_ioapic_table(int mpc_default_type) { }
369#endif
370
371static inline void __init construct_default_ISA_mptable(int mpc_default_type)
372{
373 struct mpc_cpu processor;
374 struct mpc_lintsrc lintsrc;
375 int linttypes[2] = { mp_ExtINT, mp_NMI };
376 int i;
377
378 /*
379 * 2 CPUs, numbered 0 & 1.
380 */
381 processor.type = MP_PROCESSOR;
382 /* Either an integrated APIC or a discrete 82489DX. */
383 processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
384 processor.cpuflag = CPU_ENABLED;
385 processor.cpufeature = (boot_cpu_data.x86 << 8) |
386 (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
387 processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
388 processor.reserved[0] = 0;
389 processor.reserved[1] = 0;
390 for (i = 0; i < 2; i++) {
391 processor.apicid = i;
392 MP_processor_info(&processor);
393 }
394
395 construct_ioapic_table(mpc_default_type);
396
397 lintsrc.type = MP_LINTSRC;
398 lintsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
399 lintsrc.srcbusid = 0;
400 lintsrc.srcbusirq = 0;
401 lintsrc.destapic = MP_APIC_ALL;
402 for (i = 0; i < 2; i++) {
403 lintsrc.irqtype = linttypes[i];
404 lintsrc.destapiclint = i;
405 MP_lintsrc_info(&lintsrc);
406 }
407}
408
409static unsigned long mpf_base;
410static bool mpf_found;
411
412static unsigned long __init get_mpc_size(unsigned long physptr)
413{
414 struct mpc_table *mpc;
415 unsigned long size;
416
417 mpc = early_memremap(physptr, PAGE_SIZE);
418 size = mpc->length;
419 early_memunmap(mpc, PAGE_SIZE);
420 apic_printk(APIC_VERBOSE, " mpc: %lx-%lx\n", physptr, physptr + size);
421
422 return size;
423}
424
425static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
426{
427 struct mpc_table *mpc;
428 unsigned long size;
429
430 size = get_mpc_size(mpf->physptr);
431 mpc = early_memremap(mpf->physptr, size);
432
433 /*
434 * Read the physical hardware table. Anything here will
435 * override the defaults.
436 */
437 if (!smp_read_mpc(mpc, early)) {
438#ifdef CONFIG_X86_LOCAL_APIC
439 smp_found_config = 0;
440#endif
441 pr_err("BIOS bug, MP table errors detected!...\n");
442 pr_cont("... disabling SMP support. (tell your hw vendor)\n");
443 early_memunmap(mpc, size);
444 return -1;
445 }
446 early_memunmap(mpc, size);
447
448 if (early)
449 return -1;
450
451#ifdef CONFIG_X86_IO_APIC
452 /*
453 * If there are no explicit MP IRQ entries, then we are
454 * broken. We set up most of the low 16 IO-APIC pins to
455 * ISA defaults and hope it will work.
456 */
457 if (!mp_irq_entries) {
458 struct mpc_bus bus;
459
460 pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
461
462 bus.type = MP_BUS;
463 bus.busid = 0;
464 memcpy(bus.bustype, "ISA ", 6);
465 MP_bus_info(&bus);
466
467 construct_default_ioirq_mptable(0);
468 }
469#endif
470
471 return 0;
472}
473
474/*
 475 * Parse the MP configuration block found earlier by the memory scan.
476 */
477static __init void mpparse_get_smp_config(unsigned int early)
478{
479 struct mpf_intel *mpf;
480
481 if (!smp_found_config)
482 return;
483
484 if (!mpf_found)
485 return;
486
487 if (acpi_lapic && early)
488 return;
489
490 /*
 491 * MPS doesn't support hyperthreading: the MPS table only
 492 * lists the APIC ID of thread 0 of each core.
493 */
494 if (acpi_lapic && acpi_ioapic)
495 return;
496
497 mpf = early_memremap(mpf_base, sizeof(*mpf));
498 if (!mpf) {
499 pr_err("MPTABLE: error mapping MP table\n");
500 return;
501 }
502
503 pr_info("Intel MultiProcessor Specification v1.%d\n",
504 mpf->specification);
505#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
506 if (mpf->feature2 & (1 << 7)) {
507 pr_info(" IMCR and PIC compatibility mode.\n");
508 pic_mode = 1;
509 } else {
510 pr_info(" Virtual Wire compatibility mode.\n");
511 pic_mode = 0;
512 }
513#endif
514 /*
515 * Now see if we need to read further.
516 */
517 if (mpf->feature1) {
518 if (early) {
519 /* Local APIC has default address */
520 register_lapic_address(APIC_DEFAULT_PHYS_BASE);
521 goto out;
522 }
523
524 pr_info("Default MP configuration #%d\n", mpf->feature1);
525 construct_default_ISA_mptable(mpf->feature1);
526
527 } else if (mpf->physptr) {
528 if (check_physptr(mpf, early))
529 goto out;
530 } else
531 BUG();
532
533 if (!early && !acpi_lapic)
534 pr_info("Processors: %d\n", num_procs);
535 /*
536 * Only use the first configuration found.
537 */
538out:
539 early_memunmap(mpf, sizeof(*mpf));
540}
541
542void __init mpparse_parse_early_smp_config(void)
543{
544 mpparse_get_smp_config(true);
545}
546
547void __init mpparse_parse_smp_config(void)
548{
549 mpparse_get_smp_config(false);
550}
551
552static void __init smp_reserve_memory(struct mpf_intel *mpf)
553{
554 memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
555}
556
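/*
 * Scan a physical memory range in 16-byte steps for the MP floating
 * pointer structure ("_MP_" signature, valid checksum, spec revision
 * 1.1 or 1.4), mapping it with early_memremap() since this runs before
 * the regular kernel mappings are available. On success the base
 * address is recorded and both the structure and the MP table it
 * points to are reserved in memblock.
 */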
557static int __init smp_scan_config(unsigned long base, unsigned long length)
558{
559 unsigned int *bp;
560 struct mpf_intel *mpf;
561 int ret = 0;
562
563 apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
564 base, base + length - 1);
565 BUILD_BUG_ON(sizeof(*mpf) != 16);
566
567 while (length > 0) {
568 bp = early_memremap(base, length);
569 mpf = (struct mpf_intel *)bp;
570 if ((*bp == SMP_MAGIC_IDENT) &&
571 (mpf->length == 1) &&
572 !mpf_checksum((unsigned char *)bp, 16) &&
573 ((mpf->specification == 1)
574 || (mpf->specification == 4))) {
575#ifdef CONFIG_X86_LOCAL_APIC
576 smp_found_config = 1;
577#endif
578 mpf_base = base;
579 mpf_found = true;
580
581 pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
582 base, base + sizeof(*mpf) - 1);
583
584 memblock_reserve(base, sizeof(*mpf));
585 if (mpf->physptr)
586 smp_reserve_memory(mpf);
587
588 ret = 1;
589 }
590 early_memunmap(bp, length);
591
592 if (ret)
593 break;
594
595 base += 16;
596 length -= 16;
597 }
598 return ret;
599}
600
601void __init mpparse_find_mptable(void)
602{
603 unsigned int address;
604
605 /*
606 * FIXME: Linux assumes you have 640K of base ram..
607 * this continues the error...
608 *
609 * 1) Scan the bottom 1K for a signature
610 * 2) Scan the top 1K of base RAM
611 * 3) Scan the 64K of bios
612 */
613 if (smp_scan_config(0x0, 0x400) ||
614 smp_scan_config(639 * 0x400, 0x400) ||
615 smp_scan_config(0xF0000, 0x10000))
616 return;
617 /*
618 * If it is an SMP machine we should know now, unless the
619 * configuration is in an EISA bus machine with an
620 * extended bios data area.
621 *
622 * there is a real-mode segmented pointer pointing to the
623 * 4K EBDA area at 0x40E, calculate and scan it here.
624 *
625 * NOTE! There are Linux loaders that will corrupt the EBDA
626 * area, and as such this kind of SMP config may be less
627 * trustworthy, simply because the SMP table may have been
628 * stomped on during early boot. These loaders are buggy and
629 * should be fixed.
630 *
631 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
632 */
633
634 address = get_bios_ebda();
635 if (address)
636 smp_scan_config(address, 0x400);
637}
638
639#ifdef CONFIG_X86_IO_APIC
640static u8 __initdata irq_used[MAX_IRQ_SOURCES];
641
642static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
643{
644 int i;
645
646 if (m->irqtype != mp_INT)
647 return 0;
648
649 if (m->irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW))
650 return 0;
651
652 /* not legacy */
653
654 for (i = 0; i < mp_irq_entries; i++) {
655 if (mp_irqs[i].irqtype != mp_INT)
656 continue;
657
658 if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
659 MP_IRQPOL_ACTIVE_LOW))
660 continue;
661
662 if (mp_irqs[i].srcbus != m->srcbus)
663 continue;
664 if (mp_irqs[i].srcbusirq != m->srcbusirq)
665 continue;
666 if (irq_used[i]) {
667 /* already claimed */
668 return -2;
669 }
670 irq_used[i] = 1;
671 return i;
672 }
673
674 /* not found */
675 return -1;
676}
677
678#define SPARE_SLOT_NUM 20
679
680static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];
681
682static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
683{
684 int i;
685
686 apic_printk(APIC_VERBOSE, "OLD ");
687 print_mp_irq_info(m);
688
689 i = get_MP_intsrc_index(m);
690 if (i > 0) {
691 memcpy(m, &mp_irqs[i], sizeof(*m));
692 apic_printk(APIC_VERBOSE, "NEW ");
693 print_mp_irq_info(&mp_irqs[i]);
694 return;
695 }
696 if (!i) {
697 /* legacy, do nothing */
698 return;
699 }
700 if (*nr_m_spare < SPARE_SLOT_NUM) {
701 /*
 702 * Not found (-1) or duplicated (-2) entries are invalid;
 703 * remember this slot so it can be reused later.
704 */
705 m_spare[*nr_m_spare] = m;
706 *nr_m_spare += 1;
707 }
708}
709
710static int __init
711check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
712{
713 if (!mpc_new_phys || count <= mpc_new_length) {
714 WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
715 return -1;
716 }
717
718 return 0;
719}
720#else /* CONFIG_X86_IO_APIC */
721static
722inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
723#endif /* CONFIG_X86_IO_APIC */
724
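/*
 * Rewrite the MP table's interrupt source entries from the kernel's
 * current mp_irqs[] array: matching entries are updated in place,
 * invalid ones are remembered as spare slots to be reused, and any
 * remaining mp_irqs[] entries are appended if the table has room.
 * The table checksum is recomputed at the end.
 */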
725static int __init replace_intsrc_all(struct mpc_table *mpc,
726 unsigned long mpc_new_phys,
727 unsigned long mpc_new_length)
728{
729#ifdef CONFIG_X86_IO_APIC
730 int i;
731#endif
732 int count = sizeof(*mpc);
733 int nr_m_spare = 0;
734 unsigned char *mpt = ((unsigned char *)mpc) + count;
735
736 pr_info("mpc_length %x\n", mpc->length);
737 while (count < mpc->length) {
738 switch (*mpt) {
739 case MP_PROCESSOR:
740 skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
741 break;
742 case MP_BUS:
743 skip_entry(&mpt, &count, sizeof(struct mpc_bus));
744 break;
745 case MP_IOAPIC:
746 skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
747 break;
748 case MP_INTSRC:
749 check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
750 skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
751 break;
752 case MP_LINTSRC:
753 skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
754 break;
755 default:
756 /* wrong mptable */
757 smp_dump_mptable(mpc, mpt);
758 goto out;
759 }
760 }
761
762#ifdef CONFIG_X86_IO_APIC
763 for (i = 0; i < mp_irq_entries; i++) {
764 if (irq_used[i])
765 continue;
766
767 if (mp_irqs[i].irqtype != mp_INT)
768 continue;
769
770 if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
771 MP_IRQPOL_ACTIVE_LOW))
772 continue;
773
774 if (nr_m_spare > 0) {
775 apic_printk(APIC_VERBOSE, "*NEW* found\n");
776 nr_m_spare--;
777 memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
778 m_spare[nr_m_spare] = NULL;
779 } else {
780 struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
781 count += sizeof(struct mpc_intsrc);
782 if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
783 goto out;
784 memcpy(m, &mp_irqs[i], sizeof(*m));
785 mpc->length = count;
786 mpt += sizeof(struct mpc_intsrc);
787 }
788 print_mp_irq_info(&mp_irqs[i]);
789 }
790#endif
791out:
792 /* update checksum */
793 mpc->checksum = 0;
794 mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);
795
796 return 0;
797}
798
799int enable_update_mptable;
800
801static int __init update_mptable_setup(char *str)
802{
803 enable_update_mptable = 1;
804#ifdef CONFIG_PCI
805 pci_routeirq = 1;
806#endif
807 return 0;
808}
809early_param("update_mptable", update_mptable_setup);
810
811static unsigned long __initdata mpc_new_phys;
812static unsigned long mpc_new_length __initdata = 4096;
813
814/* alloc_mptable or alloc_mptable=4k */
815static int __initdata alloc_mptable;
816static int __init parse_alloc_mptable_opt(char *p)
817{
818 enable_update_mptable = 1;
819#ifdef CONFIG_PCI
820 pci_routeirq = 1;
821#endif
822 alloc_mptable = 1;
823 if (!p)
824 return 0;
825 mpc_new_length = memparse(p, &p);
826 return 0;
827}
828early_param("alloc_mptable", parse_alloc_mptable_opt);
829
830void __init e820__memblock_alloc_reserved_mpc_new(void)
831{
832 if (enable_update_mptable && alloc_mptable)
833 mpc_new_phys = e820__memblock_alloc_reserved(mpc_new_length, 4);
834}
835
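/*
 * Late initcall for the "update_mptable" option: map the floating
 * pointer and MP table with early_memremap(), re-validate them and
 * rewrite the interrupt entries, either in place or in a new area
 * reserved via "alloc_mptable".
 */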
836static int __init update_mp_table(void)
837{
838 char str[16];
839 char oem[10];
840 struct mpf_intel *mpf;
841 struct mpc_table *mpc, *mpc_new;
842 unsigned long size;
843
844 if (!enable_update_mptable)
845 return 0;
846
847 if (!mpf_found)
848 return 0;
849
850 mpf = early_memremap(mpf_base, sizeof(*mpf));
851 if (!mpf) {
852 pr_err("MPTABLE: mpf early_memremap() failed\n");
853 return 0;
854 }
855
856 /*
857 * Now see if we need to go further.
858 */
859 if (mpf->feature1)
860 goto do_unmap_mpf;
861
862 if (!mpf->physptr)
863 goto do_unmap_mpf;
864
865 size = get_mpc_size(mpf->physptr);
866 mpc = early_memremap(mpf->physptr, size);
867 if (!mpc) {
868 pr_err("MPTABLE: mpc early_memremap() failed\n");
869 goto do_unmap_mpf;
870 }
871
872 if (!smp_check_mpc(mpc, oem, str))
873 goto do_unmap_mpc;
874
875 pr_info("mpf: %llx\n", (u64)mpf_base);
876 pr_info("physptr: %x\n", mpf->physptr);
877
878 if (mpc_new_phys && mpc->length > mpc_new_length) {
879 mpc_new_phys = 0;
880 pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n",
881 mpc_new_length);
882 }
883
884 if (!mpc_new_phys) {
885 unsigned char old, new;
886 /* check if we can change the position */
887 mpc->checksum = 0;
888 old = mpf_checksum((unsigned char *)mpc, mpc->length);
889 mpc->checksum = 0xff;
890 new = mpf_checksum((unsigned char *)mpc, mpc->length);
891 if (old == new) {
892 pr_info("mpc is readonly, please try alloc_mptable instead\n");
893 goto do_unmap_mpc;
894 }
895 pr_info("use in-position replacing\n");
896 } else {
897 mpc_new = early_memremap(mpc_new_phys, mpc_new_length);
898 if (!mpc_new) {
899 pr_err("MPTABLE: new mpc early_memremap() failed\n");
900 goto do_unmap_mpc;
901 }
902 mpf->physptr = mpc_new_phys;
903 memcpy(mpc_new, mpc, mpc->length);
904 early_memunmap(mpc, size);
905 mpc = mpc_new;
906 size = mpc_new_length;
907 /* check if we can modify that */
908 if (mpc_new_phys - mpf->physptr) {
909 struct mpf_intel *mpf_new;
910 /* steal 16 bytes from [0, 1k) */
911 mpf_new = early_memremap(0x400 - 16, sizeof(*mpf_new));
912 if (!mpf_new) {
913 pr_err("MPTABLE: new mpf early_memremap() failed\n");
914 goto do_unmap_mpc;
915 }
916 pr_info("mpf new: %x\n", 0x400 - 16);
917 memcpy(mpf_new, mpf, 16);
918 early_memunmap(mpf, sizeof(*mpf));
919 mpf = mpf_new;
920 mpf->physptr = mpc_new_phys;
921 }
922 mpf->checksum = 0;
923 mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
924 pr_info("physptr new: %x\n", mpf->physptr);
925 }
926
927 /*
 928 * Only replace entries of type mp_INT with
 929 * MP_IRQTRIG_LEVEL|MP_IRQPOL_ACTIVE_LOW that are
 930 * already in mp_irqs, stored by ... and mp_config_acpi_gsi;
 931 * pci=routeirq may be needed for full coverage.
932 */
933 replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);
934
935do_unmap_mpc:
936 early_memunmap(mpc, size);
937
938do_unmap_mpf:
939 early_memunmap(mpf, sizeof(*mpf));
940
941 return 0;
942}
943
944late_initcall(update_mp_table);