// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Multiprocessor Specification 1.1 and 1.4
 * compliant MP-table parsing routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 * (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/smp.h>
#include <linux/pci.h>

#include <asm/i8259.h>
#include <asm/io_apic.h>
#include <asm/acpi.h>
#include <asm/irqdomain.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/proto.h>
#include <asm/bios_ebda.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/smp.h>

#include <asm/apic.h>
/*
 * Checksum an MP configuration block.
 */

static unsigned int num_procs __initdata;

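/*
 * Per the MP spec, the bytes of a table (including its checksum byte) must
 * sum to zero modulo 256, so a zero return from mpf_checksum() means the
 * block is valid.
 */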
static int __init mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;

	return sum & 0xFF;
}

static void __init MP_processor_info(struct mpc_cpu *m)
{
	char *bootup_cpu = "";

	topology_register_apic(m->apicid, CPU_ACPIID_INVALID, m->cpuflag & CPU_ENABLED);
	if (!(m->cpuflag & CPU_ENABLED))
		return;

	if (m->cpuflag & CPU_BOOTPROCESSOR)
		bootup_cpu = " (Bootup-CPU)";

	pr_info("Processor #%d%s\n", m->apicid, bootup_cpu);
	num_procs++;
}

#ifdef CONFIG_X86_IO_APIC
static void __init mpc_oem_bus_info(struct mpc_bus *m, char *str)
{
	memcpy(str, m->bustype, 6);
	str[6] = 0;
	apic_pr_verbose("Bus #%d is %s\n", m->busid, str);
}

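/*
 * Classify a bus entry: every bus starts out marked "not PCI" in
 * mp_bus_not_pci and the bit is cleared only for PCI buses. Note the
 * #ifdef CONFIG_EISA bracketing below: the EISA branch of the else-if
 * chain (and the mp_bus_id_to_type[] bookkeeping) only exists when
 * CONFIG_EISA is enabled.
 */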
static void __init MP_bus_info(struct mpc_bus *m)
{
	char str[7];

	mpc_oem_bus_info(m, str);

#if MAX_MP_BUSSES < 256
	if (m->busid >= MAX_MP_BUSSES) {
		pr_warn("MP table busid value (%d) for bustype %s is too large, max. supported is %d\n",
			m->busid, str, MAX_MP_BUSSES - 1);
		return;
	}
#endif

	set_bit(m->busid, mp_bus_not_pci);
	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
#endif
	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
		clear_bit(m->busid, mp_bus_not_pci);
#ifdef CONFIG_EISA
		mp_bus_id_to_type[m->busid] = MP_BUS_PCI;
	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
		mp_bus_id_to_type[m->busid] = MP_BUS_EISA;
#endif
	} else
		pr_warn("Unknown bustype %s - ignoring\n", str);
}

static void __init MP_ioapic_info(struct mpc_ioapic *m)
{
	struct ioapic_domain_cfg cfg = {
		.type = IOAPIC_DOMAIN_LEGACY,
		.ops = &mp_ioapic_irqdomain_ops,
	};

	if (m->flags & MPC_APIC_USABLE)
		mp_register_ioapic(m->apicid, m->apicaddr, gsi_top, &cfg);
}

static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
{
	apic_printk(APIC_VERBOSE,
		    "Int: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC INT %02x\n",
		    mp_irq->irqtype, mp_irq->irqflag & 3,
		    (mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
		    mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
}

#else /* CONFIG_X86_IO_APIC */
static inline void __init MP_bus_info(struct mpc_bus *m) {}
static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
#endif /* CONFIG_X86_IO_APIC */

static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
{
	apic_printk(APIC_VERBOSE,
		    "Lint: type %d, pol %d, trig %d, bus %02x, IRQ %02x, APIC ID %x, APIC LINT %02x\n",
		    m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid,
		    m->srcbusirq, m->destapic, m->destapiclint);
}

/*
 * Read/parse the MPC
 */
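/*
 * Sanity-check the MP configuration table header: the "PCMP" signature,
 * the table checksum, the spec revision (0x01 or 0x04, i.e. MP 1.1 or 1.4)
 * and a non-null local APIC address. The OEM and product ID strings are
 * copied out for the caller.
 */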
static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
{

	if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) {
		pr_err("MPTABLE: bad signature [%c%c%c%c]!\n",
		       mpc->signature[0], mpc->signature[1],
		       mpc->signature[2], mpc->signature[3]);
		return 0;
	}
	if (mpf_checksum((unsigned char *)mpc, mpc->length)) {
		pr_err("MPTABLE: checksum error!\n");
		return 0;
	}
	if (mpc->spec != 0x01 && mpc->spec != 0x04) {
		pr_err("MPTABLE: bad table version (%d)!!\n", mpc->spec);
		return 0;
	}
	if (!mpc->lapic) {
		pr_err("MPTABLE: null local APIC address!\n");
		return 0;
	}
	memcpy(oem, mpc->oem, 8);
	oem[8] = 0;
	pr_info("MPTABLE: OEM ID: %s\n", oem);

	memcpy(str, mpc->productid, 12);
	str[12] = 0;

	pr_info("MPTABLE: Product ID: %s\n", str);

	pr_info("MPTABLE: APIC at: 0x%X\n", mpc->lapic);

	return 1;
}

static void skip_entry(unsigned char **ptr, int *count, int size)
{
	*ptr += size;
	*count += size;
}

static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt)
{
	pr_err("Your mptable is wrong, contact your HW vendor!\n");
	pr_cont("type %x\n", *mpt);
	print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16,
		       1, mpc, mpc->length, 1);
}

static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
{
	char str[16];
	char oem[10];

	int count = sizeof(*mpc);
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	if (!smp_check_mpc(mpc, oem, str))
		return 0;

	if (early) {
		/* Initialize the lapic mapping */
		if (!acpi_lapic)
			register_lapic_address(mpc->lapic);
		return 1;
	}

	/* Now process the configuration blocks. */
	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			/* ACPI may have already provided this data */
			if (!acpi_lapic)
				MP_processor_info((struct mpc_cpu *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			MP_bus_info((struct mpc_bus *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			MP_ioapic_info((struct mpc_ioapic *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			mp_save_irq((struct mpc_intsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			MP_lintsrc_info((struct mpc_lintsrc *)mpt);
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			count = mpc->length;
			break;
		}
	}

	if (!num_procs && !acpi_lapic)
		pr_err("MPTABLE: no processors registered!\n");
	return num_procs || acpi_lapic;
}

#ifdef CONFIG_X86_IO_APIC

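/*
 * The Edge/Level Control Registers (ELCR) of the two cascaded 8259A PICs
 * live at I/O ports 0x4d0 and 0x4d1 (PIC_ELCR1 and the following port),
 * one bit per ISA IRQ; a set bit means the line is level triggered.
 */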
static int __init ELCR_trigger(unsigned int irq)
{
	unsigned int port;

	port = PIC_ELCR1 + (irq >> 3);
	return (inb(port) >> (irq & 7)) & 1;
}

static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
	struct mpc_intsrc intsrc;
	int i;
	int ELCR_fallback = 0;

	intsrc.type = MP_INTSRC;
	intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	intsrc.srcbus = 0;
	intsrc.dstapic = mpc_ioapic_id(0);

	intsrc.irqtype = mp_INT;

	/*
	 * If true, we have an ISA/PCI system with no IRQ entries
	 * in the MP table. To prevent the PCI interrupts from being set up
	 * incorrectly, we try to use the ELCR. The sanity check to see if
	 * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
	 * never be level sensitive, so we simply see if the ELCR agrees.
	 * If it does, we assume it's valid.
	 */
	if (mpc_default_type == 5) {
		pr_info("ISA/PCI bus type with no IRQ information... falling back to ELCR\n");

		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) ||
		    ELCR_trigger(13))
			pr_err("ELCR contains invalid data... not using ELCR\n");
		else {
			pr_info("Using ELCR to identify PCI interrupts\n");
			ELCR_fallback = 1;
		}
	}

	for (i = 0; i < 16; i++) {
		switch (mpc_default_type) {
		case 2:
			if (i == 0 || i == 13)
				continue;	/* IRQ0 & IRQ13 not connected */
			fallthrough;
		default:
			if (i == 2)
				continue;	/* IRQ2 is never connected */
		}

		if (ELCR_fallback) {
			/*
			 * If the ELCR indicates a level-sensitive interrupt, we
			 * copy that information over to the MP table in the
			 * irqflag field (level sensitive, active high polarity).
			 */
			if (ELCR_trigger(i)) {
				intsrc.irqflag = MP_IRQTRIG_LEVEL |
						 MP_IRQPOL_ACTIVE_HIGH;
			} else {
				intsrc.irqflag = MP_IRQTRIG_DEFAULT |
						 MP_IRQPOL_DEFAULT;
			}
		}

		intsrc.srcbusirq = i;
		intsrc.dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
		mp_save_irq(&intsrc);
	}

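	/*
	 * Finish with an ExtINT entry: the 8259A PIC output is wired to the
	 * IO-APIC's INTIN0, so legacy PIC interrupts can still be delivered
	 * (virtual-wire style) alongside the entries generated above.
	 */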
	intsrc.irqtype = mp_ExtINT;
	intsrc.srcbusirq = 0;
	intsrc.dstirq = 0;	/* 8259A to INTIN0 */
	mp_save_irq(&intsrc);
}


static void __init construct_ioapic_table(int mpc_default_type)
{
	struct mpc_ioapic ioapic;
	struct mpc_bus bus;

	bus.type = MP_BUS;
	bus.busid = 0;
	switch (mpc_default_type) {
	default:
		pr_err("???\nUnknown standard configuration %d\n",
		       mpc_default_type);
		fallthrough;
	case 1:
	case 5:
		memcpy(bus.bustype, "ISA   ", 6);
		break;
	case 2:
	case 6:
	case 3:
		memcpy(bus.bustype, "EISA  ", 6);
		break;
	}
	MP_bus_info(&bus);
	if (mpc_default_type > 4) {
		bus.busid = 1;
		memcpy(bus.bustype, "PCI   ", 6);
		MP_bus_info(&bus);
	}

	ioapic.type = MP_IOAPIC;
	ioapic.apicid = 2;
	ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	ioapic.flags = MPC_APIC_USABLE;
	ioapic.apicaddr = IO_APIC_DEFAULT_PHYS_BASE;
	MP_ioapic_info(&ioapic);

	/*
	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
	 */
	construct_default_ioirq_mptable(mpc_default_type);
}
#else
static inline void __init construct_ioapic_table(int mpc_default_type) { }
#endif

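/*
 * Build an MP configuration from one of the default system configurations
 * defined by the MP specification. These are used when the floating
 * pointer's feature byte 1 is non-zero and no configuration table is
 * supplied by the BIOS.
 */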
static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
	struct mpc_cpu processor;
	struct mpc_lintsrc lintsrc;
	int linttypes[2] = { mp_ExtINT, mp_NMI };
	int i;

	/*
	 * 2 CPUs, numbered 0 & 1.
	 */
	processor.type = MP_PROCESSOR;
	/* Either an integrated APIC or a discrete 82489DX. */
	processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	processor.cpuflag = CPU_ENABLED;
	processor.cpufeature = (boot_cpu_data.x86 << 8) |
	    (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
	processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
	processor.reserved[0] = 0;
	processor.reserved[1] = 0;
	for (i = 0; i < 2; i++) {
		processor.apicid = i;
		MP_processor_info(&processor);
	}

	construct_ioapic_table(mpc_default_type);

	lintsrc.type = MP_LINTSRC;
	lintsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
	lintsrc.srcbusid = 0;
	lintsrc.srcbusirq = 0;
	lintsrc.destapic = MP_APIC_ALL;
	for (i = 0; i < 2; i++) {
		lintsrc.irqtype = linttypes[i];
		lintsrc.destapiclint = i;
		MP_lintsrc_info(&lintsrc);
	}
}

static unsigned long mpf_base;
static bool mpf_found;

static unsigned long __init get_mpc_size(unsigned long physptr)
{
	struct mpc_table *mpc;
	unsigned long size;

	mpc = early_memremap(physptr, PAGE_SIZE);
	size = mpc->length;
	early_memunmap(mpc, PAGE_SIZE);
	apic_pr_verbose(" mpc: %lx-%lx\n", physptr, physptr + size);

	return size;
}

static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
{
	struct mpc_table *mpc;
	unsigned long size;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);

	/*
	 * Read the physical hardware table. Anything here will
	 * override the defaults.
	 */
	if (!smp_read_mpc(mpc, early)) {
#ifdef CONFIG_X86_LOCAL_APIC
		smp_found_config = 0;
#endif
		pr_err("BIOS bug, MP table errors detected!...\n");
		pr_cont("... disabling SMP support. (tell your hw vendor)\n");
		early_memunmap(mpc, size);
		return -1;
	}
	early_memunmap(mpc, size);

	if (early)
		return -1;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * If there are no explicit MP IRQ entries, then we are
	 * broken. We set up most of the low 16 IO-APIC pins to
	 * ISA defaults and hope it will work.
	 */
	if (!mp_irq_entries) {
		struct mpc_bus bus;

		pr_err("BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

		bus.type = MP_BUS;
		bus.busid = 0;
		memcpy(bus.bustype, "ISA   ", 6);
		MP_bus_info(&bus);

		construct_default_ioirq_mptable(0);
	}
#endif

	return 0;
}

/*
 * Scan the memory blocks for an SMP configuration block.
 */
static __init void mpparse_get_smp_config(unsigned int early)
{
	struct mpf_intel *mpf;

	if (!smp_found_config)
		return;

	if (!mpf_found)
		return;

	if (acpi_lapic && early)
		return;

	/*
	 * MPS doesn't support hyperthreading, i.e. only the thread 0
	 * APIC IDs appear in the MPS table.
	 */
	if (acpi_lapic && acpi_ioapic)
		return;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: error mapping MP table\n");
		return;
	}

	pr_info("Intel MultiProcessor Specification v1.%d\n",
		mpf->specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
	if (mpf->feature2 & (1 << 7)) {
		pr_info(" IMCR and PIC compatibility mode.\n");
		pic_mode = 1;
	} else {
		pr_info(" Virtual Wire compatibility mode.\n");
		pic_mode = 0;
	}
#endif
	/*
	 * Now see if we need to read further.
	 */
	if (mpf->feature1) {
		if (early) {
			/* Local APIC has default address */
			register_lapic_address(APIC_DEFAULT_PHYS_BASE);
			goto out;
		}

		pr_info("Default MP configuration #%d\n", mpf->feature1);
		construct_default_ISA_mptable(mpf->feature1);

	} else if (mpf->physptr) {
		if (check_physptr(mpf, early))
			goto out;
	} else
		BUG();

	if (!early && !acpi_lapic)
		pr_info("Processors: %d\n", num_procs);
	/*
	 * Only use the first configuration found.
	 */
out:
	early_memunmap(mpf, sizeof(*mpf));
}

void __init mpparse_parse_early_smp_config(void)
{
	mpparse_get_smp_config(true);
}

void __init mpparse_parse_smp_config(void)
{
	mpparse_get_smp_config(false);
}

static void __init smp_reserve_memory(struct mpf_intel *mpf)
{
	memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
}

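/*
 * The MP floating pointer structure is 16 bytes long, starts with the
 * ASCII signature "_MP_" (SMP_MAGIC_IDENT), carries its own checksum, and
 * its length field counts 16-byte paragraphs (so it must be 1). The spec
 * places it on a 16-byte boundary, hence the 16-byte stride of the scan
 * below.
 */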
static int __init smp_scan_config(unsigned long base, unsigned long length)
{
	unsigned int *bp;
	struct mpf_intel *mpf;
	int ret = 0;

	apic_pr_verbose("Scan for SMP in [mem %#010lx-%#010lx]\n", base, base + length - 1);
	BUILD_BUG_ON(sizeof(*mpf) != 16);

	while (length > 0) {
		bp = early_memremap(base, length);
		mpf = (struct mpf_intel *)bp;
		if ((*bp == SMP_MAGIC_IDENT) &&
		    (mpf->length == 1) &&
		    !mpf_checksum((unsigned char *)bp, 16) &&
		    ((mpf->specification == 1)
		     || (mpf->specification == 4))) {
#ifdef CONFIG_X86_LOCAL_APIC
			smp_found_config = 1;
#endif
			mpf_base = base;
			mpf_found = true;

			pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
				base, base + sizeof(*mpf) - 1);

			memblock_reserve(base, sizeof(*mpf));
			if (mpf->physptr)
				smp_reserve_memory(mpf);

			ret = 1;
		}
		early_memunmap(bp, length);

		if (ret)
			break;

		base += 16;
		length -= 16;
	}
	return ret;
}

void __init mpparse_find_mptable(void)
{
	unsigned int address;

	/*
	 * FIXME: Linux assumes you have 640K of base ram..
	 * this continues the error...
	 *
	 * 1) Scan the bottom 1K for a signature
	 * 2) Scan the top 1K of base RAM
	 * 3) Scan the 64K of bios
	 */
	if (smp_scan_config(0x0, 0x400) ||
	    smp_scan_config(639 * 0x400, 0x400) ||
	    smp_scan_config(0xF0000, 0x10000))
		return;
	/*
	 * If it is an SMP machine we should know now, unless the
	 * configuration is in an EISA bus machine with an
	 * extended bios data area.
	 *
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E, calculate and scan it here.
	 *
	 * NOTE! There are Linux loaders that will corrupt the EBDA
	 * area, and as such this kind of SMP config may be less
	 * trustworthy, simply because the SMP table may have been
	 * stomped on during early boot. These loaders are buggy and
	 * should be fixed.
	 *
	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
	 */

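	/*
	 * get_bios_ebda() reads the 16-bit real-mode segment stored at
	 * 0x40E and shifts it left by 4 to form the physical EBDA base;
	 * it returns 0 when no EBDA is present.
	 */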
	address = get_bios_ebda();
	if (address)
		smp_scan_config(address, 0x400);
}

#ifdef CONFIG_X86_IO_APIC
static u8 __initdata irq_used[MAX_IRQ_SOURCES];

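/*
 * Look up an MP table INT entry in mp_irqs[]. Returns the index of a
 * matching (and now claimed) slot, 0 for legacy entries that are not
 * level-triggered/active-low mp_INT entries, -1 if no match was found,
 * and -2 if the matching slot was already claimed.
 */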
static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
{
	int i;

	if (m->irqtype != mp_INT)
		return 0;

	if (m->irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW))
		return 0;

	/* not legacy */

	for (i = 0; i < mp_irq_entries; i++) {
		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (mp_irqs[i].srcbus != m->srcbus)
			continue;
		if (mp_irqs[i].srcbusirq != m->srcbusirq)
			continue;
		if (irq_used[i]) {
			/* already claimed */
			return -2;
		}
		irq_used[i] = 1;
		return i;
	}

	/* not found */
	return -1;
}

#define SPARE_SLOT_NUM 20

static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM];

static void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare)
{
	int i;

	apic_pr_verbose("OLD ");
	print_mp_irq_info(m);

	i = get_MP_intsrc_index(m);
	if (i > 0) {
		memcpy(m, &mp_irqs[i], sizeof(*m));
		apic_pr_verbose("NEW ");
		print_mp_irq_info(&mp_irqs[i]);
		return;
	}
	if (!i) {
		/* legacy, do nothing */
		return;
	}
	if (*nr_m_spare < SPARE_SLOT_NUM) {
		/*
		 * not found (-1), or duplicated (-2) are invalid entries,
		 * we need to use the slot later
		 */
		m_spare[*nr_m_spare] = m;
		*nr_m_spare += 1;
	}
}

static int __init
check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
{
	if (!mpc_new_phys || count <= mpc_new_length) {
		WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
		return -1;
	}

	return 0;
}
#else /* CONFIG_X86_IO_APIC */
static
inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
#endif /* CONFIG_X86_IO_APIC */

static int __init replace_intsrc_all(struct mpc_table *mpc,
				     unsigned long mpc_new_phys,
				     unsigned long mpc_new_length)
{
#ifdef CONFIG_X86_IO_APIC
	int i;
#endif
	int count = sizeof(*mpc);
	int nr_m_spare = 0;
	unsigned char *mpt = ((unsigned char *)mpc) + count;

	pr_info("mpc_length %x\n", mpc->length);
	while (count < mpc->length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			skip_entry(&mpt, &count, sizeof(struct mpc_cpu));
			break;
		case MP_BUS:
			skip_entry(&mpt, &count, sizeof(struct mpc_bus));
			break;
		case MP_IOAPIC:
			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
			break;
		case MP_INTSRC:
			check_irq_src((struct mpc_intsrc *)mpt, &nr_m_spare);
			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
			break;
		case MP_LINTSRC:
			skip_entry(&mpt, &count, sizeof(struct mpc_lintsrc));
			break;
		default:
			/* wrong mptable */
			smp_dump_mptable(mpc, mpt);
			goto out;
		}
	}

#ifdef CONFIG_X86_IO_APIC
	for (i = 0; i < mp_irq_entries; i++) {
		if (irq_used[i])
			continue;

		if (mp_irqs[i].irqtype != mp_INT)
			continue;

		if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
					   MP_IRQPOL_ACTIVE_LOW))
			continue;

		if (nr_m_spare > 0) {
			apic_pr_verbose("*NEW* found\n");
			nr_m_spare--;
			memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
			m_spare[nr_m_spare] = NULL;
		} else {
			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
			count += sizeof(struct mpc_intsrc);
			if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
				goto out;
			memcpy(m, &mp_irqs[i], sizeof(*m));
			mpc->length = count;
			mpt += sizeof(struct mpc_intsrc);
		}
		print_mp_irq_info(&mp_irqs[i]);
	}
#endif
out:
	/* update checksum */
	mpc->checksum = 0;
	mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length);

	return 0;
}

int enable_update_mptable;

static int __init update_mptable_setup(char *str)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	return 0;
}
early_param("update_mptable", update_mptable_setup);

static unsigned long __initdata mpc_new_phys;
static unsigned long mpc_new_length __initdata = 4096;

/* alloc_mptable or alloc_mptable=4k */
static int __initdata alloc_mptable;
static int __init parse_alloc_mptable_opt(char *p)
{
	enable_update_mptable = 1;
#ifdef CONFIG_PCI
	pci_routeirq = 1;
#endif
	alloc_mptable = 1;
	if (!p)
		return 0;
	mpc_new_length = memparse(p, &p);
	return 0;
}
early_param("alloc_mptable", parse_alloc_mptable_opt);

void __init e820__memblock_alloc_reserved_mpc_new(void)
{
	if (enable_update_mptable && alloc_mptable)
		mpc_new_phys = e820__memblock_alloc_reserved(mpc_new_length, 4);
}

static int __init update_mp_table(void)
{
	char str[16];
	char oem[10];
	struct mpf_intel *mpf;
	struct mpc_table *mpc, *mpc_new;
	unsigned long size;

	if (!enable_update_mptable)
		return 0;

	if (!mpf_found)
		return 0;

	mpf = early_memremap(mpf_base, sizeof(*mpf));
	if (!mpf) {
		pr_err("MPTABLE: mpf early_memremap() failed\n");
		return 0;
	}

	/*
	 * Now see if we need to go further.
	 */
	if (mpf->feature1)
		goto do_unmap_mpf;

	if (!mpf->physptr)
		goto do_unmap_mpf;

	size = get_mpc_size(mpf->physptr);
	mpc = early_memremap(mpf->physptr, size);
	if (!mpc) {
		pr_err("MPTABLE: mpc early_memremap() failed\n");
		goto do_unmap_mpf;
	}

	if (!smp_check_mpc(mpc, oem, str))
		goto do_unmap_mpc;

	pr_info("mpf: %llx\n", (u64)mpf_base);
	pr_info("physptr: %x\n", mpf->physptr);

	if (mpc_new_phys && mpc->length > mpc_new_length) {
		mpc_new_phys = 0;
		pr_info("mpc_new_length is %ld, please use alloc_mptable=8k\n",
			mpc_new_length);
	}

	if (!mpc_new_phys) {
		unsigned char old, new;
		/* check if we can change the position */
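		/*
		 * Write 0 and then 0xff into the checksum byte and recompute
		 * the sum each time: if both sums come out equal, the write
		 * did not stick, i.e. the table lives in read-only memory
		 * and cannot be updated in place.
		 */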
		mpc->checksum = 0;
		old = mpf_checksum((unsigned char *)mpc, mpc->length);
		mpc->checksum = 0xff;
		new = mpf_checksum((unsigned char *)mpc, mpc->length);
		if (old == new) {
			pr_info("mpc is readonly, please try alloc_mptable instead\n");
			goto do_unmap_mpc;
		}
		pr_info("use in-position replacing\n");
	} else {
		mpc_new = early_memremap(mpc_new_phys, mpc_new_length);
		if (!mpc_new) {
			pr_err("MPTABLE: new mpc early_memremap() failed\n");
			goto do_unmap_mpc;
		}
		mpf->physptr = mpc_new_phys;
		memcpy(mpc_new, mpc, mpc->length);
		early_memunmap(mpc, size);
		mpc = mpc_new;
		size = mpc_new_length;
		/* check if we can modify that */
		if (mpc_new_phys - mpf->physptr) {
			struct mpf_intel *mpf_new;
			/* steal 16 bytes from [0, 1k) */
			mpf_new = early_memremap(0x400 - 16, sizeof(*mpf_new));
			if (!mpf_new) {
				pr_err("MPTABLE: new mpf early_memremap() failed\n");
				goto do_unmap_mpc;
			}
			pr_info("mpf new: %x\n", 0x400 - 16);
			memcpy(mpf_new, mpf, 16);
			early_memunmap(mpf, sizeof(*mpf));
			mpf = mpf_new;
			mpf->physptr = mpc_new_phys;
		}
		mpf->checksum = 0;
		mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
		pr_info("physptr new: %x\n", mpf->physptr);
	}

	/*
	 * Only replace entries that have type mp_INT and flags
	 * MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW and that are already
	 * in mp_irqs, stored by ... and mp_config_acpi_gsi();
	 * pci=routeirq may be needed for full coverage.
	 */
	replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);

do_unmap_mpc:
	early_memunmap(mpc, size);

do_unmap_mpf:
	early_memunmap(mpf, sizeof(*mpf));

	return 0;
}

late_initcall(update_mp_table);