v5.4
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 *  acpi.c - Architecture-Specific Low-Level ACPI Support
  4 *
  5 *  Copyright (C) 1999 VA Linux Systems
  6 *  Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
  7 *  Copyright (C) 2000, 2002-2003 Hewlett-Packard Co.
  8 *	David Mosberger-Tang <davidm@hpl.hp.com>
  9 *  Copyright (C) 2000 Intel Corp.
 10 *  Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
 11 *  Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 12 *  Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com>
 13 *  Copyright (C) 2001 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
 14 *  Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
 15 *  Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com>
 16 */
 17
 18#include <linux/module.h>
 19#include <linux/init.h>
 20#include <linux/kernel.h>
 21#include <linux/sched.h>
 22#include <linux/smp.h>
 23#include <linux/string.h>
 24#include <linux/types.h>
 25#include <linux/irq.h>
 26#include <linux/acpi.h>
 27#include <linux/efi.h>
 28#include <linux/mmzone.h>
 29#include <linux/nodemask.h>
 30#include <linux/slab.h>
 31#include <acpi/processor.h>
 32#include <asm/io.h>
 33#include <asm/iosapic.h>
 34#include <asm/page.h>
 35#include <asm/numa.h>
 36#include <asm/sal.h>
 37#include <asm/cyclone.h>
 38
 39#define PREFIX			"ACPI: "
 40
 41int acpi_lapic;
 42unsigned int acpi_cpei_override;
 43unsigned int acpi_cpei_phys_cpuid;
 44
 45unsigned long acpi_wakeup_address = 0;
 46
 47#define ACPI_MAX_PLATFORM_INTERRUPTS	256
 48
 49/* Array to record platform interrupt vectors for generic interrupt routing. */
 50int platform_intr_list[ACPI_MAX_PLATFORM_INTERRUPTS] = {
 51	[0 ... ACPI_MAX_PLATFORM_INTERRUPTS - 1] = -1
 52};
 53
 54enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC;
 55
 56/*
 57 * Interrupt routing API for device drivers.  Provides interrupt vector for
 58 * a generic platform event.  Currently only CPEI is implemented.
 59 */
 60int acpi_request_vector(u32 int_type)
 61{
 62	int vector = -1;
 63
 64	if (int_type < ACPI_MAX_PLATFORM_INTERRUPTS) {
 65		/* corrected platform error interrupt */
 66		vector = platform_intr_list[int_type];
 67	} else
 68		printk(KERN_ERR
 69		       "acpi_request_vector(): invalid interrupt type\n");
 70	return vector;
 71}
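A usage sketch for the routing API above (not part of this file): a caller such as the ia64 MCA code can fetch the CPEI vector during init. Everything except acpi_request_vector() is an illustrative assumption; in particular ACPI_INTERRUPT_CPEI is assumed to name MADT platform-interrupt type 3 (the corrected platform error interrupt).

static int __init example_cpei_setup(void)
{
	/* Look up the vector recorded for the CPEI platform interrupt. */
	int vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);

	if (vector < 0)
		return -ENODEV;		/* no CPEI entry in the MADT */

	/* vector can now be bound to an interrupt handler. */
	return 0;
}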
 72
 73void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
 74{
 75	return __va(phys);
 76}
 77
 78void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
 79{
 80}
 81
 82/* --------------------------------------------------------------------------
 83                            Boot-time Table Parsing
 84   -------------------------------------------------------------------------- */
 85
 86static int available_cpus __initdata;
 87struct acpi_table_madt *acpi_madt __initdata;
 88static u8 has_8259;
 89
 90static int __init
 91acpi_parse_lapic_addr_ovr(union acpi_subtable_headers * header,
 92			  const unsigned long end)
 93{
 94	struct acpi_madt_local_apic_override *lapic;
 95
 96	lapic = (struct acpi_madt_local_apic_override *)header;
 97
 98	if (BAD_MADT_ENTRY(lapic, end))
 99		return -EINVAL;
100
101	if (lapic->address) {
102		iounmap(ipi_base_addr);
103		ipi_base_addr = ioremap(lapic->address, 0);
104	}
105	return 0;
106}
107
108static int __init
109acpi_parse_lsapic(union acpi_subtable_headers *header, const unsigned long end)
110{
111	struct acpi_madt_local_sapic *lsapic;
112
113	lsapic = (struct acpi_madt_local_sapic *)header;
114
 115	/* Skip BAD_MADT_ENTRY check, as lsapic size could vary */
116
117	if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
118#ifdef CONFIG_SMP
119		smp_boot_data.cpu_phys_id[available_cpus] =
120		    (lsapic->id << 8) | lsapic->eid;
121#endif
122		++available_cpus;
123	}
124
125	total_cpus++;
126	return 0;
127}
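A small worked example of the id/eid packing above, with made-up values:

	/* A local SAPIC entry with id = 0x01 and eid = 0x04 is recorded as
	 * physical CPU id (0x01 << 8) | 0x04 == 0x0104; the id and eid can
	 * be recovered as (0x0104 >> 8) & 0xff and 0x0104 & 0xff. */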
128
129static int __init
130acpi_parse_lapic_nmi(union acpi_subtable_headers * header, const unsigned long end)
131{
132	struct acpi_madt_local_apic_nmi *lacpi_nmi;
133
134	lacpi_nmi = (struct acpi_madt_local_apic_nmi *)header;
135
136	if (BAD_MADT_ENTRY(lacpi_nmi, end))
137		return -EINVAL;
138
139	/* TBD: Support lapic_nmi entries */
140	return 0;
141}
142
143static int __init
144acpi_parse_iosapic(union acpi_subtable_headers * header, const unsigned long end)
145{
146	struct acpi_madt_io_sapic *iosapic;
147
148	iosapic = (struct acpi_madt_io_sapic *)header;
149
150	if (BAD_MADT_ENTRY(iosapic, end))
151		return -EINVAL;
152
153	return iosapic_init(iosapic->address, iosapic->global_irq_base);
154}
155
156static unsigned int __initdata acpi_madt_rev;
157
158static int __init
159acpi_parse_plat_int_src(union acpi_subtable_headers * header,
160			const unsigned long end)
161{
162	struct acpi_madt_interrupt_source *plintsrc;
163	int vector;
164
165	plintsrc = (struct acpi_madt_interrupt_source *)header;
166
167	if (BAD_MADT_ENTRY(plintsrc, end))
168		return -EINVAL;
169
170	/*
171	 * Get vector assignment for this interrupt, set attributes,
172	 * and program the IOSAPIC routing table.
173	 */
174	vector = iosapic_register_platform_intr(plintsrc->type,
175						plintsrc->global_irq,
176						plintsrc->io_sapic_vector,
177						plintsrc->eid,
178						plintsrc->id,
179						((plintsrc->inti_flags & ACPI_MADT_POLARITY_MASK) ==
180						 ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
181						IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
182						((plintsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
183						 ACPI_MADT_TRIGGER_EDGE) ?
184						IOSAPIC_EDGE : IOSAPIC_LEVEL);
185
186	platform_intr_list[plintsrc->type] = vector;
187	if (acpi_madt_rev > 1) {
188		acpi_cpei_override = plintsrc->flags & ACPI_MADT_CPEI_OVERRIDE;
189	}
190
191	/*
 192	 * Save the physical id, so we can check when it is being removed
193	 */
194	acpi_cpei_phys_cpuid = ((plintsrc->id << 8) | (plintsrc->eid)) & 0xffff;
195
196	return 0;
197}
198
199#ifdef CONFIG_HOTPLUG_CPU
200unsigned int can_cpei_retarget(void)
201{
202	extern int cpe_vector;
203	extern unsigned int force_cpei_retarget;
204
 205	/*
 206	 * CPEI can be re-targeted only if the override flag is
 207	 * present (or re-targeting is forced); when running in
 208	 * polling mode it is always re-targetable.
 209	 */
210	if (cpe_vector > 0) {
211		if (acpi_cpei_override || force_cpei_retarget)
212			return 1;
213		else
214			return 0;
215	}
216	return 1;
217}
218
219unsigned int is_cpu_cpei_target(unsigned int cpu)
220{
221	unsigned int logical_id;
222
223	logical_id = cpu_logical_id(acpi_cpei_phys_cpuid);
224
225	if (logical_id == cpu)
226		return 1;
227	else
228		return 0;
229}
230
231void set_cpei_target_cpu(unsigned int cpu)
232{
233	acpi_cpei_phys_cpuid = cpu_physical_id(cpu);
234}
235#endif
236
237unsigned int get_cpei_target_cpu(void)
238{
239	return acpi_cpei_phys_cpuid;
240}
241
242static int __init
243acpi_parse_int_src_ovr(union acpi_subtable_headers * header,
244		       const unsigned long end)
245{
246	struct acpi_madt_interrupt_override *p;
247
248	p = (struct acpi_madt_interrupt_override *)header;
249
250	if (BAD_MADT_ENTRY(p, end))
251		return -EINVAL;
252
253	iosapic_override_isa_irq(p->source_irq, p->global_irq,
254				 ((p->inti_flags & ACPI_MADT_POLARITY_MASK) ==
255				  ACPI_MADT_POLARITY_ACTIVE_LOW) ?
256				 IOSAPIC_POL_LOW : IOSAPIC_POL_HIGH,
257				 ((p->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
258				 ACPI_MADT_TRIGGER_LEVEL) ?
259				 IOSAPIC_LEVEL : IOSAPIC_EDGE);
260	return 0;
261}
262
263static int __init
264acpi_parse_nmi_src(union acpi_subtable_headers * header, const unsigned long end)
265{
266	struct acpi_madt_nmi_source *nmi_src;
267
268	nmi_src = (struct acpi_madt_nmi_source *)header;
269
270	if (BAD_MADT_ENTRY(nmi_src, end))
271		return -EINVAL;
272
 273	/* TBD: Support nmi_src entries */
274	return 0;
275}
276
277static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
278{
279	if (!strncmp(oem_id, "IBM", 3) && (!strncmp(oem_table_id, "SERMOW", 6))) {
280
281		/*
282		 * Unfortunately ITC_DRIFT is not yet part of the
283		 * official SAL spec, so the ITC_DRIFT bit is not
284		 * set by the BIOS on this hardware.
285		 */
286		sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT;
287
288		cyclone_setup();
289	}
290}
291
292static int __init acpi_parse_madt(struct acpi_table_header *table)
293{
294	acpi_madt = (struct acpi_table_madt *)table;
295
296	acpi_madt_rev = acpi_madt->header.revision;
297
298	/* remember the value for reference after free_initmem() */
299#ifdef CONFIG_ITANIUM
300	has_8259 = 1;		/* Firmware on old Itanium systems is broken */
301#else
302	has_8259 = acpi_madt->flags & ACPI_MADT_PCAT_COMPAT;
303#endif
304	iosapic_system_init(has_8259);
305
306	/* Get base address of IPI Message Block */
307
308	if (acpi_madt->address)
309		ipi_base_addr = ioremap(acpi_madt->address, 0);
310
311	printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);
312
313	acpi_madt_oem_check(acpi_madt->header.oem_id,
314			    acpi_madt->header.oem_table_id);
315
316	return 0;
317}
318
319#ifdef CONFIG_ACPI_NUMA
320
321#undef SLIT_DEBUG
322
323#define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)
324
325static int __initdata srat_num_cpus;	/* number of cpus */
326static u32 pxm_flag[PXM_FLAG_LEN];
327#define pxm_bit_set(bit)	(set_bit(bit,(void *)pxm_flag))
328#define pxm_bit_test(bit)	(test_bit(bit,(void *)pxm_flag))
329static struct acpi_table_slit __initdata *slit_table;
330cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
331
332static int __init
333get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
334{
335	int pxm;
336
337	pxm = pa->proximity_domain_lo;
338	if (acpi_srat_revision >= 2)
339		pxm += pa->proximity_domain_hi[0] << 8;
340	return pxm;
341}
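A worked example of the proximity-domain assembly above, using made-up SRAT field values:

	/* With proximity_domain_lo = 0x02 and proximity_domain_hi[0] = 0x01,
	 * an SRAT of revision >= 2 yields pxm = 0x02 + (0x01 << 8) = 0x102;
	 * on a revision 1 table only the low byte is used, so pxm = 0x02. */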
342
343static int __init
344get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
345{
346	int pxm;
347
348	pxm = ma->proximity_domain;
349	if (acpi_srat_revision <= 1)
350		pxm &= 0xff;
351
352	return pxm;
353}
354
355/*
356 * ACPI 2.0 SLIT (System Locality Information Table)
357 * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
358 */
359void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
360{
361	u32 len;
362
363	len = sizeof(struct acpi_table_header) + 8
364	    + slit->locality_count * slit->locality_count;
365	if (slit->header.length != len) {
366		printk(KERN_ERR
367		       "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
368		       len, slit->header.length);
369		return;
370	}
371	slit_table = slit;
372}
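A worked instance of the length check above, assuming the 36-byte ACPI table header defined by the specification:

	/* A SLIT describing 4 localities must have header.length ==
	 * 36 + 8 + 4 * 4 == 60 bytes: the table header, the 8-byte
	 * locality count, then a 4 x 4 matrix of one-byte distances.
	 * Any other length is rejected and the table is ignored. */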
373
374void __init
375acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
376{
377	int pxm;
378
379	if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
380		return;
381
382	if (srat_num_cpus >= ARRAY_SIZE(node_cpuid)) {
383		printk_once(KERN_WARNING
384			    "node_cpuid[%ld] is too small, may not be able to use all cpus\n",
385			    ARRAY_SIZE(node_cpuid));
386		return;
387	}
388	pxm = get_processor_proximity_domain(pa);
389
390	/* record this node in proximity bitmap */
391	pxm_bit_set(pxm);
392
393	node_cpuid[srat_num_cpus].phys_id =
394	    (pa->apic_id << 8) | (pa->local_sapic_eid);
395	/* nid should be overridden as logical node id later */
396	node_cpuid[srat_num_cpus].nid = pxm;
397	cpumask_set_cpu(srat_num_cpus, &early_cpu_possible_map);
398	srat_num_cpus++;
399}
400
401int __init
402acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
403{
404	unsigned long paddr, size;
405	int pxm;
406	struct node_memblk_s *p, *q, *pend;
407
408	pxm = get_memory_proximity_domain(ma);
409
410	/* fill node memory chunk structure */
411	paddr = ma->base_address;
412	size = ma->length;
413
414	/* Ignore disabled entries */
415	if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
416		return -1;
417
418	if (num_node_memblks >= NR_NODE_MEMBLKS) {
419		pr_err("NUMA: too many memblk ranges\n");
420		return -EINVAL;
421	}
422
423	/* record this node in proximity bitmap */
424	pxm_bit_set(pxm);
425
426	/* Insertion sort based on base address */
427	pend = &node_memblk[num_node_memblks];
428	for (p = &node_memblk[0]; p < pend; p++) {
429		if (paddr < p->start_paddr)
430			break;
431	}
432	if (p < pend) {
433		for (q = pend - 1; q >= p; q--)
434			*(q + 1) = *q;
435	}
436	p->start_paddr = paddr;
437	p->size = size;
438	p->nid = pxm;
439	num_node_memblks++;
440	return 0;
441}
442
443void __init acpi_numa_fixup(void)
444{
445	int i, j, node_from, node_to;
446
447	/* If there's no SRAT, fix the phys_id and mark node 0 online */
448	if (srat_num_cpus == 0) {
449		node_set_online(0);
450		node_cpuid[0].phys_id = hard_smp_processor_id();
451		return;
452	}
453
454	/*
455	 * MCD - This can probably be dropped now.  No need for pxm ID to node ID
456	 * mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES.
457	 */
458	nodes_clear(node_online_map);
459	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
460		if (pxm_bit_test(i)) {
461			int nid = acpi_map_pxm_to_node(i);
462			node_set_online(nid);
463		}
464	}
465
466	/* set logical node id in memory chunk structure */
467	for (i = 0; i < num_node_memblks; i++)
468		node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);
469
470	/* assign memory bank numbers for each chunk on each node */
471	for_each_online_node(i) {
472		int bank;
473
474		bank = 0;
475		for (j = 0; j < num_node_memblks; j++)
476			if (node_memblk[j].nid == i)
477				node_memblk[j].bank = bank++;
478	}
479
480	/* set logical node id in cpu structure */
481	for_each_possible_early_cpu(i)
482		node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);
483
484	printk(KERN_INFO "Number of logical nodes in system = %d\n",
485	       num_online_nodes());
486	printk(KERN_INFO "Number of memory chunks in system = %d\n",
487	       num_node_memblks);
488
489	if (!slit_table) {
490		for (i = 0; i < MAX_NUMNODES; i++)
491			for (j = 0; j < MAX_NUMNODES; j++)
492				slit_distance(i, j) = i == j ?
493					LOCAL_DISTANCE : REMOTE_DISTANCE;
494		return;
495	}
496
497	memset(numa_slit, -1, sizeof(numa_slit));
498	for (i = 0; i < slit_table->locality_count; i++) {
499		if (!pxm_bit_test(i))
500			continue;
501		node_from = pxm_to_node(i);
502		for (j = 0; j < slit_table->locality_count; j++) {
503			if (!pxm_bit_test(j))
504				continue;
505			node_to = pxm_to_node(j);
506			slit_distance(node_from, node_to) =
507			    slit_table->entry[i * slit_table->locality_count + j];
508		}
509	}
510
511#ifdef SLIT_DEBUG
512	printk("ACPI 2.0 SLIT locality table:\n");
513	for_each_online_node(i) {
514		for_each_online_node(j)
515		    printk("%03d ", node_distance(i, j));
516		printk("\n");
517	}
518#endif
519}
520#endif				/* CONFIG_ACPI_NUMA */
521
522/*
523 * success: return IRQ number (>=0)
524 * failure: return < 0
525 */
526int acpi_register_gsi(struct device *dev, u32 gsi, int triggering, int polarity)
527{
528	if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
529		return gsi;
530
531	if (has_8259 && gsi < 16)
532		return isa_irq_to_vector(gsi);
533
534	return iosapic_register_intr(gsi,
535				     (polarity ==
536				      ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH :
537				     IOSAPIC_POL_LOW,
538				     (triggering ==
539				      ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE :
540				     IOSAPIC_LEVEL);
541}
542EXPORT_SYMBOL_GPL(acpi_register_gsi);
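A usage sketch for the two GSI calls above (not part of this file); the GSI number, handler, and device cookie are illustrative placeholders:

static int example_claim_gsi(irq_handler_t my_handler, void *my_dev)
{
	/* Route GSI 17 as a level-triggered, active-low interrupt. */
	int irq = acpi_register_gsi(NULL, 17, ACPI_LEVEL_SENSITIVE,
				    ACPI_ACTIVE_LOW);

	if (irq < 0)
		return irq;

	if (request_irq(irq, my_handler, IRQF_SHARED, "example-dev", my_dev)) {
		acpi_unregister_gsi(17);	/* undo the routing on failure */
		return -EBUSY;
	}
	return 0;
}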
543
544void acpi_unregister_gsi(u32 gsi)
545{
546	if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
547		return;
548
549	if (has_8259 && gsi < 16)
550		return;
551
552	iosapic_unregister_intr(gsi);
553}
554EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
555
556static int __init acpi_parse_fadt(struct acpi_table_header *table)
557{
558	struct acpi_table_header *fadt_header;
559	struct acpi_table_fadt *fadt;
560
561	fadt_header = (struct acpi_table_header *)table;
562	if (fadt_header->revision != 3)
563		return -ENODEV;	/* Only deal with ACPI 2.0 FADT */
564
565	fadt = (struct acpi_table_fadt *)fadt_header;
566
567	acpi_register_gsi(NULL, fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE,
568				 ACPI_ACTIVE_LOW);
569	return 0;
570}
571
572int __init early_acpi_boot_init(void)
573{
574	int ret;
575
576	/*
577	 * do a partial walk of MADT to determine how many CPUs
578	 * we have including offline CPUs
579	 */
580	if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
581		printk(KERN_ERR PREFIX "Can't find MADT\n");
582		return 0;
583	}
584
585	ret = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
586		acpi_parse_lsapic, NR_CPUS);
587	if (ret < 1)
588		printk(KERN_ERR PREFIX
589		       "Error parsing MADT - no LAPIC entries\n");
590	else
591		acpi_lapic = 1;
592
593#ifdef CONFIG_SMP
594	if (available_cpus == 0) {
595		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
596		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
597		smp_boot_data.cpu_phys_id[available_cpus] =
598		    hard_smp_processor_id();
599		available_cpus = 1;	/* We've got at least one of these, no? */
600	}
601	smp_boot_data.cpu_count = available_cpus;
602#endif
603	/* Make boot-up look pretty */
604	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
605	       total_cpus);
606
607	return 0;
608}
609
610int __init acpi_boot_init(void)
611{
612
613	/*
614	 * MADT
615	 * ----
 616	 * Parse the Multiple APIC Description Table (MADT), if it exists.
617	 * Note that this table provides platform SMP configuration
618	 * information -- the successor to MPS tables.
619	 */
620
621	if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
622		printk(KERN_ERR PREFIX "Can't find MADT\n");
623		goto skip_madt;
624	}
625
626	/* Local APIC */
627
628	if (acpi_table_parse_madt
629	    (ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0) < 0)
630		printk(KERN_ERR PREFIX
631		       "Error parsing LAPIC address override entry\n");
632
633	if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0)
634	    < 0)
635		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
636
637	/* I/O APIC */
638
639	if (acpi_table_parse_madt
640	    (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1) {
641		printk(KERN_ERR PREFIX
 642		       "Error parsing MADT - no IOSAPIC entries\n");
 643	}
644
645	/* System-Level Interrupt Routing */
646
647	if (acpi_table_parse_madt
648	    (ACPI_MADT_TYPE_INTERRUPT_SOURCE, acpi_parse_plat_int_src,
649	     ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
650		printk(KERN_ERR PREFIX
651		       "Error parsing platform interrupt source entry\n");
652
653	if (acpi_table_parse_madt
654	    (ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, 0) < 0)
655		printk(KERN_ERR PREFIX
656		       "Error parsing interrupt source overrides entry\n");
657
658	if (acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, 0) < 0)
659		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
660      skip_madt:
661
662	/*
663	 * FADT says whether a legacy keyboard controller is present.
664	 * The FADT also contains an SCI_INT line, by which the system
665	 * gets interrupts such as power and sleep buttons.  If it's not
 666	 * on a Legacy interrupt, it needs to be set up.
667	 */
668	if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
669		printk(KERN_ERR PREFIX "Can't find FADT\n");
670
671#ifdef CONFIG_ACPI_NUMA
672#ifdef CONFIG_SMP
673	if (srat_num_cpus == 0) {
674		int cpu, i = 1;
675		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
676			if (smp_boot_data.cpu_phys_id[cpu] !=
677			    hard_smp_processor_id())
678				node_cpuid[i++].phys_id =
679				    smp_boot_data.cpu_phys_id[cpu];
680	}
681#endif
682	build_cpu_to_node_map();
683#endif
684	return 0;
685}
686
687int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
688{
689	int tmp;
690
691	if (has_8259 && gsi < 16)
692		*irq = isa_irq_to_vector(gsi);
693	else {
694		tmp = gsi_to_irq(gsi);
695		if (tmp == -1)
696			return -1;
697		*irq = tmp;
698	}
699	return 0;
700}
701
702int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
703{
704	if (isa_irq >= 16)
705		return -1;
706	*gsi = isa_irq;
707	return 0;
708}
709
710/*
711 *  ACPI based hotplug CPU support
712 */
713#ifdef CONFIG_ACPI_HOTPLUG_CPU
714int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
715{
716#ifdef CONFIG_ACPI_NUMA
717	/*
 718	 * We don't have cpu-only-node hot-add.  If the system has an
 719	 * SRAT table, the pxm has already been found and the node is
 720	 * ready, so pxm_to_nid(pxm) is enough.  This code is for
 721	 * systems that don't have a full SRAT table covering all
 722	 * possible cpus.
723	 */
724	node_cpuid[cpu].phys_id = physid;
725	node_cpuid[cpu].nid = acpi_get_node(handle);
726#endif
727	return 0;
728}
729
730int additional_cpus __initdata = -1;
731
732static __init int setup_additional_cpus(char *s)
733{
734	if (s)
735		additional_cpus = simple_strtol(s, NULL, 0);
736
737	return 0;
738}
739
740early_param("additional_cpus", setup_additional_cpus);
741
742/*
 743 * cpu_possible_mask should be static: it cannot change as CPUs
 744 * are onlined or offlined, because per-cpu data structures are
 745 * allocated by some modules at init time and do not expect to
 746 * be resized on cpu arrival/departure.
 747 * cpu_present_mask, on the other hand, can change dynamically.
 748 * When cpu hotplug is not compiled in, we fall back to the current
 749 * behaviour, which is cpu_possible == cpu_present.
750 * - Ashok Raj
751 *
752 * Three ways to find out the number of additional hotplug CPUs:
753 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
754 * - The user can overwrite it with additional_cpus=NUM
755 * - Otherwise don't reserve additional CPUs.
756 */
757__init void prefill_possible_map(void)
758{
759	int i;
760	int possible, disabled_cpus;
761
762	disabled_cpus = total_cpus - available_cpus;
763
764 	if (additional_cpus == -1) {
765 		if (disabled_cpus > 0)
766			additional_cpus = disabled_cpus;
767 		else
768			additional_cpus = 0;
769 	}
770
771	possible = available_cpus + additional_cpus;
772
773	if (possible > nr_cpu_ids)
774		possible = nr_cpu_ids;
775
776	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
777		possible, max((possible - available_cpus), 0));
778
779	for (i = 0; i < possible; i++)
780		set_cpu_possible(i, true);
781}
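A worked instance of the sizing logic above, using made-up counts:

	/* With total_cpus = 8 and available_cpus = 6, disabled_cpus = 2;
	 * without an additional_cpus= override this makes additional_cpus = 2
	 * and possible = 6 + 2 = 8 (capped at nr_cpu_ids).  Booting with
	 * additional_cpus=4 would instead allow 6 + 4 = 10 possible CPUs. */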
782
783static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
784{
785	cpumask_t tmp_map;
786	int cpu;
787
788	cpumask_complement(&tmp_map, cpu_present_mask);
789	cpu = cpumask_first(&tmp_map);
790	if (cpu >= nr_cpu_ids)
791		return -EINVAL;
792
793	acpi_map_cpu2node(handle, cpu, physid);
794
795	set_cpu_present(cpu, true);
796	ia64_cpu_to_sapicid[cpu] = physid;
797
798	acpi_processor_set_pdc(handle);
799
800	*pcpu = cpu;
801	return (0);
802}
803
804/* wrapper to silence section mismatch warning */
805int __ref acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
806		       int *pcpu)
807{
808	return _acpi_map_lsapic(handle, physid, pcpu);
809}
810EXPORT_SYMBOL(acpi_map_cpu);
811
812int acpi_unmap_cpu(int cpu)
813{
814	ia64_cpu_to_sapicid[cpu] = -1;
815	set_cpu_present(cpu, false);
816
817#ifdef CONFIG_ACPI_NUMA
 818	/* NUMA specific cleanups */
819#endif
820
821	return (0);
822}
823EXPORT_SYMBOL(acpi_unmap_cpu);
824#endif				/* CONFIG_ACPI_HOTPLUG_CPU */
825
826#ifdef CONFIG_ACPI_NUMA
827static acpi_status acpi_map_iosapic(acpi_handle handle, u32 depth,
828				    void *context, void **ret)
829{
830	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
831	union acpi_object *obj;
832	struct acpi_madt_io_sapic *iosapic;
833	unsigned int gsi_base;
834	int node;
835
836	/* Only care about objects w/ a method that returns the MADT */
837	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
838		return AE_OK;
839
840	if (!buffer.length || !buffer.pointer)
841		return AE_OK;
842
843	obj = buffer.pointer;
844	if (obj->type != ACPI_TYPE_BUFFER ||
845	    obj->buffer.length < sizeof(*iosapic)) {
846		kfree(buffer.pointer);
847		return AE_OK;
848	}
849
850	iosapic = (struct acpi_madt_io_sapic *)obj->buffer.pointer;
851
852	if (iosapic->header.type != ACPI_MADT_TYPE_IO_SAPIC) {
853		kfree(buffer.pointer);
854		return AE_OK;
855	}
856
857	gsi_base = iosapic->global_irq_base;
858
859	kfree(buffer.pointer);
860
861	/* OK, it's an IOSAPIC MADT entry; associate it with a node */
862	node = acpi_get_node(handle);
863	if (node == NUMA_NO_NODE || !node_online(node) ||
864	    cpumask_empty(cpumask_of_node(node)))
865		return AE_OK;
866
867	/* We know a gsi to node mapping! */
868	map_iosapic_to_node(gsi_base, node);
869	return AE_OK;
870}
871
872static int __init
873acpi_map_iosapics (void)
874{
875	acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL);
876	return 0;
877}
878
879fs_initcall(acpi_map_iosapics);
880#endif				/* CONFIG_ACPI_NUMA */
881
882int __ref acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
883{
884	int err;
885
886	if ((err = iosapic_init(phys_addr, gsi_base)))
887		return err;
888
889#ifdef CONFIG_ACPI_NUMA
890	acpi_map_iosapic(handle, 0, NULL, NULL);
891#endif				/* CONFIG_ACPI_NUMA */
892
893	return 0;
894}
895
896EXPORT_SYMBOL(acpi_register_ioapic);
897
898int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
899{
900	return iosapic_remove(gsi_base);
901}
902
903EXPORT_SYMBOL(acpi_unregister_ioapic);
904
905/*
906 * acpi_suspend_lowlevel() - save kernel state and suspend.
907 *
 908 * TBD when IA64 starts to support suspend...
909 */
910int acpi_suspend_lowlevel(void) { return 0; }
v4.6
 
  1/*
  2 *  acpi.c - Architecture-Specific Low-Level ACPI Support
  3 *
  4 *  Copyright (C) 1999 VA Linux Systems
  5 *  Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
  6 *  Copyright (C) 2000, 2002-2003 Hewlett-Packard Co.
  7 *	David Mosberger-Tang <davidm@hpl.hp.com>
  8 *  Copyright (C) 2000 Intel Corp.
  9 *  Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
 10 *  Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 11 *  Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com>
 12 *  Copyright (C) 2001 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
 13 *  Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
 14 *  Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com>
 15 *
 16 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 17 *
 18 *  This program is free software; you can redistribute it and/or modify
 19 *  it under the terms of the GNU General Public License as published by
 20 *  the Free Software Foundation; either version 2 of the License, or
 21 *  (at your option) any later version.
 22 *
 23 *  This program is distributed in the hope that it will be useful,
 24 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 25 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 26 *  GNU General Public License for more details.
 27 *
 28 *  You should have received a copy of the GNU General Public License
 29 *  along with this program; if not, write to the Free Software
 30 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 31 *
 32 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 33 */
 34
 35#include <linux/module.h>
 36#include <linux/init.h>
 37#include <linux/kernel.h>
 38#include <linux/sched.h>
 39#include <linux/smp.h>
 40#include <linux/string.h>
 41#include <linux/types.h>
 42#include <linux/irq.h>
 43#include <linux/acpi.h>
 44#include <linux/efi.h>
 45#include <linux/mmzone.h>
 46#include <linux/nodemask.h>
 47#include <linux/slab.h>
 48#include <acpi/processor.h>
 49#include <asm/io.h>
 50#include <asm/iosapic.h>
 51#include <asm/machvec.h>
 52#include <asm/page.h>
 53#include <asm/numa.h>
 54#include <asm/sal.h>
 55#include <asm/cyclone.h>
 56
 57#define PREFIX			"ACPI: "
 58
 59int acpi_lapic;
 60unsigned int acpi_cpei_override;
 61unsigned int acpi_cpei_phys_cpuid;
 62
 63unsigned long acpi_wakeup_address = 0;
 64
 65#ifdef CONFIG_IA64_GENERIC
 66static unsigned long __init acpi_find_rsdp(void)
 67{
 68	unsigned long rsdp_phys = 0;
 69
 70	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
 71		rsdp_phys = efi.acpi20;
 72	else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
 73		printk(KERN_WARNING PREFIX
 74		       "v1.0/r0.71 tables no longer supported\n");
 75	return rsdp_phys;
 76}
 77
 78const char __init *
 79acpi_get_sysname(void)
 80{
 81	unsigned long rsdp_phys;
 82	struct acpi_table_rsdp *rsdp;
 83	struct acpi_table_xsdt *xsdt;
 84	struct acpi_table_header *hdr;
 85#ifdef CONFIG_INTEL_IOMMU
 86	u64 i, nentries;
 87#endif
 88
 89	rsdp_phys = acpi_find_rsdp();
 90	if (!rsdp_phys) {
 91		printk(KERN_ERR
 92		       "ACPI 2.0 RSDP not found, default to \"dig\"\n");
 93		return "dig";
 94	}
 95
 96	rsdp = (struct acpi_table_rsdp *)__va(rsdp_phys);
 97	if (strncmp(rsdp->signature, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)) {
 98		printk(KERN_ERR
 99		       "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
100		return "dig";
101	}
102
103	xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_physical_address);
104	hdr = &xsdt->header;
105	if (strncmp(hdr->signature, ACPI_SIG_XSDT, sizeof(ACPI_SIG_XSDT) - 1)) {
106		printk(KERN_ERR
107		       "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
108		return "dig";
109	}
110
111	if (!strcmp(hdr->oem_id, "HP")) {
112		return "hpzx1";
113	} else if (!strcmp(hdr->oem_id, "SGI")) {
114		if (!strcmp(hdr->oem_table_id + 4, "UV"))
115			return "uv";
116		else
117			return "sn2";
118	}
119
120#ifdef CONFIG_INTEL_IOMMU
121	/* Look for Intel IOMMU */
122	nentries = (hdr->length - sizeof(*hdr)) /
123			 sizeof(xsdt->table_offset_entry[0]);
124	for (i = 0; i < nentries; i++) {
125		hdr = __va(xsdt->table_offset_entry[i]);
126		if (strncmp(hdr->signature, ACPI_SIG_DMAR,
127			sizeof(ACPI_SIG_DMAR) - 1) == 0)
128			return "dig_vtd";
129	}
130#endif
131
132	return "dig";
133}
134#endif /* CONFIG_IA64_GENERIC */
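A worked example of the XSDT walk in acpi_get_sysname() above, assuming the 36-byte ACPI table header and 8-byte (u64) entries:

	/* An XSDT with header.length = 36 + 5 * 8 = 76 bytes holds
	 * nentries = (76 - 36) / 8 = 5 table pointers; each pointer is
	 * mapped with __va() and its 4-byte signature compared against
	 * ACPI_SIG_DMAR ("DMAR") to detect an Intel IOMMU. */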
135
136#define ACPI_MAX_PLATFORM_INTERRUPTS	256
137
138/* Array to record platform interrupt vectors for generic interrupt routing. */
139int platform_intr_list[ACPI_MAX_PLATFORM_INTERRUPTS] = {
140	[0 ... ACPI_MAX_PLATFORM_INTERRUPTS - 1] = -1
141};
142
143enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC;
144
145/*
146 * Interrupt routing API for device drivers.  Provides interrupt vector for
147 * a generic platform event.  Currently only CPEI is implemented.
148 */
149int acpi_request_vector(u32 int_type)
150{
151	int vector = -1;
152
153	if (int_type < ACPI_MAX_PLATFORM_INTERRUPTS) {
154		/* corrected platform error interrupt */
155		vector = platform_intr_list[int_type];
156	} else
157		printk(KERN_ERR
158		       "acpi_request_vector(): invalid interrupt type\n");
159	return vector;
160}
161
162char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
163{
164	return __va(phys_addr);
165}
166
167void __init __acpi_unmap_table(char *map, unsigned long size)
168{
169}
170
171/* --------------------------------------------------------------------------
172                            Boot-time Table Parsing
173   -------------------------------------------------------------------------- */
174
175static int available_cpus __initdata;
176struct acpi_table_madt *acpi_madt __initdata;
177static u8 has_8259;
178
179static int __init
180acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
181			  const unsigned long end)
182{
183	struct acpi_madt_local_apic_override *lapic;
184
185	lapic = (struct acpi_madt_local_apic_override *)header;
186
187	if (BAD_MADT_ENTRY(lapic, end))
188		return -EINVAL;
189
190	if (lapic->address) {
191		iounmap(ipi_base_addr);
192		ipi_base_addr = ioremap(lapic->address, 0);
193	}
194	return 0;
195}
196
197static int __init
198acpi_parse_lsapic(struct acpi_subtable_header * header, const unsigned long end)
199{
200	struct acpi_madt_local_sapic *lsapic;
201
202	lsapic = (struct acpi_madt_local_sapic *)header;
203
 204	/* Skip BAD_MADT_ENTRY check, as lsapic size could vary */
205
206	if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
207#ifdef CONFIG_SMP
208		smp_boot_data.cpu_phys_id[available_cpus] =
209		    (lsapic->id << 8) | lsapic->eid;
210#endif
211		++available_cpus;
212	}
213
214	total_cpus++;
215	return 0;
216}
217
218static int __init
219acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
220{
221	struct acpi_madt_local_apic_nmi *lacpi_nmi;
222
223	lacpi_nmi = (struct acpi_madt_local_apic_nmi *)header;
224
225	if (BAD_MADT_ENTRY(lacpi_nmi, end))
226		return -EINVAL;
227
228	/* TBD: Support lapic_nmi entries */
229	return 0;
230}
231
232static int __init
233acpi_parse_iosapic(struct acpi_subtable_header * header, const unsigned long end)
234{
235	struct acpi_madt_io_sapic *iosapic;
236
237	iosapic = (struct acpi_madt_io_sapic *)header;
238
239	if (BAD_MADT_ENTRY(iosapic, end))
240		return -EINVAL;
241
242	return iosapic_init(iosapic->address, iosapic->global_irq_base);
243}
244
245static unsigned int __initdata acpi_madt_rev;
246
247static int __init
248acpi_parse_plat_int_src(struct acpi_subtable_header * header,
249			const unsigned long end)
250{
251	struct acpi_madt_interrupt_source *plintsrc;
252	int vector;
253
254	plintsrc = (struct acpi_madt_interrupt_source *)header;
255
256	if (BAD_MADT_ENTRY(plintsrc, end))
257		return -EINVAL;
258
259	/*
260	 * Get vector assignment for this interrupt, set attributes,
261	 * and program the IOSAPIC routing table.
262	 */
263	vector = iosapic_register_platform_intr(plintsrc->type,
264						plintsrc->global_irq,
265						plintsrc->io_sapic_vector,
266						plintsrc->eid,
267						plintsrc->id,
268						((plintsrc->inti_flags & ACPI_MADT_POLARITY_MASK) ==
269						 ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
270						IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
271						((plintsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
272						 ACPI_MADT_TRIGGER_EDGE) ?
273						IOSAPIC_EDGE : IOSAPIC_LEVEL);
274
275	platform_intr_list[plintsrc->type] = vector;
276	if (acpi_madt_rev > 1) {
277		acpi_cpei_override = plintsrc->flags & ACPI_MADT_CPEI_OVERRIDE;
278	}
279
280	/*
 281	 * Save the physical id, so we can check when it is being removed
282	 */
283	acpi_cpei_phys_cpuid = ((plintsrc->id << 8) | (plintsrc->eid)) & 0xffff;
284
285	return 0;
286}
287
288#ifdef CONFIG_HOTPLUG_CPU
289unsigned int can_cpei_retarget(void)
290{
291	extern int cpe_vector;
292	extern unsigned int force_cpei_retarget;
293
 294	/*
 295	 * CPEI can be re-targeted only if the override flag is
 296	 * present (or re-targeting is forced); when running in
 297	 * polling mode it is always re-targetable.
 298	 */
299	if (cpe_vector > 0) {
300		if (acpi_cpei_override || force_cpei_retarget)
301			return 1;
302		else
303			return 0;
304	}
305	return 1;
306}
307
308unsigned int is_cpu_cpei_target(unsigned int cpu)
309{
310	unsigned int logical_id;
311
312	logical_id = cpu_logical_id(acpi_cpei_phys_cpuid);
313
314	if (logical_id == cpu)
315		return 1;
316	else
317		return 0;
318}
319
320void set_cpei_target_cpu(unsigned int cpu)
321{
322	acpi_cpei_phys_cpuid = cpu_physical_id(cpu);
323}
324#endif
325
326unsigned int get_cpei_target_cpu(void)
327{
328	return acpi_cpei_phys_cpuid;
329}
330
331static int __init
332acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
333		       const unsigned long end)
334{
335	struct acpi_madt_interrupt_override *p;
336
337	p = (struct acpi_madt_interrupt_override *)header;
338
339	if (BAD_MADT_ENTRY(p, end))
340		return -EINVAL;
341
342	iosapic_override_isa_irq(p->source_irq, p->global_irq,
343				 ((p->inti_flags & ACPI_MADT_POLARITY_MASK) ==
344				  ACPI_MADT_POLARITY_ACTIVE_LOW) ?
345				 IOSAPIC_POL_LOW : IOSAPIC_POL_HIGH,
346				 ((p->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
347				 ACPI_MADT_TRIGGER_LEVEL) ?
348				 IOSAPIC_LEVEL : IOSAPIC_EDGE);
349	return 0;
350}
351
352static int __init
353acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
354{
355	struct acpi_madt_nmi_source *nmi_src;
356
357	nmi_src = (struct acpi_madt_nmi_source *)header;
358
359	if (BAD_MADT_ENTRY(nmi_src, end))
360		return -EINVAL;
361
 362	/* TBD: Support nmi_src entries */
363	return 0;
364}
365
366static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
367{
368	if (!strncmp(oem_id, "IBM", 3) && (!strncmp(oem_table_id, "SERMOW", 6))) {
369
370		/*
371		 * Unfortunately ITC_DRIFT is not yet part of the
372		 * official SAL spec, so the ITC_DRIFT bit is not
373		 * set by the BIOS on this hardware.
374		 */
375		sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT;
376
377		cyclone_setup();
378	}
379}
380
381static int __init acpi_parse_madt(struct acpi_table_header *table)
382{
383	acpi_madt = (struct acpi_table_madt *)table;
384
385	acpi_madt_rev = acpi_madt->header.revision;
386
387	/* remember the value for reference after free_initmem() */
388#ifdef CONFIG_ITANIUM
389	has_8259 = 1;		/* Firmware on old Itanium systems is broken */
390#else
391	has_8259 = acpi_madt->flags & ACPI_MADT_PCAT_COMPAT;
392#endif
393	iosapic_system_init(has_8259);
394
395	/* Get base address of IPI Message Block */
396
397	if (acpi_madt->address)
398		ipi_base_addr = ioremap(acpi_madt->address, 0);
399
400	printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);
401
402	acpi_madt_oem_check(acpi_madt->header.oem_id,
403			    acpi_madt->header.oem_table_id);
404
405	return 0;
406}
407
408#ifdef CONFIG_ACPI_NUMA
409
410#undef SLIT_DEBUG
411
412#define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)
413
414static int __initdata srat_num_cpus;	/* number of cpus */
415static u32 pxm_flag[PXM_FLAG_LEN];
416#define pxm_bit_set(bit)	(set_bit(bit,(void *)pxm_flag))
417#define pxm_bit_test(bit)	(test_bit(bit,(void *)pxm_flag))
418static struct acpi_table_slit __initdata *slit_table;
419cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
420
421static int __init
422get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
423{
424	int pxm;
425
426	pxm = pa->proximity_domain_lo;
427	if (ia64_platform_is("sn2") || acpi_srat_revision >= 2)
428		pxm += pa->proximity_domain_hi[0] << 8;
429	return pxm;
430}
431
432static int __init
433get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
434{
435	int pxm;
436
437	pxm = ma->proximity_domain;
438	if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1)
439		pxm &= 0xff;
440
441	return pxm;
442}
443
444/*
445 * ACPI 2.0 SLIT (System Locality Information Table)
446 * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
447 */
448void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
449{
450	u32 len;
451
452	len = sizeof(struct acpi_table_header) + 8
453	    + slit->locality_count * slit->locality_count;
454	if (slit->header.length != len) {
455		printk(KERN_ERR
456		       "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
457		       len, slit->header.length);
458		return;
459	}
460	slit_table = slit;
461}
462
463void __init
464acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
465{
466	int pxm;
467
468	if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
469		return;
470
471	if (srat_num_cpus >= ARRAY_SIZE(node_cpuid)) {
472		printk_once(KERN_WARNING
473			    "node_cpuid[%ld] is too small, may not be able to use all cpus\n",
474			    ARRAY_SIZE(node_cpuid));
475		return;
476	}
477	pxm = get_processor_proximity_domain(pa);
478
479	/* record this node in proximity bitmap */
480	pxm_bit_set(pxm);
481
482	node_cpuid[srat_num_cpus].phys_id =
483	    (pa->apic_id << 8) | (pa->local_sapic_eid);
484	/* nid should be overridden as logical node id later */
485	node_cpuid[srat_num_cpus].nid = pxm;
486	cpumask_set_cpu(srat_num_cpus, &early_cpu_possible_map);
487	srat_num_cpus++;
488}
489
490int __init
491acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
492{
493	unsigned long paddr, size;
494	int pxm;
495	struct node_memblk_s *p, *q, *pend;
496
497	pxm = get_memory_proximity_domain(ma);
498
499	/* fill node memory chunk structure */
500	paddr = ma->base_address;
501	size = ma->length;
502
503	/* Ignore disabled entries */
504	if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
505		return -1;
 506
 507	/* record this node in proximity bitmap */
508	pxm_bit_set(pxm);
509
510	/* Insertion sort based on base address */
511	pend = &node_memblk[num_node_memblks];
512	for (p = &node_memblk[0]; p < pend; p++) {
513		if (paddr < p->start_paddr)
514			break;
515	}
516	if (p < pend) {
517		for (q = pend - 1; q >= p; q--)
518			*(q + 1) = *q;
519	}
520	p->start_paddr = paddr;
521	p->size = size;
522	p->nid = pxm;
523	num_node_memblks++;
524	return 0;
525}
526
527void __init acpi_numa_arch_fixup(void)
528{
529	int i, j, node_from, node_to;
530
531	/* If there's no SRAT, fix the phys_id and mark node 0 online */
532	if (srat_num_cpus == 0) {
533		node_set_online(0);
534		node_cpuid[0].phys_id = hard_smp_processor_id();
535		return;
536	}
537
538	/*
539	 * MCD - This can probably be dropped now.  No need for pxm ID to node ID
540	 * mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES.
541	 */
542	nodes_clear(node_online_map);
543	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
544		if (pxm_bit_test(i)) {
545			int nid = acpi_map_pxm_to_node(i);
546			node_set_online(nid);
547		}
548	}
549
550	/* set logical node id in memory chunk structure */
551	for (i = 0; i < num_node_memblks; i++)
552		node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);
553
554	/* assign memory bank numbers for each chunk on each node */
555	for_each_online_node(i) {
556		int bank;
557
558		bank = 0;
559		for (j = 0; j < num_node_memblks; j++)
560			if (node_memblk[j].nid == i)
561				node_memblk[j].bank = bank++;
562	}
563
564	/* set logical node id in cpu structure */
565	for_each_possible_early_cpu(i)
566		node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);
567
568	printk(KERN_INFO "Number of logical nodes in system = %d\n",
569	       num_online_nodes());
570	printk(KERN_INFO "Number of memory chunks in system = %d\n",
571	       num_node_memblks);
572
573	if (!slit_table) {
574		for (i = 0; i < MAX_NUMNODES; i++)
575			for (j = 0; j < MAX_NUMNODES; j++)
576				node_distance(i, j) = i == j ? LOCAL_DISTANCE :
577							REMOTE_DISTANCE;
578		return;
579	}
580
581	memset(numa_slit, -1, sizeof(numa_slit));
582	for (i = 0; i < slit_table->locality_count; i++) {
583		if (!pxm_bit_test(i))
584			continue;
585		node_from = pxm_to_node(i);
586		for (j = 0; j < slit_table->locality_count; j++) {
587			if (!pxm_bit_test(j))
588				continue;
589			node_to = pxm_to_node(j);
590			node_distance(node_from, node_to) =
591			    slit_table->entry[i * slit_table->locality_count + j];
592		}
593	}
594
595#ifdef SLIT_DEBUG
596	printk("ACPI 2.0 SLIT locality table:\n");
597	for_each_online_node(i) {
598		for_each_online_node(j)
599		    printk("%03d ", node_distance(i, j));
600		printk("\n");
601	}
602#endif
603}
604#endif				/* CONFIG_ACPI_NUMA */
605
606/*
607 * success: return IRQ number (>=0)
608 * failure: return < 0
609 */
610int acpi_register_gsi(struct device *dev, u32 gsi, int triggering, int polarity)
611{
612	if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
613		return gsi;
614
615	if (has_8259 && gsi < 16)
616		return isa_irq_to_vector(gsi);
617
618	return iosapic_register_intr(gsi,
619				     (polarity ==
620				      ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH :
621				     IOSAPIC_POL_LOW,
622				     (triggering ==
623				      ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE :
624				     IOSAPIC_LEVEL);
625}
626EXPORT_SYMBOL_GPL(acpi_register_gsi);
627
628void acpi_unregister_gsi(u32 gsi)
629{
630	if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
631		return;
632
633	if (has_8259 && gsi < 16)
634		return;
635
636	iosapic_unregister_intr(gsi);
637}
638EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
639
640static int __init acpi_parse_fadt(struct acpi_table_header *table)
641{
642	struct acpi_table_header *fadt_header;
643	struct acpi_table_fadt *fadt;
644
645	fadt_header = (struct acpi_table_header *)table;
646	if (fadt_header->revision != 3)
647		return -ENODEV;	/* Only deal with ACPI 2.0 FADT */
648
649	fadt = (struct acpi_table_fadt *)fadt_header;
650
651	acpi_register_gsi(NULL, fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE,
652				 ACPI_ACTIVE_LOW);
653	return 0;
654}
655
656int __init early_acpi_boot_init(void)
657{
658	int ret;
659
660	/*
661	 * do a partial walk of MADT to determine how many CPUs
662	 * we have including offline CPUs
663	 */
664	if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
665		printk(KERN_ERR PREFIX "Can't find MADT\n");
666		return 0;
667	}
668
669	ret = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
670		acpi_parse_lsapic, NR_CPUS);
671	if (ret < 1)
672		printk(KERN_ERR PREFIX
673		       "Error parsing MADT - no LAPIC entries\n");
674	else
675		acpi_lapic = 1;
676
677#ifdef CONFIG_SMP
678	if (available_cpus == 0) {
679		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
680		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
681		smp_boot_data.cpu_phys_id[available_cpus] =
682		    hard_smp_processor_id();
683		available_cpus = 1;	/* We've got at least one of these, no? */
684	}
685	smp_boot_data.cpu_count = available_cpus;
686#endif
687	/* Make boot-up look pretty */
688	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
689	       total_cpus);
690
691	return 0;
692}
693
694int __init acpi_boot_init(void)
695{
696
697	/*
698	 * MADT
699	 * ----
 700	 * Parse the Multiple APIC Description Table (MADT), if it exists.
701	 * Note that this table provides platform SMP configuration
702	 * information -- the successor to MPS tables.
703	 */
704
705	if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
706		printk(KERN_ERR PREFIX "Can't find MADT\n");
707		goto skip_madt;
708	}
709
710	/* Local APIC */
711
712	if (acpi_table_parse_madt
713	    (ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0) < 0)
714		printk(KERN_ERR PREFIX
715		       "Error parsing LAPIC address override entry\n");
716
717	if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0)
718	    < 0)
719		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
720
721	/* I/O APIC */
722
723	if (acpi_table_parse_madt
724	    (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1) {
725		if (!ia64_platform_is("sn2"))
726			printk(KERN_ERR PREFIX
727			       "Error parsing MADT - no IOSAPIC entries\n");
728	}
729
730	/* System-Level Interrupt Routing */
731
732	if (acpi_table_parse_madt
733	    (ACPI_MADT_TYPE_INTERRUPT_SOURCE, acpi_parse_plat_int_src,
734	     ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
735		printk(KERN_ERR PREFIX
736		       "Error parsing platform interrupt source entry\n");
737
738	if (acpi_table_parse_madt
739	    (ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, 0) < 0)
740		printk(KERN_ERR PREFIX
741		       "Error parsing interrupt source overrides entry\n");
742
743	if (acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, 0) < 0)
744		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
745      skip_madt:
746
747	/*
748	 * FADT says whether a legacy keyboard controller is present.
749	 * The FADT also contains an SCI_INT line, by which the system
750	 * gets interrupts such as power and sleep buttons.  If it's not
 751	 * on a Legacy interrupt, it needs to be set up.
752	 */
753	if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
754		printk(KERN_ERR PREFIX "Can't find FADT\n");
755
756#ifdef CONFIG_ACPI_NUMA
757#ifdef CONFIG_SMP
758	if (srat_num_cpus == 0) {
759		int cpu, i = 1;
760		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
761			if (smp_boot_data.cpu_phys_id[cpu] !=
762			    hard_smp_processor_id())
763				node_cpuid[i++].phys_id =
764				    smp_boot_data.cpu_phys_id[cpu];
765	}
766#endif
767	build_cpu_to_node_map();
768#endif
769	return 0;
770}
771
772int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
773{
774	int tmp;
775
776	if (has_8259 && gsi < 16)
777		*irq = isa_irq_to_vector(gsi);
778	else {
779		tmp = gsi_to_irq(gsi);
780		if (tmp == -1)
781			return -1;
782		*irq = tmp;
783	}
784	return 0;
785}
786
787int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
788{
789	if (isa_irq >= 16)
790		return -1;
791	*gsi = isa_irq;
792	return 0;
793}
794
795/*
796 *  ACPI based hotplug CPU support
797 */
798#ifdef CONFIG_ACPI_HOTPLUG_CPU
799static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
800{
801#ifdef CONFIG_ACPI_NUMA
802	/*
 803	 * We don't have cpu-only-node hot-add.  If the system has an
 804	 * SRAT table, the pxm has already been found and the node is
 805	 * ready, so pxm_to_nid(pxm) is enough.  This code is for
 806	 * systems that don't have a full SRAT table covering all
 807	 * possible cpus.
808	 */
809	node_cpuid[cpu].phys_id = physid;
810	node_cpuid[cpu].nid = acpi_get_node(handle);
811#endif
812	return 0;
813}
814
815int additional_cpus __initdata = -1;
816
817static __init int setup_additional_cpus(char *s)
818{
819	if (s)
820		additional_cpus = simple_strtol(s, NULL, 0);
821
822	return 0;
823}
824
825early_param("additional_cpus", setup_additional_cpus);
826
827/*
 828 * cpu_possible_mask should be static: it cannot change as CPUs
 829 * are onlined or offlined, because per-cpu data structures are
 830 * allocated by some modules at init time and do not expect to
 831 * be resized on cpu arrival/departure.
 832 * cpu_present_mask, on the other hand, can change dynamically.
 833 * When cpu hotplug is not compiled in, we fall back to the current
 834 * behaviour, which is cpu_possible == cpu_present.
835 * - Ashok Raj
836 *
837 * Three ways to find out the number of additional hotplug CPUs:
838 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
839 * - The user can overwrite it with additional_cpus=NUM
840 * - Otherwise don't reserve additional CPUs.
841 */
842__init void prefill_possible_map(void)
843{
844	int i;
845	int possible, disabled_cpus;
846
847	disabled_cpus = total_cpus - available_cpus;
848
849 	if (additional_cpus == -1) {
850 		if (disabled_cpus > 0)
851			additional_cpus = disabled_cpus;
852 		else
853			additional_cpus = 0;
854 	}
855
856	possible = available_cpus + additional_cpus;
857
858	if (possible > nr_cpu_ids)
859		possible = nr_cpu_ids;
860
861	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
862		possible, max((possible - available_cpus), 0));
863
864	for (i = 0; i < possible; i++)
865		set_cpu_possible(i, true);
866}
867
868static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
869{
870	cpumask_t tmp_map;
871	int cpu;
872
873	cpumask_complement(&tmp_map, cpu_present_mask);
874	cpu = cpumask_first(&tmp_map);
875	if (cpu >= nr_cpu_ids)
876		return -EINVAL;
877
878	acpi_map_cpu2node(handle, cpu, physid);
879
880	set_cpu_present(cpu, true);
881	ia64_cpu_to_sapicid[cpu] = physid;
882
883	acpi_processor_set_pdc(handle);
884
885	*pcpu = cpu;
886	return (0);
887}
888
889/* wrapper to silence section mismatch warning */
 890int __ref acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu)
 891{
892	return _acpi_map_lsapic(handle, physid, pcpu);
893}
894EXPORT_SYMBOL(acpi_map_cpu);
895
896int acpi_unmap_cpu(int cpu)
897{
898	ia64_cpu_to_sapicid[cpu] = -1;
899	set_cpu_present(cpu, false);
900
901#ifdef CONFIG_ACPI_NUMA
 902	/* NUMA specific cleanups */
903#endif
904
905	return (0);
906}
907EXPORT_SYMBOL(acpi_unmap_cpu);
908#endif				/* CONFIG_ACPI_HOTPLUG_CPU */
909
910#ifdef CONFIG_ACPI_NUMA
911static acpi_status acpi_map_iosapic(acpi_handle handle, u32 depth,
912				    void *context, void **ret)
913{
914	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
915	union acpi_object *obj;
916	struct acpi_madt_io_sapic *iosapic;
917	unsigned int gsi_base;
918	int node;
919
920	/* Only care about objects w/ a method that returns the MADT */
921	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
922		return AE_OK;
923
924	if (!buffer.length || !buffer.pointer)
925		return AE_OK;
926
927	obj = buffer.pointer;
928	if (obj->type != ACPI_TYPE_BUFFER ||
929	    obj->buffer.length < sizeof(*iosapic)) {
930		kfree(buffer.pointer);
931		return AE_OK;
932	}
933
934	iosapic = (struct acpi_madt_io_sapic *)obj->buffer.pointer;
935
936	if (iosapic->header.type != ACPI_MADT_TYPE_IO_SAPIC) {
937		kfree(buffer.pointer);
938		return AE_OK;
939	}
940
941	gsi_base = iosapic->global_irq_base;
942
943	kfree(buffer.pointer);
944
945	/* OK, it's an IOSAPIC MADT entry; associate it with a node */
946	node = acpi_get_node(handle);
947	if (node == NUMA_NO_NODE || !node_online(node) ||
948	    cpumask_empty(cpumask_of_node(node)))
949		return AE_OK;
950
951	/* We know a gsi to node mapping! */
952	map_iosapic_to_node(gsi_base, node);
953	return AE_OK;
954}
955
956static int __init
957acpi_map_iosapics (void)
958{
959	acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL);
960	return 0;
961}
962
963fs_initcall(acpi_map_iosapics);
964#endif				/* CONFIG_ACPI_NUMA */
965
966int __ref acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
967{
968	int err;
969
970	if ((err = iosapic_init(phys_addr, gsi_base)))
971		return err;
972
973#ifdef CONFIG_ACPI_NUMA
974	acpi_map_iosapic(handle, 0, NULL, NULL);
975#endif				/* CONFIG_ACPI_NUMA */
976
977	return 0;
978}
979
980EXPORT_SYMBOL(acpi_register_ioapic);
981
982int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
983{
984	return iosapic_remove(gsi_base);
985}
986
987EXPORT_SYMBOL(acpi_unregister_ioapic);
988
989/*
990 * acpi_suspend_lowlevel() - save kernel state and suspend.
991 *
 992 * TBD when IA64 starts to support suspend...
993 */
994int acpi_suspend_lowlevel(void) { return 0; }