/*
 *  acpi.c - Architecture-Specific Low-Level ACPI Support
 *
 *  Copyright (C) 1999 VA Linux Systems
 *  Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
 *  Copyright (C) 2000, 2002-2003 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *  Copyright (C) 2000 Intel Corp.
 *  Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
 *  Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com>
 *  Copyright (C) 2001 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
 *  Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <asm/io.h>
#include <asm/iosapic.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/numa.h>
#include <asm/sal.h>
#include <asm/cyclone.h>

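/*
 * Sanity-check a MADT subtable: reject a NULL entry, an entry that runs
 * past the end of the table, or an entry whose recorded length does not
 * match the expected structure size.
 */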
#define BAD_MADT_ENTRY(entry, end) (                                        \
		(!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
		((acpi_table_entry_header *)entry)->length != sizeof(*entry))

#define PREFIX			"ACPI: "

void (*pm_idle) (void);
EXPORT_SYMBOL(pm_idle);
void (*pm_power_off) (void);
EXPORT_SYMBOL(pm_power_off);

unsigned char acpi_kbd_controller_present = 1;
unsigned char acpi_legacy_devices;

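/* Map from ACPI processor IDs to local SAPIC IDs; -1 marks unused slots. */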
#define MAX_SAPICS 256
u16 ia64_acpiid_to_sapicid[MAX_SAPICS] =
	{ [0 ... MAX_SAPICS - 1] = -1 };
EXPORT_SYMBOL(ia64_acpiid_to_sapicid);

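/*
 * Select the machine vector name ("dig", "hpzx1", "sn2", ...).  A generic
 * kernel derives it from the RSDP/XSDT OEM ID; otherwise it is fixed at
 * compile time by the platform config option.
 */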
const char *
acpi_get_sysname (void)
{
#ifdef CONFIG_IA64_GENERIC
	unsigned long rsdp_phys;
	struct acpi20_table_rsdp *rsdp;
	struct acpi_table_xsdt *xsdt;
	struct acpi_table_header *hdr;

	rsdp_phys = acpi_find_rsdp();
	if (!rsdp_phys) {
		printk(KERN_ERR "ACPI 2.0 RSDP not found, default to \"dig\"\n");
		return "dig";
	}

	rsdp = (struct acpi20_table_rsdp *) __va(rsdp_phys);
	if (strncmp(rsdp->signature, RSDP_SIG, sizeof(RSDP_SIG) - 1)) {
		printk(KERN_ERR "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
		return "dig";
	}

	xsdt = (struct acpi_table_xsdt *) __va(rsdp->xsdt_address);
	hdr = &xsdt->header;
	if (strncmp(hdr->signature, XSDT_SIG, sizeof(XSDT_SIG) - 1)) {
		printk(KERN_ERR "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
		return "dig";
	}

	if (!strcmp(hdr->oem_id, "HP")) {
		return "hpzx1";
	}
	else if (!strcmp(hdr->oem_id, "SGI")) {
		return "sn2";
	}

	return "dig";
#else
# if defined (CONFIG_IA64_HP_SIM)
	return "hpsim";
# elif defined (CONFIG_IA64_HP_ZX1)
	return "hpzx1";
# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
	return "hpzx1_swiotlb";
# elif defined (CONFIG_IA64_SGI_SN2)
	return "sn2";
# elif defined (CONFIG_IA64_DIG)
	return "dig";
# else
#	error Unknown platform.  Fix acpi.c.
# endif
#endif
}

#ifdef CONFIG_ACPI_BOOT

#define ACPI_MAX_PLATFORM_INTERRUPTS	256

/* Array to record platform interrupt vectors for generic interrupt routing. */
int platform_intr_list[ACPI_MAX_PLATFORM_INTERRUPTS] = {
	[0 ... ACPI_MAX_PLATFORM_INTERRUPTS - 1] = -1
};

enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC;

/*
 * Interrupt routing API for device drivers.  Provides interrupt vector for
 * a generic platform event.  Currently only CPEI is implemented.
 */
int
acpi_request_vector (u32 int_type)
{
	int vector = -1;

	if (int_type < ACPI_MAX_PLATFORM_INTERRUPTS) {
		/* corrected platform error interrupt */
		vector = platform_intr_list[int_type];
	} else
		printk(KERN_ERR "acpi_request_vector(): invalid interrupt type\n");
	return vector;
}

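/*
 * ACPI tables are simply accessed through the kernel's identity mapping
 * (__va()), so no temporary mapping needs to be set up or torn down.
 */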
char *
__acpi_map_table (unsigned long phys_addr, unsigned long size)
{
	return __va(phys_addr);
}

/* --------------------------------------------------------------------------
                            Boot-time Table Parsing
   -------------------------------------------------------------------------- */

static int			total_cpus __initdata;
static int			available_cpus __initdata;
struct acpi_table_madt *	acpi_madt __initdata;
static u8			has_8259;


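/*
 * MADT: local APIC address override.  If the firmware supplies a non-zero
 * address, remap the IPI message block to it.
 */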
static int __init
acpi_parse_lapic_addr_ovr (
	acpi_table_entry_header *header, const unsigned long end)
{
	struct acpi_table_lapic_addr_ovr *lapic;

	lapic = (struct acpi_table_lapic_addr_ovr *) header;

	if (BAD_MADT_ENTRY(lapic, end))
		return -EINVAL;

	if (lapic->address) {
		iounmap(ipi_base_addr);
		ipi_base_addr = ioremap(lapic->address, 0);
	}
	return 0;
}


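/*
 * MADT: local SAPIC entry.  Record the physical (id, eid) pair of each
 * enabled CPU for SMP boot and for the ACPI-ID-to-SAPIC-ID map.
 */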
static int __init
acpi_parse_lsapic (acpi_table_entry_header *header, const unsigned long end)
{
	struct acpi_table_lsapic *lsapic;

	lsapic = (struct acpi_table_lsapic *) header;

	if (BAD_MADT_ENTRY(lsapic, end))
		return -EINVAL;

	if (lsapic->flags.enabled) {
#ifdef CONFIG_SMP
		smp_boot_data.cpu_phys_id[available_cpus] = (lsapic->id << 8) | lsapic->eid;
#endif
		ia64_acpiid_to_sapicid[lsapic->acpi_id] = (lsapic->id << 8) | lsapic->eid;
		++available_cpus;
	}

	total_cpus++;
	return 0;
}


static int __init
acpi_parse_lapic_nmi (acpi_table_entry_header *header, const unsigned long end)
{
	struct acpi_table_lapic_nmi *lacpi_nmi;

	lacpi_nmi = (struct acpi_table_lapic_nmi*) header;

	if (BAD_MADT_ENTRY(lacpi_nmi, end))
		return -EINVAL;

	/* TBD: Support lapic_nmi entries */
	return 0;
}


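/*
 * MADT: I/O SAPIC entry.  Register the IOSAPIC and its GSI base with the
 * IOSAPIC layer.
 */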
static int __init
acpi_parse_iosapic (acpi_table_entry_header *header, const unsigned long end)
{
	struct acpi_table_iosapic *iosapic;

	iosapic = (struct acpi_table_iosapic *) header;

	if (BAD_MADT_ENTRY(iosapic, end))
		return -EINVAL;

	iosapic_init(iosapic->address, iosapic->global_irq_base);

	return 0;
}


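/*
 * MADT: platform interrupt source (e.g. PMI, INIT, CPEI).  Route the
 * interrupt through the IOSAPIC and remember the assigned vector so that
 * acpi_request_vector() can hand it out later.
 */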
static int __init
acpi_parse_plat_int_src (
	acpi_table_entry_header *header, const unsigned long end)
{
	struct acpi_table_plat_int_src *plintsrc;
	int vector;

	plintsrc = (struct acpi_table_plat_int_src *) header;

	if (BAD_MADT_ENTRY(plintsrc, end))
		return -EINVAL;

	/*
	 * Get vector assignment for this interrupt, set attributes,
	 * and program the IOSAPIC routing table.
	 */
	vector = iosapic_register_platform_intr(plintsrc->type,
						plintsrc->global_irq,
						plintsrc->iosapic_vector,
						plintsrc->eid,
						plintsrc->id,
						(plintsrc->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
						(plintsrc->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);

	platform_intr_list[plintsrc->type] = vector;
	return 0;
}


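/*
 * MADT: interrupt source override.  Re-route an ISA IRQ to the given GSI
 * with the specified polarity and trigger mode.
 */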
static int __init
acpi_parse_int_src_ovr (
	acpi_table_entry_header *header, const unsigned long end)
{
	struct acpi_table_int_src_ovr *p;

	p = (struct acpi_table_int_src_ovr *) header;

	if (BAD_MADT_ENTRY(p, end))
		return -EINVAL;

	iosapic_override_isa_irq(p->bus_irq, p->global_irq,
				 (p->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
				 (p->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
	return 0;
}


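/* MADT: non-maskable interrupt source.  Validated but not yet acted upon. */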
static int __init
acpi_parse_nmi_src (acpi_table_entry_header *header, const unsigned long end)
{
	struct acpi_table_nmi_src *nmi_src;

	nmi_src = (struct acpi_table_nmi_src*) header;

	if (BAD_MADT_ENTRY(nmi_src, end))
		return -EINVAL;

	/* TBD: Support nmi_src entries */
	return 0;
}

static void __init
acpi_madt_oem_check (char *oem_id, char *oem_table_id)
{
	if (!strncmp(oem_id, "IBM", 3) &&
	    (!strncmp(oem_table_id, "SERMOW", 6))) {

		/*
		 * Unfortunately ITC_DRIFT is not yet part of the
		 * official SAL spec, so the ITC_DRIFT bit is not
		 * set by the BIOS on this hardware.
		 */
		sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT;

		cyclone_setup();
	}
}

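/*
 * Top-level MADT handler: note whether a PC-AT compatible (8259) setup is
 * present, initialize the IOSAPIC core, remap the IPI block if the table
 * supplies a local APIC address, and apply OEM-specific quirks.
 */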
static int __init
acpi_parse_madt (unsigned long phys_addr, unsigned long size)
{
	if (!phys_addr || !size)
		return -EINVAL;

	acpi_madt = (struct acpi_table_madt *) __va(phys_addr);

	/* remember the value for reference after free_initmem() */
#ifdef CONFIG_ITANIUM
	has_8259 = 1; /* Firmware on old Itanium systems is broken */
#else
	has_8259 = acpi_madt->flags.pcat_compat;
#endif
	iosapic_system_init(has_8259);

	/* Get base address of IPI Message Block */

	if (acpi_madt->lapic_address)
		ipi_base_addr = ioremap(acpi_madt->lapic_address, 0);

	printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);

	acpi_madt_oem_check(acpi_madt->header.oem_id,
		acpi_madt->header.oem_table_id);

	return 0;
}


#ifdef CONFIG_ACPI_NUMA

#undef SLIT_DEBUG

#define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)

static int __initdata srat_num_cpus;			/* number of cpus */
static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
#define pxm_bit_set(bit)	(set_bit(bit,(void *)pxm_flag))
#define pxm_bit_test(bit)	(test_bit(bit,(void *)pxm_flag))
/* maps to convert between proximity domain and logical node ID */
int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
int __initdata nid_to_pxm_map[MAX_NUMNODES];
static struct acpi_table_slit __initdata *slit_table;

/*
 * ACPI 2.0 SLIT (System Locality Information Table)
 * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
 */
void __init
acpi_numa_slit_init (struct acpi_table_slit *slit)
{
	u32 len;

	len = sizeof(struct acpi_table_header) + 8
		+ slit->localities * slit->localities;
	if (slit->header.length != len) {
		printk(KERN_ERR "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
		       len, slit->header.length);
		memset(numa_slit, 10, sizeof(numa_slit));
		return;
	}
	slit_table = slit;
}

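/*
 * SRAT: processor affinity entry.  Mark the proximity domain as present
 * and record the CPU's physical ID; the domain is translated to a logical
 * node ID later in acpi_numa_arch_fixup().
 */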
void __init
acpi_numa_processor_affinity_init (struct acpi_table_processor_affinity *pa)
{
	/* record this node in proximity bitmap */
	pxm_bit_set(pa->proximity_domain);

	node_cpuid[srat_num_cpus].phys_id = (pa->apic_id << 8) | (pa->lsapic_eid);
	/* nid should be overridden as logical node id later */
	node_cpuid[srat_num_cpus].nid = pa->proximity_domain;
	srat_num_cpus++;
}

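/*
 * SRAT: memory affinity entry.  Record each enabled memory range in
 * node_memblk[], kept sorted by base address and tagged with its proximity
 * domain.
 */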
void __init
acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma)
{
	unsigned long paddr, size;
	u8 pxm;
	struct node_memblk_s *p, *q, *pend;

	pxm = ma->proximity_domain;

	/* fill node memory chunk structure */
	paddr = ma->base_addr_hi;
	paddr = (paddr << 32) | ma->base_addr_lo;
	size = ma->length_hi;
	size = (size << 32) | ma->length_lo;

	/* Ignore disabled entries */
	if (!ma->flags.enabled)
		return;

	/* record this node in proximity bitmap */
	pxm_bit_set(pxm);

	/* Insertion sort based on base address */
	pend = &node_memblk[num_node_memblks];
	for (p = &node_memblk[0]; p < pend; p++) {
		if (paddr < p->start_paddr)
			break;
	}
	if (p < pend) {
		for (q = pend - 1; q >= p; q--)
			*(q + 1) = *q;
	}
	p->start_paddr = paddr;
	p->size = size;
	p->nid = pxm;
	num_node_memblks++;
}

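/*
 * Post-SRAT/SLIT fixup: collapse the sparse proximity-domain numbers into
 * dense logical node IDs, mark those nodes online, renumber memory chunks
 * and CPUs accordingly, and fill the node distance matrix from the SLIT
 * (if one was found).
 */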
void __init
acpi_numa_arch_fixup (void)
{
	int i, j, node_from, node_to;

	/* If there's no SRAT, fix the phys_id and mark node 0 online */
	if (srat_num_cpus == 0) {
		node_set_online(0);
		node_cpuid[0].phys_id = hard_smp_processor_id();
		return;
	}

	/*
	 * MCD - This can probably be dropped now.  No need for pxm ID to node ID
	 * mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES.
	 */
	/* calculate total number of nodes in system from PXM bitmap */
	memset(pxm_to_nid_map, -1, sizeof(pxm_to_nid_map));
	memset(nid_to_pxm_map, -1, sizeof(nid_to_pxm_map));
	nodes_clear(node_online_map);
	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
		if (pxm_bit_test(i)) {
			int nid = num_online_nodes();
			pxm_to_nid_map[i] = nid;
			nid_to_pxm_map[nid] = i;
			node_set_online(nid);
		}
	}

	/* set logical node id in memory chunk structure */
	for (i = 0; i < num_node_memblks; i++)
		node_memblk[i].nid = pxm_to_nid_map[node_memblk[i].nid];

	/* assign memory bank numbers for each chunk on each node */
	for_each_online_node(i) {
		int bank;

		bank = 0;
		for (j = 0; j < num_node_memblks; j++)
			if (node_memblk[j].nid == i)
				node_memblk[j].bank = bank++;
	}

	/* set logical node id in cpu structure */
	for (i = 0; i < srat_num_cpus; i++)
		node_cpuid[i].nid = pxm_to_nid_map[node_cpuid[i].nid];

	printk(KERN_INFO "Number of logical nodes in system = %d\n", num_online_nodes());
	printk(KERN_INFO "Number of memory chunks in system = %d\n", num_node_memblks);

	if (!slit_table) return;
	memset(numa_slit, -1, sizeof(numa_slit));
	for (i = 0; i < slit_table->localities; i++) {
		if (!pxm_bit_test(i))
			continue;
		node_from = pxm_to_nid_map[i];
		for (j = 0; j < slit_table->localities; j++) {
			if (!pxm_bit_test(j))
				continue;
			node_to = pxm_to_nid_map[j];
			node_distance(node_from, node_to) =
				slit_table->entry[i * slit_table->localities + j];
		}
	}

#ifdef SLIT_DEBUG
	printk("ACPI 2.0 SLIT locality table:\n");
	for_each_online_node(i) {
		for_each_online_node(j)
			printk("%03d ", node_distance(i, j));
		printk("\n");
	}
#endif
}
#endif /* CONFIG_ACPI_NUMA */

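/*
 * Map a GSI to an interrupt vector.  Legacy ISA IRQs (GSI < 16 on systems
 * with an 8259) already have fixed vectors; everything else is registered
 * with the IOSAPIC using the requested trigger mode and polarity.
 */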
unsigned int
acpi_register_gsi (u32 gsi, int edge_level, int active_high_low)
{
	if (has_8259 && gsi < 16)
		return isa_irq_to_vector(gsi);

	return iosapic_register_intr(gsi,
			(active_high_low == ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
			(edge_level == ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
}
EXPORT_SYMBOL(acpi_register_gsi);

#ifdef CONFIG_ACPI_DEALLOCATE_IRQ
void
acpi_unregister_gsi (u32 gsi)
{
	iosapic_unregister_intr(gsi);
}
EXPORT_SYMBOL(acpi_unregister_gsi);
#endif /* CONFIG_ACPI_DEALLOCATE_IRQ */

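/*
 * FADT handler: note whether an 8042 keyboard controller and other legacy
 * devices are present (IA-PC boot architecture flags) and register the SCI
 * interrupt as level-triggered, active-low.
 */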
static int __init
acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
{
	struct acpi_table_header *fadt_header;
	struct fadt_descriptor_rev2 *fadt;

	if (!phys_addr || !size)
		return -EINVAL;

	fadt_header = (struct acpi_table_header *) __va(phys_addr);
	if (fadt_header->revision != 3)
		return -ENODEV;		/* Only deal with ACPI 2.0 FADT */

	fadt = (struct fadt_descriptor_rev2 *) fadt_header;

	if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
		acpi_kbd_controller_present = 0;

	if (fadt->iapc_boot_arch & BAF_LEGACY_DEVICES)
		acpi_legacy_devices = 1;

	acpi_register_gsi(fadt->sci_int, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
	return 0;
}


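/*
 * Locate the ACPI 2.0 RSDP via the EFI system table; ACPI 1.0 (and the old
 * 0.71 IA-64 tables) are no longer supported.
 */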
unsigned long __init
acpi_find_rsdp (void)
{
	unsigned long rsdp_phys = 0;

	if (efi.acpi20)
		rsdp_phys = __pa(efi.acpi20);
	else if (efi.acpi)
		printk(KERN_WARNING PREFIX "v1.0/r0.71 tables no longer supported\n");
	return rsdp_phys;
}


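/*
 * Boot-time ACPI setup: parse the MADT and its subtables, then the FADT,
 * and finish building the CPU maps used for SMP bring-up.
 */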
int __init
acpi_boot_init (void)
{

	/*
	 * MADT
	 * ----
	 * Parse the Multiple APIC Description Table (MADT), if it exists.
	 * Note that this table provides platform SMP configuration
	 * information -- the successor to MPS tables.
	 */

	if (acpi_table_parse(ACPI_APIC, acpi_parse_madt) < 1) {
		printk(KERN_ERR PREFIX "Can't find MADT\n");
		goto skip_madt;
	}

	/* Local APIC */

	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0) < 0)
		printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");

	if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic, NR_CPUS) < 1)
		printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries\n");

	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0) < 0)
		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");

	/* I/O APIC */

	if (acpi_table_parse_madt(ACPI_MADT_IOSAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
		printk(KERN_ERR PREFIX "Error parsing MADT - no IOSAPIC entries\n");

	/* System-Level Interrupt Routing */

	if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src, ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
		printk(KERN_ERR PREFIX "Error parsing platform interrupt source entry\n");

	if (acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 0) < 0)
		printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");

	if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, 0) < 0)
		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
  skip_madt:

	/*
	 * FADT says whether a legacy keyboard controller is present.
	 * The FADT also contains an SCI_INT line, by which the system
	 * gets interrupts such as power and sleep buttons.  If it's not
	 * on a legacy interrupt, it needs to be set up.
	 */
	if (acpi_table_parse(ACPI_FADT, acpi_parse_fadt) < 1)
		printk(KERN_ERR PREFIX "Can't find FADT\n");

#ifdef CONFIG_SMP
	if (available_cpus == 0) {
		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
		printk(KERN_INFO "CPU 0 (0x%04x)\n", hard_smp_processor_id());
		smp_boot_data.cpu_phys_id[available_cpus] = hard_smp_processor_id();
		available_cpus = 1; /* We've got at least one of these, no? */
	}
	smp_boot_data.cpu_count = available_cpus;

	smp_build_cpu_map();
# ifdef CONFIG_ACPI_NUMA
	if (srat_num_cpus == 0) {
		int cpu, i = 1;
		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
			if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id())
				node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu];
	}
	build_cpu_to_node_map();
# endif
#endif
	/* Make boot-up look pretty */
	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
	return 0;
}

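/*
 * Look up the IRQ vector already assigned to a GSI.  Fails (-1) if the GSI
 * has not been registered.
 */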
int
acpi_gsi_to_irq (u32 gsi, unsigned int *irq)
{
	int vector;

	if (has_8259 && gsi < 16)
		*irq = isa_irq_to_vector(gsi);
	else {
		vector = gsi_to_vector(gsi);
		if (vector == -1)
			return -1;

		*irq = vector;
	}
	return 0;
}

/*
 *  ACPI based hotplug CPU support
 */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
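/*
 * Associate a hot-added CPU with its NUMA node, using the proximity domain
 * returned by acpi_get_pxm() (defaulting to node 0).
 */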
static
int
acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
{
#ifdef CONFIG_ACPI_NUMA
	int			pxm_id;

	pxm_id = acpi_get_pxm(handle);

	/*
	 * Assuming that the container driver would have set the proximity
	 * domain and would have initialized pxm_to_nid_map[pxm_id] && pxm_flag
	 */
	node_cpuid[cpu].nid = (pxm_id < 0) ? 0 :
			pxm_to_nid_map[pxm_id];

	node_cpuid[cpu].phys_id = physid;
#endif
	return(0);
}


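/*
 * Hot-add a CPU: evaluate the processor's _MAT method to obtain its LSAPIC
 * entry, pick the first logical CPU number not yet present, wire up the
 * node and SAPIC-ID mappings, and mark the CPU present.
 */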
int
acpi_map_lsapic(acpi_handle handle, int *pcpu)
{
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *obj;
	struct acpi_table_lsapic *lsapic;
	cpumask_t tmp_map;
	long physid;
	int cpu;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
		return -EINVAL;

	if (!buffer.length || !buffer.pointer)
		return -EINVAL;

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_BUFFER ||
	    obj->buffer.length < sizeof(*lsapic)) {
		acpi_os_free(buffer.pointer);
		return -EINVAL;
	}

	lsapic = (struct acpi_table_lsapic *)obj->buffer.pointer;

	if ((lsapic->header.type != ACPI_MADT_LSAPIC) ||
	    (!lsapic->flags.enabled)) {
		acpi_os_free(buffer.pointer);
		return -EINVAL;
	}

	physid = ((lsapic->id << 8) | (lsapic->eid));

	acpi_os_free(buffer.pointer);
	buffer.length = ACPI_ALLOCATE_BUFFER;
	buffer.pointer = NULL;

	cpus_complement(tmp_map, cpu_present_map);
	cpu = first_cpu(tmp_map);
	if (cpu >= NR_CPUS)
		return -EINVAL;

	acpi_map_cpu2node(handle, cpu, physid);

	cpu_set(cpu, cpu_present_map);
	ia64_cpu_to_sapicid[cpu] = physid;
	ia64_acpiid_to_sapicid[lsapic->acpi_id] = ia64_cpu_to_sapicid[cpu];

	*pcpu = cpu;
	return(0);
}
EXPORT_SYMBOL(acpi_map_lsapic);


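/*
 * Hot-remove a CPU: drop its ACPI-ID and logical-CPU SAPIC mappings and
 * clear it from the present map.
 */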
int
acpi_unmap_lsapic(int cpu)
{
	int i;

	for (i = 0; i < MAX_SAPICS; i++) {
		if (ia64_acpiid_to_sapicid[i] == ia64_cpu_to_sapicid[cpu]) {
			ia64_acpiid_to_sapicid[i] = -1;
			break;
		}
	}
	ia64_cpu_to_sapicid[cpu] = -1;
	cpu_clear(cpu, cpu_present_map);

#ifdef CONFIG_ACPI_NUMA
	/* NUMA-specific cleanups */
#endif

	return(0);
}
EXPORT_SYMBOL(acpi_unmap_lsapic);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */


#ifdef CONFIG_ACPI_NUMA
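/*
 * Namespace walk callback: for each device whose _MAT method returns an
 * IOSAPIC MADT entry, use its _PXM proximity domain to associate the
 * IOSAPIC's GSI base with a NUMA node.
 */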
acpi_status __init
acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
{
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *obj;
	struct acpi_table_iosapic *iosapic;
	unsigned int gsi_base;
	int node;

	/* Only care about objects w/ a method that returns the MADT */
	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
		return AE_OK;

	if (!buffer.length || !buffer.pointer)
		return AE_OK;

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_BUFFER ||
	    obj->buffer.length < sizeof(*iosapic)) {
		acpi_os_free(buffer.pointer);
		return AE_OK;
	}

	iosapic = (struct acpi_table_iosapic *)obj->buffer.pointer;

	if (iosapic->header.type != ACPI_MADT_IOSAPIC) {
		acpi_os_free(buffer.pointer);
		return AE_OK;
	}

	gsi_base = iosapic->global_irq_base;

	acpi_os_free(buffer.pointer);
	buffer.length = ACPI_ALLOCATE_BUFFER;
	buffer.pointer = NULL;

	/*
	 * OK, it's an IOSAPIC MADT entry, look for a _PXM method to tell
	 * us which node to associate this with.
	 */
	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer)))
		return AE_OK;

	if (!buffer.length || !buffer.pointer)
		return AE_OK;

	obj = buffer.pointer;

	if (obj->type != ACPI_TYPE_INTEGER ||
	    obj->integer.value >= MAX_PXM_DOMAINS) {
		acpi_os_free(buffer.pointer);
		return AE_OK;
	}

	node = pxm_to_nid_map[obj->integer.value];
	acpi_os_free(buffer.pointer);

	if (node >= MAX_NUMNODES || !node_online(node) ||
	    cpus_empty(node_to_cpumask(node)))
		return AE_OK;

	/* We know a gsi to node mapping! */
	map_iosapic_to_node(gsi_base, node);
	return AE_OK;
}
#endif /* CONFIG_ACPI_NUMA */
#endif /* CONFIG_ACPI_BOOT */