/*
 * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $)
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *  TBD:
 *	1. Make # power states dynamic.
 *	2. Support duty_cycle values that span bit 4.
 *	3. Optimize by having the scheduler determine busyness instead of
 *	   having us try to calculate it here.
 *	4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>

#include <asm/io.h>
#include <asm/system.h>
#include <asm/cpu.h>
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/acpi.h>

#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_CLASS		"processor"
#define ACPI_PROCESSOR_DEVICE_NAME	"Processor"
#define ACPI_PROCESSOR_FILE_INFO	"info"
#define ACPI_PROCESSOR_FILE_THROTTLING	"throttling"
#define ACPI_PROCESSOR_FILE_LIMIT	"limit"
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
#define ACPI_PROCESSOR_NOTIFY_POWER	0x81
#define ACPI_PROCESSOR_NOTIFY_THROTTLING	0x82

#define ACPI_PROCESSOR_LIMIT_USER	0
#define ACPI_PROCESSOR_LIMIT_THERMAL	1

#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");

MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Processor Driver");
MODULE_LICENSE("GPL");

static int acpi_processor_add(struct acpi_device *device);
static int acpi_processor_start(struct acpi_device *device);
static int acpi_processor_remove(struct acpi_device *device, int type);
static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
static void acpi_processor_notify(struct acpi_device *device, u32 event);
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
static int acpi_processor_handle_eject(struct acpi_processor *pr);


static const struct acpi_device_id processor_device_ids[] = {
	{ACPI_PROCESSOR_OBJECT_HID, 0},
	{ACPI_PROCESSOR_HID, 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

static struct acpi_driver acpi_processor_driver = {
	.name = "processor",
	.class = ACPI_PROCESSOR_CLASS,
	.ids = processor_device_ids,
	.ops = {
		.add = acpi_processor_add,
		.remove = acpi_processor_remove,
		.start = acpi_processor_start,
		.suspend = acpi_processor_suspend,
		.resume = acpi_processor_resume,
		.notify = acpi_processor_notify,
		},
};

#define INSTALL_NOTIFY_HANDLER		1
#define UNINSTALL_NOTIFY_HANDLER	2

static const struct file_operations acpi_processor_info_fops = {
	.owner = THIS_MODULE,
	.open = acpi_processor_info_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

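/*
 * Per-CPU pointer to each logical CPU's acpi_processor, shared with the
 * cpuidle, cpufreq and throttling code, plus the chipset errata flags
 * that code consults.
 */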
DEFINE_PER_CPU(struct acpi_processor *, processors);
struct acpi_processor_errata errata __read_mostly;
static int set_no_mwait(const struct dmi_system_id *id)
{
	printk(KERN_NOTICE PREFIX "%s detected - "
		"disabling mwait for CPU C-states\n", id->ident);
	idle_nomwait = 1;
	return 0;
}

static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
	{
	set_no_mwait, "IFL91 board", {
	DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
	DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
	DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
	DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
	{
	set_no_mwait, "Extensa 5220", {
	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
	DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
	DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
	{},
};

/* --------------------------------------------------------------------------
                                Errata Handling
   -------------------------------------------------------------------------- */

static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
	u8 value1 = 0;
	u8 value2 = 0;


	if (!dev)
		return -EINVAL;

	/*
	 * Note that 'dev' references the PIIX4 ACPI Controller.
	 */

	switch (dev->revision) {
	case 0:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
		break;
	case 1:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
		break;
	case 2:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
		break;
	case 3:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
		break;
	}

	switch (dev->revision) {

	case 0:		/* PIIX4 A-step */
	case 1:		/* PIIX4 B-step */
		/*
		 * See specification changes #13 ("Manual Throttle Duty Cycle")
		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
		 * erratum #5 ("STPCLK# Deassertion Time") from the January
		 * 2002 PIIX4 specification update.  Applies to only older
		 * PIIX4 models.
		 */
		errata.piix4.throttle = 1;

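		/*
		 * Fall through: the BM-IDE and Type-F DMA errata handled
		 * below apply to the A/B-step parts as well.
		 */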
	case 2:		/* PIIX4E */
	case 3:		/* PIIX4M */
		/*
		 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
		 * Livelock") from the January 2002 PIIX4 specification update.
		 * Applies to all PIIX4 models.
		 */

		/*
		 * BM-IDE
		 * ------
		 * Find the PIIX4 IDE Controller and get the Bus Master IDE
		 * Status register address.  We'll use this later to read
		 * each IDE controller's DMA status to make sure we catch all
		 * DMA activity.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			errata.piix4.bmisx = pci_resource_start(dev, 4);
			pci_dev_put(dev);
		}

		/*
		 * Type-F DMA
		 * ----------
		 * Find the PIIX4 ISA Controller and read the Motherboard
		 * DMA controller's status to see if Type-F (Fast) DMA mode
		 * is enabled (bit 7) on either channel.  Note that we'll
		 * disable C3 support if this is enabled, as some legacy
		 * devices won't operate well if fast DMA is disabled.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB_0,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			pci_read_config_byte(dev, 0x76, &value1);
			pci_read_config_byte(dev, 0x77, &value2);
			if ((value1 & 0x80) || (value2 & 0x80))
				errata.piix4.fdma = 1;
			pci_dev_put(dev);
		}

		break;
	}

	if (errata.piix4.bmisx)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus master activity detection (BM-IDE) erratum enabled\n"));
	if (errata.piix4.fdma)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Type-F DMA livelock erratum (C3 disabled)\n"));

	return 0;
}

static int acpi_processor_errata(struct acpi_processor *pr)
{
	int result = 0;
	struct pci_dev *dev = NULL;


	if (!pr)
		return -EINVAL;

	/*
	 * PIIX4
	 */
	dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
			     PCI_ANY_ID, NULL);
	if (dev) {
		result = acpi_processor_errata_piix4(dev);
		pci_dev_put(dev);
	}

	return result;
}

/* --------------------------------------------------------------------------
                              Common ACPI processor functions
   -------------------------------------------------------------------------- */

/*
 * _PDC is required for a BIOS-OS handshake for most of the newer
 * ACPI processor features.
 */
static int acpi_processor_set_pdc(struct acpi_processor *pr)
{
	struct acpi_object_list *pdc_in = pr->pdc;
	acpi_status status = AE_OK;


	if (!pdc_in)
		return status;
	if (idle_nomwait) {
		/*
		 * If MWAIT is disabled for CPU C-states, clear the C2C3_FFH
		 * (and C1_FFH) access-mode bits in the _PDC capability
		 * buffer before handing it to the firmware.
		 */
		union acpi_object *obj;
		u32 *buffer = NULL;

		obj = pdc_in->pointer;
		buffer = (u32 *)(obj->buffer.pointer);
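		/*
		 * The _PDC buffer, as built by the arch code in
		 * arch_acpi_processor_init_pdc(), holds u32s:
		 * [0] revision, [1] count of capability DWORDs,
		 * [2] the capability DWORD carrying the FFH bits.
		 */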
		buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);

	}
	status = acpi_evaluate_object(pr->handle, "_PDC", pdc_in, NULL);

	if (ACPI_FAILURE(status))
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "Could not evaluate _PDC, using legacy perf. control...\n"));

	return status;
}

/* --------------------------------------------------------------------------
                              FS Interface (/proc)
   -------------------------------------------------------------------------- */

static struct proc_dir_entry *acpi_processor_dir = NULL;

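/*
 * Illustrative contents of the per-processor 'info' file created below,
 * e.g. /proc/acpi/processor/CPU0/info (the values are platform dependent):
 *
 *	processor id:            0
 *	acpi id:                 1
 *	bus mastering control:   yes
 *	power management:        yes
 *	throttling control:      yes
 *	limit interface:         yes
 */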
static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;


	if (!pr)
		goto end;

	seq_printf(seq, "processor id:            %d\n"
		   "acpi id:                 %d\n"
		   "bus mastering control:   %s\n"
		   "power management:        %s\n"
		   "throttling control:      %s\n"
		   "limit interface:         %s\n",
		   pr->id,
		   pr->acpi_id,
		   pr->flags.bm_control ? "yes" : "no",
		   pr->flags.power ? "yes" : "no",
		   pr->flags.throttling ? "yes" : "no",
		   pr->flags.limit ? "yes" : "no");

      end:
	return 0;
}

static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_info_seq_show,
			   PDE(inode)->data);
}

static int acpi_processor_add_fs(struct acpi_device *device)
{
	struct proc_dir_entry *entry = NULL;


	if (!acpi_device_dir(device)) {
		acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
						     acpi_processor_dir);
		if (!acpi_device_dir(device))
			return -ENODEV;
	}

	/* 'info' [R] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_INFO,
				 S_IRUGO, acpi_device_dir(device),
				 &acpi_processor_info_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;

	/* 'throttling' [R/W] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING,
				 S_IFREG | S_IRUGO | S_IWUSR,
				 acpi_device_dir(device),
				 &acpi_processor_throttling_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;

	/* 'limit' [R/W] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_LIMIT,
				 S_IFREG | S_IRUGO | S_IWUSR,
				 acpi_device_dir(device),
				 &acpi_processor_limit_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;
	return 0;
}

static int acpi_processor_remove_fs(struct acpi_device *device)
{

	if (acpi_device_dir(device)) {
		remove_proc_entry(ACPI_PROCESSOR_FILE_INFO,
				  acpi_device_dir(device));
		remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
				  acpi_device_dir(device));
		remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
				  acpi_device_dir(device));
		remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
		acpi_device_dir(device) = NULL;
	}

	return 0;
}

/* Use the ACPI id from the MADT to map CPUs on SMP systems */

#ifndef CONFIG_SMP
static int get_cpu_id(acpi_handle handle, int type, u32 acpi_id) { return -1; }
#else

static struct acpi_table_madt *madt;

static int map_lapic_id(struct acpi_subtable_header *entry,
		 u32 acpi_id, int *apic_id)
{
	struct acpi_madt_local_apic *lapic =
		(struct acpi_madt_local_apic *)entry;
	if ((lapic->lapic_flags & ACPI_MADT_ENABLED) &&
	    lapic->processor_id == acpi_id) {
		*apic_id = lapic->id;
		return 1;
	}
	return 0;
}

static int map_x2apic_id(struct acpi_subtable_header *entry,
			 int device_declaration, u32 acpi_id, int *apic_id)
{
	struct acpi_madt_local_x2apic *apic =
		(struct acpi_madt_local_x2apic *)entry;
	u32 tmp = apic->local_apic_id;

	/* Only check enabled APICs */
	if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
		return 0;

	/* Device statement declaration type */
	if (device_declaration) {
		if (apic->uid == acpi_id)
			goto found;
	}

	return 0;
found:
	*apic_id = tmp;
	return 1;
}

static int map_lsapic_id(struct acpi_subtable_header *entry,
		int device_declaration, u32 acpi_id, int *apic_id)
{
	struct acpi_madt_local_sapic *lsapic =
		(struct acpi_madt_local_sapic *)entry;
	u32 tmp = (lsapic->id << 8) | lsapic->eid;

	/* Only check enabled APICs */
	if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
		return 0;

	/* Device statement declaration type */
	if (device_declaration) {
		if (entry->length < 16)
			printk(KERN_ERR PREFIX
			    "Invalid LSAPIC with Device type processor (SAPIC ID %#x)\n",
			    tmp);
		else if (lsapic->uid == acpi_id)
			goto found;
	/* Processor statement declaration type */
	} else if (lsapic->processor_id == acpi_id)
		goto found;

	return 0;
found:
	*apic_id = tmp;
	return 1;
}

static int map_madt_entry(int type, u32 acpi_id)
{
	unsigned long madt_end, entry;
	int apic_id = -1;

	if (!madt)
		return apic_id;

	entry = (unsigned long)madt;
	madt_end = entry + madt->header.length;

	/* Parse all entries looking for a match. */

	entry += sizeof(struct acpi_table_madt);
	while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
		struct acpi_subtable_header *header =
			(struct acpi_subtable_header *)entry;
		if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
			if (map_lapic_id(header, acpi_id, &apic_id))
				break;
		} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
			if (map_x2apic_id(header, type, acpi_id, &apic_id))
				break;
		} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
			if (map_lsapic_id(header, type, acpi_id, &apic_id))
				break;
		}
		entry += header->length;
	}
	return apic_id;
}

static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	struct acpi_subtable_header *header;
	int apic_id = -1;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
		goto exit;

	if (!buffer.length || !buffer.pointer)
		goto exit;

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_BUFFER ||
	    obj->buffer.length < sizeof(struct acpi_subtable_header)) {
		goto exit;
	}

	header = (struct acpi_subtable_header *)obj->buffer.pointer;
	if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
		map_lapic_id(header, acpi_id, &apic_id);
	} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
		map_lsapic_id(header, type, acpi_id, &apic_id);
	}

exit:
	if (buffer.pointer)
		kfree(buffer.pointer);
	return apic_id;
}

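/*
 * Map an ACPI processor id to a logical CPU number: try the per-device
 * _MAT method first (needed for hot-added CPUs), fall back to the static
 * MADT, then match the resulting APIC id against each possible CPU's
 * physical id.
 */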
static int get_cpu_id(acpi_handle handle, int type, u32 acpi_id)
{
	int i;
	int apic_id = -1;

	apic_id = map_mat_entry(handle, type, acpi_id);
	if (apic_id == -1)
		apic_id = map_madt_entry(type, acpi_id);
	if (apic_id == -1)
		return apic_id;

	for_each_possible_cpu(i) {
		if (cpu_physical_id(i) == apic_id)
			return i;
	}
	return -1;
}
#endif

/* --------------------------------------------------------------------------
                                 Driver Interface
   -------------------------------------------------------------------------- */

static int acpi_processor_get_info(struct acpi_device *device)
{
	acpi_status status = 0;
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
	struct acpi_processor *pr;
	int cpu_index, device_declaration = 0;
	static int cpu0_initialized;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	if (num_online_cpus() > 1)
		errata.smp = TRUE;

	acpi_processor_errata(pr);

	/*
	 * Check to see if we have bus mastering arbitration control.  This
	 * is required for proper C3 usage (to maintain cache coherency).
	 */
	if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
		pr->flags.bm_control = 1;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus mastering arbitration control present\n"));
	} else
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "No bus mastering arbitration control\n"));

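	/*
	 * Firmware may describe a CPU either with a legacy Processor()
	 * statement or as a Device() with _HID "ACPI0007" (needed, for
	 * instance, when the ACPI id does not fit the 8-bit ProcessorID
	 * field).  In the Device case the ACPI id comes from _UID; in the
	 * Processor case it is the ProcessorID field of the evaluated
	 * object.  The _HID check below tells the two apart.
	 */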
	if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_HID)) {
		/*
		 * Declared with "Device" statement; match _UID.
		 * Note that we don't handle string _UIDs yet.
		 */
		unsigned long long value;
		status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
						NULL, &value);
		if (ACPI_FAILURE(status)) {
			printk(KERN_ERR PREFIX
			    "Evaluating processor _UID [%#x]\n", status);
			return -ENODEV;
		}
		device_declaration = 1;
		pr->acpi_id = value;
	} else {
		/* Declared with "Processor" statement; match ProcessorID */
		status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status)) {
			printk(KERN_ERR PREFIX "Evaluating processor object\n");
			return -ENODEV;
		}

		/*
		 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
		 *      >>> 'acpi_get_processor_id(acpi_id, &id)' in
		 *      arch/xxx/acpi.c
		 */
		pr->acpi_id = object.processor.proc_id;
	}
	cpu_index = get_cpu_id(pr->handle, device_declaration, pr->acpi_id);

	/* Handle UP system running SMP kernel, with no LAPIC in MADT */
	if (!cpu0_initialized && (cpu_index == -1) &&
	    (num_online_cpus() == 1)) {
		cpu_index = 0;
	}

	cpu0_initialized = 1;

	pr->id = cpu_index;

	/*
	 *  Extra Processor objects may be enumerated on MP systems with
	 *  less than the max # of CPUs. They should be ignored only if
	 *  they are physically not present.
	 */
	if (pr->id == -1) {
		if (ACPI_FAILURE
		    (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
			return -ENODEV;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
			  pr->acpi_id));

	if (!object.processor.pblk_address)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
	else if (object.processor.pblk_length != 6)
		printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n",
			    object.processor.pblk_length);
	else {
		pr->throttling.address = object.processor.pblk_address;
		pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

		pr->pblk = object.processor.pblk_address;

		/*
		 * We don't care about error returns - we just try to mark
		 * these reserved so that nobody else is confused into thinking
		 * that this region might be unused.
		 *
		 * (In particular, allocating the IO range for Cardbus)
		 */
		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
	}

	/*
	 * If ACPI describes a slot number for this CPU, we can use it to
	 * ensure we get the right value in the "physical id" field
	 * of /proc/cpuinfo
	 */
	status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
	if (ACPI_SUCCESS(status))
		arch_fix_phys_package_id(pr->id, object.integer.value);

	return 0;
}

static DEFINE_PER_CPU(void *, processor_device_array);

static int __cpuinit acpi_processor_start(struct acpi_device *device)
{
	int result = 0;
	struct acpi_processor *pr;
	struct sys_device *sysdev;

	pr = acpi_driver_data(device);

	result = acpi_processor_get_info(device);
	if (result) {
		/* Processor is physically not present */
		return 0;
	}

	BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));

	/*
	 * Buggy BIOS check
	 * ACPI id of processors can be reported wrongly by the BIOS.
	 * Don't trust it blindly
	 */
	if (per_cpu(processor_device_array, pr->id) != NULL &&
	    per_cpu(processor_device_array, pr->id) != device) {
		printk(KERN_WARNING "BIOS reported wrong ACPI id "
			"for the processor\n");
		return -ENODEV;
	}
	per_cpu(processor_device_array, pr->id) = device;

	per_cpu(processors, pr->id) = pr;

	result = acpi_processor_add_fs(device);
	if (result)
		goto end;

	sysdev = get_cpu_sysdev(pr->id);
	if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev"))
		return -EFAULT;

	/* _PDC call should be done before doing anything else (if reqd.). */
	arch_acpi_processor_init_pdc(pr);
	acpi_processor_set_pdc(pr);
#ifdef CONFIG_CPU_FREQ
	acpi_processor_ppc_has_changed(pr);
#endif
	acpi_processor_get_throttling_info(pr);
	acpi_processor_get_limit_info(pr);


	acpi_processor_power_init(pr, device);

	pr->cdev = thermal_cooling_device_register("Processor", device,
						&processor_cooling_ops);
	if (IS_ERR(pr->cdev)) {
		result = PTR_ERR(pr->cdev);
		goto end;
	}

	dev_info(&device->dev, "registered as cooling_device%d\n",
		 pr->cdev->id);

	result = sysfs_create_link(&device->dev.kobj,
				   &pr->cdev->device.kobj,
				   "thermal_cooling");
	if (result)
		printk(KERN_ERR PREFIX "Create sysfs link\n");
	result = sysfs_create_link(&pr->cdev->device.kobj,
				   &device->dev.kobj,
				   "device");
	if (result)
		printk(KERN_ERR PREFIX "Create sysfs link\n");

	if (pr->flags.throttling) {
		printk(KERN_INFO PREFIX "%s [%s] (supports",
		       acpi_device_name(device), acpi_device_bid(device));
		printk(" %d throttling states", pr->throttling.state_count);
		printk(")\n");
	}

      end:

	return result;
}

static void acpi_processor_notify(struct acpi_device *device, u32 event)
{
	struct acpi_processor *pr = acpi_driver_data(device);
	int saved;

	if (!pr)
		return;

	switch (event) {
	case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
		saved = pr->performance_platform_limit;
		acpi_processor_ppc_has_changed(pr);
		if (saved == pr->performance_platform_limit)
			break;
		acpi_bus_generate_proc_event(device, event,
					pr->performance_platform_limit);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						  dev_name(&device->dev), event,
						  pr->performance_platform_limit);
		break;
	case ACPI_PROCESSOR_NOTIFY_POWER:
		acpi_processor_cst_has_changed(pr);
		acpi_bus_generate_proc_event(device, event, 0);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						  dev_name(&device->dev), event, 0);
		break;
	case ACPI_PROCESSOR_NOTIFY_THROTTLING:
		acpi_processor_tstate_has_changed(pr);
		acpi_bus_generate_proc_event(device, event, 0);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						  dev_name(&device->dev), event, 0);
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Unsupported event [0x%x]\n", event));
		break;
	}

	return;
}

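/*
 * When a CPU comes (back) online, re-evaluate its performance, C-state
 * and throttling limits: firmware may have changed them while the CPU
 * was offline, e.g. across suspend/resume or a physical hot-add.
 */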
static int acpi_cpu_soft_notify(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct acpi_processor *pr = per_cpu(processors, cpu);

	if (action == CPU_ONLINE && pr) {
		acpi_processor_ppc_has_changed(pr);
		acpi_processor_cst_has_changed(pr);
		acpi_processor_tstate_has_changed(pr);
	}
	return NOTIFY_OK;
}

static struct notifier_block acpi_cpu_notifier =
{
	.notifier_call = acpi_cpu_soft_notify,
};

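/*
 * .add only allocates and initializes the per-processor bookkeeping; the
 * heavy lifting (CPU id mapping, /proc entries, cpuidle and thermal
 * registration) happens in .start, which the hot-plug notify handler can
 * also invoke directly for a freshly added processor.
 */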
static int acpi_processor_add(struct acpi_device *device)
{
	struct acpi_processor *pr = NULL;


	if (!device)
		return -EINVAL;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
	if (!pr)
		return -ENOMEM;

	if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
		kfree(pr);
		return -ENOMEM;
	}

	pr->handle = device->handle;
	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
	device->driver_data = pr;

	return 0;
}

static int acpi_processor_remove(struct acpi_device *device, int type)
{
	struct acpi_processor *pr = NULL;


	if (!device || !acpi_driver_data(device))
		return -EINVAL;

	pr = acpi_driver_data(device);

	if (pr->id >= nr_cpu_ids)
		goto free;

	if (type == ACPI_BUS_REMOVAL_EJECT) {
		if (acpi_processor_handle_eject(pr))
			return -EINVAL;
	}

	acpi_processor_power_exit(pr, device);

	sysfs_remove_link(&device->dev.kobj, "sysdev");

	acpi_processor_remove_fs(device);

	if (pr->cdev) {
		sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
		sysfs_remove_link(&pr->cdev->device.kobj, "device");
		thermal_cooling_device_unregister(pr->cdev);
		pr->cdev = NULL;
	}

	per_cpu(processors, pr->id) = NULL;
	per_cpu(processor_device_array, pr->id) = NULL;

free:
	free_cpumask_var(pr->throttling.shared_cpu_map);
	kfree(pr);

	return 0;
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU
/****************************************************************************
 *			ACPI processor hotplug support			    *
 ****************************************************************************/

static int is_processor_present(acpi_handle handle)
{
	acpi_status status;
	unsigned long long sta = 0;


	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);

	if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
		return 1;

	/*
	 * _STA is mandatory for a processor that supports hot plug
	 */
	if (status == AE_NOT_FOUND)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				"Processor does not support hot plug\n"));
	else
		ACPI_EXCEPTION((AE_INFO, status,
				"Processor Device is not present"));
	return 0;
}

static
int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
{
	acpi_handle phandle;
	struct acpi_device *pdev;
	struct acpi_processor *pr;


	if (acpi_get_parent(handle, &phandle)) {
		return -ENODEV;
	}

	if (acpi_bus_get_device(phandle, &pdev)) {
		return -ENODEV;
	}

	if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) {
		return -ENODEV;
	}

	acpi_bus_start(*device);

	pr = acpi_driver_data(*device);
	if (!pr)
		return -ENODEV;

	if ((pr->id >= 0) && (pr->id < nr_cpu_ids)) {
		kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);
	}
	return 0;
}

static void __ref acpi_processor_hotplug_notify(acpi_handle handle,
						u32 event, void *data)
{
	struct acpi_processor *pr;
	struct acpi_device *device = NULL;
	int result;


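	/*
	 * BUS_CHECK/DEVICE_CHECK announce a (possibly new) processor:
	 * create and start its ACPI device if it has not been enumerated
	 * yet, otherwise just tell userspace that it came online.
	 * EJECT_REQUEST only emits an offline uevent here; the actual
	 * removal is handled later through acpi_processor_handle_eject().
	 */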
	switch (event) {
	case ACPI_NOTIFY_BUS_CHECK:
	case ACPI_NOTIFY_DEVICE_CHECK:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"Processor driver received %s event\n",
		       (event == ACPI_NOTIFY_BUS_CHECK) ?
		       "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"));

		if (!is_processor_present(handle))
			break;

		if (acpi_bus_get_device(handle, &device)) {
			result = acpi_processor_device_add(handle, &device);
			if (result)
				printk(KERN_ERR PREFIX
					    "Unable to add the device\n");
			break;
		}

		pr = acpi_driver_data(device);
		if (!pr) {
			printk(KERN_ERR PREFIX "Driver data is NULL\n");
			break;
		}

		if (pr->id >= 0 && (pr->id < nr_cpu_ids)) {
			kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
			break;
		}

		result = acpi_processor_start(device);
		if ((!result) && ((pr->id >= 0) && (pr->id < nr_cpu_ids))) {
			kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
		} else {
			printk(KERN_ERR PREFIX "Device [%s] failed to start\n",
				    acpi_device_bid(device));
		}
		break;
	case ACPI_NOTIFY_EJECT_REQUEST:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "received ACPI_NOTIFY_EJECT_REQUEST\n"));

		if (acpi_bus_get_device(handle, &device)) {
			printk(KERN_ERR PREFIX
				    "Device doesn't exist, dropping EJECT\n");
			break;
		}
		pr = acpi_driver_data(device);
		if (!pr) {
			printk(KERN_ERR PREFIX
				    "Driver data is NULL, dropping EJECT\n");
			return;
		}

		if ((pr->id < nr_cpu_ids) && (cpu_present(pr->id)))
			kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Unsupported event [0x%x]\n", event));
		break;
	}

	return;
}

static acpi_status
processor_walk_namespace_cb(acpi_handle handle,
			    u32 lvl, void *context, void **rv)
{
	acpi_status status;
	int *action = context;
	acpi_object_type type = 0;

	status = acpi_get_type(handle, &type);
	if (ACPI_FAILURE(status))
		return (AE_OK);

	if (type != ACPI_TYPE_PROCESSOR)
		return (AE_OK);

	switch (*action) {
	case INSTALL_NOTIFY_HANDLER:
		acpi_install_notify_handler(handle,
					    ACPI_SYSTEM_NOTIFY,
					    acpi_processor_hotplug_notify,
					    NULL);
		break;
	case UNINSTALL_NOTIFY_HANDLER:
		acpi_remove_notify_handler(handle,
					   ACPI_SYSTEM_NOTIFY,
					   acpi_processor_hotplug_notify);
		break;
	default:
		break;
	}

	return (AE_OK);
}

static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
{

	if (!is_processor_present(handle)) {
		return AE_ERROR;
	}

	if (acpi_map_lsapic(handle, p_cpu))
		return AE_ERROR;

	if (arch_register_cpu(*p_cpu)) {
		acpi_unmap_lsapic(*p_cpu);
		return AE_ERROR;
	}

	return AE_OK;
}

static int acpi_processor_handle_eject(struct acpi_processor *pr)
{
	if (cpu_online(pr->id))
		cpu_down(pr->id);

	arch_unregister_cpu(pr->id);
	acpi_unmap_lsapic(pr->id);
	return (0);
}
#else
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
{
	return AE_ERROR;
}
static int acpi_processor_handle_eject(struct acpi_processor *pr)
{
	return (-EINVAL);
}
#endif

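/*
 * Hot-plug notifications can arrive for Processor objects that do not yet
 * have a struct acpi_device, so the handler is installed on every
 * Processor object in the namespace rather than through the driver's
 * .notify callback.
 */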
static
void acpi_processor_install_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	int action = INSTALL_NOTIFY_HANDLER;
	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
			    ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    processor_walk_namespace_cb, &action, NULL);
#endif
	register_hotcpu_notifier(&acpi_cpu_notifier);
}

static
void acpi_processor_uninstall_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	int action = UNINSTALL_NOTIFY_HANDLER;
	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
			    ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    processor_walk_namespace_cb, &action, NULL);
#endif
	unregister_hotcpu_notifier(&acpi_cpu_notifier);
}

/*
 * We keep the driver loaded even when ACPI is not running.
 * This is needed for the powernow-k8 driver, which works even without
 * ACPI but needs symbols from this driver.
 */

static int __init acpi_processor_init(void)
{
	int result = 0;

	memset(&errata, 0, sizeof(errata));

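	/* Cache the MADT so get_cpu_id() can map ACPI ids to APIC ids. */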
#ifdef CONFIG_SMP
	if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
				(struct acpi_table_header **)&madt)))
		madt = NULL;
#endif

	acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
	if (!acpi_processor_dir)
		return -ENOMEM;

	/*
	 * Check whether the system matches one of the DMI entries above;
	 * if so, OSPM should not use MWAIT for CPU C-states.
	 */
	dmi_check_system(processor_idle_dmi_table);
	result = cpuidle_register_driver(&acpi_idle_driver);
	if (result < 0)
		goto out_proc;

	result = acpi_bus_register_driver(&acpi_processor_driver);
	if (result < 0)
		goto out_cpuidle;

	acpi_processor_install_hotplug_notify();

	acpi_thermal_cpufreq_init();

	acpi_processor_ppc_init();

	acpi_processor_throttling_init();

	return 0;

out_cpuidle:
	cpuidle_unregister_driver(&acpi_idle_driver);

out_proc:
	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);

	return result;
}

static void __exit acpi_processor_exit(void)
{
	acpi_processor_ppc_exit();

	acpi_thermal_cpufreq_exit();

	acpi_processor_uninstall_hotplug_notify();

	acpi_bus_unregister_driver(&acpi_processor_driver);

	cpuidle_unregister_driver(&acpi_idle_driver);

	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);

	return;
}

module_init(acpi_processor_init);
module_exit(acpi_processor_exit);

EXPORT_SYMBOL(acpi_processor_set_thermal_limit);

MODULE_ALIAS("processor");