/*
 * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $)
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  			- Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *  TBD:
 *	1. Make # power states dynamic.
 *	2. Support duty_cycle values that span bit 4.
 *	3. Optimize by having scheduler determine busyness instead of
 *	   having us try to calculate it here.
 *	4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>

#include <asm/io.h>
#include <asm/system.h>
#include <asm/cpu.h>
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/acpi.h>

#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_CLASS		"processor"
#define ACPI_PROCESSOR_DEVICE_NAME	"Processor"
#define ACPI_PROCESSOR_FILE_INFO	"info"
#define ACPI_PROCESSOR_FILE_THROTTLING	"throttling"
#define ACPI_PROCESSOR_FILE_LIMIT	"limit"
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
#define ACPI_PROCESSOR_NOTIFY_POWER	0x81
#define ACPI_PROCESSOR_NOTIFY_THROTTLING	0x82

#define ACPI_PROCESSOR_LIMIT_USER	0
#define ACPI_PROCESSOR_LIMIT_THERMAL	1

#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");

MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Processor Driver");
MODULE_LICENSE("GPL");

static int acpi_processor_add(struct acpi_device *device);
static int acpi_processor_start(struct acpi_device *device);
static int acpi_processor_remove(struct acpi_device *device, int type);
static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
static void acpi_processor_notify(acpi_handle handle, u32 event, void *data);
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
static int acpi_processor_handle_eject(struct acpi_processor *pr);


static const struct acpi_device_id processor_device_ids[] = {
	{ACPI_PROCESSOR_OBJECT_HID, 0},
	{ACPI_PROCESSOR_HID, 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

static struct acpi_driver acpi_processor_driver = {
	.name = "processor",
	.class = ACPI_PROCESSOR_CLASS,
	.ids = processor_device_ids,
	.ops = {
		.add = acpi_processor_add,
		.remove = acpi_processor_remove,
		.start = acpi_processor_start,
		.suspend = acpi_processor_suspend,
		.resume = acpi_processor_resume,
		},
};

#define INSTALL_NOTIFY_HANDLER		1
#define UNINSTALL_NOTIFY_HANDLER	2

static const struct file_operations acpi_processor_info_fops = {
	.owner = THIS_MODULE,
	.open = acpi_processor_info_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

DEFINE_PER_CPU(struct acpi_processor *, processors);
struct acpi_processor_errata errata __read_mostly;
static int set_no_mwait(const struct dmi_system_id *id)
{
	printk(KERN_NOTICE PREFIX "%s detected - "
		"disabling mwait for CPU C-states\n", id->ident);
	idle_nomwait = 1;
	return 0;
}

static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
	{
	set_no_mwait, "IFL91 board", {
	DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
	DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
	DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
	DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
	{
	set_no_mwait, "Extensa 5220", {
	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
	DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
	DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
	{},
};

/* --------------------------------------------------------------------------
                                Errata Handling
   -------------------------------------------------------------------------- */

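/*
 * Identify the PIIX4 stepping and record the chipset errata that affect
 * throttling and C3: the manual-throttle bugs on the A/B steppings, the
 * BM-IDE status port used for bus-master activity detection, and the
 * Type-F DMA livelock that forces C3 to be disabled.
 */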
static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
	u8 value1 = 0;
	u8 value2 = 0;


	if (!dev)
		return -EINVAL;

	/*
	 * Note that 'dev' references the PIIX4 ACPI Controller.
	 */

	switch (dev->revision) {
	case 0:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
		break;
	case 1:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
		break;
	case 2:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
		break;
	case 3:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
		break;
	}

	switch (dev->revision) {

	case 0:		/* PIIX4 A-step */
	case 1:		/* PIIX4 B-step */
		/*
		 * See specification changes #13 ("Manual Throttle Duty Cycle")
		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
		 * erratum #5 ("STPCLK# Deassertion Time") from the January
		 * 2002 PIIX4 specification update.  Applies to only older
		 * PIIX4 models.
		 */
		errata.piix4.throttle = 1;

		/* Fall through */
	case 2:		/* PIIX4E */
	case 3:		/* PIIX4M */
		/*
		 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
		 * Livelock") from the January 2002 PIIX4 specification update.
		 * Applies to all PIIX4 models.
		 */

		/*
		 * BM-IDE
		 * ------
		 * Find the PIIX4 IDE Controller and get the Bus Master IDE
		 * Status register address.  We'll use this later to read
		 * each IDE controller's DMA status to make sure we catch all
		 * DMA activity.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			errata.piix4.bmisx = pci_resource_start(dev, 4);
			pci_dev_put(dev);
		}

		/*
		 * Type-F DMA
		 * ----------
		 * Find the PIIX4 ISA Controller and read the Motherboard
		 * DMA controller's status to see if Type-F (Fast) DMA mode
		 * is enabled (bit 7) on either channel.  Note that we'll
		 * disable C3 support if this is enabled, as some legacy
		 * devices won't operate well if fast DMA is disabled.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB_0,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			pci_read_config_byte(dev, 0x76, &value1);
			pci_read_config_byte(dev, 0x77, &value2);
			if ((value1 & 0x80) || (value2 & 0x80))
				errata.piix4.fdma = 1;
			pci_dev_put(dev);
		}

		break;
	}

	if (errata.piix4.bmisx)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus master activity detection (BM-IDE) erratum enabled\n"));
	if (errata.piix4.fdma)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Type-F DMA livelock erratum (C3 disabled)\n"));

	return 0;
}

static int acpi_processor_errata(struct acpi_processor *pr)
{
	int result = 0;
	struct pci_dev *dev = NULL;


	if (!pr)
		return -EINVAL;

	/*
	 * PIIX4
	 */
	dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
			     PCI_ANY_ID, NULL);
	if (dev) {
		result = acpi_processor_errata_piix4(dev);
		pci_dev_put(dev);
	}

	return result;
}

/* --------------------------------------------------------------------------
                              Common ACPI processor functions
   -------------------------------------------------------------------------- */

/*
 * _PDC is required for a BIOS-OS handshake for most of the newer
 * ACPI processor features.
 */
static int acpi_processor_set_pdc(struct acpi_processor *pr)
{
	struct acpi_object_list *pdc_in = pr->pdc;
	acpi_status status = AE_OK;


	if (!pdc_in)
		return status;
	if (idle_nomwait) {
		/*
		 * If mwait is disabled for CPU C-states, clear the C2C3_FFH
		 * access mode bit in the _PDC argument buffer; the C1_FFH
		 * access mode bit is cleared as well.
		 */
		union acpi_object *obj;
		u32 *buffer = NULL;

		obj = pdc_in->pointer;
		buffer = (u32 *)(obj->buffer.pointer);
		buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);

	}
	status = acpi_evaluate_object(pr->handle, "_PDC", pdc_in, NULL);

	if (ACPI_FAILURE(status))
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "Could not evaluate _PDC, using legacy perf. control...\n"));

	return status;
}

/* --------------------------------------------------------------------------
                              FS Interface (/proc)
   -------------------------------------------------------------------------- */

static struct proc_dir_entry *acpi_processor_dir = NULL;

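/*
 * /proc/acpi/processor/<bid>/info: one-shot summary of the processor's
 * IDs and of which interfaces (bus mastering, power, throttling, limit)
 * this driver detected for it.
 */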
static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;


	if (!pr)
		goto end;

	seq_printf(seq, "processor id:            %d\n"
		   "acpi id:                 %d\n"
		   "bus mastering control:   %s\n"
		   "power management:        %s\n"
		   "throttling control:      %s\n"
		   "limit interface:         %s\n",
		   pr->id,
		   pr->acpi_id,
		   pr->flags.bm_control ? "yes" : "no",
		   pr->flags.power ? "yes" : "no",
		   pr->flags.throttling ? "yes" : "no",
		   pr->flags.limit ? "yes" : "no");

      end:
	return 0;
}

static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_info_seq_show,
			   PDE(inode)->data);
}

static int acpi_processor_add_fs(struct acpi_device *device)
{
	struct proc_dir_entry *entry = NULL;


	if (!acpi_device_dir(device)) {
		acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
						     acpi_processor_dir);
		if (!acpi_device_dir(device))
			return -ENODEV;
	}

	/* 'info' [R] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_INFO,
				 S_IRUGO, acpi_device_dir(device),
				 &acpi_processor_info_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;

	/* 'throttling' [R/W] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING,
				 S_IFREG | S_IRUGO | S_IWUSR,
				 acpi_device_dir(device),
				 &acpi_processor_throttling_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;

	/* 'limit' [R/W] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_LIMIT,
				 S_IFREG | S_IRUGO | S_IWUSR,
				 acpi_device_dir(device),
				 &acpi_processor_limit_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;
	return 0;
}

static int acpi_processor_remove_fs(struct acpi_device *device)
{

	if (acpi_device_dir(device)) {
		remove_proc_entry(ACPI_PROCESSOR_FILE_INFO,
				  acpi_device_dir(device));
		remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
				  acpi_device_dir(device));
		remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
				  acpi_device_dir(device));
		remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
		acpi_device_dir(device) = NULL;
	}

	return 0;
}

/* Use the ACPI processor ID reported in the MADT to map CPUs on SMP systems */

#ifndef CONFIG_SMP
static int get_cpu_id(acpi_handle handle, int type, u32 acpi_id) { return -1; }
#else

static struct acpi_table_madt *madt;

static int map_lapic_id(struct acpi_subtable_header *entry,
		 u32 acpi_id, int *apic_id)
{
	struct acpi_madt_local_apic *lapic =
		(struct acpi_madt_local_apic *)entry;
	if ((lapic->lapic_flags & ACPI_MADT_ENABLED) &&
	    lapic->processor_id == acpi_id) {
		*apic_id = lapic->id;
		return 1;
	}
	return 0;
}

static int map_lsapic_id(struct acpi_subtable_header *entry,
		int device_declaration, u32 acpi_id, int *apic_id)
{
	struct acpi_madt_local_sapic *lsapic =
		(struct acpi_madt_local_sapic *)entry;
	u32 tmp = (lsapic->id << 8) | lsapic->eid;

	/* Only check enabled APICs */
	if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
		return 0;

	/* Device statement declaration type */
	if (device_declaration) {
		if (entry->length < 16)
			printk(KERN_ERR PREFIX
			    "Invalid LSAPIC with Device type processor (SAPIC ID %#x)\n",
			    tmp);
		else if (lsapic->uid == acpi_id)
			goto found;
	/* Processor statement declaration type */
	} else if (lsapic->processor_id == acpi_id)
		goto found;

	return 0;
found:
	*apic_id = tmp;
	return 1;
}

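/*
 * Walk every subtable in the static MADT looking for a LOCAL_APIC or
 * LOCAL_SAPIC entry that matches acpi_id; returns the physical APIC ID
 * on a match, or -1 if none is found.
 */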
static int map_madt_entry(int type, u32 acpi_id)
{
	unsigned long madt_end, entry;
	int apic_id = -1;

	if (!madt)
		return apic_id;

	entry = (unsigned long)madt;
	madt_end = entry + madt->header.length;

	/* Parse all entries looking for a match. */

	entry += sizeof(struct acpi_table_madt);
	while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
		struct acpi_subtable_header *header =
			(struct acpi_subtable_header *)entry;
		if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
			if (map_lapic_id(header, acpi_id, &apic_id))
				break;
		} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
			if (map_lsapic_id(header, type, acpi_id, &apic_id))
				break;
		}
		entry += header->length;
	}
	return apic_id;
}

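/*
 * _MAT returns a buffer holding a single MADT-style entry for this
 * processor object; parse it with the same LAPIC/LSAPIC helpers as the
 * static table.  get_cpu_id() tries this before scanning the MADT.
 */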
static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	struct acpi_subtable_header *header;
	int apic_id = -1;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
		goto exit;

	if (!buffer.length || !buffer.pointer)
		goto exit;

	obj = buffer.pointer;
	if (obj->type != ACPI_TYPE_BUFFER ||
	    obj->buffer.length < sizeof(struct acpi_subtable_header)) {
		goto exit;
	}

	header = (struct acpi_subtable_header *)obj->buffer.pointer;
	if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
		map_lapic_id(header, acpi_id, &apic_id);
	} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
		map_lsapic_id(header, type, acpi_id, &apic_id);
	}

exit:
	if (buffer.pointer)
		kfree(buffer.pointer);
	return apic_id;
}

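/*
 * Map an ACPI processor ID to a logical CPU number: resolve the
 * physical APIC ID via _MAT or the MADT, then find the possible CPU
 * whose cpu_physical_id() matches it.
 */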
static int get_cpu_id(acpi_handle handle, int type, u32 acpi_id)
{
	int i;
	int apic_id = -1;

	apic_id = map_mat_entry(handle, type, acpi_id);
	if (apic_id == -1)
		apic_id = map_madt_entry(type, acpi_id);
	if (apic_id == -1)
		return apic_id;

	for_each_possible_cpu(i) {
		if (cpu_physical_id(i) == apic_id)
			return i;
	}
	return -1;
}
#endif

/* --------------------------------------------------------------------------
                                 Driver Interface
   -------------------------------------------------------------------------- */

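/*
 * Gather everything the driver needs to know about one processor
 * object: chipset errata, bus-master arbitration capability, the ACPI
 * ID (from _UID for Device declarations, from the Processor statement
 * otherwise), the logical CPU number, the P_BLK throttling registers
 * and the _SUN slot number.
 */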
static int acpi_processor_get_info(struct acpi_device *device)
{
	acpi_status status = 0;
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
	struct acpi_processor *pr;
	int cpu_index, device_declaration = 0;
	static int cpu0_initialized;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	if (num_online_cpus() > 1)
		errata.smp = TRUE;

	acpi_processor_errata(pr);

	/*
	 * Check to see if we have bus mastering arbitration control.  This
	 * is required for proper C3 usage (to maintain cache coherency).
	 */
	if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
		pr->flags.bm_control = 1;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus mastering arbitration control present\n"));
	} else
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "No bus mastering arbitration control\n"));

	if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_HID)) {
		/*
		 * Declared with "Device" statement; match _UID.
		 * Note that we don't handle string _UIDs yet.
		 */
		unsigned long long value;
		status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
						NULL, &value);
		if (ACPI_FAILURE(status)) {
			printk(KERN_ERR PREFIX
			    "Evaluating processor _UID [%#x]\n", status);
			return -ENODEV;
		}
		device_declaration = 1;
		pr->acpi_id = value;
	} else {
		/* Declared with "Processor" statement; match ProcessorID */
		status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status)) {
			printk(KERN_ERR PREFIX "Evaluating processor object\n");
			return -ENODEV;
		}

		/*
		 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
		 *      >>> 'acpi_get_processor_id(acpi_id, &id)' in
		 *      arch/xxx/acpi.c
		 */
		pr->acpi_id = object.processor.proc_id;
	}
	cpu_index = get_cpu_id(pr->handle, device_declaration, pr->acpi_id);

	/* Handle UP system running SMP kernel, with no LAPIC in MADT */
	if (!cpu0_initialized && (cpu_index == -1) &&
	    (num_online_cpus() == 1)) {
		cpu_index = 0;
	}

	cpu0_initialized = 1;

	pr->id = cpu_index;

	/*
	 *  Extra Processor objects may be enumerated on MP systems with
	 *  less than the max # of CPUs. They should be ignored if and only
	 *  if they are physically not present.
	 */
	if (pr->id == -1) {
		if (ACPI_FAILURE
		    (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
			return -ENODEV;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
			  pr->acpi_id));

	if (!object.processor.pblk_address)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
	else if (object.processor.pblk_length != 6)
		printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n",
			    object.processor.pblk_length);
	else {
		pr->throttling.address = object.processor.pblk_address;
		pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

		pr->pblk = object.processor.pblk_address;

		/*
		 * We don't care about error returns - we just try to mark
		 * these reserved so that nobody else is confused into thinking
		 * that this region might be unused..
		 *
		 * (In particular, allocating the IO range for Cardbus)
		 */
		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
	}

	/*
	 * If ACPI describes a slot number for this CPU, we can use it to
	 * ensure we get the right value in the "physical id" field
	 * of /proc/cpuinfo
	 */
	status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
	if (ACPI_SUCCESS(status))
		arch_fix_phys_package_id(pr->id, object.integer.value);

	return 0;
}

static DEFINE_PER_CPU(void *, processor_device_array);

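/*
 * Second-stage initialization, run once the acpi_device has been added:
 * map the object to a CPU, reject duplicate ACPI IDs reported by buggy
 * BIOSes, create the /proc and sysfs entries, install the notify
 * handler, do the _PDC handshake, and register power, throttling,
 * limit and thermal-cooling support.
 */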
static int __cpuinit acpi_processor_start(struct acpi_device *device)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_processor *pr;
	struct sys_device *sysdev;

	pr = acpi_driver_data(device);

	result = acpi_processor_get_info(device);
	if (result) {
		/* Processor is physically not present */
		return 0;
	}

	BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));

	/*
	 * Buggy BIOS check
	 * ACPI id of processors can be reported wrongly by the BIOS.
	 * Don't trust it blindly
	 */
	if (per_cpu(processor_device_array, pr->id) != NULL &&
	    per_cpu(processor_device_array, pr->id) != device) {
		printk(KERN_WARNING "BIOS reported wrong ACPI id "
			"for the processor\n");
		return -ENODEV;
	}
	per_cpu(processor_device_array, pr->id) = device;

	per_cpu(processors, pr->id) = pr;

	result = acpi_processor_add_fs(device);
	if (result)
		goto end;

	sysdev = get_cpu_sysdev(pr->id);
	if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev"))
		return -EFAULT;

	status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
					     acpi_processor_notify, pr);

	/* _PDC call should be done before doing anything else (if reqd.). */
	arch_acpi_processor_init_pdc(pr);
	acpi_processor_set_pdc(pr);
#ifdef CONFIG_CPU_FREQ
	acpi_processor_ppc_has_changed(pr);
#endif
	acpi_processor_get_throttling_info(pr);
	acpi_processor_get_limit_info(pr);


	acpi_processor_power_init(pr, device);

	pr->cdev = thermal_cooling_device_register("Processor", device,
						&processor_cooling_ops);
	if (IS_ERR(pr->cdev)) {
		result = PTR_ERR(pr->cdev);
		goto end;
	}

	dev_info(&device->dev, "registered as cooling_device%d\n",
		 pr->cdev->id);

	result = sysfs_create_link(&device->dev.kobj,
				   &pr->cdev->device.kobj,
				   "thermal_cooling");
	if (result)
		printk(KERN_ERR PREFIX "Create sysfs link\n");
	result = sysfs_create_link(&pr->cdev->device.kobj,
				   &device->dev.kobj,
				   "device");
	if (result)
		printk(KERN_ERR PREFIX "Create sysfs link\n");

	if (pr->flags.throttling) {
		printk(KERN_INFO PREFIX "%s [%s] (supports",
		       acpi_device_name(device), acpi_device_bid(device));
		printk(" %d throttling states", pr->throttling.state_count);
		printk(")\n");
	}

      end:

	return result;
}

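/*
 * Device notify handler: _PPC (performance limit), _CST (power states)
 * and throttling change notifications are re-evaluated and forwarded as
 * both /proc events and netlink events.
 */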
static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_processor *pr = data;
	struct acpi_device *device = NULL;
	int saved;

	if (!pr)
		return;

	if (acpi_bus_get_device(pr->handle, &device))
		return;

	switch (event) {
	case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
		saved = pr->performance_platform_limit;
		acpi_processor_ppc_has_changed(pr);
		if (saved == pr->performance_platform_limit)
			break;
		acpi_bus_generate_proc_event(device, event,
					pr->performance_platform_limit);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						  dev_name(&device->dev), event,
						  pr->performance_platform_limit);
		break;
	case ACPI_PROCESSOR_NOTIFY_POWER:
		acpi_processor_cst_has_changed(pr);
		acpi_bus_generate_proc_event(device, event, 0);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						  dev_name(&device->dev), event, 0);
		break;
	case ACPI_PROCESSOR_NOTIFY_THROTTLING:
		acpi_processor_tstate_has_changed(pr);
		acpi_bus_generate_proc_event(device, event, 0);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
						  dev_name(&device->dev), event, 0);
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Unsupported event [0x%x]\n", event));
		break;
	}

	return;
}

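/*
 * CPU hotplug notifier: when a CPU comes online, re-evaluate its _PPC,
 * _CST and throttling data so the driver picks up anything the platform
 * changed while the CPU was offline.
 */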
static int acpi_cpu_soft_notify(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct acpi_processor *pr = per_cpu(processors, cpu);

	if (action == CPU_ONLINE && pr) {
		acpi_processor_ppc_has_changed(pr);
		acpi_processor_cst_has_changed(pr);
		acpi_processor_tstate_has_changed(pr);
	}
	return NOTIFY_OK;
}

static struct notifier_block acpi_cpu_notifier =
{
	    .notifier_call = acpi_cpu_soft_notify,
};

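/*
 * .add() only allocates and names the per-device acpi_processor; all of
 * the real initialization is deferred to acpi_processor_start().
 */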
static int acpi_processor_add(struct acpi_device *device)
{
	struct acpi_processor *pr = NULL;


	if (!device)
		return -EINVAL;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
	if (!pr)
		return -ENOMEM;

	if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
		kfree(pr);
		return -ENOMEM;
	}

	pr->handle = device->handle;
	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
	device->driver_data = pr;

	return 0;
}

static int acpi_processor_remove(struct acpi_device *device, int type)
{
	acpi_status status = AE_OK;
	struct acpi_processor *pr = NULL;


	if (!device || !acpi_driver_data(device))
		return -EINVAL;

	pr = acpi_driver_data(device);

	if (pr->id >= nr_cpu_ids)
		goto free;

	if (type == ACPI_BUS_REMOVAL_EJECT) {
		if (acpi_processor_handle_eject(pr))
			return -EINVAL;
	}

	acpi_processor_power_exit(pr, device);

	status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
					    acpi_processor_notify);

	sysfs_remove_link(&device->dev.kobj, "sysdev");

	acpi_processor_remove_fs(device);

	if (pr->cdev) {
		sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
		sysfs_remove_link(&pr->cdev->device.kobj, "device");
		thermal_cooling_device_unregister(pr->cdev);
		pr->cdev = NULL;
	}

	per_cpu(processors, pr->id) = NULL;
	per_cpu(processor_device_array, pr->id) = NULL;

free:
	free_cpumask_var(pr->throttling.shared_cpu_map);
	kfree(pr);

	return 0;
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU
/****************************************************************************
 *	ACPI processor hotplug support                                      *
 ****************************************************************************/

static int is_processor_present(acpi_handle handle)
{
	acpi_status status;
	unsigned long long sta = 0;


	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);

	if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
		return 1;

	/*
	 * _STA is mandatory for a processor that supports hot plug
	 */
	if (status == AE_NOT_FOUND)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				"Processor does not support hot plug\n"));
	else
		ACPI_EXCEPTION((AE_INFO, status,
				"Processor Device is not present"));
	return 0;
}

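/*
 * Hot-add path when no acpi_device exists for the handle yet: create
 * the device under its parent, start it, and emit KOBJ_ONLINE if the
 * processor was mapped to a valid logical CPU.
 */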
static
int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
{
	acpi_handle phandle;
	struct acpi_device *pdev;
	struct acpi_processor *pr;


	if (acpi_get_parent(handle, &phandle)) {
		return -ENODEV;
	}

	if (acpi_bus_get_device(phandle, &pdev)) {
		return -ENODEV;
	}

	if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) {
		return -ENODEV;
	}

	acpi_bus_start(*device);

	pr = acpi_driver_data(*device);
	if (!pr)
		return -ENODEV;

	if ((pr->id >= 0) && (pr->id < nr_cpu_ids)) {
		kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);
	}
	return 0;
}

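/*
 * System notify handler installed on every Processor namespace object:
 * bus-check and device-check events trigger hot-add handling, while an
 * eject request only emits KOBJ_OFFLINE so that user space can offline
 * and eject the processor.
 */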
static void __ref acpi_processor_hotplug_notify(acpi_handle handle,
						u32 event, void *data)
{
	struct acpi_processor *pr;
	struct acpi_device *device = NULL;
	int result;


	switch (event) {
	case ACPI_NOTIFY_BUS_CHECK:
	case ACPI_NOTIFY_DEVICE_CHECK:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"Processor driver received %s event\n",
		       (event == ACPI_NOTIFY_BUS_CHECK) ?
		       "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"));

		if (!is_processor_present(handle))
			break;

		if (acpi_bus_get_device(handle, &device)) {
			result = acpi_processor_device_add(handle, &device);
			if (result)
				printk(KERN_ERR PREFIX
					    "Unable to add the device\n");
			break;
		}

		pr = acpi_driver_data(device);
		if (!pr) {
			printk(KERN_ERR PREFIX "Driver data is NULL\n");
			break;
		}

		if (pr->id >= 0 && (pr->id < nr_cpu_ids)) {
			kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
			break;
		}

		result = acpi_processor_start(device);
		if ((!result) && ((pr->id >= 0) && (pr->id < nr_cpu_ids))) {
			kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
		} else {
			printk(KERN_ERR PREFIX "Device [%s] failed to start\n",
				    acpi_device_bid(device));
		}
		break;
	case ACPI_NOTIFY_EJECT_REQUEST:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "received ACPI_NOTIFY_EJECT_REQUEST\n"));

		if (acpi_bus_get_device(handle, &device)) {
			printk(KERN_ERR PREFIX
				    "Device doesn't exist, dropping EJECT\n");
			break;
		}
		pr = acpi_driver_data(device);
		if (!pr) {
			printk(KERN_ERR PREFIX
				    "Driver data is NULL, dropping EJECT\n");
			return;
		}

		if ((pr->id < nr_cpu_ids) && (cpu_present(pr->id)))
			kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Unsupported event [0x%x]\n", event));
		break;
	}

	return;
}

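/*
 * acpi_walk_namespace() callback: install or remove the hotplug notify
 * handler on each Processor object, depending on the action passed in
 * via the context pointer.
 */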
static acpi_status
processor_walk_namespace_cb(acpi_handle handle,
			    u32 lvl, void *context, void **rv)
{
	acpi_status status;
	int *action = context;
	acpi_object_type type = 0;

	status = acpi_get_type(handle, &type);
	if (ACPI_FAILURE(status))
		return (AE_OK);

	if (type != ACPI_TYPE_PROCESSOR)
		return (AE_OK);

	switch (*action) {
	case INSTALL_NOTIFY_HANDLER:
		acpi_install_notify_handler(handle,
					    ACPI_SYSTEM_NOTIFY,
					    acpi_processor_hotplug_notify,
					    NULL);
		break;
	case UNINSTALL_NOTIFY_HANDLER:
		acpi_remove_notify_handler(handle,
					   ACPI_SYSTEM_NOTIFY,
					   acpi_processor_hotplug_notify);
		break;
	default:
		break;
	}

	return (AE_OK);
}

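/*
 * Bring a hot-added processor into the system: verify it is present,
 * map its local SAPIC/APIC to a new logical CPU, and register that CPU
 * with the architecture code.
 */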
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
{

	if (!is_processor_present(handle)) {
		return AE_ERROR;
	}

	if (acpi_map_lsapic(handle, p_cpu))
		return AE_ERROR;

	if (arch_register_cpu(*p_cpu)) {
		acpi_unmap_lsapic(*p_cpu);
		return AE_ERROR;
	}

	return AE_OK;
}

static int acpi_processor_handle_eject(struct acpi_processor *pr)
{
	if (cpu_online(pr->id))
		cpu_down(pr->id);

	arch_unregister_cpu(pr->id);
	acpi_unmap_lsapic(pr->id);
	return (0);
}
#else
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
{
	return AE_ERROR;
}
static int acpi_processor_handle_eject(struct acpi_processor *pr)
{
	return (-EINVAL);
}
#endif

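/*
 * With CONFIG_ACPI_HOTPLUG_CPU, walk the whole namespace at module
 * load/unload to (un)install the hotplug notify handler on every
 * Processor object; the CPU hotplug notifier is (un)registered
 * unconditionally.
 */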
static
void acpi_processor_install_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	int action = INSTALL_NOTIFY_HANDLER;
	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
			    ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    processor_walk_namespace_cb, &action, NULL);
#endif
	register_hotcpu_notifier(&acpi_cpu_notifier);
}

static
void acpi_processor_uninstall_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	int action = UNINSTALL_NOTIFY_HANDLER;
	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
			    ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    processor_walk_namespace_cb, &action, NULL);
#endif
	unregister_hotcpu_notifier(&acpi_cpu_notifier);
}

/*
 * We keep the driver loaded even when ACPI is not running.
 * This is needed for the powernow-k8 driver, which works even without
 * ACPI, but needs symbols from this driver.
 */

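/*
 * Module init: cache the MADT for CPU mapping, create the
 * /proc/acpi/processor directory, apply the DMI mwait quirks, register
 * the cpuidle and ACPI bus drivers, then install the hotplug notifiers
 * and the thermal/cpufreq, _PPC and throttling helpers.
 */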
static int __init acpi_processor_init(void)
{
	int result = 0;

	memset(&errata, 0, sizeof(errata));

#ifdef CONFIG_SMP
	if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
				(struct acpi_table_header **)&madt)))
		madt = NULL;
#endif

	acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
	if (!acpi_processor_dir)
		return -ENOMEM;

	/*
	 * Check whether the system matches an entry in the DMI table.  If
	 * it does, OSPM should not use mwait for CPU C-states.
	 */
	dmi_check_system(processor_idle_dmi_table);
	result = cpuidle_register_driver(&acpi_idle_driver);
	if (result < 0)
		goto out_proc;

	result = acpi_bus_register_driver(&acpi_processor_driver);
	if (result < 0)
		goto out_cpuidle;

	acpi_processor_install_hotplug_notify();

	acpi_thermal_cpufreq_init();

	acpi_processor_ppc_init();

	acpi_processor_throttling_init();

	return 0;

out_cpuidle:
	cpuidle_unregister_driver(&acpi_idle_driver);

out_proc:
	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);

	return result;
}

1171 
1172 static void __exit acpi_processor_exit(void)
1173 {
1174 	acpi_processor_ppc_exit();
1175 
1176 	acpi_thermal_cpufreq_exit();
1177 
1178 	acpi_processor_uninstall_hotplug_notify();
1179 
1180 	acpi_bus_unregister_driver(&acpi_processor_driver);
1181 
1182 	cpuidle_unregister_driver(&acpi_idle_driver);
1183 
1184 	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
1185 
1186 	return;
1187 }
1188 
1189 module_init(acpi_processor_init);
1190 module_exit(acpi_processor_exit);
1191 
1192 EXPORT_SYMBOL(acpi_processor_set_thermal_limit);
1193 
1194 MODULE_ALIAS("processor");
1195