1 /*
2  * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $)
3  *
4  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6  *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
7  *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8  *  			- Added processor hotplug support
9  *
10  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2 of the License, or (at
15  *  your option) any later version.
16  *
17  *  This program is distributed in the hope that it will be useful, but
18  *  WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  *  General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License along
23  *  with this program; if not, write to the Free Software Foundation, Inc.,
24  *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
25  *
26  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27  *  TBD:
28  *	1. Make # power states dynamic.
29  *	2. Support duty_cycle values that span bit 4.
30  *	3. Optimize by having scheduler determine business instead of
31  *	   having us try to calculate it here.
32  *	4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/types.h>
39 #include <linux/pci.h>
40 #include <linux/pm.h>
41 #include <linux/cpufreq.h>
42 #include <linux/cpu.h>
43 #include <linux/dmi.h>
44 #include <linux/moduleparam.h>
45 #include <linux/cpuidle.h>
46 #include <linux/slab.h>
47 #include <linux/acpi.h>
48 
49 #include <asm/io.h>
50 #include <asm/cpu.h>
51 #include <asm/delay.h>
52 #include <asm/uaccess.h>
53 #include <asm/processor.h>
54 #include <asm/smp.h>
55 #include <asm/acpi.h>
56 
57 #include <acpi/acpi_bus.h>
58 #include <acpi/acpi_drivers.h>
59 #include <acpi/processor.h>
60 
61 #define PREFIX "ACPI: "
62 
63 #define ACPI_PROCESSOR_CLASS		"processor"
64 #define ACPI_PROCESSOR_DEVICE_NAME	"Processor"
65 #define ACPI_PROCESSOR_FILE_INFO	"info"
66 #define ACPI_PROCESSOR_FILE_THROTTLING	"throttling"
67 #define ACPI_PROCESSOR_FILE_LIMIT	"limit"
68 #define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
69 #define ACPI_PROCESSOR_NOTIFY_POWER	0x81
70 #define ACPI_PROCESSOR_NOTIFY_THROTTLING	0x82
71 #define ACPI_PROCESSOR_DEVICE_HID	"ACPI0007"
72 
73 #define ACPI_PROCESSOR_LIMIT_USER	0
74 #define ACPI_PROCESSOR_LIMIT_THERMAL	1
75 
76 #define _COMPONENT		ACPI_PROCESSOR_COMPONENT
77 ACPI_MODULE_NAME("processor_driver");
78 
79 MODULE_AUTHOR("Paul Diefenbaugh");
80 MODULE_DESCRIPTION("ACPI Processor Driver");
81 MODULE_LICENSE("GPL");
82 
83 static int acpi_processor_add(struct acpi_device *device);
84 static int acpi_processor_remove(struct acpi_device *device, int type);
85 static void acpi_processor_notify(struct acpi_device *device, u32 event);
86 static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr);
87 static int acpi_processor_handle_eject(struct acpi_processor *pr);
88 static int acpi_processor_start(struct acpi_processor *pr);
89 
90 static const struct acpi_device_id processor_device_ids[] = {
91 	{ACPI_PROCESSOR_OBJECT_HID, 0},
92 	{ACPI_PROCESSOR_DEVICE_HID, 0},
93 	{"", 0},
94 };
95 MODULE_DEVICE_TABLE(acpi, processor_device_ids);
96 
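/*
 * System sleep (suspend/resume) hooks for the processor device; hooked up
 * to the driver core through .drv.pm in the acpi_driver below.
 */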
97 static SIMPLE_DEV_PM_OPS(acpi_processor_pm,
98 			 acpi_processor_suspend, acpi_processor_resume);
99 
100 static struct acpi_driver acpi_processor_driver = {
101 	.name = "processor",
102 	.class = ACPI_PROCESSOR_CLASS,
103 	.ids = processor_device_ids,
104 	.ops = {
105 		.add = acpi_processor_add,
106 		.remove = acpi_processor_remove,
107 		.notify = acpi_processor_notify,
108 		},
109 	.drv.pm = &acpi_processor_pm,
110 };
111 
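/*
 * Actions passed as the 'context' argument to processor_walk_namespace_cb()
 * when installing or removing the processor hotplug notify handler.
 */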
112 #define INSTALL_NOTIFY_HANDLER		1
113 #define UNINSTALL_NOTIFY_HANDLER	2
114 
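/*
 * Per-CPU pointer to each logical CPU's struct acpi_processor; exported so
 * that the rest of the ACPI processor stack (idle, throttling, cpufreq glue)
 * can look up its per-CPU data.
 */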
115 DEFINE_PER_CPU(struct acpi_processor *, processors);
116 EXPORT_PER_CPU_SYMBOL(processors);
117 
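/*
 * Chipset errata detected at probe time by acpi_processor_errata() below;
 * consulted by the power/throttling code, e.g. to disable C3 when the
 * PIIX4 Type-F DMA livelock erratum applies.
 */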
118 struct acpi_processor_errata errata __read_mostly;
119 
120 /* --------------------------------------------------------------------------
121                                 Errata Handling
122    -------------------------------------------------------------------------- */
123 
124 static int acpi_processor_errata_piix4(struct pci_dev *dev)
125 {
126 	u8 value1 = 0;
127 	u8 value2 = 0;
128 
129 
130 	if (!dev)
131 		return -EINVAL;
132 
133 	/*
134 	 * Note that 'dev' references the PIIX4 ACPI Controller.
135 	 */
136 
137 	switch (dev->revision) {
138 	case 0:
139 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
140 		break;
141 	case 1:
142 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
143 		break;
144 	case 2:
145 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
146 		break;
147 	case 3:
148 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
149 		break;
150 	default:
151 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
152 		break;
153 	}
154 
155 	switch (dev->revision) {
156 
157 	case 0:		/* PIIX4 A-step */
158 	case 1:		/* PIIX4 B-step */
159 		/*
160 		 * See specification changes #13 ("Manual Throttle Duty Cycle")
161 		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
162 		 * erratum #5 ("STPCLK# Deassertion Time") from the January
		 * 2002 PIIX4 specification update.  Applies only to the
		 * older PIIX4 models.
165 		 */
166 		errata.piix4.throttle = 1;
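		/* Fall through: the errata below apply to all PIIX4 models. */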
167 
168 	case 2:		/* PIIX4E */
169 	case 3:		/* PIIX4M */
170 		/*
171 		 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
172 		 * Livelock") from the January 2002 PIIX4 specification update.
173 		 * Applies to all PIIX4 models.
174 		 */
175 
176 		/*
177 		 * BM-IDE
178 		 * ------
179 		 * Find the PIIX4 IDE Controller and get the Bus Master IDE
180 		 * Status register address.  We'll use this later to read
181 		 * each IDE controller's DMA status to make sure we catch all
182 		 * DMA activity.
183 		 */
184 		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
185 				     PCI_DEVICE_ID_INTEL_82371AB,
186 				     PCI_ANY_ID, PCI_ANY_ID, NULL);
187 		if (dev) {
188 			errata.piix4.bmisx = pci_resource_start(dev, 4);
189 			pci_dev_put(dev);
190 		}
191 
192 		/*
193 		 * Type-F DMA
194 		 * ----------
195 		 * Find the PIIX4 ISA Controller and read the Motherboard
196 		 * DMA controller's status to see if Type-F (Fast) DMA mode
197 		 * is enabled (bit 7) on either channel.  Note that we'll
198 		 * disable C3 support if this is enabled, as some legacy
199 		 * devices won't operate well if fast DMA is disabled.
200 		 */
201 		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
202 				     PCI_DEVICE_ID_INTEL_82371AB_0,
203 				     PCI_ANY_ID, PCI_ANY_ID, NULL);
204 		if (dev) {
205 			pci_read_config_byte(dev, 0x76, &value1);
206 			pci_read_config_byte(dev, 0x77, &value2);
207 			if ((value1 & 0x80) || (value2 & 0x80))
208 				errata.piix4.fdma = 1;
209 			pci_dev_put(dev);
210 		}
211 
212 		break;
213 	}
214 
215 	if (errata.piix4.bmisx)
216 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
217 				  "Bus master activity detection (BM-IDE) erratum enabled\n"));
218 	if (errata.piix4.fdma)
219 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
220 				  "Type-F DMA livelock erratum (C3 disabled)\n"));
221 
222 	return 0;
223 }
224 
225 static int acpi_processor_errata(struct acpi_processor *pr)
226 {
227 	int result = 0;
228 	struct pci_dev *dev = NULL;
229 
230 
231 	if (!pr)
232 		return -EINVAL;
233 
234 	/*
235 	 * PIIX4
236 	 */
237 	dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
238 			     PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
239 			     PCI_ANY_ID, NULL);
240 	if (dev) {
241 		result = acpi_processor_errata_piix4(dev);
242 		pci_dev_put(dev);
243 	}
244 
245 	return result;
246 }
247 
248 /* --------------------------------------------------------------------------
249                                  Driver Interface
250    -------------------------------------------------------------------------- */
251 
252 static int acpi_processor_get_info(struct acpi_device *device)
253 {
254 	acpi_status status = 0;
255 	union acpi_object object = { 0 };
256 	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
257 	struct acpi_processor *pr;
258 	int cpu_index, device_declaration = 0;
259 	static int cpu0_initialized;
260 
261 	pr = acpi_driver_data(device);
262 	if (!pr)
263 		return -EINVAL;
264 
265 	if (num_online_cpus() > 1)
266 		errata.smp = TRUE;
267 
268 	acpi_processor_errata(pr);
269 
270 	/*
271 	 * Check to see if we have bus mastering arbitration control.  This
272 	 * is required for proper C3 usage (to maintain cache coherency).
273 	 */
274 	if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
275 		pr->flags.bm_control = 1;
276 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
277 				  "Bus mastering arbitration control present\n"));
278 	} else
279 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
280 				  "No bus mastering arbitration control\n"));
281 
282 	if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
283 		/* Declared with "Processor" statement; match ProcessorID */
284 		status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
285 		if (ACPI_FAILURE(status)) {
286 			dev_err(&device->dev,
287 				"Failed to evaluate processor object (0x%x)\n",
288 				status);
289 			return -ENODEV;
290 		}
291 
292 		/*
293 		 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
294 		 *      >>> 'acpi_get_processor_id(acpi_id, &id)' in
295 		 *      arch/xxx/acpi.c
296 		 */
297 		pr->acpi_id = object.processor.proc_id;
298 	} else {
299 		/*
300 		 * Declared with "Device" statement; match _UID.
301 		 * Note that we don't handle string _UIDs yet.
302 		 */
303 		unsigned long long value;
304 		status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
305 						NULL, &value);
306 		if (ACPI_FAILURE(status)) {
307 			dev_err(&device->dev,
308 				"Failed to evaluate processor _UID (0x%x)\n",
309 				status);
310 			return -ENODEV;
311 		}
312 		device_declaration = 1;
313 		pr->acpi_id = value;
314 	}
315 	cpu_index = acpi_get_cpuid(pr->handle, device_declaration, pr->acpi_id);
316 
317 	/* Handle UP system running SMP kernel, with no LAPIC in MADT */
318 	if (!cpu0_initialized && (cpu_index == -1) &&
319 	    (num_online_cpus() == 1)) {
320 		cpu_index = 0;
321 	}
322 
323 	cpu0_initialized = 1;
324 
325 	pr->id = cpu_index;
326 
327 	/*
	 *  Extra Processor objects may be enumerated on MP systems with
	 *  fewer than the maximum number of CPUs. They should be ignored
	 *  if and only if they are physically not present.
331 	 */
332 	if (pr->id == -1) {
333 		if (ACPI_FAILURE(acpi_processor_hotadd_init(pr)))
334 			return -ENODEV;
335 	}
336 	/*
	 * On some boxes several processors use the same processor bus id
	 * but are located in different scopes, for example:
	 * \_SB.SCK0.CPU0
	 * \_SB.SCK1.CPU0
	 * Rename the processor device bus id so that each one is unique; the
	 * new bus id is "CPU" followed by the CPU id in hex (e.g. CPU0, CPU1,
	 * ..., CPUA).
344 	 */
345 	sprintf(acpi_device_bid(device), "CPU%X", pr->id);
346 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
347 			  pr->acpi_id));
348 
349 	if (!object.processor.pblk_address)
350 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
351 	else if (object.processor.pblk_length != 6)
352 		dev_err(&device->dev, "Invalid PBLK length [%d]\n",
353 			    object.processor.pblk_length);
354 	else {
355 		pr->throttling.address = object.processor.pblk_address;
356 		pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
357 		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
358 
359 		pr->pblk = object.processor.pblk_address;
360 
361 		/*
362 		 * We don't care about error returns - we just try to mark
363 		 * these reserved so that nobody else is confused into thinking
		 * that this region might be unused.
365 		 *
366 		 * (In particular, allocating the IO range for Cardbus)
367 		 */
368 		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
369 	}
370 
371 	/*
	 * If ACPI describes a slot number for this CPU, we can use it to
	 * ensure we get the right value in the "physical id" field
	 * of /proc/cpuinfo.
375 	 */
376 	status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
377 	if (ACPI_SUCCESS(status))
378 		arch_fix_phys_package_id(pr->id, object.integer.value);
379 
380 	return 0;
381 }
382 
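/*
 * Per-CPU pointer to the acpi_device bound to each CPU id; used in
 * acpi_processor_add() to catch BIOSes that report the same ACPI id for
 * more than one processor object.
 */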
383 static DEFINE_PER_CPU(void *, processor_device_array);
384 
385 static void acpi_processor_notify(struct acpi_device *device, u32 event)
386 {
387 	struct acpi_processor *pr = acpi_driver_data(device);
388 	int saved;
389 
390 	if (!pr)
391 		return;
392 
393 	switch (event) {
394 	case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
395 		saved = pr->performance_platform_limit;
396 		acpi_processor_ppc_has_changed(pr, 1);
397 		if (saved == pr->performance_platform_limit)
398 			break;
399 		acpi_bus_generate_proc_event(device, event,
400 					pr->performance_platform_limit);
401 		acpi_bus_generate_netlink_event(device->pnp.device_class,
402 						  dev_name(&device->dev), event,
403 						  pr->performance_platform_limit);
404 		break;
405 	case ACPI_PROCESSOR_NOTIFY_POWER:
406 		acpi_processor_cst_has_changed(pr);
407 		acpi_bus_generate_proc_event(device, event, 0);
408 		acpi_bus_generate_netlink_event(device->pnp.device_class,
409 						  dev_name(&device->dev), event, 0);
410 		break;
411 	case ACPI_PROCESSOR_NOTIFY_THROTTLING:
412 		acpi_processor_tstate_has_changed(pr);
413 		acpi_bus_generate_proc_event(device, event, 0);
414 		acpi_bus_generate_netlink_event(device->pnp.device_class,
415 						  dev_name(&device->dev), event, 0);
416 		break;
417 	default:
418 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
419 				  "Unsupported event [0x%x]\n", event));
420 		break;
421 	}
422 
423 	return;
424 }
425 
426 static int acpi_cpu_soft_notify(struct notifier_block *nfb,
427 		unsigned long action, void *hcpu)
428 {
429 	unsigned int cpu = (unsigned long)hcpu;
430 	struct acpi_processor *pr = per_cpu(processors, cpu);
431 
432 	if (action == CPU_ONLINE && pr) {
433 		/* CPU got physically hotplugged and onlined the first time:
434 		 * Initialize missing things
435 		 */
436 		if (pr->flags.need_hotplug_init) {
437 			pr_info("Will online and init hotplugged CPU: %d\n",
438 				pr->id);
439 			WARN(acpi_processor_start(pr), "Failed to start CPU:"
440 				" %d\n", pr->id);
441 			pr->flags.need_hotplug_init = 0;
442 		/* Normal CPU soft online event */
443 		} else {
444 			acpi_processor_ppc_has_changed(pr, 0);
445 			acpi_processor_hotplug(pr);
446 			acpi_processor_reevaluate_tstate(pr, action);
447 			acpi_processor_tstate_has_changed(pr);
448 		}
449 	}
450 	if (action == CPU_DEAD && pr) {
		/* Invalidate flags.throttling after a CPU goes offline */
452 		acpi_processor_reevaluate_tstate(pr, action);
453 	}
454 	return NOTIFY_OK;
455 }
456 
457 static struct notifier_block acpi_cpu_notifier =
458 {
459 	    .notifier_call = acpi_cpu_soft_notify,
460 };
461 
462 /*
463  * acpi_processor_start() is called by the cpu_hotplug_notifier func:
 * acpi_cpu_soft_notify(). Marking it __cpuinit{data} is difficult; the
 * root cause seems to be that acpi_processor_uninstall_hotplug_notify()
 * is in the module_exit (__exit) func. Keeping acpi_processor_start()
 * out of the __cpuinit section, while still calling it from __cpuinit
 * funcs via __ref, looks like the right thing to do here.
469  */
470 static __ref int acpi_processor_start(struct acpi_processor *pr)
471 {
472 	struct acpi_device *device = per_cpu(processor_device_array, pr->id);
473 	int result = 0;
474 
475 #ifdef CONFIG_CPU_FREQ
476 	acpi_processor_ppc_has_changed(pr, 0);
477 	acpi_processor_load_module(pr);
478 #endif
479 	acpi_processor_get_throttling_info(pr);
480 	acpi_processor_get_limit_info(pr);
481 
482 	if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
483 		acpi_processor_power_init(pr);
484 
485 	pr->cdev = thermal_cooling_device_register("Processor", device,
486 						   &processor_cooling_ops);
487 	if (IS_ERR(pr->cdev)) {
488 		result = PTR_ERR(pr->cdev);
489 		goto err_power_exit;
490 	}
491 
492 	dev_dbg(&device->dev, "registered as cooling_device%d\n",
493 		pr->cdev->id);
494 
495 	result = sysfs_create_link(&device->dev.kobj,
496 				   &pr->cdev->device.kobj,
497 				   "thermal_cooling");
498 	if (result) {
499 		dev_err(&device->dev,
500 			"Failed to create sysfs link 'thermal_cooling'\n");
501 		goto err_thermal_unregister;
502 	}
503 	result = sysfs_create_link(&pr->cdev->device.kobj,
504 				   &device->dev.kobj,
505 				   "device");
506 	if (result) {
507 		dev_err(&pr->cdev->device,
508 			"Failed to create sysfs link 'device'\n");
509 		goto err_remove_sysfs_thermal;
510 	}
511 
512 	return 0;
513 
514 err_remove_sysfs_thermal:
515 	sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
516 err_thermal_unregister:
517 	thermal_cooling_device_unregister(pr->cdev);
518 err_power_exit:
519 	acpi_processor_power_exit(pr);
520 
521 	return result;
522 }
523 
524 /*
525  * Do not put anything in here which needs the core to be online.
526  * For example MSR access or setting up things which check for cpuinfo_x86
527  * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc.
 * Such things have to be set up in acpi_processor_start() above.
529  */
530 static int __cpuinit acpi_processor_add(struct acpi_device *device)
531 {
532 	struct acpi_processor *pr = NULL;
533 	int result = 0;
534 	struct device *dev;
535 
536 	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
537 	if (!pr)
538 		return -ENOMEM;
539 
540 	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
541 		result = -ENOMEM;
542 		goto err_free_pr;
543 	}
544 
545 	pr->handle = device->handle;
546 	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
547 	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
548 	device->driver_data = pr;
549 
550 	result = acpi_processor_get_info(device);
551 	if (result) {
552 		/* Processor is physically not present */
553 		return 0;
554 	}
555 
556 #ifdef CONFIG_SMP
557 	if (pr->id >= setup_max_cpus && pr->id != 0)
558 		return 0;
559 #endif
560 
561 	BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
562 
563 	/*
564 	 * Buggy BIOS check
565 	 * ACPI id of processors can be reported wrongly by the BIOS.
566 	 * Don't trust it blindly
567 	 */
568 	if (per_cpu(processor_device_array, pr->id) != NULL &&
569 	    per_cpu(processor_device_array, pr->id) != device) {
570 		dev_warn(&device->dev,
571 			"BIOS reported wrong ACPI id %d for the processor\n",
572 			pr->id);
573 		result = -ENODEV;
574 		goto err_free_cpumask;
575 	}
576 	per_cpu(processor_device_array, pr->id) = device;
577 
578 	per_cpu(processors, pr->id) = pr;
579 
580 	dev = get_cpu_device(pr->id);
581 	if (sysfs_create_link(&device->dev.kobj, &dev->kobj, "sysdev")) {
582 		result = -EFAULT;
583 		goto err_clear_processor;
584 	}
585 
586 	/*
587 	 * Do not start hotplugged CPUs now, but when they
588 	 * are onlined the first time
589 	 */
590 	if (pr->flags.need_hotplug_init)
591 		return 0;
592 
593 	result = acpi_processor_start(pr);
594 	if (result)
595 		goto err_remove_sysfs;
596 
597 	return 0;
598 
599 err_remove_sysfs:
600 	sysfs_remove_link(&device->dev.kobj, "sysdev");
601 err_clear_processor:
602 	/*
603 	 * processor_device_array is not cleared to allow checks for buggy BIOS
604 	 */
605 	per_cpu(processors, pr->id) = NULL;
606 err_free_cpumask:
607 	free_cpumask_var(pr->throttling.shared_cpu_map);
608 err_free_pr:
609 	kfree(pr);
610 	return result;
611 }
612 
613 static int acpi_processor_remove(struct acpi_device *device, int type)
614 {
615 	struct acpi_processor *pr = NULL;
616 
617 
618 	if (!device || !acpi_driver_data(device))
619 		return -EINVAL;
620 
621 	pr = acpi_driver_data(device);
622 
623 	if (pr->id >= nr_cpu_ids)
624 		goto free;
625 
626 	if (type == ACPI_BUS_REMOVAL_EJECT) {
627 		if (acpi_processor_handle_eject(pr))
628 			return -EINVAL;
629 	}
630 
631 	acpi_processor_power_exit(pr);
632 
633 	sysfs_remove_link(&device->dev.kobj, "sysdev");
634 
635 	if (pr->cdev) {
636 		sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
637 		sysfs_remove_link(&pr->cdev->device.kobj, "device");
638 		thermal_cooling_device_unregister(pr->cdev);
639 		pr->cdev = NULL;
640 	}
641 
642 	per_cpu(processors, pr->id) = NULL;
643 	per_cpu(processor_device_array, pr->id) = NULL;
644 
645 free:
646 	free_cpumask_var(pr->throttling.shared_cpu_map);
647 	kfree(pr);
648 
649 	return 0;
650 }
651 
652 #ifdef CONFIG_ACPI_HOTPLUG_CPU
653 /****************************************************************************
654  * 	Acpi processor hotplug support 				       	    *
655  ****************************************************************************/
656 
657 static int is_processor_present(acpi_handle handle)
658 {
659 	acpi_status status;
660 	unsigned long long sta = 0;
661 
662 
663 	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
664 
665 	if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
666 		return 1;
667 
668 	/*
669 	 * _STA is mandatory for a processor that supports hot plug
670 	 */
671 	if (status == AE_NOT_FOUND)
672 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
673 				"Processor does not support hot plug\n"));
674 	else
675 		ACPI_EXCEPTION((AE_INFO, status,
676 				"Processor Device is not present"));
677 	return 0;
678 }
679 
680 static
681 int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
682 {
683 	acpi_handle phandle;
684 	struct acpi_device *pdev;
685 
686 
687 	if (acpi_get_parent(handle, &phandle)) {
688 		return -ENODEV;
689 	}
690 
691 	if (acpi_bus_get_device(phandle, &pdev)) {
692 		return -ENODEV;
693 	}
694 
695 	if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) {
696 		return -ENODEV;
697 	}
698 
699 	return 0;
700 }
701 
702 static void acpi_processor_hotplug_notify(acpi_handle handle,
703 					  u32 event, void *data)
704 {
705 	struct acpi_device *device = NULL;
706 	struct acpi_eject_event *ej_event = NULL;
707 	u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
708 	int result;
709 
710 	switch (event) {
711 	case ACPI_NOTIFY_BUS_CHECK:
712 	case ACPI_NOTIFY_DEVICE_CHECK:
713 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
714 		"Processor driver received %s event\n",
715 		       (event == ACPI_NOTIFY_BUS_CHECK) ?
716 		       "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"));
717 
718 		if (!is_processor_present(handle))
719 			break;
720 
721 		if (!acpi_bus_get_device(handle, &device))
722 			break;
723 
724 		result = acpi_processor_device_add(handle, &device);
725 		if (result) {
726 			acpi_handle_err(handle, "Unable to add the device\n");
727 			break;
728 		}
729 
730 		ost_code = ACPI_OST_SC_SUCCESS;
731 		break;
732 
733 	case ACPI_NOTIFY_EJECT_REQUEST:
734 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
735 				  "received ACPI_NOTIFY_EJECT_REQUEST\n"));
736 
737 		if (acpi_bus_get_device(handle, &device)) {
			acpi_handle_err(handle,
				"Device doesn't exist, dropping EJECT\n");
740 			break;
741 		}
742 		if (!acpi_driver_data(device)) {
743 			acpi_handle_err(handle,
744 				"Driver data is NULL, dropping EJECT\n");
745 			break;
746 		}
747 
748 		ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
749 		if (!ej_event) {
750 			acpi_handle_err(handle, "No memory, dropping EJECT\n");
751 			break;
752 		}
753 
754 		ej_event->handle = handle;
755 		ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
756 		acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
757 					(void *)ej_event);
758 
759 		/* eject is performed asynchronously */
760 		return;
761 
762 	default:
763 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
764 				  "Unsupported event [0x%x]\n", event));
765 
766 		/* non-hotplug event; possibly handled by other handler */
767 		return;
768 	}
769 
770 	/* Inform firmware that the hotplug operation has completed */
771 	(void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
772 	return;
773 }
774 
775 static acpi_status is_processor_device(acpi_handle handle)
776 {
777 	struct acpi_device_info *info;
778 	char *hid;
779 	acpi_status status;
780 
781 	status = acpi_get_object_info(handle, &info);
782 	if (ACPI_FAILURE(status))
783 		return status;
784 
785 	if (info->type == ACPI_TYPE_PROCESSOR) {
786 		kfree(info);
787 		return AE_OK;	/* found a processor object */
788 	}
789 
790 	if (!(info->valid & ACPI_VALID_HID)) {
791 		kfree(info);
792 		return AE_ERROR;
793 	}
794 
795 	hid = info->hardware_id.string;
796 	if ((hid == NULL) || strcmp(hid, ACPI_PROCESSOR_DEVICE_HID)) {
797 		kfree(info);
798 		return AE_ERROR;
799 	}
800 
801 	kfree(info);
802 	return AE_OK;	/* found a processor device object */
803 }
804 
805 static acpi_status
806 processor_walk_namespace_cb(acpi_handle handle,
807 			    u32 lvl, void *context, void **rv)
808 {
809 	acpi_status status;
810 	int *action = context;
811 
812 	status = is_processor_device(handle);
813 	if (ACPI_FAILURE(status))
814 		return AE_OK;	/* not a processor; continue to walk */
815 
816 	switch (*action) {
817 	case INSTALL_NOTIFY_HANDLER:
818 		acpi_install_notify_handler(handle,
819 					    ACPI_SYSTEM_NOTIFY,
820 					    acpi_processor_hotplug_notify,
821 					    NULL);
822 		break;
823 	case UNINSTALL_NOTIFY_HANDLER:
824 		acpi_remove_notify_handler(handle,
825 					   ACPI_SYSTEM_NOTIFY,
826 					   acpi_processor_hotplug_notify);
827 		break;
828 	default:
829 		break;
830 	}
831 
832 	/* found a processor; skip walking underneath */
833 	return AE_CTRL_DEPTH;
834 }
835 
836 static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
837 {
838 	acpi_handle handle = pr->handle;
839 
840 	if (!is_processor_present(handle)) {
841 		return AE_ERROR;
842 	}
843 
844 	if (acpi_map_lsapic(handle, &pr->id))
845 		return AE_ERROR;
846 
847 	if (arch_register_cpu(pr->id)) {
848 		acpi_unmap_lsapic(pr->id);
849 		return AE_ERROR;
850 	}
851 
	/*
	 * CPU got hot-plugged, but cpu_data is not initialized yet.  Set a
	 * flag to delay cpu_idle/throttling initialization in:
	 * acpi_processor_add()
	 *   acpi_processor_get_info()
	 * and do it when the CPU gets online the first time.
	 * TBD: Clean up the above functions and try to do this more elegantly.
	 */
860 	pr_info("CPU %d got hotplugged\n", pr->id);
861 	pr->flags.need_hotplug_init = 1;
862 
863 	return AE_OK;
864 }
865 
866 static int acpi_processor_handle_eject(struct acpi_processor *pr)
867 {
868 	if (cpu_online(pr->id))
869 		cpu_down(pr->id);
870 
871 	get_online_cpus();
872 	/*
873 	 * The cpu might become online again at this point. So we check whether
874 	 * the cpu has been onlined or not. If the cpu became online, it means
875 	 * that someone wants to use the cpu. So acpi_processor_handle_eject()
876 	 * returns -EAGAIN.
877 	 */
878 	if (unlikely(cpu_online(pr->id))) {
879 		put_online_cpus();
		pr_warn("Failed to remove CPU %d, because another task "
			"brought the CPU back online\n", pr->id);
882 		return -EAGAIN;
883 	}
884 	arch_unregister_cpu(pr->id);
885 	acpi_unmap_lsapic(pr->id);
886 	put_online_cpus();
	return 0;
888 }
889 #else
890 static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
891 {
892 	return AE_ERROR;
893 }
894 static int acpi_processor_handle_eject(struct acpi_processor *pr)
895 {
	return -EINVAL;
897 }
898 #endif
899 
900 static
901 void acpi_processor_install_hotplug_notify(void)
902 {
903 #ifdef CONFIG_ACPI_HOTPLUG_CPU
904 	int action = INSTALL_NOTIFY_HANDLER;
905 	acpi_walk_namespace(ACPI_TYPE_ANY,
906 			    ACPI_ROOT_OBJECT,
907 			    ACPI_UINT32_MAX,
908 			    processor_walk_namespace_cb, NULL, &action, NULL);
909 #endif
910 	register_hotcpu_notifier(&acpi_cpu_notifier);
911 }
912 
913 static
914 void acpi_processor_uninstall_hotplug_notify(void)
915 {
916 #ifdef CONFIG_ACPI_HOTPLUG_CPU
917 	int action = UNINSTALL_NOTIFY_HANDLER;
918 	acpi_walk_namespace(ACPI_TYPE_ANY,
919 			    ACPI_ROOT_OBJECT,
920 			    ACPI_UINT32_MAX,
921 			    processor_walk_namespace_cb, NULL, &action, NULL);
922 #endif
923 	unregister_hotcpu_notifier(&acpi_cpu_notifier);
924 }
925 
926 /*
927  * We keep the driver loaded even when ACPI is not running.
 * This is needed for the powernow-k8 driver, which works even without
 * ACPI, but needs symbols from this driver.
930  */
931 
932 static int __init acpi_processor_init(void)
933 {
934 	int result = 0;
935 
936 	if (acpi_disabled)
937 		return 0;
938 
939 	result = acpi_bus_register_driver(&acpi_processor_driver);
940 	if (result < 0)
941 		return result;
942 
943 	acpi_processor_install_hotplug_notify();
944 
945 	acpi_thermal_cpufreq_init();
946 
947 	acpi_processor_ppc_init();
948 
949 	acpi_processor_throttling_init();
950 
951 	return 0;
952 }
953 
954 static void __exit acpi_processor_exit(void)
955 {
956 	if (acpi_disabled)
957 		return;
958 
959 	acpi_processor_ppc_exit();
960 
961 	acpi_thermal_cpufreq_exit();
962 
963 	acpi_processor_uninstall_hotplug_notify();
964 
965 	acpi_bus_unregister_driver(&acpi_processor_driver);
966 
967 	return;
968 }
969 
970 module_init(acpi_processor_init);
971 module_exit(acpi_processor_exit);
972 
973 MODULE_ALIAS("processor");
974