1 /*
2  * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $)
3  *
4  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6  *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
7  *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8  *  			- Added processor hotplug support
9  *
10  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2 of the License, or (at
15  *  your option) any later version.
16  *
17  *  This program is distributed in the hope that it will be useful, but
18  *  WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  *  General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License along
23  *  with this program; if not, write to the Free Software Foundation, Inc.,
24  *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
25  *
26  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27  *  TBD:
28  *	1. Make # power states dynamic.
29  *	2. Support duty_cycle values that span bit 4.
30  *	3. Optimize by having the scheduler determine busyness instead of
31  *	   having us try to calculate it here.
32  *	4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/types.h>
39 #include <linux/pci.h>
40 #include <linux/pm.h>
41 #include <linux/cpufreq.h>
42 #include <linux/cpu.h>
43 #include <linux/dmi.h>
44 #include <linux/moduleparam.h>
45 #include <linux/cpuidle.h>
46 #include <linux/slab.h>
47 #include <linux/acpi.h>
48 #include <linux/memory_hotplug.h>
49 
50 #include <asm/io.h>
51 #include <asm/cpu.h>
52 #include <asm/delay.h>
53 #include <asm/uaccess.h>
54 #include <asm/processor.h>
55 #include <asm/smp.h>
56 #include <asm/acpi.h>
57 
58 #include <acpi/acpi_bus.h>
59 #include <acpi/acpi_drivers.h>
60 #include <acpi/processor.h>
61 
62 #define PREFIX "ACPI: "
63 
64 #define ACPI_PROCESSOR_CLASS		"processor"
65 #define ACPI_PROCESSOR_DEVICE_NAME	"Processor"
66 #define ACPI_PROCESSOR_FILE_INFO	"info"
67 #define ACPI_PROCESSOR_FILE_THROTTLING	"throttling"
68 #define ACPI_PROCESSOR_FILE_LIMIT	"limit"
69 #define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
70 #define ACPI_PROCESSOR_NOTIFY_POWER	0x81
71 #define ACPI_PROCESSOR_NOTIFY_THROTTLING	0x82
72 #define ACPI_PROCESSOR_DEVICE_HID	"ACPI0007"
73 
74 #define ACPI_PROCESSOR_LIMIT_USER	0
75 #define ACPI_PROCESSOR_LIMIT_THERMAL	1
76 
77 #define _COMPONENT		ACPI_PROCESSOR_COMPONENT
78 ACPI_MODULE_NAME("processor_driver");
79 
80 MODULE_AUTHOR("Paul Diefenbaugh");
81 MODULE_DESCRIPTION("ACPI Processor Driver");
82 MODULE_LICENSE("GPL");
83 
84 static int acpi_processor_add(struct acpi_device *device);
85 static int acpi_processor_remove(struct acpi_device *device);
86 static void acpi_processor_notify(struct acpi_device *device, u32 event);
87 static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr);
88 static int acpi_processor_handle_eject(struct acpi_processor *pr);
89 static int acpi_processor_start(struct acpi_processor *pr);
90 
91 static const struct acpi_device_id processor_device_ids[] = {
92 	{ACPI_PROCESSOR_OBJECT_HID, 0},
93 	{ACPI_PROCESSOR_DEVICE_HID, 0},
94 	{"", 0},
95 };
96 MODULE_DEVICE_TABLE(acpi, processor_device_ids);
97 
98 static struct acpi_driver acpi_processor_driver = {
99 	.name = "processor",
100 	.class = ACPI_PROCESSOR_CLASS,
101 	.ids = processor_device_ids,
102 	.ops = {
103 		.add = acpi_processor_add,
104 		.remove = acpi_processor_remove,
105 		.notify = acpi_processor_notify,
106 		},
107 };
108 
109 #define INSTALL_NOTIFY_HANDLER		1
110 #define UNINSTALL_NOTIFY_HANDLER	2
111 
112 DEFINE_PER_CPU(struct acpi_processor *, processors);
113 EXPORT_PER_CPU_SYMBOL(processors);
114 
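/* Chipset errata discovered while probing processors (currently PIIX4 only). */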
115 struct acpi_processor_errata errata __read_mostly;
116 
117 /* --------------------------------------------------------------------------
118                                 Errata Handling
119    -------------------------------------------------------------------------- */
120 
121 static int acpi_processor_errata_piix4(struct pci_dev *dev)
122 {
123 	u8 value1 = 0;
124 	u8 value2 = 0;
125 
126 
127 	if (!dev)
128 		return -EINVAL;
129 
130 	/*
131 	 * Note that 'dev' references the PIIX4 ACPI Controller.
132 	 */
133 
134 	switch (dev->revision) {
135 	case 0:
136 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
137 		break;
138 	case 1:
139 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
140 		break;
141 	case 2:
142 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
143 		break;
144 	case 3:
145 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
146 		break;
147 	default:
148 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
149 		break;
150 	}
151 
152 	switch (dev->revision) {
153 
154 	case 0:		/* PIIX4 A-step */
155 	case 1:		/* PIIX4 B-step */
156 		/*
157 		 * See specification changes #13 ("Manual Throttle Duty Cycle")
158 		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
159 		 * erratum #5 ("STPCLK# Deassertion Time") from the January
160 		 * 2002 PIIX4 specification update.  Applies only to the older
161 		 * A- and B-step PIIX4 models.
162 		 */
163 		errata.piix4.throttle = 1;
164 
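		/* Fall through: the errata handled below apply to all PIIX4 models. */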
165 	case 2:		/* PIIX4E */
166 	case 3:		/* PIIX4M */
167 		/*
168 		 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
169 		 * Livelock") from the January 2002 PIIX4 specification update.
170 		 * Applies to all PIIX4 models.
171 		 */
172 
173 		/*
174 		 * BM-IDE
175 		 * ------
176 		 * Find the PIIX4 IDE Controller and get the Bus Master IDE
177 		 * Status register address.  We'll use this later to read
178 		 * each IDE controller's DMA status to make sure we catch all
179 		 * DMA activity.
180 		 */
181 		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
182 				     PCI_DEVICE_ID_INTEL_82371AB,
183 				     PCI_ANY_ID, PCI_ANY_ID, NULL);
184 		if (dev) {
185 			errata.piix4.bmisx = pci_resource_start(dev, 4);
186 			pci_dev_put(dev);
187 		}
188 
189 		/*
190 		 * Type-F DMA
191 		 * ----------
192 		 * Find the PIIX4 ISA Controller and read the Motherboard
193 		 * DMA controller's status to see if Type-F (Fast) DMA mode
194 		 * is enabled (bit 7) on either channel.  Note that we'll
195 		 * disable C3 support if this is enabled, as some legacy
196 		 * devices won't operate well if fast DMA is disabled.
197 		 */
198 		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
199 				     PCI_DEVICE_ID_INTEL_82371AB_0,
200 				     PCI_ANY_ID, PCI_ANY_ID, NULL);
201 		if (dev) {
202 			pci_read_config_byte(dev, 0x76, &value1);
203 			pci_read_config_byte(dev, 0x77, &value2);
204 			if ((value1 & 0x80) || (value2 & 0x80))
205 				errata.piix4.fdma = 1;
206 			pci_dev_put(dev);
207 		}
208 
209 		break;
210 	}
211 
212 	if (errata.piix4.bmisx)
213 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
214 				  "Bus master activity detection (BM-IDE) erratum enabled\n"));
215 	if (errata.piix4.fdma)
216 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
217 				  "Type-F DMA livelock erratum (C3 disabled)\n"));
218 
219 	return 0;
220 }
221 
222 static int acpi_processor_errata(struct acpi_processor *pr)
223 {
224 	int result = 0;
225 	struct pci_dev *dev = NULL;
226 
227 
228 	if (!pr)
229 		return -EINVAL;
230 
231 	/*
232 	 * PIIX4
233 	 */
234 	dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
235 			     PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
236 			     PCI_ANY_ID, NULL);
237 	if (dev) {
238 		result = acpi_processor_errata_piix4(dev);
239 		pci_dev_put(dev);
240 	}
241 
242 	return result;
243 }
244 
245 /* --------------------------------------------------------------------------
246                                  Driver Interface
247    -------------------------------------------------------------------------- */
248 
249 static int acpi_processor_get_info(struct acpi_device *device)
250 {
251 	acpi_status status = 0;
252 	union acpi_object object = { 0 };
253 	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
254 	struct acpi_processor *pr;
255 	int cpu_index, device_declaration = 0;
256 	static int cpu0_initialized;
257 
258 	pr = acpi_driver_data(device);
259 	if (!pr)
260 		return -EINVAL;
261 
262 	if (num_online_cpus() > 1)
263 		errata.smp = TRUE;
264 
265 	acpi_processor_errata(pr);
266 
267 	/*
268 	 * Check to see if we have bus mastering arbitration control.  This
269 	 * is required for proper C3 usage (to maintain cache coherency).
270 	 */
271 	if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
272 		pr->flags.bm_control = 1;
273 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
274 				  "Bus mastering arbitration control present\n"));
275 	} else
276 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
277 				  "No bus mastering arbitration control\n"));
278 
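	/*
	 * A CPU can be declared in ASL either with the legacy Processor
	 * statement or, since ACPI 3.0, as a Device with the ACPI0007 _HID.
	 * Illustrative ASL only:
	 *
	 *   Processor (CPU0, 0x01, 0x00000410, 0x06) { }
	 *
	 *   Device (CPU0) { Name (_HID, "ACPI0007")  Name (_UID, 1) }
	 *
	 * The first form carries the processor id in the object itself,
	 * the second one carries it in _UID.
	 */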
279 	if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
280 		/* Declared with "Processor" statement; match ProcessorID */
281 		status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
282 		if (ACPI_FAILURE(status)) {
283 			dev_err(&device->dev,
284 				"Failed to evaluate processor object (0x%x)\n",
285 				status);
286 			return -ENODEV;
287 		}
288 
289 		/*
290 		 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
291 		 *      >>> 'acpi_get_processor_id(acpi_id, &id)' in
292 		 *      arch/xxx/acpi.c
293 		 */
294 		pr->acpi_id = object.processor.proc_id;
295 	} else {
296 		/*
297 		 * Declared with "Device" statement; match _UID.
298 		 * Note that we don't handle string _UIDs yet.
299 		 */
300 		unsigned long long value;
301 		status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
302 						NULL, &value);
303 		if (ACPI_FAILURE(status)) {
304 			dev_err(&device->dev,
305 				"Failed to evaluate processor _UID (0x%x)\n",
306 				status);
307 			return -ENODEV;
308 		}
309 		device_declaration = 1;
310 		pr->acpi_id = value;
311 	}
312 	cpu_index = acpi_get_cpuid(pr->handle, device_declaration, pr->acpi_id);
313 
314 	/* Handle UP system running SMP kernel, with no LAPIC in MADT */
315 	if (!cpu0_initialized && (cpu_index == -1) &&
316 	    (num_online_cpus() == 1)) {
317 		cpu_index = 0;
318 	}
319 
320 	cpu0_initialized = 1;
321 
322 	pr->id = cpu_index;
323 
324 	/*
325 	 *  Extra Processor objects may be enumerated on MP systems with
326 	 *  less than the max # of CPUs. They should be ignored _iff
327 	 *  they are physically not present.
328 	 */
329 	if (pr->id == -1) {
330 		if (ACPI_FAILURE(acpi_processor_hotadd_init(pr)))
331 			return -ENODEV;
332 	}
333 	/*
334 	 * On some boxes several processor objects share the same bus id but
335 	 * live in different scopes, for example:
336 	 *   \_SB.SCK0.CPU0
337 	 *   \_SB.SCK1.CPU0
338 	 * Rename the processor device bus id so that it is unique; the new
339 	 * bus id is generated in the format:
340 	 *   CPU<processor id>
341 	 */
342 	sprintf(acpi_device_bid(device), "CPU%X", pr->id);
343 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
344 			  pr->acpi_id));
345 
346 	if (!object.processor.pblk_address)
347 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
348 	else if (object.processor.pblk_length != 6)
349 		dev_err(&device->dev, "Invalid PBLK length [%d]\n",
350 			    object.processor.pblk_length);
351 	else {
352 		pr->throttling.address = object.processor.pblk_address;
353 		pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
354 		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
355 
356 		pr->pblk = object.processor.pblk_address;
357 
358 		/*
359 		 * We don't care about error returns - we just try to mark
360 		 * this region reserved so that nobody else is confused into
361 		 * thinking it might be unused.
362 		 *
363 		 * (In particular, the CardBus I/O range allocation.)
364 		 */
365 		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
366 	}
367 
368 	/*
369 	 * If ACPI describes a slot number for this CPU, we can use it to
370 	 * ensure we get the right value in the "physical id" field
371 	 * of /proc/cpuinfo.
372 	 */
373 	status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
374 	if (ACPI_SUCCESS(status))
375 		arch_fix_phys_package_id(pr->id, object.integer.value);
376 
377 	return 0;
378 }
379 
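/*
 * Per-CPU pointer back to each processor's struct acpi_device.  Used by
 * acpi_processor_start() to look up the device and by acpi_processor_add()
 * to catch BIOSes that report the same ACPI id for two different processors.
 */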
380 static DEFINE_PER_CPU(void *, processor_device_array);
381 
382 static void acpi_processor_notify(struct acpi_device *device, u32 event)
383 {
384 	struct acpi_processor *pr = acpi_driver_data(device);
385 	int saved;
386 
387 	if (!pr)
388 		return;
389 
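	/*
	 * 0x80, 0x81 and 0x82 are the standard processor notifications for
	 * performance (_PPC), power (C-states/_CST) and throttling changes.
	 * Each case re-reads the affected data and forwards the event to
	 * user space via /proc events and netlink.
	 */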
390 	switch (event) {
391 	case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
392 		saved = pr->performance_platform_limit;
393 		acpi_processor_ppc_has_changed(pr, 1);
394 		if (saved == pr->performance_platform_limit)
395 			break;
396 		acpi_bus_generate_proc_event(device, event,
397 					pr->performance_platform_limit);
398 		acpi_bus_generate_netlink_event(device->pnp.device_class,
399 						  dev_name(&device->dev), event,
400 						  pr->performance_platform_limit);
401 		break;
402 	case ACPI_PROCESSOR_NOTIFY_POWER:
403 		acpi_processor_cst_has_changed(pr);
404 		acpi_bus_generate_proc_event(device, event, 0);
405 		acpi_bus_generate_netlink_event(device->pnp.device_class,
406 						  dev_name(&device->dev), event, 0);
407 		break;
408 	case ACPI_PROCESSOR_NOTIFY_THROTTLING:
409 		acpi_processor_tstate_has_changed(pr);
410 		acpi_bus_generate_proc_event(device, event, 0);
411 		acpi_bus_generate_netlink_event(device->pnp.device_class,
412 						  dev_name(&device->dev), event, 0);
413 		break;
414 	default:
415 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
416 				  "Unsupported event [0x%x]\n", event));
417 		break;
418 	}
419 
420 	return;
421 }
422 
423 static int acpi_cpu_soft_notify(struct notifier_block *nfb,
424 		unsigned long action, void *hcpu)
425 {
426 	unsigned int cpu = (unsigned long)hcpu;
427 	struct acpi_processor *pr = per_cpu(processors, cpu);
428 
429 	if (action == CPU_ONLINE && pr) {
430 		/* CPU was physically hot-added and is onlined for the first
431 		 * time: initialize the things we deferred until now.
432 		 */
433 		if (pr->flags.need_hotplug_init) {
434 			pr_info("Will online and init hotplugged CPU: %d\n",
435 				pr->id);
436 			WARN(acpi_processor_start(pr), "Failed to start CPU:"
437 				" %d\n", pr->id);
438 			pr->flags.need_hotplug_init = 0;
439 		/* Normal CPU soft online event */
440 		} else {
441 			acpi_processor_ppc_has_changed(pr, 0);
442 			acpi_processor_hotplug(pr);
443 			acpi_processor_reevaluate_tstate(pr, action);
444 			acpi_processor_tstate_has_changed(pr);
445 		}
446 	}
447 	if (action == CPU_DEAD && pr) {
448 		/* Invalidate flags.throttling after a CPU goes offline. */
449 		acpi_processor_reevaluate_tstate(pr, action);
450 	}
451 	return NOTIFY_OK;
452 }
453 
454 static struct notifier_block acpi_cpu_notifier =
455 {
456 	    .notifier_call = acpi_cpu_soft_notify,
457 };
458 
459 /*
460  * acpi_processor_start() is called by the cpu_hotplug_notifier func:
461  * acpi_cpu_soft_notify(). Making it __cpuinit{data} is difficult; the
462  * root cause seems to be that acpi_processor_uninstall_hotplug_notify()
463  * is in the module_exit (__exit) func. Keeping acpi_processor_start()
464  * out of the __cpuinit section, while calling it from __cpuinit funcs
465  * via __ref, looks like the right thing to do here.
466  */
467 static __ref int acpi_processor_start(struct acpi_processor *pr)
468 {
469 	struct acpi_device *device = per_cpu(processor_device_array, pr->id);
470 	int result = 0;
471 
472 #ifdef CONFIG_CPU_FREQ
473 	acpi_processor_ppc_has_changed(pr, 0);
474 	acpi_processor_load_module(pr);
475 #endif
476 	acpi_processor_get_throttling_info(pr);
477 	acpi_processor_get_limit_info(pr);
478 
479 	if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
480 		acpi_processor_power_init(pr);
481 
482 	pr->cdev = thermal_cooling_device_register("Processor", device,
483 						   &processor_cooling_ops);
484 	if (IS_ERR(pr->cdev)) {
485 		result = PTR_ERR(pr->cdev);
486 		goto err_power_exit;
487 	}
488 
489 	dev_dbg(&device->dev, "registered as cooling_device%d\n",
490 		pr->cdev->id);
491 
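	/*
	 * Cross-link the ACPI device and its cooling device in sysfs:
	 * <acpi device>/thermal_cooling points to cooling_deviceN, and
	 * cooling_deviceN/device points back to the ACPI device.
	 */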
492 	result = sysfs_create_link(&device->dev.kobj,
493 				   &pr->cdev->device.kobj,
494 				   "thermal_cooling");
495 	if (result) {
496 		dev_err(&device->dev,
497 			"Failed to create sysfs link 'thermal_cooling'\n");
498 		goto err_thermal_unregister;
499 	}
500 	result = sysfs_create_link(&pr->cdev->device.kobj,
501 				   &device->dev.kobj,
502 				   "device");
503 	if (result) {
504 		dev_err(&pr->cdev->device,
505 			"Failed to create sysfs link 'device'\n");
506 		goto err_remove_sysfs_thermal;
507 	}
508 
509 	return 0;
510 
511 err_remove_sysfs_thermal:
512 	sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
513 err_thermal_unregister:
514 	thermal_cooling_device_unregister(pr->cdev);
515 err_power_exit:
516 	acpi_processor_power_exit(pr);
517 
518 	return result;
519 }
520 
521 /*
522  * Do not put anything in here that needs the core to be online,
523  * for example MSR access or anything that checks cpuinfo_x86
524  * (cpu_data(cpu)) values such as CPU feature flags, family, or model.
525  * Such things have to be set up in acpi_processor_start() above.
526  */
527 static int __cpuinit acpi_processor_add(struct acpi_device *device)
528 {
529 	struct acpi_processor *pr = NULL;
530 	int result = 0;
531 	struct device *dev;
532 
533 	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
534 	if (!pr)
535 		return -ENOMEM;
536 
537 	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
538 		result = -ENOMEM;
539 		goto err_free_pr;
540 	}
541 
542 	pr->handle = device->handle;
543 	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
544 	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
545 	device->driver_data = pr;
546 
547 	result = acpi_processor_get_info(device);
548 	if (result) {
549 		/* Processor is physically not present */
550 		return 0;
551 	}
552 
553 #ifdef CONFIG_SMP
554 	if (pr->id >= setup_max_cpus && pr->id != 0)
555 		return 0;
556 #endif
557 
558 	BUG_ON(pr->id >= nr_cpu_ids);
559 
560 	/*
561 	 * Buggy BIOS check
562 	 * The ACPI id of a processor can be reported wrongly by the BIOS.
563 	 * Don't trust it blindly.
564 	 */
565 	if (per_cpu(processor_device_array, pr->id) != NULL &&
566 	    per_cpu(processor_device_array, pr->id) != device) {
567 		dev_warn(&device->dev,
568 			"BIOS reported wrong ACPI id %d for the processor\n",
569 			pr->id);
570 		result = -ENODEV;
571 		goto err_free_cpumask;
572 	}
573 	per_cpu(processor_device_array, pr->id) = device;
574 
575 	per_cpu(processors, pr->id) = pr;
576 
577 	dev = get_cpu_device(pr->id);
578 	if (sysfs_create_link(&device->dev.kobj, &dev->kobj, "sysdev")) {
579 		result = -EFAULT;
580 		goto err_clear_processor;
581 	}
582 
583 	/*
584 	 * Do not start hotplugged CPUs now, but when they
585 	 * are onlined the first time
586 	 */
587 	if (pr->flags.need_hotplug_init)
588 		return 0;
589 
590 	result = acpi_processor_start(pr);
591 	if (result)
592 		goto err_remove_sysfs;
593 
594 	return 0;
595 
596 err_remove_sysfs:
597 	sysfs_remove_link(&device->dev.kobj, "sysdev");
598 err_clear_processor:
599 	/*
600 	 * processor_device_array is not cleared to allow checks for buggy BIOS
601 	 */
602 	per_cpu(processors, pr->id) = NULL;
603 err_free_cpumask:
604 	free_cpumask_var(pr->throttling.shared_cpu_map);
605 err_free_pr:
606 	kfree(pr);
607 	return result;
608 }
609 
610 static int acpi_processor_remove(struct acpi_device *device)
611 {
612 	struct acpi_processor *pr = NULL;
613 
614 
615 	if (!device || !acpi_driver_data(device))
616 		return -EINVAL;
617 
618 	pr = acpi_driver_data(device);
619 
620 	if (pr->id >= nr_cpu_ids)
621 		goto free;
622 
623 	if (device->removal_type == ACPI_BUS_REMOVAL_EJECT) {
624 		if (acpi_processor_handle_eject(pr))
625 			return -EINVAL;
626 	}
627 
628 	acpi_processor_power_exit(pr);
629 
630 	sysfs_remove_link(&device->dev.kobj, "sysdev");
631 
632 	if (pr->cdev) {
633 		sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
634 		sysfs_remove_link(&pr->cdev->device.kobj, "device");
635 		thermal_cooling_device_unregister(pr->cdev);
636 		pr->cdev = NULL;
637 	}
638 
639 	per_cpu(processors, pr->id) = NULL;
640 	per_cpu(processor_device_array, pr->id) = NULL;
641 	try_offline_node(cpu_to_node(pr->id));
642 
643 free:
644 	free_cpumask_var(pr->throttling.shared_cpu_map);
645 	kfree(pr);
646 
647 	return 0;
648 }
649 
650 #ifdef CONFIG_ACPI_HOTPLUG_CPU
651 /****************************************************************************
652  *                      ACPI processor hotplug support                      *
653  ****************************************************************************/
654 
655 static int is_processor_present(acpi_handle handle)
656 {
657 	acpi_status status;
658 	unsigned long long sta = 0;
659 
660 
661 	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
662 
663 	if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
664 		return 1;
665 
666 	/*
667 	 * _STA is mandatory for a processor that supports hot plug
668 	 */
669 	if (status == AE_NOT_FOUND)
670 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
671 				"Processor does not support hot plug\n"));
672 	else
673 		ACPI_EXCEPTION((AE_INFO, status,
674 				"Processor Device is not present"));
675 	return 0;
676 }
677 
678 static void acpi_processor_hotplug_notify(acpi_handle handle,
679 					  u32 event, void *data)
680 {
681 	struct acpi_device *device = NULL;
682 	struct acpi_eject_event *ej_event = NULL;
683 	u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
684 	acpi_status status;
685 	int result;
686 
687 	acpi_scan_lock_acquire();
688 
689 	switch (event) {
690 	case ACPI_NOTIFY_BUS_CHECK:
691 	case ACPI_NOTIFY_DEVICE_CHECK:
692 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
693 		"Processor driver received %s event\n",
694 		       (event == ACPI_NOTIFY_BUS_CHECK) ?
695 		       "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"));
696 
697 		if (!is_processor_present(handle))
698 			break;
699 
700 		if (!acpi_bus_get_device(handle, &device))
701 			break;
702 
703 		result = acpi_bus_scan(handle);
704 		if (result) {
705 			acpi_handle_err(handle, "Unable to add the device\n");
706 			break;
707 		}
708 		result = acpi_bus_get_device(handle, &device);
709 		if (result) {
710 			acpi_handle_err(handle, "Missing device object\n");
711 			break;
712 		}
713 		ost_code = ACPI_OST_SC_SUCCESS;
714 		break;
715 
716 	case ACPI_NOTIFY_EJECT_REQUEST:
717 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
718 				  "received ACPI_NOTIFY_EJECT_REQUEST\n"));
719 
720 		if (acpi_bus_get_device(handle, &device)) {
721 			acpi_handle_err(handle,
722 				"Device does not exist, dropping EJECT\n");
723 			break;
724 		}
725 		if (!acpi_driver_data(device)) {
726 			acpi_handle_err(handle,
727 				"Driver data is NULL, dropping EJECT\n");
728 			break;
729 		}
730 
731 		ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
732 		if (!ej_event) {
733 			acpi_handle_err(handle, "No memory, dropping EJECT\n");
734 			break;
735 		}
736 
737 		get_device(&device->dev);
738 		ej_event->device = device;
739 		ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
740 		/* The eject is carried out asynchronously. */
741 		status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
742 						 ej_event);
743 		if (ACPI_FAILURE(status)) {
744 			put_device(&device->dev);
745 			kfree(ej_event);
746 			break;
747 		}
748 		goto out;
749 
750 	default:
751 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
752 				  "Unsupported event [0x%x]\n", event));
753 
754 		/* non-hotplug event; possibly handled by other handler */
755 		goto out;
756 	}
757 
758 	/* Inform firmware that the hotplug operation has completed */
759 	(void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
760 
761  out:
762 	acpi_scan_lock_release();
763 }
764 
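/*
 * A handle is accepted if it refers either to a Processor object or to a
 * Device whose hardware id is ACPI0007; anything else is rejected.
 */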
765 static acpi_status is_processor_device(acpi_handle handle)
766 {
767 	struct acpi_device_info *info;
768 	char *hid;
769 	acpi_status status;
770 
771 	status = acpi_get_object_info(handle, &info);
772 	if (ACPI_FAILURE(status))
773 		return status;
774 
775 	if (info->type == ACPI_TYPE_PROCESSOR) {
776 		kfree(info);
777 		return AE_OK;	/* found a processor object */
778 	}
779 
780 	if (!(info->valid & ACPI_VALID_HID)) {
781 		kfree(info);
782 		return AE_ERROR;
783 	}
784 
785 	hid = info->hardware_id.string;
786 	if ((hid == NULL) || strcmp(hid, ACPI_PROCESSOR_DEVICE_HID)) {
787 		kfree(info);
788 		return AE_ERROR;
789 	}
790 
791 	kfree(info);
792 	return AE_OK;	/* found a processor device object */
793 }
794 
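/*
 * Namespace walk callback: install or remove the hotplug notify handler on
 * each processor object/device found, then skip the subtree below it.
 */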
795 static acpi_status
796 processor_walk_namespace_cb(acpi_handle handle,
797 			    u32 lvl, void *context, void **rv)
798 {
799 	acpi_status status;
800 	int *action = context;
801 
802 	status = is_processor_device(handle);
803 	if (ACPI_FAILURE(status))
804 		return AE_OK;	/* not a processor; continue to walk */
805 
806 	switch (*action) {
807 	case INSTALL_NOTIFY_HANDLER:
808 		acpi_install_notify_handler(handle,
809 					    ACPI_SYSTEM_NOTIFY,
810 					    acpi_processor_hotplug_notify,
811 					    NULL);
812 		break;
813 	case UNINSTALL_NOTIFY_HANDLER:
814 		acpi_remove_notify_handler(handle,
815 					   ACPI_SYSTEM_NOTIFY,
816 					   acpi_processor_hotplug_notify);
817 		break;
818 	default:
819 		break;
820 	}
821 
822 	/* found a processor; skip walking underneath */
823 	return AE_CTRL_DEPTH;
824 }
825 
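/*
 * Called when a processor has no logical CPU id yet (pr->id == -1): map its
 * local (S)APIC id to a logical CPU and register it with the architecture
 * code.  The per-CPU driver setup is deferred until the CPU first comes
 * online (see acpi_cpu_soft_notify()).
 */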
826 static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
827 {
828 	acpi_handle handle = pr->handle;
829 
830 	if (!is_processor_present(handle)) {
831 		return AE_ERROR;
832 	}
833 
834 	if (acpi_map_lsapic(handle, &pr->id))
835 		return AE_ERROR;
836 
837 	if (arch_register_cpu(pr->id)) {
838 		acpi_unmap_lsapic(pr->id);
839 		return AE_ERROR;
840 	}
841 
842 	/* The CPU was hot-plugged, but cpu_data is not initialized yet.
843 	 * Set a flag to delay the cpu_idle/throttling initialization
844 	 * done in:
845 	 * acpi_processor_add()
846 	 *   acpi_processor_get_info()
847 	 * until the CPU comes online for the first time.
848 	 * TBD: Clean up the functions above and do this more elegantly.
849 	 */
850 	pr_info("CPU %d got hotplugged\n", pr->id);
851 	pr->flags.need_hotplug_init = 1;
852 
853 	return AE_OK;
854 }
855 
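/*
 * Offline the CPU (if it is online), then unregister and unmap it.  If some
 * other task brings the CPU back online before we take the hotplug lock,
 * give up and return -EAGAIN.
 */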
856 static int acpi_processor_handle_eject(struct acpi_processor *pr)
857 {
858 	if (cpu_online(pr->id))
859 		cpu_down(pr->id);
860 
861 	get_online_cpus();
862 	/*
863 	 * The cpu might become online again at this point. So we check whether
864 	 * the cpu has been onlined or not. If the cpu became online, it means
865 	 * that someone wants to use the cpu. So acpi_processor_handle_eject()
866 	 * returns -EAGAIN.
867 	 */
868 	if (unlikely(cpu_online(pr->id))) {
869 		put_online_cpus();
870 		pr_warn("Failed to remove CPU %d, because another task "
871 			"brought the CPU back online\n", pr->id);
872 		return -EAGAIN;
873 	}
874 	arch_unregister_cpu(pr->id);
875 	acpi_unmap_lsapic(pr->id);
876 	put_online_cpus();
877 	return 0;
878 }
879 #else
880 static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
881 {
882 	return AE_ERROR;
883 }
884 static int acpi_processor_handle_eject(struct acpi_processor *pr)
885 {
886 	return -EINVAL;
887 }
888 #endif
889 
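/*
 * Walk the whole ACPI namespace and hook the hotplug notify handler onto
 * every processor object/device, then register the CPU online/offline
 * notifier used for deferred initialization and T-state re-evaluation.
 */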
890 static
891 void acpi_processor_install_hotplug_notify(void)
892 {
893 #ifdef CONFIG_ACPI_HOTPLUG_CPU
894 	int action = INSTALL_NOTIFY_HANDLER;
895 	acpi_walk_namespace(ACPI_TYPE_ANY,
896 			    ACPI_ROOT_OBJECT,
897 			    ACPI_UINT32_MAX,
898 			    processor_walk_namespace_cb, NULL, &action, NULL);
899 #endif
900 	register_hotcpu_notifier(&acpi_cpu_notifier);
901 }
902 
903 static
904 void acpi_processor_uninstall_hotplug_notify(void)
905 {
906 #ifdef CONFIG_ACPI_HOTPLUG_CPU
907 	int action = UNINSTALL_NOTIFY_HANDLER;
908 	acpi_walk_namespace(ACPI_TYPE_ANY,
909 			    ACPI_ROOT_OBJECT,
910 			    ACPI_UINT32_MAX,
911 			    processor_walk_namespace_cb, NULL, &action, NULL);
912 #endif
913 	unregister_hotcpu_notifier(&acpi_cpu_notifier);
914 }
915 
916 /*
917  * We keep the driver loaded even when ACPI is not running.
918  * This is needed for the powernow-k8 driver, which works even without
919  * ACPI but needs symbols from this driver.
920  */
921 
922 static int __init acpi_processor_init(void)
923 {
924 	int result = 0;
925 
926 	if (acpi_disabled)
927 		return 0;
928 
929 	result = acpi_bus_register_driver(&acpi_processor_driver);
930 	if (result < 0)
931 		return result;
932 
933 	acpi_processor_syscore_init();
934 
935 	acpi_processor_install_hotplug_notify();
936 
937 	acpi_thermal_cpufreq_init();
938 
939 	acpi_processor_ppc_init();
940 
941 	acpi_processor_throttling_init();
942 
943 	return 0;
944 }
945 
946 static void __exit acpi_processor_exit(void)
947 {
948 	if (acpi_disabled)
949 		return;
950 
951 	acpi_processor_ppc_exit();
952 
953 	acpi_thermal_cpufreq_exit();
954 
955 	acpi_processor_uninstall_hotplug_notify();
956 
957 	acpi_processor_syscore_exit();
958 
959 	acpi_bus_unregister_driver(&acpi_processor_driver);
960 
961 	return;
962 }
963 
964 module_init(acpi_processor_init);
965 module_exit(acpi_processor_exit);
966 
967 MODULE_ALIAS("processor");
968