/*
 * acpi_processor.c - ACPI processor enumeration support
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Copyright (C) 2013, Intel Corporation
 *                     Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <acpi/processor.h>

#include <asm/cpu.h>

#include "internal.h"

#define _COMPONENT	ACPI_PROCESSOR_COMPONENT

ACPI_MODULE_NAME("processor");

DEFINE_PER_CPU(struct acpi_processor *, processors);
EXPORT_PER_CPU_SYMBOL(processors);

/* --------------------------------------------------------------------------
                                Errata Handling
   -------------------------------------------------------------------------- */

struct acpi_processor_errata errata __read_mostly;
EXPORT_SYMBOL_GPL(errata);

static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
	u8 value1 = 0;
	u8 value2 = 0;

	if (!dev)
		return -EINVAL;

	/*
	 * Note that 'dev' references the PIIX4 ACPI Controller.
	 */

	switch (dev->revision) {
	case 0:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
		break;
	case 1:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
		break;
	case 2:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
		break;
	case 3:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
		break;
	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
		break;
	}

	switch (dev->revision) {

	case 0:		/* PIIX4 A-step */
	case 1:		/* PIIX4 B-step */
		/*
		 * See specification changes #13 ("Manual Throttle Duty Cycle")
		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
		 * erratum #5 ("STPCLK# Deassertion Time") from the January
		 * 2002 PIIX4 specification update.  Applies only to older
		 * PIIX4 models.
		 */
		errata.piix4.throttle = 1;
		/* Fall through - erratum #18 below applies to all PIIX4 models. */

	case 2:		/* PIIX4E */
	case 3:		/* PIIX4M */
		/*
		 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
		 * Livelock") from the January 2002 PIIX4 specification update.
		 * Applies to all PIIX4 models.
		 */

		/*
		 * BM-IDE
		 * ------
		 * Find the PIIX4 IDE Controller and get the Bus Master IDE
		 * Status register address.  We'll use this later to read
		 * each IDE controller's DMA status to make sure we catch all
		 * DMA activity.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			errata.piix4.bmisx = pci_resource_start(dev, 4);
			pci_dev_put(dev);
		}

		/*
		 * Type-F DMA
		 * ----------
		 * Find the PIIX4 ISA Controller and read the Motherboard
		 * DMA controller's status to see if Type-F (Fast) DMA mode
		 * is enabled (bit 7) on either channel.  Note that we'll
		 * disable C3 support if this is enabled, as some legacy
		 * devices won't operate well if fast DMA is disabled.
		 */
		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
				     PCI_DEVICE_ID_INTEL_82371AB_0,
				     PCI_ANY_ID, PCI_ANY_ID, NULL);
		if (dev) {
			pci_read_config_byte(dev, 0x76, &value1);
			pci_read_config_byte(dev, 0x77, &value2);
			if ((value1 & 0x80) || (value2 & 0x80))
				errata.piix4.fdma = 1;
			pci_dev_put(dev);
		}

		break;
	}

	if (errata.piix4.bmisx)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus master activity detection (BM-IDE) erratum enabled\n"));
	if (errata.piix4.fdma)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Type-F DMA livelock erratum (C3 disabled)\n"));

	return 0;
}

static int acpi_processor_errata(void)
{
	int result = 0;
	struct pci_dev *dev = NULL;

	/*
	 * PIIX4
	 */
	dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
			     PCI_ANY_ID, NULL);
	if (dev) {
		result = acpi_processor_errata_piix4(dev);
		pci_dev_put(dev);
	}

	return result;
}

/* --------------------------------------------------------------------------
                                 Initialization
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI_HOTPLUG_CPU
static int acpi_processor_hotadd_init(struct acpi_processor *pr)
{
	unsigned long long sta;
	acpi_status status;
	int ret;

	status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
	if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
		return -ENODEV;

	cpu_maps_update_begin();
	cpu_hotplug_begin();

	ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id);
	if (ret)
		goto out;

	ret = arch_register_cpu(pr->id);
	if (ret) {
		acpi_unmap_lsapic(pr->id);
		goto out;
	}

	/*
	 * CPU got hot-added, but cpu_data is not initialized yet.  Set a flag
	 * to delay cpu_idle/throttling initialization and do it when the CPU
	 * gets online for the first time.
	 */
	pr_info("CPU%d has been hot-added\n", pr->id);
	pr->flags.need_hotplug_init = 1;

out:
	cpu_hotplug_done();
	cpu_maps_update_done();
	return ret;
}
#else
static inline int acpi_processor_hotadd_init(struct acpi_processor *pr)
{
	return -ENODEV;
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */

static int acpi_processor_get_info(struct acpi_device *device)
{
	union acpi_object object = { 0 };
	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
	struct acpi_processor *pr = acpi_driver_data(device);
	int cpu_index, device_declaration = 0;
	acpi_status status = AE_OK;
	static int cpu0_initialized;
	unsigned long long value;

	acpi_processor_errata();

	/*
	 * Check to see if we have bus mastering arbitration control.  This
	 * is required for proper C3 usage (to maintain cache coherency).
	 */
	if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
		pr->flags.bm_control = 1;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Bus mastering arbitration control present\n"));
	} else
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "No bus mastering arbitration control\n"));

	if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
		/* Declared with "Processor" statement; match ProcessorID */
		status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status)) {
			dev_err(&device->dev,
				"Failed to evaluate processor object (0x%x)\n",
				status);
			return -ENODEV;
		}

		pr->acpi_id = object.processor.proc_id;
	} else {
		/*
		 * Declared with "Device" statement; match _UID.
		 * Note that we don't handle string _UIDs yet.
		 */
		status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
					       NULL, &value);
		if (ACPI_FAILURE(status)) {
			dev_err(&device->dev,
				"Failed to evaluate processor _UID (0x%x)\n",
				status);
			return -ENODEV;
		}
		device_declaration = 1;
		pr->acpi_id = value;
	}
	pr->apic_id = acpi_get_apicid(pr->handle, device_declaration,
				      pr->acpi_id);
	cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);

	/* Handle UP system running SMP kernel, with no LAPIC in MADT */
	if (!cpu0_initialized && (cpu_index == -1) &&
	    (num_online_cpus() == 1)) {
		cpu_index = 0;
	}

	cpu0_initialized = 1;

	pr->id = cpu_index;

	/*
	 * Extra Processor objects may be enumerated on MP systems with
	 * less than the max # of CPUs.  They should be ignored if and only
	 * if they are physically not present.
	 */
	if (pr->id == -1) {
		int ret = acpi_processor_hotadd_init(pr);

		if (ret)
			return ret;
	}
	/*
	 * On some boxes several processors use the same processor bus id,
	 * but they are located in different scopes, for example:
	 * \_SB.SCK0.CPU0
	 * \_SB.SCK1.CPU0
	 * Rename the processor device bus id, generating the new bus id
	 * as "CPU" followed by the CPU ID (in hex).
	 */
	sprintf(acpi_device_bid(device), "CPU%X", pr->id);
	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
			  pr->acpi_id));

	if (!object.processor.pblk_address)
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
	else if (object.processor.pblk_length != 6)
		dev_err(&device->dev, "Invalid PBLK length [%d]\n",
			object.processor.pblk_length);
	else {
		pr->throttling.address = object.processor.pblk_address;
		pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

		pr->pblk = object.processor.pblk_address;

		/*
		 * We don't care about error returns - we just try to mark
		 * these reserved so that nobody else is confused into thinking
		 * that this region might be unused.
		 *
		 * (In particular, allocating the IO range for Cardbus)
		 */
		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
	}

	/*
	 * If ACPI describes a slot number for this CPU, we can use it to
	 * ensure we get the right value in the "physical id" field
	 * of /proc/cpuinfo.
	 */
	status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value);
	if (ACPI_SUCCESS(status))
		arch_fix_phys_package_id(pr->id, value);

	return 0;
}

/*
 * Do not put anything in here which needs the core to be online.
 * For example, MSR access or setting up things which check for cpuinfo_x86
 * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc.
 * Such things have to be put in and set up by the processor driver's .probe().
 */
static DEFINE_PER_CPU(void *, processor_device_array);

static int acpi_processor_add(struct acpi_device *device,
			      const struct acpi_device_id *id)
{
	struct acpi_processor *pr;
	struct device *dev;
	int result = 0;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
	if (!pr)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free_pr;
	}

	pr->handle = device->handle;
	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
	device->driver_data = pr;

	result = acpi_processor_get_info(device);
	if (result) /* Processor is not physically present or unavailable */
		return 0;

#ifdef CONFIG_SMP
	if (pr->id >= setup_max_cpus && pr->id != 0)
		return 0;
#endif

	BUG_ON(pr->id >= nr_cpu_ids);

	/*
	 * Buggy BIOS check.
	 * ACPI ids of processors can be reported wrongly by the BIOS.
	 * Don't trust them blindly.
	 */
	if (per_cpu(processor_device_array, pr->id) != NULL &&
	    per_cpu(processor_device_array, pr->id) != device) {
		dev_warn(&device->dev,
			 "BIOS reported wrong ACPI id %d for the processor\n",
			 pr->id);
		/* Give up, but do not abort the namespace scan. */
		goto err;
	}
	/*
	 * processor_device_array is not cleared on errors to allow buggy BIOS
	 * checks.
	 */
	per_cpu(processor_device_array, pr->id) = device;
	per_cpu(processors, pr->id) = pr;

	dev = get_cpu_device(pr->id);
	if (!dev) {
		result = -ENODEV;
		goto err;
	}

	result = acpi_bind_one(dev, pr->handle);
	if (result)
		goto err;

	pr->dev = dev;
	dev->offline = pr->flags.need_hotplug_init;

	/* Trigger the processor driver's .probe() if present. */
	if (device_attach(dev) >= 0)
		return 1;

	dev_err(dev, "Processor driver could not be attached\n");
	acpi_unbind_one(dev);

 err:
	free_cpumask_var(pr->throttling.shared_cpu_map);
	device->driver_data = NULL;
	per_cpu(processors, pr->id) = NULL;
 err_free_pr:
	kfree(pr);
	return result;
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* --------------------------------------------------------------------------
                                    Removal
   -------------------------------------------------------------------------- */

static void acpi_processor_remove(struct acpi_device *device)
{
	struct acpi_processor *pr;

	if (!device || !acpi_driver_data(device))
		return;

	pr = acpi_driver_data(device);
	if (pr->id >= nr_cpu_ids)
		goto out;

	/*
	 * The only reason why we ever get here is CPU hot-removal.  The CPU is
	 * already offline and the ACPI device removal locking prevents it from
	 * being put back online at this point.
	 *
	 * Unbind the driver from the processor device and detach it from the
	 * ACPI companion object.
	 */
	device_release_driver(pr->dev);
	acpi_unbind_one(pr->dev);

	/* Clean up. */
	per_cpu(processor_device_array, pr->id) = NULL;
	per_cpu(processors, pr->id) = NULL;

	cpu_maps_update_begin();
	cpu_hotplug_begin();

	/* Remove the CPU. */
	arch_unregister_cpu(pr->id);
	acpi_unmap_lsapic(pr->id);

	cpu_hotplug_done();
	cpu_maps_update_done();

	try_offline_node(cpu_to_node(pr->id));

 out:
	free_cpumask_var(pr->throttling.shared_cpu_map);
	kfree(pr);
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */

/*
 * The following ACPI IDs are known to be suitable for representing as
 * processor devices.
 */
static const struct acpi_device_id processor_device_ids[] = {

	{ ACPI_PROCESSOR_OBJECT_HID, },
	{ ACPI_PROCESSOR_DEVICE_HID, },

	{ }
};

static struct acpi_scan_handler __refdata processor_handler = {
	.ids = processor_device_ids,
	.attach = acpi_processor_add,
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	.detach = acpi_processor_remove,
#endif
	.hotplug = {
		.enabled = true,
	},
};

void __init acpi_processor_init(void)
{
	acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
}