1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * scan.c - support for transforming the ACPI namespace into individual objects 4 */ 5 6 #include <linux/module.h> 7 #include <linux/init.h> 8 #include <linux/slab.h> 9 #include <linux/kernel.h> 10 #include <linux/acpi.h> 11 #include <linux/acpi_iort.h> 12 #include <linux/signal.h> 13 #include <linux/kthread.h> 14 #include <linux/dmi.h> 15 #include <linux/nls.h> 16 #include <linux/dma-map-ops.h> 17 #include <linux/platform_data/x86/apple.h> 18 #include <linux/pgtable.h> 19 20 #include "internal.h" 21 22 extern struct acpi_device *acpi_root; 23 24 #define ACPI_BUS_CLASS "system_bus" 25 #define ACPI_BUS_HID "LNXSYBUS" 26 #define ACPI_BUS_DEVICE_NAME "System Bus" 27 28 #define ACPI_IS_ROOT_DEVICE(device) (!(device)->parent) 29 30 #define INVALID_ACPI_HANDLE ((acpi_handle)empty_zero_page) 31 32 static const char *dummy_hid = "device"; 33 34 static LIST_HEAD(acpi_dep_list); 35 static DEFINE_MUTEX(acpi_dep_list_lock); 36 LIST_HEAD(acpi_bus_id_list); 37 static DEFINE_MUTEX(acpi_scan_lock); 38 static LIST_HEAD(acpi_scan_handlers_list); 39 DEFINE_MUTEX(acpi_device_lock); 40 LIST_HEAD(acpi_wakeup_device_list); 41 static DEFINE_MUTEX(acpi_hp_context_lock); 42 43 /* 44 * The UART device described by the SPCR table is the only object which needs 45 * special-casing. Everything else is covered by ACPI namespace paths in STAO 46 * table. 47 */ 48 static u64 spcr_uart_addr; 49 50 struct acpi_dep_data { 51 struct list_head node; 52 acpi_handle supplier; 53 acpi_handle consumer; 54 }; 55 56 void acpi_scan_lock_acquire(void) 57 { 58 mutex_lock(&acpi_scan_lock); 59 } 60 EXPORT_SYMBOL_GPL(acpi_scan_lock_acquire); 61 62 void acpi_scan_lock_release(void) 63 { 64 mutex_unlock(&acpi_scan_lock); 65 } 66 EXPORT_SYMBOL_GPL(acpi_scan_lock_release); 67 68 void acpi_lock_hp_context(void) 69 { 70 mutex_lock(&acpi_hp_context_lock); 71 } 72 73 void acpi_unlock_hp_context(void) 74 { 75 mutex_unlock(&acpi_hp_context_lock); 76 } 77 78 void acpi_initialize_hp_context(struct acpi_device *adev, 79 struct acpi_hotplug_context *hp, 80 int (*notify)(struct acpi_device *, u32), 81 void (*uevent)(struct acpi_device *, u32)) 82 { 83 acpi_lock_hp_context(); 84 hp->notify = notify; 85 hp->uevent = uevent; 86 acpi_set_hp_context(adev, hp); 87 acpi_unlock_hp_context(); 88 } 89 EXPORT_SYMBOL_GPL(acpi_initialize_hp_context); 90 91 int acpi_scan_add_handler(struct acpi_scan_handler *handler) 92 { 93 if (!handler) 94 return -EINVAL; 95 96 list_add_tail(&handler->list_node, &acpi_scan_handlers_list); 97 return 0; 98 } 99 100 int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler, 101 const char *hotplug_profile_name) 102 { 103 int error; 104 105 error = acpi_scan_add_handler(handler); 106 if (error) 107 return error; 108 109 acpi_sysfs_add_hotplug_profile(&handler->hotplug, hotplug_profile_name); 110 return 0; 111 } 112 113 bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent) 114 { 115 struct acpi_device_physical_node *pn; 116 bool offline = true; 117 char *envp[] = { "EVENT=offline", NULL }; 118 119 /* 120 * acpi_container_offline() calls this for all of the container's 121 * children under the container's physical_node_lock lock. 122 */ 123 mutex_lock_nested(&adev->physical_node_lock, SINGLE_DEPTH_NESTING); 124 125 list_for_each_entry(pn, &adev->physical_node_list, node) 126 if (device_supports_offline(pn->dev) && !pn->dev->offline) { 127 if (uevent) 128 kobject_uevent_env(&pn->dev->kobj, KOBJ_CHANGE, envp); 129 130 offline = false; 131 break; 132 } 133 134 mutex_unlock(&adev->physical_node_lock); 135 return offline; 136 } 137 138 static acpi_status acpi_bus_offline(acpi_handle handle, u32 lvl, void *data, 139 void **ret_p) 140 { 141 struct acpi_device *device = NULL; 142 struct acpi_device_physical_node *pn; 143 bool second_pass = (bool)data; 144 acpi_status status = AE_OK; 145 146 if (acpi_bus_get_device(handle, &device)) 147 return AE_OK; 148 149 if (device->handler && !device->handler->hotplug.enabled) { 150 *ret_p = &device->dev; 151 return AE_SUPPORT; 152 } 153 154 mutex_lock(&device->physical_node_lock); 155 156 list_for_each_entry(pn, &device->physical_node_list, node) { 157 int ret; 158 159 if (second_pass) { 160 /* Skip devices offlined by the first pass. */ 161 if (pn->put_online) 162 continue; 163 } else { 164 pn->put_online = false; 165 } 166 ret = device_offline(pn->dev); 167 if (ret >= 0) { 168 pn->put_online = !ret; 169 } else { 170 *ret_p = pn->dev; 171 if (second_pass) { 172 status = AE_ERROR; 173 break; 174 } 175 } 176 } 177 178 mutex_unlock(&device->physical_node_lock); 179 180 return status; 181 } 182 183 static acpi_status acpi_bus_online(acpi_handle handle, u32 lvl, void *data, 184 void **ret_p) 185 { 186 struct acpi_device *device = NULL; 187 struct acpi_device_physical_node *pn; 188 189 if (acpi_bus_get_device(handle, &device)) 190 return AE_OK; 191 192 mutex_lock(&device->physical_node_lock); 193 194 list_for_each_entry(pn, &device->physical_node_list, node) 195 if (pn->put_online) { 196 device_online(pn->dev); 197 pn->put_online = false; 198 } 199 200 mutex_unlock(&device->physical_node_lock); 201 202 return AE_OK; 203 } 204 205 static int acpi_scan_try_to_offline(struct acpi_device *device) 206 { 207 acpi_handle handle = device->handle; 208 struct device *errdev = NULL; 209 acpi_status status; 210 211 /* 212 * Carry out two passes here and ignore errors in the first pass, 213 * because if the devices in question are memory blocks and 214 * CONFIG_MEMCG is set, one of the blocks may hold data structures 215 * that the other blocks depend on, but it is not known in advance which 216 * block holds them. 217 * 218 * If the first pass is successful, the second one isn't needed, though. 219 */ 220 status = acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, 221 NULL, acpi_bus_offline, (void *)false, 222 (void **)&errdev); 223 if (status == AE_SUPPORT) { 224 dev_warn(errdev, "Offline disabled.\n"); 225 acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, 226 acpi_bus_online, NULL, NULL, NULL); 227 return -EPERM; 228 } 229 acpi_bus_offline(handle, 0, (void *)false, (void **)&errdev); 230 if (errdev) { 231 errdev = NULL; 232 acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, 233 NULL, acpi_bus_offline, (void *)true, 234 (void **)&errdev); 235 if (!errdev) 236 acpi_bus_offline(handle, 0, (void *)true, 237 (void **)&errdev); 238 239 if (errdev) { 240 dev_warn(errdev, "Offline failed.\n"); 241 acpi_bus_online(handle, 0, NULL, NULL); 242 acpi_walk_namespace(ACPI_TYPE_ANY, handle, 243 ACPI_UINT32_MAX, acpi_bus_online, 244 NULL, NULL, NULL); 245 return -EBUSY; 246 } 247 } 248 return 0; 249 } 250 251 static int acpi_scan_hot_remove(struct acpi_device *device) 252 { 253 acpi_handle handle = device->handle; 254 unsigned long long sta; 255 acpi_status status; 256 257 if (device->handler && device->handler->hotplug.demand_offline) { 258 if (!acpi_scan_is_offline(device, true)) 259 return -EBUSY; 260 } else { 261 int error = acpi_scan_try_to_offline(device); 262 if (error) 263 return error; 264 } 265 266 acpi_handle_debug(handle, "Ejecting\n"); 267 268 acpi_bus_trim(device); 269 270 acpi_evaluate_lck(handle, 0); 271 /* 272 * TBD: _EJD support. 273 */ 274 status = acpi_evaluate_ej0(handle); 275 if (status == AE_NOT_FOUND) 276 return -ENODEV; 277 else if (ACPI_FAILURE(status)) 278 return -EIO; 279 280 /* 281 * Verify if eject was indeed successful. If not, log an error 282 * message. No need to call _OST since _EJ0 call was made OK. 283 */ 284 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); 285 if (ACPI_FAILURE(status)) { 286 acpi_handle_warn(handle, 287 "Status check after eject failed (0x%x)\n", status); 288 } else if (sta & ACPI_STA_DEVICE_ENABLED) { 289 acpi_handle_warn(handle, 290 "Eject incomplete - status 0x%llx\n", sta); 291 } 292 293 return 0; 294 } 295 296 static int acpi_scan_device_not_present(struct acpi_device *adev) 297 { 298 if (!acpi_device_enumerated(adev)) { 299 dev_warn(&adev->dev, "Still not present\n"); 300 return -EALREADY; 301 } 302 acpi_bus_trim(adev); 303 return 0; 304 } 305 306 static int acpi_scan_device_check(struct acpi_device *adev) 307 { 308 int error; 309 310 acpi_bus_get_status(adev); 311 if (adev->status.present || adev->status.functional) { 312 /* 313 * This function is only called for device objects for which 314 * matching scan handlers exist. The only situation in which 315 * the scan handler is not attached to this device object yet 316 * is when the device has just appeared (either it wasn't 317 * present at all before or it was removed and then added 318 * again). 319 */ 320 if (adev->handler) { 321 dev_warn(&adev->dev, "Already enumerated\n"); 322 return -EALREADY; 323 } 324 error = acpi_bus_scan(adev->handle); 325 if (error) { 326 dev_warn(&adev->dev, "Namespace scan failure\n"); 327 return error; 328 } 329 if (!adev->handler) { 330 dev_warn(&adev->dev, "Enumeration failure\n"); 331 error = -ENODEV; 332 } 333 } else { 334 error = acpi_scan_device_not_present(adev); 335 } 336 return error; 337 } 338 339 static int acpi_scan_bus_check(struct acpi_device *adev) 340 { 341 struct acpi_scan_handler *handler = adev->handler; 342 struct acpi_device *child; 343 int error; 344 345 acpi_bus_get_status(adev); 346 if (!(adev->status.present || adev->status.functional)) { 347 acpi_scan_device_not_present(adev); 348 return 0; 349 } 350 if (handler && handler->hotplug.scan_dependent) 351 return handler->hotplug.scan_dependent(adev); 352 353 error = acpi_bus_scan(adev->handle); 354 if (error) { 355 dev_warn(&adev->dev, "Namespace scan failure\n"); 356 return error; 357 } 358 list_for_each_entry(child, &adev->children, node) { 359 error = acpi_scan_bus_check(child); 360 if (error) 361 return error; 362 } 363 return 0; 364 } 365 366 static int acpi_generic_hotplug_event(struct acpi_device *adev, u32 type) 367 { 368 switch (type) { 369 case ACPI_NOTIFY_BUS_CHECK: 370 return acpi_scan_bus_check(adev); 371 case ACPI_NOTIFY_DEVICE_CHECK: 372 return acpi_scan_device_check(adev); 373 case ACPI_NOTIFY_EJECT_REQUEST: 374 case ACPI_OST_EC_OSPM_EJECT: 375 if (adev->handler && !adev->handler->hotplug.enabled) { 376 dev_info(&adev->dev, "Eject disabled\n"); 377 return -EPERM; 378 } 379 acpi_evaluate_ost(adev->handle, ACPI_NOTIFY_EJECT_REQUEST, 380 ACPI_OST_SC_EJECT_IN_PROGRESS, NULL); 381 return acpi_scan_hot_remove(adev); 382 } 383 return -EINVAL; 384 } 385 386 void acpi_device_hotplug(struct acpi_device *adev, u32 src) 387 { 388 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; 389 int error = -ENODEV; 390 391 lock_device_hotplug(); 392 mutex_lock(&acpi_scan_lock); 393 394 /* 395 * The device object's ACPI handle cannot become invalid as long as we 396 * are holding acpi_scan_lock, but it might have become invalid before 397 * that lock was acquired. 398 */ 399 if (adev->handle == INVALID_ACPI_HANDLE) 400 goto err_out; 401 402 if (adev->flags.is_dock_station) { 403 error = dock_notify(adev, src); 404 } else if (adev->flags.hotplug_notify) { 405 error = acpi_generic_hotplug_event(adev, src); 406 } else { 407 int (*notify)(struct acpi_device *, u32); 408 409 acpi_lock_hp_context(); 410 notify = adev->hp ? adev->hp->notify : NULL; 411 acpi_unlock_hp_context(); 412 /* 413 * There may be additional notify handlers for device objects 414 * without the .event() callback, so ignore them here. 415 */ 416 if (notify) 417 error = notify(adev, src); 418 else 419 goto out; 420 } 421 switch (error) { 422 case 0: 423 ost_code = ACPI_OST_SC_SUCCESS; 424 break; 425 case -EPERM: 426 ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED; 427 break; 428 case -EBUSY: 429 ost_code = ACPI_OST_SC_DEVICE_BUSY; 430 break; 431 default: 432 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; 433 break; 434 } 435 436 err_out: 437 acpi_evaluate_ost(adev->handle, src, ost_code, NULL); 438 439 out: 440 acpi_bus_put_acpi_device(adev); 441 mutex_unlock(&acpi_scan_lock); 442 unlock_device_hotplug(); 443 } 444 445 static void acpi_free_power_resources_lists(struct acpi_device *device) 446 { 447 int i; 448 449 if (device->wakeup.flags.valid) 450 acpi_power_resources_list_free(&device->wakeup.resources); 451 452 if (!device->power.flags.power_resources) 453 return; 454 455 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) { 456 struct acpi_device_power_state *ps = &device->power.states[i]; 457 acpi_power_resources_list_free(&ps->resources); 458 } 459 } 460 461 static void acpi_device_release(struct device *dev) 462 { 463 struct acpi_device *acpi_dev = to_acpi_device(dev); 464 465 acpi_free_properties(acpi_dev); 466 acpi_free_pnp_ids(&acpi_dev->pnp); 467 acpi_free_power_resources_lists(acpi_dev); 468 kfree(acpi_dev); 469 } 470 471 static void acpi_device_del(struct acpi_device *device) 472 { 473 struct acpi_device_bus_id *acpi_device_bus_id; 474 475 mutex_lock(&acpi_device_lock); 476 if (device->parent) 477 list_del(&device->node); 478 479 list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) 480 if (!strcmp(acpi_device_bus_id->bus_id, 481 acpi_device_hid(device))) { 482 ida_simple_remove(&acpi_device_bus_id->instance_ida, device->pnp.instance_no); 483 if (ida_is_empty(&acpi_device_bus_id->instance_ida)) { 484 list_del(&acpi_device_bus_id->node); 485 kfree_const(acpi_device_bus_id->bus_id); 486 kfree(acpi_device_bus_id); 487 } 488 break; 489 } 490 491 list_del(&device->wakeup_list); 492 mutex_unlock(&acpi_device_lock); 493 494 acpi_power_add_remove_device(device, false); 495 acpi_device_remove_files(device); 496 if (device->remove) 497 device->remove(device); 498 499 device_del(&device->dev); 500 } 501 502 static BLOCKING_NOTIFIER_HEAD(acpi_reconfig_chain); 503 504 static LIST_HEAD(acpi_device_del_list); 505 static DEFINE_MUTEX(acpi_device_del_lock); 506 507 static void acpi_device_del_work_fn(struct work_struct *work_not_used) 508 { 509 for (;;) { 510 struct acpi_device *adev; 511 512 mutex_lock(&acpi_device_del_lock); 513 514 if (list_empty(&acpi_device_del_list)) { 515 mutex_unlock(&acpi_device_del_lock); 516 break; 517 } 518 adev = list_first_entry(&acpi_device_del_list, 519 struct acpi_device, del_list); 520 list_del(&adev->del_list); 521 522 mutex_unlock(&acpi_device_del_lock); 523 524 blocking_notifier_call_chain(&acpi_reconfig_chain, 525 ACPI_RECONFIG_DEVICE_REMOVE, adev); 526 527 acpi_device_del(adev); 528 /* 529 * Drop references to all power resources that might have been 530 * used by the device. 531 */ 532 acpi_power_transition(adev, ACPI_STATE_D3_COLD); 533 acpi_dev_put(adev); 534 } 535 } 536 537 /** 538 * acpi_scan_drop_device - Drop an ACPI device object. 539 * @handle: Handle of an ACPI namespace node, not used. 540 * @context: Address of the ACPI device object to drop. 541 * 542 * This is invoked by acpi_ns_delete_node() during the removal of the ACPI 543 * namespace node the device object pointed to by @context is attached to. 544 * 545 * The unregistration is carried out asynchronously to avoid running 546 * acpi_device_del() under the ACPICA's namespace mutex and the list is used to 547 * ensure the correct ordering (the device objects must be unregistered in the 548 * same order in which the corresponding namespace nodes are deleted). 549 */ 550 static void acpi_scan_drop_device(acpi_handle handle, void *context) 551 { 552 static DECLARE_WORK(work, acpi_device_del_work_fn); 553 struct acpi_device *adev = context; 554 555 mutex_lock(&acpi_device_del_lock); 556 557 /* 558 * Use the ACPI hotplug workqueue which is ordered, so this work item 559 * won't run after any hotplug work items submitted subsequently. That 560 * prevents attempts to register device objects identical to those being 561 * deleted from happening concurrently (such attempts result from 562 * hotplug events handled via the ACPI hotplug workqueue). It also will 563 * run after all of the work items submitted previously, which helps 564 * those work items to ensure that they are not accessing stale device 565 * objects. 566 */ 567 if (list_empty(&acpi_device_del_list)) 568 acpi_queue_hotplug_work(&work); 569 570 list_add_tail(&adev->del_list, &acpi_device_del_list); 571 /* Make acpi_ns_validate_handle() return NULL for this handle. */ 572 adev->handle = INVALID_ACPI_HANDLE; 573 574 mutex_unlock(&acpi_device_del_lock); 575 } 576 577 static struct acpi_device *handle_to_device(acpi_handle handle, 578 void (*callback)(void *)) 579 { 580 struct acpi_device *adev = NULL; 581 acpi_status status; 582 583 status = acpi_get_data_full(handle, acpi_scan_drop_device, 584 (void **)&adev, callback); 585 if (ACPI_FAILURE(status) || !adev) { 586 acpi_handle_debug(handle, "No context!\n"); 587 return NULL; 588 } 589 return adev; 590 } 591 592 int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device) 593 { 594 if (!device) 595 return -EINVAL; 596 597 *device = handle_to_device(handle, NULL); 598 if (!*device) 599 return -ENODEV; 600 601 return 0; 602 } 603 EXPORT_SYMBOL(acpi_bus_get_device); 604 605 static void get_acpi_device(void *dev) 606 { 607 acpi_dev_get(dev); 608 } 609 610 struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle) 611 { 612 return handle_to_device(handle, get_acpi_device); 613 } 614 615 void acpi_bus_put_acpi_device(struct acpi_device *adev) 616 { 617 acpi_dev_put(adev); 618 } 619 620 static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id) 621 { 622 struct acpi_device_bus_id *acpi_device_bus_id; 623 624 /* Find suitable bus_id and instance number in acpi_bus_id_list. */ 625 list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) { 626 if (!strcmp(acpi_device_bus_id->bus_id, dev_id)) 627 return acpi_device_bus_id; 628 } 629 return NULL; 630 } 631 632 static int acpi_device_set_name(struct acpi_device *device, 633 struct acpi_device_bus_id *acpi_device_bus_id) 634 { 635 struct ida *instance_ida = &acpi_device_bus_id->instance_ida; 636 int result; 637 638 result = ida_simple_get(instance_ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL); 639 if (result < 0) 640 return result; 641 642 device->pnp.instance_no = result; 643 dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, result); 644 return 0; 645 } 646 647 int acpi_device_add(struct acpi_device *device, 648 void (*release)(struct device *)) 649 { 650 struct acpi_device_bus_id *acpi_device_bus_id; 651 int result; 652 653 if (device->handle) { 654 acpi_status status; 655 656 status = acpi_attach_data(device->handle, acpi_scan_drop_device, 657 device); 658 if (ACPI_FAILURE(status)) { 659 acpi_handle_err(device->handle, 660 "Unable to attach device data\n"); 661 return -ENODEV; 662 } 663 } 664 665 /* 666 * Linkage 667 * ------- 668 * Link this device to its parent and siblings. 669 */ 670 INIT_LIST_HEAD(&device->children); 671 INIT_LIST_HEAD(&device->node); 672 INIT_LIST_HEAD(&device->wakeup_list); 673 INIT_LIST_HEAD(&device->physical_node_list); 674 INIT_LIST_HEAD(&device->del_list); 675 mutex_init(&device->physical_node_lock); 676 677 mutex_lock(&acpi_device_lock); 678 679 acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device)); 680 if (acpi_device_bus_id) { 681 result = acpi_device_set_name(device, acpi_device_bus_id); 682 if (result) 683 goto err_unlock; 684 } else { 685 acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id), 686 GFP_KERNEL); 687 if (!acpi_device_bus_id) { 688 result = -ENOMEM; 689 goto err_unlock; 690 } 691 acpi_device_bus_id->bus_id = 692 kstrdup_const(acpi_device_hid(device), GFP_KERNEL); 693 if (!acpi_device_bus_id->bus_id) { 694 kfree(acpi_device_bus_id); 695 result = -ENOMEM; 696 goto err_unlock; 697 } 698 699 ida_init(&acpi_device_bus_id->instance_ida); 700 701 result = acpi_device_set_name(device, acpi_device_bus_id); 702 if (result) { 703 kfree_const(acpi_device_bus_id->bus_id); 704 kfree(acpi_device_bus_id); 705 goto err_unlock; 706 } 707 708 list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list); 709 } 710 711 if (device->parent) 712 list_add_tail(&device->node, &device->parent->children); 713 714 if (device->wakeup.flags.valid) 715 list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list); 716 717 mutex_unlock(&acpi_device_lock); 718 719 if (device->parent) 720 device->dev.parent = &device->parent->dev; 721 722 device->dev.bus = &acpi_bus_type; 723 device->dev.release = release; 724 result = device_add(&device->dev); 725 if (result) { 726 dev_err(&device->dev, "Error registering device\n"); 727 goto err; 728 } 729 730 result = acpi_device_setup_files(device); 731 if (result) 732 printk(KERN_ERR PREFIX "Error creating sysfs interface for device %s\n", 733 dev_name(&device->dev)); 734 735 return 0; 736 737 err: 738 mutex_lock(&acpi_device_lock); 739 740 if (device->parent) 741 list_del(&device->node); 742 743 list_del(&device->wakeup_list); 744 745 err_unlock: 746 mutex_unlock(&acpi_device_lock); 747 748 acpi_detach_data(device->handle, acpi_scan_drop_device); 749 750 return result; 751 } 752 753 /* -------------------------------------------------------------------------- 754 Device Enumeration 755 -------------------------------------------------------------------------- */ 756 static bool acpi_info_matches_ids(struct acpi_device_info *info, 757 const char * const ids[]) 758 { 759 struct acpi_pnp_device_id_list *cid_list = NULL; 760 int i, index; 761 762 if (!(info->valid & ACPI_VALID_HID)) 763 return false; 764 765 index = match_string(ids, -1, info->hardware_id.string); 766 if (index >= 0) 767 return true; 768 769 if (info->valid & ACPI_VALID_CID) 770 cid_list = &info->compatible_id_list; 771 772 if (!cid_list) 773 return false; 774 775 for (i = 0; i < cid_list->count; i++) { 776 index = match_string(ids, -1, cid_list->ids[i].string); 777 if (index >= 0) 778 return true; 779 } 780 781 return false; 782 } 783 784 /* List of HIDs for which we ignore matching ACPI devices, when checking _DEP lists. */ 785 static const char * const acpi_ignore_dep_ids[] = { 786 "PNP0D80", /* Windows-compatible System Power Management Controller */ 787 "INT33BD", /* Intel Baytrail Mailbox Device */ 788 NULL 789 }; 790 791 static struct acpi_device *acpi_bus_get_parent(acpi_handle handle) 792 { 793 struct acpi_device *device = NULL; 794 acpi_status status; 795 796 /* 797 * Fixed hardware devices do not appear in the namespace and do not 798 * have handles, but we fabricate acpi_devices for them, so we have 799 * to deal with them specially. 800 */ 801 if (!handle) 802 return acpi_root; 803 804 do { 805 status = acpi_get_parent(handle, &handle); 806 if (ACPI_FAILURE(status)) 807 return status == AE_NULL_ENTRY ? NULL : acpi_root; 808 } while (acpi_bus_get_device(handle, &device)); 809 return device; 810 } 811 812 acpi_status 813 acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd) 814 { 815 acpi_status status; 816 acpi_handle tmp; 817 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; 818 union acpi_object *obj; 819 820 status = acpi_get_handle(handle, "_EJD", &tmp); 821 if (ACPI_FAILURE(status)) 822 return status; 823 824 status = acpi_evaluate_object(handle, "_EJD", NULL, &buffer); 825 if (ACPI_SUCCESS(status)) { 826 obj = buffer.pointer; 827 status = acpi_get_handle(ACPI_ROOT_OBJECT, obj->string.pointer, 828 ejd); 829 kfree(buffer.pointer); 830 } 831 return status; 832 } 833 EXPORT_SYMBOL_GPL(acpi_bus_get_ejd); 834 835 static int acpi_bus_extract_wakeup_device_power_package(struct acpi_device *dev) 836 { 837 acpi_handle handle = dev->handle; 838 struct acpi_device_wakeup *wakeup = &dev->wakeup; 839 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 840 union acpi_object *package = NULL; 841 union acpi_object *element = NULL; 842 acpi_status status; 843 int err = -ENODATA; 844 845 INIT_LIST_HEAD(&wakeup->resources); 846 847 /* _PRW */ 848 status = acpi_evaluate_object(handle, "_PRW", NULL, &buffer); 849 if (ACPI_FAILURE(status)) { 850 acpi_handle_info(handle, "_PRW evaluation failed: %s\n", 851 acpi_format_exception(status)); 852 return err; 853 } 854 855 package = (union acpi_object *)buffer.pointer; 856 857 if (!package || package->package.count < 2) 858 goto out; 859 860 element = &(package->package.elements[0]); 861 if (!element) 862 goto out; 863 864 if (element->type == ACPI_TYPE_PACKAGE) { 865 if ((element->package.count < 2) || 866 (element->package.elements[0].type != 867 ACPI_TYPE_LOCAL_REFERENCE) 868 || (element->package.elements[1].type != ACPI_TYPE_INTEGER)) 869 goto out; 870 871 wakeup->gpe_device = 872 element->package.elements[0].reference.handle; 873 wakeup->gpe_number = 874 (u32) element->package.elements[1].integer.value; 875 } else if (element->type == ACPI_TYPE_INTEGER) { 876 wakeup->gpe_device = NULL; 877 wakeup->gpe_number = element->integer.value; 878 } else { 879 goto out; 880 } 881 882 element = &(package->package.elements[1]); 883 if (element->type != ACPI_TYPE_INTEGER) 884 goto out; 885 886 wakeup->sleep_state = element->integer.value; 887 888 err = acpi_extract_power_resources(package, 2, &wakeup->resources); 889 if (err) 890 goto out; 891 892 if (!list_empty(&wakeup->resources)) { 893 int sleep_state; 894 895 err = acpi_power_wakeup_list_init(&wakeup->resources, 896 &sleep_state); 897 if (err) { 898 acpi_handle_warn(handle, "Retrieving current states " 899 "of wakeup power resources failed\n"); 900 acpi_power_resources_list_free(&wakeup->resources); 901 goto out; 902 } 903 if (sleep_state < wakeup->sleep_state) { 904 acpi_handle_warn(handle, "Overriding _PRW sleep state " 905 "(S%d) by S%d from power resources\n", 906 (int)wakeup->sleep_state, sleep_state); 907 wakeup->sleep_state = sleep_state; 908 } 909 } 910 911 out: 912 kfree(buffer.pointer); 913 return err; 914 } 915 916 static bool acpi_wakeup_gpe_init(struct acpi_device *device) 917 { 918 static const struct acpi_device_id button_device_ids[] = { 919 {"PNP0C0C", 0}, /* Power button */ 920 {"PNP0C0D", 0}, /* Lid */ 921 {"PNP0C0E", 0}, /* Sleep button */ 922 {"", 0}, 923 }; 924 struct acpi_device_wakeup *wakeup = &device->wakeup; 925 acpi_status status; 926 927 wakeup->flags.notifier_present = 0; 928 929 /* Power button, Lid switch always enable wakeup */ 930 if (!acpi_match_device_ids(device, button_device_ids)) { 931 if (!acpi_match_device_ids(device, &button_device_ids[1])) { 932 /* Do not use Lid/sleep button for S5 wakeup */ 933 if (wakeup->sleep_state == ACPI_STATE_S5) 934 wakeup->sleep_state = ACPI_STATE_S4; 935 } 936 acpi_mark_gpe_for_wake(wakeup->gpe_device, wakeup->gpe_number); 937 device_set_wakeup_capable(&device->dev, true); 938 return true; 939 } 940 941 status = acpi_setup_gpe_for_wake(device->handle, wakeup->gpe_device, 942 wakeup->gpe_number); 943 return ACPI_SUCCESS(status); 944 } 945 946 static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device) 947 { 948 int err; 949 950 /* Presence of _PRW indicates wake capable */ 951 if (!acpi_has_method(device->handle, "_PRW")) 952 return; 953 954 err = acpi_bus_extract_wakeup_device_power_package(device); 955 if (err) { 956 dev_err(&device->dev, "Unable to extract wakeup power resources"); 957 return; 958 } 959 960 device->wakeup.flags.valid = acpi_wakeup_gpe_init(device); 961 device->wakeup.prepare_count = 0; 962 /* 963 * Call _PSW/_DSW object to disable its ability to wake the sleeping 964 * system for the ACPI device with the _PRW object. 965 * The _PSW object is deprecated in ACPI 3.0 and is replaced by _DSW. 966 * So it is necessary to call _DSW object first. Only when it is not 967 * present will the _PSW object used. 968 */ 969 err = acpi_device_sleep_wake(device, 0, 0, 0); 970 if (err) 971 pr_debug("error in _DSW or _PSW evaluation\n"); 972 } 973 974 static void acpi_bus_init_power_state(struct acpi_device *device, int state) 975 { 976 struct acpi_device_power_state *ps = &device->power.states[state]; 977 char pathname[5] = { '_', 'P', 'R', '0' + state, '\0' }; 978 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 979 acpi_status status; 980 981 INIT_LIST_HEAD(&ps->resources); 982 983 /* Evaluate "_PRx" to get referenced power resources */ 984 status = acpi_evaluate_object(device->handle, pathname, NULL, &buffer); 985 if (ACPI_SUCCESS(status)) { 986 union acpi_object *package = buffer.pointer; 987 988 if (buffer.length && package 989 && package->type == ACPI_TYPE_PACKAGE 990 && package->package.count) 991 acpi_extract_power_resources(package, 0, &ps->resources); 992 993 ACPI_FREE(buffer.pointer); 994 } 995 996 /* Evaluate "_PSx" to see if we can do explicit sets */ 997 pathname[2] = 'S'; 998 if (acpi_has_method(device->handle, pathname)) 999 ps->flags.explicit_set = 1; 1000 1001 /* State is valid if there are means to put the device into it. */ 1002 if (!list_empty(&ps->resources) || ps->flags.explicit_set) 1003 ps->flags.valid = 1; 1004 1005 ps->power = -1; /* Unknown - driver assigned */ 1006 ps->latency = -1; /* Unknown - driver assigned */ 1007 } 1008 1009 static void acpi_bus_get_power_flags(struct acpi_device *device) 1010 { 1011 u32 i; 1012 1013 /* Presence of _PS0|_PR0 indicates 'power manageable' */ 1014 if (!acpi_has_method(device->handle, "_PS0") && 1015 !acpi_has_method(device->handle, "_PR0")) 1016 return; 1017 1018 device->flags.power_manageable = 1; 1019 1020 /* 1021 * Power Management Flags 1022 */ 1023 if (acpi_has_method(device->handle, "_PSC")) 1024 device->power.flags.explicit_get = 1; 1025 1026 if (acpi_has_method(device->handle, "_IRC")) 1027 device->power.flags.inrush_current = 1; 1028 1029 if (acpi_has_method(device->handle, "_DSW")) 1030 device->power.flags.dsw_present = 1; 1031 1032 /* 1033 * Enumerate supported power management states 1034 */ 1035 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) 1036 acpi_bus_init_power_state(device, i); 1037 1038 INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources); 1039 1040 /* Set the defaults for D0 and D3hot (always supported). */ 1041 device->power.states[ACPI_STATE_D0].flags.valid = 1; 1042 device->power.states[ACPI_STATE_D0].power = 100; 1043 device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1; 1044 1045 /* 1046 * Use power resources only if the D0 list of them is populated, because 1047 * some platforms may provide _PR3 only to indicate D3cold support and 1048 * in those cases the power resources list returned by it may be bogus. 1049 */ 1050 if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) { 1051 device->power.flags.power_resources = 1; 1052 /* 1053 * D3cold is supported if the D3hot list of power resources is 1054 * not empty. 1055 */ 1056 if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) 1057 device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; 1058 } 1059 1060 if (acpi_bus_init_power(device)) 1061 device->flags.power_manageable = 0; 1062 } 1063 1064 static void acpi_bus_get_flags(struct acpi_device *device) 1065 { 1066 /* Presence of _STA indicates 'dynamic_status' */ 1067 if (acpi_has_method(device->handle, "_STA")) 1068 device->flags.dynamic_status = 1; 1069 1070 /* Presence of _RMV indicates 'removable' */ 1071 if (acpi_has_method(device->handle, "_RMV")) 1072 device->flags.removable = 1; 1073 1074 /* Presence of _EJD|_EJ0 indicates 'ejectable' */ 1075 if (acpi_has_method(device->handle, "_EJD") || 1076 acpi_has_method(device->handle, "_EJ0")) 1077 device->flags.ejectable = 1; 1078 } 1079 1080 static void acpi_device_get_busid(struct acpi_device *device) 1081 { 1082 char bus_id[5] = { '?', 0 }; 1083 struct acpi_buffer buffer = { sizeof(bus_id), bus_id }; 1084 int i = 0; 1085 1086 /* 1087 * Bus ID 1088 * ------ 1089 * The device's Bus ID is simply the object name. 1090 * TBD: Shouldn't this value be unique (within the ACPI namespace)? 1091 */ 1092 if (ACPI_IS_ROOT_DEVICE(device)) { 1093 strcpy(device->pnp.bus_id, "ACPI"); 1094 return; 1095 } 1096 1097 switch (device->device_type) { 1098 case ACPI_BUS_TYPE_POWER_BUTTON: 1099 strcpy(device->pnp.bus_id, "PWRF"); 1100 break; 1101 case ACPI_BUS_TYPE_SLEEP_BUTTON: 1102 strcpy(device->pnp.bus_id, "SLPF"); 1103 break; 1104 case ACPI_BUS_TYPE_ECDT_EC: 1105 strcpy(device->pnp.bus_id, "ECDT"); 1106 break; 1107 default: 1108 acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer); 1109 /* Clean up trailing underscores (if any) */ 1110 for (i = 3; i > 1; i--) { 1111 if (bus_id[i] == '_') 1112 bus_id[i] = '\0'; 1113 else 1114 break; 1115 } 1116 strcpy(device->pnp.bus_id, bus_id); 1117 break; 1118 } 1119 } 1120 1121 /* 1122 * acpi_ata_match - see if an acpi object is an ATA device 1123 * 1124 * If an acpi object has one of the ACPI ATA methods defined, 1125 * then we can safely call it an ATA device. 1126 */ 1127 bool acpi_ata_match(acpi_handle handle) 1128 { 1129 return acpi_has_method(handle, "_GTF") || 1130 acpi_has_method(handle, "_GTM") || 1131 acpi_has_method(handle, "_STM") || 1132 acpi_has_method(handle, "_SDD"); 1133 } 1134 1135 /* 1136 * acpi_bay_match - see if an acpi object is an ejectable driver bay 1137 * 1138 * If an acpi object is ejectable and has one of the ACPI ATA methods defined, 1139 * then we can safely call it an ejectable drive bay 1140 */ 1141 bool acpi_bay_match(acpi_handle handle) 1142 { 1143 acpi_handle phandle; 1144 1145 if (!acpi_has_method(handle, "_EJ0")) 1146 return false; 1147 if (acpi_ata_match(handle)) 1148 return true; 1149 if (ACPI_FAILURE(acpi_get_parent(handle, &phandle))) 1150 return false; 1151 1152 return acpi_ata_match(phandle); 1153 } 1154 1155 bool acpi_device_is_battery(struct acpi_device *adev) 1156 { 1157 struct acpi_hardware_id *hwid; 1158 1159 list_for_each_entry(hwid, &adev->pnp.ids, list) 1160 if (!strcmp("PNP0C0A", hwid->id)) 1161 return true; 1162 1163 return false; 1164 } 1165 1166 static bool is_ejectable_bay(struct acpi_device *adev) 1167 { 1168 acpi_handle handle = adev->handle; 1169 1170 if (acpi_has_method(handle, "_EJ0") && acpi_device_is_battery(adev)) 1171 return true; 1172 1173 return acpi_bay_match(handle); 1174 } 1175 1176 /* 1177 * acpi_dock_match - see if an acpi object has a _DCK method 1178 */ 1179 bool acpi_dock_match(acpi_handle handle) 1180 { 1181 return acpi_has_method(handle, "_DCK"); 1182 } 1183 1184 static acpi_status 1185 acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context, 1186 void **return_value) 1187 { 1188 long *cap = context; 1189 1190 if (acpi_has_method(handle, "_BCM") && 1191 acpi_has_method(handle, "_BCL")) { 1192 acpi_handle_debug(handle, "Found generic backlight support\n"); 1193 *cap |= ACPI_VIDEO_BACKLIGHT; 1194 /* We have backlight support, no need to scan further */ 1195 return AE_CTRL_TERMINATE; 1196 } 1197 return 0; 1198 } 1199 1200 /* Returns true if the ACPI object is a video device which can be 1201 * handled by video.ko. 1202 * The device will get a Linux specific CID added in scan.c to 1203 * identify the device as an ACPI graphics device 1204 * Be aware that the graphics device may not be physically present 1205 * Use acpi_video_get_capabilities() to detect general ACPI video 1206 * capabilities of present cards 1207 */ 1208 long acpi_is_video_device(acpi_handle handle) 1209 { 1210 long video_caps = 0; 1211 1212 /* Is this device able to support video switching ? */ 1213 if (acpi_has_method(handle, "_DOD") || acpi_has_method(handle, "_DOS")) 1214 video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING; 1215 1216 /* Is this device able to retrieve a video ROM ? */ 1217 if (acpi_has_method(handle, "_ROM")) 1218 video_caps |= ACPI_VIDEO_ROM_AVAILABLE; 1219 1220 /* Is this device able to configure which video head to be POSTed ? */ 1221 if (acpi_has_method(handle, "_VPO") && 1222 acpi_has_method(handle, "_GPD") && 1223 acpi_has_method(handle, "_SPD")) 1224 video_caps |= ACPI_VIDEO_DEVICE_POSTING; 1225 1226 /* Only check for backlight functionality if one of the above hit. */ 1227 if (video_caps) 1228 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1229 ACPI_UINT32_MAX, acpi_backlight_cap_match, NULL, 1230 &video_caps, NULL); 1231 1232 return video_caps; 1233 } 1234 EXPORT_SYMBOL(acpi_is_video_device); 1235 1236 const char *acpi_device_hid(struct acpi_device *device) 1237 { 1238 struct acpi_hardware_id *hid; 1239 1240 if (list_empty(&device->pnp.ids)) 1241 return dummy_hid; 1242 1243 hid = list_first_entry(&device->pnp.ids, struct acpi_hardware_id, list); 1244 return hid->id; 1245 } 1246 EXPORT_SYMBOL(acpi_device_hid); 1247 1248 static void acpi_add_id(struct acpi_device_pnp *pnp, const char *dev_id) 1249 { 1250 struct acpi_hardware_id *id; 1251 1252 id = kmalloc(sizeof(*id), GFP_KERNEL); 1253 if (!id) 1254 return; 1255 1256 id->id = kstrdup_const(dev_id, GFP_KERNEL); 1257 if (!id->id) { 1258 kfree(id); 1259 return; 1260 } 1261 1262 list_add_tail(&id->list, &pnp->ids); 1263 pnp->type.hardware_id = 1; 1264 } 1265 1266 /* 1267 * Old IBM workstations have a DSDT bug wherein the SMBus object 1268 * lacks the SMBUS01 HID and the methods do not have the necessary "_" 1269 * prefix. Work around this. 1270 */ 1271 static bool acpi_ibm_smbus_match(acpi_handle handle) 1272 { 1273 char node_name[ACPI_PATH_SEGMENT_LENGTH]; 1274 struct acpi_buffer path = { sizeof(node_name), node_name }; 1275 1276 if (!dmi_name_in_vendors("IBM")) 1277 return false; 1278 1279 /* Look for SMBS object */ 1280 if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &path)) || 1281 strcmp("SMBS", path.pointer)) 1282 return false; 1283 1284 /* Does it have the necessary (but misnamed) methods? */ 1285 if (acpi_has_method(handle, "SBI") && 1286 acpi_has_method(handle, "SBR") && 1287 acpi_has_method(handle, "SBW")) 1288 return true; 1289 1290 return false; 1291 } 1292 1293 static bool acpi_object_is_system_bus(acpi_handle handle) 1294 { 1295 acpi_handle tmp; 1296 1297 if (ACPI_SUCCESS(acpi_get_handle(NULL, "\\_SB", &tmp)) && 1298 tmp == handle) 1299 return true; 1300 if (ACPI_SUCCESS(acpi_get_handle(NULL, "\\_TZ", &tmp)) && 1301 tmp == handle) 1302 return true; 1303 1304 return false; 1305 } 1306 1307 static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp, 1308 int device_type) 1309 { 1310 struct acpi_device_info *info = NULL; 1311 struct acpi_pnp_device_id_list *cid_list; 1312 int i; 1313 1314 switch (device_type) { 1315 case ACPI_BUS_TYPE_DEVICE: 1316 if (handle == ACPI_ROOT_OBJECT) { 1317 acpi_add_id(pnp, ACPI_SYSTEM_HID); 1318 break; 1319 } 1320 1321 acpi_get_object_info(handle, &info); 1322 if (!info) { 1323 pr_err(PREFIX "%s: Error reading device info\n", 1324 __func__); 1325 return; 1326 } 1327 1328 if (info->valid & ACPI_VALID_HID) { 1329 acpi_add_id(pnp, info->hardware_id.string); 1330 pnp->type.platform_id = 1; 1331 } 1332 if (info->valid & ACPI_VALID_CID) { 1333 cid_list = &info->compatible_id_list; 1334 for (i = 0; i < cid_list->count; i++) 1335 acpi_add_id(pnp, cid_list->ids[i].string); 1336 } 1337 if (info->valid & ACPI_VALID_ADR) { 1338 pnp->bus_address = info->address; 1339 pnp->type.bus_address = 1; 1340 } 1341 if (info->valid & ACPI_VALID_UID) 1342 pnp->unique_id = kstrdup(info->unique_id.string, 1343 GFP_KERNEL); 1344 if (info->valid & ACPI_VALID_CLS) 1345 acpi_add_id(pnp, info->class_code.string); 1346 1347 kfree(info); 1348 1349 /* 1350 * Some devices don't reliably have _HIDs & _CIDs, so add 1351 * synthetic HIDs to make sure drivers can find them. 1352 */ 1353 if (acpi_is_video_device(handle)) 1354 acpi_add_id(pnp, ACPI_VIDEO_HID); 1355 else if (acpi_bay_match(handle)) 1356 acpi_add_id(pnp, ACPI_BAY_HID); 1357 else if (acpi_dock_match(handle)) 1358 acpi_add_id(pnp, ACPI_DOCK_HID); 1359 else if (acpi_ibm_smbus_match(handle)) 1360 acpi_add_id(pnp, ACPI_SMBUS_IBM_HID); 1361 else if (list_empty(&pnp->ids) && 1362 acpi_object_is_system_bus(handle)) { 1363 /* \_SB, \_TZ, LNXSYBUS */ 1364 acpi_add_id(pnp, ACPI_BUS_HID); 1365 strcpy(pnp->device_name, ACPI_BUS_DEVICE_NAME); 1366 strcpy(pnp->device_class, ACPI_BUS_CLASS); 1367 } 1368 1369 break; 1370 case ACPI_BUS_TYPE_POWER: 1371 acpi_add_id(pnp, ACPI_POWER_HID); 1372 break; 1373 case ACPI_BUS_TYPE_PROCESSOR: 1374 acpi_add_id(pnp, ACPI_PROCESSOR_OBJECT_HID); 1375 break; 1376 case ACPI_BUS_TYPE_THERMAL: 1377 acpi_add_id(pnp, ACPI_THERMAL_HID); 1378 break; 1379 case ACPI_BUS_TYPE_POWER_BUTTON: 1380 acpi_add_id(pnp, ACPI_BUTTON_HID_POWERF); 1381 break; 1382 case ACPI_BUS_TYPE_SLEEP_BUTTON: 1383 acpi_add_id(pnp, ACPI_BUTTON_HID_SLEEPF); 1384 break; 1385 case ACPI_BUS_TYPE_ECDT_EC: 1386 acpi_add_id(pnp, ACPI_ECDT_HID); 1387 break; 1388 } 1389 } 1390 1391 void acpi_free_pnp_ids(struct acpi_device_pnp *pnp) 1392 { 1393 struct acpi_hardware_id *id, *tmp; 1394 1395 list_for_each_entry_safe(id, tmp, &pnp->ids, list) { 1396 kfree_const(id->id); 1397 kfree(id); 1398 } 1399 kfree(pnp->unique_id); 1400 } 1401 1402 /** 1403 * acpi_dma_supported - Check DMA support for the specified device. 1404 * @adev: The pointer to acpi device 1405 * 1406 * Return false if DMA is not supported. Otherwise, return true 1407 */ 1408 bool acpi_dma_supported(struct acpi_device *adev) 1409 { 1410 if (!adev) 1411 return false; 1412 1413 if (adev->flags.cca_seen) 1414 return true; 1415 1416 /* 1417 * Per ACPI 6.0 sec 6.2.17, assume devices can do cache-coherent 1418 * DMA on "Intel platforms". Presumably that includes all x86 and 1419 * ia64, and other arches will set CONFIG_ACPI_CCA_REQUIRED=y. 1420 */ 1421 if (!IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED)) 1422 return true; 1423 1424 return false; 1425 } 1426 1427 /** 1428 * acpi_get_dma_attr - Check the supported DMA attr for the specified device. 1429 * @adev: The pointer to acpi device 1430 * 1431 * Return enum dev_dma_attr. 1432 */ 1433 enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) 1434 { 1435 if (!acpi_dma_supported(adev)) 1436 return DEV_DMA_NOT_SUPPORTED; 1437 1438 if (adev->flags.coherent_dma) 1439 return DEV_DMA_COHERENT; 1440 else 1441 return DEV_DMA_NON_COHERENT; 1442 } 1443 1444 /** 1445 * acpi_dma_get_range() - Get device DMA parameters. 1446 * 1447 * @dev: device to configure 1448 * @dma_addr: pointer device DMA address result 1449 * @offset: pointer to the DMA offset result 1450 * @size: pointer to DMA range size result 1451 * 1452 * Evaluate DMA regions and return respectively DMA region start, offset 1453 * and size in dma_addr, offset and size on parsing success; it does not 1454 * update the passed in values on failure. 1455 * 1456 * Return 0 on success, < 0 on failure. 1457 */ 1458 int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset, 1459 u64 *size) 1460 { 1461 struct acpi_device *adev; 1462 LIST_HEAD(list); 1463 struct resource_entry *rentry; 1464 int ret; 1465 struct device *dma_dev = dev; 1466 u64 len, dma_start = U64_MAX, dma_end = 0, dma_offset = 0; 1467 1468 /* 1469 * Walk the device tree chasing an ACPI companion with a _DMA 1470 * object while we go. Stop if we find a device with an ACPI 1471 * companion containing a _DMA method. 1472 */ 1473 do { 1474 adev = ACPI_COMPANION(dma_dev); 1475 if (adev && acpi_has_method(adev->handle, METHOD_NAME__DMA)) 1476 break; 1477 1478 dma_dev = dma_dev->parent; 1479 } while (dma_dev); 1480 1481 if (!dma_dev) 1482 return -ENODEV; 1483 1484 if (!acpi_has_method(adev->handle, METHOD_NAME__CRS)) { 1485 acpi_handle_warn(adev->handle, "_DMA is valid only if _CRS is present\n"); 1486 return -EINVAL; 1487 } 1488 1489 ret = acpi_dev_get_dma_resources(adev, &list); 1490 if (ret > 0) { 1491 list_for_each_entry(rentry, &list, node) { 1492 if (dma_offset && rentry->offset != dma_offset) { 1493 ret = -EINVAL; 1494 dev_warn(dma_dev, "Can't handle multiple windows with different offsets\n"); 1495 goto out; 1496 } 1497 dma_offset = rentry->offset; 1498 1499 /* Take lower and upper limits */ 1500 if (rentry->res->start < dma_start) 1501 dma_start = rentry->res->start; 1502 if (rentry->res->end > dma_end) 1503 dma_end = rentry->res->end; 1504 } 1505 1506 if (dma_start >= dma_end) { 1507 ret = -EINVAL; 1508 dev_dbg(dma_dev, "Invalid DMA regions configuration\n"); 1509 goto out; 1510 } 1511 1512 *dma_addr = dma_start - dma_offset; 1513 len = dma_end - dma_start; 1514 *size = max(len, len + 1); 1515 *offset = dma_offset; 1516 } 1517 out: 1518 acpi_dev_free_resource_list(&list); 1519 1520 return ret >= 0 ? 0 : ret; 1521 } 1522 1523 /** 1524 * acpi_dma_configure_id - Set-up DMA configuration for the device. 1525 * @dev: The pointer to the device 1526 * @attr: device dma attributes 1527 * @input_id: input device id const value pointer 1528 */ 1529 int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr, 1530 const u32 *input_id) 1531 { 1532 const struct iommu_ops *iommu; 1533 u64 dma_addr = 0, size = 0; 1534 1535 if (attr == DEV_DMA_NOT_SUPPORTED) { 1536 set_dma_ops(dev, &dma_dummy_ops); 1537 return 0; 1538 } 1539 1540 iort_dma_setup(dev, &dma_addr, &size); 1541 1542 iommu = iort_iommu_configure_id(dev, input_id); 1543 if (PTR_ERR(iommu) == -EPROBE_DEFER) 1544 return -EPROBE_DEFER; 1545 1546 arch_setup_dma_ops(dev, dma_addr, size, 1547 iommu, attr == DEV_DMA_COHERENT); 1548 1549 return 0; 1550 } 1551 EXPORT_SYMBOL_GPL(acpi_dma_configure_id); 1552 1553 static void acpi_init_coherency(struct acpi_device *adev) 1554 { 1555 unsigned long long cca = 0; 1556 acpi_status status; 1557 struct acpi_device *parent = adev->parent; 1558 1559 if (parent && parent->flags.cca_seen) { 1560 /* 1561 * From ACPI spec, OSPM will ignore _CCA if an ancestor 1562 * already saw one. 1563 */ 1564 adev->flags.cca_seen = 1; 1565 cca = parent->flags.coherent_dma; 1566 } else { 1567 status = acpi_evaluate_integer(adev->handle, "_CCA", 1568 NULL, &cca); 1569 if (ACPI_SUCCESS(status)) 1570 adev->flags.cca_seen = 1; 1571 else if (!IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED)) 1572 /* 1573 * If architecture does not specify that _CCA is 1574 * required for DMA-able devices (e.g. x86), 1575 * we default to _CCA=1. 1576 */ 1577 cca = 1; 1578 else 1579 acpi_handle_debug(adev->handle, 1580 "ACPI device is missing _CCA.\n"); 1581 } 1582 1583 adev->flags.coherent_dma = cca; 1584 } 1585 1586 static int acpi_check_serial_bus_slave(struct acpi_resource *ares, void *data) 1587 { 1588 bool *is_serial_bus_slave_p = data; 1589 1590 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) 1591 return 1; 1592 1593 *is_serial_bus_slave_p = true; 1594 1595 /* no need to do more checking */ 1596 return -1; 1597 } 1598 1599 static bool acpi_is_indirect_io_slave(struct acpi_device *device) 1600 { 1601 struct acpi_device *parent = device->parent; 1602 static const struct acpi_device_id indirect_io_hosts[] = { 1603 {"HISI0191", 0}, 1604 {} 1605 }; 1606 1607 return parent && !acpi_match_device_ids(parent, indirect_io_hosts); 1608 } 1609 1610 static bool acpi_device_enumeration_by_parent(struct acpi_device *device) 1611 { 1612 struct list_head resource_list; 1613 bool is_serial_bus_slave = false; 1614 /* 1615 * These devices have multiple I2cSerialBus resources and an i2c-client 1616 * must be instantiated for each, each with its own i2c_device_id. 1617 * Normally we only instantiate an i2c-client for the first resource, 1618 * using the ACPI HID as id. These special cases are handled by the 1619 * drivers/platform/x86/i2c-multi-instantiate.c driver, which knows 1620 * which i2c_device_id to use for each resource. 1621 */ 1622 static const struct acpi_device_id i2c_multi_instantiate_ids[] = { 1623 {"BSG1160", }, 1624 {"BSG2150", }, 1625 {"INT33FE", }, 1626 {"INT3515", }, 1627 {} 1628 }; 1629 1630 if (acpi_is_indirect_io_slave(device)) 1631 return true; 1632 1633 /* Macs use device properties in lieu of _CRS resources */ 1634 if (x86_apple_machine && 1635 (fwnode_property_present(&device->fwnode, "spiSclkPeriod") || 1636 fwnode_property_present(&device->fwnode, "i2cAddress") || 1637 fwnode_property_present(&device->fwnode, "baud"))) 1638 return true; 1639 1640 /* Instantiate a pdev for the i2c-multi-instantiate drv to bind to */ 1641 if (!acpi_match_device_ids(device, i2c_multi_instantiate_ids)) 1642 return false; 1643 1644 INIT_LIST_HEAD(&resource_list); 1645 acpi_dev_get_resources(device, &resource_list, 1646 acpi_check_serial_bus_slave, 1647 &is_serial_bus_slave); 1648 acpi_dev_free_resource_list(&resource_list); 1649 1650 return is_serial_bus_slave; 1651 } 1652 1653 void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, 1654 int type) 1655 { 1656 INIT_LIST_HEAD(&device->pnp.ids); 1657 device->device_type = type; 1658 device->handle = handle; 1659 device->parent = acpi_bus_get_parent(handle); 1660 fwnode_init(&device->fwnode, &acpi_device_fwnode_ops); 1661 acpi_set_device_status(device, ACPI_STA_DEFAULT); 1662 acpi_device_get_busid(device); 1663 acpi_set_pnp_ids(handle, &device->pnp, type); 1664 acpi_init_properties(device); 1665 acpi_bus_get_flags(device); 1666 device->flags.match_driver = false; 1667 device->flags.initialized = true; 1668 device->flags.enumeration_by_parent = 1669 acpi_device_enumeration_by_parent(device); 1670 acpi_device_clear_enumerated(device); 1671 device_initialize(&device->dev); 1672 dev_set_uevent_suppress(&device->dev, true); 1673 acpi_init_coherency(device); 1674 /* Assume there are unmet deps to start with. */ 1675 device->dep_unmet = 1; 1676 } 1677 1678 void acpi_device_add_finalize(struct acpi_device *device) 1679 { 1680 dev_set_uevent_suppress(&device->dev, false); 1681 kobject_uevent(&device->dev.kobj, KOBJ_ADD); 1682 } 1683 1684 static void acpi_scan_init_status(struct acpi_device *adev) 1685 { 1686 if (acpi_bus_get_status(adev)) 1687 acpi_set_device_status(adev, 0); 1688 } 1689 1690 static int acpi_add_single_object(struct acpi_device **child, 1691 acpi_handle handle, int type) 1692 { 1693 struct acpi_device *device; 1694 int result; 1695 1696 device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL); 1697 if (!device) 1698 return -ENOMEM; 1699 1700 acpi_init_device_object(device, handle, type); 1701 /* 1702 * Getting the status is delayed till here so that we can call 1703 * acpi_bus_get_status() and use its quirk handling. Note that 1704 * this must be done before the get power-/wakeup_dev-flags calls. 1705 */ 1706 if (type == ACPI_BUS_TYPE_DEVICE || type == ACPI_BUS_TYPE_PROCESSOR) 1707 acpi_scan_init_status(device); 1708 1709 acpi_bus_get_power_flags(device); 1710 acpi_bus_get_wakeup_device_flags(device); 1711 1712 result = acpi_device_add(device, acpi_device_release); 1713 if (result) { 1714 acpi_device_release(&device->dev); 1715 return result; 1716 } 1717 1718 acpi_power_add_remove_device(device, true); 1719 acpi_device_add_finalize(device); 1720 1721 acpi_handle_debug(handle, "Added as %s, parent %s\n", 1722 dev_name(&device->dev), device->parent ? 1723 dev_name(&device->parent->dev) : "(null)"); 1724 1725 *child = device; 1726 return 0; 1727 } 1728 1729 static acpi_status acpi_get_resource_memory(struct acpi_resource *ares, 1730 void *context) 1731 { 1732 struct resource *res = context; 1733 1734 if (acpi_dev_resource_memory(ares, res)) 1735 return AE_CTRL_TERMINATE; 1736 1737 return AE_OK; 1738 } 1739 1740 static bool acpi_device_should_be_hidden(acpi_handle handle) 1741 { 1742 acpi_status status; 1743 struct resource res; 1744 1745 /* Check if it should ignore the UART device */ 1746 if (!(spcr_uart_addr && acpi_has_method(handle, METHOD_NAME__CRS))) 1747 return false; 1748 1749 /* 1750 * The UART device described in SPCR table is assumed to have only one 1751 * memory resource present. So we only look for the first one here. 1752 */ 1753 status = acpi_walk_resources(handle, METHOD_NAME__CRS, 1754 acpi_get_resource_memory, &res); 1755 if (ACPI_FAILURE(status) || res.start != spcr_uart_addr) 1756 return false; 1757 1758 acpi_handle_info(handle, "The UART device @%pa in SPCR table will be hidden\n", 1759 &res.start); 1760 1761 return true; 1762 } 1763 1764 bool acpi_device_is_present(const struct acpi_device *adev) 1765 { 1766 return adev->status.present || adev->status.functional; 1767 } 1768 1769 static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler, 1770 const char *idstr, 1771 const struct acpi_device_id **matchid) 1772 { 1773 const struct acpi_device_id *devid; 1774 1775 if (handler->match) 1776 return handler->match(idstr, matchid); 1777 1778 for (devid = handler->ids; devid->id[0]; devid++) 1779 if (!strcmp((char *)devid->id, idstr)) { 1780 if (matchid) 1781 *matchid = devid; 1782 1783 return true; 1784 } 1785 1786 return false; 1787 } 1788 1789 static struct acpi_scan_handler *acpi_scan_match_handler(const char *idstr, 1790 const struct acpi_device_id **matchid) 1791 { 1792 struct acpi_scan_handler *handler; 1793 1794 list_for_each_entry(handler, &acpi_scan_handlers_list, list_node) 1795 if (acpi_scan_handler_matching(handler, idstr, matchid)) 1796 return handler; 1797 1798 return NULL; 1799 } 1800 1801 void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val) 1802 { 1803 if (!!hotplug->enabled == !!val) 1804 return; 1805 1806 mutex_lock(&acpi_scan_lock); 1807 1808 hotplug->enabled = val; 1809 1810 mutex_unlock(&acpi_scan_lock); 1811 } 1812 1813 static void acpi_scan_init_hotplug(struct acpi_device *adev) 1814 { 1815 struct acpi_hardware_id *hwid; 1816 1817 if (acpi_dock_match(adev->handle) || is_ejectable_bay(adev)) { 1818 acpi_dock_add(adev); 1819 return; 1820 } 1821 list_for_each_entry(hwid, &adev->pnp.ids, list) { 1822 struct acpi_scan_handler *handler; 1823 1824 handler = acpi_scan_match_handler(hwid->id, NULL); 1825 if (handler) { 1826 adev->flags.hotplug_notify = true; 1827 break; 1828 } 1829 } 1830 } 1831 1832 static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep) 1833 { 1834 struct acpi_handle_list dep_devices; 1835 acpi_status status; 1836 u32 count; 1837 int i; 1838 1839 /* 1840 * Check for _HID here to avoid deferring the enumeration of: 1841 * 1. PCI devices. 1842 * 2. ACPI nodes describing USB ports. 1843 * Still, checking for _HID catches more then just these cases ... 1844 */ 1845 if (!check_dep || !acpi_has_method(handle, "_DEP") || 1846 !acpi_has_method(handle, "_HID")) 1847 return 0; 1848 1849 status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices); 1850 if (ACPI_FAILURE(status)) { 1851 acpi_handle_debug(handle, "Failed to evaluate _DEP.\n"); 1852 return 0; 1853 } 1854 1855 for (count = 0, i = 0; i < dep_devices.count; i++) { 1856 struct acpi_device_info *info; 1857 struct acpi_dep_data *dep; 1858 bool skip; 1859 1860 status = acpi_get_object_info(dep_devices.handles[i], &info); 1861 if (ACPI_FAILURE(status)) { 1862 acpi_handle_debug(handle, "Error reading _DEP device info\n"); 1863 continue; 1864 } 1865 1866 skip = acpi_info_matches_ids(info, acpi_ignore_dep_ids); 1867 kfree(info); 1868 1869 if (skip) 1870 continue; 1871 1872 dep = kzalloc(sizeof(*dep), GFP_KERNEL); 1873 if (!dep) 1874 continue; 1875 1876 count++; 1877 1878 dep->supplier = dep_devices.handles[i]; 1879 dep->consumer = handle; 1880 1881 mutex_lock(&acpi_dep_list_lock); 1882 list_add_tail(&dep->node , &acpi_dep_list); 1883 mutex_unlock(&acpi_dep_list_lock); 1884 } 1885 1886 return count; 1887 } 1888 1889 static void acpi_scan_dep_init(struct acpi_device *adev) 1890 { 1891 struct acpi_dep_data *dep; 1892 1893 adev->dep_unmet = 0; 1894 1895 mutex_lock(&acpi_dep_list_lock); 1896 1897 list_for_each_entry(dep, &acpi_dep_list, node) { 1898 if (dep->consumer == adev->handle) 1899 adev->dep_unmet++; 1900 } 1901 1902 mutex_unlock(&acpi_dep_list_lock); 1903 } 1904 1905 static bool acpi_bus_scan_second_pass; 1906 1907 static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep, 1908 struct acpi_device **adev_p) 1909 { 1910 struct acpi_device *device = NULL; 1911 acpi_object_type acpi_type; 1912 int type; 1913 1914 acpi_bus_get_device(handle, &device); 1915 if (device) 1916 goto out; 1917 1918 if (ACPI_FAILURE(acpi_get_type(handle, &acpi_type))) 1919 return AE_OK; 1920 1921 switch (acpi_type) { 1922 case ACPI_TYPE_DEVICE: 1923 if (acpi_device_should_be_hidden(handle)) 1924 return AE_OK; 1925 1926 /* Bail out if there are dependencies. */ 1927 if (acpi_scan_check_dep(handle, check_dep) > 0) { 1928 acpi_bus_scan_second_pass = true; 1929 return AE_CTRL_DEPTH; 1930 } 1931 1932 fallthrough; 1933 case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */ 1934 type = ACPI_BUS_TYPE_DEVICE; 1935 break; 1936 1937 case ACPI_TYPE_PROCESSOR: 1938 type = ACPI_BUS_TYPE_PROCESSOR; 1939 break; 1940 1941 case ACPI_TYPE_THERMAL: 1942 type = ACPI_BUS_TYPE_THERMAL; 1943 break; 1944 1945 case ACPI_TYPE_POWER: 1946 acpi_add_power_resource(handle); 1947 fallthrough; 1948 default: 1949 return AE_OK; 1950 } 1951 1952 acpi_add_single_object(&device, handle, type); 1953 if (!device) 1954 return AE_CTRL_DEPTH; 1955 1956 acpi_scan_init_hotplug(device); 1957 /* 1958 * If check_dep is true at this point, the device has no dependencies, 1959 * or the creation of the device object would have been postponed above. 1960 */ 1961 if (check_dep) 1962 device->dep_unmet = 0; 1963 else 1964 acpi_scan_dep_init(device); 1965 1966 out: 1967 if (!*adev_p) 1968 *adev_p = device; 1969 1970 return AE_OK; 1971 } 1972 1973 static acpi_status acpi_bus_check_add_1(acpi_handle handle, u32 lvl_not_used, 1974 void *not_used, void **ret_p) 1975 { 1976 return acpi_bus_check_add(handle, true, (struct acpi_device **)ret_p); 1977 } 1978 1979 static acpi_status acpi_bus_check_add_2(acpi_handle handle, u32 lvl_not_used, 1980 void *not_used, void **ret_p) 1981 { 1982 return acpi_bus_check_add(handle, false, (struct acpi_device **)ret_p); 1983 } 1984 1985 static void acpi_default_enumeration(struct acpi_device *device) 1986 { 1987 /* 1988 * Do not enumerate devices with enumeration_by_parent flag set as 1989 * they will be enumerated by their respective parents. 1990 */ 1991 if (!device->flags.enumeration_by_parent) { 1992 acpi_create_platform_device(device, NULL); 1993 acpi_device_set_enumerated(device); 1994 } else { 1995 blocking_notifier_call_chain(&acpi_reconfig_chain, 1996 ACPI_RECONFIG_DEVICE_ADD, device); 1997 } 1998 } 1999 2000 static const struct acpi_device_id generic_device_ids[] = { 2001 {ACPI_DT_NAMESPACE_HID, }, 2002 {"", }, 2003 }; 2004 2005 static int acpi_generic_device_attach(struct acpi_device *adev, 2006 const struct acpi_device_id *not_used) 2007 { 2008 /* 2009 * Since ACPI_DT_NAMESPACE_HID is the only ID handled here, the test 2010 * below can be unconditional. 2011 */ 2012 if (adev->data.of_compatible) 2013 acpi_default_enumeration(adev); 2014 2015 return 1; 2016 } 2017 2018 static struct acpi_scan_handler generic_device_handler = { 2019 .ids = generic_device_ids, 2020 .attach = acpi_generic_device_attach, 2021 }; 2022 2023 static int acpi_scan_attach_handler(struct acpi_device *device) 2024 { 2025 struct acpi_hardware_id *hwid; 2026 int ret = 0; 2027 2028 list_for_each_entry(hwid, &device->pnp.ids, list) { 2029 const struct acpi_device_id *devid; 2030 struct acpi_scan_handler *handler; 2031 2032 handler = acpi_scan_match_handler(hwid->id, &devid); 2033 if (handler) { 2034 if (!handler->attach) { 2035 device->pnp.type.platform_id = 0; 2036 continue; 2037 } 2038 device->handler = handler; 2039 ret = handler->attach(device, devid); 2040 if (ret > 0) 2041 break; 2042 2043 device->handler = NULL; 2044 if (ret < 0) 2045 break; 2046 } 2047 } 2048 2049 return ret; 2050 } 2051 2052 static void acpi_bus_attach(struct acpi_device *device, bool first_pass) 2053 { 2054 struct acpi_device *child; 2055 bool skip = !first_pass && device->flags.visited; 2056 acpi_handle ejd; 2057 int ret; 2058 2059 if (skip) 2060 goto ok; 2061 2062 if (ACPI_SUCCESS(acpi_bus_get_ejd(device->handle, &ejd))) 2063 register_dock_dependent_device(device, ejd); 2064 2065 acpi_bus_get_status(device); 2066 /* Skip devices that are not present. */ 2067 if (!acpi_device_is_present(device)) { 2068 device->flags.initialized = false; 2069 acpi_device_clear_enumerated(device); 2070 device->flags.power_manageable = 0; 2071 return; 2072 } 2073 if (device->handler) 2074 goto ok; 2075 2076 if (!device->flags.initialized) { 2077 device->flags.power_manageable = 2078 device->power.states[ACPI_STATE_D0].flags.valid; 2079 if (acpi_bus_init_power(device)) 2080 device->flags.power_manageable = 0; 2081 2082 device->flags.initialized = true; 2083 } else if (device->flags.visited) { 2084 goto ok; 2085 } 2086 2087 ret = acpi_scan_attach_handler(device); 2088 if (ret < 0) 2089 return; 2090 2091 device->flags.match_driver = true; 2092 if (ret > 0 && !device->flags.enumeration_by_parent) { 2093 acpi_device_set_enumerated(device); 2094 goto ok; 2095 } 2096 2097 ret = device_attach(&device->dev); 2098 if (ret < 0) 2099 return; 2100 2101 if (device->pnp.type.platform_id || device->flags.enumeration_by_parent) 2102 acpi_default_enumeration(device); 2103 else 2104 acpi_device_set_enumerated(device); 2105 2106 ok: 2107 list_for_each_entry(child, &device->children, node) 2108 acpi_bus_attach(child, first_pass); 2109 2110 if (!skip && device->handler && device->handler->hotplug.notify_online) 2111 device->handler->hotplug.notify_online(device); 2112 } 2113 2114 void acpi_walk_dep_device_list(acpi_handle handle) 2115 { 2116 struct acpi_dep_data *dep, *tmp; 2117 struct acpi_device *adev; 2118 2119 mutex_lock(&acpi_dep_list_lock); 2120 list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) { 2121 if (dep->supplier == handle) { 2122 acpi_bus_get_device(dep->consumer, &adev); 2123 2124 if (adev) { 2125 adev->dep_unmet--; 2126 if (!adev->dep_unmet) 2127 acpi_bus_attach(adev, true); 2128 } 2129 2130 list_del(&dep->node); 2131 kfree(dep); 2132 } 2133 } 2134 mutex_unlock(&acpi_dep_list_lock); 2135 } 2136 EXPORT_SYMBOL_GPL(acpi_walk_dep_device_list); 2137 2138 /** 2139 * acpi_bus_scan - Add ACPI device node objects in a given namespace scope. 2140 * @handle: Root of the namespace scope to scan. 2141 * 2142 * Scan a given ACPI tree (probably recently hot-plugged) and create and add 2143 * found devices. 2144 * 2145 * If no devices were found, -ENODEV is returned, but it does not mean that 2146 * there has been a real error. There just have been no suitable ACPI objects 2147 * in the table trunk from which the kernel could create a device and add an 2148 * appropriate driver. 2149 * 2150 * Must be called under acpi_scan_lock. 2151 */ 2152 int acpi_bus_scan(acpi_handle handle) 2153 { 2154 struct acpi_device *device = NULL; 2155 2156 acpi_bus_scan_second_pass = false; 2157 2158 /* Pass 1: Avoid enumerating devices with missing dependencies. */ 2159 2160 if (ACPI_SUCCESS(acpi_bus_check_add(handle, true, &device))) 2161 acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, 2162 acpi_bus_check_add_1, NULL, NULL, 2163 (void **)&device); 2164 2165 if (!device) 2166 return -ENODEV; 2167 2168 acpi_bus_attach(device, true); 2169 2170 if (!acpi_bus_scan_second_pass) 2171 return 0; 2172 2173 /* Pass 2: Enumerate all of the remaining devices. */ 2174 2175 device = NULL; 2176 2177 if (ACPI_SUCCESS(acpi_bus_check_add(handle, false, &device))) 2178 acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, 2179 acpi_bus_check_add_2, NULL, NULL, 2180 (void **)&device); 2181 2182 acpi_bus_attach(device, false); 2183 2184 return 0; 2185 } 2186 EXPORT_SYMBOL(acpi_bus_scan); 2187 2188 /** 2189 * acpi_bus_trim - Detach scan handlers and drivers from ACPI device objects. 2190 * @adev: Root of the ACPI namespace scope to walk. 2191 * 2192 * Must be called under acpi_scan_lock. 2193 */ 2194 void acpi_bus_trim(struct acpi_device *adev) 2195 { 2196 struct acpi_scan_handler *handler = adev->handler; 2197 struct acpi_device *child; 2198 2199 list_for_each_entry_reverse(child, &adev->children, node) 2200 acpi_bus_trim(child); 2201 2202 adev->flags.match_driver = false; 2203 if (handler) { 2204 if (handler->detach) 2205 handler->detach(adev); 2206 2207 adev->handler = NULL; 2208 } else { 2209 device_release_driver(&adev->dev); 2210 } 2211 /* 2212 * Most likely, the device is going away, so put it into D3cold before 2213 * that. 2214 */ 2215 acpi_device_set_power(adev, ACPI_STATE_D3_COLD); 2216 adev->flags.initialized = false; 2217 acpi_device_clear_enumerated(adev); 2218 } 2219 EXPORT_SYMBOL_GPL(acpi_bus_trim); 2220 2221 int acpi_bus_register_early_device(int type) 2222 { 2223 struct acpi_device *device = NULL; 2224 int result; 2225 2226 result = acpi_add_single_object(&device, NULL, type); 2227 if (result) 2228 return result; 2229 2230 device->flags.match_driver = true; 2231 return device_attach(&device->dev); 2232 } 2233 EXPORT_SYMBOL_GPL(acpi_bus_register_early_device); 2234 2235 static int acpi_bus_scan_fixed(void) 2236 { 2237 int result = 0; 2238 2239 /* 2240 * Enumerate all fixed-feature devices. 2241 */ 2242 if (!(acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON)) { 2243 struct acpi_device *device = NULL; 2244 2245 result = acpi_add_single_object(&device, NULL, 2246 ACPI_BUS_TYPE_POWER_BUTTON); 2247 if (result) 2248 return result; 2249 2250 device->flags.match_driver = true; 2251 result = device_attach(&device->dev); 2252 if (result < 0) 2253 return result; 2254 2255 device_init_wakeup(&device->dev, true); 2256 } 2257 2258 if (!(acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON)) { 2259 struct acpi_device *device = NULL; 2260 2261 result = acpi_add_single_object(&device, NULL, 2262 ACPI_BUS_TYPE_SLEEP_BUTTON); 2263 if (result) 2264 return result; 2265 2266 device->flags.match_driver = true; 2267 result = device_attach(&device->dev); 2268 } 2269 2270 return result < 0 ? result : 0; 2271 } 2272 2273 static void __init acpi_get_spcr_uart_addr(void) 2274 { 2275 acpi_status status; 2276 struct acpi_table_spcr *spcr_ptr; 2277 2278 status = acpi_get_table(ACPI_SIG_SPCR, 0, 2279 (struct acpi_table_header **)&spcr_ptr); 2280 if (ACPI_FAILURE(status)) { 2281 pr_warn(PREFIX "STAO table present, but SPCR is missing\n"); 2282 return; 2283 } 2284 2285 spcr_uart_addr = spcr_ptr->serial_port.address; 2286 acpi_put_table((struct acpi_table_header *)spcr_ptr); 2287 } 2288 2289 static bool acpi_scan_initialized; 2290 2291 int __init acpi_scan_init(void) 2292 { 2293 int result; 2294 acpi_status status; 2295 struct acpi_table_stao *stao_ptr; 2296 2297 acpi_pci_root_init(); 2298 acpi_pci_link_init(); 2299 acpi_processor_init(); 2300 acpi_platform_init(); 2301 acpi_lpss_init(); 2302 acpi_apd_init(); 2303 acpi_cmos_rtc_init(); 2304 acpi_container_init(); 2305 acpi_memory_hotplug_init(); 2306 acpi_watchdog_init(); 2307 acpi_pnp_init(); 2308 acpi_int340x_thermal_init(); 2309 acpi_amba_init(); 2310 acpi_init_lpit(); 2311 2312 acpi_scan_add_handler(&generic_device_handler); 2313 2314 /* 2315 * If there is STAO table, check whether it needs to ignore the UART 2316 * device in SPCR table. 2317 */ 2318 status = acpi_get_table(ACPI_SIG_STAO, 0, 2319 (struct acpi_table_header **)&stao_ptr); 2320 if (ACPI_SUCCESS(status)) { 2321 if (stao_ptr->header.length > sizeof(struct acpi_table_stao)) 2322 pr_info(PREFIX "STAO Name List not yet supported.\n"); 2323 2324 if (stao_ptr->ignore_uart) 2325 acpi_get_spcr_uart_addr(); 2326 2327 acpi_put_table((struct acpi_table_header *)stao_ptr); 2328 } 2329 2330 acpi_gpe_apply_masked_gpes(); 2331 acpi_update_all_gpes(); 2332 2333 /* 2334 * Although we call __add_memory() that is documented to require the 2335 * device_hotplug_lock, it is not necessary here because this is an 2336 * early code when userspace or any other code path cannot trigger 2337 * hotplug/hotunplug operations. 2338 */ 2339 mutex_lock(&acpi_scan_lock); 2340 /* 2341 * Enumerate devices in the ACPI namespace. 2342 */ 2343 result = acpi_bus_scan(ACPI_ROOT_OBJECT); 2344 if (result) 2345 goto out; 2346 2347 result = acpi_bus_get_device(ACPI_ROOT_OBJECT, &acpi_root); 2348 if (result) 2349 goto out; 2350 2351 /* Fixed feature devices do not exist on HW-reduced platform */ 2352 if (!acpi_gbl_reduced_hardware) { 2353 result = acpi_bus_scan_fixed(); 2354 if (result) { 2355 acpi_detach_data(acpi_root->handle, 2356 acpi_scan_drop_device); 2357 acpi_device_del(acpi_root); 2358 acpi_bus_put_acpi_device(acpi_root); 2359 goto out; 2360 } 2361 } 2362 2363 acpi_turn_off_unused_power_resources(true); 2364 2365 acpi_scan_initialized = true; 2366 2367 out: 2368 mutex_unlock(&acpi_scan_lock); 2369 return result; 2370 } 2371 2372 static struct acpi_probe_entry *ape; 2373 static int acpi_probe_count; 2374 static DEFINE_MUTEX(acpi_probe_mutex); 2375 2376 static int __init acpi_match_madt(union acpi_subtable_headers *header, 2377 const unsigned long end) 2378 { 2379 if (!ape->subtable_valid || ape->subtable_valid(&header->common, ape)) 2380 if (!ape->probe_subtbl(header, end)) 2381 acpi_probe_count++; 2382 2383 return 0; 2384 } 2385 2386 int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr) 2387 { 2388 int count = 0; 2389 2390 if (acpi_disabled) 2391 return 0; 2392 2393 mutex_lock(&acpi_probe_mutex); 2394 for (ape = ap_head; nr; ape++, nr--) { 2395 if (ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) { 2396 acpi_probe_count = 0; 2397 acpi_table_parse_madt(ape->type, acpi_match_madt, 0); 2398 count += acpi_probe_count; 2399 } else { 2400 int res; 2401 res = acpi_table_parse(ape->id, ape->probe_table); 2402 if (!res) 2403 count++; 2404 } 2405 } 2406 mutex_unlock(&acpi_probe_mutex); 2407 2408 return count; 2409 } 2410 2411 struct acpi_table_events_work { 2412 struct work_struct work; 2413 void *table; 2414 u32 event; 2415 }; 2416 2417 static void acpi_table_events_fn(struct work_struct *work) 2418 { 2419 struct acpi_table_events_work *tew; 2420 2421 tew = container_of(work, struct acpi_table_events_work, work); 2422 2423 if (tew->event == ACPI_TABLE_EVENT_LOAD) { 2424 acpi_scan_lock_acquire(); 2425 acpi_bus_scan(ACPI_ROOT_OBJECT); 2426 acpi_scan_lock_release(); 2427 } 2428 2429 kfree(tew); 2430 } 2431 2432 void acpi_scan_table_handler(u32 event, void *table, void *context) 2433 { 2434 struct acpi_table_events_work *tew; 2435 2436 if (!acpi_scan_initialized) 2437 return; 2438 2439 if (event != ACPI_TABLE_EVENT_LOAD) 2440 return; 2441 2442 tew = kmalloc(sizeof(*tew), GFP_KERNEL); 2443 if (!tew) 2444 return; 2445 2446 INIT_WORK(&tew->work, acpi_table_events_fn); 2447 tew->table = table; 2448 tew->event = event; 2449 2450 schedule_work(&tew->work); 2451 } 2452 2453 int acpi_reconfig_notifier_register(struct notifier_block *nb) 2454 { 2455 return blocking_notifier_chain_register(&acpi_reconfig_chain, nb); 2456 } 2457 EXPORT_SYMBOL(acpi_reconfig_notifier_register); 2458 2459 int acpi_reconfig_notifier_unregister(struct notifier_block *nb) 2460 { 2461 return blocking_notifier_chain_unregister(&acpi_reconfig_chain, nb); 2462 } 2463 EXPORT_SYMBOL(acpi_reconfig_notifier_unregister); 2464