1 /****************************************************************************** 2 * 3 * Module Name: evgpe - General Purpose Event handling and dispatch 4 * 5 *****************************************************************************/ 6 7 /* 8 * Copyright (C) 2000 - 2016, Intel Corp. 9 * All rights reserved. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions, and the following disclaimer, 16 * without modification. 17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 18 * substantially similar to the "NO WARRANTY" disclaimer below 19 * ("Disclaimer") and any redistribution must be conditioned upon 20 * including a substantially similar Disclaimer requirement for further 21 * binary redistribution. 22 * 3. Neither the names of the above-listed copyright holders nor the names 23 * of any contributors may be used to endorse or promote products derived 24 * from this software without specific prior written permission. 25 * 26 * Alternatively, this software may be distributed under the terms of the 27 * GNU General Public License ("GPL") version 2 as published by the Free 28 * Software Foundation. 29 * 30 * NO WARRANTY 31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 41 * POSSIBILITY OF SUCH DAMAGES. 42 */ 43 44 #include <acpi/acpi.h> 45 #include "accommon.h" 46 #include "acevents.h" 47 #include "acnamesp.h" 48 49 #define _COMPONENT ACPI_EVENTS 50 ACPI_MODULE_NAME("evgpe") 51 #if (!ACPI_REDUCED_HARDWARE) /* Entire module */ 52 /* Local prototypes */ 53 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context); 54 55 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context); 56 57 /******************************************************************************* 58 * 59 * FUNCTION: acpi_ev_update_gpe_enable_mask 60 * 61 * PARAMETERS: gpe_event_info - GPE to update 62 * 63 * RETURN: Status 64 * 65 * DESCRIPTION: Updates GPE register enable mask based upon whether there are 66 * runtime references to this GPE 67 * 68 ******************************************************************************/ 69 70 acpi_status 71 acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info) 72 { 73 struct acpi_gpe_register_info *gpe_register_info; 74 u32 register_bit; 75 76 ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask); 77 78 gpe_register_info = gpe_event_info->register_info; 79 if (!gpe_register_info) { 80 return_ACPI_STATUS(AE_NOT_EXIST); 81 } 82 83 register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info); 84 85 /* Clear the run bit up front */ 86 87 ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit); 88 89 /* Set the mask bit only if there are references to this GPE */ 90 91 if (gpe_event_info->runtime_count) { 92 ACPI_SET_BIT(gpe_register_info->enable_for_run, 93 (u8)register_bit); 94 } 95 96 gpe_register_info->enable_mask = gpe_register_info->enable_for_run; 97 return_ACPI_STATUS(AE_OK); 98 } 99 100 /******************************************************************************* 101 * 102 * FUNCTION: acpi_ev_enable_gpe 103 * 104 * PARAMETERS: gpe_event_info - GPE to enable 105 * 106 * RETURN: Status 107 * 108 * DESCRIPTION: Clear a GPE of stale events and enable it. 109 * 110 ******************************************************************************/ 111 112 acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) 113 { 114 acpi_status status; 115 116 ACPI_FUNCTION_TRACE(ev_enable_gpe); 117 118 /* Clear the GPE (of stale events) */ 119 120 status = acpi_hw_clear_gpe(gpe_event_info); 121 if (ACPI_FAILURE(status)) { 122 return_ACPI_STATUS(status); 123 } 124 125 /* Enable the requested GPE */ 126 127 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); 128 return_ACPI_STATUS(status); 129 } 130 131 /******************************************************************************* 132 * 133 * FUNCTION: acpi_ev_add_gpe_reference 134 * 135 * PARAMETERS: gpe_event_info - Add a reference to this GPE 136 * 137 * RETURN: Status 138 * 139 * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is 140 * hardware-enabled. 141 * 142 ******************************************************************************/ 143 144 acpi_status 145 acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) 146 { 147 acpi_status status = AE_OK; 148 149 ACPI_FUNCTION_TRACE(ev_add_gpe_reference); 150 151 if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) { 152 return_ACPI_STATUS(AE_LIMIT); 153 } 154 155 gpe_event_info->runtime_count++; 156 if (gpe_event_info->runtime_count == 1) { 157 158 /* Enable on first reference */ 159 160 status = acpi_ev_update_gpe_enable_mask(gpe_event_info); 161 if (ACPI_SUCCESS(status)) { 162 status = acpi_ev_enable_gpe(gpe_event_info); 163 } 164 165 if (ACPI_FAILURE(status)) { 166 gpe_event_info->runtime_count--; 167 } 168 } 169 170 return_ACPI_STATUS(status); 171 } 172 173 /******************************************************************************* 174 * 175 * FUNCTION: acpi_ev_remove_gpe_reference 176 * 177 * PARAMETERS: gpe_event_info - Remove a reference to this GPE 178 * 179 * RETURN: Status 180 * 181 * DESCRIPTION: Remove a reference to a GPE. When the last reference is 182 * removed, the GPE is hardware-disabled. 183 * 184 ******************************************************************************/ 185 186 acpi_status 187 acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) 188 { 189 acpi_status status = AE_OK; 190 191 ACPI_FUNCTION_TRACE(ev_remove_gpe_reference); 192 193 if (!gpe_event_info->runtime_count) { 194 return_ACPI_STATUS(AE_LIMIT); 195 } 196 197 gpe_event_info->runtime_count--; 198 if (!gpe_event_info->runtime_count) { 199 200 /* Disable on last reference */ 201 202 status = acpi_ev_update_gpe_enable_mask(gpe_event_info); 203 if (ACPI_SUCCESS(status)) { 204 status = 205 acpi_hw_low_set_gpe(gpe_event_info, 206 ACPI_GPE_DISABLE); 207 } 208 209 if (ACPI_FAILURE(status)) { 210 gpe_event_info->runtime_count++; 211 } 212 } 213 214 return_ACPI_STATUS(status); 215 } 216 217 /******************************************************************************* 218 * 219 * FUNCTION: acpi_ev_low_get_gpe_info 220 * 221 * PARAMETERS: gpe_number - Raw GPE number 222 * gpe_block - A GPE info block 223 * 224 * RETURN: A GPE event_info struct. NULL if not a valid GPE (The gpe_number 225 * is not within the specified GPE block) 226 * 227 * DESCRIPTION: Returns the event_info struct associated with this GPE. This is 228 * the low-level implementation of ev_get_gpe_event_info. 229 * 230 ******************************************************************************/ 231 232 struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number, 233 struct acpi_gpe_block_info 234 *gpe_block) 235 { 236 u32 gpe_index; 237 238 /* 239 * Validate that the gpe_number is within the specified gpe_block. 240 * (Two steps) 241 */ 242 if (!gpe_block || (gpe_number < gpe_block->block_base_number)) { 243 return (NULL); 244 } 245 246 gpe_index = gpe_number - gpe_block->block_base_number; 247 if (gpe_index >= gpe_block->gpe_count) { 248 return (NULL); 249 } 250 251 return (&gpe_block->event_info[gpe_index]); 252 } 253 254 255 /******************************************************************************* 256 * 257 * FUNCTION: acpi_ev_get_gpe_event_info 258 * 259 * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1 260 * gpe_number - Raw GPE number 261 * 262 * RETURN: A GPE event_info struct. NULL if not a valid GPE 263 * 264 * DESCRIPTION: Returns the event_info struct associated with this GPE. 265 * Validates the gpe_block and the gpe_number 266 * 267 * Should be called only when the GPE lists are semaphore locked 268 * and not subject to change. 269 * 270 ******************************************************************************/ 271 272 struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, 273 u32 gpe_number) 274 { 275 union acpi_operand_object *obj_desc; 276 struct acpi_gpe_event_info *gpe_info; 277 u32 i; 278 279 ACPI_FUNCTION_ENTRY(); 280 281 /* A NULL gpe_device means use the FADT-defined GPE block(s) */ 282 283 if (!gpe_device) { 284 285 /* Examine GPE Block 0 and 1 (These blocks are permanent) */ 286 287 for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) { 288 gpe_info = acpi_ev_low_get_gpe_info(gpe_number, 289 acpi_gbl_gpe_fadt_blocks 290 [i]); 291 if (gpe_info) { 292 return (gpe_info); 293 } 294 } 295 296 /* The gpe_number was not in the range of either FADT GPE block */ 297 298 return (NULL); 299 } 300 301 /* A Non-NULL gpe_device means this is a GPE Block Device */ 302 303 obj_desc = 304 acpi_ns_get_attached_object((struct acpi_namespace_node *) 305 gpe_device); 306 if (!obj_desc || !obj_desc->device.gpe_block) { 307 return (NULL); 308 } 309 310 return (acpi_ev_low_get_gpe_info 311 (gpe_number, obj_desc->device.gpe_block)); 312 } 313 314 /******************************************************************************* 315 * 316 * FUNCTION: acpi_ev_gpe_detect 317 * 318 * PARAMETERS: gpe_xrupt_list - Interrupt block for this interrupt. 319 * Can have multiple GPE blocks attached. 320 * 321 * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED 322 * 323 * DESCRIPTION: Detect if any GP events have occurred. This function is 324 * executed at interrupt level. 325 * 326 ******************************************************************************/ 327 328 u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list) 329 { 330 acpi_status status; 331 struct acpi_gpe_block_info *gpe_block; 332 struct acpi_namespace_node *gpe_device; 333 struct acpi_gpe_register_info *gpe_register_info; 334 struct acpi_gpe_event_info *gpe_event_info; 335 u32 gpe_number; 336 struct acpi_gpe_handler_info *gpe_handler_info; 337 u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; 338 u8 enabled_status_byte; 339 u32 status_reg; 340 u32 enable_reg; 341 acpi_cpu_flags flags; 342 u32 i; 343 u32 j; 344 345 ACPI_FUNCTION_NAME(ev_gpe_detect); 346 347 /* Check for the case where there are no GPEs */ 348 349 if (!gpe_xrupt_list) { 350 return (int_status); 351 } 352 353 /* 354 * We need to obtain the GPE lock for both the data structs and registers 355 * Note: Not necessary to obtain the hardware lock, since the GPE 356 * registers are owned by the gpe_lock. 357 */ 358 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 359 360 /* Examine all GPE blocks attached to this interrupt level */ 361 362 gpe_block = gpe_xrupt_list->gpe_block_list_head; 363 while (gpe_block) { 364 gpe_device = gpe_block->node; 365 366 /* 367 * Read all of the 8-bit GPE status and enable registers in this GPE 368 * block, saving all of them. Find all currently active GP events. 369 */ 370 for (i = 0; i < gpe_block->register_count; i++) { 371 372 /* Get the next status/enable pair */ 373 374 gpe_register_info = &gpe_block->register_info[i]; 375 376 /* 377 * Optimization: If there are no GPEs enabled within this 378 * register, we can safely ignore the entire register. 379 */ 380 if (!(gpe_register_info->enable_for_run | 381 gpe_register_info->enable_for_wake)) { 382 ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, 383 "Ignore disabled registers for GPE %02X-%02X: " 384 "RunEnable=%02X, WakeEnable=%02X\n", 385 gpe_register_info-> 386 base_gpe_number, 387 gpe_register_info-> 388 base_gpe_number + 389 (ACPI_GPE_REGISTER_WIDTH - 1), 390 gpe_register_info-> 391 enable_for_run, 392 gpe_register_info-> 393 enable_for_wake)); 394 continue; 395 } 396 397 /* Read the Status Register */ 398 399 status = 400 acpi_hw_read(&status_reg, 401 &gpe_register_info->status_address); 402 if (ACPI_FAILURE(status)) { 403 goto unlock_and_exit; 404 } 405 406 /* Read the Enable Register */ 407 408 status = 409 acpi_hw_read(&enable_reg, 410 &gpe_register_info->enable_address); 411 if (ACPI_FAILURE(status)) { 412 goto unlock_and_exit; 413 } 414 415 ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, 416 "Read registers for GPE %02X-%02X: Status=%02X, Enable=%02X, " 417 "RunEnable=%02X, WakeEnable=%02X\n", 418 gpe_register_info->base_gpe_number, 419 gpe_register_info->base_gpe_number + 420 (ACPI_GPE_REGISTER_WIDTH - 1), 421 status_reg, enable_reg, 422 gpe_register_info->enable_for_run, 423 gpe_register_info->enable_for_wake)); 424 425 /* Check if there is anything active at all in this register */ 426 427 enabled_status_byte = (u8)(status_reg & enable_reg); 428 if (!enabled_status_byte) { 429 430 /* No active GPEs in this register, move on */ 431 432 continue; 433 } 434 435 /* Now look at the individual GPEs in this byte register */ 436 437 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { 438 439 /* Examine one GPE bit */ 440 441 gpe_event_info = 442 &gpe_block-> 443 event_info[((acpi_size)i * 444 ACPI_GPE_REGISTER_WIDTH) + j]; 445 gpe_number = 446 j + gpe_register_info->base_gpe_number; 447 448 if (enabled_status_byte & (1 << j)) { 449 450 /* Invoke global event handler if present */ 451 452 acpi_gpe_count++; 453 if (acpi_gbl_global_event_handler) { 454 acpi_gbl_global_event_handler 455 (ACPI_EVENT_TYPE_GPE, 456 gpe_device, gpe_number, 457 acpi_gbl_global_event_handler_context); 458 } 459 460 /* Found an active GPE */ 461 462 if (ACPI_GPE_DISPATCH_TYPE 463 (gpe_event_info->flags) == 464 ACPI_GPE_DISPATCH_RAW_HANDLER) { 465 466 /* Dispatch the event to a raw handler */ 467 468 gpe_handler_info = 469 gpe_event_info->dispatch. 470 handler; 471 472 /* 473 * There is no protection around the namespace node 474 * and the GPE handler to ensure a safe destruction 475 * because: 476 * 1. The namespace node is expected to always 477 * exist after loading a table. 478 * 2. The GPE handler is expected to be flushed by 479 * acpi_os_wait_events_complete() before the 480 * destruction. 481 */ 482 acpi_os_release_lock 483 (acpi_gbl_gpe_lock, flags); 484 int_status |= 485 gpe_handler_info-> 486 address(gpe_device, 487 gpe_number, 488 gpe_handler_info-> 489 context); 490 flags = 491 acpi_os_acquire_lock 492 (acpi_gbl_gpe_lock); 493 } else { 494 /* 495 * Dispatch the event to a standard handler or 496 * method. 497 */ 498 int_status |= 499 acpi_ev_gpe_dispatch 500 (gpe_device, gpe_event_info, 501 gpe_number); 502 } 503 } 504 } 505 } 506 507 gpe_block = gpe_block->next; 508 } 509 510 unlock_and_exit: 511 512 acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 513 return (int_status); 514 } 515 516 /******************************************************************************* 517 * 518 * FUNCTION: acpi_ev_asynch_execute_gpe_method 519 * 520 * PARAMETERS: Context (gpe_event_info) - Info for this GPE 521 * 522 * RETURN: None 523 * 524 * DESCRIPTION: Perform the actual execution of a GPE control method. This 525 * function is called from an invocation of acpi_os_execute and 526 * therefore does NOT execute at interrupt level - so that 527 * the control method itself is not executed in the context of 528 * an interrupt handler. 529 * 530 ******************************************************************************/ 531 532 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) 533 { 534 struct acpi_gpe_event_info *gpe_event_info = context; 535 acpi_status status = AE_OK; 536 struct acpi_evaluate_info *info; 537 struct acpi_gpe_notify_info *notify; 538 539 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); 540 541 /* Do the correct dispatch - normal method or implicit notify */ 542 543 switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) { 544 case ACPI_GPE_DISPATCH_NOTIFY: 545 /* 546 * Implicit notify. 547 * Dispatch a DEVICE_WAKE notify to the appropriate handler. 548 * NOTE: the request is queued for execution after this method 549 * completes. The notify handlers are NOT invoked synchronously 550 * from this thread -- because handlers may in turn run other 551 * control methods. 552 * 553 * June 2012: Expand implicit notify mechanism to support 554 * notifies on multiple device objects. 555 */ 556 notify = gpe_event_info->dispatch.notify_list; 557 while (ACPI_SUCCESS(status) && notify) { 558 status = 559 acpi_ev_queue_notify_request(notify->device_node, 560 ACPI_NOTIFY_DEVICE_WAKE); 561 562 notify = notify->next; 563 } 564 565 break; 566 567 case ACPI_GPE_DISPATCH_METHOD: 568 569 /* Allocate the evaluation information block */ 570 571 info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); 572 if (!info) { 573 status = AE_NO_MEMORY; 574 } else { 575 /* 576 * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the 577 * _Lxx/_Exx control method that corresponds to this GPE 578 */ 579 info->prefix_node = 580 gpe_event_info->dispatch.method_node; 581 info->flags = ACPI_IGNORE_RETURN_VALUE; 582 583 status = acpi_ns_evaluate(info); 584 ACPI_FREE(info); 585 } 586 587 if (ACPI_FAILURE(status)) { 588 ACPI_EXCEPTION((AE_INFO, status, 589 "while evaluating GPE method [%4.4s]", 590 acpi_ut_get_node_name(gpe_event_info-> 591 dispatch. 592 method_node))); 593 } 594 break; 595 596 default: 597 598 goto error_exit; /* Should never happen */ 599 } 600 601 /* Defer enabling of GPE until all notify handlers are done */ 602 603 status = acpi_os_execute(OSL_NOTIFY_HANDLER, 604 acpi_ev_asynch_enable_gpe, gpe_event_info); 605 if (ACPI_SUCCESS(status)) { 606 return_VOID; 607 } 608 609 error_exit: 610 acpi_ev_asynch_enable_gpe(gpe_event_info); 611 return_VOID; 612 } 613 614 615 /******************************************************************************* 616 * 617 * FUNCTION: acpi_ev_asynch_enable_gpe 618 * 619 * PARAMETERS: Context (gpe_event_info) - Info for this GPE 620 * Callback from acpi_os_execute 621 * 622 * RETURN: None 623 * 624 * DESCRIPTION: Asynchronous clear/enable for GPE. This allows the GPE to 625 * complete (i.e., finish execution of Notify) 626 * 627 ******************************************************************************/ 628 629 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context) 630 { 631 struct acpi_gpe_event_info *gpe_event_info = context; 632 acpi_cpu_flags flags; 633 634 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 635 (void)acpi_ev_finish_gpe(gpe_event_info); 636 acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 637 638 return; 639 } 640 641 642 /******************************************************************************* 643 * 644 * FUNCTION: acpi_ev_finish_gpe 645 * 646 * PARAMETERS: gpe_event_info - Info for this GPE 647 * 648 * RETURN: Status 649 * 650 * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution 651 * of a GPE method or a synchronous or asynchronous GPE handler. 652 * 653 ******************************************************************************/ 654 655 acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info) 656 { 657 acpi_status status; 658 659 if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == 660 ACPI_GPE_LEVEL_TRIGGERED) { 661 /* 662 * GPE is level-triggered, we clear the GPE status bit after 663 * handling the event. 664 */ 665 status = acpi_hw_clear_gpe(gpe_event_info); 666 if (ACPI_FAILURE(status)) { 667 return (status); 668 } 669 } 670 671 /* 672 * Enable this GPE, conditionally. This means that the GPE will 673 * only be physically enabled if the enable_mask bit is set 674 * in the event_info. 675 */ 676 (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE); 677 return (AE_OK); 678 } 679 680 681 /******************************************************************************* 682 * 683 * FUNCTION: acpi_ev_gpe_dispatch 684 * 685 * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1 686 * gpe_event_info - Info for this GPE 687 * gpe_number - Number relative to the parent GPE block 688 * 689 * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED 690 * 691 * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC) 692 * or method (e.g. _Lxx/_Exx) handler. 693 * 694 * This function executes at interrupt level. 695 * 696 ******************************************************************************/ 697 698 u32 699 acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, 700 struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) 701 { 702 acpi_status status; 703 u32 return_value; 704 705 ACPI_FUNCTION_TRACE(ev_gpe_dispatch); 706 707 /* 708 * Always disable the GPE so that it does not keep firing before 709 * any asynchronous activity completes (either from the execution 710 * of a GPE method or an asynchronous GPE handler.) 711 * 712 * If there is no handler or method to run, just disable the 713 * GPE and leave it disabled permanently to prevent further such 714 * pointless events from firing. 715 */ 716 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); 717 if (ACPI_FAILURE(status)) { 718 ACPI_EXCEPTION((AE_INFO, status, 719 "Unable to disable GPE %02X", gpe_number)); 720 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); 721 } 722 723 /* 724 * If edge-triggered, clear the GPE status bit now. Note that 725 * level-triggered events are cleared after the GPE is serviced. 726 */ 727 if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == 728 ACPI_GPE_EDGE_TRIGGERED) { 729 status = acpi_hw_clear_gpe(gpe_event_info); 730 if (ACPI_FAILURE(status)) { 731 ACPI_EXCEPTION((AE_INFO, status, 732 "Unable to clear GPE %02X", 733 gpe_number)); 734 (void)acpi_hw_low_set_gpe(gpe_event_info, 735 ACPI_GPE_CONDITIONAL_ENABLE); 736 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); 737 } 738 } 739 740 /* 741 * Dispatch the GPE to either an installed handler or the control 742 * method associated with this GPE (_Lxx or _Exx). If a handler 743 * exists, we invoke it and do not attempt to run the method. 744 * If there is neither a handler nor a method, leave the GPE 745 * disabled. 746 */ 747 switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) { 748 case ACPI_GPE_DISPATCH_HANDLER: 749 750 /* Invoke the installed handler (at interrupt level) */ 751 752 return_value = 753 gpe_event_info->dispatch.handler->address(gpe_device, 754 gpe_number, 755 gpe_event_info-> 756 dispatch.handler-> 757 context); 758 759 /* If requested, clear (if level-triggered) and reenable the GPE */ 760 761 if (return_value & ACPI_REENABLE_GPE) { 762 (void)acpi_ev_finish_gpe(gpe_event_info); 763 } 764 break; 765 766 case ACPI_GPE_DISPATCH_METHOD: 767 case ACPI_GPE_DISPATCH_NOTIFY: 768 /* 769 * Execute the method associated with the GPE 770 * NOTE: Level-triggered GPEs are cleared after the method completes. 771 */ 772 status = acpi_os_execute(OSL_GPE_HANDLER, 773 acpi_ev_asynch_execute_gpe_method, 774 gpe_event_info); 775 if (ACPI_FAILURE(status)) { 776 ACPI_EXCEPTION((AE_INFO, status, 777 "Unable to queue handler for GPE %02X - event disabled", 778 gpe_number)); 779 } 780 break; 781 782 default: 783 /* 784 * No handler or method to run! 785 * 03/2010: This case should no longer be possible. We will not allow 786 * a GPE to be enabled if it has no handler or method. 787 */ 788 ACPI_ERROR((AE_INFO, 789 "No handler or method for GPE %02X, disabling event", 790 gpe_number)); 791 792 break; 793 } 794 795 return_UINT32(ACPI_INTERRUPT_HANDLED); 796 } 797 798 #endif /* !ACPI_REDUCED_HARDWARE */ 799