1 /****************************************************************************** 2 * 3 * Module Name: evgpe - General Purpose Event handling and dispatch 4 * 5 *****************************************************************************/ 6 7 /* 8 * Copyright (C) 2000 - 2010, Intel Corp. 9 * All rights reserved. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions, and the following disclaimer, 16 * without modification. 17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 18 * substantially similar to the "NO WARRANTY" disclaimer below 19 * ("Disclaimer") and any redistribution must be conditioned upon 20 * including a substantially similar Disclaimer requirement for further 21 * binary redistribution. 22 * 3. Neither the names of the above-listed copyright holders nor the names 23 * of any contributors may be used to endorse or promote products derived 24 * from this software without specific prior written permission. 25 * 26 * Alternatively, this software may be distributed under the terms of the 27 * GNU General Public License ("GPL") version 2 as published by the Free 28 * Software Foundation. 29 * 30 * NO WARRANTY 31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 34 * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 41 * POSSIBILITY OF SUCH DAMAGES. 42 */ 43 44 #include <acpi/acpi.h> 45 #include "accommon.h" 46 #include "acevents.h" 47 #include "acnamesp.h" 48 49 #define _COMPONENT ACPI_EVENTS 50 ACPI_MODULE_NAME("evgpe") 51 52 /* Local prototypes */ 53 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context); 54 55 /******************************************************************************* 56 * 57 * FUNCTION: acpi_ev_update_gpe_enable_mask 58 * 59 * PARAMETERS: gpe_event_info - GPE to update 60 * 61 * RETURN: Status 62 * 63 * DESCRIPTION: Updates GPE register enable mask based upon whether there are 64 * runtime references to this GPE 65 * 66 ******************************************************************************/ 67 68 acpi_status 69 acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info) 70 { 71 struct acpi_gpe_register_info *gpe_register_info; 72 u32 register_bit; 73 74 ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask); 75 76 gpe_register_info = gpe_event_info->register_info; 77 if (!gpe_register_info) { 78 return_ACPI_STATUS(AE_NOT_EXIST); 79 } 80 81 register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info, 82 gpe_register_info); 83 84 /* Clear the run bit up front */ 85 86 ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit); 87 88 /* Set the mask bit only if there are references to this GPE */ 89 90 if (gpe_event_info->runtime_count) { 91 ACPI_SET_BIT(gpe_register_info->enable_for_run, (u8)register_bit); 92 } 93 94 
return_ACPI_STATUS(AE_OK); 95 } 96 97 /******************************************************************************* 98 * 99 * FUNCTION: acpi_ev_enable_gpe 100 * 101 * PARAMETERS: gpe_event_info - GPE to enable 102 * 103 * RETURN: Status 104 * 105 * DESCRIPTION: Clear the given GPE from stale events and enable it. 106 * 107 ******************************************************************************/ 108 acpi_status 109 acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) 110 { 111 acpi_status status; 112 113 ACPI_FUNCTION_TRACE(ev_enable_gpe); 114 115 /* 116 * We will only allow a GPE to be enabled if it has either an 117 * associated method (_Lxx/_Exx) or a handler. Otherwise, the 118 * GPE will be immediately disabled by acpi_ev_gpe_dispatch the 119 * first time it fires. 120 */ 121 if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) { 122 return_ACPI_STATUS(AE_NO_HANDLER); 123 } 124 125 /* Clear the GPE (of stale events) */ 126 status = acpi_hw_clear_gpe(gpe_event_info); 127 if (ACPI_FAILURE(status)) { 128 return_ACPI_STATUS(status); 129 } 130 131 /* Enable the requested GPE */ 132 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); 133 134 return_ACPI_STATUS(status); 135 } 136 137 138 /******************************************************************************* 139 * 140 * FUNCTION: acpi_raw_enable_gpe 141 * 142 * PARAMETERS: gpe_event_info - GPE to enable 143 * 144 * RETURN: Status 145 * 146 * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is 147 * hardware-enabled. 
148 * 149 ******************************************************************************/ 150 151 acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) 152 { 153 acpi_status status = AE_OK; 154 155 if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) { 156 return_ACPI_STATUS(AE_LIMIT); 157 } 158 159 gpe_event_info->runtime_count++; 160 if (gpe_event_info->runtime_count == 1) { 161 status = acpi_ev_update_gpe_enable_mask(gpe_event_info); 162 if (ACPI_SUCCESS(status)) { 163 status = acpi_ev_enable_gpe(gpe_event_info); 164 } 165 166 if (ACPI_FAILURE(status)) { 167 gpe_event_info->runtime_count--; 168 } 169 } 170 171 return_ACPI_STATUS(status); 172 } 173 174 /******************************************************************************* 175 * 176 * FUNCTION: acpi_raw_disable_gpe 177 * 178 * PARAMETERS: gpe_event_info - GPE to disable 179 * 180 * RETURN: Status 181 * 182 * DESCRIPTION: Remove a reference to a GPE. When the last reference is 183 * removed, the GPE is hardware-disabled. 184 * 185 ******************************************************************************/ 186 187 acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) 188 { 189 acpi_status status = AE_OK; 190 191 if (!gpe_event_info->runtime_count) { 192 return_ACPI_STATUS(AE_LIMIT); 193 } 194 195 gpe_event_info->runtime_count--; 196 if (!gpe_event_info->runtime_count) { 197 status = acpi_ev_update_gpe_enable_mask(gpe_event_info); 198 if (ACPI_SUCCESS(status)) { 199 status = acpi_hw_low_set_gpe(gpe_event_info, 200 ACPI_GPE_DISABLE); 201 } 202 203 if (ACPI_FAILURE(status)) { 204 gpe_event_info->runtime_count++; 205 } 206 } 207 208 return_ACPI_STATUS(status); 209 } 210 211 /******************************************************************************* 212 * 213 * FUNCTION: acpi_ev_low_get_gpe_info 214 * 215 * PARAMETERS: gpe_number - Raw GPE number 216 * gpe_block - A GPE info block 217 * 218 * RETURN: A GPE event_info struct. 
NULL if not a valid GPE (The gpe_number
 *              is not within the specified GPE block)
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE. This is
 *              the low-level implementation of ev_get_gpe_event_info.
 *
 ******************************************************************************/

struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
						     struct acpi_gpe_block_info
						     *gpe_block)
{
	u32 index;

	/* The gpe_number must fall within the range of this gpe_block */

	if (!gpe_block) {
		return (NULL);
	}

	if (gpe_number < gpe_block->block_base_number) {
		return (NULL);
	}

	index = gpe_number - gpe_block->block_base_number;

	/* Second range check: relative index must be inside the block */

	return (index < gpe_block->gpe_count ?
		&gpe_block->event_info[index] : NULL);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_event_info
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_number          - Raw GPE number
 *
 * RETURN:      A GPE event_info struct. NULL if not a valid GPE
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE.
 *              Validates the gpe_block and the gpe_number
 *
 *              Should be called only when the GPE lists are semaphore locked
 *              and not subject to change.
263 * 264 ******************************************************************************/ 265 266 struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, 267 u32 gpe_number) 268 { 269 union acpi_operand_object *obj_desc; 270 struct acpi_gpe_event_info *gpe_info; 271 u32 i; 272 273 ACPI_FUNCTION_ENTRY(); 274 275 /* A NULL gpe_device means use the FADT-defined GPE block(s) */ 276 277 if (!gpe_device) { 278 279 /* Examine GPE Block 0 and 1 (These blocks are permanent) */ 280 281 for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) { 282 gpe_info = acpi_ev_low_get_gpe_info(gpe_number, 283 acpi_gbl_gpe_fadt_blocks 284 [i]); 285 if (gpe_info) { 286 return (gpe_info); 287 } 288 } 289 290 /* The gpe_number was not in the range of either FADT GPE block */ 291 292 return (NULL); 293 } 294 295 /* A Non-NULL gpe_device means this is a GPE Block Device */ 296 297 obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *) 298 gpe_device); 299 if (!obj_desc || !obj_desc->device.gpe_block) { 300 return (NULL); 301 } 302 303 return (acpi_ev_low_get_gpe_info 304 (gpe_number, obj_desc->device.gpe_block)); 305 } 306 307 /******************************************************************************* 308 * 309 * FUNCTION: acpi_ev_gpe_detect 310 * 311 * PARAMETERS: gpe_xrupt_list - Interrupt block for this interrupt. 312 * Can have multiple GPE blocks attached. 313 * 314 * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED 315 * 316 * DESCRIPTION: Detect if any GP events have occurred. This function is 317 * executed at interrupt level. 
 *
 ******************************************************************************/

u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_register_info *gpe_register_info;
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	u8 enabled_status_byte;
	u32 status_reg;
	u32 enable_reg;
	acpi_cpu_flags flags;
	u32 i;
	u32 j;

	ACPI_FUNCTION_NAME(ev_gpe_detect);

	/* Check for the case where there are no GPEs */

	if (!gpe_xrupt_list) {
		return (int_status);
	}

	/*
	 * We need to obtain the GPE lock for both the data structs and registers
	 * Note: Not necessary to obtain the hardware lock, since the GPE
	 * registers are owned by the gpe_lock.
	 */
	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Examine all GPE blocks attached to this interrupt level */

	gpe_block = gpe_xrupt_list->gpe_block_list_head;
	while (gpe_block) {
		/*
		 * Read all of the 8-bit GPE status and enable registers in this GPE
		 * block, saving all of them. Find all currently active GP events.
		 */
		for (i = 0; i < gpe_block->register_count; i++) {

			/* Get the next status/enable pair */

			gpe_register_info = &gpe_block->register_info[i];

			/* Read the Status Register */

			status =
			    acpi_hw_read(&status_reg,
					 &gpe_register_info->status_address);
			if (ACPI_FAILURE(status)) {
				/* Hardware read failed - bail out via the lock release */
				goto unlock_and_exit;
			}

			/* Read the Enable Register */

			status =
			    acpi_hw_read(&enable_reg,
					 &gpe_register_info->enable_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
					  "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n",
					  gpe_register_info->base_gpe_number,
					  status_reg, enable_reg));

			/*
			 * Check if there is anything active at all in this register.
			 * A GPE is "active" only when its status bit AND its enable
			 * bit are both set.
			 */
			enabled_status_byte = (u8) (status_reg & enable_reg);
			if (!enabled_status_byte) {

				/* No active GPEs in this register, move on */

				continue;
			}

			/* Now look at the individual GPEs in this byte register */

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

				/* Examine one GPE bit */

				if (enabled_status_byte & (1 << j)) {
					/*
					 * Found an active GPE. Dispatch the event to a handler
					 * or method. The event_info index is the register
					 * index times the register width (8) plus the bit
					 * position; the raw GPE number is the bit position
					 * plus the register's base GPE number.
					 */
					int_status |=
					    acpi_ev_gpe_dispatch(&gpe_block->
								 event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
				}
			}
		}

		gpe_block = gpe_block->next;
	}

      unlock_and_exit:

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_execute_gpe_method
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 *
 * RETURN:      None
 *
 * DESCRIPTION: Perform the actual execution of a GPE control method.
This
 *              function is called from an invocation of acpi_os_execute and
 *              therefore does NOT execute at interrupt level - so that
 *              the control method itself is not executed in the context of
 *              an interrupt handler.
 *
 ******************************************************************************/
static void acpi_ev_asynch_enable_gpe(void *context);

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = (void *)context;
	acpi_status status;
	struct acpi_gpe_event_info local_gpe_event_info;
	struct acpi_evaluate_info *info;

	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_VOID;
	}

	/*
	 * Must revalidate the gpe_number/gpe_block - the GPE could have been
	 * removed between the interrupt-level dispatch and this deferred call.
	 */
	if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
		status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
		return_VOID;
	}

	/*
	 * Take a snapshot of the GPE info for this level - we copy the info to
	 * prevent a race condition with remove_handler/remove_block.
	 */
	ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info,
		    sizeof(struct acpi_gpe_event_info));

	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_VOID;
	}

	/*
	 * Must check for control method type dispatch one more time to avoid a
	 * race with ev_gpe_install_handler
	 */
	if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) ==
	    ACPI_GPE_DISPATCH_METHOD) {

		/* Allocate the evaluation information block */

		info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
		if (!info) {
			status = AE_NO_MEMORY;
		} else {
			/*
			 * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
			 * control method that corresponds to this GPE
			 */
			info->prefix_node =
			    local_gpe_event_info.dispatch.method_node;
			info->flags = ACPI_IGNORE_RETURN_VALUE;

			status = acpi_ns_evaluate(info);
			ACPI_FREE(info);
		}

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"while evaluating GPE method [%4.4s]",
					acpi_ut_get_node_name
					(local_gpe_event_info.dispatch.
					 method_node)));
		}
	}
	/*
	 * Defer enabling of GPE until all notify handlers are done.
	 * NOTE(review): the deferred callback receives the ORIGINAL
	 * gpe_event_info pointer, not the local snapshot copied above; if the
	 * GPE block were removed before the callback runs, this could touch
	 * freed memory - confirm against the remove_block locking rules.
	 */
	acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe,
			gpe_event_info);
	return_VOID;
}

/*
 * Deferred (notify-handler level) re-enable of a GPE after its method or
 * notify handlers have completed. Context is the gpe_event_info.
 */
static void acpi_ev_asynch_enable_gpe(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;
	acpi_status status;
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_LEVEL_TRIGGERED) {
		/*
		 * GPE is level-triggered, we clear the GPE status bit after handling
		 * the event.
		 */
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			return_VOID;
		}
	}

	/*
	 * Enable this GPE, conditionally. This means that the GPE will only be
	 * physically enabled if the enable_for_run bit is set in the event_info
	 */
	(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE);

	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_dispatch
 *
 * PARAMETERS:  gpe_event_info  - Info for this GPE
 *              gpe_number      - Number relative to the parent GPE block
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
 *              or method (e.g. _Lxx/_Exx) handler.
 *
 *              This function executes at interrupt level.
 *
 ******************************************************************************/

u32
acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

	acpi_os_gpe_count(gpe_number);

	/*
	 * If edge-triggered, clear the GPE status bit now. Note that
	 * level-triggered events are cleared after the GPE is serviced.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to clear GPE[0x%2X]",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	/*
	 * Dispatch the GPE to either an installed handler, or the control method
	 * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
	 * it and do not attempt to run the method. If there is neither a handler
	 * nor a method, we disable this GPE to prevent further such pointless
	 * events from firing.
	 */
	switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
	case ACPI_GPE_DISPATCH_HANDLER:

		/*
		 * Invoke the installed handler (at interrupt level)
		 * Ignore return status for now.
		 * TBD: leave GPE disabled on error?
		 */
		(void)gpe_event_info->dispatch.handler->address(gpe_event_info->
								dispatch.
								handler->
								context);

		/* It is now safe to clear level-triggered events. */

		if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
		    ACPI_GPE_LEVEL_TRIGGERED) {
			status = acpi_hw_clear_gpe(gpe_event_info);
			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"Unable to clear GPE[0x%2X]",
						gpe_number));
				return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
			}
		}
		break;

	case ACPI_GPE_DISPATCH_METHOD:

		/*
		 * Disable the GPE, so it doesn't keep firing before the method has a
		 * chance to run (it runs asynchronously with interrupts enabled).
		 */
		status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to disable GPE[0x%2X]",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}

		/*
		 * Execute the method associated with the GPE
		 * NOTE: Level-triggered GPEs are cleared after the method completes.
		 */
		status = acpi_os_execute(OSL_GPE_HANDLER,
					 acpi_ev_asynch_execute_gpe_method,
					 gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to queue handler for GPE[0x%2X] - event disabled",
					gpe_number));
		}
		break;

	default:

		/*
		 * No handler or method to run!
		 * 03/2010: This case should no longer be possible. We will not allow
		 * a GPE to be enabled if it has no handler or method.
		 */
		ACPI_ERROR((AE_INFO,
			    "No handler or method for GPE[0x%2X], disabling event",
			    gpe_number));

		/*
		 * Disable the GPE. The GPE will remain disabled until a handler
		 * is installed or ACPICA is restarted.
		 */
		status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to disable GPE[0x%2X]",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
		break;
	}

	return_UINT32(ACPI_INTERRUPT_HANDLED);
}