/******************************************************************************
 *
 * Module Name: evgpeblk - GPE block creation and initialization.
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2018, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpeblk")
#if (!ACPI_REDUCED_HARDWARE)    /* Entire module */
/* Local prototypes */
static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
                          u32 interrupt_number);

static acpi_status
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_install_gpe_block
 *
 * PARAMETERS:  gpe_block           - New GPE block
 *              interrupt_number    - Xrupt to be associated with this
 *                                    GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Install new GPE block with mutex support
 *
 ******************************************************************************/

static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
                          u32 interrupt_number)
{
        struct acpi_gpe_block_info *next_gpe_block;
        struct acpi_gpe_xrupt_info *gpe_xrupt_block;
        acpi_status status;
        acpi_cpu_flags flags;

        ACPI_FUNCTION_TRACE(ev_install_gpe_block);

        status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }

        status =
            acpi_ev_get_gpe_xrupt_block(interrupt_number, &gpe_xrupt_block);
        if (ACPI_FAILURE(status)) {
                goto unlock_and_exit;
        }

        /* Install the new block at the end of the list with lock */

        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
        if (gpe_xrupt_block->gpe_block_list_head) {
                next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
                while (next_gpe_block->next) {
                        next_gpe_block = next_gpe_block->next;
                }

                next_gpe_block->next = gpe_block;
                gpe_block->previous = next_gpe_block;
        } else {
                gpe_xrupt_block->gpe_block_list_head = gpe_block;
        }

        gpe_block->xrupt_block = gpe_xrupt_block;
        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

unlock_and_exit:
        (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
        return_ACPI_STATUS(status);
}
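
/*
 * The per-interrupt list built by acpi_ev_install_gpe_block() is what the
 * GPE detection path walks when the interrupt fires. A minimal sketch of
 * that traversal, assuming the loop shape used by acpi_ev_gpe_detect() in
 * evgpe.c:
 *
 *      gpe_block = gpe_xrupt_list->gpe_block_list_head;
 *      while (gpe_block) {
 *              (scan this block's status and enable register pairs)
 *              gpe_block = gpe_block->next;
 *      }
 */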

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_block
 *
 * PARAMETERS:  gpe_block           - Existing GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a GPE block
 *
 ******************************************************************************/

acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
{
        acpi_status status;
        acpi_cpu_flags flags;

        ACPI_FUNCTION_TRACE(ev_delete_gpe_block);

        status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }

        /* Disable all GPEs in this block */

        status =
            acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL);

        if (!gpe_block->previous && !gpe_block->next) {

                /* This is the last gpe_block on this interrupt */

                status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
                if (ACPI_FAILURE(status)) {
                        goto unlock_and_exit;
                }
        } else {
                /* Remove the block on this interrupt with lock */

                flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
                if (gpe_block->previous) {
                        gpe_block->previous->next = gpe_block->next;
                } else {
                        gpe_block->xrupt_block->gpe_block_list_head =
                            gpe_block->next;
                }

                if (gpe_block->next) {
                        gpe_block->next->previous = gpe_block->previous;
                }

                acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
        }

        acpi_current_gpe_count -= gpe_block->gpe_count;

        /* Free the gpe_block */

        ACPI_FREE(gpe_block->register_info);
        ACPI_FREE(gpe_block->event_info);
        ACPI_FREE(gpe_block);

unlock_and_exit:
        status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
        return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_info_blocks
 *
 * PARAMETERS:  gpe_block           - New GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create the register_info and event_info blocks for this
 *              GPE block
 *
 ******************************************************************************/

static acpi_status
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
{
        struct acpi_gpe_register_info *gpe_register_info = NULL;
        struct acpi_gpe_event_info *gpe_event_info = NULL;
        struct acpi_gpe_event_info *this_event;
        struct acpi_gpe_register_info *this_register;
        u32 i;
        u32 j;
        acpi_status status;

        ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);

        /* Allocate the GPE register information block */

        gpe_register_info =
            ACPI_ALLOCATE_ZEROED((acpi_size)gpe_block->register_count *
                                 sizeof(struct acpi_gpe_register_info));
        if (!gpe_register_info) {
                ACPI_ERROR((AE_INFO,
                            "Could not allocate the GpeRegisterInfo table"));
                return_ACPI_STATUS(AE_NO_MEMORY);
        }

        /*
         * Allocate the GPE event_info block. There are eight distinct GPEs
         * per register. Initialization to zeros is sufficient.
         */
        gpe_event_info =
            ACPI_ALLOCATE_ZEROED((acpi_size)gpe_block->gpe_count *
                                 sizeof(struct acpi_gpe_event_info));
        if (!gpe_event_info) {
                ACPI_ERROR((AE_INFO,
                            "Could not allocate the GpeEventInfo table"));
                status = AE_NO_MEMORY;
                goto error_exit;
        }

        /* Save the new Info arrays in the GPE block */

        gpe_block->register_info = gpe_register_info;
        gpe_block->event_info = gpe_event_info;

        /*
         * Initialize the GPE Register and Event structures. A goal of these
         * tables is to hide the fact that there are two separate GPE register
         * sets in a given GPE hardware block, the status registers occupy the
         * first half, and the enable registers occupy the second half.
         */
        this_register = gpe_register_info;
        this_event = gpe_event_info;

        for (i = 0; i < gpe_block->register_count; i++) {

                /* Init the register_info for this GPE register (8 GPEs) */

                this_register->base_gpe_number = (u16)
                    (gpe_block->block_base_number +
                     (i * ACPI_GPE_REGISTER_WIDTH));

                this_register->status_address.address = gpe_block->address + i;

                this_register->enable_address.address =
                    gpe_block->address + i + gpe_block->register_count;

                this_register->status_address.space_id = gpe_block->space_id;
                this_register->enable_address.space_id = gpe_block->space_id;
                this_register->status_address.bit_width =
                    ACPI_GPE_REGISTER_WIDTH;
                this_register->enable_address.bit_width =
                    ACPI_GPE_REGISTER_WIDTH;
                this_register->status_address.bit_offset = 0;
                this_register->enable_address.bit_offset = 0;

                /* Init the event_info for each GPE within this register */

                for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
                        this_event->gpe_number =
                            (u8)(this_register->base_gpe_number + j);
                        this_event->register_info = this_register;
                        this_event++;
                }

                /* Disable all GPEs within this register */

                status = acpi_hw_write(0x00, &this_register->enable_address);
                if (ACPI_FAILURE(status)) {
                        goto error_exit;
                }

                /* Clear any pending GPE events within this register */

                status = acpi_hw_write(0xFF, &this_register->status_address);
                if (ACPI_FAILURE(status)) {
                        goto error_exit;
                }

                this_register++;
        }

        return_ACPI_STATUS(AE_OK);

error_exit:
        if (gpe_register_info) {
                ACPI_FREE(gpe_register_info);
        }
        if (gpe_event_info) {
                ACPI_FREE(gpe_event_info);
        }

        return_ACPI_STATUS(status);
}
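
/*
 * Worked example of the layout that acpi_ev_create_gpe_info_blocks()
 * produces, assuming a hypothetical block at address 0x60 with
 * register_count == 2 and block_base_number == 0: the status registers
 * are at 0x60 and 0x61, the enable registers at 0x62 and 0x63, and GPEs
 * 0x00-0x07 map to register pair 0 while GPEs 0x08-0x0F map to pair 1.
 *
 * The event_info array is indexed by GPE offset within the block; a later
 * lookup (see acpi_ev_low_get_gpe_info() in evgpe.c) is essentially:
 *
 *      gpe_index = gpe_number - gpe_block->block_base_number;
 *      gpe_event_info = &gpe_block->event_info[gpe_index];
 */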
329 * 330 ******************************************************************************/ 331 332 acpi_status 333 acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, 334 u64 address, 335 u8 space_id, 336 u32 register_count, 337 u16 gpe_block_base_number, 338 u32 interrupt_number, 339 struct acpi_gpe_block_info **return_gpe_block) 340 { 341 acpi_status status; 342 struct acpi_gpe_block_info *gpe_block; 343 struct acpi_gpe_walk_info walk_info; 344 345 ACPI_FUNCTION_TRACE(ev_create_gpe_block); 346 347 if (!register_count) { 348 return_ACPI_STATUS(AE_OK); 349 } 350 351 /* Allocate a new GPE block */ 352 353 gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info)); 354 if (!gpe_block) { 355 return_ACPI_STATUS(AE_NO_MEMORY); 356 } 357 358 /* Initialize the new GPE block */ 359 360 gpe_block->address = address; 361 gpe_block->space_id = space_id; 362 gpe_block->node = gpe_device; 363 gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH); 364 gpe_block->initialized = FALSE; 365 gpe_block->register_count = register_count; 366 gpe_block->block_base_number = gpe_block_base_number; 367 368 /* 369 * Create the register_info and event_info sub-structures 370 * Note: disables and clears all GPEs in the block 371 */ 372 status = acpi_ev_create_gpe_info_blocks(gpe_block); 373 if (ACPI_FAILURE(status)) { 374 ACPI_FREE(gpe_block); 375 return_ACPI_STATUS(status); 376 } 377 378 /* Install the new block in the global lists */ 379 380 status = acpi_ev_install_gpe_block(gpe_block, interrupt_number); 381 if (ACPI_FAILURE(status)) { 382 ACPI_FREE(gpe_block->register_info); 383 ACPI_FREE(gpe_block->event_info); 384 ACPI_FREE(gpe_block); 385 return_ACPI_STATUS(status); 386 } 387 388 acpi_gbl_all_gpes_initialized = FALSE; 389 390 /* Find all GPE methods (_Lxx or_Exx) for this block */ 391 392 walk_info.gpe_block = gpe_block; 393 walk_info.gpe_device = gpe_device; 394 walk_info.execute_by_owner_id = FALSE; 395 396 status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device, 397 ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK, 398 acpi_ev_match_gpe_method, NULL, 399 &walk_info, NULL); 400 401 /* Return the new block */ 402 403 if (return_gpe_block) { 404 (*return_gpe_block) = gpe_block; 405 } 406 407 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, 408 " Initialized GPE %02X to %02X [%4.4s] %u regs on interrupt 0x%X%s\n", 409 (u32)gpe_block->block_base_number, 410 (u32)(gpe_block->block_base_number + 411 (gpe_block->gpe_count - 1)), 412 gpe_device->name.ascii, gpe_block->register_count, 413 interrupt_number, 414 interrupt_number == 415 acpi_gbl_FADT.sci_interrupt ? " (SCI)" : "")); 416 417 /* Update global count of currently available GPEs */ 418 419 acpi_current_gpe_count += gpe_block->gpe_count; 420 return_ACPI_STATUS(AE_OK); 421 } 422 423 /******************************************************************************* 424 * 425 * FUNCTION: acpi_ev_initialize_gpe_block 426 * 427 * PARAMETERS: acpi_gpe_callback 428 * 429 * RETURN: Status 430 * 431 * DESCRIPTION: Initialize and enable a GPE block. Enable GPEs that have 432 * associated methods. 433 * Note: Assumes namespace is locked. 
434 * 435 ******************************************************************************/ 436 437 acpi_status 438 acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 439 struct acpi_gpe_block_info *gpe_block, 440 void *ignored) 441 { 442 acpi_status status; 443 acpi_event_status event_status; 444 struct acpi_gpe_event_info *gpe_event_info; 445 u32 gpe_enabled_count; 446 u32 gpe_index; 447 u32 gpe_number; 448 u32 i; 449 u32 j; 450 451 ACPI_FUNCTION_TRACE(ev_initialize_gpe_block); 452 453 /* 454 * Ignore a null GPE block (e.g., if no GPE block 1 exists), and 455 * any GPE blocks that have been initialized already. 456 */ 457 if (!gpe_block || gpe_block->initialized) { 458 return_ACPI_STATUS(AE_OK); 459 } 460 461 /* 462 * Enable all GPEs that have a corresponding method and have the 463 * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block 464 * must be enabled via the acpi_enable_gpe() interface. 465 */ 466 gpe_enabled_count = 0; 467 468 for (i = 0; i < gpe_block->register_count; i++) { 469 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { 470 471 /* Get the info block for this particular GPE */ 472 473 gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j; 474 gpe_event_info = &gpe_block->event_info[gpe_index]; 475 gpe_number = gpe_block->block_base_number + gpe_index; 476 477 /* 478 * Ignore GPEs that have no corresponding _Lxx/_Exx method 479 * and GPEs that are used for wakeup 480 */ 481 if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) != 482 ACPI_GPE_DISPATCH_METHOD) 483 || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { 484 continue; 485 } 486 487 event_status = 0; 488 (void)acpi_hw_get_gpe_status(gpe_event_info, 489 &event_status); 490 491 status = acpi_ev_add_gpe_reference(gpe_event_info); 492 if (ACPI_FAILURE(status)) { 493 ACPI_EXCEPTION((AE_INFO, status, 494 "Could not enable GPE 0x%02X", 495 gpe_number)); 496 continue; 497 } 498 499 gpe_event_info->flags |= ACPI_GPE_AUTO_ENABLED; 500 501 if (event_status & ACPI_EVENT_FLAG_STATUS_SET) { 502 ACPI_INFO(("GPE 0x%02X active on init", 503 gpe_number)); 504 (void)acpi_ev_gpe_dispatch(gpe_block->node, 505 gpe_event_info, 506 gpe_number); 507 } 508 509 gpe_enabled_count++; 510 } 511 } 512 513 if (gpe_enabled_count) { 514 ACPI_INFO(("Enabled %u GPEs in block %02X to %02X", 515 gpe_enabled_count, (u32)gpe_block->block_base_number, 516 (u32)(gpe_block->block_base_number + 517 (gpe_block->gpe_count - 1)))); 518 } 519 520 gpe_block->initialized = TRUE; 521 522 return_ACPI_STATUS(AE_OK); 523 } 524 525 #endif /* !ACPI_REDUCED_HARDWARE */ 526