/******************************************************************************
 *
 * Module Name: dsmethod - Parser/Interpreter interface - control method parsing
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2013, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acdispat.h"
#include "acinterp.h"
#include "acnamesp.h"
#ifdef ACPI_DISASSEMBLER
#include "acdisasm.h"
#endif

#define _COMPONENT          ACPI_DISPATCHER
ACPI_MODULE_NAME("dsmethod")

/* Local prototypes */
static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_method_error
 *
 * PARAMETERS:  status          - Execution status
 *              walk_state      - Current state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Called on method error. Invoke the global exception handler if
 *              present, dump the method data if the disassembler is configured
 *
 *              Note: Allows the exception handler to change the status code
 *
 ******************************************************************************/

acpi_status
acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
{
	ACPI_FUNCTION_ENTRY();

	/* Ignore AE_OK and control exception codes */

	if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) {
		return (status);
	}

	/* Invoke the global exception handler */

	if (acpi_gbl_exception_handler) {

		/* Exit the interpreter, allow handler to execute methods */

		acpi_ex_exit_interpreter();

		/*
		 * Handler can map the exception code to anything it wants, including
		 * AE_OK, in which case the executing method will not be aborted.
		 */
		status = acpi_gbl_exception_handler(status,
						    walk_state->method_node ?
						    walk_state->method_node->
						    name.integer : 0,
						    walk_state->opcode,
						    walk_state->aml_offset,
						    NULL);
		acpi_ex_enter_interpreter();
	}

	acpi_ds_clear_implicit_return(walk_state);

#ifdef ACPI_DISASSEMBLER
	if (ACPI_FAILURE(status)) {

		/* Display method locals/args if disassembler is present */

		acpi_dm_dump_method_info(status, walk_state, walk_state->op);
	}
#endif

	return (status);
}
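
/*
 * Illustrative sketch (not part of the original source): the global handler
 * invoked above is registered by the host through
 * acpi_install_exception_handler(). A minimal handler matching the call made
 * in acpi_ds_method_error() might look like the following; the name
 * example_aml_exception_handler and the pass-through behavior are
 * hypothetical.
 *
 *   static acpi_status
 *   example_aml_exception_handler(acpi_status aml_status, acpi_name name,
 *                                 u16 opcode, u32 aml_offset, void *context)
 *   {
 *       // Returning the status unchanged aborts the method as usual;
 *       // returning AE_OK instead would allow execution to continue.
 *       return (aml_status);
 *   }
 *
 *   // Registration (typically during host/OSPM initialization):
 *   //   acpi_install_exception_handler(example_aml_exception_handler);
 */
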
/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_create_method_mutex
 *
 * PARAMETERS:  method_desc         - The method object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create a mutex object for a serialized control method
 *
 ******************************************************************************/

static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
{
	union acpi_operand_object *mutex_desc;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ds_create_method_mutex);

	/* Create the new mutex object */

	mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
	if (!mutex_desc) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Create the actual OS Mutex */

	status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
	if (ACPI_FAILURE(status)) {
		acpi_ut_delete_object_desc(mutex_desc);
		return_ACPI_STATUS(status);
	}

	mutex_desc->mutex.sync_level = method_desc->method.sync_level;
	method_desc->method.mutex = mutex_desc;
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_begin_method_execution
 *
 * PARAMETERS:  method_node         - Node of the method
 *              obj_desc            - The method object
 *              walk_state          - Current state, NULL if not yet executing
 *                                    a method.
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Prepare a method for execution. Parses the method if necessary,
 *              increments the thread count, and waits at the method semaphore
 *              for clearance to execute.
 *
 ******************************************************************************/
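
/*
 * Note (illustrative, not part of the original source): the Serialized flag
 * and sync level enforced below come from the ASL declaration of the method,
 * for example (hypothetical method name):
 *
 *     Method (EXMP, 2, Serialized, 4) { ... }
 *
 * which declares a two-argument method that is Serialized with SyncLevel 4.
 * A thread may only enter such a method when its current sync level is less
 * than or equal to the method's sync level, which is what the
 * current_sync_level check below implements as a deadlock-prevention measure.
 */
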
acpi_status
acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
			       union acpi_operand_object *obj_desc,
			       struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);

	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	/* Prevent wraparound of thread count */

	if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
		ACPI_ERROR((AE_INFO,
			    "Method reached maximum reentrancy limit (255)"));
		return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
	}

	/*
	 * If this method is serialized, we need to acquire the method mutex.
	 */
	if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) {
		/*
		 * Create a mutex for the method if it is defined to be Serialized
		 * and a mutex has not already been created. We defer the mutex creation
		 * until a method is actually executed, to minimize the object count
		 */
		if (!obj_desc->method.mutex) {
			status = acpi_ds_create_method_mutex(obj_desc);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}
		}

		/*
		 * The current_sync_level (per-thread) must be less than or equal to
		 * the sync level of the method. This mechanism provides some
		 * deadlock prevention
		 *
		 * Top-level method invocation has no walk state at this point
		 */
		if (walk_state &&
		    (walk_state->thread->current_sync_level >
		     obj_desc->method.mutex->mutex.sync_level)) {
			ACPI_ERROR((AE_INFO,
				    "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%u)",
				    acpi_ut_get_node_name(method_node),
				    walk_state->thread->current_sync_level));

			return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
		}

		/*
		 * Obtain the method mutex if necessary. Do not acquire mutex for a
		 * recursive call.
		 */
		if (!walk_state ||
		    !obj_desc->method.mutex->mutex.thread_id ||
		    (walk_state->thread->thread_id !=
		     obj_desc->method.mutex->mutex.thread_id)) {
			/*
			 * Acquire the method mutex. This releases the interpreter if we
			 * block (and reacquires it before it returns)
			 */
			status =
			    acpi_ex_system_wait_mutex(obj_desc->method.mutex->
						      mutex.os_mutex,
						      ACPI_WAIT_FOREVER);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}

			/* Update the mutex and walk info and save the original sync_level */

			if (walk_state) {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    walk_state->thread->current_sync_level;

				obj_desc->method.mutex->mutex.thread_id =
				    walk_state->thread->thread_id;
				walk_state->thread->current_sync_level =
				    obj_desc->method.sync_level;
			} else {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    obj_desc->method.mutex->mutex.sync_level;
			}
		}

		/* Always increase acquisition depth */

		obj_desc->method.mutex->mutex.acquisition_depth++;
	}

	/*
	 * Allocate an Owner ID for this method, only if this is the first thread
	 * to begin concurrent execution. We only need one owner_id, even if the
	 * method is invoked recursively.
	 */
	if (!obj_desc->method.owner_id) {
		status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
		if (ACPI_FAILURE(status)) {
			goto cleanup;
		}
	}

	/*
	 * Increment the method parse tree thread count since it has been
	 * reentered one more time (even if it is the same thread)
	 */
	obj_desc->method.thread_count++;
	acpi_method_count++;
	return_ACPI_STATUS(status);

cleanup:
	/* On error, must release the method mutex (if present) */

	if (obj_desc->method.mutex) {
		acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
	}
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_call_control_method
 *
 * PARAMETERS:  thread              - Info for this thread
 *              this_walk_state     - Current walk state
 *              op                  - Current Op to be walked
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Transfer execution to a called control method
 *
 ******************************************************************************/

acpi_status
acpi_ds_call_control_method(struct acpi_thread_state *thread,
			    struct acpi_walk_state *this_walk_state,
			    union acpi_parse_object *op)
{
	acpi_status status;
	struct acpi_namespace_node *method_node;
	struct acpi_walk_state *next_walk_state = NULL;
	union acpi_operand_object *obj_desc;
	struct acpi_evaluate_info *info;
	u32 i;

	ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "Calling method %p, currentstate=%p\n",
			  this_walk_state->prev_op, this_walk_state));

	/*
	 * Get the namespace entry for the control method we are about to call
	 */
	method_node = this_walk_state->method_call_node;
	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	obj_desc = acpi_ns_get_attached_object(method_node);
	if (!obj_desc) {
		return_ACPI_STATUS(AE_NULL_OBJECT);
	}

	/* Init for new method, possibly wait on method mutex */

	status = acpi_ds_begin_method_execution(method_node, obj_desc,
						this_walk_state);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Begin method parse/execution. Create a new walk state */

	next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
						    NULL, obj_desc, thread);
	if (!next_walk_state) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/*
	 * The resolved arguments were put on the previous walk state's operand
	 * stack. Operands on the previous walk state stack always
	 * start at index 0. Also, null terminate the list of arguments
	 */
	this_walk_state->operands[this_walk_state->num_operands] = NULL;

	/*
	 * Allocate and initialize the evaluation information block
	 * TBD: this is somewhat inefficient, should change interface to
	 * ds_init_aml_walk. For now, keeps this struct off the CPU stack
	 */
	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
	if (!info) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	info->parameters = &this_walk_state->operands[0];

	status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
				       obj_desc->method.aml_start,
				       obj_desc->method.aml_length, info,
				       ACPI_IMODE_EXECUTE);

	ACPI_FREE(info);
	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}

	/*
	 * Delete the operands on the previous walkstate operand stack
	 * (they were copied to new objects)
	 */
	for (i = 0; i < obj_desc->method.param_count; i++) {
		acpi_ut_remove_reference(this_walk_state->operands[i]);
		this_walk_state->operands[i] = NULL;
	}

	/* Clear the operand stack */

	this_walk_state->num_operands = 0;

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
			  method_node->name.ascii, next_walk_state));

	/* Invoke an internal method if necessary */

	if (obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
		status =
		    obj_desc->method.dispatch.implementation(next_walk_state);
		if (status == AE_OK) {
			status = AE_CTRL_TERMINATE;
		}
	}

	return_ACPI_STATUS(status);

cleanup:

	/* On error, we must terminate the method properly */

	acpi_ds_terminate_control_method(obj_desc, next_walk_state);
	if (next_walk_state) {
		acpi_ds_delete_walk_state(next_walk_state);
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_restart_control_method
 *
 * PARAMETERS:  walk_state          - State for preempted method (caller)
 *              return_desc         - Return value from the called method
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Restart a method that was preempted by another (nested) method
 *              invocation. Handle the return value (if any) from the callee.
 *
 ******************************************************************************/

acpi_status
acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
			       union acpi_operand_object *return_desc)
{
	acpi_status status;
	int same_as_implicit_return;

	ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
			  acpi_ut_get_node_name(walk_state->method_node),
			  walk_state->method_call_op, return_desc));

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "    ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
			  walk_state->return_used,
			  walk_state->results, walk_state));

	/* Did the called method return a value? */

	if (return_desc) {

		/* Is the implicit return object the same as the return desc? */

		same_as_implicit_return =
		    (walk_state->implicit_return_obj == return_desc);

		/* Are we actually going to use the return value? */

		if (walk_state->return_used) {

			/* Save the return value from the previous method */

			status = acpi_ds_result_push(return_desc, walk_state);
			if (ACPI_FAILURE(status)) {
				acpi_ut_remove_reference(return_desc);
				return_ACPI_STATUS(status);
			}

			/*
			 * Save as THIS method's return value in case it is returned
			 * immediately to yet another method
			 */
			walk_state->return_desc = return_desc;
		}

		/*
		 * The following code is the optional support for the so-called
		 * "implicit return". Some AML code assumes that the last value of the
		 * method is "implicitly" returned to the caller, in the absence of an
		 * explicit return value.
		 *
		 * Just save the last result of the method as the return value.
		 *
		 * NOTE: this is optional because the ASL language does not actually
		 * support this behavior.
		 */
		else if (!acpi_ds_do_implicit_return
			 (return_desc, walk_state, FALSE)
			 || same_as_implicit_return) {
			/*
			 * Delete the return value if it will not be used by the
			 * calling method or remove one reference if the explicit return
			 * is the same as the implicit return value.
			 */
			acpi_ut_remove_reference(return_desc);
		}
	}

	return_ACPI_STATUS(AE_OK);
}
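
/*
 * Note (illustrative, not part of the original source): the "implicit return"
 * handled above covers AML generated from ASL such as the hypothetical
 *
 *     Method (GETV)
 *     {
 *         Store (VAL0, Local0)    // no explicit Return statement
 *     }
 *
 * where some firmware expects the caller of GETV to receive the result of the
 * last operation executed (here, the value stored by Store), even though the
 * ASL language does not define this behavior.
 */
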
/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_terminate_control_method
 *
 * PARAMETERS:  method_desc         - Method object
 *              walk_state          - State associated with the method
 *
 * RETURN:      None
 *
 * DESCRIPTION: Terminate a control method. Delete everything that the method
 *              created, delete all locals and arguments, and delete the parse
 *              tree if requested.
 *
 * MUTEX:       Interpreter is locked
 *
 ******************************************************************************/

void
acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
				 struct acpi_walk_state *walk_state)
{

	ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);

	/* method_desc is required, walk_state is optional */

	if (!method_desc) {
		return_VOID;
	}

	if (walk_state) {

		/* Delete all arguments and locals */

		acpi_ds_method_data_delete_all(walk_state);

		/*
		 * If method is serialized, release the mutex and restore the
		 * current sync level for this thread
		 */
		if (method_desc->method.mutex) {

			/* Acquisition Depth handles recursive calls */

			method_desc->method.mutex->mutex.acquisition_depth--;
			if (!method_desc->method.mutex->mutex.acquisition_depth) {
				walk_state->thread->current_sync_level =
				    method_desc->method.mutex->mutex.
				    original_sync_level;

				acpi_os_release_mutex(method_desc->method.
						      mutex->mutex.os_mutex);
				method_desc->method.mutex->mutex.thread_id = 0;
			}
		}

		/*
		 * Delete any namespace objects created anywhere within the
		 * namespace by the execution of this method. Unless:
		 * 1) This method is a module-level executable code method, in which
		 *    case we want to make the objects permanent.
		 * 2) There are other threads executing the method, in which case we
		 *    will wait until the last thread has completed.
590 */ 591 if (!(method_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL) 592 && (method_desc->method.thread_count == 1)) { 593 594 /* Delete any direct children of (created by) this method */ 595 596 acpi_ns_delete_namespace_subtree(walk_state-> 597 method_node); 598 599 /* 600 * Delete any objects that were created by this method 601 * elsewhere in the namespace (if any were created). 602 * Use of the ACPI_METHOD_MODIFIED_NAMESPACE optimizes the 603 * deletion such that we don't have to perform an entire 604 * namespace walk for every control method execution. 605 */ 606 if (method_desc->method. 607 info_flags & ACPI_METHOD_MODIFIED_NAMESPACE) { 608 acpi_ns_delete_namespace_by_owner(method_desc-> 609 method. 610 owner_id); 611 method_desc->method.info_flags &= 612 ~ACPI_METHOD_MODIFIED_NAMESPACE; 613 } 614 } 615 } 616 617 /* Decrement the thread count on the method */ 618 619 if (method_desc->method.thread_count) { 620 method_desc->method.thread_count--; 621 } else { 622 ACPI_ERROR((AE_INFO, "Invalid zero thread count in method")); 623 } 624 625 /* Are there any other threads currently executing this method? */ 626 627 if (method_desc->method.thread_count) { 628 /* 629 * Additional threads. Do not release the owner_id in this case, 630 * we immediately reuse it for the next thread executing this method 631 */ 632 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, 633 "*** Completed execution of one thread, %u threads remaining\n", 634 method_desc->method.thread_count)); 635 } else { 636 /* This is the only executing thread for this method */ 637 638 /* 639 * Support to dynamically change a method from not_serialized to 640 * Serialized if it appears that the method is incorrectly written and 641 * does not support multiple thread execution. The best example of this 642 * is if such a method creates namespace objects and blocks. A second 643 * thread will fail with an AE_ALREADY_EXISTS exception. 644 * 645 * This code is here because we must wait until the last thread exits 646 * before marking the method as serialized. 647 */ 648 if (method_desc->method. 649 info_flags & ACPI_METHOD_SERIALIZED_PENDING) { 650 if (walk_state) { 651 ACPI_INFO((AE_INFO, 652 "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error", 653 walk_state->method_node->name. 654 ascii)); 655 } 656 657 /* 658 * Method tried to create an object twice and was marked as 659 * "pending serialized". The probable cause is that the method 660 * cannot handle reentrancy. 661 * 662 * The method was created as not_serialized, but it tried to create 663 * a named object and then blocked, causing the second thread 664 * entrance to begin and then fail. Workaround this problem by 665 * marking the method permanently as Serialized when the last 666 * thread exits here. 667 */ 668 method_desc->method.info_flags &= 669 ~ACPI_METHOD_SERIALIZED_PENDING; 670 method_desc->method.info_flags |= 671 ACPI_METHOD_SERIALIZED; 672 method_desc->method.sync_level = 0; 673 } 674 675 /* No more threads, we can free the owner_id */ 676 677 if (! 678 (method_desc->method. 679 info_flags & ACPI_METHOD_MODULE_LEVEL)) { 680 acpi_ut_release_owner_id(&method_desc->method.owner_id); 681 } 682 } 683 684 return_VOID; 685 } 686