xref: /openbmc/linux/drivers/acpi/acpica/dsmethod.c (revision 612c2932)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /******************************************************************************
3  *
4  * Module Name: dsmethod - Parser/Interpreter interface - control method parsing
5  *
6  * Copyright (C) 2000 - 2023, Intel Corp.
7  *
8  *****************************************************************************/
9 
10 #include <acpi/acpi.h>
11 #include "accommon.h"
12 #include "acdispat.h"
13 #include "acinterp.h"
14 #include "acnamesp.h"
15 #include "acparser.h"
16 #include "amlcode.h"
17 #include "acdebug.h"
18 
19 #define _COMPONENT          ACPI_DISPATCHER
20 ACPI_MODULE_NAME("dsmethod")
21 
22 /* Local prototypes */
23 static acpi_status
24 acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
25 			     union acpi_parse_object **out_op);
26 
27 static acpi_status
28 acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);
29 
30 /*******************************************************************************
31  *
32  * FUNCTION:    acpi_ds_auto_serialize_method
33  *
34  * PARAMETERS:  node                        - Namespace Node of the method
35  *              obj_desc                    - Method object attached to node
36  *
37  * RETURN:      Status
38  *
39  * DESCRIPTION: Parse a control method AML to scan for control methods that
40  *              need serialization due to the creation of named objects.
41  *
42  * NOTE: It is a bit of overkill to mark all such methods serialized, since
43  * there is only a problem if the method actually blocks during execution.
44  * A blocking operation is, for example, a Sleep() operation, or any access
45  * to an operation region. However, it is probably not possible to easily
46  * detect whether a method will block or not, so we simply mark all suspicious
47  * methods as serialized.
48  *
49  * NOTE2: This code is essentially a generic routine for parsing a single
50  * control method.
51  *
52  ******************************************************************************/
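/*
 * For illustration only (hypothetical ASL), a method such as the following
 * is the kind of code this pass flags, because it creates a named object
 * and may block on an operation region access:
 *
 *     Method (_TMP, 0, NotSerialized)
 *     {
 *         Name (TVAL, 0)          // Creates a named object
 *         Store (PTMP, TVAL)      // PTMP: some operation region field
 *         Return (TVAL)
 *     }
 *
 * If a second thread enters while the first is blocked, its Name (TVAL, 0)
 * would fail with AE_ALREADY_EXISTS unless the method is marked Serialized.
 */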
53 
54 acpi_status
55 acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
56 			      union acpi_operand_object *obj_desc)
57 {
58 	acpi_status status;
59 	union acpi_parse_object *op = NULL;
60 	struct acpi_walk_state *walk_state;
61 
62 	ACPI_FUNCTION_TRACE_PTR(ds_auto_serialize_method, node);
63 
64 	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
65 			  "Method auto-serialization parse [%4.4s] %p\n",
66 			  acpi_ut_get_node_name(node), node));
67 
68 	/* Create/Init a root op for the method parse tree */
69 
70 	op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start);
71 	if (!op) {
72 		return_ACPI_STATUS(AE_NO_MEMORY);
73 	}
74 
75 	acpi_ps_set_name(op, node->name.integer);
76 	op->common.node = node;
77 
78 	/* Create and initialize a new walk state */
79 
80 	walk_state =
81 	    acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
82 	if (!walk_state) {
83 		acpi_ps_free_op(op);
84 		return_ACPI_STATUS(AE_NO_MEMORY);
85 	}
86 
87 	status = acpi_ds_init_aml_walk(walk_state, op, node,
88 				       obj_desc->method.aml_start,
89 				       obj_desc->method.aml_length, NULL, 0);
90 	if (ACPI_FAILURE(status)) {
91 		acpi_ds_delete_walk_state(walk_state);
92 		acpi_ps_free_op(op);
93 		return_ACPI_STATUS(status);
94 	}
95 
96 	walk_state->descending_callback = acpi_ds_detect_named_opcodes;
97 
98 	/* Parse the method, scan for creation of named objects */
99 
100 	status = acpi_ps_parse_aml(walk_state);
101 
102 	acpi_ps_delete_parse_tree(op);
103 	return_ACPI_STATUS(status);
104 }
105 
106 /*******************************************************************************
107  *
108  * FUNCTION:    acpi_ds_detect_named_opcodes
109  *
110  * PARAMETERS:  walk_state      - Current state of the parse tree walk
111  *              out_op          - Unused, required for parser interface
112  *
113  * RETURN:      Status
114  *
115  * DESCRIPTION: Descending callback used during the loading of ACPI tables.
116  *              Currently used to detect methods that must be marked serialized
117  *              in order to avoid problems with the creation of named objects.
118  *
119  ******************************************************************************/
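/*
 * The opcode classes tested below cover, for example, Name and Device
 * definitions (AML_NAMED), the Create*Field operators such as
 * CreateDWordField (AML_CREATE), and Field/IndexField/BankField lists
 * (AML_FIELD). The exact membership is defined by the flags in the AML
 * opcode table, not by this list.
 */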
120 
121 static acpi_status
122 acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
123 			     union acpi_parse_object **out_op)
124 {
125 
126 	ACPI_FUNCTION_NAME(acpi_ds_detect_named_opcodes);
127 
128 	/* We are only interested in opcodes that create a new name */
129 
130 	if (!
131 	    (walk_state->op_info->
132 	     flags & (AML_NAMED | AML_CREATE | AML_FIELD))) {
133 		return (AE_OK);
134 	}
135 
136 	/*
137 	 * At this point, we know we have a Named object opcode.
138 	 * Mark the method as serialized. Later code will create a mutex for
139 	 * this method to enforce serialization.
140 	 *
141 	 * Note, ACPI_METHOD_IGNORE_SYNC_LEVEL flag means that we will ignore the
142 	 * Sync Level mechanism for this method, even though it is now serialized.
143 	 * Otherwise, there can be conflicts with existing ASL code that actually
144 	 * uses sync levels.
145 	 */
146 	walk_state->method_desc->method.sync_level = 0;
147 	walk_state->method_desc->method.info_flags |=
148 	    (ACPI_METHOD_SERIALIZED | ACPI_METHOD_IGNORE_SYNC_LEVEL);
149 
150 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
151 			  "Method serialized [%4.4s] %p - [%s] (%4.4X)\n",
152 			  walk_state->method_node->name.ascii,
153 			  walk_state->method_node, walk_state->op_info->name,
154 			  walk_state->opcode));
155 
156 	/* Abort the parse, no need to examine this method any further */
157 
158 	return (AE_CTRL_TERMINATE);
159 }
160 
161 /*******************************************************************************
162  *
163  * FUNCTION:    acpi_ds_method_error
164  *
165  * PARAMETERS:  status          - Execution status
166  *              walk_state      - Current state
167  *
168  * RETURN:      Status
169  *
170  * DESCRIPTION: Called on method error. Invoke the global exception handler if
171  *              present, dump the method data if the debugger is configured
172  *
173  *              Note: Allows the exception handler to change the status code
174  *
175  ******************************************************************************/
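/*
 * A host can observe (and override) these exceptions by registering a
 * global handler via acpi_install_exception_handler(). A minimal sketch,
 * with a hypothetical handler name:
 *
 *     static acpi_status my_aml_exception_handler(acpi_status aml_status,
 *                                                 acpi_name name, u16 opcode,
 *                                                 u32 aml_offset, void *context)
 *     {
 *         // Returning AE_OK here would prevent the method from aborting
 *         return (aml_status);
 *     }
 *
 *     (void)acpi_install_exception_handler(my_aml_exception_handler);
 */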
176 
177 acpi_status
178 acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
179 {
180 	u32 aml_offset;
181 	acpi_name name = 0;
182 
183 	ACPI_FUNCTION_ENTRY();
184 
185 	/* Ignore AE_OK and control exception codes */
186 
187 	if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) {
188 		return (status);
189 	}
190 
191 	/* Invoke the global exception handler */
192 
193 	if (acpi_gbl_exception_handler) {
194 
195 		/* Exit the interpreter, allow handler to execute methods */
196 
197 		acpi_ex_exit_interpreter();
198 
199 		/*
200 		 * Handler can map the exception code to anything it wants, including
201 		 * AE_OK, in which case the executing method will not be aborted.
202 		 */
203 		aml_offset = (u32)ACPI_PTR_DIFF(walk_state->aml,
204 						walk_state->parser_state.
205 						aml_start);
206 
207 		if (walk_state->method_node) {
208 			name = walk_state->method_node->name.integer;
209 		} else if (walk_state->deferred_node) {
210 			name = walk_state->deferred_node->name.integer;
211 		}
212 
213 		status = acpi_gbl_exception_handler(status, name,
214 						    walk_state->opcode,
215 						    aml_offset, NULL);
216 		acpi_ex_enter_interpreter();
217 	}
218 
219 	acpi_ds_clear_implicit_return(walk_state);
220 
221 	if (ACPI_FAILURE(status)) {
222 		acpi_ds_dump_method_stack(status, walk_state, walk_state->op);
223 
224 		/* Display method locals/args if debugger is present */
225 
226 #ifdef ACPI_DEBUGGER
227 		acpi_db_dump_method_info(status, walk_state);
228 #endif
229 	}
230 
231 	return (status);
232 }
233 
234 /*******************************************************************************
235  *
236  * FUNCTION:    acpi_ds_create_method_mutex
237  *
238  * PARAMETERS:  obj_desc            - The method object
239  *
240  * RETURN:      Status
241  *
242  * DESCRIPTION: Create a mutex object for a serialized control method
243  *
244  ******************************************************************************/
245 
246 static acpi_status
247 acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
248 {
249 	union acpi_operand_object *mutex_desc;
250 	acpi_status status;
251 
252 	ACPI_FUNCTION_TRACE(ds_create_method_mutex);
253 
254 	/* Create the new mutex object */
255 
256 	mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
257 	if (!mutex_desc) {
258 		return_ACPI_STATUS(AE_NO_MEMORY);
259 	}
260 
261 	/* Create the actual OS Mutex */
262 
263 	status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
264 	if (ACPI_FAILURE(status)) {
265 		acpi_ut_delete_object_desc(mutex_desc);
266 		return_ACPI_STATUS(status);
267 	}
268 
269 	mutex_desc->mutex.sync_level = method_desc->method.sync_level;
270 	method_desc->method.mutex = mutex_desc;
271 	return_ACPI_STATUS(AE_OK);
272 }
273 
274 /*******************************************************************************
275  *
276  * FUNCTION:    acpi_ds_begin_method_execution
277  *
278  * PARAMETERS:  method_node         - Node of the method
279  *              obj_desc            - The method object
280  *              walk_state          - current state, NULL if not yet executing
281  *                                    a method.
282  *
283  * RETURN:      Status
284  *
285  * DESCRIPTION: Prepare a method for execution. Parses the method if necessary,
286  *              increments the thread count, and waits at the method semaphore
287  *              for clearance to execute.
288  *
289  ******************************************************************************/
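/*
 * For a method declared Serialized in ASL, for example:
 *
 *     Method (LCKD, 1, Serialized, 4)     // SyncLevel 4
 *     {
 *         ...
 *     }
 *
 * the mutex created below inherits SyncLevel 4, and a caller whose current
 * sync level is already greater than 4 is rejected with AE_AML_MUTEX_ORDER
 * (unless the method was auto-serialized, in which case the sync level
 * check is skipped).
 */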
290 
291 acpi_status
292 acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
293 			       union acpi_operand_object *obj_desc,
294 			       struct acpi_walk_state *walk_state)
295 {
296 	acpi_status status = AE_OK;
297 
298 	ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);
299 
300 	if (!method_node) {
301 		return_ACPI_STATUS(AE_NULL_ENTRY);
302 	}
303 
304 	acpi_ex_start_trace_method(method_node, obj_desc, walk_state);
305 
306 	/* Prevent wraparound of thread count */
307 
308 	if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
309 		ACPI_ERROR((AE_INFO,
310 			    "Method reached maximum reentrancy limit (255)"));
311 		return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
312 	}
313 
314 	/*
315 	 * If this method is serialized, we need to acquire the method mutex.
316 	 */
317 	if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) {
318 		/*
319 		 * Create a mutex for the method if it is defined to be Serialized
320 		 * and a mutex has not already been created. We defer the mutex creation
321 		 * until a method is actually executed, to minimize the object count
322 		 */
323 		if (!obj_desc->method.mutex) {
324 			status = acpi_ds_create_method_mutex(obj_desc);
325 			if (ACPI_FAILURE(status)) {
326 				return_ACPI_STATUS(status);
327 			}
328 		}
329 
330 		/*
331 		 * The current_sync_level (per-thread) must be less than or equal to
332 		 * the sync level of the method. This mechanism provides some
333 		 * deadlock prevention.
334 		 *
335 		 * If the method was auto-serialized, we just ignore the sync level
336 		 * mechanism, because auto-serialization of methods can interfere
337 		 * with ASL code that actually uses sync levels.
338 		 *
339 		 * Top-level method invocation has no walk state at this point
340 		 */
341 		if (walk_state &&
342 		    (!(obj_desc->method.
343 		       info_flags & ACPI_METHOD_IGNORE_SYNC_LEVEL))
344 		    && (walk_state->thread->current_sync_level >
345 			obj_desc->method.mutex->mutex.sync_level)) {
346 			ACPI_ERROR((AE_INFO,
347 				    "Cannot acquire Mutex for method [%4.4s]"
348 				    ", current SyncLevel is too large (%u)",
349 				    acpi_ut_get_node_name(method_node),
350 				    walk_state->thread->current_sync_level));
351 
352 			return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
353 		}
354 
355 		/*
356 		 * Obtain the method mutex if necessary. Do not acquire mutex for a
357 		 * recursive call.
358 		 */
359 		if (!walk_state ||
360 		    !obj_desc->method.mutex->mutex.thread_id ||
361 		    (walk_state->thread->thread_id !=
362 		     obj_desc->method.mutex->mutex.thread_id)) {
363 			/*
364 			 * Acquire the method mutex. This releases the interpreter if we
365 			 * block (and reacquires it before it returns)
366 			 */
367 			status =
368 			    acpi_ex_system_wait_mutex(obj_desc->method.mutex->
369 						      mutex.os_mutex,
370 						      ACPI_WAIT_FOREVER);
371 			if (ACPI_FAILURE(status)) {
372 				return_ACPI_STATUS(status);
373 			}
374 
375 			/* Update the mutex and walk info and save the original sync_level */
376 
377 			if (walk_state) {
378 				obj_desc->method.mutex->mutex.
379 				    original_sync_level =
380 				    walk_state->thread->current_sync_level;
381 
382 				obj_desc->method.mutex->mutex.thread_id =
383 				    walk_state->thread->thread_id;
384 
385 				/*
386 				 * Update the current sync_level only if this is not an auto-
387 				 * serialized method. In the auto case, we have to ignore
388 				 * the sync level for the method mutex (created for the
389 				 * auto-serialization) because we have no idea of what the
390 				 * sync level should be. Therefore, just ignore it.
391 				 */
392 				if (!(obj_desc->method.info_flags &
393 				      ACPI_METHOD_IGNORE_SYNC_LEVEL)) {
394 					walk_state->thread->current_sync_level =
395 					    obj_desc->method.sync_level;
396 				}
397 			} else {
398 				obj_desc->method.mutex->mutex.
399 				    original_sync_level =
400 				    obj_desc->method.mutex->mutex.sync_level;
401 
402 				obj_desc->method.mutex->mutex.thread_id =
403 				    acpi_os_get_thread_id();
404 			}
405 		}
406 
407 		/* Always increase acquisition depth */
408 
409 		obj_desc->method.mutex->mutex.acquisition_depth++;
410 	}
411 
412 	/*
413 	 * Allocate an Owner ID for this method, only if this is the first thread
414 	 * to begin concurrent execution. We only need one owner_id, even if the
415 	 * method is invoked recursively.
416 	 */
417 	if (!obj_desc->method.owner_id) {
418 		status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
419 		if (ACPI_FAILURE(status)) {
420 			goto cleanup;
421 		}
422 	}
423 
424 	/*
425 	 * Increment the method parse tree thread count since it has been
426 	 * reentered one more time (even if it is the same thread)
427 	 */
428 	obj_desc->method.thread_count++;
429 	acpi_method_count++;
430 	return_ACPI_STATUS(status);
431 
432 cleanup:
433 	/* On error, must release the method mutex (if present) */
434 
435 	if (obj_desc->method.mutex) {
436 		acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
437 	}
438 	return_ACPI_STATUS(status);
439 }
440 
441 /*******************************************************************************
442  *
443  * FUNCTION:    acpi_ds_call_control_method
444  *
445  * PARAMETERS:  thread              - Info for this thread
446  *              this_walk_state     - Current walk state
447  *              op                  - Current Op to be walked
448  *
449  * RETURN:      Status
450  *
451  * DESCRIPTION: Transfer execution to a called control method
452  *
453  ******************************************************************************/
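/*
 * Illustrative ASL for the nested call handled here: evaluating MAIN
 * preempts its walk state and creates a new one for the callee ADD2,
 * passing the resolved arguments through the operand stack:
 *
 *     Method (ADD2, 2)
 *     {
 *         Return (Add (Arg0, Arg1))
 *     }
 *
 *     Method (MAIN, 0)
 *     {
 *         Return (ADD2 (4, 5))
 *     }
 */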
454 
455 acpi_status
456 acpi_ds_call_control_method(struct acpi_thread_state *thread,
457 			    struct acpi_walk_state *this_walk_state,
458 			    union acpi_parse_object *op)
459 {
460 	acpi_status status;
461 	struct acpi_namespace_node *method_node;
462 	struct acpi_walk_state *next_walk_state = NULL;
463 	union acpi_operand_object *obj_desc;
464 	struct acpi_evaluate_info *info;
465 	u32 i;
466 
467 	ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
468 
469 	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
470 			  "Calling method %p, currentstate=%p\n",
471 			  this_walk_state->prev_op, this_walk_state));
472 
473 	/*
474 	 * Get the namespace entry for the control method we are about to call
475 	 */
476 	method_node = this_walk_state->method_call_node;
477 	if (!method_node) {
478 		return_ACPI_STATUS(AE_NULL_ENTRY);
479 	}
480 
481 	obj_desc = acpi_ns_get_attached_object(method_node);
482 	if (!obj_desc) {
483 		return_ACPI_STATUS(AE_NULL_OBJECT);
484 	}
485 
486 	/* Init for new method, possibly wait on method mutex */
487 
488 	status =
489 	    acpi_ds_begin_method_execution(method_node, obj_desc,
490 					   this_walk_state);
491 	if (ACPI_FAILURE(status)) {
492 		return_ACPI_STATUS(status);
493 	}
494 
495 	/* Begin method parse/execution. Create a new walk state */
496 
497 	next_walk_state =
498 	    acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, obj_desc,
499 				      thread);
500 	if (!next_walk_state) {
501 		status = AE_NO_MEMORY;
502 		goto cleanup;
503 	}
504 
505 	/*
506 	 * The resolved arguments were put on the previous walk state's operand
507 	 * stack. Operands on the previous walk state stack always
508 	 * start at index 0. Also, null terminate the list of arguments
509 	 */
510 	this_walk_state->operands[this_walk_state->num_operands] = NULL;
511 
512 	/*
513 	 * Allocate and initialize the evaluation information block
514 	 * TBD: this is somewhat inefficient; should change the interface to
515 	 * ds_init_aml_walk. For now, this keeps the struct off the CPU stack
516 	 */
517 	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
518 	if (!info) {
519 		status = AE_NO_MEMORY;
520 		goto pop_walk_state;
521 	}
522 
523 	info->parameters = &this_walk_state->operands[0];
524 
525 	status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
526 				       obj_desc->method.aml_start,
527 				       obj_desc->method.aml_length, info,
528 				       ACPI_IMODE_EXECUTE);
529 
530 	ACPI_FREE(info);
531 	if (ACPI_FAILURE(status)) {
532 		goto pop_walk_state;
533 	}
534 
535 	next_walk_state->method_nesting_depth =
536 	    this_walk_state->method_nesting_depth + 1;
537 
538 	/*
539 	 * Delete the operands on the previous walkstate operand stack
540 	 * (they were copied to new objects)
541 	 */
542 	for (i = 0; i < obj_desc->method.param_count; i++) {
543 		acpi_ut_remove_reference(this_walk_state->operands[i]);
544 		this_walk_state->operands[i] = NULL;
545 	}
546 
547 	/* Clear the operand stack */
548 
549 	this_walk_state->num_operands = 0;
550 
551 	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
552 			  "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
553 			  method_node->name.ascii, next_walk_state));
554 
555 	this_walk_state->method_pathname =
556 	    acpi_ns_get_normalized_pathname(method_node, TRUE);
557 	this_walk_state->method_is_nested = TRUE;
558 
559 	/* Optional object evaluation log */
560 
561 	ACPI_DEBUG_PRINT_RAW((ACPI_DB_EVALUATION,
562 			      "%-26s:  %*s%s\n", "   Nested method call",
563 			      next_walk_state->method_nesting_depth * 3, " ",
564 			      &this_walk_state->method_pathname[1]));
565 
566 	/* Invoke an internal method if necessary */
567 
568 	if (obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
569 		status =
570 		    obj_desc->method.dispatch.implementation(next_walk_state);
571 		if (status == AE_OK) {
572 			status = AE_CTRL_TERMINATE;
573 		}
574 	}
575 
576 	return_ACPI_STATUS(status);
577 
578 pop_walk_state:
579 
580 	/* On error, pop the walk state to be deleted from thread */
581 
582 	acpi_ds_pop_walk_state(thread);
583 
584 cleanup:
585 
586 	/* On error, we must terminate the method properly */
587 
588 	acpi_ds_terminate_control_method(obj_desc, next_walk_state);
589 	acpi_ds_delete_walk_state(next_walk_state);
590 
591 	return_ACPI_STATUS(status);
592 }
593 
594 /*******************************************************************************
595  *
596  * FUNCTION:    acpi_ds_restart_control_method
597  *
598  * PARAMETERS:  walk_state          - State for preempted method (caller)
599  *              return_desc         - Return value from the called method
600  *
601  * RETURN:      Status
602  *
603  * DESCRIPTION: Restart a method that was preempted by another (nested) method
604  *              invocation. Handle the return value (if any) from the callee.
605  *
606  ******************************************************************************/
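/*
 * The "implicit return" case handled below can be shown with hypothetical
 * ASL such as:
 *
 *     Method (GETV, 0)
 *     {
 *         Store (VAL0, Local0)    // Last operation, no explicit Return
 *     }
 *
 * Some AML expects the result of that final Store to reach the caller
 * anyway, so the interpreter optionally saves the last result as the
 * method's return value.
 */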
607 
608 acpi_status
609 acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
610 			       union acpi_operand_object *return_desc)
611 {
612 	acpi_status status;
613 	int same_as_implicit_return;
614 
615 	ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);
616 
617 	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
618 			  "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
619 			  acpi_ut_get_node_name(walk_state->method_node),
620 			  walk_state->method_call_op, return_desc));
621 
622 	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
623 			  "    ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
624 			  walk_state->return_used,
625 			  walk_state->results, walk_state));
626 
627 	/* Did the called method return a value? */
628 
629 	if (return_desc) {
630 
631 		/* Is the implicit return object the same as the return desc? */
632 
633 		same_as_implicit_return =
634 		    (walk_state->implicit_return_obj == return_desc);
635 
636 		/* Are we actually going to use the return value? */
637 
638 		if (walk_state->return_used) {
639 
640 			/* Save the return value from the previous method */
641 
642 			status = acpi_ds_result_push(return_desc, walk_state);
643 			if (ACPI_FAILURE(status)) {
644 				acpi_ut_remove_reference(return_desc);
645 				return_ACPI_STATUS(status);
646 			}
647 
648 			/*
649 			 * Save as THIS method's return value in case it is returned
650 			 * immediately to yet another method
651 			 */
652 			walk_state->return_desc = return_desc;
653 		}
654 
655 		/*
656 		 * The following code is the optional support for the so-called
657 		 * "implicit return". Some AML code assumes that the last value of the
658 		 * method is "implicitly" returned to the caller, in the absence of an
659 		 * explicit return value.
660 		 *
661 		 * Just save the last result of the method as the return value.
662 		 *
663 		 * NOTE: this is optional because the ASL language does not actually
664 		 * support this behavior.
665 		 */
666 		else if (!acpi_ds_do_implicit_return
667 			 (return_desc, walk_state, FALSE)
668 			 || same_as_implicit_return) {
669 			/*
670 			 * Delete the return value if it will not be used by the
671 			 * calling method or remove one reference if the explicit return
672 			 * is the same as the implicit return value.
673 			 */
674 			acpi_ut_remove_reference(return_desc);
675 		}
676 	}
677 
678 	return_ACPI_STATUS(AE_OK);
679 }
680 
681 /*******************************************************************************
682  *
683  * FUNCTION:    acpi_ds_terminate_control_method
684  *
685  * PARAMETERS:  method_desc         - Method object
686  *              walk_state          - State associated with the method
687  *
688  * RETURN:      None
689  *
690  * DESCRIPTION: Terminate a control method. Delete everything that the method
691  *              created, delete all locals and arguments, and delete the parse
692  *              tree if requested.
693  *
694  * MUTEX:       Interpreter is locked
695  *
696  ******************************************************************************/
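/*
 * The NotSerialized-to-Serialized conversion performed below targets
 * methods like this hypothetical example:
 *
 *     Method (BUSY, 0, NotSerialized)
 *     {
 *         Name (TMP0, 0)
 *         Sleep (100)             // Blocks; a second thread may enter
 *         Return (TMP0)
 *     }
 *
 * The second thread's Name (TMP0, 0) fails with AE_ALREADY_EXISTS; once
 * the last thread exits, the method is permanently marked Serialized.
 */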
697 
698 void
699 acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
700 				 struct acpi_walk_state *walk_state)
701 {
702 
703 	ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);
704 
705 	/* method_desc is required, walk_state is optional */
706 
707 	if (!method_desc) {
708 		return_VOID;
709 	}
710 
711 	if (walk_state) {
712 
713 		/* Delete all arguments and locals */
714 
715 		acpi_ds_method_data_delete_all(walk_state);
716 
717 		/*
718 		 * Delete any namespace objects created anywhere within the
719 		 * namespace by the execution of this method. Unless:
720 		 * 1) This method is a module-level executable code method, in which
721 		 *    case we want to make the objects permanent.
722 		 * 2) There are other threads executing the method, in which case we
723 		 *    will wait until the last thread has completed.
724 		 */
725 		if (!(method_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL)
726 		    && (method_desc->method.thread_count == 1)) {
727 
728 			/* Delete any direct children of (created by) this method */
729 
730 			(void)acpi_ex_exit_interpreter();
731 			acpi_ns_delete_namespace_subtree(walk_state->
732 							 method_node);
733 			(void)acpi_ex_enter_interpreter();
734 
735 			/*
736 			 * Delete any objects that were created by this method
737 			 * elsewhere in the namespace (if any were created).
738 			 * Use of the ACPI_METHOD_MODIFIED_NAMESPACE optimizes the
739 			 * deletion such that we don't have to perform an entire
740 			 * namespace walk for every control method execution.
741 			 */
742 			if (method_desc->method.
743 			    info_flags & ACPI_METHOD_MODIFIED_NAMESPACE) {
744 				(void)acpi_ex_exit_interpreter();
745 				acpi_ns_delete_namespace_by_owner(method_desc->
746 								  method.
747 								  owner_id);
748 				(void)acpi_ex_enter_interpreter();
749 				method_desc->method.info_flags &=
750 				    ~ACPI_METHOD_MODIFIED_NAMESPACE;
751 			}
752 		}
753 
754 		/*
755 		 * If method is serialized, release the mutex and restore the
756 		 * current sync level for this thread
757 		 */
758 		if (method_desc->method.mutex) {
759 
760 			/* Acquisition Depth handles recursive calls */
761 
762 			method_desc->method.mutex->mutex.acquisition_depth--;
763 			if (!method_desc->method.mutex->mutex.acquisition_depth) {
764 				walk_state->thread->current_sync_level =
765 				    method_desc->method.mutex->mutex.
766 				    original_sync_level;
767 
768 				acpi_os_release_mutex(method_desc->method.
769 						      mutex->mutex.os_mutex);
770 				method_desc->method.mutex->mutex.thread_id = 0;
771 			}
772 		}
773 	}
774 
775 	/* Decrement the thread count on the method */
776 
777 	if (method_desc->method.thread_count) {
778 		method_desc->method.thread_count--;
779 	} else {
780 		ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
781 	}
782 
783 	/* Are there any other threads currently executing this method? */
784 
785 	if (method_desc->method.thread_count) {
786 		/*
787 		 * Additional threads. Do not release the owner_id in this case,
788 		 * we immediately reuse it for the next thread executing this method
789 		 */
790 		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
791 				  "*** Completed execution of one thread, %u threads remaining\n",
792 				  method_desc->method.thread_count));
793 	} else {
794 		/* This is the only executing thread for this method */
795 
796 		/*
797 		 * Support to dynamically change a method from not_serialized to
798 		 * Serialized if it appears that the method is incorrectly written and
799 		 * does not support multiple thread execution. The best example of this
800 		 * is if such a method creates namespace objects and blocks. A second
801 		 * thread will fail with an AE_ALREADY_EXISTS exception.
802 		 *
803 		 * This code is here because we must wait until the last thread exits
804 		 * before marking the method as serialized.
805 		 */
806 		if (method_desc->method.
807 		    info_flags & ACPI_METHOD_SERIALIZED_PENDING) {
808 			if (walk_state) {
809 				ACPI_INFO(("Marking method %4.4s as Serialized "
810 					   "because of AE_ALREADY_EXISTS error",
811 					   walk_state->method_node->name.
812 					   ascii));
813 			}
814 
815 			/*
816 			 * Method tried to create an object twice and was marked as
817 			 * "pending serialized". The probable cause is that the method
818 			 * cannot handle reentrancy.
819 			 *
820 			 * The method was created as not_serialized, but it tried to create
821 			 * a named object and then blocked, causing the second thread
822 			 * entrance to begin and then fail. Work around this problem by
823 			 * marking the method permanently as Serialized when the last
824 			 * thread exits here.
825 			 */
826 			method_desc->method.info_flags &=
827 			    ~ACPI_METHOD_SERIALIZED_PENDING;
828 
829 			method_desc->method.info_flags |=
830 			    (ACPI_METHOD_SERIALIZED |
831 			     ACPI_METHOD_IGNORE_SYNC_LEVEL);
832 			method_desc->method.sync_level = 0;
833 		}
834 
835 		/* No more threads, we can free the owner_id */
836 
837 		if (!
838 		    (method_desc->method.
839 		     info_flags & ACPI_METHOD_MODULE_LEVEL)) {
840 			acpi_ut_release_owner_id(&method_desc->method.owner_id);
841 		}
842 	}
843 
844 	acpi_ex_stop_trace_method((struct acpi_namespace_node *)method_desc->
845 				  method.node, method_desc, walk_state);
846 
847 	return_VOID;
848 }
849