v3.1
 
  1/******************************************************************************
  2 *
  3 * Module Name: dsmethod - Parser/Interpreter interface - control method parsing
  4 *
  5 *****************************************************************************/
  6
  7/*
  8 * Copyright (C) 2000 - 2011, Intel Corp.
  9 * All rights reserved.
 10 *
 11 * Redistribution and use in source and binary forms, with or without
 12 * modification, are permitted provided that the following conditions
 13 * are met:
 14 * 1. Redistributions of source code must retain the above copyright
 15 *    notice, this list of conditions, and the following disclaimer,
 16 *    without modification.
 17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 18 *    substantially similar to the "NO WARRANTY" disclaimer below
 19 *    ("Disclaimer") and any redistribution must be conditioned upon
 20 *    including a substantially similar Disclaimer requirement for further
 21 *    binary redistribution.
 22 * 3. Neither the names of the above-listed copyright holders nor the names
 23 *    of any contributors may be used to endorse or promote products derived
 24 *    from this software without specific prior written permission.
 25 *
 26 * Alternatively, this software may be distributed under the terms of the
 27 * GNU General Public License ("GPL") version 2 as published by the Free
 28 * Software Foundation.
 29 *
 30 * NO WARRANTY
 31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 41 * POSSIBILITY OF SUCH DAMAGES.
 42 */
 43
 44#include <acpi/acpi.h>
 45#include "accommon.h"
 46#include "acdispat.h"
 47#include "acinterp.h"
 48#include "acnamesp.h"
 49#ifdef	ACPI_DISASSEMBLER
 50#include <acpi/acdisasm.h>
 51#endif
 52
 53#define _COMPONENT          ACPI_DISPATCHER
 54ACPI_MODULE_NAME("dsmethod")
 55
 56/* Local prototypes */
 57static acpi_status
 58acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);
 59
 60/*******************************************************************************
 61 *
 62 * FUNCTION:    acpi_ds_method_error
 63 *
 64 * PARAMETERS:  Status          - Execution status
 65 *              walk_state      - Current state
 66 *
 67 * RETURN:      Status
 68 *
 69 * DESCRIPTION: Called on method error. Invoke the global exception handler if
 70 *              present, dump the method data if the disassembler is configured
 71 *
 72 *              Note: Allows the exception handler to change the status code
 73 *
 74 ******************************************************************************/
 75
 76acpi_status
 77acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
 78{
 79	ACPI_FUNCTION_ENTRY();
 80
 81	/* Ignore AE_OK and control exception codes */
 82
 83	if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) {
 84		return (status);
 85	}
 86
 87	/* Invoke the global exception handler */
 88
 89	if (acpi_gbl_exception_handler) {
 90
 91		/* Exit the interpreter, allow handler to execute methods */
 92
 93		acpi_ex_exit_interpreter();
 94
 95		/*
 96		 * Handler can map the exception code to anything it wants, including
 97		 * AE_OK, in which case the executing method will not be aborted.
 98		 */
 99		status = acpi_gbl_exception_handler(status,
100						    walk_state->method_node ?
101						    walk_state->method_node->
102						    name.integer : 0,
103						    walk_state->opcode,
104						    walk_state->aml_offset,
105						    NULL);
106		acpi_ex_enter_interpreter();
107	}
108
109	acpi_ds_clear_implicit_return(walk_state);
110
111#ifdef ACPI_DISASSEMBLER
112	if (ACPI_FAILURE(status)) {
113
114		/* Display method locals/args if disassembler is present */
115
116		acpi_dm_dump_method_info(status, walk_state, walk_state->op);
117	}
118#endif
119
120	return (status);
121}
122
123/*******************************************************************************
124 *
125 * FUNCTION:    acpi_ds_create_method_mutex
126 *
127 * PARAMETERS:  obj_desc            - The method object
128 *
129 * RETURN:      Status
130 *
131 * DESCRIPTION: Create a mutex object for a serialized control method
132 *
133 ******************************************************************************/
134
135static acpi_status
136acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
137{
138	union acpi_operand_object *mutex_desc;
139	acpi_status status;
140
141	ACPI_FUNCTION_TRACE(ds_create_method_mutex);
142
143	/* Create the new mutex object */
144
145	mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
146	if (!mutex_desc) {
147		return_ACPI_STATUS(AE_NO_MEMORY);
148	}
149
150	/* Create the actual OS Mutex */
151
152	status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
153	if (ACPI_FAILURE(status)) {
154		return_ACPI_STATUS(status);
155	}
156
157	mutex_desc->mutex.sync_level = method_desc->method.sync_level;
158	method_desc->method.mutex = mutex_desc;
159	return_ACPI_STATUS(AE_OK);
160}
161
162/*******************************************************************************
163 *
164 * FUNCTION:    acpi_ds_begin_method_execution
165 *
166 * PARAMETERS:  method_node         - Node of the method
167 *              obj_desc            - The method object
168 *              walk_state          - current state, NULL if not yet executing
169 *                                    a method.
170 *
171 * RETURN:      Status
172 *
173 * DESCRIPTION: Prepare a method for execution.  Parses the method if necessary,
174 *              increments the thread count, and waits at the method semaphore
175 *              for clearance to execute.
176 *
177 ******************************************************************************/
178
179acpi_status
180acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
181			       union acpi_operand_object *obj_desc,
182			       struct acpi_walk_state *walk_state)
183{
184	acpi_status status = AE_OK;
185
186	ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);
187
188	if (!method_node) {
189		return_ACPI_STATUS(AE_NULL_ENTRY);
190	}
191
192	/* Prevent wraparound of thread count */
193
194	if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
195		ACPI_ERROR((AE_INFO,
196			    "Method reached maximum reentrancy limit (255)"));
197		return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
198	}
199
200	/*
201	 * If this method is serialized, we need to acquire the method mutex.
202	 */
203	if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) {
204		/*
205		 * Create a mutex for the method if it is defined to be Serialized
206		 * and a mutex has not already been created. We defer the mutex creation
207		 * until a method is actually executed, to minimize the object count
208		 */
209		if (!obj_desc->method.mutex) {
210			status = acpi_ds_create_method_mutex(obj_desc);
211			if (ACPI_FAILURE(status)) {
212				return_ACPI_STATUS(status);
213			}
214		}
215
216		/*
217		 * The current_sync_level (per-thread) must be less than or equal to
218		 * the sync level of the method. This mechanism provides some
219		 * deadlock prevention
220		 *
221		 * Top-level method invocation has no walk state at this point
222		 */
223		if (walk_state &&
224		    (walk_state->thread->current_sync_level >
225		     obj_desc->method.mutex->mutex.sync_level)) {
226			ACPI_ERROR((AE_INFO,
227				    "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%u)",
228				    acpi_ut_get_node_name(method_node),
229				    walk_state->thread->current_sync_level));
230
231			return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
232		}
233
234		/*
235		 * Obtain the method mutex if necessary. Do not acquire mutex for a
236		 * recursive call.
237		 */
238		if (!walk_state ||
239		    !obj_desc->method.mutex->mutex.thread_id ||
240		    (walk_state->thread->thread_id !=
241		     obj_desc->method.mutex->mutex.thread_id)) {
242			/*
243			 * Acquire the method mutex. This releases the interpreter if we
244			 * block (and reacquires it before it returns)
245			 */
246			status =
247			    acpi_ex_system_wait_mutex(obj_desc->method.mutex->
248						      mutex.os_mutex,
249						      ACPI_WAIT_FOREVER);
250			if (ACPI_FAILURE(status)) {
251				return_ACPI_STATUS(status);
252			}
253
254			/* Update the mutex and walk info and save the original sync_level */
255
256			if (walk_state) {
257				obj_desc->method.mutex->mutex.
258				    original_sync_level =
259				    walk_state->thread->current_sync_level;
260
261				obj_desc->method.mutex->mutex.thread_id =
262				    walk_state->thread->thread_id;
263				walk_state->thread->current_sync_level =
264				    obj_desc->method.sync_level;
265			} else {
266				obj_desc->method.mutex->mutex.
267				    original_sync_level =
268				    obj_desc->method.mutex->mutex.sync_level;
269			}
270		}
271
272		/* Always increase acquisition depth */
273
274		obj_desc->method.mutex->mutex.acquisition_depth++;
275	}
276
277	/*
278	 * Allocate an Owner ID for this method, only if this is the first thread
279	 * to begin concurrent execution. We only need one owner_id, even if the
280	 * method is invoked recursively.
281	 */
282	if (!obj_desc->method.owner_id) {
283		status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
284		if (ACPI_FAILURE(status)) {
285			goto cleanup;
286		}
287	}
288
289	/*
290	 * Increment the method parse tree thread count since it has been
291	 * reentered one more time (even if it is the same thread)
292	 */
293	obj_desc->method.thread_count++;
294	return_ACPI_STATUS(status);
295
296      cleanup:
297	/* On error, must release the method mutex (if present) */
298
299	if (obj_desc->method.mutex) {
300		acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
301	}
302	return_ACPI_STATUS(status);
303}
304
305/*******************************************************************************
306 *
307 * FUNCTION:    acpi_ds_call_control_method
308 *
309 * PARAMETERS:  Thread              - Info for this thread
310 *              this_walk_state     - Current walk state
311 *              Op                  - Current Op to be walked
312 *
313 * RETURN:      Status
314 *
315 * DESCRIPTION: Transfer execution to a called control method
316 *
317 ******************************************************************************/
318
319acpi_status
320acpi_ds_call_control_method(struct acpi_thread_state *thread,
321			    struct acpi_walk_state *this_walk_state,
322			    union acpi_parse_object *op)
323{
324	acpi_status status;
325	struct acpi_namespace_node *method_node;
326	struct acpi_walk_state *next_walk_state = NULL;
327	union acpi_operand_object *obj_desc;
328	struct acpi_evaluate_info *info;
329	u32 i;
330
331	ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
332
333	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
334			  "Calling method %p, currentstate=%p\n",
335			  this_walk_state->prev_op, this_walk_state));
336
337	/*
338	 * Get the namespace entry for the control method we are about to call
339	 */
340	method_node = this_walk_state->method_call_node;
341	if (!method_node) {
342		return_ACPI_STATUS(AE_NULL_ENTRY);
343	}
344
345	obj_desc = acpi_ns_get_attached_object(method_node);
346	if (!obj_desc) {
347		return_ACPI_STATUS(AE_NULL_OBJECT);
348	}
349
350	/* Init for new method, possibly wait on method mutex */
351
352	status = acpi_ds_begin_method_execution(method_node, obj_desc,
353						this_walk_state);
354	if (ACPI_FAILURE(status)) {
355		return_ACPI_STATUS(status);
356	}
357
358	/* Begin method parse/execution. Create a new walk state */
359
360	next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
361						    NULL, obj_desc, thread);
362	if (!next_walk_state) {
363		status = AE_NO_MEMORY;
364		goto cleanup;
365	}
366
367	/*
368	 * The resolved arguments were put on the previous walk state's operand
369	 * stack. Operands on the previous walk state stack always
370	 * start at index 0. Also, null terminate the list of arguments
371	 */
372	this_walk_state->operands[this_walk_state->num_operands] = NULL;
373
374	/*
375	 * Allocate and initialize the evaluation information block
376	 * TBD: this is somewhat inefficient, should change interface to
377	 * ds_init_aml_walk. For now, keeps this struct off the CPU stack
378	 */
379	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
380	if (!info) {
381		return_ACPI_STATUS(AE_NO_MEMORY);
382	}
383
384	info->parameters = &this_walk_state->operands[0];
385
386	status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
387				       obj_desc->method.aml_start,
388				       obj_desc->method.aml_length, info,
389				       ACPI_IMODE_EXECUTE);
390
391	ACPI_FREE(info);
392	if (ACPI_FAILURE(status)) {
393		goto cleanup;
394	}
395
396	/*
397	 * Delete the operands on the previous walkstate operand stack
398	 * (they were copied to new objects)
399	 */
400	for (i = 0; i < obj_desc->method.param_count; i++) {
401		acpi_ut_remove_reference(this_walk_state->operands[i]);
402		this_walk_state->operands[i] = NULL;
403	}
404
405	/* Clear the operand stack */
406
407	this_walk_state->num_operands = 0;
408
409	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
410			  "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
411			  method_node->name.ascii, next_walk_state));
412
413	/* Invoke an internal method if necessary */
414
415	if (obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
416		status =
417		    obj_desc->method.dispatch.implementation(next_walk_state);
418		if (status == AE_OK) {
419			status = AE_CTRL_TERMINATE;
420		}
421	}
422
423	return_ACPI_STATUS(status);
424
425      cleanup:
426
427	/* On error, we must terminate the method properly */
428
429	acpi_ds_terminate_control_method(obj_desc, next_walk_state);
430	if (next_walk_state) {
431		acpi_ds_delete_walk_state(next_walk_state);
432	}
433
434	return_ACPI_STATUS(status);
435}
436
437/*******************************************************************************
438 *
439 * FUNCTION:    acpi_ds_restart_control_method
440 *
441 * PARAMETERS:  walk_state          - State for preempted method (caller)
442 *              return_desc         - Return value from the called method
443 *
444 * RETURN:      Status
445 *
446 * DESCRIPTION: Restart a method that was preempted by another (nested) method
447 *              invocation.  Handle the return value (if any) from the callee.
448 *
449 ******************************************************************************/
450
451acpi_status
452acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
453			       union acpi_operand_object *return_desc)
454{
455	acpi_status status;
456	int same_as_implicit_return;
457
458	ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);
459
460	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
461			  "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
462			  acpi_ut_get_node_name(walk_state->method_node),
463			  walk_state->method_call_op, return_desc));
464
465	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
466			  "    ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
467			  walk_state->return_used,
468			  walk_state->results, walk_state));
469
470	/* Did the called method return a value? */
471
472	if (return_desc) {
473
474		/* Is the implicit return object the same as the return desc? */
475
476		same_as_implicit_return =
477		    (walk_state->implicit_return_obj == return_desc);
478
479		/* Are we actually going to use the return value? */
480
481		if (walk_state->return_used) {
482
483			/* Save the return value from the previous method */
484
485			status = acpi_ds_result_push(return_desc, walk_state);
486			if (ACPI_FAILURE(status)) {
487				acpi_ut_remove_reference(return_desc);
488				return_ACPI_STATUS(status);
489			}
490
491			/*
492			 * Save as THIS method's return value in case it is returned
493			 * immediately to yet another method
494			 */
495			walk_state->return_desc = return_desc;
496		}
497
498		/*
499		 * The following code is the optional support for the so-called
500		 * "implicit return". Some AML code assumes that the last value of the
501		 * method is "implicitly" returned to the caller, in the absence of an
502		 * explicit return value.
503		 *
504		 * Just save the last result of the method as the return value.
505		 *
506		 * NOTE: this is optional because the ASL language does not actually
507		 * support this behavior.
508		 */
509		else if (!acpi_ds_do_implicit_return
510			 (return_desc, walk_state, FALSE)
511			 || same_as_implicit_return) {
512			/*
513			 * Delete the return value if it will not be used by the
514			 * calling method or remove one reference if the explicit return
515			 * is the same as the implicit return value.
516			 */
517			acpi_ut_remove_reference(return_desc);
518		}
519	}
520
521	return_ACPI_STATUS(AE_OK);
522}
523
524/*******************************************************************************
525 *
526 * FUNCTION:    acpi_ds_terminate_control_method
527 *
528 * PARAMETERS:  method_desc         - Method object
529 *              walk_state          - State associated with the method
530 *
531 * RETURN:      None
532 *
533 * DESCRIPTION: Terminate a control method.  Delete everything that the method
534 *              created, delete all locals and arguments, and delete the parse
535 *              tree if requested.
536 *
537 * MUTEX:       Interpreter is locked
538 *
539 ******************************************************************************/
540
541void
542acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
543				 struct acpi_walk_state *walk_state)
544{
545
546	ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);
547
548	/* method_desc is required, walk_state is optional */
549
550	if (!method_desc) {
551		return_VOID;
552	}
553
554	if (walk_state) {
555
556		/* Delete all arguments and locals */
557
558		acpi_ds_method_data_delete_all(walk_state);
559
560		/*
561		 * If method is serialized, release the mutex and restore the
562		 * current sync level for this thread
563		 */
564		if (method_desc->method.mutex) {
565
566			/* Acquisition Depth handles recursive calls */
567
568			method_desc->method.mutex->mutex.acquisition_depth--;
569			if (!method_desc->method.mutex->mutex.acquisition_depth) {
570				walk_state->thread->current_sync_level =
571				    method_desc->method.mutex->mutex.
572				    original_sync_level;
573
574				acpi_os_release_mutex(method_desc->method.
575						      mutex->mutex.os_mutex);
576				method_desc->method.mutex->mutex.thread_id = 0;
577			}
578		}
579
580		/*
581		 * Delete any namespace objects created anywhere within the
582		 * namespace by the execution of this method. Unless:
583		 * 1) This method is a module-level executable code method, in which
 584 *    case we want to make the objects permanent.
585		 * 2) There are other threads executing the method, in which case we
586		 *    will wait until the last thread has completed.
587		 */
588		if (!(method_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL)
589		    && (method_desc->method.thread_count == 1)) {
590
591			/* Delete any direct children of (created by) this method */
592
593			acpi_ns_delete_namespace_subtree(walk_state->
594							 method_node);
595
596			/*
597			 * Delete any objects that were created by this method
598			 * elsewhere in the namespace (if any were created).
599			 * Use of the ACPI_METHOD_MODIFIED_NAMESPACE optimizes the
600			 * deletion such that we don't have to perform an entire
601			 * namespace walk for every control method execution.
602			 */
603			if (method_desc->method.
604			    info_flags & ACPI_METHOD_MODIFIED_NAMESPACE) {
605				acpi_ns_delete_namespace_by_owner(method_desc->
606								  method.
607								  owner_id);
608				method_desc->method.info_flags &=
609				    ~ACPI_METHOD_MODIFIED_NAMESPACE;
610			}
611		}
612	}
613
614	/* Decrement the thread count on the method */
615
616	if (method_desc->method.thread_count) {
617		method_desc->method.thread_count--;
618	} else {
619		ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
620	}
621
622	/* Are there any other threads currently executing this method? */
623
624	if (method_desc->method.thread_count) {
625		/*
626		 * Additional threads. Do not release the owner_id in this case,
627		 * we immediately reuse it for the next thread executing this method
628		 */
629		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
630				  "*** Completed execution of one thread, %u threads remaining\n",
631				  method_desc->method.thread_count));
632	} else {
633		/* This is the only executing thread for this method */
634
635		/*
636		 * Support to dynamically change a method from not_serialized to
637		 * Serialized if it appears that the method is incorrectly written and
638		 * does not support multiple thread execution. The best example of this
639		 * is if such a method creates namespace objects and blocks. A second
640		 * thread will fail with an AE_ALREADY_EXISTS exception.
641		 *
642		 * This code is here because we must wait until the last thread exits
643		 * before marking the method as serialized.
644		 */
645		if (method_desc->method.
646		    info_flags & ACPI_METHOD_SERIALIZED_PENDING) {
647			if (walk_state) {
648				ACPI_INFO((AE_INFO,
649					   "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error",
650					   walk_state->method_node->name.
651					   ascii));
652			}
653
654			/*
655			 * Method tried to create an object twice and was marked as
656			 * "pending serialized". The probable cause is that the method
657			 * cannot handle reentrancy.
658			 *
659			 * The method was created as not_serialized, but it tried to create
660			 * a named object and then blocked, causing the second thread
661			 * entrance to begin and then fail. Workaround this problem by
662			 * marking the method permanently as Serialized when the last
663			 * thread exits here.
664			 */
665			method_desc->method.info_flags &=
666			    ~ACPI_METHOD_SERIALIZED_PENDING;
667			method_desc->method.info_flags |=
668			    ACPI_METHOD_SERIALIZED;
669			method_desc->method.sync_level = 0;
670		}
671
672		/* No more threads, we can free the owner_id */
673
674		if (!
675		    (method_desc->method.
676		     info_flags & ACPI_METHOD_MODULE_LEVEL)) {
677			acpi_ut_release_owner_id(&method_desc->method.owner_id);
678		}
679	}
680
681	return_VOID;
682}
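
The global exception handler invoked by acpi_ds_method_error() above is installed through the public ACPICA interface acpi_install_exception_handler(). A minimal sketch of such a callback follows; the handler name and the pr_warn()-based logging are illustrative assumptions, while the parameter list matches the arguments that acpi_ds_method_error() passes to acpi_gbl_exception_handler().

#include <linux/kernel.h>
#include <acpi/acpi.h>

/* Hypothetical handler: log the AML failure, keep the original status */
static acpi_status my_aml_exception_handler(acpi_status aml_status,
					    acpi_name name,
					    u16 opcode,
					    u32 aml_offset, void *context)
{
	pr_warn("AML exception %s in [%4.4s], opcode 0x%4.4X, offset 0x%X\n",
		acpi_format_exception(aml_status),
		(char *)&name, opcode, aml_offset);

	/* Returning AE_OK here would keep the executing method from being aborted */
	return aml_status;
}

/* Installation from driver/init code (sketch only) */
static acpi_status install_aml_exception_handler(void)
{
	return acpi_install_exception_handler(my_aml_exception_handler);
}
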
v6.9.4
  1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
  2/******************************************************************************
  3 *
  4 * Module Name: dsmethod - Parser/Interpreter interface - control method parsing
  5 *
  6 * Copyright (C) 2000 - 2023, Intel Corp.
  7 *
  8 *****************************************************************************/
  9
 10#include <acpi/acpi.h>
 11#include "accommon.h"
 12#include "acdispat.h"
 13#include "acinterp.h"
 14#include "acnamesp.h"
 15#include "acparser.h"
 16#include "amlcode.h"
 17#include "acdebug.h"
 18
 19#define _COMPONENT          ACPI_DISPATCHER
 20ACPI_MODULE_NAME("dsmethod")
 21
 22/* Local prototypes */
 23static acpi_status
 24acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
 25			     union acpi_parse_object **out_op);
 26
 27static acpi_status
 28acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);
 29
 30/*******************************************************************************
 31 *
 32 * FUNCTION:    acpi_ds_auto_serialize_method
 33 *
 34 * PARAMETERS:  node                        - Namespace Node of the method
 35 *              obj_desc                    - Method object attached to node
 36 *
 37 * RETURN:      Status
 38 *
 39 * DESCRIPTION: Parse a control method AML to scan for control methods that
 40 *              need serialization due to the creation of named objects.
 41 *
 42 * NOTE: It is a bit of overkill to mark all such methods serialized, since
 43 * there is only a problem if the method actually blocks during execution.
 44 * A blocking operation is, for example, a Sleep() operation, or any access
 45 * to an operation region. However, it is probably not possible to easily
 46 * detect whether a method will block or not, so we simply mark all suspicious
 47 * methods as serialized.
 48 *
 49 * NOTE2: This code is essentially a generic routine for parsing a single
 50 * control method.
 51 *
 52 ******************************************************************************/
 53
 54acpi_status
 55acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
 56			      union acpi_operand_object *obj_desc)
 57{
 58	acpi_status status;
 59	union acpi_parse_object *op = NULL;
 60	struct acpi_walk_state *walk_state;
 61
 62	ACPI_FUNCTION_TRACE_PTR(ds_auto_serialize_method, node);
 63
 64	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
 65			  "Method auto-serialization parse [%4.4s] %p\n",
 66			  acpi_ut_get_node_name(node), node));
 67
 68	/* Create/Init a root op for the method parse tree */
 69
 70	op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start);
 71	if (!op) {
 72		return_ACPI_STATUS(AE_NO_MEMORY);
 73	}
 74
 75	acpi_ps_set_name(op, node->name.integer);
 76	op->common.node = node;
 77
 78	/* Create and initialize a new walk state */
 79
 80	walk_state =
 81	    acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
 82	if (!walk_state) {
 83		acpi_ps_free_op(op);
 84		return_ACPI_STATUS(AE_NO_MEMORY);
 85	}
 86
 87	status = acpi_ds_init_aml_walk(walk_state, op, node,
 88				       obj_desc->method.aml_start,
 89				       obj_desc->method.aml_length, NULL, 0);
 90	if (ACPI_FAILURE(status)) {
 91		acpi_ds_delete_walk_state(walk_state);
 92		acpi_ps_free_op(op);
 93		return_ACPI_STATUS(status);
 94	}
 95
 96	walk_state->descending_callback = acpi_ds_detect_named_opcodes;
 97
 98	/* Parse the method, scan for creation of named objects */
 99
100	status = acpi_ps_parse_aml(walk_state);
101
102	acpi_ps_delete_parse_tree(op);
103	return_ACPI_STATUS(status);
104}
105
106/*******************************************************************************
107 *
108 * FUNCTION:    acpi_ds_detect_named_opcodes
109 *
110 * PARAMETERS:  walk_state      - Current state of the parse tree walk
111 *              out_op          - Unused, required for parser interface
112 *
113 * RETURN:      Status
114 *
115 * DESCRIPTION: Descending callback used during the loading of ACPI tables.
116 *              Currently used to detect methods that must be marked serialized
117 *              in order to avoid problems with the creation of named objects.
118 *
119 ******************************************************************************/
120
121static acpi_status
122acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
123			     union acpi_parse_object **out_op)
124{
125
126	ACPI_FUNCTION_NAME(acpi_ds_detect_named_opcodes);
127
128	/* We are only interested in opcodes that create a new name */
129
130	if (!
131	    (walk_state->op_info->
132	     flags & (AML_NAMED | AML_CREATE | AML_FIELD))) {
133		return (AE_OK);
134	}
135
136	/*
137	 * At this point, we know we have a Named object opcode.
138	 * Mark the method as serialized. Later code will create a mutex for
139	 * this method to enforce serialization.
140	 *
141	 * Note, ACPI_METHOD_IGNORE_SYNC_LEVEL flag means that we will ignore the
142	 * Sync Level mechanism for this method, even though it is now serialized.
143	 * Otherwise, there can be conflicts with existing ASL code that actually
144	 * uses sync levels.
145	 */
146	walk_state->method_desc->method.sync_level = 0;
147	walk_state->method_desc->method.info_flags |=
148	    (ACPI_METHOD_SERIALIZED | ACPI_METHOD_IGNORE_SYNC_LEVEL);
149
150	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
151			  "Method serialized [%4.4s] %p - [%s] (%4.4X)\n",
152			  walk_state->method_node->name.ascii,
153			  walk_state->method_node, walk_state->op_info->name,
154			  walk_state->opcode));
155
156	/* Abort the parse, no need to examine this method any further */
157
158	return (AE_CTRL_TERMINATE);
159}
160
161/*******************************************************************************
162 *
163 * FUNCTION:    acpi_ds_method_error
164 *
165 * PARAMETERS:  status          - Execution status
166 *              walk_state      - Current state
167 *
168 * RETURN:      Status
169 *
170 * DESCRIPTION: Called on method error. Invoke the global exception handler if
171 *              present, dump the method data if the debugger is configured
172 *
173 *              Note: Allows the exception handler to change the status code
174 *
175 ******************************************************************************/
176
177acpi_status
178acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
179{
180	u32 aml_offset;
181	acpi_name name = 0;
182
183	ACPI_FUNCTION_ENTRY();
184
185	/* Ignore AE_OK and control exception codes */
186
187	if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) {
188		return (status);
189	}
190
191	/* Invoke the global exception handler */
192
193	if (acpi_gbl_exception_handler) {
194
195		/* Exit the interpreter, allow handler to execute methods */
196
197		acpi_ex_exit_interpreter();
198
199		/*
200		 * Handler can map the exception code to anything it wants, including
201		 * AE_OK, in which case the executing method will not be aborted.
202		 */
203		aml_offset = (u32)ACPI_PTR_DIFF(walk_state->aml,
204						walk_state->parser_state.
205						aml_start);
206
207		if (walk_state->method_node) {
208			name = walk_state->method_node->name.integer;
209		} else if (walk_state->deferred_node) {
210			name = walk_state->deferred_node->name.integer;
211		}
212
213		status = acpi_gbl_exception_handler(status, name,
214						    walk_state->opcode,
215						    aml_offset, NULL);
216		acpi_ex_enter_interpreter();
217	}
218
219	acpi_ds_clear_implicit_return(walk_state);
220
221	if (ACPI_FAILURE(status)) {
222		acpi_ds_dump_method_stack(status, walk_state, walk_state->op);
223
224		/* Display method locals/args if debugger is present */
225
226#ifdef ACPI_DEBUGGER
227		acpi_db_dump_method_info(status, walk_state);
228#endif
229	}
230
231	return (status);
232}
233
234/*******************************************************************************
235 *
236 * FUNCTION:    acpi_ds_create_method_mutex
237 *
238 * PARAMETERS:  obj_desc            - The method object
239 *
240 * RETURN:      Status
241 *
242 * DESCRIPTION: Create a mutex object for a serialized control method
243 *
244 ******************************************************************************/
245
246static acpi_status
247acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
248{
249	union acpi_operand_object *mutex_desc;
250	acpi_status status;
251
252	ACPI_FUNCTION_TRACE(ds_create_method_mutex);
253
254	/* Create the new mutex object */
255
256	mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
257	if (!mutex_desc) {
258		return_ACPI_STATUS(AE_NO_MEMORY);
259	}
260
261	/* Create the actual OS Mutex */
262
263	status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
264	if (ACPI_FAILURE(status)) {
265		acpi_ut_delete_object_desc(mutex_desc);
266		return_ACPI_STATUS(status);
267	}
268
269	mutex_desc->mutex.sync_level = method_desc->method.sync_level;
270	method_desc->method.mutex = mutex_desc;
271	return_ACPI_STATUS(AE_OK);
272}
273
274/*******************************************************************************
275 *
276 * FUNCTION:    acpi_ds_begin_method_execution
277 *
278 * PARAMETERS:  method_node         - Node of the method
279 *              obj_desc            - The method object
280 *              walk_state          - current state, NULL if not yet executing
281 *                                    a method.
282 *
283 * RETURN:      Status
284 *
285 * DESCRIPTION: Prepare a method for execution. Parses the method if necessary,
286 *              increments the thread count, and waits at the method semaphore
287 *              for clearance to execute.
288 *
289 ******************************************************************************/
290
291acpi_status
292acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
293			       union acpi_operand_object *obj_desc,
294			       struct acpi_walk_state *walk_state)
295{
296	acpi_status status = AE_OK;
297
298	ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);
299
300	if (!method_node) {
301		return_ACPI_STATUS(AE_NULL_ENTRY);
302	}
303
304	acpi_ex_start_trace_method(method_node, obj_desc, walk_state);
305
306	/* Prevent wraparound of thread count */
307
308	if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
309		ACPI_ERROR((AE_INFO,
310			    "Method reached maximum reentrancy limit (255)"));
311		return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
312	}
313
314	/*
315	 * If this method is serialized, we need to acquire the method mutex.
316	 */
317	if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) {
318		/*
319		 * Create a mutex for the method if it is defined to be Serialized
320		 * and a mutex has not already been created. We defer the mutex creation
321		 * until a method is actually executed, to minimize the object count
322		 */
323		if (!obj_desc->method.mutex) {
324			status = acpi_ds_create_method_mutex(obj_desc);
325			if (ACPI_FAILURE(status)) {
326				return_ACPI_STATUS(status);
327			}
328		}
329
330		/*
331		 * The current_sync_level (per-thread) must be less than or equal to
332		 * the sync level of the method. This mechanism provides some
333		 * deadlock prevention.
334		 *
335		 * If the method was auto-serialized, we just ignore the sync level
336		 * mechanism, because auto-serialization of methods can interfere
337		 * with ASL code that actually uses sync levels.
338		 *
339		 * Top-level method invocation has no walk state at this point
340		 */
341		if (walk_state &&
342		    (!(obj_desc->method.
343		       info_flags & ACPI_METHOD_IGNORE_SYNC_LEVEL))
344		    && (walk_state->thread->current_sync_level >
345			obj_desc->method.mutex->mutex.sync_level)) {
346			ACPI_ERROR((AE_INFO,
347				    "Cannot acquire Mutex for method [%4.4s]"
348				    ", current SyncLevel is too large (%u)",
349				    acpi_ut_get_node_name(method_node),
350				    walk_state->thread->current_sync_level));
351
352			return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
353		}
354
355		/*
356		 * Obtain the method mutex if necessary. Do not acquire mutex for a
357		 * recursive call.
358		 */
359		if (!walk_state ||
360		    !obj_desc->method.mutex->mutex.thread_id ||
361		    (walk_state->thread->thread_id !=
362		     obj_desc->method.mutex->mutex.thread_id)) {
363			/*
364			 * Acquire the method mutex. This releases the interpreter if we
365			 * block (and reacquires it before it returns)
366			 */
367			status =
368			    acpi_ex_system_wait_mutex(obj_desc->method.mutex->
369						      mutex.os_mutex,
370						      ACPI_WAIT_FOREVER);
371			if (ACPI_FAILURE(status)) {
372				return_ACPI_STATUS(status);
373			}
374
375			/* Update the mutex and walk info and save the original sync_level */
376
377			if (walk_state) {
378				obj_desc->method.mutex->mutex.
379				    original_sync_level =
380				    walk_state->thread->current_sync_level;
381
382				obj_desc->method.mutex->mutex.thread_id =
383				    walk_state->thread->thread_id;
384
385				/*
386				 * Update the current sync_level only if this is not an auto-
387				 * serialized method. In the auto case, we have to ignore
388				 * the sync level for the method mutex (created for the
389				 * auto-serialization) because we have no idea of what the
390				 * sync level should be. Therefore, just ignore it.
391				 */
392				if (!(obj_desc->method.info_flags &
393				      ACPI_METHOD_IGNORE_SYNC_LEVEL)) {
394					walk_state->thread->current_sync_level =
395					    obj_desc->method.sync_level;
396				}
397			} else {
398				obj_desc->method.mutex->mutex.
399				    original_sync_level =
400				    obj_desc->method.mutex->mutex.sync_level;
401
402				obj_desc->method.mutex->mutex.thread_id =
403				    acpi_os_get_thread_id();
404			}
405		}
406
407		/* Always increase acquisition depth */
408
409		obj_desc->method.mutex->mutex.acquisition_depth++;
410	}
411
412	/*
413	 * Allocate an Owner ID for this method, only if this is the first thread
414	 * to begin concurrent execution. We only need one owner_id, even if the
415	 * method is invoked recursively.
416	 */
417	if (!obj_desc->method.owner_id) {
418		status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
419		if (ACPI_FAILURE(status)) {
420			goto cleanup;
421		}
422	}
423
424	/*
425	 * Increment the method parse tree thread count since it has been
426	 * reentered one more time (even if it is the same thread)
427	 */
428	obj_desc->method.thread_count++;
429	acpi_method_count++;
430	return_ACPI_STATUS(status);
431
432cleanup:
433	/* On error, must release the method mutex (if present) */
434
435	if (obj_desc->method.mutex) {
436		acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
437	}
438	return_ACPI_STATUS(status);
439}
440
441/*******************************************************************************
442 *
443 * FUNCTION:    acpi_ds_call_control_method
444 *
445 * PARAMETERS:  thread              - Info for this thread
446 *              this_walk_state     - Current walk state
447 *              op                  - Current Op to be walked
448 *
449 * RETURN:      Status
450 *
451 * DESCRIPTION: Transfer execution to a called control method
452 *
453 ******************************************************************************/
454
455acpi_status
456acpi_ds_call_control_method(struct acpi_thread_state *thread,
457			    struct acpi_walk_state *this_walk_state,
458			    union acpi_parse_object *op)
459{
460	acpi_status status;
461	struct acpi_namespace_node *method_node;
462	struct acpi_walk_state *next_walk_state = NULL;
463	union acpi_operand_object *obj_desc;
464	struct acpi_evaluate_info *info;
465	u32 i;
466
467	ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
468
469	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
470			  "Calling method %p, currentstate=%p\n",
471			  this_walk_state->prev_op, this_walk_state));
472
473	/*
474	 * Get the namespace entry for the control method we are about to call
475	 */
476	method_node = this_walk_state->method_call_node;
477	if (!method_node) {
478		return_ACPI_STATUS(AE_NULL_ENTRY);
479	}
480
481	obj_desc = acpi_ns_get_attached_object(method_node);
482	if (!obj_desc) {
483		return_ACPI_STATUS(AE_NULL_OBJECT);
484	}
485
486	/* Init for new method, possibly wait on method mutex */
487
488	status =
489	    acpi_ds_begin_method_execution(method_node, obj_desc,
490					   this_walk_state);
491	if (ACPI_FAILURE(status)) {
492		return_ACPI_STATUS(status);
493	}
494
495	/* Begin method parse/execution. Create a new walk state */
496
497	next_walk_state =
498	    acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, obj_desc,
499				      thread);
500	if (!next_walk_state) {
501		status = AE_NO_MEMORY;
502		goto cleanup;
503	}
504
505	/*
506	 * The resolved arguments were put on the previous walk state's operand
507	 * stack. Operands on the previous walk state stack always
508	 * start at index 0. Also, null terminate the list of arguments
509	 */
510	this_walk_state->operands[this_walk_state->num_operands] = NULL;
511
512	/*
513	 * Allocate and initialize the evaluation information block
514	 * TBD: this is somewhat inefficient, should change interface to
515	 * ds_init_aml_walk. For now, keeps this struct off the CPU stack
516	 */
517	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
518	if (!info) {
519		status = AE_NO_MEMORY;
520		goto pop_walk_state;
521	}
522
523	info->parameters = &this_walk_state->operands[0];
524
525	status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
526				       obj_desc->method.aml_start,
527				       obj_desc->method.aml_length, info,
528				       ACPI_IMODE_EXECUTE);
529
530	ACPI_FREE(info);
531	if (ACPI_FAILURE(status)) {
532		goto pop_walk_state;
533	}
534
535	next_walk_state->method_nesting_depth =
536	    this_walk_state->method_nesting_depth + 1;
537
538	/*
539	 * Delete the operands on the previous walkstate operand stack
540	 * (they were copied to new objects)
541	 */
542	for (i = 0; i < obj_desc->method.param_count; i++) {
543		acpi_ut_remove_reference(this_walk_state->operands[i]);
544		this_walk_state->operands[i] = NULL;
545	}
546
547	/* Clear the operand stack */
548
549	this_walk_state->num_operands = 0;
550
551	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
552			  "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
553			  method_node->name.ascii, next_walk_state));
554
555	this_walk_state->method_pathname =
556	    acpi_ns_get_normalized_pathname(method_node, TRUE);
557	this_walk_state->method_is_nested = TRUE;
558
559	/* Optional object evaluation log */
560
561	ACPI_DEBUG_PRINT_RAW((ACPI_DB_EVALUATION,
562			      "%-26s:  %*s%s\n", "   Nested method call",
563			      next_walk_state->method_nesting_depth * 3, " ",
564			      &this_walk_state->method_pathname[1]));
565
566	/* Invoke an internal method if necessary */
567
568	if (obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
569		status =
570		    obj_desc->method.dispatch.implementation(next_walk_state);
571		if (status == AE_OK) {
572			status = AE_CTRL_TERMINATE;
573		}
574	}
575
576	return_ACPI_STATUS(status);
577
578pop_walk_state:
579
580	/* On error, pop the walk state to be deleted from thread */
581
582	acpi_ds_pop_walk_state(thread);
583
584cleanup:
585
586	/* On error, we must terminate the method properly */
587
588	acpi_ds_terminate_control_method(obj_desc, next_walk_state);
589	acpi_ds_delete_walk_state(next_walk_state);
590
591	return_ACPI_STATUS(status);
592}
593
594/*******************************************************************************
595 *
596 * FUNCTION:    acpi_ds_restart_control_method
597 *
598 * PARAMETERS:  walk_state          - State for preempted method (caller)
599 *              return_desc         - Return value from the called method
600 *
601 * RETURN:      Status
602 *
603 * DESCRIPTION: Restart a method that was preempted by another (nested) method
604 *              invocation. Handle the return value (if any) from the callee.
605 *
606 ******************************************************************************/
607
608acpi_status
609acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
610			       union acpi_operand_object *return_desc)
611{
612	acpi_status status;
613	int same_as_implicit_return;
614
615	ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);
616
617	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
618			  "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
619			  acpi_ut_get_node_name(walk_state->method_node),
620			  walk_state->method_call_op, return_desc));
621
622	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
623			  "    ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
624			  walk_state->return_used,
625			  walk_state->results, walk_state));
626
627	/* Did the called method return a value? */
628
629	if (return_desc) {
630
631		/* Is the implicit return object the same as the return desc? */
632
633		same_as_implicit_return =
634		    (walk_state->implicit_return_obj == return_desc);
635
636		/* Are we actually going to use the return value? */
637
638		if (walk_state->return_used) {
639
640			/* Save the return value from the previous method */
641
642			status = acpi_ds_result_push(return_desc, walk_state);
643			if (ACPI_FAILURE(status)) {
644				acpi_ut_remove_reference(return_desc);
645				return_ACPI_STATUS(status);
646			}
647
648			/*
649			 * Save as THIS method's return value in case it is returned
650			 * immediately to yet another method
651			 */
652			walk_state->return_desc = return_desc;
653		}
654
655		/*
656		 * The following code is the optional support for the so-called
657		 * "implicit return". Some AML code assumes that the last value of the
658		 * method is "implicitly" returned to the caller, in the absence of an
659		 * explicit return value.
660		 *
661		 * Just save the last result of the method as the return value.
662		 *
663		 * NOTE: this is optional because the ASL language does not actually
664		 * support this behavior.
665		 */
666		else if (!acpi_ds_do_implicit_return
667			 (return_desc, walk_state, FALSE)
668			 || same_as_implicit_return) {
669			/*
670			 * Delete the return value if it will not be used by the
671			 * calling method or remove one reference if the explicit return
672			 * is the same as the implicit return value.
673			 */
674			acpi_ut_remove_reference(return_desc);
675		}
676	}
677
678	return_ACPI_STATUS(AE_OK);
679}
680
681/*******************************************************************************
682 *
683 * FUNCTION:    acpi_ds_terminate_control_method
684 *
685 * PARAMETERS:  method_desc         - Method object
686 *              walk_state          - State associated with the method
687 *
688 * RETURN:      None
689 *
690 * DESCRIPTION: Terminate a control method. Delete everything that the method
691 *              created, delete all locals and arguments, and delete the parse
692 *              tree if requested.
693 *
694 * MUTEX:       Interpreter is locked
695 *
696 ******************************************************************************/
697
698void
699acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
700				 struct acpi_walk_state *walk_state)
701{
702
703	ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);
704
705	/* method_desc is required, walk_state is optional */
706
707	if (!method_desc) {
708		return_VOID;
709	}
710
711	if (walk_state) {
712
713		/* Delete all arguments and locals */
714
715		acpi_ds_method_data_delete_all(walk_state);
716
717		/*
718		 * Delete any namespace objects created anywhere within the
719		 * namespace by the execution of this method. Unless:
720		 * 1) This method is a module-level executable code method, in which
 721 *    case we want to make the objects permanent.
722		 * 2) There are other threads executing the method, in which case we
723		 *    will wait until the last thread has completed.
724		 */
725		if (!(method_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL)
726		    && (method_desc->method.thread_count == 1)) {
727
728			/* Delete any direct children of (created by) this method */
729
730			(void)acpi_ex_exit_interpreter();
731			acpi_ns_delete_namespace_subtree(walk_state->
732							 method_node);
733			(void)acpi_ex_enter_interpreter();
734
735			/*
736			 * Delete any objects that were created by this method
737			 * elsewhere in the namespace (if any were created).
738			 * Use of the ACPI_METHOD_MODIFIED_NAMESPACE optimizes the
739			 * deletion such that we don't have to perform an entire
740			 * namespace walk for every control method execution.
741			 */
742			if (method_desc->method.
743			    info_flags & ACPI_METHOD_MODIFIED_NAMESPACE) {
744				(void)acpi_ex_exit_interpreter();
745				acpi_ns_delete_namespace_by_owner(method_desc->
746								  method.
747								  owner_id);
748				(void)acpi_ex_enter_interpreter();
749				method_desc->method.info_flags &=
750				    ~ACPI_METHOD_MODIFIED_NAMESPACE;
751			}
752		}
753
754		/*
755		 * If method is serialized, release the mutex and restore the
756		 * current sync level for this thread
757		 */
758		if (method_desc->method.mutex) {
759
760			/* Acquisition Depth handles recursive calls */
761
762			method_desc->method.mutex->mutex.acquisition_depth--;
763			if (!method_desc->method.mutex->mutex.acquisition_depth) {
764				walk_state->thread->current_sync_level =
765				    method_desc->method.mutex->mutex.
766				    original_sync_level;
767
768				acpi_os_release_mutex(method_desc->method.
769						      mutex->mutex.os_mutex);
770				method_desc->method.mutex->mutex.thread_id = 0;
771			}
772		}
773	}
774
775	/* Decrement the thread count on the method */
776
777	if (method_desc->method.thread_count) {
778		method_desc->method.thread_count--;
779	} else {
780		ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
781	}
782
783	/* Are there any other threads currently executing this method? */
784
785	if (method_desc->method.thread_count) {
786		/*
787		 * Additional threads. Do not release the owner_id in this case,
788		 * we immediately reuse it for the next thread executing this method
789		 */
790		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
791				  "*** Completed execution of one thread, %u threads remaining\n",
792				  method_desc->method.thread_count));
793	} else {
794		/* This is the only executing thread for this method */
795
796		/*
797		 * Support to dynamically change a method from not_serialized to
798		 * Serialized if it appears that the method is incorrectly written and
799		 * does not support multiple thread execution. The best example of this
800		 * is if such a method creates namespace objects and blocks. A second
801		 * thread will fail with an AE_ALREADY_EXISTS exception.
802		 *
803		 * This code is here because we must wait until the last thread exits
804		 * before marking the method as serialized.
805		 */
806		if (method_desc->method.
807		    info_flags & ACPI_METHOD_SERIALIZED_PENDING) {
808			if (walk_state) {
809				ACPI_INFO(("Marking method %4.4s as Serialized "
810					   "because of AE_ALREADY_EXISTS error",
811					   walk_state->method_node->name.
812					   ascii));
813			}
814
815			/*
816			 * Method tried to create an object twice and was marked as
817			 * "pending serialized". The probable cause is that the method
818			 * cannot handle reentrancy.
819			 *
820			 * The method was created as not_serialized, but it tried to create
821			 * a named object and then blocked, causing the second thread
822			 * entrance to begin and then fail. Workaround this problem by
823			 * marking the method permanently as Serialized when the last
824			 * thread exits here.
825			 */
826			method_desc->method.info_flags &=
827			    ~ACPI_METHOD_SERIALIZED_PENDING;
828
829			method_desc->method.info_flags |=
830			    (ACPI_METHOD_SERIALIZED |
831			     ACPI_METHOD_IGNORE_SYNC_LEVEL);
832			method_desc->method.sync_level = 0;
833		}
834
835		/* No more threads, we can free the owner_id */
836
837		if (!
838		    (method_desc->method.
839		     info_flags & ACPI_METHOD_MODULE_LEVEL)) {
840			acpi_ut_release_owner_id(&method_desc->method.owner_id);
841		}
842	}
843
844	acpi_ex_stop_trace_method((struct acpi_namespace_node *)method_desc->
845				  method.node, method_desc, walk_state);
846
847	return_VOID;
848}
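
For the serialized-method mutex handled in acpi_ds_begin_method_execution() and acpi_ds_terminate_control_method(), a recursive call by the owning thread only increments an acquisition depth, and the OS mutex is released only when that depth drops back to zero. The stand-alone sketch below (hypothetical user-space code using pthreads, not the ACPICA API) mirrors that pattern:

#include <pthread.h>
#include <stdio.h>

struct method_mutex {
	pthread_mutex_t os_mutex;	/* the underlying OS mutex */
	pthread_t owner;		/* valid only while depth > 0 */
	unsigned int depth;		/* acquisition depth, handles recursion */
};

static void method_enter(struct method_mutex *m)
{
	/* Recursive call by the owner: bump the depth, do not re-acquire */
	if (m->depth && pthread_equal(m->owner, pthread_self())) {
		m->depth++;
		return;
	}

	pthread_mutex_lock(&m->os_mutex);	/* other threads block here */
	m->owner = pthread_self();
	m->depth = 1;
}

static void method_exit(struct method_mutex *m)
{
	/* Release the OS mutex only when the outermost call exits */
	if (--m->depth == 0)
		pthread_mutex_unlock(&m->os_mutex);
}

int main(void)
{
	struct method_mutex m = { .os_mutex = PTHREAD_MUTEX_INITIALIZER };

	method_enter(&m);	/* outer "method" invocation */
	method_enter(&m);	/* nested (recursive) invocation */
	method_exit(&m);
	method_exit(&m);	/* depth reaches zero, mutex released */

	printf("final depth = %u\n", m.depth);
	return 0;
}
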