// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: evgpe - General Purpose Event handling and dispatch
 *
 * Copyright (C) 2000 - 2019, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpe")
#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_update_gpe_enable_mask
 *
 * PARAMETERS:  gpe_event_info          - GPE to update
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Updates GPE register enable mask based upon whether there are
 *              runtime references to this GPE
 *
 ******************************************************************************/

acpi_status
acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
{
	struct acpi_gpe_register_info *gpe_register_info;
	u32 register_bit;

	ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask);

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	/* Clear the run bit up front */

	ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);

	/* Set the mask bit only if there are references to this GPE */

	if (gpe_event_info->runtime_count) {
		ACPI_SET_BIT(gpe_register_info->enable_for_run,
			     (u8)register_bit);
	}

	gpe_register_info->enable_mask = gpe_register_info->enable_for_run;
	return_ACPI_STATUS(AE_OK);
}
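
/*
 * Editorial sketch (not part of ACPICA): acpi_hw_get_gpe_register_bit()
 * returns the single-bit mask for this GPE within its 8-bit register,
 * conceptually:
 *
 *     register_bit = 1 << (gpe_number - base_gpe_number);
 *
 * e.g. GPE 0x13 in a register whose base_gpe_number is 0x10 maps to bit 3
 * (mask 0x08), which is the bit that ACPI_CLEAR_BIT/ACPI_SET_BIT toggle
 * in enable_for_run above.
 */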

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_enable_gpe
 *
 * PARAMETERS:  gpe_event_info          - GPE to enable
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enable a GPE.
 *
 ******************************************************************************/

acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_enable_gpe);

	/* Enable the requested GPE */

	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_mask_gpe
 *
 * PARAMETERS:  gpe_event_info          - GPE to be masked/unmasked
 *              is_masked               - TRUE to mask the GPE, FALSE to
 *                                        unmask it
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Unconditionally mask/unmask a GPE during runtime.
 *
 ******************************************************************************/

acpi_status
acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked)
{
	struct acpi_gpe_register_info *gpe_register_info;
	u32 register_bit;

	ACPI_FUNCTION_TRACE(ev_mask_gpe);

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	/* Perform the action */

	if (is_masked) {
		if (register_bit & gpe_register_info->mask_for_run) {
			return_ACPI_STATUS(AE_BAD_PARAMETER);
		}

		(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
		ACPI_SET_BIT(gpe_register_info->mask_for_run, (u8)register_bit);
	} else {
		if (!(register_bit & gpe_register_info->mask_for_run)) {
			return_ACPI_STATUS(AE_BAD_PARAMETER);
		}

		ACPI_CLEAR_BIT(gpe_register_info->mask_for_run,
			       (u8)register_bit);
		if (gpe_event_info->runtime_count &&
		    !gpe_event_info->disable_for_dispatch) {
			(void)acpi_hw_low_set_gpe(gpe_event_info,
						  ACPI_GPE_ENABLE);
		}
	}

	return_ACPI_STATUS(AE_OK);
}
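
/*
 * Editorial usage sketch: this routine backs the public acpi_mask_gpe()
 * interface. A typical sequence is:
 *
 *     (void)acpi_ev_mask_gpe(gpe_event_info, TRUE);    (mask)
 *     ...
 *     (void)acpi_ev_mask_gpe(gpe_event_info, FALSE);   (unmask)
 *
 * Masking an already-masked GPE (or unmasking one that is not masked)
 * returns AE_BAD_PARAMETER, per the mask_for_run checks above. On unmask,
 * the GPE is re-enabled in hardware only if it still has runtime
 * references and has not been disabled for dispatch.
 */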

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_add_gpe_reference
 *
 * PARAMETERS:  gpe_event_info          - Add a reference to this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
 *              hardware-enabled.
 *
 ******************************************************************************/

acpi_status
acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ev_add_gpe_reference);

	if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
		return_ACPI_STATUS(AE_LIMIT);
	}

	gpe_event_info->runtime_count++;
	if (gpe_event_info->runtime_count == 1) {

		/* Enable on first reference */

		status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
		if (ACPI_SUCCESS(status)) {
			status = acpi_ev_enable_gpe(gpe_event_info);
		}

		if (ACPI_FAILURE(status)) {
			gpe_event_info->runtime_count--;
		}
	}

	return_ACPI_STATUS(status);
}
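
/*
 * Editorial usage sketch: callers such as acpi_enable_gpe() bracket use of
 * a GPE with this reference counting:
 *
 *     status = acpi_ev_add_gpe_reference(gpe_event_info);
 *     ... the GPE may now fire and be dispatched ...
 *     status = acpi_ev_remove_gpe_reference(gpe_event_info);
 *
 * Only the 0 -> 1 transition enables the hardware; only the 1 -> 0
 * transition (in acpi_ev_remove_gpe_reference() below) disables it again.
 */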

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_remove_gpe_reference
 *
 * PARAMETERS:  gpe_event_info          - Remove a reference to this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a reference to a GPE. When the last reference is
 *              removed, the GPE is hardware-disabled.
 *
 ******************************************************************************/

acpi_status
acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ev_remove_gpe_reference);

	if (!gpe_event_info->runtime_count) {
		return_ACPI_STATUS(AE_LIMIT);
	}

	gpe_event_info->runtime_count--;
	if (!gpe_event_info->runtime_count) {

		/* Disable on last reference */

		status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
		if (ACPI_SUCCESS(status)) {
			status =
			    acpi_hw_low_set_gpe(gpe_event_info,
						ACPI_GPE_DISABLE);
		}

		if (ACPI_FAILURE(status)) {
			gpe_event_info->runtime_count++;
		}
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_low_get_gpe_info
 *
 * PARAMETERS:  gpe_number          - Raw GPE number
 *              gpe_block           - A GPE info block
 *
 * RETURN:      A GPE event_info struct. NULL if not a valid GPE (The gpe_number
 *              is not within the specified GPE block)
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE. This is
 *              the low-level implementation of ev_get_gpe_event_info.
 *
 ******************************************************************************/

struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
						     struct acpi_gpe_block_info
						     *gpe_block)
{
	u32 gpe_index;

	/*
	 * Validate that the gpe_number is within the specified gpe_block.
	 * (Two steps)
	 */
	if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
		return (NULL);
	}

	gpe_index = gpe_number - gpe_block->block_base_number;
	if (gpe_index >= gpe_block->gpe_count) {
		return (NULL);
	}

	return (&gpe_block->event_info[gpe_index]);
}
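
/*
 * Editorial example: for a block with block_base_number == 0x10 and
 * gpe_count == 0x20, valid raw GPE numbers are 0x10-0x2F. A gpe_number of
 * 0x0F fails the base check, 0x30 fails the gpe_index bound, and 0x13
 * yields &gpe_block->event_info[3].
 */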

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_event_info
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_number          - Raw GPE number
 *
 * RETURN:      A GPE event_info struct. NULL if not a valid GPE
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE.
 *              Validates the gpe_block and the gpe_number
 *
 *              Should be called only when the GPE lists are semaphore locked
 *              and not subject to change.
 *
 ******************************************************************************/

struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
						       u32 gpe_number)
{
	union acpi_operand_object *obj_desc;
	struct acpi_gpe_event_info *gpe_info;
	u32 i;

	ACPI_FUNCTION_ENTRY();

	/* A NULL gpe_device means use the FADT-defined GPE block(s) */

	if (!gpe_device) {

		/* Examine GPE Block 0 and 1 (These blocks are permanent) */

		for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
			gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
							    acpi_gbl_gpe_fadt_blocks
							    [i]);
			if (gpe_info) {
				return (gpe_info);
			}
		}

		/* The gpe_number was not in the range of either FADT GPE block */

		return (NULL);
	}

	/* A Non-NULL gpe_device means this is a GPE Block Device */

	obj_desc =
	    acpi_ns_get_attached_object((struct acpi_namespace_node *)
					gpe_device);
	if (!obj_desc || !obj_desc->device.gpe_block) {
		return (NULL);
	}

	return (acpi_ev_low_get_gpe_info
		(gpe_number, obj_desc->device.gpe_block));
}
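
/*
 * Editorial note: a NULL gpe_device selects the two permanent FADT-defined
 * GPE blocks (GPE0/GPE1). A non-NULL gpe_device is assumed to name a GPE
 * block device (PNP ID ACPI0006), whose gpe_block is reached through the
 * device object attached to its namespace node.
 */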

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_detect
 *
 * PARAMETERS:  gpe_xrupt_list      - Interrupt block for this interrupt.
 *                                    Can have multiple GPE blocks attached.
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Detect if any GP events have occurred. This function is
 *              executed at interrupt level.
 *
 ******************************************************************************/

u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
{
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_namespace_node *gpe_device;
	struct acpi_gpe_register_info *gpe_register_info;
	struct acpi_gpe_event_info *gpe_event_info;
	u32 gpe_number;
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	acpi_cpu_flags flags;
	u32 i;
	u32 j;

	ACPI_FUNCTION_NAME(ev_gpe_detect);

	/* Check for the case where there are no GPEs */

	if (!gpe_xrupt_list) {
		return (int_status);
	}

	/*
	 * Obtain the GPE lock, which protects both the GPE data structures
	 * and the GPE registers. Note: it is not necessary to also obtain
	 * the hardware lock, since the GPE registers are protected by the
	 * gpe_lock.
	 */
	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Examine all GPE blocks attached to this interrupt level */

	gpe_block = gpe_xrupt_list->gpe_block_list_head;
	while (gpe_block) {
		gpe_device = gpe_block->node;

		/*
		 * Read all of the 8-bit GPE status and enable registers in this GPE
		 * block, saving all of them. Find all currently active GP events.
		 */
		for (i = 0; i < gpe_block->register_count; i++) {

			/* Get the next status/enable pair */

			gpe_register_info = &gpe_block->register_info[i];

			/*
			 * Optimization: If there are no GPEs enabled within this
			 * register, we can safely ignore the entire register.
			 */
			if (!(gpe_register_info->enable_for_run |
			      gpe_register_info->enable_for_wake)) {
				ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
						  "Ignore disabled registers for GPE %02X-%02X: "
						  "RunEnable=%02X, WakeEnable=%02X\n",
						  gpe_register_info->base_gpe_number,
						  gpe_register_info->base_gpe_number +
						  (ACPI_GPE_REGISTER_WIDTH - 1),
						  gpe_register_info->enable_for_run,
						  gpe_register_info->enable_for_wake));
				continue;
			}

			/* Now look at the individual GPEs in this byte register */

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

				/* Detect and dispatch one GPE bit */

				gpe_event_info =
				    &gpe_block->event_info[((acpi_size)i *
							    ACPI_GPE_REGISTER_WIDTH) + j];
				gpe_number =
				    j + gpe_register_info->base_gpe_number;
				acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
				int_status |=
				    acpi_ev_detect_gpe(gpe_device,
						       gpe_event_info,
						       gpe_number);
				flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
			}
		}

		gpe_block = gpe_block->next;
	}

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}
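
/*
 * Editorial note: acpi_gbl_gpe_lock is released and re-acquired around
 * each acpi_ev_detect_gpe() call above because that routine acquires the
 * same (non-recursive) lock itself on entry; see acpi_ev_detect_gpe()
 * below.
 */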

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_execute_gpe_method
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 *
 * RETURN:      None
 *
 * DESCRIPTION: Perform the actual execution of a GPE control method. This
 *              function is called from an invocation of acpi_os_execute and
 *              therefore does NOT execute at interrupt level - so that
 *              the control method itself is not executed in the context of
 *              an interrupt handler.
 *
 ******************************************************************************/

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;
	acpi_status status = AE_OK;
	struct acpi_evaluate_info *info;
	struct acpi_gpe_notify_info *notify;

	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);

	/* Do the correct dispatch - normal method or implicit notify */

	switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
	case ACPI_GPE_DISPATCH_NOTIFY:
		/*
		 * Implicit notify.
		 * Dispatch a DEVICE_WAKE notify to the appropriate handler.
		 * NOTE: the request is queued for execution after this method
		 * completes. The notify handlers are NOT invoked synchronously
		 * from this thread -- because handlers may in turn run other
		 * control methods.
		 *
		 * June 2012: Expand implicit notify mechanism to support
		 * notifies on multiple device objects.
		 */
		notify = gpe_event_info->dispatch.notify_list;
		while (ACPI_SUCCESS(status) && notify) {
			status =
			    acpi_ev_queue_notify_request(notify->device_node,
							 ACPI_NOTIFY_DEVICE_WAKE);

			notify = notify->next;
		}

		break;

	case ACPI_GPE_DISPATCH_METHOD:

		/* Allocate the evaluation information block */

		info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
		if (!info) {
			status = AE_NO_MEMORY;
		} else {
			/*
			 * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the
			 * _Lxx/_Exx control method that corresponds to this GPE
			 */
			info->prefix_node =
			    gpe_event_info->dispatch.method_node;
			info->flags = ACPI_IGNORE_RETURN_VALUE;

			status = acpi_ns_evaluate(info);
			ACPI_FREE(info);
		}

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"while evaluating GPE method [%4.4s]",
					acpi_ut_get_node_name(gpe_event_info->
							      dispatch.
							      method_node)));
		}
		break;

	default:

		goto error_exit;	/* Should never happen */
	}

	/* Defer enabling of GPE until all notify handlers are done */

	status = acpi_os_execute(OSL_NOTIFY_HANDLER,
				 acpi_ev_asynch_enable_gpe, gpe_event_info);
	if (ACPI_SUCCESS(status)) {
		return_VOID;
	}

error_exit:
	acpi_ev_asynch_enable_gpe(gpe_event_info);
	return_VOID;
}
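
/*
 * Editorial note: the re-enable is queued on the OSL_NOTIFY_HANDLER work
 * queue rather than done inline so that it runs after the notify requests
 * queued above (which are dispatched on the same queue); if queuing fails,
 * the enable is performed synchronously as a fallback via error_exit.
 */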

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_enable_gpe
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 *              Callback from acpi_os_execute
 *
 * RETURN:      None
 *
 * DESCRIPTION: Asynchronous clear/enable for GPE. This allows the GPE to
 *              complete (i.e., finish execution of Notify)
 *
 ******************************************************************************/

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;
	acpi_cpu_flags flags;

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	(void)acpi_ev_finish_gpe(gpe_event_info);
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	return;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_finish_gpe
 *
 * PARAMETERS:  gpe_event_info      - Info for this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution
 *              of a GPE method or a synchronous or asynchronous GPE handler.
 *
 ******************************************************************************/

acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_LEVEL_TRIGGERED) {
		/*
		 * The GPE is level-triggered; clear the GPE status bit after
		 * handling the event.
		 */
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			return (status);
		}
	}

	/*
	 * Enable this GPE, conditionally. This means that the GPE will
	 * only be physically enabled if the enable_mask bit is set
	 * in the event_info.
	 */
	(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
	gpe_event_info->disable_for_dispatch = FALSE;
	return (AE_OK);
}
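
/*
 * Editorial note: GPE status bits are write-1-to-clear. Edge-triggered
 * GPEs have their status cleared up front in acpi_ev_gpe_dispatch();
 * level-triggered GPEs are cleared here, after the event source has been
 * serviced, so that a still-asserted line does not refire the moment the
 * GPE is re-enabled.
 */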

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_detect_gpe
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_event_info      - Info for this GPE
 *              gpe_number          - Number relative to the parent GPE block
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Detect and dispatch a General Purpose Event to either a function
 *              (e.g. EC) or method (e.g. _Lxx/_Exx) handler.
 *
 * NOTE:        GPE status bits are W1C (write-1-to-clear), so it is possible
 *              to handle a single GPE from both task and irq context in
 *              parallel as long as the process to detect and mask the GPE is
 *              atomic. However, the atomicity of ACPI_GPE_DISPATCH_RAW_HANDLER
 *              depends on the raw handler itself.
 *
 ******************************************************************************/

u32
acpi_ev_detect_gpe(struct acpi_namespace_node *gpe_device,
		   struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	u8 enabled_status_byte;
	u64 status_reg;
	u64 enable_reg;
	u32 register_bit;
	struct acpi_gpe_register_info *gpe_register_info;
	struct acpi_gpe_handler_info *gpe_handler_info;
	acpi_cpu_flags flags;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_detect_gpe);

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	if (!gpe_event_info) {
		gpe_event_info =
		    acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
		if (!gpe_event_info)
			goto error_exit;
	}

	/* Get the info block for the entire GPE register */

	gpe_register_info = gpe_event_info->register_info;

	/* Get the register bitmask for this GPE */

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	/* GPE currently enabled (enable bit == 1)? */

	status = acpi_hw_read(&enable_reg, &gpe_register_info->enable_address);
	if (ACPI_FAILURE(status)) {
		goto error_exit;
	}

	/* GPE currently active (status bit == 1)? */

	status = acpi_hw_read(&status_reg, &gpe_register_info->status_address);
	if (ACPI_FAILURE(status)) {
		goto error_exit;
	}

	/* Check if there is anything active at all in this GPE */

	ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
			  "Read registers for GPE %02X: Status=%02X, Enable=%02X, "
			  "RunEnable=%02X, WakeEnable=%02X\n",
			  gpe_number,
			  (u32)(status_reg & register_bit),
			  (u32)(enable_reg & register_bit),
			  gpe_register_info->enable_for_run,
			  gpe_register_info->enable_for_wake));

	enabled_status_byte = (u8)(status_reg & enable_reg);
	if (!(enabled_status_byte & register_bit)) {
		goto error_exit;
	}
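
	/*
	 * Editorial example: with status_reg == 0x09 and enable_reg == 0x0B,
	 * enabled_status_byte == 0x09. A GPE whose register_bit is 0x08 is
	 * both enabled and active and is dispatched below; one whose
	 * register_bit is 0x02 (enabled but inactive) takes the error_exit
	 * path above.
	 */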

	/* Invoke global event handler if present */

	acpi_gpe_count++;
	if (acpi_gbl_global_event_handler) {
		acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE,
					      gpe_device, gpe_number,
					      acpi_gbl_global_event_handler_context);
	}

	/* Found an active GPE */

	if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
	    ACPI_GPE_DISPATCH_RAW_HANDLER) {

		/* Dispatch the event to a raw handler */

		gpe_handler_info = gpe_event_info->dispatch.handler;

		/*
		 * No locking is used here to protect the namespace node and
		 * the GPE handler against destruction, because:
		 * 1. The namespace node is expected to always
		 *    exist after loading a table.
		 * 2. The GPE handler is expected to be flushed by
		 *    acpi_os_wait_events_complete() before the
		 *    destruction.
		 */
		acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
		int_status |=
		    gpe_handler_info->address(gpe_device, gpe_number,
					      gpe_handler_info->context);
		flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	} else {
		/* Dispatch the event to a standard handler or method. */

		int_status |= acpi_ev_gpe_dispatch(gpe_device,
						   gpe_event_info, gpe_number);
	}

error_exit:
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_dispatch
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_event_info      - Info for this GPE
 *              gpe_number          - Number relative to the parent GPE block
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
 *              or method (e.g. _Lxx/_Exx) handler.
 *
 ******************************************************************************/

u32
acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
		     struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	acpi_status status;
	u32 return_value;

	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

	/*
	 * Always disable the GPE so that it does not keep firing before
	 * any asynchronous activity completes (either from the execution
	 * of a GPE method or an asynchronous GPE handler.)
	 *
	 * If there is no handler or method to run, just disable the
	 * GPE and leave it disabled permanently to prevent further such
	 * pointless events from firing.
	 */
	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Unable to disable GPE %02X", gpe_number));
		return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
	}

	/*
	 * If edge-triggered, clear the GPE status bit now. Note that
	 * level-triggered events are cleared after the GPE is serviced.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to clear GPE %02X",
					gpe_number));
			(void)acpi_hw_low_set_gpe(gpe_event_info,
						  ACPI_GPE_CONDITIONAL_ENABLE);
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	gpe_event_info->disable_for_dispatch = TRUE;

	/*
	 * Dispatch the GPE to either an installed handler or the control
	 * method associated with this GPE (_Lxx or _Exx). If a handler
	 * exists, we invoke it and do not attempt to run the method.
	 * If there is neither a handler nor a method, leave the GPE
	 * disabled.
	 */
	switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
	case ACPI_GPE_DISPATCH_HANDLER:

		/* Invoke the installed handler (at interrupt level) */

		return_value =
		    gpe_event_info->dispatch.handler->address(gpe_device,
							      gpe_number,
							      gpe_event_info->
							      dispatch.handler->
							      context);

		/* If requested, clear (if level-triggered) and re-enable the GPE */

		if (return_value & ACPI_REENABLE_GPE) {
			(void)acpi_ev_finish_gpe(gpe_event_info);
		}
		break;

	case ACPI_GPE_DISPATCH_METHOD:
	case ACPI_GPE_DISPATCH_NOTIFY:
		/*
		 * Execute the method associated with the GPE
		 * NOTE: Level-triggered GPEs are cleared after the method completes.
		 */
		status = acpi_os_execute(OSL_GPE_HANDLER,
					 acpi_ev_asynch_execute_gpe_method,
					 gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to queue handler for GPE %02X - event disabled",
					gpe_number));
		}
		break;

	default:
		/*
		 * No handler or method to run!
		 * 03/2010: This case should no longer be possible. We will not allow
		 * a GPE to be enabled if it has no handler or method.
		 */
		ACPI_ERROR((AE_INFO,
			    "No handler or method for GPE %02X, disabling event",
			    gpe_number));

		break;
	}

	return_UINT32(ACPI_INTERRUPT_HANDLED);
}
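
/*
 * Editorial handler sketch ("my_gpe_handler" and "my_device" are
 * hypothetical): a handler installed with acpi_install_gpe_handler() is
 * invoked from the ACPI_GPE_DISPATCH_HANDLER case above, and can request
 * the clear/re-enable done by acpi_ev_finish_gpe() via its return value:
 *
 *     static u32 my_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
 *                               void *context)
 *     {
 *             struct my_device *dev = context;
 *
 *             ... service the event source on dev ...
 *
 *             return (ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE);
 *     }
 */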

#endif				/* !ACPI_REDUCED_HARDWARE */