// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: evgpe - General Purpose Event handling and dispatch
 *
 * Copyright (C) 2000 - 2019, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpe")
#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_update_gpe_enable_mask
 *
 * PARAMETERS:  gpe_event_info          - GPE to update
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Updates GPE register enable mask based upon whether there are
 *              runtime references to this GPE
 *
 ******************************************************************************/

acpi_status
acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
{
	struct acpi_gpe_register_info *gpe_register_info;
	u32 register_bit;

	ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask);

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	/* Clear the run bit up front */

	ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);

	/* Set the mask bit only if there are references to this GPE */

	if (gpe_event_info->runtime_count) {
		ACPI_SET_BIT(gpe_register_info->enable_for_run,
			     (u8)register_bit);
	}

	gpe_register_info->enable_mask = gpe_register_info->enable_for_run;
	return_ACPI_STATUS(AE_OK);
}
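
/*
 * Worked example (illustrative, not from the original source): assuming
 * the usual 1 << (gpe_number - base_gpe_number) mapping implemented by
 * acpi_hw_get_gpe_register_bit(), GPE 0x14 in a register whose
 * base_gpe_number is 0x10 yields register_bit = 0x10 (bit 4). With
 * runtime_count > 0, the function above sets bit 4 in enable_for_run
 * and copies the result into enable_mask.
 */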

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_enable_gpe
 *
 * PARAMETERS:  gpe_event_info          - GPE to enable
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enable a GPE.
 *
 ******************************************************************************/

acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_enable_gpe);

	/* Enable the requested GPE */

	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_mask_gpe
 *
 * PARAMETERS:  gpe_event_info          - GPE to be blocked/unblocked
 *              is_masked               - TRUE to mask the GPE, FALSE to unmask
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Unconditionally mask/unmask a GPE during runtime.
 *
 ******************************************************************************/

acpi_status
acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked)
{
	struct acpi_gpe_register_info *gpe_register_info;
	u32 register_bit;

	ACPI_FUNCTION_TRACE(ev_mask_gpe);

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	/* Perform the action */

	if (is_masked) {
		if (register_bit & gpe_register_info->mask_for_run) {
			return_ACPI_STATUS(AE_BAD_PARAMETER);
		}

		(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
		ACPI_SET_BIT(gpe_register_info->mask_for_run, (u8)register_bit);
	} else {
		if (!(register_bit & gpe_register_info->mask_for_run)) {
			return_ACPI_STATUS(AE_BAD_PARAMETER);
		}

		ACPI_CLEAR_BIT(gpe_register_info->mask_for_run,
			       (u8)register_bit);
		if (gpe_event_info->runtime_count
		    && !gpe_event_info->disable_for_dispatch) {
			(void)acpi_hw_low_set_gpe(gpe_event_info,
						  ACPI_GPE_ENABLE);
		}
	}

	return_ACPI_STATUS(AE_OK);
}
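
/*
 * Usage sketch (illustrative): masking is intended for temporarily
 * silencing a runtime GPE without touching its reference count, e.g.:
 *
 *	status = acpi_ev_mask_gpe(gpe_event_info, TRUE);
 *	...GPE cannot fire here...
 *	status = acpi_ev_mask_gpe(gpe_event_info, FALSE);
 *
 * The calls must be balanced: masking an already-masked GPE (or
 * unmasking one that is not masked) returns AE_BAD_PARAMETER.
 */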

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_add_gpe_reference
 *
 * PARAMETERS:  gpe_event_info          - Add a reference to this GPE
 *              clear_on_enable         - Clear GPE status before enabling it
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
 *              hardware-enabled.
 *
 ******************************************************************************/

acpi_status
acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info,
			  u8 clear_on_enable)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ev_add_gpe_reference);

	if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
		return_ACPI_STATUS(AE_LIMIT);
	}

	gpe_event_info->runtime_count++;
	if (gpe_event_info->runtime_count == 1) {

		/* Enable on first reference */

		if (clear_on_enable) {
			(void)acpi_hw_clear_gpe(gpe_event_info);
		}

		status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
		if (ACPI_SUCCESS(status)) {
			status = acpi_ev_enable_gpe(gpe_event_info);
		}

		if (ACPI_FAILURE(status)) {
			gpe_event_info->runtime_count--;
		}
	}

	return_ACPI_STATUS(status);
}
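
/*
 * Usage sketch (illustrative): references are counted, so nested enable
 * requests are cheap. Only the 0 -> 1 transition touches the hardware
 * enable bit, and only the matching 1 -> 0 transition in
 * acpi_ev_remove_gpe_reference() below disables it again:
 *
 *	status = acpi_ev_add_gpe_reference(gpe_event_info, TRUE);
 *	...GPE is live and may be dispatched...
 *	status = acpi_ev_remove_gpe_reference(gpe_event_info);
 *
 * Passing TRUE for clear_on_enable discards any status bit that was
 * latched while the GPE had no references.
 */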

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_remove_gpe_reference
 *
 * PARAMETERS:  gpe_event_info          - Remove a reference to this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a reference to a GPE. When the last reference is
 *              removed, the GPE is hardware-disabled.
 *
 ******************************************************************************/

acpi_status
acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ev_remove_gpe_reference);

	if (!gpe_event_info->runtime_count) {
		return_ACPI_STATUS(AE_LIMIT);
	}

	gpe_event_info->runtime_count--;
	if (!gpe_event_info->runtime_count) {

		/* Disable on last reference */

		status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
		if (ACPI_SUCCESS(status)) {
			status =
			    acpi_hw_low_set_gpe(gpe_event_info,
						ACPI_GPE_DISABLE);
		}

		if (ACPI_FAILURE(status)) {
			gpe_event_info->runtime_count++;
		}
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_low_get_gpe_info
 *
 * PARAMETERS:  gpe_number          - Raw GPE number
 *              gpe_block           - A GPE info block
 *
 * RETURN:      A GPE event_info struct. NULL if not a valid GPE (The gpe_number
 *              is not within the specified GPE block)
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE. This is
 *              the low-level implementation of ev_get_gpe_event_info.
 *
 ******************************************************************************/

struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
						     struct acpi_gpe_block_info
						     *gpe_block)
{
	u32 gpe_index;

	/*
	 * Validate that the gpe_number is within the specified gpe_block.
	 * (Two steps)
	 */
	if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
		return (NULL);
	}

	gpe_index = gpe_number - gpe_block->block_base_number;
	if (gpe_index >= gpe_block->gpe_count) {
		return (NULL);
	}

	return (&gpe_block->event_info[gpe_index]);
}
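
/*
 * Worked example (illustrative): for a block with block_base_number =
 * 0x10 and gpe_count = 0x10, valid GPE numbers are 0x10-0x1F. A
 * gpe_number of 0x1A passes both checks and yields gpe_index = 0x0A;
 * 0x0F fails the first check and 0x20 fails the second, so both
 * return NULL.
 */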
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_event_info
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_number          - Raw GPE number
 *
 * RETURN:      A GPE event_info struct. NULL if not a valid GPE
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE.
 *              Validates the gpe_block and the gpe_number
 *
 *              Should be called only when the GPE lists are semaphore locked
 *              and not subject to change.
 *
 ******************************************************************************/

struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
						       u32 gpe_number)
{
	union acpi_operand_object *obj_desc;
	struct acpi_gpe_event_info *gpe_info;
	u32 i;

	ACPI_FUNCTION_ENTRY();

	/* A NULL gpe_device means use the FADT-defined GPE block(s) */

	if (!gpe_device) {

		/* Examine GPE Block 0 and 1 (These blocks are permanent) */

		for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
			gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
							    acpi_gbl_gpe_fadt_blocks
							    [i]);
			if (gpe_info) {
				return (gpe_info);
			}
		}

		/* The gpe_number was not in the range of either FADT GPE block */

		return (NULL);
	}

	/* A Non-NULL gpe_device means this is a GPE Block Device */

	obj_desc =
	    acpi_ns_get_attached_object((struct acpi_namespace_node *)
					       gpe_device);
	if (!obj_desc || !obj_desc->device.gpe_block) {
		return (NULL);
	}

	return (acpi_ev_low_get_gpe_info
		(gpe_number, obj_desc->device.gpe_block));
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_detect
 *
 * PARAMETERS:  gpe_xrupt_list      - Interrupt block for this interrupt.
 *                                    Can have multiple GPE blocks attached.
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Detect if any GP events have occurred. This function is
 *              executed at interrupt level.
 *
 ******************************************************************************/

u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
{
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_namespace_node *gpe_device;
	struct acpi_gpe_register_info *gpe_register_info;
	struct acpi_gpe_event_info *gpe_event_info;
	u32 gpe_number;
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	acpi_cpu_flags flags;
	u32 i;
	u32 j;

	ACPI_FUNCTION_NAME(ev_gpe_detect);

	/* Check for the case where there are no GPEs */

	if (!gpe_xrupt_list) {
		return (int_status);
	}

	/*
	 * We need to obtain the GPE lock for both the data structs and the
	 * registers. Note: it is not necessary to obtain the hardware lock,
	 * since the GPE registers are owned by the gpe_lock.
	 */
	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Examine all GPE blocks attached to this interrupt level */

	gpe_block = gpe_xrupt_list->gpe_block_list_head;
	while (gpe_block) {
		gpe_device = gpe_block->node;

		/*
		 * Read all of the 8-bit GPE status and enable registers in this GPE
		 * block, saving all of them. Find all currently active GP events.
		 */
		for (i = 0; i < gpe_block->register_count; i++) {

			/* Get the next status/enable pair */

			gpe_register_info = &gpe_block->register_info[i];

			/*
			 * Optimization: If there are no GPEs enabled within this
			 * register, we can safely ignore the entire register.
			 */
			if (!(gpe_register_info->enable_for_run |
			      gpe_register_info->enable_for_wake)) {
				ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
						  "Ignore disabled registers for GPE %02X-%02X: "
						  "RunEnable=%02X, WakeEnable=%02X\n",
						  gpe_register_info->
						  base_gpe_number,
						  gpe_register_info->
						  base_gpe_number +
						  (ACPI_GPE_REGISTER_WIDTH - 1),
						  gpe_register_info->
						  enable_for_run,
						  gpe_register_info->
						  enable_for_wake));
				continue;
			}

			/* Now look at the individual GPEs in this byte register */

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

				/* Detect and dispatch one GPE bit */

				gpe_event_info =
				    &gpe_block->
				    event_info[((acpi_size)i *
						ACPI_GPE_REGISTER_WIDTH) + j];
				gpe_number =
				    j + gpe_register_info->base_gpe_number;
				acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
				int_status |=
				    acpi_ev_detect_gpe(gpe_device,
						       gpe_event_info,
						       gpe_number);
				flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
			}
		}

		gpe_block = gpe_block->next;
	}

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}
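
/*
 * Note on the loop above (illustrative): event_info[] is a flat array,
 * so bit j of status/enable register i corresponds to entry
 * (i * ACPI_GPE_REGISTER_WIDTH) + j and to GPE number
 * (base_gpe_number + j). The GPE lock is dropped around each call to
 * acpi_ev_detect_gpe() because that function acquires the same lock
 * internally.
 */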

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_execute_gpe_method
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 *
 * RETURN:      None
 *
 * DESCRIPTION: Perform the actual execution of a GPE control method. This
 *              function is called from an invocation of acpi_os_execute and
 *              therefore does NOT execute at interrupt level - so that
 *              the control method itself is not executed in the context of
 *              an interrupt handler.
 *
 ******************************************************************************/

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;
	acpi_status status = AE_OK;
	struct acpi_evaluate_info *info;
	struct acpi_gpe_notify_info *notify;

	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);

	/* Do the correct dispatch - normal method or implicit notify */

	switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
	case ACPI_GPE_DISPATCH_NOTIFY:
		/*
		 * Implicit notify.
		 * Dispatch a DEVICE_WAKE notify to the appropriate handler.
		 * NOTE: the request is queued for execution after this method
		 * completes. The notify handlers are NOT invoked synchronously
		 * from this thread -- because handlers may in turn run other
		 * control methods.
		 *
		 * June 2012: Expand implicit notify mechanism to support
		 * notifies on multiple device objects.
		 */
		notify = gpe_event_info->dispatch.notify_list;
		while (ACPI_SUCCESS(status) && notify) {
			status =
			    acpi_ev_queue_notify_request(notify->device_node,
							 ACPI_NOTIFY_DEVICE_WAKE);

			notify = notify->next;
		}

		break;

	case ACPI_GPE_DISPATCH_METHOD:

		/* Allocate the evaluation information block */

		info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
		if (!info) {
			status = AE_NO_MEMORY;
		} else {
			/*
			 * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the
			 * _Lxx/_Exx control method that corresponds to this GPE
			 */
			info->prefix_node =
			    gpe_event_info->dispatch.method_node;
			info->flags = ACPI_IGNORE_RETURN_VALUE;

			status = acpi_ns_evaluate(info);
			ACPI_FREE(info);
		}

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"while evaluating GPE method [%4.4s]",
					acpi_ut_get_node_name(gpe_event_info->
							      dispatch.
							      method_node)));
		}
		break;

	default:

		goto error_exit;	/* Should never happen */
	}

	/* Defer enabling of GPE until all notify handlers are done */

	status = acpi_os_execute(OSL_NOTIFY_HANDLER,
				 acpi_ev_asynch_enable_gpe, gpe_event_info);
	if (ACPI_SUCCESS(status)) {
		return_VOID;
	}

error_exit:
	acpi_ev_asynch_enable_gpe(gpe_event_info);
	return_VOID;
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_enable_gpe
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 *              Callback from acpi_os_execute
 *
 * RETURN:      None
 *
 * DESCRIPTION: Asynchronous clear/enable for GPE. This allows the GPE to
 *              complete (i.e., finish execution of Notify)
 *
 ******************************************************************************/

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;
	acpi_cpu_flags flags;

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	(void)acpi_ev_finish_gpe(gpe_event_info);
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	return;
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_finish_gpe
 *
 * PARAMETERS:  gpe_event_info      - Info for this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution
 *              of a GPE method or a synchronous or asynchronous GPE handler.
 *
 ******************************************************************************/

acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_LEVEL_TRIGGERED) {
		/*
		 * The GPE is level-triggered; clear the GPE status bit after
		 * handling the event.
		 */
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			return (status);
		}
	}

	/*
	 * Enable this GPE, conditionally. This means that the GPE will
	 * only be physically enabled if the enable_mask bit is set
	 * in the event_info.
	 */
	(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
	gpe_event_info->disable_for_dispatch = FALSE;
	return (AE_OK);
}
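
/*
 * Note (illustrative): the ordering above matters for level-triggered
 * GPEs. The status bit is cleared only after the event has been
 * serviced; clearing it earlier would just re-latch it while the
 * input is still asserted. Edge-triggered GPEs are instead cleared up
 * front in acpi_ev_gpe_dispatch() below.
 */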
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_detect_gpe
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_event_info      - Info for this GPE
 *              gpe_number          - Number relative to the parent GPE block
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Detect and dispatch a General Purpose Event to either a function
 *              (e.g. EC) or method (e.g. _Lxx/_Exx) handler.
 * NOTE:        GPE is W1C, so it is possible to handle a single GPE from both
 *              task and irq context in parallel as long as the process to
 *              detect and mask the GPE is atomic.
 *              However, the atomicity of ACPI_GPE_DISPATCH_RAW_HANDLER
 *              depends on the raw handler itself.
 *
 ******************************************************************************/

u32
acpi_ev_detect_gpe(struct acpi_namespace_node *gpe_device,
		   struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	u8 enabled_status_byte;
	u64 status_reg;
	u64 enable_reg;
	u32 register_bit;
	struct acpi_gpe_register_info *gpe_register_info;
	struct acpi_gpe_handler_info *gpe_handler_info;
	acpi_cpu_flags flags;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_detect_gpe);

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	if (!gpe_event_info) {
		gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
		if (!gpe_event_info)
			goto error_exit;
	}

	/* Get the info block for the entire GPE register */

	gpe_register_info = gpe_event_info->register_info;

	/* Get the register bitmask for this GPE */

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	/* GPE currently enabled (enable bit == 1)? */

	status = acpi_hw_read(&enable_reg, &gpe_register_info->enable_address);
	if (ACPI_FAILURE(status)) {
		goto error_exit;
	}

	/* GPE currently active (status bit == 1)? */

	status = acpi_hw_read(&status_reg, &gpe_register_info->status_address);
	if (ACPI_FAILURE(status)) {
		goto error_exit;
	}

	/* Check if there is anything active at all in this GPE */

	ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
			  "Read registers for GPE %02X: Status=%02X, Enable=%02X, "
			  "RunEnable=%02X, WakeEnable=%02X\n",
			  gpe_number,
			  (u32)(status_reg & register_bit),
			  (u32)(enable_reg & register_bit),
			  gpe_register_info->enable_for_run,
			  gpe_register_info->enable_for_wake));

	enabled_status_byte = (u8)(status_reg & enable_reg);
	if (!(enabled_status_byte & register_bit)) {
		goto error_exit;
	}

	/* Invoke global event handler if present */

	acpi_gpe_count++;
	if (acpi_gbl_global_event_handler) {
		acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE,
					      gpe_device, gpe_number,
					      acpi_gbl_global_event_handler_context);
	}

	/* Found an active GPE */

	if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
	    ACPI_GPE_DISPATCH_RAW_HANDLER) {

		/* Dispatch the event to a raw handler */

		gpe_handler_info = gpe_event_info->dispatch.handler;

		/*
		 * There is no protection around the namespace node
		 * and the GPE handler to ensure a safe destruction
		 * because:
		 * 1. The namespace node is expected to always
		 *    exist after loading a table.
		 * 2. The GPE handler is expected to be flushed by
		 *    acpi_os_wait_events_complete() before the
		 *    destruction.
		 */
		acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
		int_status |=
		    gpe_handler_info->address(gpe_device, gpe_number,
					      gpe_handler_info->context);
		flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	} else {
		/* Dispatch the event to a standard handler or method. */

		int_status |= acpi_ev_gpe_dispatch(gpe_device,
						   gpe_event_info, gpe_number);
	}

error_exit:
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}
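
/*
 * Note on W1C (illustrative): GPE status bits are "write 1 to clear".
 * Writing only the register bit for one GPE (e.g. 0x10 for bit 4)
 * clears that GPE alone and leaves other pending bits latched, which
 * is what allows a single GPE to be handled from both task and irq
 * context in parallel, as described in the function header above.
 */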

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_dispatch
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_event_info      - Info for this GPE
 *              gpe_number          - Number relative to the parent GPE block
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
 *              or method (e.g. _Lxx/_Exx) handler.
 *
 ******************************************************************************/

u32
acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
		     struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	acpi_status status;
	u32 return_value;

	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

	/*
	 * Always disable the GPE so that it does not keep firing before
	 * any asynchronous activity completes (either from the execution
	 * of a GPE method or an asynchronous GPE handler.)
	 *
	 * If there is no handler or method to run, just disable the
	 * GPE and leave it disabled permanently to prevent further such
	 * pointless events from firing.
	 */
	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Unable to disable GPE %02X", gpe_number));
		return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
	}

	/*
	 * If edge-triggered, clear the GPE status bit now. Note that
	 * level-triggered events are cleared after the GPE is serviced.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to clear GPE %02X",
					gpe_number));
			(void)acpi_hw_low_set_gpe(gpe_event_info,
						  ACPI_GPE_CONDITIONAL_ENABLE);
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	gpe_event_info->disable_for_dispatch = TRUE;

	/*
	 * Dispatch the GPE to either an installed handler or the control
	 * method associated with this GPE (_Lxx or _Exx). If a handler
	 * exists, we invoke it and do not attempt to run the method.
	 * If there is neither a handler nor a method, leave the GPE
	 * disabled.
	 */
	switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
	case ACPI_GPE_DISPATCH_HANDLER:

		/* Invoke the installed handler (at interrupt level) */

		return_value =
		    gpe_event_info->dispatch.handler->address(gpe_device,
							      gpe_number,
							      gpe_event_info->
							      dispatch.handler->
							      context);

		/* If requested, clear (if level-triggered) and re-enable the GPE */

		if (return_value & ACPI_REENABLE_GPE) {
			(void)acpi_ev_finish_gpe(gpe_event_info);
		}
		break;

	case ACPI_GPE_DISPATCH_METHOD:
	case ACPI_GPE_DISPATCH_NOTIFY:
		/*
		 * Execute the method associated with the GPE
		 * NOTE: Level-triggered GPEs are cleared after the method completes.
		 */
		status = acpi_os_execute(OSL_GPE_HANDLER,
					 acpi_ev_asynch_execute_gpe_method,
					 gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to queue handler for GPE %02X - event disabled",
					gpe_number));
		}
		break;

	default:
		/*
		 * No handler or method to run!
		 * 03/2010: This case should no longer be possible. We will not allow
		 * a GPE to be enabled if it has no handler or method.
		 */
		ACPI_ERROR((AE_INFO,
			    "No handler or method for GPE %02X, disabling event",
			    gpe_number));

		break;
	}

	return_UINT32(ACPI_INTERRUPT_HANDLED);
}
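
/*
 * Handler sketch (illustrative; my_gpe_handler is hypothetical): a
 * handler installed with acpi_install_gpe_handler() is invoked above
 * at interrupt level. It asks acpi_ev_gpe_dispatch() to clear (if
 * level-triggered) and re-enable the GPE by setting ACPI_REENABLE_GPE
 * in its return value:
 *
 *	static u32 my_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
 *				  void *context)
 *	{
 *		...service the event...
 *		return (ACPI_REENABLE_GPE);
 *	}
 */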

#endif				/* !ACPI_REDUCED_HARDWARE */