// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: evgpe - General Purpose Event handling and dispatch
 *
 * Copyright (C) 2000 - 2019, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpe")
#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_update_gpe_enable_mask
 *
 * PARAMETERS:  gpe_event_info          - GPE to update
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Updates the GPE register enable mask based upon whether there
 *              are runtime references to this GPE.
 *
 ******************************************************************************/

acpi_status
acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
{
	struct acpi_gpe_register_info *gpe_register_info;
	u32 register_bit;

	ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask);

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	/* Clear the run bit up front */

	ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);

	/* Set the run bit only if there are runtime references to this GPE */

	if (gpe_event_info->runtime_count) {
		ACPI_SET_BIT(gpe_register_info->enable_for_run,
			     (u8)register_bit);
	}

	gpe_register_info->enable_mask = gpe_register_info->enable_for_run;
	return_ACPI_STATUS(AE_OK);
}

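/*
 * Example (editorial sketch, not part of the original source): for a
 * hypothetical GPE number 0x13 in a register whose base_gpe_number is
 * 0x10, acpi_hw_get_gpe_register_bit() returns (1 << (0x13 - 0x10)),
 * i.e. 0x08. With one runtime reference held, the update above is
 * equivalent to:
 *
 *	gpe_register_info->enable_for_run |= 0x08;
 *	gpe_register_info->enable_mask = gpe_register_info->enable_for_run;
 *
 * The GPE number 0x13 and base 0x10 are invented for illustration.
 */
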
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_enable_gpe
 *
 * PARAMETERS:  gpe_event_info          - GPE to enable
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enable a GPE.
 *
 ******************************************************************************/

acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_enable_gpe);

	/* Clear the GPE status */
	status = acpi_hw_clear_gpe(gpe_event_info);
	if (ACPI_FAILURE(status))
		return_ACPI_STATUS(status);

	/* Enable the requested GPE */
	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_mask_gpe
 *
 * PARAMETERS:  gpe_event_info          - GPE to be masked/unmasked
 *              is_masked               - TRUE to mask the GPE, FALSE to
 *                                        unmask it
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Unconditionally mask/unmask a GPE during runtime.
 *
 ******************************************************************************/

acpi_status
acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked)
{
	struct acpi_gpe_register_info *gpe_register_info;
	u32 register_bit;

	ACPI_FUNCTION_TRACE(ev_mask_gpe);

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	/* Perform the action */

	if (is_masked) {
		if (register_bit & gpe_register_info->mask_for_run) {
			return_ACPI_STATUS(AE_BAD_PARAMETER);
		}

		(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
		ACPI_SET_BIT(gpe_register_info->mask_for_run, (u8)register_bit);
	} else {
		if (!(register_bit & gpe_register_info->mask_for_run)) {
			return_ACPI_STATUS(AE_BAD_PARAMETER);
		}

		ACPI_CLEAR_BIT(gpe_register_info->mask_for_run,
			       (u8)register_bit);
		if (gpe_event_info->runtime_count &&
		    !gpe_event_info->disable_for_dispatch) {
			(void)acpi_hw_low_set_gpe(gpe_event_info,
						  ACPI_GPE_ENABLE);
		}
	}

	return_ACPI_STATUS(AE_OK);
}

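/*
 * Example (editorial sketch, not part of the original source): a caller
 * that wants to temporarily suppress a runtime GPE would pair the mask
 * and unmask operations, checking for AE_BAD_PARAMETER to catch an
 * already-masked (or already-unmasked) GPE:
 *
 *	acpi_status status;
 *
 *	status = acpi_ev_mask_gpe(gpe_event_info, TRUE);
 *	if (ACPI_SUCCESS(status)) {
 *		// ... critical section with the GPE suppressed ...
 *		(void)acpi_ev_mask_gpe(gpe_event_info, FALSE);
 *	}
 */
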
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_add_gpe_reference
 *
 * PARAMETERS:  gpe_event_info          - Add a reference to this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
 *              hardware-enabled.
 *
 ******************************************************************************/

acpi_status
acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ev_add_gpe_reference);

	if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
		return_ACPI_STATUS(AE_LIMIT);
	}

	gpe_event_info->runtime_count++;
	if (gpe_event_info->runtime_count == 1) {

		/* Enable on first reference */

		status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
		if (ACPI_SUCCESS(status)) {
			status = acpi_ev_enable_gpe(gpe_event_info);
		}

		if (ACPI_FAILURE(status)) {
			gpe_event_info->runtime_count--;
		}
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_remove_gpe_reference
 *
 * PARAMETERS:  gpe_event_info          - Remove a reference to this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a reference to a GPE. When the last reference is
 *              removed, the GPE is hardware-disabled.
 *
 ******************************************************************************/

acpi_status
acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ev_remove_gpe_reference);

	if (!gpe_event_info->runtime_count) {
		return_ACPI_STATUS(AE_LIMIT);
	}

	gpe_event_info->runtime_count--;
	if (!gpe_event_info->runtime_count) {

		/* Disable on last reference */

		status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
		if (ACPI_SUCCESS(status)) {
			status = acpi_hw_low_set_gpe(gpe_event_info,
						     ACPI_GPE_DISABLE);
		}

		if (ACPI_FAILURE(status)) {
			gpe_event_info->runtime_count++;
		}
	}

	return_ACPI_STATUS(status);
}

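/*
 * Example (editorial sketch, not part of the original source): the
 * reference count keeps the GPE hardware-enabled while at least one
 * user needs it. Callers are expected to balance each add with a
 * remove:
 *
 *	if (ACPI_SUCCESS(acpi_ev_add_gpe_reference(gpe_event_info))) {
 *		// ... GPE is enabled; use the device ...
 *		(void)acpi_ev_remove_gpe_reference(gpe_event_info);
 *	}
 *
 * Only the 0 -> 1 and 1 -> 0 transitions touch the hardware.
 */
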
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_low_get_gpe_info
 *
 * PARAMETERS:  gpe_number          - Raw GPE number
 *              gpe_block           - A GPE info block
 *
 * RETURN:      A GPE event_info struct. NULL if not a valid GPE (the
 *              gpe_number is not within the specified GPE block).
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE. This is
 *              the low-level implementation of ev_get_gpe_event_info.
 *
 ******************************************************************************/

struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
						     struct acpi_gpe_block_info
						     *gpe_block)
{
	u32 gpe_index;

	/*
	 * Validate that the gpe_number is within the specified gpe_block.
	 * (Two steps)
	 */
	if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
		return (NULL);
	}

	gpe_index = gpe_number - gpe_block->block_base_number;
	if (gpe_index >= gpe_block->gpe_count) {
		return (NULL);
	}

	return (&gpe_block->event_info[gpe_index]);
}

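/*
 * Example (editorial sketch, not part of the original source): for a
 * hypothetical block with block_base_number == 0x10 and gpe_count == 32,
 * gpe_number 0x2A maps to gpe_index 0x2A - 0x10 == 0x1A, which is within
 * range, so &gpe_block->event_info[0x1A] is returned. A gpe_number of
 * 0x0F (below the base) or 0x30 (index 0x20 >= gpe_count) returns NULL.
 * All numbers here are invented for illustration.
 */
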
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_event_info
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_number          - Raw GPE number
 *
 * RETURN:      A GPE event_info struct. NULL if not a valid GPE
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE.
 *              Validates the gpe_block and the gpe_number.
 *
 *              Should be called only while the GPE lists are locked by the
 *              semaphore and thus not subject to change.
 *
 ******************************************************************************/

struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
						       u32 gpe_number)
{
	union acpi_operand_object *obj_desc;
	struct acpi_gpe_event_info *gpe_info;
	u32 i;

	ACPI_FUNCTION_ENTRY();

	/* A NULL gpe_device means use the FADT-defined GPE block(s) */

	if (!gpe_device) {

		/* Examine GPE Blocks 0 and 1 (these blocks are permanent) */

		for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
			gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
							    acpi_gbl_gpe_fadt_blocks
							    [i]);
			if (gpe_info) {
				return (gpe_info);
			}
		}

		/* The gpe_number was not in the range of either FADT GPE block */

		return (NULL);
	}

	/* A non-NULL gpe_device means this is a GPE Block Device */

	obj_desc =
	    acpi_ns_get_attached_object((struct acpi_namespace_node *)
					gpe_device);
	if (!obj_desc || !obj_desc->device.gpe_block) {
		return (NULL);
	}

	return (acpi_ev_low_get_gpe_info
		(gpe_number, obj_desc->device.gpe_block));
}

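/*
 * Example (editorial sketch, not part of the original source): resolving
 * GPE 0x1E from the permanent FADT blocks, with the GPE lists held
 * stable as the description above requires:
 *
 *	struct acpi_gpe_event_info *gpe_event_info;
 *
 *	gpe_event_info = acpi_ev_get_gpe_event_info(NULL, 0x1E);
 *	if (!gpe_event_info) {
 *		// 0x1E is not a valid GPE on this platform
 *	}
 *
 * Passing a GPE Block Device handle instead of NULL would search that
 * device's gpe_block. The number 0x1E is invented for illustration.
 */
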
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_detect
 *
 * PARAMETERS:  gpe_xrupt_list      - Interrupt block for this interrupt.
 *                                    Can have multiple GPE blocks attached.
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Detect if any GP events have occurred. This function is
 *              executed at interrupt level.
 *
 ******************************************************************************/

u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
{
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_namespace_node *gpe_device;
	struct acpi_gpe_register_info *gpe_register_info;
	struct acpi_gpe_event_info *gpe_event_info;
	u32 gpe_number;
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	acpi_cpu_flags flags;
	u32 i;
	u32 j;

	ACPI_FUNCTION_NAME(ev_gpe_detect);

	/* Check for the case where there are no GPEs */

	if (!gpe_xrupt_list) {
		return (int_status);
	}

	/*
	 * We need to obtain the GPE lock for both the data structs and the
	 * registers. Note: it is not necessary to obtain the hardware lock,
	 * since the GPE registers are owned by the gpe_lock.
	 */
	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Examine all GPE blocks attached to this interrupt level */

	gpe_block = gpe_xrupt_list->gpe_block_list_head;
	while (gpe_block) {
		gpe_device = gpe_block->node;

		/*
		 * Read all of the 8-bit GPE status and enable registers in this
		 * GPE block, saving all of them. Find all currently active GP
		 * events.
		 */
		for (i = 0; i < gpe_block->register_count; i++) {

			/* Get the next status/enable pair */

			gpe_register_info = &gpe_block->register_info[i];

			/*
			 * Optimization: If there are no GPEs enabled within this
			 * register, we can safely ignore the entire register.
			 */
			if (!(gpe_register_info->enable_for_run |
			      gpe_register_info->enable_for_wake)) {
				ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
						  "Ignore disabled registers for GPE %02X-%02X: "
						  "RunEnable=%02X, WakeEnable=%02X\n",
						  gpe_register_info->base_gpe_number,
						  gpe_register_info->base_gpe_number +
						  (ACPI_GPE_REGISTER_WIDTH - 1),
						  gpe_register_info->enable_for_run,
						  gpe_register_info->enable_for_wake));
				continue;
			}

			/* Now look at the individual GPEs in this byte register */

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

				/* Detect and dispatch one GPE bit */

				gpe_event_info =
				    &gpe_block->event_info[((acpi_size)i *
							    ACPI_GPE_REGISTER_WIDTH) + j];
				gpe_number =
				    j + gpe_register_info->base_gpe_number;
				acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
				int_status |=
				    acpi_ev_detect_gpe(gpe_device,
						       gpe_event_info,
						       gpe_number);
				flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
			}
		}

		gpe_block = gpe_block->next;
	}

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}

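/*
 * Example (editorial sketch, not part of the original source): the inner
 * loops above flatten (register, bit) pairs into the event_info array.
 * For a hypothetical register i == 2 and bit j == 5, in a block whose
 * first register has base_gpe_number 0x00 (so register 2 has base 0x10)
 * and with ACPI_GPE_REGISTER_WIDTH == 8:
 *
 *	event_info index = (2 * 8) + 5 = 21 (0x15)
 *	gpe_number       = 0x10 + 5   = 0x15
 *
 * The index values are invented for illustration.
 */
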
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_execute_gpe_method
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 *
 * RETURN:      None
 *
 * DESCRIPTION: Perform the actual execution of a GPE control method. This
 *              function is called from an invocation of acpi_os_execute and
 *              therefore does NOT execute at interrupt level - so that
 *              the control method itself is not executed in the context of
 *              an interrupt handler.
 *
 ******************************************************************************/

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;
	acpi_status status = AE_OK;
	struct acpi_evaluate_info *info;
	struct acpi_gpe_notify_info *notify;

	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);

	/* Do the correct dispatch - normal method or implicit notify */

	switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
	case ACPI_GPE_DISPATCH_NOTIFY:
		/*
		 * Implicit notify.
		 * Dispatch a DEVICE_WAKE notify to the appropriate handler.
		 * NOTE: the request is queued for execution after this method
		 * completes. The notify handlers are NOT invoked synchronously
		 * from this thread -- because handlers may in turn run other
		 * control methods.
		 *
		 * June 2012: Expand implicit notify mechanism to support
		 * notifies on multiple device objects.
		 */
		notify = gpe_event_info->dispatch.notify_list;
		while (ACPI_SUCCESS(status) && notify) {
			status =
			    acpi_ev_queue_notify_request(notify->device_node,
							 ACPI_NOTIFY_DEVICE_WAKE);

			notify = notify->next;
		}

		break;

	case ACPI_GPE_DISPATCH_METHOD:

		/* Allocate the evaluation information block */

		info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
		if (!info) {
			status = AE_NO_MEMORY;
		} else {
			/*
			 * Invoke the GPE Method (_Lxx, _Exx), i.e., evaluate the
			 * _Lxx/_Exx control method that corresponds to this GPE
			 */
			info->prefix_node =
			    gpe_event_info->dispatch.method_node;
			info->flags = ACPI_IGNORE_RETURN_VALUE;

			status = acpi_ns_evaluate(info);
			ACPI_FREE(info);
		}

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"while evaluating GPE method [%4.4s]",
					acpi_ut_get_node_name(gpe_event_info->
							      dispatch.
							      method_node)));
		}
		break;

	default:

		goto error_exit;	/* Should never happen */
	}

	/* Defer enabling of GPE until all notify handlers are done */

	status = acpi_os_execute(OSL_NOTIFY_HANDLER,
				 acpi_ev_asynch_enable_gpe, gpe_event_info);
	if (ACPI_SUCCESS(status)) {
		return_VOID;
	}

error_exit:
	acpi_ev_asynch_enable_gpe(gpe_event_info);
	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_enable_gpe
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE.
 *                                         Callback from acpi_os_execute
 *
 * RETURN:      None
 *
 * DESCRIPTION: Asynchronous clear/enable for GPE. This allows the GPE to
 *              complete (i.e., finish execution of Notify).
 *
 ******************************************************************************/

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;
	acpi_cpu_flags flags;

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	(void)acpi_ev_finish_gpe(gpe_event_info);
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	return;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_finish_gpe
 *
 * PARAMETERS:  gpe_event_info      - Info for this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution
 *              of a GPE method or a synchronous or asynchronous GPE handler.
 *
 ******************************************************************************/

acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_LEVEL_TRIGGERED) {
		/*
		 * GPE is level-triggered, so we clear the GPE status bit after
		 * handling the event.
		 */
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			return (status);
		}
	}

	/*
	 * Enable this GPE, conditionally. This means that the GPE will
	 * only be physically enabled if its enable bit is set in the
	 * register's enable_mask.
	 */
	(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
	gpe_event_info->disable_for_dispatch = FALSE;
	return (AE_OK);
}

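/*
 * Example (editorial sketch, not part of the original source): a
 * hypothetical synchronous GPE handler, installed via
 * acpi_install_gpe_handler(), that lets this common code clear and
 * re-enable the GPE by returning ACPI_REENABLE_GPE (see the handler
 * dispatch in acpi_ev_gpe_dispatch below):
 *
 *	static u32 my_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
 *				  void *context)
 *	{
 *		// ... service the event (device-specific work) ...
 *
 *		// Ask the core to clear (if level-triggered) and
 *		// conditionally re-enable the GPE via acpi_ev_finish_gpe()
 *		return (ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE);
 *	}
 *
 * my_gpe_handler is an invented name; the return flags are real ACPICA
 * definitions.
 */
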
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_detect_gpe
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_event_info      - Info for this GPE
 *              gpe_number          - Number relative to the parent GPE block
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Detect and dispatch a General Purpose Event to either a function
 *              (e.g. EC) or method (e.g. _Lxx/_Exx) handler.
 * NOTE:        The GPE status bits are W1C (write-1-to-clear), so it is
 *              possible to handle a single GPE from both task and irq context
 *              in parallel as long as the process to detect and mask the GPE
 *              is atomic.
 *              However, the atomicity of ACPI_GPE_DISPATCH_RAW_HANDLER is
 *              dependent on the raw handler itself.
 *
 ******************************************************************************/

u32
acpi_ev_detect_gpe(struct acpi_namespace_node *gpe_device,
		   struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	u8 enabled_status_byte;
	u64 status_reg;
	u64 enable_reg;
	u32 register_bit;
	struct acpi_gpe_register_info *gpe_register_info;
	struct acpi_gpe_handler_info *gpe_handler_info;
	acpi_cpu_flags flags;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_detect_gpe);

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	if (!gpe_event_info) {
		gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device,
							    gpe_number);
		if (!gpe_event_info)
			goto error_exit;
	}

	/* Get the info block for the entire GPE register */

	gpe_register_info = gpe_event_info->register_info;

	/* Get the register bitmask for this GPE */

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	/* GPE currently enabled (enable bit == 1)? */

	status = acpi_hw_read(&enable_reg, &gpe_register_info->enable_address);
	if (ACPI_FAILURE(status)) {
		goto error_exit;
	}

	/* GPE currently active (status bit == 1)? */

	status = acpi_hw_read(&status_reg, &gpe_register_info->status_address);
	if (ACPI_FAILURE(status)) {
		goto error_exit;
	}

	/* Check if there is anything active at all in this GPE */

	ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
			  "Read registers for GPE %02X: Status=%02X, Enable=%02X, "
			  "RunEnable=%02X, WakeEnable=%02X\n",
			  gpe_number,
			  (u32)(status_reg & register_bit),
			  (u32)(enable_reg & register_bit),
			  gpe_register_info->enable_for_run,
			  gpe_register_info->enable_for_wake));

	enabled_status_byte = (u8)(status_reg & enable_reg);
	if (!(enabled_status_byte & register_bit)) {
		goto error_exit;
	}

	/* Invoke global event handler if present */

	acpi_gpe_count++;
	if (acpi_gbl_global_event_handler) {
		acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE,
					      gpe_device, gpe_number,
					      acpi_gbl_global_event_handler_context);
	}

	/* Found an active GPE */

	if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
	    ACPI_GPE_DISPATCH_RAW_HANDLER) {

		/* Dispatch the event to a raw handler */

		gpe_handler_info = gpe_event_info->dispatch.handler;

		/*
		 * There is no protection around the namespace node
		 * and the GPE handler to ensure a safe destruction
		 * because:
		 * 1. The namespace node is expected to always
		 *    exist after loading a table.
		 * 2. The GPE handler is expected to be flushed by
		 *    acpi_os_wait_events_complete() before the
		 *    destruction.
		 */
		acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
		int_status |=
		    gpe_handler_info->address(gpe_device, gpe_number,
					      gpe_handler_info->context);
		flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	} else {
		/* Dispatch the event to a standard handler or method. */

		int_status |= acpi_ev_gpe_dispatch(gpe_device,
						   gpe_event_info, gpe_number);
	}

error_exit:
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}

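/*
 * Example (editorial sketch, not part of the original source): the
 * active-and-enabled test above is pure bit arithmetic. For a
 * hypothetical register where status_reg == 0x28, enable_reg == 0x08
 * and register_bit == 0x08:
 *
 *	enabled_status_byte = 0x28 & 0x08 = 0x08
 *	enabled_status_byte & register_bit != 0   => dispatch the GPE
 *
 * Had enable_reg been 0x20 instead, the masked result for this bit
 * would be 0 and the function would exit without dispatching. All
 * register values are invented for illustration.
 */
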
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_dispatch
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_event_info      - Info for this GPE
 *              gpe_number          - Number relative to the parent GPE block
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
 *              or method (e.g. _Lxx/_Exx) handler.
 *
 ******************************************************************************/

u32
acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
		     struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	acpi_status status;
	u32 return_value;

	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

	/*
	 * Always disable the GPE so that it does not keep firing before
	 * any asynchronous activity completes (either from the execution
	 * of a GPE method or an asynchronous GPE handler).
	 *
	 * If there is no handler or method to run, just disable the
	 * GPE and leave it disabled permanently to prevent further such
	 * pointless events from firing.
	 */
	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Unable to disable GPE %02X", gpe_number));
		return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
	}

	/*
	 * If edge-triggered, clear the GPE status bit now. Note that
	 * level-triggered events are cleared after the GPE is serviced.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to clear GPE %02X",
					gpe_number));
			(void)acpi_hw_low_set_gpe(gpe_event_info,
						  ACPI_GPE_CONDITIONAL_ENABLE);
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	gpe_event_info->disable_for_dispatch = TRUE;

	/*
	 * Dispatch the GPE to either an installed handler or the control
	 * method associated with this GPE (_Lxx or _Exx). If a handler
	 * exists, we invoke it and do not attempt to run the method.
	 * If there is neither a handler nor a method, leave the GPE
	 * disabled.
	 */
	switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
	case ACPI_GPE_DISPATCH_HANDLER:

		/* Invoke the installed handler (at interrupt level) */

		return_value =
		    gpe_event_info->dispatch.handler->address(gpe_device,
							      gpe_number,
							      gpe_event_info->
							      dispatch.handler->
							      context);

		/* If requested, clear (if level-triggered) and re-enable the GPE */

		if (return_value & ACPI_REENABLE_GPE) {
			(void)acpi_ev_finish_gpe(gpe_event_info);
		}
		break;

	case ACPI_GPE_DISPATCH_METHOD:
	case ACPI_GPE_DISPATCH_NOTIFY:
		/*
		 * Execute the method associated with the GPE.
		 * NOTE: Level-triggered GPEs are cleared after the method completes.
		 */
		status = acpi_os_execute(OSL_GPE_HANDLER,
					 acpi_ev_asynch_execute_gpe_method,
					 gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to queue handler for GPE %02X - event disabled",
					gpe_number));
		}
		break;

	default:
		/*
		 * No handler or method to run!
		 * 03/2010: This case should no longer be possible. We will not allow
		 * a GPE to be enabled if it has no handler or method.
		 */
		ACPI_ERROR((AE_INFO,
			    "No handler or method for GPE %02X, disabling event",
			    gpe_number));

		break;
	}

	return_UINT32(ACPI_INTERRUPT_HANDLED);
}

#endif				/* !ACPI_REDUCED_HARDWARE */