xref: /openbmc/linux/drivers/acpi/acpica/evgpe.c (revision 6ee73861)
1 /******************************************************************************
2  *
3  * Module Name: evgpe - General Purpose Event handling and dispatch
4  *
5  *****************************************************************************/
6 
7 /*
8  * Copyright (C) 2000 - 2008, Intel Corp.
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions, and the following disclaimer,
16  *    without modification.
17  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18  *    substantially similar to the "NO WARRANTY" disclaimer below
19  *    ("Disclaimer") and any redistribution must be conditioned upon
20  *    including a substantially similar Disclaimer requirement for further
21  *    binary redistribution.
22  * 3. Neither the names of the above-listed copyright holders nor the names
23  *    of any contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * Alternatively, this software may be distributed under the terms of the
27  * GNU General Public License ("GPL") version 2 as published by the Free
28  * Software Foundation.
29  *
30  * NO WARRANTY
31  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41  * POSSIBILITY OF SUCH DAMAGES.
42  */
43 
44 #include <acpi/acpi.h>
45 #include "accommon.h"
46 #include "acevents.h"
47 #include "acnamesp.h"
48 
49 #define _COMPONENT          ACPI_EVENTS
50 ACPI_MODULE_NAME("evgpe")
51 
52 /* Local prototypes */
53 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
54 
55 /*******************************************************************************
56  *
57  * FUNCTION:    acpi_ev_set_gpe_type
58  *
59  * PARAMETERS:  gpe_event_info          - GPE to set
60  *              Type                    - New type
61  *
62  * RETURN:      Status
63  *
64  * DESCRIPTION: Sets the new type for the GPE (wake, run, or wake/run)
65  *
66  ******************************************************************************/
67 
68 acpi_status
69 acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type)
70 {
71 	acpi_status status;
72 
73 	ACPI_FUNCTION_TRACE(ev_set_gpe_type);
74 
75 	/* Validate type and update register enable masks */
76 
77 	switch (type) {
78 	case ACPI_GPE_TYPE_WAKE:
79 	case ACPI_GPE_TYPE_RUNTIME:
80 	case ACPI_GPE_TYPE_WAKE_RUN:
81 		break;
82 
83 	default:
84 		return_ACPI_STATUS(AE_BAD_PARAMETER);
85 	}
86 
87 	/* Disable the GPE if currently enabled */
88 
89 	status = acpi_ev_disable_gpe(gpe_event_info);
90 
91 	/* Clear the type bits and insert the new Type */
92 
93 	gpe_event_info->flags &= ~ACPI_GPE_TYPE_MASK;
94 	gpe_event_info->flags |= type;
95 	return_ACPI_STATUS(status);
96 }
97 
98 /*******************************************************************************
99  *
100  * FUNCTION:    acpi_ev_update_gpe_enable_masks
101  *
102  * PARAMETERS:  gpe_event_info          - GPE to update
103  *              Type                    - What to do: ACPI_GPE_DISABLE or
104  *                                        ACPI_GPE_ENABLE
105  *
106  * RETURN:      Status
107  *
108  * DESCRIPTION: Updates GPE register enable masks based on the GPE type
109  *
110  ******************************************************************************/
111 
112 acpi_status
113 acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
114 				u8 type)
115 {
116 	struct acpi_gpe_register_info *gpe_register_info;
117 	u8 register_bit;
118 
119 	ACPI_FUNCTION_TRACE(ev_update_gpe_enable_masks);
120 
121 	gpe_register_info = gpe_event_info->register_info;
122 	if (!gpe_register_info) {
123 		return_ACPI_STATUS(AE_NOT_EXIST);
124 	}
125 
126 	register_bit = (u8)
127 	    (1 <<
128 	     (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
129 
130 	/* 1) Disable case. Simply clear all enable bits */
131 
132 	if (type == ACPI_GPE_DISABLE) {
133 		ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
134 			       register_bit);
135 		ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
136 		return_ACPI_STATUS(AE_OK);
137 	}
138 
139 	/* 2) Enable case. Set/Clear the appropriate enable bits */
140 
141 	switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
142 	case ACPI_GPE_TYPE_WAKE:
143 		ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
144 		ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
145 		break;
146 
147 	case ACPI_GPE_TYPE_RUNTIME:
148 		ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
149 			       register_bit);
150 		ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
151 		break;
152 
153 	case ACPI_GPE_TYPE_WAKE_RUN:
154 		ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
155 		ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
156 		break;
157 
158 	default:
159 		return_ACPI_STATUS(AE_BAD_PARAMETER);
160 	}
161 
162 	return_ACPI_STATUS(AE_OK);
163 }
164 
165 /*******************************************************************************
166  *
167  * FUNCTION:    acpi_ev_enable_gpe
168  *
169  * PARAMETERS:  gpe_event_info          - GPE to enable
170  *              write_to_hardware       - Enable now, or just mark data structs
171  *                                        (WAKE GPEs should be deferred)
172  *
173  * RETURN:      Status
174  *
175  * DESCRIPTION: Enable a GPE based on the GPE type
176  *
177  ******************************************************************************/
178 
179 acpi_status
180 acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
181 		   u8 write_to_hardware)
182 {
183 	acpi_status status;
184 
185 	ACPI_FUNCTION_TRACE(ev_enable_gpe);
186 
187 	/* Make sure HW enable masks are updated */
188 
189 	status =
190 	    acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_ENABLE);
191 	if (ACPI_FAILURE(status)) {
192 		return_ACPI_STATUS(status);
193 	}
194 
195 	/* Mark wake-enabled or HW enable, or both */
196 
197 	switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
198 	case ACPI_GPE_TYPE_WAKE:
199 
200 		ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
201 		break;
202 
203 	case ACPI_GPE_TYPE_WAKE_RUN:
204 
205 		ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
206 
207 		/*lint -fallthrough */
208 
209 	case ACPI_GPE_TYPE_RUNTIME:
210 
211 		ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
212 
213 		if (write_to_hardware) {
214 
215 			/* Clear the GPE (of stale events), then enable it */
216 
217 			status = acpi_hw_clear_gpe(gpe_event_info);
218 			if (ACPI_FAILURE(status)) {
219 				return_ACPI_STATUS(status);
220 			}
221 
222 			/* Enable the requested runtime GPE */
223 
224 			status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
225 		}
226 		break;
227 
228 	default:
229 		return_ACPI_STATUS(AE_BAD_PARAMETER);
230 	}
231 
232 	return_ACPI_STATUS(AE_OK);
233 }
234 
235 /*******************************************************************************
236  *
237  * FUNCTION:    acpi_ev_disable_gpe
238  *
239  * PARAMETERS:  gpe_event_info          - GPE to disable
240  *
241  * RETURN:      Status
242  *
243  * DESCRIPTION: Disable a GPE based on the GPE type
244  *
245  ******************************************************************************/
246 
247 acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
248 {
249 	acpi_status status;
250 
251 	ACPI_FUNCTION_TRACE(ev_disable_gpe);
252 
253 	/* Make sure HW enable masks are updated */
254 
255 	status =
256 	    acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_DISABLE);
257 	if (ACPI_FAILURE(status)) {
258 		return_ACPI_STATUS(status);
259 	}
260 
261 	/* Clear the appropriate enabled flags for this GPE */
262 
263 	switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
264 	case ACPI_GPE_TYPE_WAKE:
265 		ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
266 		break;
267 
268 	case ACPI_GPE_TYPE_WAKE_RUN:
269 		ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
270 
271 		/* fallthrough */
272 
273 	case ACPI_GPE_TYPE_RUNTIME:
274 
275 		/* Disable the requested runtime GPE */
276 
277 		ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
278 		break;
279 
280 	default:
281 		break;
282 	}
283 
284 	/*
285 	 * Even if we don't know the GPE type, make sure that we always
286 	 * disable it. low_disable_gpe will just clear the enable bit for this
287 	 * GPE and write it. It will not write out the current GPE enable mask,
288 	 * since this may inadvertently enable GPEs too early, if a rogue GPE has
289 	 * come in during ACPICA initialization - possibly as a result of AML or
290 	 * other code that has enabled the GPE.
291 	 */
292 	status = acpi_hw_low_disable_gpe(gpe_event_info);
293 	return_ACPI_STATUS(status);
294 }
295 
296 /*******************************************************************************
297  *
298  * FUNCTION:    acpi_ev_get_gpe_event_info
299  *
300  * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
301  *              gpe_number          - Raw GPE number
302  *
303  * RETURN:      A GPE event_info struct. NULL if not a valid GPE
304  *
305  * DESCRIPTION: Returns the event_info struct associated with this GPE.
306  *              Validates the gpe_block and the gpe_number
307  *
308  *              Should be called only when the GPE lists are semaphore locked
309  *              and not subject to change.
310  *
311  ******************************************************************************/
312 
313 struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
314 						       u32 gpe_number)
315 {
316 	union acpi_operand_object *obj_desc;
317 	struct acpi_gpe_block_info *gpe_block;
318 	u32 i;
319 
320 	ACPI_FUNCTION_ENTRY();
321 
322 	/* A NULL gpe_block means use the FADT-defined GPE block(s) */
323 
324 	if (!gpe_device) {
325 
326 		/* Examine GPE Block 0 and 1 (These blocks are permanent) */
327 
328 		for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
329 			gpe_block = acpi_gbl_gpe_fadt_blocks[i];
330 			if (gpe_block) {
331 				if ((gpe_number >= gpe_block->block_base_number)
332 				    && (gpe_number <
333 					gpe_block->block_base_number +
334 					(gpe_block->register_count * 8))) {
335 					return (&gpe_block->
336 						event_info[gpe_number -
337 							   gpe_block->
338 							   block_base_number]);
339 				}
340 			}
341 		}
342 
343 		/* The gpe_number was not in the range of either FADT GPE block */
344 
345 		return (NULL);
346 	}
347 
348 	/* A Non-NULL gpe_device means this is a GPE Block Device */
349 
350 	obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *)
351 					       gpe_device);
352 	if (!obj_desc || !obj_desc->device.gpe_block) {
353 		return (NULL);
354 	}
355 
356 	gpe_block = obj_desc->device.gpe_block;
357 
358 	if ((gpe_number >= gpe_block->block_base_number) &&
359 	    (gpe_number <
360 	     gpe_block->block_base_number + (gpe_block->register_count * 8))) {
361 		return (&gpe_block->
362 			event_info[gpe_number - gpe_block->block_base_number]);
363 	}
364 
365 	return (NULL);
366 }
367 
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_detect
 *
 * PARAMETERS:  gpe_xrupt_list      - Interrupt block for this interrupt.
 *                                    Can have multiple GPE blocks attached.
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Detect if any GP events have occurred. This function is
 *              executed at interrupt level. Walks every GPE block attached
 *              to this interrupt, reads each status/enable register pair,
 *              and dispatches every GPE that is both enabled and asserted.
 *
 ******************************************************************************/

u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_register_info *gpe_register_info;
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	u8 enabled_status_byte;
	u32 status_reg;
	u32 enable_reg;
	acpi_cpu_flags flags;
	u32 i;
	u32 j;

	ACPI_FUNCTION_NAME(ev_gpe_detect);

	/* Check for the case where there are no GPEs */

	if (!gpe_xrupt_list) {
		return (int_status);
	}

	/*
	 * We need to obtain the GPE lock for both the data structs and registers
	 * Note: Not necessary to obtain the hardware lock, since the GPE
	 * registers are owned by the gpe_lock.
	 */
	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Examine all GPE blocks attached to this interrupt level */

	gpe_block = gpe_xrupt_list->gpe_block_list_head;
	while (gpe_block) {
		/*
		 * Read all of the 8-bit GPE status and enable registers in this GPE
		 * block, saving all of them. Find all currently active GP events.
		 */
		for (i = 0; i < gpe_block->register_count; i++) {

			/* Get the next status/enable pair */

			gpe_register_info = &gpe_block->register_info[i];

			/* Read the Status Register */

			status =
			    acpi_hw_read(&status_reg,
					 &gpe_register_info->status_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			/* Read the Enable Register */

			status =
			    acpi_hw_read(&enable_reg,
					 &gpe_register_info->enable_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
					  "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n",
					  gpe_register_info->base_gpe_number,
					  status_reg, enable_reg));

			/* Check if there is anything active at all in this register */

			/* A GPE is pending only if it is both asserted and enabled */

			enabled_status_byte = (u8) (status_reg & enable_reg);
			if (!enabled_status_byte) {

				/* No active GPEs in this register, move on */

				continue;
			}

			/* Now look at the individual GPEs in this byte register */

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

				/* Examine one GPE bit */

				if (enabled_status_byte & (1 << j)) {
					/*
					 * Found an active GPE. Dispatch the event to a handler
					 * or method. Any single handled GPE marks the whole
					 * interrupt as handled.
					 */
					int_status |=
					    acpi_ev_gpe_dispatch(&gpe_block->
						event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
				}
			}
		}

		gpe_block = gpe_block->next;
	}

      unlock_and_exit:

	/* Single exit point: release the GPE lock on all paths */

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}
483 
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_execute_gpe_method
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 *
 * RETURN:      None
 *
 * DESCRIPTION: Perform the actual execution of a GPE control method. This
 *              function is called from an invocation of acpi_os_execute and
 *              therefore does NOT execute at interrupt level - so that
 *              the control method itself is not executed in the context of
 *              an interrupt handler. When the method has completed, re-enable
 *              of the GPE is deferred to acpi_ev_asynch_enable_gpe.
 *
 ******************************************************************************/
/* Forward declaration: deferred GPE re-enable, queued at the end below */
static void acpi_ev_asynch_enable_gpe(void *context);

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = (void *)context;
	acpi_status status;
	struct acpi_gpe_event_info local_gpe_event_info;
	struct acpi_evaluate_info *info;

	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_VOID;
	}

	/*
	 * Must revalidate the gpe_number/gpe_block - the GPE block could have
	 * been removed between the interrupt and this deferred execution
	 */

	if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
		status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
		return_VOID;
	}

	/* Set the GPE flags for return to enabled state (data structs only) */

	(void)acpi_ev_enable_gpe(gpe_event_info, FALSE);

	/*
	 * Take a snapshot of the GPE info for this level - we copy the info to
	 * prevent a race condition with remove_handler/remove_block.
	 */
	ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info,
		    sizeof(struct acpi_gpe_event_info));

	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_VOID;
	}

	/*
	 * Must check for control method type dispatch one more time to avoid a
	 * race with ev_gpe_install_handler
	 */
	if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) ==
	    ACPI_GPE_DISPATCH_METHOD) {

		/* Allocate the evaluation information block */

		info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
		if (!info) {
			status = AE_NO_MEMORY;
		} else {
			/*
			 * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
			 * control method that corresponds to this GPE
			 */
			info->prefix_node =
			    local_gpe_event_info.dispatch.method_node;
			info->flags = ACPI_IGNORE_RETURN_VALUE;

			status = acpi_ns_evaluate(info);
			ACPI_FREE(info);
		}

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"while evaluating GPE method [%4.4s]",
					acpi_ut_get_node_name
					(local_gpe_event_info.dispatch.
					 method_node)));
		}
	}
	/* Defer enabling of GPE until all notify handlers are done */
	acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe,
				gpe_event_info);
	return_VOID;
}
576 
577 static void acpi_ev_asynch_enable_gpe(void *context)
578 {
579 	struct acpi_gpe_event_info *gpe_event_info = context;
580 	acpi_status status;
581 	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
582 	    ACPI_GPE_LEVEL_TRIGGERED) {
583 		/*
584 		 * GPE is level-triggered, we clear the GPE status bit after handling
585 		 * the event.
586 		 */
587 		status = acpi_hw_clear_gpe(gpe_event_info);
588 		if (ACPI_FAILURE(status)) {
589 			return_VOID;
590 		}
591 	}
592 
593 	/* Enable this GPE */
594 	(void)acpi_hw_write_gpe_enable_reg(gpe_event_info);
595 	return_VOID;
596 }
597 
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_dispatch
 *
 * PARAMETERS:  gpe_event_info  - Info for this GPE
 *              gpe_number      - Number relative to the parent GPE block
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
 *              or method (e.g. _Lxx/_Exx) handler. A GPE with neither is
 *              disabled so it cannot keep firing.
 *
 *              This function executes at interrupt level.
 *
 ******************************************************************************/

u32
acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

	/* Statistics: count every GPE occurrence for this gpe_number */

	acpi_os_gpe_count(gpe_number);

	/*
	 * If edge-triggered, clear the GPE status bit now. Note that
	 * level-triggered events are cleared after the GPE is serviced.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to clear GPE[%2X]",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	/*
	 * Dispatch the GPE to either an installed handler, or the control method
	 * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
	 * it and do not attempt to run the method. If there is neither a handler
	 * nor a method, we disable this GPE to prevent further such pointless
	 * events from firing.
	 */
	switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
	case ACPI_GPE_DISPATCH_HANDLER:

		/*
		 * Invoke the installed handler (at interrupt level)
		 * Ignore return status for now.
		 * TBD: leave GPE disabled on error?
		 */
		(void)gpe_event_info->dispatch.handler->address(gpe_event_info->
								dispatch.
								handler->
								context);

		/* It is now safe to clear level-triggered events. */

		if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
		    ACPI_GPE_LEVEL_TRIGGERED) {
			status = acpi_hw_clear_gpe(gpe_event_info);
			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"Unable to clear GPE[%2X]",
						gpe_number));
				return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
			}
		}
		break;

	case ACPI_GPE_DISPATCH_METHOD:

		/*
		 * Disable the GPE, so it doesn't keep firing before the method has a
		 * chance to run (it runs asynchronously with interrupts enabled).
		 * It is re-enabled later by acpi_ev_asynch_enable_gpe.
		 */
		status = acpi_ev_disable_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to disable GPE[%2X]",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}

		/*
		 * Execute the method associated with the GPE
		 * NOTE: Level-triggered GPEs are cleared after the method completes.
		 */
		status = acpi_os_execute(OSL_GPE_HANDLER,
					 acpi_ev_asynch_execute_gpe_method,
					 gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to queue handler for GPE[%2X] - event disabled",
					gpe_number));
		}
		break;

	default:

		/* No handler or method to run! */

		ACPI_ERROR((AE_INFO,
			    "No handler or method for GPE[%2X], disabling event",
			    gpe_number));

		/*
		 * Disable the GPE. The GPE will remain disabled until the ACPICA
		 * Core Subsystem is restarted, or a handler is installed.
		 */
		status = acpi_ev_disable_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to disable GPE[%2X]",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
		break;
	}

	return_UINT32(ACPI_INTERRUPT_HANDLED);
}
724