/******************************************************************************
 *
 * Module Name: evgpeutil - GPE utilities
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2014, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpeutil")

#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_walk_gpe_list
 *
 * PARAMETERS:  gpe_walk_callback   - Routine called for each GPE block
 *              context             - Value passed to callback
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Walk the GPE lists.
 *
 ******************************************************************************/
acpi_status
acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
{
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_xrupt_info *gpe_xrupt_info;
	acpi_status status = AE_OK;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_walk_gpe_list);

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Walk the interrupt level descriptor list */

	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
	while (gpe_xrupt_info) {

		/* Walk all Gpe Blocks attached to this interrupt level */

		gpe_block = gpe_xrupt_info->gpe_block_list_head;
		while (gpe_block) {

			/* One callback per GPE block */

			status =
			    gpe_walk_callback(gpe_xrupt_info, gpe_block,
					      context);
			if (ACPI_FAILURE(status)) {
				if (status == AE_CTRL_END) {	/* Callback abort */
					status = AE_OK;
				}
				goto unlock_and_exit;
			}

			gpe_block = gpe_block->next;
		}

		gpe_xrupt_info = gpe_xrupt_info->next;
	}

unlock_and_exit:
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return_ACPI_STATUS(status);
}

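/*
 * Editorial usage sketch (not part of the original source): callers package
 * a per-block operation as an acpi_gpe_callback and let this routine handle
 * the locking and the two-level list walk. For instance, resolving a GPE
 * index to its block device (see acpi_ev_get_gpe_device below) is done along
 * these lines by acpi_get_gpe_device():
 *
 *	struct acpi_gpe_device_info info;
 *	acpi_status status;
 *
 *	info.index = index;
 *	info.status = AE_NOT_EXIST;
 *	info.gpe_device = NULL;
 *	info.next_block_base_index = 0;
 *
 *	status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
 *
 * A callback returns AE_CTRL_END to stop the walk early; this routine
 * translates that into AE_OK for its caller.
 */
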
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_valid_gpe_event
 *
 * PARAMETERS:  gpe_event_info              - Info for this GPE
 *
 * RETURN:      TRUE if the gpe_event is valid
 *
 * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
 *              Should be called only when the GPE lists are semaphore locked
 *              and not subject to change.
 *
 ******************************************************************************/

u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
{
	struct acpi_gpe_xrupt_info *gpe_xrupt_block;
	struct acpi_gpe_block_info *gpe_block;

	ACPI_FUNCTION_ENTRY();

	/* No need for spin lock since we are not changing any list elements */

	/* Walk the GPE interrupt levels */

	gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
	while (gpe_xrupt_block) {
		gpe_block = gpe_xrupt_block->gpe_block_list_head;

		/* Walk the GPE blocks on this interrupt level */

		while (gpe_block) {
			if ((&gpe_block->event_info[0] <= gpe_event_info) &&
			    (&gpe_block->event_info[gpe_block->gpe_count] >
			     gpe_event_info)) {
				return (TRUE);
			}

			gpe_block = gpe_block->next;
		}

		gpe_xrupt_block = gpe_xrupt_block->next;
	}

	return (FALSE);
}

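/*
 * Editorial note (not part of the original source): the range test above
 * accepts gpe_event_info only if it points at an element of some registered
 * block's event_info array, i.e. it equals &event_info[n] for some
 * 0 <= n < gpe_count. The upper bound compares against
 * &event_info[gpe_count], one element past the end of the array, which is
 * why a strict greater-than is used. A hypothetical equivalent check, once
 * the lower bound is known to hold, would be:
 *
 *	valid = ((acpi_size) (gpe_event_info - gpe_block->event_info) <
 *		 gpe_block->gpe_count);
 */
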
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_device
 *
 * PARAMETERS:  GPE_WALK_CALLBACK
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
 *              block device. NULL if the GPE is one of the FADT-defined GPEs.
 *
 ******************************************************************************/

acpi_status
acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
		       struct acpi_gpe_block_info *gpe_block, void *context)
{
	struct acpi_gpe_device_info *info = context;

	/* Increment Index by the number of GPEs in this block */

	info->next_block_base_index += gpe_block->gpe_count;

	if (info->index < info->next_block_base_index) {
		/*
		 * The GPE index is within this block, get the node. Leave the node
		 * NULL for the FADT-defined GPEs
		 */
		if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
			info->gpe_device = gpe_block->node;
		}

		info->status = AE_OK;
		return (AE_CTRL_END);
	}

	return (AE_OK);
}

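/*
 * Editorial example (not part of the original source): next_block_base_index
 * accumulates block sizes in walk order. With a hypothetical layout of a
 * 16-GPE FADT block followed by a 32-GPE GPE block device, a lookup of
 * index 20 proceeds as:
 *
 *	block 0: next_block_base_index = 16, 20 < 16 is false -> keep walking
 *	block 1: next_block_base_index = 48, 20 < 48 is true  -> match here
 *
 * The walk then stops via AE_CTRL_END, with gpe_device left NULL only when
 * the matching block is one of the FADT-defined GPE blocks.
 */
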
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_xrupt_block
 *
 * PARAMETERS:  interrupt_number            - Interrupt for a GPE block
 *              gpe_xrupt_block             - Where the block is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
 *              block per unique interrupt level used for GPEs. Should be
 *              called only when the GPE lists are semaphore locked and not
 *              subject to change.
 *
 ******************************************************************************/

acpi_status
acpi_ev_get_gpe_xrupt_block(u32 interrupt_number,
			    struct acpi_gpe_xrupt_info ** gpe_xrupt_block)
{
	struct acpi_gpe_xrupt_info *next_gpe_xrupt;
	struct acpi_gpe_xrupt_info *gpe_xrupt;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);

	/* No need for lock since we are not changing any list elements here */

	next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
	while (next_gpe_xrupt) {
		if (next_gpe_xrupt->interrupt_number == interrupt_number) {
			*gpe_xrupt_block = next_gpe_xrupt;
			return_ACPI_STATUS(AE_OK);
		}

		next_gpe_xrupt = next_gpe_xrupt->next;
	}

	/* Not found, must allocate a new xrupt descriptor */

	gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
	if (!gpe_xrupt) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	gpe_xrupt->interrupt_number = interrupt_number;

	/* Install new interrupt descriptor with spin lock */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	if (acpi_gbl_gpe_xrupt_list_head) {
		next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
		while (next_gpe_xrupt->next) {
			next_gpe_xrupt = next_gpe_xrupt->next;
		}

		next_gpe_xrupt->next = gpe_xrupt;
		gpe_xrupt->previous = next_gpe_xrupt;
	} else {
		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
	}

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	/* Install new interrupt handler if not SCI_INT */

	if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
		status = acpi_os_install_interrupt_handler(interrupt_number,
							   acpi_ev_gpe_xrupt_handler,
							   gpe_xrupt);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Could not install GPE interrupt handler at level 0x%X",
					interrupt_number));
			return_ACPI_STATUS(status);
		}
	}

	*gpe_xrupt_block = gpe_xrupt;
	return_ACPI_STATUS(AE_OK);
}

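/*
 * Editorial usage sketch (not part of the original source, names as used in
 * this revision): a GPE block installer resolves its interrupt descriptor
 * first and then chains the new block onto it, roughly:
 *
 *	struct acpi_gpe_xrupt_info *gpe_xrupt_block;
 *	acpi_status status;
 *
 *	status = acpi_ev_get_gpe_xrupt_block(interrupt_number,
 *					     &gpe_xrupt_block);
 *	if (ACPI_FAILURE(status)) {
 *		return_ACPI_STATUS(status);
 *	}
 *
 *	(then link the new GPE block onto gpe_xrupt_block->gpe_block_list_head
 *	 under acpi_gbl_gpe_lock)
 *
 * Because exactly one descriptor exists per unique interrupt level, repeated
 * calls with the same interrupt_number return the same descriptor.
 */
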
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_xrupt
 *
 * PARAMETERS:  gpe_xrupt       - A GPE interrupt info block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
 *              interrupt handler if not the SCI interrupt.
 *
 ******************************************************************************/

acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
{
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);

	/* We never want to remove the SCI interrupt handler */

	if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
		gpe_xrupt->gpe_block_list_head = NULL;
		return_ACPI_STATUS(AE_OK);
	}

	/* Disable this interrupt */

	status =
	    acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
					     acpi_ev_gpe_xrupt_handler);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Unlink the interrupt block with lock */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	if (gpe_xrupt->previous) {
		gpe_xrupt->previous->next = gpe_xrupt->next;
	} else {
		/* No previous, update list head */

		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
	}

	if (gpe_xrupt->next) {
		gpe_xrupt->next->previous = gpe_xrupt->previous;
	}
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	/* Free the block */

	ACPI_FREE(gpe_xrupt);
	return_ACPI_STATUS(AE_OK);
}

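/*
 * Editorial note (not part of the original source): this is the teardown
 * counterpart of acpi_ev_get_gpe_xrupt_block. A caller that has detached the
 * last GPE block on an interrupt level releases the descriptor with:
 *
 *	status = acpi_ev_delete_gpe_xrupt(gpe_xrupt);
 *
 * For the SCI interrupt the descriptor and its installed handler are kept;
 * only the block list head is cleared above.
 */
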
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_handlers
 *
 * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
 *              gpe_block           - Gpe Block info
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
 *              Used only prior to termination.
 *
 ******************************************************************************/

acpi_status
acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
			    struct acpi_gpe_block_info *gpe_block,
			    void *context)
{
	struct acpi_gpe_event_info *gpe_event_info;
	struct acpi_gpe_notify_info *notify;
	struct acpi_gpe_notify_info *next;
	u32 i;
	u32 j;

	ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);

	/* Examine each GPE Register within the block */

	for (i = 0; i < gpe_block->register_count; i++) {

		/* Now look at the individual GPEs in this byte register */

		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
			gpe_event_info = &gpe_block->event_info[((acpi_size) i *
								 ACPI_GPE_REGISTER_WIDTH)
								+ j];

			if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
			    ACPI_GPE_DISPATCH_HANDLER) {

				/* Delete an installed handler block */

				ACPI_FREE(gpe_event_info->dispatch.handler);
				gpe_event_info->dispatch.handler = NULL;
				gpe_event_info->flags &=
				    ~ACPI_GPE_DISPATCH_MASK;
			} else if ((gpe_event_info->flags &
				    ACPI_GPE_DISPATCH_MASK) ==
				   ACPI_GPE_DISPATCH_NOTIFY) {

				/* Delete the implicit notification device list */

				notify = gpe_event_info->dispatch.notify_list;
				while (notify) {
					next = notify->next;
					ACPI_FREE(notify);
					notify = next;
				}
				gpe_event_info->dispatch.notify_list = NULL;
				gpe_event_info->flags &=
				    ~ACPI_GPE_DISPATCH_MASK;
			}
		}
	}

	return_ACPI_STATUS(AE_OK);
}

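/*
 * Editorial usage sketch (not part of the original source): this routine is
 * itself a GPE walk callback. At ACPICA termination the per-GPE handler and
 * implicit-notify structures are released with a walk along the lines of:
 *
 *	status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL);
 *
 * The context argument is unused here, so NULL is passed.
 */
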
#endif				/* !ACPI_REDUCED_HARDWARE */