// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: evgpeutil - GPE utilities
 *
 * Copyright (C) 2000 - 2022, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpeutil")

#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_walk_gpe_list
 *
 * PARAMETERS:  gpe_walk_callback   - Routine called for each GPE block
 *              context             - Value passed to callback
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Walk the GPE lists.
 *
 ******************************************************************************/
acpi_status
acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
{
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_xrupt_info *gpe_xrupt_info;
	acpi_status status = AE_OK;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_walk_gpe_list);

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Walk the interrupt level descriptor list */

	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
	while (gpe_xrupt_info) {

		/* Walk all Gpe Blocks attached to this interrupt level */

		gpe_block = gpe_xrupt_info->gpe_block_list_head;
		while (gpe_block) {

			/* One callback per GPE block */

			status =
			    gpe_walk_callback(gpe_xrupt_info, gpe_block,
					      context);
			if (ACPI_FAILURE(status)) {
				if (status == AE_CTRL_END) {	/* Callback abort */
					status = AE_OK;
				}
				goto unlock_and_exit;
			}

			gpe_block = gpe_block->next;
		}

		gpe_xrupt_info = gpe_xrupt_info->next;
	}

unlock_and_exit:
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return_ACPI_STATUS(status);
}
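
/*
 * Illustrative sketch (not part of the original file): acpi_ev_walk_gpe_list()
 * invokes the supplied callback once per GPE block, with the GPE lock held.
 * Returning AE_CTRL_END from the callback ends the walk early and is reported
 * to the caller as AE_OK. The callback name and the counting logic below are
 * hypothetical; only acpi_ev_walk_gpe_list() and the gpe_count field of
 * struct acpi_gpe_block_info come from this module.
 *
 *	static acpi_status
 *	example_count_gpes(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 *			   struct acpi_gpe_block_info *gpe_block, void *context)
 *	{
 *		u32 *total = context;
 *
 *		*total += gpe_block->gpe_count;
 *		return (AE_OK);
 *	}
 *
 *	u32 total = 0;
 *
 *	(void)acpi_ev_walk_gpe_list(example_count_gpes, &total);
 */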

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_device
 *
 * PARAMETERS:  GPE_WALK_CALLBACK
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Matches the input GPE index (0 to current_gpe_count - 1) with
 *              a GPE block device. The device is left NULL if the GPE is one
 *              of the FADT-defined GPEs.
 *
 ******************************************************************************/

acpi_status
acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
		       struct acpi_gpe_block_info *gpe_block, void *context)
{
	struct acpi_gpe_device_info *info = context;

	/* Advance next_block_base_index by the number of GPEs in this block */

	info->next_block_base_index += gpe_block->gpe_count;

	if (info->index < info->next_block_base_index) {
		/*
		 * The GPE index is within this block, get the node. Leave the node
		 * NULL for the FADT-defined GPEs
		 */
		if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
			info->gpe_device = gpe_block->node;
		}

		info->status = AE_OK;
		return (AE_CTRL_END);
	}

	return (AE_OK);
}
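
/*
 * Illustrative sketch (not part of the original file): a caller resolves a
 * GPE index to its owning block device by driving this callback through
 * acpi_ev_walk_gpe_list() with a struct acpi_gpe_device_info used as the
 * context. On a match, info.status is AE_OK and info.gpe_device is either
 * the block device node or NULL for the FADT-defined GPEs. The local
 * variable names and the AE_NOT_EXIST default are assumptions.
 *
 *	struct acpi_gpe_device_info info;
 *	acpi_status status;
 *
 *	info.index = index;
 *	info.status = AE_NOT_EXIST;
 *	info.gpe_device = NULL;
 *	info.next_block_base_index = 0;
 *
 *	status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
 *	if (ACPI_SUCCESS(status)) {
 *		status = info.status;
 *	}
 */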

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_xrupt_block
 *
 * PARAMETERS:  interrupt_number            - Interrupt for a GPE block
 *              gpe_xrupt_block             - Where the block is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
 *              block per unique interrupt level used for GPEs. Should be
 *              called only when the GPE lists are semaphore locked and not
 *              subject to change.
 *
 ******************************************************************************/

acpi_status
acpi_ev_get_gpe_xrupt_block(u32 interrupt_number,
			    struct acpi_gpe_xrupt_info **gpe_xrupt_block)
{
	struct acpi_gpe_xrupt_info *next_gpe_xrupt;
	struct acpi_gpe_xrupt_info *gpe_xrupt;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);

	/* No need for lock since we are not changing any list elements here */

	next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
	while (next_gpe_xrupt) {
		if (next_gpe_xrupt->interrupt_number == interrupt_number) {
			*gpe_xrupt_block = next_gpe_xrupt;
			return_ACPI_STATUS(AE_OK);
		}

		next_gpe_xrupt = next_gpe_xrupt->next;
	}

	/* Not found, must allocate a new xrupt descriptor */

	gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
	if (!gpe_xrupt) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	gpe_xrupt->interrupt_number = interrupt_number;

	/* Install new interrupt descriptor with spin lock */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	if (acpi_gbl_gpe_xrupt_list_head) {
		next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
		while (next_gpe_xrupt->next) {
			next_gpe_xrupt = next_gpe_xrupt->next;
		}

		next_gpe_xrupt->next = gpe_xrupt;
		gpe_xrupt->previous = next_gpe_xrupt;
	} else {
		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
	}

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	/* Install new interrupt handler if not SCI_INT */

	if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
		status = acpi_os_install_interrupt_handler(interrupt_number,
							   acpi_ev_gpe_xrupt_handler,
							   gpe_xrupt);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Could not install GPE interrupt handler at level 0x%X",
					interrupt_number));
			return_ACPI_STATUS(status);
		}
	}

	*gpe_xrupt_block = gpe_xrupt;
	return_ACPI_STATUS(AE_OK);
}
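
/*
 * Illustrative sketch (not part of the original file): a GPE block installer
 * obtains the interrupt descriptor for its interrupt level in this
 * get-or-create style; an existing descriptor is reused, otherwise a new one
 * is allocated and, for non-SCI interrupts, a handler is installed. The
 * surrounding error handling and variable names are assumptions.
 *
 *	struct acpi_gpe_xrupt_info *gpe_xrupt_block;
 *	acpi_status status;
 *
 *	status = acpi_ev_get_gpe_xrupt_block(interrupt_number,
 *					     &gpe_xrupt_block);
 *	if (ACPI_FAILURE(status)) {
 *		return_ACPI_STATUS(status);
 *	}
 */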

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_xrupt
 *
 * PARAMETERS:  gpe_xrupt       - A GPE interrupt info block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
 *              interrupt handler if not the SCI interrupt.
 *
 ******************************************************************************/

acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
{
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);

	/* We never want to remove the SCI interrupt handler */

	if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
		gpe_xrupt->gpe_block_list_head = NULL;
		return_ACPI_STATUS(AE_OK);
	}

	/* Disable this interrupt */

	status =
	    acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
					     acpi_ev_gpe_xrupt_handler);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Unlink the interrupt block with lock */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	if (gpe_xrupt->previous) {
		gpe_xrupt->previous->next = gpe_xrupt->next;
	} else {
		/* No previous, update list head */

		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
	}

	if (gpe_xrupt->next) {
		gpe_xrupt->next->previous = gpe_xrupt->previous;
	}
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	/* Free the block */

	ACPI_FREE(gpe_xrupt);
	return_ACPI_STATUS(AE_OK);
}
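
/*
 * Illustrative sketch (not part of the original file): once the last GPE
 * block attached to an interrupt descriptor has been removed, the descriptor
 * itself can be released. The emptiness check shown here is an assumption;
 * only acpi_ev_delete_gpe_xrupt() and the gpe_block_list_head field come
 * from this module.
 *
 *	if (!gpe_xrupt_info->gpe_block_list_head) {
 *		status = acpi_ev_delete_gpe_xrupt(gpe_xrupt_info);
 *	}
 */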

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_handlers
 *
 * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
 *              gpe_block           - Gpe Block info
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
 *              Used only prior to termination.
 *
 ******************************************************************************/

acpi_status
acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
			    struct acpi_gpe_block_info *gpe_block,
			    void *context)
{
	struct acpi_gpe_event_info *gpe_event_info;
	struct acpi_gpe_notify_info *notify;
	struct acpi_gpe_notify_info *next;
	u32 i;
	u32 j;

	ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);

	/* Examine each GPE Register within the block */

	for (i = 0; i < gpe_block->register_count; i++) {

		/* Now look at the individual GPEs in this byte register */

		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
			gpe_event_info = &gpe_block->event_info[((acpi_size)i *
								 ACPI_GPE_REGISTER_WIDTH)
								+ j];

			if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
			     ACPI_GPE_DISPATCH_HANDLER) ||
			    (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
			     ACPI_GPE_DISPATCH_RAW_HANDLER)) {

				/* Delete an installed handler block */

				ACPI_FREE(gpe_event_info->dispatch.handler);
				gpe_event_info->dispatch.handler = NULL;
				gpe_event_info->flags &=
				    ~ACPI_GPE_DISPATCH_MASK;
			} else if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)
				   == ACPI_GPE_DISPATCH_NOTIFY) {

				/* Delete the implicit notification device list */

				notify = gpe_event_info->dispatch.notify_list;
				while (notify) {
					next = notify->next;
					ACPI_FREE(notify);
					notify = next;
				}

				gpe_event_info->dispatch.notify_list = NULL;
				gpe_event_info->flags &=
				    ~ACPI_GPE_DISPATCH_MASK;
			}
		}
	}

	return_ACPI_STATUS(AE_OK);
}
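
/*
 * Illustrative sketch (not part of the original file): because this routine
 * matches the acpi_gpe_callback prototype and ignores its context argument,
 * a termination path can apply it to every GPE block through the list walker
 * defined above.
 *
 *	(void)acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL);
 */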

#endif				/* !ACPI_REDUCED_HARDWARE */