// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: evgpeinit - System GPE initialization and update
 *
 * Copyright (C) 2000 - 2022, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpeinit")
#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
/*
 * Note: History of _PRW support in ACPICA
 *
 * Originally (2000 - 2010), the GPE initialization code performed a walk of
 * the entire namespace to execute the _PRW methods and detect all GPEs
 * capable of waking the system.
 *
 * As of 10/2010, the _PRW method execution has been removed since it is
 * actually unnecessary. The host OS must in fact execute all _PRW methods
 * in order to identify the device/power-resource dependencies. We now put
 * the onus on the host OS to identify the wake GPEs as part of this process
 * and to inform ACPICA of these GPEs via the acpi_setup_gpe_for_wake
 * interface. This not only reduces the complexity of the ACPICA
 * initialization code, but in some cases (on systems with very large
 * namespaces) it should reduce the kernel boot time as well.
 */
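
/*
 * Illustrative sketch only (hedged; not part of this module): once the host
 * OS has evaluated a device's _PRW and extracted the wake GPE it references,
 * it would typically inform ACPICA along these lines, where wake_device and
 * gpe_number are values obtained from the _PRW package and a NULL gpe_device
 * selects the FADT-defined GPE blocks:
 *
 *	status = acpi_setup_gpe_for_wake(wake_device, NULL, gpe_number);
 *	if (ACPI_SUCCESS(status)) {
 *		(void)acpi_set_gpe_wake_mask(NULL, gpe_number, ACPI_GPE_ENABLE);
 *	}
 */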

#ifdef ACPI_GPE_USE_LOGICAL_ADDRESSES
#define ACPI_FADT_GPE_BLOCK_ADDRESS(N)	\
	acpi_gbl_FADT.xgpe##N##_block.space_id == \
					ACPI_ADR_SPACE_SYSTEM_MEMORY ? \
		(u64)acpi_gbl_xgpe##N##_block_logical_address : \
		acpi_gbl_FADT.xgpe##N##_block.address
#else
#define ACPI_FADT_GPE_BLOCK_ADDRESS(N)	acpi_gbl_FADT.xgpe##N##_block.address
#endif		/* ACPI_GPE_USE_LOGICAL_ADDRESSES */
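
/*
 * For reference, a rough expansion of the macro above for the FADT GPE0
 * block when ACPI_GPE_USE_LOGICAL_ADDRESSES is defined. The mapped logical
 * address is used only when the X_GPE0_BLK GAS places the block in system
 * memory; otherwise the raw FADT address is used unchanged:
 *
 *	address = ACPI_FADT_GPE_BLOCK_ADDRESS(0);
 *
 * expands to approximately:
 *
 *	address = (acpi_gbl_FADT.xgpe0_block.space_id ==
 *		   ACPI_ADR_SPACE_SYSTEM_MEMORY) ?
 *	    (u64)acpi_gbl_xgpe0_block_logical_address :
 *	    acpi_gbl_FADT.xgpe0_block.address;
 */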

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_initialize
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize the GPE data structures and the FADT GPE 0/1 blocks
 *
 ******************************************************************************/
acpi_status acpi_ev_gpe_initialize(void)
{
	u32 register_count0 = 0;
	u32 register_count1 = 0;
	u32 gpe_number_max = 0;
	acpi_status status;
	u64 address;

	ACPI_FUNCTION_TRACE(ev_gpe_initialize);

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
			      "Initializing General Purpose Events (GPEs):\n"));

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * Initialize the GPE Block(s) defined in the FADT
	 *
	 * Why the GPE register block lengths are divided by 2:  From the ACPI
	 * Spec, section "General-Purpose Event Registers", we have:
	 *
	 * "Each register block contains two registers of equal length
	 *  GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
	 *  GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN.
	 *  The length of the GPE1_STS and GPE1_EN registers is equal to
	 *  half the GPE1_LEN. If a generic register block is not supported
	 *  then its respective block pointer and block length values in the
	 *  FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
	 *  to be the same size."
	 */
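
	/*
	 * Worked example of the division described above (illustration only):
	 * a GPE0_BLK_LEN of 8 bytes yields register_count0 = 8 / 2 = 4
	 * status/enable register pairs. With ACPI_GPE_REGISTER_WIDTH (8) GPEs
	 * per register pair, that block covers 4 * 8 = 32 GPEs, numbered from
	 * 0x00 up to gpe_number_max = (4 * 8) - 1 = 0x1F.
	 */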

	/*
	 * Determine the maximum GPE number for this machine.
	 *
	 * Note: both GPE0 and GPE1 are optional, and either can exist without
	 * the other.
	 *
	 * If EITHER the register length OR the block address is zero, then that
	 * particular block is not supported.
	 */
	address = ACPI_FADT_GPE_BLOCK_ADDRESS(0);

	if (acpi_gbl_FADT.gpe0_block_length && address) {

		/* GPE block 0 exists (has both length and address > 0) */

		register_count0 = (u16)(acpi_gbl_FADT.gpe0_block_length / 2);
		gpe_number_max =
		    (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;

		/* Install GPE Block 0 */

		status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
						  address,
						  acpi_gbl_FADT.xgpe0_block.space_id,
						  register_count0, 0,
						  acpi_gbl_FADT.sci_interrupt,
						  &acpi_gbl_gpe_fadt_blocks[0]);

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Could not create GPE Block 0"));
		}
	}

	address = ACPI_FADT_GPE_BLOCK_ADDRESS(1);

	if (acpi_gbl_FADT.gpe1_block_length && address) {

		/* GPE block 1 exists (has both length and address > 0) */

		register_count1 = (u16)(acpi_gbl_FADT.gpe1_block_length / 2);

		/* Check for GPE0/GPE1 overlap (if both banks exist) */

		if ((register_count0) &&
		    (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
			ACPI_ERROR((AE_INFO,
				    "GPE0 block (GPE 0 to %u) overlaps the GPE1 block "
				    "(GPE %u to %u) - Ignoring GPE1",
				    gpe_number_max, acpi_gbl_FADT.gpe1_base,
				    acpi_gbl_FADT.gpe1_base +
				    ((register_count1 *
				      ACPI_GPE_REGISTER_WIDTH) - 1)));

			/* Ignore GPE1 block by setting the register count to zero */

			register_count1 = 0;
		} else {
			/* Install GPE Block 1 */

			status =
			    acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
						     address,
						     acpi_gbl_FADT.xgpe1_block.space_id,
						     register_count1,
						     acpi_gbl_FADT.gpe1_base,
						     acpi_gbl_FADT.sci_interrupt,
						     &acpi_gbl_gpe_fadt_blocks[1]);

			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"Could not create GPE Block 1"));
			}

			/*
			 * GPE0 and GPE1 do not have to be contiguous in the GPE number
			 * space. However, GPE0 always starts at GPE number zero.
			 */
		}
	}
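
	/*
	 * Worked example of the overlap check above (illustration only): with
	 * a GPE0_BLK_LEN of 8, GPE block 0 covers GPE numbers 0x00 - 0x1F. A
	 * GPE1_BASE of 0x10 would overlap block 0 and GPE block 1 would be
	 * ignored; a GPE1_BASE of 0x20 or higher would not overlap.
	 */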

	/* Exit if there are no GPE registers */

	if ((register_count0 + register_count1) == 0) {

		/* GPEs are not required by ACPI; this is OK */

		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
				  "There are no GPE blocks defined in the FADT\n"));
		goto cleanup;
	}

cleanup:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_update_gpes
 *
 * PARAMETERS:  table_owner_id      - ID of the newly-loaded ACPI table
 *
 * RETURN:      None
 *
 * DESCRIPTION: Check for new GPE methods (_Lxx/_Exx) made available as a
 *              result of a Load() or load_table() operation. If new GPE
 *              methods have been installed, register the new methods.
 *
 ******************************************************************************/
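
/*
 * Hedged illustration of how this path is reached (an assumption about the
 * caller, not something this file defines): a host that dynamically loads an
 * SSDT containing new \_GPE._Lxx/_Exx methods, e.g. through the
 * acpi_load_table() external interface, relies on the table-load path to
 * invoke acpi_ev_update_gpes() with the owner ID of the new table so that
 * the new methods are registered and their GPEs enabled. With a hypothetical
 * new_ssdt_header pointer and table_index output variable:
 *
 *	status = acpi_load_table(new_ssdt_header, &table_index);
 */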

void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
{
	struct acpi_gpe_xrupt_info *gpe_xrupt_info;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_walk_info walk_info;
	acpi_status status = AE_OK;

	/*
	 * Find any _Lxx/_Exx GPE methods that have just been loaded.
	 *
	 * Any GPEs that correspond to new _Lxx/_Exx methods are immediately
	 * enabled.
	 *
	 * Examine the namespace underneath each gpe_device within the
	 * gpe_block lists.
	 */
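	/*
	 * Shape of the lists walked below (a rough sketch using the names in
	 * this function, not a complete picture of the structures):
	 *
	 *	acpi_gbl_gpe_xrupt_list_head
	 *	  -> gpe_xrupt_info (one per GPE interrupt level) -> next ...
	 *	       -> gpe_block_list_head
	 *	            -> gpe_block (e.g. the FADT GPE0/GPE1 blocks)
	 *	                 -> node (the gpe_device whose namespace is walked)
	 */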
	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return;
	}

	walk_info.count = 0;
	walk_info.owner_id = table_owner_id;
	walk_info.execute_by_owner_id = TRUE;

	/* Walk the interrupt level descriptor list */

	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
	while (gpe_xrupt_info) {

		/* Walk all Gpe Blocks attached to this interrupt level */

		gpe_block = gpe_xrupt_info->gpe_block_list_head;
		while (gpe_block) {
			walk_info.gpe_block = gpe_block;
			walk_info.gpe_device = gpe_block->node;

			status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD,
							walk_info.gpe_device,
							ACPI_UINT32_MAX,
							ACPI_NS_WALK_NO_UNLOCK,
							acpi_ev_match_gpe_method,
							NULL, &walk_info, NULL);
			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"While decoding _Lxx/_Exx methods"));
			}

			gpe_block = gpe_block->next;
		}

		gpe_xrupt_info = gpe_xrupt_info->next;
	}

	if (walk_info.count) {
		ACPI_INFO(("Enabled %u new GPEs", walk_info.count));
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_match_gpe_method
 *
 * PARAMETERS:  Callback from walk_namespace
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
 *              control method under the _GPE portion of the namespace.
 *              Extract the name and GPE type from the object, saving this
 *              information for quick lookup during GPE dispatch. Allows a
 *              per-owner_id evaluation if execute_by_owner_id is TRUE in the
 *              walk_info parameter block.
 *
 *              The name of each GPE control method is of the form:
 *              "_Lxx" or "_Exx", where:
 *                  L      - means that the GPE is level triggered
 *                  E      - means that the GPE is edge triggered
 *                  xx     - is the GPE number [in HEX]
 *
 * If walk_info->execute_by_owner_id is TRUE, we only examine GPE methods
 * with that owner.
 *
 ******************************************************************************/
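
/*
 * Worked example of the naming convention above (illustration only): a
 * method named "_L1A" is matched as a level-triggered GPE, and its "1A"
 * suffix is converted by acpi_ut_ascii_to_hex_byte() to GPE number 0x1A.
 * A method named "_E0C" would likewise register edge-triggered GPE 0x0C.
 */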

acpi_status
acpi_ev_match_gpe_method(acpi_handle obj_handle,
			 u32 level, void *context, void **return_value)
{
	struct acpi_namespace_node *method_node =
	    ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
	struct acpi_gpe_walk_info *walk_info =
	    ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
	struct acpi_gpe_event_info *gpe_event_info;
	acpi_status status;
	u32 gpe_number;
	u8 temp_gpe_number;
	char name[ACPI_NAMESEG_SIZE + 1];
	u8 type;

	ACPI_FUNCTION_TRACE(ev_match_gpe_method);

	/* Check if requested owner_id matches this owner_id */

	if ((walk_info->execute_by_owner_id) &&
	    (method_node->owner_id != walk_info->owner_id)) {
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Match and decode the _Lxx and _Exx GPE method names
	 *
	 * 1) Extract the method name and null terminate it
	 */
	ACPI_MOVE_32_TO_32(name, &method_node->name.integer);
	name[ACPI_NAMESEG_SIZE] = 0;

	/* 2) Name must begin with an underscore */

	if (name[0] != '_') {
		return_ACPI_STATUS(AE_OK);	/* Ignore this method */
	}

	/*
	 * 3) Edge/Level determination is based on the 2nd character
	 *    of the method name
	 */
	switch (name[1]) {
	case 'L':

		type = ACPI_GPE_LEVEL_TRIGGERED;
		break;

	case 'E':

		type = ACPI_GPE_EDGE_TRIGGERED;
		break;

	default:

		/* Unknown method type, just ignore it */

		ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
				  "Ignoring unknown GPE method type: %s "
				  "(name not of form _Lxx or _Exx)", name));
		return_ACPI_STATUS(AE_OK);
	}

	/* 4) The last two characters of the name are the hex GPE Number */

	status = acpi_ut_ascii_to_hex_byte(&name[2], &temp_gpe_number);
	if (ACPI_FAILURE(status)) {

		/* Conversion failed; invalid method, just ignore it */

		ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
				  "Could not extract GPE number from name: %s "
				  "(name is not of form _Lxx or _Exx)", name));
		return_ACPI_STATUS(AE_OK);
	}

	/* Ensure that we have a valid GPE number for this GPE block */

	gpe_number = (u32)temp_gpe_number;
	gpe_event_info =
	    acpi_ev_low_get_gpe_info(gpe_number, walk_info->gpe_block);
	if (!gpe_event_info) {
		/*
		 * This gpe_number is not valid for this GPE block, just ignore it.
		 * However, it may be valid for a different GPE block, since GPE0
		 * and GPE1 methods both appear under \_GPE.
		 */
		return_ACPI_STATUS(AE_OK);
	}

	if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
	     ACPI_GPE_DISPATCH_HANDLER) ||
	    (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
	     ACPI_GPE_DISPATCH_RAW_HANDLER)) {

		/* If there is already a handler, ignore this GPE method */

		return_ACPI_STATUS(AE_OK);
	}

	if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
	    ACPI_GPE_DISPATCH_METHOD) {
		/*
		 * If there is already a method, ignore this method. But check
		 * for a type mismatch (if both the _Lxx AND _Exx exist)
		 */
		if (type != (gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK)) {
			ACPI_ERROR((AE_INFO,
				    "For GPE 0x%.2X, found both _L%2.2X and _E%2.2X methods",
				    gpe_number, gpe_number, gpe_number));
		}
		return_ACPI_STATUS(AE_OK);
	}

	/* Disable the GPE in case it's been enabled already. */

	(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);

	/*
	 * Add the GPE information from above to the gpe_event_info block for
	 * use during dispatch of this GPE.
	 */
	gpe_event_info->flags &= ~(ACPI_GPE_DISPATCH_MASK);
	gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
	gpe_event_info->dispatch.method_node = method_node;

	ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
			  "Registered GPE method %s as GPE number 0x%.2X\n",
			  name, gpe_number));
	return_ACPI_STATUS(AE_OK);
}

#endif				/* !ACPI_REDUCED_HARDWARE */