xref: /openbmc/linux/drivers/acpi/apei/apei-base.c (revision 293d5b43)
1 /*
2  * apei-base.c - ACPI Platform Error Interface (APEI) supporting
3  * infrastructure
4  *
 * APEI allows reporting errors (for example from the chipset) to
 * the operating system. This improves NMI handling especially. In
 * addition it supports error serialization and error injection.
8  *
9  * For more information about APEI, please refer to ACPI Specification
10  * version 4.0, chapter 17.
11  *
12  * This file has Common functions used by more than one APEI table,
13  * including framework of interpreter for ERST and EINJ; resource
14  * management for APEI registers.
15  *
16  * Copyright (C) 2009, Intel Corp.
17  *	Author: Huang Ying <ying.huang@intel.com>
18  *
19  * This program is free software; you can redistribute it and/or
20  * modify it under the terms of the GNU General Public License version
21  * 2 as published by the Free Software Foundation.
22  *
23  * This program is distributed in the hope that it will be useful,
24  * but WITHOUT ANY WARRANTY; without even the implied warranty of
25  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
26  * GNU General Public License for more details.
27  */
28 
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/init.h>
32 #include <linux/acpi.h>
33 #include <linux/slab.h>
34 #include <linux/io.h>
35 #include <linux/kref.h>
36 #include <linux/rculist.h>
37 #include <linux/interrupt.h>
38 #include <linux/debugfs.h>
39 #include <asm/unaligned.h>
40 
41 #include "apei-internal.h"
42 
43 #define APEI_PFX "APEI: "
44 
45 /*
46  * APEI ERST (Error Record Serialization Table) and EINJ (Error
47  * INJection) interpreter framework.
48  */
49 
50 #define APEI_EXEC_PRESERVE_REGISTER	0x1
51 
52 void apei_exec_ctx_init(struct apei_exec_context *ctx,
53 			struct apei_exec_ins_type *ins_table,
54 			u32 instructions,
55 			struct acpi_whea_header *action_table,
56 			u32 entries)
57 {
58 	ctx->ins_table = ins_table;
59 	ctx->instructions = instructions;
60 	ctx->action_table = action_table;
61 	ctx->entries = entries;
62 }
63 EXPORT_SYMBOL_GPL(apei_exec_ctx_init);
64 
65 int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
66 {
67 	int rc;
68 
69 	rc = apei_read(val, &entry->register_region);
70 	if (rc)
71 		return rc;
72 	*val >>= entry->register_region.bit_offset;
73 	*val &= entry->mask;
74 
75 	return 0;
76 }
77 
78 int apei_exec_read_register(struct apei_exec_context *ctx,
79 			    struct acpi_whea_header *entry)
80 {
81 	int rc;
82 	u64 val = 0;
83 
84 	rc = __apei_exec_read_register(entry, &val);
85 	if (rc)
86 		return rc;
87 	ctx->value = val;
88 
89 	return 0;
90 }
91 EXPORT_SYMBOL_GPL(apei_exec_read_register);
92 
93 int apei_exec_read_register_value(struct apei_exec_context *ctx,
94 				  struct acpi_whea_header *entry)
95 {
96 	int rc;
97 
98 	rc = apei_exec_read_register(ctx, entry);
99 	if (rc)
100 		return rc;
101 	ctx->value = (ctx->value == entry->value);
102 
103 	return 0;
104 }
105 EXPORT_SYMBOL_GPL(apei_exec_read_register_value);
106 
107 int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
108 {
109 	int rc;
110 
111 	val &= entry->mask;
112 	val <<= entry->register_region.bit_offset;
113 	if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
114 		u64 valr = 0;
115 		rc = apei_read(&valr, &entry->register_region);
116 		if (rc)
117 			return rc;
118 		valr &= ~(entry->mask << entry->register_region.bit_offset);
119 		val |= valr;
120 	}
121 	rc = apei_write(val, &entry->register_region);
122 
123 	return rc;
124 }
125 
/* Write the current context value (ctx->value) to @entry's register. */
int apei_exec_write_register(struct apei_exec_context *ctx,
			     struct acpi_whea_header *entry)
{
	return __apei_exec_write_register(entry, ctx->value);
}
EXPORT_SYMBOL_GPL(apei_exec_write_register);
132 
133 int apei_exec_write_register_value(struct apei_exec_context *ctx,
134 				   struct acpi_whea_header *entry)
135 {
136 	int rc;
137 
138 	ctx->value = entry->value;
139 	rc = apei_exec_write_register(ctx, entry);
140 
141 	return rc;
142 }
143 EXPORT_SYMBOL_GPL(apei_exec_write_register_value);
144 
/* Instruction handler that intentionally does nothing (always succeeds). */
int apei_exec_noop(struct apei_exec_context *ctx,
		   struct acpi_whea_header *entry)
{
	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_noop);
151 
152 /*
153  * Interpret the specified action. Go through whole action table,
154  * execute all instructions belong to the action.
155  */
156 int __apei_exec_run(struct apei_exec_context *ctx, u8 action,
157 		    bool optional)
158 {
159 	int rc = -ENOENT;
160 	u32 i, ip;
161 	struct acpi_whea_header *entry;
162 	apei_exec_ins_func_t run;
163 
164 	ctx->ip = 0;
165 
166 	/*
167 	 * "ip" is the instruction pointer of current instruction,
168 	 * "ctx->ip" specifies the next instruction to executed,
169 	 * instruction "run" function may change the "ctx->ip" to
170 	 * implement "goto" semantics.
171 	 */
172 rewind:
173 	ip = 0;
174 	for (i = 0; i < ctx->entries; i++) {
175 		entry = &ctx->action_table[i];
176 		if (entry->action != action)
177 			continue;
178 		if (ip == ctx->ip) {
179 			if (entry->instruction >= ctx->instructions ||
180 			    !ctx->ins_table[entry->instruction].run) {
181 				pr_warning(FW_WARN APEI_PFX
182 			"Invalid action table, unknown instruction type: %d\n",
183 					   entry->instruction);
184 				return -EINVAL;
185 			}
186 			run = ctx->ins_table[entry->instruction].run;
187 			rc = run(ctx, entry);
188 			if (rc < 0)
189 				return rc;
190 			else if (rc != APEI_EXEC_SET_IP)
191 				ctx->ip++;
192 		}
193 		ip++;
194 		if (ctx->ip < ip)
195 			goto rewind;
196 	}
197 
198 	return !optional && rc < 0 ? rc : 0;
199 }
200 EXPORT_SYMBOL_GPL(__apei_exec_run);
201 
/* Per-entry callback type used by apei_exec_for_each_entry(). */
typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
				      struct acpi_whea_header *entry,
				      void *data);
205 
206 static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
207 				    apei_exec_entry_func_t func,
208 				    void *data,
209 				    int *end)
210 {
211 	u8 ins;
212 	int i, rc;
213 	struct acpi_whea_header *entry;
214 	struct apei_exec_ins_type *ins_table = ctx->ins_table;
215 
216 	for (i = 0; i < ctx->entries; i++) {
217 		entry = ctx->action_table + i;
218 		ins = entry->instruction;
219 		if (end)
220 			*end = i;
221 		if (ins >= ctx->instructions || !ins_table[ins].run) {
222 			pr_warning(FW_WARN APEI_PFX
223 			"Invalid action table, unknown instruction type: %d\n",
224 				   ins);
225 			return -EINVAL;
226 		}
227 		rc = func(ctx, entry, data);
228 		if (rc)
229 			return rc;
230 	}
231 
232 	return 0;
233 }
234 
235 static int pre_map_gar_callback(struct apei_exec_context *ctx,
236 				struct acpi_whea_header *entry,
237 				void *data)
238 {
239 	u8 ins = entry->instruction;
240 
241 	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
242 		return apei_map_generic_address(&entry->register_region);
243 
244 	return 0;
245 }
246 
247 /*
248  * Pre-map all GARs in action table to make it possible to access them
249  * in NMI handler.
250  */
251 int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
252 {
253 	int rc, end;
254 
255 	rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
256 				      NULL, &end);
257 	if (rc) {
258 		struct apei_exec_context ctx_unmap;
259 		memcpy(&ctx_unmap, ctx, sizeof(*ctx));
260 		ctx_unmap.entries = end;
261 		apei_exec_post_unmap_gars(&ctx_unmap);
262 	}
263 
264 	return rc;
265 }
266 EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);
267 
268 static int post_unmap_gar_callback(struct apei_exec_context *ctx,
269 				   struct acpi_whea_header *entry,
270 				   void *data)
271 {
272 	u8 ins = entry->instruction;
273 
274 	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
275 		apei_unmap_generic_address(&entry->register_region);
276 
277 	return 0;
278 }
279 
/*
 * Post-unmap all GAR in action table.  Also called with a truncated
 * ->entries count to unwind a partially successful pre-map (see
 * apei_exec_pre_map_gars()).
 */
int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
{
	return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
					NULL, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);
287 
288 /*
289  * Resource management for GARs in APEI
290  */
/* One half-open address range [start, end) on an apei_resources list. */
struct apei_res {
	struct list_head list;
	unsigned long start;	/* first address in the range */
	unsigned long end;	/* one past the last address */
};
296 
/*
 * Collect all resources requested so far, to avoid conflicts between
 * multiple APEI users requesting overlapping regions (maintained by
 * apei_resources_request()/apei_resources_release()).
 */
struct apei_resources apei_resources_all = {
	.iomem = LIST_HEAD_INIT(apei_resources_all.iomem),
	.ioport = LIST_HEAD_INIT(apei_resources_all.ioport),
};
302 
/*
 * Add the range [start, start + size) to @res_list, merging it with any
 * existing ranges it overlaps.  Returns 0 on success or -ENOMEM.  A
 * zero-size (or end-overflowing) range is silently ignored.
 */
static int apei_res_add(struct list_head *res_list,
			unsigned long start, unsigned long size)
{
	struct apei_res *res, *resn, *res_ins = NULL;
	unsigned long end = start + size;

	if (end <= start)
		return 0;
repeat:
	list_for_each_entry_safe(res, resn, res_list, list) {
		if (res->start > end || res->end < start)
			continue;	/* disjoint: keep scanning */
		else if (end <= res->end && start >= res->start) {
			/* Fully contained in an existing range: done. */
			kfree(res_ins);
			return 0;
		}
		/*
		 * Partial overlap: absorb the existing node into the new
		 * range and rescan from the top, since the grown range
		 * may now overlap further list entries.
		 */
		list_del(&res->list);
		res->start = start = min(res->start, start);
		res->end = end = max(res->end, end);
		kfree(res_ins);
		res_ins = res;	/* reuse the unlinked node for insertion */
		goto repeat;
	}

	if (res_ins)
		list_add(&res_ins->list, res_list);
	else {
		/* No overlap with anything: insert a fresh node. */
		res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
		if (!res_ins)
			return -ENOMEM;
		res_ins->start = start;
		res_ins->end = end;
		list_add(&res_ins->list, res_list);
	}

	return 0;
}
340 
/*
 * Remove from @res_list1 every address also covered by @res_list2
 * (set subtraction: list1 -= list2).  Ranges in list1 may be trimmed,
 * split in two, or deleted.  Returns 0 on success or -ENOMEM.
 */
static int apei_res_sub(struct list_head *res_list1,
			struct list_head *res_list2)
{
	struct apei_res *res1, *resn1, *res2, *res;
	/*
	 * Open-coded "safe" iteration over list1: res1 may be freed or
	 * have a new node spliced in after it inside the loop body.
	 */
	res1 = list_entry(res_list1->next, struct apei_res, list);
	resn1 = list_entry(res1->list.next, struct apei_res, list);
	while (&res1->list != res_list1) {
		list_for_each_entry(res2, res_list2, list) {
			if (res1->start >= res2->end ||
			    res1->end <= res2->start)
				continue;	/* no overlap */
			else if (res1->end <= res2->end &&
				 res1->start >= res2->start) {
				/* res1 completely covered: drop it. */
				list_del(&res1->list);
				kfree(res1);
				break;
			} else if (res1->end > res2->end &&
				   res1->start < res2->start) {
				/* res2 punches a hole: split res1 in two. */
				res = kmalloc(sizeof(*res), GFP_KERNEL);
				if (!res)
					return -ENOMEM;
				res->start = res2->end;
				res->end = res1->end;
				res1->end = res2->start;
				list_add(&res->list, &res1->list);
				resn1 = res;	/* visit the tail next */
			} else {
				/* Overlap at one end only: trim res1. */
				if (res1->start < res2->start)
					res1->end = res2->start;
				else
					res1->start = res2->end;
			}
		}
		res1 = resn1;
		resn1 = list_entry(resn1->list.next, struct apei_res, list);
	}

	return 0;
}
380 
381 static void apei_res_clean(struct list_head *res_list)
382 {
383 	struct apei_res *res, *resn;
384 
385 	list_for_each_entry_safe(res, resn, res_list, list) {
386 		list_del(&res->list);
387 		kfree(res);
388 	}
389 }
390 
391 void apei_resources_fini(struct apei_resources *resources)
392 {
393 	apei_res_clean(&resources->iomem);
394 	apei_res_clean(&resources->ioport);
395 }
396 EXPORT_SYMBOL_GPL(apei_resources_fini);
397 
398 static int apei_resources_merge(struct apei_resources *resources1,
399 				struct apei_resources *resources2)
400 {
401 	int rc;
402 	struct apei_res *res;
403 
404 	list_for_each_entry(res, &resources2->iomem, list) {
405 		rc = apei_res_add(&resources1->iomem, res->start,
406 				  res->end - res->start);
407 		if (rc)
408 			return rc;
409 	}
410 	list_for_each_entry(res, &resources2->ioport, list) {
411 		rc = apei_res_add(&resources1->ioport, res->start,
412 				  res->end - res->start);
413 		if (rc)
414 			return rc;
415 	}
416 
417 	return 0;
418 }
419 
420 int apei_resources_add(struct apei_resources *resources,
421 		       unsigned long start, unsigned long size,
422 		       bool iomem)
423 {
424 	if (iomem)
425 		return apei_res_add(&resources->iomem, start, size);
426 	else
427 		return apei_res_add(&resources->ioport, start, size);
428 }
429 EXPORT_SYMBOL_GPL(apei_resources_add);
430 
431 /*
432  * EINJ has two groups of GARs (EINJ table entry and trigger table
433  * entry), so common resources are subtracted from the trigger table
434  * resources before the second requesting.
435  */
436 int apei_resources_sub(struct apei_resources *resources1,
437 		       struct apei_resources *resources2)
438 {
439 	int rc;
440 
441 	rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
442 	if (rc)
443 		return rc;
444 	return apei_res_sub(&resources1->ioport, &resources2->ioport);
445 }
446 EXPORT_SYMBOL_GPL(apei_resources_sub);
447 
448 static int apei_get_res_callback(__u64 start, __u64 size, void *data)
449 {
450 	struct apei_resources *resources = data;
451 	return apei_res_add(&resources->iomem, start, size);
452 }
453 
/* Collect all ACPI NVS regions into @resources->iomem. */
static int apei_get_nvs_resources(struct apei_resources *resources)
{
	return acpi_nvs_for_each_region(apei_get_res_callback, resources);
}
458 
459 int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
460 				     void *data), void *data);
461 static int apei_get_arch_resources(struct apei_resources *resources)
462 
463 {
464 	return arch_apei_filter_addr(apei_get_res_callback, resources);
465 }
466 
/*
 * IO memory/port resource management mechanism is used to check
 * whether memory/port area used by GARs conflicts with normal memory
 * or IO memory/port of devices.
 *
 * On success the requested ranges are recorded in apei_resources_all
 * and must later be returned via apei_resources_release().
 */
int apei_resources_request(struct apei_resources *resources,
			   const char *desc)
{
	struct apei_res *res, *res_bak = NULL;
	struct resource *r;
	struct apei_resources nvs_resources, arch_res;
	int rc;

	/* Drop ranges already requested by earlier APEI users. */
	rc = apei_resources_sub(resources, &apei_resources_all);
	if (rc)
		return rc;

	/*
	 * Some firmware uses ACPI NVS region, that has been marked as
	 * busy, so exclude it from APEI resources to avoid false
	 * conflict.
	 */
	apei_resources_init(&nvs_resources);
	rc = apei_get_nvs_resources(&nvs_resources);
	if (rc)
		goto nvs_res_fini;
	rc = apei_resources_sub(resources, &nvs_resources);
	if (rc)
		goto nvs_res_fini;

	/* Likewise exclude any ranges the architecture filters out. */
	if (arch_apei_filter_addr) {
		apei_resources_init(&arch_res);
		rc = apei_get_arch_resources(&arch_res);
		if (rc)
			goto arch_res_fini;
		rc = apei_resources_sub(resources, &arch_res);
		if (rc)
			goto arch_res_fini;
	}

	rc = -EINVAL;
	list_for_each_entry(res, &resources->iomem, list) {
		r = request_mem_region(res->start, res->end - res->start,
				       desc);
		if (!r) {
			pr_err(APEI_PFX
		"Can not request [mem %#010llx-%#010llx] for %s registers\n",
			       (unsigned long long)res->start,
			       (unsigned long long)res->end - 1, desc);
			/* Roll back only regions requested before this one. */
			res_bak = res;
			goto err_unmap_iomem;
		}
	}

	list_for_each_entry(res, &resources->ioport, list) {
		r = request_region(res->start, res->end - res->start, desc);
		if (!r) {
			pr_err(APEI_PFX
		"Can not request [io  %#06llx-%#06llx] for %s registers\n",
			       (unsigned long long)res->start,
			       (unsigned long long)res->end - 1, desc);
			res_bak = res;
			goto err_unmap_ioport;
		}
	}

	/* Record the new ranges globally so later callers exclude them. */
	rc = apei_resources_merge(&apei_resources_all, resources);
	if (rc) {
		/* res_bak is NULL here, so both loops below release all. */
		pr_err(APEI_PFX "Fail to merge resources!\n");
		goto err_unmap_ioport;
	}

	/* Success path shares the cleanup tail below. */
	goto arch_res_fini;

err_unmap_ioport:
	list_for_each_entry(res, &resources->ioport, list) {
		if (res == res_bak)
			break;
		release_region(res->start, res->end - res->start);
	}
	res_bak = NULL;
err_unmap_iomem:
	list_for_each_entry(res, &resources->iomem, list) {
		if (res == res_bak)
			break;
		release_mem_region(res->start, res->end - res->start);
	}
arch_res_fini:
	/* arch_res was only initialized when the arch hook exists. */
	if (arch_apei_filter_addr)
		apei_resources_fini(&arch_res);
nvs_res_fini:
	apei_resources_fini(&nvs_resources);
	return rc;
}
EXPORT_SYMBOL_GPL(apei_resources_request);
561 EXPORT_SYMBOL_GPL(apei_resources_request);
562 
/* Undo apei_resources_request(): release regions and forget them. */
void apei_resources_release(struct apei_resources *resources)
{
	int rc;
	struct apei_res *res;

	list_for_each_entry(res, &resources->iomem, list)
		release_mem_region(res->start, res->end - res->start);
	list_for_each_entry(res, &resources->ioport, list)
		release_region(res->start, res->end - res->start);

	/* Remove the ranges from the global bookkeeping as well. */
	rc = apei_resources_sub(&apei_resources_all, resources);
	if (rc)
		pr_err(APEI_PFX "Fail to sub resources!\n");
}
EXPORT_SYMBOL_GPL(apei_resources_release);
578 
579 static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
580 				u32 *access_bit_width)
581 {
582 	u32 bit_width, bit_offset, access_size_code, space_id;
583 
584 	bit_width = reg->bit_width;
585 	bit_offset = reg->bit_offset;
586 	access_size_code = reg->access_width;
587 	space_id = reg->space_id;
588 	*paddr = get_unaligned(&reg->address);
589 	if (!*paddr) {
590 		pr_warning(FW_BUG APEI_PFX
591 			   "Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n",
592 			   *paddr, bit_width, bit_offset, access_size_code,
593 			   space_id);
594 		return -EINVAL;
595 	}
596 
597 	if (access_size_code < 1 || access_size_code > 4) {
598 		pr_warning(FW_BUG APEI_PFX
599 			   "Invalid access size code in GAR [0x%llx/%u/%u/%u/%u]\n",
600 			   *paddr, bit_width, bit_offset, access_size_code,
601 			   space_id);
602 		return -EINVAL;
603 	}
604 	*access_bit_width = 1UL << (access_size_code + 2);
605 
606 	/* Fixup common BIOS bug */
607 	if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 &&
608 	    *access_bit_width < 32)
609 		*access_bit_width = 32;
610 	else if (bit_width == 64 && bit_offset == 0 && (*paddr & 0x07) == 0 &&
611 	    *access_bit_width < 64)
612 		*access_bit_width = 64;
613 
614 	if ((bit_width + bit_offset) > *access_bit_width) {
615 		pr_warning(FW_BUG APEI_PFX
616 			   "Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n",
617 			   *paddr, bit_width, bit_offset, access_size_code,
618 			   space_id);
619 		return -EINVAL;
620 	}
621 
622 	if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
623 	    space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
624 		pr_warning(FW_BUG APEI_PFX
625 			   "Invalid address space type in GAR [0x%llx/%u/%u/%u/%u]\n",
626 			   *paddr, bit_width, bit_offset, access_size_code,
627 			   space_id);
628 		return -EINVAL;
629 	}
630 
631 	return 0;
632 }
633 
634 int apei_map_generic_address(struct acpi_generic_address *reg)
635 {
636 	int rc;
637 	u32 access_bit_width;
638 	u64 address;
639 
640 	rc = apei_check_gar(reg, &address, &access_bit_width);
641 	if (rc)
642 		return rc;
643 	return acpi_os_map_generic_address(reg);
644 }
645 EXPORT_SYMBOL_GPL(apei_map_generic_address);
646 
/* read GAR in interrupt (including NMI) or process context */
int apei_read(u64 *val, struct acpi_generic_address *reg)
{
	int rc;
	u32 access_bit_width;
	u64 address;
	acpi_status status;

	/* Validate the GAR and derive its address and access width. */
	rc = apei_check_gar(reg, &address, &access_bit_width);
	if (rc)
		return rc;

	*val = 0;
	switch(reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		status = acpi_os_read_memory((acpi_physical_address) address,
					       val, access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/*
		 * NOTE(review): the (u32 *) cast makes the port read fill
		 * the first 4 bytes of *val; landing in the low half of
		 * the u64 relies on little-endian layout — confirm for
		 * big-endian ports.
		 */
		status = acpi_os_read_port(address, (u32 *)val,
					   access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	default:
		/* apei_check_gar() limits space_id; kept for safety. */
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(apei_read);
680 
681 /* write GAR in interrupt (including NMI) or process context */
682 int apei_write(u64 val, struct acpi_generic_address *reg)
683 {
684 	int rc;
685 	u32 access_bit_width;
686 	u64 address;
687 	acpi_status status;
688 
689 	rc = apei_check_gar(reg, &address, &access_bit_width);
690 	if (rc)
691 		return rc;
692 
693 	switch (reg->space_id) {
694 	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
695 		status = acpi_os_write_memory((acpi_physical_address) address,
696 						val, access_bit_width);
697 		if (ACPI_FAILURE(status))
698 			return -EIO;
699 		break;
700 	case ACPI_ADR_SPACE_SYSTEM_IO:
701 		status = acpi_os_write_port(address, val, access_bit_width);
702 		if (ACPI_FAILURE(status))
703 			return -EIO;
704 		break;
705 	default:
706 		return -EINVAL;
707 	}
708 
709 	return 0;
710 }
711 EXPORT_SYMBOL_GPL(apei_write);
712 
713 static int collect_res_callback(struct apei_exec_context *ctx,
714 				struct acpi_whea_header *entry,
715 				void *data)
716 {
717 	struct apei_resources *resources = data;
718 	struct acpi_generic_address *reg = &entry->register_region;
719 	u8 ins = entry->instruction;
720 	u32 access_bit_width;
721 	u64 paddr;
722 	int rc;
723 
724 	if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
725 		return 0;
726 
727 	rc = apei_check_gar(reg, &paddr, &access_bit_width);
728 	if (rc)
729 		return rc;
730 
731 	switch (reg->space_id) {
732 	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
733 		return apei_res_add(&resources->iomem, paddr,
734 				    access_bit_width / 8);
735 	case ACPI_ADR_SPACE_SYSTEM_IO:
736 		return apei_res_add(&resources->ioport, paddr,
737 				    access_bit_width / 8);
738 	default:
739 		return -EINVAL;
740 	}
741 }
742 
/*
 * Same register may be used by multiple instructions in GARs, so
 * resources are collected before requesting.  Walks every action-table
 * entry and stops at the first error.
 */
int apei_exec_collect_resources(struct apei_exec_context *ctx,
				struct apei_resources *resources)
{
	return apei_exec_for_each_entry(ctx, collect_res_callback,
					resources, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_collect_resources);
754 
/*
 * Return the shared "apei" debugfs directory, creating it on first use.
 * NOTE(review): no locking around the lazy init; presumably callers
 * cannot race a double-create — confirm against call sites.
 */
struct dentry *apei_get_debugfs_dir(void)
{
	static struct dentry *dapei;

	if (!dapei)
		dapei = debugfs_create_dir("apei", NULL);

	return dapei;
}
EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);
765 
/*
 * Weak default for the arch CMC firmware-first hook; architectures may
 * override.  Returns 1 unconditionally (presumably "continue/handled"
 * — confirm against the HEST callers of this hook).
 */
int __weak arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr,
				  void *data)
{
	return 1;
}
EXPORT_SYMBOL_GPL(arch_apei_enable_cmcff);
772 
/* Weak no-op default for reporting memory errors; archs may override. */
void __weak arch_apei_report_mem_error(int sev,
				       struct cper_sec_mem_err *mem_err)
{
}
EXPORT_SYMBOL_GPL(arch_apei_report_mem_error);
778 
779 int apei_osc_setup(void)
780 {
781 	static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c";
782 	acpi_handle handle;
783 	u32 capbuf[3];
784 	struct acpi_osc_context context = {
785 		.uuid_str	= whea_uuid_str,
786 		.rev		= 1,
787 		.cap.length	= sizeof(capbuf),
788 		.cap.pointer	= capbuf,
789 	};
790 
791 	capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
792 	capbuf[OSC_SUPPORT_DWORD] = 1;
793 	capbuf[OSC_CONTROL_DWORD] = 0;
794 
795 	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
796 	    || ACPI_FAILURE(acpi_run_osc(handle, &context)))
797 		return -EIO;
798 	else {
799 		kfree(context.ret.pointer);
800 		return 0;
801 	}
802 }
803 EXPORT_SYMBOL_GPL(apei_osc_setup);
804