/*
 * apei-base.c - ACPI Platform Error Interface (APEI) supporting
 * infrastructure
 *
 * APEI allows errors (for example from the chipset) to be reported to
 * the operating system. This improves NMI handling especially. In
 * addition it supports error serialization and error injection.
 *
 * For more information about APEI, please refer to ACPI Specification
 * version 4.0, chapter 17.
 *
 * This file contains common functions used by more than one APEI
 * table, including the interpreter framework for ERST and EINJ and
 * resource management for APEI registers.
 *
 * Copyright (C) 2009, Intel Corp.
 * Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/acpi_io.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>

#include "apei-internal.h"

#define APEI_PFX "APEI: "

/*
 * APEI ERST (Error Record Serialization Table) and EINJ (Error
 * INJection) interpreter framework.
 */

#define APEI_EXEC_PRESERVE_REGISTER	0x1

void apei_exec_ctx_init(struct apei_exec_context *ctx,
			struct apei_exec_ins_type *ins_table,
			u32 instructions,
			struct acpi_whea_header *action_table,
			u32 entries)
{
	ctx->ins_table = ins_table;
	ctx->instructions = instructions;
	ctx->action_table = action_table;
	ctx->entries = entries;
}
EXPORT_SYMBOL_GPL(apei_exec_ctx_init);

int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
{
	int rc;

	rc = apei_read(val, &entry->register_region);
	if (rc)
		return rc;
	*val >>= entry->register_region.bit_offset;
	*val &= entry->mask;

	return 0;
}

int apei_exec_read_register(struct apei_exec_context *ctx,
			    struct acpi_whea_header *entry)
{
	int rc;
	u64 val = 0;

	rc = __apei_exec_read_register(entry, &val);
	if (rc)
		return rc;
	ctx->value = val;

	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_read_register);

int apei_exec_read_register_value(struct apei_exec_context *ctx,
				  struct acpi_whea_header *entry)
{
	int rc;

	rc = apei_exec_read_register(ctx, entry);
	if (rc)
		return rc;
	ctx->value = (ctx->value == entry->value);

	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_read_register_value);

int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
{
	int rc;

	val &= entry->mask;
	val <<= entry->register_region.bit_offset;
	if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
		u64 valr = 0;
		rc = apei_read(&valr, &entry->register_region);
		if (rc)
			return rc;
		valr &= ~(entry->mask << entry->register_region.bit_offset);
		val |= valr;
	}
	rc = apei_write(val, &entry->register_region);

	return rc;
}

int apei_exec_write_register(struct apei_exec_context *ctx,
			     struct acpi_whea_header *entry)
{
	return __apei_exec_write_register(entry, ctx->value);
}
EXPORT_SYMBOL_GPL(apei_exec_write_register);

int apei_exec_write_register_value(struct apei_exec_context *ctx,
				   struct acpi_whea_header *entry)
{
	int rc;

	ctx->value = entry->value;
	rc = apei_exec_write_register(ctx, entry);

	return rc;
}
EXPORT_SYMBOL_GPL(apei_exec_write_register_value);

int apei_exec_noop(struct apei_exec_context *ctx,
		   struct acpi_whea_header *entry)
{
	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_noop);
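
/*
 * Illustrative sketch of how a table-driven user such as the EINJ
 * driver might wire the instruction helpers above into an instruction
 * table, bind it to the action table parsed from the firmware, and run
 * one action through the interpreter.  The names einj_ins_type,
 * einj_entries and einj_entry_count are hypothetical placeholders for
 * the caller's own definitions.
 *
 *	static struct apei_exec_ins_type einj_ins_type[] = {
 *		[ACPI_EINJ_READ_REGISTER] = {
 *			.flags	= APEI_EXEC_INS_ACCESS_REGISTER,
 *			.run	= apei_exec_read_register,
 *		},
 *		[ACPI_EINJ_WRITE_REGISTER] = {
 *			.flags	= APEI_EXEC_INS_ACCESS_REGISTER,
 *			.run	= apei_exec_write_register,
 *		},
 *		[ACPI_EINJ_NOOP] = {
 *			.flags	= 0,
 *			.run	= apei_exec_noop,
 *		},
 *	};
 *
 *	struct apei_exec_context ctx;
 *	int rc;
 *
 *	apei_exec_ctx_init(&ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type),
 *			   einj_entries, einj_entry_count);
 *	rc = __apei_exec_run(&ctx, ACPI_EINJ_BEGIN_OPERATION, false);
 */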

/*
 * Interpret the specified action. Go through the whole action table,
 * and execute all instructions belonging to the action.
 */
int __apei_exec_run(struct apei_exec_context *ctx, u8 action,
		    bool optional)
{
	int rc = -ENOENT;
	u32 i, ip;
	struct acpi_whea_header *entry;
	apei_exec_ins_func_t run;

	ctx->ip = 0;

	/*
	 * "ip" is the instruction pointer of the current instruction,
	 * "ctx->ip" specifies the next instruction to execute; an
	 * instruction's "run" function may change "ctx->ip" to
	 * implement "goto" semantics.
	 */
rewind:
	ip = 0;
	for (i = 0; i < ctx->entries; i++) {
		entry = &ctx->action_table[i];
		if (entry->action != action)
			continue;
		if (ip == ctx->ip) {
			if (entry->instruction >= ctx->instructions ||
			    !ctx->ins_table[entry->instruction].run) {
				pr_warning(FW_WARN APEI_PFX
					   "Invalid action table, unknown instruction type: %d\n",
					   entry->instruction);
				return -EINVAL;
			}
			run = ctx->ins_table[entry->instruction].run;
			rc = run(ctx, entry);
			if (rc < 0)
				return rc;
			else if (rc != APEI_EXEC_SET_IP)
				ctx->ip++;
		}
		ip++;
		if (ctx->ip < ip)
			goto rewind;
	}

	return !optional && rc < 0 ? rc : 0;
}
EXPORT_SYMBOL_GPL(__apei_exec_run);

typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
				      struct acpi_whea_header *entry,
				      void *data);

static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
				    apei_exec_entry_func_t func,
				    void *data,
				    int *end)
{
	u8 ins;
	int i, rc;
	struct acpi_whea_header *entry;
	struct apei_exec_ins_type *ins_table = ctx->ins_table;

	for (i = 0; i < ctx->entries; i++) {
		entry = ctx->action_table + i;
		ins = entry->instruction;
		if (end)
			*end = i;
		if (ins >= ctx->instructions || !ins_table[ins].run) {
			pr_warning(FW_WARN APEI_PFX
				   "Invalid action table, unknown instruction type: %d\n",
				   ins);
			return -EINVAL;
		}
		rc = func(ctx, entry, data);
		if (rc)
			return rc;
	}

	return 0;
}

static int pre_map_gar_callback(struct apei_exec_context *ctx,
				struct acpi_whea_header *entry,
				void *data)
{
	u8 ins = entry->instruction;

	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
		return acpi_os_map_generic_address(&entry->register_region);

	return 0;
}

/*
 * Pre-map all GARs in the action table to make it possible to access
 * them from the NMI handler.
 */
int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
{
	int rc, end;

	rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
				      NULL, &end);
	if (rc) {
		struct apei_exec_context ctx_unmap;
		memcpy(&ctx_unmap, ctx, sizeof(*ctx));
		ctx_unmap.entries = end;
		apei_exec_post_unmap_gars(&ctx_unmap);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);

static int post_unmap_gar_callback(struct apei_exec_context *ctx,
				   struct acpi_whea_header *entry,
				   void *data)
{
	u8 ins = entry->instruction;

	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
		acpi_os_unmap_generic_address(&entry->register_region);

	return 0;
}

/* Post-unmap all GARs in the action table. */
int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
{
	return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
					NULL, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);
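
/*
 * Illustrative sketch of the usual pairing for the two helpers above,
 * assuming "ctx" was set up with apei_exec_ctx_init(): the GARs are
 * pre-mapped once at probe/init time so that apei_read()/apei_write()
 * stay usable from NMI context, and unmapped again on the error or
 * module-exit path.
 *
 *	rc = apei_exec_pre_map_gars(&ctx);
 *	if (rc)
 *		return rc;
 *	...
 *	apei_exec_post_unmap_gars(&ctx);
 */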

/*
 * Resource management for GARs in APEI
 */
struct apei_res {
	struct list_head list;
	unsigned long start;
	unsigned long end;
};

/* Collect all resources requested, to avoid conflict */
struct apei_resources apei_resources_all = {
	.iomem = LIST_HEAD_INIT(apei_resources_all.iomem),
	.ioport = LIST_HEAD_INIT(apei_resources_all.ioport),
};

static int apei_res_add(struct list_head *res_list,
			unsigned long start, unsigned long size)
{
	struct apei_res *res, *resn, *res_ins = NULL;
	unsigned long end = start + size;

	if (end <= start)
		return 0;
repeat:
	list_for_each_entry_safe(res, resn, res_list, list) {
		if (res->start > end || res->end < start)
			continue;
		else if (end <= res->end && start >= res->start) {
			kfree(res_ins);
			return 0;
		}
		list_del(&res->list);
		res->start = start = min(res->start, start);
		res->end = end = max(res->end, end);
		kfree(res_ins);
		res_ins = res;
		goto repeat;
	}

	if (res_ins)
		list_add(&res_ins->list, res_list);
	else {
		res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
		if (!res_ins)
			return -ENOMEM;
		res_ins->start = start;
		res_ins->end = end;
		list_add(&res_ins->list, res_list);
	}

	return 0;
}

static int apei_res_sub(struct list_head *res_list1,
			struct list_head *res_list2)
{
	struct apei_res *res1, *resn1, *res2, *res;
	res1 = list_entry(res_list1->next, struct apei_res, list);
	resn1 = list_entry(res1->list.next, struct apei_res, list);
	while (&res1->list != res_list1) {
		list_for_each_entry(res2, res_list2, list) {
			if (res1->start >= res2->end ||
			    res1->end <= res2->start)
				continue;
			else if (res1->end <= res2->end &&
				 res1->start >= res2->start) {
				list_del(&res1->list);
				kfree(res1);
				break;
			} else if (res1->end > res2->end &&
				   res1->start < res2->start) {
				res = kmalloc(sizeof(*res), GFP_KERNEL);
				if (!res)
					return -ENOMEM;
				res->start = res2->end;
				res->end = res1->end;
				res1->end = res2->start;
				list_add(&res->list, &res1->list);
				resn1 = res;
			} else {
				if (res1->start < res2->start)
					res1->end = res2->start;
				else
					res1->start = res2->end;
			}
		}
		res1 = resn1;
		resn1 = list_entry(resn1->list.next, struct apei_res, list);
	}

	return 0;
}

static void apei_res_clean(struct list_head *res_list)
{
	struct apei_res *res, *resn;

	list_for_each_entry_safe(res, resn, res_list, list) {
		list_del(&res->list);
		kfree(res);
	}
}

void apei_resources_fini(struct apei_resources *resources)
{
	apei_res_clean(&resources->iomem);
	apei_res_clean(&resources->ioport);
}
EXPORT_SYMBOL_GPL(apei_resources_fini);
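
/*
 * Worked example of the range bookkeeping above (illustrative only):
 * apei_res_add() keeps each list's ranges disjoint by merging
 * overlapping or touching entries.  Starting from an empty list,
 *
 *	apei_res_add(&list, 0x1000, 0x8);	adds [0x1000, 0x1008)
 *	apei_res_add(&list, 0x1004, 0xc);	overlaps the first range
 *
 * leaves a single node covering [0x1000, 0x1010).  apei_res_sub()
 * later removes exactly the parts that overlap another list, splitting
 * a node in two when a subtracted range falls in its middle.
 */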

static int apei_resources_merge(struct apei_resources *resources1,
				struct apei_resources *resources2)
{
	int rc;
	struct apei_res *res;

	list_for_each_entry(res, &resources2->iomem, list) {
		rc = apei_res_add(&resources1->iomem, res->start,
				  res->end - res->start);
		if (rc)
			return rc;
	}
	list_for_each_entry(res, &resources2->ioport, list) {
		rc = apei_res_add(&resources1->ioport, res->start,
				  res->end - res->start);
		if (rc)
			return rc;
	}

	return 0;
}

int apei_resources_add(struct apei_resources *resources,
		       unsigned long start, unsigned long size,
		       bool iomem)
{
	if (iomem)
		return apei_res_add(&resources->iomem, start, size);
	else
		return apei_res_add(&resources->ioport, start, size);
}
EXPORT_SYMBOL_GPL(apei_resources_add);

/*
 * EINJ has two groups of GARs (EINJ table entries and trigger table
 * entries), so the resources common to both are subtracted from the
 * trigger table resources before the second request.
 */
int apei_resources_sub(struct apei_resources *resources1,
		       struct apei_resources *resources2)
{
	int rc;

	rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
	if (rc)
		return rc;
	return apei_res_sub(&resources1->ioport, &resources2->ioport);
}
EXPORT_SYMBOL_GPL(apei_resources_sub);

static int apei_get_nvs_callback(__u64 start, __u64 size, void *data)
{
	struct apei_resources *resources = data;
	return apei_res_add(&resources->iomem, start, size);
}

static int apei_get_nvs_resources(struct apei_resources *resources)
{
	return acpi_nvs_for_each_region(apei_get_nvs_callback, resources);
}

/*
 * The IO memory/port resource management mechanism is used to check
 * whether the memory/port areas used by GARs conflict with normal
 * memory or with the IO memory/port regions of devices.
 */
int apei_resources_request(struct apei_resources *resources,
			   const char *desc)
{
	struct apei_res *res, *res_bak = NULL;
	struct resource *r;
	struct apei_resources nvs_resources;
	int rc;

	rc = apei_resources_sub(resources, &apei_resources_all);
	if (rc)
		return rc;

	/*
	 * Some firmware uses the ACPI NVS region, which has already
	 * been marked as busy, so exclude it from the APEI resources
	 * to avoid a false conflict.
	 */
	apei_resources_init(&nvs_resources);
	rc = apei_get_nvs_resources(&nvs_resources);
	if (rc)
		goto res_fini;
	rc = apei_resources_sub(resources, &nvs_resources);
	if (rc)
		goto res_fini;

	rc = -EINVAL;
	list_for_each_entry(res, &resources->iomem, list) {
		r = request_mem_region(res->start, res->end - res->start,
				       desc);
		if (!r) {
			pr_err(APEI_PFX
			       "Can not request [mem %#010llx-%#010llx] for %s registers\n",
			       (unsigned long long)res->start,
			       (unsigned long long)res->end - 1, desc);
			res_bak = res;
			goto err_unmap_iomem;
		}
	}

	list_for_each_entry(res, &resources->ioport, list) {
		r = request_region(res->start, res->end - res->start, desc);
		if (!r) {
			pr_err(APEI_PFX
			       "Can not request [io %#06llx-%#06llx] for %s registers\n",
			       (unsigned long long)res->start,
			       (unsigned long long)res->end - 1, desc);
			res_bak = res;
			goto err_unmap_ioport;
		}
	}

	rc = apei_resources_merge(&apei_resources_all, resources);
	if (rc) {
		pr_err(APEI_PFX "Fail to merge resources!\n");
		goto err_unmap_ioport;
	}

	return 0;
err_unmap_ioport:
	list_for_each_entry(res, &resources->ioport, list) {
		if (res == res_bak)
			break;
		release_region(res->start, res->end - res->start);
	}
	res_bak = NULL;
err_unmap_iomem:
	list_for_each_entry(res, &resources->iomem, list) {
		if (res == res_bak)
			break;
		release_mem_region(res->start, res->end - res->start);
	}
res_fini:
	apei_resources_fini(&nvs_resources);
	return rc;
}
EXPORT_SYMBOL_GPL(apei_resources_request);

void apei_resources_release(struct apei_resources *resources)
{
	int rc;
	struct apei_res *res;

	list_for_each_entry(res, &resources->iomem, list)
		release_mem_region(res->start, res->end - res->start);
	list_for_each_entry(res, &resources->ioport, list)
		release_region(res->start, res->end - res->start);

	rc = apei_resources_sub(&apei_resources_all, resources);
	if (rc)
		pr_err(APEI_PFX "Fail to sub resources!\n");
}
EXPORT_SYMBOL_GPL(apei_resources_release);

static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
{
	u32 width, space_id;

	width = reg->bit_width;
	space_id = reg->space_id;
	/* Handle possible alignment issues */
	memcpy(paddr, &reg->address, sizeof(*paddr));
	if (!*paddr) {
		pr_warning(FW_BUG APEI_PFX
			   "Invalid physical address in GAR [0x%llx/%u/%u]\n",
			   *paddr, width, space_id);
		return -EINVAL;
	}

	if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
		pr_warning(FW_BUG APEI_PFX
			   "Invalid bit width in GAR [0x%llx/%u/%u]\n",
			   *paddr, width, space_id);
		return -EINVAL;
	}

	if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
	    space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
		pr_warning(FW_BUG APEI_PFX
			   "Invalid address space type in GAR [0x%llx/%u/%u]\n",
			   *paddr, width, space_id);
		return -EINVAL;
	}

	return 0;
}

/* read GAR in interrupt (including NMI) or process context */
int apei_read(u64 *val, struct acpi_generic_address *reg)
{
	int rc;
	u64 address;
	u32 tmp, width = reg->bit_width;
	acpi_status status;

	rc = apei_check_gar(reg, &address);
	if (rc)
		return rc;

	if (width == 64)
		width = 32;	/* Break into two 32-bit transfers */

	*val = 0;
	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		status = acpi_os_read_memory((acpi_physical_address)
					     address, &tmp, width);
		if (ACPI_FAILURE(status))
			return -EIO;
		*val = tmp;

		if (reg->bit_width == 64) {
			/* Read the top 32 bits */
			status = acpi_os_read_memory((acpi_physical_address)
						     (address + 4), &tmp, 32);
			if (ACPI_FAILURE(status))
				return -EIO;
			*val |= ((u64)tmp << 32);
		}
		break;
	case ACPI_ADR_SPACE_SYSTEM_IO:
		status = acpi_os_read_port(address, (u32 *)val, reg->bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(apei_read);

/* write GAR in interrupt (including NMI) or process context */
int apei_write(u64 val, struct acpi_generic_address *reg)
{
	int rc;
	u64 address;
	u32 width = reg->bit_width;
	acpi_status status;

	rc = apei_check_gar(reg, &address);
	if (rc)
		return rc;

	if (width == 64)
		width = 32;	/* Break into two 32-bit transfers */

	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		status = acpi_os_write_memory((acpi_physical_address)
					      address, ACPI_LODWORD(val),
					      width);
		if (ACPI_FAILURE(status))
			return -EIO;

		if (reg->bit_width == 64) {
			status = acpi_os_write_memory((acpi_physical_address)
						      (address + 4),
						      ACPI_HIDWORD(val), 32);
			if (ACPI_FAILURE(status))
				return -EIO;
		}
		break;
	case ACPI_ADR_SPACE_SYSTEM_IO:
		status = acpi_os_write_port(address, val, reg->bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(apei_write);

static int collect_res_callback(struct apei_exec_context *ctx,
				struct acpi_whea_header *entry,
				void *data)
{
	struct apei_resources *resources = data;
	struct acpi_generic_address *reg = &entry->register_region;
	u8 ins = entry->instruction;
	u64 paddr;
	int rc;

	if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
		return 0;

	rc = apei_check_gar(reg, &paddr);
	if (rc)
		return rc;

	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		return apei_res_add(&resources->iomem, paddr,
				    reg->bit_width / 8);
	case ACPI_ADR_SPACE_SYSTEM_IO:
		return apei_res_add(&resources->ioport, paddr,
				    reg->bit_width / 8);
	default:
		return -EINVAL;
	}
}
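
/*
 * Illustrative sketch of how the pieces might fit together at
 * initialization time for a table driver such as ERST or EINJ: the GAR
 * resources are collected from the action table via
 * apei_exec_collect_resources() just below, requested so that conflicts
 * are detected, and finally pre-mapped for NMI-safe access.  "ctx",
 * "table_resources" and the "APEI ERST" description string are
 * hypothetical names for the caller's own context, resource set and
 * label.
 *
 *	apei_resources_init(&table_resources);
 *	rc = apei_exec_collect_resources(&ctx, &table_resources);
 *	if (rc)
 *		goto err_fini;
 *	rc = apei_resources_request(&table_resources, "APEI ERST");
 *	if (rc)
 *		goto err_fini;
 *	rc = apei_exec_pre_map_gars(&ctx);
 *	if (rc)
 *		goto err_release;
 *	...
 * err_release:
 *	apei_resources_release(&table_resources);
 * err_fini:
 *	apei_resources_fini(&table_resources);
 */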

/*
 * The same register may be used by multiple instructions in the GARs,
 * so resources are collected before being requested.
 */
int apei_exec_collect_resources(struct apei_exec_context *ctx,
				struct apei_resources *resources)
{
	return apei_exec_for_each_entry(ctx, collect_res_callback,
					resources, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_collect_resources);

struct dentry *apei_get_debugfs_dir(void)
{
	static struct dentry *dapei;

	if (!dapei)
		dapei = debugfs_create_dir("apei", NULL);

	return dapei;
}
EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);

int apei_osc_setup(void)
{
	static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c";
	acpi_handle handle;
	u32 capbuf[3];
	struct acpi_osc_context context = {
		.uuid_str	= whea_uuid_str,
		.rev		= 1,
		.cap.length	= sizeof(capbuf),
		.cap.pointer	= capbuf,
	};

	capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
	capbuf[OSC_SUPPORT_TYPE] = 1;
	capbuf[OSC_CONTROL_TYPE] = 0;

	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
	    || ACPI_FAILURE(acpi_run_osc(handle, &context)))
		return -EIO;
	else {
		kfree(context.ret.pointer);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(apei_osc_setup);
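
/*
 * Illustrative note: other APEI drivers can place their own debugfs
 * entries below the shared "apei" directory returned by
 * apei_get_debugfs_dir(), e.g.
 *
 *	struct dentry *dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
 *
 * where "einj" is just an example sub-directory name.
 */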