/*
 * apei-base.c - ACPI Platform Error Interface (APEI) supporting
 * infrastructure
 *
 * APEI allows reporting errors (for example from the chipset) to the
 * operating system. This improves NMI handling especially. In
 * addition it supports error serialization and error injection.
 *
 * For more information about APEI, please refer to ACPI Specification
 * version 4.0, chapter 17.
 *
 * This file contains common functions used by more than one APEI
 * table, including the interpreter framework for ERST and EINJ, and
 * resource management for APEI registers.
 *
 * Copyright (C) 2009, Intel Corp.
 *	Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <acpi/atomicio.h>

#include "apei-internal.h"

#define APEI_PFX "APEI: "
/*
 * APEI ERST (Error Record Serialization Table) and EINJ (Error
 * INJection) interpreter framework.
 */

#define APEI_EXEC_PRESERVE_REGISTER	0x1

void apei_exec_ctx_init(struct apei_exec_context *ctx,
			struct apei_exec_ins_type *ins_table,
			u32 instructions,
			struct acpi_whea_header *action_table,
			u32 entries)
{
	ctx->ins_table = ins_table;
	ctx->instructions = instructions;
	ctx->action_table = action_table;
	ctx->entries = entries;
}
EXPORT_SYMBOL_GPL(apei_exec_ctx_init);

int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
{
	int rc;

	rc = acpi_atomic_read(val, &entry->register_region);
	if (rc)
		return rc;
	*val >>= entry->register_region.bit_offset;
	*val &= entry->mask;

	return 0;
}

int apei_exec_read_register(struct apei_exec_context *ctx,
			    struct acpi_whea_header *entry)
{
	int rc;
	u64 val = 0;

	rc = __apei_exec_read_register(entry, &val);
	if (rc)
		return rc;
	ctx->value = val;

	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_read_register);

int apei_exec_read_register_value(struct apei_exec_context *ctx,
				  struct acpi_whea_header *entry)
{
	int rc;

	rc = apei_exec_read_register(ctx, entry);
	if (rc)
		return rc;
	ctx->value = (ctx->value == entry->value);

	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_read_register_value);

int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
{
	int rc;

	val &= entry->mask;
	val <<= entry->register_region.bit_offset;
	if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
		u64 valr = 0;
		rc = acpi_atomic_read(&valr, &entry->register_region);
		if (rc)
			return rc;
		valr &= ~(entry->mask << entry->register_region.bit_offset);
		val |= valr;
	}
	rc = acpi_atomic_write(val, &entry->register_region);

	return rc;
}

int apei_exec_write_register(struct apei_exec_context *ctx,
			     struct acpi_whea_header *entry)
{
	return __apei_exec_write_register(entry, ctx->value);
}
EXPORT_SYMBOL_GPL(apei_exec_write_register);

int apei_exec_write_register_value(struct apei_exec_context *ctx,
				   struct acpi_whea_header *entry)
{
	int rc;

	ctx->value = entry->value;
	rc = apei_exec_write_register(ctx, entry);

	return rc;
}
EXPORT_SYMBOL_GPL(apei_exec_write_register_value);

int apei_exec_noop(struct apei_exec_context *ctx,
		   struct acpi_whea_header *entry)
{
	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_noop);
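/*
 * Illustrative sketch (not part of this file): a table driver such as
 * EINJ binds the primitive helpers above to the firmware instruction
 * opcodes through an instruction table indexed by opcode, roughly:
 *
 *	static struct apei_exec_ins_type einj_ins_type[] = {
 *		[ACPI_EINJ_READ_REGISTER] = {
 *			.flags	= APEI_EXEC_INS_ACCESS_REGISTER,
 *			.run	= apei_exec_read_register,
 *		},
 *		[ACPI_EINJ_WRITE_REGISTER_VALUE] = {
 *			.flags	= APEI_EXEC_INS_ACCESS_REGISTER,
 *			.run	= apei_exec_write_register_value,
 *		},
 *		[ACPI_EINJ_NOOP] = {
 *			.flags	= 0,
 *			.run	= apei_exec_noop,
 *		},
 *	};
 *
 * The APEI_EXEC_INS_ACCESS_REGISTER flag tells the GAR pre-mapping and
 * resource collection code below which instructions touch a register.
 * The exact table contents are up to the driver; the array name above
 * is hypothetical.
 */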
/*
 * Interpret the specified action. Go through the whole action table,
 * executing all instructions that belong to the action.
 */
int apei_exec_run(struct apei_exec_context *ctx, u8 action)
{
	int rc;
	u32 i, ip;
	struct acpi_whea_header *entry;
	apei_exec_ins_func_t run;

	ctx->ip = 0;

	/*
	 * "ip" is the instruction pointer of the current instruction,
	 * "ctx->ip" specifies the next instruction to be executed: an
	 * instruction "run" function may change "ctx->ip" to implement
	 * "goto" semantics.
	 */
rewind:
	ip = 0;
	for (i = 0; i < ctx->entries; i++) {
		entry = &ctx->action_table[i];
		if (entry->action != action)
			continue;
		if (ip == ctx->ip) {
			if (entry->instruction >= ctx->instructions ||
			    !ctx->ins_table[entry->instruction].run) {
				pr_warning(FW_WARN APEI_PFX
			"Invalid action table, unknown instruction type: %d\n",
					   entry->instruction);
				return -EINVAL;
			}
			run = ctx->ins_table[entry->instruction].run;
			rc = run(ctx, entry);
			if (rc < 0)
				return rc;
			else if (rc != APEI_EXEC_SET_IP)
				ctx->ip++;
		}
		ip++;
		if (ctx->ip < ip)
			goto rewind;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_run);
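/*
 * Illustrative sketch of driving the interpreter for one action.  The
 * instruction/action tables are stand-ins for what a real table driver
 * (here ERST) would pass in, ACPI_ERST_GET_ERROR_RANGE is one of the
 * ERST actions from the ACPI headers, and apei_exec_ctx_get_output() is
 * assumed to be the accessor from apei-internal.h that returns
 * ctx->value:
 *
 *	struct apei_exec_context ctx;
 *	u64 range;
 *	int rc;
 *
 *	apei_exec_ctx_init(&ctx, erst_ins_type, ARRAY_SIZE(erst_ins_type),
 *			   erst_action_table, erst_action_entries);
 *	rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_RANGE);
 *	if (!rc)
 *		range = apei_exec_ctx_get_output(&ctx);
 */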
typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
				      struct acpi_whea_header *entry,
				      void *data);

static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
				    apei_exec_entry_func_t func,
				    void *data,
				    int *end)
{
	u8 ins;
	int i, rc;
	struct acpi_whea_header *entry;
	struct apei_exec_ins_type *ins_table = ctx->ins_table;

	for (i = 0; i < ctx->entries; i++) {
		entry = ctx->action_table + i;
		ins = entry->instruction;
		if (end)
			*end = i;
		if (ins >= ctx->instructions || !ins_table[ins].run) {
			pr_warning(FW_WARN APEI_PFX
			"Invalid action table, unknown instruction type: %d\n",
				   ins);
			return -EINVAL;
		}
		rc = func(ctx, entry, data);
		if (rc)
			return rc;
	}

	return 0;
}

static int pre_map_gar_callback(struct apei_exec_context *ctx,
				struct acpi_whea_header *entry,
				void *data)
{
	u8 ins = entry->instruction;

	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
		return acpi_pre_map_gar(&entry->register_region);

	return 0;
}

/*
 * Pre-map all GARs in the action table to make it possible to access
 * them in an NMI handler.
 */
int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
{
	int rc, end;

	rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
				      NULL, &end);
	if (rc) {
		struct apei_exec_context ctx_unmap;
		memcpy(&ctx_unmap, ctx, sizeof(*ctx));
		ctx_unmap.entries = end;
		apei_exec_post_unmap_gars(&ctx_unmap);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);

static int post_unmap_gar_callback(struct apei_exec_context *ctx,
				   struct acpi_whea_header *entry,
				   void *data)
{
	u8 ins = entry->instruction;

	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
		acpi_post_unmap_gar(&entry->register_region);

	return 0;
}

/* Post-unmap all GARs in the action table. */
int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
{
	return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
					NULL, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);

/*
 * Resource management for GARs in APEI
 */
struct apei_res {
	struct list_head list;
	unsigned long start;
	unsigned long end;
};

/* Collect all resources requested, to avoid conflicts */
struct apei_resources apei_resources_all = {
	.iomem = LIST_HEAD_INIT(apei_resources_all.iomem),
	.ioport = LIST_HEAD_INIT(apei_resources_all.ioport),
};

static int apei_res_add(struct list_head *res_list,
			unsigned long start, unsigned long size)
{
	struct apei_res *res, *resn, *res_ins = NULL;
	unsigned long end = start + size;

	if (end <= start)
		return 0;
repeat:
	list_for_each_entry_safe(res, resn, res_list, list) {
		if (res->start > end || res->end < start)
			continue;
		else if (end <= res->end && start >= res->start) {
			kfree(res_ins);
			return 0;
		}
		list_del(&res->list);
		res->start = start = min(res->start, start);
		res->end = end = max(res->end, end);
		kfree(res_ins);
		res_ins = res;
		goto repeat;
	}

	if (res_ins)
		list_add(&res_ins->list, res_list);
	else {
		res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
		if (!res_ins)
			return -ENOMEM;
		res_ins->start = start;
		res_ins->end = end;
		list_add(&res_ins->list, res_list);
	}

	return 0;
}
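/*
 * Worked example of the merge behaviour above (addresses are arbitrary):
 *
 *	apei_res_add(&res->iomem, 0x1000, 0x8);	// list: [0x1000, 0x1008)
 *	apei_res_add(&res->iomem, 0x1004, 0x8);	// list: [0x1000, 0x100c)
 *
 * Overlapping or touching ranges are collapsed into one entry, and a
 * range that is fully contained in an existing entry is a no-op.
 */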
static int apei_res_sub(struct list_head *res_list1,
			struct list_head *res_list2)
{
	struct apei_res *res1, *resn1, *res2, *res;
	res1 = list_entry(res_list1->next, struct apei_res, list);
	resn1 = list_entry(res1->list.next, struct apei_res, list);
	while (&res1->list != res_list1) {
		list_for_each_entry(res2, res_list2, list) {
			if (res1->start >= res2->end ||
			    res1->end <= res2->start)
				continue;
			else if (res1->end <= res2->end &&
				 res1->start >= res2->start) {
				list_del(&res1->list);
				kfree(res1);
				break;
			} else if (res1->end > res2->end &&
				   res1->start < res2->start) {
				res = kmalloc(sizeof(*res), GFP_KERNEL);
				if (!res)
					return -ENOMEM;
				res->start = res2->end;
				res->end = res1->end;
				res1->end = res2->start;
				list_add(&res->list, &res1->list);
				resn1 = res;
			} else {
				if (res1->start < res2->start)
					res1->end = res2->start;
				else
					res1->start = res2->end;
			}
		}
		res1 = resn1;
		resn1 = list_entry(resn1->list.next, struct apei_res, list);
	}

	return 0;
}

static void apei_res_clean(struct list_head *res_list)
{
	struct apei_res *res, *resn;

	list_for_each_entry_safe(res, resn, res_list, list) {
		list_del(&res->list);
		kfree(res);
	}
}

void apei_resources_fini(struct apei_resources *resources)
{
	apei_res_clean(&resources->iomem);
	apei_res_clean(&resources->ioport);
}
EXPORT_SYMBOL_GPL(apei_resources_fini);

static int apei_resources_merge(struct apei_resources *resources1,
				struct apei_resources *resources2)
{
	int rc;
	struct apei_res *res;

	list_for_each_entry(res, &resources2->iomem, list) {
		rc = apei_res_add(&resources1->iomem, res->start,
				  res->end - res->start);
		if (rc)
			return rc;
	}
	list_for_each_entry(res, &resources2->ioport, list) {
		rc = apei_res_add(&resources1->ioport, res->start,
				  res->end - res->start);
		if (rc)
			return rc;
	}

	return 0;
}

/*
 * EINJ has two groups of GARs (EINJ table entry and trigger table
 * entry), so common resources are subtracted from the trigger table
 * resources before the second request.
 */
int apei_resources_sub(struct apei_resources *resources1,
		       struct apei_resources *resources2)
{
	int rc;

	rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
	if (rc)
		return rc;
	return apei_res_sub(&resources1->ioport, &resources2->ioport);
}
EXPORT_SYMBOL_GPL(apei_resources_sub);

/*
 * The IO memory/port resource management mechanism is used to check
 * whether the memory/port areas used by GARs conflict with normal
 * memory or with the IO memory/ports of devices.
 */
int apei_resources_request(struct apei_resources *resources,
			   const char *desc)
{
	struct apei_res *res, *res_bak = NULL;
	struct resource *r;
	int rc;

	rc = apei_resources_sub(resources, &apei_resources_all);
	if (rc)
		return rc;

	rc = -EINVAL;
	list_for_each_entry(res, &resources->iomem, list) {
		r = request_mem_region(res->start, res->end - res->start,
				       desc);
		if (!r) {
			pr_err(APEI_PFX
		"Can not request iomem region <%016llx-%016llx> for GARs.\n",
			       (unsigned long long)res->start,
			       (unsigned long long)res->end);
			res_bak = res;
			goto err_unmap_iomem;
		}
	}

	list_for_each_entry(res, &resources->ioport, list) {
		r = request_region(res->start, res->end - res->start, desc);
		if (!r) {
			pr_err(APEI_PFX
		"Can not request ioport region <%016llx-%016llx> for GARs.\n",
			       (unsigned long long)res->start,
			       (unsigned long long)res->end);
			res_bak = res;
			goto err_unmap_ioport;
		}
	}

	rc = apei_resources_merge(&apei_resources_all, resources);
	if (rc) {
		pr_err(APEI_PFX "Fail to merge resources!\n");
		goto err_unmap_ioport;
	}

	return 0;
err_unmap_ioport:
	list_for_each_entry(res, &resources->ioport, list) {
		if (res == res_bak)
			break;
		release_region(res->start, res->end - res->start);
	}
	res_bak = NULL;
err_unmap_iomem:
	list_for_each_entry(res, &resources->iomem, list) {
		if (res == res_bak)
			break;
		release_mem_region(res->start, res->end - res->start);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(apei_resources_request);

void apei_resources_release(struct apei_resources *resources)
{
	int rc;
	struct apei_res *res;

	list_for_each_entry(res, &resources->iomem, list)
		release_mem_region(res->start, res->end - res->start);
	list_for_each_entry(res, &resources->ioport, list)
		release_region(res->start, res->end - res->start);

	rc = apei_resources_sub(&apei_resources_all, resources);
	if (rc)
		pr_err(APEI_PFX "Fail to sub resources!\n");
}
EXPORT_SYMBOL_GPL(apei_resources_release);
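/*
 * Illustrative sketch of the usual life cycle of a GAR resource set in a
 * table driver (names are hypothetical; apei_resources_init() is assumed
 * to be the list-head initializer from apei-internal.h, and
 * apei_exec_collect_resources() is defined below):
 *
 *	struct apei_resources einj_resources;
 *	int rc;
 *
 *	apei_resources_init(&einj_resources);
 *	rc = apei_exec_collect_resources(&ctx, &einj_resources);
 *	if (rc)
 *		goto err_fini;
 *	rc = apei_resources_request(&einj_resources, "APEI EINJ");
 *	if (rc)
 *		goto err_fini;
 *	...
 *	apei_resources_release(&einj_resources);
 * err_fini:
 *	apei_resources_fini(&einj_resources);
 */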
static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
{
	u32 width, space_id;

	width = reg->bit_width;
	space_id = reg->space_id;
	/* Handle possible alignment issues */
	memcpy(paddr, &reg->address, sizeof(*paddr));
	if (!*paddr) {
		pr_warning(FW_BUG APEI_PFX
			   "Invalid physical address in GAR [0x%llx/%u/%u]\n",
			   *paddr, width, space_id);
		return -EINVAL;
	}

	if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
		pr_warning(FW_BUG APEI_PFX
			   "Invalid bit width in GAR [0x%llx/%u/%u]\n",
			   *paddr, width, space_id);
		return -EINVAL;
	}

	if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
	    space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
		pr_warning(FW_BUG APEI_PFX
			   "Invalid address space type in GAR [0x%llx/%u/%u]\n",
			   *paddr, width, space_id);
		return -EINVAL;
	}

	return 0;
}

static int collect_res_callback(struct apei_exec_context *ctx,
				struct acpi_whea_header *entry,
				void *data)
{
	struct apei_resources *resources = data;
	struct acpi_generic_address *reg = &entry->register_region;
	u8 ins = entry->instruction;
	u64 paddr;
	int rc;

	if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
		return 0;

	rc = apei_check_gar(reg, &paddr);
	if (rc)
		return rc;

	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		return apei_res_add(&resources->iomem, paddr,
				    reg->bit_width / 8);
	case ACPI_ADR_SPACE_SYSTEM_IO:
		return apei_res_add(&resources->ioport, paddr,
				    reg->bit_width / 8);
	default:
		return -EINVAL;
	}
}

/*
 * The same register may be used by multiple instructions in GARs, so
 * resources are collected before requesting.
 */
int apei_exec_collect_resources(struct apei_exec_context *ctx,
				struct apei_resources *resources)
{
	return apei_exec_for_each_entry(ctx, collect_res_callback,
					resources, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_collect_resources);

struct dentry *apei_get_debugfs_dir(void)
{
	static struct dentry *dapei;

	if (!dapei)
		dapei = debugfs_create_dir("apei", NULL);

	return dapei;
}
EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);
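/*
 * Illustrative sketch: APEI table drivers share one "apei" debugfs
 * directory and create their own files or subdirectories under it, for
 * example (file name and fops are hypothetical):
 *
 *	struct dentry *dir, *dfile;
 *
 *	dir = apei_get_debugfs_dir();
 *	dfile = debugfs_create_file("error_inject", S_IWUSR, dir,
 *				    NULL, &error_inject_fops);
 */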