/*
 * APEI Generic Hardware Error Source support
 *
 * Generic Hardware Error Source provides a way to report platform
 * hardware errors (such as those from the chipset). It works in the
 * so-called "Firmware First" mode: hardware errors are reported to
 * firmware first, and then reported to Linux by the firmware. This
 * way, the firmware can check non-standard hardware error registers
 * or non-standard hardware interfaces to produce richer hardware
 * error information for Linux.
 *
 * For more information about Generic Hardware Error Source, please
 * refer to ACPI Specification version 4.0, section 17.3.2.6
 *
 * Copyright 2010,2011 Intel Corp.
 *   Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation;
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
#include <linux/kdebug.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/genalloc.h>
#include <linux/pci.h>
#include <linux/aer.h>

#include <acpi/ghes.h>
#include <asm/mce.h>
#include <asm/tlbflush.h>
#include <asm/nmi.h>

#include "apei-internal.h"

#define GHES_PFX	"GHES: "

#define GHES_ESTATUS_MAX_SIZE		65536
#define GHES_ESOURCE_PREALLOC_MAX_SIZE	65536

#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3

/* This is just an estimate for memory pool allocation */
#define GHES_ESTATUS_CACHE_AVG_SIZE	512

#define GHES_ESTATUS_CACHES_SIZE	4

#define GHES_ESTATUS_IN_CACHE_MAX_NSEC	10000000000ULL
/* Prevent too many caches from being allocated because of RCU-deferred freeing */
#define GHES_ESTATUS_CACHE_ALLOCED_MAX	(GHES_ESTATUS_CACHES_SIZE * 3 / 2)

#define GHES_ESTATUS_CACHE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_cache) + (estatus_len))
#define GHES_ESTATUS_FROM_CACHE(estatus_cache)			\
	((struct acpi_generic_status *)				\
	 ((struct ghes_estatus_cache *)(estatus_cache) + 1))

#define GHES_ESTATUS_NODE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_node) + (estatus_len))
#define GHES_ESTATUS_FROM_NODE(estatus_node)			\
	((struct acpi_generic_status *)				\
	 ((struct ghes_estatus_node *)(estatus_node) + 1))

bool ghes_disable;
module_param_named(disable, ghes_disable, bool, 0);
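/*
 * The "disable" parameter above (ghes.disable=1 on the kernel command
 * line) lets users turn GHES off entirely; ghes_init() below returns
 * early with -EINVAL when it is set.
 */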
static int ghes_panic_timeout	__read_mostly = 30;

/*
 * All error sources notified with SCI share one notifier function, so
 * they need to be linked and checked one by one. The same applies to
 * NMI.
 *
 * RCU is used to protect these lists, so ghes_list_mutex is only
 * needed for changing a list, not for traversing it.
 */
static LIST_HEAD(ghes_sci);
static LIST_HEAD(ghes_nmi);
static DEFINE_MUTEX(ghes_list_mutex);

/*
 * An NMI may be triggered on any CPU, so ghes_nmi_lock is used for
 * mutual exclusion.
 */
static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);

/*
 * The memory area used to transfer hardware error information from
 * BIOS to Linux can be determined only in an NMI, IRQ or timer
 * handler, but the generic ioremap cannot be used in atomic context,
 * so a special atomic version of ioremap is implemented here.
 */

/*
 * Two virtual pages are used, one for the NMI context, the other for
 * the IRQ/process context.
 */
#define GHES_IOREMAP_PAGES		2
#define GHES_IOREMAP_NMI_PAGE(base)	(base)
#define GHES_IOREMAP_IRQ_PAGE(base)	((base) + PAGE_SIZE)

/* virtual memory area for atomic ioremap */
static struct vm_struct *ghes_ioremap_area;
/*
 * These two spinlocks prevent the atomic ioremap virtual memory areas
 * from being mapped simultaneously.
 */
static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);

/*
 * printk() is not safe in NMI context.  So in the NMI handler, we
 * allocate the required memory from the lock-less memory allocator
 * (ghes_estatus_pool), save the estatus into it, put it onto the
 * lock-less list (ghes_estatus_llist), and then defer the printk()
 * into IRQ context via irq_work (ghes_proc_irq_work).
 * ghes_estatus_pool_size_request records the pool size required by
 * all NMI error sources.
 */
static struct gen_pool *ghes_estatus_pool;
static unsigned long ghes_estatus_pool_size_request;
static struct llist_head ghes_estatus_llist;
static struct irq_work ghes_proc_irq_work;

struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
static atomic_t ghes_estatus_cache_alloced;

static int ghes_ioremap_init(void)
{
	ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
		VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!ghes_ioremap_area) {
		pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
		return -ENOMEM;
	}

	return 0;
}

static void ghes_ioremap_exit(void)
{
	free_vm_area(ghes_ioremap_area);
}
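/*
 * Map the page at the given PFN into the fixed NMI or IRQ/process
 * slot of the atomic ioremap area.  The caller must hold the matching
 * ghes_ioremap_lock_* and unmap with ghes_iounmap_nmi() or
 * ghes_iounmap_irq() before releasing it.
 */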
static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
{
	unsigned long vaddr;

	vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
			   pfn << PAGE_SHIFT, PAGE_KERNEL);

	return (void __iomem *)vaddr;
}

static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
{
	unsigned long vaddr;

	vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
			   pfn << PAGE_SHIFT, PAGE_KERNEL);

	return (void __iomem *)vaddr;
}

static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
{
	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
	void *base = ghes_ioremap_area->addr;

	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
	__flush_tlb_one(vaddr);
}

static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
{
	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
	void *base = ghes_ioremap_area->addr;

	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
	__flush_tlb_one(vaddr);
}

static int ghes_estatus_pool_init(void)
{
	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
	if (!ghes_estatus_pool)
		return -ENOMEM;
	return 0;
}

static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool,
					      struct gen_pool_chunk *chunk,
					      void *data)
{
	free_page(chunk->start_addr);
}

static void ghes_estatus_pool_exit(void)
{
	gen_pool_for_each_chunk(ghes_estatus_pool,
				ghes_estatus_pool_free_chunk_page, NULL);
	gen_pool_destroy(ghes_estatus_pool);
}
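/*
 * Grow the estatus pool to cover the accumulated size request.  The
 * request is page-aligned and summed across callers, and the pool is
 * topped up one page at a time until it is at least that large.
 */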
static int ghes_estatus_pool_expand(unsigned long len)
{
	unsigned long i, pages, size, addr;
	int ret;

	ghes_estatus_pool_size_request += PAGE_ALIGN(len);
	size = gen_pool_size(ghes_estatus_pool);
	if (size >= ghes_estatus_pool_size_request)
		return 0;
	pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE;
	for (i = 0; i < pages; i++) {
		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			return -ENOMEM;
		ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1);
		if (ret)
			return ret;
	}

	return 0;
}

static void ghes_estatus_pool_shrink(unsigned long len)
{
	ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
}

static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
	struct ghes *ghes;
	unsigned int error_block_length;
	int rc;

	ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
	if (!ghes)
		return ERR_PTR(-ENOMEM);
	ghes->generic = generic;
	rc = apei_map_generic_address(&generic->error_status_address);
	if (rc)
		goto err_free;
	error_block_length = generic->error_block_length;
	if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
		pr_warning(FW_WARN GHES_PFX
			   "Error status block length is too long: %u for "
			   "generic hardware error source: %d.\n",
			   error_block_length, generic->header.source_id);
		error_block_length = GHES_ESTATUS_MAX_SIZE;
	}
	ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
	if (!ghes->estatus) {
		rc = -ENOMEM;
		goto err_unmap;
	}

	return ghes;

err_unmap:
	apei_unmap_generic_address(&generic->error_status_address);
err_free:
	kfree(ghes);
	return ERR_PTR(rc);
}

static void ghes_fini(struct ghes *ghes)
{
	kfree(ghes->estatus);
	apei_unmap_generic_address(&ghes->generic->error_status_address);
}

static inline int ghes_severity(int severity)
{
	switch (severity) {
	case CPER_SEV_INFORMATIONAL:
		return GHES_SEV_NO;
	case CPER_SEV_CORRECTED:
		return GHES_SEV_CORRECTED;
	case CPER_SEV_RECOVERABLE:
		return GHES_SEV_RECOVERABLE;
	case CPER_SEV_FATAL:
		return GHES_SEV_PANIC;
	default:
		/* Unknown, go panic */
		return GHES_SEV_PANIC;
	}
}
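/*
 * Copy between a kernel buffer and physical memory, one page mapping
 * at a time, via the atomic ioremap slot matching the current context
 * (NMI vs. IRQ/process).
 */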
static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
				  int from_phys)
{
	void __iomem *vaddr;
	unsigned long flags = 0;
	int in_nmi = in_nmi();
	u64 offset;
	u32 trunk;

	while (len > 0) {
		offset = paddr - (paddr & PAGE_MASK);
		if (in_nmi) {
			raw_spin_lock(&ghes_ioremap_lock_nmi);
			vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
		} else {
			spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
			vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
		}
		trunk = PAGE_SIZE - offset;
		trunk = min(trunk, len);
		if (from_phys)
			memcpy_fromio(buffer, vaddr + offset, trunk);
		else
			memcpy_toio(vaddr + offset, buffer, trunk);
		len -= trunk;
		paddr += trunk;
		buffer += trunk;
		if (in_nmi) {
			ghes_iounmap_nmi(vaddr);
			raw_spin_unlock(&ghes_ioremap_lock_nmi);
		} else {
			ghes_iounmap_irq(vaddr);
			spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
		}
	}
}

static int ghes_read_estatus(struct ghes *ghes, int silent)
{
	struct acpi_hest_generic *g = ghes->generic;
	u64 buf_paddr;
	u32 len;
	int rc;

	rc = apei_read(&buf_paddr, &g->error_status_address);
	if (rc) {
		if (!silent && printk_ratelimit())
			pr_warning(FW_WARN GHES_PFX
				   "Failed to read error status block address for hardware error source: %d.\n",
				   g->header.source_id);
		return -EIO;
	}
	if (!buf_paddr)
		return -ENOENT;

	ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
			      sizeof(*ghes->estatus), 1);
	if (!ghes->estatus->block_status)
		return -ENOENT;

	ghes->buffer_paddr = buf_paddr;
	ghes->flags |= GHES_TO_CLEAR;

	rc = -EIO;
	len = cper_estatus_len(ghes->estatus);
	if (len < sizeof(*ghes->estatus))
		goto err_read_block;
	if (len > ghes->generic->error_block_length)
		goto err_read_block;
	if (cper_estatus_check_header(ghes->estatus))
		goto err_read_block;
	ghes_copy_tofrom_phys(ghes->estatus + 1,
			      buf_paddr + sizeof(*ghes->estatus),
			      len - sizeof(*ghes->estatus), 1);
	if (cper_estatus_check(ghes->estatus))
		goto err_read_block;
	rc = 0;

err_read_block:
	if (rc && !silent && printk_ratelimit())
		pr_warning(FW_WARN GHES_PFX
			   "Failed to read error status block!\n");
	return rc;
}

static void ghes_clear_estatus(struct ghes *ghes)
{
	ghes->estatus->block_status = 0;
	if (!(ghes->flags & GHES_TO_CLEAR))
		return;
	ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
			      sizeof(ghes->estatus->block_status), 0);
	ghes->flags &= ~GHES_TO_CLEAR;
}

static void ghes_handle_memory_failure(struct acpi_generic_data *gdata, int sev)
{
#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
	unsigned long pfn;
	int flags = -1;
	int sec_sev = ghes_severity(gdata->error_severity);
	struct cper_sec_mem_err *mem_err;
	mem_err = (struct cper_sec_mem_err *)(gdata + 1);

	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
		return;

	pfn = mem_err->physical_addr >> PAGE_SHIFT;
	if (!pfn_valid(pfn)) {
		pr_warn_ratelimited(FW_WARN GHES_PFX
				    "Invalid address in generic error data: %#llx\n",
				    mem_err->physical_addr);
		return;
	}

	/* Only the following two error types can be handled properly for now */
	if (sec_sev == GHES_SEV_CORRECTED &&
	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
		flags = MF_SOFT_OFFLINE;
	if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
		flags = 0;

	if (flags != -1)
		memory_failure_queue(pfn, 0, flags);
#endif
}
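/*
 * Walk every section of a decoded error status block and dispatch it
 * by section type: platform memory errors go to EDAC, MCE and the
 * memory-failure machinery; PCIe errors are queued for AER recovery.
 */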
static void ghes_do_proc(struct ghes *ghes,
			 const struct acpi_generic_status *estatus)
{
	int sev, sec_sev;
	struct acpi_generic_data *gdata;

	sev = ghes_severity(estatus->error_severity);
	apei_estatus_for_each_section(estatus, gdata) {
		sec_sev = ghes_severity(gdata->error_severity);
		if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
				 CPER_SEC_PLATFORM_MEM)) {
			struct cper_sec_mem_err *mem_err;
			mem_err = (struct cper_sec_mem_err *)(gdata+1);
			ghes_edac_report_mem_error(ghes, sev, mem_err);

#ifdef CONFIG_X86_MCE
			apei_mce_report_mem_error(sev, mem_err);
#endif
			ghes_handle_memory_failure(gdata, sev);
		}
#ifdef CONFIG_ACPI_APEI_PCIEAER
		else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
				      CPER_SEC_PCIE)) {
			struct cper_sec_pcie *pcie_err;
			pcie_err = (struct cper_sec_pcie *)(gdata+1);
			if (sev == GHES_SEV_RECOVERABLE &&
			    sec_sev == GHES_SEV_RECOVERABLE &&
			    pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
			    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
				unsigned int devfn;
				int aer_severity;

				devfn = PCI_DEVFN(pcie_err->device_id.device,
						  pcie_err->device_id.function);
				aer_severity = cper_severity_to_aer(sev);

				/*
				 * If firmware reset the component to contain
				 * the error, we must reinitialize it before
				 * use, so treat it as a fatal AER error.
				 */
				if (gdata->flags & CPER_SEC_RESET)
					aer_severity = AER_FATAL;

				aer_recover_queue(pcie_err->device_id.segment,
						  pcie_err->device_id.bus,
						  devfn, aer_severity,
						  (struct aer_capability_regs *)
						  pcie_err->aer_info);
			}

		}
#endif
	}
}

static void __ghes_print_estatus(const char *pfx,
				 const struct acpi_hest_generic *generic,
				 const struct acpi_generic_status *estatus)
{
	static atomic_t seqno;
	unsigned int curr_seqno;
	char pfx_seq[64];

	if (pfx == NULL) {
		if (ghes_severity(estatus->error_severity) <=
		    GHES_SEV_CORRECTED)
			pfx = KERN_WARNING;
		else
			pfx = KERN_ERR;
	}
	curr_seqno = atomic_inc_return(&seqno);
	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
	       pfx_seq, generic->header.source_id);
	cper_estatus_print(pfx_seq, estatus);
}

static int ghes_print_estatus(const char *pfx,
			      const struct acpi_hest_generic *generic,
			      const struct acpi_generic_status *estatus)
{
	/* Not more than 2 messages every 5 seconds */
	static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
	static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
	struct ratelimit_state *ratelimit;

	if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
		ratelimit = &ratelimit_corrected;
	else
		ratelimit = &ratelimit_uncorrected;
	if (__ratelimit(ratelimit)) {
		__ghes_print_estatus(pfx, generic, estatus);
		return 1;
	}
	return 0;
}

/*
 * Throttle GHES error status reporting, so that more kinds of errors
 * get reported instead of just the most frequently occurring ones.
 */
static int ghes_estatus_cached(struct acpi_generic_status *estatus)
{
	u32 len;
	int i, cached = 0;
	unsigned long long now;
	struct ghes_estatus_cache *cache;
	struct acpi_generic_status *cache_estatus;

	len = cper_estatus_len(estatus);
	rcu_read_lock();
	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		if (cache == NULL)
			continue;
		if (len != cache->estatus_len)
			continue;
		cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
		if (memcmp(estatus, cache_estatus, len))
			continue;
		atomic_inc(&cache->count);
		now = sched_clock();
		if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
			cached = 1;
		break;
	}
	rcu_read_unlock();
	return cached;
}

static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
	struct acpi_hest_generic *generic,
	struct acpi_generic_status *estatus)
{
	int alloced;
	u32 len, cache_len;
	struct ghes_estatus_cache *cache;
	struct acpi_generic_status *cache_estatus;

	alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
	if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
		atomic_dec(&ghes_estatus_cache_alloced);
		return NULL;
	}
	len = cper_estatus_len(estatus);
	cache_len = GHES_ESTATUS_CACHE_LEN(len);
	cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
	if (!cache) {
		atomic_dec(&ghes_estatus_cache_alloced);
		return NULL;
	}
	cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
	memcpy(cache_estatus, estatus, len);
	cache->estatus_len = len;
	atomic_set(&cache->count, 0);
	cache->generic = generic;
	cache->time_in = sched_clock();
	return cache;
}

static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
{
	u32 len;

	len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
	len = GHES_ESTATUS_CACHE_LEN(len);
	gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
	atomic_dec(&ghes_estatus_cache_alloced);
}

static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
{
	struct ghes_estatus_cache *cache;

	cache = container_of(head, struct ghes_estatus_cache, rcu);
	ghes_estatus_cache_free(cache);
}
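/*
 * Insert a new entry into the cache array.  The victim slot is the
 * first empty or expired slot, or failing that, the slot whose entry
 * has the longest average period between occurrences (i.e. the rarest
 * error), so the most frequent errors stay cached and throttled.  The
 * swap is done with cmpxchg() so that concurrent updaters cannot free
 * the same old entry twice.
 */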
static void ghes_estatus_cache_add(
	struct acpi_hest_generic *generic,
	struct acpi_generic_status *estatus)
{
	int i, slot = -1, count;
	unsigned long long now, duration, period, max_period = 0;
	struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache;

	new_cache = ghes_estatus_cache_alloc(generic, estatus);
	if (new_cache == NULL)
		return;
	rcu_read_lock();
	now = sched_clock();
	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		if (cache == NULL) {
			slot = i;
			slot_cache = NULL;
			break;
		}
		duration = now - cache->time_in;
		if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
			slot = i;
			slot_cache = cache;
			break;
		}
		count = atomic_read(&cache->count);
		period = duration;
		do_div(period, (count + 1));
		if (period > max_period) {
			max_period = period;
			slot = i;
			slot_cache = cache;
		}
	}
	/* new_cache must be put into the array only after its contents are written */
	smp_wmb();
	if (slot != -1 && cmpxchg(ghes_estatus_caches + slot,
				  slot_cache, new_cache) == slot_cache) {
		if (slot_cache)
			call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free);
	} else
		ghes_estatus_cache_free(new_cache);
	rcu_read_unlock();
}

static int ghes_proc(struct ghes *ghes)
{
	int rc;

	rc = ghes_read_estatus(ghes, 0);
	if (rc)
		goto out;
	if (!ghes_estatus_cached(ghes->estatus)) {
		if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus))
			ghes_estatus_cache_add(ghes->generic, ghes->estatus);
	}
	ghes_do_proc(ghes, ghes->estatus);
out:
	ghes_clear_estatus(ghes);
	return rc;
}

static void ghes_add_timer(struct ghes *ghes)
{
	struct acpi_hest_generic *g = ghes->generic;
	unsigned long expire;

	if (!g->notify.poll_interval) {
		pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
			   g->header.source_id);
		return;
	}
	expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
	ghes->timer.expires = round_jiffies_relative(expire);
	add_timer(&ghes->timer);
}

static void ghes_poll_func(unsigned long data)
{
	struct ghes *ghes = (void *)data;

	ghes_proc(ghes);
	if (!(ghes->flags & GHES_EXITING))
		ghes_add_timer(ghes);
}

static irqreturn_t ghes_irq_func(int irq, void *data)
{
	struct ghes *ghes = data;
	int rc;

	rc = ghes_proc(ghes);
	if (rc)
		return IRQ_NONE;

	return IRQ_HANDLED;
}

static int ghes_notify_sci(struct notifier_block *this,
			   unsigned long event, void *data)
{
	struct ghes *ghes;
	int ret = NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(ghes, &ghes_sci, list) {
		if (!ghes_proc(ghes))
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct llist_node *llist_nodes_reverse(struct llist_node *llnode)
{
	struct llist_node *next, *tail = NULL;

	while (llnode) {
		next = llnode->next;
		llnode->next = tail;
		tail = llnode;
		llnode = next;
	}

	return tail;
}
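/*
 * Drain the estatus nodes queued from NMI context: process and report
 * each one here in IRQ context, where printk() is safe, then return
 * the node's memory to the estatus pool.
 */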
static void ghes_proc_in_irq(struct irq_work *irq_work)
{
	struct llist_node *llnode, *next;
	struct ghes_estatus_node *estatus_node;
	struct acpi_hest_generic *generic;
	struct acpi_generic_status *estatus;
	u32 len, node_len;

	llnode = llist_del_all(&ghes_estatus_llist);
	/*
	 * Because the estatus entries on the list are in reversed time
	 * order, revert them back into the proper order.
	 */
	llnode = llist_nodes_reverse(llnode);
	while (llnode) {
		next = llnode->next;
		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
					   llnode);
		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
		len = cper_estatus_len(estatus);
		node_len = GHES_ESTATUS_NODE_LEN(len);
		ghes_do_proc(estatus_node->ghes, estatus);
		if (!ghes_estatus_cached(estatus)) {
			generic = estatus_node->generic;
			if (ghes_print_estatus(NULL, generic, estatus))
				ghes_estatus_cache_add(generic, estatus);
		}
		gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
			      node_len);
		llnode = next;
	}
}

static void ghes_print_queued_estatus(void)
{
	struct llist_node *llnode;
	struct ghes_estatus_node *estatus_node;
	struct acpi_hest_generic *generic;
	struct acpi_generic_status *estatus;
	u32 len, node_len;

	llnode = llist_del_all(&ghes_estatus_llist);
	/*
	 * Because the estatus entries on the list are in reversed time
	 * order, revert them back into the proper order.
	 */
	llnode = llist_nodes_reverse(llnode);
	while (llnode) {
		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
					   llnode);
		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
		len = cper_estatus_len(estatus);
		node_len = GHES_ESTATUS_NODE_LEN(len);
		generic = estatus_node->generic;
		ghes_print_estatus(NULL, generic, estatus);
		llnode = llnode->next;
	}
}
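/*
 * NMI handler: read the estatus from every NMI-notified error source,
 * panic immediately if any of them is fatal, and otherwise queue the
 * estatus onto ghes_estatus_llist so it can be processed and reported
 * safely from IRQ context via irq_work.
 */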
static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
{
	struct ghes *ghes, *ghes_global = NULL;
	int sev, sev_global = -1;
	int ret = NMI_DONE;

	raw_spin_lock(&ghes_nmi_lock);
	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
		if (ghes_read_estatus(ghes, 1)) {
			ghes_clear_estatus(ghes);
			continue;
		}
		sev = ghes_severity(ghes->estatus->error_severity);
		if (sev > sev_global) {
			sev_global = sev;
			ghes_global = ghes;
		}
		ret = NMI_HANDLED;
	}

	if (ret == NMI_DONE)
		goto out;

	if (sev_global >= GHES_SEV_PANIC) {
		oops_begin();
		ghes_print_queued_estatus();
		__ghes_print_estatus(KERN_EMERG, ghes_global->generic,
				     ghes_global->estatus);
		/* reboot to log the error! */
		if (panic_timeout == 0)
			panic_timeout = ghes_panic_timeout;
		panic("Fatal hardware error!");
	}

	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
		u32 len, node_len;
		struct ghes_estatus_node *estatus_node;
		struct acpi_generic_status *estatus;
#endif
		if (!(ghes->flags & GHES_TO_CLEAR))
			continue;
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
		if (ghes_estatus_cached(ghes->estatus))
			goto next;
		/* Save estatus for further processing in IRQ context */
		len = cper_estatus_len(ghes->estatus);
		node_len = GHES_ESTATUS_NODE_LEN(len);
		estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool,
						      node_len);
		if (estatus_node) {
			estatus_node->ghes = ghes;
			estatus_node->generic = ghes->generic;
			estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
			memcpy(estatus, ghes->estatus, len);
			llist_add(&estatus_node->llnode, &ghes_estatus_llist);
		}
next:
#endif
		ghes_clear_estatus(ghes);
	}
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	irq_work_queue(&ghes_proc_irq_work);
#endif

out:
	raw_spin_unlock(&ghes_nmi_lock);
	return ret;
}

static struct notifier_block ghes_notifier_sci = {
	.notifier_call = ghes_notify_sci,
};

static unsigned long ghes_esource_prealloc_size(
	const struct acpi_hest_generic *generic)
{
	unsigned long block_length, prealloc_records, prealloc_size;

	block_length = min_t(unsigned long, generic->error_block_length,
			     GHES_ESTATUS_MAX_SIZE);
	prealloc_records = max_t(unsigned long,
				 generic->records_to_preallocate, 1);
	prealloc_size = min_t(unsigned long, block_length * prealloc_records,
			      GHES_ESOURCE_PREALLOC_MAX_SIZE);

	return prealloc_size;
}
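/*
 * Set up one error source: validate its notification type and error
 * block length, allocate the struct ghes, register it with ghes_edac,
 * and hook up the poll timer, IRQ, SCI notifier or NMI handler that
 * matches the notification type.
 */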
static int ghes_probe(struct platform_device *ghes_dev)
{
	struct acpi_hest_generic *generic;
	struct ghes *ghes = NULL;
	unsigned long len;
	int rc = -EINVAL;

	generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
	if (!generic->enabled)
		return -ENODEV;

	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
	case ACPI_HEST_NOTIFY_EXTERNAL:
	case ACPI_HEST_NOTIFY_SCI:
	case ACPI_HEST_NOTIFY_NMI:
		break;
	case ACPI_HEST_NOTIFY_LOCAL:
		pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
			   generic->header.source_id);
		goto err;
	default:
		pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
			   generic->notify.type, generic->header.source_id);
		goto err;
	}

	rc = -EIO;
	if (generic->error_block_length <
	    sizeof(struct acpi_generic_status)) {
		pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
			   generic->error_block_length,
			   generic->header.source_id);
		goto err;
	}
	ghes = ghes_new(generic);
	if (IS_ERR(ghes)) {
		rc = PTR_ERR(ghes);
		ghes = NULL;
		goto err;
	}

	rc = ghes_edac_register(ghes, &ghes_dev->dev);
	if (rc < 0)
		goto err;

	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		ghes->timer.function = ghes_poll_func;
		ghes->timer.data = (unsigned long)ghes;
		init_timer_deferrable(&ghes->timer);
		ghes_add_timer(ghes);
		break;
	case ACPI_HEST_NOTIFY_EXTERNAL:
		/* External interrupt vector is GSI */
		rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
		if (rc) {
			pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
			       generic->header.source_id);
			goto err_edac_unreg;
		}
		rc = request_irq(ghes->irq, ghes_irq_func, 0, "GHES IRQ", ghes);
		if (rc) {
			pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
			       generic->header.source_id);
			goto err_edac_unreg;
		}
		break;
	case ACPI_HEST_NOTIFY_SCI:
		mutex_lock(&ghes_list_mutex);
		if (list_empty(&ghes_sci))
			register_acpi_hed_notifier(&ghes_notifier_sci);
		list_add_rcu(&ghes->list, &ghes_sci);
		mutex_unlock(&ghes_list_mutex);
		break;
	case ACPI_HEST_NOTIFY_NMI:
		len = ghes_esource_prealloc_size(generic);
		ghes_estatus_pool_expand(len);
		mutex_lock(&ghes_list_mutex);
		if (list_empty(&ghes_nmi))
			register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0,
					     "ghes");
		list_add_rcu(&ghes->list, &ghes_nmi);
		mutex_unlock(&ghes_list_mutex);
		break;
	default:
		BUG();
	}
	platform_set_drvdata(ghes_dev, ghes);

	return 0;
err_edac_unreg:
	ghes_edac_unregister(ghes);
err:
	if (ghes) {
		ghes_fini(ghes);
		kfree(ghes);
	}
	return rc;
}

static int ghes_remove(struct platform_device *ghes_dev)
{
	struct ghes *ghes;
	struct acpi_hest_generic *generic;
	unsigned long len;

	ghes = platform_get_drvdata(ghes_dev);
	generic = ghes->generic;

	ghes->flags |= GHES_EXITING;
	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		del_timer_sync(&ghes->timer);
		break;
	case ACPI_HEST_NOTIFY_EXTERNAL:
		free_irq(ghes->irq, ghes);
		break;
	case ACPI_HEST_NOTIFY_SCI:
		mutex_lock(&ghes_list_mutex);
		list_del_rcu(&ghes->list);
		if (list_empty(&ghes_sci))
			unregister_acpi_hed_notifier(&ghes_notifier_sci);
		mutex_unlock(&ghes_list_mutex);
		break;
	case ACPI_HEST_NOTIFY_NMI:
		mutex_lock(&ghes_list_mutex);
		list_del_rcu(&ghes->list);
		if (list_empty(&ghes_nmi))
			unregister_nmi_handler(NMI_LOCAL, "ghes");
		mutex_unlock(&ghes_list_mutex);
		/*
		 * To synchronize with the NMI handler, the ghes can be
		 * freed only after the NMI handler has finished.
		 */
		synchronize_rcu();
		len = ghes_esource_prealloc_size(generic);
		ghes_estatus_pool_shrink(len);
		break;
	default:
		BUG();
		break;
	}

	ghes_fini(ghes);

	ghes_edac_unregister(ghes);

	kfree(ghes);

	platform_set_drvdata(ghes_dev, NULL);

	return 0;
}

static struct platform_driver ghes_platform_driver = {
	.driver		= {
		.name	= "GHES",
		.owner	= THIS_MODULE,
	},
	.probe		= ghes_probe,
	.remove		= ghes_remove,
};

static int __init ghes_init(void)
{
	int rc;

	if (acpi_disabled)
		return -ENODEV;

	if (hest_disable) {
		pr_info(GHES_PFX "HEST is not enabled!\n");
		return -EINVAL;
	}

	if (ghes_disable) {
		pr_info(GHES_PFX "GHES is not enabled!\n");
		return -EINVAL;
	}

	init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);

	rc = ghes_ioremap_init();
	if (rc)
		goto err;

	rc = ghes_estatus_pool_init();
	if (rc)
		goto err_ioremap_exit;

	rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE *
				      GHES_ESTATUS_CACHE_ALLOCED_MAX);
	if (rc)
		goto err_pool_exit;

	rc = platform_driver_register(&ghes_platform_driver);
	if (rc)
		goto err_pool_exit;

	rc = apei_osc_setup();
	if (rc == 0 && osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
	else if (rc == 0 && !osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
	else if (rc && osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
	else
		pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");

	return 0;
err_pool_exit:
	ghes_estatus_pool_exit();
err_ioremap_exit:
	ghes_ioremap_exit();
err:
	return rc;
}

static void __exit ghes_exit(void)
{
	platform_driver_unregister(&ghes_platform_driver);
	ghes_estatus_pool_exit();
	ghes_ioremap_exit();
}

module_init(ghes_init);
module_exit(ghes_exit);

MODULE_AUTHOR("Huang Ying");
MODULE_DESCRIPTION("APEI Generic Hardware Error Source support");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:GHES");