/*
 * APEI Generic Hardware Error Source support
 *
 * Generic Hardware Error Source provides a way to report platform
 * hardware errors (such as those from chipsets). It works in the
 * so-called "Firmware First" mode: hardware errors are reported to
 * firmware first, and the firmware then reports them to Linux. This
 * way, the firmware can inspect non-standard hardware error registers
 * or non-standard hardware links to produce richer hardware error
 * information for Linux.
 *
 * For more information about Generic Hardware Error Source, please
 * refer to ACPI Specification version 4.0, section 17.3.2.6
 *
 * Copyright 2010,2011 Intel Corp.
 *   Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation;
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/acpi_io.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
#include <linux/kdebug.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/genalloc.h>
#include <linux/pci.h>
#include <linux/aer.h>

#include <acpi/ghes.h>
#include <asm/mce.h>
#include <asm/tlbflush.h>
#include <asm/nmi.h>

#include "apei-internal.h"

#define GHES_PFX	"GHES: "

#define GHES_ESTATUS_MAX_SIZE		65536
#define GHES_ESOURCE_PREALLOC_MAX_SIZE	65536

#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3

/* This is just an estimation for memory pool allocation */
#define GHES_ESTATUS_CACHE_AVG_SIZE	512

#define GHES_ESTATUS_CACHES_SIZE	4

#define GHES_ESTATUS_IN_CACHE_MAX_NSEC	10000000000ULL
/* Prevent too many caches from being allocated because of RCU */
#define GHES_ESTATUS_CACHE_ALLOCED_MAX	(GHES_ESTATUS_CACHES_SIZE * 3 / 2)

#define GHES_ESTATUS_CACHE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_cache) + (estatus_len))
#define GHES_ESTATUS_FROM_CACHE(estatus_cache)			\
	((struct acpi_generic_status *)				\
	 ((struct ghes_estatus_cache *)(estatus_cache) + 1))

#define GHES_ESTATUS_NODE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_node) + (estatus_len))
#define GHES_ESTATUS_FROM_NODE(estatus_node)			\
	((struct acpi_generic_status *)				\
	 ((struct ghes_estatus_node *)(estatus_node) + 1))
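
/*
 * A cached or queued error status block lives immediately after its
 * struct ghes_estatus_cache / struct ghes_estatus_node header in a
 * single allocation; the GHES_ESTATUS_FROM_*() macros above recover a
 * pointer to the payload by stepping over the header.
 */
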
bool ghes_disable;
module_param_named(disable, ghes_disable, bool, 0);

static int ghes_panic_timeout	__read_mostly = 30;

/*
 * All error sources notified via SCI share one notifier function, so
 * they need to be linked and checked one by one. The same applies to
 * the NMI notification type.
 *
 * RCU is used for these lists, so ghes_list_mutex is only used for
 * list changing, not for traversing.
 */
static LIST_HEAD(ghes_sci);
static LIST_HEAD(ghes_nmi);
static DEFINE_MUTEX(ghes_list_mutex);

/*
 * An NMI may be triggered on any CPU, so ghes_nmi_lock is used for
 * mutual exclusion.
 */
static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);

/*
 * The memory area used to transfer hardware error information from
 * BIOS to Linux can be determined only in an NMI, IRQ or timer
 * handler, but the general ioremap cannot be used in atomic context,
 * so a special version of atomic ioremap is implemented for that.
 */

/*
 * Two virtual pages are used, one for the NMI context, the other for
 * the IRQ/process context.
 */
#define GHES_IOREMAP_PAGES		2
#define GHES_IOREMAP_NMI_PAGE(base)	(base)
#define GHES_IOREMAP_IRQ_PAGE(base)	((base) + PAGE_SIZE)

/* virtual memory area for atomic ioremap */
static struct vm_struct *ghes_ioremap_area;
/*
 * These two spinlocks are used to prevent the atomic ioremap virtual
 * memory areas from being mapped simultaneously.
 */
static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);

/*
 * printk is not safe in NMI context. So in the NMI handler, we
 * allocate the required memory from the lock-less memory allocator
 * (ghes_estatus_pool), save the estatus into it, put it onto a
 * lock-less list (ghes_estatus_llist), then delay the printk into IRQ
 * context via irq_work (ghes_proc_irq_work).
 * ghes_estatus_pool_size_request records the pool size required by
 * all NMI error sources.
 */
static struct gen_pool *ghes_estatus_pool;
static unsigned long ghes_estatus_pool_size_request;
static struct llist_head ghes_estatus_llist;
static struct irq_work ghes_proc_irq_work;

struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
static atomic_t ghes_estatus_cache_alloced;

static int ghes_ioremap_init(void)
{
	ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
					  VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!ghes_ioremap_area) {
		pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
		return -ENOMEM;
	}

	return 0;
}

static void ghes_ioremap_exit(void)
{
	free_vm_area(ghes_ioremap_area);
}

static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
{
	unsigned long vaddr;

	vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
			   pfn << PAGE_SHIFT, PAGE_KERNEL);

	return (void __iomem *)vaddr;
}

static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
{
	unsigned long vaddr;

	vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
			   pfn << PAGE_SHIFT, PAGE_KERNEL);

	return (void __iomem *)vaddr;
}

static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
{
	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
	void *base = ghes_ioremap_area->addr;

	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
	__flush_tlb_one(vaddr);
}

static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
{
	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
	void *base = ghes_ioremap_area->addr;

	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
	__flush_tlb_one(vaddr);
}
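
/*
 * Typical usage of the atomic ioremap helpers above, as done by
 * ghes_copy_tofrom_phys() below (sketch of the NMI path):
 *
 *	raw_spin_lock(&ghes_ioremap_lock_nmi);
 *	vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
 *	memcpy_fromio(buffer, vaddr + offset, trunk);
 *	ghes_iounmap_nmi(vaddr);
 *	raw_spin_unlock(&ghes_ioremap_lock_nmi);
 *
 * The NMI and IRQ/process paths use separate pages and separate locks
 * so that an NMI arriving in the middle of an IRQ-context copy cannot
 * corrupt the mapping in use.
 */
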
static int ghes_estatus_pool_init(void)
{
	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
	if (!ghes_estatus_pool)
		return -ENOMEM;
	return 0;
}

static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool,
					      struct gen_pool_chunk *chunk,
					      void *data)
{
	free_page(chunk->start_addr);
}

static void ghes_estatus_pool_exit(void)
{
	gen_pool_for_each_chunk(ghes_estatus_pool,
				ghes_estatus_pool_free_chunk_page, NULL);
	gen_pool_destroy(ghes_estatus_pool);
}

static int ghes_estatus_pool_expand(unsigned long len)
{
	unsigned long i, pages, size, addr;
	int ret;

	ghes_estatus_pool_size_request += PAGE_ALIGN(len);
	size = gen_pool_size(ghes_estatus_pool);
	if (size >= ghes_estatus_pool_size_request)
		return 0;
	pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE;
	for (i = 0; i < pages; i++) {
		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			return -ENOMEM;
		ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1);
		if (ret)
			return ret;
	}

	return 0;
}

static void ghes_estatus_pool_shrink(unsigned long len)
{
	ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
}

static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
	struct ghes *ghes;
	unsigned int error_block_length;
	int rc;

	ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
	if (!ghes)
		return ERR_PTR(-ENOMEM);
	ghes->generic = generic;
	rc = apei_map_generic_address(&generic->error_status_address);
	if (rc)
		goto err_free;
	error_block_length = generic->error_block_length;
	if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
		pr_warning(FW_WARN GHES_PFX
			   "Error status block length is too long: %u for "
			   "generic hardware error source: %d.\n",
			   error_block_length, generic->header.source_id);
		error_block_length = GHES_ESTATUS_MAX_SIZE;
	}
	ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
	if (!ghes->estatus) {
		rc = -ENOMEM;
		goto err_unmap;
	}

	return ghes;

err_unmap:
	apei_unmap_generic_address(&generic->error_status_address);
err_free:
	kfree(ghes);
	return ERR_PTR(rc);
}

static void ghes_fini(struct ghes *ghes)
{
	kfree(ghes->estatus);
	apei_unmap_generic_address(&ghes->generic->error_status_address);
}

static inline int ghes_severity(int severity)
{
	switch (severity) {
	case CPER_SEV_INFORMATIONAL:
		return GHES_SEV_NO;
	case CPER_SEV_CORRECTED:
		return GHES_SEV_CORRECTED;
	case CPER_SEV_RECOVERABLE:
		return GHES_SEV_RECOVERABLE;
	case CPER_SEV_FATAL:
		return GHES_SEV_PANIC;
	default:
		/* Unknown, go panic */
		return GHES_SEV_PANIC;
	}
}

static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
				  int from_phys)
{
	void __iomem *vaddr;
	unsigned long flags = 0;
	int in_nmi = in_nmi();
	u64 offset;
	u32 trunk;

	while (len > 0) {
		offset = paddr - (paddr & PAGE_MASK);
		if (in_nmi) {
			raw_spin_lock(&ghes_ioremap_lock_nmi);
			vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
		} else {
			spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
			vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
		}
		trunk = PAGE_SIZE - offset;
		trunk = min(trunk, len);
		if (from_phys)
			memcpy_fromio(buffer, vaddr + offset, trunk);
		else
			memcpy_toio(vaddr + offset, buffer, trunk);
		len -= trunk;
		paddr += trunk;
		buffer += trunk;
		if (in_nmi) {
			ghes_iounmap_nmi(vaddr);
			raw_spin_unlock(&ghes_ioremap_lock_nmi);
		} else {
			ghes_iounmap_irq(vaddr);
			spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
		}
	}
}
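
/*
 * Reading an error status block is a two-stage copy: the fixed header
 * (struct acpi_generic_status) is fetched first to learn the total
 * record length, then the remainder is copied and the whole CPER
 * record is validated before it is used.
 */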
static int ghes_read_estatus(struct ghes *ghes, int silent)
{
	struct acpi_hest_generic *g = ghes->generic;
	u64 buf_paddr;
	u32 len;
	int rc;

	rc = apei_read(&buf_paddr, &g->error_status_address);
	if (rc) {
		if (!silent && printk_ratelimit())
			pr_warning(FW_WARN GHES_PFX
				   "Failed to read error status block address for hardware error source: %d.\n",
				   g->header.source_id);
		return -EIO;
	}
	if (!buf_paddr)
		return -ENOENT;

	ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
			      sizeof(*ghes->estatus), 1);
	if (!ghes->estatus->block_status)
		return -ENOENT;

	ghes->buffer_paddr = buf_paddr;
	ghes->flags |= GHES_TO_CLEAR;

	rc = -EIO;
	len = cper_estatus_len(ghes->estatus);
	if (len < sizeof(*ghes->estatus))
		goto err_read_block;
	if (len > ghes->generic->error_block_length)
		goto err_read_block;
	if (cper_estatus_check_header(ghes->estatus))
		goto err_read_block;
	ghes_copy_tofrom_phys(ghes->estatus + 1,
			      buf_paddr + sizeof(*ghes->estatus),
			      len - sizeof(*ghes->estatus), 1);
	if (cper_estatus_check(ghes->estatus))
		goto err_read_block;
	rc = 0;

err_read_block:
	if (rc && !silent && printk_ratelimit())
		pr_warning(FW_WARN GHES_PFX
			   "Failed to read error status block!\n");
	return rc;
}

static void ghes_clear_estatus(struct ghes *ghes)
{
	ghes->estatus->block_status = 0;
	if (!(ghes->flags & GHES_TO_CLEAR))
		return;
	ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
			      sizeof(ghes->estatus->block_status), 0);
	ghes->flags &= ~GHES_TO_CLEAR;
}

static void ghes_handle_memory_failure(struct acpi_generic_data *gdata, int sev)
{
#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
	unsigned long pfn;
	int sec_sev = ghes_severity(gdata->error_severity);
	struct cper_sec_mem_err *mem_err;

	mem_err = (struct cper_sec_mem_err *)(gdata + 1);

	if (sec_sev == GHES_SEV_CORRECTED &&
	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED) &&
	    (mem_err->validation_bits & CPER_MEM_VALID_PA)) {
		pfn = mem_err->physical_addr >> PAGE_SHIFT;
		if (pfn_valid(pfn))
			memory_failure_queue(pfn, 0, MF_SOFT_OFFLINE);
		else if (printk_ratelimit())
			pr_warn(FW_WARN GHES_PFX
				"Invalid address in generic error data: %#llx\n",
				mem_err->physical_addr);
	}
	if (sev == GHES_SEV_RECOVERABLE &&
	    sec_sev == GHES_SEV_RECOVERABLE &&
	    mem_err->validation_bits & CPER_MEM_VALID_PA) {
		pfn = mem_err->physical_addr >> PAGE_SHIFT;
		memory_failure_queue(pfn, 0, 0);
	}
#endif
}
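
/*
 * Dispatch each section of an error status block by its section type
 * GUID: platform memory errors are fed to EDAC, the x86 MCE bridge
 * and the memory failure handler; PCIe errors carrying enough
 * validation bits are queued for AER recovery.
 */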
static void ghes_do_proc(struct ghes *ghes,
			 const struct acpi_generic_status *estatus)
{
	int sev, sec_sev;
	struct acpi_generic_data *gdata;

	sev = ghes_severity(estatus->error_severity);
	apei_estatus_for_each_section(estatus, gdata) {
		sec_sev = ghes_severity(gdata->error_severity);
		if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
				 CPER_SEC_PLATFORM_MEM)) {
			struct cper_sec_mem_err *mem_err;

			mem_err = (struct cper_sec_mem_err *)(gdata + 1);
			ghes_edac_report_mem_error(ghes, sev, mem_err);

#ifdef CONFIG_X86_MCE
			apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED,
						  mem_err);
#endif
			ghes_handle_memory_failure(gdata, sev);
		}
#ifdef CONFIG_ACPI_APEI_PCIEAER
		else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
				      CPER_SEC_PCIE)) {
			struct cper_sec_pcie *pcie_err;

			pcie_err = (struct cper_sec_pcie *)(gdata + 1);
			if (sev == GHES_SEV_RECOVERABLE &&
			    sec_sev == GHES_SEV_RECOVERABLE &&
			    pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
			    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
				unsigned int devfn;
				int aer_severity;

				devfn = PCI_DEVFN(pcie_err->device_id.device,
						  pcie_err->device_id.function);
				aer_severity = cper_severity_to_aer(sev);

				/*
				 * If firmware reset the component to contain
				 * the error, we must reinitialize it before
				 * use, so treat it as a fatal AER error.
				 */
				if (gdata->flags & CPER_SEC_RESET)
					aer_severity = AER_FATAL;

				aer_recover_queue(pcie_err->device_id.segment,
						  pcie_err->device_id.bus,
						  devfn, aer_severity,
						  (struct aer_capability_regs *)
						  pcie_err->aer_info);
			}

		}
#endif
	}
}

static void __ghes_print_estatus(const char *pfx,
				 const struct acpi_hest_generic *generic,
				 const struct acpi_generic_status *estatus)
{
	static atomic_t seqno;
	unsigned int curr_seqno;
	char pfx_seq[64];

	if (pfx == NULL) {
		if (ghes_severity(estatus->error_severity) <=
		    GHES_SEV_CORRECTED)
			pfx = KERN_WARNING;
		else
			pfx = KERN_ERR;
	}
	curr_seqno = atomic_inc_return(&seqno);
	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
	       pfx_seq, generic->header.source_id);
	cper_estatus_print(pfx_seq, estatus);
}

static int ghes_print_estatus(const char *pfx,
			      const struct acpi_hest_generic *generic,
			      const struct acpi_generic_status *estatus)
{
	/* Not more than 2 messages every 5 seconds */
	static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
	static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
	struct ratelimit_state *ratelimit;

	if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
		ratelimit = &ratelimit_corrected;
	else
		ratelimit = &ratelimit_uncorrected;
	if (__ratelimit(ratelimit)) {
		__ghes_print_estatus(pfx, generic, estatus);
		return 1;
	}
	return 0;
}

/*
 * GHES error status reporting throttle, so that more kinds of errors
 * can be reported, instead of only the most frequently occurring
 * ones.
 */
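/*
 * An estatus is considered "cached" if a byte-identical record is
 * already present in ghes_estatus_caches[] and is younger than
 * GHES_ESTATUS_IN_CACHE_MAX_NSEC (10s); such records were reported
 * recently and are not printed again. When all slots are in use,
 * ghes_estatus_cache_add() below evicts the entry with the largest
 * time-per-report, i.e. the least frequently seen error.
 */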
static int ghes_estatus_cached(struct acpi_generic_status *estatus)
{
	u32 len;
	int i, cached = 0;
	unsigned long long now;
	struct ghes_estatus_cache *cache;
	struct acpi_generic_status *cache_estatus;

	len = cper_estatus_len(estatus);
	rcu_read_lock();
	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		if (cache == NULL)
			continue;
		if (len != cache->estatus_len)
			continue;
		cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
		if (memcmp(estatus, cache_estatus, len))
			continue;
		atomic_inc(&cache->count);
		now = sched_clock();
		if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
			cached = 1;
		break;
	}
	rcu_read_unlock();
	return cached;
}

static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
	struct acpi_hest_generic *generic,
	struct acpi_generic_status *estatus)
{
	int alloced;
	u32 len, cache_len;
	struct ghes_estatus_cache *cache;
	struct acpi_generic_status *cache_estatus;

	alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
	if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
		atomic_dec(&ghes_estatus_cache_alloced);
		return NULL;
	}
	len = cper_estatus_len(estatus);
	cache_len = GHES_ESTATUS_CACHE_LEN(len);
	cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
	if (!cache) {
		atomic_dec(&ghes_estatus_cache_alloced);
		return NULL;
	}
	cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
	memcpy(cache_estatus, estatus, len);
	cache->estatus_len = len;
	atomic_set(&cache->count, 0);
	cache->generic = generic;
	cache->time_in = sched_clock();
	return cache;
}

static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
{
	u32 len;

	len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
	len = GHES_ESTATUS_CACHE_LEN(len);
	gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
	atomic_dec(&ghes_estatus_cache_alloced);
}

static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
{
	struct ghes_estatus_cache *cache;

	cache = container_of(head, struct ghes_estatus_cache, rcu);
	ghes_estatus_cache_free(cache);
}

static void ghes_estatus_cache_add(
	struct acpi_hest_generic *generic,
	struct acpi_generic_status *estatus)
{
	int i, slot = -1, count;
	unsigned long long now, duration, period, max_period = 0;
	struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache;

	new_cache = ghes_estatus_cache_alloc(generic, estatus);
	if (new_cache == NULL)
		return;
	rcu_read_lock();
	now = sched_clock();
	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		if (cache == NULL) {
			slot = i;
			slot_cache = NULL;
			break;
		}
		duration = now - cache->time_in;
		if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
			slot = i;
			slot_cache = cache;
			break;
		}
		count = atomic_read(&cache->count);
		period = duration;
		do_div(period, (count + 1));
		if (period > max_period) {
			max_period = period;
			slot = i;
			slot_cache = cache;
		}
	}
	/* new_cache must be put into array after its contents are written */
	smp_wmb();
	if (slot != -1 && cmpxchg(ghes_estatus_caches + slot,
				  slot_cache, new_cache) == slot_cache) {
		if (slot_cache)
			call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free);
	} else
		ghes_estatus_cache_free(new_cache);
	rcu_read_unlock();
}
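
/*
 * Common processing path for the polled, external-IRQ and SCI
 * notification types: read the error status block, print it unless
 * the cache says it was reported recently, process its sections, then
 * clear the block so that firmware can reuse it.
 */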
static int ghes_proc(struct ghes *ghes)
{
	int rc;

	rc = ghes_read_estatus(ghes, 0);
	if (rc)
		goto out;
	if (!ghes_estatus_cached(ghes->estatus)) {
		if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus))
			ghes_estatus_cache_add(ghes->generic, ghes->estatus);
	}
	ghes_do_proc(ghes, ghes->estatus);
out:
	ghes_clear_estatus(ghes);
	return rc;
}

static void ghes_add_timer(struct ghes *ghes)
{
	struct acpi_hest_generic *g = ghes->generic;
	unsigned long expire;

	if (!g->notify.poll_interval) {
		pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
			   g->header.source_id);
		return;
	}
	expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
	ghes->timer.expires = round_jiffies_relative(expire);
	add_timer(&ghes->timer);
}

static void ghes_poll_func(unsigned long data)
{
	struct ghes *ghes = (void *)data;

	ghes_proc(ghes);
	if (!(ghes->flags & GHES_EXITING))
		ghes_add_timer(ghes);
}

static irqreturn_t ghes_irq_func(int irq, void *data)
{
	struct ghes *ghes = data;
	int rc;

	rc = ghes_proc(ghes);
	if (rc)
		return IRQ_NONE;

	return IRQ_HANDLED;
}

static int ghes_notify_sci(struct notifier_block *this,
			   unsigned long event, void *data)
{
	struct ghes *ghes;
	int ret = NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(ghes, &ghes_sci, list) {
		if (!ghes_proc(ghes))
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct llist_node *llist_nodes_reverse(struct llist_node *llnode)
{
	struct llist_node *next, *tail = NULL;

	while (llnode) {
		next = llnode->next;
		llnode->next = tail;
		tail = llnode;
		llnode = next;
	}

	return tail;
}

static void ghes_proc_in_irq(struct irq_work *irq_work)
{
	struct llist_node *llnode, *next;
	struct ghes_estatus_node *estatus_node;
	struct acpi_hest_generic *generic;
	struct acpi_generic_status *estatus;
	u32 len, node_len;

	llnode = llist_del_all(&ghes_estatus_llist);
	/*
	 * The estatus entries on the list are in reversed time order;
	 * restore the proper order before processing.
	 */
	llnode = llist_nodes_reverse(llnode);
	while (llnode) {
		next = llnode->next;
		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
					   llnode);
		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
		len = cper_estatus_len(estatus);
		node_len = GHES_ESTATUS_NODE_LEN(len);
		ghes_do_proc(estatus_node->ghes, estatus);
		if (!ghes_estatus_cached(estatus)) {
			generic = estatus_node->generic;
			if (ghes_print_estatus(NULL, generic, estatus))
				ghes_estatus_cache_add(generic, estatus);
		}
		gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
			      node_len);
		llnode = next;
	}
}
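
/*
 * Used on the NMI panic path: drain the records still queued for
 * IRQ-context processing and print them, so that they are not lost
 * across the panic.
 */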
static void ghes_print_queued_estatus(void)
{
	struct llist_node *llnode;
	struct ghes_estatus_node *estatus_node;
	struct acpi_hest_generic *generic;
	struct acpi_generic_status *estatus;
	u32 len, node_len;

	llnode = llist_del_all(&ghes_estatus_llist);
	/*
	 * The estatus entries on the list are in reversed time order;
	 * restore the proper order before printing.
	 */
	llnode = llist_nodes_reverse(llnode);
	while (llnode) {
		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
					   llnode);
		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
		len = cper_estatus_len(estatus);
		node_len = GHES_ESTATUS_NODE_LEN(len);
		generic = estatus_node->generic;
		ghes_print_estatus(NULL, generic, estatus);
		llnode = llnode->next;
	}
}

static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
{
	struct ghes *ghes, *ghes_global = NULL;
	int sev, sev_global = -1;
	int ret = NMI_DONE;

	raw_spin_lock(&ghes_nmi_lock);
	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
		if (ghes_read_estatus(ghes, 1)) {
			ghes_clear_estatus(ghes);
			continue;
		}
		sev = ghes_severity(ghes->estatus->error_severity);
		if (sev > sev_global) {
			sev_global = sev;
			ghes_global = ghes;
		}
		ret = NMI_HANDLED;
	}

	if (ret == NMI_DONE)
		goto out;

	if (sev_global >= GHES_SEV_PANIC) {
		oops_begin();
		ghes_print_queued_estatus();
		__ghes_print_estatus(KERN_EMERG, ghes_global->generic,
				     ghes_global->estatus);
		/* reboot to log the error! */
		if (panic_timeout == 0)
			panic_timeout = ghes_panic_timeout;
		panic("Fatal hardware error!");
	}

	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
		u32 len, node_len;
		struct ghes_estatus_node *estatus_node;
		struct acpi_generic_status *estatus;
#endif
		if (!(ghes->flags & GHES_TO_CLEAR))
			continue;
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
		if (ghes_estatus_cached(ghes->estatus))
			goto next;
		/* Save estatus for further processing in IRQ context */
		len = cper_estatus_len(ghes->estatus);
		node_len = GHES_ESTATUS_NODE_LEN(len);
		estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool,
						      node_len);
		if (estatus_node) {
			estatus_node->ghes = ghes;
			estatus_node->generic = ghes->generic;
			estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
			memcpy(estatus, ghes->estatus, len);
			llist_add(&estatus_node->llnode, &ghes_estatus_llist);
		}
next:
#endif
		ghes_clear_estatus(ghes);
	}
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	irq_work_queue(&ghes_proc_irq_work);
#endif

out:
	raw_spin_unlock(&ghes_nmi_lock);
	return ret;
}

static struct notifier_block ghes_notifier_sci = {
	.notifier_call = ghes_notify_sci,
};

static unsigned long ghes_esource_prealloc_size(
	const struct acpi_hest_generic *generic)
{
	unsigned long block_length, prealloc_records, prealloc_size;

	block_length = min_t(unsigned long, generic->error_block_length,
			     GHES_ESTATUS_MAX_SIZE);
	prealloc_records = max_t(unsigned long,
				 generic->records_to_preallocate, 1);
	prealloc_size = min_t(unsigned long, block_length * prealloc_records,
			      GHES_ESOURCE_PREALLOC_MAX_SIZE);

	return prealloc_size;
}
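
/*
 * For example (values are illustrative): with error_block_length =
 * 4096 and records_to_preallocate = 4, the NMI estatus pool is grown
 * by min(4096 * 4, GHES_ESOURCE_PREALLOC_MAX_SIZE) = 16384 bytes when
 * the error source is probed, and shrunk by the same amount when it
 * is removed.
 */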
static int ghes_probe(struct platform_device *ghes_dev)
{
	struct acpi_hest_generic *generic;
	struct ghes *ghes = NULL;
	unsigned long len;
	int rc = -EINVAL;

	generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
	if (!generic->enabled)
		return -ENODEV;

	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
	case ACPI_HEST_NOTIFY_EXTERNAL:
	case ACPI_HEST_NOTIFY_SCI:
	case ACPI_HEST_NOTIFY_NMI:
		break;
	case ACPI_HEST_NOTIFY_LOCAL:
		pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
			   generic->header.source_id);
		goto err;
	default:
		pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
			   generic->notify.type, generic->header.source_id);
		goto err;
	}

	rc = -EIO;
	if (generic->error_block_length <
	    sizeof(struct acpi_generic_status)) {
		pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
			   generic->error_block_length,
			   generic->header.source_id);
		goto err;
	}
	ghes = ghes_new(generic);
	if (IS_ERR(ghes)) {
		rc = PTR_ERR(ghes);
		ghes = NULL;
		goto err;
	}

	rc = ghes_edac_register(ghes, &ghes_dev->dev);
	if (rc < 0)
		goto err;

	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		ghes->timer.function = ghes_poll_func;
		ghes->timer.data = (unsigned long)ghes;
		init_timer_deferrable(&ghes->timer);
		ghes_add_timer(ghes);
		break;
	case ACPI_HEST_NOTIFY_EXTERNAL:
		/* External interrupt vector is GSI */
		rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
		if (rc) {
			pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
			       generic->header.source_id);
			goto err_edac_unreg;
		}
		rc = request_irq(ghes->irq, ghes_irq_func, 0, "GHES IRQ", ghes);
		if (rc) {
			pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
			       generic->header.source_id);
			goto err_edac_unreg;
		}
		break;
	case ACPI_HEST_NOTIFY_SCI:
		mutex_lock(&ghes_list_mutex);
		if (list_empty(&ghes_sci))
			register_acpi_hed_notifier(&ghes_notifier_sci);
		list_add_rcu(&ghes->list, &ghes_sci);
		mutex_unlock(&ghes_list_mutex);
		break;
	case ACPI_HEST_NOTIFY_NMI:
		len = ghes_esource_prealloc_size(generic);
		ghes_estatus_pool_expand(len);
		mutex_lock(&ghes_list_mutex);
		if (list_empty(&ghes_nmi))
			register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0,
					     "ghes");
		list_add_rcu(&ghes->list, &ghes_nmi);
		mutex_unlock(&ghes_list_mutex);
		break;
	default:
		BUG();
	}
	platform_set_drvdata(ghes_dev, ghes);

	return 0;
err_edac_unreg:
	ghes_edac_unregister(ghes);
err:
	if (ghes) {
		ghes_fini(ghes);
		kfree(ghes);
	}
	return rc;
}
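
/*
 * Teardown mirrors ghes_probe(): the notification source is stopped
 * first (timer, IRQ, SCI notifier or NMI handler), and only then is
 * the ghes instance freed.
 */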
static int ghes_remove(struct platform_device *ghes_dev)
{
	struct ghes *ghes;
	struct acpi_hest_generic *generic;
	unsigned long len;

	ghes = platform_get_drvdata(ghes_dev);
	generic = ghes->generic;

	ghes->flags |= GHES_EXITING;
	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		del_timer_sync(&ghes->timer);
		break;
	case ACPI_HEST_NOTIFY_EXTERNAL:
		free_irq(ghes->irq, ghes);
		break;
	case ACPI_HEST_NOTIFY_SCI:
		mutex_lock(&ghes_list_mutex);
		list_del_rcu(&ghes->list);
		if (list_empty(&ghes_sci))
			unregister_acpi_hed_notifier(&ghes_notifier_sci);
		mutex_unlock(&ghes_list_mutex);
		break;
	case ACPI_HEST_NOTIFY_NMI:
		mutex_lock(&ghes_list_mutex);
		list_del_rcu(&ghes->list);
		if (list_empty(&ghes_nmi))
			unregister_nmi_handler(NMI_LOCAL, "ghes");
		mutex_unlock(&ghes_list_mutex);
		/*
		 * To synchronize with the NMI handler, ghes can only be
		 * freed after the NMI handler finishes.
		 */
		synchronize_rcu();
		len = ghes_esource_prealloc_size(generic);
		ghes_estatus_pool_shrink(len);
		break;
	default:
		BUG();
		break;
	}

	ghes_fini(ghes);

	ghes_edac_unregister(ghes);

	kfree(ghes);

	platform_set_drvdata(ghes_dev, NULL);

	return 0;
}

static struct platform_driver ghes_platform_driver = {
	.driver		= {
		.name	= "GHES",
		.owner	= THIS_MODULE,
	},
	.probe		= ghes_probe,
	.remove		= ghes_remove,
};

static int __init ghes_init(void)
{
	int rc;

	if (acpi_disabled)
		return -ENODEV;

	if (hest_disable) {
		pr_info(GHES_PFX "HEST is not enabled!\n");
		return -EINVAL;
	}

	if (ghes_disable) {
		pr_info(GHES_PFX "GHES is not enabled!\n");
		return -EINVAL;
	}

	init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);

	rc = ghes_ioremap_init();
	if (rc)
		goto err;

	rc = ghes_estatus_pool_init();
	if (rc)
		goto err_ioremap_exit;

	rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE *
				      GHES_ESTATUS_CACHE_ALLOCED_MAX);
	if (rc)
		goto err_pool_exit;

	rc = platform_driver_register(&ghes_platform_driver);
	if (rc)
		goto err_pool_exit;

	rc = apei_osc_setup();
	if (rc == 0 && osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
	else if (rc == 0 && !osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
	else if (rc && osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
	else
		pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");

	return 0;
err_pool_exit:
	ghes_estatus_pool_exit();
err_ioremap_exit:
	ghes_ioremap_exit();
err:
	return rc;
}

static void __exit ghes_exit(void)
{
	platform_driver_unregister(&ghes_platform_driver);
	ghes_estatus_pool_exit();
	ghes_ioremap_exit();
}

module_init(ghes_init);
module_exit(ghes_exit);

MODULE_AUTHOR("Huang Ying");
MODULE_DESCRIPTION("APEI Generic Hardware Error Source support");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:GHES");