/*
 * APEI Generic Hardware Error Source support
 *
 * Generic Hardware Error Source provides a way to report platform
 * hardware errors (such as those from the chipset). It works in the
 * so-called "Firmware First" mode: hardware errors are reported to
 * the firmware first, and the firmware then reports them to Linux.
 * This way, the firmware can check some non-standard hardware error
 * registers or a non-standard hardware link to produce richer error
 * information for Linux.
 *
 * For more information about Generic Hardware Error Source, please
 * refer to ACPI Specification version 4.0, section 17.3.2.6
 *
 * Copyright 2010,2011 Intel Corp.
 *   Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation;
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/acpi_io.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
#include <linux/kdebug.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/genalloc.h>
#include <linux/pci.h>
#include <linux/aer.h>

#include <acpi/ghes.h>
#include <asm/mce.h>
#include <asm/tlbflush.h>
#include <asm/nmi.h>

#include "apei-internal.h"

#define GHES_PFX	"GHES: "

#define GHES_ESTATUS_MAX_SIZE		65536
#define GHES_ESOURCE_PREALLOC_MAX_SIZE	65536

#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3

/* This is just an estimation for memory pool allocation */
#define GHES_ESTATUS_CACHE_AVG_SIZE	512

#define GHES_ESTATUS_CACHES_SIZE	4

#define GHES_ESTATUS_IN_CACHE_MAX_NSEC	10000000000ULL
/* Prevent too many caches from being allocated because of RCU */
#define GHES_ESTATUS_CACHE_ALLOCED_MAX	(GHES_ESTATUS_CACHES_SIZE * 3 / 2)

#define GHES_ESTATUS_CACHE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_cache) + (estatus_len))
#define GHES_ESTATUS_FROM_CACHE(estatus_cache)			\
	((struct acpi_hest_generic_status *)			\
	 ((struct ghes_estatus_cache *)(estatus_cache) + 1))

#define GHES_ESTATUS_NODE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_node) + (estatus_len))
#define GHES_ESTATUS_FROM_NODE(estatus_node)			\
	((struct acpi_hest_generic_status *)			\
	 ((struct ghes_estatus_node *)(estatus_node) + 1))

bool ghes_disable;
module_param_named(disable, ghes_disable, bool, 0);

static int ghes_panic_timeout	__read_mostly = 30;

/*
 * All error sources notified via SCI share one notifier function, so
 * they need to be linked and checked one by one. The same applies to
 * NMI.
 *
 * RCU protects these lists, so ghes_list_mutex is only needed for
 * list modification, not for traversal.
 */
static LIST_HEAD(ghes_sci);
static LIST_HEAD(ghes_nmi);
static DEFINE_MUTEX(ghes_list_mutex);

/*
 * An NMI may be triggered on any CPU, so ghes_nmi_lock is used for
 * mutual exclusion.
 */
static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
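/*
 * A minimal sketch (illustrative only, mirroring ghes_probe() and
 * ghes_notify_sci() below) of the locking pattern for the lists
 * above: writers take ghes_list_mutex, readers only rcu_read_lock():
 *
 *	mutex_lock(&ghes_list_mutex);
 *	list_add_rcu(&ghes->list, &ghes_sci);
 *	mutex_unlock(&ghes_list_mutex);
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(ghes, &ghes_sci, list)
 *		ghes_proc(ghes);
 *	rcu_read_unlock();
 */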
/*
 * The memory area used to transfer hardware error information from
 * the BIOS to Linux can be determined only in an NMI, IRQ or timer
 * handler, but the generic ioremap cannot be used in atomic context,
 * so a special atomic version of ioremap is implemented here.
 */

/*
 * Two virtual pages are used, one for NMI context, the other for
 * IRQ/PROCESS context.
 */
#define GHES_IOREMAP_PAGES		2
#define GHES_IOREMAP_NMI_PAGE(base)	(base)
#define GHES_IOREMAP_IRQ_PAGE(base)	((base) + PAGE_SIZE)

/* virtual memory area for atomic ioremap */
static struct vm_struct *ghes_ioremap_area;
/*
 * These two spinlocks prevent the atomic-ioremap virtual memory
 * areas from being mapped simultaneously.
 */
static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);

/*
 * printk() is not safe in NMI context. So in the NMI handler, we
 * allocate the required memory from the lock-less memory allocator
 * (ghes_estatus_pool), save the estatus into it, put it onto the
 * lock-less list (ghes_estatus_llist), then defer the printk() to
 * IRQ context via irq_work (ghes_proc_irq_work).
 * ghes_estatus_pool_size_request records the pool size required by
 * all NMI error sources.
 */
static struct gen_pool *ghes_estatus_pool;
static unsigned long ghes_estatus_pool_size_request;
static struct llist_head ghes_estatus_llist;
static struct irq_work ghes_proc_irq_work;

static struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
static atomic_t ghes_estatus_cache_alloced;

static int ghes_ioremap_init(void)
{
	ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
		VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!ghes_ioremap_area) {
		pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
		return -ENOMEM;
	}

	return 0;
}

static void ghes_ioremap_exit(void)
{
	free_vm_area(ghes_ioremap_area);
}

static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
{
	unsigned long vaddr;

	vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
			   pfn << PAGE_SHIFT, PAGE_KERNEL);

	return (void __iomem *)vaddr;
}

static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
{
	unsigned long vaddr;

	vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
			   pfn << PAGE_SHIFT, PAGE_KERNEL);

	return (void __iomem *)vaddr;
}

static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
{
	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
	void *base = ghes_ioremap_area->addr;

	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
	__flush_tlb_one(vaddr);
}

static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
{
	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
	void *base = ghes_ioremap_area->addr;

	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
	__flush_tlb_one(vaddr);
}
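/*
 * Usage sketch for the helpers above (illustrative only; the real
 * user is ghes_copy_tofrom_phys() below). In NMI context the NMI
 * page and ghes_ioremap_lock_nmi are used; IRQ/process context uses
 * the IRQ page and ghes_ioremap_lock_irq instead:
 *
 *	raw_spin_lock(&ghes_ioremap_lock_nmi);
 *	vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
 *	memcpy_fromio(buffer, vaddr + offset, len);
 *	ghes_iounmap_nmi(vaddr);
 *	raw_spin_unlock(&ghes_ioremap_lock_nmi);
 */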
static int ghes_estatus_pool_init(void)
{
	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
	if (!ghes_estatus_pool)
		return -ENOMEM;
	return 0;
}

static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool,
					      struct gen_pool_chunk *chunk,
					      void *data)
{
	free_page(chunk->start_addr);
}

static void ghes_estatus_pool_exit(void)
{
	gen_pool_for_each_chunk(ghes_estatus_pool,
				ghes_estatus_pool_free_chunk_page, NULL);
	gen_pool_destroy(ghes_estatus_pool);
}

static int ghes_estatus_pool_expand(unsigned long len)
{
	unsigned long i, pages, size, addr;
	int ret;

	ghes_estatus_pool_size_request += PAGE_ALIGN(len);
	size = gen_pool_size(ghes_estatus_pool);
	if (size >= ghes_estatus_pool_size_request)
		return 0;
	pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE;
	for (i = 0; i < pages; i++) {
		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			return -ENOMEM;
		ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1);
		if (ret)
			return ret;
	}

	return 0;
}

static void ghes_estatus_pool_shrink(unsigned long len)
{
	ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
}

static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
	struct ghes *ghes;
	unsigned int error_block_length;
	int rc;

	ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
	if (!ghes)
		return ERR_PTR(-ENOMEM);
	ghes->generic = generic;
	rc = apei_map_generic_address(&generic->error_status_address);
	if (rc)
		goto err_free;
	error_block_length = generic->error_block_length;
	if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
		pr_warning(FW_WARN GHES_PFX
			   "Error status block length is too long: %u for "
			   "generic hardware error source: %d.\n",
			   error_block_length, generic->header.source_id);
		error_block_length = GHES_ESTATUS_MAX_SIZE;
	}
	ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
	if (!ghes->estatus) {
		rc = -ENOMEM;
		goto err_unmap;
	}

	return ghes;

err_unmap:
	apei_unmap_generic_address(&generic->error_status_address);
err_free:
	kfree(ghes);
	return ERR_PTR(rc);
}

static void ghes_fini(struct ghes *ghes)
{
	kfree(ghes->estatus);
	apei_unmap_generic_address(&ghes->generic->error_status_address);
}

static inline int ghes_severity(int severity)
{
	switch (severity) {
	case CPER_SEV_INFORMATIONAL:
		return GHES_SEV_NO;
	case CPER_SEV_CORRECTED:
		return GHES_SEV_CORRECTED;
	case CPER_SEV_RECOVERABLE:
		return GHES_SEV_RECOVERABLE;
	case CPER_SEV_FATAL:
		return GHES_SEV_PANIC;
	default:
		/* Unknown, go panic */
		return GHES_SEV_PANIC;
	}
}

static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
				  int from_phys)
{
	void __iomem *vaddr;
	unsigned long flags = 0;
	int in_nmi = in_nmi();
	u64 offset;
	u32 trunk;

	while (len > 0) {
		offset = paddr - (paddr & PAGE_MASK);
		if (in_nmi) {
			raw_spin_lock(&ghes_ioremap_lock_nmi);
			vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
		} else {
			spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
			vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
		}
		trunk = PAGE_SIZE - offset;
		trunk = min(trunk, len);
		if (from_phys)
			memcpy_fromio(buffer, vaddr + offset, trunk);
		else
			memcpy_toio(vaddr + offset, buffer, trunk);
		len -= trunk;
		paddr += trunk;
		buffer += trunk;
		if (in_nmi) {
			ghes_iounmap_nmi(vaddr);
			raw_spin_unlock(&ghes_ioremap_lock_nmi);
		} else {
			ghes_iounmap_irq(vaddr);
			spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
		}
	}
}
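/*
 * Worked example for the loop above (illustrative numbers, assuming
 * 4KiB pages): copying 5000 bytes from paddr 0x10000ff0 takes three
 * iterations: offset = 0xff0, so the first chunk is
 * PAGE_SIZE - 0xff0 = 16 bytes, the second a full 4096-byte page,
 * and the third the remaining 888 bytes.
 */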
static int ghes_read_estatus(struct ghes *ghes, int silent)
{
	struct acpi_hest_generic *g = ghes->generic;
	u64 buf_paddr;
	u32 len;
	int rc;

	rc = apei_read(&buf_paddr, &g->error_status_address);
	if (rc) {
		if (!silent && printk_ratelimit())
			pr_warning(FW_WARN GHES_PFX
				   "Failed to read error status block address for hardware error source: %d.\n",
				   g->header.source_id);
		return -EIO;
	}
	if (!buf_paddr)
		return -ENOENT;

	ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
			      sizeof(*ghes->estatus), 1);
	if (!ghes->estatus->block_status)
		return -ENOENT;

	ghes->buffer_paddr = buf_paddr;
	ghes->flags |= GHES_TO_CLEAR;

	rc = -EIO;
	len = apei_estatus_len(ghes->estatus);
	if (len < sizeof(*ghes->estatus))
		goto err_read_block;
	if (len > ghes->generic->error_block_length)
		goto err_read_block;
	if (apei_estatus_check_header(ghes->estatus))
		goto err_read_block;
	ghes_copy_tofrom_phys(ghes->estatus + 1,
			      buf_paddr + sizeof(*ghes->estatus),
			      len - sizeof(*ghes->estatus), 1);
	if (apei_estatus_check(ghes->estatus))
		goto err_read_block;
	rc = 0;

err_read_block:
	if (rc && !silent && printk_ratelimit())
		pr_warning(FW_WARN GHES_PFX
			   "Failed to read error status block!\n");
	return rc;
}

static void ghes_clear_estatus(struct ghes *ghes)
{
	ghes->estatus->block_status = 0;
	if (!(ghes->flags & GHES_TO_CLEAR))
		return;
	ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
			      sizeof(ghes->estatus->block_status), 0);
	ghes->flags &= ~GHES_TO_CLEAR;
}

static void ghes_do_proc(struct ghes *ghes,
			 const struct acpi_hest_generic_status *estatus)
{
	int sev, sec_sev;
	struct acpi_hest_generic_data *gdata;

	sev = ghes_severity(estatus->error_severity);
	apei_estatus_for_each_section(estatus, gdata) {
		sec_sev = ghes_severity(gdata->error_severity);
		if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
				 CPER_SEC_PLATFORM_MEM)) {
			struct cper_sec_mem_err *mem_err;

			mem_err = (struct cper_sec_mem_err *)(gdata + 1);
			ghes_edac_report_mem_error(ghes, sev, mem_err);

#ifdef CONFIG_X86_MCE
			apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED,
						  mem_err);
#endif
#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
			if (sev == GHES_SEV_RECOVERABLE &&
			    sec_sev == GHES_SEV_RECOVERABLE &&
			    mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
				unsigned long pfn;

				pfn = mem_err->physical_addr >> PAGE_SHIFT;
				memory_failure_queue(pfn, 0, 0);
			}
#endif
		}
#ifdef CONFIG_ACPI_APEI_PCIEAER
		else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
				      CPER_SEC_PCIE)) {
			struct cper_sec_pcie *pcie_err;

			pcie_err = (struct cper_sec_pcie *)(gdata + 1);
			if (sev == GHES_SEV_RECOVERABLE &&
			    sec_sev == GHES_SEV_RECOVERABLE &&
			    pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
			    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
				unsigned int devfn;
				int aer_severity;

				devfn = PCI_DEVFN(pcie_err->device_id.device,
						  pcie_err->device_id.function);
				aer_severity = cper_severity_to_aer(sev);

				/*
				 * If the firmware reset the component to
				 * contain the error, we must reinitialize
				 * it before use, so treat it as a fatal
				 * AER error.
				 */
				if (gdata->flags & CPER_SEC_RESET)
					aer_severity = AER_FATAL;

				aer_recover_queue(pcie_err->device_id.segment,
						  pcie_err->device_id.bus,
						  devfn, aer_severity,
						  (struct aer_capability_regs *)
						  pcie_err->aer_info);
			}
		}
#endif
	}
}
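/*
 * Note on the (gdata + 1) casts above: in a CPER record, each
 * struct acpi_hest_generic_data header is immediately followed by
 * its section payload, so the payload (e.g. struct cper_sec_mem_err
 * or struct cper_sec_pcie) starts right past the header.
 */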
static void __ghes_print_estatus(const char *pfx,
				 const struct acpi_hest_generic *generic,
				 const struct acpi_hest_generic_status *estatus)
{
	static atomic_t seqno;
	unsigned int curr_seqno;
	char pfx_seq[64];

	if (pfx == NULL) {
		if (ghes_severity(estatus->error_severity) <=
		    GHES_SEV_CORRECTED)
			pfx = KERN_WARNING;
		else
			pfx = KERN_ERR;
	}
	curr_seqno = atomic_inc_return(&seqno);
	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
	       pfx_seq, generic->header.source_id);
	apei_estatus_print(pfx_seq, estatus);
}

static int ghes_print_estatus(const char *pfx,
			      const struct acpi_hest_generic *generic,
			      const struct acpi_hest_generic_status *estatus)
{
	/* No more than 2 messages every 5 seconds */
	static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
	static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
	struct ratelimit_state *ratelimit;

	if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
		ratelimit = &ratelimit_corrected;
	else
		ratelimit = &ratelimit_uncorrected;
	if (__ratelimit(ratelimit)) {
		__ghes_print_estatus(pfx, generic, estatus);
		return 1;
	}
	return 0;
}

/*
 * Throttle GHES error status reporting, so that more kinds of errors
 * get reported, instead of just the most frequently occurring ones.
 */
static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
{
	u32 len;
	int i, cached = 0;
	unsigned long long now;
	struct ghes_estatus_cache *cache;
	struct acpi_hest_generic_status *cache_estatus;

	len = apei_estatus_len(estatus);
	rcu_read_lock();
	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		if (cache == NULL)
			continue;
		if (len != cache->estatus_len)
			continue;
		cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
		if (memcmp(estatus, cache_estatus, len))
			continue;
		atomic_inc(&cache->count);
		now = sched_clock();
		if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
			cached = 1;
		break;
	}
	rcu_read_unlock();
	return cached;
}
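/*
 * In other words: an estatus counts as cached if a byte-identical
 * copy was inserted less than GHES_ESTATUS_IN_CACHE_MAX_NSEC (10s)
 * ago, and a cached estatus is not printed again. The hit counter is
 * incremented even for stale entries, so that ghes_estatus_cache_add()
 * below can weigh slot eviction.
 */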
static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
	struct acpi_hest_generic *generic,
	struct acpi_hest_generic_status *estatus)
{
	int alloced;
	u32 len, cache_len;
	struct ghes_estatus_cache *cache;
	struct acpi_hest_generic_status *cache_estatus;

	alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
	if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
		atomic_dec(&ghes_estatus_cache_alloced);
		return NULL;
	}
	len = apei_estatus_len(estatus);
	cache_len = GHES_ESTATUS_CACHE_LEN(len);
	cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
	if (!cache) {
		atomic_dec(&ghes_estatus_cache_alloced);
		return NULL;
	}
	cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
	memcpy(cache_estatus, estatus, len);
	cache->estatus_len = len;
	atomic_set(&cache->count, 0);
	cache->generic = generic;
	cache->time_in = sched_clock();
	return cache;
}

static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
{
	u32 len;

	len = apei_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
	len = GHES_ESTATUS_CACHE_LEN(len);
	gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
	atomic_dec(&ghes_estatus_cache_alloced);
}

static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
{
	struct ghes_estatus_cache *cache;

	cache = container_of(head, struct ghes_estatus_cache, rcu);
	ghes_estatus_cache_free(cache);
}

static void ghes_estatus_cache_add(
	struct acpi_hest_generic *generic,
	struct acpi_hest_generic_status *estatus)
{
	int i, slot = -1, count;
	unsigned long long now, duration, period, max_period = 0;
	struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache;

	new_cache = ghes_estatus_cache_alloc(generic, estatus);
	if (new_cache == NULL)
		return;
	rcu_read_lock();
	now = sched_clock();
	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		if (cache == NULL) {
			slot = i;
			slot_cache = NULL;
			break;
		}
		duration = now - cache->time_in;
		if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
			slot = i;
			slot_cache = cache;
			break;
		}
		count = atomic_read(&cache->count);
		period = duration;
		do_div(period, (count + 1));
		if (period > max_period) {
			max_period = period;
			slot = i;
			slot_cache = cache;
		}
	}
	/* new_cache must be put into array after its contents are written */
	smp_wmb();
	if (slot != -1 && cmpxchg(ghes_estatus_caches + slot,
				  slot_cache, new_cache) == slot_cache) {
		if (slot_cache)
			call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free);
	} else
		ghes_estatus_cache_free(new_cache);
	rcu_read_unlock();
}
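/*
 * Worked example of the eviction heuristic above (illustrative
 * numbers): an empty slot or one older than 10s is used immediately.
 * Otherwise, a slot cached for 9s with 2 hits has
 * period = 9s / (2 + 1) = 3s, while one cached for 4s with no hits
 * has period = 4s; the latter, being the least frequently hit, is
 * replaced first.
 */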
static int ghes_proc(struct ghes *ghes)
{
	int rc;

	rc = ghes_read_estatus(ghes, 0);
	if (rc)
		goto out;
	if (!ghes_estatus_cached(ghes->estatus)) {
		if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus))
			ghes_estatus_cache_add(ghes->generic, ghes->estatus);
	}
	ghes_do_proc(ghes, ghes->estatus);
out:
	ghes_clear_estatus(ghes);
	/* Propagate the result; ghes_irq_func() and ghes_notify_sci() check it */
	return rc;
}

static void ghes_add_timer(struct ghes *ghes)
{
	struct acpi_hest_generic *g = ghes->generic;
	unsigned long expire;

	if (!g->notify.poll_interval) {
		pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
			   g->header.source_id);
		return;
	}
	expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
	ghes->timer.expires = round_jiffies_relative(expire);
	add_timer(&ghes->timer);
}

static void ghes_poll_func(unsigned long data)
{
	struct ghes *ghes = (void *)data;

	ghes_proc(ghes);
	if (!(ghes->flags & GHES_EXITING))
		ghes_add_timer(ghes);
}

static irqreturn_t ghes_irq_func(int irq, void *data)
{
	struct ghes *ghes = data;
	int rc;

	rc = ghes_proc(ghes);
	if (rc)
		return IRQ_NONE;

	return IRQ_HANDLED;
}

static int ghes_notify_sci(struct notifier_block *this,
			   unsigned long event, void *data)
{
	struct ghes *ghes;
	int ret = NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(ghes, &ghes_sci, list) {
		if (!ghes_proc(ghes))
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct llist_node *llist_nodes_reverse(struct llist_node *llnode)
{
	struct llist_node *next, *tail = NULL;

	while (llnode) {
		next = llnode->next;
		llnode->next = tail;
		tail = llnode;
		llnode = next;
	}

	return tail;
}

static void ghes_proc_in_irq(struct irq_work *irq_work)
{
	struct llist_node *llnode, *next;
	struct ghes_estatus_node *estatus_node;
	struct acpi_hest_generic *generic;
	struct acpi_hest_generic_status *estatus;
	u32 len, node_len;

	llnode = llist_del_all(&ghes_estatus_llist);
	/*
	 * Because the time order of the estatus entries in the list is
	 * reversed, restore the proper order before processing.
	 */
	llnode = llist_nodes_reverse(llnode);
	while (llnode) {
		next = llnode->next;
		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
					   llnode);
		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
		len = apei_estatus_len(estatus);
		node_len = GHES_ESTATUS_NODE_LEN(len);
		ghes_do_proc(estatus_node->ghes, estatus);
		if (!ghes_estatus_cached(estatus)) {
			generic = estatus_node->generic;
			if (ghes_print_estatus(NULL, generic, estatus))
				ghes_estatus_cache_add(generic, estatus);
		}
		gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
			      node_len);
		llnode = next;
	}
}

static void ghes_print_queued_estatus(void)
{
	struct llist_node *llnode;
	struct ghes_estatus_node *estatus_node;
	struct acpi_hest_generic *generic;
	struct acpi_hest_generic_status *estatus;
	u32 len, node_len;

	llnode = llist_del_all(&ghes_estatus_llist);
	/*
	 * Because the time order of the estatus entries in the list is
	 * reversed, restore the proper order before printing.
	 */
	llnode = llist_nodes_reverse(llnode);
	while (llnode) {
		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
					   llnode);
		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
		len = apei_estatus_len(estatus);
		node_len = GHES_ESTATUS_NODE_LEN(len);
		generic = estatus_node->generic;
		ghes_print_estatus(NULL, generic, estatus);
		llnode = llnode->next;
	}
}
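/*
 * Summary of the NMI processing pipeline implemented below (see also
 * the comment above ghes_estatus_pool):
 *
 *	ghes_notify_nmi()		NMI context
 *	  -> panic on fatal errors, else
 *	  -> gen_pool_alloc() + llist_add() + irq_work_queue()
 *	ghes_proc_in_irq()		IRQ context
 *	  -> ghes_do_proc(), ghes_print_estatus(), gen_pool_free()
 */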
static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
{
	struct ghes *ghes, *ghes_global = NULL;
	int sev, sev_global = -1;
	int ret = NMI_DONE;

	raw_spin_lock(&ghes_nmi_lock);
	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
		if (ghes_read_estatus(ghes, 1)) {
			ghes_clear_estatus(ghes);
			continue;
		}
		sev = ghes_severity(ghes->estatus->error_severity);
		if (sev > sev_global) {
			sev_global = sev;
			ghes_global = ghes;
		}
		ret = NMI_HANDLED;
	}

	if (ret == NMI_DONE)
		goto out;

	if (sev_global >= GHES_SEV_PANIC) {
		oops_begin();
		ghes_print_queued_estatus();
		__ghes_print_estatus(KERN_EMERG, ghes_global->generic,
				     ghes_global->estatus);
		/* reboot to log the error! */
		if (panic_timeout == 0)
			panic_timeout = ghes_panic_timeout;
		panic("Fatal hardware error!");
	}

	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
		u32 len, node_len;
		struct ghes_estatus_node *estatus_node;
		struct acpi_hest_generic_status *estatus;
#endif
		if (!(ghes->flags & GHES_TO_CLEAR))
			continue;
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
		if (ghes_estatus_cached(ghes->estatus))
			goto next;
		/* Save estatus for further processing in IRQ context */
		len = apei_estatus_len(ghes->estatus);
		node_len = GHES_ESTATUS_NODE_LEN(len);
		estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool,
						      node_len);
		if (estatus_node) {
			estatus_node->ghes = ghes;
			estatus_node->generic = ghes->generic;
			estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
			memcpy(estatus, ghes->estatus, len);
			llist_add(&estatus_node->llnode, &ghes_estatus_llist);
		}
next:
#endif
		ghes_clear_estatus(ghes);
	}
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	irq_work_queue(&ghes_proc_irq_work);
#endif

out:
	raw_spin_unlock(&ghes_nmi_lock);
	return ret;
}

static struct notifier_block ghes_notifier_sci = {
	.notifier_call = ghes_notify_sci,
};

static unsigned long ghes_esource_prealloc_size(
	const struct acpi_hest_generic *generic)
{
	unsigned long block_length, prealloc_records, prealloc_size;

	block_length = min_t(unsigned long, generic->error_block_length,
			     GHES_ESTATUS_MAX_SIZE);
	prealloc_records = max_t(unsigned long,
				 generic->records_to_preallocate, 1);
	prealloc_size = min_t(unsigned long, block_length * prealloc_records,
			      GHES_ESOURCE_PREALLOC_MAX_SIZE);

	return prealloc_size;
}
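/*
 * Example with illustrative numbers: for an NMI error source with
 * error_block_length = 1024 and records_to_preallocate = 8, the
 * preallocation size is min(1024 * 8, 65536) = 8192 bytes, added to
 * the estatus pool in ghes_probe() and returned in ghes_remove().
 */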
static int ghes_probe(struct platform_device *ghes_dev)
{
	struct acpi_hest_generic *generic;
	struct ghes *ghes = NULL;
	unsigned long len;
	int rc = -EINVAL;

	generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
	if (!generic->enabled)
		return -ENODEV;

	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
	case ACPI_HEST_NOTIFY_EXTERNAL:
	case ACPI_HEST_NOTIFY_SCI:
	case ACPI_HEST_NOTIFY_NMI:
		break;
	case ACPI_HEST_NOTIFY_LOCAL:
		pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
			   generic->header.source_id);
		goto err;
	default:
		pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
			   generic->notify.type, generic->header.source_id);
		goto err;
	}

	rc = -EIO;
	if (generic->error_block_length <
	    sizeof(struct acpi_hest_generic_status)) {
		pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
			   generic->error_block_length,
			   generic->header.source_id);
		goto err;
	}
	ghes = ghes_new(generic);
	if (IS_ERR(ghes)) {
		rc = PTR_ERR(ghes);
		ghes = NULL;
		goto err;
	}

	rc = ghes_edac_register(ghes, &ghes_dev->dev);
	if (rc < 0)
		goto err;

	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		ghes->timer.function = ghes_poll_func;
		ghes->timer.data = (unsigned long)ghes;
		init_timer_deferrable(&ghes->timer);
		ghes_add_timer(ghes);
		break;
	case ACPI_HEST_NOTIFY_EXTERNAL:
		/* External interrupt vector is GSI */
		rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
		if (rc) {
			pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
			       generic->header.source_id);
			goto err_edac_unreg;
		}
		rc = request_irq(ghes->irq, ghes_irq_func, 0, "GHES IRQ", ghes);
		if (rc) {
			pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
			       generic->header.source_id);
			goto err_edac_unreg;
		}
		break;
	case ACPI_HEST_NOTIFY_SCI:
		mutex_lock(&ghes_list_mutex);
		if (list_empty(&ghes_sci))
			register_acpi_hed_notifier(&ghes_notifier_sci);
		list_add_rcu(&ghes->list, &ghes_sci);
		mutex_unlock(&ghes_list_mutex);
		break;
	case ACPI_HEST_NOTIFY_NMI:
		len = ghes_esource_prealloc_size(generic);
		ghes_estatus_pool_expand(len);
		mutex_lock(&ghes_list_mutex);
		if (list_empty(&ghes_nmi))
			register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0,
					     "ghes");
		list_add_rcu(&ghes->list, &ghes_nmi);
		mutex_unlock(&ghes_list_mutex);
		break;
	default:
		BUG();
	}
	platform_set_drvdata(ghes_dev, ghes);

	return 0;
err_edac_unreg:
	ghes_edac_unregister(ghes);
err:
	if (ghes) {
		ghes_fini(ghes);
		kfree(ghes);
	}
	return rc;
}

static int ghes_remove(struct platform_device *ghes_dev)
{
	struct ghes *ghes;
	struct acpi_hest_generic *generic;
	unsigned long len;

	ghes = platform_get_drvdata(ghes_dev);
	generic = ghes->generic;

	ghes->flags |= GHES_EXITING;
	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		del_timer_sync(&ghes->timer);
		break;
	case ACPI_HEST_NOTIFY_EXTERNAL:
		free_irq(ghes->irq, ghes);
		break;
	case ACPI_HEST_NOTIFY_SCI:
		mutex_lock(&ghes_list_mutex);
		list_del_rcu(&ghes->list);
		if (list_empty(&ghes_sci))
			unregister_acpi_hed_notifier(&ghes_notifier_sci);
		mutex_unlock(&ghes_list_mutex);
		break;
	case ACPI_HEST_NOTIFY_NMI:
		mutex_lock(&ghes_list_mutex);
		list_del_rcu(&ghes->list);
		if (list_empty(&ghes_nmi))
			unregister_nmi_handler(NMI_LOCAL, "ghes");
		mutex_unlock(&ghes_list_mutex);
		/*
		 * To synchronize with the NMI handler, the ghes can only
		 * be freed after the NMI handler has finished.
		 */
		synchronize_rcu();
		len = ghes_esource_prealloc_size(generic);
		ghes_estatus_pool_shrink(len);
		break;
	default:
		BUG();
		break;
	}

	ghes_fini(ghes);

	ghes_edac_unregister(ghes);

	kfree(ghes);

	platform_set_drvdata(ghes_dev, NULL);

	return 0;
}
static struct platform_driver ghes_platform_driver = {
	.driver		= {
		.name	= "GHES",
		.owner	= THIS_MODULE,
	},
	.probe		= ghes_probe,
	.remove		= ghes_remove,
};

static int __init ghes_init(void)
{
	int rc;

	if (acpi_disabled)
		return -ENODEV;

	if (hest_disable) {
		pr_info(GHES_PFX "HEST is not enabled!\n");
		return -EINVAL;
	}

	if (ghes_disable) {
		pr_info(GHES_PFX "GHES is not enabled!\n");
		return -EINVAL;
	}

	init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);

	rc = ghes_ioremap_init();
	if (rc)
		goto err;

	rc = ghes_estatus_pool_init();
	if (rc)
		goto err_ioremap_exit;

	rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE *
				      GHES_ESTATUS_CACHE_ALLOCED_MAX);
	if (rc)
		goto err_pool_exit;

	rc = platform_driver_register(&ghes_platform_driver);
	if (rc)
		goto err_pool_exit;

	rc = apei_osc_setup();
	if (rc == 0 && osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
	else if (rc == 0 && !osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
	else if (rc && osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
	else
		pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");

	return 0;
err_pool_exit:
	ghes_estatus_pool_exit();
err_ioremap_exit:
	ghes_ioremap_exit();
err:
	return rc;
}

static void __exit ghes_exit(void)
{
	platform_driver_unregister(&ghes_platform_driver);
	ghes_estatus_pool_exit();
	ghes_ioremap_exit();
}

module_init(ghes_init);
module_exit(ghes_exit);

MODULE_AUTHOR("Huang Ying");
MODULE_DESCRIPTION("APEI Generic Hardware Error Source support");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:GHES");