// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright IBM Corporation 2001, 2005, 2006
 * Copyright Dave Engebretsen & Todd Inglett 2001
 * Copyright Linas Vepstas 2005, 2006
 * Copyright 2001-2012 IBM Corporation.
 *
 * Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com>
 */

#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/reboot.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/of.h>

#include <linux/atomic.h>
#include <asm/debugfs.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>
#include <asm/pte-walk.h>


/** Overview:
 *  EEH, or "Enhanced Error Handling" is a PCI bridge technology for
 *  dealing with PCI bus errors that can't be dealt with within the
 *  usual PCI framework, except by check-stopping the CPU. Systems
 *  that are designed for high-availability/reliability cannot afford
 *  to crash due to a "mere" PCI error, thus the need for EEH.
 *  An EEH-capable bridge operates by converting a detected error
 *  into a "slot freeze", taking the PCI adapter off-line, making
 *  the slot behave, from the OS'es point of view, as if the slot
 *  were "empty": all reads return 0xff's and all writes are silently
 *  ignored. EEH slot isolation events can be triggered by parity
 *  errors on the address or data busses (e.g. during posted writes),
 *  which in turn might be caused by low voltage on the bus, dust,
 *  vibration, humidity, radioactivity or plain-old failed hardware.
 *
 *  Note, however, that one of the leading causes of EEH slot
 *  freeze events is buggy device drivers, buggy device microcode,
 *  or buggy device hardware. This is because any attempt by the
 *  device to bus-master data to a memory address that is not
 *  assigned to the device will trigger a slot freeze. (The idea
 *  is to prevent devices-gone-wild from corrupting system memory).
 *  Buggy hardware/drivers will have a miserable time co-existing
 *  with EEH.
 *
 *  Ideally, a PCI device driver, when suspecting that an isolation
 *  event has occurred (e.g. by reading 0xff's), will then ask EEH
 *  whether this is the case, and then take appropriate steps to
 *  reset the PCI slot, the PCI device, and then resume operations.
 *  However, until that day, the checking is done here, with the
 *  eeh_check_failure() routine embedded in the MMIO macros. If
 *  the slot is found to be isolated, an "EEH Event" is synthesized
 *  and sent out for processing.
 */

/* If a device driver keeps reading an MMIO register in an interrupt
 * handler after a slot isolation event, it might be broken.
 * This sets the threshold for how many read attempts we allow
 * before printing an error message.
 */
#define EEH_MAX_FAILS	2100000

/* Time to wait for a PCI slot to report status, in milliseconds */
#define PCI_BUS_RESET_WAIT_MSEC	(5*60*1000)

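/*
 * Illustrative sketch (not compiled) of the driver-side pattern that the
 * overview above describes. On powerpc the MMIO accessors already embed
 * this check, but an explicit call looks like the following; the device
 * structure, mapped BAR base and register offset are hypothetical:
 *
 *	u32 val = readl(priv->base + STATUS_REG);
 *
 *	if (val == 0xffffffff && eeh_check_failure(priv->base + STATUS_REG))
 *		return -EIO;	// slot is frozen; an EEH event has been queued
 */
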
/*
 * EEH probe mode support, which is part of the flags,
 * is to support multiple platforms for EEH. Some platforms
 * like pSeries do PCI enumeration based on the device tree.
 * However, other platforms like powernv probe PCI devices
 * from hardware. The flag is used to distinguish that.
 * In addition, struct eeh_ops::probe would be invoked for
 * particular OF node or PCI device so that the corresponding
 * PE would be created there.
 */
int eeh_subsystem_flags;
EXPORT_SYMBOL(eeh_subsystem_flags);

/*
 * Maximum number of times a PE is allowed to freeze. If one
 * particular PE's frozen count in the last hour exceeds this limit,
 * the PE will be forced offline permanently.
 */
u32 eeh_max_freezes = 5;

/*
 * Controls whether a recovery event should be scheduled when an
 * isolated device is discovered. This is only really useful for
 * debugging problems with the EEH core.
 */
bool eeh_debugfs_no_recover;

/* Platform dependent EEH operations */
struct eeh_ops *eeh_ops = NULL;

/* Lock to avoid races due to multiple reports of an error */
DEFINE_RAW_SPINLOCK(confirm_error_lock);
EXPORT_SYMBOL_GPL(confirm_error_lock);

/* Lock to protect passed flags */
static DEFINE_MUTEX(eeh_dev_mutex);

/* Buffer for reporting pci register dumps. It's here in BSS, and
 * not dynamically allocated, so that it ends up in RMO where RTAS
 * can access it.
 */
#define EEH_PCI_REGS_LOG_LEN	8192
static unsigned char pci_regs_buf[EEH_PCI_REGS_LOG_LEN];

/*
 * This struct maintains the EEH subsystem's global statistics,
 * which are exported to user space through procfs.
 */
struct eeh_stats {
	u64 no_device;		/* PCI device not found		*/
	u64 no_dn;		/* OF node not found		*/
	u64 no_cfg_addr;	/* Config address not found	*/
	u64 ignored_check;	/* EEH check skipped		*/
	u64 total_mmio_ffs;	/* Total EEH checks		*/
	u64 false_positives;	/* Unnecessary EEH checks	*/
	u64 slot_resets;	/* PE reset			*/
};

static struct eeh_stats eeh_stats;

static int __init eeh_setup(char *str)
{
	if (!strcmp(str, "off"))
		eeh_add_flag(EEH_FORCE_DISABLED);
	else if (!strcmp(str, "early_log"))
		eeh_add_flag(EEH_EARLY_DUMP_LOG);

	return 1;
}
__setup("eeh=", eeh_setup);

void eeh_show_enabled(void)
{
	if (eeh_has_flag(EEH_FORCE_DISABLED))
		pr_info("EEH: Recovery disabled by kernel parameter.\n");
	else if (eeh_has_flag(EEH_ENABLED))
		pr_info("EEH: Capable adapter found: recovery enabled.\n");
	else
		pr_info("EEH: No capable adapters found: recovery disabled.\n");
}

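/*
 * Usage example for the "eeh=" parameter parsed by eeh_setup() above,
 * given on the kernel boot command line:
 *
 *	eeh=off		- force-disable EEH recovery (EEH_FORCE_DISABLED)
 *	eeh=early_log	- dump the EEH log early in boot (EEH_EARLY_DUMP_LOG)
 */
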
/*
 * This routine captures assorted PCI configuration space data
 * for the indicated PCI device, and puts them into a buffer
 * for RTAS error logging.
 */
static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
{
	u32 cfg;
	int cap, i;
	int n = 0, l = 0;
	char buffer[128];

	n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n",
		       edev->pe->phb->global_number, edev->bdfn >> 8,
		       PCI_SLOT(edev->bdfn), PCI_FUNC(edev->bdfn));
	pr_warn("EEH: of node=%04x:%02x:%02x.%01x\n",
		edev->pe->phb->global_number, edev->bdfn >> 8,
		PCI_SLOT(edev->bdfn), PCI_FUNC(edev->bdfn));

	eeh_ops->read_config(edev, PCI_VENDOR_ID, 4, &cfg);
	n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg);
	pr_warn("EEH: PCI device/vendor: %08x\n", cfg);

	eeh_ops->read_config(edev, PCI_COMMAND, 4, &cfg);
	n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg);
	pr_warn("EEH: PCI cmd/status register: %08x\n", cfg);

	/* Gather bridge-specific registers */
	if (edev->mode & EEH_DEV_BRIDGE) {
		eeh_ops->read_config(edev, PCI_SEC_STATUS, 2, &cfg);
		n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg);
		pr_warn("EEH: Bridge secondary status: %04x\n", cfg);

		eeh_ops->read_config(edev, PCI_BRIDGE_CONTROL, 2, &cfg);
		n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg);
		pr_warn("EEH: Bridge control: %04x\n", cfg);
	}

	/* Dump out the PCI-X command and status regs */
	cap = edev->pcix_cap;
	if (cap) {
		eeh_ops->read_config(edev, cap, 4, &cfg);
		n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg);
		pr_warn("EEH: PCI-X cmd: %08x\n", cfg);

		eeh_ops->read_config(edev, cap+4, 4, &cfg);
		n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg);
		pr_warn("EEH: PCI-X status: %08x\n", cfg);
	}

	/* If PCI-E capable, dump PCI-E cap 10 */
	cap = edev->pcie_cap;
	if (cap) {
		n += scnprintf(buf+n, len-n, "pci-e cap10:\n");
		pr_warn("EEH: PCI-E capabilities and status follow:\n");

		for (i = 0; i <= 8; i++) {
			eeh_ops->read_config(edev, cap+4*i, 4, &cfg);
			n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);

			if ((i % 4) == 0) {
				if (i != 0)
					pr_warn("%s\n", buffer);

				l = scnprintf(buffer, sizeof(buffer),
					      "EEH: PCI-E %02x: %08x ",
					      4*i, cfg);
			} else {
				l += scnprintf(buffer+l, sizeof(buffer)-l,
					       "%08x ", cfg);
			}
		}

		pr_warn("%s\n", buffer);
	}

	/* If AER capable, dump it */
	cap = edev->aer_cap;
	if (cap) {
		n += scnprintf(buf+n, len-n, "pci-e AER:\n");
		pr_warn("EEH: PCI-E AER capability register set follows:\n");

		for (i = 0; i <= 13; i++) {
			eeh_ops->read_config(edev, cap+4*i, 4, &cfg);
			n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);

			if ((i % 4) == 0) {
				if (i != 0)
					pr_warn("%s\n", buffer);

				l = scnprintf(buffer, sizeof(buffer),
					      "EEH: PCI-E AER %02x: %08x ",
					      4*i, cfg);
			} else {
				l += scnprintf(buffer+l, sizeof(buffer)-l,
					       "%08x ", cfg);
			}
		}

		pr_warn("%s\n", buffer);
	}

	return n;
}

static void *eeh_dump_pe_log(struct eeh_pe *pe, void *flag)
{
	struct eeh_dev *edev, *tmp;
	size_t *plen = flag;

	eeh_pe_for_each_dev(pe, edev, tmp)
		*plen += eeh_dump_dev_log(edev, pci_regs_buf + *plen,
					  EEH_PCI_REGS_LOG_LEN - *plen);

	return NULL;
}

/**
 * eeh_slot_error_detail - Generate combined log including driver log and error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 *
 * This routine should be called to generate the combined log, which
 * is comprised of driver log and error log. The driver log is figured
 * out from the config space of the corresponding PCI device, while
 * the error log is fetched through platform dependent function call.
 */
void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
{
	size_t loglen = 0;

	/*
	 * When the PHB is fenced or dead, it's pointless to collect
	 * the data from PCI config space because it should return
	 * 0xFF's. For ER, we still retrieve the data from the PCI
	 * config space.
	 *
	 * For pHyp, we have to enable IO for log retrieval. Otherwise,
	 * 0xFF's is always returned from PCI config space.
	 *
	 * When the @severity is EEH_LOG_PERM, the PE is going to be
	 * removed. Prior to that, the drivers for devices included in
	 * the PE will be closed. The drivers rely on a working IO path
	 * to bring the devices to a quiet state. Otherwise, PCI traffic
	 * from those devices after they are removed is likely to cause
	 * another unexpected EEH error.
	 */
	if (!(pe->type & EEH_PE_PHB)) {
		if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
		    severity == EEH_LOG_PERM)
			eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);

		/*
		 * The config space of some PCI devices can't be accessed
		 * when their PEs are in frozen state. Otherwise, fenced
		 * PHB might be seen. Those PEs are identified with flag
		 * EEH_PE_CFG_RESTRICTED, indicating EEH_PE_CFG_BLOCKED
		 * is set automatically when the PE is put to EEH_PE_ISOLATED.
		 *
		 * Restoring BARs possibly triggers PCI config access in
		 * (OPAL) firmware and then causes fenced PHB. If the
		 * PCI config is blocked with flag EEH_PE_CFG_BLOCKED, it's
		 * pointless to restore BARs and dump config space.
		 */
		eeh_ops->configure_bridge(pe);
		if (!(pe->state & EEH_PE_CFG_BLOCKED)) {
			eeh_pe_restore_bars(pe);

			pci_regs_buf[0] = 0;
			eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
		}
	}

	eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
}

/**
 * eeh_token_to_phys - Convert EEH address token to phys address
 * @token: I/O token, should be address in the form 0xA....
 *
 * This routine should be called to convert virtual I/O address
 * to physical one.
 */
static inline unsigned long eeh_token_to_phys(unsigned long token)
{
	pte_t *ptep;
	unsigned long pa;
	int hugepage_shift;

	/*
	 * We won't find hugepages here (this is iomem). Hence we are not
	 * worried about _PAGE_SPLITTING/collapse. Also we will not hit
	 * page table free, because of init_mm.
	 */
	ptep = find_init_mm_pte(token, &hugepage_shift);
	if (!ptep)
		return token;

	pa = pte_pfn(*ptep);

	/* On radix we can do hugepage mappings for io, so handle that */
	if (hugepage_shift) {
		pa <<= hugepage_shift;
		pa |= token & ((1ul << hugepage_shift) - 1);
	} else {
		pa <<= PAGE_SHIFT;
		pa |= token & (PAGE_SIZE - 1);
	}

	return pa;
}

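/*
 * Worked example for the normal-page path above, assuming 64K pages
 * (PAGE_SHIFT = 16) and a hypothetical PFN of 0x12345: pa becomes
 * 0x12345 << 16 = 0x1_2345_0000, then the low 16 bits of the token
 * (say 0xbeef) are OR'ed in, giving 0x1_2345_beef. For radix hugepage
 * mappings, hugepage_shift plays the same role for the offset bits.
 */
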
/*
 * On the PowerNV platform, we might already have a fenced PHB.
 * In that case, it's meaningless to recover a frozen PE. Instead,
 * we have to handle the fenced PHB first.
 */
static int eeh_phb_check_failure(struct eeh_pe *pe)
{
	struct eeh_pe *phb_pe;
	unsigned long flags;
	int ret;

	if (!eeh_has_flag(EEH_PROBE_MODE_DEV))
		return -EPERM;

	/* Find the PHB PE */
	phb_pe = eeh_phb_pe_get(pe->phb);
	if (!phb_pe) {
		pr_warn("%s Can't find PE for PHB#%x\n",
			__func__, pe->phb->global_number);
		return -EEXIST;
	}

	/* If the PHB has been in problematic state */
	eeh_serialize_lock(&flags);
	if (phb_pe->state & EEH_PE_ISOLATED) {
		ret = 0;
		goto out;
	}

	/* Check PHB state */
	ret = eeh_ops->get_state(phb_pe, NULL);
	if ((ret < 0) ||
	    (ret == EEH_STATE_NOT_SUPPORT) || eeh_state_active(ret)) {
		ret = 0;
		goto out;
	}

	/* Isolate the PHB and send event */
	eeh_pe_mark_isolated(phb_pe);
	eeh_serialize_unlock(flags);

	pr_debug("EEH: PHB#%x failure detected, location: %s\n",
		 phb_pe->phb->global_number, eeh_pe_loc_get(phb_pe));
	eeh_send_failure_event(phb_pe);
	return 1;
out:
	eeh_serialize_unlock(flags);
	return ret;
}

/**
 * eeh_dev_check_failure - Check if all 1's data is due to EEH slot freeze
 * @edev: eeh device
 *
 * Check for an EEH failure for the given device node. Call this
 * routine if the result of a read was all 0xff's and you want to
 * find out if this is due to an EEH slot freeze. This routine
 * will query firmware for the EEH status.
 *
 * Returns 0 if there has not been an EEH error; otherwise returns
 * a non-zero value and queues up a slot isolation event notification.
 *
 * It is safe to call this routine in an interrupt context.
 */
int eeh_dev_check_failure(struct eeh_dev *edev)
{
	int ret;
	unsigned long flags;
	struct device_node *dn;
	struct pci_dev *dev;
	struct eeh_pe *pe, *parent_pe;
	int rc = 0;
	const char *location = NULL;

	eeh_stats.total_mmio_ffs++;

	if (!eeh_enabled())
		return 0;

	if (!edev) {
		eeh_stats.no_dn++;
		return 0;
	}
	dev = eeh_dev_to_pci_dev(edev);
	pe = eeh_dev_to_pe(edev);

	/* Access to IO BARs might get this far and still not want checking. */
	if (!pe) {
		eeh_stats.ignored_check++;
		eeh_edev_dbg(edev, "Ignored check\n");
		return 0;
	}

	if (!pe->addr && !pe->config_addr) {
		eeh_stats.no_cfg_addr++;
		return 0;
	}

	/*
	 * On the PowerNV platform, we might already have a fenced PHB,
	 * and we need to take care of that first.
	 */
	ret = eeh_phb_check_failure(pe);
	if (ret > 0)
		return ret;

	/*
	 * If the PE isn't owned by us, we shouldn't check the
	 * state. Instead, let the owner handle it if the PE has
	 * been frozen.
	 */
	if (eeh_pe_passed(pe))
		return 0;

	/* If we already have a pending isolation event for this
	 * slot, we know it's bad already, we don't need to check.
	 * Do this checking under a lock; as multiple PCI devices
	 * in one slot might report errors simultaneously, and we
	 * only want one error recovery routine running.
	 */
	eeh_serialize_lock(&flags);
	rc = 1;
	if (pe->state & EEH_PE_ISOLATED) {
		pe->check_count++;
		if (pe->check_count == EEH_MAX_FAILS) {
			dn = pci_device_to_OF_node(dev);
			if (dn)
				location = of_get_property(dn, "ibm,loc-code",
							   NULL);
			eeh_edev_err(edev, "%d reads ignored for recovering device at location=%s driver=%s\n",
				     pe->check_count,
				     location ? location : "unknown",
				     eeh_driver_name(dev));
			eeh_edev_err(edev, "Might be infinite loop in %s driver\n",
				     eeh_driver_name(dev));
			dump_stack();
		}
		goto dn_unlock;
	}

	/*
	 * Now test for an EEH failure. This is VERY expensive.
	 * Note that the eeh_config_addr may be a parent device
	 * in the case of a device behind a bridge, or it may be
	 * function zero of a multi-function device.
	 * In any case they must share a common PHB.
	 */
	ret = eeh_ops->get_state(pe, NULL);

	/* Note that config-io to empty slots may fail;
	 * they are empty when they don't have children.
	 * We punt under any of the following conditions: failure to
	 * get the PE's state, EEH not supported, a permanently
	 * unavailable state, or the PE is in a good state.
	 */
	if ((ret < 0) ||
	    (ret == EEH_STATE_NOT_SUPPORT) || eeh_state_active(ret)) {
		eeh_stats.false_positives++;
		pe->false_positives++;
		rc = 0;
		goto dn_unlock;
	}

	/*
	 * It's a corner case that the parent PE has been put into
	 * frozen state as well. Take care of that first.
	 */
	parent_pe = pe->parent;
	while (parent_pe) {
		/* Hit the ceiling ? */
		if (parent_pe->type & EEH_PE_PHB)
			break;

		/* Frozen parent PE ? */
		ret = eeh_ops->get_state(parent_pe, NULL);
		if (ret > 0 && !eeh_state_active(ret)) {
			pr_err("EEH: Failure of PHB#%x-PE#%x will be handled at parent PHB#%x-PE#%x.\n",
			       pe->phb->global_number, pe->addr,
			       parent_pe->phb->global_number, parent_pe->addr);
			pe = parent_pe;
		}

		/* Next parent level */
		parent_pe = parent_pe->parent;
	}

	eeh_stats.slot_resets++;

	/* Avoid repeated reports of this failure, including problems
	 * with other functions on this device, and functions under
	 * bridges.
	 */
	eeh_pe_mark_isolated(pe);
	eeh_serialize_unlock(flags);

	/* Most EEH events are due to device driver bugs. Having
	 * a stack trace will help the device-driver authors figure
	 * out what happened. So print that out.
	 */
	pr_debug("EEH: %s: Frozen PHB#%x-PE#%x detected\n",
		 __func__, pe->phb->global_number, pe->addr);
	eeh_send_failure_event(pe);

	return 1;

dn_unlock:
	eeh_serialize_unlock(flags);
	return rc;
}

EXPORT_SYMBOL_GPL(eeh_dev_check_failure);

/**
 * eeh_check_failure - Check if all 1's data is due to EEH slot freeze
 * @token: I/O address
 *
 * Check for an EEH failure at the given I/O address. Call this
 * routine if the result of a read was all 0xff's and you want to
 * find out if this is due to an EEH slot freeze event. This routine
 * will query firmware for the EEH status.
 *
 * Note this routine is safe to call in an interrupt context.
 */
int eeh_check_failure(const volatile void __iomem *token)
{
	unsigned long addr;
	struct eeh_dev *edev;

	/* Finding the phys addr + pci device; this is pretty quick. */
	addr = eeh_token_to_phys((unsigned long __force) token);
	edev = eeh_addr_cache_get_dev(addr);
	if (!edev) {
		eeh_stats.no_device++;
		return 0;
	}

	return eeh_dev_check_failure(edev);
}
EXPORT_SYMBOL(eeh_check_failure);

/**
 * eeh_pci_enable - Enable MMIO or DMA transfers for this slot
 * @pe: EEH PE
 * @function: EEH option (EEH_OPT_*) to enable
 *
 * This routine should be called to reenable frozen MMIO or DMA
 * so that it would work correctly again. It's useful while doing
 * recovery or log collection on the indicated device.
 */
int eeh_pci_enable(struct eeh_pe *pe, int function)
{
	int active_flag, rc;

	/*
	 * pHyp doesn't allow enabling IO or DMA on an unfrozen PE.
	 * Also, it's pointless to enable them on an unfrozen PE. So
	 * we have to check before enabling IO or DMA.
	 */
	switch (function) {
	case EEH_OPT_THAW_MMIO:
		active_flag = EEH_STATE_MMIO_ACTIVE | EEH_STATE_MMIO_ENABLED;
		break;
	case EEH_OPT_THAW_DMA:
		active_flag = EEH_STATE_DMA_ACTIVE;
		break;
	case EEH_OPT_DISABLE:
	case EEH_OPT_ENABLE:
	case EEH_OPT_FREEZE_PE:
		active_flag = 0;
		break;
	default:
		pr_warn("%s: Invalid function %d\n",
			__func__, function);
		return -EINVAL;
	}

	/*
	 * Check if IO or DMA has been enabled before
	 * enabling them.
	 */
	if (active_flag) {
		rc = eeh_ops->get_state(pe, NULL);
		if (rc < 0)
			return rc;

		/* Needn't enable it at all */
		if (rc == EEH_STATE_NOT_SUPPORT)
			return 0;

		/* It's already enabled */
		if (rc & active_flag)
			return 0;
	}

	/* Issue the request */
	rc = eeh_ops->set_option(pe, function);
	if (rc)
		pr_warn("%s: Unexpected state change %d on "
			"PHB#%x-PE#%x, err=%d\n",
			__func__, function, pe->phb->global_number,
			pe->addr, rc);

	/* Check if the request is finished successfully */
	if (active_flag) {
		rc = eeh_wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
		if (rc < 0)
			return rc;

		if (rc & active_flag)
			return 0;

		return -EIO;
	}

	return rc;
}

static void eeh_disable_and_save_dev_state(struct eeh_dev *edev,
					   void *userdata)
{
	struct pci_dev *pdev = eeh_dev_to_pci_dev(edev);
	struct pci_dev *dev = userdata;

	/*
	 * The caller should have disabled and saved the
	 * state for the specified device
	 */
	if (!pdev || pdev == dev)
		return;

	/* Ensure we have D0 power state */
	pci_set_power_state(pdev, PCI_D0);

	/* Save device state */
	pci_save_state(pdev);

	/*
	 * Disable device to avoid any DMA traffic and
	 * interrupt from the device
	 */
	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
}

static void eeh_restore_dev_state(struct eeh_dev *edev, void *userdata)
{
	struct pci_dev *pdev = eeh_dev_to_pci_dev(edev);
	struct pci_dev *dev = userdata;

	if (!pdev)
		return;

	/* Apply customization from firmware */
	if (eeh_ops->restore_config)
		eeh_ops->restore_config(edev);

	/* The caller should restore state for the specified device */
	if (pdev != dev)
		pci_restore_state(pdev);
}

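/*
 * Usage example for eeh_pci_enable(), taken from eeh_slot_error_detail()
 * above: before collecting logs from a frozen PE, MMIO is thawed so that
 * config space reads don't just return 0xFF's:
 *
 *	eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
 */
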
/**
 * pcibios_set_pcie_reset_state - Set PCI-E reset state
 * @dev: pci device struct
 * @state: reset state to enter
 *
 * Return value:
 *	0 if success
 */
int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
	struct eeh_pe *pe = eeh_dev_to_pe(edev);

	if (!pe) {
		pr_err("%s: No PE found on PCI device %s\n",
		       __func__, pci_name(dev));
		return -EINVAL;
	}

	switch (state) {
	case pcie_deassert_reset:
		eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
		eeh_unfreeze_pe(pe);
		if (!(pe->type & EEH_PE_VF))
			eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, true);
		eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev);
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
		break;
	case pcie_hot_reset:
		eeh_pe_mark_isolated(pe);
		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, true);
		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
		eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
		if (!(pe->type & EEH_PE_VF))
			eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
		eeh_ops->reset(pe, EEH_RESET_HOT);
		break;
	case pcie_warm_reset:
		eeh_pe_mark_isolated(pe);
		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, true);
		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
		eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
		if (!(pe->type & EEH_PE_VF))
			eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
		eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
		break;
	default:
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED | EEH_PE_CFG_BLOCKED, true);
		return -EINVAL;
	}

	return 0;
}

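/*
 * pcibios_set_pcie_reset_state() above is the powerpc backend for the
 * generic pci_set_pcie_reset_state() helper. A driver-side sketch of a
 * hot reset cycle (the pdev here is hypothetical):
 *
 *	pci_set_pcie_reset_state(pdev, pcie_hot_reset);
 *	pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
 */
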
/**
 * eeh_set_dev_freset - Check the required reset for the indicated device
 * @edev: EEH device
 * @flag: return value
 *
 * Each device might have its preferred reset type: fundamental or
 * hot reset. The routine is used to collect the information for
 * the indicated device and its children so that the group of
 * devices can be reset properly.
 */
static void eeh_set_dev_freset(struct eeh_dev *edev, void *flag)
{
	struct pci_dev *dev;
	unsigned int *freset = (unsigned int *)flag;

	dev = eeh_dev_to_pci_dev(edev);
	if (dev)
		*freset |= dev->needs_freset;
}

static void eeh_pe_refreeze_passed(struct eeh_pe *root)
{
	struct eeh_pe *pe;
	int state;

	eeh_for_each_pe(root, pe) {
		if (eeh_pe_passed(pe)) {
			state = eeh_ops->get_state(pe, NULL);
			if (state &
			   (EEH_STATE_MMIO_ACTIVE | EEH_STATE_MMIO_ENABLED)) {
				pr_info("EEH: Passed-through PE PHB#%x-PE#%x was thawed by reset, re-freezing for safety.\n",
					pe->phb->global_number, pe->addr);
				eeh_pe_set_option(pe, EEH_OPT_FREEZE_PE);
			}
		}
	}
}

/**
 * eeh_pe_reset_full - Complete a full reset process on the indicated PE
 * @pe: EEH PE
 * @include_passed: include passed-through PEs in the reset
 *
 * This function executes a full reset procedure on a PE, including setting
 * the appropriate flags, performing a fundamental or hot reset, and then
 * deactivating the reset status. It is designed to be used within the EEH
 * subsystem, as opposed to eeh_pe_reset which is exported to drivers and
 * only performs a single operation at a time.
 *
 * This function will attempt to reset a PE three times before failing.
 */
int eeh_pe_reset_full(struct eeh_pe *pe, bool include_passed)
{
	int reset_state = (EEH_PE_RESET | EEH_PE_CFG_BLOCKED);
	int type = EEH_RESET_HOT;
	unsigned int freset = 0;
	int i, state = 0, ret;

	/*
	 * Determine the type of reset to perform - hot or fundamental.
	 * Hot reset is the default operation, unless any device under the
	 * PE requires a fundamental reset.
	 */
	eeh_pe_dev_traverse(pe, eeh_set_dev_freset, &freset);

	if (freset)
		type = EEH_RESET_FUNDAMENTAL;

	/* Mark the PE as in reset state and block config space accesses */
	eeh_pe_state_mark(pe, reset_state);

	/* Make three attempts at resetting the bus */
	for (i = 0; i < 3; i++) {
		ret = eeh_pe_reset(pe, type, include_passed);
		if (!ret)
			ret = eeh_pe_reset(pe, EEH_RESET_DEACTIVATE,
					   include_passed);
		if (ret) {
			pr_warn("EEH: Failure %d resetting PHB#%x-PE#%x (attempt %d)\n",
				ret, pe->phb->global_number, pe->addr, i + 1);
			ret = -EIO;
			continue;
		}
		if (i)
			pr_warn("EEH: PHB#%x-PE#%x: Successful reset (attempt %d)\n",
				pe->phb->global_number, pe->addr, i + 1);

		/* Wait until the PE is in a functioning state */
		state = eeh_wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
		if (state < 0) {
			pr_warn("EEH: Unrecoverable slot failure on PHB#%x-PE#%x\n",
				pe->phb->global_number, pe->addr);
			ret = -ENOTRECOVERABLE;
			break;
		}
		if (eeh_state_active(state))
			break;
		else
			pr_warn("EEH: PHB#%x-PE#%x: Slot inactive after reset: 0x%x (attempt %d)\n",
				pe->phb->global_number, pe->addr, state, i + 1);
	}

	/* Resetting the PE may have unfrozen child PEs. If those PEs have been
	 * (potentially) passed through to a guest, re-freeze them:
	 */
	if (!include_passed)
		eeh_pe_refreeze_passed(pe);

	eeh_pe_state_clear(pe, reset_state, true);
	return ret;
}

/**
 * eeh_save_bars - Save device bars
 * @edev: PCI device associated EEH device
 *
 * Save the values of the device bars. Unlike the restore
 * routine, this routine is *not* recursive. This is because
 * PCI devices are added individually; but, for the restore,
 * an entire slot is reset at a time.
 */
void eeh_save_bars(struct eeh_dev *edev)
{
	int i;

	if (!edev)
		return;

	for (i = 0; i < 16; i++)
		eeh_ops->read_config(edev, i * 4, 4, &edev->config_space[i]);

	/*
	 * For PCI bridges including the root port, we need to enable bus
	 * mastering explicitly. Otherwise, the bridge can't fetch IODA
	 * table entries correctly. So we cache the bit in advance so that
	 * we can restore it after reset, either PHB range or PE range.
	 */
	if (edev->mode & EEH_DEV_BRIDGE)
		edev->config_space[1] |= PCI_COMMAND_MASTER;
}

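/*
 * Note on the layout saved above: the 16 u32s of edev->config_space
 * cover the 64-byte standard config header, one dword per index, so
 * config_space[1] is the COMMAND/STATUS dword that gets patched with
 * PCI_COMMAND_MASTER for bridges.
 */
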
/**
 * eeh_ops_register - Register platform dependent EEH operations
 * @ops: platform dependent EEH operations
 *
 * Register the platform dependent EEH operation callback
 * functions. The platform should call this function before
 * any other EEH operations.
 */
int __init eeh_ops_register(struct eeh_ops *ops)
{
	if (!ops->name) {
		pr_warn("%s: Invalid EEH ops name for %p\n",
			__func__, ops);
		return -EINVAL;
	}

	if (eeh_ops && eeh_ops != ops) {
		pr_warn("%s: EEH ops of platform %s already existing (%s)\n",
			__func__, eeh_ops->name, ops->name);
		return -EEXIST;
	}

	eeh_ops = ops;

	return 0;
}

/**
 * eeh_ops_unregister - Unregister platform dependent EEH operations
 * @name: name of EEH platform operations
 *
 * Unregister the platform dependent EEH operation callback
 * functions.
 */
int __exit eeh_ops_unregister(const char *name)
{
	if (!name || !strlen(name)) {
		pr_warn("%s: Invalid EEH ops name\n",
			__func__);
		return -EINVAL;
	}

	if (eeh_ops && !strcmp(eeh_ops->name, name)) {
		eeh_ops = NULL;
		return 0;
	}

	return -EEXIST;
}

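/*
 * Registration sketch for a platform backend (the "foo" platform below
 * is hypothetical; the real implementations live in the pseries and
 * powernv platform code):
 *
 *	static struct eeh_ops foo_eeh_ops = {
 *		.name = "foo",
 *		.init = foo_eeh_init,
 *		...
 *	};
 *
 *	eeh_ops_register(&foo_eeh_ops);
 */
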
static int eeh_reboot_notifier(struct notifier_block *nb,
			       unsigned long action, void *unused)
{
	eeh_clear_flag(EEH_ENABLED);
	return NOTIFY_DONE;
}

static struct notifier_block eeh_reboot_nb = {
	.notifier_call = eeh_reboot_notifier,
};

/**
 * eeh_init - EEH initialization
 *
 * Initialize EEH by trying to enable it for all of the adapters in the system.
 * As a side effect we can determine here if eeh is supported at all.
 * Note that we leave EEH on so failed config cycles won't cause a machine
 * check. If a user turns off EEH for a particular adapter they are really
 * telling Linux to ignore errors. Some hardware (e.g. POWER5) won't
 * grant access to a slot if EEH isn't enabled, and so we always enable
 * EEH for all slots/all devices.
 *
 * The eeh-force-off option disables EEH checking globally, for all slots.
 * Even if force-off is set, the EEH hardware is still enabled, so that
 * newer systems can boot.
 */
static int eeh_init(void)
{
	struct pci_controller *hose, *tmp;
	int ret = 0;

	/* Register reboot notifier */
	ret = register_reboot_notifier(&eeh_reboot_nb);
	if (ret) {
		pr_warn("%s: Failed to register notifier (%d)\n",
			__func__, ret);
		return ret;
	}

	/* call platform initialization function */
	if (!eeh_ops) {
		pr_warn("%s: Platform EEH operation not found\n",
			__func__);
		return -EEXIST;
	} else if ((ret = eeh_ops->init()))
		return ret;

	/* Initialize PHB PEs */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
		eeh_phb_pe_create(hose);

	eeh_addr_cache_init();

	/* Initialize EEH event */
	return eeh_event_init();
}

core_initcall_sync(eeh_init);

static int eeh_device_notifier(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	/*
	 * Note: It's not possible to perform EEH device addition (i.e.
	 * {pseries,pnv}_pcibios_bus_add_device()) here because it depends on
	 * the device's resources, which have not yet been set up.
	 */
	case BUS_NOTIFY_DEL_DEVICE:
		eeh_remove_device(to_pci_dev(dev));
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block eeh_device_nb = {
	.notifier_call = eeh_device_notifier,
};

static __init int eeh_set_bus_notifier(void)
{
	bus_register_notifier(&pci_bus_type, &eeh_device_nb);
	return 0;
}
arch_initcall(eeh_set_bus_notifier);

/**
 * eeh_probe_device() - Perform EEH initialization for the indicated pci device
 * @dev: pci device for which to set up EEH
 *
 * This routine must be used to complete EEH initialization for PCI
 * devices that were added after system boot (e.g. hotplug, dlpar).
 */
void eeh_probe_device(struct pci_dev *dev)
{
	struct eeh_dev *edev;

	pr_debug("EEH: Adding device %s\n", pci_name(dev));

	/*
	 * pci_dev_to_eeh_dev() can only work if eeh_probe_device() was
	 * already called for this device.
	 */
	if (WARN_ON_ONCE(pci_dev_to_eeh_dev(dev))) {
		pci_dbg(dev, "Already bound to an eeh_dev!\n");
		return;
	}

	edev = eeh_ops->probe(dev);
	if (!edev) {
		pr_debug("EEH: Adding device failed\n");
		return;
	}

	/*
	 * FIXME: We rely on pcibios_release_device() to remove the
	 * existing EEH state. The release function is only called if
	 * the pci_dev's refcount drops to zero so if something is
	 * keeping a ref to a device (e.g. a filesystem) we need to
	 * remove the old EEH state.
	 *
	 * FIXME: HEY MA, LOOK AT ME, NO LOCKING!
	 */
	if (edev->pdev && edev->pdev != dev) {
		eeh_pe_tree_remove(edev);
		eeh_addr_cache_rmv_dev(edev->pdev);
		eeh_sysfs_remove_device(edev->pdev);

		/*
		 * The old PCI device has been removed, though it
		 * wasn't torn down correctly, so there's no need to
		 * call into the error handlers for it afterwards.
		 */
		edev->mode |= EEH_DEV_NO_HANDLER;
	}

	/* bind the pdev and the edev together */
	edev->pdev = dev;
	dev->dev.archdata.edev = edev;
	eeh_addr_cache_insert_dev(dev);
	eeh_sysfs_add_device(dev);
}

/**
 * eeh_remove_device - Undo EEH setup for the indicated pci device
 * @dev: pci device to be removed
 *
 * This routine should be called when a device is removed from
 * a running system (e.g. by hotplug or dlpar). It unregisters
 * the PCI device from the EEH subsystem. I/O errors affecting
 * this device will no longer be detected after this call; thus,
 * i/o errors affecting this slot may leave this device unusable.
 */
void eeh_remove_device(struct pci_dev *dev)
{
	struct eeh_dev *edev;

	if (!dev || !eeh_enabled())
		return;
	edev = pci_dev_to_eeh_dev(dev);

	/* Unregister the device with the EEH/PCI address search system */
	dev_dbg(&dev->dev, "EEH: Removing device\n");

	if (!edev || !edev->pdev || !edev->pe) {
		dev_dbg(&dev->dev, "EEH: Device not referenced!\n");
		return;
	}

	/*
	 * During the hotplug for EEH error recovery, we need the EEH
	 * device attached to the parent PE in order to restore BARs
	 * a bit later. So we keep it around for the BAR restore and
	 * remove it from the parent PE during the BAR restore.
	 */
	edev->pdev = NULL;

	/*
	 * eeh_sysfs_remove_device() uses pci_dev_to_eeh_dev() so we need to
	 * remove the sysfs files before clearing dev.archdata.edev
	 */
	if (edev->mode & EEH_DEV_SYSFS)
		eeh_sysfs_remove_device(dev);

	/*
	 * We're removing from the PCI subsystem, which means the PCI
	 * device's driver either doesn't support EEH or doesn't
	 * support it well. So we rely entirely on hotplug to do the
	 * recovery for this specific PCI device.
	 */
	edev->mode |= EEH_DEV_NO_HANDLER;

	eeh_addr_cache_rmv_dev(dev);

	/*
	 * The flag "in_error" is used to trace EEH devices for VFs
	 * in error state or not. It's set in eeh_report_error(). If
	 * it's not set, eeh_report_{reset,resume}() won't be called
	 * for the VF EEH device.
	 */
	edev->in_error = false;
	dev->dev.archdata.edev = NULL;
	if (!(edev->pe->state & EEH_PE_KEEP))
		eeh_pe_tree_remove(edev);
	else
		edev->mode |= EEH_DEV_DISCONNECTED;
}

int eeh_unfreeze_pe(struct eeh_pe *pe)
{
	int ret;

	ret = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
	if (ret) {
		pr_warn("%s: Failure %d enabling IO on PHB#%x-PE#%x\n",
			__func__, ret, pe->phb->global_number, pe->addr);
		return ret;
	}

	ret = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
	if (ret) {
		pr_warn("%s: Failure %d enabling DMA on PHB#%x-PE#%x\n",
			__func__, ret, pe->phb->global_number, pe->addr);
		return ret;
	}

	return ret;
}


static struct pci_device_id eeh_reset_ids[] = {
	{ PCI_DEVICE(0x19a2, 0x0710) },	/* Emulex, BE     */
	{ PCI_DEVICE(0x10df, 0xe220) },	/* Emulex, Lancer */
	{ PCI_DEVICE(0x14e4, 0x1657) },	/* Broadcom BCM5719 */
	{ 0 }
};

static int eeh_pe_change_owner(struct eeh_pe *pe)
{
	struct eeh_dev *edev, *tmp;
	struct pci_dev *pdev;
	struct pci_device_id *id;
	int ret;

	/* Check PE state */
	ret = eeh_ops->get_state(pe, NULL);
	if (ret < 0 || ret == EEH_STATE_NOT_SUPPORT)
		return 0;

	/* Unfrozen PE, nothing to do */
	if (eeh_state_active(ret))
		return 0;

	/* Frozen PE, check if it needs PE level reset */
	eeh_pe_for_each_dev(pe, edev, tmp) {
		pdev = eeh_dev_to_pci_dev(edev);
		if (!pdev)
			continue;

		for (id = &eeh_reset_ids[0]; id->vendor != 0; id++) {
			if (id->vendor != PCI_ANY_ID &&
			    id->vendor != pdev->vendor)
				continue;
			if (id->device != PCI_ANY_ID &&
			    id->device != pdev->device)
				continue;
			if (id->subvendor != PCI_ANY_ID &&
			    id->subvendor != pdev->subsystem_vendor)
				continue;
			if (id->subdevice != PCI_ANY_ID &&
			    id->subdevice != pdev->subsystem_device)
				continue;

			return eeh_pe_reset_and_recover(pe);
		}
	}

	ret = eeh_unfreeze_pe(pe);
	if (!ret)
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
	return ret;
}

/**
 * eeh_dev_open - Increase count of pass through devices for PE
 * @pdev: PCI device
 *
 * Increase count of passed through devices for the indicated
 * PE. As a result, the EEH errors detected on the PE won't be
 * reported. The PE owner will be responsible for detection
 * and recovery.
 */
int eeh_dev_open(struct pci_dev *pdev)
{
	struct eeh_dev *edev;
	int ret = -ENODEV;

	mutex_lock(&eeh_dev_mutex);

	/* No PCI device ? */
	if (!pdev)
		goto out;

	/* No EEH device or PE ? */
	edev = pci_dev_to_eeh_dev(pdev);
	if (!edev || !edev->pe)
		goto out;

	/*
	 * The PE might have been put into frozen state, but we
	 * didn't detect that yet. The passed through PCI devices
	 * in frozen PE won't work properly. Clear the frozen state
	 * in advance.
	 */
	ret = eeh_pe_change_owner(edev->pe);
	if (ret)
		goto out;

	/* Increase PE's pass through count */
	atomic_inc(&edev->pe->pass_dev_cnt);
	mutex_unlock(&eeh_dev_mutex);

	return 0;
out:
	mutex_unlock(&eeh_dev_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(eeh_dev_open);

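/*
 * Pass-through sketch: a VFIO-style owner brackets its use of the device
 * with these calls so the EEH core leaves error detection and recovery
 * to it (the pdev below is hypothetical):
 *
 *	eeh_dev_open(pdev);	// EEH core stops reporting errors on the PE
 *	...			// the new owner handles detection/recovery
 *	eeh_dev_release(pdev);	// normal EEH handling resumes
 */
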
/**
 * eeh_dev_release - Decrease count of pass through devices for PE
 * @pdev: PCI device
 *
 * Decrease count of pass through devices for the indicated PE. If
 * there is no passed through device in PE, the EEH errors detected
 * on the PE will be reported and handled as usual.
 */
void eeh_dev_release(struct pci_dev *pdev)
{
	struct eeh_dev *edev;

	mutex_lock(&eeh_dev_mutex);

	/* No PCI device ? */
	if (!pdev)
		goto out;

	/* No EEH device ? */
	edev = pci_dev_to_eeh_dev(pdev);
	if (!edev || !edev->pe || !eeh_pe_passed(edev->pe))
		goto out;

	/* Decrease PE's pass through count */
	WARN_ON(atomic_dec_if_positive(&edev->pe->pass_dev_cnt) < 0);
	eeh_pe_change_owner(edev->pe);
out:
	mutex_unlock(&eeh_dev_mutex);
}
EXPORT_SYMBOL(eeh_dev_release);

#ifdef CONFIG_IOMMU_API

static int dev_has_iommu_table(struct device *dev, void *data)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_dev **ppdev = data;

	if (!dev)
		return 0;

	if (device_iommu_mapped(dev)) {
		*ppdev = pdev;
		return 1;
	}

	return 0;
}

/**
 * eeh_iommu_group_to_pe - Convert IOMMU group to EEH PE
 * @group: IOMMU group
 *
 * The routine is called to convert IOMMU group to EEH PE.
 */
struct eeh_pe *eeh_iommu_group_to_pe(struct iommu_group *group)
{
	struct pci_dev *pdev = NULL;
	struct eeh_dev *edev;
	int ret;

	/* No IOMMU group ? */
	if (!group)
		return NULL;

	ret = iommu_group_for_each_dev(group, &pdev, dev_has_iommu_table);
	if (!ret || !pdev)
		return NULL;

	/* No EEH device or PE ? */
	edev = pci_dev_to_eeh_dev(pdev);
	if (!edev || !edev->pe)
		return NULL;

	return edev->pe;
}
EXPORT_SYMBOL_GPL(eeh_iommu_group_to_pe);

#endif /* CONFIG_IOMMU_API */

/**
 * eeh_pe_set_option - Set options for the indicated PE
 * @pe: EEH PE
 * @option: requested option
 *
 * The routine is called to enable or disable EEH functionality
 * on the indicated PE, to enable IO or DMA for the frozen PE.
 */
int eeh_pe_set_option(struct eeh_pe *pe, int option)
{
	int ret = 0;

	/* Invalid PE ? */
	if (!pe)
		return -ENODEV;

	/*
	 * EEH functionality may have been disabled globally; just
	 * return an error in that case. EEH functionality isn't
	 * expected to be disabled on one specific PE.
	 */
	switch (option) {
	case EEH_OPT_ENABLE:
		if (eeh_enabled()) {
			ret = eeh_pe_change_owner(pe);
			break;
		}
		ret = -EIO;
		break;
	case EEH_OPT_DISABLE:
		break;
	case EEH_OPT_THAW_MMIO:
	case EEH_OPT_THAW_DMA:
	case EEH_OPT_FREEZE_PE:
		if (!eeh_ops || !eeh_ops->set_option) {
			ret = -ENOENT;
			break;
		}

		ret = eeh_pci_enable(pe, option);
		break;
	default:
		pr_debug("%s: Option %d out of range (%d, %d)\n",
			 __func__, option, EEH_OPT_DISABLE, EEH_OPT_THAW_DMA);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(eeh_pe_set_option);

/**
 * eeh_pe_get_state - Retrieve PE's state
 * @pe: EEH PE
 *
 * Retrieve the PE's state, which includes 3 aspects: enabled
 * DMA, enabled IO and asserted reset.
 */
int eeh_pe_get_state(struct eeh_pe *pe)
{
	int result, ret = 0;
	bool rst_active, dma_en, mmio_en;

	/* Existing PE ? */
	if (!pe)
		return -ENODEV;

	if (!eeh_ops || !eeh_ops->get_state)
		return -ENOENT;

	/*
	 * If the parent PE is owned by the host kernel and is undergoing
	 * error recovery, we should return the PE state as temporarily
	 * unavailable so that the error recovery on the guest is suspended
	 * until the recovery completes on the host.
	 */
	if (pe->parent &&
	    !(pe->state & EEH_PE_REMOVED) &&
	    (pe->parent->state & (EEH_PE_ISOLATED | EEH_PE_RECOVERING)))
		return EEH_PE_STATE_UNAVAIL;

	result = eeh_ops->get_state(pe, NULL);
	rst_active = !!(result & EEH_STATE_RESET_ACTIVE);
	dma_en = !!(result & EEH_STATE_DMA_ENABLED);
	mmio_en = !!(result & EEH_STATE_MMIO_ENABLED);

	if (rst_active)
		ret = EEH_PE_STATE_RESET;
	else if (dma_en && mmio_en)
		ret = EEH_PE_STATE_NORMAL;
	else if (!dma_en && !mmio_en)
		ret = EEH_PE_STATE_STOPPED_IO_DMA;
	else if (!dma_en && mmio_en)
		ret = EEH_PE_STATE_STOPPED_DMA;
	else
		ret = EEH_PE_STATE_UNAVAIL;

	return ret;
}
EXPORT_SYMBOL_GPL(eeh_pe_get_state);

static int eeh_pe_reenable_devices(struct eeh_pe *pe, bool include_passed)
{
	struct eeh_dev *edev, *tmp;
	struct pci_dev *pdev;
	int ret = 0;

	eeh_pe_restore_bars(pe);

	/*
	 * Reenable PCI devices as the devices passed
	 * through are always enabled before the reset.
	 */
	eeh_pe_for_each_dev(pe, edev, tmp) {
		pdev = eeh_dev_to_pci_dev(edev);
		if (!pdev)
			continue;

		ret = pci_reenable_device(pdev);
		if (ret) {
			pr_warn("%s: Failure %d reenabling %s\n",
				__func__, ret, pci_name(pdev));
			return ret;
		}
	}

	/* The PE is still in frozen state */
	if (include_passed || !eeh_pe_passed(pe)) {
		ret = eeh_unfreeze_pe(pe);
	} else
		pr_info("EEH: Note: Leaving passthrough PHB#%x-PE#%x frozen.\n",
			pe->phb->global_number, pe->addr);
	if (!ret)
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED, include_passed);
	return ret;
}


/**
 * eeh_pe_reset - Issue PE reset according to specified type
 * @pe: EEH PE
 * @option: reset type
 * @include_passed: include passed-through PEs or not
 *
 * The routine is called to reset the specified PE with the
 * indicated type, either fundamental reset or hot reset.
 * PE reset is the most important part for error recovery.
 */
int eeh_pe_reset(struct eeh_pe *pe, int option, bool include_passed)
{
	int ret = 0;

	/* Invalid PE ? */
	if (!pe)
		return -ENODEV;

	if (!eeh_ops || !eeh_ops->set_option || !eeh_ops->reset)
		return -ENOENT;

	switch (option) {
	case EEH_RESET_DEACTIVATE:
		ret = eeh_ops->reset(pe, option);
		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, include_passed);
		if (ret)
			break;

		ret = eeh_pe_reenable_devices(pe, include_passed);
		break;
	case EEH_RESET_HOT:
	case EEH_RESET_FUNDAMENTAL:
		/*
		 * Proactively freeze the PE to drop all MMIO access
		 * during reset, which must be blocked as it always
		 * causes recursive EEH errors.
		 */
		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);

		eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
		ret = eeh_ops->reset(pe, option);
		break;
	default:
		pr_debug("%s: Unsupported option %d\n",
			 __func__, option);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(eeh_pe_reset);

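/*
 * Usage sketch for eeh_pe_reset(), mirroring what eeh_pe_reset_full()
 * does above: a reset is a two-step operation, assert then deactivate:
 *
 *	ret = eeh_pe_reset(pe, EEH_RESET_HOT, false);
 *	if (!ret)
 *		ret = eeh_pe_reset(pe, EEH_RESET_DEACTIVATE, false);
 */
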
/**
 * eeh_pe_configure - Configure PCI bridges after PE reset
 * @pe: EEH PE
 *
 * The routine is called to restore the PCI config space for
 * those PCI devices, especially PCI bridges affected by PE
 * reset issued previously.
 */
int eeh_pe_configure(struct eeh_pe *pe)
{
	int ret = 0;

	/* Invalid PE ? */
	if (!pe)
		return -ENODEV;

	return ret;
}
EXPORT_SYMBOL_GPL(eeh_pe_configure);

/**
 * eeh_pe_inject_err - Inject the specified PCI error to the indicated PE
 * @pe: the indicated PE
 * @type: error type
 * @func: error function
 * @addr: address
 * @mask: address mask
 *
 * The routine is called to inject the specified PCI error, which
 * is determined by @type and @func, to the indicated PE for
 * testing purposes.
 */
int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func,
		      unsigned long addr, unsigned long mask)
{
	/* Invalid PE ? */
	if (!pe)
		return -ENODEV;

	/* Unsupported operation ? */
	if (!eeh_ops || !eeh_ops->err_inject)
		return -ENOENT;

	/* Check on PCI error type */
	if (type != EEH_ERR_TYPE_32 && type != EEH_ERR_TYPE_64)
		return -EINVAL;

	/* Check on PCI error function */
	if (func < EEH_ERR_FUNC_MIN || func > EEH_ERR_FUNC_MAX)
		return -EINVAL;

	return eeh_ops->err_inject(pe, type, func, addr, mask);
}
EXPORT_SYMBOL_GPL(eeh_pe_inject_err);

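/*
 * Injection sketch for testing. The EEH_ERR_FUNC_* value below is an
 * assumption (any value within the EEH_ERR_FUNC_MIN..MAX range checked
 * above works); addr and mask describe the target MMIO window:
 *
 *	eeh_pe_inject_err(pe, EEH_ERR_TYPE_32, EEH_ERR_FUNC_LD_MEM_ADDR,
 *			  addr, mask);
 */
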
static int proc_eeh_show(struct seq_file *m, void *v)
{
	if (!eeh_enabled()) {
		seq_printf(m, "EEH Subsystem is globally disabled\n");
		seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs);
	} else {
		seq_printf(m, "EEH Subsystem is enabled\n");
		seq_printf(m,
			   "no device=%llu\n"
			   "no device node=%llu\n"
			   "no config address=%llu\n"
			   "check not wanted=%llu\n"
			   "eeh_total_mmio_ffs=%llu\n"
			   "eeh_false_positives=%llu\n"
			   "eeh_slot_resets=%llu\n",
			   eeh_stats.no_device,
			   eeh_stats.no_dn,
			   eeh_stats.no_cfg_addr,
			   eeh_stats.ignored_check,
			   eeh_stats.total_mmio_ffs,
			   eeh_stats.false_positives,
			   eeh_stats.slot_resets);
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int eeh_enable_dbgfs_set(void *data, u64 val)
{
	if (val)
		eeh_clear_flag(EEH_FORCE_DISABLED);
	else
		eeh_add_flag(EEH_FORCE_DISABLED);

	return 0;
}

static int eeh_enable_dbgfs_get(void *data, u64 *val)
{
	if (eeh_enabled())
		*val = 0x1ul;
	else
		*val = 0x0ul;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(eeh_enable_dbgfs_ops, eeh_enable_dbgfs_get,
			 eeh_enable_dbgfs_set, "0x%llx\n");

static ssize_t eeh_force_recover_write(struct file *filp,
				       const char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct pci_controller *hose;
	uint32_t phbid, pe_no;
	struct eeh_pe *pe;
	char buf[20];
	int ret;

	ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
	if (!ret)
		return -EFAULT;

	/*
	 * When PE is NULL the event is a "special" event. Rather than
	 * recovering a specific PE it forces the EEH core to scan for failed
	 * PHBs and recovers each. This needs to be done before any device
	 * recoveries can occur.
	 */
	if (!strncmp(buf, "hwcheck", 7)) {
		__eeh_send_failure_event(NULL);
		return count;
	}

	ret = sscanf(buf, "%x:%x", &phbid, &pe_no);
	if (ret != 2)
		return -EINVAL;

	hose = pci_find_controller_for_domain(phbid);
	if (!hose)
		return -ENODEV;

	/* Retrieve PE */
	pe = eeh_pe_get(hose, pe_no, 0);
	if (!pe)
		return -ENODEV;

	/*
	 * We don't do any state checking here since the detection
	 * process is async to the recovery process. The recovery
	 * thread *should* not break even if we schedule a recovery
	 * from an odd state (e.g. PE removed, or recovery of a
	 * non-isolated PE)
	 */
	__eeh_send_failure_event(pe);

	return ret < 0 ? ret : count;
}

static const struct file_operations eeh_force_recover_fops = {
	.open	= simple_open,
	.llseek	= no_llseek,
	.write	= eeh_force_recover_write,
};

static ssize_t eeh_debugfs_dev_usage(struct file *filp,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	static const char usage[] = "input format: <domain>:<bus>:<dev>.<fn>\n";

	return simple_read_from_buffer(user_buf, count, ppos,
				       usage, sizeof(usage) - 1);
}

static ssize_t eeh_dev_check_write(struct file *filp,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	uint32_t domain, bus, dev, fn;
	struct pci_dev *pdev;
	struct eeh_dev *edev;
	char buf[20];
	int ret;

	memset(buf, 0, sizeof(buf));
	ret = simple_write_to_buffer(buf, sizeof(buf)-1, ppos, user_buf, count);
	if (!ret)
		return -EFAULT;

	ret = sscanf(buf, "%x:%x:%x.%x", &domain, &bus, &dev, &fn);
	if (ret != 4) {
		pr_err("%s: expected 4 args, got %d\n", __func__, ret);
		return -EINVAL;
	}

	pdev = pci_get_domain_bus_and_slot(domain, bus, (dev << 3) | fn);
	if (!pdev)
		return -ENODEV;

	edev = pci_dev_to_eeh_dev(pdev);
	if (!edev) {
		pci_err(pdev, "No eeh_dev for this device!\n");
		pci_dev_put(pdev);
		return -ENODEV;
	}

	ret = eeh_dev_check_failure(edev);
	pci_info(pdev, "eeh_dev_check_failure(%04x:%02x:%02x.%01x) = %d\n",
		 domain, bus, dev, fn, ret);

	pci_dev_put(pdev);

	return count;
}

static const struct file_operations eeh_dev_check_fops = {
	.open	= simple_open,
	.llseek	= no_llseek,
	.write	= eeh_dev_check_write,
	.read	= eeh_debugfs_dev_usage,
};

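/*
 * Shell usage sketch for the debugfs files defined here and below,
 * assuming debugfs is mounted at /sys/kernel/debug. The device address
 * format is the one advertised by eeh_debugfs_dev_usage() above:
 *
 *	echo 0001:01:00.0 > /sys/kernel/debug/powerpc/eeh_dev_check
 *	echo 0001:01:00.0 > /sys/kernel/debug/powerpc/eeh_dev_break
 *	echo hwcheck > /sys/kernel/debug/powerpc/eeh_force_recover
 */
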
static int eeh_debugfs_break_device(struct pci_dev *pdev)
{
	struct resource *bar = NULL;
	void __iomem *mapped;
	u16 old, bit;
	int i, pos;

	/* Do we have an MMIO BAR to disable? */
	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
		struct resource *r = &pdev->resource[i];

		if (!r->flags || !r->start)
			continue;
		if (r->flags & IORESOURCE_IO)
			continue;
		if (r->flags & IORESOURCE_UNSET)
			continue;

		bar = r;
		break;
	}

	if (!bar) {
		pci_err(pdev, "Unable to find Memory BAR to cause EEH with\n");
		return -ENXIO;
	}

	pci_err(pdev, "Going to break: %pR\n", bar);

	if (pdev->is_virtfn) {
#ifndef CONFIG_PCI_IOV
		return -ENXIO;
#else
		/*
		 * VFs don't have a per-function COMMAND register, so the best
		 * we can do is clear the Memory Space Enable bit in the PF's
		 * SRIOV control reg.
		 *
		 * Unfortunately, this requires that we have a PF (i.e doesn't
		 * work for a passed-through VF) and it has the potential side
		 * effect of also causing an EEH on every other VF under the
		 * PF. Oh well.
		 */
		pdev = pdev->physfn;
		if (!pdev)
			return -ENXIO; /* passed through VFs have no PF */

		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
		pos += PCI_SRIOV_CTRL;
		bit = PCI_SRIOV_CTRL_MSE;
#endif /* !CONFIG_PCI_IOV */
	} else {
		bit = PCI_COMMAND_MEMORY;
		pos = PCI_COMMAND;
	}

	/*
	 * Process here is:
	 *
	 * 1. Disable Memory space.
	 *
	 * 2. Perform an MMIO to the device. This should result in an error
	 *    (CA / UR) being raised by the device which results in an EEH
	 *    PE freeze. Using the in_8() accessor skips the EEH detection
	 *    hook, so the EEH detection machinery won't be triggered here.
	 *    This is to match the usual behaviour of EEH where the HW will
	 *    asynchronously freeze a PE and it's up to the kernel to notice
	 *    and deal with it.
	 *
	 * 3. Turn Memory space back on. This is more important for VFs
	 *    since recovery will probably fail if we don't. For normal
	 *    devices the COMMAND register is reset as a part of
	 *    re-initialising the device.
	 *
	 * Breaking stuff is the point so who cares if it's racy ;)
	 */
	pci_read_config_word(pdev, pos, &old);

	mapped = ioremap(bar->start, PAGE_SIZE);
	if (!mapped) {
		pci_err(pdev, "Unable to map MMIO BAR %pR\n", bar);
		return -ENXIO;
	}

	pci_write_config_word(pdev, pos, old & ~bit);
	in_8(mapped);
	pci_write_config_word(pdev, pos, old);

	iounmap(mapped);

	return 0;
}

static ssize_t eeh_dev_break_write(struct file *filp,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	uint32_t domain, bus, dev, fn;
	struct pci_dev *pdev;
	char buf[20];
	int ret;

	memset(buf, 0, sizeof(buf));
	ret = simple_write_to_buffer(buf, sizeof(buf)-1, ppos, user_buf, count);
	if (!ret)
		return -EFAULT;

	ret = sscanf(buf, "%x:%x:%x.%x", &domain, &bus, &dev, &fn);
	if (ret != 4) {
		pr_err("%s: expected 4 args, got %d\n", __func__, ret);
		return -EINVAL;
	}

	pdev = pci_get_domain_bus_and_slot(domain, bus, (dev << 3) | fn);
	if (!pdev)
		return -ENODEV;

	ret = eeh_debugfs_break_device(pdev);
	pci_dev_put(pdev);

	if (ret < 0)
		return ret;

	return count;
}

static const struct file_operations eeh_dev_break_fops = {
	.open	= simple_open,
	.llseek	= no_llseek,
	.write	= eeh_dev_break_write,
	.read	= eeh_debugfs_dev_usage,
};

#endif

static int __init eeh_init_proc(void)
{
	if (machine_is(pseries) || machine_is(powernv)) {
		proc_create_single("powerpc/eeh", 0, NULL, proc_eeh_show);
#ifdef CONFIG_DEBUG_FS
		debugfs_create_file_unsafe("eeh_enable", 0600,
					   powerpc_debugfs_root, NULL,
					   &eeh_enable_dbgfs_ops);
		debugfs_create_u32("eeh_max_freezes", 0600,
				   powerpc_debugfs_root, &eeh_max_freezes);
		debugfs_create_bool("eeh_disable_recovery", 0600,
				    powerpc_debugfs_root,
				    &eeh_debugfs_no_recover);
		debugfs_create_file_unsafe("eeh_dev_check", 0600,
					   powerpc_debugfs_root, NULL,
					   &eeh_dev_check_fops);
		debugfs_create_file_unsafe("eeh_dev_break", 0600,
					   powerpc_debugfs_root, NULL,
					   &eeh_dev_break_fops);
		debugfs_create_file_unsafe("eeh_force_recover", 0600,
					   powerpc_debugfs_root, NULL,
					   &eeh_force_recover_fops);
		eeh_cache_debugfs_init();
#endif
	}

	return 0;
}
__initcall(eeh_init_proc);