/*
 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
 * Copyright IBM Corp. 2004 2005
 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/rtas.h>

/**
 * eeh_pcid_name - Retrieve name of PCI device driver
 * @pdev: PCI device
 *
 * This routine is used to retrieve the name of the PCI device
 * driver, if there is one.
 */
static inline const char *eeh_pcid_name(struct pci_dev *pdev)
{
	if (pdev && pdev->dev.driver)
		return pdev->dev.driver->name;
	return "";
}

/**
 * eeh_pcid_get - Get the PCI device driver
 * @pdev: PCI device
 *
 * The function is used to retrieve the PCI device driver for
 * the indicated PCI device. It also takes a reference on the
 * driver's module to prevent it from being unloaded on the fly;
 * otherwise the kernel could crash.
 */
static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
{
	if (!pdev || !pdev->driver)
		return NULL;

	if (!try_module_get(pdev->driver->driver.owner))
		return NULL;

	return pdev->driver;
}

/**
 * eeh_pcid_put - Drop the reference on the PCI device driver
 * @pdev: PCI device
 *
 * The function drops the module reference taken by eeh_pcid_get()
 * on the driver of the indicated PCI device.
 */
static inline void eeh_pcid_put(struct pci_dev *pdev)
{
	if (!pdev || !pdev->driver)
		return;

	module_put(pdev->driver->driver.owner);
}

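/*
 * A minimal sketch (illustrative only) of how the report callbacks in
 * this file pair the two helpers above around every handler invocation:
 *
 *	driver = eeh_pcid_get(dev);
 *	if (!driver)
 *		return NULL;
 *	if (driver->err_handler && driver->err_handler->error_detected)
 *		driver->err_handler->error_detected(dev, state);
 *	eeh_pcid_put(dev);
 */
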
#if 0
static void print_device_node_tree(struct pci_dn *pdn, int dent)
{
	int i;
	struct device_node *pc;

	if (!pdn)
		return;
	for (i = 0; i < dent; i++)
		printk(" ");
	printk("dn=%s mode=%x \tcfg_addr=%x pe_addr=%x \tfull=%s\n",
		pdn->node->name, pdn->eeh_mode, pdn->eeh_config_addr,
		pdn->eeh_pe_config_addr, pdn->node->full_name);
	dent += 3;
	pc = pdn->node->child;
	while (pc) {
		print_device_node_tree(PCI_DN(pc), dent);
		pc = pc->sibling;
	}
}
#endif

/**
 * eeh_disable_irq - Disable interrupt for the recovering device
 * @dev: PCI device
 *
 * This routine must be called when reporting a temporary or permanent
 * error to a PCI device, to disable that device's interrupt. If the
 * device uses MSI or MSI-X interrupts, no real work is needed because
 * EEH freezes DMA transfers for a device hit by an EEH error, which
 * effectively disables MSI and MSI-X as well.
 */
static void eeh_disable_irq(struct pci_dev *dev)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

	/* Don't disable MSI and MSI-X interrupts. They are
	 * effectively disabled by the DMA Stopped state
	 * when an EEH error occurs.
	 */
	if (dev->msi_enabled || dev->msix_enabled)
		return;

	if (!irq_has_action(dev->irq))
		return;

	edev->mode |= EEH_DEV_IRQ_DISABLED;
	disable_irq_nosync(dev->irq);
}

/**
 * eeh_enable_irq - Enable interrupt for the recovering device
 * @dev: PCI device
 *
 * This routine must be called to re-enable the interrupt once the
 * failed device can be resumed.
 */
static void eeh_enable_irq(struct pci_dev *dev)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

	if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
		/*
		 * FIXME !!!!!
		 *
		 * This is just ass backwards. This maze has
		 * unbalanced irq_enable/disable calls. So instead of
		 * finding the root cause it works around the warning
		 * in the irq_enable code by conditionally calling
		 * into it.
		 *
		 * That's just wrong. The warning in the core code is
		 * there to tell people to fix their asymmetries in
		 * their own code, not by abusing the core information
		 * to avoid it.
		 *
		 * I so wish that the asymmetry would be the other way
		 * round and a few more irq_disable calls render that
		 * shit unusable forever.
		 *
		 *	tglx
		 */
		if (irqd_irq_disabled(irq_get_irq_data(dev->irq)))
			enable_irq(dev->irq);
	}
}

static bool eeh_dev_removed(struct eeh_dev *edev)
{
	/* EEH device removed ? */
	if (!edev || (edev->mode & EEH_DEV_REMOVED))
		return true;

	return false;
}

static void *eeh_dev_save_state(void *data, void *userdata)
{
	struct eeh_dev *edev = data;
	struct pci_dev *pdev;

	if (!edev)
		return NULL;

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return NULL;

	pci_save_state(pdev);
	return NULL;
}

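/*
 * The report callbacks below all follow the eeh_pe_dev_traverse()
 * convention: @data is the eeh_dev being visited and @userdata is a
 * caller-supplied accumulator (here an enum pci_ers_result). Returning
 * NULL continues the walk; a non-NULL return ends it early.
 */
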
/**
 * eeh_report_error - Report pci error to each device driver
 * @data: eeh device
 * @userdata: return value
 *
 * Report an EEH error to each device driver, collect and merge
 * the device driver responses. The cumulative response is passed
 * back in "userdata".
 */
static void *eeh_report_error(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;
	dev->error_state = pci_channel_io_frozen;

	driver = eeh_pcid_get(dev);
	if (!driver)
		return NULL;

	eeh_disable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->error_detected) {
		eeh_pcid_put(dev);
		return NULL;
	}

	rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);

	/* A driver that needs a reset trumps all others */
	if (rc == PCI_ERS_RESULT_NEED_RESET)
		*res = rc;
	if (*res == PCI_ERS_RESULT_NONE)
		*res = rc;

	eeh_pcid_put(dev);
	return NULL;
}

/**
 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
 * @data: eeh device
 * @userdata: return value
 *
 * Tells each device driver that IO ports, MMIO and config space I/O
 * are now enabled. Collects and merges the device driver responses.
 * The cumulative response is passed back in "userdata".
 */
static void *eeh_report_mmio_enabled(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;

	driver = eeh_pcid_get(dev);
	if (!driver)
		return NULL;

	if (!driver->err_handler ||
	    !driver->err_handler->mmio_enabled ||
	    (edev->mode & EEH_DEV_NO_HANDLER)) {
		eeh_pcid_put(dev);
		return NULL;
	}

	rc = driver->err_handler->mmio_enabled(dev);

	/* A driver that needs a reset trumps all others */
	if (rc == PCI_ERS_RESULT_NEED_RESET)
		*res = rc;
	if (*res == PCI_ERS_RESULT_NONE)
		*res = rc;

	eeh_pcid_put(dev);
	return NULL;
}

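/*
 * Note how the responses above are merged: PCI_ERS_RESULT_NEED_RESET
 * from any one driver overrides whatever has accumulated so far, while
 * an accumulator still holding PCI_ERS_RESULT_NONE simply adopts the
 * driver's answer. eeh_report_reset() below uses a slightly different
 * merge, since it must also let a reset request override a previous
 * PCI_ERS_RESULT_DISCONNECT.
 */
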
/**
 * eeh_report_reset - Tell device that slot has been reset
 * @data: eeh device
 * @userdata: return value
 *
 * This routine must be called while EEH resets a particular PCI
 * device, so that the associated PCI device driver can take some
 * actions, usually saving the data the driver needs so that it can
 * work again once the device is recovered.
 */
static void *eeh_report_reset(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;
	dev->error_state = pci_channel_io_normal;

	driver = eeh_pcid_get(dev);
	if (!driver)
		return NULL;

	eeh_enable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->slot_reset ||
	    (edev->mode & EEH_DEV_NO_HANDLER)) {
		eeh_pcid_put(dev);
		return NULL;
	}

	rc = driver->err_handler->slot_reset(dev);
	if ((*res == PCI_ERS_RESULT_NONE) ||
	    (*res == PCI_ERS_RESULT_RECOVERED))
		*res = rc;
	if (*res == PCI_ERS_RESULT_DISCONNECT &&
	    rc == PCI_ERS_RESULT_NEED_RESET)
		*res = rc;

	eeh_pcid_put(dev);
	return NULL;
}

static void *eeh_dev_restore_state(void *data, void *userdata)
{
	struct eeh_dev *edev = data;
	struct pci_dev *pdev;

	if (!edev)
		return NULL;

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return NULL;

	pci_restore_state(pdev);
	return NULL;
}

/**
 * eeh_report_resume - Tell device to resume normal operations
 * @data: eeh device
 * @userdata: return value
 *
 * This routine must be called to notify the device driver that it
 * can resume, so that the driver can do whatever initialization is
 * needed to make the recovered device work again.
 */
static void *eeh_report_resume(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;
	dev->error_state = pci_channel_io_normal;

	driver = eeh_pcid_get(dev);
	if (!driver)
		return NULL;

	eeh_enable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->resume ||
	    (edev->mode & EEH_DEV_NO_HANDLER)) {
		edev->mode &= ~EEH_DEV_NO_HANDLER;
		eeh_pcid_put(dev);
		return NULL;
	}

	driver->err_handler->resume(dev);

	eeh_pcid_put(dev);
	return NULL;
}

/**
 * eeh_report_failure - Tell device driver that device is dead.
 * @data: eeh device
 * @userdata: return value
 *
 * This informs the device driver that the device is permanently
 * dead, and that no further recovery attempts will be made on it.
 */
static void *eeh_report_failure(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;
	dev->error_state = pci_channel_io_perm_failure;

	driver = eeh_pcid_get(dev);
	if (!driver)
		return NULL;

	eeh_disable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->error_detected) {
		eeh_pcid_put(dev);
		return NULL;
	}

	driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);

	eeh_pcid_put(dev);
	return NULL;
}

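/*
 * The two helpers below implement the "partial hotplug" used when a PE
 * is reset without tearing down the whole bus: devices whose drivers
 * are not EEH aware (no err_handler) are removed from the PCI
 * subsystem before the reset (eeh_rmv_device), and detached from their
 * parent PE after the reset (eeh_pe_detach_dev) so that the binding
 * can be rebuilt when the devices are re-probed.
 */
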
static void *eeh_rmv_device(void *data, void *userdata)
{
	struct pci_driver *driver;
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	int *removed = (int *)userdata;

	/*
	 * Actually, we should remove the PCI bridges as well.
	 * However, that would add lots of complexity, particularly
	 * since some of the devices under a bridge might support
	 * EEH. So for simplicity we only care about plain PCI
	 * devices here.
	 */
	if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
		return NULL;

	/*
	 * We rely on the reference-counted pcibios_release_device()
	 * to detach permanently offlined PEs. Unfortunately, that's
	 * not reliable enough: we might still have permanently
	 * offlined PEs attached, but we need not take care of them
	 * or their child devices.
	 */
	if (eeh_dev_removed(edev))
		return NULL;

	driver = eeh_pcid_get(dev);
	if (driver) {
		eeh_pcid_put(dev);
		if (driver->err_handler)
			return NULL;
	}

	/* Remove it from PCI subsystem */
	pr_debug("EEH: Removing %s without EEH sensitive driver\n",
		 pci_name(dev));
	edev->bus = dev->bus;
	edev->mode |= EEH_DEV_DISCONNECTED;
	(*removed)++;

	pci_lock_rescan_remove();
	pci_stop_and_remove_bus_device(dev);
	pci_unlock_rescan_remove();

	return NULL;
}

static void *eeh_pe_detach_dev(void *data, void *userdata)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	struct eeh_dev *edev, *tmp;

	eeh_pe_for_each_dev(pe, edev, tmp) {
		if (!(edev->mode & EEH_DEV_DISCONNECTED))
			continue;

		edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
		eeh_rmv_from_parent_pe(edev);
	}

	return NULL;
}

/*
 * Explicitly clear the PE's frozen state on PowerNV, where the PE is
 * kept frozen until BAR restoration has completed. It's harmless to
 * clear it on pSeries. To be consistent with PE reset (tried up to
 * 3 times), we try to clear the frozen state up to 3 times as well.
 */
static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	bool *clear_sw_state = flag;
	int i, rc = 1;

	for (i = 0; rc && i < 3; i++)
		rc = eeh_unfreeze_pe(pe, clear_sw_state);

	/* Stop immediately on any errors */
	if (rc) {
		pr_warn("%s: Failure %d unfreezing PHB#%x-PE#%x\n",
			__func__, rc, pe->phb->global_number, pe->addr);
		return (void *)pe;
	}

	return NULL;
}

static int eeh_clear_pe_frozen_state(struct eeh_pe *pe,
				     bool clear_sw_state)
{
	void *rc;

	rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, &clear_sw_state);
	if (!rc)
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED);

	return rc ? -EIO : 0;
}

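/*
 * eeh_pe_reset_and_recover() below strings the pieces above into one
 * complete recovery pass: save device state, report the error, reset
 * the PE, clear its frozen state, report the reset, restore device
 * state, and finally ask the drivers to resume.
 */
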
int eeh_pe_reset_and_recover(struct eeh_pe *pe)
{
	int result, ret;

	/* Bail if the PE is being recovered */
	if (pe->state & EEH_PE_RECOVERING)
		return 0;

	/* Put the PE into recovery mode */
	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);

	/* Save states */
	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);

	/* Report error */
	eeh_pe_dev_traverse(pe, eeh_report_error, &result);

	/* Issue reset */
	eeh_pe_state_mark(pe, EEH_PE_RESET);
	ret = eeh_reset_pe(pe);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_RESET);
		return ret;
	}
	eeh_pe_state_clear(pe, EEH_PE_RESET);

	/* Unfreeze the PE */
	ret = eeh_clear_pe_frozen_state(pe, true);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		return ret;
	}

	/* Notify completion of reset */
	eeh_pe_dev_traverse(pe, eeh_report_reset, &result);

	/* Restore device state */
	eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);

	/* Resume */
	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);

	/* Clear recovery mode */
	eeh_pe_state_clear(pe, EEH_PE_RECOVERING);

	return 0;
}

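/*
 * eeh_reset_device() below runs in one of two modes: with a non-NULL
 * @bus it performs a full hotplug (every device on the bus is removed
 * and later re-probed), while with a NULL @bus it performs the partial
 * hotplug described above, removing only the devices that lack an
 * EEH-aware driver.
 */
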
/**
 * eeh_reset_device - Perform actual reset of a pci slot
 * @pe: EEH PE
 * @bus: PCI bus corresponding to the isolated slot
 *
 * This routine must be called to do reset on the indicated PE.
 * During the reset, udev might be invoked because the affected
 * PCI devices will be removed and then re-added.
 */
static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
{
	struct pci_bus *frozen_bus = eeh_pe_bus_get(pe);
	struct timeval tstamp;
	int cnt, rc, removed = 0;

	/* pcibios will clear the counter; save the value */
	cnt = pe->freeze_count;
	tstamp = pe->tstamp;

	/*
	 * We don't remove the corresponding PE instances because
	 * we need the information afterwards. The detached EEH
	 * devices are expected to be re-attached soon, when
	 * pcibios_add_pci_devices() is called.
	 */
	eeh_pe_state_mark(pe, EEH_PE_KEEP);
	if (bus) {
		pci_lock_rescan_remove();
		pcibios_remove_pci_devices(bus);
		pci_unlock_rescan_remove();
	} else if (frozen_bus) {
		eeh_pe_dev_traverse(pe, eeh_rmv_device, &removed);
	}

	/*
	 * Reset the pci controller. (Asserts RST#; resets config space).
	 * Reconfigure bridges and devices. Don't try to bring the system
	 * up if the reset failed for some reason.
	 *
	 * During the reset, it's very dangerous to have uncontrolled PCI
	 * config accesses. So we prefer to block them. However, controlled
	 * PCI config accesses initiated from EEH itself are allowed.
	 */
	eeh_pe_state_mark(pe, EEH_PE_RESET);
	rc = eeh_reset_pe(pe);
	if (rc) {
		eeh_pe_state_clear(pe, EEH_PE_RESET);
		return rc;
	}

	pci_lock_rescan_remove();

	/* Restore PE */
	eeh_ops->configure_bridge(pe);
	eeh_pe_restore_bars(pe);
	eeh_pe_state_clear(pe, EEH_PE_RESET);

	/* Clear frozen state */
	rc = eeh_clear_pe_frozen_state(pe, false);
	if (rc) {
		/* Don't leak the rescan-remove lock on the error path */
		pci_unlock_rescan_remove();
		return rc;
	}

	/* Give the system 5 seconds to finish running the user-space
	 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
	 * this is a hack, but if we don't do this, and try to bring
	 * the device up before the scripts have taken it down,
	 * potentially weird things happen.
	 */
	if (bus) {
		pr_info("EEH: Sleep 5s ahead of complete hotplug\n");
		ssleep(5);

		/*
		 * The EEH device is still connected with its parent
		 * PE. We should disconnect it so the binding can be
		 * rebuilt when adding PCI devices.
		 */
		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		pcibios_add_pci_devices(bus);
	} else if (frozen_bus && removed) {
		pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
		ssleep(5);

		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		pcibios_add_pci_devices(frozen_bus);
	}
	eeh_pe_state_clear(pe, EEH_PE_KEEP);

	pe->tstamp = tstamp;
	pe->freeze_count = cnt;

	pci_unlock_rescan_remove();
	return 0;
}

/* The longest amount of time to wait for a pci device
 * to come back on line, in seconds.
 */
#define MAX_WAIT_FOR_RECOVERY 300

static void eeh_handle_normal_event(struct eeh_pe *pe)
{
	struct pci_bus *frozen_bus;
	int rc = 0;
	enum pci_ers_result result = PCI_ERS_RESULT_NONE;

	frozen_bus = eeh_pe_bus_get(pe);
	if (!frozen_bus) {
		pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
			__func__, pe->phb->global_number, pe->addr);
		return;
	}

	eeh_pe_update_time_stamp(pe);
	pe->freeze_count++;
	if (pe->freeze_count > EEH_MAX_ALLOWED_FREEZES)
		goto excess_failures;
	pr_warn("EEH: This PCI device has failed %d times in the last hour\n",
		pe->freeze_count);

	/* Walk the various device drivers attached to this slot through
	 * a reset sequence, giving each an opportunity to do what it needs
	 * to accomplish the reset. Each child gets a report of the
	 * status ... if any child can't handle the reset, then the entire
	 * slot is dlpar removed and added.
	 */
	pr_info("EEH: Notify device drivers to shutdown\n");
	eeh_pe_dev_traverse(pe, eeh_report_error, &result);

	/* Get the current PCI slot state. This can take a long time,
	 * sometimes over 3 seconds for certain systems.
	 */
	rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
	if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
		pr_warn("EEH: Permanent failure\n");
		goto hard_fail;
	}

	/* Since rtas may enable MMIO when posting the error log,
	 * don't post the error log until after all dev drivers
	 * have been informed.
	 */
	pr_info("EEH: Collect temporary log\n");
	eeh_slot_error_detail(pe, EEH_LOG_TEMP);

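	/*
	 * From here on, the merged driver response drives the recovery
	 * strategy: PCI_ERS_RESULT_NONE means no driver was EEH aware,
	 * so the slot is reset with full hotplug; CAN_RECOVER thaws
	 * MMIO and then DMA, possibly escalating to NEED_RESET;
	 * NEED_RESET resets the slot without full hotplug; DISCONNECT
	 * (or anything left unrecovered) falls through to the hard
	 * failure path.
	 */
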
	/* If all device drivers were EEH-unaware, then shut
	 * down all of the device drivers, and hope they
	 * go down willingly, without panicking the system.
	 */
	if (result == PCI_ERS_RESULT_NONE) {
		pr_info("EEH: Reset with hotplug activity\n");
		rc = eeh_reset_device(pe, frozen_bus);
		if (rc) {
			pr_warn("%s: Unable to reset, err=%d\n",
				__func__, rc);
			goto hard_fail;
		}
	}

	/* If all devices reported they can proceed, then re-enable MMIO */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enable I/O for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);

		if (rc < 0)
			goto hard_fail;
		if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			pr_info("EEH: Notify device drivers to resume I/O\n");
			eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result);
		}
	}

	/* If all devices reported they can proceed, then re-enable DMA */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enable DMA for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);

		if (rc < 0)
			goto hard_fail;
		if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			/*
			 * We didn't do PE reset for this case. The PE
			 * is still in frozen state. Clear it before
			 * resuming the PE.
			 */
			eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
			result = PCI_ERS_RESULT_RECOVERED;
		}
	}

	/* If any device has a hard failure, then shut off everything. */
	if (result == PCI_ERS_RESULT_DISCONNECT) {
		pr_warn("EEH: Device driver gave up\n");
		goto hard_fail;
	}

	/* If any device called out for a reset, then reset the slot */
	if (result == PCI_ERS_RESULT_NEED_RESET) {
		pr_info("EEH: Reset without hotplug activity\n");
		rc = eeh_reset_device(pe, NULL);
		if (rc) {
			pr_warn("%s: Cannot reset, err=%d\n",
				__func__, rc);
			goto hard_fail;
		}

		pr_info("EEH: Notify device drivers of the completion of reset\n");
		result = PCI_ERS_RESULT_NONE;
		eeh_pe_dev_traverse(pe, eeh_report_reset, &result);
	}

	/* All devices should claim they have recovered by now. */
	if ((result != PCI_ERS_RESULT_RECOVERED) &&
	    (result != PCI_ERS_RESULT_NONE)) {
		pr_warn("EEH: Not recovered\n");
		goto hard_fail;
	}

	/* Tell all device drivers that they can resume operations */
	pr_info("EEH: Notify device driver to resume\n");
	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);

	return;

excess_failures:
	/*
	 * About 90% of all real-life EEH failures in the field
	 * are due to poorly seated PCI cards. Only 10% or so are
	 * due to actual, failed cards.
	 */
	pr_err("EEH: PHB#%d-PE#%x has failed %d times in the\n"
	       "last hour and has been permanently disabled.\n"
	       "Please try reseating or replacing it.\n",
		pe->phb->global_number, pe->addr,
		pe->freeze_count);
	goto perm_error;

hard_fail:
	pr_err("EEH: Unable to recover from failure of PHB#%d-PE#%x.\n"
	       "Please try reseating or replacing it.\n",
		pe->phb->global_number, pe->addr);

perm_error:
	eeh_slot_error_detail(pe, EEH_LOG_PERM);

	/* Notify all devices that they're about to go down. */
	eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);

	/* Mark the PE to be removed permanently */
	pe->freeze_count = EEH_MAX_ALLOWED_FREEZES + 1;

	/*
	 * Shut down the device drivers for good. We mark
	 * all removed devices correctly to avoid accessing
	 * their PCI config space any more.
	 */
	if (frozen_bus) {
		eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);

		pci_lock_rescan_remove();
		pcibios_remove_pci_devices(frozen_bus);
		pci_unlock_rescan_remove();
	}
}

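/*
 * eeh_handle_special_event() below deals with errors above the level
 * of a single PE: a dead I/O controller, a dead PHB, or a fenced PHB,
 * as reported by eeh_ops->next_error(). Frozen-PE and fenced-PHB
 * errors are fed back into eeh_handle_normal_event(); for dead PHBs
 * and a dead IOC, every affected device is reported as failed and
 * removed.
 */
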
static void eeh_handle_special_event(void)
{
	struct eeh_pe *pe, *phb_pe;
	struct pci_bus *bus;
	struct pci_controller *hose;
	unsigned long flags;
	int rc;

	do {
		rc = eeh_ops->next_error(&pe);

		switch (rc) {
		case EEH_NEXT_ERR_DEAD_IOC:
			/* Mark all PHBs in dead state */
			eeh_serialize_lock(&flags);

			/* Purge all events */
			eeh_remove_event(NULL, true);

			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe)
					continue;

				eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
			}

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_FROZEN_PE:
		case EEH_NEXT_ERR_FENCED_PHB:
		case EEH_NEXT_ERR_DEAD_PHB:
			/* Mark the PE in fenced state */
			eeh_serialize_lock(&flags);

			/* Purge all events of the PHB */
			eeh_remove_event(pe, true);

			if (rc == EEH_NEXT_ERR_DEAD_PHB)
				eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
			else
				eeh_pe_state_mark(pe,
					EEH_PE_ISOLATED | EEH_PE_RECOVERING);

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_NONE:
			return;
		default:
			pr_warn("%s: Invalid value %d from next_error()\n",
				__func__, rc);
			return;
		}

		/*
		 * A fenced PHB or frozen PE is handled as a normal
		 * event. For a dead PHB or a dead IOC, we have to
		 * remove the affected PHBs.
		 */
		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
		    rc == EEH_NEXT_ERR_FENCED_PHB) {
			eeh_handle_normal_event(pe);
			eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		} else {
			pci_lock_rescan_remove();
			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe ||
				    !(phb_pe->state & EEH_PE_ISOLATED) ||
				    (phb_pe->state & EEH_PE_RECOVERING))
					continue;

				/* Notify all devices to be down */
				bus = eeh_pe_bus_get(phb_pe);
				eeh_pe_dev_traverse(phb_pe,
					eeh_report_failure, NULL);
				pcibios_remove_pci_devices(bus);
			}
			pci_unlock_rescan_remove();
		}

		/*
		 * If we have detected a dead IOC, we needn't proceed
		 * any further since all PHBs would have been removed.
		 */
		if (rc == EEH_NEXT_ERR_DEAD_IOC)
			break;
	} while (rc != EEH_NEXT_ERR_NONE);
}

/**
 * eeh_handle_event - Reset a PCI device after hard lockup.
 * @pe: EEH PE
 *
 * When a PHB detects address or data parity errors on a particular
 * PCI slot, the associated PE will be frozen. Besides, DMAs to wild
 * addresses (which usually happen due to bugs in device drivers or
 * in PCI adapter firmware) can cause EEH errors. #SERR, #PERR or
 * other misc PCI-related errors can also trigger EEH errors.
 *
 * The recovery process consists of unplugging the device driver
 * (which generates hotplug events to userspace), then issuing a PCI
 * #RST to the device, then reconfiguring the PCI config space for all
 * bridges & devices under this slot, and then finally restarting the
 * device drivers (which causes a second set of hotplug events to go
 * out to userspace).
 */
void eeh_handle_event(struct eeh_pe *pe)
{
	if (pe)
		eeh_handle_normal_event(pe);
	else
		eeh_handle_special_event();
}