1 /* 2 * PCI Error Recovery Driver for RPA-compliant PPC64 platform. 3 * Copyright IBM Corp. 2004 2005 4 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005 5 * 6 * All rights reserved. 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or (at 11 * your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 16 * NON INFRINGEMENT. See the GNU General Public License for more 17 * details. 18 * 19 * You should have received a copy of the GNU General Public License 20 * along with this program; if not, write to the Free Software 21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 22 * 23 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com> 24 */ 25 #include <linux/delay.h> 26 #include <linux/interrupt.h> 27 #include <linux/irq.h> 28 #include <linux/module.h> 29 #include <linux/pci.h> 30 #include <asm/eeh.h> 31 #include <asm/eeh_event.h> 32 #include <asm/ppc-pci.h> 33 #include <asm/pci-bridge.h> 34 #include <asm/prom.h> 35 #include <asm/rtas.h> 36 37 /** 38 * eeh_pcid_name - Retrieve name of PCI device driver 39 * @pdev: PCI device 40 * 41 * This routine is used to retrieve the name of PCI device driver 42 * if that's valid. 43 */ 44 static inline const char *eeh_pcid_name(struct pci_dev *pdev) 45 { 46 if (pdev && pdev->dev.driver) 47 return pdev->dev.driver->name; 48 return ""; 49 } 50 51 /** 52 * eeh_pcid_get - Get the PCI device driver 53 * @pdev: PCI device 54 * 55 * The function is used to retrieve the PCI device driver for 56 * the indicated PCI device. Besides, we will increase the reference 57 * of the PCI device driver to prevent that being unloaded on 58 * the fly. Otherwise, kernel crash would be seen. 59 */ 60 static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev) 61 { 62 if (!pdev || !pdev->driver) 63 return NULL; 64 65 if (!try_module_get(pdev->driver->driver.owner)) 66 return NULL; 67 68 return pdev->driver; 69 } 70 71 /** 72 * eeh_pcid_put - Dereference on the PCI device driver 73 * @pdev: PCI device 74 * 75 * The function is called to do dereference on the PCI device 76 * driver of the indicated PCI device. 77 */ 78 static inline void eeh_pcid_put(struct pci_dev *pdev) 79 { 80 if (!pdev || !pdev->driver) 81 return; 82 83 module_put(pdev->driver->driver.owner); 84 } 85 86 /** 87 * eeh_disable_irq - Disable interrupt for the recovering device 88 * @dev: PCI device 89 * 90 * This routine must be called when reporting temporary or permanent 91 * error to the particular PCI device to disable interrupt of that 92 * device. If the device has enabled MSI or MSI-X interrupt, we needn't 93 * do real work because EEH should freeze DMA transfers for those PCI 94 * devices encountering EEH errors, which includes MSI or MSI-X. 95 */ 96 static void eeh_disable_irq(struct pci_dev *dev) 97 { 98 struct eeh_dev *edev = pci_dev_to_eeh_dev(dev); 99 100 /* Don't disable MSI and MSI-X interrupts. They are 101 * effectively disabled by the DMA Stopped state 102 * when an EEH error occurs. 103 */ 104 if (dev->msi_enabled || dev->msix_enabled) 105 return; 106 107 if (!irq_has_action(dev->irq)) 108 return; 109 110 edev->mode |= EEH_DEV_IRQ_DISABLED; 111 disable_irq_nosync(dev->irq); 112 } 113 114 /** 115 * eeh_enable_irq - Enable interrupt for the recovering device 116 * @dev: PCI device 117 * 118 * This routine must be called to enable interrupt while failed 119 * device could be resumed. 120 */ 121 static void eeh_enable_irq(struct pci_dev *dev) 122 { 123 struct eeh_dev *edev = pci_dev_to_eeh_dev(dev); 124 125 if ((edev->mode) & EEH_DEV_IRQ_DISABLED) { 126 edev->mode &= ~EEH_DEV_IRQ_DISABLED; 127 /* 128 * FIXME !!!!! 129 * 130 * This is just ass backwards. This maze has 131 * unbalanced irq_enable/disable calls. So instead of 132 * finding the root cause it works around the warning 133 * in the irq_enable code by conditionally calling 134 * into it. 135 * 136 * That's just wrong.The warning in the core code is 137 * there to tell people to fix their assymetries in 138 * their own code, not by abusing the core information 139 * to avoid it. 140 * 141 * I so wish that the assymetry would be the other way 142 * round and a few more irq_disable calls render that 143 * shit unusable forever. 144 * 145 * tglx 146 */ 147 if (irqd_irq_disabled(irq_get_irq_data(dev->irq))) 148 enable_irq(dev->irq); 149 } 150 } 151 152 static bool eeh_dev_removed(struct eeh_dev *edev) 153 { 154 /* EEH device removed ? */ 155 if (!edev || (edev->mode & EEH_DEV_REMOVED)) 156 return true; 157 158 return false; 159 } 160 161 static void *eeh_dev_save_state(void *data, void *userdata) 162 { 163 struct eeh_dev *edev = data; 164 struct pci_dev *pdev; 165 166 if (!edev) 167 return NULL; 168 169 pdev = eeh_dev_to_pci_dev(edev); 170 if (!pdev) 171 return NULL; 172 173 pci_save_state(pdev); 174 return NULL; 175 } 176 177 /** 178 * eeh_report_error - Report pci error to each device driver 179 * @data: eeh device 180 * @userdata: return value 181 * 182 * Report an EEH error to each device driver, collect up and 183 * merge the device driver responses. Cumulative response 184 * passed back in "userdata". 185 */ 186 static void *eeh_report_error(void *data, void *userdata) 187 { 188 struct eeh_dev *edev = (struct eeh_dev *)data; 189 struct pci_dev *dev = eeh_dev_to_pci_dev(edev); 190 enum pci_ers_result rc, *res = userdata; 191 struct pci_driver *driver; 192 193 if (!dev || eeh_dev_removed(edev)) 194 return NULL; 195 dev->error_state = pci_channel_io_frozen; 196 197 driver = eeh_pcid_get(dev); 198 if (!driver) return NULL; 199 200 eeh_disable_irq(dev); 201 202 if (!driver->err_handler || 203 !driver->err_handler->error_detected) { 204 eeh_pcid_put(dev); 205 return NULL; 206 } 207 208 rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen); 209 210 /* A driver that needs a reset trumps all others */ 211 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; 212 if (*res == PCI_ERS_RESULT_NONE) *res = rc; 213 214 eeh_pcid_put(dev); 215 return NULL; 216 } 217 218 /** 219 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled 220 * @data: eeh device 221 * @userdata: return value 222 * 223 * Tells each device driver that IO ports, MMIO and config space I/O 224 * are now enabled. Collects up and merges the device driver responses. 225 * Cumulative response passed back in "userdata". 226 */ 227 static void *eeh_report_mmio_enabled(void *data, void *userdata) 228 { 229 struct eeh_dev *edev = (struct eeh_dev *)data; 230 struct pci_dev *dev = eeh_dev_to_pci_dev(edev); 231 enum pci_ers_result rc, *res = userdata; 232 struct pci_driver *driver; 233 234 if (!dev || eeh_dev_removed(edev)) 235 return NULL; 236 237 driver = eeh_pcid_get(dev); 238 if (!driver) return NULL; 239 240 if (!driver->err_handler || 241 !driver->err_handler->mmio_enabled || 242 (edev->mode & EEH_DEV_NO_HANDLER)) { 243 eeh_pcid_put(dev); 244 return NULL; 245 } 246 247 rc = driver->err_handler->mmio_enabled(dev); 248 249 /* A driver that needs a reset trumps all others */ 250 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; 251 if (*res == PCI_ERS_RESULT_NONE) *res = rc; 252 253 eeh_pcid_put(dev); 254 return NULL; 255 } 256 257 /** 258 * eeh_report_reset - Tell device that slot has been reset 259 * @data: eeh device 260 * @userdata: return value 261 * 262 * This routine must be called while EEH tries to reset particular 263 * PCI device so that the associated PCI device driver could take 264 * some actions, usually to save data the driver needs so that the 265 * driver can work again while the device is recovered. 266 */ 267 static void *eeh_report_reset(void *data, void *userdata) 268 { 269 struct eeh_dev *edev = (struct eeh_dev *)data; 270 struct pci_dev *dev = eeh_dev_to_pci_dev(edev); 271 enum pci_ers_result rc, *res = userdata; 272 struct pci_driver *driver; 273 274 if (!dev || eeh_dev_removed(edev)) 275 return NULL; 276 dev->error_state = pci_channel_io_normal; 277 278 driver = eeh_pcid_get(dev); 279 if (!driver) return NULL; 280 281 eeh_enable_irq(dev); 282 283 if (!driver->err_handler || 284 !driver->err_handler->slot_reset || 285 (edev->mode & EEH_DEV_NO_HANDLER)) { 286 eeh_pcid_put(dev); 287 return NULL; 288 } 289 290 rc = driver->err_handler->slot_reset(dev); 291 if ((*res == PCI_ERS_RESULT_NONE) || 292 (*res == PCI_ERS_RESULT_RECOVERED)) *res = rc; 293 if (*res == PCI_ERS_RESULT_DISCONNECT && 294 rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; 295 296 eeh_pcid_put(dev); 297 return NULL; 298 } 299 300 static void *eeh_dev_restore_state(void *data, void *userdata) 301 { 302 struct eeh_dev *edev = data; 303 struct pci_dev *pdev; 304 305 if (!edev) 306 return NULL; 307 308 pdev = eeh_dev_to_pci_dev(edev); 309 if (!pdev) 310 return NULL; 311 312 pci_restore_state(pdev); 313 return NULL; 314 } 315 316 /** 317 * eeh_report_resume - Tell device to resume normal operations 318 * @data: eeh device 319 * @userdata: return value 320 * 321 * This routine must be called to notify the device driver that it 322 * could resume so that the device driver can do some initialization 323 * to make the recovered device work again. 324 */ 325 static void *eeh_report_resume(void *data, void *userdata) 326 { 327 struct eeh_dev *edev = (struct eeh_dev *)data; 328 struct pci_dev *dev = eeh_dev_to_pci_dev(edev); 329 struct pci_driver *driver; 330 331 if (!dev || eeh_dev_removed(edev)) 332 return NULL; 333 dev->error_state = pci_channel_io_normal; 334 335 driver = eeh_pcid_get(dev); 336 if (!driver) return NULL; 337 338 eeh_enable_irq(dev); 339 340 if (!driver->err_handler || 341 !driver->err_handler->resume || 342 (edev->mode & EEH_DEV_NO_HANDLER)) { 343 edev->mode &= ~EEH_DEV_NO_HANDLER; 344 eeh_pcid_put(dev); 345 return NULL; 346 } 347 348 driver->err_handler->resume(dev); 349 350 eeh_pcid_put(dev); 351 return NULL; 352 } 353 354 /** 355 * eeh_report_failure - Tell device driver that device is dead. 356 * @data: eeh device 357 * @userdata: return value 358 * 359 * This informs the device driver that the device is permanently 360 * dead, and that no further recovery attempts will be made on it. 361 */ 362 static void *eeh_report_failure(void *data, void *userdata) 363 { 364 struct eeh_dev *edev = (struct eeh_dev *)data; 365 struct pci_dev *dev = eeh_dev_to_pci_dev(edev); 366 struct pci_driver *driver; 367 368 if (!dev || eeh_dev_removed(edev)) 369 return NULL; 370 dev->error_state = pci_channel_io_perm_failure; 371 372 driver = eeh_pcid_get(dev); 373 if (!driver) return NULL; 374 375 eeh_disable_irq(dev); 376 377 if (!driver->err_handler || 378 !driver->err_handler->error_detected) { 379 eeh_pcid_put(dev); 380 return NULL; 381 } 382 383 driver->err_handler->error_detected(dev, pci_channel_io_perm_failure); 384 385 eeh_pcid_put(dev); 386 return NULL; 387 } 388 389 static void *eeh_rmv_device(void *data, void *userdata) 390 { 391 struct pci_driver *driver; 392 struct eeh_dev *edev = (struct eeh_dev *)data; 393 struct pci_dev *dev = eeh_dev_to_pci_dev(edev); 394 int *removed = (int *)userdata; 395 396 /* 397 * Actually, we should remove the PCI bridges as well. 398 * However, that's lots of complexity to do that, 399 * particularly some of devices under the bridge might 400 * support EEH. So we just care about PCI devices for 401 * simplicity here. 402 */ 403 if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)) 404 return NULL; 405 406 /* 407 * We rely on count-based pcibios_release_device() to 408 * detach permanently offlined PEs. Unfortunately, that's 409 * not reliable enough. We might have the permanently 410 * offlined PEs attached, but we needn't take care of 411 * them and their child devices. 412 */ 413 if (eeh_dev_removed(edev)) 414 return NULL; 415 416 driver = eeh_pcid_get(dev); 417 if (driver) { 418 eeh_pcid_put(dev); 419 if (driver->err_handler && 420 driver->err_handler->error_detected && 421 driver->err_handler->slot_reset && 422 driver->err_handler->resume) 423 return NULL; 424 } 425 426 /* Remove it from PCI subsystem */ 427 pr_debug("EEH: Removing %s without EEH sensitive driver\n", 428 pci_name(dev)); 429 edev->bus = dev->bus; 430 edev->mode |= EEH_DEV_DISCONNECTED; 431 (*removed)++; 432 433 pci_lock_rescan_remove(); 434 pci_stop_and_remove_bus_device(dev); 435 pci_unlock_rescan_remove(); 436 437 return NULL; 438 } 439 440 static void *eeh_pe_detach_dev(void *data, void *userdata) 441 { 442 struct eeh_pe *pe = (struct eeh_pe *)data; 443 struct eeh_dev *edev, *tmp; 444 445 eeh_pe_for_each_dev(pe, edev, tmp) { 446 if (!(edev->mode & EEH_DEV_DISCONNECTED)) 447 continue; 448 449 edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED); 450 eeh_rmv_from_parent_pe(edev); 451 } 452 453 return NULL; 454 } 455 456 /* 457 * Explicitly clear PE's frozen state for PowerNV where 458 * we have frozen PE until BAR restore is completed. It's 459 * harmless to clear it for pSeries. To be consistent with 460 * PE reset (for 3 times), we try to clear the frozen state 461 * for 3 times as well. 462 */ 463 static void *__eeh_clear_pe_frozen_state(void *data, void *flag) 464 { 465 struct eeh_pe *pe = (struct eeh_pe *)data; 466 bool *clear_sw_state = flag; 467 int i, rc = 1; 468 469 for (i = 0; rc && i < 3; i++) 470 rc = eeh_unfreeze_pe(pe, clear_sw_state); 471 472 /* Stop immediately on any errors */ 473 if (rc) { 474 pr_warn("%s: Failure %d unfreezing PHB#%x-PE#%x\n", 475 __func__, rc, pe->phb->global_number, pe->addr); 476 return (void *)pe; 477 } 478 479 return NULL; 480 } 481 482 static int eeh_clear_pe_frozen_state(struct eeh_pe *pe, 483 bool clear_sw_state) 484 { 485 void *rc; 486 487 rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, &clear_sw_state); 488 if (!rc) 489 eeh_pe_state_clear(pe, EEH_PE_ISOLATED); 490 491 return rc ? -EIO : 0; 492 } 493 494 int eeh_pe_reset_and_recover(struct eeh_pe *pe) 495 { 496 int result, ret; 497 498 /* Bail if the PE is being recovered */ 499 if (pe->state & EEH_PE_RECOVERING) 500 return 0; 501 502 /* Put the PE into recovery mode */ 503 eeh_pe_state_mark(pe, EEH_PE_RECOVERING); 504 505 /* Save states */ 506 eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL); 507 508 /* Report error */ 509 eeh_pe_dev_traverse(pe, eeh_report_error, &result); 510 511 /* Issue reset */ 512 ret = eeh_reset_pe(pe); 513 if (ret) { 514 eeh_pe_state_clear(pe, EEH_PE_RECOVERING); 515 return ret; 516 } 517 518 /* Unfreeze the PE */ 519 ret = eeh_clear_pe_frozen_state(pe, true); 520 if (ret) { 521 eeh_pe_state_clear(pe, EEH_PE_RECOVERING); 522 return ret; 523 } 524 525 /* Notify completion of reset */ 526 eeh_pe_dev_traverse(pe, eeh_report_reset, &result); 527 528 /* Restore device state */ 529 eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL); 530 531 /* Resume */ 532 eeh_pe_dev_traverse(pe, eeh_report_resume, NULL); 533 534 /* Clear recovery mode */ 535 eeh_pe_state_clear(pe, EEH_PE_RECOVERING); 536 537 return 0; 538 } 539 540 /** 541 * eeh_reset_device - Perform actual reset of a pci slot 542 * @pe: EEH PE 543 * @bus: PCI bus corresponding to the isolcated slot 544 * 545 * This routine must be called to do reset on the indicated PE. 546 * During the reset, udev might be invoked because those affected 547 * PCI devices will be removed and then added. 548 */ 549 static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) 550 { 551 struct pci_bus *frozen_bus = eeh_pe_bus_get(pe); 552 struct timeval tstamp; 553 int cnt, rc, removed = 0; 554 555 /* pcibios will clear the counter; save the value */ 556 cnt = pe->freeze_count; 557 tstamp = pe->tstamp; 558 559 /* 560 * We don't remove the corresponding PE instances because 561 * we need the information afterwords. The attached EEH 562 * devices are expected to be attached soon when calling 563 * into pcibios_add_pci_devices(). 564 */ 565 eeh_pe_state_mark(pe, EEH_PE_KEEP); 566 if (bus) { 567 pci_lock_rescan_remove(); 568 pcibios_remove_pci_devices(bus); 569 pci_unlock_rescan_remove(); 570 } else if (frozen_bus) { 571 eeh_pe_dev_traverse(pe, eeh_rmv_device, &removed); 572 } 573 574 /* 575 * Reset the pci controller. (Asserts RST#; resets config space). 576 * Reconfigure bridges and devices. Don't try to bring the system 577 * up if the reset failed for some reason. 578 * 579 * During the reset, it's very dangerous to have uncontrolled PCI 580 * config accesses. So we prefer to block them. However, controlled 581 * PCI config accesses initiated from EEH itself are allowed. 582 */ 583 rc = eeh_reset_pe(pe); 584 if (rc) 585 return rc; 586 587 pci_lock_rescan_remove(); 588 589 /* Restore PE */ 590 eeh_ops->configure_bridge(pe); 591 eeh_pe_restore_bars(pe); 592 593 /* 594 * If it's PHB PE, the frozen state on all available PEs should have 595 * been cleared by the PHB reset. Otherwise, we unfreeze the PE and its 596 * child PEs because they might be in frozen state. 597 */ 598 if (!(pe->type & EEH_PE_PHB)) { 599 rc = eeh_clear_pe_frozen_state(pe, false); 600 if (rc) 601 return rc; 602 } 603 604 /* Give the system 5 seconds to finish running the user-space 605 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes, 606 * this is a hack, but if we don't do this, and try to bring 607 * the device up before the scripts have taken it down, 608 * potentially weird things happen. 609 */ 610 if (bus) { 611 pr_info("EEH: Sleep 5s ahead of complete hotplug\n"); 612 ssleep(5); 613 614 /* 615 * The EEH device is still connected with its parent 616 * PE. We should disconnect it so the binding can be 617 * rebuilt when adding PCI devices. 618 */ 619 eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL); 620 pcibios_add_pci_devices(bus); 621 } else if (frozen_bus && removed) { 622 pr_info("EEH: Sleep 5s ahead of partial hotplug\n"); 623 ssleep(5); 624 625 eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL); 626 pcibios_add_pci_devices(frozen_bus); 627 } 628 eeh_pe_state_clear(pe, EEH_PE_KEEP); 629 630 pe->tstamp = tstamp; 631 pe->freeze_count = cnt; 632 633 pci_unlock_rescan_remove(); 634 return 0; 635 } 636 637 /* The longest amount of time to wait for a pci device 638 * to come back on line, in seconds. 639 */ 640 #define MAX_WAIT_FOR_RECOVERY 300 641 642 static void eeh_handle_normal_event(struct eeh_pe *pe) 643 { 644 struct pci_bus *frozen_bus; 645 int rc = 0; 646 enum pci_ers_result result = PCI_ERS_RESULT_NONE; 647 648 frozen_bus = eeh_pe_bus_get(pe); 649 if (!frozen_bus) { 650 pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n", 651 __func__, pe->phb->global_number, pe->addr); 652 return; 653 } 654 655 eeh_pe_update_time_stamp(pe); 656 pe->freeze_count++; 657 if (pe->freeze_count > eeh_max_freezes) 658 goto excess_failures; 659 pr_warn("EEH: This PCI device has failed %d times in the last hour\n", 660 pe->freeze_count); 661 662 /* Walk the various device drivers attached to this slot through 663 * a reset sequence, giving each an opportunity to do what it needs 664 * to accomplish the reset. Each child gets a report of the 665 * status ... if any child can't handle the reset, then the entire 666 * slot is dlpar removed and added. 667 * 668 * When the PHB is fenced, we have to issue a reset to recover from 669 * the error. Override the result if necessary to have partially 670 * hotplug for this case. 671 */ 672 pr_info("EEH: Notify device drivers to shutdown\n"); 673 eeh_pe_dev_traverse(pe, eeh_report_error, &result); 674 if ((pe->type & EEH_PE_PHB) && 675 result != PCI_ERS_RESULT_NONE && 676 result != PCI_ERS_RESULT_NEED_RESET) 677 result = PCI_ERS_RESULT_NEED_RESET; 678 679 /* Get the current PCI slot state. This can take a long time, 680 * sometimes over 300 seconds for certain systems. 681 */ 682 rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000); 683 if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) { 684 pr_warn("EEH: Permanent failure\n"); 685 goto hard_fail; 686 } 687 688 /* Since rtas may enable MMIO when posting the error log, 689 * don't post the error log until after all dev drivers 690 * have been informed. 691 */ 692 pr_info("EEH: Collect temporary log\n"); 693 eeh_slot_error_detail(pe, EEH_LOG_TEMP); 694 695 /* If all device drivers were EEH-unaware, then shut 696 * down all of the device drivers, and hope they 697 * go down willingly, without panicing the system. 698 */ 699 if (result == PCI_ERS_RESULT_NONE) { 700 pr_info("EEH: Reset with hotplug activity\n"); 701 rc = eeh_reset_device(pe, frozen_bus); 702 if (rc) { 703 pr_warn("%s: Unable to reset, err=%d\n", 704 __func__, rc); 705 goto hard_fail; 706 } 707 } 708 709 /* If all devices reported they can proceed, then re-enable MMIO */ 710 if (result == PCI_ERS_RESULT_CAN_RECOVER) { 711 pr_info("EEH: Enable I/O for affected devices\n"); 712 rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); 713 714 if (rc < 0) 715 goto hard_fail; 716 if (rc) { 717 result = PCI_ERS_RESULT_NEED_RESET; 718 } else { 719 pr_info("EEH: Notify device drivers to resume I/O\n"); 720 eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result); 721 } 722 } 723 724 /* If all devices reported they can proceed, then re-enable DMA */ 725 if (result == PCI_ERS_RESULT_CAN_RECOVER) { 726 pr_info("EEH: Enabled DMA for affected devices\n"); 727 rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA); 728 729 if (rc < 0) 730 goto hard_fail; 731 if (rc) { 732 result = PCI_ERS_RESULT_NEED_RESET; 733 } else { 734 /* 735 * We didn't do PE reset for the case. The PE 736 * is still in frozen state. Clear it before 737 * resuming the PE. 738 */ 739 eeh_pe_state_clear(pe, EEH_PE_ISOLATED); 740 result = PCI_ERS_RESULT_RECOVERED; 741 } 742 } 743 744 /* If any device has a hard failure, then shut off everything. */ 745 if (result == PCI_ERS_RESULT_DISCONNECT) { 746 pr_warn("EEH: Device driver gave up\n"); 747 goto hard_fail; 748 } 749 750 /* If any device called out for a reset, then reset the slot */ 751 if (result == PCI_ERS_RESULT_NEED_RESET) { 752 pr_info("EEH: Reset without hotplug activity\n"); 753 rc = eeh_reset_device(pe, NULL); 754 if (rc) { 755 pr_warn("%s: Cannot reset, err=%d\n", 756 __func__, rc); 757 goto hard_fail; 758 } 759 760 pr_info("EEH: Notify device drivers " 761 "the completion of reset\n"); 762 result = PCI_ERS_RESULT_NONE; 763 eeh_pe_dev_traverse(pe, eeh_report_reset, &result); 764 } 765 766 /* All devices should claim they have recovered by now. */ 767 if ((result != PCI_ERS_RESULT_RECOVERED) && 768 (result != PCI_ERS_RESULT_NONE)) { 769 pr_warn("EEH: Not recovered\n"); 770 goto hard_fail; 771 } 772 773 /* Tell all device drivers that they can resume operations */ 774 pr_info("EEH: Notify device driver to resume\n"); 775 eeh_pe_dev_traverse(pe, eeh_report_resume, NULL); 776 777 return; 778 779 excess_failures: 780 /* 781 * About 90% of all real-life EEH failures in the field 782 * are due to poorly seated PCI cards. Only 10% or so are 783 * due to actual, failed cards. 784 */ 785 pr_err("EEH: PHB#%d-PE#%x has failed %d times in the\n" 786 "last hour and has been permanently disabled.\n" 787 "Please try reseating or replacing it.\n", 788 pe->phb->global_number, pe->addr, 789 pe->freeze_count); 790 goto perm_error; 791 792 hard_fail: 793 pr_err("EEH: Unable to recover from failure from PHB#%d-PE#%x.\n" 794 "Please try reseating or replacing it\n", 795 pe->phb->global_number, pe->addr); 796 797 perm_error: 798 eeh_slot_error_detail(pe, EEH_LOG_PERM); 799 800 /* Notify all devices that they're about to go down. */ 801 eeh_pe_dev_traverse(pe, eeh_report_failure, NULL); 802 803 /* Mark the PE to be removed permanently */ 804 eeh_pe_state_mark(pe, EEH_PE_REMOVED); 805 806 /* 807 * Shut down the device drivers for good. We mark 808 * all removed devices correctly to avoid access 809 * the their PCI config any more. 810 */ 811 if (frozen_bus) { 812 eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); 813 814 pci_lock_rescan_remove(); 815 pcibios_remove_pci_devices(frozen_bus); 816 pci_unlock_rescan_remove(); 817 } 818 } 819 820 static void eeh_handle_special_event(void) 821 { 822 struct eeh_pe *pe, *phb_pe; 823 struct pci_bus *bus; 824 struct pci_controller *hose; 825 unsigned long flags; 826 int rc; 827 828 829 do { 830 rc = eeh_ops->next_error(&pe); 831 832 switch (rc) { 833 case EEH_NEXT_ERR_DEAD_IOC: 834 /* Mark all PHBs in dead state */ 835 eeh_serialize_lock(&flags); 836 837 /* Purge all events */ 838 eeh_remove_event(NULL, true); 839 840 list_for_each_entry(hose, &hose_list, list_node) { 841 phb_pe = eeh_phb_pe_get(hose); 842 if (!phb_pe) continue; 843 844 eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED); 845 } 846 847 eeh_serialize_unlock(flags); 848 849 break; 850 case EEH_NEXT_ERR_FROZEN_PE: 851 case EEH_NEXT_ERR_FENCED_PHB: 852 case EEH_NEXT_ERR_DEAD_PHB: 853 /* Mark the PE in fenced state */ 854 eeh_serialize_lock(&flags); 855 856 /* Purge all events of the PHB */ 857 eeh_remove_event(pe, true); 858 859 if (rc == EEH_NEXT_ERR_DEAD_PHB) 860 eeh_pe_state_mark(pe, EEH_PE_ISOLATED); 861 else 862 eeh_pe_state_mark(pe, 863 EEH_PE_ISOLATED | EEH_PE_RECOVERING); 864 865 eeh_serialize_unlock(flags); 866 867 break; 868 case EEH_NEXT_ERR_NONE: 869 return; 870 default: 871 pr_warn("%s: Invalid value %d from next_error()\n", 872 __func__, rc); 873 return; 874 } 875 876 /* 877 * For fenced PHB and frozen PE, it's handled as normal 878 * event. We have to remove the affected PHBs for dead 879 * PHB and IOC 880 */ 881 if (rc == EEH_NEXT_ERR_FROZEN_PE || 882 rc == EEH_NEXT_ERR_FENCED_PHB) { 883 eeh_handle_normal_event(pe); 884 eeh_pe_state_clear(pe, EEH_PE_RECOVERING); 885 } else { 886 pci_lock_rescan_remove(); 887 list_for_each_entry(hose, &hose_list, list_node) { 888 phb_pe = eeh_phb_pe_get(hose); 889 if (!phb_pe || 890 !(phb_pe->state & EEH_PE_ISOLATED) || 891 (phb_pe->state & EEH_PE_RECOVERING)) 892 continue; 893 894 /* Notify all devices to be down */ 895 bus = eeh_pe_bus_get(phb_pe); 896 eeh_pe_dev_traverse(pe, 897 eeh_report_failure, NULL); 898 pcibios_remove_pci_devices(bus); 899 } 900 pci_unlock_rescan_remove(); 901 } 902 903 /* 904 * If we have detected dead IOC, we needn't proceed 905 * any more since all PHBs would have been removed 906 */ 907 if (rc == EEH_NEXT_ERR_DEAD_IOC) 908 break; 909 } while (rc != EEH_NEXT_ERR_NONE); 910 } 911 912 /** 913 * eeh_handle_event - Reset a PCI device after hard lockup. 914 * @pe: EEH PE 915 * 916 * While PHB detects address or data parity errors on particular PCI 917 * slot, the associated PE will be frozen. Besides, DMA's occurring 918 * to wild addresses (which usually happen due to bugs in device 919 * drivers or in PCI adapter firmware) can cause EEH error. #SERR, 920 * #PERR or other misc PCI-related errors also can trigger EEH errors. 921 * 922 * Recovery process consists of unplugging the device driver (which 923 * generated hotplug events to userspace), then issuing a PCI #RST to 924 * the device, then reconfiguring the PCI config space for all bridges 925 * & devices under this slot, and then finally restarting the device 926 * drivers (which cause a second set of hotplug events to go out to 927 * userspace). 928 */ 929 void eeh_handle_event(struct eeh_pe *pe) 930 { 931 if (pe) 932 eeh_handle_normal_event(pe); 933 else 934 eeh_handle_special_event(); 935 } 936