/*
 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
 * Copyright IBM Corp. 2004 2005
 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/rtas.h>

/*
 * Bookkeeping passed as userdata to eeh_rmv_device() while walking a
 * PE: records the devices that were hot-removed during recovery so the
 * removed VFs can be re-added once the PE has been recovered.
 */
struct eeh_rmv_data {
	struct list_head edev_list;	/* eeh_dev entries removed from the PE */
	int removed;			/* count of devices removed so far */
};

/**
 * eeh_pcid_name - Retrieve name of PCI device driver
 * @pdev: PCI device
 *
 * This routine is used to retrieve the name of PCI device driver
 * if that's valid. Returns an empty string when no driver is bound.
 */
static inline const char *eeh_pcid_name(struct pci_dev *pdev)
{
	if (pdev && pdev->dev.driver)
		return pdev->dev.driver->name;
	return "";
}

/**
 * eeh_pcid_get - Get the PCI device driver
 * @pdev: PCI device
 *
 * The function is used to retrieve the PCI device driver for
 * the indicated PCI device.
Besides, we will increase the reference 62 * of the PCI device driver to prevent that being unloaded on 63 * the fly. Otherwise, kernel crash would be seen. 64 */ 65 static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev) 66 { 67 if (!pdev || !pdev->driver) 68 return NULL; 69 70 if (!try_module_get(pdev->driver->driver.owner)) 71 return NULL; 72 73 return pdev->driver; 74 } 75 76 /** 77 * eeh_pcid_put - Dereference on the PCI device driver 78 * @pdev: PCI device 79 * 80 * The function is called to do dereference on the PCI device 81 * driver of the indicated PCI device. 82 */ 83 static inline void eeh_pcid_put(struct pci_dev *pdev) 84 { 85 if (!pdev || !pdev->driver) 86 return; 87 88 module_put(pdev->driver->driver.owner); 89 } 90 91 /** 92 * eeh_disable_irq - Disable interrupt for the recovering device 93 * @dev: PCI device 94 * 95 * This routine must be called when reporting temporary or permanent 96 * error to the particular PCI device to disable interrupt of that 97 * device. If the device has enabled MSI or MSI-X interrupt, we needn't 98 * do real work because EEH should freeze DMA transfers for those PCI 99 * devices encountering EEH errors, which includes MSI or MSI-X. 100 */ 101 static void eeh_disable_irq(struct pci_dev *dev) 102 { 103 struct eeh_dev *edev = pci_dev_to_eeh_dev(dev); 104 105 /* Don't disable MSI and MSI-X interrupts. They are 106 * effectively disabled by the DMA Stopped state 107 * when an EEH error occurs. 108 */ 109 if (dev->msi_enabled || dev->msix_enabled) 110 return; 111 112 if (!irq_has_action(dev->irq)) 113 return; 114 115 edev->mode |= EEH_DEV_IRQ_DISABLED; 116 disable_irq_nosync(dev->irq); 117 } 118 119 /** 120 * eeh_enable_irq - Enable interrupt for the recovering device 121 * @dev: PCI device 122 * 123 * This routine must be called to enable interrupt while failed 124 * device could be resumed. 
 */
static void eeh_enable_irq(struct pci_dev *dev)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

	/* Only re-enable what eeh_disable_irq() actually disabled */
	if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
		/*
		 * FIXME !!!!!
		 *
		 * This is just ass backwards. This maze has
		 * unbalanced irq_enable/disable calls. So instead of
		 * finding the root cause it works around the warning
		 * in the irq_enable code by conditionally calling
		 * into it.
		 *
		 * That's just wrong.The warning in the core code is
		 * there to tell people to fix their asymmetries in
		 * their own code, not by abusing the core information
		 * to avoid it.
		 *
		 * I so wish that the assymetry would be the other way
		 * round and a few more irq_disable calls render that
		 * shit unusable forever.
		 *
		 * tglx
		 */
		if (irqd_irq_disabled(irq_get_irq_data(dev->irq)))
			enable_irq(dev->irq);
	}
}

/*
 * eeh_dev_removed - Check whether the EEH device has been removed.
 * Returns true when @edev is NULL or has been marked EEH_DEV_REMOVED,
 * in which case recovery callbacks must not touch it.
 */
static bool eeh_dev_removed(struct eeh_dev *edev)
{
	/* EEH device removed ? */
	if (!edev || (edev->mode & EEH_DEV_REMOVED))
		return true;

	return false;
}

/*
 * eeh_dev_save_state - Traversal callback: save the PCI config space
 * of one EEH device (via pci_save_state()) ahead of a PE reset.
 * @userdata is unused; always returns NULL so traversal continues.
 */
static void *eeh_dev_save_state(void *data, void *userdata)
{
	struct eeh_dev *edev = data;
	struct pci_dev *pdev;

	if (!edev)
		return NULL;

	/*
	 * We cannot access the config space on some adapters.
	 * Otherwise, it will cause fenced PHB. We don't save
	 * the content in their config space and will restore
	 * from the initial config space saved when the EEH
	 * device is created.
	 */
	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))
		return NULL;

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return NULL;

	pci_save_state(pdev);
	return NULL;
}

/**
 * eeh_report_error - Report pci error to each device driver
 * @data: eeh device
 * @userdata: return value
 *
 * Report an EEH error to each device driver, collect up and
 * merge the device driver responses. Cumulative response
 * passed back in "userdata".
 */
static void *eeh_report_error(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	/* Skip devices that are gone or whose PE was passed to a guest */
	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
		return NULL;

	device_lock(&dev->dev);
	dev->error_state = pci_channel_io_frozen;

	driver = eeh_pcid_get(dev);
	if (!driver)
		goto out_no_dev;

	eeh_disable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->error_detected)
		goto out;

	rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);

	/* A driver that needs a reset trumps all others */
	if (rc == PCI_ERS_RESULT_NEED_RESET)
		*res = rc;
	if (*res == PCI_ERS_RESULT_NONE)
		*res = rc;

	edev->in_error = true;
	pci_uevent_ers(dev, PCI_ERS_RESULT_NONE);

out:
	eeh_pcid_put(dev);
out_no_dev:
	device_unlock(&dev->dev);
	return NULL;
}

/**
 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
 * @data: eeh device
 * @userdata: return value
 *
 * Tells each device driver that IO ports, MMIO and config space I/O
 * are now enabled. Collects up and merges the device driver responses.
 * Cumulative response passed back in "userdata".
 */
static void *eeh_report_mmio_enabled(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	/* Skip devices that are gone or whose PE was passed to a guest */
	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
		return NULL;

	device_lock(&dev->dev);
	driver = eeh_pcid_get(dev);
	if (!driver)
		goto out_no_dev;

	/*
	 * EEH_DEV_NO_HANDLER means the device was re-probed during
	 * recovery, so its driver never saw error_detected() and must
	 * not get the follow-up callbacks either.
	 */
	if (!driver->err_handler ||
	    !driver->err_handler->mmio_enabled ||
	    (edev->mode & EEH_DEV_NO_HANDLER))
		goto out;

	rc = driver->err_handler->mmio_enabled(dev);

	/* A driver that needs a reset trumps all others */
	if (rc == PCI_ERS_RESULT_NEED_RESET)
		*res = rc;
	if (*res == PCI_ERS_RESULT_NONE)
		*res = rc;

out:
	eeh_pcid_put(dev);
out_no_dev:
	device_unlock(&dev->dev);
	return NULL;
}

/**
 * eeh_report_reset - Tell device that slot has been reset
 * @data: eeh device
 * @userdata: return value
 *
 * This routine must be called while EEH tries to reset particular
 * PCI device so that the associated PCI device driver could take
 * some actions, usually to save data the driver needs so that the
 * driver can work again while the device is recovered.
 */
static void *eeh_report_reset(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	/* Skip devices that are gone or whose PE was passed to a guest */
	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
		return NULL;

	device_lock(&dev->dev);
	dev->error_state = pci_channel_io_normal;

	driver = eeh_pcid_get(dev);
	if (!driver)
		goto out_no_dev;

	eeh_enable_irq(dev);

	/*
	 * Only drivers that saw error_detected() (edev->in_error) and
	 * were not re-probed during recovery (EEH_DEV_NO_HANDLER) get
	 * the slot_reset() callback.
	 */
	if (!driver->err_handler ||
	    !driver->err_handler->slot_reset ||
	    (edev->mode & EEH_DEV_NO_HANDLER) ||
	    (!edev->in_error))
		goto out;

	rc = driver->err_handler->slot_reset(dev);
	/* Merge the response: NEED_RESET overrides DISCONNECT */
	if ((*res == PCI_ERS_RESULT_NONE) ||
	    (*res == PCI_ERS_RESULT_RECOVERED))
		*res = rc;
	if (*res == PCI_ERS_RESULT_DISCONNECT &&
	    rc == PCI_ERS_RESULT_NEED_RESET)
		*res = rc;

out:
	eeh_pcid_put(dev);
out_no_dev:
	device_unlock(&dev->dev);
	return NULL;
}

/*
 * eeh_dev_restore_state - Traversal callback: restore the PCI config
 * space of one EEH device after a reset, either from the state taken
 * by eeh_dev_save_state() or, for config-restricted adapters, from
 * the initial BAR values saved at EEH device creation.
 */
static void *eeh_dev_restore_state(void *data, void *userdata)
{
	struct eeh_dev *edev = data;
	struct pci_dev *pdev;

	if (!edev)
		return NULL;

	/*
	 * The content in the config space isn't saved because
	 * the blocked config space on some adapters. We have
	 * to restore the initial saved config space when the
	 * EEH device is created.
	 */
	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
		/* Restore BARs once, when the last device is reached */
		if (list_is_last(&edev->list, &edev->pe->edevs))
			eeh_pe_restore_bars(edev->pe);

		return NULL;
	}

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return NULL;

	pci_restore_state(pdev);
	return NULL;
}

/**
 * eeh_report_resume - Tell device to resume normal operations
 * @data: eeh device
 * @userdata: return value
 *
 * This routine must be called to notify the device driver that it
 * could resume so that the device driver can do some initialization
 * to make the recovered device work again.
 */
static void *eeh_report_resume(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	bool was_in_error;
	struct pci_driver *driver;

	/* Skip devices that are gone or whose PE was passed to a guest */
	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
		return NULL;

	device_lock(&dev->dev);
	dev->error_state = pci_channel_io_normal;

	driver = eeh_pcid_get(dev);
	if (!driver)
		goto out_no_dev;

	/* Clear the error flag before invoking the driver callback */
	was_in_error = edev->in_error;
	edev->in_error = false;
	eeh_enable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->resume ||
	    (edev->mode & EEH_DEV_NO_HANDLER) || !was_in_error) {
		edev->mode &= ~EEH_DEV_NO_HANDLER;
		goto out;
	}

	driver->err_handler->resume(dev);

	pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
out:
	eeh_pcid_put(dev);
#ifdef CONFIG_PCI_IOV
	if (eeh_ops->notify_resume && eeh_dev_to_pdn(edev))
		eeh_ops->notify_resume(eeh_dev_to_pdn(edev));
#endif
out_no_dev:
	device_unlock(&dev->dev);
	return NULL;
}

/**
 * eeh_report_failure - Tell device driver that device is dead.
 * @data: eeh device
 * @userdata: return value
 *
 * This informs the device driver that the device is permanently
 * dead, and that no further recovery attempts will be made on it.
 */
static void *eeh_report_failure(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct pci_driver *driver;

	/* Skip devices that are gone or whose PE was passed to a guest */
	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
		return NULL;

	device_lock(&dev->dev);
	dev->error_state = pci_channel_io_perm_failure;

	driver = eeh_pcid_get(dev);
	if (!driver)
		goto out_no_dev;

	eeh_disable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->error_detected)
		goto out;

	driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);

	pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
out:
	eeh_pcid_put(dev);
out_no_dev:
	device_unlock(&dev->dev);
	return NULL;
}

/*
 * eeh_add_virt_device - Traversal callback: re-add a hot-removed VF.
 * Only applies to VFs (edev->physfn set); a VF whose driver provides
 * error handlers was never removed, so nothing is done for it.
 */
static void *eeh_add_virt_device(void *data, void *userdata)
{
	struct pci_driver *driver;
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);

	if (!(edev->physfn)) {
		pr_warn("%s: EEH dev %04x:%02x:%02x.%01x not for VF\n",
			__func__, pdn->phb->global_number, pdn->busno,
			PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
		return NULL;
	}

	driver = eeh_pcid_get(dev);
	if (driver) {
		eeh_pcid_put(dev);
		/* An EEH-aware driver means the VF was never removed */
		if (driver->err_handler)
			return NULL;
	}

#ifdef CONFIG_PCI_IOV
	pci_iov_add_virtfn(edev->physfn, pdn->vf_index);
#endif
	return NULL;
}

/*
 * eeh_rmv_device - Traversal callback: remove a device that cannot
 * ride through recovery (no driver, or a driver without EEH error
 * handlers). @userdata is an optional struct eeh_rmv_data used to
 * count removals and to collect removed VFs for later re-add.
 */
static void *eeh_rmv_device(void *data, void *userdata)
{
	struct pci_driver *driver;
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct eeh_rmv_data *rmv_data = (struct eeh_rmv_data *)userdata;
	int *removed = rmv_data ? &rmv_data->removed : NULL;

	/*
	 * Actually, we should remove the PCI bridges as well.
	 * However, that's lots of complexity to do that,
	 * particularly some of devices under the bridge might
	 * support EEH. So we just care about PCI devices for
	 * simplicity here.
	 */
	if (!dev || (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE))
		return NULL;

	/*
	 * We rely on count-based pcibios_release_device() to
	 * detach permanently offlined PEs. Unfortunately, that's
	 * not reliable enough. We might have the permanently
	 * offlined PEs attached, but we needn't take care of
	 * them and their child devices.
	 */
	if (eeh_dev_removed(edev))
		return NULL;

	driver = eeh_pcid_get(dev);
	if (driver) {
		eeh_pcid_put(dev);
		/* Devices on a passed-through PE are left alone */
		if (removed &&
		    eeh_pe_passed(edev->pe))
			return NULL;
		/* EEH-aware drivers can recover in place; keep them */
		if (removed &&
		    driver->err_handler &&
		    driver->err_handler->error_detected &&
		    driver->err_handler->slot_reset)
			return NULL;
	}

	/* Remove it from PCI subsystem */
	pr_debug("EEH: Removing %s without EEH sensitive driver\n",
		 pci_name(dev));
	edev->bus = dev->bus;
	edev->mode |= EEH_DEV_DISCONNECTED;
	if (removed)
		(*removed)++;

	if (edev->physfn) {
#ifdef CONFIG_PCI_IOV
		struct pci_dn *pdn = eeh_dev_to_pdn(edev);

		pci_iov_remove_virtfn(edev->physfn, pdn->vf_index);
		edev->pdev = NULL;

		/*
		 * We have to set the VF PE number to invalid one, which is
		 * required to plug the VF successfully.
		 */
		pdn->pe_number = IODA_INVALID_PE;
#endif
		/* Queue the VF so eeh_handle_normal_event() can re-add it */
		if (rmv_data)
			list_add(&edev->rmv_list, &rmv_data->edev_list);
	} else {
		pci_lock_rescan_remove();
		pci_stop_and_remove_bus_device(dev);
		pci_unlock_rescan_remove();
	}

	return NULL;
}

/*
 * eeh_pe_detach_dev - PE-traversal callback: detach every device that
 * eeh_rmv_device() marked EEH_DEV_DISCONNECTED from its parent PE, so
 * the binding can be rebuilt when the devices are re-added.
 */
static void *eeh_pe_detach_dev(void *data, void *userdata)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	struct eeh_dev *edev, *tmp;

	eeh_pe_for_each_dev(pe, edev, tmp) {
		if (!(edev->mode & EEH_DEV_DISCONNECTED))
			continue;

		edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
		eeh_rmv_from_parent_pe(edev);
	}

	return NULL;
}

/*
 * Explicitly clear PE's frozen state for PowerNV where
 * we have frozen PE until BAR restore is completed. It's
 * harmless to clear it for pSeries. To be consistent with
 * PE reset (for 3 times), we try to clear the frozen state
 * for 3 times as well.
 */
static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	bool clear_sw_state = *(bool *)flag;
	int i, rc = 1;

	/* Up to 3 attempts; stop as soon as one succeeds (rc == 0) */
	for (i = 0; rc && i < 3; i++)
		rc = eeh_unfreeze_pe(pe, clear_sw_state);

	/* Stop immediately on any errors */
	if (rc) {
		pr_warn("%s: Failure %d unfreezing PHB#%x-PE#%x\n",
			__func__, rc, pe->phb->global_number, pe->addr);
		return (void *)pe;
	}

	return NULL;
}

/*
 * eeh_clear_pe_frozen_state - Unfreeze @pe and all of its children.
 * Returns 0 on success (and clears EEH_PE_ISOLATED), -EIO when any
 * PE in the tree could not be unfrozen.
 */
static int eeh_clear_pe_frozen_state(struct eeh_pe *pe,
				     bool clear_sw_state)
{
	void *rc;

	rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, &clear_sw_state);
	if (!rc)
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED);

	return rc ? -EIO : 0;
}

/*
 * eeh_pe_reset_and_recover - Reset a PE and restore its device state:
 * save config space, full reset, unfreeze, restore config space.
 * Returns 0 on success or a negative errno; a PE that is already
 * being recovered is left alone (returns 0).
 */
int eeh_pe_reset_and_recover(struct eeh_pe *pe)
{
	int ret;

	/* Bail if the PE is being recovered */
	if (pe->state & EEH_PE_RECOVERING)
		return 0;

	/* Put the PE into recovery mode */
	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);

	/* Save states */
	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);

	/* Issue reset */
	ret = eeh_pe_reset_full(pe);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		return ret;
	}

	/* Unfreeze the PE */
	ret = eeh_clear_pe_frozen_state(pe, true);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		return ret;
	}

	/* Restore device state */
	eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);

	/* Clear recovery mode */
	eeh_pe_state_clear(pe, EEH_PE_RECOVERING);

	return 0;
}

/**
 * eeh_reset_device - Perform actual reset of a pci slot
 * @pe: EEH PE
 * @bus: PCI bus corresponding to the isolated slot
 * @rmv_data: Optional, list to record removed devices
 * @driver_eeh_aware: Does the device's driver provide EEH support?
 *
 * This routine must be called to do reset on the indicated PE.
 * During the reset, udev might be invoked because those affected
 * PCI devices will be removed and then added.
 */
static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
			    struct eeh_rmv_data *rmv_data,
			    bool driver_eeh_aware)
{
	time64_t tstamp;
	int cnt, rc;
	struct eeh_dev *edev;

	/* pcibios will clear the counter; save the value */
	cnt = pe->freeze_count;
	tstamp = pe->tstamp;

	/*
	 * We don't remove the corresponding PE instances because
	 * we need the information afterwords. The attached EEH
	 * devices are expected to be attached soon when calling
	 * into pci_hp_add_devices().
	 */
	eeh_pe_state_mark(pe, EEH_PE_KEEP);
	if (driver_eeh_aware || (pe->type & EEH_PE_VF)) {
		eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
	} else {
		pci_lock_rescan_remove();
		pci_hp_remove_devices(bus);
		pci_unlock_rescan_remove();
	}

	/*
	 * Reset the pci controller. (Asserts RST#; resets config space).
	 * Reconfigure bridges and devices. Don't try to bring the system
	 * up if the reset failed for some reason.
	 *
	 * During the reset, it's very dangerous to have uncontrolled PCI
	 * config accesses. So we prefer to block them. However, controlled
	 * PCI config accesses initiated from EEH itself are allowed.
	 */
	rc = eeh_pe_reset_full(pe);
	if (rc)
		return rc;

	pci_lock_rescan_remove();

	/* Restore PE */
	eeh_ops->configure_bridge(pe);
	eeh_pe_restore_bars(pe);

	/* Clear frozen state */
	rc = eeh_clear_pe_frozen_state(pe, false);
	if (rc) {
		pci_unlock_rescan_remove();
		return rc;
	}

	/* Give the system 5 seconds to finish running the user-space
	 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
	 * this is a hack, but if we don't do this, and try to bring
	 * the device up before the scripts have taken it down,
	 * potentially weird things happen.
	 */
	if (!driver_eeh_aware || rmv_data->removed) {
		pr_info("EEH: Sleep 5s ahead of %s hotplug\n",
			(driver_eeh_aware ? "partial" : "complete"));
		ssleep(5);

		/*
		 * The EEH device is still connected with its parent
		 * PE. We should disconnect it so the binding can be
		 * rebuilt when adding PCI devices.
		 */
		edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		if (pe->type & EEH_PE_VF) {
			eeh_add_virt_device(edev, NULL);
		} else {
			if (!driver_eeh_aware)
				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
			pci_hp_add_devices(bus);
		}
	}
	eeh_pe_state_clear(pe, EEH_PE_KEEP);

	pe->tstamp = tstamp;
	pe->freeze_count = cnt;

	pci_unlock_rescan_remove();
	return 0;
}

/* The longest amount of time to wait for a pci device
 * to come back on line, in seconds.
 */
#define MAX_WAIT_FOR_RECOVERY 300

/**
 * eeh_handle_normal_event - Handle EEH events on a specific PE
 * @pe: EEH PE - which should not be used after we return, as it may
 *      have been invalidated.
 *
 * Attempts to recover the given PE. If recovery fails or the PE has failed
 * too many times, remove the PE.
 *
 * While PHB detects address or data parity errors on particular PCI
 * slot, the associated PE will be frozen. Besides, DMA's occurring
 * to wild addresses (which usually happen due to bugs in device
 * drivers or in PCI adapter firmware) can cause EEH error. #SERR,
 * #PERR or other misc PCI-related errors also can trigger EEH errors.
 *
 * Recovery process consists of unplugging the device driver (which
 * generated hotplug events to userspace), then issuing a PCI #RST to
 * the device, then reconfiguring the PCI config space for all bridges
 * & devices under this slot, and then finally restarting the device
 * drivers (which cause a second set of hotplug events to go out to
 * userspace).
760 */ 761 void eeh_handle_normal_event(struct eeh_pe *pe) 762 { 763 struct pci_bus *bus; 764 struct eeh_dev *edev, *tmp; 765 int rc = 0; 766 enum pci_ers_result result = PCI_ERS_RESULT_NONE; 767 struct eeh_rmv_data rmv_data = {LIST_HEAD_INIT(rmv_data.edev_list), 0}; 768 769 bus = eeh_pe_bus_get(pe); 770 if (!bus) { 771 pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n", 772 __func__, pe->phb->global_number, pe->addr); 773 return; 774 } 775 776 eeh_pe_state_mark(pe, EEH_PE_RECOVERING); 777 778 eeh_pe_update_time_stamp(pe); 779 pe->freeze_count++; 780 if (pe->freeze_count > eeh_max_freezes) { 781 pr_err("EEH: PHB#%x-PE#%x has failed %d times in the\n" 782 "last hour and has been permanently disabled.\n", 783 pe->phb->global_number, pe->addr, 784 pe->freeze_count); 785 goto hard_fail; 786 } 787 pr_warn("EEH: This PCI device has failed %d times in the last hour\n", 788 pe->freeze_count); 789 790 /* Walk the various device drivers attached to this slot through 791 * a reset sequence, giving each an opportunity to do what it needs 792 * to accomplish the reset. Each child gets a report of the 793 * status ... if any child can't handle the reset, then the entire 794 * slot is dlpar removed and added. 795 * 796 * When the PHB is fenced, we have to issue a reset to recover from 797 * the error. Override the result if necessary to have partially 798 * hotplug for this case. 799 */ 800 pr_info("EEH: Notify device drivers to shutdown\n"); 801 eeh_pe_dev_traverse(pe, eeh_report_error, &result); 802 if ((pe->type & EEH_PE_PHB) && 803 result != PCI_ERS_RESULT_NONE && 804 result != PCI_ERS_RESULT_NEED_RESET) 805 result = PCI_ERS_RESULT_NEED_RESET; 806 807 /* Get the current PCI slot state. This can take a long time, 808 * sometimes over 300 seconds for certain systems. 
809 */ 810 rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000); 811 if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) { 812 pr_warn("EEH: Permanent failure\n"); 813 goto hard_fail; 814 } 815 816 /* Since rtas may enable MMIO when posting the error log, 817 * don't post the error log until after all dev drivers 818 * have been informed. 819 */ 820 pr_info("EEH: Collect temporary log\n"); 821 eeh_slot_error_detail(pe, EEH_LOG_TEMP); 822 823 /* If all device drivers were EEH-unaware, then shut 824 * down all of the device drivers, and hope they 825 * go down willingly, without panicing the system. 826 */ 827 if (result == PCI_ERS_RESULT_NONE) { 828 pr_info("EEH: Reset with hotplug activity\n"); 829 rc = eeh_reset_device(pe, bus, NULL, false); 830 if (rc) { 831 pr_warn("%s: Unable to reset, err=%d\n", 832 __func__, rc); 833 goto hard_fail; 834 } 835 } 836 837 /* If all devices reported they can proceed, then re-enable MMIO */ 838 if (result == PCI_ERS_RESULT_CAN_RECOVER) { 839 pr_info("EEH: Enable I/O for affected devices\n"); 840 rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); 841 842 if (rc < 0) 843 goto hard_fail; 844 if (rc) { 845 result = PCI_ERS_RESULT_NEED_RESET; 846 } else { 847 pr_info("EEH: Notify device drivers to resume I/O\n"); 848 eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result); 849 } 850 } 851 852 /* If all devices reported they can proceed, then re-enable DMA */ 853 if (result == PCI_ERS_RESULT_CAN_RECOVER) { 854 pr_info("EEH: Enabled DMA for affected devices\n"); 855 rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA); 856 857 if (rc < 0) 858 goto hard_fail; 859 if (rc) { 860 result = PCI_ERS_RESULT_NEED_RESET; 861 } else { 862 /* 863 * We didn't do PE reset for the case. The PE 864 * is still in frozen state. Clear it before 865 * resuming the PE. 866 */ 867 eeh_pe_state_clear(pe, EEH_PE_ISOLATED); 868 result = PCI_ERS_RESULT_RECOVERED; 869 } 870 } 871 872 /* If any device has a hard failure, then shut off everything. 
*/ 873 if (result == PCI_ERS_RESULT_DISCONNECT) { 874 pr_warn("EEH: Device driver gave up\n"); 875 goto hard_fail; 876 } 877 878 /* If any device called out for a reset, then reset the slot */ 879 if (result == PCI_ERS_RESULT_NEED_RESET) { 880 pr_info("EEH: Reset without hotplug activity\n"); 881 rc = eeh_reset_device(pe, bus, &rmv_data, true); 882 if (rc) { 883 pr_warn("%s: Cannot reset, err=%d\n", 884 __func__, rc); 885 goto hard_fail; 886 } 887 888 pr_info("EEH: Notify device drivers " 889 "the completion of reset\n"); 890 result = PCI_ERS_RESULT_NONE; 891 eeh_pe_dev_traverse(pe, eeh_report_reset, &result); 892 } 893 894 /* All devices should claim they have recovered by now. */ 895 if ((result != PCI_ERS_RESULT_RECOVERED) && 896 (result != PCI_ERS_RESULT_NONE)) { 897 pr_warn("EEH: Not recovered\n"); 898 goto hard_fail; 899 } 900 901 /* 902 * For those hot removed VFs, we should add back them after PF get 903 * recovered properly. 904 */ 905 list_for_each_entry_safe(edev, tmp, &rmv_data.edev_list, rmv_list) { 906 eeh_add_virt_device(edev, NULL); 907 list_del(&edev->rmv_list); 908 } 909 910 /* Tell all device drivers that they can resume operations */ 911 pr_info("EEH: Notify device driver to resume\n"); 912 eeh_pe_dev_traverse(pe, eeh_report_resume, NULL); 913 914 goto final; 915 916 hard_fail: 917 /* 918 * About 90% of all real-life EEH failures in the field 919 * are due to poorly seated PCI cards. Only 10% or so are 920 * due to actual, failed cards. 921 */ 922 pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n" 923 "Please try reseating or replacing it\n", 924 pe->phb->global_number, pe->addr); 925 926 eeh_slot_error_detail(pe, EEH_LOG_PERM); 927 928 /* Notify all devices that they're about to go down. */ 929 eeh_pe_dev_traverse(pe, eeh_report_failure, NULL); 930 931 /* Mark the PE to be removed permanently */ 932 eeh_pe_state_mark(pe, EEH_PE_REMOVED); 933 934 /* 935 * Shut down the device drivers for good. 
We mark 936 * all removed devices correctly to avoid access 937 * the their PCI config any more. 938 */ 939 if (pe->type & EEH_PE_VF) { 940 eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL); 941 eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); 942 } else { 943 eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); 944 eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); 945 946 pci_lock_rescan_remove(); 947 pci_hp_remove_devices(bus); 948 pci_unlock_rescan_remove(); 949 /* The passed PE should no longer be used */ 950 return; 951 } 952 final: 953 eeh_pe_state_clear(pe, EEH_PE_RECOVERING); 954 } 955 956 /** 957 * eeh_handle_special_event - Handle EEH events without a specific failing PE 958 * 959 * Called when an EEH event is detected but can't be narrowed down to a 960 * specific PE. Iterates through possible failures and handles them as 961 * necessary. 962 */ 963 void eeh_handle_special_event(void) 964 { 965 struct eeh_pe *pe, *phb_pe; 966 struct pci_bus *bus; 967 struct pci_controller *hose; 968 unsigned long flags; 969 int rc; 970 971 972 do { 973 rc = eeh_ops->next_error(&pe); 974 975 switch (rc) { 976 case EEH_NEXT_ERR_DEAD_IOC: 977 /* Mark all PHBs in dead state */ 978 eeh_serialize_lock(&flags); 979 980 /* Purge all events */ 981 eeh_remove_event(NULL, true); 982 983 list_for_each_entry(hose, &hose_list, list_node) { 984 phb_pe = eeh_phb_pe_get(hose); 985 if (!phb_pe) continue; 986 987 eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED); 988 } 989 990 eeh_serialize_unlock(flags); 991 992 break; 993 case EEH_NEXT_ERR_FROZEN_PE: 994 case EEH_NEXT_ERR_FENCED_PHB: 995 case EEH_NEXT_ERR_DEAD_PHB: 996 /* Mark the PE in fenced state */ 997 eeh_serialize_lock(&flags); 998 999 /* Purge all events of the PHB */ 1000 eeh_remove_event(pe, true); 1001 1002 if (rc == EEH_NEXT_ERR_DEAD_PHB) 1003 eeh_pe_state_mark(pe, EEH_PE_ISOLATED); 1004 else 1005 eeh_pe_state_mark(pe, 1006 EEH_PE_ISOLATED | EEH_PE_RECOVERING); 1007 1008 eeh_serialize_unlock(flags); 1009 1010 break; 1011 case EEH_NEXT_ERR_NONE: 1012 return; 1013 
default: 1014 pr_warn("%s: Invalid value %d from next_error()\n", 1015 __func__, rc); 1016 return; 1017 } 1018 1019 /* 1020 * For fenced PHB and frozen PE, it's handled as normal 1021 * event. We have to remove the affected PHBs for dead 1022 * PHB and IOC 1023 */ 1024 if (rc == EEH_NEXT_ERR_FROZEN_PE || 1025 rc == EEH_NEXT_ERR_FENCED_PHB) { 1026 eeh_handle_normal_event(pe); 1027 } else { 1028 pci_lock_rescan_remove(); 1029 list_for_each_entry(hose, &hose_list, list_node) { 1030 phb_pe = eeh_phb_pe_get(hose); 1031 if (!phb_pe || 1032 !(phb_pe->state & EEH_PE_ISOLATED) || 1033 (phb_pe->state & EEH_PE_RECOVERING)) 1034 continue; 1035 1036 /* Notify all devices to be down */ 1037 eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); 1038 eeh_pe_dev_traverse(pe, 1039 eeh_report_failure, NULL); 1040 bus = eeh_pe_bus_get(phb_pe); 1041 if (!bus) { 1042 pr_err("%s: Cannot find PCI bus for " 1043 "PHB#%x-PE#%x\n", 1044 __func__, 1045 pe->phb->global_number, 1046 pe->addr); 1047 break; 1048 } 1049 pci_hp_remove_devices(bus); 1050 } 1051 pci_unlock_rescan_remove(); 1052 } 1053 1054 /* 1055 * If we have detected dead IOC, we needn't proceed 1056 * any more since all PHBs would have been removed 1057 */ 1058 if (rc == EEH_NEXT_ERR_DEAD_IOC) 1059 break; 1060 } while (rc != EEH_NEXT_ERR_NONE); 1061 } 1062