// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
 * Copyright IBM Corp. 2004 2005
 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
 *
 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/rtas.h>

struct eeh_rmv_data {
	struct list_head removed_vf_list;
	int removed_dev_count;
};

static int eeh_result_priority(enum pci_ers_result result)
{
	switch (result) {
	case PCI_ERS_RESULT_NONE:
		return 1;
	case PCI_ERS_RESULT_NO_AER_DRIVER:
		return 2;
	case PCI_ERS_RESULT_RECOVERED:
		return 3;
	case PCI_ERS_RESULT_CAN_RECOVER:
		return 4;
	case PCI_ERS_RESULT_DISCONNECT:
		return 5;
	case PCI_ERS_RESULT_NEED_RESET:
		return 6;
	default:
		WARN_ONCE(1, "Unknown pci_ers_result value: %d\n", (int)result);
		return 0;
	}
}

static const char *pci_ers_result_name(enum pci_ers_result result)
{
	switch (result) {
	case PCI_ERS_RESULT_NONE:
		return "none";
	case PCI_ERS_RESULT_CAN_RECOVER:
		return "can recover";
	case PCI_ERS_RESULT_NEED_RESET:
		return "need reset";
	case PCI_ERS_RESULT_DISCONNECT:
		return "disconnect";
	case PCI_ERS_RESULT_RECOVERED:
		return "recovered";
	case PCI_ERS_RESULT_NO_AER_DRIVER:
		return "no AER driver";
	default:
		WARN_ONCE(1, "Unknown result type: %d\n", (int)result);
		return "unknown";
	}
}

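/*
 * Merge two recovery results, keeping the one with the higher
 * priority per eeh_result_priority(). For example, merging
 * RECOVERED (3) with NEED_RESET (6) yields NEED_RESET, so a single
 * device asking for a reset forces the whole PE down the reset path;
 * only NEED_RESET outranks DISCONNECT (5).
 */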
static enum pci_ers_result pci_ers_merge_result(enum pci_ers_result old,
						enum pci_ers_result new)
{
	if (eeh_result_priority(new) > eeh_result_priority(old))
		return new;
	return old;
}

static bool eeh_dev_removed(struct eeh_dev *edev)
{
	return !edev || (edev->mode & EEH_DEV_REMOVED);
}

static bool eeh_edev_actionable(struct eeh_dev *edev)
{
	if (!edev->pdev)
		return false;
	if (edev->pdev->error_state == pci_channel_io_perm_failure)
		return false;
	if (eeh_dev_removed(edev))
		return false;
	if (eeh_pe_passed(edev->pe))
		return false;

	return true;
}

/**
 * eeh_pcid_get - Get the PCI device driver
 * @pdev: PCI device
 *
 * The function is used to retrieve the PCI device driver for
 * the indicated PCI device. We also take a reference on the
 * driver's module to prevent it from being unloaded on the fly;
 * otherwise the kernel could crash.
 */
static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
{
	if (!pdev || !pdev->driver)
		return NULL;

	if (!try_module_get(pdev->driver->driver.owner))
		return NULL;

	return pdev->driver;
}

/**
 * eeh_pcid_put - Dereference on the PCI device driver
 * @pdev: PCI device
 *
 * The function is called to drop the module reference on the PCI
 * device driver that was taken by eeh_pcid_get().
 */
static inline void eeh_pcid_put(struct pci_dev *pdev)
{
	if (!pdev || !pdev->driver)
		return;

	module_put(pdev->driver->driver.owner);
}

/**
 * eeh_disable_irq - Disable interrupt for the recovering device
 * @edev: eeh device
 *
 * This routine must be called when reporting a temporary or permanent
 * error to a particular PCI device, to disable that device's interrupt.
 * If the device has MSI or MSI-X interrupts enabled, no real work is
 * needed because EEH freezes DMA transfers for PCI devices
 * encountering EEH errors, which includes MSI and MSI-X.
 */
static void eeh_disable_irq(struct eeh_dev *edev)
{
	/* Don't disable MSI and MSI-X interrupts. They are
	 * effectively disabled by the DMA Stopped state
	 * when an EEH error occurs.
	 */
	if (edev->pdev->msi_enabled || edev->pdev->msix_enabled)
		return;

	if (!irq_has_action(edev->pdev->irq))
		return;

	edev->mode |= EEH_DEV_IRQ_DISABLED;
	disable_irq_nosync(edev->pdev->irq);
}

/**
 * eeh_enable_irq - Enable interrupt for the recovering device
 * @edev: eeh device
 *
 * This routine must be called to enable the interrupt once the
 * failed device can be resumed.
 */
static void eeh_enable_irq(struct eeh_dev *edev)
{
	if (edev->mode & EEH_DEV_IRQ_DISABLED) {
		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
		/*
		 * FIXME !!!!!
		 *
		 * This is just ass backwards. This maze has
		 * unbalanced irq_enable/disable calls. So instead of
		 * finding the root cause it works around the warning
		 * in the irq_enable code by conditionally calling
		 * into it.
		 *
		 * That's just wrong. The warning in the core code is
		 * there to tell people to fix their asymmetries in
		 * their own code, not by abusing the core information
		 * to avoid it.
		 *
		 * I so wish that the asymmetry would be the other way
		 * round and a few more irq_disable calls render that
		 * shit unusable forever.
		 *
		 * tglx
		 */
		if (irqd_irq_disabled(irq_get_irq_data(edev->pdev->irq)))
			enable_irq(edev->pdev->irq);
	}
}

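/*
 * Save a device's PCI config space so it can be restored by
 * eeh_dev_restore_state() once the PE has been reset; invoked via
 * eeh_pe_dev_traverse() for every device in the PE.
 */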
static void eeh_dev_save_state(struct eeh_dev *edev, void *userdata)
{
	struct pci_dev *pdev;

	if (!edev)
		return;

	/*
	 * We cannot access the config space of some adapters:
	 * doing so would cause a fenced PHB. For those we don't
	 * save the config space contents here; instead, the
	 * initial config space saved when the EEH device was
	 * created will be restored.
	 */
	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))
		return;

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return;

	pci_save_state(pdev);
}

static void eeh_set_channel_state(struct eeh_pe *root, enum pci_channel_state s)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev, *tmp;

	eeh_for_each_pe(root, pe)
		eeh_pe_for_each_dev(pe, edev, tmp)
			if (eeh_edev_actionable(edev))
				edev->pdev->error_state = s;
}

static void eeh_set_irq_state(struct eeh_pe *root, bool enable)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev, *tmp;

	eeh_for_each_pe(root, pe) {
		eeh_pe_for_each_dev(pe, edev, tmp) {
			if (!eeh_edev_actionable(edev))
				continue;

			if (!eeh_pcid_get(edev->pdev))
				continue;

			if (enable)
				eeh_enable_irq(edev);
			else
				eeh_disable_irq(edev);

			eeh_pcid_put(edev->pdev);
		}
	}
}

typedef enum pci_ers_result (*eeh_report_fn)(struct eeh_dev *,
					     struct pci_dev *,
					     struct pci_driver *);
static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn,
			       enum pci_ers_result *result)
{
	struct pci_dev *pdev;
	struct pci_driver *driver;
	enum pci_ers_result new_result;

	pci_lock_rescan_remove();
	pdev = edev->pdev;
	if (pdev)
		get_device(&pdev->dev);
	pci_unlock_rescan_remove();
	if (!pdev) {
		eeh_edev_info(edev, "no device");
		return;
	}
	device_lock(&pdev->dev);
	if (eeh_edev_actionable(edev)) {
		driver = eeh_pcid_get(pdev);

		if (!driver)
			eeh_edev_info(edev, "no driver");
		else if (!driver->err_handler)
			eeh_edev_info(edev, "driver not EEH aware");
		else if (edev->mode & EEH_DEV_NO_HANDLER)
			eeh_edev_info(edev, "driver bound too late");
		else {
			new_result = fn(edev, pdev, driver);
			eeh_edev_info(edev, "%s driver reports: '%s'",
				      driver->name,
				      pci_ers_result_name(new_result));
			if (result)
				*result = pci_ers_merge_result(*result,
							       new_result);
		}
		if (driver)
			eeh_pcid_put(pdev);
	} else {
		eeh_edev_info(edev, "not actionable (%d,%d,%d)", !!pdev,
			      !eeh_dev_removed(edev), !eeh_pe_passed(edev->pe));
	}
	device_unlock(&pdev->dev);
	if (edev->pdev != pdev)
		eeh_edev_warn(edev, "Device changed during processing!\n");
	put_device(&pdev->dev);
}

static void eeh_pe_report(const char *name, struct eeh_pe *root,
			  eeh_report_fn fn, enum pci_ers_result *result)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev, *tmp;

	pr_info("EEH: Beginning: '%s'\n", name);
	eeh_for_each_pe(root, pe) eeh_pe_for_each_dev(pe, edev, tmp)
		eeh_pe_report_edev(edev, fn, result);
	if (result)
		pr_info("EEH: Finished:'%s' with aggregate recovery state:'%s'\n",
			name, pci_ers_result_name(*result));
	else
		pr_info("EEH: Finished:'%s'\n", name);
}

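/*
 * The eeh_report_*() helpers below wrap the driver's
 * pci_error_handlers callbacks. During a successful recovery they
 * are invoked through eeh_pe_report() in the usual PCI error
 * recovery order: error_detected, then (optionally) mmio_enabled,
 * then slot_reset, then resume.
 */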
/**
 * eeh_report_error - Report pci error to each device driver
 * @edev: eeh device
 * @pdev: PCI device
 * @driver: device's PCI driver
 *
 * Report an EEH error to each device driver.
 */
static enum pci_ers_result eeh_report_error(struct eeh_dev *edev,
					    struct pci_dev *pdev,
					    struct pci_driver *driver)
{
	enum pci_ers_result rc;

	if (!driver->err_handler->error_detected)
		return PCI_ERS_RESULT_NONE;

	eeh_edev_info(edev, "Invoking %s->error_detected(IO frozen)",
		      driver->name);
	rc = driver->err_handler->error_detected(pdev, pci_channel_io_frozen);

	edev->in_error = true;
	pci_uevent_ers(pdev, PCI_ERS_RESULT_NONE);
	return rc;
}

/**
 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
 * @edev: eeh device
 * @pdev: PCI device
 * @driver: device's PCI driver
 *
 * Tells each device driver that IO ports, MMIO and config space I/O
 * are now enabled.
 */
static enum pci_ers_result eeh_report_mmio_enabled(struct eeh_dev *edev,
						   struct pci_dev *pdev,
						   struct pci_driver *driver)
{
	if (!driver->err_handler->mmio_enabled)
		return PCI_ERS_RESULT_NONE;
	eeh_edev_info(edev, "Invoking %s->mmio_enabled()", driver->name);
	return driver->err_handler->mmio_enabled(pdev);
}

/**
 * eeh_report_reset - Tell device that slot has been reset
 * @edev: eeh device
 * @pdev: PCI device
 * @driver: device's PCI driver
 *
 * This routine must be called while EEH tries to reset particular
 * PCI device so that the associated PCI device driver could take
 * some actions, usually to save data the driver needs so that the
 * driver can work again while the device is recovered.
 */
static enum pci_ers_result eeh_report_reset(struct eeh_dev *edev,
					    struct pci_dev *pdev,
					    struct pci_driver *driver)
{
	if (!driver->err_handler->slot_reset || !edev->in_error)
		return PCI_ERS_RESULT_NONE;
	eeh_edev_info(edev, "Invoking %s->slot_reset()", driver->name);
	return driver->err_handler->slot_reset(pdev);
}

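/*
 * Counterpart to eeh_dev_save_state(): restore the config space
 * saved there, or the initial config space captured at EEH device
 * creation for adapters whose config space cannot be touched.
 */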
static void eeh_dev_restore_state(struct eeh_dev *edev, void *userdata)
{
	struct pci_dev *pdev;

	if (!edev)
		return;

	/*
	 * The config space contents weren't saved for adapters
	 * whose config space is blocked, so for those we have to
	 * restore the initial config space saved when the EEH
	 * device was created.
	 */
	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
		if (list_is_last(&edev->entry, &edev->pe->edevs))
			eeh_pe_restore_bars(edev->pe);

		return;
	}

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return;

	pci_restore_state(pdev);
}

/**
 * eeh_report_resume - Tell device to resume normal operations
 * @edev: eeh device
 * @pdev: PCI device
 * @driver: device's PCI driver
 *
 * This routine must be called to notify the device driver that it
 * could resume so that the device driver can do some initialization
 * to make the recovered device work again.
 */
static enum pci_ers_result eeh_report_resume(struct eeh_dev *edev,
					     struct pci_dev *pdev,
					     struct pci_driver *driver)
{
	if (!driver->err_handler->resume || !edev->in_error)
		return PCI_ERS_RESULT_NONE;

	eeh_edev_info(edev, "Invoking %s->resume()", driver->name);
	driver->err_handler->resume(pdev);

	pci_uevent_ers(edev->pdev, PCI_ERS_RESULT_RECOVERED);
#ifdef CONFIG_PCI_IOV
	if (eeh_ops->notify_resume && eeh_dev_to_pdn(edev))
		eeh_ops->notify_resume(eeh_dev_to_pdn(edev));
#endif
	return PCI_ERS_RESULT_NONE;
}

/**
 * eeh_report_failure - Tell device driver that device is dead.
 * @edev: eeh device
 * @pdev: PCI device
 * @driver: device's PCI driver
 *
 * This informs the device driver that the device is permanently
 * dead, and that no further recovery attempts will be made on it.
 */
static enum pci_ers_result eeh_report_failure(struct eeh_dev *edev,
					      struct pci_dev *pdev,
					      struct pci_driver *driver)
{
	enum pci_ers_result rc;

	if (!driver->err_handler->error_detected)
		return PCI_ERS_RESULT_NONE;

	eeh_edev_info(edev, "Invoking %s->error_detected(permanent failure)",
		      driver->name);
	rc = driver->err_handler->error_detected(pdev,
						 pci_channel_io_perm_failure);

	pci_uevent_ers(pdev, PCI_ERS_RESULT_DISCONNECT);
	return rc;
}

static void *eeh_add_virt_device(struct eeh_dev *edev)
{
	struct pci_driver *driver;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);

	if (!edev->physfn) {
		eeh_edev_warn(edev, "Not for VF\n");
		return NULL;
	}

	driver = eeh_pcid_get(dev);
	if (driver) {
		if (driver->err_handler) {
			eeh_pcid_put(dev);
			return NULL;
		}
		eeh_pcid_put(dev);
	}

#ifdef CONFIG_PCI_IOV
	pci_iov_add_virtfn(edev->physfn, eeh_dev_to_pdn(edev)->vf_index);
#endif
	return NULL;
}

static void eeh_rmv_device(struct eeh_dev *edev, void *userdata)
{
	struct pci_driver *driver;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct eeh_rmv_data *rmv_data = (struct eeh_rmv_data *)userdata;

	/*
	 * Strictly speaking, the PCI bridges should be removed as
	 * well. However, that adds a lot of complexity, particularly
	 * since some of the devices under a bridge may themselves
	 * support EEH. So for simplicity we only deal with plain PCI
	 * devices here.
	 */
	if (!eeh_edev_actionable(edev) ||
	    (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE))
		return;

	if (rmv_data) {
		driver = eeh_pcid_get(dev);
		if (driver) {
			if (driver->err_handler &&
			    driver->err_handler->error_detected &&
			    driver->err_handler->slot_reset) {
				eeh_pcid_put(dev);
				return;
			}
			eeh_pcid_put(dev);
		}
	}

	/* Remove it from PCI subsystem */
	pr_info("EEH: Removing %s without EEH sensitive driver\n",
		pci_name(dev));
	edev->mode |= EEH_DEV_DISCONNECTED;
	if (rmv_data)
		rmv_data->removed_dev_count++;

	if (edev->physfn) {
#ifdef CONFIG_PCI_IOV
		struct pci_dn *pdn = eeh_dev_to_pdn(edev);

		pci_iov_remove_virtfn(edev->physfn, pdn->vf_index);
		edev->pdev = NULL;
#endif
		if (rmv_data)
			list_add(&edev->rmv_entry, &rmv_data->removed_vf_list);
	} else {
		pci_lock_rescan_remove();
		pci_stop_and_remove_bus_device(dev);
		pci_unlock_rescan_remove();
	}
}

static void *eeh_pe_detach_dev(struct eeh_pe *pe, void *userdata)
{
	struct eeh_dev *edev, *tmp;

	eeh_pe_for_each_dev(pe, edev, tmp) {
		if (!(edev->mode & EEH_DEV_DISCONNECTED))
			continue;

		edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
		eeh_rmv_from_parent_pe(edev);
	}

	return NULL;
}

/*
 * Explicitly clear the PE's frozen state on PowerNV, where the PE
 * is kept frozen until the BAR restore has completed. Doing so is
 * harmless on pSeries. To be consistent with the PE reset (tried up
 * to 3 times), clearing the frozen state is also attempted up to
 * 3 times.
 */
static int eeh_clear_pe_frozen_state(struct eeh_pe *root, bool include_passed)
{
	struct eeh_pe *pe;
	int i;

	eeh_for_each_pe(root, pe) {
		if (include_passed || !eeh_pe_passed(pe)) {
			for (i = 0; i < 3; i++)
				if (!eeh_unfreeze_pe(pe))
					break;
			if (i >= 3)
				return -EIO;
		}
	}
	eeh_pe_state_clear(root, EEH_PE_ISOLATED, include_passed);
	return 0;
}

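/*
 * Reset and recover a PE outside of the normal event flow: save the
 * device state, perform a full reset, clear the frozen state and
 * restore the device state. Note that, unlike
 * eeh_handle_normal_event(), no driver error handlers are invoked
 * here; callers are expected to have quiesced the devices themselves.
 */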
int eeh_pe_reset_and_recover(struct eeh_pe *pe)
{
	int ret;

	/* Bail if the PE is being recovered */
	if (pe->state & EEH_PE_RECOVERING)
		return 0;

	/* Put the PE into recovery mode */
	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);

	/* Save states */
	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);

	/* Issue reset */
	ret = eeh_pe_reset_full(pe, true);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
		return ret;
	}

	/* Unfreeze the PE */
	ret = eeh_clear_pe_frozen_state(pe, true);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
		return ret;
	}

	/* Restore device state */
	eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);

	/* Clear recovery mode */
	eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);

	return 0;
}

/**
 * eeh_reset_device - Perform actual reset of a pci slot
 * @pe: EEH PE
 * @bus: PCI bus corresponding to the isolated slot
 * @rmv_data: optional; list to record removed devices
 * @driver_eeh_aware: does the device's driver provide EEH support?
 *
 * This routine must be called to do reset on the indicated PE.
 * During the reset, hotplug events might be generated because the
 * affected PCI devices will be removed and then re-added.
 */
static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
			    struct eeh_rmv_data *rmv_data,
			    bool driver_eeh_aware)
{
	time64_t tstamp;
	int cnt, rc;
	struct eeh_dev *edev;
	struct eeh_pe *tmp_pe;
	bool any_passed = false;

	eeh_for_each_pe(pe, tmp_pe)
		any_passed |= eeh_pe_passed(tmp_pe);

	/* pcibios will clear the counter; save the value */
	cnt = pe->freeze_count;
	tstamp = pe->tstamp;

	/*
	 * We don't remove the corresponding PE instances because
	 * we need the information afterwards. The attached EEH
	 * devices are expected to be attached soon when calling
	 * into pci_hp_add_devices().
	 */
	eeh_pe_state_mark(pe, EEH_PE_KEEP);
	if (any_passed || driver_eeh_aware || (pe->type & EEH_PE_VF)) {
		eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
	} else {
		pci_lock_rescan_remove();
		pci_hp_remove_devices(bus);
		pci_unlock_rescan_remove();
	}

	/*
	 * Reset the pci controller. (Asserts RST#; resets config space).
	 * Reconfigure bridges and devices. Don't try to bring the system
	 * up if the reset failed for some reason.
	 *
	 * During the reset, it's very dangerous to have uncontrolled PCI
	 * config accesses. So we prefer to block them. However, controlled
	 * PCI config accesses initiated from EEH itself are allowed.
	 */
	rc = eeh_pe_reset_full(pe, false);
	if (rc)
		return rc;

	pci_lock_rescan_remove();

	/* Restore PE */
	eeh_ops->configure_bridge(pe);
	eeh_pe_restore_bars(pe);

	/* Clear frozen state */
	rc = eeh_clear_pe_frozen_state(pe, false);
	if (rc) {
		pci_unlock_rescan_remove();
		return rc;
	}

	/* Give the system 5 seconds to finish running the user-space
	 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
	 * this is a hack, but if we don't do this, and try to bring
	 * the device up before the scripts have taken it down,
	 * potentially weird things happen.
	 */
	if (!driver_eeh_aware || rmv_data->removed_dev_count) {
		pr_info("EEH: Sleep 5s ahead of %s hotplug\n",
			(driver_eeh_aware ? "partial" : "complete"));
		ssleep(5);

		/*
		 * The EEH device is still connected with its parent
		 * PE. We should disconnect it so the binding can be
		 * rebuilt when adding PCI devices.
		 */
		edev = list_first_entry(&pe->edevs, struct eeh_dev, entry);
		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		if (pe->type & EEH_PE_VF) {
			eeh_add_virt_device(edev);
		} else {
			if (!driver_eeh_aware)
				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
			pci_hp_add_devices(bus);
		}
	}
	eeh_pe_state_clear(pe, EEH_PE_KEEP, true);

	pe->tstamp = tstamp;
	pe->freeze_count = cnt;

	pci_unlock_rescan_remove();
	return 0;
}

/* The longest amount of time to wait for a pci device
 * to come back on line, in seconds.
 */
#define MAX_WAIT_FOR_RECOVERY 300

/* Walks the PE tree after processing an event to remove any stale PEs.
 *
 * NB: This needs to be recursive to ensure the leaf PEs get removed
 * before their parents do. Although it would be possible to do this
 * iteratively, recursion is easier to read and guarantees that the
 * leaf nodes are handled first.
 */
static void eeh_pe_cleanup(struct eeh_pe *pe)
{
	struct eeh_pe *child_pe, *tmp;

	list_for_each_entry_safe(child_pe, tmp, &pe->child_list, child)
		eeh_pe_cleanup(child_pe);

	if (pe->state & EEH_PE_KEEP)
		return;

	if (!(pe->state & EEH_PE_INVALID))
		return;

	if (list_empty(&pe->edevs) && list_empty(&pe->child_list)) {
		list_del(&pe->child);
		kfree(pe);
	}
}

/**
 * eeh_slot_presence_check - Check if a device is still present in a slot
 * @pdev: pci_dev to check
 *
 * This function may return a false positive if we can't determine the slot's
 * presence state. This might happen for PCIe slots if the PE containing
 * the upstream bridge is also frozen, or the bridge is part of the same PE
 * as the device.
 *
 * This shouldn't happen often, but you might see it if you hotplug a PCIe
 * switch.
 */
static bool eeh_slot_presence_check(struct pci_dev *pdev)
{
	const struct hotplug_slot_ops *ops;
	struct pci_slot *slot;
	u8 state;
	int rc;

	if (!pdev)
		return false;

	if (pdev->error_state == pci_channel_io_perm_failure)
		return false;

	slot = pdev->slot;
	if (!slot || !slot->hotplug)
		return true;

	ops = slot->hotplug->ops;
	if (!ops || !ops->get_adapter_status)
		return true;

	/* set the attention indicator while we've got the slot ops */
	if (ops->set_attention_status)
		ops->set_attention_status(slot->hotplug, 1);

	rc = ops->get_adapter_status(slot->hotplug, &state);
	if (rc)
		return true;

	return !!state;
}

static void eeh_clear_slot_attention(struct pci_dev *pdev)
{
	const struct hotplug_slot_ops *ops;
	struct pci_slot *slot;

	if (!pdev)
		return;

	if (pdev->error_state == pci_channel_io_perm_failure)
		return;

	slot = pdev->slot;
	if (!slot || !slot->hotplug)
		return;

	ops = slot->hotplug->ops;
	if (!ops || !ops->set_attention_status)
		return;

	ops->set_attention_status(slot->hotplug, 0);
}

/**
 * eeh_handle_normal_event - Handle EEH events on a specific PE
 * @pe: EEH PE - which should not be used after we return, as it may
 *      have been invalidated.
 *
 * Attempts to recover the given PE. If recovery fails or the PE has failed
 * too many times, remove the PE.
 *
 * When the PHB detects address or data parity errors on a particular PCI
 * slot, the associated PE is frozen. Besides that, DMA to wild addresses
 * (which usually happens due to bugs in device drivers or in PCI adapter
 * firmware) can cause EEH errors, and so can #SERR, #PERR and other
 * miscellaneous PCI-related errors.
 *
 * The recovery process consists of unplugging the device driver (which
 * generates hotplug events to userspace), then issuing a PCI #RST to
 * the device, then reconfiguring the PCI config space for all bridges
 * & devices under this slot, and then finally restarting the device
 * drivers (which causes a second set of hotplug events to go out to
 * userspace).
 */
void eeh_handle_normal_event(struct eeh_pe *pe)
{
	struct pci_bus *bus;
	struct eeh_dev *edev, *tmp;
	struct eeh_pe *tmp_pe;
	int rc = 0;
	enum pci_ers_result result = PCI_ERS_RESULT_NONE;
	struct eeh_rmv_data rmv_data =
		{LIST_HEAD_INIT(rmv_data.removed_vf_list), 0};
	int devices = 0;

	bus = eeh_pe_bus_get(pe);
	if (!bus) {
		pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
		       __func__, pe->phb->global_number, pe->addr);
		return;
	}

	/*
	 * When devices are hot-removed we might get an EEH due to
	 * a driver attempting to touch the MMIO space of a removed
	 * device. In this case we don't have a device to recover
	 * so suppress the event if we can't find any present devices.
	 *
	 * The hotplug driver should take care of tearing down the
	 * device itself.
	 */
	eeh_for_each_pe(pe, tmp_pe)
		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
			if (eeh_slot_presence_check(edev->pdev))
				devices++;

	if (!devices) {
		pr_debug("EEH: Frozen PHB#%x-PE#%x is empty!\n",
			 pe->phb->global_number, pe->addr);
		goto out; /* nothing to recover */
	}

	/* Log the event */
	if (pe->type & EEH_PE_PHB) {
		pr_err("EEH: Recovering PHB#%x, location: %s\n",
		       pe->phb->global_number, eeh_pe_loc_get(pe));
	} else {
		struct eeh_pe *phb_pe = eeh_phb_pe_get(pe->phb);

		pr_err("EEH: Recovering PHB#%x-PE#%x\n",
		       pe->phb->global_number, pe->addr);
		pr_err("EEH: PE location: %s, PHB location: %s\n",
		       eeh_pe_loc_get(pe), eeh_pe_loc_get(phb_pe));
	}

#ifdef CONFIG_STACKTRACE
	/*
	 * Print the saved stack trace now that we've verified there's
	 * something to recover.
	 */
	if (pe->trace_entries) {
		void **ptrs = (void **) pe->stack_trace;
		int i;

		pr_err("EEH: Frozen PHB#%x-PE#%x detected\n",
		       pe->phb->global_number, pe->addr);

		/* FIXME: Use the same format as dump_stack() */
		pr_err("EEH: Call Trace:\n");
		for (i = 0; i < pe->trace_entries; i++)
			pr_err("EEH: [%pK] %pS\n", ptrs[i], ptrs[i]);

		pe->trace_entries = 0;
	}
#endif /* CONFIG_STACKTRACE */

	eeh_pe_update_time_stamp(pe);
	pe->freeze_count++;
	if (pe->freeze_count > eeh_max_freezes) {
		pr_err("EEH: PHB#%x-PE#%x has failed %d times in the last hour and has been permanently disabled.\n",
		       pe->phb->global_number, pe->addr,
		       pe->freeze_count);
		result = PCI_ERS_RESULT_DISCONNECT;
	}

	eeh_for_each_pe(pe, tmp_pe)
		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
			edev->mode &= ~EEH_DEV_NO_HANDLER;

	/* Walk the various device drivers attached to this slot through
	 * a reset sequence, giving each an opportunity to do what it needs
	 * to accomplish the reset. Each child gets a report of the
	 * status ... if any child can't handle the reset, then the entire
	 * slot is dlpar removed and added.
	 *
	 * When the PHB is fenced, we have to issue a reset to recover from
	 * the error. Override the result if necessary so that a partial
	 * hotplug is done in this case.
	 */
	if (result != PCI_ERS_RESULT_DISCONNECT) {
		pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
			pe->freeze_count, eeh_max_freezes);
		pr_info("EEH: Notify device drivers to shutdown\n");
		eeh_set_channel_state(pe, pci_channel_io_frozen);
		eeh_set_irq_state(pe, false);
		eeh_pe_report("error_detected(IO frozen)", pe,
			      eeh_report_error, &result);
		if ((pe->type & EEH_PE_PHB) &&
		    result != PCI_ERS_RESULT_NONE &&
		    result != PCI_ERS_RESULT_NEED_RESET)
			result = PCI_ERS_RESULT_NEED_RESET;
	}

	/* Get the current PCI slot state. This can take a long time,
	 * sometimes over 300 seconds for certain systems.
	 */
	if (result != PCI_ERS_RESULT_DISCONNECT) {
		rc = eeh_wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
		if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
			pr_warn("EEH: Permanent failure\n");
			result = PCI_ERS_RESULT_DISCONNECT;
		}
	}

	/* Since rtas may enable MMIO when posting the error log,
	 * don't post the error log until after all dev drivers
	 * have been informed.
	 */
	if (result != PCI_ERS_RESULT_DISCONNECT) {
		pr_info("EEH: Collect temporary log\n");
		eeh_slot_error_detail(pe, EEH_LOG_TEMP);
	}

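	/*
	 * Dispatch on the aggregate result from the error_detected()
	 * callbacks:
	 *   NONE        - no EEH-aware drivers; reset with full hotplug
	 *   CAN_RECOVER - re-enable MMIO, then DMA, then resume
	 *   NEED_RESET  - reset without hotplug, then call slot_reset()
	 *   DISCONNECT  - fall through to the permanent failure path
	 */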
	/* If all device drivers were EEH-unaware, then shut
	 * down all of the device drivers, and hope they
	 * go down willingly, without panicking the system.
	 */
	if (result == PCI_ERS_RESULT_NONE) {
		pr_info("EEH: Reset with hotplug activity\n");
		rc = eeh_reset_device(pe, bus, NULL, false);
		if (rc) {
			pr_warn("%s: Unable to reset, err=%d\n",
				__func__, rc);
			result = PCI_ERS_RESULT_DISCONNECT;
		}
	}

	/* If all devices reported they can proceed, then re-enable MMIO */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enable I/O for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);

		if (rc < 0) {
			result = PCI_ERS_RESULT_DISCONNECT;
		} else if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			pr_info("EEH: Notify device drivers to resume I/O\n");
			eeh_pe_report("mmio_enabled", pe,
				      eeh_report_mmio_enabled, &result);
		}
	}

	/* If all devices reported they can proceed, then re-enable DMA */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enable DMA for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);

		if (rc < 0) {
			result = PCI_ERS_RESULT_DISCONNECT;
		} else if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			/*
			 * We didn't do a PE reset in this case, so the
			 * PE is still in frozen state. Clear it before
			 * resuming the PE.
			 */
			eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
			result = PCI_ERS_RESULT_RECOVERED;
		}
	}

	/* If any device called out for a reset, then reset the slot */
	if (result == PCI_ERS_RESULT_NEED_RESET) {
		pr_info("EEH: Reset without hotplug activity\n");
		rc = eeh_reset_device(pe, bus, &rmv_data, true);
		if (rc) {
			pr_warn("%s: Cannot reset, err=%d\n",
				__func__, rc);
			result = PCI_ERS_RESULT_DISCONNECT;
		} else {
			result = PCI_ERS_RESULT_NONE;
			eeh_set_channel_state(pe, pci_channel_io_normal);
			eeh_set_irq_state(pe, true);
			eeh_pe_report("slot_reset", pe, eeh_report_reset,
				      &result);
		}
	}

	if ((result == PCI_ERS_RESULT_RECOVERED) ||
	    (result == PCI_ERS_RESULT_NONE)) {
		/*
		 * Add back any hot-removed VFs; this must happen after
		 * the PF has been recovered properly.
		 */
		list_for_each_entry_safe(edev, tmp, &rmv_data.removed_vf_list,
					 rmv_entry) {
			eeh_add_virt_device(edev);
			list_del(&edev->rmv_entry);
		}

		/* Tell all device drivers that they can resume operations */
		pr_info("EEH: Notify device driver to resume\n");
		eeh_set_channel_state(pe, pci_channel_io_normal);
		eeh_set_irq_state(pe, true);
		eeh_pe_report("resume", pe, eeh_report_resume, NULL);
		eeh_for_each_pe(pe, tmp_pe) {
			eeh_pe_for_each_dev(tmp_pe, edev, tmp) {
				edev->mode &= ~EEH_DEV_NO_HANDLER;
				edev->in_error = false;
			}
		}

		pr_info("EEH: Recovery successful.\n");
	} else {
		/*
		 * About 90% of all real-life EEH failures in the field
		 * are due to poorly seated PCI cards. Only 10% or so are
		 * due to actual, failed cards.
		 */
		pr_err("EEH: Unable to recover from failure of PHB#%x-PE#%x.\n"
		       "Please try reseating or replacing it\n",
		       pe->phb->global_number, pe->addr);

		eeh_slot_error_detail(pe, EEH_LOG_PERM);

		/* Notify all devices that they're about to go down. */
		eeh_set_channel_state(pe, pci_channel_io_perm_failure);
		eeh_set_irq_state(pe, false);
		eeh_pe_report("error_detected(permanent failure)", pe,
			      eeh_report_failure, NULL);

		/* Mark the PE to be removed permanently */
		eeh_pe_state_mark(pe, EEH_PE_REMOVED);

		/*
		 * Shut down the device drivers for good. We mark
		 * all removed devices correctly so that their PCI
		 * config space is not accessed any more.
		 */
		if (pe->type & EEH_PE_VF) {
			eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
			eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
		} else {
			eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
			eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);

			pci_lock_rescan_remove();
			pci_hp_remove_devices(bus);
			pci_unlock_rescan_remove();
			/* The passed PE should no longer be used */
			return;
		}
	}

out:
	/*
	 * Clean up any PEs without devices. While marked as EEH_PE_RECOVERING
	 * we don't want to modify the PE tree structure so we do it here.
	 */
	eeh_pe_cleanup(pe);

	/* clear the slot attention LED for all recovered devices */
	eeh_for_each_pe(pe, tmp_pe)
		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
			eeh_clear_slot_attention(edev->pdev);

	eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
}

/**
 * eeh_handle_special_event - Handle EEH events without a specific failing PE
 *
 * Called when an EEH event is detected but can't be narrowed down to a
 * specific PE. Iterates through possible failures and handles them as
 * necessary.
 */
void eeh_handle_special_event(void)
{
	struct eeh_pe *pe, *phb_pe, *tmp_pe;
	struct eeh_dev *edev, *tmp_edev;
	struct pci_bus *bus;
	struct pci_controller *hose;
	unsigned long flags;
	int rc;

	do {
		rc = eeh_ops->next_error(&pe);

		switch (rc) {
		case EEH_NEXT_ERR_DEAD_IOC:
			/* Mark all PHBs in dead state */
			eeh_serialize_lock(&flags);

			/* Purge all events */
			eeh_remove_event(NULL, true);

			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe)
					continue;

				eeh_pe_mark_isolated(phb_pe);
			}

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_FROZEN_PE:
		case EEH_NEXT_ERR_FENCED_PHB:
		case EEH_NEXT_ERR_DEAD_PHB:
			/* Mark the PE in fenced state */
			eeh_serialize_lock(&flags);

			/* Purge all events of the PHB */
			eeh_remove_event(pe, true);

			if (rc != EEH_NEXT_ERR_DEAD_PHB)
				eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
			eeh_pe_mark_isolated(pe);

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_NONE:
			return;
		default:
			pr_warn("%s: Invalid value %d from next_error()\n",
				__func__, rc);
			return;
		}

		/*
		 * A fenced PHB or frozen PE is handled as a normal
		 * event. For a dead PHB or IOC, we have to remove
		 * the affected PHBs instead.
		 */
		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
		    rc == EEH_NEXT_ERR_FENCED_PHB) {
			eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
			eeh_handle_normal_event(pe);
		} else {
			pci_lock_rescan_remove();
			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe ||
				    !(phb_pe->state & EEH_PE_ISOLATED) ||
				    (phb_pe->state & EEH_PE_RECOVERING))
					continue;

				eeh_for_each_pe(pe, tmp_pe)
					eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
						edev->mode &= ~EEH_DEV_NO_HANDLER;

				/* Notify all devices to be down */
				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
				eeh_set_channel_state(pe, pci_channel_io_perm_failure);
				eeh_pe_report(
					"error_detected(permanent failure)", pe,
					eeh_report_failure, NULL);
				bus = eeh_pe_bus_get(phb_pe);
				if (!bus) {
					pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
					       __func__,
					       pe->phb->global_number,
					       pe->addr);
					break;
				}
				pci_hp_remove_devices(bus);
			}
			pci_unlock_rescan_remove();
		}

		/*
		 * If a dead IOC has been detected, there is no need to
		 * proceed any further since all PHBs have been removed.
		 */
		if (rc == EEH_NEXT_ERR_DEAD_IOC)
			break;
	} while (rc != EEH_NEXT_ERR_NONE);
}