// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerNV Platform dependent EEH operations
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
 */

#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/opal.h>
#include <asm/ppc-pci.h>
#include <asm/pnv-pci.h>

#include "powernv.h"
#include "pci.h"
#include "../../../../drivers/pci/pci.h"

static int eeh_event_irq = -EINVAL;

void pnv_pcibios_bus_add_device(struct pci_dev *pdev)
{
	dev_dbg(&pdev->dev, "EEH: Setting up device\n");
	eeh_probe_device(pdev);
}

static int pnv_eeh_init(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	int max_diag_size = PNV_PCI_DIAG_BUF_SIZE;

	if (!firmware_has_feature(FW_FEATURE_OPAL)) {
		pr_warn("%s: OPAL is required!\n",
			__func__);
		return -EINVAL;
	}

	/* Set probe mode */
	eeh_add_flag(EEH_PROBE_MODE_DEV);

	/*
	 * P7IOC blocks PCI config access to frozen PEs, but PHB3
	 * doesn't do that. So we have to selectively enable I/O
	 * prior to collecting the error log.
	 */
	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;

		if (phb->model == PNV_PHB_MODEL_P7IOC)
			eeh_add_flag(EEH_ENABLE_IO_FOR_LOG);

		if (phb->diag_data_size > max_diag_size)
			max_diag_size = phb->diag_data_size;

		/*
		 * PE#0 should be regarded as valid by the EEH core
		 * if it's not the reserved one. Currently, we have
		 * the reserved PE#255 and PE#127 for PHB3 and P7IOC
		 * respectively, so we should regard PE#0 as valid
		 * for both PHB3 and P7IOC.
		 */
		if (phb->ioda.reserved_pe_idx != 0)
			eeh_add_flag(EEH_VALID_PE_ZERO);

		break;
	}

	eeh_set_pe_aux_size(max_diag_size);
	ppc_md.pcibios_bus_add_device = pnv_pcibios_bus_add_device;

	return 0;
}

static irqreturn_t pnv_eeh_event(int irq, void *data)
{
	/*
	 * We simply send a special EEH event if EEH has been
	 * enabled. We don't care about EEH events until we've
	 * finished processing the outstanding ones. Event processing
	 * gets unmasked in next_error() if EEH is enabled.
	 */
	disable_irq_nosync(irq);

	if (eeh_enabled())
		eeh_send_failure_event(NULL);

	return IRQ_HANDLED;
}

#ifdef CONFIG_DEBUG_FS
static ssize_t pnv_eeh_ei_write(struct file *filp,
				const char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct pci_controller *hose = filp->private_data;
	struct eeh_pe *pe;
	int pe_no, type, func;
	unsigned long addr, mask;
	char buf[50];
	int ret;

	if (!eeh_ops || !eeh_ops->err_inject)
		return -ENXIO;

	/* Copy over argument buffer */
	ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
	if (!ret)
		return -EFAULT;

	/* Retrieve parameters */
	ret = sscanf(buf, "%x:%x:%x:%lx:%lx",
		     &pe_no, &type, &func, &addr, &mask);
	if (ret != 5)
		return -EINVAL;

	/* Retrieve PE */
	pe = eeh_pe_get(hose, pe_no, 0);
	if (!pe)
		return -ENODEV;

	/* Do error injection */
	ret = eeh_ops->err_inject(pe, type, func, addr, mask);
	return ret < 0 ? ret : count;
}

static const struct file_operations pnv_eeh_ei_fops = {
	.open	= simple_open,
	.llseek	= no_llseek,
	.write	= pnv_eeh_ei_write,
};

static int pnv_eeh_dbgfs_set(void *data, int offset, u64 val)
{
	struct pci_controller *hose = data;
	struct pnv_phb *phb = hose->private_data;

	out_be64(phb->regs + offset, val);
	return 0;
}

static int pnv_eeh_dbgfs_get(void *data, int offset, u64 *val)
{
	struct pci_controller *hose = data;
	struct pnv_phb *phb = hose->private_data;

	*val = in_be64(phb->regs + offset);
	return 0;
}

#define PNV_EEH_DBGFS_ENTRY(name, reg)				\
static int pnv_eeh_dbgfs_set_##name(void *data, u64 val)	\
{								\
	return pnv_eeh_dbgfs_set(data, reg, val);		\
}								\
								\
static int pnv_eeh_dbgfs_get_##name(void *data, u64 *val)	\
{								\
	return pnv_eeh_dbgfs_get(data, reg, val);		\
}								\
								\
DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_dbgfs_ops_##name,		\
			pnv_eeh_dbgfs_get_##name,		\
			pnv_eeh_dbgfs_set_##name,		\
			"0x%llx\n")

PNV_EEH_DBGFS_ENTRY(outb, 0xD10);
PNV_EEH_DBGFS_ENTRY(inbA, 0xD90);
PNV_EEH_DBGFS_ENTRY(inbB, 0xE10);

#endif /* CONFIG_DEBUG_FS */

void pnv_eeh_enable_phbs(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;

	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;
		/*
		 * If EEH is enabled, we're going to rely on that.
		 * Otherwise, we fall back to the conventional
		 * mechanism of clearing frozen PEs during PCI
		 * config access.
		 */
		if (eeh_enabled())
			phb->flags |= PNV_PHB_FLAG_EEH;
		else
			phb->flags &= ~PNV_PHB_FLAG_EEH;
	}
}

/**
 * pnv_eeh_post_init - EEH platform dependent post initialization
 *
 * EEH platform dependent post initialization on powernv. When
 * the function is called, the EEH PEs and devices should have
 * been built. If the I/O cache stuff has been built, EEH is
 * ready to provide service.
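 *
 * Return: 0 on success, or a negative error code if the OPAL event
 * interrupt can't be registered or requested.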
 */
int pnv_eeh_post_init(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	int ret = 0;

	eeh_show_enabled();

	/* Register OPAL event notifier */
	eeh_event_irq = opal_event_request(ilog2(OPAL_EVENT_PCI_ERROR));
	if (eeh_event_irq < 0) {
		pr_err("%s: Can't register OPAL event interrupt (%d)\n",
		       __func__, eeh_event_irq);
		return eeh_event_irq;
	}

	ret = request_irq(eeh_event_irq, pnv_eeh_event,
			  IRQ_TYPE_LEVEL_HIGH, "opal-eeh", NULL);
	if (ret < 0) {
		irq_dispose_mapping(eeh_event_irq);
		pr_err("%s: Can't request OPAL event interrupt (%d)\n",
		       __func__, eeh_event_irq);
		return ret;
	}

	if (!eeh_enabled())
		disable_irq(eeh_event_irq);

	pnv_eeh_enable_phbs();

	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;

		/* Create debugfs entries */
#ifdef CONFIG_DEBUG_FS
		if (phb->has_dbgfs || !phb->dbgfs)
			continue;

		phb->has_dbgfs = 1;
		debugfs_create_file("err_injct", 0200,
				    phb->dbgfs, hose,
				    &pnv_eeh_ei_fops);

		debugfs_create_file("err_injct_outbound", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_dbgfs_ops_outb);
		debugfs_create_file("err_injct_inboundA", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_dbgfs_ops_inbA);
		debugfs_create_file("err_injct_inboundB", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_dbgfs_ops_inbB);
#endif /* CONFIG_DEBUG_FS */
	}

	return ret;
}

static int pnv_eeh_find_cap(struct pci_dn *pdn, int cap)
{
	int pos = PCI_CAPABILITY_LIST;
	int cnt = 48;	/* Maximal number of capabilities */
	u32 status, id;

	if (!pdn)
		return 0;

	/* Check if the device supports capabilities */
	pnv_pci_cfg_read(pdn, PCI_STATUS, 2, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	while (cnt--) {
		pnv_pci_cfg_read(pdn, pos, 1, &pos);
		if (pos < 0x40)
			break;

		pos &= ~3;
		pnv_pci_cfg_read(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
		if (id == 0xff)
			break;

		/* Found */
		if (id == cap)
			return pos;

		/* Next one */
		pos += PCI_CAP_LIST_NEXT;
	}

	return 0;
}

static int pnv_eeh_find_ecap(struct pci_dn *pdn, int cap)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 header;
	int pos = 256, ttl = (4096 - 256) / 8;

	if (!edev || !edev->pcie_cap)
		return 0;
	if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
		return 0;
	else if (!header)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 256)
			break;

		if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}

/**
 * pnv_eeh_probe - Do probe on PCI device
 * @pdev: pci_dev to probe
 *
 * Create, or find the existing, eeh_dev for this pci_dev.
 */
static struct eeh_dev *pnv_eeh_probe(struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pci_controller *hose = pdn->phb;
	struct pnv_phb *phb = hose->private_data;
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	uint32_t pcie_flags;
	int ret;
	int config_addr = (pdn->busno << 8) | (pdn->devfn);

	/*
	 * When probing the root bridge, which doesn't have any
	 * subordinate PCI devices, we don't have an OF node for
	 * it either, so it's not reasonable to continue probing.
	 */
	if (!edev || edev->pe)
		return NULL;

	/* already configured? */
	if (edev->pdev) {
		pr_debug("%s: found existing edev for %04x:%02x:%02x.%01x\n",
			 __func__, hose->global_number, config_addr >> 8,
			 PCI_SLOT(config_addr), PCI_FUNC(config_addr));
		return edev;
	}

	/* Skip for PCI-ISA bridge */
	if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
		return NULL;

	eeh_edev_dbg(edev, "Probing device\n");

	/* Initialize eeh device */
	edev->mode &= 0xFFFFFF00;
	edev->pcix_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
	edev->pcie_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
	edev->af_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_AF);
	edev->aer_cap = pnv_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
	if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		edev->mode |= EEH_DEV_BRIDGE;
		if (edev->pcie_cap) {
			pnv_pci_cfg_read(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
					 2, &pcie_flags);
			pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
			if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
				edev->mode |= EEH_DEV_ROOT_PORT;
			else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
				edev->mode |= EEH_DEV_DS_PORT;
		}
	}

	edev->pe_config_addr = phb->ioda.pe_rmap[config_addr];

	/* Create PE */
	ret = eeh_pe_tree_insert(edev);
	if (ret) {
		eeh_edev_warn(edev, "Failed to add device to PE (code %d)\n", ret);
		return NULL;
	}

	/*
	 * If the PE contains any one of the following adapters, the
	 * PCI config space can't be accessed when dumping the EEH log.
	 * Otherwise, we will run into a fenced PHB caused by a shortage
	 * of outbound credits in the adapter. PCI config access should
	 * be blocked until the PE is reset. MMIO access is certainly
	 * dropped by the hardware. In order to drop PCI config requests,
	 * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which
	 * will be checked in the backend during PE state retrieval. If
	 * the PE becomes frozen for the first time and the flag has
	 * been set for the PE, we will set EEH_PE_CFG_BLOCKED for
	 * that PE to block its config space.
	 *
	 * Broadcom BCM5718 2-port NICs (14e4:1656)
	 * Broadcom Austin 4-port NICs (14e4:1657)
	 * Broadcom Shiner 4-port 1G NICs (14e4:168a)
	 * Broadcom Shiner 2-port 10G NICs (14e4:168e)
	 */
	if ((pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
	     pdn->device_id == 0x1656) ||
	    (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
	     pdn->device_id == 0x1657) ||
	    (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
	     pdn->device_id == 0x168a) ||
	    (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
	     pdn->device_id == 0x168e))
		edev->pe->state |= EEH_PE_CFG_RESTRICTED;

	/*
	 * Cache the PE primary bus, which can't be fetched when
	 * full hotplug is in progress. In that case, all child
	 * PCI devices of the PE are expected to have been removed
	 * prior to the PE reset.
	 */
	if (!(edev->pe->state & EEH_PE_PRI_BUS)) {
		edev->pe->bus = pci_find_bus(hose->global_number,
					     pdn->busno);
		if (edev->pe->bus)
			edev->pe->state |= EEH_PE_PRI_BUS;
	}

	/*
	 * Enable EEH explicitly so that we will do the EEH check
	 * while accessing I/O stuff
	 */
	if (!eeh_has_flag(EEH_ENABLED)) {
		enable_irq(eeh_event_irq);
		pnv_eeh_enable_phbs();
		eeh_add_flag(EEH_ENABLED);
	}

	/* Save memory bars */
	eeh_save_bars(edev);

	eeh_edev_dbg(edev, "EEH enabled on device\n");

	return edev;
}

/**
 * pnv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @pe: EEH PE
 * @option: operation to be issued
 *
 * The function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
 */
static int pnv_eeh_set_option(struct eeh_pe *pe, int option)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb = hose->private_data;
	bool freeze_pe = false;
	int opt;
	s64 rc;

	switch (option) {
	case EEH_OPT_DISABLE:
		return -EPERM;
	case EEH_OPT_ENABLE:
		return 0;
	case EEH_OPT_THAW_MMIO:
		opt = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO;
		break;
	case EEH_OPT_THAW_DMA:
		opt = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA;
		break;
	case EEH_OPT_FREEZE_PE:
		freeze_pe = true;
		opt = OPAL_EEH_ACTION_SET_FREEZE_ALL;
		break;
	default:
		pr_warn("%s: Invalid option %d\n", __func__, option);
		return -EINVAL;
	}

	/* Freeze master and slave PEs if PHB supports compound PEs */
	if (freeze_pe) {
		if (phb->freeze_pe) {
			phb->freeze_pe(phb, pe->addr);
			return 0;
		}

		rc = opal_pci_eeh_freeze_set(phb->opal_id, pe->addr, opt);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe->addr);
			return -EIO;
		}

		return 0;
	}

	/* Unfreeze master and slave PEs if PHB supports */
	if (phb->unfreeze_pe)
		return phb->unfreeze_pe(phb, pe->addr, opt);

	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe->addr, opt);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld enable %d for PHB#%x-PE#%x\n",
			__func__, rc, option, phb->hose->global_number,
			pe->addr);
		return -EIO;
	}

	return 0;
}

static void pnv_eeh_get_phb_diag(struct eeh_pe *pe)
{
	struct pnv_phb *phb = pe->phb->private_data;
	s64 rc;

	rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data,
					 phb->diag_data_size);
	if (rc != OPAL_SUCCESS)
		pr_warn("%s: Failure %lld getting PHB#%x diag-data\n",
			__func__, rc, pe->phb->global_number);
}

static int pnv_eeh_get_phb_state(struct eeh_pe *pe)
{
	struct pnv_phb *phb = pe->phb->private_data;
	u8 fstate = 0;
	__be16 pcierr = 0;
	s64 rc;
	int result = 0;

	rc = opal_pci_eeh_freeze_status(phb->opal_id,
					pe->addr,
					&fstate,
					&pcierr,
					NULL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld getting PHB#%x state\n",
			__func__, rc, phb->hose->global_number);
		return EEH_STATE_NOT_SUPPORT;
	}

	/*
	 * Check the PHB state. If the PHB is frozen for the
	 * first time, dump the PHB diag-data.
	 */
	if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) {
		result = (EEH_STATE_MMIO_ACTIVE |
			  EEH_STATE_DMA_ACTIVE |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
	} else if (!(pe->state & EEH_PE_ISOLATED)) {
		eeh_pe_mark_isolated(pe);
		pnv_eeh_get_phb_diag(pe);

		if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
			pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
	}

	return result;
}

static int pnv_eeh_get_pe_state(struct eeh_pe *pe)
{
	struct pnv_phb *phb = pe->phb->private_data;
	u8 fstate = 0;
	__be16 pcierr = 0;
	s64 rc;
	int result;

	/*
	 * We don't clobber the hardware frozen state until the PE
	 * reset is completed. In order to keep the EEH core moving
	 * forward, we have to return an operational state during
	 * the PE reset.
	 */
	if (pe->state & EEH_PE_RESET) {
		result = (EEH_STATE_MMIO_ACTIVE |
			  EEH_STATE_DMA_ACTIVE |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
		return result;
	}

	/*
	 * Fetch the PE state from hardware. If the PHB
	 * supports compound PEs, let it handle that.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe->addr);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe->addr,
						&fstate,
						&pcierr,
						NULL);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n",
				__func__, rc, phb->hose->global_number,
				pe->addr);
			return EEH_STATE_NOT_SUPPORT;
		}
	}

	/* Figure out state */
	switch (fstate) {
	case OPAL_EEH_STOPPED_NOT_FROZEN:
		result = (EEH_STATE_MMIO_ACTIVE |
			  EEH_STATE_DMA_ACTIVE |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
		break;
	case OPAL_EEH_STOPPED_MMIO_FREEZE:
		result = (EEH_STATE_DMA_ACTIVE |
			  EEH_STATE_DMA_ENABLED);
		break;
	case OPAL_EEH_STOPPED_DMA_FREEZE:
		result = (EEH_STATE_MMIO_ACTIVE |
			  EEH_STATE_MMIO_ENABLED);
		break;
	case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
		result = 0;
		break;
	case OPAL_EEH_STOPPED_RESET:
		result = EEH_STATE_RESET_ACTIVE;
		break;
	case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
		result = EEH_STATE_UNAVAILABLE;
		break;
	case OPAL_EEH_STOPPED_PERM_UNAVAIL:
		result = EEH_STATE_NOT_SUPPORT;
		break;
	default:
		result = EEH_STATE_NOT_SUPPORT;
		pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n",
			__func__, phb->hose->global_number,
			pe->addr, fstate);
	}

	/*
	 * If the PHB supports compound PEs, freeze all slave
	 * PEs for consistency.
	 *
	 * If the PE is switching to the frozen state for the
	 * first time, dump the PHB diag-data.
	 */
	if (!(result & EEH_STATE_NOT_SUPPORT) &&
	    !(result & EEH_STATE_UNAVAILABLE) &&
	    !(result & EEH_STATE_MMIO_ACTIVE) &&
	    !(result & EEH_STATE_DMA_ACTIVE) &&
	    !(pe->state & EEH_PE_ISOLATED)) {
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe->addr);

		eeh_pe_mark_isolated(pe);
		pnv_eeh_get_phb_diag(pe);

		if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
			pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
	}

	return result;
}

/**
 * pnv_eeh_get_state - Retrieve PE state
 * @pe: EEH PE
 * @delay: delay while PE state is temporarily unavailable
 *
 * Retrieve the state of the specified PE. On an IODA-compatible
 * platform it should be retrieved from the IODA table, so we
 * prefer passing it down to the hardware implementation to
 * handle.
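 *
 * Return: a bitmask of EEH_STATE_* flags describing the current PE state.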
 */
static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay)
{
	int ret;

	if (pe->type & EEH_PE_PHB)
		ret = pnv_eeh_get_phb_state(pe);
	else
		ret = pnv_eeh_get_pe_state(pe);

	if (!delay)
		return ret;

	/*
	 * If the PE state is temporarily unavailable,
	 * tell the EEH core to delay for the default
	 * period (1 second).
	 */
	*delay = 0;
	if (ret & EEH_STATE_UNAVAILABLE)
		*delay = 1000;

	return ret;
}

static s64 pnv_eeh_poll(unsigned long id)
{
	s64 rc = OPAL_HARDWARE;

	while (1) {
		rc = opal_pci_poll(id);
		if (rc <= 0)
			break;

		if (system_state < SYSTEM_RUNNING)
			udelay(1000 * rc);
		else
			msleep(rc);
	}

	return rc;
}

int pnv_eeh_phb_reset(struct pci_controller *hose, int option)
{
	struct pnv_phb *phb = hose->private_data;
	s64 rc = OPAL_HARDWARE;

	pr_debug("%s: Reset PHB#%x, option=%d\n",
		 __func__, hose->global_number, option);

	/* Issue PHB complete reset request */
	if (option == EEH_RESET_FUNDAMENTAL ||
	    option == EEH_RESET_HOT)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PHB_COMPLETE,
				    OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_DEACTIVATE)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PHB_COMPLETE,
				    OPAL_DEASSERT_RESET);
	if (rc < 0)
		goto out;

	/*
	 * Poll the state of the PHB until the request is done
	 * successfully. The PHB reset is usually a PHB complete
	 * reset followed by a hot reset on the root bus, so we
	 * also need the PCI bus settlement delay.
	 */
	if (rc > 0)
		rc = pnv_eeh_poll(phb->opal_id);
	if (option == EEH_RESET_DEACTIVATE) {
		if (system_state < SYSTEM_RUNNING)
			udelay(1000 * EEH_PE_RST_SETTLE_TIME);
		else
			msleep(EEH_PE_RST_SETTLE_TIME);
	}
out:
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}

static int pnv_eeh_root_reset(struct pci_controller *hose, int option)
{
	struct pnv_phb *phb = hose->private_data;
	s64 rc = OPAL_HARDWARE;

	pr_debug("%s: Reset PHB#%x, option=%d\n",
		 __func__, hose->global_number, option);

	/*
	 * During the reset deassert time, we needn't care about
	 * the reset scope because the firmware does nothing for
	 * fundamental or hot resets during the deassert phase.
	 */
	if (option == EEH_RESET_FUNDAMENTAL)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PCI_FUNDAMENTAL,
				    OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_HOT)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PCI_HOT,
				    OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_DEACTIVATE)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PCI_HOT,
				    OPAL_DEASSERT_RESET);
	if (rc < 0)
		goto out;

	/* Poll the state of the PHB until the request is done */
	if (rc > 0)
		rc = pnv_eeh_poll(phb->opal_id);
	if (option == EEH_RESET_DEACTIVATE)
		msleep(EEH_PE_RST_SETTLE_TIME);
out:
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}

static int __pnv_eeh_bridge_reset(struct pci_dev *dev, int option)
{
	struct pci_dn *pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	int aer = edev ? edev->aer_cap : 0;
	u32 ctrl;

	pr_debug("%s: Secondary Reset PCI bus %04x:%02x with option %d\n",
		 __func__, pci_domain_nr(dev->bus),
		 dev->bus->number, option);

	switch (option) {
	case EEH_RESET_FUNDAMENTAL:
	case EEH_RESET_HOT:
		/* Don't report linkDown event */
		if (aer) {
			eeh_ops->read_config(edev, aer + PCI_ERR_UNCOR_MASK,
					     4, &ctrl);
			ctrl |= PCI_ERR_UNC_SURPDN;
			eeh_ops->write_config(edev, aer + PCI_ERR_UNCOR_MASK,
					      4, ctrl);
		}

		eeh_ops->read_config(edev, PCI_BRIDGE_CONTROL, 2, &ctrl);
		ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
		eeh_ops->write_config(edev, PCI_BRIDGE_CONTROL, 2, ctrl);

		msleep(EEH_PE_RST_HOLD_TIME);
		break;
	case EEH_RESET_DEACTIVATE:
		eeh_ops->read_config(edev, PCI_BRIDGE_CONTROL, 2, &ctrl);
		ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
		eeh_ops->write_config(edev, PCI_BRIDGE_CONTROL, 2, ctrl);

		msleep(EEH_PE_RST_SETTLE_TIME);

		/* Continue reporting linkDown event */
		if (aer) {
			eeh_ops->read_config(edev, aer + PCI_ERR_UNCOR_MASK,
					     4, &ctrl);
			ctrl &= ~PCI_ERR_UNC_SURPDN;
			eeh_ops->write_config(edev, aer + PCI_ERR_UNCOR_MASK,
					      4, ctrl);
		}

		break;
	}

	return 0;
}

static int pnv_eeh_bridge_reset(struct pci_dev *pdev, int option)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct device_node *dn = pci_device_to_OF_node(pdev);
	uint64_t id = PCI_SLOT_ID(phb->opal_id,
				  (pdev->bus->number << 8) | pdev->devfn);
	uint8_t scope;
	int64_t rc;

	/* Hot reset the bus ourselves if the firmware cannot handle it */
	if (!dn || !of_get_property(dn, "ibm,reset-by-firmware", NULL))
		return __pnv_eeh_bridge_reset(pdev, option);

	pr_debug("%s: FW reset PCI bus %04x:%02x with option %d\n",
		 __func__, pci_domain_nr(pdev->bus),
		 pdev->bus->number, option);

	switch (option) {
	case EEH_RESET_FUNDAMENTAL:
		scope = OPAL_RESET_PCI_FUNDAMENTAL;
		break;
	case EEH_RESET_HOT:
		scope = OPAL_RESET_PCI_HOT;
		break;
	case EEH_RESET_DEACTIVATE:
		return 0;
	default:
		dev_dbg(&pdev->dev, "%s: Unsupported reset %d\n",
			__func__, option);
		return -EINVAL;
	}

	rc = opal_pci_reset(id, scope, OPAL_ASSERT_RESET);
	if (rc <= OPAL_SUCCESS)
		goto out;

	rc = pnv_eeh_poll(id);
out:
	return (rc == OPAL_SUCCESS) ? 0 : -EIO;
}

void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
{
	struct pci_controller *hose;

	if (pci_is_root_bus(dev->bus)) {
		hose = pci_bus_to_host(dev->bus);
		pnv_eeh_root_reset(hose, EEH_RESET_HOT);
		pnv_eeh_root_reset(hose, EEH_RESET_DEACTIVATE);
	} else {
		pnv_eeh_bridge_reset(dev, EEH_RESET_HOT);
		pnv_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE);
	}
}

static void pnv_eeh_wait_for_pending(struct pci_dn *pdn, const char *type,
				     int pos, u16 mask)
{
	struct eeh_dev *edev = pdn->edev;
	int i, status = 0;

	/* Wait for the Transaction Pending bit to be cleared */
	for (i = 0; i < 4; i++) {
		eeh_ops->read_config(edev, pos, 2, &status);
		if (!(status & mask))
			return;

		msleep((1 << i) * 100);
	}

	pr_warn("%s: Pending transaction while issuing %sFLR to %04x:%02x:%02x.%01x\n",
		__func__, type,
		pdn->phb->global_number, pdn->busno,
		PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
}

static int pnv_eeh_do_flr(struct pci_dn *pdn, int option)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 reg = 0;

	if (WARN_ON(!edev->pcie_cap))
		return -ENOTTY;

	eeh_ops->read_config(edev, edev->pcie_cap + PCI_EXP_DEVCAP, 4, &reg);
	if (!(reg & PCI_EXP_DEVCAP_FLR))
		return -ENOTTY;

	switch (option) {
	case EEH_RESET_HOT:
	case EEH_RESET_FUNDAMENTAL:
		pnv_eeh_wait_for_pending(pdn, "",
					 edev->pcie_cap + PCI_EXP_DEVSTA,
					 PCI_EXP_DEVSTA_TRPND);
		eeh_ops->read_config(edev, edev->pcie_cap + PCI_EXP_DEVCTL,
				     4, &reg);
		reg |= PCI_EXP_DEVCTL_BCR_FLR;
		eeh_ops->write_config(edev, edev->pcie_cap + PCI_EXP_DEVCTL,
				      4, reg);
		msleep(EEH_PE_RST_HOLD_TIME);
		break;
	case EEH_RESET_DEACTIVATE:
		eeh_ops->read_config(edev, edev->pcie_cap + PCI_EXP_DEVCTL,
				     4, &reg);
		reg &= ~PCI_EXP_DEVCTL_BCR_FLR;
		eeh_ops->write_config(edev, edev->pcie_cap + PCI_EXP_DEVCTL,
				      4, reg);
		msleep(EEH_PE_RST_SETTLE_TIME);
		break;
	}

	return 0;
}

static int pnv_eeh_do_af_flr(struct pci_dn *pdn, int option)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 cap = 0;

	if (WARN_ON(!edev->af_cap))
		return -ENOTTY;

	eeh_ops->read_config(edev, edev->af_cap + PCI_AF_CAP, 1, &cap);
	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
		return -ENOTTY;

	switch (option) {
	case EEH_RESET_HOT:
	case EEH_RESET_FUNDAMENTAL:
		/*
		 * Wait for the Transaction Pending bit to clear. A
		 * word-aligned test is used, so we use the control
		 * offset rather than status and shift the test bit
		 * to match.
		 */
		pnv_eeh_wait_for_pending(pdn, "AF",
					 edev->af_cap + PCI_AF_CTRL,
					 PCI_AF_STATUS_TP << 8);
		eeh_ops->write_config(edev, edev->af_cap + PCI_AF_CTRL,
				      1, PCI_AF_CTRL_FLR);
		msleep(EEH_PE_RST_HOLD_TIME);
		break;
	case EEH_RESET_DEACTIVATE:
		eeh_ops->write_config(edev, edev->af_cap + PCI_AF_CTRL, 1, 0);
		msleep(EEH_PE_RST_SETTLE_TIME);
		break;
	}

	return 0;
}

static int pnv_eeh_reset_vf_pe(struct eeh_pe *pe, int option)
{
	struct eeh_dev *edev;
	struct pci_dn *pdn;
	int ret;

	/* The VF PE should have only one child device */
	edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry);
	pdn = eeh_dev_to_pdn(edev);
	if (!pdn)
		return -ENXIO;

	ret = pnv_eeh_do_flr(pdn, option);
	if (!ret)
		return ret;

	return pnv_eeh_do_af_flr(pdn, option);
}

/**
 * pnv_eeh_reset - Reset the specified PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Do a reset on the indicated PE. For a PCI bus sensitive PE,
 * we need to reset the parent p2p bridge. The PHB has to be
 * reinitialized if the p2p bridge is the root bridge. For a
 * PCI device sensitive PE, we will try to reset the device
 * through FLR. For now, we don't have OPAL APIs to do a HARD
 * reset yet, so all resets are SOFT (HOT) resets.
 */
static int pnv_eeh_reset(struct eeh_pe *pe, int option)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb;
	struct pci_bus *bus;
	int64_t rc;

	/*
	 * For a PHB reset, we always do a complete reset. For those PEs
	 * whose primary bus is derived from the root complex (root bus)
	 * or the root port (usually bus#1), we apply a hot or fundamental
	 * reset on the root port. For other PEs, we always do a hot reset
	 * on the PE's primary bus.
	 *
	 * Here we have a different design from pHyp, which always clears
	 * the frozen state during the PE reset. However, the good idea
	 * here from benh is to keep the frozen state until the PE reset
	 * is completely done (until BAR restore). With the frozen state,
	 * HW drops illegal IO or MMIO access, which can otherwise incur
	 * a recursive frozen PE during the PE reset. The side effect is
	 * that the EEH core has to clear the frozen state explicitly
	 * after BAR restore.
	 */
	if (pe->type & EEH_PE_PHB)
		return pnv_eeh_phb_reset(hose, option);

	/*
	 * The frozen PE might be caused by the PAPR error injection
	 * registers, which are expected to be cleared after hitting
	 * a frozen PE as stated in the hardware spec. Unfortunately,
	 * that's not true on P7IOC. So we have to clear them manually
	 * to avoid recursive EEH errors during recovery.
	 */
	phb = hose->private_data;
	if (phb->model == PNV_PHB_MODEL_P7IOC &&
	    (option == EEH_RESET_HOT ||
	     option == EEH_RESET_FUNDAMENTAL)) {
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PHB_ERROR,
				    OPAL_ASSERT_RESET);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld clearing error injection registers\n",
				__func__, rc);
			return -EIO;
		}
	}

	if (pe->type & EEH_PE_VF)
		return pnv_eeh_reset_vf_pe(pe, option);

	bus = eeh_pe_bus_get(pe);
	if (!bus) {
		pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
		       __func__, pe->phb->global_number, pe->addr);
		return -EIO;
	}

	if (pci_is_root_bus(bus))
		return pnv_eeh_root_reset(hose, option);

	/*
	 * For hot resets, try to use the generic PCI error recovery reset
	 * functions. These correctly handle the case where the secondary
	 * bus is behind a hotplug slot and will use the slot-provided
	 * reset methods to prevent spurious hotplug events during the reset.
	 *
	 * Fundamental resets need to be handled internally to EEH since the
	 * PCI core doesn't really have a concept of a fundamental reset,
	 * mainly because there's no standard way to generate one. Only a
	 * few devices require an FRESET so it should be fine.
	 */
	if (option != EEH_RESET_FUNDAMENTAL) {
		/*
		 * NB: Skiboot and pnv_eeh_bridge_reset() also no-op the
		 * de-assert step. It's like the OPAL reset API was
		 * poorly designed or something...
		 */
		if (option == EEH_RESET_DEACTIVATE)
			return 0;

		rc = pci_bus_error_reset(bus->self);
		if (!rc)
			return 0;
	}

	/* otherwise, use the generic bridge reset. this might call into FW */
	if (pci_is_root_bus(bus->parent))
		return pnv_eeh_root_reset(hose, option);
	return pnv_eeh_bridge_reset(bus->self, option);
}

/**
 * pnv_eeh_get_log - Retrieve error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 * @drv_log: driver log to be combined with retrieved error log
 * @len: length of driver log
 *
 * Retrieve the temporary or permanent error from the PE.
 */
static int pnv_eeh_get_log(struct eeh_pe *pe, int severity,
			   char *drv_log, unsigned long len)
{
	if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
		pnv_pci_dump_phb_diag_data(pe->phb, pe->data);

	return 0;
}

/**
 * pnv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 * The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE can be
 * recovered again.
 */
static int pnv_eeh_configure_bridge(struct eeh_pe *pe)
{
	return 0;
}

/**
 * pnv_eeh_err_inject - Inject specified error to the indicated PE
 * @pe: the indicated PE
 * @type: error type
 * @func: specific error type
 * @addr: address
 * @mask: address mask
 *
 * The routine is called to inject the specified error, which is
 * determined by @type and @func, to the indicated PE for
 * testing purposes.
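 *
 * Return: 0 on success, -ERANGE for an invalid type or function, -ENXIO
 * if the firmware doesn't support error injection, or -EIO on failure.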
 */
static int pnv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
			      unsigned long addr, unsigned long mask)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb = hose->private_data;
	s64 rc;

	if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR &&
	    type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) {
		pr_warn("%s: Invalid error type %d\n",
			__func__, type);
		return -ERANGE;
	}

	if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR ||
	    func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) {
		pr_warn("%s: Invalid error function %d\n",
			__func__, func);
		return -ERANGE;
	}

	/* Does the firmware support error injection? */
	if (!opal_check_token(OPAL_PCI_ERR_INJECT)) {
		pr_warn("%s: Firmware doesn't support error injection\n",
			__func__);
		return -ENXIO;
	}

	/* Do error injection */
	rc = opal_pci_err_inject(phb->opal_id, pe->addr,
				 type, func, addr, mask);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld injecting error "
			"%d-%d to PHB#%x-PE#%x\n",
			__func__, rc, type, func,
			hose->global_number, pe->addr);
		return -EIO;
	}

	return 0;
}

static inline bool pnv_eeh_cfg_blocked(struct pci_dn *pdn)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);

	if (!edev || !edev->pe)
		return false;

	/*
	 * We will issue an FLR or AF FLR to all VFs, which are contained
	 * in the VF PE. That relies on the EEH PCI config accessors, so
	 * we can't block them during that window.
	 */
	if (edev->physfn && (edev->pe->state & EEH_PE_RESET))
		return false;

	if (edev->pe->state & EEH_PE_CFG_BLOCKED)
		return true;

	return false;
}

static int pnv_eeh_read_config(struct eeh_dev *edev,
			       int where, int size, u32 *val)
{
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);

	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (pnv_eeh_cfg_blocked(pdn)) {
		*val = 0xFFFFFFFF;
		return PCIBIOS_SET_FAILED;
	}

	return pnv_pci_cfg_read(pdn, where, size, val);
}

static int pnv_eeh_write_config(struct eeh_dev *edev,
				int where, int size, u32 val)
{
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);

	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (pnv_eeh_cfg_blocked(pdn))
		return PCIBIOS_SET_FAILED;

	return pnv_pci_cfg_write(pdn, where, size, val);
}

static void pnv_eeh_dump_hub_diag_common(struct OpalIoP7IOCErrorData *data)
{
	/* GEM */
	if (data->gemXfir || data->gemRfir ||
	    data->gemRirqfir || data->gemMask || data->gemRwof)
		pr_info(" GEM: %016llx %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->gemXfir),
			be64_to_cpu(data->gemRfir),
			be64_to_cpu(data->gemRirqfir),
			be64_to_cpu(data->gemMask),
			be64_to_cpu(data->gemRwof));

	/* LEM */
	if (data->lemFir || data->lemErrMask ||
	    data->lemAction0 || data->lemAction1 || data->lemWof)
		pr_info(" LEM: %016llx %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrMask),
			be64_to_cpu(data->lemAction0),
			be64_to_cpu(data->lemAction1),
			be64_to_cpu(data->lemWof));
}

static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose)
{
	struct pnv_phb *phb = hose->private_data;
	struct OpalIoP7IOCErrorData *data =
		(struct OpalIoP7IOCErrorData *)phb->diag_data;
	long rc;

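	/* Fetch the hub diag-data from OPAL into the PHB's preallocated buffer */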
	rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n",
			__func__, phb->hub_id, rc);
		return;
	}

	switch (be16_to_cpu(data->type)) {
	case OPAL_P7IOC_DIAG_TYPE_RGC:
		pr_info("P7IOC diag-data for RGC\n\n");
		pnv_eeh_dump_hub_diag_common(data);
		if (data->rgc.rgcStatus || data->rgc.rgcLdcp)
			pr_info(" RGC: %016llx %016llx\n",
				be64_to_cpu(data->rgc.rgcStatus),
				be64_to_cpu(data->rgc.rgcLdcp));
		break;
	case OPAL_P7IOC_DIAG_TYPE_BI:
		pr_info("P7IOC diag-data for BI %s\n\n",
			data->bi.biDownbound ? "Downbound" : "Upbound");
		pnv_eeh_dump_hub_diag_common(data);
		if (data->bi.biLdcp0 || data->bi.biLdcp1 ||
		    data->bi.biLdcp2 || data->bi.biFenceStatus)
			pr_info(" BI: %016llx %016llx %016llx %016llx\n",
				be64_to_cpu(data->bi.biLdcp0),
				be64_to_cpu(data->bi.biLdcp1),
				be64_to_cpu(data->bi.biLdcp2),
				be64_to_cpu(data->bi.biFenceStatus));
		break;
	case OPAL_P7IOC_DIAG_TYPE_CI:
		pr_info("P7IOC diag-data for CI Port %d\n\n",
			data->ci.ciPort);
		pnv_eeh_dump_hub_diag_common(data);
		if (data->ci.ciPortStatus || data->ci.ciPortLdcp)
			pr_info(" CI: %016llx %016llx\n",
				be64_to_cpu(data->ci.ciPortStatus),
				be64_to_cpu(data->ci.ciPortLdcp));
		break;
	case OPAL_P7IOC_DIAG_TYPE_MISC:
		pr_info("P7IOC diag-data for MISC\n\n");
		pnv_eeh_dump_hub_diag_common(data);
		break;
	case OPAL_P7IOC_DIAG_TYPE_I2C:
		pr_info("P7IOC diag-data for I2C\n\n");
		pnv_eeh_dump_hub_diag_common(data);
		break;
	default:
		pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n",
			__func__, phb->hub_id, data->type);
	}
}

static int pnv_eeh_get_pe(struct pci_controller *hose,
			  u16 pe_no, struct eeh_pe **pe)
{
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pnv_pe;
	struct eeh_pe *dev_pe;

	/*
	 * If the PHB supports compound PEs, fetch the master PE,
	 * because slave PEs are invisible to the EEH core.
	 */
	pnv_pe = &phb->ioda.pe_array[pe_no];
	if (pnv_pe->flags & PNV_IODA_PE_SLAVE) {
		pnv_pe = pnv_pe->master;
		WARN_ON(!pnv_pe ||
			!(pnv_pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pnv_pe->pe_number;
	}

	/* Find the PE according to PE# */
	dev_pe = eeh_pe_get(hose, pe_no, 0);
	if (!dev_pe)
		return -EEXIST;

	/* Freeze the (compound) PE */
	*pe = dev_pe;
	if (!(dev_pe->state & EEH_PE_ISOLATED))
		phb->freeze_pe(phb, pe_no);

	/*
	 * At this point, we're sure the (compound) PE should
	 * have been frozen. However, we still need to poke
	 * upward until we hit the topmost frozen PE.
	 */
	dev_pe = dev_pe->parent;
	while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) {
		int ret;
		ret = eeh_ops->get_state(dev_pe, NULL);
		if (ret <= 0 || eeh_state_active(ret)) {
			dev_pe = dev_pe->parent;
			continue;
		}

		/* Frozen parent PE */
		*pe = dev_pe;
		if (!(dev_pe->state & EEH_PE_ISOLATED))
			phb->freeze_pe(phb, dev_pe->addr);

		/* Next one */
		dev_pe = dev_pe->parent;
	}

	return 0;
}

/**
 * pnv_eeh_next_error - Retrieve next EEH error to handle
 * @pe: Affected PE
 *
 * The function is expected to be called by the EEH core when it gets
 * a special EEH event (without a bound PE). The function calls the
 * OPAL APIs for the next error to handle. Informational errors are
 * handled internally by the platform. However, dead IOC, dead PHB,
 * fenced PHB and frozen PE errors should eventually be handled by
 * the EEH core.
 */
static int pnv_eeh_next_error(struct eeh_pe **pe)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct eeh_pe *phb_pe, *parent_pe;
	__be64 frozen_pe_no;
	__be16 err_type, severity;
	long rc;
	int state, ret = EEH_NEXT_ERR_NONE;

	/*
	 * While running here, it's safe to purge the event queue. The
	 * event should still be masked.
	 */
	eeh_remove_event(NULL, false);

	list_for_each_entry(hose, &hose_list, list_node) {
		/*
		 * If the subordinate PCI buses of the PHB have been
		 * removed or are already under error recovery, we
		 * needn't take care of them any more.
		 */
		phb = hose->private_data;
		phb_pe = eeh_phb_pe_get(hose);
		if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED))
			continue;

		rc = opal_pci_next_error(phb->opal_id,
					 &frozen_pe_no, &err_type, &severity);
		if (rc != OPAL_SUCCESS) {
			pr_devel("%s: Invalid return value on "
				 "PHB#%x (0x%lx) from opal_pci_next_error",
				 __func__, hose->global_number, rc);
			continue;
		}

		/* If the PHB doesn't have an error, stop processing */
		if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR ||
		    be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) {
			pr_devel("%s: No error found on PHB#%x\n",
				 __func__, hose->global_number);
			continue;
		}

		/*
		 * Process the error. When multiple errors are present
		 * on a specific PHB, we expect the one with the highest
		 * priority to be reported.
		 */
		pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
			 __func__, be16_to_cpu(err_type),
			 be16_to_cpu(severity), be64_to_cpu(frozen_pe_no),
			 hose->global_number);
		switch (be16_to_cpu(err_type)) {
		case OPAL_EEH_IOC_ERROR:
			if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) {
				pr_err("EEH: dead IOC detected\n");
				ret = EEH_NEXT_ERR_DEAD_IOC;
			} else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
				pr_info("EEH: IOC informative error "
					"detected\n");
				pnv_eeh_get_and_dump_hub_diag(hose);
				ret = EEH_NEXT_ERR_NONE;
			}

			break;
		case OPAL_EEH_PHB_ERROR:
			if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) {
				*pe = phb_pe;
				pr_err("EEH: dead PHB#%x detected, "
				       "location: %s\n",
				       hose->global_number,
				       eeh_pe_loc_get(phb_pe));
				ret = EEH_NEXT_ERR_DEAD_PHB;
			} else if (be16_to_cpu(severity) ==
				   OPAL_EEH_SEV_PHB_FENCED) {
				*pe = phb_pe;
				pr_err("EEH: Fenced PHB#%x detected, "
				       "location: %s\n",
				       hose->global_number,
				       eeh_pe_loc_get(phb_pe));
				ret = EEH_NEXT_ERR_FENCED_PHB;
			} else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
				pr_info("EEH: PHB#%x informative error "
					"detected, location: %s\n",
					hose->global_number,
					eeh_pe_loc_get(phb_pe));
				pnv_eeh_get_phb_diag(phb_pe);
				pnv_pci_dump_phb_diag_data(hose, phb_pe->data);
				ret = EEH_NEXT_ERR_NONE;
			}

			break;
		case OPAL_EEH_PE_ERROR:
			/*
			 * If we can't find the corresponding PE, we
			 * just try to unfreeze.
			 */
			if (pnv_eeh_get_pe(hose,
				be64_to_cpu(frozen_pe_no), pe)) {
				pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
					hose->global_number, be64_to_cpu(frozen_pe_no));
				pr_info("EEH: PHB location: %s\n",
					eeh_pe_loc_get(phb_pe));

				/* Dump PHB diag-data */
				rc = opal_pci_get_phb_diag_data2(phb->opal_id,
					phb->diag_data, phb->diag_data_size);
				if (rc == OPAL_SUCCESS)
					pnv_pci_dump_phb_diag_data(hose,
							phb->diag_data);

				/* Try our best to clear it */
				opal_pci_eeh_freeze_clear(phb->opal_id,
					be64_to_cpu(frozen_pe_no),
					OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
				ret = EEH_NEXT_ERR_NONE;
			} else if ((*pe)->state & EEH_PE_ISOLATED ||
				   eeh_pe_passed(*pe)) {
				ret = EEH_NEXT_ERR_NONE;
			} else {
				pr_err("EEH: Frozen PE#%x "
				       "on PHB#%x detected\n",
				       (*pe)->addr,
				       (*pe)->phb->global_number);
				pr_err("EEH: PE location: %s, "
				       "PHB location: %s\n",
				       eeh_pe_loc_get(*pe),
				       eeh_pe_loc_get(phb_pe));
				ret = EEH_NEXT_ERR_FROZEN_PE;
			}

			break;
		default:
			pr_warn("%s: Unexpected error type %d\n",
				__func__, be16_to_cpu(err_type));
		}

		/*
		 * The EEH core will try to recover from a fenced PHB or
		 * a frozen PE. For a frozen PE, the EEH core enables the
		 * I/O path before collecting logs, but that disturbs the
		 * error site. So we have to dump the log in advance here.
		 */
		if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
		     ret == EEH_NEXT_ERR_FENCED_PHB) &&
		    !((*pe)->state & EEH_PE_ISOLATED)) {
			eeh_pe_mark_isolated(*pe);
			pnv_eeh_get_phb_diag(*pe);

			if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
				pnv_pci_dump_phb_diag_data((*pe)->phb,
							   (*pe)->data);
		}

		/*
		 * We probably have a frozen parent PE out there, and
		 * we need to handle the frozen parent PE first.
		 */
		if (ret == EEH_NEXT_ERR_FROZEN_PE) {
			parent_pe = (*pe)->parent;
			while (parent_pe) {
				/* Hit the ceiling? */
				if (parent_pe->type & EEH_PE_PHB)
					break;

				/* Frozen parent PE? */
				state = eeh_ops->get_state(parent_pe, NULL);
				if (state > 0 && !eeh_state_active(state))
					*pe = parent_pe;

				/* Next parent level */
				parent_pe = parent_pe->parent;
			}

			/* We possibly migrate to another PE */
			eeh_pe_mark_isolated(*pe);
		}

		/*
		 * If we have no errors on the specific PHB or only
		 * informative errors there, we continue poking it.
		 * Otherwise, we need actions to be taken by the
		 * upper layer.
		 */
		if (ret > EEH_NEXT_ERR_INF)
			break;
	}

	/* Unmask the event */
	if (ret == EEH_NEXT_ERR_NONE && eeh_enabled())
		enable_irq(eeh_event_irq);

	return ret;
}

static int pnv_eeh_restore_config(struct eeh_dev *edev)
{
	struct pnv_phb *phb;
	s64 ret = 0;

	if (!edev)
		return -EEXIST;

	if (edev->physfn)
		return 0;

	phb = edev->controller->private_data;
	ret = opal_pci_reinit(phb->opal_id,
			      OPAL_REINIT_PCI_DEV, edev->bdfn);

	if (ret) {
		pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
			__func__, edev->bdfn, ret);
		return -EIO;
	}

	return ret;
}

static struct eeh_ops pnv_eeh_ops = {
	.name			= "powernv",
	.init			= pnv_eeh_init,
	.probe			= pnv_eeh_probe,
	.set_option		= pnv_eeh_set_option,
	.get_state		= pnv_eeh_get_state,
	.reset			= pnv_eeh_reset,
	.get_log		= pnv_eeh_get_log,
	.configure_bridge	= pnv_eeh_configure_bridge,
	.err_inject		= pnv_eeh_err_inject,
	.read_config		= pnv_eeh_read_config,
	.write_config		= pnv_eeh_write_config,
	.next_error		= pnv_eeh_next_error,
	.restore_config		= pnv_eeh_restore_config,
	.notify_resume		= NULL
};

#ifdef CONFIG_PCI_IOV
static void pnv_pci_fixup_vf_mps(struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	int parent_mps;

	if (!pdev->is_virtfn)
		return;

	/* Synchronize MPS for VF and PF */
	parent_mps = pcie_get_mps(pdev->physfn);
	if ((128 << pdev->pcie_mpss) >= parent_mps)
		pcie_set_mps(pdev, parent_mps);
	pdn->mps = pcie_get_mps(pdev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pnv_pci_fixup_vf_mps);
#endif /* CONFIG_PCI_IOV */

/**
 * eeh_powernv_init - Register platform dependent EEH operations
 *
 * EEH initialization on the powernv platform. This function should be
 * called before any EEH related functions.
 */
static int __init eeh_powernv_init(void)
{
	int ret = -EINVAL;

	ret = eeh_ops_register(&pnv_eeh_ops);
	if (!ret)
		pr_info("EEH: PowerNV platform initialized\n");
	else
		pr_info("EEH: Failed to initialize PowerNV platform (%d)\n", ret);

	return ret;
}
machine_early_initcall(powernv, eeh_powernv_init);