// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerNV Platform dependent EEH operations
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
 */

#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/opal.h>
#include <asm/ppc-pci.h>
#include <asm/pnv-pci.h>

#include "powernv.h"
#include "pci.h"
#include "../../../../drivers/pci/pci.h"

static int eeh_event_irq = -EINVAL;

void pnv_pcibios_bus_add_device(struct pci_dev *pdev)
{
	dev_dbg(&pdev->dev, "EEH: Setting up device\n");
	eeh_probe_device(pdev);
}

static int pnv_eeh_init(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	int max_diag_size = PNV_PCI_DIAG_BUF_SIZE;

	if (!firmware_has_feature(FW_FEATURE_OPAL)) {
		pr_warn("%s: OPAL is required!\n", __func__);
		return -EINVAL;
	}

	/* Set probe mode */
	eeh_add_flag(EEH_PROBE_MODE_DEV);

	/*
	 * P7IOC blocks PCI config access to frozen PEs, but PHB3
	 * doesn't do that. So we have to selectively enable I/O
	 * prior to collecting the error log.
	 */
	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;

		if (phb->model == PNV_PHB_MODEL_P7IOC)
			eeh_add_flag(EEH_ENABLE_IO_FOR_LOG);

		if (phb->diag_data_size > max_diag_size)
			max_diag_size = phb->diag_data_size;

		/*
		 * PE#0 should be regarded as valid by the EEH core
		 * if it's not the reserved one. Currently, the
		 * reserved PE is PE#255 on PHB3 and PE#127 on P7IOC,
		 * so PE#0 is valid on both of them.
		 */
		if (phb->ioda.reserved_pe_idx != 0)
			eeh_add_flag(EEH_VALID_PE_ZERO);

		break;
	}

	eeh_set_pe_aux_size(max_diag_size);
	ppc_md.pcibios_bus_add_device = pnv_pcibios_bus_add_device;

	return 0;
}

static irqreturn_t pnv_eeh_event(int irq, void *data)
{
	/*
	 * We simply send a special EEH event if EEH has been
	 * enabled. We don't care about EEH events until we've
	 * finished processing the outstanding ones. Event processing
	 * gets unmasked in next_error() if EEH is enabled.
	 */
	disable_irq_nosync(irq);

	if (eeh_enabled())
		eeh_send_failure_event(NULL);

	return IRQ_HANDLED;
}

#ifdef CONFIG_DEBUG_FS
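/*
 * Error injection via debugfs: a write of "pe_no:type:func:addr:mask"
 * (all fields in hex) is parsed and forwarded to eeh_ops->err_inject()
 * for the PE identified by pe_no on this PHB. For example, assuming the
 * PHB debugfs directory has the usual PCI<nnnn> form:
 *
 *   echo "0:0:0:0:0" > /sys/kernel/debug/powerpc/PCI0001/err_injct
 *
 * (The path and values above are only an illustration.)
 */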
static ssize_t pnv_eeh_ei_write(struct file *filp,
				const char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct pci_controller *hose = filp->private_data;
	struct eeh_pe *pe;
	int pe_no, type, func;
	unsigned long addr, mask;
	char buf[50];
	int ret;

	if (!eeh_ops || !eeh_ops->err_inject)
		return -ENXIO;

	/* Copy over argument buffer */
	ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
	if (!ret)
		return -EFAULT;

	/* Retrieve parameters */
	ret = sscanf(buf, "%x:%x:%x:%lx:%lx",
		     &pe_no, &type, &func, &addr, &mask);
	if (ret != 5)
		return -EINVAL;

	/* Retrieve PE */
	pe = eeh_pe_get(hose, pe_no, 0);
	if (!pe)
		return -ENODEV;

	/* Do error injection */
	ret = eeh_ops->err_inject(pe, type, func, addr, mask);
	return ret < 0 ? ret : count;
}

static const struct file_operations pnv_eeh_ei_fops = {
	.open	= simple_open,
	.llseek	= no_llseek,
	.write	= pnv_eeh_ei_write,
};

static int pnv_eeh_dbgfs_set(void *data, int offset, u64 val)
{
	struct pci_controller *hose = data;
	struct pnv_phb *phb = hose->private_data;

	out_be64(phb->regs + offset, val);
	return 0;
}

static int pnv_eeh_dbgfs_get(void *data, int offset, u64 *val)
{
	struct pci_controller *hose = data;
	struct pnv_phb *phb = hose->private_data;

	*val = in_be64(phb->regs + offset);
	return 0;
}

#define PNV_EEH_DBGFS_ENTRY(name, reg)				\
static int pnv_eeh_dbgfs_set_##name(void *data, u64 val)	\
{								\
	return pnv_eeh_dbgfs_set(data, reg, val);		\
}								\
								\
static int pnv_eeh_dbgfs_get_##name(void *data, u64 *val)	\
{								\
	return pnv_eeh_dbgfs_get(data, reg, val);		\
}								\
								\
DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_dbgfs_ops_##name,		\
			pnv_eeh_dbgfs_get_##name,		\
			pnv_eeh_dbgfs_set_##name,		\
			"0x%llx\n")

PNV_EEH_DBGFS_ENTRY(outb, 0xD10);
PNV_EEH_DBGFS_ENTRY(inbA, 0xD90);
PNV_EEH_DBGFS_ENTRY(inbB, 0xE10);

#endif /* CONFIG_DEBUG_FS */

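/*
 * Propagate the global EEH state into each PHB: when EEH is enabled, the
 * PNV_PHB_FLAG_EEH flag tells the low-level config accessors to rely on
 * the EEH machinery instead of clearing frozen PEs themselves.
 */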
void pnv_eeh_enable_phbs(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;

	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;
		/*
		 * If EEH is enabled, we're going to rely on that.
		 * Otherwise, we fall back to the conventional
		 * mechanism of clearing frozen PEs during PCI
		 * config access.
		 */
		if (eeh_enabled())
			phb->flags |= PNV_PHB_FLAG_EEH;
		else
			phb->flags &= ~PNV_PHB_FLAG_EEH;
	}
}

/**
 * pnv_eeh_post_init - EEH platform dependent post initialization
 *
 * EEH platform dependent post initialization on powernv. When
 * the function is called, the EEH PEs and devices should have
 * been built. Once the I/O cache has been built as well, EEH is
 * ready to provide service.
 */
int pnv_eeh_post_init(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	int ret = 0;

	eeh_show_enabled();

	/* Register OPAL event notifier */
	eeh_event_irq = opal_event_request(ilog2(OPAL_EVENT_PCI_ERROR));
	if (eeh_event_irq < 0) {
		pr_err("%s: Can't register OPAL event interrupt (%d)\n",
		       __func__, eeh_event_irq);
		return eeh_event_irq;
	}

	ret = request_irq(eeh_event_irq, pnv_eeh_event,
			  IRQ_TYPE_LEVEL_HIGH, "opal-eeh", NULL);
	if (ret < 0) {
		irq_dispose_mapping(eeh_event_irq);
		pr_err("%s: Can't request OPAL event interrupt (%d)\n",
		       __func__, eeh_event_irq);
		return ret;
	}

	if (!eeh_enabled())
		disable_irq(eeh_event_irq);

	pnv_eeh_enable_phbs();

	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;

		/* Create debugfs entries */
#ifdef CONFIG_DEBUG_FS
		if (phb->has_dbgfs || !phb->dbgfs)
			continue;

		phb->has_dbgfs = 1;
		debugfs_create_file("err_injct", 0200,
				    phb->dbgfs, hose,
				    &pnv_eeh_ei_fops);

		debugfs_create_file("err_injct_outbound", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_dbgfs_ops_outb);
		debugfs_create_file("err_injct_inboundA", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_dbgfs_ops_inbA);
		debugfs_create_file("err_injct_inboundB", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_dbgfs_ops_inbB);
#endif /* CONFIG_DEBUG_FS */
	}

	return ret;
}

static int pnv_eeh_find_cap(struct pci_dn *pdn, int cap)
{
	int pos = PCI_CAPABILITY_LIST;
	int cnt = 48;   /* Maximal number of capabilities */
	u32 status, id;

	if (!pdn)
		return 0;

	/* Check if the device supports capabilities */
	pnv_pci_cfg_read(pdn, PCI_STATUS, 2, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	while (cnt--) {
		pnv_pci_cfg_read(pdn, pos, 1, &pos);
		if (pos < 0x40)
			break;

		pos &= ~3;
		pnv_pci_cfg_read(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
		if (id == 0xff)
			break;

		/* Found */
		if (id == cap)
			return pos;

		/* Next one */
		pos += PCI_CAP_LIST_NEXT;
	}

	return 0;
}

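/*
 * Like pnv_eeh_find_cap(), but walk the PCIe extended capability list,
 * which starts at config offset 256 and uses 32-bit headers. The walk is
 * bounded so that a corrupted list can't loop forever.
 */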
static int pnv_eeh_find_ecap(struct pci_dn *pdn, int cap)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 header;
	int pos = 256, ttl = (4096 - 256) / 8;

	if (!edev || !edev->pcie_cap)
		return 0;
	if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
		return 0;
	else if (!header)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 256)
			break;

		if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}

/**
 * pnv_eeh_probe - Do probe on PCI device
 * @pdev: pci_dev to probe
 *
 * Create, or find the existing, eeh_dev for this pci_dev.
 */
static struct eeh_dev *pnv_eeh_probe(struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pci_controller *hose = pdn->phb;
	struct pnv_phb *phb = hose->private_data;
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	uint32_t pcie_flags;
	int ret;
	int config_addr = (pdn->busno << 8) | (pdn->devfn);

	/*
	 * When probing the root bridge, which doesn't have any
	 * subordinate PCI devices, we don't have an OF node for
	 * it, so it's not reasonable to continue the probing.
	 */
	if (!edev || edev->pe)
		return NULL;

	/* already configured? */
	if (edev->pdev) {
		pr_debug("%s: found existing edev for %04x:%02x:%02x.%01x\n",
			 __func__, hose->global_number, config_addr >> 8,
			 PCI_SLOT(config_addr), PCI_FUNC(config_addr));
		return edev;
	}

	/* Skip for PCI-ISA bridge */
	if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
		return NULL;

	eeh_edev_dbg(edev, "Probing device\n");

	/* Initialize eeh device */
	edev->class_code = pdn->class_code;
	edev->mode &= 0xFFFFFF00;
	edev->pcix_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
	edev->pcie_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
	edev->af_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_AF);
	edev->aer_cap = pnv_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
	if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
		edev->mode |= EEH_DEV_BRIDGE;
		if (edev->pcie_cap) {
			pnv_pci_cfg_read(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
					 2, &pcie_flags);
			pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
			if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
				edev->mode |= EEH_DEV_ROOT_PORT;
			else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
				edev->mode |= EEH_DEV_DS_PORT;
		}
	}

	edev->pe_config_addr = phb->ioda.pe_rmap[config_addr];

	/* Create PE */
	ret = eeh_add_to_parent_pe(edev);
	if (ret) {
		eeh_edev_warn(edev, "Failed to add device to PE (code %d)\n", ret);
		return NULL;
	}

	/*
	 * If the PE contains any one of the following adapters, the
	 * PCI config space can't be accessed when dumping the EEH log.
	 * Otherwise, we will run into a fenced PHB caused by a shortage
	 * of outbound credits in the adapter. The PCI config access
	 * should be blocked until PE reset. MMIO accesses are certainly
	 * dropped by hardware. In order to drop PCI config requests,
	 * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which
	 * will be checked in the backend for PE state retrieval. If
	 * the PE becomes frozen for the first time and the flag has
	 * been set for the PE, we will set EEH_PE_CFG_BLOCKED for
	 * that PE to block its config space.
	 *
	 * Broadcom BCM5718 2-port NICs (14e4:1656)
	 * Broadcom Austin 4-port NICs (14e4:1657)
	 * Broadcom Shiner 4-port 1G NICs (14e4:168a)
	 * Broadcom Shiner 2-port 10G NICs (14e4:168e)
	 */
	if ((pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
	     pdn->device_id == 0x1656) ||
	    (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
	     pdn->device_id == 0x1657) ||
	    (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
	     pdn->device_id == 0x168a) ||
	    (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
	     pdn->device_id == 0x168e))
		edev->pe->state |= EEH_PE_CFG_RESTRICTED;

	/*
	 * Cache the PE primary bus, which can't be fetched when
	 * full hotplug is in progress. In that case, all child
	 * PCI devices of the PE are expected to be removed prior
	 * to the PE reset.
	 */
	if (!(edev->pe->state & EEH_PE_PRI_BUS)) {
		edev->pe->bus = pci_find_bus(hose->global_number,
					     pdn->busno);
		if (edev->pe->bus)
			edev->pe->state |= EEH_PE_PRI_BUS;
	}

	/*
	 * Enable EEH explicitly so that we will do the EEH check
	 * while accessing I/O stuff
	 */
	if (!eeh_has_flag(EEH_ENABLED)) {
		enable_irq(eeh_event_irq);
		pnv_eeh_enable_phbs();
		eeh_add_flag(EEH_ENABLED);
	}

	/* Save memory bars */
	eeh_save_bars(edev);

	eeh_edev_dbg(edev, "EEH enabled on device\n");

	return edev;
}

/**
 * pnv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @pe: EEH PE
 * @option: operation to be issued
 *
 * The function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
 */
static int pnv_eeh_set_option(struct eeh_pe *pe, int option)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb = hose->private_data;
	bool freeze_pe = false;
	int opt;
	s64 rc;

	switch (option) {
	case EEH_OPT_DISABLE:
		return -EPERM;
	case EEH_OPT_ENABLE:
		return 0;
	case EEH_OPT_THAW_MMIO:
		opt = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO;
		break;
	case EEH_OPT_THAW_DMA:
		opt = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA;
		break;
	case EEH_OPT_FREEZE_PE:
		freeze_pe = true;
		opt = OPAL_EEH_ACTION_SET_FREEZE_ALL;
		break;
	default:
		pr_warn("%s: Invalid option %d\n", __func__, option);
		return -EINVAL;
	}

	/* Freeze master and slave PEs if PHB supports compound PEs */
	if (freeze_pe) {
		if (phb->freeze_pe) {
			phb->freeze_pe(phb, pe->addr);
			return 0;
		}

		rc = opal_pci_eeh_freeze_set(phb->opal_id, pe->addr, opt);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe->addr);
			return -EIO;
		}

		return 0;
	}

	/* Unfreeze master and slave PEs if PHB supports compound PEs */
	if (phb->unfreeze_pe)
		return phb->unfreeze_pe(phb, pe->addr, opt);

	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe->addr, opt);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld enabling %d for PHB#%x-PE#%x\n",
			__func__, rc, option, phb->hose->global_number,
			pe->addr);
		return -EIO;
	}

	return 0;
}

static void pnv_eeh_get_phb_diag(struct eeh_pe *pe)
{
	struct pnv_phb *phb = pe->phb->private_data;
	s64 rc;

	rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data,
					 phb->diag_data_size);
	if (rc != OPAL_SUCCESS)
		pr_warn("%s: Failure %lld getting PHB#%x diag-data\n",
			__func__, rc, pe->phb->global_number);
}

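/*
 * Retrieve the state of the PHB (top-level) PE. Anything other than a
 * PHB-level error is reported as fully active; on the first detection of
 * a PHB error the PE is marked isolated and the diag-data is captured.
 */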
static int pnv_eeh_get_phb_state(struct eeh_pe *pe)
{
	struct pnv_phb *phb = pe->phb->private_data;
	u8 fstate = 0;
	__be16 pcierr = 0;
	s64 rc;
	int result = 0;

	rc = opal_pci_eeh_freeze_status(phb->opal_id,
					pe->addr,
					&fstate,
					&pcierr,
					NULL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld getting PHB#%x state\n",
			__func__, rc, phb->hose->global_number);
		return EEH_STATE_NOT_SUPPORT;
	}

	/*
	 * Check the PHB state. If the PHB is frozen for the
	 * first time, dump the PHB diag-data.
	 */
	if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) {
		result = (EEH_STATE_MMIO_ACTIVE |
			  EEH_STATE_DMA_ACTIVE |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
	} else if (!(pe->state & EEH_PE_ISOLATED)) {
		eeh_pe_mark_isolated(pe);
		pnv_eeh_get_phb_diag(pe);

		if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
			pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
	}

	return result;
}

static int pnv_eeh_get_pe_state(struct eeh_pe *pe)
{
	struct pnv_phb *phb = pe->phb->private_data;
	u8 fstate = 0;
	__be16 pcierr = 0;
	s64 rc;
	int result;

	/*
	 * We don't clobber the hardware frozen state until the PE
	 * reset is completed. In order to keep the EEH core moving
	 * forward, we have to return an operational state during
	 * PE reset.
	 */
	if (pe->state & EEH_PE_RESET) {
		result = (EEH_STATE_MMIO_ACTIVE |
			  EEH_STATE_DMA_ACTIVE |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
		return result;
	}

	/*
	 * Fetch the PE state from hardware. If the PHB
	 * supports compound PEs, let it handle that.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe->addr);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe->addr,
						&fstate,
						&pcierr,
						NULL);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n",
				__func__, rc, phb->hose->global_number,
				pe->addr);
			return EEH_STATE_NOT_SUPPORT;
		}
	}

	/* Figure out state */
	switch (fstate) {
	case OPAL_EEH_STOPPED_NOT_FROZEN:
		result = (EEH_STATE_MMIO_ACTIVE |
			  EEH_STATE_DMA_ACTIVE |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
		break;
	case OPAL_EEH_STOPPED_MMIO_FREEZE:
		result = (EEH_STATE_DMA_ACTIVE |
			  EEH_STATE_DMA_ENABLED);
		break;
	case OPAL_EEH_STOPPED_DMA_FREEZE:
		result = (EEH_STATE_MMIO_ACTIVE |
			  EEH_STATE_MMIO_ENABLED);
		break;
	case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
		result = 0;
		break;
	case OPAL_EEH_STOPPED_RESET:
		result = EEH_STATE_RESET_ACTIVE;
		break;
	case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
		result = EEH_STATE_UNAVAILABLE;
		break;
	case OPAL_EEH_STOPPED_PERM_UNAVAIL:
		result = EEH_STATE_NOT_SUPPORT;
		break;
	default:
		result = EEH_STATE_NOT_SUPPORT;
		pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n",
			__func__, phb->hose->global_number,
			pe->addr, fstate);
	}

	/*
	 * If the PHB supports compound PEs, freeze all
	 * slave PEs for consistency.
	 *
	 * If the PE is switching to the frozen state for the
	 * first time, dump the PHB diag-data.
	 */
	if (!(result & EEH_STATE_NOT_SUPPORT) &&
	    !(result & EEH_STATE_UNAVAILABLE) &&
	    !(result & EEH_STATE_MMIO_ACTIVE) &&
	    !(result & EEH_STATE_DMA_ACTIVE) &&
	    !(pe->state & EEH_PE_ISOLATED)) {
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe->addr);

		eeh_pe_mark_isolated(pe);
		pnv_eeh_get_phb_diag(pe);

		if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
			pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
	}

	return result;
}

/**
 * pnv_eeh_get_state - Retrieve PE state
 * @pe: EEH PE
 * @delay: delay while PE state is temporarily unavailable
 *
 * Retrieve the state of the specified PE. For an IODA-compatible
 * platform, it should be retrieved from the IODA table. Therefore,
 * we prefer passing it down to the hardware implementation to
 * handle it.
 */
static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay)
{
	int ret;

	if (pe->type & EEH_PE_PHB)
		ret = pnv_eeh_get_phb_state(pe);
	else
		ret = pnv_eeh_get_pe_state(pe);

	if (!delay)
		return ret;

	/*
	 * If the PE state is temporarily unavailable,
	 * tell the EEH core to delay for the default
	 * period (1 second).
	 */
	*delay = 0;
	if (ret & EEH_STATE_UNAVAILABLE)
		*delay = 1000;

	return ret;
}

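/*
 * Drive an in-progress OPAL PCI operation to completion: a positive
 * return from opal_pci_poll() is the number of milliseconds to wait
 * before polling again, zero means done, and a negative value is an
 * OPAL error code.
 */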
static s64 pnv_eeh_poll(unsigned long id)
{
	s64 rc = OPAL_HARDWARE;

	while (1) {
		rc = opal_pci_poll(id);
		if (rc <= 0)
			break;

		if (system_state < SYSTEM_RUNNING)
			udelay(1000 * rc);
		else
			msleep(rc);
	}

	return rc;
}

int pnv_eeh_phb_reset(struct pci_controller *hose, int option)
{
	struct pnv_phb *phb = hose->private_data;
	s64 rc = OPAL_HARDWARE;

	pr_debug("%s: Reset PHB#%x, option=%d\n",
		 __func__, hose->global_number, option);

	/* Issue PHB complete reset request */
	if (option == EEH_RESET_FUNDAMENTAL ||
	    option == EEH_RESET_HOT)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PHB_COMPLETE,
				    OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_DEACTIVATE)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PHB_COMPLETE,
				    OPAL_DEASSERT_RESET);
	if (rc < 0)
		goto out;

	/*
	 * Poll the state of the PHB until the request is done
	 * successfully. The PHB reset is usually a PHB complete
	 * reset followed by a hot reset on the root bus. So we
	 * also need the PCI bus settlement delay.
	 */
	if (rc > 0)
		rc = pnv_eeh_poll(phb->opal_id);
	if (option == EEH_RESET_DEACTIVATE) {
		if (system_state < SYSTEM_RUNNING)
			udelay(1000 * EEH_PE_RST_SETTLE_TIME);
		else
			msleep(EEH_PE_RST_SETTLE_TIME);
	}
out:
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}

static int pnv_eeh_root_reset(struct pci_controller *hose, int option)
{
	struct pnv_phb *phb = hose->private_data;
	s64 rc = OPAL_HARDWARE;

	pr_debug("%s: Reset PHB#%x, option=%d\n",
		 __func__, hose->global_number, option);

	/*
	 * During the reset deassert time, we needn't care about
	 * the reset scope because the firmware does nothing for
	 * fundamental or hot reset during the deassert phase.
	 */
	if (option == EEH_RESET_FUNDAMENTAL)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PCI_FUNDAMENTAL,
				    OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_HOT)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PCI_HOT,
				    OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_DEACTIVATE)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PCI_HOT,
				    OPAL_DEASSERT_RESET);
	if (rc < 0)
		goto out;

	/* Poll the state of the PHB until the request is done */
	if (rc > 0)
		rc = pnv_eeh_poll(phb->opal_id);
	if (option == EEH_RESET_DEACTIVATE)
		msleep(EEH_PE_RST_SETTLE_TIME);
out:
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}

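/*
 * Reset the secondary bus from the host side by toggling the Secondary
 * Bus Reset bit in the bridge control register. The surprise-down error
 * is masked in AER (when present) around the reset so the link bounce
 * doesn't get reported as a new error.
 */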
static int __pnv_eeh_bridge_reset(struct pci_dev *dev, int option)
{
	struct pci_dn *pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	int aer = edev ? edev->aer_cap : 0;
	u32 ctrl;

	pr_debug("%s: Secondary Reset PCI bus %04x:%02x with option %d\n",
		 __func__, pci_domain_nr(dev->bus),
		 dev->bus->number, option);

	switch (option) {
	case EEH_RESET_FUNDAMENTAL:
	case EEH_RESET_HOT:
		/* Don't report linkDown event */
		if (aer) {
			eeh_ops->read_config(edev, aer + PCI_ERR_UNCOR_MASK,
					     4, &ctrl);
			ctrl |= PCI_ERR_UNC_SURPDN;
			eeh_ops->write_config(edev, aer + PCI_ERR_UNCOR_MASK,
					      4, ctrl);
		}

		eeh_ops->read_config(edev, PCI_BRIDGE_CONTROL, 2, &ctrl);
		ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
		eeh_ops->write_config(edev, PCI_BRIDGE_CONTROL, 2, ctrl);

		msleep(EEH_PE_RST_HOLD_TIME);
		break;
	case EEH_RESET_DEACTIVATE:
		eeh_ops->read_config(edev, PCI_BRIDGE_CONTROL, 2, &ctrl);
		ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
		eeh_ops->write_config(edev, PCI_BRIDGE_CONTROL, 2, ctrl);

		msleep(EEH_PE_RST_SETTLE_TIME);

		/* Continue reporting linkDown event */
		if (aer) {
			eeh_ops->read_config(edev, aer + PCI_ERR_UNCOR_MASK,
					     4, &ctrl);
			ctrl &= ~PCI_ERR_UNC_SURPDN;
			eeh_ops->write_config(edev, aer + PCI_ERR_UNCOR_MASK,
					      4, ctrl);
		}

		break;
	}

	return 0;
}

static int pnv_eeh_bridge_reset(struct pci_dev *pdev, int option)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct device_node *dn = pci_device_to_OF_node(pdev);
	uint64_t id = PCI_SLOT_ID(phb->opal_id,
				  (pdev->bus->number << 8) | pdev->devfn);
	uint8_t scope;
	int64_t rc;

	/* Hot reset to the bus if firmware cannot handle it */
	if (!dn || !of_get_property(dn, "ibm,reset-by-firmware", NULL))
		return __pnv_eeh_bridge_reset(pdev, option);

	pr_debug("%s: FW reset PCI bus %04x:%02x with option %d\n",
		 __func__, pci_domain_nr(pdev->bus),
		 pdev->bus->number, option);

	switch (option) {
	case EEH_RESET_FUNDAMENTAL:
		scope = OPAL_RESET_PCI_FUNDAMENTAL;
		break;
	case EEH_RESET_HOT:
		scope = OPAL_RESET_PCI_HOT;
		break;
	case EEH_RESET_DEACTIVATE:
		return 0;
	default:
		dev_dbg(&pdev->dev, "%s: Unsupported reset %d\n",
			__func__, option);
		return -EINVAL;
	}

	rc = opal_pci_reset(id, scope, OPAL_ASSERT_RESET);
	if (rc <= OPAL_SUCCESS)
		goto out;

	rc = pnv_eeh_poll(id);
out:
	return (rc == OPAL_SUCCESS) ? 0 : -EIO;
}

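/*
 * Reset the secondary bus of @dev. If @dev sits directly on the root bus,
 * this is done as a PHB-level hot reset (assert then deassert); otherwise
 * the reset is performed through the bridge, by firmware where available.
 */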
void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
{
	struct pci_controller *hose;

	if (pci_is_root_bus(dev->bus)) {
		hose = pci_bus_to_host(dev->bus);
		pnv_eeh_root_reset(hose, EEH_RESET_HOT);
		pnv_eeh_root_reset(hose, EEH_RESET_DEACTIVATE);
	} else {
		pnv_eeh_bridge_reset(dev, EEH_RESET_HOT);
		pnv_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE);
	}
}

static void pnv_eeh_wait_for_pending(struct pci_dn *pdn, const char *type,
				     int pos, u16 mask)
{
	struct eeh_dev *edev = pdn->edev;
	int i, status = 0;

	/* Wait for Transaction Pending bit to be cleared */
	for (i = 0; i < 4; i++) {
		eeh_ops->read_config(edev, pos, 2, &status);
		if (!(status & mask))
			return;

		msleep((1 << i) * 100);
	}

	pr_warn("%s: Pending transaction while issuing %sFLR to %04x:%02x:%02x.%01x\n",
		__func__, type,
		pdn->phb->global_number, pdn->busno,
		PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
}

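/*
 * Reset a function with a PCIe Function Level Reset: on assert, wait for
 * any pending transactions to drain and then set the Initiate FLR bit in
 * the Device Control register; on deactivate, clear it again and let the
 * device settle.
 */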
static int pnv_eeh_do_flr(struct pci_dn *pdn, int option)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 reg = 0;

	if (WARN_ON(!edev->pcie_cap))
		return -ENOTTY;

	eeh_ops->read_config(edev, edev->pcie_cap + PCI_EXP_DEVCAP, 4, &reg);
	if (!(reg & PCI_EXP_DEVCAP_FLR))
		return -ENOTTY;

	switch (option) {
	case EEH_RESET_HOT:
	case EEH_RESET_FUNDAMENTAL:
		pnv_eeh_wait_for_pending(pdn, "",
					 edev->pcie_cap + PCI_EXP_DEVSTA,
					 PCI_EXP_DEVSTA_TRPND);
		eeh_ops->read_config(edev, edev->pcie_cap + PCI_EXP_DEVCTL,
				     4, &reg);
		reg |= PCI_EXP_DEVCTL_BCR_FLR;
		eeh_ops->write_config(edev, edev->pcie_cap + PCI_EXP_DEVCTL,
				      4, reg);
		msleep(EEH_PE_RST_HOLD_TIME);
		break;
	case EEH_RESET_DEACTIVATE:
		eeh_ops->read_config(edev, edev->pcie_cap + PCI_EXP_DEVCTL,
				     4, &reg);
		reg &= ~PCI_EXP_DEVCTL_BCR_FLR;
		eeh_ops->write_config(edev, edev->pcie_cap + PCI_EXP_DEVCTL,
				      4, reg);
		msleep(EEH_PE_RST_SETTLE_TIME);
		break;
	}

	return 0;
}

static int pnv_eeh_do_af_flr(struct pci_dn *pdn, int option)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 cap = 0;

	if (WARN_ON(!edev->af_cap))
		return -ENOTTY;

	eeh_ops->read_config(edev, edev->af_cap + PCI_AF_CAP, 1, &cap);
	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
		return -ENOTTY;

	switch (option) {
	case EEH_RESET_HOT:
	case EEH_RESET_FUNDAMENTAL:
		/*
		 * Wait for the Transaction Pending bit to clear. A
		 * word-aligned test is used, so we use the control
		 * offset rather than the status one and shift the
		 * test bit to match.
		 */
		pnv_eeh_wait_for_pending(pdn, "AF",
					 edev->af_cap + PCI_AF_CTRL,
					 PCI_AF_STATUS_TP << 8);
		eeh_ops->write_config(edev, edev->af_cap + PCI_AF_CTRL,
				      1, PCI_AF_CTRL_FLR);
		msleep(EEH_PE_RST_HOLD_TIME);
		break;
	case EEH_RESET_DEACTIVATE:
		eeh_ops->write_config(edev, edev->af_cap + PCI_AF_CTRL, 1, 0);
		msleep(EEH_PE_RST_SETTLE_TIME);
		break;
	}

	return 0;
}

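/*
 * Reset a VF PE. A VF PE contains exactly one function, so the reset is
 * simply an FLR on that function, falling back to an AF FLR when the
 * PCIe FLR capability isn't present.
 */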
static int pnv_eeh_reset_vf_pe(struct eeh_pe *pe, int option)
{
	struct eeh_dev *edev;
	struct pci_dn *pdn;
	int ret;

	/* The VF PE should have only one child device */
	edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry);
	pdn = eeh_dev_to_pdn(edev);
	if (!pdn)
		return -ENXIO;

	ret = pnv_eeh_do_flr(pdn, option);
	if (!ret)
		return ret;

	return pnv_eeh_do_af_flr(pdn, option);
}

/**
 * pnv_eeh_reset - Reset the specified PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Do reset on the indicated PE. For a PCI bus sensitive PE,
 * we need to reset the parent p2p bridge. The PHB has to be
 * reinitialized if the p2p bridge is the root bridge. For a
 * PCI device sensitive PE, we will try to reset the device
 * through FLR. For now, we don't have OPAL APIs to do HARD
 * reset yet, so all resets are SOFT (HOT) resets.
 */
static int pnv_eeh_reset(struct eeh_pe *pe, int option)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb;
	struct pci_bus *bus;
	int64_t rc;

	/*
	 * For a PHB reset, we always do a complete reset. For those PEs
	 * whose primary bus is derived from the root complex (root bus)
	 * or a root port (usually bus#1), we apply a hot or fundamental
	 * reset on the root port. For other PEs, we always do a hot
	 * reset on the PE's primary bus.
	 *
	 * Here, our design differs from pHyp, which always clears the
	 * frozen state during PE reset. However, the good idea here from
	 * benh is to keep the frozen state until the PE reset is done
	 * completely (until BAR restore). With the frozen state, the HW
	 * drops illegal I/O or MMIO accesses, which could otherwise
	 * cause a recursive frozen PE during the PE reset. The side
	 * effect is that the EEH core has to clear the frozen state
	 * explicitly after the BAR restore.
	 */
	if (pe->type & EEH_PE_PHB)
		return pnv_eeh_phb_reset(hose, option);

	/*
	 * The frozen PE might be caused by the PAPR error injection
	 * registers, which are expected to be cleared after hitting
	 * a frozen PE as stated in the hardware spec. Unfortunately,
	 * that's not true on P7IOC. So we have to clear it manually
	 * to avoid recursive EEH errors during recovery.
	 */
	phb = hose->private_data;
	if (phb->model == PNV_PHB_MODEL_P7IOC &&
	    (option == EEH_RESET_HOT ||
	     option == EEH_RESET_FUNDAMENTAL)) {
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PHB_ERROR,
				    OPAL_ASSERT_RESET);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld clearing error injection registers\n",
				__func__, rc);
			return -EIO;
		}
	}

	if (pe->type & EEH_PE_VF)
		return pnv_eeh_reset_vf_pe(pe, option);

	bus = eeh_pe_bus_get(pe);
	if (!bus) {
		pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
		       __func__, pe->phb->global_number, pe->addr);
		return -EIO;
	}

	if (pci_is_root_bus(bus))
		return pnv_eeh_root_reset(hose, option);

	/*
	 * For hot resets, try to use the generic PCI error recovery reset
	 * functions. These correctly handle the case where the secondary
	 * bus is behind a hotplug slot and will use the slot-provided
	 * reset methods to prevent spurious hotplug events during the
	 * reset.
	 *
	 * Fundamental resets need to be handled internally to EEH since
	 * the PCI core doesn't really have a concept of a fundamental
	 * reset, mainly because there's no standard way to generate one.
	 * Only a few devices require an FRESET so it should be fine.
	 */
	if (option != EEH_RESET_FUNDAMENTAL) {
		/*
		 * NB: Skiboot and pnv_eeh_bridge_reset() also no-op the
		 * de-assert step. It's like the OPAL reset API was
		 * poorly designed or something...
		 */
		if (option == EEH_RESET_DEACTIVATE)
			return 0;

		rc = pci_bus_error_reset(bus->self);
		if (!rc)
			return 0;
	}

	/* otherwise, use the generic bridge reset. this might call into FW */
	if (pci_is_root_bus(bus->parent))
		return pnv_eeh_root_reset(hose, option);
	return pnv_eeh_bridge_reset(bus->self, option);
}

/**
 * pnv_eeh_get_log - Retrieve error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 * @drv_log: driver log to be combined with retrieved error log
 * @len: length of driver log
 *
 * Retrieve the temporary or permanent error log from the PE.
 */
static int pnv_eeh_get_log(struct eeh_pe *pe, int severity,
			   char *drv_log, unsigned long len)
{
	if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
		pnv_pci_dump_phb_diag_data(pe->phb, pe->data);

	return 0;
}

/**
 * pnv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 * The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE can be recovered
 * again.
 */
static int pnv_eeh_configure_bridge(struct eeh_pe *pe)
{
	return 0;
}

/**
 * pnv_eeh_err_inject - Inject specified error to the indicated PE
 * @pe: the indicated PE
 * @type: error type
 * @func: specific error type
 * @addr: address
 * @mask: address mask
 *
 * The routine is called to inject the specified error, which is
 * determined by @type and @func, to the indicated PE for testing
 * purposes.
 */
static int pnv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
			      unsigned long addr, unsigned long mask)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb = hose->private_data;
	s64 rc;

	if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR &&
	    type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) {
		pr_warn("%s: Invalid error type %d\n",
			__func__, type);
		return -ERANGE;
	}

	if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR ||
	    func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) {
		pr_warn("%s: Invalid error function %d\n",
			__func__, func);
		return -ERANGE;
	}

	/* Firmware supports error injection ? */
	if (!opal_check_token(OPAL_PCI_ERR_INJECT)) {
		pr_warn("%s: Firmware doesn't support error injection\n",
			__func__);
		return -ENXIO;
	}

	/* Do error injection */
	rc = opal_pci_err_inject(phb->opal_id, pe->addr,
				 type, func, addr, mask);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld injecting error "
			"%d-%d to PHB#%x-PE#%x\n",
			__func__, rc, type, func,
			hose->global_number, pe->addr);
		return -EIO;
	}

	return 0;
}

static inline bool pnv_eeh_cfg_blocked(struct pci_dn *pdn)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);

	if (!edev || !edev->pe)
		return false;

	/*
	 * We will issue an FLR or AF FLR to all VFs, which are contained
	 * in a VF PE. It relies on the EEH PCI config accessors, so we
	 * can't block them during that window.
	 */
	if (edev->physfn && (edev->pe->state & EEH_PE_RESET))
		return false;

	if (edev->pe->state & EEH_PE_CFG_BLOCKED)
		return true;

	return false;
}

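/*
 * EEH-aware PCI config accessors: while a PE's config space is blocked
 * (see pnv_eeh_cfg_blocked()), reads return all-ones data and both reads
 * and writes fail with PCIBIOS_SET_FAILED; otherwise the access goes
 * through the normal PowerNV low-level config routines.
 */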
static int pnv_eeh_read_config(struct eeh_dev *edev,
			       int where, int size, u32 *val)
{
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);

	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (pnv_eeh_cfg_blocked(pdn)) {
		*val = 0xFFFFFFFF;
		return PCIBIOS_SET_FAILED;
	}

	return pnv_pci_cfg_read(pdn, where, size, val);
}

static int pnv_eeh_write_config(struct eeh_dev *edev,
				int where, int size, u32 val)
{
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);

	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (pnv_eeh_cfg_blocked(pdn))
		return PCIBIOS_SET_FAILED;

	return pnv_pci_cfg_write(pdn, where, size, val);
}

static void pnv_eeh_dump_hub_diag_common(struct OpalIoP7IOCErrorData *data)
{
	/* GEM */
	if (data->gemXfir || data->gemRfir ||
	    data->gemRirqfir || data->gemMask || data->gemRwof)
		pr_info(" GEM: %016llx %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->gemXfir),
			be64_to_cpu(data->gemRfir),
			be64_to_cpu(data->gemRirqfir),
			be64_to_cpu(data->gemMask),
			be64_to_cpu(data->gemRwof));

	/* LEM */
	if (data->lemFir || data->lemErrMask ||
	    data->lemAction0 || data->lemAction1 || data->lemWof)
		pr_info(" LEM: %016llx %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrMask),
			be64_to_cpu(data->lemAction0),
			be64_to_cpu(data->lemAction1),
			be64_to_cpu(data->lemWof));
}

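/*
 * Fetch the P7IOC hub diag-data from OPAL and pretty-print the section
 * that corresponds to the reported type (RGC, BI, CI, MISC or I2C),
 * always including the common GEM/LEM registers.
 */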
static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose)
{
	struct pnv_phb *phb = hose->private_data;
	struct OpalIoP7IOCErrorData *data =
		(struct OpalIoP7IOCErrorData *)phb->diag_data;
	long rc;

	rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n",
			__func__, phb->hub_id, rc);
		return;
	}

	switch (be16_to_cpu(data->type)) {
	case OPAL_P7IOC_DIAG_TYPE_RGC:
		pr_info("P7IOC diag-data for RGC\n\n");
		pnv_eeh_dump_hub_diag_common(data);
		if (data->rgc.rgcStatus || data->rgc.rgcLdcp)
			pr_info(" RGC: %016llx %016llx\n",
				be64_to_cpu(data->rgc.rgcStatus),
				be64_to_cpu(data->rgc.rgcLdcp));
		break;
	case OPAL_P7IOC_DIAG_TYPE_BI:
		pr_info("P7IOC diag-data for BI %s\n\n",
			data->bi.biDownbound ? "Downbound" : "Upbound");
		pnv_eeh_dump_hub_diag_common(data);
		if (data->bi.biLdcp0 || data->bi.biLdcp1 ||
		    data->bi.biLdcp2 || data->bi.biFenceStatus)
			pr_info(" BI: %016llx %016llx %016llx %016llx\n",
				be64_to_cpu(data->bi.biLdcp0),
				be64_to_cpu(data->bi.biLdcp1),
				be64_to_cpu(data->bi.biLdcp2),
				be64_to_cpu(data->bi.biFenceStatus));
		break;
	case OPAL_P7IOC_DIAG_TYPE_CI:
		pr_info("P7IOC diag-data for CI Port %d\n\n",
			data->ci.ciPort);
		pnv_eeh_dump_hub_diag_common(data);
		if (data->ci.ciPortStatus || data->ci.ciPortLdcp)
			pr_info(" CI: %016llx %016llx\n",
				be64_to_cpu(data->ci.ciPortStatus),
				be64_to_cpu(data->ci.ciPortLdcp));
		break;
	case OPAL_P7IOC_DIAG_TYPE_MISC:
		pr_info("P7IOC diag-data for MISC\n\n");
		pnv_eeh_dump_hub_diag_common(data);
		break;
	case OPAL_P7IOC_DIAG_TYPE_I2C:
		pr_info("P7IOC diag-data for I2C\n\n");
		pnv_eeh_dump_hub_diag_common(data);
		break;
	default:
		pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n",
			__func__, phb->hub_id, data->type);
	}
}

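/*
 * Translate a PE number reported by OPAL into the corresponding eeh_pe.
 * For compound PEs the master PE is used, since slave PEs are invisible
 * to the EEH core. The PE is frozen, and the parent chain is walked so
 * that the topmost frozen ancestor (if any) is the one returned in @pe.
 */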
static int pnv_eeh_get_pe(struct pci_controller *hose,
			  u16 pe_no, struct eeh_pe **pe)
{
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pnv_pe;
	struct eeh_pe *dev_pe;

	/*
	 * If the PHB supports compound PEs, fetch the master PE,
	 * because slave PEs are invisible to the EEH core.
	 */
	pnv_pe = &phb->ioda.pe_array[pe_no];
	if (pnv_pe->flags & PNV_IODA_PE_SLAVE) {
		pnv_pe = pnv_pe->master;
		WARN_ON(!pnv_pe ||
			!(pnv_pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pnv_pe->pe_number;
	}

	/* Find the PE according to PE# */
	dev_pe = eeh_pe_get(hose, pe_no, 0);
	if (!dev_pe)
		return -EEXIST;

	/* Freeze the (compound) PE */
	*pe = dev_pe;
	if (!(dev_pe->state & EEH_PE_ISOLATED))
		phb->freeze_pe(phb, pe_no);

	/*
	 * At this point, we're sure the (compound) PE should
	 * have been frozen. However, we still need to poke upward
	 * until we hit the topmost frozen PE.
	 */
	dev_pe = dev_pe->parent;
	while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) {
		int ret;
		ret = eeh_ops->get_state(dev_pe, NULL);
		if (ret <= 0 || eeh_state_active(ret)) {
			dev_pe = dev_pe->parent;
			continue;
		}

		/* Frozen parent PE */
		*pe = dev_pe;
		if (!(dev_pe->state & EEH_PE_ISOLATED))
			phb->freeze_pe(phb, dev_pe->addr);

		/* Next one */
		dev_pe = dev_pe->parent;
	}

	return 0;
}

/**
 * pnv_eeh_next_error - Retrieve next EEH error to handle
 * @pe: Affected PE
 *
 * The function is expected to be called by the EEH core when it gets a
 * special EEH event (one without a bound PE). It asks OPAL for the next
 * error to handle. Informational errors are handled internally by the
 * platform. However, a dead IOC, dead PHB, fenced PHB or frozen PE has
 * to be handled by the EEH core eventually.
 */
static int pnv_eeh_next_error(struct eeh_pe **pe)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct eeh_pe *phb_pe, *parent_pe;
	__be64 frozen_pe_no;
	__be16 err_type, severity;
	long rc;
	int state, ret = EEH_NEXT_ERR_NONE;

	/*
	 * While running here, it's safe to purge the event queue. The
	 * event should still be masked.
	 */
	eeh_remove_event(NULL, false);

	list_for_each_entry(hose, &hose_list, list_node) {
		/*
		 * If the subordinate PCI buses of the PHB have been
		 * removed or are already under error recovery, we
		 * needn't take care of them any more.
		 */
		phb = hose->private_data;
		phb_pe = eeh_phb_pe_get(hose);
		if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED))
			continue;

		rc = opal_pci_next_error(phb->opal_id,
					 &frozen_pe_no, &err_type, &severity);
		if (rc != OPAL_SUCCESS) {
			pr_devel("%s: Invalid return value on "
				 "PHB#%x (0x%lx) from opal_pci_next_error",
				 __func__, hose->global_number, rc);
			continue;
		}

		/* If the PHB doesn't have an error, stop processing */
		if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR ||
		    be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) {
			pr_devel("%s: No error found on PHB#%x\n",
				 __func__, hose->global_number);
			continue;
		}

		/*
		 * Process the error. We're expecting the error with
		 * the highest priority to be reported upon multiple
		 * errors on the specific PHB.
		 */
		pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
			 __func__, be16_to_cpu(err_type),
			 be16_to_cpu(severity), be64_to_cpu(frozen_pe_no),
			 hose->global_number);
		switch (be16_to_cpu(err_type)) {
		case OPAL_EEH_IOC_ERROR:
			if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) {
				pr_err("EEH: dead IOC detected\n");
				ret = EEH_NEXT_ERR_DEAD_IOC;
			} else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
				pr_info("EEH: IOC informative error "
					"detected\n");
				pnv_eeh_get_and_dump_hub_diag(hose);
				ret = EEH_NEXT_ERR_NONE;
			}

			break;
		case OPAL_EEH_PHB_ERROR:
			if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) {
				*pe = phb_pe;
				pr_err("EEH: dead PHB#%x detected, "
				       "location: %s\n",
				       hose->global_number,
				       eeh_pe_loc_get(phb_pe));
				ret = EEH_NEXT_ERR_DEAD_PHB;
			} else if (be16_to_cpu(severity) ==
				   OPAL_EEH_SEV_PHB_FENCED) {
				*pe = phb_pe;
				pr_err("EEH: Fenced PHB#%x detected, "
				       "location: %s\n",
				       hose->global_number,
				       eeh_pe_loc_get(phb_pe));
				ret = EEH_NEXT_ERR_FENCED_PHB;
			} else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
				pr_info("EEH: PHB#%x informative error "
					"detected, location: %s\n",
					hose->global_number,
					eeh_pe_loc_get(phb_pe));
				pnv_eeh_get_phb_diag(phb_pe);
				pnv_pci_dump_phb_diag_data(hose, phb_pe->data);
				ret = EEH_NEXT_ERR_NONE;
			}

			break;
		case OPAL_EEH_PE_ERROR:
			/*
			 * If we can't find the corresponding PE, we
			 * just try to unfreeze it.
			 */
			if (pnv_eeh_get_pe(hose,
				be64_to_cpu(frozen_pe_no), pe)) {
				pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
					hose->global_number, be64_to_cpu(frozen_pe_no));
				pr_info("EEH: PHB location: %s\n",
					eeh_pe_loc_get(phb_pe));

				/* Dump PHB diag-data */
				rc = opal_pci_get_phb_diag_data2(phb->opal_id,
					phb->diag_data, phb->diag_data_size);
				if (rc == OPAL_SUCCESS)
					pnv_pci_dump_phb_diag_data(hose,
							phb->diag_data);

				/* Try best to clear it */
				opal_pci_eeh_freeze_clear(phb->opal_id,
					be64_to_cpu(frozen_pe_no),
					OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
				ret = EEH_NEXT_ERR_NONE;
			} else if ((*pe)->state & EEH_PE_ISOLATED ||
				   eeh_pe_passed(*pe)) {
				ret = EEH_NEXT_ERR_NONE;
			} else {
				pr_err("EEH: Frozen PE#%x "
				       "on PHB#%x detected\n",
				       (*pe)->addr,
				       (*pe)->phb->global_number);
				pr_err("EEH: PE location: %s, "
				       "PHB location: %s\n",
				       eeh_pe_loc_get(*pe),
				       eeh_pe_loc_get(phb_pe));
				ret = EEH_NEXT_ERR_FROZEN_PE;
			}

			break;
		default:
			pr_warn("%s: Unexpected error type %d\n",
				__func__, be16_to_cpu(err_type));
		}

		/*
		 * The EEH core will try to recover from a fenced PHB or
		 * frozen PE. For a frozen PE, the EEH core enables the
		 * I/O path before collecting logs, but that disturbs
		 * the error site. So we have to dump the log in
		 * advance here.
		 */
		if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
		     ret == EEH_NEXT_ERR_FENCED_PHB) &&
		    !((*pe)->state & EEH_PE_ISOLATED)) {
			eeh_pe_mark_isolated(*pe);
			pnv_eeh_get_phb_diag(*pe);

			if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
				pnv_pci_dump_phb_diag_data((*pe)->phb,
							   (*pe)->data);
		}

		/*
		 * We probably have a frozen parent PE out there, and
		 * if so we have to handle the frozen parent PE first.
		 */
		if (ret == EEH_NEXT_ERR_FROZEN_PE) {
			parent_pe = (*pe)->parent;
			while (parent_pe) {
				/* Hit the ceiling ? */
				if (parent_pe->type & EEH_PE_PHB)
					break;

				/* Frozen parent PE ? */
				state = eeh_ops->get_state(parent_pe, NULL);
				if (state > 0 && !eeh_state_active(state))
					*pe = parent_pe;

				/* Next parent level */
				parent_pe = parent_pe->parent;
			}

			/* We possibly migrate to another PE */
			eeh_pe_mark_isolated(*pe);
		}

		/*
		 * If we have no errors on the specific PHB or only
		 * informative errors there, we continue polling it.
		 * Otherwise, actions need to be taken by the upper
		 * layer.
		 */
		if (ret > EEH_NEXT_ERR_INF)
			break;
	}

	/* Unmask the event */
	if (ret == EEH_NEXT_ERR_NONE && eeh_enabled())
		enable_irq(eeh_event_irq);

	return ret;
}

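/*
 * Restore the config space of a device after reset by asking OPAL to
 * reinitialise it. SR-IOV VFs are skipped here; their config space is
 * restored elsewhere.
 */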
static int pnv_eeh_restore_config(struct eeh_dev *edev)
{
	struct pnv_phb *phb;
	s64 ret = 0;

	if (!edev)
		return -EEXIST;

	if (edev->physfn)
		return 0;

	phb = edev->controller->private_data;
	ret = opal_pci_reinit(phb->opal_id,
			      OPAL_REINIT_PCI_DEV, edev->bdfn);

	if (ret) {
		pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
			__func__, edev->bdfn, ret);
		return -EIO;
	}

	return ret;
}

static struct eeh_ops pnv_eeh_ops = {
	.name			= "powernv",
	.init			= pnv_eeh_init,
	.probe			= pnv_eeh_probe,
	.set_option		= pnv_eeh_set_option,
	.get_state		= pnv_eeh_get_state,
	.reset			= pnv_eeh_reset,
	.get_log		= pnv_eeh_get_log,
	.configure_bridge	= pnv_eeh_configure_bridge,
	.err_inject		= pnv_eeh_err_inject,
	.read_config		= pnv_eeh_read_config,
	.write_config		= pnv_eeh_write_config,
	.next_error		= pnv_eeh_next_error,
	.restore_config		= pnv_eeh_restore_config,
	.notify_resume		= NULL
};

#ifdef CONFIG_PCI_IOV
static void pnv_pci_fixup_vf_mps(struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	int parent_mps;

	if (!pdev->is_virtfn)
		return;

	/* Synchronize MPS for VF and PF */
	parent_mps = pcie_get_mps(pdev->physfn);
	if ((128 << pdev->pcie_mpss) >= parent_mps)
		pcie_set_mps(pdev, parent_mps);
	pdn->mps = pcie_get_mps(pdev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pnv_pci_fixup_vf_mps);
#endif /* CONFIG_PCI_IOV */

/**
 * eeh_powernv_init - Register platform dependent EEH operations
 *
 * EEH initialization on the powernv platform. This function should be
 * called before any other EEH related functions.
 */
static int __init eeh_powernv_init(void)
{
	int ret = -EINVAL;

	ret = eeh_ops_register(&pnv_eeh_ops);
	if (!ret)
		pr_info("EEH: PowerNV platform initialized\n");
	else
		pr_info("EEH: Failed to initialize PowerNV platform (%d)\n", ret);

	return ret;
}
machine_early_initcall(powernv, eeh_powernv_init);