1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * PowerNV Platform dependent EEH operations 4 * 5 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013. 6 */ 7 8 #include <linux/atomic.h> 9 #include <linux/debugfs.h> 10 #include <linux/delay.h> 11 #include <linux/export.h> 12 #include <linux/init.h> 13 #include <linux/interrupt.h> 14 #include <linux/list.h> 15 #include <linux/msi.h> 16 #include <linux/of.h> 17 #include <linux/pci.h> 18 #include <linux/proc_fs.h> 19 #include <linux/rbtree.h> 20 #include <linux/sched.h> 21 #include <linux/seq_file.h> 22 #include <linux/spinlock.h> 23 24 #include <asm/eeh.h> 25 #include <asm/eeh_event.h> 26 #include <asm/firmware.h> 27 #include <asm/io.h> 28 #include <asm/iommu.h> 29 #include <asm/machdep.h> 30 #include <asm/msi_bitmap.h> 31 #include <asm/opal.h> 32 #include <asm/ppc-pci.h> 33 #include <asm/pnv-pci.h> 34 35 #include "powernv.h" 36 #include "pci.h" 37 #include "../../../../drivers/pci/pci.h" 38 39 static int eeh_event_irq = -EINVAL; 40 41 void pnv_pcibios_bus_add_device(struct pci_dev *pdev) 42 { 43 struct pci_dn *pdn = pci_get_pdn(pdev); 44 45 if (!pdn || eeh_has_flag(EEH_FORCE_DISABLED)) 46 return; 47 48 dev_dbg(&pdev->dev, "EEH: Setting up device\n"); 49 eeh_add_device_early(pdn); 50 eeh_add_device_late(pdev); 51 } 52 53 static int pnv_eeh_init(void) 54 { 55 struct pci_controller *hose; 56 struct pnv_phb *phb; 57 int max_diag_size = PNV_PCI_DIAG_BUF_SIZE; 58 59 if (!firmware_has_feature(FW_FEATURE_OPAL)) { 60 pr_warn("%s: OPAL is required !\n", 61 __func__); 62 return -EINVAL; 63 } 64 65 /* Set probe mode */ 66 eeh_add_flag(EEH_PROBE_MODE_DEV); 67 68 /* 69 * P7IOC blocks PCI config access to frozen PE, but PHB3 70 * doesn't do that. So we have to selectively enable I/O 71 * prior to collecting error log. 72 */ 73 list_for_each_entry(hose, &hose_list, list_node) { 74 phb = hose->private_data; 75 76 if (phb->model == PNV_PHB_MODEL_P7IOC) 77 eeh_add_flag(EEH_ENABLE_IO_FOR_LOG); 78 79 if (phb->diag_data_size > max_diag_size) 80 max_diag_size = phb->diag_data_size; 81 82 /* 83 * PE#0 should be regarded as valid by EEH core 84 * if it's not the reserved one. Currently, we 85 * have the reserved PE#255 and PE#127 for PHB3 86 * and P7IOC separately. So we should regard 87 * PE#0 as valid for PHB3 and P7IOC. 88 */ 89 if (phb->ioda.reserved_pe_idx != 0) 90 eeh_add_flag(EEH_VALID_PE_ZERO); 91 92 break; 93 } 94 95 eeh_set_pe_aux_size(max_diag_size); 96 ppc_md.pcibios_bus_add_device = pnv_pcibios_bus_add_device; 97 98 return 0; 99 } 100 101 static irqreturn_t pnv_eeh_event(int irq, void *data) 102 { 103 /* 104 * We simply send a special EEH event if EEH has been 105 * enabled. We don't care about EEH events until we've 106 * finished processing the outstanding ones. Event processing 107 * gets unmasked in next_error() if EEH is enabled. 108 */ 109 disable_irq_nosync(irq); 110 111 if (eeh_enabled()) 112 eeh_send_failure_event(NULL); 113 114 return IRQ_HANDLED; 115 } 116 117 #ifdef CONFIG_DEBUG_FS 118 static ssize_t pnv_eeh_ei_write(struct file *filp, 119 const char __user *user_buf, 120 size_t count, loff_t *ppos) 121 { 122 struct pci_controller *hose = filp->private_data; 123 struct eeh_pe *pe; 124 int pe_no, type, func; 125 unsigned long addr, mask; 126 char buf[50]; 127 int ret; 128 129 if (!eeh_ops || !eeh_ops->err_inject) 130 return -ENXIO; 131 132 /* Copy over argument buffer */ 133 ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count); 134 if (!ret) 135 return -EFAULT; 136 137 /* Retrieve parameters */ 138 ret = sscanf(buf, "%x:%x:%x:%lx:%lx", 139 &pe_no, &type, &func, &addr, &mask); 140 if (ret != 5) 141 return -EINVAL; 142 143 /* Retrieve PE */ 144 pe = eeh_pe_get(hose, pe_no, 0); 145 if (!pe) 146 return -ENODEV; 147 148 /* Do error injection */ 149 ret = eeh_ops->err_inject(pe, type, func, addr, mask); 150 return ret < 0 ? ret : count; 151 } 152 153 static const struct file_operations pnv_eeh_ei_fops = { 154 .open = simple_open, 155 .llseek = no_llseek, 156 .write = pnv_eeh_ei_write, 157 }; 158 159 static int pnv_eeh_dbgfs_set(void *data, int offset, u64 val) 160 { 161 struct pci_controller *hose = data; 162 struct pnv_phb *phb = hose->private_data; 163 164 out_be64(phb->regs + offset, val); 165 return 0; 166 } 167 168 static int pnv_eeh_dbgfs_get(void *data, int offset, u64 *val) 169 { 170 struct pci_controller *hose = data; 171 struct pnv_phb *phb = hose->private_data; 172 173 *val = in_be64(phb->regs + offset); 174 return 0; 175 } 176 177 #define PNV_EEH_DBGFS_ENTRY(name, reg) \ 178 static int pnv_eeh_dbgfs_set_##name(void *data, u64 val) \ 179 { \ 180 return pnv_eeh_dbgfs_set(data, reg, val); \ 181 } \ 182 \ 183 static int pnv_eeh_dbgfs_get_##name(void *data, u64 *val) \ 184 { \ 185 return pnv_eeh_dbgfs_get(data, reg, val); \ 186 } \ 187 \ 188 DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_dbgfs_ops_##name, \ 189 pnv_eeh_dbgfs_get_##name, \ 190 pnv_eeh_dbgfs_set_##name, \ 191 "0x%llx\n") 192 193 PNV_EEH_DBGFS_ENTRY(outb, 0xD10); 194 PNV_EEH_DBGFS_ENTRY(inbA, 0xD90); 195 PNV_EEH_DBGFS_ENTRY(inbB, 0xE10); 196 197 #endif /* CONFIG_DEBUG_FS */ 198 199 void pnv_eeh_enable_phbs(void) 200 { 201 struct pci_controller *hose; 202 struct pnv_phb *phb; 203 204 list_for_each_entry(hose, &hose_list, list_node) { 205 phb = hose->private_data; 206 /* 207 * If EEH is enabled, we're going to rely on that. 208 * Otherwise, we restore to conventional mechanism 209 * to clear frozen PE during PCI config access. 210 */ 211 if (eeh_enabled()) 212 phb->flags |= PNV_PHB_FLAG_EEH; 213 else 214 phb->flags &= ~PNV_PHB_FLAG_EEH; 215 } 216 } 217 218 /** 219 * pnv_eeh_post_init - EEH platform dependent post initialization 220 * 221 * EEH platform dependent post initialization on powernv. When 222 * the function is called, the EEH PEs and devices should have 223 * been built. If the I/O cache staff has been built, EEH is 224 * ready to supply service. 225 */ 226 int pnv_eeh_post_init(void) 227 { 228 struct pci_controller *hose; 229 struct pnv_phb *phb; 230 int ret = 0; 231 232 eeh_show_enabled(); 233 234 /* Register OPAL event notifier */ 235 eeh_event_irq = opal_event_request(ilog2(OPAL_EVENT_PCI_ERROR)); 236 if (eeh_event_irq < 0) { 237 pr_err("%s: Can't register OPAL event interrupt (%d)\n", 238 __func__, eeh_event_irq); 239 return eeh_event_irq; 240 } 241 242 ret = request_irq(eeh_event_irq, pnv_eeh_event, 243 IRQ_TYPE_LEVEL_HIGH, "opal-eeh", NULL); 244 if (ret < 0) { 245 irq_dispose_mapping(eeh_event_irq); 246 pr_err("%s: Can't request OPAL event interrupt (%d)\n", 247 __func__, eeh_event_irq); 248 return ret; 249 } 250 251 if (!eeh_enabled()) 252 disable_irq(eeh_event_irq); 253 254 pnv_eeh_enable_phbs(); 255 256 list_for_each_entry(hose, &hose_list, list_node) { 257 phb = hose->private_data; 258 259 /* Create debugfs entries */ 260 #ifdef CONFIG_DEBUG_FS 261 if (phb->has_dbgfs || !phb->dbgfs) 262 continue; 263 264 phb->has_dbgfs = 1; 265 debugfs_create_file("err_injct", 0200, 266 phb->dbgfs, hose, 267 &pnv_eeh_ei_fops); 268 269 debugfs_create_file("err_injct_outbound", 0600, 270 phb->dbgfs, hose, 271 &pnv_eeh_dbgfs_ops_outb); 272 debugfs_create_file("err_injct_inboundA", 0600, 273 phb->dbgfs, hose, 274 &pnv_eeh_dbgfs_ops_inbA); 275 debugfs_create_file("err_injct_inboundB", 0600, 276 phb->dbgfs, hose, 277 &pnv_eeh_dbgfs_ops_inbB); 278 #endif /* CONFIG_DEBUG_FS */ 279 } 280 281 return ret; 282 } 283 284 static int pnv_eeh_find_cap(struct pci_dn *pdn, int cap) 285 { 286 int pos = PCI_CAPABILITY_LIST; 287 int cnt = 48; /* Maximal number of capabilities */ 288 u32 status, id; 289 290 if (!pdn) 291 return 0; 292 293 /* Check if the device supports capabilities */ 294 pnv_pci_cfg_read(pdn, PCI_STATUS, 2, &status); 295 if (!(status & PCI_STATUS_CAP_LIST)) 296 return 0; 297 298 while (cnt--) { 299 pnv_pci_cfg_read(pdn, pos, 1, &pos); 300 if (pos < 0x40) 301 break; 302 303 pos &= ~3; 304 pnv_pci_cfg_read(pdn, pos + PCI_CAP_LIST_ID, 1, &id); 305 if (id == 0xff) 306 break; 307 308 /* Found */ 309 if (id == cap) 310 return pos; 311 312 /* Next one */ 313 pos += PCI_CAP_LIST_NEXT; 314 } 315 316 return 0; 317 } 318 319 static int pnv_eeh_find_ecap(struct pci_dn *pdn, int cap) 320 { 321 struct eeh_dev *edev = pdn_to_eeh_dev(pdn); 322 u32 header; 323 int pos = 256, ttl = (4096 - 256) / 8; 324 325 if (!edev || !edev->pcie_cap) 326 return 0; 327 if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL) 328 return 0; 329 else if (!header) 330 return 0; 331 332 while (ttl-- > 0) { 333 if (PCI_EXT_CAP_ID(header) == cap && pos) 334 return pos; 335 336 pos = PCI_EXT_CAP_NEXT(header); 337 if (pos < 256) 338 break; 339 340 if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL) 341 break; 342 } 343 344 return 0; 345 } 346 347 /** 348 * pnv_eeh_probe - Do probe on PCI device 349 * @pdn: PCI device node 350 * @data: unused 351 * 352 * When EEH module is installed during system boot, all PCI devices 353 * are checked one by one to see if it supports EEH. The function 354 * is introduced for the purpose. By default, EEH has been enabled 355 * on all PCI devices. That's to say, we only need do necessary 356 * initialization on the corresponding eeh device and create PE 357 * accordingly. 358 * 359 * It's notable that's unsafe to retrieve the EEH device through 360 * the corresponding PCI device. During the PCI device hotplug, which 361 * was possiblly triggered by EEH core, the binding between EEH device 362 * and the PCI device isn't built yet. 363 */ 364 static void *pnv_eeh_probe(struct pci_dn *pdn, void *data) 365 { 366 struct pci_controller *hose = pdn->phb; 367 struct pnv_phb *phb = hose->private_data; 368 struct eeh_dev *edev = pdn_to_eeh_dev(pdn); 369 uint32_t pcie_flags; 370 int ret; 371 int config_addr = (pdn->busno << 8) | (pdn->devfn); 372 373 /* 374 * When probing the root bridge, which doesn't have any 375 * subordinate PCI devices. We don't have OF node for 376 * the root bridge. So it's not reasonable to continue 377 * the probing. 378 */ 379 if (!edev || edev->pe) 380 return NULL; 381 382 /* Skip for PCI-ISA bridge */ 383 if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA) 384 return NULL; 385 386 eeh_edev_dbg(edev, "Probing device\n"); 387 388 /* Initialize eeh device */ 389 edev->class_code = pdn->class_code; 390 edev->mode &= 0xFFFFFF00; 391 edev->pcix_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_PCIX); 392 edev->pcie_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_EXP); 393 edev->af_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_AF); 394 edev->aer_cap = pnv_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR); 395 if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) { 396 edev->mode |= EEH_DEV_BRIDGE; 397 if (edev->pcie_cap) { 398 pnv_pci_cfg_read(pdn, edev->pcie_cap + PCI_EXP_FLAGS, 399 2, &pcie_flags); 400 pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4; 401 if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT) 402 edev->mode |= EEH_DEV_ROOT_PORT; 403 else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM) 404 edev->mode |= EEH_DEV_DS_PORT; 405 } 406 } 407 408 edev->pe_config_addr = phb->ioda.pe_rmap[config_addr]; 409 410 /* Create PE */ 411 ret = eeh_add_to_parent_pe(edev); 412 if (ret) { 413 eeh_edev_warn(edev, "Failed to add device to PE (code %d)\n", ret); 414 return NULL; 415 } 416 417 /* 418 * If the PE contains any one of following adapters, the 419 * PCI config space can't be accessed when dumping EEH log. 420 * Otherwise, we will run into fenced PHB caused by shortage 421 * of outbound credits in the adapter. The PCI config access 422 * should be blocked until PE reset. MMIO access is dropped 423 * by hardware certainly. In order to drop PCI config requests, 424 * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which 425 * will be checked in the backend for PE state retrival. If 426 * the PE becomes frozen for the first time and the flag has 427 * been set for the PE, we will set EEH_PE_CFG_BLOCKED for 428 * that PE to block its config space. 429 * 430 * Broadcom BCM5718 2-ports NICs (14e4:1656) 431 * Broadcom Austin 4-ports NICs (14e4:1657) 432 * Broadcom Shiner 4-ports 1G NICs (14e4:168a) 433 * Broadcom Shiner 2-ports 10G NICs (14e4:168e) 434 */ 435 if ((pdn->vendor_id == PCI_VENDOR_ID_BROADCOM && 436 pdn->device_id == 0x1656) || 437 (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM && 438 pdn->device_id == 0x1657) || 439 (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM && 440 pdn->device_id == 0x168a) || 441 (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM && 442 pdn->device_id == 0x168e)) 443 edev->pe->state |= EEH_PE_CFG_RESTRICTED; 444 445 /* 446 * Cache the PE primary bus, which can't be fetched when 447 * full hotplug is in progress. In that case, all child 448 * PCI devices of the PE are expected to be removed prior 449 * to PE reset. 450 */ 451 if (!(edev->pe->state & EEH_PE_PRI_BUS)) { 452 edev->pe->bus = pci_find_bus(hose->global_number, 453 pdn->busno); 454 if (edev->pe->bus) 455 edev->pe->state |= EEH_PE_PRI_BUS; 456 } 457 458 /* 459 * Enable EEH explicitly so that we will do EEH check 460 * while accessing I/O stuff 461 */ 462 if (!eeh_has_flag(EEH_ENABLED)) { 463 enable_irq(eeh_event_irq); 464 pnv_eeh_enable_phbs(); 465 eeh_add_flag(EEH_ENABLED); 466 } 467 468 /* Save memory bars */ 469 eeh_save_bars(edev); 470 471 eeh_edev_dbg(edev, "EEH enabled on device\n"); 472 473 return NULL; 474 } 475 476 /** 477 * pnv_eeh_set_option - Initialize EEH or MMIO/DMA reenable 478 * @pe: EEH PE 479 * @option: operation to be issued 480 * 481 * The function is used to control the EEH functionality globally. 482 * Currently, following options are support according to PAPR: 483 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA 484 */ 485 static int pnv_eeh_set_option(struct eeh_pe *pe, int option) 486 { 487 struct pci_controller *hose = pe->phb; 488 struct pnv_phb *phb = hose->private_data; 489 bool freeze_pe = false; 490 int opt; 491 s64 rc; 492 493 switch (option) { 494 case EEH_OPT_DISABLE: 495 return -EPERM; 496 case EEH_OPT_ENABLE: 497 return 0; 498 case EEH_OPT_THAW_MMIO: 499 opt = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO; 500 break; 501 case EEH_OPT_THAW_DMA: 502 opt = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA; 503 break; 504 case EEH_OPT_FREEZE_PE: 505 freeze_pe = true; 506 opt = OPAL_EEH_ACTION_SET_FREEZE_ALL; 507 break; 508 default: 509 pr_warn("%s: Invalid option %d\n", __func__, option); 510 return -EINVAL; 511 } 512 513 /* Freeze master and slave PEs if PHB supports compound PEs */ 514 if (freeze_pe) { 515 if (phb->freeze_pe) { 516 phb->freeze_pe(phb, pe->addr); 517 return 0; 518 } 519 520 rc = opal_pci_eeh_freeze_set(phb->opal_id, pe->addr, opt); 521 if (rc != OPAL_SUCCESS) { 522 pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n", 523 __func__, rc, phb->hose->global_number, 524 pe->addr); 525 return -EIO; 526 } 527 528 return 0; 529 } 530 531 /* Unfreeze master and slave PEs if PHB supports */ 532 if (phb->unfreeze_pe) 533 return phb->unfreeze_pe(phb, pe->addr, opt); 534 535 rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe->addr, opt); 536 if (rc != OPAL_SUCCESS) { 537 pr_warn("%s: Failure %lld enable %d for PHB#%x-PE#%x\n", 538 __func__, rc, option, phb->hose->global_number, 539 pe->addr); 540 return -EIO; 541 } 542 543 return 0; 544 } 545 546 /** 547 * pnv_eeh_get_pe_addr - Retrieve PE address 548 * @pe: EEH PE 549 * 550 * Retrieve the PE address according to the given tranditional 551 * PCI BDF (Bus/Device/Function) address. 552 */ 553 static int pnv_eeh_get_pe_addr(struct eeh_pe *pe) 554 { 555 return pe->addr; 556 } 557 558 static void pnv_eeh_get_phb_diag(struct eeh_pe *pe) 559 { 560 struct pnv_phb *phb = pe->phb->private_data; 561 s64 rc; 562 563 rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data, 564 phb->diag_data_size); 565 if (rc != OPAL_SUCCESS) 566 pr_warn("%s: Failure %lld getting PHB#%x diag-data\n", 567 __func__, rc, pe->phb->global_number); 568 } 569 570 static int pnv_eeh_get_phb_state(struct eeh_pe *pe) 571 { 572 struct pnv_phb *phb = pe->phb->private_data; 573 u8 fstate = 0; 574 __be16 pcierr = 0; 575 s64 rc; 576 int result = 0; 577 578 rc = opal_pci_eeh_freeze_status(phb->opal_id, 579 pe->addr, 580 &fstate, 581 &pcierr, 582 NULL); 583 if (rc != OPAL_SUCCESS) { 584 pr_warn("%s: Failure %lld getting PHB#%x state\n", 585 __func__, rc, phb->hose->global_number); 586 return EEH_STATE_NOT_SUPPORT; 587 } 588 589 /* 590 * Check PHB state. If the PHB is frozen for the 591 * first time, to dump the PHB diag-data. 592 */ 593 if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) { 594 result = (EEH_STATE_MMIO_ACTIVE | 595 EEH_STATE_DMA_ACTIVE | 596 EEH_STATE_MMIO_ENABLED | 597 EEH_STATE_DMA_ENABLED); 598 } else if (!(pe->state & EEH_PE_ISOLATED)) { 599 eeh_pe_mark_isolated(pe); 600 pnv_eeh_get_phb_diag(pe); 601 602 if (eeh_has_flag(EEH_EARLY_DUMP_LOG)) 603 pnv_pci_dump_phb_diag_data(pe->phb, pe->data); 604 } 605 606 return result; 607 } 608 609 static int pnv_eeh_get_pe_state(struct eeh_pe *pe) 610 { 611 struct pnv_phb *phb = pe->phb->private_data; 612 u8 fstate = 0; 613 __be16 pcierr = 0; 614 s64 rc; 615 int result; 616 617 /* 618 * We don't clobber hardware frozen state until PE 619 * reset is completed. In order to keep EEH core 620 * moving forward, we have to return operational 621 * state during PE reset. 622 */ 623 if (pe->state & EEH_PE_RESET) { 624 result = (EEH_STATE_MMIO_ACTIVE | 625 EEH_STATE_DMA_ACTIVE | 626 EEH_STATE_MMIO_ENABLED | 627 EEH_STATE_DMA_ENABLED); 628 return result; 629 } 630 631 /* 632 * Fetch PE state from hardware. If the PHB 633 * supports compound PE, let it handle that. 634 */ 635 if (phb->get_pe_state) { 636 fstate = phb->get_pe_state(phb, pe->addr); 637 } else { 638 rc = opal_pci_eeh_freeze_status(phb->opal_id, 639 pe->addr, 640 &fstate, 641 &pcierr, 642 NULL); 643 if (rc != OPAL_SUCCESS) { 644 pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n", 645 __func__, rc, phb->hose->global_number, 646 pe->addr); 647 return EEH_STATE_NOT_SUPPORT; 648 } 649 } 650 651 /* Figure out state */ 652 switch (fstate) { 653 case OPAL_EEH_STOPPED_NOT_FROZEN: 654 result = (EEH_STATE_MMIO_ACTIVE | 655 EEH_STATE_DMA_ACTIVE | 656 EEH_STATE_MMIO_ENABLED | 657 EEH_STATE_DMA_ENABLED); 658 break; 659 case OPAL_EEH_STOPPED_MMIO_FREEZE: 660 result = (EEH_STATE_DMA_ACTIVE | 661 EEH_STATE_DMA_ENABLED); 662 break; 663 case OPAL_EEH_STOPPED_DMA_FREEZE: 664 result = (EEH_STATE_MMIO_ACTIVE | 665 EEH_STATE_MMIO_ENABLED); 666 break; 667 case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE: 668 result = 0; 669 break; 670 case OPAL_EEH_STOPPED_RESET: 671 result = EEH_STATE_RESET_ACTIVE; 672 break; 673 case OPAL_EEH_STOPPED_TEMP_UNAVAIL: 674 result = EEH_STATE_UNAVAILABLE; 675 break; 676 case OPAL_EEH_STOPPED_PERM_UNAVAIL: 677 result = EEH_STATE_NOT_SUPPORT; 678 break; 679 default: 680 result = EEH_STATE_NOT_SUPPORT; 681 pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n", 682 __func__, phb->hose->global_number, 683 pe->addr, fstate); 684 } 685 686 /* 687 * If PHB supports compound PE, to freeze all 688 * slave PEs for consistency. 689 * 690 * If the PE is switching to frozen state for the 691 * first time, to dump the PHB diag-data. 692 */ 693 if (!(result & EEH_STATE_NOT_SUPPORT) && 694 !(result & EEH_STATE_UNAVAILABLE) && 695 !(result & EEH_STATE_MMIO_ACTIVE) && 696 !(result & EEH_STATE_DMA_ACTIVE) && 697 !(pe->state & EEH_PE_ISOLATED)) { 698 if (phb->freeze_pe) 699 phb->freeze_pe(phb, pe->addr); 700 701 eeh_pe_mark_isolated(pe); 702 pnv_eeh_get_phb_diag(pe); 703 704 if (eeh_has_flag(EEH_EARLY_DUMP_LOG)) 705 pnv_pci_dump_phb_diag_data(pe->phb, pe->data); 706 } 707 708 return result; 709 } 710 711 /** 712 * pnv_eeh_get_state - Retrieve PE state 713 * @pe: EEH PE 714 * @delay: delay while PE state is temporarily unavailable 715 * 716 * Retrieve the state of the specified PE. For IODA-compitable 717 * platform, it should be retrieved from IODA table. Therefore, 718 * we prefer passing down to hardware implementation to handle 719 * it. 720 */ 721 static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay) 722 { 723 int ret; 724 725 if (pe->type & EEH_PE_PHB) 726 ret = pnv_eeh_get_phb_state(pe); 727 else 728 ret = pnv_eeh_get_pe_state(pe); 729 730 if (!delay) 731 return ret; 732 733 /* 734 * If the PE state is temporarily unavailable, 735 * to inform the EEH core delay for default 736 * period (1 second) 737 */ 738 *delay = 0; 739 if (ret & EEH_STATE_UNAVAILABLE) 740 *delay = 1000; 741 742 return ret; 743 } 744 745 static s64 pnv_eeh_poll(unsigned long id) 746 { 747 s64 rc = OPAL_HARDWARE; 748 749 while (1) { 750 rc = opal_pci_poll(id); 751 if (rc <= 0) 752 break; 753 754 if (system_state < SYSTEM_RUNNING) 755 udelay(1000 * rc); 756 else 757 msleep(rc); 758 } 759 760 return rc; 761 } 762 763 int pnv_eeh_phb_reset(struct pci_controller *hose, int option) 764 { 765 struct pnv_phb *phb = hose->private_data; 766 s64 rc = OPAL_HARDWARE; 767 768 pr_debug("%s: Reset PHB#%x, option=%d\n", 769 __func__, hose->global_number, option); 770 771 /* Issue PHB complete reset request */ 772 if (option == EEH_RESET_FUNDAMENTAL || 773 option == EEH_RESET_HOT) 774 rc = opal_pci_reset(phb->opal_id, 775 OPAL_RESET_PHB_COMPLETE, 776 OPAL_ASSERT_RESET); 777 else if (option == EEH_RESET_DEACTIVATE) 778 rc = opal_pci_reset(phb->opal_id, 779 OPAL_RESET_PHB_COMPLETE, 780 OPAL_DEASSERT_RESET); 781 if (rc < 0) 782 goto out; 783 784 /* 785 * Poll state of the PHB until the request is done 786 * successfully. The PHB reset is usually PHB complete 787 * reset followed by hot reset on root bus. So we also 788 * need the PCI bus settlement delay. 789 */ 790 if (rc > 0) 791 rc = pnv_eeh_poll(phb->opal_id); 792 if (option == EEH_RESET_DEACTIVATE) { 793 if (system_state < SYSTEM_RUNNING) 794 udelay(1000 * EEH_PE_RST_SETTLE_TIME); 795 else 796 msleep(EEH_PE_RST_SETTLE_TIME); 797 } 798 out: 799 if (rc != OPAL_SUCCESS) 800 return -EIO; 801 802 return 0; 803 } 804 805 static int pnv_eeh_root_reset(struct pci_controller *hose, int option) 806 { 807 struct pnv_phb *phb = hose->private_data; 808 s64 rc = OPAL_HARDWARE; 809 810 pr_debug("%s: Reset PHB#%x, option=%d\n", 811 __func__, hose->global_number, option); 812 813 /* 814 * During the reset deassert time, we needn't care 815 * the reset scope because the firmware does nothing 816 * for fundamental or hot reset during deassert phase. 817 */ 818 if (option == EEH_RESET_FUNDAMENTAL) 819 rc = opal_pci_reset(phb->opal_id, 820 OPAL_RESET_PCI_FUNDAMENTAL, 821 OPAL_ASSERT_RESET); 822 else if (option == EEH_RESET_HOT) 823 rc = opal_pci_reset(phb->opal_id, 824 OPAL_RESET_PCI_HOT, 825 OPAL_ASSERT_RESET); 826 else if (option == EEH_RESET_DEACTIVATE) 827 rc = opal_pci_reset(phb->opal_id, 828 OPAL_RESET_PCI_HOT, 829 OPAL_DEASSERT_RESET); 830 if (rc < 0) 831 goto out; 832 833 /* Poll state of the PHB until the request is done */ 834 if (rc > 0) 835 rc = pnv_eeh_poll(phb->opal_id); 836 if (option == EEH_RESET_DEACTIVATE) 837 msleep(EEH_PE_RST_SETTLE_TIME); 838 out: 839 if (rc != OPAL_SUCCESS) 840 return -EIO; 841 842 return 0; 843 } 844 845 static int __pnv_eeh_bridge_reset(struct pci_dev *dev, int option) 846 { 847 struct pci_dn *pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn); 848 struct eeh_dev *edev = pdn_to_eeh_dev(pdn); 849 int aer = edev ? edev->aer_cap : 0; 850 u32 ctrl; 851 852 pr_debug("%s: Secondary Reset PCI bus %04x:%02x with option %d\n", 853 __func__, pci_domain_nr(dev->bus), 854 dev->bus->number, option); 855 856 switch (option) { 857 case EEH_RESET_FUNDAMENTAL: 858 case EEH_RESET_HOT: 859 /* Don't report linkDown event */ 860 if (aer) { 861 eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK, 862 4, &ctrl); 863 ctrl |= PCI_ERR_UNC_SURPDN; 864 eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK, 865 4, ctrl); 866 } 867 868 eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl); 869 ctrl |= PCI_BRIDGE_CTL_BUS_RESET; 870 eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl); 871 872 msleep(EEH_PE_RST_HOLD_TIME); 873 break; 874 case EEH_RESET_DEACTIVATE: 875 eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl); 876 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; 877 eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl); 878 879 msleep(EEH_PE_RST_SETTLE_TIME); 880 881 /* Continue reporting linkDown event */ 882 if (aer) { 883 eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK, 884 4, &ctrl); 885 ctrl &= ~PCI_ERR_UNC_SURPDN; 886 eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK, 887 4, ctrl); 888 } 889 890 break; 891 } 892 893 return 0; 894 } 895 896 static int pnv_eeh_bridge_reset(struct pci_dev *pdev, int option) 897 { 898 struct pci_controller *hose = pci_bus_to_host(pdev->bus); 899 struct pnv_phb *phb = hose->private_data; 900 struct device_node *dn = pci_device_to_OF_node(pdev); 901 uint64_t id = PCI_SLOT_ID(phb->opal_id, 902 (pdev->bus->number << 8) | pdev->devfn); 903 uint8_t scope; 904 int64_t rc; 905 906 /* Hot reset to the bus if firmware cannot handle */ 907 if (!dn || !of_get_property(dn, "ibm,reset-by-firmware", NULL)) 908 return __pnv_eeh_bridge_reset(pdev, option); 909 910 pr_debug("%s: FW reset PCI bus %04x:%02x with option %d\n", 911 __func__, pci_domain_nr(pdev->bus), 912 pdev->bus->number, option); 913 914 switch (option) { 915 case EEH_RESET_FUNDAMENTAL: 916 scope = OPAL_RESET_PCI_FUNDAMENTAL; 917 break; 918 case EEH_RESET_HOT: 919 scope = OPAL_RESET_PCI_HOT; 920 break; 921 case EEH_RESET_DEACTIVATE: 922 return 0; 923 default: 924 dev_dbg(&pdev->dev, "%s: Unsupported reset %d\n", 925 __func__, option); 926 return -EINVAL; 927 } 928 929 rc = opal_pci_reset(id, scope, OPAL_ASSERT_RESET); 930 if (rc <= OPAL_SUCCESS) 931 goto out; 932 933 rc = pnv_eeh_poll(id); 934 out: 935 return (rc == OPAL_SUCCESS) ? 0 : -EIO; 936 } 937 938 void pnv_pci_reset_secondary_bus(struct pci_dev *dev) 939 { 940 struct pci_controller *hose; 941 942 if (pci_is_root_bus(dev->bus)) { 943 hose = pci_bus_to_host(dev->bus); 944 pnv_eeh_root_reset(hose, EEH_RESET_HOT); 945 pnv_eeh_root_reset(hose, EEH_RESET_DEACTIVATE); 946 } else { 947 pnv_eeh_bridge_reset(dev, EEH_RESET_HOT); 948 pnv_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE); 949 } 950 } 951 952 static void pnv_eeh_wait_for_pending(struct pci_dn *pdn, const char *type, 953 int pos, u16 mask) 954 { 955 int i, status = 0; 956 957 /* Wait for Transaction Pending bit to be cleared */ 958 for (i = 0; i < 4; i++) { 959 eeh_ops->read_config(pdn, pos, 2, &status); 960 if (!(status & mask)) 961 return; 962 963 msleep((1 << i) * 100); 964 } 965 966 pr_warn("%s: Pending transaction while issuing %sFLR to %04x:%02x:%02x.%01x\n", 967 __func__, type, 968 pdn->phb->global_number, pdn->busno, 969 PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); 970 } 971 972 static int pnv_eeh_do_flr(struct pci_dn *pdn, int option) 973 { 974 struct eeh_dev *edev = pdn_to_eeh_dev(pdn); 975 u32 reg = 0; 976 977 if (WARN_ON(!edev->pcie_cap)) 978 return -ENOTTY; 979 980 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCAP, 4, ®); 981 if (!(reg & PCI_EXP_DEVCAP_FLR)) 982 return -ENOTTY; 983 984 switch (option) { 985 case EEH_RESET_HOT: 986 case EEH_RESET_FUNDAMENTAL: 987 pnv_eeh_wait_for_pending(pdn, "", 988 edev->pcie_cap + PCI_EXP_DEVSTA, 989 PCI_EXP_DEVSTA_TRPND); 990 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL, 991 4, ®); 992 reg |= PCI_EXP_DEVCTL_BCR_FLR; 993 eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL, 994 4, reg); 995 msleep(EEH_PE_RST_HOLD_TIME); 996 break; 997 case EEH_RESET_DEACTIVATE: 998 eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL, 999 4, ®); 1000 reg &= ~PCI_EXP_DEVCTL_BCR_FLR; 1001 eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL, 1002 4, reg); 1003 msleep(EEH_PE_RST_SETTLE_TIME); 1004 break; 1005 } 1006 1007 return 0; 1008 } 1009 1010 static int pnv_eeh_do_af_flr(struct pci_dn *pdn, int option) 1011 { 1012 struct eeh_dev *edev = pdn_to_eeh_dev(pdn); 1013 u32 cap = 0; 1014 1015 if (WARN_ON(!edev->af_cap)) 1016 return -ENOTTY; 1017 1018 eeh_ops->read_config(pdn, edev->af_cap + PCI_AF_CAP, 1, &cap); 1019 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR)) 1020 return -ENOTTY; 1021 1022 switch (option) { 1023 case EEH_RESET_HOT: 1024 case EEH_RESET_FUNDAMENTAL: 1025 /* 1026 * Wait for Transaction Pending bit to clear. A word-aligned 1027 * test is used, so we use the conrol offset rather than status 1028 * and shift the test bit to match. 1029 */ 1030 pnv_eeh_wait_for_pending(pdn, "AF", 1031 edev->af_cap + PCI_AF_CTRL, 1032 PCI_AF_STATUS_TP << 8); 1033 eeh_ops->write_config(pdn, edev->af_cap + PCI_AF_CTRL, 1034 1, PCI_AF_CTRL_FLR); 1035 msleep(EEH_PE_RST_HOLD_TIME); 1036 break; 1037 case EEH_RESET_DEACTIVATE: 1038 eeh_ops->write_config(pdn, edev->af_cap + PCI_AF_CTRL, 1, 0); 1039 msleep(EEH_PE_RST_SETTLE_TIME); 1040 break; 1041 } 1042 1043 return 0; 1044 } 1045 1046 static int pnv_eeh_reset_vf_pe(struct eeh_pe *pe, int option) 1047 { 1048 struct eeh_dev *edev; 1049 struct pci_dn *pdn; 1050 int ret; 1051 1052 /* The VF PE should have only one child device */ 1053 edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry); 1054 pdn = eeh_dev_to_pdn(edev); 1055 if (!pdn) 1056 return -ENXIO; 1057 1058 ret = pnv_eeh_do_flr(pdn, option); 1059 if (!ret) 1060 return ret; 1061 1062 return pnv_eeh_do_af_flr(pdn, option); 1063 } 1064 1065 /** 1066 * pnv_eeh_reset - Reset the specified PE 1067 * @pe: EEH PE 1068 * @option: reset option 1069 * 1070 * Do reset on the indicated PE. For PCI bus sensitive PE, 1071 * we need to reset the parent p2p bridge. The PHB has to 1072 * be reinitialized if the p2p bridge is root bridge. For 1073 * PCI device sensitive PE, we will try to reset the device 1074 * through FLR. For now, we don't have OPAL APIs to do HARD 1075 * reset yet, so all reset would be SOFT (HOT) reset. 1076 */ 1077 static int pnv_eeh_reset(struct eeh_pe *pe, int option) 1078 { 1079 struct pci_controller *hose = pe->phb; 1080 struct pnv_phb *phb; 1081 struct pci_bus *bus; 1082 int64_t rc; 1083 1084 /* 1085 * For PHB reset, we always have complete reset. For those PEs whose 1086 * primary bus derived from root complex (root bus) or root port 1087 * (usually bus#1), we apply hot or fundamental reset on the root port. 1088 * For other PEs, we always have hot reset on the PE primary bus. 1089 * 1090 * Here, we have different design to pHyp, which always clear the 1091 * frozen state during PE reset. However, the good idea here from 1092 * benh is to keep frozen state before we get PE reset done completely 1093 * (until BAR restore). With the frozen state, HW drops illegal IO 1094 * or MMIO access, which can incur recrusive frozen PE during PE 1095 * reset. The side effect is that EEH core has to clear the frozen 1096 * state explicitly after BAR restore. 1097 */ 1098 if (pe->type & EEH_PE_PHB) 1099 return pnv_eeh_phb_reset(hose, option); 1100 1101 /* 1102 * The frozen PE might be caused by PAPR error injection 1103 * registers, which are expected to be cleared after hitting 1104 * frozen PE as stated in the hardware spec. Unfortunately, 1105 * that's not true on P7IOC. So we have to clear it manually 1106 * to avoid recursive EEH errors during recovery. 1107 */ 1108 phb = hose->private_data; 1109 if (phb->model == PNV_PHB_MODEL_P7IOC && 1110 (option == EEH_RESET_HOT || 1111 option == EEH_RESET_FUNDAMENTAL)) { 1112 rc = opal_pci_reset(phb->opal_id, 1113 OPAL_RESET_PHB_ERROR, 1114 OPAL_ASSERT_RESET); 1115 if (rc != OPAL_SUCCESS) { 1116 pr_warn("%s: Failure %lld clearing error injection registers\n", 1117 __func__, rc); 1118 return -EIO; 1119 } 1120 } 1121 1122 if (pe->type & EEH_PE_VF) 1123 return pnv_eeh_reset_vf_pe(pe, option); 1124 1125 bus = eeh_pe_bus_get(pe); 1126 if (!bus) { 1127 pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n", 1128 __func__, pe->phb->global_number, pe->addr); 1129 return -EIO; 1130 } 1131 1132 if (pci_is_root_bus(bus)) 1133 return pnv_eeh_root_reset(hose, option); 1134 1135 /* 1136 * For hot resets try use the generic PCI error recovery reset 1137 * functions. These correctly handles the case where the secondary 1138 * bus is behind a hotplug slot and it will use the slot provided 1139 * reset methods to prevent spurious hotplug events during the reset. 1140 * 1141 * Fundemental resets need to be handled internally to EEH since the 1142 * PCI core doesn't really have a concept of a fundemental reset, 1143 * mainly because there's no standard way to generate one. Only a 1144 * few devices require an FRESET so it should be fine. 1145 */ 1146 if (option != EEH_RESET_FUNDAMENTAL) { 1147 /* 1148 * NB: Skiboot and pnv_eeh_bridge_reset() also no-op the 1149 * de-assert step. It's like the OPAL reset API was 1150 * poorly designed or something... 1151 */ 1152 if (option == EEH_RESET_DEACTIVATE) 1153 return 0; 1154 1155 rc = pci_bus_error_reset(bus->self); 1156 if (!rc) 1157 return 0; 1158 } 1159 1160 /* otherwise, use the generic bridge reset. this might call into FW */ 1161 if (pci_is_root_bus(bus->parent)) 1162 return pnv_eeh_root_reset(hose, option); 1163 return pnv_eeh_bridge_reset(bus->self, option); 1164 } 1165 1166 /** 1167 * pnv_eeh_get_log - Retrieve error log 1168 * @pe: EEH PE 1169 * @severity: temporary or permanent error log 1170 * @drv_log: driver log to be combined with retrieved error log 1171 * @len: length of driver log 1172 * 1173 * Retrieve the temporary or permanent error from the PE. 1174 */ 1175 static int pnv_eeh_get_log(struct eeh_pe *pe, int severity, 1176 char *drv_log, unsigned long len) 1177 { 1178 if (!eeh_has_flag(EEH_EARLY_DUMP_LOG)) 1179 pnv_pci_dump_phb_diag_data(pe->phb, pe->data); 1180 1181 return 0; 1182 } 1183 1184 /** 1185 * pnv_eeh_configure_bridge - Configure PCI bridges in the indicated PE 1186 * @pe: EEH PE 1187 * 1188 * The function will be called to reconfigure the bridges included 1189 * in the specified PE so that the mulfunctional PE would be recovered 1190 * again. 1191 */ 1192 static int pnv_eeh_configure_bridge(struct eeh_pe *pe) 1193 { 1194 return 0; 1195 } 1196 1197 /** 1198 * pnv_pe_err_inject - Inject specified error to the indicated PE 1199 * @pe: the indicated PE 1200 * @type: error type 1201 * @func: specific error type 1202 * @addr: address 1203 * @mask: address mask 1204 * 1205 * The routine is called to inject specified error, which is 1206 * determined by @type and @func, to the indicated PE for 1207 * testing purpose. 1208 */ 1209 static int pnv_eeh_err_inject(struct eeh_pe *pe, int type, int func, 1210 unsigned long addr, unsigned long mask) 1211 { 1212 struct pci_controller *hose = pe->phb; 1213 struct pnv_phb *phb = hose->private_data; 1214 s64 rc; 1215 1216 if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR && 1217 type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) { 1218 pr_warn("%s: Invalid error type %d\n", 1219 __func__, type); 1220 return -ERANGE; 1221 } 1222 1223 if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR || 1224 func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) { 1225 pr_warn("%s: Invalid error function %d\n", 1226 __func__, func); 1227 return -ERANGE; 1228 } 1229 1230 /* Firmware supports error injection ? */ 1231 if (!opal_check_token(OPAL_PCI_ERR_INJECT)) { 1232 pr_warn("%s: Firmware doesn't support error injection\n", 1233 __func__); 1234 return -ENXIO; 1235 } 1236 1237 /* Do error injection */ 1238 rc = opal_pci_err_inject(phb->opal_id, pe->addr, 1239 type, func, addr, mask); 1240 if (rc != OPAL_SUCCESS) { 1241 pr_warn("%s: Failure %lld injecting error " 1242 "%d-%d to PHB#%x-PE#%x\n", 1243 __func__, rc, type, func, 1244 hose->global_number, pe->addr); 1245 return -EIO; 1246 } 1247 1248 return 0; 1249 } 1250 1251 static inline bool pnv_eeh_cfg_blocked(struct pci_dn *pdn) 1252 { 1253 struct eeh_dev *edev = pdn_to_eeh_dev(pdn); 1254 1255 if (!edev || !edev->pe) 1256 return false; 1257 1258 /* 1259 * We will issue FLR or AF FLR to all VFs, which are contained 1260 * in VF PE. It relies on the EEH PCI config accessors. So we 1261 * can't block them during the window. 1262 */ 1263 if (edev->physfn && (edev->pe->state & EEH_PE_RESET)) 1264 return false; 1265 1266 if (edev->pe->state & EEH_PE_CFG_BLOCKED) 1267 return true; 1268 1269 return false; 1270 } 1271 1272 static int pnv_eeh_read_config(struct pci_dn *pdn, 1273 int where, int size, u32 *val) 1274 { 1275 if (!pdn) 1276 return PCIBIOS_DEVICE_NOT_FOUND; 1277 1278 if (pnv_eeh_cfg_blocked(pdn)) { 1279 *val = 0xFFFFFFFF; 1280 return PCIBIOS_SET_FAILED; 1281 } 1282 1283 return pnv_pci_cfg_read(pdn, where, size, val); 1284 } 1285 1286 static int pnv_eeh_write_config(struct pci_dn *pdn, 1287 int where, int size, u32 val) 1288 { 1289 if (!pdn) 1290 return PCIBIOS_DEVICE_NOT_FOUND; 1291 1292 if (pnv_eeh_cfg_blocked(pdn)) 1293 return PCIBIOS_SET_FAILED; 1294 1295 return pnv_pci_cfg_write(pdn, where, size, val); 1296 } 1297 1298 static void pnv_eeh_dump_hub_diag_common(struct OpalIoP7IOCErrorData *data) 1299 { 1300 /* GEM */ 1301 if (data->gemXfir || data->gemRfir || 1302 data->gemRirqfir || data->gemMask || data->gemRwof) 1303 pr_info(" GEM: %016llx %016llx %016llx %016llx %016llx\n", 1304 be64_to_cpu(data->gemXfir), 1305 be64_to_cpu(data->gemRfir), 1306 be64_to_cpu(data->gemRirqfir), 1307 be64_to_cpu(data->gemMask), 1308 be64_to_cpu(data->gemRwof)); 1309 1310 /* LEM */ 1311 if (data->lemFir || data->lemErrMask || 1312 data->lemAction0 || data->lemAction1 || data->lemWof) 1313 pr_info(" LEM: %016llx %016llx %016llx %016llx %016llx\n", 1314 be64_to_cpu(data->lemFir), 1315 be64_to_cpu(data->lemErrMask), 1316 be64_to_cpu(data->lemAction0), 1317 be64_to_cpu(data->lemAction1), 1318 be64_to_cpu(data->lemWof)); 1319 } 1320 1321 static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose) 1322 { 1323 struct pnv_phb *phb = hose->private_data; 1324 struct OpalIoP7IOCErrorData *data = 1325 (struct OpalIoP7IOCErrorData*)phb->diag_data; 1326 long rc; 1327 1328 rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data)); 1329 if (rc != OPAL_SUCCESS) { 1330 pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n", 1331 __func__, phb->hub_id, rc); 1332 return; 1333 } 1334 1335 switch (be16_to_cpu(data->type)) { 1336 case OPAL_P7IOC_DIAG_TYPE_RGC: 1337 pr_info("P7IOC diag-data for RGC\n\n"); 1338 pnv_eeh_dump_hub_diag_common(data); 1339 if (data->rgc.rgcStatus || data->rgc.rgcLdcp) 1340 pr_info(" RGC: %016llx %016llx\n", 1341 be64_to_cpu(data->rgc.rgcStatus), 1342 be64_to_cpu(data->rgc.rgcLdcp)); 1343 break; 1344 case OPAL_P7IOC_DIAG_TYPE_BI: 1345 pr_info("P7IOC diag-data for BI %s\n\n", 1346 data->bi.biDownbound ? "Downbound" : "Upbound"); 1347 pnv_eeh_dump_hub_diag_common(data); 1348 if (data->bi.biLdcp0 || data->bi.biLdcp1 || 1349 data->bi.biLdcp2 || data->bi.biFenceStatus) 1350 pr_info(" BI: %016llx %016llx %016llx %016llx\n", 1351 be64_to_cpu(data->bi.biLdcp0), 1352 be64_to_cpu(data->bi.biLdcp1), 1353 be64_to_cpu(data->bi.biLdcp2), 1354 be64_to_cpu(data->bi.biFenceStatus)); 1355 break; 1356 case OPAL_P7IOC_DIAG_TYPE_CI: 1357 pr_info("P7IOC diag-data for CI Port %d\n\n", 1358 data->ci.ciPort); 1359 pnv_eeh_dump_hub_diag_common(data); 1360 if (data->ci.ciPortStatus || data->ci.ciPortLdcp) 1361 pr_info(" CI: %016llx %016llx\n", 1362 be64_to_cpu(data->ci.ciPortStatus), 1363 be64_to_cpu(data->ci.ciPortLdcp)); 1364 break; 1365 case OPAL_P7IOC_DIAG_TYPE_MISC: 1366 pr_info("P7IOC diag-data for MISC\n\n"); 1367 pnv_eeh_dump_hub_diag_common(data); 1368 break; 1369 case OPAL_P7IOC_DIAG_TYPE_I2C: 1370 pr_info("P7IOC diag-data for I2C\n\n"); 1371 pnv_eeh_dump_hub_diag_common(data); 1372 break; 1373 default: 1374 pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n", 1375 __func__, phb->hub_id, data->type); 1376 } 1377 } 1378 1379 static int pnv_eeh_get_pe(struct pci_controller *hose, 1380 u16 pe_no, struct eeh_pe **pe) 1381 { 1382 struct pnv_phb *phb = hose->private_data; 1383 struct pnv_ioda_pe *pnv_pe; 1384 struct eeh_pe *dev_pe; 1385 1386 /* 1387 * If PHB supports compound PE, to fetch 1388 * the master PE because slave PE is invisible 1389 * to EEH core. 1390 */ 1391 pnv_pe = &phb->ioda.pe_array[pe_no]; 1392 if (pnv_pe->flags & PNV_IODA_PE_SLAVE) { 1393 pnv_pe = pnv_pe->master; 1394 WARN_ON(!pnv_pe || 1395 !(pnv_pe->flags & PNV_IODA_PE_MASTER)); 1396 pe_no = pnv_pe->pe_number; 1397 } 1398 1399 /* Find the PE according to PE# */ 1400 dev_pe = eeh_pe_get(hose, pe_no, 0); 1401 if (!dev_pe) 1402 return -EEXIST; 1403 1404 /* Freeze the (compound) PE */ 1405 *pe = dev_pe; 1406 if (!(dev_pe->state & EEH_PE_ISOLATED)) 1407 phb->freeze_pe(phb, pe_no); 1408 1409 /* 1410 * At this point, we're sure the (compound) PE should 1411 * have been frozen. However, we still need poke until 1412 * hitting the frozen PE on top level. 1413 */ 1414 dev_pe = dev_pe->parent; 1415 while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) { 1416 int ret; 1417 ret = eeh_ops->get_state(dev_pe, NULL); 1418 if (ret <= 0 || eeh_state_active(ret)) { 1419 dev_pe = dev_pe->parent; 1420 continue; 1421 } 1422 1423 /* Frozen parent PE */ 1424 *pe = dev_pe; 1425 if (!(dev_pe->state & EEH_PE_ISOLATED)) 1426 phb->freeze_pe(phb, dev_pe->addr); 1427 1428 /* Next one */ 1429 dev_pe = dev_pe->parent; 1430 } 1431 1432 return 0; 1433 } 1434 1435 /** 1436 * pnv_eeh_next_error - Retrieve next EEH error to handle 1437 * @pe: Affected PE 1438 * 1439 * The function is expected to be called by EEH core while it gets 1440 * special EEH event (without binding PE). The function calls to 1441 * OPAL APIs for next error to handle. The informational error is 1442 * handled internally by platform. However, the dead IOC, dead PHB, 1443 * fenced PHB and frozen PE should be handled by EEH core eventually. 1444 */ 1445 static int pnv_eeh_next_error(struct eeh_pe **pe) 1446 { 1447 struct pci_controller *hose; 1448 struct pnv_phb *phb; 1449 struct eeh_pe *phb_pe, *parent_pe; 1450 __be64 frozen_pe_no; 1451 __be16 err_type, severity; 1452 long rc; 1453 int state, ret = EEH_NEXT_ERR_NONE; 1454 1455 /* 1456 * While running here, it's safe to purge the event queue. The 1457 * event should still be masked. 1458 */ 1459 eeh_remove_event(NULL, false); 1460 1461 list_for_each_entry(hose, &hose_list, list_node) { 1462 /* 1463 * If the subordinate PCI buses of the PHB has been 1464 * removed or is exactly under error recovery, we 1465 * needn't take care of it any more. 1466 */ 1467 phb = hose->private_data; 1468 phb_pe = eeh_phb_pe_get(hose); 1469 if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED)) 1470 continue; 1471 1472 rc = opal_pci_next_error(phb->opal_id, 1473 &frozen_pe_no, &err_type, &severity); 1474 if (rc != OPAL_SUCCESS) { 1475 pr_devel("%s: Invalid return value on " 1476 "PHB#%x (0x%lx) from opal_pci_next_error", 1477 __func__, hose->global_number, rc); 1478 continue; 1479 } 1480 1481 /* If the PHB doesn't have error, stop processing */ 1482 if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR || 1483 be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) { 1484 pr_devel("%s: No error found on PHB#%x\n", 1485 __func__, hose->global_number); 1486 continue; 1487 } 1488 1489 /* 1490 * Processing the error. We're expecting the error with 1491 * highest priority reported upon multiple errors on the 1492 * specific PHB. 1493 */ 1494 pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n", 1495 __func__, be16_to_cpu(err_type), 1496 be16_to_cpu(severity), be64_to_cpu(frozen_pe_no), 1497 hose->global_number); 1498 switch (be16_to_cpu(err_type)) { 1499 case OPAL_EEH_IOC_ERROR: 1500 if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) { 1501 pr_err("EEH: dead IOC detected\n"); 1502 ret = EEH_NEXT_ERR_DEAD_IOC; 1503 } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) { 1504 pr_info("EEH: IOC informative error " 1505 "detected\n"); 1506 pnv_eeh_get_and_dump_hub_diag(hose); 1507 ret = EEH_NEXT_ERR_NONE; 1508 } 1509 1510 break; 1511 case OPAL_EEH_PHB_ERROR: 1512 if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) { 1513 *pe = phb_pe; 1514 pr_err("EEH: dead PHB#%x detected, " 1515 "location: %s\n", 1516 hose->global_number, 1517 eeh_pe_loc_get(phb_pe)); 1518 ret = EEH_NEXT_ERR_DEAD_PHB; 1519 } else if (be16_to_cpu(severity) == 1520 OPAL_EEH_SEV_PHB_FENCED) { 1521 *pe = phb_pe; 1522 pr_err("EEH: Fenced PHB#%x detected, " 1523 "location: %s\n", 1524 hose->global_number, 1525 eeh_pe_loc_get(phb_pe)); 1526 ret = EEH_NEXT_ERR_FENCED_PHB; 1527 } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) { 1528 pr_info("EEH: PHB#%x informative error " 1529 "detected, location: %s\n", 1530 hose->global_number, 1531 eeh_pe_loc_get(phb_pe)); 1532 pnv_eeh_get_phb_diag(phb_pe); 1533 pnv_pci_dump_phb_diag_data(hose, phb_pe->data); 1534 ret = EEH_NEXT_ERR_NONE; 1535 } 1536 1537 break; 1538 case OPAL_EEH_PE_ERROR: 1539 /* 1540 * If we can't find the corresponding PE, we 1541 * just try to unfreeze. 1542 */ 1543 if (pnv_eeh_get_pe(hose, 1544 be64_to_cpu(frozen_pe_no), pe)) { 1545 pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n", 1546 hose->global_number, be64_to_cpu(frozen_pe_no)); 1547 pr_info("EEH: PHB location: %s\n", 1548 eeh_pe_loc_get(phb_pe)); 1549 1550 /* Dump PHB diag-data */ 1551 rc = opal_pci_get_phb_diag_data2(phb->opal_id, 1552 phb->diag_data, phb->diag_data_size); 1553 if (rc == OPAL_SUCCESS) 1554 pnv_pci_dump_phb_diag_data(hose, 1555 phb->diag_data); 1556 1557 /* Try best to clear it */ 1558 opal_pci_eeh_freeze_clear(phb->opal_id, 1559 be64_to_cpu(frozen_pe_no), 1560 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); 1561 ret = EEH_NEXT_ERR_NONE; 1562 } else if ((*pe)->state & EEH_PE_ISOLATED || 1563 eeh_pe_passed(*pe)) { 1564 ret = EEH_NEXT_ERR_NONE; 1565 } else { 1566 pr_err("EEH: Frozen PE#%x " 1567 "on PHB#%x detected\n", 1568 (*pe)->addr, 1569 (*pe)->phb->global_number); 1570 pr_err("EEH: PE location: %s, " 1571 "PHB location: %s\n", 1572 eeh_pe_loc_get(*pe), 1573 eeh_pe_loc_get(phb_pe)); 1574 ret = EEH_NEXT_ERR_FROZEN_PE; 1575 } 1576 1577 break; 1578 default: 1579 pr_warn("%s: Unexpected error type %d\n", 1580 __func__, be16_to_cpu(err_type)); 1581 } 1582 1583 /* 1584 * EEH core will try recover from fenced PHB or 1585 * frozen PE. In the time for frozen PE, EEH core 1586 * enable IO path for that before collecting logs, 1587 * but it ruins the site. So we have to dump the 1588 * log in advance here. 1589 */ 1590 if ((ret == EEH_NEXT_ERR_FROZEN_PE || 1591 ret == EEH_NEXT_ERR_FENCED_PHB) && 1592 !((*pe)->state & EEH_PE_ISOLATED)) { 1593 eeh_pe_mark_isolated(*pe); 1594 pnv_eeh_get_phb_diag(*pe); 1595 1596 if (eeh_has_flag(EEH_EARLY_DUMP_LOG)) 1597 pnv_pci_dump_phb_diag_data((*pe)->phb, 1598 (*pe)->data); 1599 } 1600 1601 /* 1602 * We probably have the frozen parent PE out there and 1603 * we need have to handle frozen parent PE firstly. 1604 */ 1605 if (ret == EEH_NEXT_ERR_FROZEN_PE) { 1606 parent_pe = (*pe)->parent; 1607 while (parent_pe) { 1608 /* Hit the ceiling ? */ 1609 if (parent_pe->type & EEH_PE_PHB) 1610 break; 1611 1612 /* Frozen parent PE ? */ 1613 state = eeh_ops->get_state(parent_pe, NULL); 1614 if (state > 0 && !eeh_state_active(state)) 1615 *pe = parent_pe; 1616 1617 /* Next parent level */ 1618 parent_pe = parent_pe->parent; 1619 } 1620 1621 /* We possibly migrate to another PE */ 1622 eeh_pe_mark_isolated(*pe); 1623 } 1624 1625 /* 1626 * If we have no errors on the specific PHB or only 1627 * informative error there, we continue poking it. 1628 * Otherwise, we need actions to be taken by upper 1629 * layer. 1630 */ 1631 if (ret > EEH_NEXT_ERR_INF) 1632 break; 1633 } 1634 1635 /* Unmask the event */ 1636 if (ret == EEH_NEXT_ERR_NONE && eeh_enabled()) 1637 enable_irq(eeh_event_irq); 1638 1639 return ret; 1640 } 1641 1642 static int pnv_eeh_restore_config(struct pci_dn *pdn) 1643 { 1644 struct eeh_dev *edev = pdn_to_eeh_dev(pdn); 1645 struct pnv_phb *phb; 1646 s64 ret = 0; 1647 int config_addr = (pdn->busno << 8) | (pdn->devfn); 1648 1649 if (!edev) 1650 return -EEXIST; 1651 1652 /* 1653 * We have to restore the PCI config space after reset since the 1654 * firmware can't see SRIOV VFs. 1655 * 1656 * FIXME: The MPS, error routing rules, timeout setting are worthy 1657 * to be exported by firmware in extendible way. 1658 */ 1659 if (edev->physfn) { 1660 ret = eeh_restore_vf_config(pdn); 1661 } else { 1662 phb = pdn->phb->private_data; 1663 ret = opal_pci_reinit(phb->opal_id, 1664 OPAL_REINIT_PCI_DEV, config_addr); 1665 } 1666 1667 if (ret) { 1668 pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n", 1669 __func__, config_addr, ret); 1670 return -EIO; 1671 } 1672 1673 return ret; 1674 } 1675 1676 static struct eeh_ops pnv_eeh_ops = { 1677 .name = "powernv", 1678 .init = pnv_eeh_init, 1679 .probe = pnv_eeh_probe, 1680 .set_option = pnv_eeh_set_option, 1681 .get_pe_addr = pnv_eeh_get_pe_addr, 1682 .get_state = pnv_eeh_get_state, 1683 .reset = pnv_eeh_reset, 1684 .get_log = pnv_eeh_get_log, 1685 .configure_bridge = pnv_eeh_configure_bridge, 1686 .err_inject = pnv_eeh_err_inject, 1687 .read_config = pnv_eeh_read_config, 1688 .write_config = pnv_eeh_write_config, 1689 .next_error = pnv_eeh_next_error, 1690 .restore_config = pnv_eeh_restore_config, 1691 .notify_resume = NULL 1692 }; 1693 1694 #ifdef CONFIG_PCI_IOV 1695 static void pnv_pci_fixup_vf_mps(struct pci_dev *pdev) 1696 { 1697 struct pci_dn *pdn = pci_get_pdn(pdev); 1698 int parent_mps; 1699 1700 if (!pdev->is_virtfn) 1701 return; 1702 1703 /* Synchronize MPS for VF and PF */ 1704 parent_mps = pcie_get_mps(pdev->physfn); 1705 if ((128 << pdev->pcie_mpss) >= parent_mps) 1706 pcie_set_mps(pdev, parent_mps); 1707 pdn->mps = pcie_get_mps(pdev); 1708 } 1709 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pnv_pci_fixup_vf_mps); 1710 #endif /* CONFIG_PCI_IOV */ 1711 1712 /** 1713 * eeh_powernv_init - Register platform dependent EEH operations 1714 * 1715 * EEH initialization on powernv platform. This function should be 1716 * called before any EEH related functions. 1717 */ 1718 static int __init eeh_powernv_init(void) 1719 { 1720 int ret = -EINVAL; 1721 1722 ret = eeh_ops_register(&pnv_eeh_ops); 1723 if (!ret) 1724 pr_info("EEH: PowerNV platform initialized\n"); 1725 else 1726 pr_info("EEH: Failed to initialize PowerNV platform (%d)\n", ret); 1727 1728 return ret; 1729 } 1730 machine_early_initcall(powernv, eeh_powernv_init); 1731