// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerNV Platform dependent EEH operations
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
 */

#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/opal.h>
#include <asm/ppc-pci.h>
#include <asm/pnv-pci.h>

#include "powernv.h"
#include "pci.h"

static int eeh_event_irq = -EINVAL;

void pnv_pcibios_bus_add_device(struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);

	if (eeh_has_flag(EEH_FORCE_DISABLED))
		return;

	pr_debug("%s: EEH: Setting up device %s.\n", __func__, pci_name(pdev));
	eeh_add_device_early(pdn);
	eeh_add_device_late(pdev);
	eeh_sysfs_add_device(pdev);
}

static int pnv_eeh_init(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	int max_diag_size = PNV_PCI_DIAG_BUF_SIZE;

	if (!firmware_has_feature(FW_FEATURE_OPAL)) {
		pr_warn("%s: OPAL is required !\n",
			__func__);
		return -EINVAL;
	}

	/* Set probe mode */
	eeh_add_flag(EEH_PROBE_MODE_DEV);

	/*
	 * P7IOC blocks PCI config access to a frozen PE, but PHB3
	 * doesn't do that. So we have to selectively enable I/O
	 * prior to collecting the error log.
	 */
	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;

		if (phb->model == PNV_PHB_MODEL_P7IOC)
			eeh_add_flag(EEH_ENABLE_IO_FOR_LOG);

		if (phb->diag_data_size > max_diag_size)
			max_diag_size = phb->diag_data_size;

		/*
		 * PE#0 should be regarded as valid by the EEH core
		 * if it's not the reserved one. Currently, we
		 * have the reserved PE#255 and PE#127 for PHB3
		 * and P7IOC respectively. So we should regard
		 * PE#0 as valid for PHB3 and P7IOC.
		 */
		if (phb->ioda.reserved_pe_idx != 0)
			eeh_add_flag(EEH_VALID_PE_ZERO);

		break;
	}

	eeh_set_pe_aux_size(max_diag_size);
	ppc_md.pcibios_bus_add_device = pnv_pcibios_bus_add_device;

	return 0;
}

static irqreturn_t pnv_eeh_event(int irq, void *data)
{
	/*
	 * We simply send a special EEH event if EEH has been
	 * enabled. We don't care about EEH events until we've
	 * finished processing the outstanding ones. Event processing
	 * gets unmasked in next_error() if EEH is enabled.
	 */
	disable_irq_nosync(irq);

	if (eeh_enabled())
		eeh_send_failure_event(NULL);

	return IRQ_HANDLED;
}

#ifdef CONFIG_DEBUG_FS
static ssize_t pnv_eeh_ei_write(struct file *filp,
				const char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct pci_controller *hose = filp->private_data;
	struct eeh_pe *pe;
	int pe_no, type, func;
	unsigned long addr, mask;
	char buf[50];
	int ret;

	if (!eeh_ops || !eeh_ops->err_inject)
		return -ENXIO;

	/* Copy over argument buffer */
	ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
	if (!ret)
		return -EFAULT;

	/* Retrieve parameters */
	ret = sscanf(buf, "%x:%x:%x:%lx:%lx",
		     &pe_no, &type, &func, &addr, &mask);
	if (ret != 5)
		return -EINVAL;

	/* Retrieve PE */
	pe = eeh_pe_get(hose, pe_no, 0);
	if (!pe)
		return -ENODEV;

	/* Do error injection */
	ret = eeh_ops->err_inject(pe, type, func, addr, mask);
	return ret < 0 ? ret : count;
}

static const struct file_operations pnv_eeh_ei_fops = {
	.open	= simple_open,
	.llseek	= no_llseek,
	.write	= pnv_eeh_ei_write,
};

static int pnv_eeh_dbgfs_set(void *data, int offset, u64 val)
{
	struct pci_controller *hose = data;
	struct pnv_phb *phb = hose->private_data;

	out_be64(phb->regs + offset, val);
	return 0;
}

static int pnv_eeh_dbgfs_get(void *data, int offset, u64 *val)
{
	struct pci_controller *hose = data;
	struct pnv_phb *phb = hose->private_data;

	*val = in_be64(phb->regs + offset);
	return 0;
}

#define PNV_EEH_DBGFS_ENTRY(name, reg)				\
static int pnv_eeh_dbgfs_set_##name(void *data, u64 val)	\
{								\
	return pnv_eeh_dbgfs_set(data, reg, val);		\
}								\
								\
static int pnv_eeh_dbgfs_get_##name(void *data, u64 *val)	\
{								\
	return pnv_eeh_dbgfs_get(data, reg, val);		\
}								\
								\
DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_dbgfs_ops_##name,		\
			pnv_eeh_dbgfs_get_##name,		\
			pnv_eeh_dbgfs_set_##name,		\
			"0x%llx\n")

PNV_EEH_DBGFS_ENTRY(outb, 0xD10);
PNV_EEH_DBGFS_ENTRY(inbA, 0xD90);
PNV_EEH_DBGFS_ENTRY(inbB, 0xE10);

#endif /* CONFIG_DEBUG_FS */

void pnv_eeh_enable_phbs(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;

	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;
		/*
		 * If EEH is enabled, we're going to rely on that.
		 * Otherwise, we fall back to the conventional mechanism
		 * of clearing frozen PEs during PCI config access.
		 */
		if (eeh_enabled())
			phb->flags |= PNV_PHB_FLAG_EEH;
		else
			phb->flags &= ~PNV_PHB_FLAG_EEH;
	}
}

/**
 * pnv_eeh_post_init - EEH platform dependent post initialization
 *
 * EEH platform dependent post initialization on powernv. When
 * the function is called, the EEH PEs and devices should have
 * been built. If the I/O address cache has been built, EEH is
 * ready to supply service.
 */
int pnv_eeh_post_init(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	int ret = 0;

	/* Probe devices & build address cache */
	eeh_probe_devices();
	eeh_addr_cache_build();

	/* Register OPAL event notifier */
	eeh_event_irq = opal_event_request(ilog2(OPAL_EVENT_PCI_ERROR));
	if (eeh_event_irq < 0) {
		pr_err("%s: Can't register OPAL event interrupt (%d)\n",
		       __func__, eeh_event_irq);
		return eeh_event_irq;
	}

	ret = request_irq(eeh_event_irq, pnv_eeh_event,
			  IRQ_TYPE_LEVEL_HIGH, "opal-eeh", NULL);
	if (ret < 0) {
		irq_dispose_mapping(eeh_event_irq);
		pr_err("%s: Can't request OPAL event interrupt (%d)\n",
		       __func__, eeh_event_irq);
		return ret;
	}

	if (!eeh_enabled())
		disable_irq(eeh_event_irq);

	pnv_eeh_enable_phbs();

	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;

		/* Create debugfs entries */
#ifdef CONFIG_DEBUG_FS
		if (phb->has_dbgfs || !phb->dbgfs)
			continue;

		phb->has_dbgfs = 1;
		debugfs_create_file("err_injct", 0200,
				    phb->dbgfs, hose,
				    &pnv_eeh_ei_fops);

		debugfs_create_file("err_injct_outbound", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_dbgfs_ops_outb);
		debugfs_create_file("err_injct_inboundA", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_dbgfs_ops_inbA);
		debugfs_create_file("err_injct_inboundB", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_dbgfs_ops_inbB);
#endif /* CONFIG_DEBUG_FS */
	}

	return ret;
}

static int pnv_eeh_find_cap(struct pci_dn *pdn, int cap)
{
	int pos = PCI_CAPABILITY_LIST;
	int cnt = 48;	/* Maximal number of capabilities */
	u32 status, id;

	if (!pdn)
		return 0;

	/* Check if the device supports capabilities */
	pnv_pci_cfg_read(pdn, PCI_STATUS, 2, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	while (cnt--) {
		pnv_pci_cfg_read(pdn, pos, 1, &pos);
		if (pos < 0x40)
			break;

		pos &= ~3;
		pnv_pci_cfg_read(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
		if (id == 0xff)
			break;

		/* Found */
		if (id == cap)
			return pos;

		/* Next one */
		pos += PCI_CAP_LIST_NEXT;
	}

	return 0;
}

static int pnv_eeh_find_ecap(struct pci_dn *pdn, int cap)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 header;
	int pos = 256, ttl = (4096 - 256) / 8;

	if (!edev || !edev->pcie_cap)
		return 0;
	if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
		return 0;
	else if (!header)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 256)
			break;

		if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}

/**
 * pnv_eeh_probe - Do probe on PCI device
 * @pdn: PCI device node
 * @data: unused
 *
 * When the EEH module is installed during system boot, all PCI devices
 * are checked one by one to see if they support EEH. The function
 * is introduced for that purpose. By default, EEH has been enabled
 * on all PCI devices. That is to say, we only need to do the necessary
 * initialization on the corresponding eeh device and create the PE
 * accordingly.
 *
 * Note that it's unsafe to retrieve the EEH device through
 * the corresponding PCI device.
 * During a PCI device hotplug, which was possibly triggered by the
 * EEH core, the binding between the EEH device and the PCI device
 * isn't built yet.
 */
static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
{
	struct pci_controller *hose = pdn->phb;
	struct pnv_phb *phb = hose->private_data;
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	uint32_t pcie_flags;
	int ret;
	int config_addr = (pdn->busno << 8) | (pdn->devfn);

	pr_debug("%s: probing %04x:%02x:%02x.%01x\n",
		 __func__, hose->global_number, pdn->busno,
		 PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));

	/*
	 * The root bridge doesn't have any subordinate PCI devices
	 * and we don't have an OF node for it, so it's not
	 * reasonable to continue probing there.
	 */
	if (!edev || edev->pe)
		return NULL;

	/* Skip for PCI-ISA bridge */
	if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
		return NULL;

	/* Initialize eeh device */
	edev->class_code = pdn->class_code;
	edev->mode	&= 0xFFFFFF00;
	edev->pcix_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
	edev->pcie_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
	edev->af_cap   = pnv_eeh_find_cap(pdn, PCI_CAP_ID_AF);
	edev->aer_cap  = pnv_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
	if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
		edev->mode |= EEH_DEV_BRIDGE;
		if (edev->pcie_cap) {
			pnv_pci_cfg_read(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
					 2, &pcie_flags);
			pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
			if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
				edev->mode |= EEH_DEV_ROOT_PORT;
			else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
				edev->mode |= EEH_DEV_DS_PORT;
		}
	}

	edev->pe_config_addr = phb->ioda.pe_rmap[config_addr];

	/* Create PE */
	ret = eeh_add_to_parent_pe(edev);
	if (ret) {
		pr_warn("%s: Can't add PCI dev %04x:%02x:%02x.%01x to parent PE (%x)\n",
			__func__, hose->global_number, pdn->busno,
			PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn), ret);
		return NULL;
	}

	/*
	 * If the PE contains any one of the following adapters, the
	 * PCI config space can't be accessed when dumping the EEH log.
	 * Otherwise, we will run into a fenced PHB caused by a shortage
	 * of outbound credits in the adapter. The PCI config access
	 * should be blocked until PE reset. MMIO access is certainly
	 * dropped by hardware. In order to drop PCI config requests,
	 * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which
	 * will be checked in the backend for PE state retrieval. If
	 * the PE becomes frozen for the first time and the flag has
	 * been set for the PE, we will set EEH_PE_CFG_BLOCKED for
	 * that PE to block its config space.
	 *
	 * Broadcom BCM5718 2-port NICs (14e4:1656)
	 * Broadcom Austin 4-port NICs (14e4:1657)
	 * Broadcom Shiner 4-port 1G NICs (14e4:168a)
	 * Broadcom Shiner 2-port 10G NICs (14e4:168e)
	 */
	if ((pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
	     pdn->device_id == 0x1656) ||
	    (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
	     pdn->device_id == 0x1657) ||
	    (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
	     pdn->device_id == 0x168a) ||
	    (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
	     pdn->device_id == 0x168e))
		edev->pe->state |= EEH_PE_CFG_RESTRICTED;

	/*
	 * Cache the PE primary bus, which can't be fetched when
	 * full hotplug is in progress.
	 * In that case, all child PCI devices of the PE are expected
	 * to be removed prior to PE reset.
	 */
	if (!(edev->pe->state & EEH_PE_PRI_BUS)) {
		edev->pe->bus = pci_find_bus(hose->global_number,
					     pdn->busno);
		if (edev->pe->bus)
			edev->pe->state |= EEH_PE_PRI_BUS;
	}

	/*
	 * Enable EEH explicitly so that we will do the EEH check
	 * while accessing I/O stuff
	 */
	if (!eeh_has_flag(EEH_ENABLED)) {
		enable_irq(eeh_event_irq);
		pnv_eeh_enable_phbs();
		eeh_add_flag(EEH_ENABLED);
	}

	/* Save memory bars */
	eeh_save_bars(edev);

	pr_debug("%s: EEH enabled on %02x:%02x.%01x PHB#%x-PE#%x\n",
		 __func__, pdn->busno, PCI_SLOT(pdn->devfn),
		 PCI_FUNC(pdn->devfn), edev->pe->phb->global_number,
		 edev->pe->addr);

	return NULL;
}

/**
 * pnv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @pe: EEH PE
 * @option: operation to be issued
 *
 * The function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
 */
static int pnv_eeh_set_option(struct eeh_pe *pe, int option)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb = hose->private_data;
	bool freeze_pe = false;
	int opt;
	s64 rc;

	switch (option) {
	case EEH_OPT_DISABLE:
		return -EPERM;
	case EEH_OPT_ENABLE:
		return 0;
	case EEH_OPT_THAW_MMIO:
		opt = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO;
		break;
	case EEH_OPT_THAW_DMA:
		opt = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA;
		break;
	case EEH_OPT_FREEZE_PE:
		freeze_pe = true;
		opt = OPAL_EEH_ACTION_SET_FREEZE_ALL;
		break;
	default:
		pr_warn("%s: Invalid option %d\n", __func__, option);
		return -EINVAL;
	}

	/* Freeze master and slave PEs if PHB supports compound PEs */
	if (freeze_pe) {
		if (phb->freeze_pe) {
			phb->freeze_pe(phb, pe->addr);
			return 0;
		}

		rc = opal_pci_eeh_freeze_set(phb->opal_id, pe->addr, opt);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe->addr);
			return -EIO;
		}

		return 0;
	}

	/* Unfreeze master and slave PEs if PHB supports */
	if (phb->unfreeze_pe)
		return phb->unfreeze_pe(phb, pe->addr, opt);

	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe->addr, opt);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld enable %d for PHB#%x-PE#%x\n",
			__func__, rc, option, phb->hose->global_number,
			pe->addr);
		return -EIO;
	}

	return 0;
}

/**
 * pnv_eeh_get_pe_addr - Retrieve PE address
 * @pe: EEH PE
 *
 * Retrieve the PE address according to the given traditional
 * PCI BDF (Bus/Device/Function) address.
 */
static int pnv_eeh_get_pe_addr(struct eeh_pe *pe)
{
	return pe->addr;
}

static void pnv_eeh_get_phb_diag(struct eeh_pe *pe)
{
	struct pnv_phb *phb = pe->phb->private_data;
	s64 rc;

	rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data,
					 phb->diag_data_size);
	if (rc != OPAL_SUCCESS)
		pr_warn("%s: Failure %lld getting PHB#%x diag-data\n",
			__func__, rc, pe->phb->global_number);
}

static int pnv_eeh_get_phb_state(struct eeh_pe *pe)
{
	struct pnv_phb *phb = pe->phb->private_data;
	u8 fstate = 0;
	__be16 pcierr = 0;
	s64 rc;
	int result = 0;

	rc = opal_pci_eeh_freeze_status(phb->opal_id,
					pe->addr,
					&fstate,
					&pcierr,
					NULL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld getting PHB#%x state\n",
			__func__, rc, phb->hose->global_number);
		return EEH_STATE_NOT_SUPPORT;
	}

	/*
	 * Check PHB state. If the PHB is frozen for the
	 * first time, dump the PHB diag-data.
	 */
	if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) {
		result = (EEH_STATE_MMIO_ACTIVE  |
			  EEH_STATE_DMA_ACTIVE   |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
	} else if (!(pe->state & EEH_PE_ISOLATED)) {
		eeh_pe_mark_isolated(pe);
		pnv_eeh_get_phb_diag(pe);

		if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
			pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
	}

	return result;
}

static int pnv_eeh_get_pe_state(struct eeh_pe *pe)
{
	struct pnv_phb *phb = pe->phb->private_data;
	u8 fstate = 0;
	__be16 pcierr = 0;
	s64 rc;
	int result;

	/*
	 * We don't clobber the hardware frozen state until PE
	 * reset is completed. In order to keep the EEH core
	 * moving forward, we have to return an operational
	 * state during PE reset.
	 */
	if (pe->state & EEH_PE_RESET) {
		result = (EEH_STATE_MMIO_ACTIVE  |
			  EEH_STATE_DMA_ACTIVE   |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
		return result;
	}

	/*
	 * Fetch PE state from hardware. If the PHB
	 * supports compound PEs, let it handle that.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe->addr);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe->addr,
						&fstate,
						&pcierr,
						NULL);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n",
				__func__, rc, phb->hose->global_number,
				pe->addr);
			return EEH_STATE_NOT_SUPPORT;
		}
	}

	/* Figure out state */
	switch (fstate) {
	case OPAL_EEH_STOPPED_NOT_FROZEN:
		result = (EEH_STATE_MMIO_ACTIVE  |
			  EEH_STATE_DMA_ACTIVE   |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
		break;
	case OPAL_EEH_STOPPED_MMIO_FREEZE:
		result = (EEH_STATE_DMA_ACTIVE |
			  EEH_STATE_DMA_ENABLED);
		break;
	case OPAL_EEH_STOPPED_DMA_FREEZE:
		result = (EEH_STATE_MMIO_ACTIVE |
			  EEH_STATE_MMIO_ENABLED);
		break;
	case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
		result = 0;
		break;
	case OPAL_EEH_STOPPED_RESET:
		result = EEH_STATE_RESET_ACTIVE;
		break;
	case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
		result = EEH_STATE_UNAVAILABLE;
		break;
	case OPAL_EEH_STOPPED_PERM_UNAVAIL:
		result = EEH_STATE_NOT_SUPPORT;
		break;
	default:
		result = EEH_STATE_NOT_SUPPORT;
		pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n",
			__func__, phb->hose->global_number,
			pe->addr, fstate);
	}

	/*
	 * If the PHB supports compound PEs, freeze all
	 * slave PEs for consistency.
	 *
	 * If the PE is switching to the frozen state for the
	 * first time, dump the PHB diag-data.
	 */
	if (!(result & EEH_STATE_NOT_SUPPORT) &&
	    !(result & EEH_STATE_UNAVAILABLE) &&
	    !(result & EEH_STATE_MMIO_ACTIVE) &&
	    !(result & EEH_STATE_DMA_ACTIVE)  &&
	    !(pe->state & EEH_PE_ISOLATED)) {
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe->addr);

		eeh_pe_mark_isolated(pe);
		pnv_eeh_get_phb_diag(pe);

		if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
			pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
	}

	return result;
}

/**
 * pnv_eeh_get_state - Retrieve PE state
 * @pe: EEH PE
 * @delay: delay while PE state is temporarily unavailable
 *
 * Retrieve the state of the specified PE. On an IODA-compatible
 * platform, it should be retrieved from the IODA table. Therefore,
 * we prefer passing it down to the hardware implementation to
 * handle it.
 */
static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay)
{
	int ret;

	if (pe->type & EEH_PE_PHB)
		ret = pnv_eeh_get_phb_state(pe);
	else
		ret = pnv_eeh_get_pe_state(pe);

	if (!delay)
		return ret;

	/*
	 * If the PE state is temporarily unavailable,
	 * tell the EEH core to delay for the default
	 * period (1 second)
	 */
	*delay = 0;
	if (ret & EEH_STATE_UNAVAILABLE)
		*delay = 1000;

	return ret;
}

static s64 pnv_eeh_poll(unsigned long id)
{
	s64 rc = OPAL_HARDWARE;

	while (1) {
		rc = opal_pci_poll(id);
		if (rc <= 0)
			break;

		if (system_state < SYSTEM_RUNNING)
			udelay(1000 * rc);
		else
			msleep(rc);
	}

	return rc;
}

int pnv_eeh_phb_reset(struct pci_controller *hose, int option)
{
	struct pnv_phb *phb = hose->private_data;
	s64 rc = OPAL_HARDWARE;

	pr_debug("%s: Reset PHB#%x, option=%d\n",
		 __func__, hose->global_number, option);

	/* Issue PHB complete reset request */
	if (option == EEH_RESET_FUNDAMENTAL ||
	    option == EEH_RESET_HOT)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PHB_COMPLETE,
				    OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_DEACTIVATE)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PHB_COMPLETE,
				    OPAL_DEASSERT_RESET);
	if (rc < 0)
		goto out;

	/*
	 * Poll the state of the PHB until the request is done
	 * successfully. The PHB reset is usually a PHB complete
	 * reset followed by a hot reset on the root bus, so we also
	 * need the PCI bus settlement delay.
	 */
	if (rc > 0)
		rc = pnv_eeh_poll(phb->opal_id);
	if (option == EEH_RESET_DEACTIVATE) {
		if (system_state < SYSTEM_RUNNING)
			udelay(1000 * EEH_PE_RST_SETTLE_TIME);
		else
			msleep(EEH_PE_RST_SETTLE_TIME);
	}
out:
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}

static int pnv_eeh_root_reset(struct pci_controller *hose, int option)
{
	struct pnv_phb *phb = hose->private_data;
	s64 rc = OPAL_HARDWARE;

	pr_debug("%s: Reset PHB#%x, option=%d\n",
		 __func__, hose->global_number, option);

	/*
	 * During the reset deassert time, we needn't care about
	 * the reset scope because the firmware does nothing
	 * for fundamental or hot reset during the deassert phase.
	 */
	if (option == EEH_RESET_FUNDAMENTAL)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PCI_FUNDAMENTAL,
				    OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_HOT)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PCI_HOT,
				    OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_DEACTIVATE)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PCI_HOT,
				    OPAL_DEASSERT_RESET);
	if (rc < 0)
		goto out;

	/* Poll the state of the PHB until the request is done */
	if (rc > 0)
		rc = pnv_eeh_poll(phb->opal_id);
	if (option == EEH_RESET_DEACTIVATE)
		msleep(EEH_PE_RST_SETTLE_TIME);
out:
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}

static int __pnv_eeh_bridge_reset(struct pci_dev *dev, int option)
{
	struct pci_dn *pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	int aer = edev ? edev->aer_cap : 0;
	u32 ctrl;

	pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n",
		 __func__, pci_domain_nr(dev->bus),
		 dev->bus->number, option);

	switch (option) {
	case EEH_RESET_FUNDAMENTAL:
	case EEH_RESET_HOT:
		/* Don't report linkDown event */
		if (aer) {
			eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
					     4, &ctrl);
			ctrl |= PCI_ERR_UNC_SURPDN;
			eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
					      4, ctrl);
		}

		eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
		ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
		eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);

		msleep(EEH_PE_RST_HOLD_TIME);
		break;
	case EEH_RESET_DEACTIVATE:
		eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
		ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
		eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);

		msleep(EEH_PE_RST_SETTLE_TIME);

		/* Continue reporting linkDown event */
		if (aer) {
			eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
					     4, &ctrl);
			ctrl &= ~PCI_ERR_UNC_SURPDN;
			eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
					      4, ctrl);
		}

		break;
	}

	return 0;
}

static int pnv_eeh_bridge_reset(struct pci_dev *pdev, int option)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct device_node *dn = pci_device_to_OF_node(pdev);
	uint64_t id = PCI_SLOT_ID(phb->opal_id,
				  (pdev->bus->number << 8) | pdev->devfn);
	uint8_t scope;
	int64_t rc;

	/* Hot reset to the bus if firmware cannot handle */
	if (!dn || !of_get_property(dn, "ibm,reset-by-firmware", NULL))
		return __pnv_eeh_bridge_reset(pdev, option);

	switch (option) {
	case EEH_RESET_FUNDAMENTAL:
		scope = OPAL_RESET_PCI_FUNDAMENTAL;
		break;
	case EEH_RESET_HOT:
		scope = OPAL_RESET_PCI_HOT;
		break;
	case EEH_RESET_DEACTIVATE:
		return 0;
	default:
		dev_dbg(&pdev->dev, "%s: Unsupported reset %d\n",
			__func__, option);
		return -EINVAL;
	}

	rc = opal_pci_reset(id, scope, OPAL_ASSERT_RESET);
	if (rc <= OPAL_SUCCESS)
		goto out;

	rc = pnv_eeh_poll(id);
out:
	return (rc == OPAL_SUCCESS) ? 0 : -EIO;
}

void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
{
	struct pci_controller *hose;

	if (pci_is_root_bus(dev->bus)) {
		hose = pci_bus_to_host(dev->bus);
		pnv_eeh_root_reset(hose, EEH_RESET_HOT);
		pnv_eeh_root_reset(hose, EEH_RESET_DEACTIVATE);
	} else {
		pnv_eeh_bridge_reset(dev, EEH_RESET_HOT);
		pnv_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE);
	}
}

static void pnv_eeh_wait_for_pending(struct pci_dn *pdn, const char *type,
				     int pos, u16 mask)
{
	int i, status = 0;

	/* Wait for Transaction Pending bit to be cleared */
	for (i = 0; i < 4; i++) {
		eeh_ops->read_config(pdn, pos, 2, &status);
		if (!(status & mask))
			return;

		msleep((1 << i) * 100);
	}

	pr_warn("%s: Pending transaction while issuing %sFLR to %04x:%02x:%02x.%01x\n",
		__func__, type,
		pdn->phb->global_number, pdn->busno,
		PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
}

static int pnv_eeh_do_flr(struct pci_dn *pdn, int option)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 reg = 0;

	if (WARN_ON(!edev->pcie_cap))
		return -ENOTTY;

	eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCAP, 4, &reg);
	if (!(reg & PCI_EXP_DEVCAP_FLR))
		return -ENOTTY;

	switch (option) {
	case EEH_RESET_HOT:
	case EEH_RESET_FUNDAMENTAL:
		pnv_eeh_wait_for_pending(pdn, "",
					 edev->pcie_cap + PCI_EXP_DEVSTA,
					 PCI_EXP_DEVSTA_TRPND);
		eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
				     4, &reg);
		reg |= PCI_EXP_DEVCTL_BCR_FLR;
		eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
				      4, reg);
		msleep(EEH_PE_RST_HOLD_TIME);
		break;
	case EEH_RESET_DEACTIVATE:
		eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
				     4, &reg);
		reg &= ~PCI_EXP_DEVCTL_BCR_FLR;
		eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
				      4, reg);
		msleep(EEH_PE_RST_SETTLE_TIME);
		break;
	}

	return 0;
}

static int pnv_eeh_do_af_flr(struct pci_dn *pdn, int option)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 cap = 0;

	if (WARN_ON(!edev->af_cap))
		return -ENOTTY;

	eeh_ops->read_config(pdn, edev->af_cap + PCI_AF_CAP, 1, &cap);
	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
		return -ENOTTY;

	switch (option) {
	case EEH_RESET_HOT:
	case EEH_RESET_FUNDAMENTAL:
		/*
		 * Wait for the Transaction Pending bit to clear. A
		 * word-aligned test is used, so we use the control
		 * offset rather than status and shift the test bit
		 * to match.
		 */
		pnv_eeh_wait_for_pending(pdn, "AF",
					 edev->af_cap + PCI_AF_CTRL,
					 PCI_AF_STATUS_TP << 8);
		eeh_ops->write_config(pdn, edev->af_cap + PCI_AF_CTRL,
				      1, PCI_AF_CTRL_FLR);
		msleep(EEH_PE_RST_HOLD_TIME);
		break;
	case EEH_RESET_DEACTIVATE:
		eeh_ops->write_config(pdn, edev->af_cap + PCI_AF_CTRL, 1, 0);
		msleep(EEH_PE_RST_SETTLE_TIME);
		break;
	}

	return 0;
}

static int pnv_eeh_reset_vf_pe(struct eeh_pe *pe, int option)
{
	struct eeh_dev *edev;
	struct pci_dn *pdn;
	int ret;

	/* The VF PE should have only one child device */
	edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry);
	pdn = eeh_dev_to_pdn(edev);
	if (!pdn)
		return -ENXIO;

	ret = pnv_eeh_do_flr(pdn, option);
	if (!ret)
		return ret;

	return pnv_eeh_do_af_flr(pdn, option);
}

/**
 * pnv_eeh_reset - Reset the specified PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Do reset on the indicated PE. For a PCI bus sensitive PE,
 * we need to reset the parent p2p bridge. The PHB has to
 * be reinitialized if the p2p bridge is the root bridge. For
 * a PCI device sensitive PE, we will try to reset the device
 * through FLR. For now, we don't have OPAL APIs to do a HARD
 * reset yet, so all resets are SOFT (HOT) resets.
 */
static int pnv_eeh_reset(struct eeh_pe *pe, int option)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb;
	struct pci_bus *bus;
	int64_t rc;

	/*
	 * For PHB reset, we always have a complete reset. For those PEs whose
	 * primary bus is derived from the root complex (root bus) or root
	 * port (usually bus#1), we apply a hot or fundamental reset on the
	 * root port. For other PEs, we always have a hot reset on the PE
	 * primary bus.
	 *
	 * Here, we have a different design from pHyp, which always clears the
	 * frozen state during PE reset. However, the good idea here from
	 * benh is to keep the frozen state until PE reset is done completely
	 * (until BAR restore). With the frozen state, HW drops illegal IO
	 * or MMIO access, which can incur a recursive frozen PE during PE
	 * reset. The side effect is that the EEH core has to clear the frozen
	 * state explicitly after BAR restore.
	 */
	if (pe->type & EEH_PE_PHB)
		return pnv_eeh_phb_reset(hose, option);

	/*
	 * The frozen PE might be caused by the PAPR error injection
	 * registers, which are expected to be cleared after hitting
	 * a frozen PE as stated in the hardware spec. Unfortunately,
	 * that's not true on P7IOC. So we have to clear it manually
	 * to avoid recursive EEH errors during recovery.
	 */
	phb = hose->private_data;
	if (phb->model == PNV_PHB_MODEL_P7IOC &&
	    (option == EEH_RESET_HOT ||
	     option == EEH_RESET_FUNDAMENTAL)) {
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PHB_ERROR,
				    OPAL_ASSERT_RESET);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld clearing error injection registers\n",
				__func__, rc);
			return -EIO;
		}
	}

	if (pe->type & EEH_PE_VF)
		return pnv_eeh_reset_vf_pe(pe, option);

	bus = eeh_pe_bus_get(pe);
	if (!bus) {
		pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
		       __func__, pe->phb->global_number, pe->addr);
		return -EIO;
	}

	/*
	 * If dealing with the root bus (or the bus underneath the
	 * root port), we reset the bus underneath the root port.
	 *
	 * The cxl driver depends on this behaviour for bi-modal card
	 * switching.
	 */
	if (pci_is_root_bus(bus) ||
	    pci_is_root_bus(bus->parent))
		return pnv_eeh_root_reset(hose, option);

	return pnv_eeh_bridge_reset(bus->self, option);
}

/**
 * pnv_eeh_get_log - Retrieve error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 * @drv_log: driver log to be combined with retrieved error log
 * @len: length of driver log
 *
 * Retrieve the temporary or permanent error from the PE.
 */
static int pnv_eeh_get_log(struct eeh_pe *pe, int severity,
			   char *drv_log, unsigned long len)
{
	if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
		pnv_pci_dump_phb_diag_data(pe->phb, pe->data);

	return 0;
}

/**
 * pnv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 * The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE can be recovered
 * again.
 */
static int pnv_eeh_configure_bridge(struct eeh_pe *pe)
{
	return 0;
}

/**
 * pnv_eeh_err_inject - Inject specified error to the indicated PE
 * @pe: the indicated PE
 * @type: error type
 * @func: specific error type
 * @addr: address
 * @mask: address mask
 *
 * The routine is called to inject the specified error, which is
 * determined by @type and @func, to the indicated PE for
 * testing purposes.
 */
static int pnv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
			      unsigned long addr, unsigned long mask)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb = hose->private_data;
	s64 rc;

	if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR &&
	    type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) {
		pr_warn("%s: Invalid error type %d\n",
			__func__, type);
		return -ERANGE;
	}

	if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR ||
	    func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) {
		pr_warn("%s: Invalid error function %d\n",
			__func__, func);
		return -ERANGE;
	}

	/* Does the firmware support error injection? */
	if (!opal_check_token(OPAL_PCI_ERR_INJECT)) {
		pr_warn("%s: Firmware doesn't support error injection\n",
			__func__);
		return -ENXIO;
	}

	/* Do error injection */
	rc = opal_pci_err_inject(phb->opal_id, pe->addr,
				 type, func, addr, mask);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld injecting error "
			"%d-%d to PHB#%x-PE#%x\n",
			__func__, rc, type, func,
			hose->global_number, pe->addr);
		return -EIO;
	}

	return 0;
}

static inline bool pnv_eeh_cfg_blocked(struct pci_dn *pdn)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);

	if (!edev || !edev->pe)
		return false;

	/*
	 * We will issue FLR or AF FLR to all VFs, which are contained
	 * in the VF PE. It relies on the EEH PCI config accessors, so
	 * we can't block them during that window.
	 */
	if (edev->physfn && (edev->pe->state & EEH_PE_RESET))
		return false;

	if (edev->pe->state & EEH_PE_CFG_BLOCKED)
		return true;

	return false;
}

static int pnv_eeh_read_config(struct pci_dn *pdn,
			       int where, int size, u32 *val)
{
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (pnv_eeh_cfg_blocked(pdn)) {
		*val = 0xFFFFFFFF;
		return PCIBIOS_SET_FAILED;
	}

	return pnv_pci_cfg_read(pdn, where, size, val);
}

static int pnv_eeh_write_config(struct pci_dn *pdn,
				int where, int size, u32 val)
{
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (pnv_eeh_cfg_blocked(pdn))
		return PCIBIOS_SET_FAILED;

	return pnv_pci_cfg_write(pdn, where, size, val);
}

static void pnv_eeh_dump_hub_diag_common(struct OpalIoP7IOCErrorData *data)
{
	/* GEM */
	if (data->gemXfir || data->gemRfir ||
	    data->gemRirqfir || data->gemMask || data->gemRwof)
		pr_info(" GEM: %016llx %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->gemXfir),
			be64_to_cpu(data->gemRfir),
			be64_to_cpu(data->gemRirqfir),
			be64_to_cpu(data->gemMask),
			be64_to_cpu(data->gemRwof));

	/* LEM */
	if (data->lemFir || data->lemErrMask ||
	    data->lemAction0 || data->lemAction1 || data->lemWof)
		pr_info(" LEM: %016llx %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrMask),
			be64_to_cpu(data->lemAction0),
			be64_to_cpu(data->lemAction1),
			be64_to_cpu(data->lemWof));
}

static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose)
{
	struct pnv_phb *phb = hose->private_data;
	struct OpalIoP7IOCErrorData *data =
		(struct OpalIoP7IOCErrorData *)phb->diag_data;
	long rc;

	rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n",
			__func__, phb->hub_id, rc);
		return;
	}

	switch (be16_to_cpu(data->type)) {
	case OPAL_P7IOC_DIAG_TYPE_RGC:
		pr_info("P7IOC diag-data for RGC\n\n");
		pnv_eeh_dump_hub_diag_common(data);
		if (data->rgc.rgcStatus || data->rgc.rgcLdcp)
			pr_info(" RGC: %016llx %016llx\n",
				be64_to_cpu(data->rgc.rgcStatus),
				be64_to_cpu(data->rgc.rgcLdcp));
		break;
	case OPAL_P7IOC_DIAG_TYPE_BI:
		pr_info("P7IOC diag-data for BI %s\n\n",
			data->bi.biDownbound ? "Downbound" : "Upbound");
"Downbound" : "Upbound"); 1332 pnv_eeh_dump_hub_diag_common(data); 1333 if (data->bi.biLdcp0 || data->bi.biLdcp1 || 1334 data->bi.biLdcp2 || data->bi.biFenceStatus) 1335 pr_info(" BI: %016llx %016llx %016llx %016llx\n", 1336 be64_to_cpu(data->bi.biLdcp0), 1337 be64_to_cpu(data->bi.biLdcp1), 1338 be64_to_cpu(data->bi.biLdcp2), 1339 be64_to_cpu(data->bi.biFenceStatus)); 1340 break; 1341 case OPAL_P7IOC_DIAG_TYPE_CI: 1342 pr_info("P7IOC diag-data for CI Port %d\n\n", 1343 data->ci.ciPort); 1344 pnv_eeh_dump_hub_diag_common(data); 1345 if (data->ci.ciPortStatus || data->ci.ciPortLdcp) 1346 pr_info(" CI: %016llx %016llx\n", 1347 be64_to_cpu(data->ci.ciPortStatus), 1348 be64_to_cpu(data->ci.ciPortLdcp)); 1349 break; 1350 case OPAL_P7IOC_DIAG_TYPE_MISC: 1351 pr_info("P7IOC diag-data for MISC\n\n"); 1352 pnv_eeh_dump_hub_diag_common(data); 1353 break; 1354 case OPAL_P7IOC_DIAG_TYPE_I2C: 1355 pr_info("P7IOC diag-data for I2C\n\n"); 1356 pnv_eeh_dump_hub_diag_common(data); 1357 break; 1358 default: 1359 pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n", 1360 __func__, phb->hub_id, data->type); 1361 } 1362 } 1363 1364 static int pnv_eeh_get_pe(struct pci_controller *hose, 1365 u16 pe_no, struct eeh_pe **pe) 1366 { 1367 struct pnv_phb *phb = hose->private_data; 1368 struct pnv_ioda_pe *pnv_pe; 1369 struct eeh_pe *dev_pe; 1370 1371 /* 1372 * If PHB supports compound PE, to fetch 1373 * the master PE because slave PE is invisible 1374 * to EEH core. 1375 */ 1376 pnv_pe = &phb->ioda.pe_array[pe_no]; 1377 if (pnv_pe->flags & PNV_IODA_PE_SLAVE) { 1378 pnv_pe = pnv_pe->master; 1379 WARN_ON(!pnv_pe || 1380 !(pnv_pe->flags & PNV_IODA_PE_MASTER)); 1381 pe_no = pnv_pe->pe_number; 1382 } 1383 1384 /* Find the PE according to PE# */ 1385 dev_pe = eeh_pe_get(hose, pe_no, 0); 1386 if (!dev_pe) 1387 return -EEXIST; 1388 1389 /* Freeze the (compound) PE */ 1390 *pe = dev_pe; 1391 if (!(dev_pe->state & EEH_PE_ISOLATED)) 1392 phb->freeze_pe(phb, pe_no); 1393 1394 /* 1395 * At this point, we're sure the (compound) PE should 1396 * have been frozen. However, we still need poke until 1397 * hitting the frozen PE on top level. 1398 */ 1399 dev_pe = dev_pe->parent; 1400 while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) { 1401 int ret; 1402 ret = eeh_ops->get_state(dev_pe, NULL); 1403 if (ret <= 0 || eeh_state_active(ret)) { 1404 dev_pe = dev_pe->parent; 1405 continue; 1406 } 1407 1408 /* Frozen parent PE */ 1409 *pe = dev_pe; 1410 if (!(dev_pe->state & EEH_PE_ISOLATED)) 1411 phb->freeze_pe(phb, dev_pe->addr); 1412 1413 /* Next one */ 1414 dev_pe = dev_pe->parent; 1415 } 1416 1417 return 0; 1418 } 1419 1420 /** 1421 * pnv_eeh_next_error - Retrieve next EEH error to handle 1422 * @pe: Affected PE 1423 * 1424 * The function is expected to be called by EEH core while it gets 1425 * special EEH event (without binding PE). The function calls to 1426 * OPAL APIs for next error to handle. The informational error is 1427 * handled internally by platform. However, the dead IOC, dead PHB, 1428 * fenced PHB and frozen PE should be handled by EEH core eventually. 1429 */ 1430 static int pnv_eeh_next_error(struct eeh_pe **pe) 1431 { 1432 struct pci_controller *hose; 1433 struct pnv_phb *phb; 1434 struct eeh_pe *phb_pe, *parent_pe; 1435 __be64 frozen_pe_no; 1436 __be16 err_type, severity; 1437 long rc; 1438 int state, ret = EEH_NEXT_ERR_NONE; 1439 1440 /* 1441 * While running here, it's safe to purge the event queue. The 1442 * event should still be masked. 
	 */
	eeh_remove_event(NULL, false);

	list_for_each_entry(hose, &hose_list, list_node) {
		/*
		 * If the subordinate PCI buses of the PHB have been
		 * removed or are already under error recovery, we
		 * needn't take care of them any more.
		 */
		phb = hose->private_data;
		phb_pe = eeh_phb_pe_get(hose);
		if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED))
			continue;

		rc = opal_pci_next_error(phb->opal_id,
					 &frozen_pe_no, &err_type, &severity);
		if (rc != OPAL_SUCCESS) {
			pr_devel("%s: Invalid return value on "
				 "PHB#%x (0x%lx) from opal_pci_next_error",
				 __func__, hose->global_number, rc);
			continue;
		}

		/* If the PHB doesn't have an error, stop processing */
		if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR ||
		    be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) {
			pr_devel("%s: No error found on PHB#%x\n",
				 __func__, hose->global_number);
			continue;
		}

		/*
		 * Process the error. We're expecting the error with
		 * the highest priority to be reported upon multiple
		 * errors on the specific PHB.
		 */
		pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
			 __func__, be16_to_cpu(err_type),
			 be16_to_cpu(severity), be64_to_cpu(frozen_pe_no),
			 hose->global_number);
		switch (be16_to_cpu(err_type)) {
		case OPAL_EEH_IOC_ERROR:
			if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) {
				pr_err("EEH: dead IOC detected\n");
				ret = EEH_NEXT_ERR_DEAD_IOC;
			} else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
				pr_info("EEH: IOC informative error "
					"detected\n");
				pnv_eeh_get_and_dump_hub_diag(hose);
				ret = EEH_NEXT_ERR_NONE;
			}

			break;
		case OPAL_EEH_PHB_ERROR:
			if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) {
				*pe = phb_pe;
				pr_err("EEH: dead PHB#%x detected, "
				       "location: %s\n",
				       hose->global_number,
				       eeh_pe_loc_get(phb_pe));
				ret = EEH_NEXT_ERR_DEAD_PHB;
			} else if (be16_to_cpu(severity) ==
				   OPAL_EEH_SEV_PHB_FENCED) {
				*pe = phb_pe;
				pr_err("EEH: Fenced PHB#%x detected, "
				       "location: %s\n",
				       hose->global_number,
				       eeh_pe_loc_get(phb_pe));
				ret = EEH_NEXT_ERR_FENCED_PHB;
			} else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
				pr_info("EEH: PHB#%x informative error "
					"detected, location: %s\n",
					hose->global_number,
					eeh_pe_loc_get(phb_pe));
				pnv_eeh_get_phb_diag(phb_pe);
				pnv_pci_dump_phb_diag_data(hose, phb_pe->data);
				ret = EEH_NEXT_ERR_NONE;
			}

			break;
		case OPAL_EEH_PE_ERROR:
			/*
			 * If we can't find the corresponding PE, we
			 * just try to unfreeze.
			 */
			if (pnv_eeh_get_pe(hose,
				be64_to_cpu(frozen_pe_no), pe)) {
				pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
					hose->global_number, be64_to_cpu(frozen_pe_no));
				pr_info("EEH: PHB location: %s\n",
					eeh_pe_loc_get(phb_pe));

				/* Dump PHB diag-data */
				rc = opal_pci_get_phb_diag_data2(phb->opal_id,
					phb->diag_data, phb->diag_data_size);
				if (rc == OPAL_SUCCESS)
					pnv_pci_dump_phb_diag_data(hose,
							phb->diag_data);

				/* Try best to clear it */
				opal_pci_eeh_freeze_clear(phb->opal_id,
					be64_to_cpu(frozen_pe_no),
					OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
				ret = EEH_NEXT_ERR_NONE;
			} else if ((*pe)->state & EEH_PE_ISOLATED ||
				   eeh_pe_passed(*pe)) {
				ret = EEH_NEXT_ERR_NONE;
			} else {
				pr_err("EEH: Frozen PE#%x "
				       "on PHB#%x detected\n",
				       (*pe)->addr,
				       (*pe)->phb->global_number);
				pr_err("EEH: PE location: %s, "
				       "PHB location: %s\n",
				       eeh_pe_loc_get(*pe),
				       eeh_pe_loc_get(phb_pe));
				ret = EEH_NEXT_ERR_FROZEN_PE;
			}

			break;
		default:
			pr_warn("%s: Unexpected error type %d\n",
				__func__, be16_to_cpu(err_type));
		}

		/*
		 * The EEH core will try to recover from a fenced PHB or
		 * frozen PE. For a frozen PE, the EEH core enables the
		 * I/O path before collecting logs, but that ruins the
		 * error site. So we have to dump the log in advance here.
		 */
		if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
		     ret == EEH_NEXT_ERR_FENCED_PHB) &&
		    !((*pe)->state & EEH_PE_ISOLATED)) {
			eeh_pe_mark_isolated(*pe);
			pnv_eeh_get_phb_diag(*pe);

			if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
				pnv_pci_dump_phb_diag_data((*pe)->phb,
							   (*pe)->data);
		}

		/*
		 * We probably have a frozen parent PE out there and
		 * we have to handle the frozen parent PE first.
		 */
		if (ret == EEH_NEXT_ERR_FROZEN_PE) {
			parent_pe = (*pe)->parent;
			while (parent_pe) {
				/* Hit the ceiling ? */
				if (parent_pe->type & EEH_PE_PHB)
					break;

				/* Frozen parent PE ? */
				state = eeh_ops->get_state(parent_pe, NULL);
				if (state > 0 && !eeh_state_active(state))
					*pe = parent_pe;

				/* Next parent level */
				parent_pe = parent_pe->parent;
			}

			/* We possibly migrate to another PE */
			eeh_pe_mark_isolated(*pe);
		}

		/*
		 * If we have no errors on the specific PHB or only
		 * informative errors there, we continue poking it.
		 * Otherwise, we need actions to be taken by the upper
		 * layer.
		 */
		if (ret > EEH_NEXT_ERR_INF)
			break;
	}

	/* Unmask the event */
	if (ret == EEH_NEXT_ERR_NONE && eeh_enabled())
		enable_irq(eeh_event_irq);

	return ret;
}

static int pnv_eeh_restore_config(struct pci_dn *pdn)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	struct pnv_phb *phb;
	s64 ret = 0;
	int config_addr = (pdn->busno << 8) | (pdn->devfn);

	if (!edev)
		return -EEXIST;

	/*
	 * We have to restore the PCI config space after reset since the
	 * firmware can't see SRIOV VFs.
	 *
	 * FIXME: The MPS, error routing rules and timeout settings are
	 * worth being exported by firmware in an extensible way.
	 */
	if (edev->physfn) {
		ret = eeh_restore_vf_config(pdn);
	} else {
		phb = pdn->phb->private_data;
		ret = opal_pci_reinit(phb->opal_id,
				      OPAL_REINIT_PCI_DEV, config_addr);
	}

	if (ret) {
		pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
			__func__, config_addr, ret);
		return -EIO;
	}

	return ret;
}

static struct eeh_ops pnv_eeh_ops = {
	.name			= "powernv",
	.init			= pnv_eeh_init,
	.probe			= pnv_eeh_probe,
	.set_option		= pnv_eeh_set_option,
	.get_pe_addr		= pnv_eeh_get_pe_addr,
	.get_state		= pnv_eeh_get_state,
	.reset			= pnv_eeh_reset,
	.get_log		= pnv_eeh_get_log,
	.configure_bridge	= pnv_eeh_configure_bridge,
	.err_inject		= pnv_eeh_err_inject,
	.read_config		= pnv_eeh_read_config,
	.write_config		= pnv_eeh_write_config,
	.next_error		= pnv_eeh_next_error,
	.restore_config		= pnv_eeh_restore_config,
	.notify_resume		= NULL
};

#ifdef CONFIG_PCI_IOV
static void pnv_pci_fixup_vf_mps(struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	int parent_mps;

	if (!pdev->is_virtfn)
		return;

	/* Synchronize MPS for VF and PF */
	parent_mps = pcie_get_mps(pdev->physfn);
	if ((128 << pdev->pcie_mpss) >= parent_mps)
		pcie_set_mps(pdev, parent_mps);
	pdn->mps = pcie_get_mps(pdev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pnv_pci_fixup_vf_mps);
#endif /* CONFIG_PCI_IOV */

/**
 * eeh_powernv_init - Register platform dependent EEH operations
 *
 * EEH initialization on the powernv platform. This function should be
 * called before any EEH related functions.
 */
static int __init eeh_powernv_init(void)
{
	int ret = -EINVAL;

	ret = eeh_ops_register(&pnv_eeh_ops);
	if (!ret)
		pr_info("EEH: PowerNV platform initialized\n");
	else
		pr_info("EEH: Failed to initialize PowerNV platform (%d)\n", ret);

	return ret;
}
machine_early_initcall(powernv, eeh_powernv_init);