/*
 * This file implements the platform-dependent EEH operations on the
 * powernv platform. The powernv platform was created to run Linux
 * with full hypervisor support.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/opal.h>
#include <asm/ppc-pci.h>

#include "powernv.h"
#include "pci.h"

static bool pnv_eeh_nb_init = false;

/**
 * pnv_eeh_init - EEH platform dependent initialization
 *
 * EEH platform dependent initialization on powernv.
 */
static int pnv_eeh_init(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;

	/* We require OPALv3 */
	if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
		pr_warn("%s: OPALv3 is required!\n", __func__);
		return -EINVAL;
	}

	/* Set probe mode */
	eeh_add_flag(EEH_PROBE_MODE_DEV);

	/*
	 * P7IOC blocks PCI config access to frozen PEs, but PHB3
	 * doesn't. So we have to selectively enable I/O prior to
	 * collecting the error log.
	 */
	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;

		if (phb->model == PNV_PHB_MODEL_P7IOC)
			eeh_add_flag(EEH_ENABLE_IO_FOR_LOG);

		/*
		 * PE#0 should be regarded as valid by the EEH core
		 * if it's not the reserved one. Currently, we have
		 * the reserved PE#0 and PE#127 for PHB3 and P7IOC
		 * respectively. So we should regard PE#0 as valid
		 * for P7IOC.
		 */
		if (phb->ioda.reserved_pe != 0)
			eeh_add_flag(EEH_VALID_PE_ZERO);

		break;
	}

	return 0;
}

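/*
 * OPAL event notifier callback, registered in pnv_eeh_post_init().
 * Only OPAL_EVENT_PCI_ERROR is of interest here.
 */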
static int pnv_eeh_event(struct notifier_block *nb,
			 unsigned long events, void *change)
{
	uint64_t changed_evts = (uint64_t)change;

	/*
	 * We simply send a special EEH event if EEH has been
	 * enabled. Otherwise, we clear the pending event in
	 * case EEH gets enabled soon.
	 */
	if (!(changed_evts & OPAL_EVENT_PCI_ERROR) ||
	    !(events & OPAL_EVENT_PCI_ERROR))
		return 0;

	if (eeh_enabled())
		eeh_send_failure_event(NULL);
	else
		opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);

	return 0;
}

static struct notifier_block pnv_eeh_nb = {
	.notifier_call	= pnv_eeh_event,
	.next		= NULL,
	.priority	= 0
};

#ifdef CONFIG_DEBUG_FS
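/*
 * Write handler for the "err_injct" debugfs file. The input is five
 * colon-separated hex fields, "pe_no:type:func:addr:mask", which are
 * handed to eeh_ops->err_inject() for the PE identified by pe_no.
 * For example (the path is an assumption; it depends on where the
 * PHB's debugfs directory was created):
 *
 *   echo "2:0:0:0:0" > /sys/kernel/debug/powerpc/PCI0000/err_injct
 */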
static ssize_t pnv_eeh_ei_write(struct file *filp,
				const char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct pci_controller *hose = filp->private_data;
	struct eeh_dev *edev;
	struct eeh_pe *pe;
	int pe_no, type, func;
	unsigned long addr, mask;
	char buf[50];
	int ret;

	if (!eeh_ops || !eeh_ops->err_inject)
		return -ENXIO;

	/* Copy over argument buffer */
	ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
	if (!ret)
		return -EFAULT;

	/* Retrieve parameters */
	ret = sscanf(buf, "%x:%x:%x:%lx:%lx",
		     &pe_no, &type, &func, &addr, &mask);
	if (ret != 5)
		return -EINVAL;

	/* Retrieve PE */
	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
	if (!edev)
		return -ENOMEM;
	edev->phb = hose;
	edev->pe_config_addr = pe_no;
	pe = eeh_pe_get(edev);
	kfree(edev);
	if (!pe)
		return -ENODEV;

	/* Do error injection */
	ret = eeh_ops->err_inject(pe, type, func, addr, mask);
	return ret < 0 ? ret : count;
}

static const struct file_operations pnv_eeh_ei_fops = {
	.open	= simple_open,
	.llseek	= no_llseek,
	.write	= pnv_eeh_ei_write,
};

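/*
 * Accessors behind the "err_injct_*" debugfs files: read or write a
 * 64-bit big-endian PHB register at the given offset. The wrappers
 * below bind them to the outbound (0xD10) and inbound A/B (0xD90,
 * 0xE10) error-injection registers via DEFINE_SIMPLE_ATTRIBUTE.
 */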
static int pnv_eeh_dbgfs_set(void *data, int offset, u64 val)
{
	struct pci_controller *hose = data;
	struct pnv_phb *phb = hose->private_data;

	out_be64(phb->regs + offset, val);
	return 0;
}

static int pnv_eeh_dbgfs_get(void *data, int offset, u64 *val)
{
	struct pci_controller *hose = data;
	struct pnv_phb *phb = hose->private_data;

	*val = in_be64(phb->regs + offset);
	return 0;
}

static int pnv_eeh_outb_dbgfs_set(void *data, u64 val)
{
	return pnv_eeh_dbgfs_set(data, 0xD10, val);
}

static int pnv_eeh_outb_dbgfs_get(void *data, u64 *val)
{
	return pnv_eeh_dbgfs_get(data, 0xD10, val);
}

static int pnv_eeh_inbA_dbgfs_set(void *data, u64 val)
{
	return pnv_eeh_dbgfs_set(data, 0xD90, val);
}

static int pnv_eeh_inbA_dbgfs_get(void *data, u64 *val)
{
	return pnv_eeh_dbgfs_get(data, 0xD90, val);
}

static int pnv_eeh_inbB_dbgfs_set(void *data, u64 val)
{
	return pnv_eeh_dbgfs_set(data, 0xE10, val);
}

static int pnv_eeh_inbB_dbgfs_get(void *data, u64 *val)
{
	return pnv_eeh_dbgfs_get(data, 0xE10, val);
}

DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_outb_dbgfs_ops, pnv_eeh_outb_dbgfs_get,
			pnv_eeh_outb_dbgfs_set, "0x%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_inbA_dbgfs_ops, pnv_eeh_inbA_dbgfs_get,
			pnv_eeh_inbA_dbgfs_set, "0x%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_inbB_dbgfs_ops, pnv_eeh_inbB_dbgfs_get,
			pnv_eeh_inbB_dbgfs_set, "0x%llx\n");
#endif /* CONFIG_DEBUG_FS */

/**
 * pnv_eeh_post_init - EEH platform dependent post initialization
 *
 * EEH platform dependent post initialization on powernv. When
 * the function is called, the EEH PEs and devices should have
 * been built. If the I/O cache stuff has been built, EEH is
 * ready to supply service.
 */
static int pnv_eeh_post_init(void)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	int ret = 0;

	/* Register OPAL event notifier */
	if (!pnv_eeh_nb_init) {
		ret = opal_notifier_register(&pnv_eeh_nb);
		if (ret) {
			pr_warn("%s: Can't register OPAL event notifier (%d)\n",
				__func__, ret);
			return ret;
		}

		pnv_eeh_nb_init = true;
	}

	list_for_each_entry(hose, &hose_list, list_node) {
		phb = hose->private_data;

		/*
		 * If EEH is enabled, we're going to rely on that.
		 * Otherwise, we fall back to the conventional
		 * mechanism to clear frozen PEs during PCI config
		 * access.
		 */
		if (eeh_enabled())
			phb->flags |= PNV_PHB_FLAG_EEH;
		else
			phb->flags &= ~PNV_PHB_FLAG_EEH;

		/* Create debugfs entries */
#ifdef CONFIG_DEBUG_FS
		if (phb->has_dbgfs || !phb->dbgfs)
			continue;

		phb->has_dbgfs = 1;
		debugfs_create_file("err_injct", 0200,
				    phb->dbgfs, hose,
				    &pnv_eeh_ei_fops);

		debugfs_create_file("err_injct_outbound", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_outb_dbgfs_ops);
		debugfs_create_file("err_injct_inboundA", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_inbA_dbgfs_ops);
		debugfs_create_file("err_injct_inboundB", 0600,
				    phb->dbgfs, hose,
				    &pnv_eeh_inbB_dbgfs_ops);
#endif /* CONFIG_DEBUG_FS */
	}

	return ret;
}

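/*
 * pnv_eeh_cap_start() returns the offset of the standard PCI
 * capability list pointer (or 0 when the device advertises no
 * capabilities), and pnv_eeh_find_cap() walks that list for the
 * requested capability ID. This mirrors pci_find_capability(),
 * but goes through the EEH-aware pnv_pci_cfg_read() accessor.
 */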
static int pnv_eeh_cap_start(struct pci_dn *pdn)
{
	u32 status;

	if (!pdn)
		return 0;

	pnv_pci_cfg_read(pdn, PCI_STATUS, 2, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	return PCI_CAPABILITY_LIST;
}

static int pnv_eeh_find_cap(struct pci_dn *pdn, int cap)
{
	int pos = pnv_eeh_cap_start(pdn);
	int cnt = 48;	/* Maximal number of capabilities */
	u32 id;

	if (!pos)
		return 0;

	while (cnt--) {
		pnv_pci_cfg_read(pdn, pos, 1, &pos);
		if (pos < 0x40)
			break;

		pos &= ~3;
		pnv_pci_cfg_read(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
		if (id == 0xff)
			break;

		/* Found */
		if (id == cap)
			return pos;

		/* Next one */
		pos += PCI_CAP_LIST_NEXT;
	}

	return 0;
}

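/*
 * pnv_eeh_find_ecap() is the extended-capability counterpart: it
 * walks the PCIe extended capability chain starting at config
 * offset 0x100. The ttl counter bounds the walk so a corrupted
 * chain can't loop forever.
 */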
static int pnv_eeh_find_ecap(struct pci_dn *pdn, int cap)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 header;
	int pos = 256, ttl = (4096 - 256) / 8;

	if (!edev || !edev->pcie_cap)
		return 0;
	if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
		return 0;
	else if (!header)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 256)
			break;

		if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}

/**
 * pnv_eeh_probe - Do probe on PCI device
 * @pdn: PCI device node
 * @data: unused
 *
 * When the EEH module is installed during system boot, all PCI
 * devices are checked one by one to see if they support EEH. The
 * function is introduced for that purpose. By default, EEH has
 * been enabled on all PCI devices. That is to say, we only need
 * to do the necessary initialization on the corresponding EEH
 * device and create the PE accordingly.
 *
 * Note that it's unsafe to retrieve the EEH device through the
 * corresponding PCI device. During PCI device hotplug, which was
 * possibly triggered by the EEH core, the binding between the EEH
 * device and the PCI device isn't built yet.
 */
static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
{
	struct pci_controller *hose = pdn->phb;
	struct pnv_phb *phb = hose->private_data;
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	uint32_t pcie_flags;
	int ret;

	/*
	 * The root bridge doesn't have any subordinate PCI devices
	 * and we don't have an OF node for it, so it's not
	 * reasonable to continue the probing.
	 */
	if (!edev || edev->pe)
		return NULL;

	/* Skip for PCI-ISA bridge */
	if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
		return NULL;

	/* Initialize eeh device */
	edev->class_code = pdn->class_code;
	edev->mode &= 0xFFFFFF00;
	edev->pcix_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
	edev->pcie_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
	edev->aer_cap = pnv_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
	if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
		edev->mode |= EEH_DEV_BRIDGE;
		if (edev->pcie_cap) {
			pnv_pci_cfg_read(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
					 2, &pcie_flags);
			pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
			if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
				edev->mode |= EEH_DEV_ROOT_PORT;
			else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
				edev->mode |= EEH_DEV_DS_PORT;
		}
	}

	edev->config_addr = (pdn->busno << 8) | (pdn->devfn);
	edev->pe_config_addr = phb->ioda.pe_rmap[edev->config_addr];

	/* Create PE */
	ret = eeh_add_to_parent_pe(edev);
	if (ret) {
		pr_warn("%s: Can't add PCI dev %04x:%02x:%02x.%01x to parent PE (%d)\n",
			__func__, hose->global_number, pdn->busno,
			PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn), ret);
		return NULL;
	}

	/*
	 * If the PE contains any one of the following adapters, the
	 * PCI config space can't be accessed when dumping the EEH log.
	 * Otherwise, we will run into a fenced PHB caused by a shortage
	 * of outbound credits in the adapter. The PCI config access
	 * should be blocked until PE reset. MMIO access is always
	 * dropped by hardware. In order to drop PCI config requests,
	 * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which
	 * will be checked in the backend for PE state retrieval. If
	 * the PE becomes frozen for the first time and the flag has
	 * been set for the PE, we will set EEH_PE_CFG_BLOCKED for
	 * that PE to block its config space.
	 *
	 * Broadcom Austin 4-port NICs (14e4:1657)
	 * Broadcom Shiner 2-port 10G NICs (14e4:168e)
	 */
	if ((pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
	     pdn->device_id == 0x1657) ||
	    (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
	     pdn->device_id == 0x168e))
		edev->pe->state |= EEH_PE_CFG_RESTRICTED;

	/*
	 * Cache the PE primary bus, which can't be fetched when
	 * full hotplug is in progress. In that case, all child
	 * PCI devices of the PE are expected to be removed prior
	 * to the PE reset.
	 */
	if (!edev->pe->bus)
		edev->pe->bus = pci_find_bus(hose->global_number,
					     pdn->busno);

	/*
	 * Enable EEH explicitly so that we will do the EEH check
	 * while accessing I/O stuff
	 */
	eeh_add_flag(EEH_ENABLED);

	/* Save memory bars */
	eeh_save_bars(edev);

	return NULL;
}

/**
 * pnv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @pe: EEH PE
 * @option: operation to be issued
 *
 * The function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
 */
static int pnv_eeh_set_option(struct eeh_pe *pe, int option)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb = hose->private_data;
	bool freeze_pe = false;
	int opt, ret = 0;
	s64 rc;

	/* Sanity check on option */
	switch (option) {
	case EEH_OPT_DISABLE:
		return -EPERM;
	case EEH_OPT_ENABLE:
		return 0;
	case EEH_OPT_THAW_MMIO:
		opt = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO;
		break;
	case EEH_OPT_THAW_DMA:
		opt = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA;
		break;
	case EEH_OPT_FREEZE_PE:
		freeze_pe = true;
		opt = OPAL_EEH_ACTION_SET_FREEZE_ALL;
		break;
	default:
		pr_warn("%s: Invalid option %d\n", __func__, option);
		return -EINVAL;
	}

	/* If the PHB supports compound PEs, let it handle it */
	if (freeze_pe) {
		if (phb->freeze_pe) {
			phb->freeze_pe(phb, pe->addr);
		} else {
			rc = opal_pci_eeh_freeze_set(phb->opal_id,
						     pe->addr, opt);
			if (rc != OPAL_SUCCESS) {
				pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n",
					__func__, rc,
					phb->hose->global_number, pe->addr);
				ret = -EIO;
			}
		}
	} else {
		if (phb->unfreeze_pe) {
			ret = phb->unfreeze_pe(phb, pe->addr, opt);
		} else {
			rc = opal_pci_eeh_freeze_clear(phb->opal_id,
						       pe->addr, opt);
			if (rc != OPAL_SUCCESS) {
				pr_warn("%s: Failure %lld enable %d for PHB#%x-PE#%x\n",
					__func__, rc, option,
					phb->hose->global_number, pe->addr);
				ret = -EIO;
			}
		}
	}

	return ret;
}

/**
 * pnv_eeh_get_pe_addr - Retrieve PE address
 * @pe: EEH PE
 *
 * Retrieve the PE address according to the given traditional
 * PCI BDF (Bus/Device/Function) address.
 */
static int pnv_eeh_get_pe_addr(struct eeh_pe *pe)
{
	return pe->addr;
}

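/*
 * Fetch the PHB diag-data from OPAL into the buffer hanging off
 * the PE (pe->data). It's dumped later through
 * pnv_pci_dump_phb_diag_data() when the error is processed.
 */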
static void pnv_eeh_get_phb_diag(struct eeh_pe *pe)
{
	struct pnv_phb *phb = pe->phb->private_data;
	s64 rc;

	rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data,
					 PNV_PCI_DIAG_BUF_SIZE);
	if (rc != OPAL_SUCCESS)
		pr_warn("%s: Failure %lld getting PHB#%x diag-data\n",
			__func__, rc, pe->phb->global_number);
}

static int pnv_eeh_get_phb_state(struct eeh_pe *pe)
{
	struct pnv_phb *phb = pe->phb->private_data;
	u8 fstate;
	__be16 pcierr;
	s64 rc;
	int result = 0;

	rc = opal_pci_eeh_freeze_status(phb->opal_id,
					pe->addr,
					&fstate,
					&pcierr,
					NULL);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld getting PHB#%x state\n",
			__func__, rc, phb->hose->global_number);
		return EEH_STATE_NOT_SUPPORT;
	}

	/*
	 * Check the PHB state. If the PHB is frozen for the
	 * first time, dump the PHB diag-data.
	 */
	if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) {
		result = (EEH_STATE_MMIO_ACTIVE |
			  EEH_STATE_DMA_ACTIVE |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
	} else if (!(pe->state & EEH_PE_ISOLATED)) {
		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
		pnv_eeh_get_phb_diag(pe);

		if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
			pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
	}

	return result;
}

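/*
 * Retrieve the state of an individual (non-PHB) PE. The freeze state
 * comes either from the PHB's compound-PE hook (phb->get_pe_state)
 * or from OPAL's freeze-status call, and is translated into the
 * EEH_STATE_* bitmask expected by the EEH core.
 */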
static int pnv_eeh_get_pe_state(struct eeh_pe *pe)
{
	struct pnv_phb *phb = pe->phb->private_data;
	u8 fstate;
	__be16 pcierr;
	s64 rc;
	int result;

	/*
	 * We don't clobber the hardware frozen state until the PE
	 * reset is completed. In order to keep the EEH core moving
	 * forward, we have to return an operational state during
	 * the PE reset.
	 */
	if (pe->state & EEH_PE_RESET) {
		result = (EEH_STATE_MMIO_ACTIVE |
			  EEH_STATE_DMA_ACTIVE |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
		return result;
	}

	/*
	 * Fetch the PE state from hardware. If the PHB
	 * supports compound PEs, let it handle that.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe->addr);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe->addr,
						&fstate,
						&pcierr,
						NULL);
		if (rc != OPAL_SUCCESS) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n",
				__func__, rc, phb->hose->global_number,
				pe->addr);
			return EEH_STATE_NOT_SUPPORT;
		}
	}

	/* Figure out state */
	switch (fstate) {
	case OPAL_EEH_STOPPED_NOT_FROZEN:
		result = (EEH_STATE_MMIO_ACTIVE |
			  EEH_STATE_DMA_ACTIVE |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
		break;
	case OPAL_EEH_STOPPED_MMIO_FREEZE:
		result = (EEH_STATE_DMA_ACTIVE |
			  EEH_STATE_DMA_ENABLED);
		break;
	case OPAL_EEH_STOPPED_DMA_FREEZE:
		result = (EEH_STATE_MMIO_ACTIVE |
			  EEH_STATE_MMIO_ENABLED);
		break;
	case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
		result = 0;
		break;
	case OPAL_EEH_STOPPED_RESET:
		result = EEH_STATE_RESET_ACTIVE;
		break;
	case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
		result = EEH_STATE_UNAVAILABLE;
		break;
	case OPAL_EEH_STOPPED_PERM_UNAVAIL:
		result = EEH_STATE_NOT_SUPPORT;
		break;
	default:
		result = EEH_STATE_NOT_SUPPORT;
		pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n",
			__func__, phb->hose->global_number,
			pe->addr, fstate);
	}

	/*
	 * If the PHB supports compound PEs, freeze all slave
	 * PEs for consistency.
	 *
	 * If the PE is switching to frozen state for the
	 * first time, dump the PHB diag-data.
	 */
	if (!(result & EEH_STATE_NOT_SUPPORT) &&
	    !(result & EEH_STATE_UNAVAILABLE) &&
	    !(result & EEH_STATE_MMIO_ACTIVE) &&
	    !(result & EEH_STATE_DMA_ACTIVE)  &&
	    !(pe->state & EEH_PE_ISOLATED)) {
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe->addr);

		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
		pnv_eeh_get_phb_diag(pe);

		if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
			pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
	}

	return result;
}

/**
 * pnv_eeh_get_state - Retrieve PE state
 * @pe: EEH PE
 * @delay: delay while PE state is temporarily unavailable
 *
 * Retrieve the state of the specified PE. On an IODA-compatible
 * platform it should be retrieved from the IODA table, so we
 * prefer passing it down to the hardware implementation to
 * handle it.
 */
static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay)
{
	int ret;

	if (pe->type & EEH_PE_PHB)
		ret = pnv_eeh_get_phb_state(pe);
	else
		ret = pnv_eeh_get_pe_state(pe);

	if (!delay)
		return ret;

	/*
	 * If the PE state is temporarily unavailable,
	 * inform the EEH core to delay for the default
	 * period (1 second)
	 */
	*delay = 0;
	if (ret & EEH_STATE_UNAVAILABLE)
		*delay = 1000;

	return ret;
}

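/*
 * Poll an outstanding PHB operation to completion. A positive return
 * value from opal_pci_poll() is the number of milliseconds to wait
 * before polling again; zero or a negative value means the operation
 * has finished. Early in boot we busy-wait with udelay() since
 * msleep() isn't usable yet.
 */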
static s64 pnv_eeh_phb_poll(struct pnv_phb *phb)
{
	s64 rc = OPAL_HARDWARE;

	while (1) {
		rc = opal_pci_poll(phb->opal_id);
		if (rc <= 0)
			break;

		if (system_state < SYSTEM_RUNNING)
			udelay(1000 * rc);
		else
			msleep(rc);
	}

	return rc;
}

int pnv_eeh_phb_reset(struct pci_controller *hose, int option)
{
	struct pnv_phb *phb = hose->private_data;
	s64 rc = OPAL_HARDWARE;

	pr_debug("%s: Reset PHB#%x, option=%d\n",
		 __func__, hose->global_number, option);

	/* Issue PHB complete reset request */
	if (option == EEH_RESET_FUNDAMENTAL ||
	    option == EEH_RESET_HOT)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PHB_COMPLETE,
				    OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_DEACTIVATE)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PHB_COMPLETE,
				    OPAL_DEASSERT_RESET);
	if (rc < 0)
		goto out;

	/*
	 * Poll the state of the PHB until the request is done
	 * successfully. The PHB reset is usually a PHB complete
	 * reset followed by a hot reset on the root bus, so we
	 * also need the PCI bus settlement delay.
	 */
	rc = pnv_eeh_phb_poll(phb);
	if (option == EEH_RESET_DEACTIVATE) {
		if (system_state < SYSTEM_RUNNING)
			udelay(1000 * EEH_PE_RST_SETTLE_TIME);
		else
			msleep(EEH_PE_RST_SETTLE_TIME);
	}
out:
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}

static int pnv_eeh_root_reset(struct pci_controller *hose, int option)
{
	struct pnv_phb *phb = hose->private_data;
	s64 rc = OPAL_HARDWARE;

	pr_debug("%s: Reset PHB#%x, option=%d\n",
		 __func__, hose->global_number, option);

	/*
	 * During the reset deassert time, we needn't care about
	 * the reset scope because the firmware does nothing for
	 * fundamental or hot resets during the deassert phase.
	 */
	if (option == EEH_RESET_FUNDAMENTAL)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PCI_FUNDAMENTAL,
				    OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_HOT)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PCI_HOT,
				    OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_DEACTIVATE)
		rc = opal_pci_reset(phb->opal_id,
				    OPAL_RESET_PCI_HOT,
				    OPAL_DEASSERT_RESET);
	if (rc < 0)
		goto out;

	/* Poll the state of the PHB until the request is done */
	rc = pnv_eeh_phb_poll(phb);
	if (option == EEH_RESET_DEACTIVATE)
		msleep(EEH_PE_RST_SETTLE_TIME);
out:
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}

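/*
 * Reset the secondary bus under a p2p bridge by toggling the
 * bus-reset bit in the bridge control register. While the reset is
 * asserted, the surprise-down error is masked in the AER
 * uncorrectable error mask (if an AER capability exists) so the
 * link-down caused by the reset isn't reported as an error.
 */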
static int pnv_eeh_bridge_reset(struct pci_dev *dev, int option)
{
	struct device_node *dn = pci_device_to_OF_node(dev);
	struct eeh_dev *edev = of_node_to_eeh_dev(dn);
	int aer = edev ? edev->aer_cap : 0;
	u32 ctrl;

	pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n",
		 __func__, pci_domain_nr(dev->bus),
		 dev->bus->number, option);

	switch (option) {
	case EEH_RESET_FUNDAMENTAL:
	case EEH_RESET_HOT:
		/* Don't report linkDown event */
		if (aer) {
			eeh_ops->read_config(dn, aer + PCI_ERR_UNCOR_MASK,
					     4, &ctrl);
			ctrl |= PCI_ERR_UNC_SURPDN;
			eeh_ops->write_config(dn, aer + PCI_ERR_UNCOR_MASK,
					      4, ctrl);
		}

		eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &ctrl);
		ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
		eeh_ops->write_config(dn, PCI_BRIDGE_CONTROL, 2, ctrl);

		msleep(EEH_PE_RST_HOLD_TIME);
		break;
	case EEH_RESET_DEACTIVATE:
		eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &ctrl);
		ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
		eeh_ops->write_config(dn, PCI_BRIDGE_CONTROL, 2, ctrl);

		msleep(EEH_PE_RST_SETTLE_TIME);

		/* Continue reporting linkDown event */
		if (aer) {
			eeh_ops->read_config(dn, aer + PCI_ERR_UNCOR_MASK,
					     4, &ctrl);
			ctrl &= ~PCI_ERR_UNC_SURPDN;
			eeh_ops->write_config(dn, aer + PCI_ERR_UNCOR_MASK,
					      4, ctrl);
		}

		break;
	}

	return 0;
}

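/*
 * Reset the bus below the given bridge: a root bus goes through the
 * PHB (hot reset, then deassert), anything else through the bridge
 * control register above. Presumably this is hooked into the generic
 * PCI reset path elsewhere in the platform code.
 */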
void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
{
	struct pci_controller *hose;

	if (pci_is_root_bus(dev->bus)) {
		hose = pci_bus_to_host(dev->bus);
		pnv_eeh_root_reset(hose, EEH_RESET_HOT);
		pnv_eeh_root_reset(hose, EEH_RESET_DEACTIVATE);
	} else {
		pnv_eeh_bridge_reset(dev, EEH_RESET_HOT);
		pnv_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE);
	}
}

/**
 * pnv_eeh_reset - Reset the specified PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Do a reset on the indicated PE. For a PCI bus sensitive PE,
 * we need to reset the parent p2p bridge. The PHB has to be
 * reinitialized if the p2p bridge is the root bridge. For a
 * PCI device sensitive PE, we will try to reset the device
 * through FLR. For now, we don't have OPAL APIs to do a HARD
 * reset yet, so all resets would be SOFT (HOT) resets.
 */
static int pnv_eeh_reset(struct eeh_pe *pe, int option)
{
	struct pci_controller *hose = pe->phb;
	struct pci_bus *bus;
	int ret;

	/*
	 * For a PHB reset, we always have a complete reset. For PEs
	 * whose primary bus is derived from the root complex (root bus)
	 * or root port (usually bus#1), we apply a hot or fundamental
	 * reset on the root port. For other PEs, we always have a hot
	 * reset on the PE primary bus.
	 *
	 * Here, our design differs from pHyp, which always clears the
	 * frozen state during PE reset. The idea (from benh) is to keep
	 * the frozen state until the PE reset is done completely (until
	 * BAR restore). With the frozen state, the hardware drops
	 * illegal I/O or MMIO access, which can otherwise incur a
	 * recursive frozen PE during the PE reset. The side effect is
	 * that the EEH core has to clear the frozen state explicitly
	 * after the BAR restore.
	 */
	if (pe->type & EEH_PE_PHB) {
		ret = pnv_eeh_phb_reset(hose, option);
	} else {
		struct pnv_phb *phb;
		s64 rc;

		/*
		 * The frozen PE might be caused by the PAPR error
		 * injection registers, which are expected to be cleared
		 * after hitting the frozen PE as stated in the hardware
		 * spec. Unfortunately, that's not true on P7IOC. So we
		 * have to clear them manually to avoid recursive EEH
		 * errors during recovery.
		 */
		phb = hose->private_data;
		if (phb->model == PNV_PHB_MODEL_P7IOC &&
		    (option == EEH_RESET_HOT ||
		     option == EEH_RESET_FUNDAMENTAL)) {
			rc = opal_pci_reset(phb->opal_id,
					    OPAL_RESET_PHB_ERROR,
					    OPAL_ASSERT_RESET);
			if (rc != OPAL_SUCCESS) {
				pr_warn("%s: Failure %lld clearing error injection registers\n",
					__func__, rc);
				return -EIO;
			}
		}

		bus = eeh_pe_bus_get(pe);
		if (pci_is_root_bus(bus) ||
		    pci_is_root_bus(bus->parent))
			ret = pnv_eeh_root_reset(hose, option);
		else
			ret = pnv_eeh_bridge_reset(bus->self, option);
	}

	return ret;
}

/**
 * pnv_eeh_wait_state - Wait for PE state
 * @pe: EEH PE
 * @max_wait: maximal period in milliseconds
 *
 * Wait for the state of the associated PE. It might take some time
 * to retrieve the PE's state.
 */
static int pnv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
{
	int ret;
	int mwait;

	while (1) {
		ret = pnv_eeh_get_state(pe, &mwait);

		/*
		 * If the PE's state is temporarily unavailable,
		 * we have to wait for the specified time. Otherwise,
		 * the PE's state is returned immediately.
		 */
		if (ret != EEH_STATE_UNAVAILABLE)
			return ret;

		max_wait -= mwait;
		if (max_wait <= 0) {
			pr_warn("%s: Timeout getting PE#%x's state (%d)\n",
				__func__, pe->addr, max_wait);
			return EEH_STATE_NOT_SUPPORT;
		}

		msleep(mwait);
	}

	return EEH_STATE_NOT_SUPPORT;
}

/**
 * pnv_eeh_get_log - Retrieve error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 * @drv_log: driver log to be combined with the retrieved error log
 * @len: length of the driver log
 *
 * Retrieve the temporary or permanent error from the PE.
 */
static int pnv_eeh_get_log(struct eeh_pe *pe, int severity,
			   char *drv_log, unsigned long len)
{
	if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
		pnv_pci_dump_phb_diag_data(pe->phb, pe->data);

	return 0;
}

/**
 * pnv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 * The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE can be
 * recovered again.
 */
static int pnv_eeh_configure_bridge(struct eeh_pe *pe)
{
	return 0;
}

/**
 * pnv_eeh_err_inject - Inject specified error to the indicated PE
 * @pe: the indicated PE
 * @type: error type
 * @func: specific error type
 * @addr: address
 * @mask: address mask
 *
 * The routine is called to inject the specified error, which is
 * determined by @type and @func, to the indicated PE for
 * testing purposes.
 */
static int pnv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
			      unsigned long addr, unsigned long mask)
{
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb = hose->private_data;
	s64 rc;

	/* Sanity check on error type */
	if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR &&
	    type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) {
		pr_warn("%s: Invalid error type %d\n",
			__func__, type);
		return -ERANGE;
	}

	if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR ||
	    func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) {
		pr_warn("%s: Invalid error function %d\n",
			__func__, func);
		return -ERANGE;
	}

	/* Does the firmware support error injection? */
	if (!opal_check_token(OPAL_PCI_ERR_INJECT)) {
		pr_warn("%s: Firmware doesn't support error injection\n",
			__func__);
		return -ENXIO;
	}

	/* Do error injection */
	rc = opal_pci_err_inject(phb->opal_id, pe->addr,
				 type, func, addr, mask);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failure %lld injecting error %d-%d to PHB#%x-PE#%x\n",
			__func__, rc, type, func,
			hose->global_number, pe->addr);
		return -EIO;
	}

	return 0;
}

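/*
 * Config accessors wired up as eeh_ops->read_config/write_config.
 * When the owning PE carries EEH_PE_CFG_BLOCKED, config access is
 * refused (reads return all 1s) so a fenced PHB isn't poked before
 * the PE reset completes.
 */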
static inline bool pnv_eeh_cfg_blocked(struct device_node *dn)
{
	struct eeh_dev *edev = of_node_to_eeh_dev(dn);

	if (!edev || !edev->pe)
		return false;

	if (edev->pe->state & EEH_PE_CFG_BLOCKED)
		return true;

	return false;
}

static int pnv_eeh_read_config(struct device_node *dn,
			       int where, int size, u32 *val)
{
	struct pci_dn *pdn = PCI_DN(dn);

	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (pnv_eeh_cfg_blocked(dn)) {
		*val = 0xFFFFFFFF;
		return PCIBIOS_SET_FAILED;
	}

	return pnv_pci_cfg_read(pdn, where, size, val);
}

static int pnv_eeh_write_config(struct device_node *dn,
				int where, int size, u32 val)
{
	struct pci_dn *pdn = PCI_DN(dn);

	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (pnv_eeh_cfg_blocked(dn))
		return PCIBIOS_SET_FAILED;

	return pnv_pci_cfg_write(pdn, where, size, val);
}

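/*
 * Dump the GEM and LEM register blocks common to all P7IOC hub
 * diag-data layouts; pnv_eeh_get_and_dump_hub_diag() below fetches
 * the diag-data from OPAL and prints the per-type (RGC/BI/CI/MISC/
 * I2C) details on top of it.
 */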
static void pnv_eeh_dump_hub_diag_common(struct OpalIoP7IOCErrorData *data)
{
	/* GEM */
	if (data->gemXfir || data->gemRfir ||
	    data->gemRirqfir || data->gemMask || data->gemRwof)
		pr_info("  GEM: %016llx %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->gemXfir),
			be64_to_cpu(data->gemRfir),
			be64_to_cpu(data->gemRirqfir),
			be64_to_cpu(data->gemMask),
			be64_to_cpu(data->gemRwof));

	/* LEM */
	if (data->lemFir || data->lemErrMask ||
	    data->lemAction0 || data->lemAction1 || data->lemWof)
		pr_info("  LEM: %016llx %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrMask),
			be64_to_cpu(data->lemAction0),
			be64_to_cpu(data->lemAction1),
			be64_to_cpu(data->lemWof));
}

static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose)
{
	struct pnv_phb *phb = hose->private_data;
	struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
	long rc;

	rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n",
			__func__, phb->hub_id, rc);
		return;
	}

	switch (data->type) {
	case OPAL_P7IOC_DIAG_TYPE_RGC:
		pr_info("P7IOC diag-data for RGC\n\n");
		pnv_eeh_dump_hub_diag_common(data);
		if (data->rgc.rgcStatus || data->rgc.rgcLdcp)
			pr_info("  RGC: %016llx %016llx\n",
				be64_to_cpu(data->rgc.rgcStatus),
				be64_to_cpu(data->rgc.rgcLdcp));
		break;
	case OPAL_P7IOC_DIAG_TYPE_BI:
		pr_info("P7IOC diag-data for BI %s\n\n",
			data->bi.biDownbound ? "Downbound" : "Upbound");
		pnv_eeh_dump_hub_diag_common(data);
		if (data->bi.biLdcp0 || data->bi.biLdcp1 ||
		    data->bi.biLdcp2 || data->bi.biFenceStatus)
			pr_info("  BI: %016llx %016llx %016llx %016llx\n",
				be64_to_cpu(data->bi.biLdcp0),
				be64_to_cpu(data->bi.biLdcp1),
				be64_to_cpu(data->bi.biLdcp2),
				be64_to_cpu(data->bi.biFenceStatus));
		break;
	case OPAL_P7IOC_DIAG_TYPE_CI:
		pr_info("P7IOC diag-data for CI Port %d\n\n",
			data->ci.ciPort);
		pnv_eeh_dump_hub_diag_common(data);
		if (data->ci.ciPortStatus || data->ci.ciPortLdcp)
			pr_info("  CI: %016llx %016llx\n",
				be64_to_cpu(data->ci.ciPortStatus),
				be64_to_cpu(data->ci.ciPortLdcp));
		break;
	case OPAL_P7IOC_DIAG_TYPE_MISC:
		pr_info("P7IOC diag-data for MISC\n\n");
		pnv_eeh_dump_hub_diag_common(data);
		break;
	case OPAL_P7IOC_DIAG_TYPE_I2C:
		pr_info("P7IOC diag-data for I2C\n\n");
		pnv_eeh_dump_hub_diag_common(data);
		break;
	default:
		pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n",
			__func__, phb->hub_id, data->type);
	}
}

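/*
 * Map a raw PE number reported by OPAL to the EEH PE to be handled.
 * Slave PEs of a compound PE are folded into their master, the PE is
 * frozen if it isn't isolated yet, and the parent chain is walked so
 * that a frozen parent PE is reported in preference to its children.
 */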
static int pnv_eeh_get_pe(struct pci_controller *hose,
			  u16 pe_no, struct eeh_pe **pe)
{
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pnv_pe;
	struct eeh_pe *dev_pe;
	struct eeh_dev edev;

	/*
	 * If the PHB supports compound PEs, fetch the master PE
	 * because the slave PE is invisible to the EEH core.
	 */
	pnv_pe = &phb->ioda.pe_array[pe_no];
	if (pnv_pe->flags & PNV_IODA_PE_SLAVE) {
		pnv_pe = pnv_pe->master;
		WARN_ON(!pnv_pe ||
			!(pnv_pe->flags & PNV_IODA_PE_MASTER));
		pe_no = pnv_pe->pe_number;
	}

	/* Find the PE according to PE# */
	memset(&edev, 0, sizeof(struct eeh_dev));
	edev.phb = hose;
	edev.pe_config_addr = pe_no;
	dev_pe = eeh_pe_get(&edev);
	if (!dev_pe)
		return -EEXIST;

	/* Freeze the (compound) PE */
	*pe = dev_pe;
	if (!(dev_pe->state & EEH_PE_ISOLATED))
		phb->freeze_pe(phb, pe_no);

	/*
	 * At this point, we're sure the (compound) PE should
	 * have been frozen. However, we still need to poke until
	 * hitting the topmost frozen PE.
	 */
	dev_pe = dev_pe->parent;
	while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) {
		int ret;
		int active_flags = (EEH_STATE_MMIO_ACTIVE |
				    EEH_STATE_DMA_ACTIVE);

		ret = eeh_ops->get_state(dev_pe, NULL);
		if (ret <= 0 || (ret & active_flags) == active_flags) {
			dev_pe = dev_pe->parent;
			continue;
		}

		/* Frozen parent PE */
		*pe = dev_pe;
		if (!(dev_pe->state & EEH_PE_ISOLATED))
			phb->freeze_pe(phb, dev_pe->addr);

		/* Next one */
		dev_pe = dev_pe->parent;
	}

	return 0;
}

/**
 * pnv_eeh_next_error - Retrieve next EEH error to handle
 * @pe: Affected PE
 *
 * The function is expected to be called by the EEH core when it gets
 * a special EEH event (without a bound PE). The function calls the
 * OPAL APIs for the next error to handle. Informational errors are
 * handled internally by the platform. However, dead IOCs, dead PHBs,
 * fenced PHBs and frozen PEs should be handled by the EEH core
 * eventually.
 */
static int pnv_eeh_next_error(struct eeh_pe **pe)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct eeh_pe *phb_pe, *parent_pe;
	__be64 frozen_pe_no;
	__be16 err_type, severity;
	int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
	long rc;
	int state, ret = EEH_NEXT_ERR_NONE;

	/*
	 * While running here, it's safe to purge the event queue.
	 * And we should keep the cached OPAL notifier event
	 * synchronized between the kernel and firmware.
	 */
	eeh_remove_event(NULL, false);
	opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);

	list_for_each_entry(hose, &hose_list, list_node) {
		/*
		 * If the subordinate PCI buses of the PHB have been
		 * removed or the PHB is already under error recovery,
		 * we needn't take care of it any more.
		 */
		phb = hose->private_data;
		phb_pe = eeh_phb_pe_get(hose);
		if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED))
			continue;

		rc = opal_pci_next_error(phb->opal_id,
					 &frozen_pe_no, &err_type, &severity);
		if (rc != OPAL_SUCCESS) {
			pr_devel("%s: Invalid return value on PHB#%x (0x%lx) from opal_pci_next_error\n",
				 __func__, hose->global_number, rc);
			continue;
		}

		/* If the PHB doesn't have an error, stop processing */
		if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR ||
		    be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) {
			pr_devel("%s: No error found on PHB#%x\n",
				 __func__, hose->global_number);
			continue;
		}

		/*
		 * Process the error. We're expecting the error with
		 * the highest priority to be reported upon multiple
		 * errors on the specific PHB.
		 */
		pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
			 __func__, be16_to_cpu(err_type),
			 be16_to_cpu(severity), be64_to_cpu(frozen_pe_no),
			 hose->global_number);
		switch (be16_to_cpu(err_type)) {
		case OPAL_EEH_IOC_ERROR:
			if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) {
				pr_err("EEH: dead IOC detected\n");
				ret = EEH_NEXT_ERR_DEAD_IOC;
			} else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
				pr_info("EEH: IOC informative error detected\n");
				pnv_eeh_get_and_dump_hub_diag(hose);
				ret = EEH_NEXT_ERR_NONE;
			}

			break;
		case OPAL_EEH_PHB_ERROR:
			if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) {
				*pe = phb_pe;
				pr_err("EEH: dead PHB#%x detected, location: %s\n",
				       hose->global_number,
				       eeh_pe_loc_get(phb_pe));
				ret = EEH_NEXT_ERR_DEAD_PHB;
			} else if (be16_to_cpu(severity) ==
				   OPAL_EEH_SEV_PHB_FENCED) {
				*pe = phb_pe;
				pr_err("EEH: Fenced PHB#%x detected, location: %s\n",
				       hose->global_number,
				       eeh_pe_loc_get(phb_pe));
				ret = EEH_NEXT_ERR_FENCED_PHB;
			} else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
				pr_info("EEH: PHB#%x informative error detected, location: %s\n",
					hose->global_number,
					eeh_pe_loc_get(phb_pe));
				pnv_eeh_get_phb_diag(phb_pe);
				pnv_pci_dump_phb_diag_data(hose, phb_pe->data);
				ret = EEH_NEXT_ERR_NONE;
			}

			break;
		case OPAL_EEH_PE_ERROR:
			/*
			 * If we can't find the corresponding PE, we
			 * just try to unfreeze.
			 */
			if (pnv_eeh_get_pe(hose,
					   be64_to_cpu(frozen_pe_no), pe)) {
				/* Try our best to clear it */
				pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
					hose->global_number,
					be64_to_cpu(frozen_pe_no));
				pr_info("EEH: PHB location: %s\n",
					eeh_pe_loc_get(phb_pe));
				opal_pci_eeh_freeze_clear(phb->opal_id,
					be64_to_cpu(frozen_pe_no),
					OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
				ret = EEH_NEXT_ERR_NONE;
			} else if ((*pe)->state & EEH_PE_ISOLATED ||
				   eeh_pe_passed(*pe)) {
				ret = EEH_NEXT_ERR_NONE;
			} else {
				pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
				       (*pe)->addr,
				       (*pe)->phb->global_number);
				pr_err("EEH: PE location: %s, PHB location: %s\n",
				       eeh_pe_loc_get(*pe),
				       eeh_pe_loc_get(phb_pe));
				ret = EEH_NEXT_ERR_FROZEN_PE;
			}

			break;
		default:
			pr_warn("%s: Unexpected error type %d\n",
				__func__, be16_to_cpu(err_type));
		}

		/*
		 * The EEH core will try to recover from a fenced PHB or
		 * frozen PE. For a frozen PE, the EEH core enables the
		 * I/O path before collecting logs, but that disturbs
		 * the error site. So we have to dump the log in advance
		 * here.
		 */
		if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
		     ret == EEH_NEXT_ERR_FENCED_PHB) &&
		    !((*pe)->state & EEH_PE_ISOLATED)) {
			eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
			pnv_eeh_get_phb_diag(*pe);

			if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
				pnv_pci_dump_phb_diag_data((*pe)->phb,
							   (*pe)->data);
		}

		/*
		 * We possibly have a frozen parent PE out there and
		 * we have to handle the frozen parent PE first.
		 */
		if (ret == EEH_NEXT_ERR_FROZEN_PE) {
			parent_pe = (*pe)->parent;
			while (parent_pe) {
				/* Hit the ceiling ? */
				if (parent_pe->type & EEH_PE_PHB)
					break;

				/* Frozen parent PE ? */
				state = eeh_ops->get_state(parent_pe, NULL);
				if (state > 0 &&
				    (state & active_flags) != active_flags)
					*pe = parent_pe;

				/* Next parent level */
				parent_pe = parent_pe->parent;
			}

			/* We possibly migrated to another PE */
			eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
		}

		/*
		 * If we have no errors on the specific PHB or only
		 * informative errors there, we continue poking it.
		 * Otherwise, we need actions to be taken by the
		 * upper layer.
		 */
		if (ret > EEH_NEXT_ERR_INF)
			break;
	}

	return ret;
}

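/*
 * Have OPAL reinitialize a single PCI device (OPAL_REINIT_PCI_DEV),
 * restoring the platform-specific parts of its config space,
 * typically after a PE reset.
 */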
static int pnv_eeh_restore_config(struct device_node *dn)
{
	struct eeh_dev *edev = of_node_to_eeh_dev(dn);
	struct pnv_phb *phb;
	s64 ret;

	if (!edev)
		return -EEXIST;

	phb = edev->phb->private_data;
	ret = opal_pci_reinit(phb->opal_id,
			      OPAL_REINIT_PCI_DEV, edev->config_addr);
	if (ret) {
		pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
			__func__, edev->config_addr, ret);
		return -EIO;
	}

	return 0;
}

static struct eeh_ops pnv_eeh_ops = {
	.name			= "powernv",
	.init			= pnv_eeh_init,
	.post_init		= pnv_eeh_post_init,
	.probe			= pnv_eeh_probe,
	.set_option		= pnv_eeh_set_option,
	.get_pe_addr		= pnv_eeh_get_pe_addr,
	.get_state		= pnv_eeh_get_state,
	.reset			= pnv_eeh_reset,
	.wait_state		= pnv_eeh_wait_state,
	.get_log		= pnv_eeh_get_log,
	.configure_bridge	= pnv_eeh_configure_bridge,
	.err_inject		= pnv_eeh_err_inject,
	.read_config		= pnv_eeh_read_config,
	.write_config		= pnv_eeh_write_config,
	.next_error		= pnv_eeh_next_error,
	.restore_config		= pnv_eeh_restore_config
};

/**
 * eeh_powernv_init - Register platform dependent EEH operations
 *
 * EEH initialization on the powernv platform. This function should be
 * called before any EEH related functions.
 */
static int __init eeh_powernv_init(void)
{
	int ret = -EINVAL;

	eeh_set_pe_aux_size(PNV_PCI_DIAG_BUF_SIZE);
	ret = eeh_ops_register(&pnv_eeh_ops);
	if (!ret)
		pr_info("EEH: PowerNV platform initialized\n");
	else
		pr_info("EEH: Failed to initialize PowerNV platform (%d)\n", ret);

	return ret;
}
machine_early_initcall(powernv, eeh_powernv_init);