// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>
#include <linux/sched/mm.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/firmware.h>
#include <asm/eeh_event.h>
#include <asm/eeh.h>

#include "powernv.h"
#include "pci.h"

static DEFINE_MUTEX(tunnel_mutex);

int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
{
	struct device_node *node = np;
	u32 bdfn;
	u64 phbid;
	int ret;

	ret = of_property_read_u32(np, "reg", &bdfn);
	if (ret)
		return -ENXIO;

	bdfn = ((bdfn & 0x00ffff00) >> 8);
	for (node = np; node; node = of_get_parent(node)) {
		if (!PCI_DN(node)) {
			of_node_put(node);
			break;
		}

		if (!of_device_is_compatible(node, "ibm,ioda2-phb") &&
		    !of_device_is_compatible(node, "ibm,ioda3-phb") &&
		    !of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb")) {
			of_node_put(node);
			continue;
		}

		ret = of_property_read_u64(node, "ibm,opal-phbid", &phbid);
		if (ret) {
			of_node_put(node);
			return -ENXIO;
		}

		if (of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb"))
			*id = PCI_PHB_SLOT_ID(phbid);
		else
			*id = PCI_SLOT_ID(phbid, bdfn);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id);

int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len)
{
	int64_t rc;

	if (!opal_check_token(OPAL_GET_DEVICE_TREE))
		return -ENXIO;

	rc = opal_get_device_tree(phandle, (uint64_t)buf, len);
	if (rc < OPAL_SUCCESS)
		return -EIO;

	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree);

int pnv_pci_get_presence_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE))
		return -ENXIO;

	rc = opal_pci_get_presence_state(id, (uint64_t)state);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state);

int pnv_pci_get_power_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_POWER_STATE))
		return -ENXIO;

	rc = opal_pci_get_power_state(id, (uint64_t)state);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_power_state);

int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg)
{
	struct opal_msg m;
	int token, ret;
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_SET_POWER_STATE))
		return -ENXIO;

	token = opal_async_get_token_interruptible();
	if (unlikely(token < 0))
		return token;

	rc = opal_pci_set_power_state(token, id, (uint64_t)&state);
	if (rc == OPAL_SUCCESS) {
		ret = 0;
		goto exit;
	} else if (rc != OPAL_ASYNC_COMPLETION) {
		ret = -EIO;
		goto exit;
	}

	ret = opal_async_wait_response(token, &m);
	if (ret < 0)
		goto exit;

	if (msg) {
		ret = 1;
		memcpy(msg, &m, sizeof(m));
	}

exit:
	opal_async_release_token(token);
	return ret;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);
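
/*
 * Allocate and wire up MSIs for @pdev: for each MSI descriptor, grab a
 * hardware IRQ from the PHB's MSI bitmap, map it to a Linux virq, and
 * have the PHB-specific msi_setup() hook compose the MSI message before
 * it is written to the device. On failure, the current entry's mapping
 * and bitmap slot are released before returning an error.
 */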
int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	struct msi_msg msg;
	int hwirq;
	unsigned int virq;
	int rc;

	if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
		return -ENODEV;

	if (pdev->no_64bit_msi && !phb->msi32_support)
		return -ENODEV;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}
		hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
		if (hwirq < 0) {
			pr_warn("%s: Failed to find a free MSI\n",
				pci_name(pdev));
			return -ENOSPC;
		}
		virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
		if (!virq) {
			pr_warn("%s: Failed to map MSI to linux irq\n",
				pci_name(pdev));
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return -ENOMEM;
		}
		rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
				    virq, entry->msi_attrib.is_64, &msg);
		if (rc) {
			pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
			irq_dispose_mapping(virq);
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return rc;
		}
		irq_set_msi_desc(virq, entry);
		pci_write_msi_msg(virq, &msg);
	}
	return 0;
}

void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	irq_hw_number_t hwirq;

	if (WARN_ON(!phb))
		return;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->irq)
			continue;
		hwirq = virq_to_hw(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
	}
}

/*
 * Nicely print the contents of the PE State Tables (PEST).
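 *
 * Only entries with the stopped-state bit set are printed, and runs of
 * identical A/B pairs are collapsed into a single "as above" line to
 * keep the log readable.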
 */
static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size)
{
	__be64 prevA = ULONG_MAX, prevB = ULONG_MAX;
	bool dup = false;
	int i;

	for (i = 0; i < pest_size; i++) {
		__be64 peA = be64_to_cpu(pestA[i]);
		__be64 peB = be64_to_cpu(pestB[i]);

		if (peA != prevA || peB != prevB) {
			if (dup) {
				pr_info("PE[..%03x] A/B: as above\n", i-1);
				dup = false;
			}
			prevA = peA;
			prevB = peB;
			if (peA & PNV_IODA_STOPPED_STATE ||
			    peB & PNV_IODA_STOPPED_STATE)
				pr_info("PE[%03x] A/B: %016llx %016llx\n",
					i, peA, peB);
		} else if (!dup && (peA & PNV_IODA_STOPPED_STATE ||
				    peB & PNV_IODA_STOPPED_STATE)) {
			dup = true;
		}
	}
}
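
/*
 * Dump the P7IOC-specific diag-data buffer. All fields come back from
 * OPAL in big-endian form and are converted before printing; register
 * groups that read all-zero are skipped to keep the output compact.
 */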
static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
					 struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoP7IOCPhbErrorData *data;

	data = (struct OpalIoP7IOCPhbErrorData *)common;
	pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->p7iocPlssr || data->p7iocCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->p7iocPlssr),
			be64_to_cpu(data->p7iocCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_P7IOC_NUM_PEST_REGS);
}

static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb3ErrorData *data;

	data = (struct OpalIoPhb3ErrorData *)common;
	pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->nFir)
		pr_info("nFir: %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB3_NUM_PEST_REGS);
}
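
/*
 * The PHB4 diag-data layout differs from PHB3: there is no UTL status
 * block, and extra error banks (TXE, RXE ARB/MRG/TCE, PBL, PCIe DLP
 * and REGB) are reported alongside the common PHB registers.
 */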
static void pnv_pci_dump_phb4_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb4ErrorData *data;

	data = (struct OpalIoPhb4ErrorData *)common;
	pr_info("PHB4 PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId)
		pr_info("sourceId: %08x\n", be32_to_cpu(data->sourceId));
	if (data->nFir)
		pr_info("nFir: %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->phbTxeErrorStatus)
		pr_info("PhbTxeErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbTxeErrorStatus),
			be64_to_cpu(data->phbTxeFirstErrorStatus),
			be64_to_cpu(data->phbTxeErrorLog0),
			be64_to_cpu(data->phbTxeErrorLog1));
	if (data->phbRxeArbErrorStatus)
		pr_info("RxeArbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeArbErrorStatus),
			be64_to_cpu(data->phbRxeArbFirstErrorStatus),
			be64_to_cpu(data->phbRxeArbErrorLog0),
			be64_to_cpu(data->phbRxeArbErrorLog1));
	if (data->phbRxeMrgErrorStatus)
		pr_info("RxeMrgErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeMrgErrorStatus),
			be64_to_cpu(data->phbRxeMrgFirstErrorStatus),
			be64_to_cpu(data->phbRxeMrgErrorLog0),
			be64_to_cpu(data->phbRxeMrgErrorLog1));
	if (data->phbRxeTceErrorStatus)
		pr_info("RxeTceErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeTceErrorStatus),
			be64_to_cpu(data->phbRxeTceFirstErrorStatus),
			be64_to_cpu(data->phbRxeTceErrorLog0),
			be64_to_cpu(data->phbRxeTceErrorLog1));

	if (data->phbPblErrorStatus)
		pr_info("PblErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbPblErrorStatus),
			be64_to_cpu(data->phbPblFirstErrorStatus),
			be64_to_cpu(data->phbPblErrorLog0),
			be64_to_cpu(data->phbPblErrorLog1));
	if (data->phbPcieDlpErrorStatus)
		pr_info("PcieDlp: %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbPcieDlpErrorLog1),
			be64_to_cpu(data->phbPcieDlpErrorLog2),
			be64_to_cpu(data->phbPcieDlpErrorStatus));
	if (data->phbRegbErrorStatus)
		pr_info("RegbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRegbErrorStatus),
			be64_to_cpu(data->phbRegbFirstErrorStatus),
			be64_to_cpu(data->phbRegbErrorLog0),
			be64_to_cpu(data->phbRegbErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB4_NUM_PEST_REGS);
}
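
/*
 * Entry point for PHB diag-data dumping: decode the common header and
 * dispatch to the dumper matching the reported ioType.
 */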
518 pr_info("PcieDlp: %016llx %016llx %016llx\n", 519 be64_to_cpu(data->phbPcieDlpErrorLog1), 520 be64_to_cpu(data->phbPcieDlpErrorLog2), 521 be64_to_cpu(data->phbPcieDlpErrorStatus)); 522 if (data->phbRegbErrorStatus) 523 pr_info("RegbErr: %016llx %016llx %016llx %016llx\n", 524 be64_to_cpu(data->phbRegbErrorStatus), 525 be64_to_cpu(data->phbRegbFirstErrorStatus), 526 be64_to_cpu(data->phbRegbErrorLog0), 527 be64_to_cpu(data->phbRegbErrorLog1)); 528 529 530 pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB4_NUM_PEST_REGS); 531 } 532 533 void pnv_pci_dump_phb_diag_data(struct pci_controller *hose, 534 unsigned char *log_buff) 535 { 536 struct OpalIoPhbErrorCommon *common; 537 538 if (!hose || !log_buff) 539 return; 540 541 common = (struct OpalIoPhbErrorCommon *)log_buff; 542 switch (be32_to_cpu(common->ioType)) { 543 case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: 544 pnv_pci_dump_p7ioc_diag_data(hose, common); 545 break; 546 case OPAL_PHB_ERROR_DATA_TYPE_PHB3: 547 pnv_pci_dump_phb3_diag_data(hose, common); 548 break; 549 case OPAL_PHB_ERROR_DATA_TYPE_PHB4: 550 pnv_pci_dump_phb4_diag_data(hose, common); 551 break; 552 default: 553 pr_warn("%s: Unrecognized ioType %d\n", 554 __func__, be32_to_cpu(common->ioType)); 555 } 556 } 557 558 static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no) 559 { 560 unsigned long flags, rc; 561 int has_diag, ret = 0; 562 563 spin_lock_irqsave(&phb->lock, flags); 564 565 /* Fetch PHB diag-data */ 566 rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data, 567 phb->diag_data_size); 568 has_diag = (rc == OPAL_SUCCESS); 569 570 /* If PHB supports compound PE, to handle it */ 571 if (phb->unfreeze_pe) { 572 ret = phb->unfreeze_pe(phb, 573 pe_no, 574 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); 575 } else { 576 rc = opal_pci_eeh_freeze_clear(phb->opal_id, 577 pe_no, 578 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); 579 if (rc) { 580 pr_warn("%s: Failure %ld clearing frozen " 581 "PHB#%x-PE#%x\n", 582 __func__, rc, phb->hose->global_number, 583 pe_no); 584 ret = -EIO; 585 } 586 } 587 588 /* 589 * For now, let's only display the diag buffer when we fail to clear 590 * the EEH status. We'll do more sensible things later when we have 591 * proper EEH support. We need to make sure we don't pollute ourselves 592 * with the normal errors generated when probing empty slots 593 */ 594 if (has_diag && ret) 595 pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data); 596 597 spin_unlock_irqrestore(&phb->lock, flags); 598 } 599 600 static void pnv_pci_config_check_eeh(struct pci_dn *pdn) 601 { 602 struct pnv_phb *phb = pdn->phb->private_data; 603 u8 fstate = 0; 604 __be16 pcierr = 0; 605 unsigned int pe_no; 606 s64 rc; 607 608 /* 609 * Get the PE#. During the PCI probe stage, we might not 610 * setup that yet. So all ER errors should be mapped to 611 * reserved PE. 612 */ 613 pe_no = pdn->pe_number; 614 if (pe_no == IODA_INVALID_PE) { 615 pe_no = phb->ioda.reserved_pe_idx; 616 } 617 618 /* 619 * Fetch frozen state. If the PHB support compound PE, 620 * we need handle that case. 
static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u8 fstate = 0;
	__be16 pcierr = 0;
	unsigned int pe_no;
	s64 rc;

	/*
	 * Get the PE#. During the PCI probe stage, we might not
	 * have set that up yet, so map all ER errors to the
	 * reserved PE.
	 */
	pe_no = pdn->pe_number;
	if (pe_no == IODA_INVALID_PE)
		pe_no = phb->ioda.reserved_pe_idx;

	/*
	 * Fetch the frozen state. If the PHB supports compound PEs,
	 * we need to handle that case.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe_no);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe_no,
						&fstate,
						&pcierr,
						NULL);
		if (rc) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc, phb->hose->global_number, pe_no);
			return;
		}
	}

	pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n",
		 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);

	/* Clear the frozen state if applicable */
	if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_DMA_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
		/*
		 * If the PHB supports compound PEs, freeze it for
		 * consistency.
		 */
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe_no);

		pnv_pci_handle_eeh_config(phb, pe_no);
	}
}

int pnv_pci_cfg_read(struct pci_dn *pdn,
		     int where, int size, u32 *val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;
	s64 rc;

	switch (size) {
	case 1: {
		u8 v8;
		rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
		*val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
		break;
	}
	case 2: {
		__be16 v16;
		rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
						    &v16);
		*val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
		break;
	}
	case 4: {
		__be32 v32;
		rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
		*val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
		break;
	}
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, *val);
	return PCIBIOS_SUCCESSFUL;
}

int pnv_pci_cfg_write(struct pci_dn *pdn,
		      int where, int size, u32 val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, val);
	switch (size) {
	case 1:
		opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
		break;
	case 2:
		opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
		break;
	case 4:
		opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}
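
/*
 * Decide whether a config access may proceed: accesses are blocked
 * while the device's PE has config space blocked (e.g. during a PE
 * reset) or once the device has been marked as removed by EEH.
 */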
#ifdef CONFIG_EEH
static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	struct eeh_dev *edev = NULL;
	struct pnv_phb *phb = pdn->phb->private_data;

	/* EEH not enabled ? */
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		return true;

	/* PE reset or device removed ? */
	edev = pdn->edev;
	if (edev) {
		if (edev->pe &&
		    (edev->pe->state & EEH_PE_CFG_BLOCKED))
			return false;

		if (edev->mode & EEH_DEV_REMOVED)
			return false;
	}

	return true;
}
#else
static inline bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	return true;
}
#endif /* CONFIG_EEH */

static int pnv_pci_read_config(struct pci_bus *bus,
			       unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	*val = 0xFFFFFFFF;
	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_read(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
		if (*val == EEH_IO_ERROR_VALUE(size) &&
		    eeh_dev_check_failure(pdn->edev))
			return PCIBIOS_DEVICE_NOT_FOUND;
	} else {
		pnv_pci_config_check_eeh(pdn);
	}

	return ret;
}

static int pnv_pci_write_config(struct pci_bus *bus,
				unsigned int devfn,
				int where, int size, u32 val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_write(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		pnv_pci_config_check_eeh(pdn);

	return ret;
}

struct pci_ops pnv_pci_ops = {
	.read  = pnv_pci_read_config,
	.write = pnv_pci_write_config,
};

struct iommu_table *pnv_pci_table_alloc(int nid)
{
	struct iommu_table *tbl;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
	if (!tbl)
		return NULL;

	INIT_LIST_HEAD_RCU(&tbl->it_group_list);
	kref_init(&tbl->it_kref);

	return tbl;
}

struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	return of_node_get(hose->dn);
}
EXPORT_SYMBOL(pnv_pci_get_phb_node);
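
/*
 * Enable or disable the PBCQ tunnel BAR for a device. Only one device
 * per PHB may use atomics at a time (first-come, first-served), and
 * tunnel_mutex serialises the read-modify-write of the firmware state.
 */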
int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable)
{
	__be64 val;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	u64 tunnel_bar;
	int rc;

	if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR))
		return -ENXIO;
	if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR))
		return -ENXIO;

	hose = pci_bus_to_host(dev->bus);
	phb = hose->private_data;

	mutex_lock(&tunnel_mutex);
	rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val);
	if (rc != OPAL_SUCCESS) {
		rc = -EIO;
		goto out;
	}
	tunnel_bar = be64_to_cpu(val);
	if (enable) {
		/*
		 * Only one device per PHB can use atomics.
		 * Our policy is first-come, first-served.
		 */
		if (tunnel_bar) {
			if (tunnel_bar != addr)
				rc = -EBUSY;
			else
				rc = 0;	/* Setting same address twice is ok */
			goto out;
		}
	} else {
		/*
		 * The device that owns atomics and wants to release
		 * them must pass the same address with enable == 0.
		 */
		if (tunnel_bar != addr) {
			rc = -EPERM;
			goto out;
		}
		addr = 0x0ULL;
	}
	rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr);
	rc = opal_error_code(rc);
out:
	mutex_unlock(&tunnel_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar);

void pnv_pci_shutdown(void)
{
	struct pci_controller *hose;

	list_for_each_entry(hose, &hose_list, list_node)
		if (hose->controller_ops.shutdown)
			hose->controller_ops.shutdown(hose);
}

/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);

void __init pnv_pci_init(void)
{
	struct device_node *np;

	pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

	/* If we don't have OPAL, e.g. in a simulator, just skip PCI probe */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

#ifdef CONFIG_PCIEPORTBUS
	/*
	 * On PowerNV PCIe devices are (currently) managed in cooperation
	 * with firmware. This isn't *strictly* required, but there's enough
	 * assumptions baked into both firmware and the platform code that
	 * it's unwise to allow the portbus services to be used.
	 *
	 * We need to fix this eventually, but for now set this flag to
	 * disable the portbus driver. The AER service isn't required since
	 * AER events are handled via EEH. The pciehp hotplug driver can't
	 * work without kernel changes (and portbus binding breaks pnv_php).
	 * The other services also require some thinking about how we're
	 * going to integrate them.
	 */
	pcie_ports_disabled = true;
#endif

	/* Look for IODA IO-Hubs. */
	for_each_compatible_node(np, NULL, "ibm,ioda-hub")
		pnv_pci_init_ioda_hub(np);

	/* Look for ioda2 built-in PHB3's */
	for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for ioda3 built-in PHB4's, we treat them as IODA2 */
	for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for NPU PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
		pnv_pci_init_npu_phb(np);

	/*
	 * Look for NPU2 PHBs which we treat mostly as NPU PHBs with
	 * the exception of TCE kill which requires an OPAL call.
	 */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-phb")
		pnv_pci_init_npu_phb(np);

	/* Look for NPU2 OpenCAPI PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb")
		pnv_pci_init_npu2_opencapi_phb(np);

	/* Configure IOMMU DMA hooks */
	set_pci_dma_ops(&dma_iommu_ops);
}

static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb,
				      unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_DEL_DEVICE:
		iommu_del_device(dev);
		return 0;
	default:
		return 0;
	}
}

static struct notifier_block pnv_tce_iommu_bus_nb = {
	.notifier_call = pnv_tce_iommu_bus_notifier,
};

static int __init pnv_tce_iommu_bus_notifier_init(void)
{
	bus_register_notifier(&pci_bus_type, &pnv_tce_iommu_bus_nb);
	return 0;
}
machine_subsys_initcall_sync(powernv, pnv_tce_iommu_bus_notifier_init);