// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>
#include <linux/sched/mm.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/firmware.h>
#include <asm/eeh_event.h>
#include <asm/eeh.h>

#include "powernv.h"
#include "pci.h"

static DEFINE_MUTEX(p2p_mutex);
static DEFINE_MUTEX(tunnel_mutex);

int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
{
	struct device_node *parent = np;
	u32 bdfn;
	u64 phbid;
	int ret;

	ret = of_property_read_u32(np, "reg", &bdfn);
	if (ret)
		return -ENXIO;

	bdfn = ((bdfn & 0x00ffff00) >> 8);
	while ((parent = of_get_parent(parent))) {
		if (!PCI_DN(parent)) {
			of_node_put(parent);
			break;
		}

		if (!of_device_is_compatible(parent, "ibm,ioda2-phb")) {
			of_node_put(parent);
			continue;
		}

		ret = of_property_read_u64(parent, "ibm,opal-phbid", &phbid);
		if (ret) {
			of_node_put(parent);
			return -ENXIO;
		}

		*id = PCI_SLOT_ID(phbid, bdfn);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id);

int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len)
{
	int64_t rc;

	if (!opal_check_token(OPAL_GET_DEVICE_TREE))
		return -ENXIO;

	rc = opal_get_device_tree(phandle, (uint64_t)buf, len);
	if (rc < OPAL_SUCCESS)
		return -EIO;

	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree);

int pnv_pci_get_presence_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE))
		return -ENXIO;

	rc = opal_pci_get_presence_state(id, (uint64_t)state);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state);

int pnv_pci_get_power_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_POWER_STATE))
		return -ENXIO;

	rc = opal_pci_get_power_state(id, (uint64_t)state);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_power_state);

int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg)
{
	struct opal_msg m;
	int token, ret;
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_SET_POWER_STATE))
		return -ENXIO;

	token = opal_async_get_token_interruptible();
	if (unlikely(token < 0))
		return token;

	rc = opal_pci_set_power_state(token, id, (uint64_t)&state);
	if (rc == OPAL_SUCCESS) {
		ret = 0;
		goto exit;
	} else if (rc != OPAL_ASYNC_COMPLETION) {
		ret = -EIO;
		goto exit;
	}

	ret = opal_async_wait_response(token, &m);
	if (ret < 0)
		goto exit;

	if (msg) {
		ret = 1;
		memcpy(msg, &m, sizeof(m));
	}

exit:
	opal_async_release_token(token);
	return ret;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);
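
/*
 * Illustrative sketch (hypothetical helper, not called from this file):
 * a hotplug-style caller driving the asynchronous power-state API
 * above. The function name and "slot_id" argument are illustrative;
 * slot IDs would normally come from pnv_pci_get_slot_id().
 */
static int __maybe_unused pnv_slot_power_on_example(uint64_t slot_id)
{
	struct opal_msg msg;
	int ret;

	ret = pnv_pci_set_power_state(slot_id, OPAL_PCI_SLOT_POWER_ON, &msg);
	if (ret < 0)
		return ret;	/* no token, OPAL error, or interrupted wait */

	/* ret == 1 means the async completion message in "msg" is valid */
	if (ret == 1 && opal_get_async_rc(msg) != OPAL_SUCCESS)
		return -EIO;

	return 0;
}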

int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	struct msi_msg msg;
	int hwirq;
	unsigned int virq;
	int rc;

	if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
		return -ENODEV;

	if (pdev->no_64bit_msi && !phb->msi32_support)
		return -ENODEV;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}
		hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
		if (hwirq < 0) {
			pr_warn("%s: Failed to find a free MSI\n",
				pci_name(pdev));
			return -ENOSPC;
		}
		virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
		if (!virq) {
			pr_warn("%s: Failed to map MSI to linux irq\n",
				pci_name(pdev));
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return -ENOMEM;
		}
		rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
				    virq, entry->msi_attrib.is_64, &msg);
		if (rc) {
			pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
			irq_dispose_mapping(virq);
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return rc;
		}
		irq_set_msi_desc(virq, entry);
		pci_write_msi_msg(virq, &msg);
	}
	return 0;
}

void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	irq_hw_number_t hwirq;

	if (WARN_ON(!phb))
		return;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->irq)
			continue;
		hwirq = virq_to_hw(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
	}
}
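
/*
 * Illustrative sketch (hypothetical helper, not used by this file): the
 * PHB's MSI bitmap hands out hardware IRQ numbers relative to
 * phb->msi_base, which is how the setup/teardown pair above converts
 * between bitmap slots and hardware IRQs. "phb" is assumed to be a
 * fully initialised pnv_phb.
 */
static void __maybe_unused pnv_msi_bitmap_example(struct pnv_phb *phb)
{
	int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 2);

	if (hwirq < 0)
		return;		/* no free vectors */

	/* the hardware IRQs are phb->msi_base + hwirq and + hwirq + 1 */

	msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 2);
}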

/* Nicely print the contents of the PE State Tables (PEST). */
static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size)
{
	u64 prevA = ULONG_MAX, prevB = ULONG_MAX;
	bool dup = false;
	int i;

	for (i = 0; i < pest_size; i++) {
		u64 peA = be64_to_cpu(pestA[i]);
		u64 peB = be64_to_cpu(pestB[i]);

		if (peA != prevA || peB != prevB) {
			if (dup) {
				pr_info("PE[..%03x] A/B: as above\n", i-1);
				dup = false;
			}
			prevA = peA;
			prevB = peB;
			if (peA & PNV_IODA_STOPPED_STATE ||
			    peB & PNV_IODA_STOPPED_STATE)
				pr_info("PE[%03x] A/B: %016llx %016llx\n",
					i, peA, peB);
		} else if (!dup && (peA & PNV_IODA_STOPPED_STATE ||
				    peB & PNV_IODA_STOPPED_STATE)) {
			dup = true;
		}
	}
}
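
/*
 * Example of the de-duplicated output (state words invented for
 * illustration, with PNV_IODA_STOPPED_STATE set). A run of PEs with
 * identical state words collapses to a single line plus a marker once
 * the run ends:
 *
 *   PE[005] A/B: 8000000000000000 8000000000000000
 *   PE[..01f] A/B: as above
 *   PE[020] A/B: 8000000000000002 0000000000000000
 */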

static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
					 struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoP7IOCPhbErrorData *data;

	data = (struct OpalIoP7IOCPhbErrorData *)common;
	pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->p7iocPlssr || data->p7iocCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->p7iocPlssr),
			be64_to_cpu(data->p7iocCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_P7IOC_NUM_PEST_REGS);
}

static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb3ErrorData *data;

	data = (struct OpalIoPhb3ErrorData *)common;
	pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->nFir)
		pr_info("nFir: %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB3_NUM_PEST_REGS);
}

static void pnv_pci_dump_phb4_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb4ErrorData *data;

	data = (struct OpalIoPhb4ErrorData *)common;
	pr_info("PHB4 PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId)
		pr_info("sourceId: %08x\n", be32_to_cpu(data->sourceId));
	if (data->nFir)
		pr_info("nFir: %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->phbTxeErrorStatus)
		pr_info("PhbTxeErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbTxeErrorStatus),
			be64_to_cpu(data->phbTxeFirstErrorStatus),
			be64_to_cpu(data->phbTxeErrorLog0),
			be64_to_cpu(data->phbTxeErrorLog1));
	if (data->phbRxeArbErrorStatus)
		pr_info("RxeArbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeArbErrorStatus),
			be64_to_cpu(data->phbRxeArbFirstErrorStatus),
			be64_to_cpu(data->phbRxeArbErrorLog0),
			be64_to_cpu(data->phbRxeArbErrorLog1));
	if (data->phbRxeMrgErrorStatus)
		pr_info("RxeMrgErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeMrgErrorStatus),
			be64_to_cpu(data->phbRxeMrgFirstErrorStatus),
			be64_to_cpu(data->phbRxeMrgErrorLog0),
			be64_to_cpu(data->phbRxeMrgErrorLog1));
	if (data->phbRxeTceErrorStatus)
		pr_info("RxeTceErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeTceErrorStatus),
			be64_to_cpu(data->phbRxeTceFirstErrorStatus),
			be64_to_cpu(data->phbRxeTceErrorLog0),
			be64_to_cpu(data->phbRxeTceErrorLog1));

	if (data->phbPblErrorStatus)
		pr_info("PblErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbPblErrorStatus),
			be64_to_cpu(data->phbPblFirstErrorStatus),
			be64_to_cpu(data->phbPblErrorLog0),
			be64_to_cpu(data->phbPblErrorLog1));
	if (data->phbPcieDlpErrorStatus)
		pr_info("PcieDlp: %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbPcieDlpErrorLog1),
			be64_to_cpu(data->phbPcieDlpErrorLog2),
			be64_to_cpu(data->phbPcieDlpErrorStatus));
	if (data->phbRegbErrorStatus)
		pr_info("RegbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRegbErrorStatus),
			be64_to_cpu(data->phbRegbFirstErrorStatus),
			be64_to_cpu(data->phbRegbErrorLog0),
			be64_to_cpu(data->phbRegbErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB4_NUM_PEST_REGS);
}

void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
				unsigned char *log_buff)
{
	struct OpalIoPhbErrorCommon *common;

	if (!hose || !log_buff)
		return;

	common = (struct OpalIoPhbErrorCommon *)log_buff;
	switch (be32_to_cpu(common->ioType)) {
	case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
		pnv_pci_dump_p7ioc_diag_data(hose, common);
		break;
	case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
		pnv_pci_dump_phb3_diag_data(hose, common);
		break;
	case OPAL_PHB_ERROR_DATA_TYPE_PHB4:
		pnv_pci_dump_phb4_diag_data(hose, common);
		break;
	default:
		pr_warn("%s: Unrecognized ioType %d\n",
			__func__, be32_to_cpu(common->ioType));
	}
}

static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
	unsigned long flags, rc;
	int has_diag, ret = 0;

	spin_lock_irqsave(&phb->lock, flags);

	/* Fetch PHB diag-data */
	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
					 phb->diag_data_size);
	has_diag = (rc == OPAL_SUCCESS);

	/* If the PHB supports compound PEs, let it clear the freeze itself */
	if (phb->unfreeze_pe) {
		ret = phb->unfreeze_pe(phb,
				       pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	} else {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       pe_no,
					       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (rc) {
			pr_warn("%s: Failure %ld clearing frozen PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe_no);
			ret = -EIO;
		}
	}

	/*
	 * For now, only display the diag buffer when we fail to clear
	 * the EEH status. We'll do more sensible things later when we
	 * have proper EEH support. We need to make sure we don't
	 * pollute ourselves with the normal errors generated when
	 * probing empty slots.
	 */
	if (has_diag && ret)
		pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);

	spin_unlock_irqrestore(&phb->lock, flags);
}

static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u8 fstate = 0;
	__be16 pcierr = 0;
	unsigned int pe_no;
	s64 rc;

	/*
	 * Get the PE#. During the PCI probe stage it may not have been
	 * set up yet, so map all ER errors to the reserved PE.
	 */
	pe_no = pdn->pe_number;
	if (pe_no == IODA_INVALID_PE)
		pe_no = phb->ioda.reserved_pe_idx;

	/*
	 * Fetch the frozen state. If the PHB supports compound PEs,
	 * we need to handle that case.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe_no);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe_no,
						&fstate,
						&pcierr,
						NULL);
		if (rc) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc, phb->hose->global_number, pe_no);
			return;
		}
	}

	pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n",
		 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);

	/* Clear the frozen state if applicable */
	if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_DMA_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
		/*
		 * If the PHB supports compound PEs, freeze it for
		 * consistency.
		 */
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe_no);

		pnv_pci_handle_eeh_config(phb, pe_no);
	}
}

int pnv_pci_cfg_read(struct pci_dn *pdn,
		     int where, int size, u32 *val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;
	s64 rc;

	switch (size) {
	case 1: {
		u8 v8;
		rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
		*val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
		break;
	}
	case 2: {
		__be16 v16;
		rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
						    &v16);
		*val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
		break;
	}
	case 4: {
		__be32 v32;
		rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
		*val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
		break;
	}
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, *val);
	return PCIBIOS_SUCCESSFUL;
}

int pnv_pci_cfg_write(struct pci_dn *pdn,
		      int where, int size, u32 val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, val);
	switch (size) {
	case 1:
		opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
		break;
	case 2:
		opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
		break;
	case 4:
		opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}
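
/*
 * Illustrative sketch (hypothetical helper, not used by this file):
 * reading a device's 16-bit vendor ID through the OPAL config accessor
 * above. An empty or broken slot reads back as all-ones, matching
 * normal PCI behaviour.
 */
static u16 __maybe_unused pnv_cfg_read_vendor_example(struct pci_dn *pdn)
{
	u32 val;

	if (pnv_pci_cfg_read(pdn, PCI_VENDOR_ID, 2, &val) != PCIBIOS_SUCCESSFUL)
		return 0xffff;

	return (u16)val;
}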

#ifdef CONFIG_EEH
static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	struct eeh_dev *edev = NULL;
	struct pnv_phb *phb = pdn->phb->private_data;

	/* EEH not enabled ? */
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		return true;

	/* PE reset or device removed ? */
	edev = pdn->edev;
	if (edev) {
		if (edev->pe &&
		    (edev->pe->state & EEH_PE_CFG_BLOCKED))
			return false;

		if (edev->mode & EEH_DEV_REMOVED)
			return false;
	}

	return true;
}
#else
static inline bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	return true;
}
#endif /* CONFIG_EEH */

static int pnv_pci_read_config(struct pci_bus *bus,
			       unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	*val = 0xFFFFFFFF;
	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_read(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
		if (*val == EEH_IO_ERROR_VALUE(size) &&
		    eeh_dev_check_failure(pdn->edev))
			return PCIBIOS_DEVICE_NOT_FOUND;
	} else {
		pnv_pci_config_check_eeh(pdn);
	}

	return ret;
}

static int pnv_pci_write_config(struct pci_bus *bus,
				unsigned int devfn,
				int where, int size, u32 val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_write(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		pnv_pci_config_check_eeh(pdn);

	return ret;
}

struct pci_ops pnv_pci_ops = {
	.read = pnv_pci_read_config,
	.write = pnv_pci_write_config,
};

struct iommu_table *pnv_pci_table_alloc(int nid)
{
	struct iommu_table *tbl;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
	if (!tbl)
		return NULL;

	INIT_LIST_HEAD_RCU(&tbl->it_group_list);
	kref_init(&tbl->it_kref);

	return tbl;
}
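
/*
 * Illustrative sketch (hypothetical helper, not used by this file):
 * tables from pnv_pci_table_alloc() carry a kref (it_kref), so the
 * last iommu_tce_table_put() is what actually frees them.
 */
static void __maybe_unused pnv_table_alloc_example(int nid)
{
	struct iommu_table *tbl = pnv_pci_table_alloc(nid);

	if (!tbl)
		return;

	/* ... populate and use the table ... */

	iommu_tce_table_put(tbl);	/* drops the initial reference */
}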

void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
#ifdef CONFIG_PCI_IOV
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	/* Fix the VF pdn PE number */
	if (pdev->is_virtfn) {
		pdn = pci_get_pdn(pdev);
		WARN_ON(pdn->pe_number != IODA_INVALID_PE);
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			if (pe->rid == ((pdev->bus->number << 8) |
			    (pdev->devfn & 0xff))) {
				pdn->pe_number = pe->pe_number;
				pe->pdev = pdev;
				break;
			}
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (phb && phb->dma_dev_setup)
		phb->dma_dev_setup(phb, pdev);
}

void pnv_pci_dma_bus_setup(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;

	list_for_each_entry(pe, &phb->ioda.pe_list, list) {
		if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
			continue;

		if (!pe->pbus)
			continue;

		if (bus->number == ((pe->rid >> 8) & 0xFF)) {
			pe->pbus = bus;
			break;
		}
	}
}

int pnv_pci_set_p2p(struct pci_dev *initiator, struct pci_dev *target, u64 desc)
{
	struct pci_controller *hose;
	struct pnv_phb *phb_init, *phb_target;
	struct pnv_ioda_pe *pe_init;
	int rc;

	if (!opal_check_token(OPAL_PCI_SET_P2P))
		return -ENXIO;

	hose = pci_bus_to_host(initiator->bus);
	phb_init = hose->private_data;

	hose = pci_bus_to_host(target->bus);
	phb_target = hose->private_data;

	pe_init = pnv_ioda_get_pe(initiator);
	if (!pe_init)
		return -ENODEV;

	/*
	 * Configuring the initiator's PHB requires adjusting its TVE#1
	 * setting. Since the same device can be an initiator several
	 * times for different target devices, we need to keep a
	 * reference count to know when we can restore the default
	 * bypass setting on its TVE#1 when disabling. OPAL does not
	 * track PE states, so we keep a reference count on the PE in
	 * Linux.
	 *
	 * For the target, the configuration is per PHB, so we keep a
	 * target reference count on the PHB.
	 */
	mutex_lock(&p2p_mutex);

	if (desc & OPAL_PCI_P2P_ENABLE) {
		/* always go to opal to validate the configuration */
		rc = opal_pci_set_p2p(phb_init->opal_id, phb_target->opal_id,
				      desc, pe_init->pe_number);

		if (rc != OPAL_SUCCESS) {
			rc = -EIO;
			goto out;
		}

		pe_init->p2p_initiator_count++;
		phb_target->p2p_target_count++;
	} else {
		if (!pe_init->p2p_initiator_count ||
		    !phb_target->p2p_target_count) {
			rc = -EINVAL;
			goto out;
		}

		if (--pe_init->p2p_initiator_count == 0)
			pnv_pci_ioda2_set_bypass(pe_init, true);

		if (--phb_target->p2p_target_count == 0) {
			rc = opal_pci_set_p2p(phb_init->opal_id,
					      phb_target->opal_id, desc,
					      pe_init->pe_number);
			if (rc != OPAL_SUCCESS) {
				rc = -EIO;
				goto out;
			}
		}
	}
	rc = 0;
out:
	mutex_unlock(&p2p_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_p2p);
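
/*
 * Illustrative sketch (hypothetical helper and device roles, not used
 * by this file): enabling and later tearing down a P2P store path
 * between two devices. Disabling passes the same descriptor without
 * OPAL_PCI_P2P_ENABLE, which drops the reference counts taken above.
 */
static void __maybe_unused pnv_p2p_example(struct pci_dev *initiator,
					   struct pci_dev *target)
{
	if (pnv_pci_set_p2p(initiator, target,
			    OPAL_PCI_P2P_STORE | OPAL_PCI_P2P_ENABLE))
		return;

	/* ... initiator DMA targeting the target's MMIO space ... */

	pnv_pci_set_p2p(initiator, target, OPAL_PCI_P2P_STORE);
}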

struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	return of_node_get(hose->dn);
}
EXPORT_SYMBOL(pnv_pci_get_phb_node);

int pnv_pci_enable_tunnel(struct pci_dev *dev, u64 *asnind)
{
	struct device_node *np;
	const __be32 *prop;
	struct pnv_ioda_pe *pe;
	uint16_t window_id;
	int rc;

	if (!radix_enabled())
		return -ENXIO;

	np = pnv_pci_get_phb_node(dev);
	if (!np)
		return -ENXIO;

	prop = of_get_property(np, "ibm,phb-indications", NULL);
	of_node_put(np);

	if (!prop || !prop[1])
		return -ENXIO;

	*asnind = (u64)be32_to_cpu(prop[1]);
	pe = pnv_ioda_get_pe(dev);
	if (!pe)
		return -ENODEV;

	/* Increase real window size to accept as_notify messages. */
	window_id = (pe->pe_number << 1) + 1;
	rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, pe->pe_number,
					     window_id, pe->tce_bypass_base,
					     (uint64_t)1 << 48);
	return opal_error_code(rc);
}
EXPORT_SYMBOL_GPL(pnv_pci_enable_tunnel);

int pnv_pci_disable_tunnel(struct pci_dev *dev)
{
	struct pnv_ioda_pe *pe;

	pe = pnv_ioda_get_pe(dev);
	if (!pe)
		return -ENODEV;

	/* Restore default real window size. */
	pnv_pci_ioda2_set_bypass(pe, true);
	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_disable_tunnel);

int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable)
{
	__be64 val;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	u64 tunnel_bar;
	int rc;

	if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR))
		return -ENXIO;
	if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR))
		return -ENXIO;

	hose = pci_bus_to_host(dev->bus);
	phb = hose->private_data;

	mutex_lock(&tunnel_mutex);
	rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val);
	if (rc != OPAL_SUCCESS) {
		rc = -EIO;
		goto out;
	}
	tunnel_bar = be64_to_cpu(val);
	if (enable) {
		/*
		 * Only one device per PHB can use atomics.
		 * Our policy is first-come, first-served.
		 */
		if (tunnel_bar) {
			if (tunnel_bar != addr)
				rc = -EBUSY;
			else
				rc = 0;	/* Setting same address twice is ok */
			goto out;
		}
	} else {
		/*
		 * The device that owns atomics and wants to release
		 * them must pass the same address with enable == 0.
		 */
		if (tunnel_bar != addr) {
			rc = -EPERM;
			goto out;
		}
		addr = 0x0ULL;
	}
	rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr);
	rc = opal_error_code(rc);
out:
	mutex_unlock(&tunnel_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar);
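
/*
 * Illustrative sketch (hypothetical helper, not used by this file): the
 * claim/release sequence a coherent-accelerator style driver would use
 * around the tunnel BAR. "addr" is assumed to be the address the device
 * emits; release must pass the same address with enable == 0.
 */
static int __maybe_unused pnv_tunnel_bar_example(struct pci_dev *dev, u64 addr)
{
	int rc;

	rc = pnv_pci_set_tunnel_bar(dev, addr, 1);
	if (rc)
		return rc;	/* -EBUSY if another device already owns it */

	/* ... issue tunneled operations ... */

	return pnv_pci_set_tunnel_bar(dev, addr, 0);
}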

#ifdef CONFIG_PPC64	/* for thread.tidr */
int pnv_pci_get_as_notify_info(struct task_struct *task, u32 *lpid, u32 *pid,
			       u32 *tid)
{
	struct mm_struct *mm = NULL;

	if (task == NULL)
		return -EINVAL;

	mm = get_task_mm(task);
	if (mm == NULL)
		return -EINVAL;

	*pid = mm->context.id;
	mmput(mm);

	*tid = task->thread.tidr;
	*lpid = mfspr(SPRN_LPID);
	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_as_notify_info);
#endif

void pnv_pci_shutdown(void)
{
	struct pci_controller *hose;

	list_for_each_entry(hose, &hose_list, list_node)
		if (hose->controller_ops.shutdown)
			hose->controller_ops.shutdown(hose);
}

/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);

void __init pnv_pci_init(void)
{
	struct device_node *np;

	pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

	/* If we don't have OPAL, eg. in sim, just skip PCI probe */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

	/* Look for IODA IO-Hubs. */
	for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
		pnv_pci_init_ioda_hub(np);
	}

	/* Look for ioda2 built-in PHB3's */
	for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for ioda3 built-in PHB4's, we treat them as IODA2 */
	for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for NPU PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
		pnv_pci_init_npu_phb(np);

	/*
	 * Look for NPU2 PHBs which we treat mostly as NPU PHBs with
	 * the exception of TCE kill which requires an OPAL call.
	 */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-phb")
		pnv_pci_init_npu_phb(np);

	/* Look for NPU2 OpenCAPI PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb")
		pnv_pci_init_npu2_opencapi_phb(np);

	/* Configure IOMMU DMA hooks */
	set_pci_dma_ops(&dma_iommu_ops);
}

static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb,
				      unsigned long action, void *data)
{
	struct device *dev = data;
	struct pci_dev *pdev;
	struct pci_dn *pdn;
	struct pnv_ioda_pe *pe;
	struct pci_controller *hose;
	struct pnv_phb *phb;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		pdev = to_pci_dev(dev);
		pdn = pci_get_pdn(pdev);
		hose = pci_bus_to_host(pdev->bus);
		phb = hose->private_data;

		WARN_ON_ONCE(!phb);
		if (!pdn || pdn->pe_number == IODA_INVALID_PE || !phb)
			return 0;

		pe = &phb->ioda.pe_array[pdn->pe_number];
		if (!pe->table_group.group)
			return 0;
		iommu_add_device(&pe->table_group, dev);
		return 0;
	case BUS_NOTIFY_DEL_DEVICE:
		iommu_del_device(dev);
		return 0;
	default:
		return 0;
	}
}

static struct notifier_block pnv_tce_iommu_bus_nb = {
	.notifier_call = pnv_tce_iommu_bus_notifier,
};

static int __init pnv_tce_iommu_bus_notifier_init(void)
{
	bus_register_notifier(&pci_bus_type, &pnv_tce_iommu_bus_nb);
	return 0;
}
machine_subsys_initcall_sync(powernv, pnv_tce_iommu_bus_notifier_init);