1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Support PCI/PCIe on PowerNV platforms 4 * 5 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp. 6 */ 7 8 #include <linux/kernel.h> 9 #include <linux/pci.h> 10 #include <linux/delay.h> 11 #include <linux/string.h> 12 #include <linux/init.h> 13 #include <linux/irq.h> 14 #include <linux/io.h> 15 #include <linux/msi.h> 16 #include <linux/iommu.h> 17 #include <linux/sched/mm.h> 18 19 #include <asm/sections.h> 20 #include <asm/io.h> 21 #include <asm/prom.h> 22 #include <asm/pci-bridge.h> 23 #include <asm/machdep.h> 24 #include <asm/msi_bitmap.h> 25 #include <asm/ppc-pci.h> 26 #include <asm/pnv-pci.h> 27 #include <asm/opal.h> 28 #include <asm/iommu.h> 29 #include <asm/tce.h> 30 #include <asm/firmware.h> 31 #include <asm/eeh_event.h> 32 #include <asm/eeh.h> 33 34 #include "powernv.h" 35 #include "pci.h" 36 37 static DEFINE_MUTEX(tunnel_mutex); 38 39 int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id) 40 { 41 struct device_node *parent = np; 42 u32 bdfn; 43 u64 phbid; 44 int ret; 45 46 ret = of_property_read_u32(np, "reg", &bdfn); 47 if (ret) 48 return -ENXIO; 49 50 bdfn = ((bdfn & 0x00ffff00) >> 8); 51 while ((parent = of_get_parent(parent))) { 52 if (!PCI_DN(parent)) { 53 of_node_put(parent); 54 break; 55 } 56 57 if (!of_device_is_compatible(parent, "ibm,ioda2-phb")) { 58 of_node_put(parent); 59 continue; 60 } 61 62 ret = of_property_read_u64(parent, "ibm,opal-phbid", &phbid); 63 if (ret) { 64 of_node_put(parent); 65 return -ENXIO; 66 } 67 68 *id = PCI_SLOT_ID(phbid, bdfn); 69 return 0; 70 } 71 72 return -ENODEV; 73 } 74 EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id); 75 76 int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len) 77 { 78 int64_t rc; 79 80 if (!opal_check_token(OPAL_GET_DEVICE_TREE)) 81 return -ENXIO; 82 83 rc = opal_get_device_tree(phandle, (uint64_t)buf, len); 84 if (rc < OPAL_SUCCESS) 85 return -EIO; 86 87 return rc; 88 } 89 EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree); 90 91 int pnv_pci_get_presence_state(uint64_t id, uint8_t *state) 92 { 93 int64_t rc; 94 95 if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE)) 96 return -ENXIO; 97 98 rc = opal_pci_get_presence_state(id, (uint64_t)state); 99 if (rc != OPAL_SUCCESS) 100 return -EIO; 101 102 return 0; 103 } 104 EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state); 105 106 int pnv_pci_get_power_state(uint64_t id, uint8_t *state) 107 { 108 int64_t rc; 109 110 if (!opal_check_token(OPAL_PCI_GET_POWER_STATE)) 111 return -ENXIO; 112 113 rc = opal_pci_get_power_state(id, (uint64_t)state); 114 if (rc != OPAL_SUCCESS) 115 return -EIO; 116 117 return 0; 118 } 119 EXPORT_SYMBOL_GPL(pnv_pci_get_power_state); 120 121 int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg) 122 { 123 struct opal_msg m; 124 int token, ret; 125 int64_t rc; 126 127 if (!opal_check_token(OPAL_PCI_SET_POWER_STATE)) 128 return -ENXIO; 129 130 token = opal_async_get_token_interruptible(); 131 if (unlikely(token < 0)) 132 return token; 133 134 rc = opal_pci_set_power_state(token, id, (uint64_t)&state); 135 if (rc == OPAL_SUCCESS) { 136 ret = 0; 137 goto exit; 138 } else if (rc != OPAL_ASYNC_COMPLETION) { 139 ret = -EIO; 140 goto exit; 141 } 142 143 ret = opal_async_wait_response(token, &m); 144 if (ret < 0) 145 goto exit; 146 147 if (msg) { 148 ret = 1; 149 memcpy(msg, &m, sizeof(m)); 150 } 151 152 exit: 153 opal_async_release_token(token); 154 return ret; 155 } 156 EXPORT_SYMBOL_GPL(pnv_pci_set_power_state); 157 158 int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) 159 { 160 struct pci_controller *hose = pci_bus_to_host(pdev->bus); 161 struct pnv_phb *phb = hose->private_data; 162 struct msi_desc *entry; 163 struct msi_msg msg; 164 int hwirq; 165 unsigned int virq; 166 int rc; 167 168 if (WARN_ON(!phb) || !phb->msi_bmp.bitmap) 169 return -ENODEV; 170 171 if (pdev->no_64bit_msi && !phb->msi32_support) 172 return -ENODEV; 173 174 for_each_pci_msi_entry(entry, pdev) { 175 if (!entry->msi_attrib.is_64 && !phb->msi32_support) { 176 pr_warn("%s: Supports only 64-bit MSIs\n", 177 pci_name(pdev)); 178 return -ENXIO; 179 } 180 hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1); 181 if (hwirq < 0) { 182 pr_warn("%s: Failed to find a free MSI\n", 183 pci_name(pdev)); 184 return -ENOSPC; 185 } 186 virq = irq_create_mapping(NULL, phb->msi_base + hwirq); 187 if (!virq) { 188 pr_warn("%s: Failed to map MSI to linux irq\n", 189 pci_name(pdev)); 190 msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1); 191 return -ENOMEM; 192 } 193 rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq, 194 virq, entry->msi_attrib.is_64, &msg); 195 if (rc) { 196 pr_warn("%s: Failed to setup MSI\n", pci_name(pdev)); 197 irq_dispose_mapping(virq); 198 msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1); 199 return rc; 200 } 201 irq_set_msi_desc(virq, entry); 202 pci_write_msi_msg(virq, &msg); 203 } 204 return 0; 205 } 206 207 void pnv_teardown_msi_irqs(struct pci_dev *pdev) 208 { 209 struct pci_controller *hose = pci_bus_to_host(pdev->bus); 210 struct pnv_phb *phb = hose->private_data; 211 struct msi_desc *entry; 212 irq_hw_number_t hwirq; 213 214 if (WARN_ON(!phb)) 215 return; 216 217 for_each_pci_msi_entry(entry, pdev) { 218 if (!entry->irq) 219 continue; 220 hwirq = virq_to_hw(entry->irq); 221 irq_set_msi_desc(entry->irq, NULL); 222 irq_dispose_mapping(entry->irq); 223 msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1); 224 } 225 } 226 227 /* Nicely print the contents of the PE State Tables (PEST). */ 228 static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size) 229 { 230 __be64 prevA = ULONG_MAX, prevB = ULONG_MAX; 231 bool dup = false; 232 int i; 233 234 for (i = 0; i < pest_size; i++) { 235 __be64 peA = be64_to_cpu(pestA[i]); 236 __be64 peB = be64_to_cpu(pestB[i]); 237 238 if (peA != prevA || peB != prevB) { 239 if (dup) { 240 pr_info("PE[..%03x] A/B: as above\n", i-1); 241 dup = false; 242 } 243 prevA = peA; 244 prevB = peB; 245 if (peA & PNV_IODA_STOPPED_STATE || 246 peB & PNV_IODA_STOPPED_STATE) 247 pr_info("PE[%03x] A/B: %016llx %016llx\n", 248 i, peA, peB); 249 } else if (!dup && (peA & PNV_IODA_STOPPED_STATE || 250 peB & PNV_IODA_STOPPED_STATE)) { 251 dup = true; 252 } 253 } 254 } 255 256 static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose, 257 struct OpalIoPhbErrorCommon *common) 258 { 259 struct OpalIoP7IOCPhbErrorData *data; 260 261 data = (struct OpalIoP7IOCPhbErrorData *)common; 262 pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n", 263 hose->global_number, be32_to_cpu(common->version)); 264 265 if (data->brdgCtl) 266 pr_info("brdgCtl: %08x\n", 267 be32_to_cpu(data->brdgCtl)); 268 if (data->portStatusReg || data->rootCmplxStatus || 269 data->busAgentStatus) 270 pr_info("UtlSts: %08x %08x %08x\n", 271 be32_to_cpu(data->portStatusReg), 272 be32_to_cpu(data->rootCmplxStatus), 273 be32_to_cpu(data->busAgentStatus)); 274 if (data->deviceStatus || data->slotStatus || 275 data->linkStatus || data->devCmdStatus || 276 data->devSecStatus) 277 pr_info("RootSts: %08x %08x %08x %08x %08x\n", 278 be32_to_cpu(data->deviceStatus), 279 be32_to_cpu(data->slotStatus), 280 be32_to_cpu(data->linkStatus), 281 be32_to_cpu(data->devCmdStatus), 282 be32_to_cpu(data->devSecStatus)); 283 if (data->rootErrorStatus || data->uncorrErrorStatus || 284 data->corrErrorStatus) 285 pr_info("RootErrSts: %08x %08x %08x\n", 286 be32_to_cpu(data->rootErrorStatus), 287 be32_to_cpu(data->uncorrErrorStatus), 288 be32_to_cpu(data->corrErrorStatus)); 289 if (data->tlpHdr1 || data->tlpHdr2 || 290 data->tlpHdr3 || data->tlpHdr4) 291 pr_info("RootErrLog: %08x %08x %08x %08x\n", 292 be32_to_cpu(data->tlpHdr1), 293 be32_to_cpu(data->tlpHdr2), 294 be32_to_cpu(data->tlpHdr3), 295 be32_to_cpu(data->tlpHdr4)); 296 if (data->sourceId || data->errorClass || 297 data->correlator) 298 pr_info("RootErrLog1: %08x %016llx %016llx\n", 299 be32_to_cpu(data->sourceId), 300 be64_to_cpu(data->errorClass), 301 be64_to_cpu(data->correlator)); 302 if (data->p7iocPlssr || data->p7iocCsr) 303 pr_info("PhbSts: %016llx %016llx\n", 304 be64_to_cpu(data->p7iocPlssr), 305 be64_to_cpu(data->p7iocCsr)); 306 if (data->lemFir) 307 pr_info("Lem: %016llx %016llx %016llx\n", 308 be64_to_cpu(data->lemFir), 309 be64_to_cpu(data->lemErrorMask), 310 be64_to_cpu(data->lemWOF)); 311 if (data->phbErrorStatus) 312 pr_info("PhbErr: %016llx %016llx %016llx %016llx\n", 313 be64_to_cpu(data->phbErrorStatus), 314 be64_to_cpu(data->phbFirstErrorStatus), 315 be64_to_cpu(data->phbErrorLog0), 316 be64_to_cpu(data->phbErrorLog1)); 317 if (data->mmioErrorStatus) 318 pr_info("OutErr: %016llx %016llx %016llx %016llx\n", 319 be64_to_cpu(data->mmioErrorStatus), 320 be64_to_cpu(data->mmioFirstErrorStatus), 321 be64_to_cpu(data->mmioErrorLog0), 322 be64_to_cpu(data->mmioErrorLog1)); 323 if (data->dma0ErrorStatus) 324 pr_info("InAErr: %016llx %016llx %016llx %016llx\n", 325 be64_to_cpu(data->dma0ErrorStatus), 326 be64_to_cpu(data->dma0FirstErrorStatus), 327 be64_to_cpu(data->dma0ErrorLog0), 328 be64_to_cpu(data->dma0ErrorLog1)); 329 if (data->dma1ErrorStatus) 330 pr_info("InBErr: %016llx %016llx %016llx %016llx\n", 331 be64_to_cpu(data->dma1ErrorStatus), 332 be64_to_cpu(data->dma1FirstErrorStatus), 333 be64_to_cpu(data->dma1ErrorLog0), 334 be64_to_cpu(data->dma1ErrorLog1)); 335 336 pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_P7IOC_NUM_PEST_REGS); 337 } 338 339 static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose, 340 struct OpalIoPhbErrorCommon *common) 341 { 342 struct OpalIoPhb3ErrorData *data; 343 344 data = (struct OpalIoPhb3ErrorData*)common; 345 pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n", 346 hose->global_number, be32_to_cpu(common->version)); 347 if (data->brdgCtl) 348 pr_info("brdgCtl: %08x\n", 349 be32_to_cpu(data->brdgCtl)); 350 if (data->portStatusReg || data->rootCmplxStatus || 351 data->busAgentStatus) 352 pr_info("UtlSts: %08x %08x %08x\n", 353 be32_to_cpu(data->portStatusReg), 354 be32_to_cpu(data->rootCmplxStatus), 355 be32_to_cpu(data->busAgentStatus)); 356 if (data->deviceStatus || data->slotStatus || 357 data->linkStatus || data->devCmdStatus || 358 data->devSecStatus) 359 pr_info("RootSts: %08x %08x %08x %08x %08x\n", 360 be32_to_cpu(data->deviceStatus), 361 be32_to_cpu(data->slotStatus), 362 be32_to_cpu(data->linkStatus), 363 be32_to_cpu(data->devCmdStatus), 364 be32_to_cpu(data->devSecStatus)); 365 if (data->rootErrorStatus || data->uncorrErrorStatus || 366 data->corrErrorStatus) 367 pr_info("RootErrSts: %08x %08x %08x\n", 368 be32_to_cpu(data->rootErrorStatus), 369 be32_to_cpu(data->uncorrErrorStatus), 370 be32_to_cpu(data->corrErrorStatus)); 371 if (data->tlpHdr1 || data->tlpHdr2 || 372 data->tlpHdr3 || data->tlpHdr4) 373 pr_info("RootErrLog: %08x %08x %08x %08x\n", 374 be32_to_cpu(data->tlpHdr1), 375 be32_to_cpu(data->tlpHdr2), 376 be32_to_cpu(data->tlpHdr3), 377 be32_to_cpu(data->tlpHdr4)); 378 if (data->sourceId || data->errorClass || 379 data->correlator) 380 pr_info("RootErrLog1: %08x %016llx %016llx\n", 381 be32_to_cpu(data->sourceId), 382 be64_to_cpu(data->errorClass), 383 be64_to_cpu(data->correlator)); 384 if (data->nFir) 385 pr_info("nFir: %016llx %016llx %016llx\n", 386 be64_to_cpu(data->nFir), 387 be64_to_cpu(data->nFirMask), 388 be64_to_cpu(data->nFirWOF)); 389 if (data->phbPlssr || data->phbCsr) 390 pr_info("PhbSts: %016llx %016llx\n", 391 be64_to_cpu(data->phbPlssr), 392 be64_to_cpu(data->phbCsr)); 393 if (data->lemFir) 394 pr_info("Lem: %016llx %016llx %016llx\n", 395 be64_to_cpu(data->lemFir), 396 be64_to_cpu(data->lemErrorMask), 397 be64_to_cpu(data->lemWOF)); 398 if (data->phbErrorStatus) 399 pr_info("PhbErr: %016llx %016llx %016llx %016llx\n", 400 be64_to_cpu(data->phbErrorStatus), 401 be64_to_cpu(data->phbFirstErrorStatus), 402 be64_to_cpu(data->phbErrorLog0), 403 be64_to_cpu(data->phbErrorLog1)); 404 if (data->mmioErrorStatus) 405 pr_info("OutErr: %016llx %016llx %016llx %016llx\n", 406 be64_to_cpu(data->mmioErrorStatus), 407 be64_to_cpu(data->mmioFirstErrorStatus), 408 be64_to_cpu(data->mmioErrorLog0), 409 be64_to_cpu(data->mmioErrorLog1)); 410 if (data->dma0ErrorStatus) 411 pr_info("InAErr: %016llx %016llx %016llx %016llx\n", 412 be64_to_cpu(data->dma0ErrorStatus), 413 be64_to_cpu(data->dma0FirstErrorStatus), 414 be64_to_cpu(data->dma0ErrorLog0), 415 be64_to_cpu(data->dma0ErrorLog1)); 416 if (data->dma1ErrorStatus) 417 pr_info("InBErr: %016llx %016llx %016llx %016llx\n", 418 be64_to_cpu(data->dma1ErrorStatus), 419 be64_to_cpu(data->dma1FirstErrorStatus), 420 be64_to_cpu(data->dma1ErrorLog0), 421 be64_to_cpu(data->dma1ErrorLog1)); 422 423 pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB3_NUM_PEST_REGS); 424 } 425 426 static void pnv_pci_dump_phb4_diag_data(struct pci_controller *hose, 427 struct OpalIoPhbErrorCommon *common) 428 { 429 struct OpalIoPhb4ErrorData *data; 430 431 data = (struct OpalIoPhb4ErrorData*)common; 432 pr_info("PHB4 PHB#%d Diag-data (Version: %d)\n", 433 hose->global_number, be32_to_cpu(common->version)); 434 if (data->brdgCtl) 435 pr_info("brdgCtl: %08x\n", 436 be32_to_cpu(data->brdgCtl)); 437 if (data->deviceStatus || data->slotStatus || 438 data->linkStatus || data->devCmdStatus || 439 data->devSecStatus) 440 pr_info("RootSts: %08x %08x %08x %08x %08x\n", 441 be32_to_cpu(data->deviceStatus), 442 be32_to_cpu(data->slotStatus), 443 be32_to_cpu(data->linkStatus), 444 be32_to_cpu(data->devCmdStatus), 445 be32_to_cpu(data->devSecStatus)); 446 if (data->rootErrorStatus || data->uncorrErrorStatus || 447 data->corrErrorStatus) 448 pr_info("RootErrSts: %08x %08x %08x\n", 449 be32_to_cpu(data->rootErrorStatus), 450 be32_to_cpu(data->uncorrErrorStatus), 451 be32_to_cpu(data->corrErrorStatus)); 452 if (data->tlpHdr1 || data->tlpHdr2 || 453 data->tlpHdr3 || data->tlpHdr4) 454 pr_info("RootErrLog: %08x %08x %08x %08x\n", 455 be32_to_cpu(data->tlpHdr1), 456 be32_to_cpu(data->tlpHdr2), 457 be32_to_cpu(data->tlpHdr3), 458 be32_to_cpu(data->tlpHdr4)); 459 if (data->sourceId) 460 pr_info("sourceId: %08x\n", be32_to_cpu(data->sourceId)); 461 if (data->nFir) 462 pr_info("nFir: %016llx %016llx %016llx\n", 463 be64_to_cpu(data->nFir), 464 be64_to_cpu(data->nFirMask), 465 be64_to_cpu(data->nFirWOF)); 466 if (data->phbPlssr || data->phbCsr) 467 pr_info("PhbSts: %016llx %016llx\n", 468 be64_to_cpu(data->phbPlssr), 469 be64_to_cpu(data->phbCsr)); 470 if (data->lemFir) 471 pr_info("Lem: %016llx %016llx %016llx\n", 472 be64_to_cpu(data->lemFir), 473 be64_to_cpu(data->lemErrorMask), 474 be64_to_cpu(data->lemWOF)); 475 if (data->phbErrorStatus) 476 pr_info("PhbErr: %016llx %016llx %016llx %016llx\n", 477 be64_to_cpu(data->phbErrorStatus), 478 be64_to_cpu(data->phbFirstErrorStatus), 479 be64_to_cpu(data->phbErrorLog0), 480 be64_to_cpu(data->phbErrorLog1)); 481 if (data->phbTxeErrorStatus) 482 pr_info("PhbTxeErr: %016llx %016llx %016llx %016llx\n", 483 be64_to_cpu(data->phbTxeErrorStatus), 484 be64_to_cpu(data->phbTxeFirstErrorStatus), 485 be64_to_cpu(data->phbTxeErrorLog0), 486 be64_to_cpu(data->phbTxeErrorLog1)); 487 if (data->phbRxeArbErrorStatus) 488 pr_info("RxeArbErr: %016llx %016llx %016llx %016llx\n", 489 be64_to_cpu(data->phbRxeArbErrorStatus), 490 be64_to_cpu(data->phbRxeArbFirstErrorStatus), 491 be64_to_cpu(data->phbRxeArbErrorLog0), 492 be64_to_cpu(data->phbRxeArbErrorLog1)); 493 if (data->phbRxeMrgErrorStatus) 494 pr_info("RxeMrgErr: %016llx %016llx %016llx %016llx\n", 495 be64_to_cpu(data->phbRxeMrgErrorStatus), 496 be64_to_cpu(data->phbRxeMrgFirstErrorStatus), 497 be64_to_cpu(data->phbRxeMrgErrorLog0), 498 be64_to_cpu(data->phbRxeMrgErrorLog1)); 499 if (data->phbRxeTceErrorStatus) 500 pr_info("RxeTceErr: %016llx %016llx %016llx %016llx\n", 501 be64_to_cpu(data->phbRxeTceErrorStatus), 502 be64_to_cpu(data->phbRxeTceFirstErrorStatus), 503 be64_to_cpu(data->phbRxeTceErrorLog0), 504 be64_to_cpu(data->phbRxeTceErrorLog1)); 505 506 if (data->phbPblErrorStatus) 507 pr_info("PblErr: %016llx %016llx %016llx %016llx\n", 508 be64_to_cpu(data->phbPblErrorStatus), 509 be64_to_cpu(data->phbPblFirstErrorStatus), 510 be64_to_cpu(data->phbPblErrorLog0), 511 be64_to_cpu(data->phbPblErrorLog1)); 512 if (data->phbPcieDlpErrorStatus) 513 pr_info("PcieDlp: %016llx %016llx %016llx\n", 514 be64_to_cpu(data->phbPcieDlpErrorLog1), 515 be64_to_cpu(data->phbPcieDlpErrorLog2), 516 be64_to_cpu(data->phbPcieDlpErrorStatus)); 517 if (data->phbRegbErrorStatus) 518 pr_info("RegbErr: %016llx %016llx %016llx %016llx\n", 519 be64_to_cpu(data->phbRegbErrorStatus), 520 be64_to_cpu(data->phbRegbFirstErrorStatus), 521 be64_to_cpu(data->phbRegbErrorLog0), 522 be64_to_cpu(data->phbRegbErrorLog1)); 523 524 525 pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB4_NUM_PEST_REGS); 526 } 527 528 void pnv_pci_dump_phb_diag_data(struct pci_controller *hose, 529 unsigned char *log_buff) 530 { 531 struct OpalIoPhbErrorCommon *common; 532 533 if (!hose || !log_buff) 534 return; 535 536 common = (struct OpalIoPhbErrorCommon *)log_buff; 537 switch (be32_to_cpu(common->ioType)) { 538 case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: 539 pnv_pci_dump_p7ioc_diag_data(hose, common); 540 break; 541 case OPAL_PHB_ERROR_DATA_TYPE_PHB3: 542 pnv_pci_dump_phb3_diag_data(hose, common); 543 break; 544 case OPAL_PHB_ERROR_DATA_TYPE_PHB4: 545 pnv_pci_dump_phb4_diag_data(hose, common); 546 break; 547 default: 548 pr_warn("%s: Unrecognized ioType %d\n", 549 __func__, be32_to_cpu(common->ioType)); 550 } 551 } 552 553 static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no) 554 { 555 unsigned long flags, rc; 556 int has_diag, ret = 0; 557 558 spin_lock_irqsave(&phb->lock, flags); 559 560 /* Fetch PHB diag-data */ 561 rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data, 562 phb->diag_data_size); 563 has_diag = (rc == OPAL_SUCCESS); 564 565 /* If PHB supports compound PE, to handle it */ 566 if (phb->unfreeze_pe) { 567 ret = phb->unfreeze_pe(phb, 568 pe_no, 569 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); 570 } else { 571 rc = opal_pci_eeh_freeze_clear(phb->opal_id, 572 pe_no, 573 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); 574 if (rc) { 575 pr_warn("%s: Failure %ld clearing frozen " 576 "PHB#%x-PE#%x\n", 577 __func__, rc, phb->hose->global_number, 578 pe_no); 579 ret = -EIO; 580 } 581 } 582 583 /* 584 * For now, let's only display the diag buffer when we fail to clear 585 * the EEH status. We'll do more sensible things later when we have 586 * proper EEH support. We need to make sure we don't pollute ourselves 587 * with the normal errors generated when probing empty slots 588 */ 589 if (has_diag && ret) 590 pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data); 591 592 spin_unlock_irqrestore(&phb->lock, flags); 593 } 594 595 static void pnv_pci_config_check_eeh(struct pci_dn *pdn) 596 { 597 struct pnv_phb *phb = pdn->phb->private_data; 598 u8 fstate = 0; 599 __be16 pcierr = 0; 600 unsigned int pe_no; 601 s64 rc; 602 603 /* 604 * Get the PE#. During the PCI probe stage, we might not 605 * setup that yet. So all ER errors should be mapped to 606 * reserved PE. 607 */ 608 pe_no = pdn->pe_number; 609 if (pe_no == IODA_INVALID_PE) { 610 pe_no = phb->ioda.reserved_pe_idx; 611 } 612 613 /* 614 * Fetch frozen state. If the PHB support compound PE, 615 * we need handle that case. 616 */ 617 if (phb->get_pe_state) { 618 fstate = phb->get_pe_state(phb, pe_no); 619 } else { 620 rc = opal_pci_eeh_freeze_status(phb->opal_id, 621 pe_no, 622 &fstate, 623 &pcierr, 624 NULL); 625 if (rc) { 626 pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n", 627 __func__, rc, phb->hose->global_number, pe_no); 628 return; 629 } 630 } 631 632 pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n", 633 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate); 634 635 /* Clear the frozen state if applicable */ 636 if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE || 637 fstate == OPAL_EEH_STOPPED_DMA_FREEZE || 638 fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) { 639 /* 640 * If PHB supports compound PE, freeze it for 641 * consistency. 642 */ 643 if (phb->freeze_pe) 644 phb->freeze_pe(phb, pe_no); 645 646 pnv_pci_handle_eeh_config(phb, pe_no); 647 } 648 } 649 650 int pnv_pci_cfg_read(struct pci_dn *pdn, 651 int where, int size, u32 *val) 652 { 653 struct pnv_phb *phb = pdn->phb->private_data; 654 u32 bdfn = (pdn->busno << 8) | pdn->devfn; 655 s64 rc; 656 657 switch (size) { 658 case 1: { 659 u8 v8; 660 rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8); 661 *val = (rc == OPAL_SUCCESS) ? v8 : 0xff; 662 break; 663 } 664 case 2: { 665 __be16 v16; 666 rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where, 667 &v16); 668 *val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff; 669 break; 670 } 671 case 4: { 672 __be32 v32; 673 rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32); 674 *val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff; 675 break; 676 } 677 default: 678 return PCIBIOS_FUNC_NOT_SUPPORTED; 679 } 680 681 pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n", 682 __func__, pdn->busno, pdn->devfn, where, size, *val); 683 return PCIBIOS_SUCCESSFUL; 684 } 685 686 int pnv_pci_cfg_write(struct pci_dn *pdn, 687 int where, int size, u32 val) 688 { 689 struct pnv_phb *phb = pdn->phb->private_data; 690 u32 bdfn = (pdn->busno << 8) | pdn->devfn; 691 692 pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n", 693 __func__, pdn->busno, pdn->devfn, where, size, val); 694 switch (size) { 695 case 1: 696 opal_pci_config_write_byte(phb->opal_id, bdfn, where, val); 697 break; 698 case 2: 699 opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val); 700 break; 701 case 4: 702 opal_pci_config_write_word(phb->opal_id, bdfn, where, val); 703 break; 704 default: 705 return PCIBIOS_FUNC_NOT_SUPPORTED; 706 } 707 708 return PCIBIOS_SUCCESSFUL; 709 } 710 711 #if CONFIG_EEH 712 static bool pnv_pci_cfg_check(struct pci_dn *pdn) 713 { 714 struct eeh_dev *edev = NULL; 715 struct pnv_phb *phb = pdn->phb->private_data; 716 717 /* EEH not enabled ? */ 718 if (!(phb->flags & PNV_PHB_FLAG_EEH)) 719 return true; 720 721 /* PE reset or device removed ? */ 722 edev = pdn->edev; 723 if (edev) { 724 if (edev->pe && 725 (edev->pe->state & EEH_PE_CFG_BLOCKED)) 726 return false; 727 728 if (edev->mode & EEH_DEV_REMOVED) 729 return false; 730 } 731 732 return true; 733 } 734 #else 735 static inline pnv_pci_cfg_check(struct pci_dn *pdn) 736 { 737 return true; 738 } 739 #endif /* CONFIG_EEH */ 740 741 static int pnv_pci_read_config(struct pci_bus *bus, 742 unsigned int devfn, 743 int where, int size, u32 *val) 744 { 745 struct pci_dn *pdn; 746 struct pnv_phb *phb; 747 int ret; 748 749 *val = 0xFFFFFFFF; 750 pdn = pci_get_pdn_by_devfn(bus, devfn); 751 if (!pdn) 752 return PCIBIOS_DEVICE_NOT_FOUND; 753 754 if (!pnv_pci_cfg_check(pdn)) 755 return PCIBIOS_DEVICE_NOT_FOUND; 756 757 ret = pnv_pci_cfg_read(pdn, where, size, val); 758 phb = pdn->phb->private_data; 759 if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) { 760 if (*val == EEH_IO_ERROR_VALUE(size) && 761 eeh_dev_check_failure(pdn->edev)) 762 return PCIBIOS_DEVICE_NOT_FOUND; 763 } else { 764 pnv_pci_config_check_eeh(pdn); 765 } 766 767 return ret; 768 } 769 770 static int pnv_pci_write_config(struct pci_bus *bus, 771 unsigned int devfn, 772 int where, int size, u32 val) 773 { 774 struct pci_dn *pdn; 775 struct pnv_phb *phb; 776 int ret; 777 778 pdn = pci_get_pdn_by_devfn(bus, devfn); 779 if (!pdn) 780 return PCIBIOS_DEVICE_NOT_FOUND; 781 782 if (!pnv_pci_cfg_check(pdn)) 783 return PCIBIOS_DEVICE_NOT_FOUND; 784 785 ret = pnv_pci_cfg_write(pdn, where, size, val); 786 phb = pdn->phb->private_data; 787 if (!(phb->flags & PNV_PHB_FLAG_EEH)) 788 pnv_pci_config_check_eeh(pdn); 789 790 return ret; 791 } 792 793 struct pci_ops pnv_pci_ops = { 794 .read = pnv_pci_read_config, 795 .write = pnv_pci_write_config, 796 }; 797 798 struct iommu_table *pnv_pci_table_alloc(int nid) 799 { 800 struct iommu_table *tbl; 801 802 tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid); 803 if (!tbl) 804 return NULL; 805 806 INIT_LIST_HEAD_RCU(&tbl->it_group_list); 807 kref_init(&tbl->it_kref); 808 809 return tbl; 810 } 811 812 void pnv_pci_dma_dev_setup(struct pci_dev *pdev) 813 { 814 struct pci_controller *hose = pci_bus_to_host(pdev->bus); 815 struct pnv_phb *phb = hose->private_data; 816 #ifdef CONFIG_PCI_IOV 817 struct pnv_ioda_pe *pe; 818 struct pci_dn *pdn; 819 820 /* Fix the VF pdn PE number */ 821 if (pdev->is_virtfn) { 822 pdn = pci_get_pdn(pdev); 823 WARN_ON(pdn->pe_number != IODA_INVALID_PE); 824 list_for_each_entry(pe, &phb->ioda.pe_list, list) { 825 if (pe->rid == ((pdev->bus->number << 8) | 826 (pdev->devfn & 0xff))) { 827 pdn->pe_number = pe->pe_number; 828 pe->pdev = pdev; 829 break; 830 } 831 } 832 } 833 #endif /* CONFIG_PCI_IOV */ 834 835 if (phb && phb->dma_dev_setup) 836 phb->dma_dev_setup(phb, pdev); 837 } 838 839 void pnv_pci_dma_bus_setup(struct pci_bus *bus) 840 { 841 struct pci_controller *hose = bus->sysdata; 842 struct pnv_phb *phb = hose->private_data; 843 struct pnv_ioda_pe *pe; 844 845 list_for_each_entry(pe, &phb->ioda.pe_list, list) { 846 if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))) 847 continue; 848 849 if (!pe->pbus) 850 continue; 851 852 if (bus->number == ((pe->rid >> 8) & 0xFF)) { 853 pe->pbus = bus; 854 break; 855 } 856 } 857 } 858 859 struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev) 860 { 861 struct pci_controller *hose = pci_bus_to_host(dev->bus); 862 863 return of_node_get(hose->dn); 864 } 865 EXPORT_SYMBOL(pnv_pci_get_phb_node); 866 867 int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable) 868 { 869 __be64 val; 870 struct pci_controller *hose; 871 struct pnv_phb *phb; 872 u64 tunnel_bar; 873 int rc; 874 875 if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR)) 876 return -ENXIO; 877 if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR)) 878 return -ENXIO; 879 880 hose = pci_bus_to_host(dev->bus); 881 phb = hose->private_data; 882 883 mutex_lock(&tunnel_mutex); 884 rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val); 885 if (rc != OPAL_SUCCESS) { 886 rc = -EIO; 887 goto out; 888 } 889 tunnel_bar = be64_to_cpu(val); 890 if (enable) { 891 /* 892 * Only one device per PHB can use atomics. 893 * Our policy is first-come, first-served. 894 */ 895 if (tunnel_bar) { 896 if (tunnel_bar != addr) 897 rc = -EBUSY; 898 else 899 rc = 0; /* Setting same address twice is ok */ 900 goto out; 901 } 902 } else { 903 /* 904 * The device that owns atomics and wants to release 905 * them must pass the same address with enable == 0. 906 */ 907 if (tunnel_bar != addr) { 908 rc = -EPERM; 909 goto out; 910 } 911 addr = 0x0ULL; 912 } 913 rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr); 914 rc = opal_error_code(rc); 915 out: 916 mutex_unlock(&tunnel_mutex); 917 return rc; 918 } 919 EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar); 920 921 void pnv_pci_shutdown(void) 922 { 923 struct pci_controller *hose; 924 925 list_for_each_entry(hose, &hose_list, list_node) 926 if (hose->controller_ops.shutdown) 927 hose->controller_ops.shutdown(hose); 928 } 929 930 /* Fixup wrong class code in p7ioc and p8 root complex */ 931 static void pnv_p7ioc_rc_quirk(struct pci_dev *dev) 932 { 933 dev->class = PCI_CLASS_BRIDGE_PCI << 8; 934 } 935 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk); 936 937 void __init pnv_pci_init(void) 938 { 939 struct device_node *np; 940 941 pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN); 942 943 /* If we don't have OPAL, eg. in sim, just skip PCI probe */ 944 if (!firmware_has_feature(FW_FEATURE_OPAL)) 945 return; 946 947 /* Look for IODA IO-Hubs. */ 948 for_each_compatible_node(np, NULL, "ibm,ioda-hub") { 949 pnv_pci_init_ioda_hub(np); 950 } 951 952 /* Look for ioda2 built-in PHB3's */ 953 for_each_compatible_node(np, NULL, "ibm,ioda2-phb") 954 pnv_pci_init_ioda2_phb(np); 955 956 /* Look for ioda3 built-in PHB4's, we treat them as IODA2 */ 957 for_each_compatible_node(np, NULL, "ibm,ioda3-phb") 958 pnv_pci_init_ioda2_phb(np); 959 960 /* Look for NPU PHBs */ 961 for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb") 962 pnv_pci_init_npu_phb(np); 963 964 /* 965 * Look for NPU2 PHBs which we treat mostly as NPU PHBs with 966 * the exception of TCE kill which requires an OPAL call. 967 */ 968 for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-phb") 969 pnv_pci_init_npu_phb(np); 970 971 /* Look for NPU2 OpenCAPI PHBs */ 972 for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb") 973 pnv_pci_init_npu2_opencapi_phb(np); 974 975 /* Configure IOMMU DMA hooks */ 976 set_pci_dma_ops(&dma_iommu_ops); 977 } 978 979 static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb, 980 unsigned long action, void *data) 981 { 982 struct device *dev = data; 983 struct pci_dev *pdev; 984 struct pci_dn *pdn; 985 struct pnv_ioda_pe *pe; 986 struct pci_controller *hose; 987 struct pnv_phb *phb; 988 989 switch (action) { 990 case BUS_NOTIFY_ADD_DEVICE: 991 pdev = to_pci_dev(dev); 992 pdn = pci_get_pdn(pdev); 993 hose = pci_bus_to_host(pdev->bus); 994 phb = hose->private_data; 995 996 WARN_ON_ONCE(!phb); 997 if (!pdn || pdn->pe_number == IODA_INVALID_PE || !phb) 998 return 0; 999 1000 pe = &phb->ioda.pe_array[pdn->pe_number]; 1001 if (!pe->table_group.group) 1002 return 0; 1003 iommu_add_device(&pe->table_group, dev); 1004 return 0; 1005 case BUS_NOTIFY_DEL_DEVICE: 1006 iommu_del_device(dev); 1007 return 0; 1008 default: 1009 return 0; 1010 } 1011 } 1012 1013 static struct notifier_block pnv_tce_iommu_bus_nb = { 1014 .notifier_call = pnv_tce_iommu_bus_notifier, 1015 }; 1016 1017 static int __init pnv_tce_iommu_bus_notifier_init(void) 1018 { 1019 bus_register_notifier(&pci_bus_type, &pnv_tce_iommu_bus_nb); 1020 return 0; 1021 } 1022 machine_subsys_initcall_sync(powernv, pnv_tce_iommu_bus_notifier_init); 1023