/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/firmware.h>
#include <asm/eeh_event.h>
#include <asm/eeh.h>

#include "powernv.h"
#include "pci.h"

static DEFINE_MUTEX(p2p_mutex);

int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
{
	struct device_node *parent = np;
	u32 bdfn;
	u64 phbid;
	int ret;

	ret = of_property_read_u32(np, "reg", &bdfn);
	if (ret)
		return -ENXIO;

	bdfn = ((bdfn & 0x00ffff00) >> 8);
	while ((parent = of_get_parent(parent))) {
		if (!PCI_DN(parent)) {
			of_node_put(parent);
			break;
		}

		if (!of_device_is_compatible(parent, "ibm,ioda2-phb")) {
			of_node_put(parent);
			continue;
		}

		ret = of_property_read_u64(parent, "ibm,opal-phbid", &phbid);
		if (ret) {
			of_node_put(parent);
			return -ENXIO;
		}

		*id = PCI_SLOT_ID(phbid, bdfn);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id);

int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len)
{
	int64_t rc;

	if (!opal_check_token(OPAL_GET_DEVICE_TREE))
		return -ENXIO;

	rc = opal_get_device_tree(phandle, (uint64_t)buf, len);
	if (rc < OPAL_SUCCESS)
		return -EIO;

	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree);

int pnv_pci_get_presence_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE))
		return -ENXIO;

	rc = opal_pci_get_presence_state(id, (uint64_t)state);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state);

int pnv_pci_get_power_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_POWER_STATE))
		return -ENXIO;

	rc = opal_pci_get_power_state(id, (uint64_t)state);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_power_state);

int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg)
{
	struct opal_msg m;
	int token, ret;
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_SET_POWER_STATE))
		return -ENXIO;

	token = opal_async_get_token_interruptible();
	if (unlikely(token < 0))
		return token;

	rc = opal_pci_set_power_state(token, id, (uint64_t)&state);
	if (rc == OPAL_SUCCESS) {
		ret = 0;
		goto exit;
	} else if (rc != OPAL_ASYNC_COMPLETION) {
		ret = -EIO;
		goto exit;
	}

	ret = opal_async_wait_response(token, &m);
	if (ret < 0)
		goto exit;

	if (msg) {
		ret = 1;
		memcpy(msg, &m, sizeof(m));
	}

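	/*
	 * Whether the call completed synchronously or we waited for the
	 * async response, always hand the token back to OPAL.
	 */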
exit:
	opal_async_release_token(token);
	return ret;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);

#ifdef CONFIG_PCI_MSI
int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	struct msi_msg msg;
	int hwirq;
	unsigned int virq;
	int rc;

	if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
		return -ENODEV;

	if (pdev->no_64bit_msi && !phb->msi32_support)
		return -ENODEV;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}
		hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
		if (hwirq < 0) {
			pr_warn("%s: Failed to find a free MSI\n",
				pci_name(pdev));
			return -ENOSPC;
		}
		virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
		if (!virq) {
			pr_warn("%s: Failed to map MSI to linux irq\n",
				pci_name(pdev));
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return -ENOMEM;
		}
		rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
				    virq, entry->msi_attrib.is_64, &msg);
		if (rc) {
			pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
			irq_dispose_mapping(virq);
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return rc;
		}
		irq_set_msi_desc(virq, entry);
		pci_write_msi_msg(virq, &msg);
	}
	return 0;
}

void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	irq_hw_number_t hwirq;

	if (WARN_ON(!phb))
		return;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->irq)
			continue;
		hwirq = virq_to_hw(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
	}
}
#endif /* CONFIG_PCI_MSI */

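/*
 * PHB diagnostic data dumping.
 *
 * The helpers below decode the OPAL diag-data blobs for the supported PHB
 * generations (P7IOC, PHB3, PHB4) and print them via pr_info().  Runs of
 * identical PE State Table entries are collapsed to keep the log short.
 */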
/* Nicely print the contents of the PE State Tables (PEST). */
static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size)
{
	__be64 prevA = ULONG_MAX, prevB = ULONG_MAX;
	bool dup = false;
	int i;

	for (i = 0; i < pest_size; i++) {
		__be64 peA = be64_to_cpu(pestA[i]);
		__be64 peB = be64_to_cpu(pestB[i]);

		if (peA != prevA || peB != prevB) {
			if (dup) {
				pr_info("PE[..%03x] A/B: as above\n", i-1);
				dup = false;
			}
			prevA = peA;
			prevB = peB;
			if (peA & PNV_IODA_STOPPED_STATE ||
			    peB & PNV_IODA_STOPPED_STATE)
				pr_info("PE[%03x] A/B: %016llx %016llx\n",
					i, peA, peB);
		} else if (!dup && (peA & PNV_IODA_STOPPED_STATE ||
				    peB & PNV_IODA_STOPPED_STATE)) {
			dup = true;
		}
	}
}

static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
					 struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoP7IOCPhbErrorData *data;

	data = (struct OpalIoP7IOCPhbErrorData *)common;
	pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->p7iocPlssr || data->p7iocCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->p7iocPlssr),
			be64_to_cpu(data->p7iocCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_P7IOC_NUM_PEST_REGS);
}

static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb3ErrorData *data;

	data = (struct OpalIoPhb3ErrorData *)common;
	pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->nFir)
		pr_info("nFir: %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB3_NUM_PEST_REGS);
}

static void pnv_pci_dump_phb4_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb4ErrorData *data;

	data = (struct OpalIoPhb4ErrorData *)common;
	pr_info("PHB4 PHB#%d Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId)
		pr_info("sourceId: %08x\n", be32_to_cpu(data->sourceId));
	if (data->nFir)
		pr_info("nFir: %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->phbTxeErrorStatus)
		pr_info("PhbTxeErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbTxeErrorStatus),
			be64_to_cpu(data->phbTxeFirstErrorStatus),
			be64_to_cpu(data->phbTxeErrorLog0),
			be64_to_cpu(data->phbTxeErrorLog1));
	if (data->phbRxeArbErrorStatus)
		pr_info("RxeArbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeArbErrorStatus),
			be64_to_cpu(data->phbRxeArbFirstErrorStatus),
			be64_to_cpu(data->phbRxeArbErrorLog0),
			be64_to_cpu(data->phbRxeArbErrorLog1));
	if (data->phbRxeMrgErrorStatus)
		pr_info("RxeMrgErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeMrgErrorStatus),
			be64_to_cpu(data->phbRxeMrgFirstErrorStatus),
			be64_to_cpu(data->phbRxeMrgErrorLog0),
			be64_to_cpu(data->phbRxeMrgErrorLog1));
	if (data->phbRxeTceErrorStatus)
		pr_info("RxeTceErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeTceErrorStatus),
			be64_to_cpu(data->phbRxeTceFirstErrorStatus),
			be64_to_cpu(data->phbRxeTceErrorLog0),
			be64_to_cpu(data->phbRxeTceErrorLog1));

	if (data->phbPblErrorStatus)
		pr_info("PblErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbPblErrorStatus),
			be64_to_cpu(data->phbPblFirstErrorStatus),
			be64_to_cpu(data->phbPblErrorLog0),
			be64_to_cpu(data->phbPblErrorLog1));
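	/*
	 * Note: for the PCIe DLP block the two error log registers are
	 * printed ahead of the status register.
	 */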
	if (data->phbPcieDlpErrorStatus)
		pr_info("PcieDlp: %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbPcieDlpErrorLog1),
			be64_to_cpu(data->phbPcieDlpErrorLog2),
			be64_to_cpu(data->phbPcieDlpErrorStatus));
	if (data->phbRegbErrorStatus)
		pr_info("RegbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRegbErrorStatus),
			be64_to_cpu(data->phbRegbFirstErrorStatus),
			be64_to_cpu(data->phbRegbErrorLog0),
			be64_to_cpu(data->phbRegbErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB4_NUM_PEST_REGS);
}

void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
				unsigned char *log_buff)
{
	struct OpalIoPhbErrorCommon *common;

	if (!hose || !log_buff)
		return;

	common = (struct OpalIoPhbErrorCommon *)log_buff;
	switch (be32_to_cpu(common->ioType)) {
	case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
		pnv_pci_dump_p7ioc_diag_data(hose, common);
		break;
	case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
		pnv_pci_dump_phb3_diag_data(hose, common);
		break;
	case OPAL_PHB_ERROR_DATA_TYPE_PHB4:
		pnv_pci_dump_phb4_diag_data(hose, common);
		break;
	default:
		pr_warn("%s: Unrecognized ioType %d\n",
			__func__, be32_to_cpu(common->ioType));
	}
}

static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
	unsigned long flags, rc;
	int has_diag, ret = 0;

	spin_lock_irqsave(&phb->lock, flags);

	/* Fetch PHB diag-data */
	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
					 phb->diag_data_size);
	has_diag = (rc == OPAL_SUCCESS);

	/* If the PHB supports compound PEs, use its unfreeze hook */
	if (phb->unfreeze_pe) {
		ret = phb->unfreeze_pe(phb,
				       pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	} else {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       pe_no,
					       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (rc) {
			pr_warn("%s: Failure %ld clearing frozen PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe_no);
			ret = -EIO;
		}
	}

	/*
	 * For now, let's only display the diag buffer when we fail to clear
	 * the EEH status. We'll do more sensible things later when we have
	 * proper EEH support. We need to make sure we don't pollute ourselves
	 * with the normal errors generated when probing empty slots.
	 */
	if (has_diag && ret)
		pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);

	spin_unlock_irqrestore(&phb->lock, flags);
}

static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u8 fstate;
	__be16 pcierr;
	unsigned int pe_no;
	s64 rc;

	/*
	 * Get the PE#. During the PCI probe stage, we might not have
	 * set it up yet, so map all ER errors to the reserved PE.
	 */
	pe_no = pdn->pe_number;
	if (pe_no == IODA_INVALID_PE) {
		pe_no = phb->ioda.reserved_pe_idx;
	}

	/*
	 * Fetch the frozen state. If the PHB supports compound PEs,
	 * we need to handle that case.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe_no);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe_no,
						&fstate,
						&pcierr,
						NULL);
		if (rc) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc, phb->hose->global_number, pe_no);
			return;
		}
	}

	pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n",
		 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);

	/* Clear the frozen state if applicable */
	if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_DMA_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
		/*
		 * If the PHB supports compound PEs, freeze it
		 * for consistency.
		 */
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe_no);

		pnv_pci_handle_eeh_config(phb, pe_no);
	}
}

int pnv_pci_cfg_read(struct pci_dn *pdn,
		     int where, int size, u32 *val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;
	s64 rc;

	switch (size) {
	case 1: {
		u8 v8;
		rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
		*val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
		break;
	}
	case 2: {
		__be16 v16;
		rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
						    &v16);
		*val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
		break;
	}
	case 4: {
		__be32 v32;
		rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
		*val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
		break;
	}
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, *val);
	return PCIBIOS_SUCCESSFUL;
}

int pnv_pci_cfg_write(struct pci_dn *pdn,
		      int where, int size, u32 val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, val);
	switch (size) {
	case 1:
		opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
		break;
	case 2:
		opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
		break;
	case 4:
		opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}

#ifdef CONFIG_EEH
static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	struct eeh_dev *edev = NULL;
	struct pnv_phb *phb = pdn->phb->private_data;

	/* EEH not enabled ? */
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		return true;

	/* PE reset or device removed ? */
	edev = pdn->edev;
	if (edev) {
		if (edev->pe &&
		    (edev->pe->state & EEH_PE_CFG_BLOCKED))
			return false;

		if (edev->mode & EEH_DEV_REMOVED)
			return false;
	}

	return true;
}
#else
static inline bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	return true;
}
#endif /* CONFIG_EEH */

static int pnv_pci_read_config(struct pci_bus *bus,
			       unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	*val = 0xFFFFFFFF;
	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_read(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
		if (*val == EEH_IO_ERROR_VALUE(size) &&
		    eeh_dev_check_failure(pdn->edev))
			return PCIBIOS_DEVICE_NOT_FOUND;
	} else {
		pnv_pci_config_check_eeh(pdn);
	}

	return ret;
}

static int pnv_pci_write_config(struct pci_bus *bus,
				unsigned int devfn,
				int where, int size, u32 val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_write(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		pnv_pci_config_check_eeh(pdn);

	return ret;
}

struct pci_ops pnv_pci_ops = {
	.read = pnv_pci_read_config,
	.write = pnv_pci_write_config,
};

static __be64 *pnv_tce(struct iommu_table *tbl, long idx)
{
	__be64 *tmp = ((__be64 *)tbl->it_base);
	int level = tbl->it_indirect_levels;
	const long shift = ilog2(tbl->it_level_size);
	unsigned long mask = (tbl->it_level_size - 1) << (level * shift);

	while (level) {
		int n = (idx & mask) >> (level * shift);
		unsigned long tce = be64_to_cpu(tmp[n]);

		tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
		idx &= ~mask;
		mask >>= shift;
		--level;
	}

	return tmp + idx;
}

int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
		  unsigned long uaddr, enum dma_data_direction direction,
		  unsigned long attrs)
{
	u64 proto_tce = iommu_direction_to_tce_perm(direction);
	u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
	long i;

	if (proto_tce & TCE_PCI_WRITE)
		proto_tce |= TCE_PCI_READ;

	for (i = 0; i < npages; i++) {
		unsigned long newtce = proto_tce |
			((rpn + i) << tbl->it_page_shift);
		unsigned long idx = index - tbl->it_offset + i;

		*(pnv_tce(tbl, idx)) = cpu_to_be64(newtce);
	}

	return 0;
}

#ifdef CONFIG_IOMMU_API
int pnv_tce_xchg(struct iommu_table *tbl, long index,
		 unsigned long *hpa, enum dma_data_direction *direction)
{
	u64 proto_tce = iommu_direction_to_tce_perm(*direction);
	unsigned long newtce = *hpa | proto_tce, oldtce;
	unsigned long idx = index - tbl->it_offset;

	BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));

	if (newtce & TCE_PCI_WRITE)
		newtce |= TCE_PCI_READ;

	oldtce = be64_to_cpu(xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce)));
	*hpa = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	*direction = iommu_tce_direction(oldtce);

	return 0;
}
#endif

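/*
 * Clear @npages TCE entries of @tbl, starting at @index (an absolute
 * index which includes tbl->it_offset).
 */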
void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
{
	long i;

	for (i = 0; i < npages; i++) {
		unsigned long idx = index - tbl->it_offset + i;

		*(pnv_tce(tbl, idx)) = cpu_to_be64(0);
	}
}

unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
{
	return be64_to_cpu(*(pnv_tce(tbl, index - tbl->it_offset)));
}

struct iommu_table *pnv_pci_table_alloc(int nid)
{
	struct iommu_table *tbl;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
	if (!tbl)
		return NULL;

	INIT_LIST_HEAD_RCU(&tbl->it_group_list);
	kref_init(&tbl->it_kref);

	return tbl;
}

long pnv_pci_link_table_and_group(int node, int num,
		struct iommu_table *tbl,
		struct iommu_table_group *table_group)
{
	struct iommu_table_group_link *tgl = NULL;

	if (WARN_ON(!tbl || !table_group))
		return -EINVAL;

	tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
			   node);
	if (!tgl)
		return -ENOMEM;

	tgl->table_group = table_group;
	list_add_rcu(&tgl->next, &tbl->it_group_list);

	table_group->tables[num] = tbl;

	return 0;
}

static void pnv_iommu_table_group_link_free(struct rcu_head *head)
{
	struct iommu_table_group_link *tgl = container_of(head,
			struct iommu_table_group_link, rcu);

	kfree(tgl);
}

void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
		struct iommu_table_group *table_group)
{
	long i;
	bool found;
	struct iommu_table_group_link *tgl;

	if (!tbl || !table_group)
		return;

	/* Remove link to a group from table's list of attached groups */
	found = false;
	list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
		if (tgl->table_group == table_group) {
			list_del_rcu(&tgl->next);
			call_rcu(&tgl->rcu, pnv_iommu_table_group_link_free);
			found = true;
			break;
		}
	}
	if (WARN_ON(!found))
		return;

	/* Clean a pointer to iommu_table in iommu_table_group::tables[] */
	found = false;
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (table_group->tables[i] == tbl) {
			table_group->tables[i] = NULL;
			found = true;
			break;
		}
	}
	WARN_ON(!found);
}

void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
			       void *tce_mem, u64 tce_size,
			       u64 dma_offset, unsigned page_shift)
{
	tbl->it_blocksize = 16;
	tbl->it_base = (unsigned long)tce_mem;
	tbl->it_page_shift = page_shift;
	tbl->it_offset = dma_offset >> tbl->it_page_shift;
	tbl->it_index = 0;
	tbl->it_size = tce_size >> 3;
	tbl->it_busno = 0;
	tbl->it_type = TCE_PCI;
}

void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
#ifdef CONFIG_PCI_IOV
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	/* Fix the VF pdn PE number */
	if (pdev->is_virtfn) {
		pdn = pci_get_pdn(pdev);
		WARN_ON(pdn->pe_number != IODA_INVALID_PE);
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			if (pe->rid == ((pdev->bus->number << 8) |
			    (pdev->devfn & 0xff))) {
				pdn->pe_number = pe->pe_number;
				pe->pdev = pdev;
				break;
			}
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (phb && phb->dma_dev_setup)
		phb->dma_dev_setup(phb, pdev);
}

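/*
 * Re-attach a (possibly re-created) pci_bus to the bus-level PE that
 * covers it, by matching the bus number encoded in the PE's RID.
 */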
void pnv_pci_dma_bus_setup(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;

	list_for_each_entry(pe, &phb->ioda.pe_list, list) {
		if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
			continue;

		if (!pe->pbus)
			continue;

		if (bus->number == ((pe->rid >> 8) & 0xFF)) {
			pe->pbus = bus;
			break;
		}
	}
}

int pnv_pci_set_p2p(struct pci_dev *initiator, struct pci_dev *target, u64 desc)
{
	struct pci_controller *hose;
	struct pnv_phb *phb_init, *phb_target;
	struct pnv_ioda_pe *pe_init;
	int rc;

	if (!opal_check_token(OPAL_PCI_SET_P2P))
		return -ENXIO;

	hose = pci_bus_to_host(initiator->bus);
	phb_init = hose->private_data;

	hose = pci_bus_to_host(target->bus);
	phb_target = hose->private_data;

	pe_init = pnv_ioda_get_pe(initiator);
	if (!pe_init)
		return -ENODEV;

	/*
	 * Configuring the initiator's PHB requires adjusting its TVE#1
	 * setting. Since the same device can be an initiator several
	 * times for different target devices, we need to keep a
	 * reference count to know when we can restore the default
	 * bypass setting on its TVE#1 when disabling. OPAL does not
	 * track PE states, so we add a reference count on the PE in
	 * Linux.
	 *
	 * For the target, the configuration is per PHB, so we keep a
	 * target reference count on the PHB.
	 */
	mutex_lock(&p2p_mutex);

	if (desc & OPAL_PCI_P2P_ENABLE) {
		/* always go to opal to validate the configuration */
		rc = opal_pci_set_p2p(phb_init->opal_id, phb_target->opal_id,
				      desc, pe_init->pe_number);

		if (rc != OPAL_SUCCESS) {
			rc = -EIO;
			goto out;
		}

		pe_init->p2p_initiator_count++;
		phb_target->p2p_target_count++;
	} else {
		if (!pe_init->p2p_initiator_count ||
		    !phb_target->p2p_target_count) {
			rc = -EINVAL;
			goto out;
		}

		if (--pe_init->p2p_initiator_count == 0)
			pnv_pci_ioda2_set_bypass(pe_init, true);

		if (--phb_target->p2p_target_count == 0) {
			rc = opal_pci_set_p2p(phb_init->opal_id,
					      phb_target->opal_id, desc,
					      pe_init->pe_number);
			if (rc != OPAL_SUCCESS) {
				rc = -EIO;
				goto out;
			}
		}
	}
	rc = 0;
out:
	mutex_unlock(&p2p_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_p2p);

void pnv_pci_shutdown(void)
{
	struct pci_controller *hose;

	list_for_each_entry(hose, &hose_list, list_node)
		if (hose->controller_ops.shutdown)
			hose->controller_ops.shutdown(hose);
}

/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);

void __init pnv_pci_init(void)
{
	struct device_node *np;

	pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

	/* If we don't have OPAL, eg. in sim, just skip PCI probe */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

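	/*
	 * Each supported PHB flavour is advertised by firmware with its own
	 * "compatible" string; probe them all.
	 */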
	/* Look for IODA IO-Hubs. */
	for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
		pnv_pci_init_ioda_hub(np);
	}

	/* Look for ioda2 built-in PHB3's */
	for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for ioda3 built-in PHB4's, we treat them as IODA2 */
	for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for NPU PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
		pnv_pci_init_npu_phb(np);

	/*
	 * Look for NPU2 PHBs which we treat mostly as NPU PHBs with
	 * the exception of TCE kill which requires an OPAL call.
	 */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-phb")
		pnv_pci_init_npu_phb(np);

	/* Look for NPU2 OpenCAPI PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb")
		pnv_pci_init_npu2_opencapi_phb(np);

	/* Configure IOMMU DMA hooks */
	set_pci_dma_ops(&dma_iommu_ops);
}

machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);