/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/firmware.h>
#include <asm/eeh_event.h>
#include <asm/eeh.h>

#include "powernv.h"
#include "pci.h"

int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
{
	struct device_node *parent = np;
	u32 bdfn;
	u64 phbid;
	int ret;

	ret = of_property_read_u32(np, "reg", &bdfn);
	if (ret)
		return -ENXIO;

	/* The first "reg" cell carries the BDFN in bits 8-23 */
	bdfn = ((bdfn & 0x00ffff00) >> 8);
	while ((parent = of_get_parent(parent))) {
		if (!PCI_DN(parent)) {
			of_node_put(parent);
			break;
		}

		if (!of_device_is_compatible(parent, "ibm,ioda2-phb")) {
			of_node_put(parent);
			continue;
		}

		ret = of_property_read_u64(parent, "ibm,opal-phbid", &phbid);
		if (ret) {
			of_node_put(parent);
			return -ENXIO;
		}

		*id = PCI_SLOT_ID(phbid, bdfn);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id);

int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len)
{
	int64_t rc;

	if (!opal_check_token(OPAL_GET_DEVICE_TREE))
		return -ENXIO;

	rc = opal_get_device_tree(phandle, (uint64_t)buf, len);
	if (rc < OPAL_SUCCESS)
		return -EIO;

	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree);

int pnv_pci_get_presence_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE))
		return -ENXIO;

	rc = opal_pci_get_presence_state(id, (uint64_t)state);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state);

int pnv_pci_get_power_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_POWER_STATE))
		return -ENXIO;

	rc = opal_pci_get_power_state(id, (uint64_t)state);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_power_state);

int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg)
{
	struct opal_msg m;
	int token, ret;
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_SET_POWER_STATE))
		return -ENXIO;

	token = opal_async_get_token_interruptible();
	if (unlikely(token < 0))
		return token;

	rc = opal_pci_set_power_state(token, id, (uint64_t)&state);
	if (rc == OPAL_SUCCESS) {
		ret = 0;
		goto exit;
	} else if (rc != OPAL_ASYNC_COMPLETION) {
		ret = -EIO;
		goto exit;
	}

	ret = opal_async_wait_response(token, &m);
	if (ret < 0)
		goto exit;

	if (msg) {
		ret = 1;
		memcpy(msg, &m, sizeof(m));
	}

exit:
	/* Always hand the async token back, on success and failure alike */
	opal_async_release_token(token);
	return ret;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);
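
/*
 * MSI support.
 *
 * Each PHB owns a bitmap of MSI hardware IRQ numbers.
 * pnv_setup_msi_irqs() takes one bit per MSI descriptor of the
 * device, maps the resulting hardware IRQ onto a Linux virq and has
 * the PHB-specific msi_setup() hook compose the MSI message;
 * pnv_teardown_msi_irqs() unwinds all of this.
 */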
#ifdef CONFIG_PCI_MSI
int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	struct msi_msg msg;
	int hwirq;
	unsigned int virq;
	int rc;

	if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
		return -ENODEV;

	if (pdev->no_64bit_msi && !phb->msi32_support)
		return -ENODEV;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}
		hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
		if (hwirq < 0) {
			pr_warn("%s: Failed to find a free MSI\n",
				pci_name(pdev));
			return -ENOSPC;
		}
		virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
		if (!virq) {
			pr_warn("%s: Failed to map MSI to linux irq\n",
				pci_name(pdev));
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return -ENOMEM;
		}
		rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
				    virq, entry->msi_attrib.is_64, &msg);
		if (rc) {
			pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
			irq_dispose_mapping(virq);
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return rc;
		}
		irq_set_msi_desc(virq, entry);
		pci_write_msi_msg(virq, &msg);
	}
	return 0;
}

void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	irq_hw_number_t hwirq;

	if (WARN_ON(!phb))
		return;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->irq)
			continue;
		hwirq = virq_to_hw(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
	}
}
#endif /* CONFIG_PCI_MSI */
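
/*
 * PHB diag-data decoders. The layout of the diag-data blob depends
 * on the PHB type, hence one decoder per type. Both decoders skip
 * registers that read as zero, so a quiet PHB produces little output.
 */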
static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
					 struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoP7IOCPhbErrorData *data;
	int i;

	data = (struct OpalIoP7IOCPhbErrorData *)common;
	pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->p7iocPlssr || data->p7iocCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->p7iocPlssr),
			be64_to_cpu(data->p7iocCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
		if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
		    (be64_to_cpu(data->pestB[i]) >> 63) == 0)
			continue;

		pr_info("PE[%3d] A/B: %016llx %016llx\n",
			i, be64_to_cpu(data->pestA[i]),
			be64_to_cpu(data->pestB[i]));
	}
}

static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb3ErrorData *data;
	int i;

	data = (struct OpalIoPhb3ErrorData *)common;
	pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->nFir)
		pr_info("nFir: %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
		if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
		    (be64_to_cpu(data->pestB[i]) >> 63) == 0)
			continue;

		pr_info("PE[%3d] A/B: %016llx %016llx\n",
			i, be64_to_cpu(data->pestA[i]),
			be64_to_cpu(data->pestB[i]));
	}
}
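
/*
 * Generic entry point for dumping diag-data: look at the common
 * header to figure out which PHB type produced the blob, then
 * dispatch to the matching decoder above.
 */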
void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
				unsigned char *log_buff)
{
	struct OpalIoPhbErrorCommon *common;

	if (!hose || !log_buff)
		return;

	common = (struct OpalIoPhbErrorCommon *)log_buff;
	switch (be32_to_cpu(common->ioType)) {
	case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
		pnv_pci_dump_p7ioc_diag_data(hose, common);
		break;
	case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
		pnv_pci_dump_phb3_diag_data(hose, common);
		break;
	default:
		pr_warn("%s: Unrecognized ioType %d\n",
			__func__, be32_to_cpu(common->ioType));
	}
}

static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
	unsigned long flags, rc;
	int has_diag, ret = 0;

	spin_lock_irqsave(&phb->lock, flags);

	/* Fetch PHB diag-data */
	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
					 PNV_PCI_DIAG_BUF_SIZE);
	has_diag = (rc == OPAL_SUCCESS);

	/* If the PHB supports compound PEs, let it handle the unfreeze */
	if (phb->unfreeze_pe) {
		ret = phb->unfreeze_pe(phb,
				       pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	} else {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       pe_no,
					       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (rc) {
			pr_warn("%s: Failure %ld clearing frozen PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe_no);
			ret = -EIO;
		}
	}

	/*
	 * For now, only display the diag buffer when we fail to clear
	 * the EEH status. We'll do more sensible things later when we
	 * have proper EEH support. We need to make sure we don't
	 * pollute ourselves with the normal errors generated when
	 * probing empty slots.
	 */
	if (has_diag && ret)
		pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);

	spin_unlock_irqrestore(&phb->lock, flags);
}
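
/*
 * Called after a config access when full EEH support isn't active:
 * look up the frozen state of the PE that owns the device and, if
 * it is frozen, go clear the freeze (dumping diag-data on failure).
 */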
static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u8 fstate;
	__be16 pcierr;
	unsigned int pe_no;
	s64 rc;

	/*
	 * Get the PE#. During the PCI probe stage the PE# may not have
	 * been assigned yet, so map all ER errors to the reserved PE.
	 */
	pe_no = pdn->pe_number;
	if (pe_no == IODA_INVALID_PE)
		pe_no = phb->ioda.reserved_pe_idx;

	/*
	 * Fetch the frozen state. If the PHB supports compound PEs,
	 * we need to handle that case.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe_no);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe_no,
						&fstate,
						&pcierr,
						NULL);
		if (rc) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc, phb->hose->global_number, pe_no);
			return;
		}
	}

	pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n",
		 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);

	/* Clear the frozen state if applicable */
	if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_DMA_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
		/*
		 * If the PHB supports compound PEs, freeze it for
		 * consistency.
		 */
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe_no);

		pnv_pci_handle_eeh_config(phb, pe_no);
	}
}

int pnv_pci_cfg_read(struct pci_dn *pdn,
		     int where, int size, u32 *val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;
	s64 rc;

	switch (size) {
	case 1: {
		u8 v8;
		rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
		*val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
		break;
	}
	case 2: {
		__be16 v16;
		rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
						    &v16);
		*val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
		break;
	}
	case 4: {
		__be32 v32;
		rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
		*val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
		break;
	}
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, *val);
	return PCIBIOS_SUCCESSFUL;
}

int pnv_pci_cfg_write(struct pci_dn *pdn,
		      int where, int size, u32 val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, val);
	switch (size) {
	case 1:
		opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
		break;
	case 2:
		opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
		break;
	case 4:
		opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}
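
/*
 * Config accesses must be refused while a device's PE is isolated
 * for reset (EEH_PE_CFG_BLOCKED) or after the EEH core has marked
 * the device removed; pnv_pci_cfg_check() below encodes that rule.
 */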
#ifdef CONFIG_EEH
static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	struct eeh_dev *edev = NULL;
	struct pnv_phb *phb = pdn->phb->private_data;

	/* EEH not enabled? */
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		return true;

	/* PE reset or device removed? */
	edev = pdn->edev;
	if (edev) {
		if (edev->pe &&
		    (edev->pe->state & EEH_PE_CFG_BLOCKED))
			return false;

		if (edev->mode & EEH_DEV_REMOVED)
			return false;
	}

	return true;
}
#else
static inline bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	return true;
}
#endif /* CONFIG_EEH */

static int pnv_pci_read_config(struct pci_bus *bus,
			       unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	*val = 0xFFFFFFFF;
	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_read(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
		if (*val == EEH_IO_ERROR_VALUE(size) &&
		    eeh_dev_check_failure(pdn->edev))
			return PCIBIOS_DEVICE_NOT_FOUND;
	} else {
		pnv_pci_config_check_eeh(pdn);
	}

	return ret;
}

static int pnv_pci_write_config(struct pci_bus *bus,
				unsigned int devfn,
				int where, int size, u32 val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_write(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		pnv_pci_config_check_eeh(pdn);

	return ret;
}

struct pci_ops pnv_pci_ops = {
	.read  = pnv_pci_read_config,
	.write = pnv_pci_write_config,
};
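
/*
 * TCE helpers.
 *
 * pnv_tce() returns the address of the 64-bit TCE entry for a given
 * index. With indirect levels, the TCE table is a tree: at each
 * level, a slice of the index selects an entry whose value (minus
 * the permission bits) is the physical address of the next level's
 * table, until level 0 holds the real TCEs.
 */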
static __be64 *pnv_tce(struct iommu_table *tbl, long idx)
{
	__be64 *tmp = ((__be64 *)tbl->it_base);
	int level = tbl->it_indirect_levels;
	const long shift = ilog2(tbl->it_level_size);
	unsigned long mask = (tbl->it_level_size - 1) << (level * shift);

	while (level) {
		int n = (idx & mask) >> (level * shift);
		unsigned long tce = be64_to_cpu(tmp[n]);

		tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
		idx &= ~mask;
		mask >>= shift;
		--level;
	}

	return tmp + idx;
}

int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
		  unsigned long uaddr, enum dma_data_direction direction,
		  unsigned long attrs)
{
	u64 proto_tce = iommu_direction_to_tce_perm(direction);
	u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
	long i;

	if (proto_tce & TCE_PCI_WRITE)
		proto_tce |= TCE_PCI_READ;

	for (i = 0; i < npages; i++) {
		unsigned long newtce = proto_tce |
			((rpn + i) << tbl->it_page_shift);
		unsigned long idx = index - tbl->it_offset + i;

		*(pnv_tce(tbl, idx)) = cpu_to_be64(newtce);
	}

	return 0;
}

#ifdef CONFIG_IOMMU_API
int pnv_tce_xchg(struct iommu_table *tbl, long index,
		 unsigned long *hpa, enum dma_data_direction *direction)
{
	u64 proto_tce = iommu_direction_to_tce_perm(*direction);
	unsigned long newtce = *hpa | proto_tce, oldtce;
	unsigned long idx = index - tbl->it_offset;

	BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));

	if (newtce & TCE_PCI_WRITE)
		newtce |= TCE_PCI_READ;

	oldtce = be64_to_cpu(xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce)));
	*hpa = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	*direction = iommu_tce_direction(oldtce);

	return 0;
}
#endif

void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
{
	long i;

	for (i = 0; i < npages; i++) {
		unsigned long idx = index - tbl->it_offset + i;

		*(pnv_tce(tbl, idx)) = cpu_to_be64(0);
	}
}

unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
{
	return be64_to_cpu(*(pnv_tce(tbl, index - tbl->it_offset)));
}

struct iommu_table *pnv_pci_table_alloc(int nid)
{
	struct iommu_table *tbl;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
	if (!tbl)
		return NULL;

	INIT_LIST_HEAD_RCU(&tbl->it_group_list);
	kref_init(&tbl->it_kref);

	return tbl;
}

long pnv_pci_link_table_and_group(int node, int num,
		struct iommu_table *tbl,
		struct iommu_table_group *table_group)
{
	struct iommu_table_group_link *tgl = NULL;

	if (WARN_ON(!tbl || !table_group))
		return -EINVAL;

	tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
			   node);
	if (!tgl)
		return -ENOMEM;

	tgl->table_group = table_group;
	list_add_rcu(&tgl->next, &tbl->it_group_list);

	table_group->tables[num] = tbl;

	return 0;
}

static void pnv_iommu_table_group_link_free(struct rcu_head *head)
{
	struct iommu_table_group_link *tgl = container_of(head,
			struct iommu_table_group_link, rcu);

	kfree(tgl);
}

void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
		struct iommu_table_group *table_group)
{
	long i;
	bool found;
	struct iommu_table_group_link *tgl;

	if (!tbl || !table_group)
		return;

	/* Remove link to a group from table's list of attached groups */
	found = false;
	list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
		if (tgl->table_group == table_group) {
			list_del_rcu(&tgl->next);
			call_rcu(&tgl->rcu, pnv_iommu_table_group_link_free);
			found = true;
			break;
		}
	}
	if (WARN_ON(!found))
		return;

	/* Clean a pointer to iommu_table in iommu_table_group::tables[] */
	found = false;
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (table_group->tables[i] == tbl) {
			table_group->tables[i] = NULL;
			found = true;
			break;
		}
	}
	WARN_ON(!found);
}

void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
			       void *tce_mem, u64 tce_size,
			       u64 dma_offset, unsigned page_shift)
{
	tbl->it_blocksize = 16;
	tbl->it_base = (unsigned long)tce_mem;
	tbl->it_page_shift = page_shift;
	tbl->it_offset = dma_offset >> tbl->it_page_shift;
	tbl->it_index = 0;
	tbl->it_size = tce_size >> 3;	/* TCEs are 8 bytes each */
	tbl->it_busno = 0;
	tbl->it_type = TCE_PCI;
}

void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
#ifdef CONFIG_PCI_IOV
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	/* Fix the VF pdn PE number */
	if (pdev->is_virtfn) {
		pdn = pci_get_pdn(pdev);
		WARN_ON(pdn->pe_number != IODA_INVALID_PE);
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			if (pe->rid == ((pdev->bus->number << 8) |
			    (pdev->devfn & 0xff))) {
				pdn->pe_number = pe->pe_number;
				pe->pdev = pdev;
				break;
			}
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (phb && phb->dma_dev_setup)
		phb->dma_dev_setup(phb, pdev);
}
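
/*
 * When a PCI bus is (re)scanned, update the PE that owns the bus (or
 * the whole subordinate bus hierarchy) to point at the new pci_bus.
 */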
void pnv_pci_dma_bus_setup(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;

	list_for_each_entry(pe, &phb->ioda.pe_list, list) {
		if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
			continue;

		if (!pe->pbus)
			continue;

		if (bus->number == ((pe->rid >> 8) & 0xFF)) {
			pe->pbus = bus;
			break;
		}
	}
}

void pnv_pci_shutdown(void)
{
	struct pci_controller *hose;

	list_for_each_entry(hose, &hose_list, list_node)
		if (hose->controller_ops.shutdown)
			hose->controller_ops.shutdown(hose);
}

/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);

void __init pnv_pci_init(void)
{
	struct device_node *np;

	pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

	/* If we don't have OPAL, e.g. in a simulator, just skip PCI probe */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

	/* Look for IODA IO-Hubs */
	for_each_compatible_node(np, NULL, "ibm,ioda-hub")
		pnv_pci_init_ioda_hub(np);

	/* Look for ioda2 built-in PHB3's */
	for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for ioda3 built-in PHB4's, we treat them as IODA2 */
	for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for NPU PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
		pnv_pci_init_npu_phb(np);

	/*
	 * Look for NPU2 PHBs which we treat mostly as NPU PHBs with
	 * the exception of TCE kill which requires an OPAL call.
	 */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-phb")
		pnv_pci_init_npu_phb(np);

	/* Configure IOMMU DMA hooks */
	set_pci_dma_ops(&dma_iommu_ops);
}

machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);