1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Support PCI/PCIe on PowerNV platforms 4 * 5 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp. 6 */ 7 8 #undef DEBUG 9 10 #include <linux/kernel.h> 11 #include <linux/pci.h> 12 #include <linux/crash_dump.h> 13 #include <linux/delay.h> 14 #include <linux/string.h> 15 #include <linux/init.h> 16 #include <linux/memblock.h> 17 #include <linux/irq.h> 18 #include <linux/io.h> 19 #include <linux/msi.h> 20 #include <linux/iommu.h> 21 #include <linux/rculist.h> 22 #include <linux/sizes.h> 23 #include <linux/debugfs.h> 24 #include <linux/of_address.h> 25 #include <linux/of_irq.h> 26 27 #include <asm/sections.h> 28 #include <asm/io.h> 29 #include <asm/pci-bridge.h> 30 #include <asm/machdep.h> 31 #include <asm/msi_bitmap.h> 32 #include <asm/ppc-pci.h> 33 #include <asm/opal.h> 34 #include <asm/iommu.h> 35 #include <asm/tce.h> 36 #include <asm/xics.h> 37 #include <asm/firmware.h> 38 #include <asm/pnv-pci.h> 39 #include <asm/mmzone.h> 40 #include <asm/xive.h> 41 42 #include <misc/cxl-base.h> 43 44 #include "powernv.h" 45 #include "pci.h" 46 #include "../../../../drivers/pci/pci.h" 47 48 static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_OCAPI" }; 49 50 static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable); 51 static void pnv_pci_configure_bus(struct pci_bus *bus); 52 53 void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level, 54 const char *fmt, ...) 55 { 56 struct va_format vaf; 57 va_list args; 58 char pfix[32]; 59 60 va_start(args, fmt); 61 62 vaf.fmt = fmt; 63 vaf.va = &args; 64 65 if (pe->flags & PNV_IODA_PE_DEV) 66 strscpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix)); 67 else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) 68 sprintf(pfix, "%04x:%02x ", 69 pci_domain_nr(pe->pbus), pe->pbus->number); 70 #ifdef CONFIG_PCI_IOV 71 else if (pe->flags & PNV_IODA_PE_VF) 72 sprintf(pfix, "%04x:%02x:%2x.%d", 73 pci_domain_nr(pe->parent_dev->bus), 74 (pe->rid & 0xff00) >> 8, 75 PCI_SLOT(pe->rid), PCI_FUNC(pe->rid)); 76 #endif /* CONFIG_PCI_IOV*/ 77 78 printk("%spci %s: [PE# %.2x] %pV", 79 level, pfix, pe->pe_number, &vaf); 80 81 va_end(args); 82 } 83 84 static bool pnv_iommu_bypass_disabled __read_mostly; 85 static bool pci_reset_phbs __read_mostly; 86 87 static int __init iommu_setup(char *str) 88 { 89 if (!str) 90 return -EINVAL; 91 92 while (*str) { 93 if (!strncmp(str, "nobypass", 8)) { 94 pnv_iommu_bypass_disabled = true; 95 pr_info("PowerNV: IOMMU bypass window disabled.\n"); 96 break; 97 } 98 str += strcspn(str, ","); 99 if (*str == ',') 100 str++; 101 } 102 103 return 0; 104 } 105 early_param("iommu", iommu_setup); 106 107 static int __init pci_reset_phbs_setup(char *str) 108 { 109 pci_reset_phbs = true; 110 return 0; 111 } 112 113 early_param("ppc_pci_reset_phbs", pci_reset_phbs_setup); 114 115 static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no) 116 { 117 s64 rc; 118 119 phb->ioda.pe_array[pe_no].phb = phb; 120 phb->ioda.pe_array[pe_no].pe_number = pe_no; 121 phb->ioda.pe_array[pe_no].dma_setup_done = false; 122 123 /* 124 * Clear the PE frozen state as it might be put into frozen state 125 * in the last PCI remove path. It's not harmful to do so when the 126 * PE is already in unfrozen state. 127 */ 128 rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, 129 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); 130 if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED) 131 pr_warn("%s: Error %lld unfreezing PHB#%x-PE#%x\n", 132 __func__, rc, phb->hose->global_number, pe_no); 133 134 return &phb->ioda.pe_array[pe_no]; 135 } 136 137 static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no) 138 { 139 if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe_num)) { 140 pr_warn("%s: Invalid PE %x on PHB#%x\n", 141 __func__, pe_no, phb->hose->global_number); 142 return; 143 } 144 145 mutex_lock(&phb->ioda.pe_alloc_mutex); 146 if (test_and_set_bit(pe_no, phb->ioda.pe_alloc)) 147 pr_debug("%s: PE %x was reserved on PHB#%x\n", 148 __func__, pe_no, phb->hose->global_number); 149 mutex_unlock(&phb->ioda.pe_alloc_mutex); 150 151 pnv_ioda_init_pe(phb, pe_no); 152 } 153 154 struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb, int count) 155 { 156 struct pnv_ioda_pe *ret = NULL; 157 int run = 0, pe, i; 158 159 mutex_lock(&phb->ioda.pe_alloc_mutex); 160 161 /* scan backwards for a run of @count cleared bits */ 162 for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) { 163 if (test_bit(pe, phb->ioda.pe_alloc)) { 164 run = 0; 165 continue; 166 } 167 168 run++; 169 if (run == count) 170 break; 171 } 172 if (run != count) 173 goto out; 174 175 for (i = pe; i < pe + count; i++) { 176 set_bit(i, phb->ioda.pe_alloc); 177 pnv_ioda_init_pe(phb, i); 178 } 179 ret = &phb->ioda.pe_array[pe]; 180 181 out: 182 mutex_unlock(&phb->ioda.pe_alloc_mutex); 183 return ret; 184 } 185 186 void pnv_ioda_free_pe(struct pnv_ioda_pe *pe) 187 { 188 struct pnv_phb *phb = pe->phb; 189 unsigned int pe_num = pe->pe_number; 190 191 WARN_ON(pe->pdev); 192 memset(pe, 0, sizeof(struct pnv_ioda_pe)); 193 194 mutex_lock(&phb->ioda.pe_alloc_mutex); 195 clear_bit(pe_num, phb->ioda.pe_alloc); 196 mutex_unlock(&phb->ioda.pe_alloc_mutex); 197 } 198 199 /* The default M64 BAR is shared by all PEs */ 200 static int pnv_ioda2_init_m64(struct pnv_phb *phb) 201 { 202 const char *desc; 203 struct resource *r; 204 s64 rc; 205 206 /* Configure the default M64 BAR */ 207 rc = opal_pci_set_phb_mem_window(phb->opal_id, 208 OPAL_M64_WINDOW_TYPE, 209 phb->ioda.m64_bar_idx, 210 phb->ioda.m64_base, 211 0, /* unused */ 212 phb->ioda.m64_size); 213 if (rc != OPAL_SUCCESS) { 214 desc = "configuring"; 215 goto fail; 216 } 217 218 /* Enable the default M64 BAR */ 219 rc = opal_pci_phb_mmio_enable(phb->opal_id, 220 OPAL_M64_WINDOW_TYPE, 221 phb->ioda.m64_bar_idx, 222 OPAL_ENABLE_M64_SPLIT); 223 if (rc != OPAL_SUCCESS) { 224 desc = "enabling"; 225 goto fail; 226 } 227 228 /* 229 * Exclude the segments for reserved and root bus PE, which 230 * are first or last two PEs. 231 */ 232 r = &phb->hose->mem_resources[1]; 233 if (phb->ioda.reserved_pe_idx == 0) 234 r->start += (2 * phb->ioda.m64_segsize); 235 else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1)) 236 r->end -= (2 * phb->ioda.m64_segsize); 237 else 238 pr_warn(" Cannot strip M64 segment for reserved PE#%x\n", 239 phb->ioda.reserved_pe_idx); 240 241 return 0; 242 243 fail: 244 pr_warn(" Failure %lld %s M64 BAR#%d\n", 245 rc, desc, phb->ioda.m64_bar_idx); 246 opal_pci_phb_mmio_enable(phb->opal_id, 247 OPAL_M64_WINDOW_TYPE, 248 phb->ioda.m64_bar_idx, 249 OPAL_DISABLE_M64); 250 return -EIO; 251 } 252 253 static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev, 254 unsigned long *pe_bitmap) 255 { 256 struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); 257 struct resource *r; 258 resource_size_t base, sgsz, start, end; 259 int segno, i; 260 261 base = phb->ioda.m64_base; 262 sgsz = phb->ioda.m64_segsize; 263 for (i = 0; i <= PCI_ROM_RESOURCE; i++) { 264 r = &pdev->resource[i]; 265 if (!r->parent || !pnv_pci_is_m64(phb, r)) 266 continue; 267 268 start = ALIGN_DOWN(r->start - base, sgsz); 269 end = ALIGN(r->end - base, sgsz); 270 for (segno = start / sgsz; segno < end / sgsz; segno++) { 271 if (pe_bitmap) 272 set_bit(segno, pe_bitmap); 273 else 274 pnv_ioda_reserve_pe(phb, segno); 275 } 276 } 277 } 278 279 static void pnv_ioda_reserve_m64_pe(struct pci_bus *bus, 280 unsigned long *pe_bitmap, 281 bool all) 282 { 283 struct pci_dev *pdev; 284 285 list_for_each_entry(pdev, &bus->devices, bus_list) { 286 pnv_ioda_reserve_dev_m64_pe(pdev, pe_bitmap); 287 288 if (all && pdev->subordinate) 289 pnv_ioda_reserve_m64_pe(pdev->subordinate, 290 pe_bitmap, all); 291 } 292 } 293 294 static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all) 295 { 296 struct pnv_phb *phb = pci_bus_to_pnvhb(bus); 297 struct pnv_ioda_pe *master_pe, *pe; 298 unsigned long size, *pe_alloc; 299 int i; 300 301 /* Root bus shouldn't use M64 */ 302 if (pci_is_root_bus(bus)) 303 return NULL; 304 305 /* Allocate bitmap */ 306 size = ALIGN(phb->ioda.total_pe_num / 8, sizeof(unsigned long)); 307 pe_alloc = kzalloc(size, GFP_KERNEL); 308 if (!pe_alloc) { 309 pr_warn("%s: Out of memory !\n", 310 __func__); 311 return NULL; 312 } 313 314 /* Figure out reserved PE numbers by the PE */ 315 pnv_ioda_reserve_m64_pe(bus, pe_alloc, all); 316 317 /* 318 * the current bus might not own M64 window and that's all 319 * contributed by its child buses. For the case, we needn't 320 * pick M64 dependent PE#. 321 */ 322 if (bitmap_empty(pe_alloc, phb->ioda.total_pe_num)) { 323 kfree(pe_alloc); 324 return NULL; 325 } 326 327 /* 328 * Figure out the master PE and put all slave PEs to master 329 * PE's list to form compound PE. 330 */ 331 master_pe = NULL; 332 i = -1; 333 while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe_num, i + 1)) < 334 phb->ioda.total_pe_num) { 335 pe = &phb->ioda.pe_array[i]; 336 337 phb->ioda.m64_segmap[pe->pe_number] = pe->pe_number; 338 if (!master_pe) { 339 pe->flags |= PNV_IODA_PE_MASTER; 340 INIT_LIST_HEAD(&pe->slaves); 341 master_pe = pe; 342 } else { 343 pe->flags |= PNV_IODA_PE_SLAVE; 344 pe->master = master_pe; 345 list_add_tail(&pe->list, &master_pe->slaves); 346 } 347 } 348 349 kfree(pe_alloc); 350 return master_pe; 351 } 352 353 static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb) 354 { 355 struct pci_controller *hose = phb->hose; 356 struct device_node *dn = hose->dn; 357 struct resource *res; 358 u32 m64_range[2], i; 359 const __be32 *r; 360 u64 pci_addr; 361 362 if (phb->type != PNV_PHB_IODA1 && phb->type != PNV_PHB_IODA2) { 363 pr_info(" Not support M64 window\n"); 364 return; 365 } 366 367 if (!firmware_has_feature(FW_FEATURE_OPAL)) { 368 pr_info(" Firmware too old to support M64 window\n"); 369 return; 370 } 371 372 r = of_get_property(dn, "ibm,opal-m64-window", NULL); 373 if (!r) { 374 pr_info(" No <ibm,opal-m64-window> on %pOF\n", 375 dn); 376 return; 377 } 378 379 /* 380 * Find the available M64 BAR range and pickup the last one for 381 * covering the whole 64-bits space. We support only one range. 382 */ 383 if (of_property_read_u32_array(dn, "ibm,opal-available-m64-ranges", 384 m64_range, 2)) { 385 /* In absence of the property, assume 0..15 */ 386 m64_range[0] = 0; 387 m64_range[1] = 16; 388 } 389 /* We only support 64 bits in our allocator */ 390 if (m64_range[1] > 63) { 391 pr_warn("%s: Limiting M64 range to 63 (from %d) on PHB#%x\n", 392 __func__, m64_range[1], phb->hose->global_number); 393 m64_range[1] = 63; 394 } 395 /* Empty range, no m64 */ 396 if (m64_range[1] <= m64_range[0]) { 397 pr_warn("%s: M64 empty, disabling M64 usage on PHB#%x\n", 398 __func__, phb->hose->global_number); 399 return; 400 } 401 402 /* Configure M64 informations */ 403 res = &hose->mem_resources[1]; 404 res->name = dn->full_name; 405 res->start = of_translate_address(dn, r + 2); 406 res->end = res->start + of_read_number(r + 4, 2) - 1; 407 res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH); 408 pci_addr = of_read_number(r, 2); 409 hose->mem_offset[1] = res->start - pci_addr; 410 411 phb->ioda.m64_size = resource_size(res); 412 phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe_num; 413 phb->ioda.m64_base = pci_addr; 414 415 /* This lines up nicely with the display from processing OF ranges */ 416 pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx (M64 #%d..%d)\n", 417 res->start, res->end, pci_addr, m64_range[0], 418 m64_range[0] + m64_range[1] - 1); 419 420 /* Mark all M64 used up by default */ 421 phb->ioda.m64_bar_alloc = (unsigned long)-1; 422 423 /* Use last M64 BAR to cover M64 window */ 424 m64_range[1]--; 425 phb->ioda.m64_bar_idx = m64_range[0] + m64_range[1]; 426 427 pr_info(" Using M64 #%d as default window\n", phb->ioda.m64_bar_idx); 428 429 /* Mark remaining ones free */ 430 for (i = m64_range[0]; i < m64_range[1]; i++) 431 clear_bit(i, &phb->ioda.m64_bar_alloc); 432 433 /* 434 * Setup init functions for M64 based on IODA version, IODA3 uses 435 * the IODA2 code. 436 */ 437 phb->init_m64 = pnv_ioda2_init_m64; 438 } 439 440 static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no) 441 { 442 struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no]; 443 struct pnv_ioda_pe *slave; 444 s64 rc; 445 446 /* Fetch master PE */ 447 if (pe->flags & PNV_IODA_PE_SLAVE) { 448 pe = pe->master; 449 if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER))) 450 return; 451 452 pe_no = pe->pe_number; 453 } 454 455 /* Freeze master PE */ 456 rc = opal_pci_eeh_freeze_set(phb->opal_id, 457 pe_no, 458 OPAL_EEH_ACTION_SET_FREEZE_ALL); 459 if (rc != OPAL_SUCCESS) { 460 pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n", 461 __func__, rc, phb->hose->global_number, pe_no); 462 return; 463 } 464 465 /* Freeze slave PEs */ 466 if (!(pe->flags & PNV_IODA_PE_MASTER)) 467 return; 468 469 list_for_each_entry(slave, &pe->slaves, list) { 470 rc = opal_pci_eeh_freeze_set(phb->opal_id, 471 slave->pe_number, 472 OPAL_EEH_ACTION_SET_FREEZE_ALL); 473 if (rc != OPAL_SUCCESS) 474 pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n", 475 __func__, rc, phb->hose->global_number, 476 slave->pe_number); 477 } 478 } 479 480 static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt) 481 { 482 struct pnv_ioda_pe *pe, *slave; 483 s64 rc; 484 485 /* Find master PE */ 486 pe = &phb->ioda.pe_array[pe_no]; 487 if (pe->flags & PNV_IODA_PE_SLAVE) { 488 pe = pe->master; 489 WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)); 490 pe_no = pe->pe_number; 491 } 492 493 /* Clear frozen state for master PE */ 494 rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, opt); 495 if (rc != OPAL_SUCCESS) { 496 pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n", 497 __func__, rc, opt, phb->hose->global_number, pe_no); 498 return -EIO; 499 } 500 501 if (!(pe->flags & PNV_IODA_PE_MASTER)) 502 return 0; 503 504 /* Clear frozen state for slave PEs */ 505 list_for_each_entry(slave, &pe->slaves, list) { 506 rc = opal_pci_eeh_freeze_clear(phb->opal_id, 507 slave->pe_number, 508 opt); 509 if (rc != OPAL_SUCCESS) { 510 pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n", 511 __func__, rc, opt, phb->hose->global_number, 512 slave->pe_number); 513 return -EIO; 514 } 515 } 516 517 return 0; 518 } 519 520 static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no) 521 { 522 struct pnv_ioda_pe *slave, *pe; 523 u8 fstate = 0, state; 524 __be16 pcierr = 0; 525 s64 rc; 526 527 /* Sanity check on PE number */ 528 if (pe_no < 0 || pe_no >= phb->ioda.total_pe_num) 529 return OPAL_EEH_STOPPED_PERM_UNAVAIL; 530 531 /* 532 * Fetch the master PE and the PE instance might be 533 * not initialized yet. 534 */ 535 pe = &phb->ioda.pe_array[pe_no]; 536 if (pe->flags & PNV_IODA_PE_SLAVE) { 537 pe = pe->master; 538 WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)); 539 pe_no = pe->pe_number; 540 } 541 542 /* Check the master PE */ 543 rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no, 544 &state, &pcierr, NULL); 545 if (rc != OPAL_SUCCESS) { 546 pr_warn("%s: Failure %lld getting " 547 "PHB#%x-PE#%x state\n", 548 __func__, rc, 549 phb->hose->global_number, pe_no); 550 return OPAL_EEH_STOPPED_TEMP_UNAVAIL; 551 } 552 553 /* Check the slave PE */ 554 if (!(pe->flags & PNV_IODA_PE_MASTER)) 555 return state; 556 557 list_for_each_entry(slave, &pe->slaves, list) { 558 rc = opal_pci_eeh_freeze_status(phb->opal_id, 559 slave->pe_number, 560 &fstate, 561 &pcierr, 562 NULL); 563 if (rc != OPAL_SUCCESS) { 564 pr_warn("%s: Failure %lld getting " 565 "PHB#%x-PE#%x state\n", 566 __func__, rc, 567 phb->hose->global_number, slave->pe_number); 568 return OPAL_EEH_STOPPED_TEMP_UNAVAIL; 569 } 570 571 /* 572 * Override the result based on the ascending 573 * priority. 574 */ 575 if (fstate > state) 576 state = fstate; 577 } 578 579 return state; 580 } 581 582 struct pnv_ioda_pe *pnv_pci_bdfn_to_pe(struct pnv_phb *phb, u16 bdfn) 583 { 584 int pe_number = phb->ioda.pe_rmap[bdfn]; 585 586 if (pe_number == IODA_INVALID_PE) 587 return NULL; 588 589 return &phb->ioda.pe_array[pe_number]; 590 } 591 592 struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev) 593 { 594 struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus); 595 struct pci_dn *pdn = pci_get_pdn(dev); 596 597 if (!pdn) 598 return NULL; 599 if (pdn->pe_number == IODA_INVALID_PE) 600 return NULL; 601 return &phb->ioda.pe_array[pdn->pe_number]; 602 } 603 604 static int pnv_ioda_set_one_peltv(struct pnv_phb *phb, 605 struct pnv_ioda_pe *parent, 606 struct pnv_ioda_pe *child, 607 bool is_add) 608 { 609 const char *desc = is_add ? "adding" : "removing"; 610 uint8_t op = is_add ? OPAL_ADD_PE_TO_DOMAIN : 611 OPAL_REMOVE_PE_FROM_DOMAIN; 612 struct pnv_ioda_pe *slave; 613 long rc; 614 615 /* Parent PE affects child PE */ 616 rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number, 617 child->pe_number, op); 618 if (rc != OPAL_SUCCESS) { 619 pe_warn(child, "OPAL error %ld %s to parent PELTV\n", 620 rc, desc); 621 return -ENXIO; 622 } 623 624 if (!(child->flags & PNV_IODA_PE_MASTER)) 625 return 0; 626 627 /* Compound case: parent PE affects slave PEs */ 628 list_for_each_entry(slave, &child->slaves, list) { 629 rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number, 630 slave->pe_number, op); 631 if (rc != OPAL_SUCCESS) { 632 pe_warn(slave, "OPAL error %ld %s to parent PELTV\n", 633 rc, desc); 634 return -ENXIO; 635 } 636 } 637 638 return 0; 639 } 640 641 static int pnv_ioda_set_peltv(struct pnv_phb *phb, 642 struct pnv_ioda_pe *pe, 643 bool is_add) 644 { 645 struct pnv_ioda_pe *slave; 646 struct pci_dev *pdev = NULL; 647 int ret; 648 649 /* 650 * Clear PE frozen state. If it's master PE, we need 651 * clear slave PE frozen state as well. 652 */ 653 if (is_add) { 654 opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number, 655 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); 656 if (pe->flags & PNV_IODA_PE_MASTER) { 657 list_for_each_entry(slave, &pe->slaves, list) 658 opal_pci_eeh_freeze_clear(phb->opal_id, 659 slave->pe_number, 660 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); 661 } 662 } 663 664 /* 665 * Associate PE in PELT. We need add the PE into the 666 * corresponding PELT-V as well. Otherwise, the error 667 * originated from the PE might contribute to other 668 * PEs. 669 */ 670 ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add); 671 if (ret) 672 return ret; 673 674 /* For compound PEs, any one affects all of them */ 675 if (pe->flags & PNV_IODA_PE_MASTER) { 676 list_for_each_entry(slave, &pe->slaves, list) { 677 ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add); 678 if (ret) 679 return ret; 680 } 681 } 682 683 if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS)) 684 pdev = pe->pbus->self; 685 else if (pe->flags & PNV_IODA_PE_DEV) 686 pdev = pe->pdev->bus->self; 687 #ifdef CONFIG_PCI_IOV 688 else if (pe->flags & PNV_IODA_PE_VF) 689 pdev = pe->parent_dev; 690 #endif /* CONFIG_PCI_IOV */ 691 while (pdev) { 692 struct pci_dn *pdn = pci_get_pdn(pdev); 693 struct pnv_ioda_pe *parent; 694 695 if (pdn && pdn->pe_number != IODA_INVALID_PE) { 696 parent = &phb->ioda.pe_array[pdn->pe_number]; 697 ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add); 698 if (ret) 699 return ret; 700 } 701 702 pdev = pdev->bus->self; 703 } 704 705 return 0; 706 } 707 708 static void pnv_ioda_unset_peltv(struct pnv_phb *phb, 709 struct pnv_ioda_pe *pe, 710 struct pci_dev *parent) 711 { 712 int64_t rc; 713 714 while (parent) { 715 struct pci_dn *pdn = pci_get_pdn(parent); 716 717 if (pdn && pdn->pe_number != IODA_INVALID_PE) { 718 rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number, 719 pe->pe_number, 720 OPAL_REMOVE_PE_FROM_DOMAIN); 721 /* XXX What to do in case of error ? */ 722 } 723 parent = parent->bus->self; 724 } 725 726 opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number, 727 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); 728 729 /* Disassociate PE in PELT */ 730 rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number, 731 pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN); 732 if (rc) 733 pe_warn(pe, "OPAL error %lld remove self from PELTV\n", rc); 734 } 735 736 int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) 737 { 738 struct pci_dev *parent; 739 uint8_t bcomp, dcomp, fcomp; 740 int64_t rc; 741 long rid_end, rid; 742 743 /* Currently, we just deconfigure VF PE. Bus PE will always there.*/ 744 if (pe->pbus) { 745 int count; 746 747 dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER; 748 fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER; 749 parent = pe->pbus->self; 750 if (pe->flags & PNV_IODA_PE_BUS_ALL) 751 count = resource_size(&pe->pbus->busn_res); 752 else 753 count = 1; 754 755 switch(count) { 756 case 1: bcomp = OpalPciBusAll; break; 757 case 2: bcomp = OpalPciBus7Bits; break; 758 case 4: bcomp = OpalPciBus6Bits; break; 759 case 8: bcomp = OpalPciBus5Bits; break; 760 case 16: bcomp = OpalPciBus4Bits; break; 761 case 32: bcomp = OpalPciBus3Bits; break; 762 default: 763 dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n", 764 count); 765 /* Do an exact match only */ 766 bcomp = OpalPciBusAll; 767 } 768 rid_end = pe->rid + (count << 8); 769 } else { 770 #ifdef CONFIG_PCI_IOV 771 if (pe->flags & PNV_IODA_PE_VF) 772 parent = pe->parent_dev; 773 else 774 #endif 775 parent = pe->pdev->bus->self; 776 bcomp = OpalPciBusAll; 777 dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER; 778 fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER; 779 rid_end = pe->rid + 1; 780 } 781 782 /* Clear the reverse map */ 783 for (rid = pe->rid; rid < rid_end; rid++) 784 phb->ioda.pe_rmap[rid] = IODA_INVALID_PE; 785 786 /* 787 * Release from all parents PELT-V. NPUs don't have a PELTV 788 * table 789 */ 790 if (phb->type != PNV_PHB_NPU_OCAPI) 791 pnv_ioda_unset_peltv(phb, pe, parent); 792 793 rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid, 794 bcomp, dcomp, fcomp, OPAL_UNMAP_PE); 795 if (rc) 796 pe_err(pe, "OPAL error %lld trying to setup PELT table\n", rc); 797 798 pe->pbus = NULL; 799 pe->pdev = NULL; 800 #ifdef CONFIG_PCI_IOV 801 pe->parent_dev = NULL; 802 #endif 803 804 return 0; 805 } 806 807 int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) 808 { 809 uint8_t bcomp, dcomp, fcomp; 810 long rc, rid_end, rid; 811 812 /* Bus validation ? */ 813 if (pe->pbus) { 814 int count; 815 816 dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER; 817 fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER; 818 if (pe->flags & PNV_IODA_PE_BUS_ALL) 819 count = resource_size(&pe->pbus->busn_res); 820 else 821 count = 1; 822 823 switch(count) { 824 case 1: bcomp = OpalPciBusAll; break; 825 case 2: bcomp = OpalPciBus7Bits; break; 826 case 4: bcomp = OpalPciBus6Bits; break; 827 case 8: bcomp = OpalPciBus5Bits; break; 828 case 16: bcomp = OpalPciBus4Bits; break; 829 case 32: bcomp = OpalPciBus3Bits; break; 830 default: 831 dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n", 832 count); 833 /* Do an exact match only */ 834 bcomp = OpalPciBusAll; 835 } 836 rid_end = pe->rid + (count << 8); 837 } else { 838 bcomp = OpalPciBusAll; 839 dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER; 840 fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER; 841 rid_end = pe->rid + 1; 842 } 843 844 /* 845 * Associate PE in PELT. We need add the PE into the 846 * corresponding PELT-V as well. Otherwise, the error 847 * originated from the PE might contribute to other 848 * PEs. 849 */ 850 rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid, 851 bcomp, dcomp, fcomp, OPAL_MAP_PE); 852 if (rc) { 853 pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc); 854 return -ENXIO; 855 } 856 857 /* 858 * Configure PELTV. NPUs don't have a PELTV table so skip 859 * configuration on them. 860 */ 861 if (phb->type != PNV_PHB_NPU_OCAPI) 862 pnv_ioda_set_peltv(phb, pe, true); 863 864 /* Setup reverse map */ 865 for (rid = pe->rid; rid < rid_end; rid++) 866 phb->ioda.pe_rmap[rid] = pe->pe_number; 867 868 pe->mve_number = 0; 869 870 return 0; 871 } 872 873 static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev) 874 { 875 struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus); 876 struct pci_dn *pdn = pci_get_pdn(dev); 877 struct pnv_ioda_pe *pe; 878 879 if (!pdn) { 880 pr_err("%s: Device tree node not associated properly\n", 881 pci_name(dev)); 882 return NULL; 883 } 884 if (pdn->pe_number != IODA_INVALID_PE) 885 return NULL; 886 887 pe = pnv_ioda_alloc_pe(phb, 1); 888 if (!pe) { 889 pr_warn("%s: Not enough PE# available, disabling device\n", 890 pci_name(dev)); 891 return NULL; 892 } 893 894 /* NOTE: We don't get a reference for the pointer in the PE 895 * data structure, both the device and PE structures should be 896 * destroyed at the same time. 897 * 898 * At some point we want to remove the PDN completely anyways 899 */ 900 pdn->pe_number = pe->pe_number; 901 pe->flags = PNV_IODA_PE_DEV; 902 pe->pdev = dev; 903 pe->pbus = NULL; 904 pe->mve_number = -1; 905 pe->rid = dev->bus->number << 8 | pdn->devfn; 906 pe->device_count++; 907 908 pe_info(pe, "Associated device to PE\n"); 909 910 if (pnv_ioda_configure_pe(phb, pe)) { 911 /* XXX What do we do here ? */ 912 pnv_ioda_free_pe(pe); 913 pdn->pe_number = IODA_INVALID_PE; 914 pe->pdev = NULL; 915 return NULL; 916 } 917 918 /* Put PE to the list */ 919 mutex_lock(&phb->ioda.pe_list_mutex); 920 list_add_tail(&pe->list, &phb->ioda.pe_list); 921 mutex_unlock(&phb->ioda.pe_list_mutex); 922 return pe; 923 } 924 925 /* 926 * There're 2 types of PCI bus sensitive PEs: One that is compromised of 927 * single PCI bus. Another one that contains the primary PCI bus and its 928 * subordinate PCI devices and buses. The second type of PE is normally 929 * orgiriated by PCIe-to-PCI bridge or PLX switch downstream ports. 930 */ 931 static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all) 932 { 933 struct pnv_phb *phb = pci_bus_to_pnvhb(bus); 934 struct pnv_ioda_pe *pe = NULL; 935 unsigned int pe_num; 936 937 /* 938 * In partial hotplug case, the PE instance might be still alive. 939 * We should reuse it instead of allocating a new one. 940 */ 941 pe_num = phb->ioda.pe_rmap[bus->number << 8]; 942 if (WARN_ON(pe_num != IODA_INVALID_PE)) { 943 pe = &phb->ioda.pe_array[pe_num]; 944 return NULL; 945 } 946 947 /* PE number for root bus should have been reserved */ 948 if (pci_is_root_bus(bus)) 949 pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx]; 950 951 /* Check if PE is determined by M64 */ 952 if (!pe) 953 pe = pnv_ioda_pick_m64_pe(bus, all); 954 955 /* The PE number isn't pinned by M64 */ 956 if (!pe) 957 pe = pnv_ioda_alloc_pe(phb, 1); 958 959 if (!pe) { 960 pr_warn("%s: Not enough PE# available for PCI bus %04x:%02x\n", 961 __func__, pci_domain_nr(bus), bus->number); 962 return NULL; 963 } 964 965 pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS); 966 pe->pbus = bus; 967 pe->pdev = NULL; 968 pe->mve_number = -1; 969 pe->rid = bus->busn_res.start << 8; 970 971 if (all) 972 pe_info(pe, "Secondary bus %pad..%pad associated with PE#%x\n", 973 &bus->busn_res.start, &bus->busn_res.end, 974 pe->pe_number); 975 else 976 pe_info(pe, "Secondary bus %pad associated with PE#%x\n", 977 &bus->busn_res.start, pe->pe_number); 978 979 if (pnv_ioda_configure_pe(phb, pe)) { 980 /* XXX What do we do here ? */ 981 pnv_ioda_free_pe(pe); 982 pe->pbus = NULL; 983 return NULL; 984 } 985 986 /* Put PE to the list */ 987 list_add_tail(&pe->list, &phb->ioda.pe_list); 988 989 return pe; 990 } 991 992 static void pnv_pci_ioda_dma_dev_setup(struct pci_dev *pdev) 993 { 994 struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); 995 struct pci_dn *pdn = pci_get_pdn(pdev); 996 struct pnv_ioda_pe *pe; 997 998 /* Check if the BDFN for this device is associated with a PE yet */ 999 pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8)); 1000 if (!pe) { 1001 /* VF PEs should be pre-configured in pnv_pci_sriov_enable() */ 1002 if (WARN_ON(pdev->is_virtfn)) 1003 return; 1004 1005 pnv_pci_configure_bus(pdev->bus); 1006 pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8)); 1007 pci_info(pdev, "Configured PE#%x\n", pe ? pe->pe_number : 0xfffff); 1008 1009 1010 /* 1011 * If we can't setup the IODA PE something has gone horribly 1012 * wrong and we can't enable DMA for the device. 1013 */ 1014 if (WARN_ON(!pe)) 1015 return; 1016 } else { 1017 pci_info(pdev, "Added to existing PE#%x\n", pe->pe_number); 1018 } 1019 1020 /* 1021 * We assume that bridges *probably* don't need to do any DMA so we can 1022 * skip allocating a TCE table, etc unless we get a non-bridge device. 1023 */ 1024 if (!pe->dma_setup_done && !pci_is_bridge(pdev)) { 1025 switch (phb->type) { 1026 case PNV_PHB_IODA2: 1027 pnv_pci_ioda2_setup_dma_pe(phb, pe); 1028 break; 1029 default: 1030 pr_warn("%s: No DMA for PHB#%x (type %d)\n", 1031 __func__, phb->hose->global_number, phb->type); 1032 } 1033 } 1034 1035 if (pdn) 1036 pdn->pe_number = pe->pe_number; 1037 pe->device_count++; 1038 1039 WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops); 1040 pdev->dev.archdata.dma_offset = pe->tce_bypass_base; 1041 set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]); 1042 1043 /* PEs with a DMA weight of zero won't have a group */ 1044 if (pe->table_group.group) 1045 iommu_add_device(&pe->table_group, &pdev->dev); 1046 } 1047 1048 /* 1049 * Reconfigure TVE#0 to be usable as 64-bit DMA space. 1050 * 1051 * The first 4GB of virtual memory for a PE is reserved for 32-bit accesses. 1052 * Devices can only access more than that if bit 59 of the PCI address is set 1053 * by hardware, which indicates TVE#1 should be used instead of TVE#0. 1054 * Many PCI devices are not capable of addressing that many bits, and as a 1055 * result are limited to the 4GB of virtual memory made available to 32-bit 1056 * devices in TVE#0. 1057 * 1058 * In order to work around this, reconfigure TVE#0 to be suitable for 64-bit 1059 * devices by configuring the virtual memory past the first 4GB inaccessible 1060 * by 64-bit DMAs. This should only be used by devices that want more than 1061 * 4GB, and only on PEs that have no 32-bit devices. 1062 * 1063 * Currently this will only work on PHB3 (POWER8). 1064 */ 1065 static int pnv_pci_ioda_dma_64bit_bypass(struct pnv_ioda_pe *pe) 1066 { 1067 u64 window_size, table_size, tce_count, addr; 1068 struct page *table_pages; 1069 u64 tce_order = 28; /* 256MB TCEs */ 1070 __be64 *tces; 1071 s64 rc; 1072 1073 /* 1074 * Window size needs to be a power of two, but needs to account for 1075 * shifting memory by the 4GB offset required to skip 32bit space. 1076 */ 1077 window_size = roundup_pow_of_two(memory_hotplug_max() + (1ULL << 32)); 1078 tce_count = window_size >> tce_order; 1079 table_size = tce_count << 3; 1080 1081 if (table_size < PAGE_SIZE) 1082 table_size = PAGE_SIZE; 1083 1084 table_pages = alloc_pages_node(pe->phb->hose->node, GFP_KERNEL, 1085 get_order(table_size)); 1086 if (!table_pages) 1087 goto err; 1088 1089 tces = page_address(table_pages); 1090 if (!tces) 1091 goto err; 1092 1093 memset(tces, 0, table_size); 1094 1095 for (addr = 0; addr < memory_hotplug_max(); addr += (1 << tce_order)) { 1096 tces[(addr + (1ULL << 32)) >> tce_order] = 1097 cpu_to_be64(addr | TCE_PCI_READ | TCE_PCI_WRITE); 1098 } 1099 1100 rc = opal_pci_map_pe_dma_window(pe->phb->opal_id, 1101 pe->pe_number, 1102 /* reconfigure window 0 */ 1103 (pe->pe_number << 1) + 0, 1104 1, 1105 __pa(tces), 1106 table_size, 1107 1 << tce_order); 1108 if (rc == OPAL_SUCCESS) { 1109 pe_info(pe, "Using 64-bit DMA iommu bypass (through TVE#0)\n"); 1110 return 0; 1111 } 1112 err: 1113 pe_err(pe, "Error configuring 64-bit DMA bypass\n"); 1114 return -EIO; 1115 } 1116 1117 static bool pnv_pci_ioda_iommu_bypass_supported(struct pci_dev *pdev, 1118 u64 dma_mask) 1119 { 1120 struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); 1121 struct pci_dn *pdn = pci_get_pdn(pdev); 1122 struct pnv_ioda_pe *pe; 1123 1124 if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) 1125 return false; 1126 1127 pe = &phb->ioda.pe_array[pdn->pe_number]; 1128 if (pe->tce_bypass_enabled) { 1129 u64 top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1; 1130 if (dma_mask >= top) 1131 return true; 1132 } 1133 1134 /* 1135 * If the device can't set the TCE bypass bit but still wants 1136 * to access 4GB or more, on PHB3 we can reconfigure TVE#0 to 1137 * bypass the 32-bit region and be usable for 64-bit DMAs. 1138 * The device needs to be able to address all of this space. 1139 */ 1140 if (dma_mask >> 32 && 1141 dma_mask > (memory_hotplug_max() + (1ULL << 32)) && 1142 /* pe->pdev should be set if it's a single device, pe->pbus if not */ 1143 (pe->device_count == 1 || !pe->pbus) && 1144 phb->model == PNV_PHB_MODEL_PHB3) { 1145 /* Configure the bypass mode */ 1146 s64 rc = pnv_pci_ioda_dma_64bit_bypass(pe); 1147 if (rc) 1148 return false; 1149 /* 4GB offset bypasses 32-bit space */ 1150 pdev->dev.archdata.dma_offset = (1ULL << 32); 1151 return true; 1152 } 1153 1154 return false; 1155 } 1156 1157 static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb) 1158 { 1159 return phb->regs + 0x210; 1160 } 1161 1162 #ifdef CONFIG_IOMMU_API 1163 /* Common for IODA1 and IODA2 */ 1164 static int pnv_ioda_tce_xchg_no_kill(struct iommu_table *tbl, long index, 1165 unsigned long *hpa, enum dma_data_direction *direction) 1166 { 1167 return pnv_tce_xchg(tbl, index, hpa, direction); 1168 } 1169 #endif 1170 1171 #define PHB3_TCE_KILL_INVAL_ALL PPC_BIT(0) 1172 #define PHB3_TCE_KILL_INVAL_PE PPC_BIT(1) 1173 #define PHB3_TCE_KILL_INVAL_ONE PPC_BIT(2) 1174 1175 static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe) 1176 { 1177 /* 01xb - invalidate TCEs that match the specified PE# */ 1178 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb); 1179 unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF); 1180 1181 mb(); /* Ensure above stores are visible */ 1182 __raw_writeq_be(val, invalidate); 1183 } 1184 1185 static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, 1186 unsigned shift, unsigned long index, 1187 unsigned long npages) 1188 { 1189 __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb); 1190 unsigned long start, end, inc; 1191 1192 /* We'll invalidate DMA address in PE scope */ 1193 start = PHB3_TCE_KILL_INVAL_ONE; 1194 start |= (pe->pe_number & 0xFF); 1195 end = start; 1196 1197 /* Figure out the start, end and step */ 1198 start |= (index << shift); 1199 end |= ((index + npages - 1) << shift); 1200 inc = (0x1ull << shift); 1201 mb(); 1202 1203 while (start <= end) { 1204 __raw_writeq_be(start, invalidate); 1205 start += inc; 1206 } 1207 } 1208 1209 static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe) 1210 { 1211 struct pnv_phb *phb = pe->phb; 1212 1213 if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs) 1214 pnv_pci_phb3_tce_invalidate_pe(pe); 1215 else 1216 opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL_PE, 1217 pe->pe_number, 0, 0, 0); 1218 } 1219 1220 static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl, 1221 unsigned long index, unsigned long npages) 1222 { 1223 struct iommu_table_group_link *tgl; 1224 1225 list_for_each_entry_lockless(tgl, &tbl->it_group_list, next) { 1226 struct pnv_ioda_pe *pe = container_of(tgl->table_group, 1227 struct pnv_ioda_pe, table_group); 1228 struct pnv_phb *phb = pe->phb; 1229 unsigned int shift = tbl->it_page_shift; 1230 1231 if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs) 1232 pnv_pci_phb3_tce_invalidate(pe, shift, 1233 index, npages); 1234 else 1235 opal_pci_tce_kill(phb->opal_id, 1236 OPAL_PCI_TCE_KILL_PAGES, 1237 pe->pe_number, 1u << shift, 1238 index << shift, npages); 1239 } 1240 } 1241 1242 static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index, 1243 long npages, unsigned long uaddr, 1244 enum dma_data_direction direction, 1245 unsigned long attrs) 1246 { 1247 int ret = pnv_tce_build(tbl, index, npages, uaddr, direction, 1248 attrs); 1249 1250 if (!ret) 1251 pnv_pci_ioda2_tce_invalidate(tbl, index, npages); 1252 1253 return ret; 1254 } 1255 1256 static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index, 1257 long npages) 1258 { 1259 pnv_tce_free(tbl, index, npages); 1260 1261 pnv_pci_ioda2_tce_invalidate(tbl, index, npages); 1262 } 1263 1264 static struct iommu_table_ops pnv_ioda2_iommu_ops = { 1265 .set = pnv_ioda2_tce_build, 1266 #ifdef CONFIG_IOMMU_API 1267 .xchg_no_kill = pnv_ioda_tce_xchg_no_kill, 1268 .tce_kill = pnv_pci_ioda2_tce_invalidate, 1269 .useraddrptr = pnv_tce_useraddrptr, 1270 #endif 1271 .clear = pnv_ioda2_tce_free, 1272 .get = pnv_tce_get, 1273 .free = pnv_pci_ioda2_table_free_pages, 1274 }; 1275 1276 static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group, 1277 int num, struct iommu_table *tbl) 1278 { 1279 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, 1280 table_group); 1281 struct pnv_phb *phb = pe->phb; 1282 int64_t rc; 1283 const unsigned long size = tbl->it_indirect_levels ? 1284 tbl->it_level_size : tbl->it_size; 1285 const __u64 start_addr = tbl->it_offset << tbl->it_page_shift; 1286 const __u64 win_size = tbl->it_size << tbl->it_page_shift; 1287 1288 pe_info(pe, "Setting up window#%d %llx..%llx pg=%lx\n", 1289 num, start_addr, start_addr + win_size - 1, 1290 IOMMU_PAGE_SIZE(tbl)); 1291 1292 /* 1293 * Map TCE table through TVT. The TVE index is the PE number 1294 * shifted by 1 bit for 32-bits DMA space. 1295 */ 1296 rc = opal_pci_map_pe_dma_window(phb->opal_id, 1297 pe->pe_number, 1298 (pe->pe_number << 1) + num, 1299 tbl->it_indirect_levels + 1, 1300 __pa(tbl->it_base), 1301 size << 3, 1302 IOMMU_PAGE_SIZE(tbl)); 1303 if (rc) { 1304 pe_err(pe, "Failed to configure TCE table, err %lld\n", rc); 1305 return rc; 1306 } 1307 1308 pnv_pci_link_table_and_group(phb->hose->node, num, 1309 tbl, &pe->table_group); 1310 pnv_pci_ioda2_tce_invalidate_pe(pe); 1311 1312 return 0; 1313 } 1314 1315 static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable) 1316 { 1317 uint16_t window_id = (pe->pe_number << 1 ) + 1; 1318 int64_t rc; 1319 1320 pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis"); 1321 if (enable) { 1322 phys_addr_t top = memblock_end_of_DRAM(); 1323 1324 top = roundup_pow_of_two(top); 1325 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, 1326 pe->pe_number, 1327 window_id, 1328 pe->tce_bypass_base, 1329 top); 1330 } else { 1331 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, 1332 pe->pe_number, 1333 window_id, 1334 pe->tce_bypass_base, 1335 0); 1336 } 1337 if (rc) 1338 pe_err(pe, "OPAL error %lld configuring bypass window\n", rc); 1339 else 1340 pe->tce_bypass_enabled = enable; 1341 } 1342 1343 static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group, 1344 int num, __u32 page_shift, __u64 window_size, __u32 levels, 1345 bool alloc_userspace_copy, struct iommu_table **ptbl) 1346 { 1347 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, 1348 table_group); 1349 int nid = pe->phb->hose->node; 1350 __u64 bus_offset = num ? pe->tce_bypass_base : table_group->tce32_start; 1351 long ret; 1352 struct iommu_table *tbl; 1353 1354 tbl = pnv_pci_table_alloc(nid); 1355 if (!tbl) 1356 return -ENOMEM; 1357 1358 tbl->it_ops = &pnv_ioda2_iommu_ops; 1359 1360 ret = pnv_pci_ioda2_table_alloc_pages(nid, 1361 bus_offset, page_shift, window_size, 1362 levels, alloc_userspace_copy, tbl); 1363 if (ret) { 1364 iommu_tce_table_put(tbl); 1365 return ret; 1366 } 1367 1368 *ptbl = tbl; 1369 1370 return 0; 1371 } 1372 1373 static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe) 1374 { 1375 struct iommu_table *tbl = NULL; 1376 long rc; 1377 unsigned long res_start, res_end; 1378 1379 /* 1380 * crashkernel= specifies the kdump kernel's maximum memory at 1381 * some offset and there is no guaranteed the result is a power 1382 * of 2, which will cause errors later. 1383 */ 1384 const u64 max_memory = __rounddown_pow_of_two(memory_hotplug_max()); 1385 1386 /* 1387 * In memory constrained environments, e.g. kdump kernel, the 1388 * DMA window can be larger than available memory, which will 1389 * cause errors later. 1390 */ 1391 const u64 maxblock = 1UL << (PAGE_SHIFT + MAX_ORDER); 1392 1393 /* 1394 * We create the default window as big as we can. The constraint is 1395 * the max order of allocation possible. The TCE table is likely to 1396 * end up being multilevel and with on-demand allocation in place, 1397 * the initial use is not going to be huge as the default window aims 1398 * to support crippled devices (i.e. not fully 64bit DMAble) only. 1399 */ 1400 /* iommu_table::it_map uses 1 bit per IOMMU page, hence 8 */ 1401 const u64 window_size = min((maxblock * 8) << PAGE_SHIFT, max_memory); 1402 /* Each TCE level cannot exceed maxblock so go multilevel if needed */ 1403 unsigned long tces_order = ilog2(window_size >> PAGE_SHIFT); 1404 unsigned long tcelevel_order = ilog2(maxblock >> 3); 1405 unsigned int levels = tces_order / tcelevel_order; 1406 1407 if (tces_order % tcelevel_order) 1408 levels += 1; 1409 /* 1410 * We try to stick to default levels (which is >1 at the moment) in 1411 * order to save memory by relying on on-demain TCE level allocation. 1412 */ 1413 levels = max_t(unsigned int, levels, POWERNV_IOMMU_DEFAULT_LEVELS); 1414 1415 rc = pnv_pci_ioda2_create_table(&pe->table_group, 0, PAGE_SHIFT, 1416 window_size, levels, false, &tbl); 1417 if (rc) { 1418 pe_err(pe, "Failed to create 32-bit TCE table, err %ld", 1419 rc); 1420 return rc; 1421 } 1422 1423 /* We use top part of 32bit space for MMIO so exclude it from DMA */ 1424 res_start = 0; 1425 res_end = 0; 1426 if (window_size > pe->phb->ioda.m32_pci_base) { 1427 res_start = pe->phb->ioda.m32_pci_base >> tbl->it_page_shift; 1428 res_end = min(window_size, SZ_4G) >> tbl->it_page_shift; 1429 } 1430 1431 tbl->it_index = (pe->phb->hose->global_number << 16) | pe->pe_number; 1432 if (iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end)) 1433 rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl); 1434 else 1435 rc = -ENOMEM; 1436 if (rc) { 1437 pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n", rc); 1438 iommu_tce_table_put(tbl); 1439 tbl = NULL; /* This clears iommu_table_base below */ 1440 } 1441 if (!pnv_iommu_bypass_disabled) 1442 pnv_pci_ioda2_set_bypass(pe, true); 1443 1444 /* 1445 * Set table base for the case of IOMMU DMA use. Usually this is done 1446 * from dma_dev_setup() which is not called when a device is returned 1447 * from VFIO so do it here. 1448 */ 1449 if (pe->pdev) 1450 set_iommu_table_base(&pe->pdev->dev, tbl); 1451 1452 return 0; 1453 } 1454 1455 static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group, 1456 int num) 1457 { 1458 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, 1459 table_group); 1460 struct pnv_phb *phb = pe->phb; 1461 long ret; 1462 1463 pe_info(pe, "Removing DMA window #%d\n", num); 1464 1465 ret = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number, 1466 (pe->pe_number << 1) + num, 1467 0/* levels */, 0/* table address */, 1468 0/* table size */, 0/* page size */); 1469 if (ret) 1470 pe_warn(pe, "Unmapping failed, ret = %ld\n", ret); 1471 else 1472 pnv_pci_ioda2_tce_invalidate_pe(pe); 1473 1474 pnv_pci_unlink_table_and_group(table_group->tables[num], table_group); 1475 1476 return ret; 1477 } 1478 1479 #ifdef CONFIG_IOMMU_API 1480 unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift, 1481 __u64 window_size, __u32 levels) 1482 { 1483 unsigned long bytes = 0; 1484 const unsigned window_shift = ilog2(window_size); 1485 unsigned entries_shift = window_shift - page_shift; 1486 unsigned table_shift = entries_shift + 3; 1487 unsigned long tce_table_size = max(0x1000UL, 1UL << table_shift); 1488 unsigned long direct_table_size; 1489 1490 if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS) || 1491 !is_power_of_2(window_size)) 1492 return 0; 1493 1494 /* Calculate a direct table size from window_size and levels */ 1495 entries_shift = (entries_shift + levels - 1) / levels; 1496 table_shift = entries_shift + 3; 1497 table_shift = max_t(unsigned, table_shift, PAGE_SHIFT); 1498 direct_table_size = 1UL << table_shift; 1499 1500 for ( ; levels; --levels) { 1501 bytes += ALIGN(tce_table_size, direct_table_size); 1502 1503 tce_table_size /= direct_table_size; 1504 tce_table_size <<= 3; 1505 tce_table_size = max_t(unsigned long, 1506 tce_table_size, direct_table_size); 1507 } 1508 1509 return bytes + bytes; /* one for HW table, one for userspace copy */ 1510 } 1511 1512 static long pnv_pci_ioda2_create_table_userspace( 1513 struct iommu_table_group *table_group, 1514 int num, __u32 page_shift, __u64 window_size, __u32 levels, 1515 struct iommu_table **ptbl) 1516 { 1517 long ret = pnv_pci_ioda2_create_table(table_group, 1518 num, page_shift, window_size, levels, true, ptbl); 1519 1520 if (!ret) 1521 (*ptbl)->it_allocated_size = pnv_pci_ioda2_get_table_size( 1522 page_shift, window_size, levels); 1523 return ret; 1524 } 1525 1526 static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) 1527 { 1528 struct pci_dev *dev; 1529 1530 list_for_each_entry(dev, &bus->devices, bus_list) { 1531 set_iommu_table_base(&dev->dev, pe->table_group.tables[0]); 1532 dev->dev.archdata.dma_offset = pe->tce_bypass_base; 1533 1534 if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) 1535 pnv_ioda_setup_bus_dma(pe, dev->subordinate); 1536 } 1537 } 1538 1539 static long pnv_ioda2_take_ownership(struct iommu_table_group *table_group) 1540 { 1541 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, 1542 table_group); 1543 /* Store @tbl as pnv_pci_ioda2_unset_window() resets it */ 1544 struct iommu_table *tbl = pe->table_group.tables[0]; 1545 1546 /* 1547 * iommu_ops transfers the ownership per a device and we mode 1548 * the group ownership with the first device in the group. 1549 */ 1550 if (!tbl) 1551 return 0; 1552 1553 pnv_pci_ioda2_set_bypass(pe, false); 1554 pnv_pci_ioda2_unset_window(&pe->table_group, 0); 1555 if (pe->pbus) 1556 pnv_ioda_setup_bus_dma(pe, pe->pbus); 1557 else if (pe->pdev) 1558 set_iommu_table_base(&pe->pdev->dev, NULL); 1559 iommu_tce_table_put(tbl); 1560 1561 return 0; 1562 } 1563 1564 static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group) 1565 { 1566 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, 1567 table_group); 1568 1569 /* See the comment about iommu_ops above */ 1570 if (pe->table_group.tables[0]) 1571 return; 1572 pnv_pci_ioda2_setup_default_config(pe); 1573 if (pe->pbus) 1574 pnv_ioda_setup_bus_dma(pe, pe->pbus); 1575 } 1576 1577 static struct iommu_table_group_ops pnv_pci_ioda2_ops = { 1578 .get_table_size = pnv_pci_ioda2_get_table_size, 1579 .create_table = pnv_pci_ioda2_create_table_userspace, 1580 .set_window = pnv_pci_ioda2_set_window, 1581 .unset_window = pnv_pci_ioda2_unset_window, 1582 .take_ownership = pnv_ioda2_take_ownership, 1583 .release_ownership = pnv_ioda2_release_ownership, 1584 }; 1585 #endif 1586 1587 void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, 1588 struct pnv_ioda_pe *pe) 1589 { 1590 int64_t rc; 1591 1592 /* TVE #1 is selected by PCI address bit 59 */ 1593 pe->tce_bypass_base = 1ull << 59; 1594 1595 /* The PE will reserve all possible 32-bits space */ 1596 pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n", 1597 phb->ioda.m32_pci_base); 1598 1599 /* Setup linux iommu table */ 1600 pe->table_group.tce32_start = 0; 1601 pe->table_group.tce32_size = phb->ioda.m32_pci_base; 1602 pe->table_group.max_dynamic_windows_supported = 1603 IOMMU_TABLE_GROUP_MAX_TABLES; 1604 pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS; 1605 pe->table_group.pgsizes = pnv_ioda_parse_tce_sizes(phb); 1606 1607 rc = pnv_pci_ioda2_setup_default_config(pe); 1608 if (rc) 1609 return; 1610 1611 #ifdef CONFIG_IOMMU_API 1612 pe->table_group.ops = &pnv_pci_ioda2_ops; 1613 iommu_register_group(&pe->table_group, phb->hose->global_number, 1614 pe->pe_number); 1615 #endif 1616 pe->dma_setup_done = true; 1617 } 1618 1619 /* 1620 * Called from KVM in real mode to EOI passthru interrupts. The ICP 1621 * EOI is handled directly in KVM in kvmppc_deliver_irq_passthru(). 1622 * 1623 * The IRQ data is mapped in the PCI-MSI domain and the EOI OPAL call 1624 * needs an HW IRQ number mapped in the XICS IRQ domain. The HW IRQ 1625 * numbers of the in-the-middle MSI domain are vector numbers and it's 1626 * good enough for OPAL. Use that. 1627 */ 1628 int64_t pnv_opal_pci_msi_eoi(struct irq_data *d) 1629 { 1630 struct pci_controller *hose = irq_data_get_irq_chip_data(d->parent_data); 1631 struct pnv_phb *phb = hose->private_data; 1632 1633 return opal_pci_msi_eoi(phb->opal_id, d->parent_data->hwirq); 1634 } 1635 1636 /* 1637 * The IRQ data is mapped in the XICS domain, with OPAL HW IRQ numbers 1638 */ 1639 static void pnv_ioda2_msi_eoi(struct irq_data *d) 1640 { 1641 int64_t rc; 1642 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 1643 struct pci_controller *hose = irq_data_get_irq_chip_data(d); 1644 struct pnv_phb *phb = hose->private_data; 1645 1646 rc = opal_pci_msi_eoi(phb->opal_id, hw_irq); 1647 WARN_ON_ONCE(rc); 1648 1649 icp_native_eoi(d); 1650 } 1651 1652 /* P8/CXL only */ 1653 void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq) 1654 { 1655 struct irq_data *idata; 1656 struct irq_chip *ichip; 1657 1658 /* The MSI EOI OPAL call is only needed on PHB3 */ 1659 if (phb->model != PNV_PHB_MODEL_PHB3) 1660 return; 1661 1662 if (!phb->ioda.irq_chip_init) { 1663 /* 1664 * First time we setup an MSI IRQ, we need to setup the 1665 * corresponding IRQ chip to route correctly. 1666 */ 1667 idata = irq_get_irq_data(virq); 1668 ichip = irq_data_get_irq_chip(idata); 1669 phb->ioda.irq_chip_init = 1; 1670 phb->ioda.irq_chip = *ichip; 1671 phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi; 1672 } 1673 irq_set_chip(virq, &phb->ioda.irq_chip); 1674 irq_set_chip_data(virq, phb->hose); 1675 } 1676 1677 static struct irq_chip pnv_pci_msi_irq_chip; 1678 1679 /* 1680 * Returns true iff chip is something that we could call 1681 * pnv_opal_pci_msi_eoi for. 1682 */ 1683 bool is_pnv_opal_msi(struct irq_chip *chip) 1684 { 1685 return chip == &pnv_pci_msi_irq_chip; 1686 } 1687 EXPORT_SYMBOL_GPL(is_pnv_opal_msi); 1688 1689 static int __pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, 1690 unsigned int xive_num, 1691 unsigned int is_64, struct msi_msg *msg) 1692 { 1693 struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev); 1694 __be32 data; 1695 int rc; 1696 1697 dev_dbg(&dev->dev, "%s: setup %s-bit MSI for vector #%d\n", __func__, 1698 is_64 ? "64" : "32", xive_num); 1699 1700 /* No PE assigned ? bail out ... no MSI for you ! */ 1701 if (pe == NULL) 1702 return -ENXIO; 1703 1704 /* Check if we have an MVE */ 1705 if (pe->mve_number < 0) 1706 return -ENXIO; 1707 1708 /* Force 32-bit MSI on some broken devices */ 1709 if (dev->no_64bit_msi) 1710 is_64 = 0; 1711 1712 /* Assign XIVE to PE */ 1713 rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num); 1714 if (rc) { 1715 pr_warn("%s: OPAL error %d setting XIVE %d PE\n", 1716 pci_name(dev), rc, xive_num); 1717 return -EIO; 1718 } 1719 1720 if (is_64) { 1721 __be64 addr64; 1722 1723 rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1, 1724 &addr64, &data); 1725 if (rc) { 1726 pr_warn("%s: OPAL error %d getting 64-bit MSI data\n", 1727 pci_name(dev), rc); 1728 return -EIO; 1729 } 1730 msg->address_hi = be64_to_cpu(addr64) >> 32; 1731 msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful; 1732 } else { 1733 __be32 addr32; 1734 1735 rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1, 1736 &addr32, &data); 1737 if (rc) { 1738 pr_warn("%s: OPAL error %d getting 32-bit MSI data\n", 1739 pci_name(dev), rc); 1740 return -EIO; 1741 } 1742 msg->address_hi = 0; 1743 msg->address_lo = be32_to_cpu(addr32); 1744 } 1745 msg->data = be32_to_cpu(data); 1746 1747 return 0; 1748 } 1749 1750 /* 1751 * The msi_free() op is called before irq_domain_free_irqs_top() when 1752 * the handler data is still available. Use that to clear the XIVE 1753 * controller. 1754 */ 1755 static void pnv_msi_ops_msi_free(struct irq_domain *domain, 1756 struct msi_domain_info *info, 1757 unsigned int irq) 1758 { 1759 if (xive_enabled()) 1760 xive_irq_free_data(irq); 1761 } 1762 1763 static struct msi_domain_ops pnv_pci_msi_domain_ops = { 1764 .msi_free = pnv_msi_ops_msi_free, 1765 }; 1766 1767 static void pnv_msi_shutdown(struct irq_data *d) 1768 { 1769 d = d->parent_data; 1770 if (d->chip->irq_shutdown) 1771 d->chip->irq_shutdown(d); 1772 } 1773 1774 static void pnv_msi_mask(struct irq_data *d) 1775 { 1776 pci_msi_mask_irq(d); 1777 irq_chip_mask_parent(d); 1778 } 1779 1780 static void pnv_msi_unmask(struct irq_data *d) 1781 { 1782 pci_msi_unmask_irq(d); 1783 irq_chip_unmask_parent(d); 1784 } 1785 1786 static struct irq_chip pnv_pci_msi_irq_chip = { 1787 .name = "PNV-PCI-MSI", 1788 .irq_shutdown = pnv_msi_shutdown, 1789 .irq_mask = pnv_msi_mask, 1790 .irq_unmask = pnv_msi_unmask, 1791 .irq_eoi = irq_chip_eoi_parent, 1792 }; 1793 1794 static struct msi_domain_info pnv_msi_domain_info = { 1795 .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | 1796 MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), 1797 .ops = &pnv_pci_msi_domain_ops, 1798 .chip = &pnv_pci_msi_irq_chip, 1799 }; 1800 1801 static void pnv_msi_compose_msg(struct irq_data *d, struct msi_msg *msg) 1802 { 1803 struct msi_desc *entry = irq_data_get_msi_desc(d); 1804 struct pci_dev *pdev = msi_desc_to_pci_dev(entry); 1805 struct pci_controller *hose = irq_data_get_irq_chip_data(d); 1806 struct pnv_phb *phb = hose->private_data; 1807 int rc; 1808 1809 rc = __pnv_pci_ioda_msi_setup(phb, pdev, d->hwirq, 1810 entry->pci.msi_attrib.is_64, msg); 1811 if (rc) 1812 dev_err(&pdev->dev, "Failed to setup %s-bit MSI #%ld : %d\n", 1813 entry->pci.msi_attrib.is_64 ? "64" : "32", d->hwirq, rc); 1814 } 1815 1816 /* 1817 * The IRQ data is mapped in the MSI domain in which HW IRQ numbers 1818 * correspond to vector numbers. 1819 */ 1820 static void pnv_msi_eoi(struct irq_data *d) 1821 { 1822 struct pci_controller *hose = irq_data_get_irq_chip_data(d); 1823 struct pnv_phb *phb = hose->private_data; 1824 1825 if (phb->model == PNV_PHB_MODEL_PHB3) { 1826 /* 1827 * The EOI OPAL call takes an OPAL HW IRQ number but 1828 * since it is translated into a vector number in 1829 * OPAL, use that directly. 1830 */ 1831 WARN_ON_ONCE(opal_pci_msi_eoi(phb->opal_id, d->hwirq)); 1832 } 1833 1834 irq_chip_eoi_parent(d); 1835 } 1836 1837 static struct irq_chip pnv_msi_irq_chip = { 1838 .name = "PNV-MSI", 1839 .irq_shutdown = pnv_msi_shutdown, 1840 .irq_mask = irq_chip_mask_parent, 1841 .irq_unmask = irq_chip_unmask_parent, 1842 .irq_eoi = pnv_msi_eoi, 1843 .irq_set_affinity = irq_chip_set_affinity_parent, 1844 .irq_compose_msi_msg = pnv_msi_compose_msg, 1845 }; 1846 1847 static int pnv_irq_parent_domain_alloc(struct irq_domain *domain, 1848 unsigned int virq, int hwirq) 1849 { 1850 struct irq_fwspec parent_fwspec; 1851 int ret; 1852 1853 parent_fwspec.fwnode = domain->parent->fwnode; 1854 parent_fwspec.param_count = 2; 1855 parent_fwspec.param[0] = hwirq; 1856 parent_fwspec.param[1] = IRQ_TYPE_EDGE_RISING; 1857 1858 ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec); 1859 if (ret) 1860 return ret; 1861 1862 return 0; 1863 } 1864 1865 static int pnv_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 1866 unsigned int nr_irqs, void *arg) 1867 { 1868 struct pci_controller *hose = domain->host_data; 1869 struct pnv_phb *phb = hose->private_data; 1870 msi_alloc_info_t *info = arg; 1871 struct pci_dev *pdev = msi_desc_to_pci_dev(info->desc); 1872 int hwirq; 1873 int i, ret; 1874 1875 hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, nr_irqs); 1876 if (hwirq < 0) { 1877 dev_warn(&pdev->dev, "failed to find a free MSI\n"); 1878 return -ENOSPC; 1879 } 1880 1881 dev_dbg(&pdev->dev, "%s bridge %pOF %d/%x #%d\n", __func__, 1882 hose->dn, virq, hwirq, nr_irqs); 1883 1884 for (i = 0; i < nr_irqs; i++) { 1885 ret = pnv_irq_parent_domain_alloc(domain, virq + i, 1886 phb->msi_base + hwirq + i); 1887 if (ret) 1888 goto out; 1889 1890 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, 1891 &pnv_msi_irq_chip, hose); 1892 } 1893 1894 return 0; 1895 1896 out: 1897 irq_domain_free_irqs_parent(domain, virq, i - 1); 1898 msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, nr_irqs); 1899 return ret; 1900 } 1901 1902 static void pnv_irq_domain_free(struct irq_domain *domain, unsigned int virq, 1903 unsigned int nr_irqs) 1904 { 1905 struct irq_data *d = irq_domain_get_irq_data(domain, virq); 1906 struct pci_controller *hose = irq_data_get_irq_chip_data(d); 1907 struct pnv_phb *phb = hose->private_data; 1908 1909 pr_debug("%s bridge %pOF %d/%lx #%d\n", __func__, hose->dn, 1910 virq, d->hwirq, nr_irqs); 1911 1912 msi_bitmap_free_hwirqs(&phb->msi_bmp, d->hwirq, nr_irqs); 1913 /* XIVE domain is cleared through ->msi_free() */ 1914 } 1915 1916 static const struct irq_domain_ops pnv_irq_domain_ops = { 1917 .alloc = pnv_irq_domain_alloc, 1918 .free = pnv_irq_domain_free, 1919 }; 1920 1921 static int __init pnv_msi_allocate_domains(struct pci_controller *hose, unsigned int count) 1922 { 1923 struct pnv_phb *phb = hose->private_data; 1924 struct irq_domain *parent = irq_get_default_host(); 1925 1926 hose->fwnode = irq_domain_alloc_named_id_fwnode("PNV-MSI", phb->opal_id); 1927 if (!hose->fwnode) 1928 return -ENOMEM; 1929 1930 hose->dev_domain = irq_domain_create_hierarchy(parent, 0, count, 1931 hose->fwnode, 1932 &pnv_irq_domain_ops, hose); 1933 if (!hose->dev_domain) { 1934 pr_err("PCI: failed to create IRQ domain bridge %pOF (domain %d)\n", 1935 hose->dn, hose->global_number); 1936 irq_domain_free_fwnode(hose->fwnode); 1937 return -ENOMEM; 1938 } 1939 1940 hose->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(hose->dn), 1941 &pnv_msi_domain_info, 1942 hose->dev_domain); 1943 if (!hose->msi_domain) { 1944 pr_err("PCI: failed to create MSI IRQ domain bridge %pOF (domain %d)\n", 1945 hose->dn, hose->global_number); 1946 irq_domain_free_fwnode(hose->fwnode); 1947 irq_domain_remove(hose->dev_domain); 1948 return -ENOMEM; 1949 } 1950 1951 return 0; 1952 } 1953 1954 static void __init pnv_pci_init_ioda_msis(struct pnv_phb *phb) 1955 { 1956 unsigned int count; 1957 const __be32 *prop = of_get_property(phb->hose->dn, 1958 "ibm,opal-msi-ranges", NULL); 1959 if (!prop) { 1960 /* BML Fallback */ 1961 prop = of_get_property(phb->hose->dn, "msi-ranges", NULL); 1962 } 1963 if (!prop) 1964 return; 1965 1966 phb->msi_base = be32_to_cpup(prop); 1967 count = be32_to_cpup(prop + 1); 1968 if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) { 1969 pr_err("PCI %d: Failed to allocate MSI bitmap !\n", 1970 phb->hose->global_number); 1971 return; 1972 } 1973 1974 pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n", 1975 count, phb->msi_base); 1976 1977 pnv_msi_allocate_domains(phb->hose, count); 1978 } 1979 1980 static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe, 1981 struct resource *res) 1982 { 1983 struct pnv_phb *phb = pe->phb; 1984 struct pci_bus_region region; 1985 int index; 1986 int64_t rc; 1987 1988 if (!res || !res->flags || res->start > res->end || 1989 res->flags & IORESOURCE_UNSET) 1990 return; 1991 1992 if (res->flags & IORESOURCE_IO) { 1993 region.start = res->start - phb->ioda.io_pci_base; 1994 region.end = res->end - phb->ioda.io_pci_base; 1995 index = region.start / phb->ioda.io_segsize; 1996 1997 while (index < phb->ioda.total_pe_num && 1998 region.start <= region.end) { 1999 phb->ioda.io_segmap[index] = pe->pe_number; 2000 rc = opal_pci_map_pe_mmio_window(phb->opal_id, 2001 pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index); 2002 if (rc != OPAL_SUCCESS) { 2003 pr_err("%s: Error %lld mapping IO segment#%d to PE#%x\n", 2004 __func__, rc, index, pe->pe_number); 2005 break; 2006 } 2007 2008 region.start += phb->ioda.io_segsize; 2009 index++; 2010 } 2011 } else if ((res->flags & IORESOURCE_MEM) && 2012 !pnv_pci_is_m64(phb, res)) { 2013 region.start = res->start - 2014 phb->hose->mem_offset[0] - 2015 phb->ioda.m32_pci_base; 2016 region.end = res->end - 2017 phb->hose->mem_offset[0] - 2018 phb->ioda.m32_pci_base; 2019 index = region.start / phb->ioda.m32_segsize; 2020 2021 while (index < phb->ioda.total_pe_num && 2022 region.start <= region.end) { 2023 phb->ioda.m32_segmap[index] = pe->pe_number; 2024 rc = opal_pci_map_pe_mmio_window(phb->opal_id, 2025 pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index); 2026 if (rc != OPAL_SUCCESS) { 2027 pr_err("%s: Error %lld mapping M32 segment#%d to PE#%x", 2028 __func__, rc, index, pe->pe_number); 2029 break; 2030 } 2031 2032 region.start += phb->ioda.m32_segsize; 2033 index++; 2034 } 2035 } 2036 } 2037 2038 /* 2039 * This function is supposed to be called on basis of PE from top 2040 * to bottom style. So the I/O or MMIO segment assigned to 2041 * parent PE could be overridden by its child PEs if necessary. 2042 */ 2043 static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe) 2044 { 2045 struct pci_dev *pdev; 2046 int i; 2047 2048 /* 2049 * NOTE: We only care PCI bus based PE for now. For PCI 2050 * device based PE, for example SRIOV sensitive VF should 2051 * be figured out later. 2052 */ 2053 BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))); 2054 2055 list_for_each_entry(pdev, &pe->pbus->devices, bus_list) { 2056 for (i = 0; i <= PCI_ROM_RESOURCE; i++) 2057 pnv_ioda_setup_pe_res(pe, &pdev->resource[i]); 2058 2059 /* 2060 * If the PE contains all subordinate PCI buses, the 2061 * windows of the child bridges should be mapped to 2062 * the PE as well. 2063 */ 2064 if (!(pe->flags & PNV_IODA_PE_BUS_ALL) || !pci_is_bridge(pdev)) 2065 continue; 2066 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) 2067 pnv_ioda_setup_pe_res(pe, 2068 &pdev->resource[PCI_BRIDGE_RESOURCES + i]); 2069 } 2070 } 2071 2072 #ifdef CONFIG_DEBUG_FS 2073 static int pnv_pci_diag_data_set(void *data, u64 val) 2074 { 2075 struct pnv_phb *phb = data; 2076 s64 ret; 2077 2078 /* Retrieve the diag data from firmware */ 2079 ret = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data, 2080 phb->diag_data_size); 2081 if (ret != OPAL_SUCCESS) 2082 return -EIO; 2083 2084 /* Print the diag data to the kernel log */ 2085 pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data); 2086 return 0; 2087 } 2088 2089 DEFINE_DEBUGFS_ATTRIBUTE(pnv_pci_diag_data_fops, NULL, pnv_pci_diag_data_set, 2090 "%llu\n"); 2091 2092 static int pnv_pci_ioda_pe_dump(void *data, u64 val) 2093 { 2094 struct pnv_phb *phb = data; 2095 int pe_num; 2096 2097 for (pe_num = 0; pe_num < phb->ioda.total_pe_num; pe_num++) { 2098 struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_num]; 2099 2100 if (!test_bit(pe_num, phb->ioda.pe_alloc)) 2101 continue; 2102 2103 pe_warn(pe, "rid: %04x dev count: %2d flags: %s%s%s%s%s%s\n", 2104 pe->rid, pe->device_count, 2105 (pe->flags & PNV_IODA_PE_DEV) ? "dev " : "", 2106 (pe->flags & PNV_IODA_PE_BUS) ? "bus " : "", 2107 (pe->flags & PNV_IODA_PE_BUS_ALL) ? "all " : "", 2108 (pe->flags & PNV_IODA_PE_MASTER) ? "master " : "", 2109 (pe->flags & PNV_IODA_PE_SLAVE) ? "slave " : "", 2110 (pe->flags & PNV_IODA_PE_VF) ? "vf " : ""); 2111 } 2112 2113 return 0; 2114 } 2115 2116 DEFINE_DEBUGFS_ATTRIBUTE(pnv_pci_ioda_pe_dump_fops, NULL, 2117 pnv_pci_ioda_pe_dump, "%llu\n"); 2118 2119 #endif /* CONFIG_DEBUG_FS */ 2120 2121 static void pnv_pci_ioda_create_dbgfs(void) 2122 { 2123 #ifdef CONFIG_DEBUG_FS 2124 struct pci_controller *hose, *tmp; 2125 struct pnv_phb *phb; 2126 char name[16]; 2127 2128 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { 2129 phb = hose->private_data; 2130 2131 sprintf(name, "PCI%04x", hose->global_number); 2132 phb->dbgfs = debugfs_create_dir(name, arch_debugfs_dir); 2133 2134 debugfs_create_file_unsafe("dump_diag_regs", 0200, phb->dbgfs, 2135 phb, &pnv_pci_diag_data_fops); 2136 debugfs_create_file_unsafe("dump_ioda_pe_state", 0200, phb->dbgfs, 2137 phb, &pnv_pci_ioda_pe_dump_fops); 2138 } 2139 #endif /* CONFIG_DEBUG_FS */ 2140 } 2141 2142 static void pnv_pci_enable_bridge(struct pci_bus *bus) 2143 { 2144 struct pci_dev *dev = bus->self; 2145 struct pci_bus *child; 2146 2147 /* Empty bus ? bail */ 2148 if (list_empty(&bus->devices)) 2149 return; 2150 2151 /* 2152 * If there's a bridge associated with that bus enable it. This works 2153 * around races in the generic code if the enabling is done during 2154 * parallel probing. This can be removed once those races have been 2155 * fixed. 2156 */ 2157 if (dev) { 2158 int rc = pci_enable_device(dev); 2159 if (rc) 2160 pci_err(dev, "Error enabling bridge (%d)\n", rc); 2161 pci_set_master(dev); 2162 } 2163 2164 /* Perform the same to child busses */ 2165 list_for_each_entry(child, &bus->children, node) 2166 pnv_pci_enable_bridge(child); 2167 } 2168 2169 static void pnv_pci_enable_bridges(void) 2170 { 2171 struct pci_controller *hose; 2172 2173 list_for_each_entry(hose, &hose_list, list_node) 2174 pnv_pci_enable_bridge(hose->bus); 2175 } 2176 2177 static void pnv_pci_ioda_fixup(void) 2178 { 2179 pnv_pci_ioda_create_dbgfs(); 2180 2181 pnv_pci_enable_bridges(); 2182 2183 #ifdef CONFIG_EEH 2184 pnv_eeh_post_init(); 2185 #endif 2186 } 2187 2188 /* 2189 * Returns the alignment for I/O or memory windows for P2P 2190 * bridges. That actually depends on how PEs are segmented. 2191 * For now, we return I/O or M32 segment size for PE sensitive 2192 * P2P bridges. Otherwise, the default values (4KiB for I/O, 2193 * 1MiB for memory) will be returned. 2194 * 2195 * The current PCI bus might be put into one PE, which was 2196 * create against the parent PCI bridge. For that case, we 2197 * needn't enlarge the alignment so that we can save some 2198 * resources. 2199 */ 2200 static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus, 2201 unsigned long type) 2202 { 2203 struct pnv_phb *phb = pci_bus_to_pnvhb(bus); 2204 int num_pci_bridges = 0; 2205 struct pci_dev *bridge; 2206 2207 bridge = bus->self; 2208 while (bridge) { 2209 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) { 2210 num_pci_bridges++; 2211 if (num_pci_bridges >= 2) 2212 return 1; 2213 } 2214 2215 bridge = bridge->bus->self; 2216 } 2217 2218 /* 2219 * We fall back to M32 if M64 isn't supported. We enforce the M64 2220 * alignment for any 64-bit resource, PCIe doesn't care and 2221 * bridges only do 64-bit prefetchable anyway. 2222 */ 2223 if (phb->ioda.m64_segsize && pnv_pci_is_m64_flags(type)) 2224 return phb->ioda.m64_segsize; 2225 if (type & IORESOURCE_MEM) 2226 return phb->ioda.m32_segsize; 2227 2228 return phb->ioda.io_segsize; 2229 } 2230 2231 /* 2232 * We are updating root port or the upstream port of the 2233 * bridge behind the root port with PHB's windows in order 2234 * to accommodate the changes on required resources during 2235 * PCI (slot) hotplug, which is connected to either root 2236 * port or the downstream ports of PCIe switch behind the 2237 * root port. 2238 */ 2239 static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus, 2240 unsigned long type) 2241 { 2242 struct pci_controller *hose = pci_bus_to_host(bus); 2243 struct pnv_phb *phb = hose->private_data; 2244 struct pci_dev *bridge = bus->self; 2245 struct resource *r, *w; 2246 bool msi_region = false; 2247 int i; 2248 2249 /* Check if we need apply fixup to the bridge's windows */ 2250 if (!pci_is_root_bus(bridge->bus) && 2251 !pci_is_root_bus(bridge->bus->self->bus)) 2252 return; 2253 2254 /* Fixup the resources */ 2255 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { 2256 r = &bridge->resource[PCI_BRIDGE_RESOURCES + i]; 2257 if (!r->flags || !r->parent) 2258 continue; 2259 2260 w = NULL; 2261 if (r->flags & type & IORESOURCE_IO) 2262 w = &hose->io_resource; 2263 else if (pnv_pci_is_m64(phb, r) && 2264 (type & IORESOURCE_PREFETCH) && 2265 phb->ioda.m64_segsize) 2266 w = &hose->mem_resources[1]; 2267 else if (r->flags & type & IORESOURCE_MEM) { 2268 w = &hose->mem_resources[0]; 2269 msi_region = true; 2270 } 2271 2272 r->start = w->start; 2273 r->end = w->end; 2274 2275 /* The 64KB 32-bits MSI region shouldn't be included in 2276 * the 32-bits bridge window. Otherwise, we can see strange 2277 * issues. One of them is EEH error observed on Garrison. 2278 * 2279 * Exclude top 1MB region which is the minimal alignment of 2280 * 32-bits bridge window. 2281 */ 2282 if (msi_region) { 2283 r->end += 0x10000; 2284 r->end -= 0x100000; 2285 } 2286 } 2287 } 2288 2289 static void pnv_pci_configure_bus(struct pci_bus *bus) 2290 { 2291 struct pci_dev *bridge = bus->self; 2292 struct pnv_ioda_pe *pe; 2293 bool all = (bridge && pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE); 2294 2295 dev_info(&bus->dev, "Configuring PE for bus\n"); 2296 2297 /* Don't assign PE to PCI bus, which doesn't have subordinate devices */ 2298 if (WARN_ON(list_empty(&bus->devices))) 2299 return; 2300 2301 /* Reserve PEs according to used M64 resources */ 2302 pnv_ioda_reserve_m64_pe(bus, NULL, all); 2303 2304 /* 2305 * Assign PE. We might run here because of partial hotplug. 2306 * For the case, we just pick up the existing PE and should 2307 * not allocate resources again. 2308 */ 2309 pe = pnv_ioda_setup_bus_PE(bus, all); 2310 if (!pe) 2311 return; 2312 2313 pnv_ioda_setup_pe_seg(pe); 2314 } 2315 2316 static resource_size_t pnv_pci_default_alignment(void) 2317 { 2318 return PAGE_SIZE; 2319 } 2320 2321 /* Prevent enabling devices for which we couldn't properly 2322 * assign a PE 2323 */ 2324 static bool pnv_pci_enable_device_hook(struct pci_dev *dev) 2325 { 2326 struct pci_dn *pdn; 2327 2328 pdn = pci_get_pdn(dev); 2329 if (!pdn || pdn->pe_number == IODA_INVALID_PE) { 2330 pci_err(dev, "pci_enable_device() blocked, no PE assigned.\n"); 2331 return false; 2332 } 2333 2334 return true; 2335 } 2336 2337 static bool pnv_ocapi_enable_device_hook(struct pci_dev *dev) 2338 { 2339 struct pci_dn *pdn; 2340 struct pnv_ioda_pe *pe; 2341 2342 pdn = pci_get_pdn(dev); 2343 if (!pdn) 2344 return false; 2345 2346 if (pdn->pe_number == IODA_INVALID_PE) { 2347 pe = pnv_ioda_setup_dev_PE(dev); 2348 if (!pe) 2349 return false; 2350 } 2351 return true; 2352 } 2353 2354 void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe) 2355 { 2356 struct iommu_table *tbl = pe->table_group.tables[0]; 2357 int64_t rc; 2358 2359 if (!pe->dma_setup_done) 2360 return; 2361 2362 rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0); 2363 if (rc) 2364 pe_warn(pe, "OPAL error %lld release DMA window\n", rc); 2365 2366 pnv_pci_ioda2_set_bypass(pe, false); 2367 if (pe->table_group.group) { 2368 iommu_group_put(pe->table_group.group); 2369 WARN_ON(pe->table_group.group); 2370 } 2371 2372 iommu_tce_table_put(tbl); 2373 } 2374 2375 static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe, 2376 unsigned short win, 2377 unsigned int *map) 2378 { 2379 struct pnv_phb *phb = pe->phb; 2380 int idx; 2381 int64_t rc; 2382 2383 for (idx = 0; idx < phb->ioda.total_pe_num; idx++) { 2384 if (map[idx] != pe->pe_number) 2385 continue; 2386 2387 rc = opal_pci_map_pe_mmio_window(phb->opal_id, 2388 phb->ioda.reserved_pe_idx, win, 0, idx); 2389 2390 if (rc != OPAL_SUCCESS) 2391 pe_warn(pe, "Error %lld unmapping (%d) segment#%d\n", 2392 rc, win, idx); 2393 2394 map[idx] = IODA_INVALID_PE; 2395 } 2396 } 2397 2398 static void pnv_ioda_release_pe_seg(struct pnv_ioda_pe *pe) 2399 { 2400 struct pnv_phb *phb = pe->phb; 2401 2402 if (phb->type == PNV_PHB_IODA2) { 2403 pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE, 2404 phb->ioda.m32_segmap); 2405 } 2406 } 2407 2408 static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe) 2409 { 2410 struct pnv_phb *phb = pe->phb; 2411 struct pnv_ioda_pe *slave, *tmp; 2412 2413 pe_info(pe, "Releasing PE\n"); 2414 2415 mutex_lock(&phb->ioda.pe_list_mutex); 2416 list_del(&pe->list); 2417 mutex_unlock(&phb->ioda.pe_list_mutex); 2418 2419 switch (phb->type) { 2420 case PNV_PHB_IODA2: 2421 pnv_pci_ioda2_release_pe_dma(pe); 2422 break; 2423 case PNV_PHB_NPU_OCAPI: 2424 break; 2425 default: 2426 WARN_ON(1); 2427 } 2428 2429 pnv_ioda_release_pe_seg(pe); 2430 pnv_ioda_deconfigure_pe(pe->phb, pe); 2431 2432 /* Release slave PEs in the compound PE */ 2433 if (pe->flags & PNV_IODA_PE_MASTER) { 2434 list_for_each_entry_safe(slave, tmp, &pe->slaves, list) { 2435 list_del(&slave->list); 2436 pnv_ioda_free_pe(slave); 2437 } 2438 } 2439 2440 /* 2441 * The PE for root bus can be removed because of hotplug in EEH 2442 * recovery for fenced PHB error. We need to mark the PE dead so 2443 * that it can be populated again in PCI hot add path. The PE 2444 * shouldn't be destroyed as it's the global reserved resource. 2445 */ 2446 if (phb->ioda.root_pe_idx == pe->pe_number) 2447 return; 2448 2449 pnv_ioda_free_pe(pe); 2450 } 2451 2452 static void pnv_pci_release_device(struct pci_dev *pdev) 2453 { 2454 struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); 2455 struct pci_dn *pdn = pci_get_pdn(pdev); 2456 struct pnv_ioda_pe *pe; 2457 2458 /* The VF PE state is torn down when sriov_disable() is called */ 2459 if (pdev->is_virtfn) 2460 return; 2461 2462 if (!pdn || pdn->pe_number == IODA_INVALID_PE) 2463 return; 2464 2465 #ifdef CONFIG_PCI_IOV 2466 /* 2467 * FIXME: Try move this to sriov_disable(). It's here since we allocate 2468 * the iov state at probe time since we need to fiddle with the IOV 2469 * resources. 2470 */ 2471 if (pdev->is_physfn) 2472 kfree(pdev->dev.archdata.iov_data); 2473 #endif 2474 2475 /* 2476 * PCI hotplug can happen as part of EEH error recovery. The @pdn 2477 * isn't removed and added afterwards in this scenario. We should 2478 * set the PE number in @pdn to an invalid one. Otherwise, the PE's 2479 * device count is decreased on removing devices while failing to 2480 * be increased on adding devices. It leads to unbalanced PE's device 2481 * count and eventually make normal PCI hotplug path broken. 2482 */ 2483 pe = &phb->ioda.pe_array[pdn->pe_number]; 2484 pdn->pe_number = IODA_INVALID_PE; 2485 2486 WARN_ON(--pe->device_count < 0); 2487 if (pe->device_count == 0) 2488 pnv_ioda_release_pe(pe); 2489 } 2490 2491 static void pnv_pci_ioda_shutdown(struct pci_controller *hose) 2492 { 2493 struct pnv_phb *phb = hose->private_data; 2494 2495 opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_IODA_TABLE, 2496 OPAL_ASSERT_RESET); 2497 } 2498 2499 static void pnv_pci_ioda_dma_bus_setup(struct pci_bus *bus) 2500 { 2501 struct pnv_phb *phb = pci_bus_to_pnvhb(bus); 2502 struct pnv_ioda_pe *pe; 2503 2504 list_for_each_entry(pe, &phb->ioda.pe_list, list) { 2505 if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))) 2506 continue; 2507 2508 if (!pe->pbus) 2509 continue; 2510 2511 if (bus->number == ((pe->rid >> 8) & 0xFF)) { 2512 pe->pbus = bus; 2513 break; 2514 } 2515 } 2516 } 2517 2518 #ifdef CONFIG_IOMMU_API 2519 static struct iommu_group *pnv_pci_device_group(struct pci_controller *hose, 2520 struct pci_dev *pdev) 2521 { 2522 struct pnv_phb *phb = hose->private_data; 2523 struct pnv_ioda_pe *pe; 2524 2525 if (WARN_ON(!phb)) 2526 return ERR_PTR(-ENODEV); 2527 2528 pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8)); 2529 if (!pe) 2530 return ERR_PTR(-ENODEV); 2531 2532 if (!pe->table_group.group) 2533 return ERR_PTR(-ENODEV); 2534 2535 return iommu_group_ref_get(pe->table_group.group); 2536 } 2537 #endif 2538 2539 static const struct pci_controller_ops pnv_pci_ioda_controller_ops = { 2540 .dma_dev_setup = pnv_pci_ioda_dma_dev_setup, 2541 .dma_bus_setup = pnv_pci_ioda_dma_bus_setup, 2542 .iommu_bypass_supported = pnv_pci_ioda_iommu_bypass_supported, 2543 .enable_device_hook = pnv_pci_enable_device_hook, 2544 .release_device = pnv_pci_release_device, 2545 .window_alignment = pnv_pci_window_alignment, 2546 .setup_bridge = pnv_pci_fixup_bridge_resources, 2547 .reset_secondary_bus = pnv_pci_reset_secondary_bus, 2548 .shutdown = pnv_pci_ioda_shutdown, 2549 #ifdef CONFIG_IOMMU_API 2550 .device_group = pnv_pci_device_group, 2551 #endif 2552 }; 2553 2554 static const struct pci_controller_ops pnv_npu_ocapi_ioda_controller_ops = { 2555 .enable_device_hook = pnv_ocapi_enable_device_hook, 2556 .release_device = pnv_pci_release_device, 2557 .window_alignment = pnv_pci_window_alignment, 2558 .reset_secondary_bus = pnv_pci_reset_secondary_bus, 2559 .shutdown = pnv_pci_ioda_shutdown, 2560 }; 2561 2562 static void __init pnv_pci_init_ioda_phb(struct device_node *np, 2563 u64 hub_id, int ioda_type) 2564 { 2565 struct pci_controller *hose; 2566 struct pnv_phb *phb; 2567 unsigned long size, m64map_off, m32map_off, pemap_off; 2568 struct pnv_ioda_pe *root_pe; 2569 struct resource r; 2570 const __be64 *prop64; 2571 const __be32 *prop32; 2572 int len; 2573 unsigned int segno; 2574 u64 phb_id; 2575 void *aux; 2576 long rc; 2577 2578 if (!of_device_is_available(np)) 2579 return; 2580 2581 pr_info("Initializing %s PHB (%pOF)\n", pnv_phb_names[ioda_type], np); 2582 2583 prop64 = of_get_property(np, "ibm,opal-phbid", NULL); 2584 if (!prop64) { 2585 pr_err(" Missing \"ibm,opal-phbid\" property !\n"); 2586 return; 2587 } 2588 phb_id = be64_to_cpup(prop64); 2589 pr_debug(" PHB-ID : 0x%016llx\n", phb_id); 2590 2591 phb = kzalloc(sizeof(*phb), GFP_KERNEL); 2592 if (!phb) 2593 panic("%s: Failed to allocate %zu bytes\n", __func__, 2594 sizeof(*phb)); 2595 2596 /* Allocate PCI controller */ 2597 phb->hose = hose = pcibios_alloc_controller(np); 2598 if (!phb->hose) { 2599 pr_err(" Can't allocate PCI controller for %pOF\n", 2600 np); 2601 memblock_free(phb, sizeof(struct pnv_phb)); 2602 return; 2603 } 2604 2605 spin_lock_init(&phb->lock); 2606 prop32 = of_get_property(np, "bus-range", &len); 2607 if (prop32 && len == 8) { 2608 hose->first_busno = be32_to_cpu(prop32[0]); 2609 hose->last_busno = be32_to_cpu(prop32[1]); 2610 } else { 2611 pr_warn(" Broken <bus-range> on %pOF\n", np); 2612 hose->first_busno = 0; 2613 hose->last_busno = 0xff; 2614 } 2615 hose->private_data = phb; 2616 phb->hub_id = hub_id; 2617 phb->opal_id = phb_id; 2618 phb->type = ioda_type; 2619 mutex_init(&phb->ioda.pe_alloc_mutex); 2620 2621 /* Detect specific models for error handling */ 2622 if (of_device_is_compatible(np, "ibm,p7ioc-pciex")) 2623 phb->model = PNV_PHB_MODEL_P7IOC; 2624 else if (of_device_is_compatible(np, "ibm,power8-pciex")) 2625 phb->model = PNV_PHB_MODEL_PHB3; 2626 else 2627 phb->model = PNV_PHB_MODEL_UNKNOWN; 2628 2629 /* Initialize diagnostic data buffer */ 2630 prop32 = of_get_property(np, "ibm,phb-diag-data-size", NULL); 2631 if (prop32) 2632 phb->diag_data_size = be32_to_cpup(prop32); 2633 else 2634 phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE; 2635 2636 phb->diag_data = kzalloc(phb->diag_data_size, GFP_KERNEL); 2637 if (!phb->diag_data) 2638 panic("%s: Failed to allocate %u bytes\n", __func__, 2639 phb->diag_data_size); 2640 2641 /* Parse 32-bit and IO ranges (if any) */ 2642 pci_process_bridge_OF_ranges(hose, np, !hose->global_number); 2643 2644 /* Get registers */ 2645 if (!of_address_to_resource(np, 0, &r)) { 2646 phb->regs_phys = r.start; 2647 phb->regs = ioremap(r.start, resource_size(&r)); 2648 if (phb->regs == NULL) 2649 pr_err(" Failed to map registers !\n"); 2650 } 2651 2652 /* Initialize more IODA stuff */ 2653 phb->ioda.total_pe_num = 1; 2654 prop32 = of_get_property(np, "ibm,opal-num-pes", NULL); 2655 if (prop32) 2656 phb->ioda.total_pe_num = be32_to_cpup(prop32); 2657 prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL); 2658 if (prop32) 2659 phb->ioda.reserved_pe_idx = be32_to_cpup(prop32); 2660 2661 /* Invalidate RID to PE# mapping */ 2662 for (segno = 0; segno < ARRAY_SIZE(phb->ioda.pe_rmap); segno++) 2663 phb->ioda.pe_rmap[segno] = IODA_INVALID_PE; 2664 2665 /* Parse 64-bit MMIO range */ 2666 pnv_ioda_parse_m64_window(phb); 2667 2668 phb->ioda.m32_size = resource_size(&hose->mem_resources[0]); 2669 /* FW Has already off top 64k of M32 space (MSI space) */ 2670 phb->ioda.m32_size += 0x10000; 2671 2672 phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe_num; 2673 phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0]; 2674 phb->ioda.io_size = hose->pci_io_size; 2675 phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe_num; 2676 phb->ioda.io_pci_base = 0; /* XXX calculate this ? */ 2677 2678 /* Allocate aux data & arrays. We don't have IO ports on PHB3 */ 2679 size = ALIGN(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8, 2680 sizeof(unsigned long)); 2681 m64map_off = size; 2682 size += phb->ioda.total_pe_num * sizeof(phb->ioda.m64_segmap[0]); 2683 m32map_off = size; 2684 size += phb->ioda.total_pe_num * sizeof(phb->ioda.m32_segmap[0]); 2685 pemap_off = size; 2686 size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe); 2687 aux = kzalloc(size, GFP_KERNEL); 2688 if (!aux) 2689 panic("%s: Failed to allocate %lu bytes\n", __func__, size); 2690 2691 phb->ioda.pe_alloc = aux; 2692 phb->ioda.m64_segmap = aux + m64map_off; 2693 phb->ioda.m32_segmap = aux + m32map_off; 2694 for (segno = 0; segno < phb->ioda.total_pe_num; segno++) { 2695 phb->ioda.m64_segmap[segno] = IODA_INVALID_PE; 2696 phb->ioda.m32_segmap[segno] = IODA_INVALID_PE; 2697 } 2698 phb->ioda.pe_array = aux + pemap_off; 2699 2700 /* 2701 * Choose PE number for root bus, which shouldn't have 2702 * M64 resources consumed by its child devices. To pick 2703 * the PE number adjacent to the reserved one if possible. 2704 */ 2705 pnv_ioda_reserve_pe(phb, phb->ioda.reserved_pe_idx); 2706 if (phb->ioda.reserved_pe_idx == 0) { 2707 phb->ioda.root_pe_idx = 1; 2708 pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx); 2709 } else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1)) { 2710 phb->ioda.root_pe_idx = phb->ioda.reserved_pe_idx - 1; 2711 pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx); 2712 } else { 2713 /* otherwise just allocate one */ 2714 root_pe = pnv_ioda_alloc_pe(phb, 1); 2715 phb->ioda.root_pe_idx = root_pe->pe_number; 2716 } 2717 2718 INIT_LIST_HEAD(&phb->ioda.pe_list); 2719 mutex_init(&phb->ioda.pe_list_mutex); 2720 2721 #if 0 /* We should really do that ... */ 2722 rc = opal_pci_set_phb_mem_window(opal->phb_id, 2723 window_type, 2724 window_num, 2725 starting_real_address, 2726 starting_pci_address, 2727 segment_size); 2728 #endif 2729 2730 pr_info(" %03d (%03d) PE's M32: 0x%x [segment=0x%x]\n", 2731 phb->ioda.total_pe_num, phb->ioda.reserved_pe_idx, 2732 phb->ioda.m32_size, phb->ioda.m32_segsize); 2733 if (phb->ioda.m64_size) 2734 pr_info(" M64: 0x%lx [segment=0x%lx]\n", 2735 phb->ioda.m64_size, phb->ioda.m64_segsize); 2736 if (phb->ioda.io_size) 2737 pr_info(" IO: 0x%x [segment=0x%x]\n", 2738 phb->ioda.io_size, phb->ioda.io_segsize); 2739 2740 2741 phb->hose->ops = &pnv_pci_ops; 2742 phb->get_pe_state = pnv_ioda_get_pe_state; 2743 phb->freeze_pe = pnv_ioda_freeze_pe; 2744 phb->unfreeze_pe = pnv_ioda_unfreeze_pe; 2745 2746 /* Setup MSI support */ 2747 pnv_pci_init_ioda_msis(phb); 2748 2749 /* 2750 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here 2751 * to let the PCI core do resource assignment. It's supposed 2752 * that the PCI core will do correct I/O and MMIO alignment 2753 * for the P2P bridge bars so that each PCI bus (excluding 2754 * the child P2P bridges) can form individual PE. 2755 */ 2756 ppc_md.pcibios_fixup = pnv_pci_ioda_fixup; 2757 2758 switch (phb->type) { 2759 case PNV_PHB_NPU_OCAPI: 2760 hose->controller_ops = pnv_npu_ocapi_ioda_controller_ops; 2761 break; 2762 default: 2763 hose->controller_ops = pnv_pci_ioda_controller_ops; 2764 } 2765 2766 ppc_md.pcibios_default_alignment = pnv_pci_default_alignment; 2767 2768 #ifdef CONFIG_PCI_IOV 2769 ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov; 2770 ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment; 2771 ppc_md.pcibios_sriov_enable = pnv_pcibios_sriov_enable; 2772 ppc_md.pcibios_sriov_disable = pnv_pcibios_sriov_disable; 2773 #endif 2774 2775 pci_add_flags(PCI_REASSIGN_ALL_RSRC); 2776 2777 /* Reset IODA tables to a clean state */ 2778 rc = opal_pci_reset(phb_id, OPAL_RESET_PCI_IODA_TABLE, OPAL_ASSERT_RESET); 2779 if (rc) 2780 pr_warn(" OPAL Error %ld performing IODA table reset !\n", rc); 2781 2782 /* 2783 * If we're running in kdump kernel, the previous kernel never 2784 * shutdown PCI devices correctly. We already got IODA table 2785 * cleaned out. So we have to issue PHB reset to stop all PCI 2786 * transactions from previous kernel. The ppc_pci_reset_phbs 2787 * kernel parameter will force this reset too. Additionally, 2788 * if the IODA reset above failed then use a bigger hammer. 2789 * This can happen if we get a PHB fatal error in very early 2790 * boot. 2791 */ 2792 if (is_kdump_kernel() || pci_reset_phbs || rc) { 2793 pr_info(" Issue PHB reset ...\n"); 2794 pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL); 2795 pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE); 2796 } 2797 2798 /* Remove M64 resource if we can't configure it successfully */ 2799 if (!phb->init_m64 || phb->init_m64(phb)) 2800 hose->mem_resources[1].flags = 0; 2801 2802 /* create pci_dn's for DT nodes under this PHB */ 2803 pci_devs_phb_init_dynamic(hose); 2804 } 2805 2806 void __init pnv_pci_init_ioda2_phb(struct device_node *np) 2807 { 2808 pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2); 2809 } 2810 2811 void __init pnv_pci_init_npu2_opencapi_phb(struct device_node *np) 2812 { 2813 pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU_OCAPI); 2814 } 2815 2816 static void pnv_npu2_opencapi_cfg_size_fixup(struct pci_dev *dev) 2817 { 2818 struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus); 2819 2820 if (!machine_is(powernv)) 2821 return; 2822 2823 if (phb->type == PNV_PHB_NPU_OCAPI) 2824 dev->cfg_size = PCI_CFG_SPACE_EXP_SIZE; 2825 } 2826 DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pnv_npu2_opencapi_cfg_size_fixup); 2827